Leaked source code of Windows Server 2003: miia64.h

  1. /*++
  2. Copyright (c) 1990 Microsoft Corporation
  3. Copyright (c) 1995 Intel Corporation
  4. Module Name:
  5. miia64.h
  6. Abstract:
  7. This module contains the private data structures and procedure
  8. prototypes for the hardware dependent portion of the
  9. memory management system.
  10. This module is specifically tailored for the IA64.
  11. Author:
  12. Lou Perazzoli (loup) 6-Jan-1990
  13. Landy Wang (landyw) 2-June-1997
  14. Koichi Yamada (kyamada) 9-Jan-1996
  15. Revision History:
  16. --*/
  17. /*++
  18. Virtual Memory Layout on IA64 is:
  19. +------------------------------------+
  20. 0000000000000000 | User mode addresses - 7tb - 16gb | UADDRESS_BASE
  21. | |
  22. | |
  23. 000006FBFFFEFFFF | | MM_HIGHEST_USER_ADDRESS
  24. +------------------------------------+
  25. 000006FBFFFF0000 | 64k No Access Region | MM_USER_PROBE_ADDRESS
  26. +------------------------------------+
  27. 000006FC00000000 | Alternate 4K-page mappings | ALT4KB_BASE
  28. | for x86 process emulation |
  29. | |
  30. | Spans 8mb to allow for 4gb VA space| ALT4KB_END
  31. +------------------------------------+
  32. 000006FC00800000 | HyperSpace - working set lists | HYPER_SPACE
  33. | and per process memory management |
  34. | structures mapped in this 16gb |
  35. 000006FFFFFFFFFF | region. | HYPER_SPACE_END
  36. +------------------------------------+
  37. 0000070000000000 | |
  38. | Page table selfmapping structures |
  39. 000007FFFFFFFFFF | |
  40. +------------------------------------+
  41. .
  42. .
  43. +------------------------------------+
  44. 1FFFFF0000000000 | 8gb leaf level page table map | PTE_UBASE
  45. | for user space |
  46. 1FFFFF01FFFFFFFF | | PTE_UTOP
  47. +------------------------------------+
  48. +------------------------------------+
  49. 1FFFFFFFC0000000 | 8mb page directory (2nd level) | PDE_UBASE
  50. | table map for user space |
  51. 1FFFFFFFC07FFFFF | | PDE_UTOP
  52. +------------------------------------+
  53. +------------------------------------+
  54. 1FFFFFFFFFF00000 | 8KB parent directory (1st level) | PDE_UTBASE
  55. +------------------------------------+
  56. .
  57. .
  58. +------------------------------------+
  59. 2000000000000000 | 1. Win32k.sys | MM_SESSION_SPACE_DEFAULT
  60. | 2. Hydra - 8gb |
  61. | and per session memory management |
  62. | structures mapped in this 8gb |
  63. | region. |
  64. +------------------------------------+
  65. .
  66. +------------------------------------+
  67. 3FFFFF0000000000 | 8gb leaf level page table map | PTE_SBASE
  68. | for session space |
  69. 3FFFFF01FFFFFFFF | | PTE_STOP
  70. +------------------------------------+
  71. +------------------------------------+
  72. 3FFFFFFFC0000000 | 8mb page directory (2nd level) | PDE_SBASE
  73. | table map for session space |
  74. 3FFFFFFFC07FFFFF | | PDE_STOP
  75. +------------------------------------+
  76. +------------------------------------+
  77. 3FFFFFFFFFF00000 | 8KB parent directory (1st level) | PDE_STBASE
  78. +------------------------------------+
  79. .
  80. +------------------------------------+
  81. 8000000000000000 | physical addressable memory | KSEG3_BASE
  82. | for 44-bit of address space |
  83. 80000FFFFFFFFFFF | mapped by VHPT 64KB page | KSEG3_LIMIT
  84. +------------------------------------+
  85. .
  86. +------------------------------------+
  87. 9FFFFF0000000000 | vhpt 64kb page for KSEG3 space |
  88. | (not used) |
  89. +------------------------------------+
  90. .
  91. .
  92. +------------------------------------+ MM_SYSTEM_RANGE_START
  93. E000000000000000 | | KADDRESS_BASE
  94. +------------------------------------+
  95. E000000080000000 | The HAL, kernel, initial drivers, | KSEG0_BASE
  96. | NLS data, and registry load in the |
  97. | first 16mb of this region which |
  98. | physically addresses memory. |
  99. | |
  100. | Kernel mode access only. |
  101. | |
  102. | Initial NonPaged Pool is within |
  103. | KSEG0 |
  104. | | KSEG2_BASE
  105. +------------------------------------+
  106. E0000000FF000000 | Shared system page | KI_USER_SHARED_DATA
  107. +------------------------------------+
  108. E0000000FF002000 | Reserved for the HAL. |
  109. | |
  110. | |
  111. E0000000FFFFFFFF | |
  112. +------------------------------------+
  113. .
  114. .
  115. +------------------------------------+
  116. E000000200000000 | |
  117. | |
  118. | |
  119. | |
  120. +------------------------------------+
  121. E000000400000000 | The system cache working set | MM_SYSTEM_CACHE_WORKING_SET
  122. | | MM_SYSTEM_SPACE_START
  123. | information resides in this 8gb |
  124. | region. |
  125. +------------------------------------+
  126. E000000600000000 | System cache resides here. | MM_SYSTEM_CACHE_START
  127. | Kernel mode access only. |
  128. | 1tb. |
  129. +------------------------------------+
  130. E000010600000000 | Start of paged system area. | MM_PAGED_POOL_START
  131. | Kernel mode access only. |
  132. | 128gb. |
  133. +------------------------------------+
  134. | System mapped views start just |
  135. | after paged pool. Default is |
  136. | 104MB, can be registry-overridden. |
  137. | 8GB maximum. |
  138. +------------------------------------+
  139. | |
  140. .
  141. .
  142. In general, the next two areas (system PTE pool and nonpaged pool) will both
  143. be shifted upwards to conserve a PPE...
  144. .
  145. .
  146. +------------------------------------+
  147. E000012600000000 | System PTE pool. | MM_LOWEST_NONPAGED_SYSTEM_START
  148. | Kernel mode access only. |
  149. | 128gb. |
  150. +------------------------------------+
  151. | PFN Database. |
  152. +------------------------------------+
  153. E000014600000000 | NonPaged pool. | MM_NON_PAGED_POOL_START
  154. | Kernel mode access only. |
  155. | 128gb. |
  156. | |
  157. E0000165FFFFFFFF | NonPaged System area | MM_NONPAGED_POOL_END
  158. +------------------------------------+
  159. .
  160. .
  161. E000060000000000 +------------------------------------+ MM_SYSTEM_SPACE_END
  162. .
  163. .
  164. +------------------------------------+
  165. FFFFFF0000000000 | 8gb leaf level page table map | PTE_KBASE
  166. | for kernel space |
  167. FFFFFF01FFFFFFFF | | PTE_KTOP
  168. +------------------------------------+
  169. +------------------------------------+
  170. FFFFFFFFC0000000 | 8mb page directory (2nd level) | PDE_KBASE
  171. | table map for kernel space |
  172. FFFFFFFFC07FFFFF | | PDE_KTOP
  173. +------------------------------------+
  174. +------------------------------------+
  175. FFFFFFFFFFF00000 | 8KB parent directory (1st level) | PDE_KTBASE
  176. +------------------------------------+
  177. --*/
  178. #define _MI_PAGING_LEVELS 3
  179. #define _MI_MORE_THAN_4GB_ 1
  180. #define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_IA64
  181. #define _MIALT4K_ 1
  182. //
  183. // Define empty list markers.
  184. //
  185. #define MM_EMPTY_LIST ((ULONG_PTR)-1) //
  186. #define MM_EMPTY_PTE_LIST ((ULONG)0xFFFFFFFF) // N.B. tied to MMPTE definition
  187. #define MI_PTE_BASE_FOR_LOWEST_KERNEL_ADDRESS ((PMMPTE)PTE_KBASE)
  188. //
  189. // Define the session PTE base.
  190. //
  191. #define MI_PTE_BASE_FOR_LOWEST_SESSION_ADDRESS ((PMMPTE)PTE_SBASE)
  192. //
  193. // 43-Bit virtual address mask.
  194. //
  195. #define MASK_43 0x7FFFFFFFFFFUI64 //
  196. //
  197. // 44-Bit Physical address mask.
  198. //
  199. #define MASK_44 0xFFFFFFFFFFFUI64
  200. #define MM_PAGES_IN_KSEG0 ((ULONG)((KSEG2_BASE - KSEG0_BASE) >> PAGE_SHIFT))
  201. #define MM_USER_ADDRESS_RANGE_LIMIT (0xFFFFFFFFFFFFFFFFUI64) // user address range limit
  202. #define MM_MAXIMUM_ZERO_BITS 53 // maximum number of zero bits
  203. //
  204. // PAGE_SIZE for IA64 is 8k; the virtual page number is (64 - PAGE_SHIFT)
  205. // bits with a PAGE_SHIFT-bit byte offset within the page.
  206. //
  207. #define MM_VIRTUAL_PAGE_FILLER (PAGE_SHIFT - 12)
  208. #define MM_VIRTUAL_PAGE_SIZE (64-PAGE_SHIFT)
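//
// N.B. A worked instance of the two definitions above, assuming PAGE_SHIFT
// is 13 for the 8k page size used here:
//
//     MM_VIRTUAL_PAGE_FILLER == 13 - 12 == 1
//     MM_VIRTUAL_PAGE_SIZE   == 64 - 13 == 51
//
// i.e. a virtual address splits into a 51-bit virtual page number and a
// 13-bit byte offset within the page.
//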
  209. //
  210. // Address space layout definitions.
  211. //
  212. #define CODE_START KSEG0_BASE
  213. #define CODE_END KSEG2_BASE
  214. #define MM_SYSTEM_SPACE_START (KADDRESS_BASE + 0x400000000UI64)
  215. #define MM_SYSTEM_SPACE_END (KADDRESS_BASE + 0x60000000000UI64)
  216. #define PDE_TOP PDE_UTOP
  217. #define PTE_TOP PTE_UTOP
  218. //
  219. // Define Alternate 4KB permission table space for X86 emulation mappings.
  220. //
  221. #define ALT4KB_PERMISSION_TABLE_START ((PVOID)(UADDRESS_BASE + 0x6FC00000000))
  222. #define ALT4KB_PERMISSION_TABLE_END ((PVOID)(UADDRESS_BASE + 0x6FC00800000))
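//
// N.B. The 8mb span of this table is consistent with one 8-byte alternate
// entry per 4k page of the 4gb x86 address space (an assumption here, but
// it matches the "Spans 8mb to allow for 4gb VA space" note in the layout
// diagram above):
//
//     (4gb / 4k) * 8 bytes == 0x100000 * 8 == 0x800000 == 8mb
//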
  223. // #define _MI_DEBUG_ALTPTE 1 // Enable this to get ALTPTE logging
  224. VOID
  225. MiLogPteInAltTrace (
  226. IN PVOID NativeInformation
  227. );
  228. //
  229. // Define hyper space.
  230. //
  231. #define HYPER_SPACE ((PVOID)(UADDRESS_BASE + 0x6FC00800000))
  232. #define HYPER_SPACE_END ((PVOID)(UADDRESS_BASE + 0x6FFFFFFFFFF))
  233. //
  234. // Define area for mapping views into system space.
  235. //
  236. #define MM_SYSTEM_VIEW_SIZE (104*1024*1024)
  237. //
  238. // Hydra lives in region 1.
  239. //
  240. #define MM_SESSION_SPACE_DEFAULT (0x2000000000000000UI64)
  241. #define MM_SESSION_SPACE_DEFAULT_END (0x2000000200000000UI64)
  242. //
  243. // Define the start and maximum size for the system cache.
  244. //
  245. #define MM_SYSTEM_CACHE_WORKING_SET (KADDRESS_BASE + 0x400000000UI64)
  246. #define MM_SYSTEM_CACHE_START (KADDRESS_BASE + 0x600000000UI64)
  247. #define MM_SYSTEM_CACHE_END (KADDRESS_BASE + 0x10600000000UI64)
  248. #define MM_MAXIMUM_SYSTEM_CACHE_SIZE \
  249. (((ULONG_PTR)MM_SYSTEM_CACHE_END - (ULONG_PTR)MM_SYSTEM_CACHE_START) >> PAGE_SHIFT)
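//
// N.B. With the constants above, the system cache virtual range is
//
//     MM_SYSTEM_CACHE_END - MM_SYSTEM_CACHE_START == 0x10000000000 == 1tb
//
// so MM_MAXIMUM_SYSTEM_CACHE_SIZE works out to 1tb >> PAGE_SHIFT, i.e.
// 0x8000000 pages with 8k pages, matching the "1tb" note in the layout
// diagram.
//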
  250. #define MM_PAGED_POOL_START ((PVOID) MM_SYSTEM_CACHE_END)
  251. #define MM_LOWEST_NONPAGED_SYSTEM_START ((PVOID)(KADDRESS_BASE + 0x12600000000UI64))
  252. #define MmProtopte_Base (KADDRESS_BASE)
  253. #define MM_NONPAGED_POOL_END ((PVOID)(KADDRESS_BASE + 0x16600000000UI64)) // 16mb aligned.
  254. #define MM_CRASH_DUMP_VA ((PVOID)(KADDRESS_BASE + 0xFF800000))
  255. // EPC VA at 0xFFA00000 (see ntia64.h)
  256. #define MM_DEBUG_VA ((PVOID)(KADDRESS_BASE + 0xFF900000))
  257. #define NON_PAGED_SYSTEM_END (KADDRESS_BASE + 0x16600000000UI64) //quadword aligned.
  258. extern ULONG MiMaximumSystemCacheSize;
  259. //
  260. // Define absolute minimum and maximum count for system ptes.
  261. //
  262. #define MM_MINIMUM_SYSTEM_PTES 7000
  263. #define MM_MAXIMUM_SYSTEM_PTES (16*1024*1024)
  264. #define MM_DEFAULT_SYSTEM_PTES 11000
  265. //
  266. // Pool limits
  267. //
  268. //
  269. // The maximum amount of nonpaged pool that can be initially created.
  270. //
  271. #define MM_MAX_INITIAL_NONPAGED_POOL ((SIZE_T)(128 * 1024 * 1024))
  272. //
  273. // The total amount of nonpaged pool (initial pool + expansion + system PTEs).
  274. //
  275. #define MM_MAX_ADDITIONAL_NONPAGED_POOL (((SIZE_T)128 * 1024 * 1024 * 1024))
  276. //
  277. // The maximum amount of paged pool that can be created.
  278. //
  279. #define MM_MAX_PAGED_POOL ((SIZE_T)128 * 1024 * 1024 * 1024)
  280. //
  281. // Define the maximum default for pool (user specified 0 in registry).
  282. //
  283. #define MM_MAX_DEFAULT_NONPAGED_POOL ((SIZE_T)8 * 1024 * 1024 * 1024)
  284. //
  285. // Structure layout definitions.
  286. //
  287. #define MM_PROTO_PTE_ALIGNMENT ((ULONG)PAGE_SIZE)
  288. //
  289. // Define the address bits mapped by PPE and PDE entries.
  290. //
  291. // A PPE entry maps 10+10+13 = 33 bits of address space.
  292. // A PDE entry maps 10+13 = 23 bits of address space.
  293. //
  294. #define PAGE_DIRECTORY1_MASK (((ULONG_PTR)1 << PDI1_SHIFT) - 1)
  295. #define PAGE_DIRECTORY2_MASK (((ULONG_PTR)1 << PDI_SHIFT) -1)
  296. #define MM_VA_MAPPED_BY_PDE ((ULONG_PTR)1 << PDI_SHIFT)
  297. #define MM_VA_MAPPED_BY_PPE ((ULONG_PTR)1 << PDI1_SHIFT)
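//
// N.B. A quick check of the spans above, assuming 8-byte PTEs and
// PAGE_SHIFT == 13 (so each table page holds 1024 entries):
//
//     MM_VA_MAPPED_BY_PDE == 1 << 23 == 8mb   (1024 PTEs * 8k per page)
//     MM_VA_MAPPED_BY_PPE == 1 << 33 == 8gb   (1024 PDEs * 8mb each)
//
// which is why the layout diagram shows an 8mb page directory map and an
// 8gb leaf level page table map per region. An illustrative compile-time
// sketch of the same arithmetic could read:
//
//     C_ASSERT (MM_VA_MAPPED_BY_PDE == 0x800000);
//     C_ASSERT (MM_VA_MAPPED_BY_PPE == 0x200000000UI64);
//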
  298. #define LOWEST_IO_ADDRESS 0xa0000
  299. //
  300. // IA64 supports page sizes of 4k, 8k, 16k, 64k, 256k,
  301. // 1mb, 4mb, 16mb, 64mb & 256mb.
  302. //
  303. #define MM_MINIMUM_VA_FOR_LARGE_PAGE (2 * MM_VA_MAPPED_BY_PDE)
  304. //
  305. // The number of bits in a physical address.
  306. //
  307. #define PHYSICAL_ADDRESS_BITS 44
  308. #define MM_MAXIMUM_NUMBER_OF_COLORS (1)
  309. //
  310. // IA64 does not require support for colored pages.
  311. //
  312. #define MM_NUMBER_OF_COLORS (1)
  313. //
  314. // Mask for obtaining color from a physical page number.
  315. //
  316. #define MM_COLOR_MASK (0)
  317. //
  318. // Boundary on which pages of like color are aligned.
  319. //
  320. #define MM_COLOR_ALIGNMENT (0)
  321. //
  322. // Mask for isolating color from virtual address.
  323. //
  324. #define MM_COLOR_MASK_VIRTUAL (0)
  325. //
  326. // Define the default number of secondary colors (64 pages, i.e. 512k with 8k pages).
  327. //
  328. #define MM_SECONDARY_COLORS_DEFAULT (64)
  329. #define MM_SECONDARY_COLORS_MIN (2)
  330. #define MM_SECONDARY_COLORS_MAX (1024)
  331. //
  332. // Maximum number of paging files.
  333. //
  334. #define MAX_PAGE_FILES 16
  335. //
  336. // Hyper space definitions.
  337. //
  338. #define FIRST_MAPPING_PTE ((PMMPTE)HYPER_SPACE)
  339. #define NUMBER_OF_MAPPING_PTES 253
  340. #define LAST_MAPPING_PTE \
  341. ((PVOID)((ULONG_PTR)FIRST_MAPPING_PTE + (NUMBER_OF_MAPPING_PTES * PAGE_SIZE)))
  342. #define COMPRESSION_MAPPING_PTE ((PMMPTE)((ULONG_PTR)LAST_MAPPING_PTE + PAGE_SIZE))
  343. #define IMAGE_MAPPING_PTE ((PMMPTE)((ULONG_PTR)COMPRESSION_MAPPING_PTE + PAGE_SIZE))
  344. #define NUMBER_OF_ZEROING_PTES 32
  345. #define VAD_BITMAP_SPACE ((PVOID)((ULONG_PTR)IMAGE_MAPPING_PTE + PAGE_SIZE))
  346. #define WORKING_SET_LIST ((PVOID)((ULONG_PTR)VAD_BITMAP_SPACE + PAGE_SIZE))
  347. #define MM_MAXIMUM_WORKING_SET (((ULONG_PTR)(HYPER_SPACE)) >> PAGE_SHIFT)
  348. #define MmWorkingSetList ((PMMWSL)WORKING_SET_LIST)
  349. #define MmWsle ((PMMWSLE)((PUCHAR)WORKING_SET_LIST + sizeof(MMWSL)))
  350. #define MM_WORKING_SET_END (UADDRESS_BASE + 0x3FFFFFFFFFFUI64)
  351. //
  352. // Define memory attributes fields within PTE.
  353. //
  354. #define MM_PTE_TB_MA_WB (0x0 << 2) // cacheable, write-back
  355. #define MM_PTE_TB_MA_UC (0x4 << 2) // uncacheable
  356. #define MM_PTE_TB_MA_UCE (0x5 << 2) // uncacheable, exporting fetchadd
  357. #define MM_PTE_TB_MA_WC (0x6 << 2) // uncacheable, coalescing
  358. #define MM_PTE_TB_MA_NATPAGE (0x7 << 2) // Nat Page
  359. //
  360. // Define masks for the PTE cache attributes.
  361. //
  362. #define MM_PTE_CACHE_ENABLED 0 // WB
  363. #define MM_PTE_CACHE_RESERVED 1 // special encoding to cause a TLB miss
  364. #define MM_PTE_CACHE_DISABLED 4 // UC
  365. #define MM_PTE_CACHE_DISPLAY 6 // WC
  366. #define MM_PTE_CACHE_NATPAGE 7 // Nat Page
  367. //
  368. // Define masks for fields within the PTE.
  369. //
  370. #define MM_PTE_OWNER_MASK 0x0180
  371. #define MM_PTE_VALID_MASK 1
  372. #define MM_PTE_CACHE_DISABLE_MASK MM_PTE_TB_MA_UC
  373. #define MM_PTE_ACCESS_MASK 0x0020
  374. #define MM_PTE_DIRTY_MASK 0x0040
  375. #define MM_PTE_EXECUTE_MASK 0x0200
  376. #define MM_PTE_WRITE_MASK 0x0400
  377. #define MM_PTE_LARGE_PAGE_MASK 0
  378. #define MM_PTE_COPY_ON_WRITE_MASK ((ULONG)1 << (PAGE_SHIFT-1))
  379. #define MM_PTE_PROTOTYPE_MASK 0x0002
  380. #define MM_PTE_TRANSITION_MASK 0x0080
  381. //
  382. // Bit fields to or into PTE to make a PTE valid based on the
  383. // protection field of the invalid PTE.
  384. //
  385. #define MM_PTE_NOACCESS 0x0
  386. #define MM_PTE_READONLY 0x0
  387. #define MM_PTE_READWRITE MM_PTE_WRITE_MASK
  388. #define MM_PTE_WRITECOPY MM_PTE_COPY_ON_WRITE_MASK
  389. #define MM_PTE_EXECUTE MM_PTE_EXECUTE_MASK
  390. #define MM_PTE_EXECUTE_READ MM_PTE_EXECUTE_MASK
  391. #define MM_PTE_EXECUTE_READWRITE (MM_PTE_EXECUTE_MASK | MM_PTE_WRITE_MASK)
  392. #define MM_PTE_EXECUTE_WRITECOPY (MM_PTE_EXECUTE_MASK | MM_PTE_COPY_ON_WRITE_MASK)
  393. #define MM_PTE_GUARD 0x0
  394. #define MM_PTE_CACHE MM_PTE_TB_MA_WB
  395. #define MM_PTE_NOCACHE MM_PTE_CACHE // PAGE_NOCACHE is cached
  396. #define MM_PTE_EXC_DEFER 0x10000000000000 // defer exception
  397. #define MM_PROTECT_FIELD_SHIFT 2
  398. //
  399. // Define masks for fields within the IA64 TB entry.
  400. //
  401. #define MM_PTE_TB_VALID 0x0001
  402. #define MM_PTE_TB_ACCESSED 0x0020
  403. #define MM_PTE_TB_MODIFIED 0x0040
  404. #define MM_PTE_TB_WRITE 0x0400
  405. #define MM_PTE_TB_EXECUTE 0x0200 // read/execute
  406. #define MM_PTE_TB_EXC_DEFER 0x10000000000000 // defer exception
  407. //
  408. // Define the number of VHPT pages.
  409. //
  410. #define MM_VHPT_PAGES 32
  411. //
  412. // Bits available for the software working set index within the hardware PTE.
  413. //
  414. #define MI_MAXIMUM_PTE_WORKING_SET_INDEX (1 << _HARDWARE_PTE_WORKING_SET_BITS)
  415. //
  416. // Zero PTE.
  417. //
  418. #define MM_ZERO_PTE 0
  419. //
  420. // Zero Kernel PTE.
  421. //
  422. #define MM_ZERO_KERNEL_PTE 0
  423. //
  424. // A demand zero PTE with a protection of PAGE_READWRITE.
  425. //
  426. #define MM_DEMAND_ZERO_WRITE_PTE ((ULONGLONG)MM_READWRITE << MM_PROTECT_FIELD_SHIFT)
  427. //
  428. // A demand zero PTE with a protection of PAGE_READWRITE for system space.
  429. //
  430. #define MM_KERNEL_DEMAND_ZERO_PTE ((ULONGLONG)MM_READWRITE << MM_PROTECT_FIELD_SHIFT)
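//
// N.B. With MM_PROTECT_FIELD_SHIFT == 2 (defined above) and the mi.h
// protection value MM_READWRITE == 4 (assumed here), both demand zero
// encodings above evaluate to
//
//     4 << 2 == 0x10
//
// i.e. only the software protection field is set; the Valid (0x1),
// Prototype (0x2) and Transition (0x80) bits are all clear, which is what
// identifies the PTE as demand zero to the page fault handler.
//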
  431. //
  432. // A no access PTE for system space.
  433. //
  434. #define MM_KERNEL_NOACCESS_PTE ((ULONGLONG)MM_NOACCESS << MM_PROTECT_FIELD_SHIFT)
  435. //
  436. // Kernel stack alignment requirements.
  437. //
  438. #define MM_STACK_ALIGNMENT 0x0
  439. #define MM_STACK_OFFSET 0x0
  440. #define PDE_PER_PAGE ((ULONG)(PAGE_SIZE/(1 << PTE_SHIFT)))
  441. #define PTE_PER_PAGE ((ULONG)(PAGE_SIZE/(1 << PTE_SHIFT)))
  442. #define PTE_PER_PAGE_BITS 11 // This handles the case where the page is full
  443. #if PTE_PER_PAGE_BITS > 32
  444. error - too many bits to fit into MMPTE_SOFTWARE or MMPFN.u1
  445. #endif
  446. //
  447. // Number of page table pages for user addresses.
  448. //
  449. #define MM_USER_PAGE_TABLE_PAGES ((ULONG_PTR)MI_SYSTEM_RANGE_START / (PTE_PER_PAGE * PAGE_SIZE))
  450. #define MM_USER_PAGE_DIRECTORY_PAGES ((ULONG_PTR)MI_SYSTEM_RANGE_START / ((ULONG_PTR)PDE_PER_PAGE * PTE_PER_PAGE * PAGE_SIZE))
  451. //++
  452. //VOID
  453. //MI_MAKE_VALID_PTE (
  454. // OUT OUTPTE,
  455. // IN FRAME,
  456. // IN PMASK,
  457. // IN PPTE
  458. // );
  459. //
  460. // Routine Description:
  461. //
  462. // This macro makes a valid PTE from a page frame number, protection mask,
  463. // and owner.
  464. //
  465. // Arguments:
  466. //
  467. // OUTPTE - Supplies the PTE in which to build the transition PTE.
  468. //
  469. // FRAME - Supplies the page frame number for the PTE.
  470. //
  471. // PMASK - Supplies the protection to set in the transition PTE.
  472. //
  473. // PPTE - Supplies a pointer to the PTE which is being made valid.
  474. // For prototype PTEs NULL should be specified.
  475. //
  476. // Return Value:
  477. //
  478. // None.
  479. //
  480. //--
  481. #define _ALTPERM_BITMAP_MASK ((_4gb - 1) >> PTI_SHIFT)
  482. #if defined(_MIALT4K_)
  483. extern PVOID MiMaxWow64Pte;
  484. #define MI_SET_VALID_PTE_BITS(OUTPTE,PMASK,PPTE) { \
  485. PWOW64_PROCESS _Wow64Process; \
  486. if ((PPTE >= (PMMPTE)PTE_UBASE) && (PPTE < (PMMPTE)MiMaxWow64Pte)) { \
  487. _Wow64Process = PsGetCurrentProcess()->Wow64Process; \
  488. if ((_Wow64Process != NULL) && (PPTE < MmWorkingSetList->HighestUserPte)) { \
  489. if (MI_CHECK_BIT(_Wow64Process->AltPermBitmap, \
  490. ((ULONG_PTR)PPTE >> PTE_SHIFT) & _ALTPERM_BITMAP_MASK) != 0) { \
  491. (OUTPTE).u.Long |= (MmProtectToPteMaskForSplit[PMASK]); \
  492. } \
  493. else { \
  494. (OUTPTE).u.Long |= (MmProtectToPteMaskForIA32[PMASK]); \
  495. (OUTPTE).u.Hard.Accessed = 1; \
  496. } \
  497. } \
  498. else { \
  499. (OUTPTE).u.Hard.Accessed = 1; \
  500. (OUTPTE).u.Long |= (MmProtectToPteMask[PMASK]); \
  501. } \
  502. } \
  503. else { \
  504. (OUTPTE).u.Hard.Accessed = 1; \
  505. (OUTPTE).u.Long |= (MmProtectToPteMask[PMASK]); \
  506. } \
  507. }
  508. #else
  509. #define MI_SET_VALID_PTE_BITS(OUTPTE,PMASK,PPTE) { \
  510. (OUTPTE).u.Hard.Accessed = 1; \
  511. (OUTPTE).u.Long |= (MmProtectToPteMask[PMASK]); \
  512. }
  513. #endif
  514. #define MI_MAKE_VALID_PTE(OUTPTE,FRAME,PMASK,PPTE) \
  515. (OUTPTE).u.Long = 0; \
  516. (OUTPTE).u.Hard.Valid = 1; \
  517. (OUTPTE).u.Hard.Cache = MM_PTE_CACHE_ENABLED; \
  518. (OUTPTE).u.Hard.Exception = 1; \
  519. (OUTPTE).u.Hard.PageFrameNumber = FRAME; \
  520. (OUTPTE).u.Hard.Owner = MI_DETERMINE_OWNER(PPTE); \
  521. MI_SET_VALID_PTE_BITS(OUTPTE,PMASK,PPTE)
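//
// N.B. A minimal usage sketch (PageFrameIndex and PointerPte are
// placeholder names): the valid PTE is composed in a local MMPTE and only
// then published to the real PTE, so a partially built entry is never
// visible:
//
//     MMPTE TempPte;
//
//     MI_MAKE_VALID_PTE (TempPte,
//                        PageFrameIndex,
//                        MM_READWRITE,
//                        PointerPte);
//
//     MI_WRITE_VALID_PTE (PointerPte, TempPte);
//
// where MI_WRITE_VALID_PTE is the PTE store macro defined elsewhere in mi.h.
//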
  522. //++
  523. //VOID
  524. //MI_MAKE_VALID_PTE_TRANSITION (
  525. // IN OUT OUTPTE
  526. // IN PROTECT
  527. // );
  528. //
  529. // Routine Description:
  530. //
  531. // This macro takes a valid PTE and turns it into a transition PTE.
  532. //
  533. // Arguments:
  534. //
  535. // OUTPTE - Supplies the current valid PTE. This PTE is then
  536. // modified into a transition PTE.
  537. //
  538. // PROTECT - Supplies the protection to set in the transition PTE.
  539. //
  540. // Return Value:
  541. //
  542. // None.
  543. //
  544. //--
  545. #if defined(_MIALT4K_)
  546. #define MI_MAKE_VALID_PTE_TRANSITION(OUTPTE,PROTECT) \
  547. if ((OUTPTE).u.Hard.Cache == MM_PTE_CACHE_RESERVED) { \
  548. (OUTPTE).u.Trans.SplitPermissions = 1; \
  549. } \
  550. else { \
  551. (OUTPTE).u.Trans.SplitPermissions = 0; \
  552. } \
  553. (OUTPTE).u.Soft.Transition = 1; \
  554. (OUTPTE).u.Soft.Valid = 0; \
  555. (OUTPTE).u.Soft.Prototype = 0; \
  556. (OUTPTE).u.Soft.Protection = PROTECT;
  557. #else
  558. #define MI_MAKE_VALID_PTE_TRANSITION(OUTPTE,PROTECT) \
  559. (OUTPTE).u.Soft.Transition = 1; \
  560. (OUTPTE).u.Soft.Valid = 0; \
  561. (OUTPTE).u.Soft.Prototype = 0; \
  562. (OUTPTE).u.Soft.Protection = PROTECT;
  563. #endif
  564. //++
  565. //VOID
  566. //MI_MAKE_TRANSITION_PTE (
  567. // OUT OUTPTE,
  568. // IN PAGE,
  569. // IN PROTECT,
  570. // IN PPTE
  571. // );
  572. //
  573. // Routine Description:
  574. //
  575. // This macro constructs a transition PTE.
  576. //
  577. // Arguments:
  578. //
  579. // OUTPTE - Supplies the PTE in which to build the transition PTE.
  580. //
  581. // PAGE - Supplies the page frame number for the PTE.
  582. //
  583. // PROTECT - Supplies the protection to set in the transition PTE.
  584. //
  585. // PPTE - Supplies a pointer to the PTE, this is used to determine
  586. // the split permissions (if any) of the PTE.
  587. //
  588. // Return Value:
  589. //
  590. // None.
  591. //
  592. //--
  593. #define MI_MAKE_TRANSITION_PTE(OUTPTE,PAGE,PROTECT,PPTE) \
  594. (OUTPTE).u.Long = 0; \
  595. (OUTPTE).u.Trans.PageFrameNumber = PAGE; \
  596. (OUTPTE).u.Trans.Transition = 1; \
  597. ASSERT (PPTE->u.Hard.Valid == 0); \
  598. (OUTPTE).u.Trans.SplitPermissions = PPTE->u.Soft.SplitPermissions; \
  599. (OUTPTE).u.Trans.Protection = PROTECT;
  600. //++
  601. //VOID
  602. //MI_MAKE_TRANSITION_PTE_VALID (
  603. // OUT OUTPTE,
  604. // IN PPTE
  605. // );
  606. //
  607. // Routine Description:
  608. //
  609. // This macro takes a transition PTE and makes it a valid PTE.
  610. //
  611. // Arguments:
  612. //
  613. // OUTPTE - Supplies the PTE in which to build the valid PTE.
  614. //
  615. // PPTE - Supplies a pointer to the transition PTE.
  616. //
  617. // Return Value:
  618. //
  619. // None.
  620. //
  621. //--
  622. #define MI_MAKE_TRANSITION_PTE_VALID(OUTPTE,PPTE) { \
  623. ASSERT (((PPTE)->u.Hard.Valid == 0) && \
  624. ((PPTE)->u.Trans.Prototype == 0) && \
  625. ((PPTE)->u.Trans.Transition == 1)); \
  626. (OUTPTE).u.Long = (PPTE)->u.Long & 0x1FFFFFFFE000; \
  627. (OUTPTE).u.Hard.Valid = 1; \
  628. ASSERT (PPTE->u.Hard.Valid == 0); \
  629. if (PPTE->u.Trans.SplitPermissions == 0) { \
  630. (OUTPTE).u.Hard.Cache = MM_PTE_CACHE_ENABLED; \
  631. } \
  632. else { \
  633. (OUTPTE).u.Hard.Cache = MM_PTE_CACHE_RESERVED; \
  634. } \
  635. (OUTPTE).u.Hard.Exception = 1; \
  636. (OUTPTE).u.Hard.Owner = MI_DETERMINE_OWNER(PPTE); \
  637. MI_SET_VALID_PTE_BITS(OUTPTE,(PPTE)->u.Trans.Protection,PPTE) \
  638. }
  639. #define MI_MAKE_TRANSITION_PROTOPTE_VALID(OUTPTE,PPTE) \
  640. MI_MAKE_TRANSITION_PTE_VALID(OUTPTE,PPTE)
  641. #define MI_FAULT_STATUS_INDICATES_EXECUTION(_FaultStatus) (_FaultStatus & 0x2)
  642. #define MI_FAULT_STATUS_INDICATES_WRITE(_FaultStatus) (_FaultStatus & 0x1)
  643. #define MI_CLEAR_FAULT_STATUS(_FaultStatus) (_FaultStatus = 0)
  644. #define MI_IS_PTE_EXECUTABLE(_TempPte) ((_TempPte)->u.Hard.Execute != 0)
  645. //++
  646. //VOID
  647. //MI_SET_PTE_IN_WORKING_SET (
  648. // OUT PMMPTE PTE,
  649. // IN ULONG WSINDEX
  650. // );
  651. //
  652. // Routine Description:
  653. //
  654. // This macro inserts the specified working set index into the argument PTE.
  655. //
  656. // No TB invalidation is needed for other processors (or this one) even
  657. // though the entry may already be in a TB - it's just a software field
  658. // update and doesn't affect miss resolution.
  659. //
  660. // Arguments
  661. //
  662. // PTE - Supplies the PTE in which to insert the working set index.
  663. //
  664. // WSINDEX - Supplies the working set index for the PTE.
  665. //
  666. // Return Value:
  667. //
  668. // None.
  669. //
  670. //--
  671. #define MI_SET_PTE_IN_WORKING_SET(PTE, WSINDEX) { \
  672. MMPTE _TempPte; \
  673. _TempPte = *(PTE); \
  674. _TempPte.u.Hard.SoftwareWsIndex = (WSINDEX); \
  675. *(PTE) = _TempPte; \
  676. }
  677. //++
  678. //ULONG WsIndex
  679. //MI_GET_WORKING_SET_FROM_PTE(
  680. // IN PMMPTE PTE
  681. // );
  682. //
  683. // Routine Description:
  684. //
  685. // This macro returns the working set index from the argument PTE.
  686. //
  687. // Arguments
  688. //
  689. // PTE - Supplies the PTE to extract the working set index from.
  690. //
  691. // Return Value:
  692. //
  693. // This macro returns the working set index for the argument PTE.
  694. //
  695. //--
  696. #define MI_GET_WORKING_SET_FROM_PTE(PTE) (ULONG)(PTE)->u.Hard.SoftwareWsIndex
  697. extern BOOLEAN MiWriteCombiningPtes;
  698. //++
  699. //VOID
  700. //MI_SET_PTE_WRITE_COMBINE (
  701. // IN MMPTE PTE
  702. // );
  703. //
  704. // Routine Description:
  705. //
  706. // This macro sets the write combined bit(s) in the specified PTE.
  707. //
  708. // Arguments
  709. //
  710. // PTE - Supplies the PTE to set dirty.
  711. //
  712. // Return Value:
  713. //
  714. // None.
  715. //
  716. //--
  717. #define MI_SET_PTE_WRITE_COMBINE(PTE) \
  718. ((PTE).u.Hard.Cache = MM_PTE_CACHE_DISABLED)
  719. #define MI_SET_PTE_WRITE_COMBINE2(PTE) \
  720. if (MiWriteCombiningPtes == TRUE) { \
  721. (PTE).u.Hard.Cache = MM_PTE_CACHE_DISPLAY; \
  722. } \
  723. else { \
  724. (PTE).u.Hard.Cache = MM_PTE_CACHE_DISABLED; \
  725. }
  726. #define MI_SET_LARGE_PTE_WRITE_COMBINE(PTE) \
  727. ASSERT ((PTE).u.Hard.Cache == MM_PTE_CACHE_RESERVED); \
  728. ((PTE).u.Hard.SoftwareWsIndex = MM_PTE_CACHE_DISPLAY);
  729. //++
  730. //VOID
  731. //MI_PREPARE_FOR_NONCACHED (
  732. // IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
  733. // );
  734. //
  735. // Routine Description:
  736. //
  737. // This macro prepares the system prior to noncached PTEs being created.
  738. //
  739. // Note the entire TB must be flushed on all processors because there may
  740. // be stale system PTE (or hyperspace or zeropage) mappings in the TB which
  741. // may refer to the same physical page but with a different cache attribute.
  742. //
  743. // Arguments
  744. //
  745. // CacheAttribute - Supplies the cache attribute the PTEs will be filled
  746. // with.
  747. //
  748. // Return Value:
  749. //
  750. // None.
  751. //
  752. //--
  753. #define MI_PREPARE_FOR_NONCACHED(_CacheAttribute) \
  754. if (_CacheAttribute != MiCached) { \
  755. KeFlushEntireTb (FALSE, TRUE); \
  756. }
  757. //++
  758. //VOID
  759. //MI_SWEEP_CACHE (
  760. // IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
  761. // IN PVOID StartVa,
  762. // IN ULONG NumberOfBytes
  763. // );
  764. //
  765. // Routine Description:
  766. //
  767. // This macro prepares the system prior to noncached PTEs being created.
  768. //
  769. // Arguments
  770. //
  771. // CacheAttribute - Supplies the cache attribute the PTEs were filled with.
  772. //
  773. // StartVa - Supplies the starting address that's been mapped.
  774. //
  775. // NumberOfBytes - Supplies the number of bytes that have been mapped.
  776. //
  777. // Return Value:
  778. //
  779. // None.
  780. //
  781. //--
  782. #define MI_SWEEP_CACHE(_CacheAttribute,_StartVa,_NumberOfBytes) \
  783. if (_CacheAttribute != MiCached) { \
  784. MiSweepCacheMachineDependent (_StartVa, \
  785. _NumberOfBytes, \
  786. (ULONG)(_CacheAttribute)); \
  787. }
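//
// N.B. Taken together with MI_PREPARE_FOR_NONCACHED above, the intended
// order for creating noncached mappings is (a sketch, with placeholder
// names):
//
//     MI_PREPARE_FOR_NONCACHED (CacheAttribute);      // flush the TB first
//
//     // ... build and write the PTEs with the noncached attribute ...
//
//     MI_SWEEP_CACHE (CacheAttribute, BaseVa, NumberOfBytes);
//
// so that no stale cached translation or cache line can alias the newly
// noncached physical pages.
//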
  788. //++
  789. //VOID
  790. //MI_SET_PTE_DIRTY (
  791. // IN MMPTE PTE
  792. // );
  793. //
  794. // Routine Description:
  795. //
  796. // This macro sets the dirty bit(s) in the specified PTE.
  797. //
  798. // Arguments:
  799. //
  800. // PTE - Supplies the PTE to set dirty.
  801. //
  802. // Return Value:
  803. //
  804. // None.
  805. //
  806. //--
  807. #define MI_SET_PTE_DIRTY(PTE) (PTE).u.Hard.Dirty = 1
  808. //++
  809. //VOID
  810. //MI_SET_PTE_CLEAN (
  811. // IN MMPTE PTE
  812. // );
  813. //
  814. // Routine Description:
  815. //
  816. // This macro clears the dirty bit(s) in the specified PTE.
  817. //
  818. // Arguments:
  819. //
  820. // PTE - Supplies the PTE to set clear.
  821. //
  822. // Return Value:
  823. //
  824. // None.
  825. //
  826. //--
  827. #define MI_SET_PTE_CLEAN(PTE) (PTE).u.Hard.Dirty = 0
  828. //++
  829. //VOID
  830. //MI_IS_PTE_DIRTY (
  831. // IN MMPTE PTE
  832. // );
  833. //
  834. // Routine Description:
  835. //
  836. // This macro checks the dirty bit(s) in the specified PTE.
  837. //
  838. // Arguments:
  839. //
  840. // PTE - Supplies the PTE to check.
  841. //
  842. // Return Value:
  843. //
  844. // TRUE if the page is dirty (modified), FALSE otherwise.
  845. //
  846. //--
  847. #define MI_IS_PTE_DIRTY(PTE) ((PTE).u.Hard.Dirty != 0)
  848. //++
  849. //VOID
  850. //MI_SET_GLOBAL_BIT_IF_SYSTEM (
  851. // OUT OUTPTE,
  852. // IN PPTE
  853. // );
  854. //
  855. // Routine Description:
  856. //
  857. // This macro sets the global bit if the pointer PTE is within
  858. // system space.
  859. //
  860. // Arguments:
  861. //
  862. // OUTPTE - Supplies the PTE in which to build the valid PTE.
  863. //
  864. // PPTE - Supplies a pointer to the PTE becoming valid.
  865. //
  866. // Return Value:
  867. //
  868. // None.
  869. //
  870. //--
  871. #define MI_SET_GLOBAL_BIT_IF_SYSTEM(OUTPTE,PPTE)
  872. //++
  873. //VOID
  874. //MI_SET_GLOBAL_STATE (
  875. // IN MMPTE PTE,
  876. // IN ULONG STATE
  877. // );
  878. //
  879. // Routine Description:
  880. //
  881. // This macro sets the global bit in the PTE to the specified state.
  882. //
  883. // Arguments:
  884. //
  885. // PTE - Supplies the PTE to set global state into.
  886. //
  887. // STATE - Supplies 1 if global, 0 if not.
  888. //
  889. // Return Value:
  890. //
  891. // None.
  892. //
  893. //--
  894. #define MI_SET_GLOBAL_STATE(PTE,STATE)
  895. //++
  896. //VOID
  897. //MI_ENABLE_CACHING (
  898. // IN MMPTE PTE
  899. // );
  900. //
  901. // Routine Description:
  902. //
  903. // This macro takes a valid PTE and sets the caching state to be
  904. // enabled.
  905. //
  906. // Arguments:
  907. //
  908. // PTE - Supplies a valid PTE.
  909. //
  910. // Return Value:
  911. //
  912. // None.
  913. //
  914. //--
  915. #define MI_ENABLE_CACHING(PTE) ((PTE).u.Hard.Cache = MM_PTE_CACHE_ENABLED)
  916. //++
  917. //VOID
  918. //MI_DISABLE_CACHING (
  919. // IN MMPTE PTE
  920. // );
  921. //
  922. // Routine Description:
  923. //
  924. // This macro takes a valid PTE and sets the caching state to be
  925. // disabled.
  926. //
  927. // Arguments:
  928. //
  929. // PTE - Supplies a pointer to the valid PTE.
  930. //
  931. // Return Value:
  932. //
  933. // None.
  934. //
  935. //--
  936. #define MI_DISABLE_CACHING(PTE) ((PTE).u.Hard.Cache = MM_PTE_CACHE_DISABLED)
  937. #define MI_DISABLE_LARGE_PTE_CACHING(PTE) \
  938. ASSERT ((PTE).u.Hard.Cache == MM_PTE_CACHE_RESERVED); \
  939. ((PTE).u.Hard.SoftwareWsIndex = MM_PTE_CACHE_DISABLED);
  940. //++
  941. //BOOLEAN
  942. //MI_IS_CACHING_DISABLED (
  943. // IN PMMPTE PPTE
  944. // );
  945. //
  946. // Routine Description:
  947. //
  948. // This macro takes a valid PTE and returns TRUE if caching is
  949. // disabled.
  950. //
  951. // Arguments:
  952. //
  953. // PPTE - Supplies a pointer to the valid PTE.
  954. //
  955. // Return Value:
  956. //
  957. // TRUE if caching is disabled, FALSE if it is enabled.
  958. //
  959. //--
  960. #define MI_IS_CACHING_DISABLED(PPTE) \
  961. ((PPTE)->u.Hard.Cache == MM_PTE_CACHE_DISABLED)
  962. //++
  963. //VOID
  964. //MI_SET_PFN_DELETED (
  965. // IN PMMPFN PPFN
  966. // );
  967. //
  968. // Routine Description:
  969. //
  970. // This macro takes a pointer to a PFN element and indicates that
  971. // the PFN is no longer in use.
  972. //
  973. // Arguments:
  974. //
  975. // PPTE - Supplies a pointer to the PFN element.
  976. //
  977. // Return Value:
  978. //
  979. // none.
  980. //
  981. //--
  982. #define MI_SET_PFN_DELETED(PPFN) \
  983. (PPFN)->PteAddress = (PMMPTE)((ULONG_PTR)PPFN->PteAddress | 0x1);
  984. //++
  985. //VOID
  986. //MI_MARK_PFN_UNDELETED (
  987. // IN PMMPFN PPFN
  988. // );
  989. //
  990. // Routine Description:
  991. //
  992. // This macro takes a pointer to a deleted PFN element and marks the
  993. // PFN as no longer deleted.
  994. //
  995. // Arguments
  996. //
  997. // PPTE - Supplies a pointer to the PFN element.
  998. //
  999. // Return Value:
  1000. //
  1001. // none.
  1002. //
  1003. //--
  1004. #define MI_MARK_PFN_UNDELETED(PPFN) \
  1005. PPFN->PteAddress = (PMMPTE)((ULONG_PTR)PPFN->PteAddress & ~0x1);
  1006. //++
  1007. //BOOLEAN
  1008. //MI_IS_PFN_DELETED (
  1009. // IN PMMPFN PPFN
  1010. // );
  1011. //
  1012. // Routine Description:
  1013. //
  1014. // This macro takes a pointer to a PFN element and determines if
  1015. // the PFN is no longer in use.
  1016. //
  1017. // Arguments:
  1018. //
  1019. // PPTE - Supplies a pointer to the PFN element.
  1020. //
  1021. // Return Value:
  1022. //
  1023. // TRUE if PFN is no longer used, FALSE if it is still being used.
  1024. //
  1025. //--
  1026. #define MI_IS_PFN_DELETED(PPFN) \
  1027. ((ULONG_PTR)(PPFN)->PteAddress & 0x1)
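//
// N.B. The three PFN macros above keep the deleted flag in the low bit of
// PteAddress, which is otherwise always zero because PTEs are 8-byte
// aligned. A sketch of the lifecycle (Pfn1 is a placeholder PMMPFN):
//
//     MI_SET_PFN_DELETED (Pfn1);              // PteAddress |= 1
//     ASSERT (MI_IS_PFN_DELETED (Pfn1));
//
//     MI_MARK_PFN_UNDELETED (Pfn1);           // PteAddress &= ~1
//     ASSERT (!MI_IS_PFN_DELETED (Pfn1));
//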
  1028. //++
  1029. //VOID
  1030. //MI_CHECK_PAGE_ALIGNMENT (
  1031. // IN ULONG PAGE,
  1032. // IN PMMPTE PPTE
  1033. // );
  1034. //
  1035. // Routine Description:
  1036. //
  1037. // This macro takes a PFN element number (Page) and checks to see
  1038. // if the virtual alignment for the previous address of the page
  1039. // is compatible with the new address of the page. If they are
  1040. // not compatible, the D cache is flushed.
  1041. //
  1042. // Arguments:
  1043. //
  1044. // PAGE - Supplies the PFN element.
  1045. // PPTE - Supplies a pointer to the new PTE which will contain the page.
  1046. //
  1047. // Return Value:
  1048. //
  1049. // none.
  1050. //
  1051. //--
  1052. // does nothing on IA64.
  1053. #define MI_CHECK_PAGE_ALIGNMENT(PAGE,PPTE)
  1054. //++
  1055. //VOID
  1056. //MI_INITIALIZE_HYPERSPACE_MAP (
  1057. // VOID
  1058. // );
  1059. //
  1060. // Routine Description:
  1061. //
  1062. // This macro initializes the PTEs reserved for double mapping within
  1063. // hyperspace.
  1064. //
  1065. // Arguments:
  1066. //
  1067. // None.
  1068. //
  1069. // Return Value:
  1070. //
  1071. // None.
  1072. //
  1073. //--
  1074. // does nothing on IA64.
  1075. #define MI_INITIALIZE_HYPERSPACE_MAP(INDEX)
  1076. //++
  1077. //ULONG
  1078. //MI_GET_PAGE_COLOR_FROM_PTE (
  1079. // IN PMMPTE PTEADDRESS
  1080. // );
  1081. //
  1082. // Routine Description:
  1083. //
  1084. // This macro determines the page's color based on the PTE address
  1085. // that maps the page.
  1086. //
  1087. // Arguments:
  1088. //
  1089. // PTEADDRESS - Supplies the PTE address the page is (or was) mapped at.
  1090. //
  1091. // Return Value:
  1092. //
  1093. // The page's color.
  1094. //
  1095. //--
  1096. #define MI_GET_PAGE_COLOR_FROM_PTE(PTEADDRESS) \
  1097. (((ULONG)((MI_SYSTEM_PAGE_COLOR++) & MmSecondaryColorMask)) | MI_CURRENT_NODE_COLOR)
  1098. //++
  1099. //ULONG
  1100. //MI_GET_PAGE_COLOR_FROM_VA (
  1101. // IN PVOID ADDRESS
  1102. // );
  1103. //
  1104. // Routine Description:
  1105. //
  1106. // This macro determines the page's color based on the PTE address
  1107. // that maps the page.
  1108. //
  1109. // Arguments:
  1110. //
  1111. // ADDRESS - Supplies the address the page is (or was) mapped at.
  1112. //
  1113. // Return Value:
  1114. //
  1115. // The page's color.
  1116. //
  1117. //--
  1118. #define MI_GET_PAGE_COLOR_FROM_VA(ADDRESS) \
  1119. (((ULONG)((MI_SYSTEM_PAGE_COLOR++) & MmSecondaryColorMask)) | MI_CURRENT_NODE_COLOR)
  1120. //++
  1121. //ULONG
  1122. //MI_GET_PAGE_COLOR_FROM_SESSION (
  1123. // IN PMM_SESSION_SPACE SessionSpace
  1124. // );
  1125. //
  1126. // Routine Description:
  1127. //
  1128. // This macro determines the page's color based on the PTE address
  1129. // that maps the page.
  1130. //
  1131. // Arguments
  1132. //
  1133. // SessionSpace - Supplies the session space the page will be mapped into.
  1134. //
  1135. // Return Value:
  1136. //
  1137. // The page's color.
  1138. //
  1139. //--
  1140. #define MI_GET_PAGE_COLOR_FROM_SESSION(_SessionSpace) \
  1141. (((ULONG)((_SessionSpace->Color++) & MmSecondaryColorMask)) | MI_CURRENT_NODE_COLOR)
  1142. //++
  1143. //ULONG
  1144. //MI_PAGE_COLOR_PTE_PROCESS (
  1145. // IN PMMPTE PTE,
  1146. // IN PUSHORT COLOR
  1147. // );
  1148. //
  1149. // Routine Description:
  1150. //
  1151. // Select page color for this process.
  1152. //
  1153. // Arguments
  1154. //
  1155. // PTE Not used.
  1156. // COLOR Value from which color is determined. This
  1157. // variable is incremented.
  1158. //
  1159. // Return Value:
  1160. //
  1161. // Page color.
  1162. //
  1163. //--
  1164. #define MI_PAGE_COLOR_PTE_PROCESS(PTE,COLOR) \
  1165. (((ULONG)((*(COLOR))++) & MmSecondaryColorMask) | MI_CURRENT_NODE_COLOR)
  1166. //++
  1167. //ULONG
  1168. //MI_PAGE_COLOR_VA_PROCESS (
  1169. // IN PVOID ADDRESS,
  1170. // IN PEPROCESS COLOR
  1171. // );
  1172. //
  1173. // Routine Description:
  1174. //
  1175. // This macro determines the page's color based on the PTE address
  1176. // that maps the page.
  1177. //
  1178. // Arguments:
  1179. //
  1180. // ADDRESS - Supplies the address the page is (or was) mapped at.
  1181. //
  1182. // Return Value:
  1183. //
  1184. // The page's color.
  1185. //
  1186. //--
  1187. #define MI_PAGE_COLOR_VA_PROCESS(ADDRESS,COLOR) \
  1188. (((ULONG)((*(COLOR))++) & MmSecondaryColorMask) | MI_CURRENT_NODE_COLOR)
  1189. //++
  1190. //ULONG
  1191. //MI_GET_NEXT_COLOR (
  1192. // IN ULONG COLOR
  1193. // );
  1194. //
  1195. // Routine Description:
  1196. //
  1197. // This macro returns the next color in the sequence.
  1198. //
  1199. // Arguments:
  1200. //
  1201. // COLOR - Supplies the color to return the next of.
  1202. //
  1203. // Return Value:
  1204. //
  1205. // Next color in sequence.
  1206. //
  1207. //--
  1208. #define MI_GET_NEXT_COLOR(COLOR) ((COLOR + 1) & MM_COLOR_MASK)
  1209. //++
  1210. //ULONG
  1211. //MI_GET_PREVIOUS_COLOR (
  1212. // IN ULONG COLOR
  1213. // );
  1214. //
  1215. // Routine Description:
  1216. //
  1217. // This macro returns the previous color in the sequence.
  1218. //
  1219. // Arguments:
  1220. //
  1221. // COLOR - Supplies the color to return the previous of.
  1222. //
  1223. // Return Value:
  1224. //
  1225. // Previous color in sequence.
  1226. //
  1227. //--
  1228. #define MI_GET_PREVIOUS_COLOR(COLOR) (0)
  1229. #define MI_GET_SECONDARY_COLOR(PAGE,PFN) (PAGE & MmSecondaryColorMask)
  1230. #define MI_GET_COLOR_FROM_SECONDARY(SECONDARY_COLOR) (0)
  1231. //++
  1232. //VOID
  1233. //MI_GET_MODIFIED_PAGE_BY_COLOR (
  1234. // OUT ULONG PAGE,
  1235. // IN ULONG COLOR
  1236. // );
  1237. //
  1238. // Routine Description:
  1239. //
  1240. // This macro returns the first page destined for a paging
  1241. // file with the desired color. It does NOT remove the page
  1242. // from its list.
  1243. //
  1244. // Arguments:
  1245. //
  1246. // PAGE - Returns the page located, the value MM_EMPTY_LIST is
  1247. // returned if there is no page of the specified color.
  1248. //
  1249. // COLOR - Supplies the color of page to locate.
  1250. //
  1251. // Return Value:
  1252. //
  1253. // none.
  1254. //
  1255. //--
  1256. #define MI_GET_MODIFIED_PAGE_BY_COLOR(PAGE,COLOR) \
  1257. PAGE = MmModifiedPageListByColor[COLOR].Flink
  1258. //++
  1259. //VOID
  1260. //MI_GET_MODIFIED_PAGE_ANY_COLOR (
  1261. // OUT ULONG PAGE,
  1262. // IN OUT ULONG COLOR
  1263. // );
  1264. //
  1265. // Routine Description:
  1266. //
  1267. // This macro returns the first page destined for a paging
  1268. // file with the desired color. If no page of the desired
  1269. // color exists, all colored lists are searched for a page.
  1270. // It does NOT remove the page from its list.
  1271. //
  1272. // Arguments:
  1273. //
  1274. // PAGE - Returns the page located, the value MM_EMPTY_LIST is
  1275. // returned if there is no page of the specified color.
  1276. //
  1277. // COLOR - Supplies the color of page to locate and returns the
  1278. // color of the page located.
  1279. //
  1280. // Return Value:
  1281. //
  1282. // none.
  1283. //
  1284. //--
  1285. #define MI_GET_MODIFIED_PAGE_ANY_COLOR(PAGE,COLOR) \
  1286. { \
  1287. if (MmTotalPagesForPagingFile == 0) { \
  1288. PAGE = MM_EMPTY_LIST; \
  1289. } else { \
  1290. PAGE = MmModifiedPageListByColor[COLOR].Flink; \
  1291. } \
  1292. }
  1293. //++
  1294. //VOID
  1295. //MI_MAKE_VALID_PTE_WRITE_COPY (
  1296. // IN OUT PMMPTE PTE
  1297. // );
  1298. //
  1299. // Routine Description:
  1300. //
  1301. // This macro checks to see if the PTE indicates that the
  1302. // page is writable and if so it clears the write bit and
  1303. // sets the copy-on-write bit.
  1304. //
  1305. // Arguments:
  1306. //
  1307. // PTE - Supplies the PTE to operate upon.
  1308. //
  1309. // Return Value:
  1310. //
  1311. // None.
  1312. //
  1313. //--
  1314. #define MI_MAKE_VALID_PTE_WRITE_COPY(PPTE) \
  1315. if ((PPTE)->u.Hard.Write == 1) { \
  1316. (PPTE)->u.Hard.CopyOnWrite = 1; \
  1317. (PPTE)->u.Hard.Write = 0; \
  1318. }
  1319. #define MI_PTE_OWNER_USER 3
  1320. #define MI_PTE_OWNER_KERNEL 0
  1321. #if defined(_MIALT4K_)
  1322. #define MI_IS_ALT_PAGE_TABLE_ADDRESS(PPTE) \
  1323. (((PPTE) >= (PMMPTE)ALT4KB_PERMISSION_TABLE_START) && \
  1324. ((PPTE) < (PMMPTE)ALT4KB_PERMISSION_TABLE_END))
  1325. #else
  1326. #define MI_IS_ALT_PAGE_TABLE_ADDRESS(PPTE) 0
  1327. #endif
  1328. //++
  1329. //ULONG
  1330. //MI_DETERMINE_OWNER (
  1331. // IN MMPTE PPTE
  1332. // );
  1333. //
  1334. // Routine Description:
  1335. //
  1336. // This macro examines the virtual address of the PTE and determines
  1337. // if the PTE resides in system space or user space.
  1338. //
  1339. // Arguments:
  1340. //
  1341. // PTE - Supplies the PTE to operate upon.
  1342. //
  1343. // Return Value:
  1344. //
  1345. // 3 if the owner is USER_MODE, 0 if the owner is KERNEL_MODE.
  1346. //
  1347. //--
  1348. #define MI_DETERMINE_OWNER(PPTE) \
  1349. ((((((PPTE) >= (PMMPTE)PTE_UBASE) && ((PPTE) <= MiHighestUserPte))) || \
  1350. (MI_IS_ALT_PAGE_TABLE_ADDRESS(PPTE))) ? MI_PTE_OWNER_USER : MI_PTE_OWNER_KERNEL)
  1351. //++
  1352. //VOID
  1353. //MI_SET_ACCESSED_IN_PTE (
  1354. // IN OUT MMPTE PPTE,
  1355. // IN ULONG ACCESSED
  1356. // );
  1357. //
  1358. // Routine Description:
  1359. //
  1360. // This macro sets the ACCESSED field in the PTE. Note that this must
  1361. // not be cleared in PPEs or PDEs as they are not checked for this in
  1362. // memory management before referencing the hierarchy beneath them.
  1363. //
  1364. // Arguments:
  1365. //
  1366. // PTE - Supplies the PTE to operate upon.
  1367. //
  1368. // Return Value:
  1369. //
  1370. // None.
  1371. //
  1372. //--
  1373. #define MI_SET_ACCESSED_IN_PTE(PPTE,ACCESSED) { \
  1374. PWOW64_PROCESS _Wow64Process; \
  1375. if (ACCESSED == 0) { \
  1376. if (MI_IS_PTE_ADDRESS(PPTE)) { \
  1377. (PPTE)->u.Hard.Accessed = 0; \
  1378. } \
  1379. } \
  1380. else if ((PPTE >= (PMMPTE)PTE_UBASE) && (PPTE < (PMMPTE)MiMaxWow64Pte)) { \
  1381. _Wow64Process = PsGetCurrentProcess()->Wow64Process; \
  1382. if ((_Wow64Process != NULL) && (PPTE < MmWorkingSetList->HighestUserPte)) { \
  1383. if (MI_CHECK_BIT(_Wow64Process->AltPermBitmap, \
  1384. ((ULONG_PTR)PPTE >> PTE_SHIFT) & _ALTPERM_BITMAP_MASK) != 0) { \
  1385. NOTHING; \
  1386. } \
  1387. else { \
  1388. (PPTE)->u.Hard.Accessed = 1; \
  1389. } \
  1390. } \
  1391. else { \
  1392. (PPTE)->u.Hard.Accessed = 1; \
  1393. } \
  1394. } \
  1395. else { \
  1396. (PPTE)->u.Hard.Accessed = 1; \
  1397. } \
  1398. }
  1399. //++
  1400. //ULONG
  1401. //MI_GET_ACCESSED_IN_PTE (
  1402. // IN OUT MMPTE PPTE
  1403. // );
  1404. //
  1405. // Routine Description:
  1406. //
  1407. // This macro returns the state of the ACCESSED field in the PTE.
  1408. //
  1409. // Arguments:
  1410. //
  1411. // PTE - Supplies the PTE to operate upon.
  1412. //
  1413. // Return Value:
  1414. //
  1415. // The state of the ACCESSED field.
  1416. //
  1417. //--
  1418. #define MI_GET_ACCESSED_IN_PTE(PPTE) ((PPTE)->u.Hard.Accessed)
  1419. //++
  1420. //VOID
  1421. //MI_SET_OWNER_IN_PTE (
  1422. // IN PMMPTE PPTE
  1423. // IN ULONG OWNER
  1424. // );
  1425. //
  1426. // Routine Description:
  1427. //
  1428. // This macro sets the owner field in the PTE.
  1429. //
  1430. // Arguments:
  1431. //
  1432. // PTE - Supplies the PTE to operate upon.
  1433. //
  1434. // Return Value:
  1435. //
  1436. // None.
  1437. //
  1438. //--
  1439. #define MI_SET_OWNER_IN_PTE(PPTE,OWNER)
  1440. //++
  1441. //VOID
  1442. //MI_SET_PAGING_FILE_INFO (
  1443. // OUT MMPTE OUTPTE,
  1444. // IN MMPTE PPTE,
  1445. // IN ULONG FILEINFO,
  1446. // IN ULONG OFFSET
  1447. // );
  1448. //
  1449. // Routine Description:
  1450. //
  1451. // This macro sets into the specified PTE the supplied information
  1452. // to indicate where the backing store for the page is located.
  1453. //
  1454. // Arguments:
  1455. //
  1456. // OUTPTE - Supplies the PTE in which to store the result.
  1457. //
  1458. // PTE - Supplies the PTE to operate upon.
  1459. //
  1460. // FILEINFO - Supplies the number of the paging file.
  1461. //
  1462. // OFFSET - Supplies the offset into the paging file.
  1463. //
  1464. // Return Value:
  1465. //
  1466. // None.
  1467. //
  1468. //--
  1469. #define MI_SET_PAGING_FILE_INFO(OUTPTE,PTE,FILEINFO,OFFSET) \
  1470. (OUTPTE).u.Long = (((PTE).u.Soft.Protection << MM_PROTECT_FIELD_SHIFT) | \
  1471. ((ULONGLONG)(FILEINFO) << _MM_PAGING_FILE_LOW_SHIFT) | \
  1472. ((ULONGLONG)(OFFSET) << _MM_PAGING_FILE_HIGH_SHIFT));
  1473. //++
  1474. //PMMPTE
  1475. //MiPteToProto (
  1476. // IN OUT MMPTE PPTE,
  1477. // IN ULONG FILEINFO,
  1478. // IN ULONG OFFSET
  1479. // );
  1480. //
  1481. // Routine Description:
  1482. //
  1483. // This macro returns the address of the corresponding prototype which
  1484. // was encoded earlier into the supplied PTE.
  1485. //
  1486. // Arguments:
  1487. //
  1488. // lpte - Supplies the PTE to operate upon.
  1489. //
  1490. // Return Value:
  1491. //
  1492. // Pointer to the prototype PTE that backs this PTE.
  1493. //
  1494. //--
  1495. #define MiPteToProto(lpte) \
  1496. ((PMMPTE) ((ULONG_PTR)((lpte)->u.Proto.ProtoAddress) + MmProtopte_Base))
  1497. //++
  1498. //ULONG_PTR
  1499. //MiProtoAddressForPte (
  1500. // IN PMMPTE proto_va
  1501. // );
  1502. //
  1503. // Routine Description:
  1504. //
  1505. // This macro sets into the specified PTE the supplied information
  1506. // to indicate where the backing store for the page is located.
  1507. // MiProtoAddressForPte returns the bit field to OR into the PTE to
  1508. // reference a prototype PTE.
  1509. //
  1510. // And set the protoPTE MM_PTE_PROTOTYPE_MASK bit.
  1511. //
  1512. // Arguments:
  1513. //
  1514. // proto_va - Supplies the address of the prototype PTE.
  1515. //
  1516. // Return Value:
  1517. //
  1518. // Mask to set into the PTE.
  1519. //
  1520. //--
  1521. #define MiProtoAddressForPte(proto_va) \
  1522. (( (ULONGLONG)((ULONG_PTR)proto_va - MmProtopte_Base) << \
  1523. (_MM_PROTO_ADDRESS_SHIFT)) | MM_PTE_PROTOTYPE_MASK)
  1524. #define MISetProtoAddressForPte(PTE, proto_va) \
  1525. (PTE).u.Long = 0; \
  1526. (PTE).u.Proto.Prototype = 1; \
  1527. (PTE).u.Proto.ProtoAddress = (ULONG_PTR)proto_va - MmProtopte_Base;
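//
// N.B. MiProtoAddressForPte/MISetProtoAddressForPte and MiPteToProto are
// intended to be inverses: the prototype PTE address is stored relative to
// MmProtopte_Base and recovered by adding the base back. A sketch
// (ProtoPte is a placeholder prototype PTE address):
//
//     MMPTE TempPte;
//
//     MISetProtoAddressForPte (TempPte, ProtoPte);
//     ASSERT (TempPte.u.Hard.Valid == 0);
//     ASSERT (MiPteToProto (&TempPte) == ProtoPte);
//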
  1528. //++
  1529. //ULONG_PTR
  1530. //MiProtoAddressForKernelPte (
  1531. // IN PMMPTE proto_va
  1532. // );
  1533. //
  1534. // Routine Description:
  1535. //
  1536. // This macro sets into the specified PTE the supplied information
  1537. // to indicate where the backing store for the page is located.
  1538. // MiProtoAddressForPte returns the bit field to OR into the PTE to
  1539. // reference a prototype PTE. And set the protoPTE bit,
  1540. // MM_PTE_PROTOTYPE_MASK.
  1541. //
  1542. // This macro also sets any other information (such as global bits)
  1543. // required for kernel mode PTEs.
  1544. //
  1545. // Arguments:
  1546. //
  1547. // proto_va - Supplies the address of the prototype PTE.
  1548. //
  1549. // Return Value:
  1550. //
  1551. // Mask to set into the PTE.
  1552. //
  1553. //--
  1554. // not different on IA64.
  1555. #define MiProtoAddressForKernelPte(proto_va) MiProtoAddressForPte(proto_va)
  1556. #define MM_SUBSECTION_MAP (128*1024*1024)
  1557. //++
  1558. //PSUBSECTION
  1559. //MiGetSubsectionAddress (
  1560. // IN PMMPTE lpte
  1561. // );
  1562. //
  1563. // Routine Description:
  1564. //
  1565. // This macro takes a PTE and returns the address of the subsection that
  1566. // the PTE refers to. Subsections are quadword structures allocated
  1567. // from nonpaged pool.
  1568. //
  1569. // Arguments:
  1570. //
  1571. // lpte - Supplies the PTE to operate upon.
  1572. //
  1573. // Return Value:
  1574. //
  1575. // A pointer to the subsection referred to by the supplied PTE.
  1576. //
  1577. //--
  1578. #define MiGetSubsectionAddress(lpte) \
  1579. (((lpte)->u.Subsect.WhichPool == 1) ? \
  1580. ((PSUBSECTION)((ULONG_PTR)MmSubsectionBase + \
  1581. ((ULONG_PTR)(lpte)->u.Subsect.SubsectionAddress))) \
  1582. : \
  1583. ((PSUBSECTION)((ULONG_PTR)MM_NONPAGED_POOL_END - \
  1584. ((ULONG_PTR)(lpte)->u.Subsect.SubsectionAddress))))
  1585. //++
  1586. //ULONGLONG
  1587. //MiGetSubsectionAddressForPte (
  1588. // IN PSUBSECTION VA
  1589. // );
  1590. //
  1591. // Routine Description:
  1592. //
  1593. // This macro takes the address of a subsection and encodes it for use
  1594. // in a PTE.
  1595. //
  1596. // NOTE - THE SUBSECTION ADDRESS MUST BE QUADWORD ALIGNED!
  1597. //
  1598. // Arguments:
  1599. //
  1600. // VA - Supplies a pointer to the subsection to encode.
  1601. //
  1602. // Return Value:
  1603. //
  1604. // The mask to set into the PTE to make it reference the supplied
  1605. // subsection.
  1606. //
  1607. //--
  1608. #define MiGetSubsectionAddressForPte(VA) \
  1609. ( ((ULONG_PTR)(VA) < (ULONG_PTR)KSEG2_BASE) ? \
  1610. ( ((ULONGLONG)((ULONG_PTR)VA - (ULONG_PTR)MmSubsectionBase) \
  1611. << (_MM_PTE_SUBSECTION_ADDRESS_SHIFT)) | 0x80) \
  1612. : \
  1613. ((ULONGLONG)((ULONG_PTR)MM_NONPAGED_POOL_END - (ULONG_PTR)VA) \
  1614. << (_MM_PTE_SUBSECTION_ADDRESS_SHIFT)) )
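//
// N.B. MiGetSubsectionAddressForPte and MiGetSubsectionAddress form the
// matching encode/decode pair: subsection addresses below KSEG2_BASE are
// stored as an offset from MmSubsectionBase (with the WhichPool bit, the
// 0x80 above), all others as a downward offset from MM_NONPAGED_POOL_END.
// The intent is a round trip for any quadword-aligned subsection address
// (a sketch, Subsection is a placeholder; a real subsection PTE would also
// carry the prototype bit and protection):
//
//     MMPTE TempPte;
//
//     TempPte.u.Long = MiGetSubsectionAddressForPte (Subsection);
//     ASSERT (MiGetSubsectionAddress (&TempPte) == Subsection);
//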
  1615. //++
  1616. //ULONG
  1617. //MiGetPpeOffset (
  1618. // IN PVOID va
  1619. // );
  1620. //
  1621. // Routine Description:
  1622. //
  1623. // MiGetPpeOffset returns the offset into a page directory parent for a
  1624. // given virtual address.
  1625. //
  1626. // Arguments
  1627. //
  1628. // Va - Supplies the virtual address to locate the offset for.
  1629. //
  1630. // Return Value:
  1631. //
  1632. // The offset into the page root table the corresponding PPE is at.
  1633. //
  1634. //--
  1635. #define MiGetPpeOffset(va) \
  1636. ((((ULONG_PTR)(va) & PDE_TBASE) == PDE_TBASE) ? \
  1637. ((PDE_SELFMAP & ((sizeof(MMPTE)*PTE_PER_PAGE) - 1))/sizeof(MMPTE)) : \
  1638. ((ULONG)(((ULONG_PTR)(va) >> PDI1_SHIFT) & PDI_MASK)))
  1639. //++
  1640. //ULONG
  1641. //MiGetPpeIndex (
  1642. // IN PVOID va
  1643. // );
  1644. //
  1645. // Routine Description:
  1646. //
  1647. // MiGetPpeIndex returns the page directory parent index
  1648. // for a given virtual address.
  1649. //
  1650. // N.B. This does not mask off PXE bits.
  1651. //
  1652. // Arguments
  1653. //
  1654. // Va - Supplies the virtual address to locate the index for.
  1655. //
  1656. // Return Value:
  1657. //
  1658. // The index into the page directory parent - ie: the virtual page directory
  1659. // number. This is different from the page directory parent offset because
  1660. // this spans page directory parents on supported platforms.
  1661. //
  1662. // N.B. This macro only works on user addresses - the region ID bits
  1663. // are not masked off !
  1664. //--
  1665. #define MiGetPpeIndex(va) ((ULONG)((ULONG_PTR)(va) >> PDI1_SHIFT))
  1666. //++
  1667. //ULONG_PTR
  1668. //MiGetPdeOffset (
  1669. // IN PVOID va
  1670. // );
  1671. //
  1672. // Routine Description:
  1673. //
  1674. // MiGetPdeOffset returns the offset into a page directory
  1675. // for a given virtual address.
  1676. //
  1677. // Arguments:
  1678. //
  1679. // Va - Supplies the virtual address to locate the offset for.
  1680. //
  1681. // Return Value:
  1682. //
  1683. // The offset into the page directory table the corresponding PDE is at.
  1684. //
  1685. //--
  1686. #define MiGetPdeOffset(va) ((ULONG) (((ULONG_PTR)(va) >> PDI_SHIFT) & PDI_MASK))
  1687. //++
  1688. //ULONG
  1689. //MiGetPdeIndex (
  1690. // IN PVOID va
  1691. // );
  1692. //
  1693. // Routine Description:
  1694. //
  1695. // MiGetPdeIndex returns the page directory index
  1696. // for a given virtual address.
  1697. //
  1698. // N.B. This does not mask off PPE bits.
  1699. //
  1700. // Arguments
  1701. //
  1702. // Va - Supplies the virtual address to locate the index for.
  1703. //
  1704. // Return Value:
  1705. //
  1706. // The index into the page directory - ie: the virtual page table number.
  1707. // This is different from the page directory offset because this spans
  1708. // page directories on supported platforms.
  1709. //
  1710. // N.B. This macro only works on user addresses - the region ID bits
  1711. // are not masked off !
  1712. //
  1713. //--
  1714. #define MiGetPdeIndex(va) ((ULONG) ((ULONG_PTR)(va) >> PDI_SHIFT))
  1715. //++
  1716. //ULONG_PTR
  1717. //MiGetPteOffset (
  1718. // IN PVOID va
  1719. // );
  1720. //
  1721. // Routine Description:
  1722. //
  1723. // MiGetPteOffset returns the offset into a page table page
  1724. // for a given virtual address.
  1725. //
  1726. // Arguments:
  1727. //
  1728. // Va - Supplies the virtual address to locate the offset for.
  1729. //
  1730. // Return Value:
  1731. //
  1732. // The offset into the page table page the corresponding PTE is at.
  1733. //
  1734. //--
  1735. #define MiGetPteOffset(va) ((ULONG) (((ULONG_PTR)(va) >> PTI_SHIFT) & PDI_MASK))
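//
// N.B. With the shifts used on IA64 (PTI_SHIFT == 13, PDI_SHIFT == 23,
// PDI1_SHIFT == 33 and PDI_MASK == 0x3FF, assumed here for illustration
// and consistent with the 1024-entry table pages described earlier), the
// offset macros simply pick successive 10-bit fields out of a user
// virtual address:
//
//     PteOffset = (ULONG)(((ULONG_PTR)Va >> 13) & 0x3FF);  // PTE within its table page
//     PdeOffset = (ULONG)(((ULONG_PTR)Va >> 23) & 0x3FF);  // PDE within its directory page
//     PpeOffset = (ULONG)(((ULONG_PTR)Va >> 33) & 0x3FF);  // PPE within the parent page
//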
  1736. //++
  1738. //PVOID
  1739. //MiGetVirtualAddressMappedByPpe (
  1740. // IN PMMPTE PTE
  1741. // );
  1742. //
  1743. // Routine Description:
  1744. //
  1745. // MiGetVirtualAddressMappedByPpe returns the virtual address
  1746. // which is mapped by a given PPE address.
  1747. //
  1748. // Arguments
  1749. //
  1750. // PPE - Supplies the PPE to get the virtual address for.
  1751. //
  1752. // Return Value:
  1753. //
  1754. // Virtual address mapped by the PPE.
  1755. //
  1756. //--
  1757. #define MiGetVirtualAddressMappedByPpe(PPE) \
  1758. MiGetVirtualAddressMappedByPte(MiGetVirtualAddressMappedByPde(PPE))
  1759. //++
  1760. //PVOID
  1761. //MiGetVirtualAddressMappedByPde (
  1762. // IN PMMPTE PDE
  1763. // );
  1764. //
  1765. // Routine Description:
  1766. //
  1767. // MiGetVirtualAddressMappedByPde returns the virtual address
  1768. // which is mapped by a given PDE address.
  1769. //
  1770. // Arguments
  1771. //
  1772. // PDE - Supplies the PDE to get the virtual address for.
  1773. //
  1774. // Return Value:
  1775. //
  1776. // Virtual address mapped by the PDE.
  1777. //
  1778. //--
  1779. #define MiGetVirtualAddressMappedByPde(Pde) \
  1780. MiGetVirtualAddressMappedByPte(MiGetVirtualAddressMappedByPte(Pde))
  1781. //++
  1782. //PVOID
  1783. //MiGetVirtualAddressMappedByPte (
  1784. // IN PMMPTE PTE
  1785. // );
  1786. //
  1787. // Routine Description:
  1788. //
  1789. // MiGetVirtualAddressMappedByPte returns the virtual address
  1790. // which is mapped by a given PTE address.
  1791. //
  1792. // Arguments:
  1793. //
  1794. // PTE - Supplies the PTE to get the virtual address for.
  1795. //
  1796. // Return Value:
  1797. //
  1798. // Virtual address mapped by the PTE.
  1799. //
  1800. //--
  1801. #define MiGetVirtualAddressMappedByPte(PTE) \
  1802. (((ULONG_PTR)(PTE) & PTA_SIGN) ? \
  1803. (PVOID)(((ULONG_PTR)(PTE) & VRN_MASK) | VA_FILL | \
  1804. (((ULONG_PTR)(PTE)-PTE_BASE) << (PAGE_SHIFT - PTE_SHIFT))) : \
  1805. (PVOID)(((ULONG_PTR)(PTE) & VRN_MASK) | (((ULONG_PTR)(PTE)-PTE_BASE) << (PAGE_SHIFT - PTE_SHIFT))))
  1806. //++
  1807. //LOGICAL
  1808. //MiIsVirtualAddressOnPpeBoundary (
  1809. // IN PVOID VA
  1810. // );
  1811. //
  1812. // Routine Description:
  1813. //
1814. // MiIsVirtualAddressOnPpeBoundary returns TRUE if the virtual address is
1815. // on a page directory parent entry boundary.
  1816. //
  1817. // Arguments
  1818. //
  1819. // VA - Supplies the virtual address to check.
  1820. //
  1821. // Return Value:
  1822. //
  1823. // TRUE if on a boundary, FALSE if not.
  1824. //
  1825. //--
  1826. #define MiIsVirtualAddressOnPpeBoundary(VA) (((ULONG_PTR)(VA) & PAGE_DIRECTORY1_MASK) == 0)
  1827. //++
  1828. //LOGICAL
  1829. //MiIsVirtualAddressOnPdeBoundary (
  1830. // IN PVOID VA
  1831. // );
  1832. //
  1833. // Routine Description:
  1834. //
  1835. // MiIsVirtualAddressOnPdeBoundary returns TRUE if the virtual address is
  1836. // on a page directory entry boundary.
  1837. //
  1838. // Arguments
  1839. //
  1840. // VA - Supplies the virtual address to check.
  1841. //
  1842. // Return Value:
  1843. //
  1844. // TRUE if on an 8MB PDE boundary, FALSE if not.
  1845. //
  1846. //--
  1847. #define MiIsVirtualAddressOnPdeBoundary(VA) (((ULONG_PTR)(VA) & PAGE_DIRECTORY2_MASK) == 0)
  1848. //++
  1849. //LOGICAL
  1850. //MiIsPteOnPpeBoundary (
1851. // IN PMMPTE PTE
  1852. // );
  1853. //
  1854. // Routine Description:
  1855. //
  1856. // MiIsPteOnPpeBoundary returns TRUE if the PTE is
  1857. // on a page directory parent entry boundary.
  1858. //
  1859. // Arguments
  1860. //
1861. // PTE - Supplies the PTE to check.
  1862. //
  1863. // Return Value:
  1864. //
  1865. // TRUE if on a boundary, FALSE if not.
  1866. //
  1867. //--
  1868. #define MiIsPteOnPpeBoundary(PTE) (((ULONG_PTR)(PTE) & (MM_VA_MAPPED_BY_PDE - 1)) == 0)
  1869. //++
  1870. //LOGICAL
  1871. //MiIsPteOnPdeBoundary (
  1872. // IN PVOID PTE
  1873. // );
  1874. //
  1875. // Routine Description:
  1876. //
  1877. // MiIsPteOnPdeBoundary returns TRUE if the PTE is
  1878. // on a page directory entry boundary.
  1879. //
  1880. // Arguments
  1881. //
  1882. // PTE - Supplies the PTE to check.
  1883. //
  1884. // Return Value:
  1885. //
1886. // TRUE if on an 8MB PDE boundary, FALSE if not.
  1887. //
  1888. //--
  1889. #define MiIsPteOnPdeBoundary(PTE) (((ULONG_PTR)(PTE) & (PAGE_SIZE - 1)) == 0)
  1890. //++
  1891. //ULONG
  1892. //GET_PAGING_FILE_NUMBER (
  1893. // IN MMPTE PTE
  1894. // );
  1895. //
  1896. // Routine Description:
  1897. //
  1898. // This macro extracts the paging file number from a PTE.
  1899. //
  1900. // Arguments:
  1901. //
  1902. // PTE - Supplies the PTE to operate upon.
  1903. //
  1904. // Return Value:
  1905. //
  1906. // The paging file number.
  1907. //
  1908. //--
  1909. #define GET_PAGING_FILE_NUMBER(PTE) ((ULONG) (PTE).u.Soft.PageFileLow)
  1910. //++
  1911. //ULONG
  1912. //GET_PAGING_FILE_OFFSET (
  1913. // IN MMPTE PTE
  1914. // );
  1915. //
  1916. // Routine Description:
  1917. //
  1918. // This macro extracts the offset into the paging file from a PTE.
  1919. //
  1920. // Arguments:
  1921. //
  1922. // PTE - Supplies the PTE to operate upon.
  1923. //
  1924. // Return Value:
  1925. //
  1926. // The paging file offset.
  1927. //
  1928. //--
  1929. #define GET_PAGING_FILE_OFFSET(PTE) ((ULONG) (PTE).u.Soft.PageFileHigh)
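//
// Usage sketch (illustrative only): for an invalid PTE that is neither a
// prototype nor a transition PTE, the two macros above recover which paging
// file holds the page and the page offset within that file. PointerPte is
// an assumed pointer to such a PTE.
//
//     MMPTE TempPte;
//     ULONG PageFileNumber;
//     ULONG PageFileOffset;
//
//     TempPte = *PointerPte;
//     ASSERT (TempPte.u.Hard.Valid == 0);
//     ASSERT (TempPte.u.Soft.Prototype == 0);
//     ASSERT (TempPte.u.Soft.Transition == 0);
//
//     PageFileNumber = GET_PAGING_FILE_NUMBER (TempPte);
//     PageFileOffset = GET_PAGING_FILE_OFFSET (TempPte);
//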
  1930. //++
  1931. //ULONG_PTR
  1932. //IS_PTE_NOT_DEMAND_ZERO (
  1933. // IN PMMPTE PPTE
  1934. // );
  1935. //
  1936. // Routine Description:
  1937. //
  1938. // This macro checks to see if a given PTE is NOT a demand zero PTE.
  1939. //
  1940. // Arguments:
  1941. //
  1942. // PTE - Supplies the PTE to operate upon.
  1943. //
  1944. // Return Value:
  1945. //
  1946. // Returns 0 if the PTE is demand zero, non-zero otherwise.
  1947. //
  1948. //--
  1949. #define IS_PTE_NOT_DEMAND_ZERO(PTE) \
  1950. ((PTE).u.Long & ((ULONG_PTR)0xFFFFFFFFF0000000 | \
  1951. MM_PTE_VALID_MASK | \
  1952. MM_PTE_PROTOTYPE_MASK | \
  1953. MM_PTE_TRANSITION_MASK))
  1954. //++
  1955. //VOID
  1956. //MI_MAKING_VALID_PTE_INVALID(
1957. // IN LOGICAL SYSTEM_WIDE
  1958. // );
  1959. //
  1960. // Routine Description:
  1961. //
  1962. // Prepare to make a single valid PTE invalid.
  1963. // No action is required on IA64.
  1964. //
  1965. // Arguments:
  1966. //
  1967. // SYSTEM_WIDE - Supplies TRUE if this will happen on all processors.
  1968. //
  1969. // Return Value:
  1970. //
  1971. // None.
  1972. //
  1973. //--
  1974. #define MI_MAKING_VALID_PTE_INVALID(SYSTEM_WIDE)
  1975. //++
  1976. //VOID
1977. //MI_MAKING_MULTIPLE_PTES_INVALID(
1978. // IN LOGICAL SYSTEM_WIDE
  1979. // );
  1980. //
  1981. // Routine Description:
  1982. //
  1983. // Prepare to make multiple valid PTEs invalid.
  1984. // No action is required on IA64.
  1985. //
  1986. // Arguments:
  1987. //
  1988. // SYSTEM_WIDE - Supplies TRUE if this will happen on all processors.
  1989. //
  1990. // Return Value:
  1991. //
  1992. // None.
  1993. //
  1994. //--
  1995. #define MI_MAKING_MULTIPLE_PTES_INVALID(SYSTEM_WIDE)
  1996. //++
  1997. //VOID
  1998. //MI_MAKE_PROTECT_WRITE_COPY (
1999. // IN OUT MMPTE PTE
  2000. // );
  2001. //
  2002. // Routine Description:
  2003. //
2004. // This macro makes a writable PTE a write-copy PTE.
  2005. //
  2006. // Arguments:
  2007. //
  2008. // PTE - Supplies the PTE to operate upon.
  2009. //
  2010. // Return Value:
  2011. //
  2012. // NONE
  2013. //
  2014. //--
  2015. #define MI_MAKE_PROTECT_WRITE_COPY(PTE) \
  2016. if ((PTE).u.Soft.Protection & MM_PROTECTION_WRITE_MASK) { \
  2017. (PTE).u.Long |= MM_PROTECTION_COPY_MASK << MM_PROTECT_FIELD_SHIFT; \
  2018. }
  2019. //++
  2020. //VOID
  2021. //MI_SET_PAGE_DIRTY(
  2022. // IN PMMPTE PPTE,
  2023. // IN PVOID VA,
  2024. // IN PVOID PFNHELD
  2025. // );
  2026. //
  2027. // Routine Description:
  2028. //
  2029. // This macro sets the dirty bit (and release page file space).
  2030. //
  2031. // Arguments:
  2032. //
2035. // PPTE - Supplies a pointer to the PTE that corresponds to VA.
2036. //
2037. // VA - Supplies the virtual address of the page fault.
  2038. //
  2039. // PFNHELD - Supplies TRUE if the PFN lock is held.
  2040. //
  2041. // Return Value:
  2042. //
  2043. // None.
  2044. //
  2045. //--
  2046. #define MI_SET_PAGE_DIRTY(PPTE,VA,PFNHELD) \
  2047. if ((PPTE)->u.Hard.Dirty == 1) { \
  2048. MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
  2049. }
  2050. //++
  2051. //VOID
  2052. //MI_NO_FAULT_FOUND(
  2053. // IN FAULTSTATUS,
  2054. // IN PMMPTE PPTE,
  2055. // IN PVOID VA,
  2056. // IN PVOID PFNHELD
  2057. // );
  2058. //
  2059. // Routine Description:
  2060. //
  2061. // This macro handles the case when a page fault is taken and no
  2062. // PTE with the valid bit clear is found.
  2063. //
  2064. // Arguments:
  2065. //
  2066. // FAULTSTATUS - Supplies the fault status.
  2067. //
  2068. // PPTE - Supplies a pointer to the PTE that corresponds to VA.
  2069. //
2070. // VA - Supplies the virtual address of the page fault.
  2071. //
  2072. // PFNHELD - Supplies TRUE if the PFN lock is held.
  2073. //
  2074. // Return Value:
  2075. //
  2076. // None.
  2077. //
  2078. //--
  2079. #define MI_NO_FAULT_FOUND(FAULTSTATUS,PPTE,VA,PFNHELD) \
  2080. if ((MI_FAULT_STATUS_INDICATES_WRITE(FAULTSTATUS)) && ((PPTE)->u.Hard.Dirty == 0)) { \
  2081. MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
  2082. } else { \
  2083. MMPTE TempPte; \
  2084. TempPte = *(PPTE); \
  2085. MI_SET_ACCESSED_IN_PTE (&TempPte, 1); \
  2086. MI_WRITE_VALID_PTE_NEW_PROTECTION((PPTE), TempPte); \
  2087. KiFlushSingleTb((VA)); \
  2088. }
  2089. //++
  2090. //ULONG_PTR
  2091. //MI_CAPTURE_DIRTY_BIT_TO_PFN (
  2092. // IN PMMPTE PPTE,
  2093. // IN PMMPFN PPFN
  2094. // );
  2095. //
  2096. // Routine Description:
  2097. //
2098. // This macro captures the state of the dirty bit into the PFN element
2099. // and frees any associated page file space if the PTE has been
2100. // modified.
  2101. //
  2102. // NOTE - THE PFN LOCK MUST BE HELD!
  2103. //
  2104. // Arguments:
  2105. //
  2106. // PPTE - Supplies the PTE to operate upon.
  2107. //
  2108. // PPFN - Supplies a pointer to the PFN database element that corresponds
  2109. // to the page mapped by the PTE.
  2110. //
  2111. // Return Value:
  2112. //
  2113. // None.
  2114. //
  2115. //--
  2116. #define MI_CAPTURE_DIRTY_BIT_TO_PFN(PPTE,PPFN) \
  2117. ASSERT (KeGetCurrentIrql() > APC_LEVEL); \
  2118. if (((PPFN)->u3.e1.Modified == 0) && \
  2119. ((PPTE)->u.Hard.Dirty != 0)) { \
  2120. MI_SET_MODIFIED (PPFN, 1, 0x18); \
  2121. if (((PPFN)->OriginalPte.u.Soft.Prototype == 0) && \
  2122. ((PPFN)->u3.e1.WriteInProgress == 0)) { \
  2123. MiReleasePageFileSpace ((PPFN)->OriginalPte); \
  2124. (PPFN)->OriginalPte.u.Soft.PageFileHigh = 0; \
  2125. } \
  2126. }
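//
// Usage sketch (illustrative only): MI_CAPTURE_DIRTY_BIT_TO_PFN is intended
// to run with the PFN lock held, typically just before a valid PTE is
// replaced, so a dirty bit the hardware set since the page was faulted in
// is not lost. LOCK_PFN/UNLOCK_PFN, PointerPte and Pfn1 are assumed caller
// context.
//
//     LOCK_PFN (OldIrql);
//
//     MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1);
//
//     // ... transition or invalidate the PTE ...
//
//     UNLOCK_PFN (OldIrql);
//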
  2127. //++
  2128. //BOOLEAN
  2129. //MI_IS_PHYSICAL_ADDRESS (
  2130. // IN PVOID VA
  2131. // );
  2132. //
  2133. // Routine Description:
  2134. //
  2135. // This macro determines if a given virtual address is really a
  2136. // physical address.
  2137. //
  2138. // Arguments:
  2139. //
  2140. // VA - Supplies the virtual address.
  2141. //
  2142. // Return Value:
  2143. //
  2144. // FALSE if it is not a physical address, TRUE if it is.
  2145. //
  2146. //--
  2147. #define MI_IS_PHYSICAL_ADDRESS(Va) \
  2148. ((((ULONG_PTR)(Va) >= KSEG3_BASE) && ((ULONG_PTR)(Va) < KSEG3_LIMIT)) || \
  2149. (((ULONG_PTR)(Va) >= KSEG4_BASE) && ((ULONG_PTR)(Va) < KSEG4_LIMIT)) || \
  2150. (((ULONG_PTR)Va >= KSEG0_BASE) && ((ULONG_PTR)Va < KSEG2_BASE)) || \
  2151. ((MiGetPpeAddress(Va)->u.Hard.Valid == 1) && \
  2152. (MiGetPdeAddress(Va)->u.Hard.Valid == 1) && \
  2153. (MI_PDE_MAPS_LARGE_PAGE (MiGetPdeAddress (Va)))))
  2154. //++
  2155. //ULONG_PTR
  2156. //MI_CONVERT_PHYSICAL_TO_PFN (
  2157. // IN PVOID VA
  2158. // );
  2159. //
  2160. // Routine Description:
  2161. //
  2162. // This macro converts a physical address (see MI_IS_PHYSICAL_ADDRESS)
  2163. // to its corresponding physical frame number.
  2164. //
  2165. // Arguments:
  2166. //
  2167. // VA - Supplies a pointer to the physical address.
  2168. //
  2169. // Return Value:
  2170. //
  2171. // Returns the PFN for the page.
  2172. //
  2173. //--
  2174. PVOID
  2175. KiGetPhysicalAddress (
  2176. IN PVOID VirtualAddress
  2177. );
  2178. #define MI_CONVERT_PHYSICAL_TO_PFN(Va) \
  2179. ((((ULONG_PTR)(Va) >= KSEG0_BASE) && ((ULONG_PTR)(Va) < KSEG2_BASE)) ? \
  2180. ((PFN_NUMBER)(((ULONG_PTR)KiGetPhysicalAddress(Va)) >> PAGE_SHIFT)) : \
  2181. ((((ULONG_PTR)(Va) >= KSEG3_BASE) && ((ULONG_PTR)(Va) < KSEG3_LIMIT)) || \
  2182. (((ULONG_PTR)(Va) >= KSEG4_BASE) && ((ULONG_PTR)(Va) < KSEG4_LIMIT))) ? \
  2183. ((PFN_NUMBER)(((ULONG_PTR)(Va) & ~VRN_MASK) >> PAGE_SHIFT)) : \
  2184. ((PFN_NUMBER)(MiGetPdeAddress(Va)->u.Hard.PageFrameNumber) + (MiGetPteOffset((ULONG_PTR)Va))))
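//
// Worked example (illustrative only): for an address inside the KSEG3
// window the conversion above is a pure shift - the region bits are
// stripped and the remainder is the physical address. KSEG0 addresses are
// translated through KiGetPhysicalAddress instead, and anything else is
// resolved through its (large page) PDE. The constant below is hypothetical.
//
//     PVOID Va;
//     PFN_NUMBER PageFrameIndex;
//
//     Va = (PVOID) (KSEG3_BASE | 0x2F4000);
//     PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (Va);   // 0x2F4000 >> PAGE_SHIFT
//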
  2185. typedef struct _MMCOLOR_TABLES {
  2186. PFN_NUMBER Flink;
  2187. PVOID Blink;
  2188. PFN_NUMBER Count;
  2189. } MMCOLOR_TABLES, *PMMCOLOR_TABLES;
  2190. #if MM_MAXIMUM_NUMBER_OF_COLORS > 1
  2191. extern MMPFNLIST MmFreePagesByPrimaryColor[2][MM_MAXIMUM_NUMBER_OF_COLORS];
  2192. #endif
  2193. extern PMMCOLOR_TABLES MmFreePagesByColor[2];
  2194. extern PFN_NUMBER MmTotalPagesForPagingFile;
  2195. //
  2196. // A VALID Page Table Entry on the IA64 has the following definition.
  2197. //
  2198. #define _MM_PAGING_FILE_LOW_SHIFT 28
  2199. #define _MM_PAGING_FILE_HIGH_SHIFT 32
  2200. #define MI_MAXIMUM_PAGEFILE_SIZE (((UINT64)4 * 1024 * 1024 * 1024 - 1) * PAGE_SIZE)
  2201. #define MI_PTE_LOOKUP_NEEDED ((ULONG64)0xffffffff)
  2202. typedef struct _MMPTE_SOFTWARE {
  2203. ULONGLONG Valid : 1;
  2204. ULONGLONG Prototype : 1;
  2205. ULONGLONG Protection : 5;
  2206. ULONGLONG Transition : 1;
  2207. ULONGLONG Reserved0 : 3;
  2208. ULONGLONG SplitPermissions : 1;
  2209. ULONGLONG UsedPageTableEntries : PTE_PER_PAGE_BITS;
  2210. ULONGLONG Reserved : 16 - PTE_PER_PAGE_BITS;
  2211. ULONGLONG PageFileLow: 4;
  2212. ULONGLONG PageFileHigh : 32;
  2213. } MMPTE_SOFTWARE;
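//
// Layout note and sketch (illustrative only): the bookkeeping fields above
// occupy the low 28 bits, so PageFileLow starts at bit
// _MM_PAGING_FILE_LOW_SHIFT (28) and PageFileHigh at bit
// _MM_PAGING_FILE_HIGH_SHIFT (32), matching the constants defined earlier.
// A paging file PTE can therefore be assembled field by field; the
// MM_READWRITE protection constant and the PointerPte/PageFileNumber/
// PageFileOffset variables are assumptions here, and MI_WRITE_INVALID_PTE
// is defined below.
//
//     MMPTE TempPte;
//
//     TempPte.u.Long = 0;
//     TempPte.u.Soft.Protection = MM_READWRITE;
//     TempPte.u.Soft.PageFileLow = PageFileNumber;
//     TempPte.u.Soft.PageFileHigh = PageFileOffset;
//
//     MI_WRITE_INVALID_PTE (PointerPte, TempPte);
//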
  2214. typedef struct _MMPTE_TRANSITION {
  2215. ULONGLONG Valid : 1;
  2216. ULONGLONG Prototype : 1;
  2217. ULONGLONG Protection : 5;
  2218. ULONGLONG Transition : 1;
  2219. ULONGLONG Rsvd0 : 3;
  2220. ULONGLONG SplitPermissions : 1;
  2221. ULONGLONG Reserved1 : 1;
  2222. ULONGLONG PageFrameNumber : 50 - PAGE_SHIFT;
  2223. ULONGLONG Rsvd1 : 14;
  2224. } MMPTE_TRANSITION;
  2225. #define _MM_PROTO_ADDRESS_SHIFT 12
  2226. typedef struct _MMPTE_PROTOTYPE {
  2227. ULONGLONG Valid : 1;
  2228. ULONGLONG Prototype : 1;
  2229. ULONGLONG ReadOnly : 1; // if set allow read only access.
  2230. ULONGLONG Rsvd : 8;
  2231. ULONGLONG SplitPermissions : 1;
  2232. ULONGLONG ProtoAddress : 52;
  2233. } MMPTE_PROTOTYPE;
  2234. #define _MM_PTE_SUBSECTION_ADDRESS_SHIFT 12
  2235. typedef struct _MMPTE_SUBSECTION {
  2236. ULONGLONG Valid : 1;
  2237. ULONGLONG Prototype : 1;
  2238. ULONGLONG Protection : 5;
  2239. ULONGLONG WhichPool : 1;
  2240. ULONGLONG Rsvd : 3;
  2241. ULONGLONG SplitPermissions : 1;
  2242. ULONGLONG SubsectionAddress : 52;
  2243. } MMPTE_SUBSECTION;
  2244. typedef struct _MMPTE_LIST {
  2245. ULONGLONG Valid : 1;
  2246. //
  2247. // Note the Prototype bit must not be used for lists like freed nonpaged
  2248. // pool because lookaside pops can legitimately reference bogus addresses
  2249. // (since the pop is unsynchronized) and the fault handler must be able to
  2250. // distinguish lists from protos so a retry status can be returned (vs a
  2251. // fatal bugcheck).
  2252. //
  2253. // The same caveat applies to both the Transition and the Protection
  2254. // fields as they are similarly examined in the fault handler and would
  2255. // be misinterpreted if ever nonzero in the freed nonpaged pool chains.
  2256. //
  2257. ULONGLONG Prototype : 1; // MUST BE ZERO as per above comment.
  2258. ULONGLONG Protection : 5;
  2259. ULONGLONG Transition : 1;
  2260. ULONGLONG OneEntry : 1;
  2261. ULONGLONG filler10 : 23;
  2262. ULONGLONG NextEntry : 32;
  2263. } MMPTE_LIST;
  2264. //
  2265. // A Page Table Entry on the IA64 has the following definition.
  2266. //
  2267. #define _HARDWARE_PTE_WORKING_SET_BITS 11
  2268. typedef struct _MMPTE_HARDWARE {
  2269. ULONGLONG Valid : 1;
  2270. ULONGLONG Rsvd0 : 1;
  2271. ULONGLONG Cache : 3;
  2272. ULONGLONG Accessed : 1;
  2273. ULONGLONG Dirty : 1;
  2274. ULONGLONG Owner : 2;
  2275. ULONGLONG Execute : 1;
  2276. ULONGLONG Write : 1;
  2277. ULONGLONG Rsvd1 : PAGE_SHIFT - 12;
  2278. ULONGLONG CopyOnWrite : 1;
  2279. ULONGLONG PageFrameNumber : 50 - PAGE_SHIFT;
  2280. ULONGLONG Rsvd2 : 2;
  2281. ULONGLONG Exception : 1;
  2282. ULONGLONG SoftwareWsIndex : _HARDWARE_PTE_WORKING_SET_BITS;
  2283. } MMPTE_HARDWARE, *PMMPTE_HARDWARE;
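//
// Sketch (illustrative only): a minimal valid hardware PTE needs at least
// the Valid and Accessed bits, a cache attribute and the page frame number;
// write permission is optional. Real PTE construction goes through the
// MI_MAKE_VALID_PTE machinery - this only shows which fields matter.
// PageFrameIndex is an assumed frame number.
//
//     MMPTE TempPte;
//
//     TempPte.u.Long = 0;
//     TempPte.u.Hard.Valid = 1;
//     TempPte.u.Hard.Accessed = 1;
//     TempPte.u.Hard.Write = 1;
//     TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
//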
  2284. typedef struct _MMPTE_LARGEPAGE {
  2285. ULONGLONG Valid : 1;
  2286. ULONGLONG Rsvd0 : 1;
  2287. ULONGLONG Cache : 3;
  2288. ULONGLONG Accessed : 1;
  2289. ULONGLONG Dirty : 1;
  2290. ULONGLONG Owner : 2;
  2291. ULONGLONG Execute : 1;
  2292. ULONGLONG Write : 1;
  2293. ULONGLONG Rsvd1 : PAGE_SHIFT - 12;
  2294. ULONGLONG CopyOnWrite : 1;
  2295. ULONGLONG PageFrameNumber : 50 - PAGE_SHIFT;
  2296. ULONGLONG Rsvd2 : 2;
  2297. ULONGLONG Exception : 1;
  2298. ULONGLONG Rsvd3 : 1;
  2299. ULONGLONG LargePage : 1;
  2300. ULONGLONG PageSize : 6;
  2301. ULONGLONG Rsvd4 : 3;
  2302. } MMPTE_LARGEPAGE, *PMMPTE_LARGEPAGE;
  2303. typedef struct _ALT_4KPTE {
  2304. ULONGLONG Commit : 1;
  2305. ULONGLONG Rsvd0 : 1;
  2306. ULONGLONG Cache : 3;
  2307. ULONGLONG Accessed : 1;
  2308. ULONGLONG InPageInProgress : 1;
  2309. ULONGLONG Owner : 2;
  2310. ULONGLONG Execute : 1;
  2311. ULONGLONG Write : 1;
  2312. ULONGLONG Rsvd1 : 1;
  2313. ULONGLONG PteOffset : 32;
  2314. ULONGLONG Rsvd2 : 8;
  2315. ULONGLONG Exception : 1;
  2316. ULONGLONG Protection : 5;
  2317. ULONGLONG Lock : 1;
  2318. ULONGLONG FillZero : 1;
  2319. ULONGLONG NoAccess : 1;
  2320. ULONGLONG CopyOnWrite : 1;
  2321. ULONGLONG PteIndirect : 1;
  2322. ULONGLONG Private : 1;
  2323. } ALT_4KPTE, *PALT_4KPTE;
  2324. //
  2325. // Use MM_PTE_CACHE_RESERVED to prevent the VHPT walker from speculatively
  2326. // filling this entry and also so the TB miss handler knows that this is
  2327. // a large (8mb) page directory entry.
  2328. //
  2329. #define MI_PDE_MAPS_LARGE_PAGE(PDE) ((PDE)->u.Hard.Cache == MM_PTE_CACHE_RESERVED)
  2330. #define MI_MAKE_PDE_MAP_LARGE_PAGE(PDE) ((PDE)->u.Hard.Cache = MM_PTE_CACHE_RESERVED)
  2331. #define MI_GET_PAGE_FRAME_FROM_PTE(PTE) ((ULONG)((PTE)->u.Hard.PageFrameNumber))
  2332. #define MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE(PTE) ((ULONG)((PTE)->u.Trans.PageFrameNumber))
  2333. #define MI_GET_PROTECTION_FROM_SOFT_PTE(PTE) ((ULONG)((PTE)->u.Soft.Protection))
  2334. #define MI_GET_PROTECTION_FROM_TRANSITION_PTE(PTE) ((ULONG)((PTE)->u.Trans.Protection))
  2335. typedef struct _MMPTE {
  2336. union {
  2337. ULONGLONG Long;
  2338. MMPTE_HARDWARE Hard;
  2339. MMPTE_LARGEPAGE Large;
  2340. HARDWARE_PTE Flush;
  2341. MMPTE_PROTOTYPE Proto;
  2342. MMPTE_SOFTWARE Soft;
  2343. MMPTE_TRANSITION Trans;
  2344. MMPTE_SUBSECTION Subsect;
  2345. MMPTE_LIST List;
  2346. ALT_4KPTE Alt;
  2347. } u;
  2348. } MMPTE;
  2349. typedef MMPTE *PMMPTE;
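//
// Decoding sketch (illustrative only): the union above is discriminated by
// the low software bits, roughly in the order the fault handler tests them.
// PointerPte is an assumed PTE address.
//
//     MMPTE TempPte;
//
//     TempPte = *PointerPte;
//
//     if (TempPte.u.Hard.Valid == 1) {
//         // hardware (or large page) format - the page is resident
//     }
//     else if (TempPte.u.Soft.Prototype == 1) {
//         // prototype format - consult the prototype/subsection PTE
//     }
//     else if (TempPte.u.Soft.Transition == 1) {
//         // transition format - the page is on the standby/modified lists
//     }
//     else if (TempPte.u.Soft.PageFileHigh != 0) {
//         // paging file format - see GET_PAGING_FILE_NUMBER/OFFSET above
//     }
//     else {
//         // demand zero (the protection still comes from u.Soft.Protection)
//     }
//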
  2350. extern PMMPTE MiFirstReservedZeroingPte;
  2351. #define InterlockedCompareExchangePte(_PointerPte, _NewContents, _OldContents) \
  2352. InterlockedCompareExchange64 ((PLONGLONG)(_PointerPte), (LONGLONG)(_NewContents), (LONGLONG)(_OldContents))
  2353. #define InterlockedExchangePte(_PointerPte, _NewContents) InterlockedExchange64((PLONG64)(_PointerPte), _NewContents)
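//
// Usage sketch (illustrative only): the interlocked forms above are for PTEs
// that can change underneath the caller (for example the hardware setting
// the access or dirty bit), where the update must only succeed if the PTE
// still holds the value that was read. PointerPte is an assumed PTE address.
//
//     MMPTE OldPteContents;
//     MMPTE NewPteContents;
//
//     do {
//         OldPteContents = *PointerPte;
//         NewPteContents = OldPteContents;
//         NewPteContents.u.Hard.Dirty = 1;        // example modification
//     } while (InterlockedCompareExchangePte (PointerPte,
//                                             NewPteContents.u.Long,
//                                             OldPteContents.u.Long) !=
//              (LONGLONG) OldPteContents.u.Long);
//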
  2354. //++
  2355. //VOID
  2356. //MI_WRITE_VALID_PTE (
  2357. // IN PMMPTE PointerPte,
  2358. // IN MMPTE PteContents
  2359. // );
  2360. //
  2361. // Routine Description:
  2362. //
  2363. // MI_WRITE_VALID_PTE fills in the specified PTE making it valid with the
  2364. // specified contents.
  2365. //
  2366. // Arguments
  2367. //
  2368. // PointerPte - Supplies a PTE to fill.
  2369. //
  2370. // PteContents - Supplies the contents to put in the PTE.
  2371. //
  2372. // Return Value:
  2373. //
  2374. // None.
  2375. //
  2376. //--
  2377. #define MI_WRITE_VALID_PTE(_PointerPte, _PteContents) \
  2378. ASSERT ((_PointerPte)->u.Hard.Valid == 0); \
  2379. ASSERT ((_PteContents).u.Hard.Valid == 1); \
  2380. MI_LOG_PTE_CHANGE (_PointerPte, _PteContents); \
  2381. (*((volatile MMPTE *)(_PointerPte)) = (_PteContents))
  2382. //++
  2383. //VOID
  2384. //MI_WRITE_INVALID_PTE (
  2385. // IN PMMPTE PointerPte,
  2386. // IN MMPTE PteContents
  2387. // );
  2388. //
  2389. // Routine Description:
  2390. //
  2391. // MI_WRITE_INVALID_PTE fills in the specified PTE making it invalid with the
  2392. // specified contents.
  2393. //
  2394. // Arguments
  2395. //
  2396. // PointerPte - Supplies a PTE to fill.
  2397. //
  2398. // PteContents - Supplies the contents to put in the PTE.
  2399. //
  2400. // Return Value:
  2401. //
  2402. // None.
  2403. //
  2404. //--
  2405. #define MI_WRITE_INVALID_PTE(_PointerPte, _PteContents) \
  2406. ASSERT ((_PteContents).u.Hard.Valid == 0); \
  2407. MI_LOG_PTE_CHANGE (_PointerPte, _PteContents); \
  2408. (*(_PointerPte) = (_PteContents))
  2409. //++
  2410. //VOID
  2411. //MI_WRITE_VALID_PTE_NEW_PROTECTION (
  2412. // IN PMMPTE PointerPte,
  2413. // IN MMPTE PteContents
  2414. // );
  2415. //
  2416. // Routine Description:
  2417. //
  2418. // MI_WRITE_VALID_PTE_NEW_PROTECTION fills in the specified PTE (which was
  2419. // already valid) changing only the protection or the dirty bit.
  2420. //
  2421. // Arguments
  2422. //
  2423. // PointerPte - Supplies a PTE to fill.
  2424. //
  2425. // PteContents - Supplies the contents to put in the PTE.
  2426. //
  2427. // Return Value:
  2428. //
  2429. // None.
  2430. //
  2431. //--
  2432. #define MI_WRITE_VALID_PTE_NEW_PROTECTION(_PointerPte, _PteContents) \
  2433. ASSERT ((_PointerPte)->u.Hard.Valid == 1); \
  2434. ASSERT ((_PteContents).u.Hard.Valid == 1); \
  2435. ASSERT ((_PointerPte)->u.Hard.PageFrameNumber == (_PteContents).u.Hard.PageFrameNumber); \
  2436. MI_LOG_PTE_CHANGE (_PointerPte, _PteContents); \
  2437. (*(_PointerPte) = (_PteContents))
  2438. //++
  2439. //VOID
  2440. //MI_WRITE_VALID_PTE_NEW_PAGE (
  2441. // IN PMMPTE PointerPte,
  2442. // IN MMPTE PteContents
  2443. // );
  2444. //
  2445. // Routine Description:
  2446. //
  2447. // MI_WRITE_VALID_PTE_NEW_PAGE fills in the specified PTE (which was
  2448. // already valid) changing the page and the protection.
  2449. // Note that the contents are very carefully written.
  2450. //
  2451. // Arguments
  2452. //
  2453. // PointerPte - Supplies a PTE to fill.
  2454. //
  2455. // PteContents - Supplies the contents to put in the PTE.
  2456. //
  2457. // Return Value:
  2458. //
  2459. // None.
  2460. //
  2461. //--
  2462. #define MI_WRITE_VALID_PTE_NEW_PAGE(_PointerPte, _PteContents) \
  2463. ASSERT ((_PointerPte)->u.Hard.Valid == 1); \
  2464. ASSERT ((_PteContents).u.Hard.Valid == 1); \
  2465. ASSERT ((_PointerPte)->u.Hard.PageFrameNumber != (_PteContents).u.Hard.PageFrameNumber); \
  2466. MI_LOG_PTE_CHANGE (_PointerPte, _PteContents); \
  2467. (*(_PointerPte) = (_PteContents))
  2468. //++
  2469. //VOID
  2470. //MiFillMemoryPte (
  2471. // IN PMMPTE Destination,
  2472. // IN ULONGLONG NumberOfPtes,
  2473. // IN MMPTE Pattern
2474. // );
  2475. //
  2476. // Routine Description:
  2477. //
  2478. // This function fills memory with the specified PTE pattern.
  2479. //
  2480. // Arguments
  2481. //
  2482. // Destination - Supplies a pointer to the memory to fill.
  2483. //
  2484. // NumberOfPtes - Supplies the number of PTEs (not bytes!) to be filled.
  2485. //
  2486. // Pattern - Supplies the PTE fill pattern.
  2487. //
  2488. // Return Value:
  2489. //
  2490. // None.
  2491. //
  2492. //--
  2493. #define MiFillMemoryPte(Destination, Length, Pattern) \
  2494. RtlFillMemoryUlonglong ((Destination), (Length) * sizeof (MMPTE), (Pattern))
  2495. #define MiZeroMemoryPte(Destination, Length) \
  2496. RtlZeroMemory ((Destination), (Length) * sizeof (MMPTE))
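//
// Usage sketch (illustrative only): both fill helpers take a PTE count, not
// a byte count. A freshly allocated page table page, for example, is stamped
// with a uniform pattern before it is hooked in; PageTableVa and the ZeroPte
// fill pattern are assumed caller context.
//
//     MiFillMemoryPte (PageTableVa, PTE_PER_PAGE, ZeroPte.u.Long);
//
//     // or, for an all-zero pattern:
//
//     MiZeroMemoryPte (PageTableVa, PTE_PER_PAGE);
//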
  2497. #define KiWbInvalidateCache
  2498. //++
  2499. //BOOLEAN
  2500. //MI_IS_PAGE_TABLE_ADDRESS (
  2501. // IN PVOID VA
  2502. // );
  2503. //
  2504. // Routine Description:
  2505. //
  2506. // This macro determines if a given virtual address is really a
  2507. // page table address (PTE, PDE, PPE).
  2508. //
  2509. // Arguments
  2510. //
  2511. // VA - Supplies the virtual address.
  2512. //
  2513. // Return Value:
  2514. //
  2515. // FALSE if it is not a page table address, TRUE if it is.
  2516. //
  2517. //--
  2518. #define MI_IS_PAGE_TABLE_ADDRESS(VA) \
  2519. ((((ULONG_PTR)VA >= PTE_UBASE) && ((ULONG_PTR)VA < (PDE_UTBASE + PAGE_SIZE))) || \
  2520. (((ULONG_PTR)VA >= PTE_KBASE) && ((ULONG_PTR)VA < (PDE_KTBASE + PAGE_SIZE))) || \
  2521. (((ULONG_PTR)VA >= PTE_SBASE) && ((ULONG_PTR)VA < (PDE_STBASE + PAGE_SIZE))) || \
  2522. MI_IS_ALT_PAGE_TABLE_ADDRESS((PMMPTE)VA))
  2523. //++
  2524. //BOOLEAN
  2525. //MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS (
  2526. // IN PVOID VA
  2527. // );
  2528. //
  2529. // Routine Description:
  2530. //
  2531. // This macro takes a virtual address and determines if
  2532. // it is a page table or hyperspace address.
  2533. //
  2534. // Arguments
  2535. //
  2536. // VA - Supplies a virtual address.
  2537. //
  2538. // Return Value:
  2539. //
  2540. // TRUE if the address is a page table or hyperspace address, FALSE if not.
  2541. //
  2542. //--
  2543. #define MI_IS_PAGE_TABLE_OR_HYPER_ADDRESS(VA) \
  2544. (MI_IS_PAGE_TABLE_ADDRESS(VA) || MI_IS_HYPER_SPACE_ADDRESS(VA))
  2545. //++
  2546. //BOOLEAN
  2547. //MI_IS_HYPER_SPACE_ADDRESS (
  2548. // IN PVOID VA
  2549. // );
  2550. //
  2551. // Routine Description:
  2552. //
  2553. // This macro determines if a given virtual address resides in
  2554. // hyperspace.
  2555. //
  2556. // Arguments
  2557. //
  2558. // VA - Supplies the virtual address.
  2559. //
  2560. // Return Value:
  2561. //
  2562. // FALSE if it is not a hyperspace address, TRUE if it is.
  2563. //
  2564. //--
  2565. #define MI_IS_HYPER_SPACE_ADDRESS(VA) \
  2566. (((PVOID)VA >= HYPER_SPACE) && ((PVOID)VA <= HYPER_SPACE_END))
  2567. //++
  2568. //BOOLEAN
  2569. //MI_IS_PTE_ADDRESS (
  2570. // IN PMMPTE PTE
  2571. // );
  2572. //
  2573. // Routine Description:
  2574. //
  2575. // This macro determines if a given virtual address is really a
  2576. // page table page (PTE) address.
  2577. //
  2578. // Arguments
  2579. //
  2580. // PTE - Supplies the PTE virtual address.
  2581. //
  2582. // Return Value:
  2583. //
  2584. // FALSE if it is not a PTE address, TRUE if it is.
  2585. //
  2586. //--
  2587. #define MI_IS_PTE_ADDRESS(PTE) \
  2588. (((PTE >= (PMMPTE)PTE_UBASE) && (PTE <= (PMMPTE)PTE_UTOP)) || \
  2589. ((PTE >= (PMMPTE)PTE_KBASE) && (PTE <= (PMMPTE)PTE_KTOP)) || \
  2590. ((PTE >= (PMMPTE)PTE_SBASE) && (PTE <= (PMMPTE)PTE_STOP)))
  2591. #define MI_IS_PPE_ADDRESS(PTE) \
  2592. (((PTE >= (PMMPTE)PDE_UTBASE) && (PTE <= (PMMPTE)(PDE_UTBASE + PAGE_SIZE))) || \
  2593. ((PTE >= (PMMPTE)PDE_KTBASE) && (PTE <= (PMMPTE)(PDE_KTBASE + PAGE_SIZE))) || \
  2594. ((PTE >= (PMMPTE)PDE_STBASE) && (PTE <= (PMMPTE)(PDE_STBASE + PAGE_SIZE))))
  2595. //++
  2596. //BOOLEAN
  2597. //MI_IS_KERNEL_PTE_ADDRESS (
  2598. // IN PMMPTE PTE
  2599. // );
  2600. //
  2601. // Routine Description:
  2602. //
  2603. // This macro determines if a given virtual address is really a
  2604. // kernel page table page (PTE) address.
  2605. //
  2606. // Arguments
  2607. //
  2608. // PTE - Supplies the PTE virtual address.
  2609. //
  2610. // Return Value:
  2611. //
  2612. // FALSE if it is not a kernel PTE address, TRUE if it is.
  2613. //
  2614. //--
  2615. #define MI_IS_KERNEL_PTE_ADDRESS(PTE) \
  2616. (((PMMPTE)PTE >= (PMMPTE)PTE_KBASE) && ((PMMPTE)PTE <= (PMMPTE)PTE_KTOP))
  2617. //++
  2618. //BOOLEAN
  2619. //MI_IS_USER_PTE_ADDRESS (
  2620. // IN PMMPTE PTE
  2621. // );
  2622. //
  2623. // Routine Description:
  2624. //
  2625. // This macro determines if a given virtual address is really a
  2626. // page table page (PTE) address.
  2627. //
  2628. // Arguments
  2629. //
  2630. // PTE - Supplies the PTE virtual address.
  2631. //
  2632. // Return Value:
  2633. //
  2634. // FALSE if it is not a PTE address, TRUE if it is.
  2635. //
  2636. //--
  2637. #define MI_IS_USER_PTE_ADDRESS(PTE) \
  2638. ((PTE >= (PMMPTE)PTE_UBASE) && (PTE <= (PMMPTE)PTE_UTOP))
  2639. //++
  2640. //BOOLEAN
  2641. //MI_IS_PAGE_DIRECTORY_ADDRESS (
  2642. // IN PMMPTE PDE
  2643. // );
  2644. //
  2645. // Routine Description:
  2646. //
  2647. // This macro determines if a given virtual address is really a
  2648. // page directory page (PDE) address.
  2649. //
  2650. // Arguments
  2651. //
  2652. // PDE - Supplies the virtual address.
  2653. //
  2654. // Return Value:
  2655. //
  2656. // FALSE if it is not a PDE address, TRUE if it is.
  2657. //
  2658. //--
  2659. #define MI_IS_PAGE_DIRECTORY_ADDRESS(PDE) \
  2660. (((PDE >= (PMMPTE)PDE_UBASE) && (PDE <= (PMMPTE)PDE_UTOP)) || \
  2661. ((PDE >= (PMMPTE)PDE_KBASE) && (PDE <= (PMMPTE)PDE_KTOP)) || \
  2662. ((PDE >= (PMMPTE)PDE_SBASE) && (PDE <= (PMMPTE)PDE_STOP)))
  2663. //++
  2664. //BOOLEAN
  2665. //MI_IS_USER_PDE_ADDRESS (
  2666. // IN PMMPTE PDE
  2667. // );
  2668. //
  2669. // Routine Description:
  2670. //
  2671. // This macro determines if a given virtual address is really a
  2672. // user page directory page (PDE) address.
  2673. //
  2674. // Arguments
  2675. //
  2676. // PDE - Supplies the PDE virtual address.
  2677. //
  2678. // Return Value:
  2679. //
  2680. // FALSE if it is not a user PDE address, TRUE if it is.
  2681. //
  2682. //--
  2683. #define MI_IS_USER_PDE_ADDRESS(PDE) \
  2684. ((PDE >= (PMMPTE)PDE_UBASE) && (PDE <= (PMMPTE)PDE_UTOP))
  2685. //++
  2686. //BOOLEAN
  2687. //MI_IS_KERNEL_PDE_ADDRESS (
  2688. // IN PMMPTE PDE
  2689. // );
  2690. //
  2691. // Routine Description:
  2692. //
  2693. // This macro determines if a given virtual address is really a
  2694. // kernel page directory page (PDE) address.
  2695. //
  2696. // Arguments
  2697. //
  2698. // PDE - Supplies the PDE virtual address.
  2699. //
  2700. // Return Value:
  2701. //
2702. // FALSE if it is not a kernel PDE address, TRUE if it is.
  2703. //
  2704. //--
  2705. #define MI_IS_KERNEL_PDE_ADDRESS(PDE) \
  2706. ((PDE >= (PMMPTE)PDE_KBASE) && (PDE <= (PMMPTE)PDE_KTOP))
  2707. //++
  2708. //BOOLEAN
  2709. //MI_IS_PROCESS_SPACE_ADDRESS (
  2710. // IN PVOID VA
  2711. // );
  2712. //
  2713. // Routine Description:
  2714. //
  2715. // This macro determines if a given virtual address resides in
  2716. // the per-process space.
  2717. //
  2718. // Arguments
  2719. //
  2720. // VA - Supplies the virtual address.
  2721. //
  2722. // Return Value:
  2723. //
  2724. // FALSE if it is not a per-process address, TRUE if it is.
  2725. //
  2726. //--
  2727. #define MI_IS_PROCESS_SPACE_ADDRESS(VA) (((ULONG_PTR)VA >> 61) == UREGION_INDEX)
  2728. //++
  2729. //BOOLEAN
  2730. //MI_IS_SYSTEM_ADDRESS (
  2731. // IN PVOID VA
  2732. // );
  2733. //
  2734. // Routine Description:
  2735. //
  2736. // This macro determines if a given virtual address resides in
  2737. // the system (global) space.
  2738. //
  2739. // Arguments
  2740. //
  2741. // VA - Supplies the virtual address.
  2742. //
  2743. // Return Value:
  2744. //
  2745. // FALSE if it is not a system (global) address, TRUE if it is.
  2746. //
  2747. //--
  2748. #define MI_IS_SYSTEM_ADDRESS(VA) (((ULONG_PTR)VA >> 61) == KREGION_INDEX)
  2749. //
  2750. //++
  2751. //PVOID
  2752. //KSEG0_ADDRESS (
  2753. // IN PFN_NUMBER PAGE
  2754. // );
  2755. //
  2756. // Routine Description:
  2757. //
  2758. // This macro returns a KSEG0 virtual address which maps the page.
  2759. //
  2760. // Arguments:
  2761. //
  2762. // PAGE - Supplies the physical page frame number.
  2763. //
  2764. // Return Value:
  2765. //
  2766. // The KSEG0 virtual address.
  2767. //
  2768. //--
  2769. #define KSEG0_ADDRESS(PAGE) \
  2770. (PVOID)(KSEG0_BASE | ((PAGE) << PAGE_SHIFT))
  2771. extern MMPTE ValidPpePte;
  2772. //++
  2773. //PMMPTE
  2774. //MiGetPpeAddress (
  2775. // IN PVOID va
  2776. // );
  2777. //
  2778. // Routine Description:
  2779. //
  2780. // MiGetPpeAddress returns the address of the page directory parent entry
  2781. // which maps the given virtual address. This is one level above the
  2782. // page directory.
  2783. //
  2784. // Arguments
  2785. //
  2786. // Va - Supplies the virtual address to locate the PPE for.
  2787. //
  2788. // Return Value:
  2789. //
  2790. // The address of the PPE.
  2791. //
  2792. //--
  2793. __forceinline
  2794. PMMPTE
  2795. MiGetPpeAddress(
  2796. IN PVOID Va
  2797. )
  2798. {
  2799. if ((((ULONG_PTR)(Va) & PTE_BASE) == PTE_BASE) &&
  2800. ((((ULONG_PTR)(Va)) & ~(VRN_MASK|PTE_BASE)) < (ULONG_PTR)PDE_PER_PAGE * PTE_PER_PAGE * PAGE_SIZE)) {
  2801. return (PMMPTE) (((ULONG_PTR)Va & VRN_MASK) |
  2802. (PDE_TBASE + PAGE_SIZE - sizeof(MMPTE)));
  2803. }
  2804. if (((((ULONG_PTR)(Va)) & PDE_BASE) == PDE_BASE) &&
  2805. ((((ULONG_PTR)(Va)) & ~(VRN_MASK|PDE_BASE)) < PDE_PER_PAGE * PAGE_SIZE)) {
  2806. return (PMMPTE) ((((ULONG_PTR)(Va)) & VRN_MASK) |
  2807. (PDE_TBASE + PAGE_SIZE - sizeof(MMPTE)));
  2808. }
  2809. if (((((ULONG_PTR)(Va)) & PDE_TBASE) == PDE_TBASE) &&
  2810. ((((ULONG_PTR)(Va)) & ~(VRN_MASK|PDE_TBASE)) < PAGE_SIZE)) {
  2811. return (PMMPTE) ((((ULONG_PTR)(Va)) & VRN_MASK) |
  2812. (PDE_TBASE + PAGE_SIZE - sizeof(MMPTE)));
  2813. }
  2814. return (PMMPTE) (((((ULONG_PTR)(Va)) & VRN_MASK)) |
  2815. ((((((ULONG_PTR)(Va)) >> PDI1_SHIFT) << PTE_SHIFT) &
  2816. (~(PDE_TBASE|VRN_MASK)) ) + PDE_TBASE));
  2817. }
//++
//PMMPTE
2818. //MiGetPdeAddress (
  2819. // IN PVOID va
  2820. // );
  2821. //
  2822. // Routine Description:
  2823. //
  2824. // MiGetPdeAddress returns the address of the PDE which maps the
  2825. // given virtual address.
  2826. //
  2827. // Arguments:
  2828. //
  2829. // Va - Supplies the virtual address to locate the PDE for.
  2830. //
  2831. // Return Value:
  2832. //
  2833. // The address of the PDE.
  2834. //
  2835. //--
  2836. __forceinline
  2837. PMMPTE
  2838. MiGetPdeAddress(
  2839. IN PVOID Va
  2840. )
  2841. {
  2842. if (((((ULONG_PTR)(Va)) & PDE_BASE) == PDE_BASE) &&
  2843. ((((ULONG_PTR)(Va)) & ~(VRN_MASK|PDE_BASE)) < PDE_PER_PAGE * PAGE_SIZE)) {
  2844. return (PMMPTE) ((((ULONG_PTR)(Va)) & VRN_MASK) |
  2845. (PDE_TBASE + PAGE_SIZE - sizeof(MMPTE)));
  2846. }
  2847. if (((((ULONG_PTR)(Va)) & PDE_TBASE) == PDE_TBASE) &&
  2848. ((((ULONG_PTR)(Va)) & ~(VRN_MASK|PDE_TBASE)) < PAGE_SIZE)) {
  2849. return (PMMPTE) ((((ULONG_PTR)(Va)) & VRN_MASK) |
  2850. (PDE_TBASE + PAGE_SIZE - sizeof(MMPTE)));
  2851. }
  2852. return (PMMPTE) (((((ULONG_PTR)(Va)) & VRN_MASK)) |
  2853. ((((((ULONG_PTR)(Va)) >> PDI_SHIFT) << PTE_SHIFT) & (~(PDE_BASE|VRN_MASK))) + PDE_BASE));
  2854. }
  2855. //++
  2856. //PMMPTE
  2857. //MiGetPteAddress (
  2858. // IN PVOID va
  2859. // );
  2860. //
  2861. // Routine Description:
  2862. //
  2863. // MiGetPteAddress returns the address of the PTE which maps the
  2864. // given virtual address.
  2865. //
  2866. // Arguments:
  2867. //
  2868. // Va - Supplies the virtual address to locate the PTE for.
  2869. //
  2870. // Return Value:
  2871. //
  2872. // The address of the PTE.
  2873. //
  2874. //--
  2875. __forceinline
  2876. PMMPTE
  2877. MiGetPteAddress(
  2878. IN PVOID Va
  2879. )
  2880. {
  2881. if (((((ULONG_PTR)(Va)) & PDE_TBASE) == PDE_TBASE) &&
  2882. ((((ULONG_PTR)(Va)) & ~(VRN_MASK|PDE_TBASE)) < PAGE_SIZE)) {
  2883. return (PMMPTE) ((((ULONG_PTR)(Va)) & VRN_MASK) |
  2884. (PDE_TBASE + PAGE_SIZE - sizeof(MMPTE)));
  2885. }
  2886. return (PMMPTE) (((((ULONG_PTR)(Va)) & VRN_MASK)) |
  2887. ((((((ULONG_PTR)(Va)) >> PTI_SHIFT) << PTE_SHIFT) & (~(PTE_BASE|VRN_MASK))) + PTE_BASE));
  2888. }
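//
// Worked sketch (illustrative only): for a canonical user-mode address the
// three functions above and the MiGetVirtualAddressMappedBy* macros earlier
// are inverses over the self-mapped page table space. Va is an assumed
// user-mode address.
//
//     PMMPTE PointerPpe;
//     PMMPTE PointerPde;
//     PMMPTE PointerPte;
//
//     PointerPpe = MiGetPpeAddress (Va);
//     PointerPde = MiGetPdeAddress (Va);
//     PointerPte = MiGetPteAddress (Va);
//
//     ASSERT (MiGetVirtualAddressMappedByPte (PointerPte) == PAGE_ALIGN (Va));
//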
  2889. #define MI_IS_PTE_PROTOTYPE(PointerPte) (!MI_IS_USER_PTE_ADDRESS (PointerPte))
  2890. //++
  2891. //BOOLEAN
  2892. //MI_IS_SYSTEM_CACHE_ADDRESS (
  2893. // IN PVOID VA
  2894. // );
  2895. //
  2896. // Routine Description:
  2897. //
  2898. // This macro takes a virtual address and determines if
  2899. // it is a system cache address.
  2900. //
  2901. // Arguments
  2902. //
  2903. // VA - Supplies a virtual address.
  2904. //
  2905. // Return Value:
  2906. //
  2907. // TRUE if the address is in the system cache, FALSE if not.
  2908. //
  2909. //--
  2910. #define MI_IS_SYSTEM_CACHE_ADDRESS(VA) \
  2911. (((PVOID)(VA) >= (PVOID)MmSystemCacheStart && \
  2912. (PVOID)(VA) <= (PVOID)MmSystemCacheEnd))
  2913. #if defined(_MIALT4K_)
  2914. //
  2915. // Define constants and macros for the alternate 4kb table.
  2916. //
  2917. // These are constants and defines that mimic the PAGE_SIZE constant but are
  2918. // hard coded to use 4K page values.
  2919. //
  2920. #define PAGE_4K 4096
  2921. #define PAGE_4K_SHIFT 12
  2922. #define PAGE_4K_MASK (PAGE_4K - 1)
  2923. #define PAGE_4K_ALIGN(Va) ((PVOID)((ULONG_PTR)(Va) & ~(PAGE_4K - 1)))
  2924. #define ROUND_TO_4K_PAGES(Size) (((ULONG_PTR)(Size) + PAGE_4K - 1) & ~(PAGE_4K - 1))
  2925. #define PAGE_NEXT_ALIGN(Va) ((PVOID)(PAGE_ALIGN((ULONG_PTR)Va + PAGE_SIZE - 1)))
  2926. #define BYTES_TO_4K_PAGES(Size) ((ULONG)((ULONG_PTR)(Size) >> PAGE_4K_SHIFT) + \
  2927. (((ULONG)(Size) & (PAGE_4K - 1)) != 0))
  2928. //
  2929. // Relative constants between native pages and 4K pages.
  2930. //
  2931. #define SPLITS_PER_PAGE (PAGE_SIZE / PAGE_4K)
  2932. #define PAGE_SHIFT_DIFF (PAGE_SHIFT - PAGE_4K_SHIFT)
  2933. #define ALT_PTE_SHIFT 3
  2934. #define ALT_PROTECTION_MASK (MM_PTE_EXECUTE_MASK|MM_PTE_WRITE_MASK)
  2935. #define MiGetAltPteAddress(VA) \
  2936. ((PMMPTE) ((ULONG_PTR)ALT4KB_PERMISSION_TABLE_START + \
  2937. ((((ULONG_PTR) (VA)) >> PAGE_4K_SHIFT) << ALT_PTE_SHIFT)))
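//
// Worked example (illustrative only): with 8KB native pages each native page
// is covered by SPLITS_PER_PAGE (2) alternate entries, and MiGetAltPteAddress
// indexes the alternate permission table directly by 4K page number. The
// address below is hypothetical.
//
//     PVOID Va = (PVOID) 0x00401000;              // an emulated x86 address
//     PMMPTE AltPte;
//
//     AltPte = MiGetAltPteAddress (Va);
//     // AltPte == ALT4KB_PERMISSION_TABLE_START + (0x401 << ALT_PTE_SHIFT)
//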
  2938. //
  2939. // Alternate 4k table flags.
  2940. //
  2941. #define MI_ALTFLG_FLUSH2G 0x0000000000000001
  2942. //
  2943. // MiProtectFor4kPage flags.
  2944. //
  2945. #define ALT_ALLOCATE 1
  2946. #define ALT_COMMIT 2
  2947. #define ALT_CHANGE 4
  2948. //
  2949. // ATE (Alternate PTE) protection bits.
  2950. //
  2951. #define MM_ATE_COMMIT 0x0000000000000001
  2952. #define MM_ATE_ACCESS 0x0000000000000020
  2953. #define MM_ATE_READONLY 0x0000000000000200
  2954. #define MM_ATE_EXECUTE 0x0400000000000200
  2955. #define MM_ATE_EXECUTE_READ 0x0400000000000200
  2956. #define MM_ATE_READWRITE 0x0000000000000600
  2957. #define MM_ATE_WRITECOPY 0x0020000000000200
  2958. #define MM_ATE_EXECUTE_READWRITE 0x0400000000000600
  2959. #define MM_ATE_EXECUTE_WRITECOPY 0x0420000000000400
  2960. #define MM_ATE_ZEROFILL 0x0800000000000000
  2961. #define MM_ATE_NOACCESS 0x1000000000000000
  2962. #define MM_ATE_COPY_ON_WRITE 0x2000000000000000
  2963. #define MM_ATE_PRIVATE 0x8000000000000000
  2964. #define MM_ATE_PROTO_MASK 0x0000000000000621
  2965. NTSTATUS
  2966. MmX86Fault (
  2967. IN ULONG_PTR FaultStatus,
  2968. IN PVOID VirtualAddress,
  2969. IN KPROCESSOR_MODE PreviousMode,
  2970. IN PVOID TrapInformation
  2971. );
  2972. VOID
  2973. MiSyncAltPte (
  2974. IN PVOID VirtualAddress
  2975. );
  2976. VOID
  2977. MiProtectImageFileFor4kPage (
  2978. IN PVOID VirtualAddress,
  2979. IN SIZE_T ViewSize
  2980. );
  2981. VOID
  2982. MiProtectFor4kPage (
  2983. IN PVOID Base,
  2984. IN SIZE_T Size,
  2985. IN ULONG NewProtect,
  2986. IN ULONG Flags,
  2987. IN PEPROCESS Process
  2988. );
  2989. VOID
  2990. MiProtectMapFileFor4kPage (
  2991. IN PVOID Base,
  2992. IN SIZE_T Size,
  2993. IN ULONG NewProtect,
  2994. IN SIZE_T CommitSize,
  2995. IN PMMPTE PointerPte,
  2996. IN PMMPTE LastPte,
  2997. IN PEPROCESS Process
  2998. );
  2999. VOID
  3000. MiReleaseFor4kPage (
  3001. IN PVOID StartVirtual,
  3002. IN PVOID EndVirtual,
  3003. IN PEPROCESS Process
  3004. );
  3005. VOID
  3006. MiDecommitFor4kPage (
  3007. IN PVOID StartVirtual,
  3008. IN PVOID EndVirtual,
  3009. IN PEPROCESS Process
  3010. );
  3011. VOID
  3012. MiDeleteFor4kPage (
  3013. IN PVOID StartVirtual,
  3014. IN PVOID EndVirtual,
  3015. IN PEPROCESS Process
  3016. );
  3017. VOID
  3018. MiQueryRegionFor4kPage (
  3019. IN PVOID BaseAddress,
  3020. IN PVOID EndAddress,
  3021. IN OUT PSIZE_T RegionSize,
  3022. IN OUT PULONG RegionState,
  3023. IN OUT PULONG RegionProtect,
  3024. IN PEPROCESS Process
  3025. );
  3026. ULONG
  3027. MiQueryProtectionFor4kPage (
  3028. IN PVOID BaseAddress,
  3029. IN PEPROCESS Process
  3030. );
  3031. NTSTATUS
  3032. MiInitializeAlternateTable (
  3033. IN PEPROCESS Process,
  3034. IN PVOID HighestUserAddress
  3035. );
  3036. VOID
  3037. MiDuplicateAlternateTable (
  3038. PEPROCESS CurrentProcess,
  3039. PEPROCESS ProcessToInitialize
  3040. );
  3041. VOID
  3042. MiDeleteAlternateTable (
  3043. PEPROCESS Process
  3044. );
  3045. VOID
  3046. MiLockFor4kPage (
  3047. PVOID CapturedBase,
  3048. SIZE_T CapturedRegionSize,
  3049. PEPROCESS Process
  3050. );
  3051. NTSTATUS
  3052. MiUnlockFor4kPage (
  3053. PVOID CapturedBase,
  3054. SIZE_T CapturedRegionSize,
  3055. PEPROCESS Process
  3056. );
  3057. LOGICAL
  3058. MiShouldBeUnlockedFor4kPage (
  3059. PVOID VirtualAddress,
  3060. PEPROCESS Process
  3061. );
  3062. ULONG
  3063. MiMakeProtectForNativePage (
  3064. IN PVOID VirtualAddress,
  3065. IN ULONG NewProtect,
  3066. IN PEPROCESS Process
  3067. );
  3068. LOGICAL
  3069. MiArePreceding4kPagesAllocated (
  3070. IN PVOID VirtualAddress
  3071. );
  3072. LOGICAL
  3073. MiAreFollowing4kPagesAllocated (
  3074. IN PVOID VirtualAddress
  3075. );
  3076. extern ULONG MmProtectToPteMaskForIA32[32];
  3077. extern ULONG MmProtectToPteMaskForSplit[32];
  3078. extern ULONGLONG MmProtectToAteMask[32];
  3079. #define MiMakeProtectionAteMask(NewProtect) MmProtectToAteMask[NewProtect]
  3080. #define LOCK_ALTERNATE_TABLE_UNSAFE(PWOW64) \
  3081. ASSERT (KeAreAllApcsDisabled () == TRUE); \
3082. KeAcquireGuardedMutexUnsafe (&(PWOW64)->AlternateTableLock);
  3083. #define UNLOCK_ALTERNATE_TABLE_UNSAFE(PWOW64) \
  3084. ASSERT (KeAreAllApcsDisabled () == TRUE); \
  3085. KeReleaseGuardedMutexUnsafe (&(PWOW64)->AlternateTableLock); \
  3086. ASSERT (KeAreAllApcsDisabled () == TRUE);
  3087. #define LOCK_ALTERNATE_TABLE(PWOW64) \
  3088. KeAcquireGuardedMutex (&(PWOW64)->AlternateTableLock)
  3089. #define UNLOCK_ALTERNATE_TABLE(PWOW64) \
  3090. KeReleaseGuardedMutex (&(PWOW64)->AlternateTableLock)
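//
// Usage sketch (illustrative only): the alternate table lock is a guarded
// mutex in the per-process Wow64 structure, so any walk or update of the
// alternate 4K permission table is bracketed like this (the PWOW64_PROCESS
// type, the Process->Wow64Process field and Process itself are assumptions
// about caller context):
//
//     PWOW64_PROCESS Wow64Process;
//
//     Wow64Process = Process->Wow64Process;
//
//     LOCK_ALTERNATE_TABLE (Wow64Process);
//
//     // ... examine or update entries via MiGetAltPteAddress ...
//
//     UNLOCK_ALTERNATE_TABLE (Wow64Process);
//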
  3091. #endif
  3092. //++
  3093. //VOID
  3094. //MI_BARRIER_SYNCHRONIZE (
  3095. // IN ULONG TimeStamp
  3096. // );
  3097. //
  3098. // Routine Description:
  3099. //
  3100. // MI_BARRIER_SYNCHRONIZE compares the argument timestamp against the
  3101. // current IPI barrier sequence stamp. When equal, all processors will
  3102. // issue memory barriers to ensure that newly created pages remain coherent.
  3103. //
  3104. // When a page is put in the zeroed or free page list the current
  3105. // barrier sequence stamp is read (interlocked - this is necessary
  3106. // to get the correct value - memory barriers won't do the trick)
  3107. // and stored in the pfn entry for the page. The current barrier
  3108. // sequence stamp is maintained by the IPI send logic and is
  3109. // incremented (interlocked) when the target set of an IPI send
  3110. // includes all processors, but the one doing the send. When a page
  3111. // is needed its sequence number is compared against the current
  3112. // barrier sequence number. If it is equal, then the contents of
  3113. // the page may not be coherent on all processors, and an IPI must
  3114. // be sent to all processors to ensure a memory barrier is
  3115. // executed (generic call can be used for this). Sending the IPI
  3116. // automatically updates the barrier sequence number. The compare
  3117. // is for equality as this is the only value that requires the IPI
  3118. // (i.e., the sequence number wraps, values in both directions are
  3119. // older). When a page is removed in this fashion and either found
  3120. // to be coherent or made coherent, it cannot be modified between
  3121. // that time and writing the PTE. If the page is modified between
  3122. // these times, then an IPI must be sent.
  3123. //
  3124. // Arguments
  3125. //
  3126. // TimeStamp - Supplies the timestamp at the time when the page was zeroed.
  3127. //
  3128. // Return Value:
  3129. //
  3130. // None.
  3131. //
  3132. //--
  3133. #define MI_BARRIER_SYNCHRONIZE(TimeStamp) NOTHING
  3134. //++
  3135. //VOID
  3136. //MI_BARRIER_STAMP_ZEROED_PAGE (
  3137. // IN PULONG PointerTimeStamp
  3138. // );
  3139. //
  3140. // Routine Description:
  3141. //
  3142. // MI_BARRIER_STAMP_ZEROED_PAGE issues an interlocked read to get the
  3143. // current IPI barrier sequence stamp. This is called AFTER a page is
  3144. // zeroed.
  3145. //
  3146. // Arguments
  3147. //
  3148. // PointerTimeStamp - Supplies a timestamp pointer to fill with the
  3149. // current IPI barrier sequence stamp.
  3150. //
  3151. // Return Value:
  3152. //
  3153. // None.
  3154. //
  3155. //--
  3156. #define MI_BARRIER_STAMP_ZEROED_PAGE(PointerTimeStamp) NOTHING
  3157. //++
  3158. //VOID
  3159. //MI_FLUSH_SINGLE_SESSION_TB (
  3160. // IN PVOID Virtual
  3161. // );
  3162. //
  3163. // Routine Description:
  3164. //
  3165. // MI_FLUSH_SINGLE_SESSION_TB flushes the requested single address
  3166. // translation from the TB.
  3167. //
  3168. // Since IA64 supports ASNs and session space doesn't have one, the entire
  3169. // TB needs to be flushed.
  3170. //
  3171. // Arguments
  3172. //
  3173. // Virtual - Supplies the virtual address to invalidate.
  3174. //
  3175. // Return Value:
  3176. //
  3177. // None.
  3178. //
  3179. //--
  3180. #define MI_FLUSH_SINGLE_SESSION_TB(Virtual) \
  3181. KeFlushEntireTb (TRUE, TRUE);
  3182. //++
  3183. //VOID
  3184. //MI_FLUSH_ENTIRE_SESSION_TB (
  3185. // IN ULONG Invalid,
  3186. // IN LOGICAL AllProcessors
  3187. // );
  3188. //
  3189. // Routine Description:
  3190. //
3191. // MI_FLUSH_ENTIRE_SESSION_TB flushes the entire TB on IA64 because
3192. // IA64 supports ASNs and session space does not have one of its own.
  3193. //
  3194. // Arguments
  3195. //
  3196. // Invalid - TRUE if invalidating.
  3197. //
  3198. // AllProcessors - TRUE if all processors need to be IPI'd.
  3199. //
  3200. // Return Value:
  3201. //
  3202. // None.
  3203. //
  3204. #define MI_FLUSH_ENTIRE_SESSION_TB(Invalid, AllProcessors) \
  3205. KeFlushEntireTb (Invalid, AllProcessors);
  3206. VOID
  3207. MiSweepCacheMachineDependent (
  3208. IN PVOID VirtualAddress,
  3209. IN SIZE_T Size,
  3210. IN ULONG CacheAttribute
  3211. );
  3212. extern LOGICAL MiMappingsInitialized;
  3213. extern BOOLEAN MiKseg0Mapping;
  3214. extern PVOID MiKseg0Start;
  3215. extern PVOID MiKseg0End;
  3216. VOID
  3217. MiEliminateDriverTrEntries (
  3218. VOID
  3219. );
  3220. LOGICAL
  3221. MiIsVirtualAddressMappedByTr (
  3222. IN PVOID VirtualAddress
  3223. );
  3224. //++
  3225. //LOGICAL
  3226. //MI_RESERVED_BITS_CANONICAL (
  3227. // IN PVOID VirtualAddress
  3228. // );
  3229. //
  3230. // Routine Description:
  3231. //
  3232. // This routine checks whether all of the reserved bits are correct.
  3233. //
  3234. // The processor implements at least 51 bits of VA (in addition to the 3
  3235. // bits of VRN) - this is greater than the 43 bits of VA decode implemented
  3236. // by memory management so the VA is checked against 43 bits to prevent
  3237. // bogus address crashes which would not be caught by the processor.
  3238. //
  3239. // Arguments
  3240. //
  3241. // VirtualAddress - Supplies the virtual address to check.
  3242. //
  3243. // Return Value:
  3244. //
  3245. // TRUE if the address is ok, FALSE if not.
  3246. //
  3247. LOGICAL
  3248. __forceinline
  3249. MI_RESERVED_BITS_CANONICAL (
  3250. IN PVOID VirtualAddress
  3251. )
  3252. {
  3253. LONG_PTR ReservedBits;
  3254. ULONG_PTR ImplVirtualMsb;
  3255. PMMPTE PointerPte;
  3256. LOGICAL ReservedBitsOn;
  3257. //
  3258. // The address must be validated as NT-canonical. Note this is different
  3259. // than being processor-canonical (which must also be done as well). Of
  3260. // course if the NT validation is stricter then it is sufficient for both.
  3261. // Note however, there are certain addresses used by memory management for
  3262. // internal purposes (ie: McKinley page table VHPT space) which are never
  3263. // made visible to any external components and are thus allowed to violate
  3264. // the NT-canonical rule because it is not possible for anyone else to
  3265. // use them and thus they cannot encode values into them. (We don't want
  3266. // anyone trying to encode unused bits because if we ever expand the
  3267. // virtual address space, they will break).
  3268. //
  3269. // NT uses 43 bits of virtual address (not including VRN bits) and Merced
  3270. // has 51 while McKinley has 61. All valid Merced addresses can be
  3271. // validated via the 43 bit NT checking. However, McKinley VHPT addresses
  3272. // begin at 0x1FF8.0000.0000.0000, so they need to be checked separately.
  3273. //
  3274. ImplVirtualMsb = 43;
  3275. ReservedBitsOn = FALSE;
  3276. if ((ULONG_PTR)VirtualAddress & ((ULONG_PTR)1 << ImplVirtualMsb)) {
  3277. //
  3278. // All the reserved bits (not including the VRN) must also be set
  3279. // unless this is a special memory management-internal address.
  3280. //
  3281. ReservedBits = (LONG_PTR) VirtualAddress | VRN_MASK;
  3282. ReservedBits >>= (ImplVirtualMsb + 1);
  3283. if (ReservedBits != (LONG_PTR)-1) {
  3284. ReservedBitsOn = TRUE;
  3285. }
  3286. }
  3287. else {
  3288. //
  3289. // All the reserved bits (not including the VRN) must also be clear
  3290. // unless this is a special memory management-internal address.
  3291. //
  3292. ReservedBits = (LONG_PTR) VirtualAddress & ~VRN_MASK;
  3293. ReservedBits >>= (ImplVirtualMsb + 1);
  3294. if (ReservedBits != 0) {
  3295. ReservedBitsOn = TRUE;
  3296. }
  3297. }
  3298. //
  3299. // Note region registers are initialized for all regions so the VRN bits
  3300. // are stripped now for speed (ie: only the region 0 PTE ranges need to
  3301. // be checked below).
  3302. //
  3303. VirtualAddress = (PVOID) ((LONG_PTR) VirtualAddress & ~VRN_MASK);
  3304. if (ReservedBitsOn == FALSE) {
  3305. //
  3306. // No reserved bits were on, ensure that the virtual address is
  3307. // okay by ensuring the PPE/PDE/PTE are within bounds.
  3308. //
  3309. PointerPte = MiGetPteAddress (VirtualAddress);
  3310. }
  3311. else {
  3312. //
  3313. // Some reserved bits are on. This better be an internal address
  3314. // (ie: the McKinley VHPT), otherwise it's a bogus address.
  3315. //
  3316. // Note the Merced VHPT is NT-canonical so the checks below are
  3317. // no-ops on that processor, but this would be an error path on
  3318. // Merced anyway so the slight overhead is not critical.
  3319. //
  3320. PointerPte = (PMMPTE) VirtualAddress;
  3321. }
  3322. //
  3323. // Because the IA64 VHPT must cover the number of virtual address bits
  3324. // implemented by the processor and it must be on a natural boundary, the
  3325. // following window exists and must be explicitly checked for here.
  3326. //
  3327. // The initial Merced implementation supports 50 bits of virtual address.
  3328. // Hence the VHPT must cover 50-PAGE_SHIFT+PTE_SHIFT == 40 bits.
  3329. //
  3330. // However, NT uses PPE_PER_PAGE+PDE_PER_PAGE+PTE_PER_PAGE+PTE_SHIFT ==
  3331. // 33 bits.
  3332. //
  3333. // This seven bit difference between what the VHPT actually covers and
  3334. // what NT actually handles is what must be explicitly checked.
  3335. //
  3336. // Depending on what the VirtualAddress really represents, the PTE below
  3337. // may really be a PPE or PDE so check for all cases.
  3338. //
  3339. if ((PointerPte >= (PMMPTE)PTE_BASE) &&
  3340. (PointerPte < (PMMPTE)(PTE_BASE + (ULONG_PTR)PDE_PER_PAGE * PTE_PER_PAGE * PAGE_SIZE))) {
  3341. return TRUE;
  3342. }
  3343. if ((PointerPte >= (PMMPTE)PDE_BASE) &&
  3344. (PointerPte < (PMMPTE)(PDE_BASE + PDE_PER_PAGE * PAGE_SIZE))) {
  3345. return TRUE;
  3346. }
  3347. if ((PointerPte >= (PMMPTE)PDE_TBASE) &&
  3348. (PointerPte < (PMMPTE)(PDE_TBASE + PAGE_SIZE))) {
  3349. return TRUE;
  3350. }
  3351. return FALSE;
  3352. }
  3353. //++
  3354. //VOID
  3355. //MI_DISPLAY_TRAP_INFORMATION (
  3356. // IN PVOID TrapInformation
  3357. // );
  3358. //
  3359. // Routine Description:
  3360. //
  3361. // Display any relevant trap information to aid debugging.
  3362. //
  3363. // Arguments
  3364. //
  3365. // TrapInformation - Supplies a pointer to a trap frame.
  3366. //
  3367. // Return Value:
  3368. //
  3369. // None.
  3370. //
  3371. #define MI_DISPLAY_TRAP_INFORMATION(TrapInformation) \
  3372. KdPrint(("MM:***IIP %p, IIPA %p\n", \
  3373. ((PKTRAP_FRAME) (TrapInformation))->StIIP, \
  3374. ((PKTRAP_FRAME) (TrapInformation))->StIIPA));