Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

3031 lines
91 KiB

  1. /*++
  2. Copyright (c) 1990 Microsoft Corporation
  3. Module Name:
  4. initia64.c
  5. Abstract:
  6. This module contains the machine dependent initialization for the
  7. memory management component. It is specifically tailored to the
  8. IA64 architecture.
  9. Author:
  10. Koichi Yamada (kyamada) 9-Jan-1996
  11. Landy Wang (landyw) 2-June-1997
  12. Revision History:
  13. --*/
  14. #include "mi.h"
  15. VOID
  16. MiConvertToSuperPages (
  17. PVOID StartVirtual,
  18. PVOID EndVirtual,
  19. SIZE_T PageSize
  20. );
  21. VOID
  22. MiConvertBackToStandardPages (
  23. IN PVOID StartVirtual,
  24. IN PVOID EndVirtual
  25. );
  26. VOID
  27. MiBuildPageTableForLoaderMemory (
  28. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  29. );
  30. PVOID
  31. MiConvertToLoaderVirtual (
  32. IN PFN_NUMBER Page,
  33. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  34. );
  35. VOID
  36. MiRemoveLoaderSuperPages (
  37. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  38. );
  39. VOID
  40. MiCompactMemoryDescriptorList (
  41. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  42. );
  43. VOID
  44. MiInitializeTbImage (
  45. VOID
  46. );
  47. VOID
  48. MiAddTrEntry (
  49. ULONG_PTR BaseAddress,
  50. ULONG_PTR EndAddress
  51. );
  52. VOID
  53. MiEliminateDriverTrEntries (
  54. VOID
  55. );
// PTE address mapping MM_MAX_WOW64_ADDRESS; computed once during
// MiInitMachineDependent so it need not be recalculated later.
PVOID MiMaxWow64Pte;

//
// This is enabled once the memory management page table structures and TB
// entries have been initialized and can be safely referenced.
//
LOGICAL MiMappingsInitialized = FALSE;

// Physical page of the top-level (parent) page table for kernel space.
PFN_NUMBER MmSystemParentTablePage;

// Physical page of the parent page table for win32k (session) space.
PFN_NUMBER MmSessionParentTablePage;

// Region mapping information for session space.
REGION_MAP_INFO MmSessionMapInfo;

// Self-map PTEs for the kernel and user parent tables; also used to
// (re)install the fixed PDE_KTBASE / PDE_UTBASE TB entries.
MMPTE MiSystemSelfMappedPte;
MMPTE MiUserSelfMappedPte;

// Virtual and physical bounds of the KSEG0 identity-style mapping, plus
// a flag indicating whether KSEG0 mapping is in use for initial pool.
PVOID MiKseg0Start;
PVOID MiKseg0End;
PFN_NUMBER MiKseg0StartFrame;
PFN_NUMBER MiKseg0EndFrame;
BOOLEAN MiKseg0Mapping;

// Kernel image mapping captured from the loader's ITR_KERNEL_INDEX
// translation register: physical base, virtual base and TR page shift.
PFN_NUMBER MiNtoskrnlPhysicalBase;
ULONG_PTR MiNtoskrnlVirtualBase;
ULONG MiNtoskrnlPageShift;

// Default parent-directory entry used for the unused address regions.
MMPTE MiDefaultPpe;

// NOTE(review): not referenced in this portion of the file — presumably
// tracks a discarded physical page run; confirm against the remainder.
PFN_NUMBER MiWasteStart;
PFN_NUMBER MiWasteEnd;

// Size constants (bytes) and their page counts ("np" = number of pages).
#define _x1mb (1024*1024)
#define _x1mbnp ((1024*1024) >> PAGE_SHIFT)
#define _x4mb (1024*1024*4)
#define _x4mbnp ((1024*1024*4) >> PAGE_SHIFT)
#define _x16mb (1024*1024*16)
#define _x16mbnp ((1024*1024*16) >> PAGE_SHIFT)
#define _x64mb (1024*1024*64)
#define _x64mbnp ((1024*1024*64) >> PAGE_SHIFT)
#define _x256mb (1024*1024*256)
#define _x256mbnp ((1024*1024*256) >> PAGE_SHIFT)
#define _x4gb (0x100000000UI64)
#define _x4gbnp (0x100000000UI64 >> PAGE_SHIFT)

// Phase-0 page allocation state: the descriptor currently being consumed
// and the candidate descriptors (largest free, below-256mb, and the one
// overlapping the kernel's TR mapping).
PMEMORY_ALLOCATION_DESCRIPTOR MiFreeDescriptor;
PMEMORY_ALLOCATION_DESCRIPTOR MiFreeDescriptorLargest;
PMEMORY_ALLOCATION_DESCRIPTOR MiFreeDescriptorLowMem;
PMEMORY_ALLOCATION_DESCRIPTOR MiFreeDescriptorNonPaged;

// Next physical page to hand out and the count still available in the
// current descriptor.
PFN_NUMBER MiNextPhysicalPage;
PFN_NUMBER MiNumberOfPages;

// Original base/count of the current descriptor, saved when switching —
// the loader descriptors must be restored before returning to the caller.
PFN_NUMBER MiOldFreeDescriptorBase;
PFN_NUMBER MiOldFreeDescriptorCount;

// Defined elsewhere in memory management.
extern KEVENT MiImageMappingPteEvent;

//
// Examine the 8 icache & dcache TR entries looking for a match.
// It is too bad the number of entries is hardcoded into the
// loader block. Since it is this way, declare our own static array
// and also assume also that the ITR and DTR entries are contiguous
// and just keep walking into the DTR if a match cannot be found in the ITR.
//
#define NUMBER_OF_LOADER_TR_ENTRIES 8

// A contiguous run of physical frames covered by a TR entry.
typedef struct _CACHED_FRAME_RUN {
    PFN_NUMBER BasePage;
    PFN_NUMBER LastPage;
} CACHED_FRAME_RUN, *PCACHED_FRAME_RUN;

// Frame runs cached from the ITR and DTR entries (hence 2x the count),
// and a cursor to the last populated entry.
CACHED_FRAME_RUN MiCachedFrames[2 * NUMBER_OF_LOADER_TR_ENTRIES];
PCACHED_FRAME_RUN MiLastCachedFrame;

// Current and as-booted translation register images, plus a cursor to
// the last TR entry recorded by MiAddTrEntry.
TR_INFO MiTrInfo[2 * NUMBER_OF_LOADER_TR_ENTRIES];
TR_INFO MiBootedTrInfo[2 * NUMBER_OF_LOADER_TR_ENTRIES];
PTR_INFO MiLastTrEntry;

// Defined elsewhere in memory management.
extern LOGICAL MiAllDriversRelocated;
  117. PFN_NUMBER
  118. MiGetNextPhysicalPage (
  119. VOID
  120. );
  121. BOOLEAN
  122. MiEnsureAvailablePagesInFreeDescriptor (
  123. IN PFN_NUMBER Pages,
  124. IN PFN_NUMBER MaxPage
  125. );
  126. VOID
  127. MiCompactMemoryDescriptorList (
  128. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  129. );
  130. #ifdef ALLOC_PRAGMA
  131. #pragma alloc_text(INIT,MiInitMachineDependent)
  132. #pragma alloc_text(INIT,MiGetNextPhysicalPage)
  133. #pragma alloc_text(INIT,MiEnsureAvailablePagesInFreeDescriptor)
  134. #pragma alloc_text(INIT,MiBuildPageTableForLoaderMemory)
  135. #pragma alloc_text(INIT,MiConvertToLoaderVirtual)
  136. #pragma alloc_text(INIT,MiInitializeTbImage)
  137. #pragma alloc_text(INIT,MiAddTrEntry)
  138. #pragma alloc_text(INIT,MiCompactMemoryDescriptorList)
  139. #pragma alloc_text(INIT,MiRemoveLoaderSuperPages)
  140. #pragma alloc_text(INIT,MiConvertToSuperPages)
  141. #endif
  142. PFN_NUMBER
  143. MiGetNextPhysicalPage (
  144. VOID
  145. )
  146. /*++
  147. Routine Description:
  148. This function returns the next physical page number from either the
  149. largest low memory descriptor or the largest free descriptor. If there
  150. are no physical pages left, then a bugcheck is executed since the
  151. system cannot be initialized.
  152. Arguments:
  153. None.
  154. Return Value:
  155. The next physical page number.
  156. Environment:
  157. Kernel mode.
  158. --*/
  159. {
  160. //
  161. // If there are free pages left in the current descriptor, then
  162. // return the next physical page. Otherwise, attempt to switch
  163. // descriptors.
  164. //
  165. if (MiNumberOfPages != 0) {
  166. MiNumberOfPages -= 1;
  167. }
  168. else {
  169. //
  170. // If the current descriptor is not the largest free descriptor,
  171. // then switch to the next descriptor.
  172. //
  173. // The order is nonpaged, then low, then largest. Otherwise, bugcheck.
  174. //
  175. if (MiFreeDescriptor == MiFreeDescriptorLargest) {
  176. KeBugCheckEx (INSTALL_MORE_MEMORY,
  177. MmNumberOfPhysicalPages,
  178. MmLowestPhysicalPage,
  179. MmHighestPhysicalPage,
  180. 0);
  181. }
  182. if ((MiFreeDescriptor != MiFreeDescriptorLowMem) &&
  183. (MiFreeDescriptorLowMem != NULL)) {
  184. MiFreeDescriptor = MiFreeDescriptorLowMem;
  185. }
  186. else if (MiFreeDescriptorLargest != NULL) {
  187. MiFreeDescriptor = MiFreeDescriptorLargest;
  188. }
  189. else {
  190. KeBugCheckEx (INSTALL_MORE_MEMORY,
  191. MmNumberOfPhysicalPages,
  192. MmLowestPhysicalPage,
  193. MmHighestPhysicalPage,
  194. 1);
  195. }
  196. MiOldFreeDescriptorCount = MiFreeDescriptor->PageCount;
  197. MiOldFreeDescriptorBase = MiFreeDescriptor->BasePage;
  198. MiNumberOfPages = MiFreeDescriptor->PageCount - 1;
  199. MiNextPhysicalPage = MiFreeDescriptor->BasePage;
  200. }
  201. return MiNextPhysicalPage++;
  202. }
  203. BOOLEAN
  204. MiEnsureAvailablePagesInFreeDescriptor (
  205. IN PFN_NUMBER PagesDesired,
  206. IN PFN_NUMBER MaxPage
  207. )
  208. {
  209. //
  210. // The order of descriptor usage (assuming all are present) is
  211. // nonpaged, then low, then largest. Note also that low and largest
  212. // may be the same descriptor.
  213. //
  214. // If we are still in the nonpaged descriptor, then switch now as
  215. // only the low or largest will have pages in the range our caller
  216. // desires (KSEG0). This is because the nonpaged descriptor is mapped
  217. // with its own kernel TB entry.
  218. //
  219. if (MiFreeDescriptor == MiFreeDescriptorNonPaged) {
  220. //
  221. // Switch to the next descriptor as this one is unsatisfactory
  222. // for our needs.
  223. //
  224. if (MiFreeDescriptorLowMem != NULL) {
  225. MiFreeDescriptor = MiFreeDescriptorLowMem;
  226. }
  227. else if (MiFreeDescriptorLargest != NULL) {
  228. MiFreeDescriptor = MiFreeDescriptorLargest;
  229. }
  230. else {
  231. return FALSE;
  232. }
  233. MiOldFreeDescriptorCount = MiFreeDescriptor->PageCount;
  234. MiOldFreeDescriptorBase = MiFreeDescriptor->BasePage;
  235. MiNumberOfPages = MiFreeDescriptor->PageCount;
  236. MiNextPhysicalPage = MiFreeDescriptor->BasePage;
  237. }
  238. //
  239. // If there are not enough pages in the descriptor then return FALSE.
  240. //
  241. if (MiNumberOfPages < PagesDesired) {
  242. return FALSE;
  243. }
  244. //
  245. // Besides having enough pages, they must also fit our caller's
  246. // physical range.
  247. //
  248. if (MiNextPhysicalPage + PagesDesired > MaxPage) {
  249. return FALSE;
  250. }
  251. return TRUE;
  252. }
  253. VOID
  254. MiInitMachineDependent (
  255. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  256. )
  257. /*++
  258. Routine Description:
  259. This routine performs the necessary operations to enable virtual
  260. memory. This includes building the page directory page, building
  261. page table pages to map the code section, the data section, the
  262. stack section and the trap handler.
  263. It also initializes the PFN database and populates the free list.
  264. Arguments:
  265. LoaderBlock - Supplies a pointer to the firmware setup loader block.
  266. Return Value:
  267. None.
  268. Environment:
  269. Kernel mode.
  270. N.B. This routine uses memory from the loader block descriptors, but
  271. the descriptors themselves must be restored prior to return as our caller
  272. walks them to create the MmPhysicalMemoryBlock.
  273. --*/
  274. {
  275. #if 0
  276. PMMPFN BasePfn;
  277. PMMPFN TopPfn;
  278. PMMPFN BottomPfn;
  279. SIZE_T Range;
  280. #endif
  281. ULONG BasePage;
  282. ULONG PageCount;
  283. PHYSICAL_ADDRESS MaxHotPlugMemoryAddress;
  284. PFN_NUMBER i;
  285. ULONG j;
  286. PFN_NUMBER PdePageNumber;
  287. PFN_NUMBER PdePage;
  288. PFN_NUMBER PpePage;
  289. PFN_NUMBER PageFrameIndex;
  290. PFN_NUMBER NextPhysicalPage;
  291. SPFN_NUMBER PfnAllocation;
  292. SIZE_T MaxPool;
  293. PEPROCESS CurrentProcess;
  294. PFN_NUMBER MostFreePage;
  295. PFN_NUMBER MostFreeLowMem;
  296. PFN_NUMBER MostFreeNonPaged;
  297. PLIST_ENTRY NextMd;
  298. PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
  299. MMPTE TempPte;
  300. PMMPTE PointerPde;
  301. PMMPTE PointerPte;
  302. PMMPTE StartPte;
  303. PMMPTE LastPte;
  304. PMMPTE Pde;
  305. PMMPTE StartPde;
  306. PMMPTE StartPpe;
  307. PMMPTE EndPde;
  308. PMMPFN Pfn1;
  309. PMMPFN Pfn2;
  310. ULONG First;
  311. SIZE_T Kseg0Size;
  312. PFN_NUMBER Kseg0Base;
  313. PFN_NUMBER Kseg0Pages;
  314. PVOID NonPagedPoolStartVirtual;
  315. ULONG_PTR PageSize;
  316. PFN_NUMBER KernelStart;
  317. PFN_NUMBER KernelEnd;
  318. SIZE_T MaximumNonPagedPoolInBytesLimit;
  319. ULONG OriginalLowMemDescriptorBase;
  320. ULONG OriginalLowMemDescriptorCount;
  321. ULONG OriginalLargestDescriptorBase;
  322. ULONG OriginalLargestDescriptorCount;
  323. ULONG OriginalNonPagedDescriptorBase;
  324. ULONG OriginalNonPagedDescriptorCount;
  325. PVOID SystemPteStart;
  326. ULONG ReturnedLength;
  327. NTSTATUS status;
  328. PTR_INFO ItrInfo;
  329. OriginalLargestDescriptorBase = 0;
  330. OriginalLargestDescriptorCount = 0;
  331. OriginalLowMemDescriptorBase = 0;
  332. OriginalLowMemDescriptorCount = 0;
  333. OriginalNonPagedDescriptorBase = 0;
  334. OriginalNonPagedDescriptorCount = 0;
  335. MaximumNonPagedPoolInBytesLimit = 0;
  336. MostFreePage = 0;
  337. MostFreeLowMem = 0;
  338. MostFreeNonPaged = 0;
  339. Kseg0Base = 0;
  340. Kseg0Size = 0;
  341. Kseg0Pages = 0;
  342. //
  343. // Initialize some variables so they do not need to be constantly
  344. // recalculated throughout the life of the system.
  345. //
  346. MiMaxWow64Pte = (PVOID) MiGetPteAddress ((PVOID)MM_MAX_WOW64_ADDRESS);
  347. //
  348. // Initialize the kernel mapping info.
  349. //
  350. ItrInfo = &LoaderBlock->u.Ia64.ItrInfo[ITR_KERNEL_INDEX];
  351. MiNtoskrnlPhysicalBase = ItrInfo->PhysicalAddress;
  352. MiNtoskrnlVirtualBase = ItrInfo->VirtualAddress;
  353. MiNtoskrnlPageShift = ItrInfo->PageSize;
  354. KernelStart = MiNtoskrnlPhysicalBase >> PAGE_SHIFT;
  355. PageSize = (ULONG_PTR)1 << MiNtoskrnlPageShift;
  356. KernelEnd = KernelStart + (PageSize >> PAGE_SHIFT);
  357. //
  358. // Initialize MmDebugPte and MmCrashDumpPte.
  359. //
  360. MmDebugPte = MiGetPteAddress (MM_DEBUG_VA);
  361. MmCrashDumpPte = MiGetPteAddress (MM_CRASH_DUMP_VA);
  362. //
  363. // Set TempPte to ValidKernelPte for future use.
  364. //
  365. TempPte = ValidKernelPte;
  366. //
  367. // Compact the memory descriptor list from the loader.
  368. //
  369. MiCompactMemoryDescriptorList (LoaderBlock);
  370. //
  371. // Get the lower bound of the free physical memory and the
  372. // number of physical pages by walking the memory descriptor lists.
  373. //
  374. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  375. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  376. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  377. MEMORY_ALLOCATION_DESCRIPTOR,
  378. ListEntry);
  379. if ((MemoryDescriptor->MemoryType != LoaderBBTMemory) &&
  380. (MemoryDescriptor->MemoryType != LoaderSpecialMemory)) {
  381. BasePage = MemoryDescriptor->BasePage;
  382. PageCount = MemoryDescriptor->PageCount;
  383. //
  384. // This check results in /BURNMEMORY chunks not being counted.
  385. //
  386. if (MemoryDescriptor->MemoryType != LoaderBad) {
  387. MmNumberOfPhysicalPages += PageCount;
  388. }
  389. if (BasePage < MmLowestPhysicalPage) {
  390. MmLowestPhysicalPage = BasePage;
  391. }
  392. if ((MemoryDescriptor->MemoryType != LoaderFirmwarePermanent) &&
  393. (MemoryDescriptor->MemoryType != LoaderBad)) {
  394. if ((BasePage + PageCount) > MmHighestPhysicalPage) {
  395. MmHighestPhysicalPage = BasePage + PageCount -1;
  396. }
  397. }
  398. if ((MemoryDescriptor->MemoryType == LoaderFree) ||
  399. (MemoryDescriptor->MemoryType == LoaderLoadedProgram) ||
  400. (MemoryDescriptor->MemoryType == LoaderFirmwareTemporary) ||
  401. (MemoryDescriptor->MemoryType == LoaderOsloaderStack)) {
  402. if (PageCount > MostFreePage) {
  403. MostFreePage = PageCount;
  404. MiFreeDescriptorLargest = MemoryDescriptor;
  405. }
  406. if (MemoryDescriptor->BasePage < _x256mbnp) {
  407. //
  408. // This memory descriptor starts below 256mb.
  409. //
  410. if ((MostFreeLowMem < PageCount) &&
  411. (MostFreeLowMem < _x256mbnp - BasePage)) {
  412. MostFreeLowMem = _x256mbnp - BasePage;
  413. if (PageCount < MostFreeLowMem) {
  414. MostFreeLowMem = PageCount;
  415. }
  416. MiFreeDescriptorLowMem = MemoryDescriptor;
  417. }
  418. }
  419. if ((BasePage >= KernelStart) &&
  420. (BasePage < KernelEnd) &&
  421. (PageCount > MostFreeNonPaged)) {
  422. MostFreeNonPaged = PageCount;
  423. MiFreeDescriptorNonPaged = MemoryDescriptor;
  424. }
  425. }
  426. }
  427. NextMd = MemoryDescriptor->ListEntry.Flink;
  428. }
  429. if (MiFreeDescriptorLargest != NULL) {
  430. OriginalLargestDescriptorBase = MiFreeDescriptorLargest->BasePage;
  431. OriginalLargestDescriptorCount = MiFreeDescriptorLargest->PageCount;
  432. }
  433. if (MiFreeDescriptorLowMem != NULL) {
  434. OriginalLowMemDescriptorBase = MiFreeDescriptorLowMem->BasePage;
  435. OriginalLowMemDescriptorCount = MiFreeDescriptorLowMem->PageCount;
  436. }
  437. if (MiFreeDescriptorNonPaged != NULL) {
  438. OriginalNonPagedDescriptorBase = MiFreeDescriptorNonPaged->BasePage;
  439. OriginalNonPagedDescriptorCount = MiFreeDescriptorNonPaged->PageCount;
  440. }
  441. //
  442. // MmDynamicPfn may have been initialized based on the registry to
  443. // a value representing the highest physical address in gigabytes.
  444. //
  445. MmDynamicPfn *= ((1024 * 1024 * 1024) / PAGE_SIZE);
  446. //
  447. // Retrieve highest hot plug memory range from the HAL if
  448. // available and not otherwise retrieved from the registry.
  449. //
  450. if (MmDynamicPfn == 0) {
  451. status = HalQuerySystemInformation(
  452. HalQueryMaxHotPlugMemoryAddress,
  453. sizeof(PHYSICAL_ADDRESS),
  454. (PPHYSICAL_ADDRESS) &MaxHotPlugMemoryAddress,
  455. &ReturnedLength);
  456. if (NT_SUCCESS(status)) {
  457. ASSERT (ReturnedLength == sizeof(PHYSICAL_ADDRESS));
  458. MmDynamicPfn = (PFN_NUMBER) (MaxHotPlugMemoryAddress.QuadPart / PAGE_SIZE);
  459. }
  460. }
  461. if (MmDynamicPfn != 0) {
  462. MmHighestPossiblePhysicalPage = MI_DTC_MAX_PAGES - 1;
  463. if (MmDynamicPfn - 1 < MmHighestPossiblePhysicalPage) {
  464. if (MmDynamicPfn - 1 < MmHighestPhysicalPage) {
  465. MmDynamicPfn = MmHighestPhysicalPage + 1;
  466. }
  467. MmHighestPossiblePhysicalPage = MmDynamicPfn - 1;
  468. }
  469. }
  470. else {
  471. MmHighestPossiblePhysicalPage = MmHighestPhysicalPage;
  472. }
  473. //
  474. // Only machines with at least 5GB of physical memory get to use this.
  475. //
  476. if (strstr(LoaderBlock->LoadOptions, "NOLOWMEM")) {
  477. if (MmNumberOfPhysicalPages >= ((ULONGLONG)5 * 1024 * 1024 * 1024 / PAGE_SIZE)) {
  478. MiNoLowMemory = (PFN_NUMBER)((ULONGLONG)_4gb / PAGE_SIZE);
  479. }
  480. }
  481. if (MiNoLowMemory != 0) {
  482. MmMakeLowMemory = TRUE;
  483. }
  484. //
  485. // Initialize the Phase0 page allocation structures.
  486. //
  487. if (MiFreeDescriptorNonPaged != NULL) {
  488. MiFreeDescriptor = MiFreeDescriptorNonPaged;
  489. }
  490. else if (MiFreeDescriptorLowMem != NULL) {
  491. MiFreeDescriptor = MiFreeDescriptorLowMem;
  492. }
  493. else {
  494. MiFreeDescriptor = MiFreeDescriptorLargest;
  495. }
  496. MiNextPhysicalPage = MiFreeDescriptor->BasePage;
  497. MiNumberOfPages = MiFreeDescriptor->PageCount;
  498. MiOldFreeDescriptorCount = MiFreeDescriptor->PageCount;
  499. MiOldFreeDescriptorBase = MiFreeDescriptor->BasePage;
  500. MiKseg0Mapping = FALSE;
  501. //
  502. // Compute the size of the Kseg 0 space.
  503. //
  504. if (MiFreeDescriptorLowMem != NULL) {
  505. MiKseg0Mapping = TRUE;
  506. Kseg0Base = MiFreeDescriptorLowMem->BasePage;
  507. Kseg0Pages = MiFreeDescriptorLowMem->PageCount;
  508. MiKseg0Start = KSEG0_ADDRESS(Kseg0Base);
  509. if (Kseg0Base + Kseg0Pages > MM_PAGES_IN_KSEG0) {
  510. Kseg0Pages = MM_PAGES_IN_KSEG0 - Kseg0Base;
  511. }
  512. Kseg0Size = Kseg0Pages << PAGE_SHIFT;
  513. }
  514. //
  515. // Build the parent directory page table for kernel space.
  516. //
  517. PdePageNumber = (PFN_NUMBER)LoaderBlock->u.Ia64.PdrPage;
  518. MmSystemParentTablePage = MiGetNextPhysicalPage ();
  519. RtlZeroMemory (KSEG_ADDRESS(MmSystemParentTablePage), PAGE_SIZE);
  520. TempPte.u.Hard.PageFrameNumber = MmSystemParentTablePage;
  521. MiSystemSelfMappedPte = TempPte;
  522. KeFillFixedEntryTb ((PHARDWARE_PTE)&TempPte,
  523. (PVOID)PDE_KTBASE,
  524. PAGE_SHIFT,
  525. DTR_KTBASE_INDEX_TMP);
  526. //
  527. // Initialize the selfmap PPE entry in the kernel parent directory table.
  528. //
  529. PointerPte = MiGetPteAddress((PVOID)PDE_KTBASE);
  530. MI_WRITE_VALID_PTE(PointerPte, TempPte);
  531. //
  532. // Initialize the kernel image PPE entry in the parent directory table.
  533. //
  534. PointerPte = MiGetPteAddress((PVOID)PDE_KBASE);
  535. TempPte.u.Hard.PageFrameNumber = PdePageNumber;
  536. MI_WRITE_VALID_PTE(PointerPte, TempPte);
  537. //
  538. // Build the parent directory page table for user space.
  539. //
  540. NextPhysicalPage = MiGetNextPhysicalPage ();
  541. RtlZeroMemory (KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  542. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  543. CurrentProcess = PsGetCurrentProcess ();
  544. INITIALIZE_DIRECTORY_TABLE_BASE (
  545. &CurrentProcess->Pcb.DirectoryTableBase[0], NextPhysicalPage);
  546. MiUserSelfMappedPte = TempPte;
  547. KeFillFixedEntryTb ((PHARDWARE_PTE)&TempPte,
  548. (PVOID)PDE_UTBASE,
  549. PAGE_SHIFT,
  550. DTR_UTBASE_INDEX_TMP);
  551. //
  552. // Initialize the selfmap PPE entry in the user parent directory table.
  553. //
  554. PointerPte = MiGetPteAddress((PVOID)PDE_UTBASE);
  555. MI_WRITE_VALID_PTE(PointerPte, TempPte);
  556. //
  557. // Build the parent directory page table for win32k (session) space.
  558. //
  559. // TS will only allocate a map for session space when each one is
  560. // actually created by smss.
  561. //
  562. // Note TS must NOT map session space into the system process.
  563. // The system process is kept Hydra-free so that trims can happen
  564. // properly and also so that renegade worker items are caught.
  565. //
  566. NextPhysicalPage = MiGetNextPhysicalPage ();
  567. RtlZeroMemory (KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  568. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  569. MmSessionParentTablePage = NextPhysicalPage;
  570. INITIALIZE_DIRECTORY_TABLE_BASE
  571. (&CurrentProcess->Pcb.SessionParentBase, NextPhysicalPage);
  572. KeFillFixedEntryTb ((PHARDWARE_PTE)&TempPte,
  573. (PVOID)PDE_STBASE,
  574. PAGE_SHIFT,
  575. DTR_STBASE_INDEX);
  576. //
  577. // Initialize the selfmap PPE entry in the Hydra parent directory table.
  578. //
  579. PointerPte = MiGetPteAddress((PVOID)PDE_STBASE);
  580. MI_WRITE_VALID_PTE(PointerPte, TempPte);
  581. //
  582. // Initialize the default PPE for the unused regions.
  583. //
  584. NextPhysicalPage = MiGetNextPhysicalPage ();
  585. PointerPte = KSEG_ADDRESS(NextPhysicalPage);
  586. RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
  587. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  588. MiDefaultPpe = TempPte;
  589. PointerPte[MiGetPpeOffset(PDE_TBASE)] = TempPte;
  590. //
  591. // Build non-paged pool using the physical pages following the
  592. // data page in which to build the pool from. Non-paged pool grows
  593. // from the high range of the virtual address space and expands
  594. // downward.
  595. //
  596. // Initial non-paged pool is constructed so virtual addresses
  597. // are also physically contiguous.
  598. //
  599. if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
  600. (7 * (MmNumberOfPhysicalPages >> 3))) {
  601. //
  602. // More than 7/8 of memory allocated to nonpagedpool, reset to 0.
  603. //
  604. MmSizeOfNonPagedPoolInBytes = 0;
  605. }
  606. if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) {
  607. //
  608. // Calculate the size of nonpaged pool.
  609. // Use the minimum size, then for every MB above 16mb add extra pages.
  610. //
  611. MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
  612. MmSizeOfNonPagedPoolInBytes +=
  613. ((MmNumberOfPhysicalPages - _x16mbnp)/_x1mbnp) *
  614. MmMinAdditionNonPagedPoolPerMb;
  615. }
  616. if (MmSizeOfNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL) {
  617. MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL;
  618. }
  619. //
  620. // If the registry specifies a total nonpaged pool percentage cap, enforce
  621. // it here.
  622. //
  623. if (MmMaximumNonPagedPoolPercent != 0) {
  624. if (MmMaximumNonPagedPoolPercent < 5) {
  625. MmMaximumNonPagedPoolPercent = 5;
  626. }
  627. else if (MmMaximumNonPagedPoolPercent > 80) {
  628. MmMaximumNonPagedPoolPercent = 80;
  629. }
  630. //
  631. // Use the registry-expressed percentage value.
  632. //
  633. MaximumNonPagedPoolInBytesLimit =
  634. ((MmNumberOfPhysicalPages * MmMaximumNonPagedPoolPercent) / 100);
  635. MaximumNonPagedPoolInBytesLimit *= PAGE_SIZE;
  636. if (MaximumNonPagedPoolInBytesLimit < 6 * 1024 * 1024) {
  637. MaximumNonPagedPoolInBytesLimit = 6 * 1024 * 1024;
  638. }
  639. if (MmSizeOfNonPagedPoolInBytes > MaximumNonPagedPoolInBytesLimit) {
  640. MmSizeOfNonPagedPoolInBytes = MaximumNonPagedPoolInBytesLimit;
  641. }
  642. }
  643. //
  644. // Align to page size boundary.
  645. //
  646. MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
  647. //
  648. // Calculate the maximum size of pool.
  649. //
  650. if (MmMaximumNonPagedPoolInBytes == 0) {
  651. //
  652. // Calculate the size of nonpaged pool, adding extra pages for
  653. // every MB above 16mb.
  654. //
  655. MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
  656. //
  657. // Make sure enough expansion for the PFN database exists.
  658. //
  659. MmMaximumNonPagedPoolInBytes += (MmHighestPossiblePhysicalPage * sizeof(MMPFN)) & ~(PAGE_SIZE-1);
  660. MmMaximumNonPagedPoolInBytes +=
  661. ((SIZE_T)((MmNumberOfPhysicalPages - _x16mbnp)/_x1mbnp) *
  662. MmMaxAdditionNonPagedPoolPerMb);
  663. if ((MmMaximumNonPagedPoolPercent != 0) &&
  664. (MmMaximumNonPagedPoolInBytes > MaximumNonPagedPoolInBytesLimit)) {
  665. MmMaximumNonPagedPoolInBytes = MaximumNonPagedPoolInBytesLimit;
  666. }
  667. }
  668. MaxPool = MmSizeOfNonPagedPoolInBytes + PAGE_SIZE * 16 +
  669. ((MmHighestPossiblePhysicalPage * sizeof(MMPFN)) & ~(PAGE_SIZE -1));
  670. if (MmMaximumNonPagedPoolInBytes < MaxPool) {
  671. MmMaximumNonPagedPoolInBytes = MaxPool;
  672. }
  673. if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  674. MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL;
  675. }
  676. MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmNonPagedPoolEnd
  677. - MmMaximumNonPagedPoolInBytes);
  678. MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
  679. MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
  680. //
  681. // Calculate the starting address for the system PTE pool which is
  682. // right below the nonpaged pool.
  683. //
  684. MmNonPagedSystemStart = (PVOID)(((ULONG_PTR)MmNonPagedPoolStart -
  685. (((ULONG_PTR)MmNumberOfSystemPtes + 1) * PAGE_SIZE)) &
  686. (~PAGE_DIRECTORY2_MASK));
  687. if (MmNonPagedSystemStart < MM_LOWEST_NONPAGED_SYSTEM_START) {
  688. MmNonPagedSystemStart = MM_LOWEST_NONPAGED_SYSTEM_START;
  689. MmNumberOfSystemPtes = (ULONG)(((ULONG_PTR)MmNonPagedPoolStart -
  690. (ULONG_PTR)MmNonPagedSystemStart) >> PAGE_SHIFT)-1;
  691. ASSERT (MmNumberOfSystemPtes > 1000);
  692. }
  693. //
  694. // Snap the system PTE start address as page directories and tables
  695. // will be preallocated for this range.
  696. //
  697. SystemPteStart = (PVOID) MmNonPagedSystemStart;
  698. //
  699. // If special pool and/or the driver verifier is enabled, reserve
  700. // extra virtual address space for special pooling now. For now,
  701. // arbitrarily don't let it be larger than paged pool (128gb).
  702. //
  703. if ((MmVerifyDriverBufferLength != (ULONG)-1) ||
  704. ((MmSpecialPoolTag != 0) && (MmSpecialPoolTag != (ULONG)-1))) {
  705. if (MmNonPagedSystemStart > MM_LOWEST_NONPAGED_SYSTEM_START) {
  706. MaxPool = (ULONG_PTR)MmNonPagedSystemStart -
  707. (ULONG_PTR)MM_LOWEST_NONPAGED_SYSTEM_START;
  708. if (MaxPool > MM_MAX_PAGED_POOL) {
  709. MaxPool = MM_MAX_PAGED_POOL;
  710. }
  711. MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedSystemStart - MaxPool);
  712. MmSpecialPoolStart = MmNonPagedSystemStart;
  713. MmSpecialPoolEnd = (PVOID)((ULONG_PTR)MmNonPagedSystemStart + MaxPool);
  714. }
  715. }
  716. //
  717. // Map the hyper space page directory page into the top level parent
  718. // directory & the hyper space page table page into the page directory.
  719. // Additional page parents, directories & tables are set up later
  720. // on during individual process working set initialization.
  721. //
  722. TempPte = ValidPdePde;
  723. StartPpe = MiGetPpeAddress(HYPER_SPACE);
  724. if (StartPpe->u.Hard.Valid == 0) {
  725. ASSERT (StartPpe->u.Long == 0);
  726. NextPhysicalPage = MiGetNextPhysicalPage ();
  727. RtlZeroMemory (KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  728. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  729. MI_WRITE_VALID_PTE (StartPpe, TempPte);
  730. }
  731. StartPde = MiGetPdeAddress (HYPER_SPACE);
  732. NextPhysicalPage = MiGetNextPhysicalPage ();
  733. RtlZeroMemory (KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  734. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  735. MI_WRITE_VALID_PTE (StartPde, TempPte);
  736. //
  737. // Allocate page directory and page table pages for
  738. // system PTEs and nonpaged pool (but not the special pool area).
  739. //
  740. TempPte = ValidKernelPte;
  741. StartPpe = MiGetPpeAddress (SystemPteStart);
  742. StartPde = MiGetPdeAddress (SystemPteStart);
  743. EndPde = MiGetPdeAddress ((PVOID)((ULONG_PTR)MmNonPagedPoolEnd - 1));
  744. First = (StartPpe->u.Hard.Valid == 0) ? TRUE : FALSE;
  745. while (StartPde <= EndPde) {
  746. if (First == TRUE || MiIsPteOnPdeBoundary(StartPde)) {
  747. First = FALSE;
  748. StartPpe = MiGetPteAddress(StartPde);
  749. if (StartPpe->u.Hard.Valid == 0) {
  750. NextPhysicalPage = MiGetNextPhysicalPage ();
  751. RtlZeroMemory (KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  752. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  753. MI_WRITE_VALID_PTE (StartPpe, TempPte);
  754. }
  755. }
  756. if (StartPde->u.Hard.Valid == 0) {
  757. NextPhysicalPage = MiGetNextPhysicalPage ();
  758. RtlZeroMemory (KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  759. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  760. MI_WRITE_VALID_PTE (StartPde, TempPte);
  761. }
  762. StartPde += 1;
  763. }
  764. NonPagedPoolStartVirtual = MmNonPagedPoolStart;
  765. MiBuildPageTableForLoaderMemory (LoaderBlock);
  766. MiRemoveLoaderSuperPages (LoaderBlock);
  767. //
  768. // Remove the temporary super pages for the root page table pages,
  769. // and remap them with DTR_KTBASE_INDEX and DTR_UTBASE_INDEX.
  770. //
  771. KiFlushFixedDataTb (FALSE, (PVOID)PDE_KTBASE);
  772. KiFlushFixedDataTb (FALSE, (PVOID)PDE_UTBASE);
  773. KeFillFixedEntryTb ((PHARDWARE_PTE)&MiSystemSelfMappedPte,
  774. (PVOID)PDE_KTBASE,
  775. PAGE_SHIFT,
  776. DTR_KTBASE_INDEX);
  777. KeFillFixedEntryTb ((PHARDWARE_PTE)&MiUserSelfMappedPte,
  778. (PVOID)PDE_UTBASE,
  779. PAGE_SHIFT,
  780. DTR_UTBASE_INDEX);
  781. if (MiKseg0Mapping == TRUE) {
  782. //
  783. // Fill in the PDEs for KSEG0 space.
  784. //
  785. StartPde = MiGetPdeAddress (MiKseg0Start);
  786. MiKseg0End = (PVOID) ((PCHAR)MiKseg0Start + Kseg0Size);
  787. EndPde = MiGetPdeAddress ((PCHAR)MiKseg0End - 1);
  788. while (StartPde <= EndPde) {
  789. if (StartPde->u.Hard.Valid == 0) {
  790. NextPhysicalPage = MiGetNextPhysicalPage ();
  791. RtlZeroMemory (KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  792. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  793. MI_WRITE_VALID_PTE (StartPde, TempPte);
  794. }
  795. StartPde += 1;
  796. }
  797. }
  798. MiInitializeTbImage ();
  799. MiMappingsInitialized = TRUE;
  800. //
  801. // The initial nonpaged pool is created from low memory space (first 256mb)
  802. // and uses KSEG0 mappings. If KSEG0 cannot cover the size of the initial
  803. // nonpaged pool, then allocate pages from the largest free memory
  804. // descriptor list and use regular nonpaged pool addressing.
  805. //
  806. if (MiKseg0Mapping == TRUE) {
  807. //
  808. // If MiKseg0Mapping is enabled, make sure the free descriptor
  809. // has enough pages to cover it. If not, this disables the mapping.
  810. //
  811. MiKseg0Mapping =
  812. MiEnsureAvailablePagesInFreeDescriptor (BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes),
  813. Kseg0Base + Kseg0Pages);
  814. }
  815. //
  816. // Fill in the PTEs to cover the initial nonpaged pool. The physical
  817. // page frames to cover this range were reserved earlier from the
  818. // largest low memory free descriptor. This initial allocation is both
  819. // physically and virtually contiguous.
  820. //
  821. PointerPte = MiGetPteAddress (MmNonPagedPoolStart);
  822. LastPte = MiGetPteAddress ((PCHAR)MmNonPagedPoolStart +
  823. MmSizeOfNonPagedPoolInBytes - 1);
  824. while (PointerPte <= LastPte) {
  825. NextPhysicalPage = MiGetNextPhysicalPage ();
  826. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  827. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  828. PointerPte += 1;
  829. }
  830. //
  831. // Zero the remaining PTEs (if any) for the initial nonpaged pool up to
  832. // the end of the current page table page.
  833. //
  834. while (!MiIsPteOnPdeBoundary (PointerPte)) {
  835. MI_WRITE_INVALID_PTE (PointerPte, ZeroKernelPte);
  836. PointerPte += 1;
  837. }
  838. //
  839. // Convert the starting nonpaged pool address to a KSEG0 address.
  840. //
  841. if (MiKseg0Mapping == TRUE) {
  842. //
  843. // The page table pages for these mappings were allocated above.
  844. // Now is the time to initialize them properly.
  845. //
  846. PointerPte = MiGetPteAddress (MmNonPagedPoolStart);
  847. MmNonPagedPoolStart = KSEG0_ADDRESS(PointerPte->u.Hard.PageFrameNumber);
  848. MmSubsectionBase = KSEG0_BASE;
  849. StartPte = MiGetPteAddress (MmNonPagedPoolStart);
  850. LastPte = MiGetPteAddress ((PCHAR)MmNonPagedPoolStart +
  851. MmSizeOfNonPagedPoolInBytes - 1);
  852. MiKseg0Start = MiGetVirtualAddressMappedByPte (StartPte);
  853. //
  854. // Initialize the necessary KSEG0 PTEs to map the initial nonpaged pool.
  855. //
  856. while (StartPte <= LastPte) {
  857. MI_WRITE_VALID_PTE (StartPte, *PointerPte);
  858. StartPte += 1;
  859. PointerPte += 1;
  860. }
  861. MiKseg0End = MiGetVirtualAddressMappedByPte (LastPte);
  862. }
  863. else {
  864. MiKseg0Mapping = FALSE;
  865. MmSubsectionBase = 0;
  866. }
  867. //
  868. // As only the initial nonpaged pool is mapped through superpages,
  869. // MmSubsectionTopPage is always set to zero.
  870. //
  871. MmSubsectionTopPage = 0;
  872. MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)NonPagedPoolStartVirtual +
  873. MmSizeOfNonPagedPoolInBytes);
  874. MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
  875. //
  876. // Non-paged pages now exist, build the pool structures.
  877. //
  878. MiInitializeNonPagedPool ();
  879. //
  880. // Before nonpaged pool can be used, the PFN database must
  881. // be built. This is due to the fact that the start and end of
  882. // allocation bits for nonpaged pool are maintained in the
  883. // PFN elements for the corresponding pages.
  884. //
  885. //
  886. // Calculate the number of pages required from page zero to
  887. // the highest page.
  888. //
  889. // Allow secondary color value override from registry.
  890. //
  891. MmSecondaryColors = MmSecondaryColors >> PAGE_SHIFT;
  892. if (MmSecondaryColors == 0) {
  893. MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
  894. }
  895. else {
  896. //
  897. // Make sure the value is power of two and within limits.
  898. //
  899. if (((MmSecondaryColors & (MmSecondaryColors -1)) != 0) ||
  900. (MmSecondaryColors < MM_SECONDARY_COLORS_MIN) ||
  901. (MmSecondaryColors > MM_SECONDARY_COLORS_MAX)) {
  902. MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
  903. }
  904. }
  905. MmSecondaryColorMask = MmSecondaryColors - 1;
  906. #if defined(MI_MULTINODE)
  907. //
  908. Determine number of bits in MmSecondaryColorMask. This
  909. // is the number of bits the Node color must be shifted
  910. // by before it is included in colors.
  911. //
  912. i = MmSecondaryColorMask;
  913. MmSecondaryColorNodeShift = 0;
  914. while (i) {
  915. i >>= 1;
  916. MmSecondaryColorNodeShift += 1;
  917. }
  918. //
  919. // Adjust the number of secondary colors by the number of nodes
  920. // in the machine. The secondary color mask is NOT adjusted
  921. // as it is used to control coloring within a node. The node
  922. // color is added to the color AFTER normal color calculations
  923. // are performed.
  924. //
  925. MmSecondaryColors *= KeNumberNodes;
  926. for (i = 0; i < KeNumberNodes; i += 1) {
  927. KeNodeBlock[i]->Color = (ULONG)i;
  928. KeNodeBlock[i]->MmShiftedColor = (ULONG)(i << MmSecondaryColorNodeShift);
  929. InitializeSListHead(&KeNodeBlock[i]->DeadStackList);
  930. }
  931. #endif
  932. //
  933. // Get the number of secondary colors and add the array for tracking
  934. // secondary colors to the end of the PFN database.
  935. //
  936. PfnAllocation = 1 + ((((MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN)) +
  937. (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2))
  938. >> PAGE_SHIFT);
  939. if ((MmHighestPhysicalPage < _x4gbnp) &&
  940. (MiKseg0Mapping == TRUE) &&
  941. (MiEnsureAvailablePagesInFreeDescriptor (PfnAllocation,
  942. Kseg0Base + Kseg0Pages))) {
  943. //
  944. // Allocate the PFN database in the superpage space.
  945. //
  946. // Compute the address of the PFN by allocating the appropriate
  947. // number of pages from the end of the free descriptor.
  948. //
  949. MmPfnDatabase = (PMMPFN)KSEG0_ADDRESS (MiNextPhysicalPage);
  950. StartPte = MiGetPteAddress(MmPfnDatabase);
  951. LastPte = MiGetPteAddress((PCHAR)MmPfnDatabase + (PfnAllocation << PAGE_SHIFT) - 1);
  952. while (StartPte <= LastPte) {
  953. TempPte.u.Hard.PageFrameNumber = MiGetNextPhysicalPage();
  954. MI_WRITE_VALID_PTE(StartPte, TempPte);
  955. StartPte += 1;
  956. }
  957. RtlZeroMemory (MmPfnDatabase, PfnAllocation * PAGE_SIZE);
  958. MiKseg0End = MiGetVirtualAddressMappedByPte (LastPte);
  959. }
  960. else {
  961. MmPfnDatabase = (PMMPFN)MM_PFN_DATABASE_START;
  962. //
  963. // Go through the memory descriptors and for each physical page
  964. // make sure the PFN database has a valid PTE to map it. This allows
  965. // machines with sparse physical memory to have a minimal PFN
  966. // database.
  967. //
  968. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  969. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  970. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  971. MEMORY_ALLOCATION_DESCRIPTOR,
  972. ListEntry);
  973. if ((MemoryDescriptor->MemoryType == LoaderBBTMemory) ||
  974. (MemoryDescriptor->MemoryType == LoaderSpecialMemory) ||
  975. (MemoryDescriptor->MemoryType == LoaderFirmwarePermanent) ||
  976. (MemoryDescriptor->MemoryType == LoaderBad)) {
  977. //
  978. // If the descriptor lies within the highest PFN database entry
  979. // then create PFN pages for this range. Note the PFN entries
  980. // must be created to support \Device\PhysicalMemory.
  981. //
  982. if (MemoryDescriptor->BasePage > MmHighestPhysicalPage) {
  983. NextMd = MemoryDescriptor->ListEntry.Flink;
  984. continue;
  985. }
  986. ASSERT (MemoryDescriptor->BasePage <= MmHighestPhysicalPage);
  987. if (MemoryDescriptor->BasePage + MemoryDescriptor->PageCount >
  988. MmHighestPhysicalPage + 1) {
  989. MemoryDescriptor->PageCount = (PFN_COUNT)MmHighestPhysicalPage -
  990. MemoryDescriptor->BasePage + 1;
  991. }
  992. }
  993. PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(
  994. MemoryDescriptor->BasePage));
  995. LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT(
  996. MemoryDescriptor->BasePage +
  997. MemoryDescriptor->PageCount))) - 1);
  998. First = TRUE;
  999. while (PointerPte <= LastPte) {
  1000. if (First == TRUE || MiIsPteOnPpeBoundary(PointerPte)) {
  1001. StartPpe = MiGetPdeAddress(PointerPte);
  1002. if (StartPpe->u.Hard.Valid == 0) {
  1003. ASSERT (StartPpe->u.Long == 0);
  1004. NextPhysicalPage = MiGetNextPhysicalPage();
  1005. RtlZeroMemory(KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  1006. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  1007. MI_WRITE_VALID_PTE(StartPpe, TempPte);
  1008. }
  1009. }
  1010. if ((First == TRUE) || MiIsPteOnPdeBoundary(PointerPte)) {
  1011. First = FALSE;
  1012. StartPde = MiGetPteAddress(PointerPte);
  1013. if (StartPde->u.Hard.Valid == 0) {
  1014. NextPhysicalPage = MiGetNextPhysicalPage();
  1015. RtlZeroMemory(KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  1016. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  1017. MI_WRITE_VALID_PTE(StartPde, TempPte);
  1018. }
  1019. }
  1020. if (PointerPte->u.Hard.Valid == 0) {
  1021. NextPhysicalPage = MiGetNextPhysicalPage();
  1022. RtlZeroMemory(KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  1023. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  1024. MI_WRITE_VALID_PTE(PointerPte, TempPte);
  1025. }
  1026. PointerPte += 1;
  1027. }
  1028. NextMd = MemoryDescriptor->ListEntry.Flink;
  1029. }
  1030. //
  1031. // Create the mapping for the secondary page color array.
  1032. //
  1033. PointerPte =
  1034. MiGetPteAddress(&MmPfnDatabase[MmHighestPossiblePhysicalPage + 1]);
  1035. LastPte =
  1036. MiGetPteAddress((PUCHAR)&MmPfnDatabase[MmHighestPossiblePhysicalPage + 1] +
  1037. (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2) - 1);
  1038. while (PointerPte <= LastPte) {
  1039. if (MiIsPteOnPpeBoundary(PointerPte)) {
  1040. StartPpe = MiGetPdeAddress(PointerPte);
  1041. if (StartPpe->u.Hard.Valid == 0) {
  1042. ASSERT (StartPpe->u.Long == 0);
  1043. NextPhysicalPage = MiGetNextPhysicalPage();
  1044. RtlZeroMemory(KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  1045. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  1046. MI_WRITE_VALID_PTE(StartPpe, TempPte);
  1047. }
  1048. }
  1049. if (MiIsPteOnPdeBoundary(PointerPte)) {
  1050. StartPde = MiGetPteAddress(PointerPte);
  1051. if (StartPde->u.Hard.Valid == 0) {
  1052. NextPhysicalPage = MiGetNextPhysicalPage();
  1053. RtlZeroMemory(KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  1054. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  1055. MI_WRITE_VALID_PTE(StartPde, TempPte);
  1056. }
  1057. }
  1058. if (PointerPte->u.Hard.Valid == 0) {
  1059. NextPhysicalPage = MiGetNextPhysicalPage();
  1060. RtlZeroMemory(KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  1061. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  1062. MI_WRITE_VALID_PTE(PointerPte, TempPte);
  1063. }
  1064. PointerPte += 1;
  1065. }
  1066. }
  1067. if (MiKseg0Mapping == TRUE) {
  1068. //
  1069. // Try to convert to superpages.
  1070. //
  1071. MiConvertToSuperPages (MiKseg0Start, MiKseg0End, _x1mb);
  1072. MiConvertToSuperPages (MiKseg0Start, MiKseg0End, _x4mb);
  1073. MiConvertToSuperPages (MiKseg0Start, MiKseg0End, _x16mb);
  1074. MiConvertToSuperPages (MiKseg0Start, MiKseg0End, _x64mb);
  1075. MiConvertToSuperPages (MiKseg0Start, MiKseg0End, _x256mb);
  1076. PointerPte = MiGetPteAddress (MiKseg0Start);
  1077. MiKseg0StartFrame = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  1078. PointerPte = MiGetPteAddress (MiKseg0End) - 1;
  1079. MiKseg0EndFrame = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  1080. //
  1081. // Add the KSEG0 range to the translation register list.
  1082. //
  1083. MiAddTrEntry ((ULONG_PTR)MiKseg0Start, (ULONG_PTR)MiKseg0End);
  1084. MiLastCachedFrame->BasePage = MiKseg0StartFrame;
  1085. MiLastCachedFrame->LastPage = MiKseg0EndFrame + 1;
  1086. MiLastCachedFrame += 1;
  1087. }
  1088. //
  1089. // Initialize support for colored pages.
  1090. //
  1091. MmFreePagesByColor[0] = (PMMCOLOR_TABLES)
  1092. &MmPfnDatabase[MmHighestPossiblePhysicalPage + 1];
  1093. MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
  1094. //
  1095. // Make sure the PTEs are mapped.
  1096. //
  1097. if (!MI_IS_PHYSICAL_ADDRESS(MmFreePagesByColor[0])) {
  1098. PointerPte = MiGetPteAddress (&MmFreePagesByColor[0][0]);
  1099. LastPte = MiGetPteAddress (
  1100. (PVOID)((PCHAR)&MmFreePagesByColor[1][MmSecondaryColors] - 1));
  1101. while (PointerPte <= LastPte) {
  1102. if (PointerPte->u.Hard.Valid == 0) {
  1103. NextPhysicalPage = MiGetNextPhysicalPage();
  1104. RtlZeroMemory(KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
  1105. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  1106. MI_WRITE_VALID_PTE(PointerPte, TempPte);
  1107. }
  1108. PointerPte += 1;
  1109. }
  1110. }
  1111. for (i = 0; i < MmSecondaryColors; i += 1) {
  1112. MmFreePagesByColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
  1113. MmFreePagesByColor[ZeroedPageList][i].Count = 0;
  1114. MmFreePagesByColor[FreePageList][i].Flink = MM_EMPTY_LIST;
  1115. MmFreePagesByColor[FreePageList][i].Count = 0;
  1116. }
  1117. #if MM_MAXIMUM_NUMBER_OF_COLORS > 1
  1118. for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i += 1) {
  1119. MmFreePagesByPrimaryColor[ZeroedPageList][i].ListName = ZeroedPageList;
  1120. MmFreePagesByPrimaryColor[FreePageList][i].ListName = FreePageList;
  1121. MmFreePagesByPrimaryColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
  1122. MmFreePagesByPrimaryColor[FreePageList][i].Flink = MM_EMPTY_LIST;
  1123. MmFreePagesByPrimaryColor[ZeroedPageList][i].Blink = MM_EMPTY_LIST;
  1124. MmFreePagesByPrimaryColor[FreePageList][i].Blink = MM_EMPTY_LIST;
  1125. }
  1126. #endif
  1127. //
  1128. // Go through the page table entries for hyper space and for any page
  1129. // which is valid, update the corresponding PFN database element.
  1130. //
  1131. StartPde = MiGetPdeAddress (HYPER_SPACE);
  1132. StartPpe = MiGetPpeAddress (HYPER_SPACE);
  1133. EndPde = MiGetPdeAddress(HYPER_SPACE_END);
  1134. First = (StartPpe->u.Hard.Valid == 0) ? TRUE : FALSE;
  1135. while (StartPde <= EndPde) {
  1136. if (First == TRUE || MiIsPteOnPdeBoundary(StartPde)) {
  1137. First = FALSE;
  1138. StartPpe = MiGetPteAddress(StartPde);
  1139. if (StartPpe->u.Hard.Valid == 0) {
  1140. StartPpe += 1;
  1141. StartPde = MiGetVirtualAddressMappedByPte (StartPpe);
  1142. continue;
  1143. }
  1144. PdePage = MI_GET_PAGE_FRAME_FROM_PTE(StartPpe);
  1145. Pfn1 = MI_PFN_ELEMENT(PdePage);
  1146. Pfn1->u4.PteFrame = MmSystemParentTablePage;
  1147. Pfn1->PteAddress = StartPde;
  1148. Pfn1->u2.ShareCount += 1;
  1149. Pfn1->u3.e2.ReferenceCount = 1;
  1150. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1151. Pfn1->u3.e1.CacheAttribute = MiCached;
  1152. Pfn1->u3.e1.PageColor =
  1153. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE(StartPpe));
  1154. }
  1155. if (StartPde->u.Hard.Valid == 1) {
  1156. PdePage = MI_GET_PAGE_FRAME_FROM_PTE(StartPde);
  1157. Pfn1 = MI_PFN_ELEMENT(PdePage);
  1158. PointerPde = MiGetPteAddress(StartPde);
  1159. Pfn1->u4.PteFrame = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
  1160. Pfn1->PteAddress = StartPde;
  1161. Pfn1->u2.ShareCount += 1;
  1162. Pfn1->u3.e2.ReferenceCount = 1;
  1163. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1164. Pfn1->u3.e1.CacheAttribute = MiCached;
  1165. Pfn1->u3.e1.PageColor =
  1166. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (StartPde));
  1167. PointerPte = MiGetVirtualAddressMappedByPte(StartPde);
  1168. for (j = 0 ; j < PTE_PER_PAGE; j += 1) {
  1169. if (PointerPte->u.Hard.Valid == 1) {
  1170. Pfn1->u2.ShareCount += 1;
  1171. if (PointerPte->u.Hard.PageFrameNumber <=
  1172. MmHighestPhysicalPage) {
  1173. Pfn2 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
  1174. Pfn2->u4.PteFrame = PdePage;
  1175. Pfn2->PteAddress = PointerPte;
  1176. Pfn2->u2.ShareCount += 1;
  1177. Pfn2->u3.e2.ReferenceCount = 1;
  1178. Pfn2->u3.e1.PageLocation = ActiveAndValid;
  1179. Pfn2->u3.e1.CacheAttribute = MiCached;
  1180. Pfn2->u3.e1.PageColor =
  1181. MI_GET_COLOR_FROM_SECONDARY(
  1182. MI_GET_PAGE_COLOR_FROM_PTE (
  1183. PointerPte));
  1184. }
  1185. }
  1186. PointerPte += 1;
  1187. }
  1188. }
  1189. StartPde += 1;
  1190. }
  1191. //
  1192. // Go through the page table entries for kernel space and for any page
  1193. // which is valid, update the corresponding PFN database element.
  1194. //
  1195. StartPde = MiGetPdeAddress ((PVOID)KADDRESS_BASE);
  1196. StartPpe = MiGetPpeAddress ((PVOID)KADDRESS_BASE);
  1197. EndPde = MiGetPdeAddress((PVOID)MM_SYSTEM_SPACE_END);
  1198. First = (StartPpe->u.Hard.Valid == 0) ? TRUE : FALSE;
  1199. PpePage = 0;
  1200. while (StartPde <= EndPde) {
  1201. if (First == TRUE || MiIsPteOnPdeBoundary(StartPde)) {
  1202. First = FALSE;
  1203. StartPpe = MiGetPteAddress(StartPde);
  1204. if (StartPpe->u.Hard.Valid == 0) {
  1205. StartPpe += 1;
  1206. StartPde = MiGetVirtualAddressMappedByPte (StartPpe);
  1207. continue;
  1208. }
  1209. PpePage = MI_GET_PAGE_FRAME_FROM_PTE(StartPpe);
  1210. Pfn1 = MI_PFN_ELEMENT(PpePage);
  1211. Pfn1->u4.PteFrame = MmSystemParentTablePage;
  1212. Pfn1->PteAddress = StartPpe;
  1213. Pfn1->u2.ShareCount += 1;
  1214. Pfn1->u3.e2.ReferenceCount = 1;
  1215. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1216. Pfn1->u3.e1.CacheAttribute = MiCached;
  1217. Pfn1->u3.e1.PageColor =
  1218. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE(StartPpe));
  1219. }
  1220. if (StartPde->u.Hard.Valid == 1) {
  1221. PdePage = MI_GET_PAGE_FRAME_FROM_PTE(StartPde);
  1222. Pfn1 = MI_PFN_ELEMENT(PdePage);
  1223. PointerPde = MiGetPteAddress(StartPde);
  1224. Pfn1->u4.PteFrame = PpePage;
  1225. Pfn1->PteAddress = StartPde;
  1226. Pfn1->u2.ShareCount += 1;
  1227. Pfn1->u3.e2.ReferenceCount = 1;
  1228. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1229. Pfn1->u3.e1.CacheAttribute = MiCached;
  1230. Pfn1->u3.e1.PageColor =
  1231. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (StartPde));
  1232. PointerPte = MiGetVirtualAddressMappedByPte(StartPde);
  1233. for (j = 0 ; j < PTE_PER_PAGE; j += 1) {
  1234. if (PointerPte->u.Hard.Valid == 1) {
  1235. Pfn1->u2.ShareCount += 1;
  1236. if (PointerPte->u.Hard.PageFrameNumber <=
  1237. MmHighestPhysicalPage) {
  1238. Pfn2 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
  1239. Pfn2->u4.PteFrame = PdePage;
  1240. Pfn2->PteAddress = PointerPte;
  1241. Pfn2->u2.ShareCount += 1;
  1242. Pfn2->u3.e2.ReferenceCount = 1;
  1243. Pfn2->u3.e1.PageLocation = ActiveAndValid;
  1244. Pfn2->u3.e1.CacheAttribute = MiCached;
  1245. Pfn2->u3.e1.PageColor =
  1246. MI_GET_COLOR_FROM_SECONDARY(
  1247. MI_GET_PAGE_COLOR_FROM_PTE (
  1248. PointerPte));
  1249. }
  1250. }
  1251. PointerPte += 1;
  1252. }
  1253. }
  1254. StartPde += 1;
  1255. }
  1256. //
  1257. // Mark the system top level page directory parent page as in use.
  1258. //
  1259. PointerPte = MiGetPteAddress((PVOID)PDE_KTBASE);
  1260. Pfn2 = MI_PFN_ELEMENT(MmSystemParentTablePage);
  1261. Pfn2->u4.PteFrame = MmSystemParentTablePage;
  1262. Pfn2->PteAddress = PointerPte;
  1263. Pfn2->u1.Event = (PVOID) CurrentProcess;
  1264. Pfn2->u2.ShareCount += 1;
  1265. Pfn2->u3.e2.ReferenceCount = 1;
  1266. Pfn2->u3.e1.PageLocation = ActiveAndValid;
  1267. Pfn2->u3.e1.CacheAttribute = MiCached;
  1268. Pfn2->u3.e1.PageColor =
  1269. MI_GET_COLOR_FROM_SECONDARY(MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
  1270. //
  1271. // Temporarily mark the user top level page directory parent page as in use
  1272. // so this page will not be put in the free list.
  1273. //
  1274. PointerPte = MiGetPteAddress((PVOID)PDE_UTBASE);
  1275. Pfn2 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
  1276. Pfn2->u4.PteFrame = PointerPte->u.Hard.PageFrameNumber;
  1277. Pfn2->PteAddress = PointerPte;
  1278. Pfn2->u1.Event = NULL;
  1279. Pfn2->u2.ShareCount += 1;
  1280. Pfn2->u3.e2.ReferenceCount = 1;
  1281. Pfn2->u3.e1.PageLocation = ActiveAndValid;
  1282. Pfn2->u3.e1.CacheAttribute = MiCached;
  1283. Pfn2->u3.e1.PageColor =
  1284. MI_GET_COLOR_FROM_SECONDARY(MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
  1285. //
  1286. // Mark the region 1 session top level page directory parent page as in use.
  1287. // This page will never be freed.
  1288. //
  1289. PointerPte = MiGetPteAddress((PVOID)PDE_STBASE);
  1290. Pfn2 = MI_PFN_ELEMENT(MmSessionParentTablePage);
  1291. Pfn2->u4.PteFrame = MmSessionParentTablePage;
  1292. Pfn2->PteAddress = PointerPte;
  1293. Pfn2->u2.ShareCount += 1;
  1294. Pfn2->u3.e2.ReferenceCount = 1;
  1295. Pfn2->u3.e1.PageLocation = ActiveAndValid;
  1296. Pfn2->u3.e1.CacheAttribute = MiCached;
  1297. Pfn2->u3.e1.PageColor =
  1298. MI_GET_COLOR_FROM_SECONDARY(MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
  1299. //
  1300. // Mark the default PPE table page as in use so that this page will never
  1301. // be used.
  1302. //
  1303. PageFrameIndex = MiDefaultPpe.u.Hard.PageFrameNumber;
  1304. PointerPte = KSEG_ADDRESS(PageFrameIndex);
  1305. Pfn2 = MI_PFN_ELEMENT(PageFrameIndex);
  1306. Pfn2->u4.PteFrame = PageFrameIndex;
  1307. Pfn2->PteAddress = PointerPte;
  1308. Pfn2->u1.Event = (PVOID) CurrentProcess;
  1309. Pfn2->u2.ShareCount += 1;
  1310. Pfn2->u3.e2.ReferenceCount = 1;
  1311. Pfn2->u3.e1.PageLocation = ActiveAndValid;
  1312. Pfn2->u3.e1.CacheAttribute = MiCached;
  1313. Pfn2->u3.e1.PageColor =
  1314. MI_GET_COLOR_FROM_SECONDARY(MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
  1315. //
  1316. // If page zero is still unused, mark it as in use. This is
  1317. // temporary as we want to find bugs where a physical page
  1318. // is specified as zero.
  1319. //
  1320. Pfn1 = &MmPfnDatabase[MmLowestPhysicalPage];
  1321. if (Pfn1->u3.e2.ReferenceCount == 0) {
  1322. //
  1323. // Make the reference count non-zero and point it into a
  1324. // page directory.
  1325. //
  1326. Pde = MiGetPdeAddress ((PVOID)(KADDRESS_BASE + 0xb0000000));
  1327. PdePage = MI_GET_PAGE_FRAME_FROM_PTE(Pde);
  1328. Pfn1->u4.PteFrame = PdePageNumber;
  1329. Pfn1->PteAddress = Pde;
  1330. Pfn1->u2.ShareCount += 1;
  1331. Pfn1->u3.e2.ReferenceCount = 1;
  1332. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1333. Pfn1->u3.e1.CacheAttribute = MiCached;
  1334. Pfn1->u3.e1.PageColor = MI_GET_COLOR_FROM_SECONDARY(
  1335. MI_GET_PAGE_COLOR_FROM_PTE (Pde));
  1336. }
  1337. // end of temporary set to physical page zero.
  1338. //
  1339. //
  1340. // Walk through the memory descriptors and add pages to the
  1341. // free list in the PFN database.
  1342. //
  1343. MiFreeDescriptor->PageCount -=
  1344. (PFN_COUNT)(MiNextPhysicalPage - MiOldFreeDescriptorBase);
  1345. //
  1346. // Until BasePage (arc.h) is changed to PFN_NUMBER, NextPhysicalPage
  1347. // needs (ULONG) cast.
  1348. //
  1349. MiFreeDescriptor->BasePage = (ULONG)MiNextPhysicalPage;
  1350. //
  1351. // Make unused pages inside the kernel super page mapping unusable
  1352. // so that no one can try to map it for uncached access as this would
  1353. // cause a fatal processor error.
  1354. //
  1355. // In practice, no pages are actually wasted because all of the ones
  1356. // in the 16mb kernel superpage have been put to use during the startup
  1357. // code already.
  1358. //
  1359. if (MiFreeDescriptorNonPaged != NULL) {
  1360. if (MiFreeDescriptorNonPaged->BasePage > KernelEnd) {
  1361. MiWasteStart = KernelEnd;
  1362. MiWasteEnd = KernelEnd;
  1363. }
  1364. else if ((MiFreeDescriptorNonPaged->BasePage +
  1365. MiFreeDescriptorNonPaged->PageCount) > KernelEnd) {
  1366. MiWasteStart = MiFreeDescriptorNonPaged->BasePage;
  1367. MiWasteEnd = KernelEnd;
  1368. MiFreeDescriptorNonPaged->PageCount -=
  1369. (PFN_COUNT) (KernelEnd - MiFreeDescriptorNonPaged->BasePage);
  1370. MiFreeDescriptorNonPaged->BasePage = (ULONG) KernelEnd;
  1371. }
  1372. else if (MiFreeDescriptorNonPaged->PageCount != 0) {
  1373. MiWasteStart = MiFreeDescriptorNonPaged->BasePage;
  1374. MiWasteEnd = MiWasteStart + MiFreeDescriptorNonPaged->PageCount;
  1375. MiFreeDescriptorNonPaged->PageCount = 0;
  1376. }
  1377. }
  1378. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  1379. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  1380. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  1381. MEMORY_ALLOCATION_DESCRIPTOR,
  1382. ListEntry);
  1383. i = MemoryDescriptor->PageCount;
  1384. NextPhysicalPage = MemoryDescriptor->BasePage;
  1385. switch (MemoryDescriptor->MemoryType) {
  1386. case LoaderBad:
  1387. if (MemoryDescriptor->BasePage + i > MmHighestPhysicalPage + 1) {
  1388. i = 0;
  1389. }
  1390. else if (MemoryDescriptor->BasePage <= MmHighestPhysicalPage) {
  1391. i = MmHighestPhysicalPage + 1 - MemoryDescriptor->BasePage;
  1392. }
  1393. while (i != 0) {
  1394. MiInsertPageInList (&MmBadPageListHead, NextPhysicalPage);
  1395. i -= 1;
  1396. NextPhysicalPage += 1;
  1397. }
  1398. break;
  1399. case LoaderFree:
  1400. case LoaderLoadedProgram:
  1401. case LoaderFirmwareTemporary:
  1402. case LoaderOsloaderStack:
  1403. Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
  1404. while (i != 0) {
  1405. if (Pfn1->u3.e2.ReferenceCount == 0) {
  1406. //
  1407. // Set the PTE address to the physical page for
  1408. // virtual address alignment checking.
  1409. //
  1410. Pfn1->PteAddress = KSEG_ADDRESS (NextPhysicalPage);
  1411. Pfn1->u3.e1.CacheAttribute = MiCached;
  1412. MiDetermineNode(NextPhysicalPage, Pfn1);
  1413. MiInsertPageInFreeList (NextPhysicalPage);
  1414. }
  1415. Pfn1 += 1;
  1416. i -= 1;
  1417. NextPhysicalPage += 1;
  1418. }
  1419. break;
  1420. case LoaderSpecialMemory:
  1421. case LoaderBBTMemory:
  1422. case LoaderFirmwarePermanent:
  1423. //
  1424. // If the descriptor lies within the highest PFN database entry
  1425. // then create PFN pages for this range. Note the PFN entries
  1426. // must be created to support \Device\PhysicalMemory.
  1427. //
  1428. if (MemoryDescriptor->BasePage > MmHighestPhysicalPage) {
  1429. break;
  1430. }
  1431. if (MemoryDescriptor->BasePage + MemoryDescriptor->PageCount >
  1432. MmHighestPhysicalPage + 1) {
  1433. MemoryDescriptor->PageCount = (PFN_COUNT)MmHighestPhysicalPage -
  1434. MemoryDescriptor->BasePage + 1;
  1435. i = MemoryDescriptor->PageCount;
  1436. }
  1437. //
  1438. // Fall through as these pages must be marked in use as they
  1439. // lie within the PFN limits and may be accessed through
  1440. // \Device\PhysicalMemory.
  1441. default:
  1442. PointerPte = KSEG_ADDRESS(NextPhysicalPage);
  1443. Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
  1444. while (i != 0) {
  1445. //
  1446. // Set page as in use.
  1447. //
  1448. if (Pfn1->u3.e2.ReferenceCount == 0) {
  1449. Pfn1->u4.PteFrame = PdePageNumber;
  1450. Pfn1->PteAddress = PointerPte;
  1451. Pfn1->u2.ShareCount += 1;
  1452. Pfn1->u3.e2.ReferenceCount = 1;
  1453. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1454. Pfn1->u3.e1.CacheAttribute = MiCached;
  1455. Pfn1->u3.e1.PageColor = MI_GET_COLOR_FROM_SECONDARY(
  1456. MI_GET_PAGE_COLOR_FROM_PTE (
  1457. PointerPte));
  1458. if (MemoryDescriptor->MemoryType == LoaderXIPRom) {
  1459. Pfn1->u1.Flink = 0;
  1460. Pfn1->u2.ShareCount = 0;
  1461. Pfn1->u3.e2.ReferenceCount = 0;
  1462. Pfn1->u3.e1.PageLocation = 0;
  1463. Pfn1->u3.e1.Rom = 1;
  1464. Pfn1->u4.InPageError = 0;
  1465. Pfn1->u3.e1.PrototypePte = 1;
  1466. }
  1467. }
  1468. Pfn1 += 1;
  1469. i -= 1;
  1470. NextPhysicalPage += 1;
  1471. PointerPte += 1;
  1472. }
  1473. break;
  1474. }
  1475. NextMd = MemoryDescriptor->ListEntry.Flink;
  1476. }
  1477. if (MI_IS_PHYSICAL_ADDRESS(MmPfnDatabase) == FALSE) {
  1478. //
  1479. // Indicate that the PFN database is allocated in NonPaged pool.
  1480. //
  1481. PointerPte = MiGetPteAddress (&MmPfnDatabase[MmLowestPhysicalPage]);
  1482. Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
  1483. Pfn1->u3.e1.StartOfAllocation = 1;
  1484. //
  1485. // Set the end of the allocation.
  1486. //
  1487. PointerPte = MiGetPteAddress (&MmPfnDatabase[MmHighestPhysicalPage]);
  1488. Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
  1489. Pfn1->u3.e1.EndOfAllocation = 1;
  1490. }
  1491. else {
  1492. //
  1493. // The PFN database is allocated in the superpage space
  1494. //
  1495. // Mark all PFN entries for the PFN pages in use.
  1496. //
  1497. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (MmPfnDatabase);
  1498. Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
  1499. do {
  1500. Pfn1->PteAddress = KSEG_ADDRESS(PageFrameIndex);
  1501. Pfn1->u3.e1.PageColor = 0;
  1502. Pfn1->u3.e2.ReferenceCount = 1;
  1503. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1504. Pfn1->u3.e1.CacheAttribute = MiCached;
  1505. PageFrameIndex += 1;
  1506. Pfn1 += 1;
  1507. PfnAllocation -= 1;
  1508. } while (PfnAllocation != 0);
  1509. //
  1510. // To avoid creating WB/UC/WC aliasing problem, we should not scan
  1511. // and add free pages to the free list.
  1512. //
  1513. #if 0
  1514. // Scan the PFN database backward for pages that are completely zero.
  1515. // These pages are unused and can be added to the free list
  1516. //
  1517. BottomPfn = MI_PFN_ELEMENT(MmHighestPhysicalPage);
  1518. do {
  1519. //
  1520. // Compute the address of the start of the page that is next
  1521. // lower in memory and scan backwards until that page address
  1522. // is reached or just crossed.
  1523. //
  1524. if (((ULONG_PTR)BottomPfn & (PAGE_SIZE - 1)) != 0) {
  1525. BasePfn = (PMMPFN)((ULONG_PTR)BottomPfn & ~(PAGE_SIZE - 1));
  1526. TopPfn = BottomPfn + 1;
  1527. }
  1528. else {
  1529. BasePfn = (PMMPFN)((ULONG_PTR)BottomPfn - PAGE_SIZE);
  1530. TopPfn = BottomPfn;
  1531. }
  1532. while (BottomPfn > BasePfn) {
  1533. BottomPfn -= 1;
  1534. }
  1535. //
  1536. // If the entire range over which the PFN entries span is
  1537. // completely zero and the PFN entry that maps the page is
  1538. // not in the range, then add the page to the appropriate
  1539. // free list.
  1540. //
  1541. Range = (ULONG_PTR)TopPfn - (ULONG_PTR)BottomPfn;
  1542. if (RtlCompareMemoryUlong((PVOID)BottomPfn, Range, 0) == Range) {
  1543. //
  1544. // Set the PTE address to the physical page for virtual
  1545. // address alignment checking.
  1546. //
  1547. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (BasePfn);
  1548. Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
  1549. ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
  1550. ASSERT (Pfn1->PteAddress == KSEG_ADDRESS(PageFrameIndex));
  1551. Pfn1->u3.e2.ReferenceCount = 0;
  1552. PfnAllocation += 1;
  1553. Pfn1->PteAddress = (PMMPTE)((ULONG_PTR)PageFrameIndex << PTE_SHIFT);
  1554. Pfn1->u3.e1.PageColor = 0;
  1555. MiInsertPageInFreeList (PageFrameIndex);
  1556. }
  1557. } while (BottomPfn > MmPfnDatabase);
  1558. #endif
  1559. }
  1560. //
  1561. // Initialize the nonpaged pool.
  1562. //
  1563. InitializePool (NonPagedPool, 0);
  1564. //
  1565. // Initialize the nonpaged available PTEs for mapping I/O space
  1566. // and kernel stacks.
  1567. //
  1568. PointerPte = MiGetPteAddress (SystemPteStart);
  1569. ASSERT (((ULONG_PTR)PointerPte & (PAGE_SIZE - 1)) == 0);
  1570. MmNumberOfSystemPtes = (ULONG)(MiGetPteAddress(NonPagedPoolStartVirtual) - PointerPte - 1);
  1571. MiInitializeSystemPtes (PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
  1572. //
  1573. // Initialize memory management structures for the system process.
  1574. //
  1575. // Set the address of the first and last reserved PTE in hyper space.
  1576. //
  1577. MmFirstReservedMappingPte = MiGetPteAddress (FIRST_MAPPING_PTE);
  1578. MmLastReservedMappingPte = MiGetPteAddress (LAST_MAPPING_PTE);
  1579. //
  1580. // Create zeroing PTEs for the zero page thread.
  1581. //
  1582. MiFirstReservedZeroingPte = MiReserveSystemPtes (NUMBER_OF_ZEROING_PTES + 1,
  1583. SystemPteSpace);
  1584. RtlZeroMemory (MiFirstReservedZeroingPte,
  1585. (NUMBER_OF_ZEROING_PTES + 1) * sizeof(MMPTE));
  1586. //
  1587. // Use the page frame number field of the first PTE as an
  1588. // offset into the available zeroing PTEs.
  1589. //
  1590. MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = NUMBER_OF_ZEROING_PTES;
  1591. //
  1592. // Create the VAD bitmap for this process.
  1593. //
  1594. PointerPte = MiGetPteAddress (VAD_BITMAP_SPACE);
  1595. PageFrameIndex = MiRemoveAnyPage (0);
  1596. //
  1597. // Note the global bit must be off for the bitmap data.
  1598. //
  1599. TempPte = ValidPdePde;
  1600. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1601. *PointerPte = TempPte;
  1602. //
  1603. // Point to the page we just created and zero it.
  1604. //
  1605. RtlZeroMemory (VAD_BITMAP_SPACE, PAGE_SIZE);
  1606. MiLastVadBit = (ULONG)((((ULONG_PTR) MI_64K_ALIGN (MM_HIGHEST_VAD_ADDRESS))) / X64K);
  1607. if (MiLastVadBit > PAGE_SIZE * 8 - 1) {
  1608. MiLastVadBit = PAGE_SIZE * 8 - 1;
  1609. }
  1610. //
  1611. // The PFN element for the page directory parent will be initialized
  1612. // a second time when the process address space is initialized. Therefore,
  1613. // the share count and the reference count must be set to zero.
  1614. //
  1615. Pfn1 = MI_PFN_ELEMENT(MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)PDE_SELFMAP));
  1616. Pfn1->u2.ShareCount = 0;
  1617. Pfn1->u3.e2.ReferenceCount = 0;
  1618. //
  1619. // The PFN element for the hyper space page directory page will be
  1620. // initialized a second time when the process address space is initialized.
  1621. // Therefore, the share count and the reference count must be set to zero.
  1622. //
  1623. PointerPte = MiGetPpeAddress(HYPER_SPACE);
  1624. Pfn1 = MI_PFN_ELEMENT(MI_GET_PAGE_FRAME_FROM_PTE(PointerPte));
  1625. Pfn1->u2.ShareCount = 0;
  1626. Pfn1->u3.e2.ReferenceCount = 0;
  1627. //
  1628. // The PFN elements for the hyper space page table page and working set list
  1629. // page will be initialized a second time when the process address space
  1630. // is initialized. Therefore, the share count and the reference must be
  1631. // set to zero.
  1632. //
  1633. StartPde = MiGetPdeAddress(HYPER_SPACE);
  1634. Pfn1 = MI_PFN_ELEMENT(MI_GET_PAGE_FRAME_FROM_PTE(StartPde));
  1635. Pfn1->u2.ShareCount = 0;
  1636. Pfn1->u3.e2.ReferenceCount = 0;
  1637. KeInitializeEvent (&MiImageMappingPteEvent,
  1638. NotificationEvent,
  1639. FALSE);
  1640. //
  1641. // Initialize this process's memory management structures including
  1642. // the working set list.
  1643. //
  1644. //
  1645. // The PFN element for the page directory has already been initialized,
  1646. // zero the reference count and the share count so they won't be
  1647. // wrong.
  1648. //
  1649. Pfn1 = MI_PFN_ELEMENT (PdePageNumber);
  1650. Pfn1->u2.ShareCount = 0;
  1651. Pfn1->u3.e2.ReferenceCount = 0;
  1652. //
  1653. // Get a page for the working set list and map it into the page
  1654. // directory at the page after hyperspace.
  1655. //
  1656. PageFrameIndex = MiRemoveAnyPage (0);
  1657. CurrentProcess->WorkingSetPage = PageFrameIndex;
  1658. TempPte = ValidPdePde;
  1659. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1660. PointerPte = MiGetPteAddress (MmWorkingSetList);
  1661. *PointerPte = TempPte;
  1662. RtlZeroMemory (KSEG_ADDRESS(PageFrameIndex), PAGE_SIZE);
  1663. CurrentProcess->Vm.MaximumWorkingSetSize = (ULONG)MmSystemProcessWorkingSetMax;
  1664. CurrentProcess->Vm.MinimumWorkingSetSize = (ULONG)MmSystemProcessWorkingSetMin;
  1665. MmSessionMapInfo.RegionId = START_SESSION_RID;
  1666. MmSessionMapInfo.SequenceNumber = START_SEQUENCE;
  1667. KeAttachSessionSpace (&MmSessionMapInfo, MmSessionParentTablePage);
  1668. MmInitializeProcessAddressSpace (CurrentProcess, NULL, NULL, NULL);
  1669. KeFlushCurrentTb ();
  1670. //
  1671. // Restore the loader block memory descriptors to their original contents
  1672. // as our caller relies on it.
  1673. //
  1674. if (MiFreeDescriptorLargest != NULL) {
  1675. MiFreeDescriptorLargest->BasePage = OriginalLargestDescriptorBase;
  1676. MiFreeDescriptorLargest->PageCount = OriginalLargestDescriptorCount;
  1677. }
  1678. if (MiFreeDescriptorLowMem != NULL) {
  1679. MiFreeDescriptorLowMem->BasePage = OriginalLowMemDescriptorBase;
  1680. MiFreeDescriptorLowMem->PageCount = OriginalLowMemDescriptorCount;
  1681. }
  1682. if (MiFreeDescriptorNonPaged != NULL) {
  1683. MiFreeDescriptorNonPaged->BasePage = OriginalNonPagedDescriptorBase;
  1684. MiFreeDescriptorNonPaged->PageCount = OriginalNonPagedDescriptorCount;
  1685. }
  1686. return;
  1687. }
  1688. PVOID
  1689. MiGetKSegAddress (
  1690. IN PFN_NUMBER FrameNumber
  1691. )
  1692. /*++
  1693. Routine Description:
  1694. This function returns the KSEG3 address which maps the given physical page.
  1695. Arguments:
  1696. FrameNumber - Supplies the physical page number to get the KSEG3 address for
  1697. Return Value:
  1698. Virtual address mapped in KSEG3 space
  1699. --*/
  1700. {
  1701. PVOID Virtual;
  1702. ASSERT (FrameNumber <= MmHighestPhysicalPage);
  1703. Virtual = ((PVOID)(KSEG3_BASE | ((ULONG_PTR)(FrameNumber) << PAGE_SHIFT)));
  1704. return Virtual;
  1705. }
VOID
MiConvertToSuperPages (
    IN PVOID StartVirtual,
    IN PVOID EndVirtual,
    IN SIZE_T SuperPageSize
    )

/*++

Routine Description:

    This function makes contiguous non-paged memory use super pages rather
    than using page tables.

Arguments:

    StartVirtual - Supplies the start address of the region of pages to be
                   mapped by super pages.

    EndVirtual - Supplies the end address of the region of pages to be mapped
                 by super pages.

    SuperPageSize - Supplies the page size (in bytes) to be used by the super
                    page.  This is assumed to be a power of two; the shift
                    count is derived from it below.

Return Value:

    None.

--*/

{
    ULONG_PTR VirtualAddress;
    ULONG_PTR i;
    ULONG_PTR NumberOfPtes;
    PMMPTE StartPte;
    ULONG_PTR SuperPageShift;

    //
    // Start the superpage mapping on a natural boundary, rounding up the
    // argument address to one if need be.
    //
    VirtualAddress = (ULONG_PTR) PAGE_ALIGN (StartVirtual);
    VirtualAddress = MI_ROUND_TO_SIZE (VirtualAddress, SuperPageSize);
    StartPte = MiGetPteAddress ((PVOID)VirtualAddress);

    //
    // Number of standard PTEs spanned by one super page.
    //
    NumberOfPtes = SuperPageSize >> PAGE_SHIFT;

    //
    // Calculate the shift needed to span the super page size.
    //
    i = SuperPageSize;
    SuperPageShift = 0;
    while (i != 0x1) {
        i = i >> 1;
        SuperPageShift += 1;
    }

    //
    // Only whole super pages that fit entirely below EndVirtual are
    // converted; any partial tail is left using standard pages.  Each
    // constituent PTE is marked not-valid with the large-page bit and the
    // page-size shift recorded so the fault handler / TB insertion code
    // recognizes the range as a super page.
    //
    while (VirtualAddress + SuperPageSize <= (ULONG_PTR)EndVirtual) {
        for (i = 0; i < NumberOfPtes; i += 1) {
            StartPte->u.Hard.Valid = 0;
            StartPte->u.Large.LargePage = 1;
            StartPte->u.Large.PageSize = SuperPageShift;
            StartPte += 1;
        }
        VirtualAddress += SuperPageSize;
    }
}
  1759. VOID
  1760. MiConvertBackToStandardPages (
  1761. IN PVOID StartVirtual,
  1762. IN PVOID EndVirtual
  1763. )
  1764. /*++
  1765. Routine Description:
  1766. This function disables the use of the super pages.
  1767. Arguments:
  1768. StartVirtual - Supplies the start address of the region of pages to disable
  1769. super pages.
  1770. EndVirtual - Supplies the end address of the region of pages to disable
  1771. super pages.
  1772. Return Value:
  1773. None.
  1774. --*/
  1775. {
  1776. PMMPTE StartPte;
  1777. PMMPTE EndPte;
  1778. MMPTE TempPte;
  1779. StartPte = MiGetPteAddress (StartVirtual);
  1780. EndPte = MiGetPteAddress (EndVirtual);
  1781. while (StartPte <= EndPte) {
  1782. TempPte = *StartPte;
  1783. TempPte.u.Large.LargePage = 0;
  1784. TempPte.u.Large.PageSize = 0;
  1785. TempPte.u.Hard.Valid = 1;
  1786. MI_WRITE_VALID_PTE (StartPte, TempPte);
  1787. StartPte += 1;
  1788. }
  1789. }
VOID
MiSweepCacheMachineDependent (
    IN PVOID VirtualAddress,
    IN SIZE_T Size,
    IN ULONG InputAttribute
    )

/*++

Routine Description:

    This function checks and performs appropriate cache flushing operations,
    draining and sweeping the cache over the spanned range and, for
    write-combined requests, rewriting the PTEs with the write-combine
    attribute.

Arguments:

    VirtualAddress - Supplies the start address of the region of pages.

    Size - Supplies the size of the region in bytes (rounded below to whole
           pages spanned).

    InputAttribute - Supplies the new cache attribute (a
                     MI_PFN_CACHE_ATTRIBUTE value passed as a ULONG).

Return Value:

    None.

--*/

{
    PFN_NUMBER NumberOfPages;
    PMMPTE PointerPte;
    MMPTE TempPte;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    CacheAttribute = (MI_PFN_CACHE_ATTRIBUTE) InputAttribute;

    //
    // Normalize the request to whole pages before sweeping.
    //
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (VirtualAddress, Size);
    VirtualAddress = PAGE_ALIGN(VirtualAddress);
    Size = NumberOfPages * PAGE_SIZE;

    KeSweepCacheRangeWithDrain (TRUE, VirtualAddress, (ULONG)Size);

    if (CacheAttribute == MiWriteCombined) {

        //
        // Stamp every PTE in the range with the write-combine attribute.
        //
        PointerPte = MiGetPteAddress(VirtualAddress);
        while (NumberOfPages != 0) {
            TempPte = *PointerPte;
            MI_SET_PTE_WRITE_COMBINE2 (TempPte);
            MI_WRITE_VALID_PTE (PointerPte, TempPte);
            PointerPte += 1;
            NumberOfPages -= 1;
        }
    }
}
PVOID
MiConvertToLoaderVirtual (
    IN PFN_NUMBER Page,
    IN PLOADER_PARAMETER_BLOCK LoaderBlock
    )

/*++

Routine Description:

    Translate a physical page number into the loader-established virtual
    address that maps it, by searching the kernel and the two driver
    instruction translation register (ITR) ranges recorded in the loader
    block.  A physical page outside all three ranges is fatal at this
    point in boot, so the routine bugchecks rather than returning.

Arguments:

    Page - Supplies the physical page number to translate.

    LoaderBlock - Supplies the address of the loader block.

Return Value:

    The loader virtual address corresponding to the physical page.

--*/

{
    ULONG_PTR PageAddress;
    PTR_INFO ItrInfo;

    PageAddress = Page << PAGE_SHIFT;
    ItrInfo = &LoaderBlock->u.Ia64.ItrInfo[0];

    //
    // NOTE(review): the range checks below use "<=" against
    // PhysicalAddress + (1 << PageSize), which accepts an address exactly
    // one byte past the mapped range ("<" would be the exclusive bound).
    // This matches the loader's historical behavior — confirm against the
    // loader's TR population before tightening.
    //
    if ((PageAddress >= ItrInfo[ITR_KERNEL_INDEX].PhysicalAddress) &&
        (PageAddress <= ItrInfo[ITR_KERNEL_INDEX].PhysicalAddress +
            ((ULONG_PTR)1 << ItrInfo[ITR_KERNEL_INDEX].PageSize))) {

        return (PVOID)(ItrInfo[ITR_KERNEL_INDEX].VirtualAddress +
                (PageAddress - ItrInfo[ITR_KERNEL_INDEX].PhysicalAddress));

    }
    else if ((PageAddress >= ItrInfo[ITR_DRIVER0_INDEX].PhysicalAddress) &&
        (PageAddress <= ItrInfo[ITR_DRIVER0_INDEX].PhysicalAddress +
            ((ULONG_PTR)1 << ItrInfo[ITR_DRIVER0_INDEX].PageSize))) {

        return (PVOID)(ItrInfo[ITR_DRIVER0_INDEX].VirtualAddress +
                (PageAddress - ItrInfo[ITR_DRIVER0_INDEX].PhysicalAddress));

    }
    else if ((PageAddress >= ItrInfo[ITR_DRIVER1_INDEX].PhysicalAddress) &&
        (PageAddress <= ItrInfo[ITR_DRIVER1_INDEX].PhysicalAddress +
            ((ULONG_PTR)1 << ItrInfo[ITR_DRIVER1_INDEX].PageSize))) {

        return (PVOID)(ItrInfo[ITR_DRIVER1_INDEX].VirtualAddress +
                (PageAddress - ItrInfo[ITR_DRIVER1_INDEX].PhysicalAddress));

    }
    else {

        //
        // The page is not covered by any loader translation register —
        // there is no valid virtual mapping to return.
        //
        KeBugCheckEx (MEMORY_MANAGEMENT,
                      0x01010101,
                      PageAddress,
                      (ULONG_PTR)&ItrInfo[0],
                      (ULONG_PTR)LoaderBlock);
    }
}
VOID
MiBuildPageTableForLoaderMemory (
    IN PLOADER_PARAMETER_BLOCK LoaderBlock
    )

/*++

Routine Description:

    This function builds page tables for loader loaded drivers and loader
    allocated memory, and also maps the I/O port space with caching
    disabled.  Page directory parent (PPE) and page directory (PDE) pages
    are allocated on demand from the phase-0 free page supply as each new
    boundary is crossed.

Arguments:

    LoaderBlock - Supplies the address of the loader block.

Return Value:

    None.

--*/

{
    PMMPTE StartPte;
    PMMPTE EndPte;
    PMMPTE StartPde;
    PMMPTE StartPpe;
    MMPTE TempPte;
    MMPTE TempPte2;
    ULONG First;
    PLIST_ENTRY NextEntry;
    PFN_NUMBER NextPhysicalPage;
    PVOID Va;
    PFN_NUMBER PfnNumber;
    PTR_INFO DtrInfo;
    PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;

    TempPte = ValidKernelPte;

    NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
    for ( ; NextEntry != &LoaderBlock->MemoryDescriptorListHead; NextEntry = NextEntry->Flink) {

        MemoryDescriptor = CONTAINING_RECORD(NextEntry,
                                             MEMORY_ALLOCATION_DESCRIPTOR,
                                             ListEntry);

        //
        // Data-like loader regions are mapped no-execute; code-like
        // regions are mapped executable; all other descriptor types are
        // skipped entirely.
        //
        // NOTE(review): LoaderStartupDpcStack appears in BOTH lists; the
        // first (no-execute) branch always wins, so its mention in the
        // executable branch below is unreachable — confirm which
        // treatment was intended.
        //
        if ((MemoryDescriptor->MemoryType == LoaderOsloaderHeap) ||
            (MemoryDescriptor->MemoryType == LoaderRegistryData) ||
            (MemoryDescriptor->MemoryType == LoaderNlsData) ||
            (MemoryDescriptor->MemoryType == LoaderStartupDpcStack) ||
            (MemoryDescriptor->MemoryType == LoaderStartupKernelStack) ||
            (MemoryDescriptor->MemoryType == LoaderStartupPanicStack) ||
            (MemoryDescriptor->MemoryType == LoaderStartupPdrPage) ||
            (MemoryDescriptor->MemoryType == LoaderMemoryData)) {

            TempPte.u.Hard.Execute = 0;

        }
        else if ((MemoryDescriptor->MemoryType == LoaderSystemCode) ||
                 (MemoryDescriptor->MemoryType == LoaderHalCode) ||
                 (MemoryDescriptor->MemoryType == LoaderBootDriver) ||
                 (MemoryDescriptor->MemoryType == LoaderStartupDpcStack)) {

            TempPte.u.Hard.Execute = 1;

        }
        else {
            continue;
        }

        PfnNumber = MemoryDescriptor->BasePage;
        Va = MiConvertToLoaderVirtual (MemoryDescriptor->BasePage, LoaderBlock);

        StartPte = MiGetPteAddress (Va);
        EndPte = StartPte + MemoryDescriptor->PageCount;

        First = TRUE;

        while (StartPte < EndPte) {

            //
            // Materialize the page directory parent entry if this PTE
            // starts a new PPE-mapped region (or this is the first
            // iteration).  The new table page is zeroed via its KSEG
            // alias.
            //
            // NOTE(review): TempPte (including the Execute bit selected
            // above) is reused for the PPE/PDE entries themselves —
            // presumably harmless for table pages, but it means the
            // table-entry attributes vary with the descriptor type being
            // processed when the table page is first created.
            //
            if (First == TRUE || MiIsPteOnPpeBoundary(StartPte)) {
                StartPpe = MiGetPdeAddress(StartPte);
                if (StartPpe->u.Hard.Valid == 0) {
                    ASSERT (StartPpe->u.Long == 0);
                    NextPhysicalPage = MiGetNextPhysicalPage ();
                    RtlZeroMemory (KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
                    TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
                    MI_WRITE_VALID_PTE (StartPpe, TempPte);
                }
            }

            //
            // Likewise materialize the page directory entry on each new
            // PDE boundary.
            //
            if ((First == TRUE) || MiIsPteOnPdeBoundary(StartPte)) {
                First = FALSE;
                StartPde = MiGetPteAddress (StartPte);
                if (StartPde->u.Hard.Valid == 0) {
                    NextPhysicalPage = MiGetNextPhysicalPage ();
                    RtlZeroMemory (KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
                    TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
                    MI_WRITE_VALID_PTE (StartPde, TempPte);
                }
            }

            //
            // Map the leaf page itself.
            //
            TempPte.u.Hard.PageFrameNumber = PfnNumber;
            MI_WRITE_VALID_PTE (StartPte, TempPte);

            StartPte += 1;
            PfnNumber += 1;
            Va = (PVOID)((ULONG_PTR)Va + PAGE_SIZE);
        }
    }

    //
    // Build a mapping for the I/O port space with caching disabled.
    //
    DtrInfo = &LoaderBlock->u.Ia64.DtrInfo[DTR_IO_PORT_INDEX];

    Va = (PVOID) DtrInfo->VirtualAddress;

    PfnNumber = (DtrInfo->PhysicalAddress >> PAGE_SHIFT);

    StartPte = MiGetPteAddress (Va);

    EndPte = MiGetPteAddress (
        (PVOID) ((ULONG_PTR)Va + ((ULONG_PTR)1 << DtrInfo->PageSize) - 1));

    //
    // Leaf PTEs for I/O port space use a separate template with caching
    // disabled; the table pages themselves still use TempPte.
    //
    TempPte2 = ValidKernelPte;
    MI_DISABLE_CACHING (TempPte2);

    First = TRUE;

    while (StartPte <= EndPte) {

        if (First == TRUE || MiIsPteOnPpeBoundary (StartPte)) {
            StartPpe = MiGetPdeAddress(StartPte);
            if (StartPpe->u.Hard.Valid == 0) {
                ASSERT (StartPpe->u.Long == 0);
                NextPhysicalPage = MiGetNextPhysicalPage ();
                RtlZeroMemory (KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
                TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
                MI_WRITE_VALID_PTE (StartPpe, TempPte);
            }
        }

        if ((First == TRUE) || MiIsPteOnPdeBoundary (StartPte)) {
            First = FALSE;
            StartPde = MiGetPteAddress (StartPte);
            if (StartPde->u.Hard.Valid == 0) {
                NextPhysicalPage = MiGetNextPhysicalPage ();
                RtlZeroMemory (KSEG_ADDRESS(NextPhysicalPage), PAGE_SIZE);
                TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
                MI_WRITE_VALID_PTE (StartPde, TempPte);
            }
        }

        TempPte2.u.Hard.PageFrameNumber = PfnNumber;
        MI_WRITE_VALID_PTE (StartPte, TempPte2);

        StartPte += 1;
        PfnNumber += 1;
    }
}
  1987. VOID
  1988. MiRemoveLoaderSuperPages (
  1989. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  1990. )
  1991. {
  1992. //
  1993. // Remove the super page fixed TB entries used for the boot drivers.
  1994. //
  1995. KiFlushFixedInstTb(FALSE, LoaderBlock->u.Ia64.ItrInfo[ITR_DRIVER0_INDEX].VirtualAddress);
  1996. KiFlushFixedInstTb(FALSE, LoaderBlock->u.Ia64.ItrInfo[ITR_DRIVER1_INDEX].VirtualAddress);
  1997. KiFlushFixedDataTb(FALSE, LoaderBlock->u.Ia64.DtrInfo[DTR_DRIVER0_INDEX].VirtualAddress);
  1998. KiFlushFixedDataTb(FALSE, LoaderBlock->u.Ia64.DtrInfo[DTR_DRIVER1_INDEX].VirtualAddress);
  1999. KiFlushFixedDataTb(FALSE, LoaderBlock->u.Ia64.DtrInfo[DTR_IO_PORT_INDEX].VirtualAddress);
  2000. }
  2001. VOID
  2002. MiCompactMemoryDescriptorList (
  2003. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  2004. )
  2005. {
  2006. PFN_NUMBER KernelStart;
  2007. PFN_NUMBER KernelEnd;
  2008. ULONG_PTR PageSize;
  2009. PLIST_ENTRY NextEntry;
  2010. PLIST_ENTRY PreviousEntry;
  2011. PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
  2012. PMEMORY_ALLOCATION_DESCRIPTOR PreviousMemoryDescriptor;
  2013. KernelStart = MiNtoskrnlPhysicalBase >> PAGE_SHIFT;
  2014. PageSize = (ULONG_PTR)1 << MiNtoskrnlPageShift;
  2015. KernelEnd = KernelStart + (PageSize >> PAGE_SHIFT);
  2016. PreviousMemoryDescriptor = NULL;
  2017. PreviousEntry = NULL;
  2018. NextEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
  2019. for ( ; NextEntry != &LoaderBlock->MemoryDescriptorListHead; NextEntry = NextEntry->Flink) {
  2020. MemoryDescriptor = CONTAINING_RECORD(NextEntry,
  2021. MEMORY_ALLOCATION_DESCRIPTOR,
  2022. ListEntry);
  2023. if ((MemoryDescriptor->BasePage >= KernelStart) &&
  2024. (MemoryDescriptor->BasePage + MemoryDescriptor->PageCount <= KernelEnd)) {
  2025. if (MemoryDescriptor->MemoryType == LoaderSystemBlock) {
  2026. MemoryDescriptor->MemoryType = LoaderFirmwareTemporary;
  2027. }
  2028. else if (MemoryDescriptor->MemoryType == LoaderSpecialMemory) {
  2029. MemoryDescriptor->MemoryType = LoaderFirmwareTemporary;
  2030. }
  2031. }
  2032. if ((PreviousMemoryDescriptor != NULL) &&
  2033. (MemoryDescriptor->MemoryType == PreviousMemoryDescriptor->MemoryType) &&
  2034. (MemoryDescriptor->BasePage ==
  2035. (PreviousMemoryDescriptor->BasePage + PreviousMemoryDescriptor->PageCount))) {
  2036. PreviousMemoryDescriptor->PageCount += MemoryDescriptor->PageCount;
  2037. RemoveEntryList (NextEntry);
  2038. }
  2039. else {
  2040. PreviousMemoryDescriptor = MemoryDescriptor;
  2041. PreviousEntry = NextEntry;
  2042. }
  2043. }
  2044. }
  2045. VOID
  2046. MiInitializeTbImage (
  2047. VOID
  2048. )
  2049. /*++
  2050. Routine Description:
  2051. Initialize the software map of the translation register mappings wired
  2052. into the TB by the loader.
  2053. Arguments:
  2054. None.
  2055. Return Value:
  2056. None.
  2057. Environment:
  2058. Kernel mode, Phase 0 INIT only so no locks needed.
  2059. --*/
  2060. {
  2061. ULONG PageSize;
  2062. ULONG_PTR TranslationLength;
  2063. ULONG_PTR BaseAddress;
  2064. ULONG_PTR EndAddress;
  2065. PTR_INFO TranslationRegisterEntry;
  2066. PTR_INFO AliasTranslationRegisterEntry;
  2067. PTR_INFO LastTranslationRegisterEntry;
  2068. MiLastCachedFrame = MiCachedFrames;
  2069. //
  2070. // Snap the boot TRs.
  2071. //
  2072. RtlCopyMemory (&MiBootedTrInfo[0],
  2073. &KeLoaderBlock->u.Ia64.ItrInfo[0],
  2074. NUMBER_OF_LOADER_TR_ENTRIES * sizeof (TR_INFO));
  2075. RtlCopyMemory (&MiBootedTrInfo[NUMBER_OF_LOADER_TR_ENTRIES],
  2076. &KeLoaderBlock->u.Ia64.DtrInfo[0],
  2077. NUMBER_OF_LOADER_TR_ENTRIES * sizeof (TR_INFO));
  2078. //
  2079. // Capture information regarding the translation register entry that
  2080. // maps the kernel.
  2081. //
  2082. LastTranslationRegisterEntry = MiTrInfo;
  2083. TranslationRegisterEntry = &KeLoaderBlock->u.Ia64.ItrInfo[ITR_KERNEL_INDEX];
  2084. AliasTranslationRegisterEntry = TranslationRegisterEntry + NUMBER_OF_LOADER_TR_ENTRIES;
  2085. ASSERT (TranslationRegisterEntry->PageSize != 0);
  2086. ASSERT (TranslationRegisterEntry->PageSize == AliasTranslationRegisterEntry->PageSize);
  2087. ASSERT (TranslationRegisterEntry->VirtualAddress == AliasTranslationRegisterEntry->VirtualAddress);
  2088. ASSERT (TranslationRegisterEntry->PhysicalAddress == AliasTranslationRegisterEntry->PhysicalAddress);
  2089. *LastTranslationRegisterEntry = *TranslationRegisterEntry;
  2090. //
  2091. // Calculate the ending address for each range to speed up
  2092. // subsequent searches.
  2093. //
  2094. PageSize = TranslationRegisterEntry->PageSize;
  2095. ASSERT (PageSize != 0);
  2096. BaseAddress = TranslationRegisterEntry->VirtualAddress;
  2097. TranslationLength = 1 << PageSize;
  2098. MiLastCachedFrame->BasePage = MI_VA_TO_PAGE (TranslationRegisterEntry->PhysicalAddress);
  2099. MiLastCachedFrame->LastPage = MiLastCachedFrame->BasePage + BYTES_TO_PAGES (TranslationLength);
  2100. MiLastCachedFrame += 1;
  2101. EndAddress = BaseAddress + TranslationLength;
  2102. LastTranslationRegisterEntry->PhysicalAddress = EndAddress;
  2103. MiLastTrEntry = LastTranslationRegisterEntry + 1;
  2104. //
  2105. // Add in the KSEG3 range.
  2106. //
  2107. MiAddTrEntry (KSEG3_BASE, KSEG3_LIMIT);
  2108. //
  2109. // Add in the PCR range.
  2110. //
  2111. MiAddTrEntry ((ULONG_PTR)PCR, (ULONG_PTR)PCR + PAGE_SIZE);
  2112. return;
  2113. }
VOID
MiAddTrEntry (
    ULONG_PTR BaseAddress,
    ULONG_PTR EndAddress
    )

/*++

Routine Description:

    Add a translation cache entry to our software table (MiTrInfo).

    Note the fields are repurposed in this table: VirtualAddress holds the
    start of the range, PhysicalAddress holds the END virtual address (so
    later lookups need no length computation), and PageSize is set to a
    nonzero marker (1) to flag the slot as in use.

Arguments:

    BaseAddress - Supplies the starting virtual address of the range.

    EndAddress - Supplies the ending virtual address of the range.

Return Value:

    None.

Environment:

    Kernel mode, Phase 0 INIT only so no locks needed.

--*/

{
    PTR_INFO TranslationRegisterEntry;

    //
    // Bugcheck if the table was never initialized or is already full.
    //
    if ((MiLastTrEntry == NULL) ||
        (MiLastTrEntry == MiTrInfo + NUMBER_OF_LOADER_TR_ENTRIES)) {

        //
        // This should never happen.
        //
        KeBugCheckEx (MEMORY_MANAGEMENT,
                      0x02020202,
                      (ULONG_PTR) MiTrInfo,
                      (ULONG_PTR) MiLastTrEntry,
                      NUMBER_OF_LOADER_TR_ENTRIES);
    }

    TranslationRegisterEntry = MiLastTrEntry;
    TranslationRegisterEntry->VirtualAddress = (ULONGLONG) BaseAddress;
    TranslationRegisterEntry->PhysicalAddress = (ULONGLONG) EndAddress;
    TranslationRegisterEntry->PageSize = 1;

    MiLastTrEntry += 1;

    return;
}
LOGICAL
MiIsVirtualAddressMappedByTr (
    IN PVOID VirtualAddress
    )

/*++

Routine Description:

    For a given virtual address this function returns TRUE if no page fault
    will occur for a read operation on the address, FALSE otherwise.

    Note that after this routine was called, if appropriate locks are not
    held, a non-faulting address could fault.

Arguments:

    VirtualAddress - Supplies the virtual address to check.

Return Value:

    TRUE if no page fault would be generated reading the virtual address,
    FALSE otherwise.

Environment:

    Kernel mode.

--*/

{
    ULONG i;
    ULONG start;
    ULONG PageSize;
    PMMPFN Pfn1;
    PFN_NUMBER BasePage;
    PFN_NUMBER PageCount;
    PTR_INFO TranslationRegisterEntry;
    ULONG_PTR TranslationLength;
    ULONG_PTR BaseAddress;
    ULONG_PTR EndAddress;
    PFN_NUMBER PageFrameIndex;
    PLIST_ENTRY NextMd;
    PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;

    //
    // KSEG3 addresses map physical memory directly, so validate the
    // implied physical frame rather than any TR entry.
    //
    if ((VirtualAddress >= (PVOID)KSEG3_BASE) && (VirtualAddress < (PVOID)KSEG3_LIMIT)) {

        //
        // Bound this with the actual physical pages so that a busted
        // debugger access can't tube the machine.  Note only pages
        // with attributes of fully cached should be accessed this way
        // to avoid corrupting the TB.
        //
        // N.B.  You cannot use the line below as on IA64 this translates
        // into a direct TB query (tpa) and this address has not been
        // validated against the actual PFNs.  Instead, convert it manually
        // and then validate it.
        //
        // PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (VirtualAddress);
        //

        PageFrameIndex = (ULONG_PTR)VirtualAddress - KSEG3_BASE;
        PageFrameIndex = MI_VA_TO_PAGE (PageFrameIndex);

        //
        // Preferred path: check against the PFN database runs when the
        // physical memory block has been constructed.
        //
        if (MmPhysicalMemoryBlock != NULL) {

            start = 0;
            do {

                PageCount = MmPhysicalMemoryBlock->Run[start].PageCount;

                if (PageCount != 0) {
                    BasePage = MmPhysicalMemoryBlock->Run[start].BasePage;
                    if ((PageFrameIndex >= BasePage) &&
                        (PageFrameIndex < BasePage + PageCount)) {

                        //
                        // Only fully-cached (or not-yet-mapped) frames are
                        // safe to touch through KSEG3 without risking
                        // conflicting TB attributes.
                        //
                        Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
                        if ((Pfn1->u3.e1.CacheAttribute == MiCached) ||
                            (Pfn1->u3.e1.CacheAttribute == MiNotMapped)) {

                            return TRUE;
                        }
                        return FALSE;
                    }
                }

                start += 1;

            } while (start != MmPhysicalMemoryBlock->NumberOfRuns);

            return FALSE;
        }

        //
        // Walk loader blocks as it's all we have.
        //

        NextMd = KeLoaderBlock->MemoryDescriptorListHead.Flink;

        while (NextMd != &KeLoaderBlock->MemoryDescriptorListHead) {

            MemoryDescriptor = CONTAINING_RECORD (NextMd,
                                                  MEMORY_ALLOCATION_DESCRIPTOR,
                                                  ListEntry);

            BasePage = MemoryDescriptor->BasePage;
            PageCount = MemoryDescriptor->PageCount;

            if ((PageFrameIndex >= BasePage) &&
                (PageFrameIndex < BasePage + PageCount)) {

                //
                // Changes to the memory type requirements below need
                // to be done carefully as the debugger may not only
                // accidentally try to read this range, it may try
                // to write it !
                //

                switch (MemoryDescriptor->MemoryType) {
                    case LoaderFree:
                    case LoaderLoadedProgram:
                    case LoaderFirmwareTemporary:
                    case LoaderOsloaderStack:
                        return TRUE;
                }

                return FALSE;
            }

            NextMd = MemoryDescriptor->ListEntry.Flink;
        }

        return FALSE;
    }

    //
    // Before the memory management mappings are built, consult the raw
    // loader TR table; afterwards use our own MiTrInfo software copy
    // (whose PhysicalAddress field already holds the precomputed end
    // address).
    //
    if (MiMappingsInitialized == FALSE) {
        TranslationRegisterEntry = &KeLoaderBlock->u.Ia64.ItrInfo[0];
    }
    else {
        TranslationRegisterEntry = &MiTrInfo[0];
    }

    //
    // Examine the 8 icache & dcache TR entries looking for a match.
    // It is too bad this the number of entries is hardcoded into the
    // loader block.  Since it is this way, assume also that the ITR
    // and DTR entries are contiguous and just keep walking into the DTR
    // if a match cannot be found in the ITR.
    //

    for (i = 0; i < 2 * NUMBER_OF_LOADER_TR_ENTRIES; i += 1) {

        PageSize = TranslationRegisterEntry->PageSize;

        if (PageSize != 0) {

            BaseAddress = TranslationRegisterEntry->VirtualAddress;

            //
            // Convert PageSize (really the power of 2 to use) into the
            // correct byte length the translation maps.  Note that the MiTrInfo
            // is already converted.
            //

            if (MiMappingsInitialized == FALSE) {
                TranslationLength = 1;
                while (PageSize != 0) {
                    TranslationLength = TranslationLength << 1;
                    PageSize -= 1;
                }
                EndAddress = BaseAddress + TranslationLength;
            }
            else {
                EndAddress = TranslationRegisterEntry->PhysicalAddress;
            }

            if ((VirtualAddress >= (PVOID) BaseAddress) &&
                (VirtualAddress < (PVOID) EndAddress)) {

                return TRUE;
            }
        }

        TranslationRegisterEntry += 1;

        //
        // MiLastTrEntry bounds the valid portion of MiTrInfo once it has
        // been built; stop early when it is reached.
        //
        if (TranslationRegisterEntry == MiLastTrEntry) {
            break;
        }
    }

    return FALSE;
}
  2294. LOGICAL
  2295. MiPageFrameIndexMustBeCached (
  2296. IN PFN_NUMBER PageFrameIndex
  2297. )
  2298. {
  2299. PCACHED_FRAME_RUN CachedFrame;
  2300. CachedFrame = MiCachedFrames;
  2301. while (CachedFrame < MiLastCachedFrame) {
  2302. if ((PageFrameIndex >= CachedFrame->BasePage) &&
  2303. (PageFrameIndex < CachedFrame->LastPage)) {
  2304. return TRUE;
  2305. }
  2306. CachedFrame += 1;
  2307. }
  2308. return FALSE;
  2309. }