Source code of Windows XP (NT5)


  1. /*++
  2. Copyright (c) 1990 Microsoft Corporation
  3. Module Name:
  4. initamd.c
  5. Abstract:
  6. This module contains the machine dependent initialization for the
  7. memory management component. It is specifically tailored to the
  8. AMD64 architecture.
  9. Author:
  10. Landy Wang (landyw) 08-Apr-2000
  11. Revision History:
  12. --*/
  13. #include "mi.h"
  14. PFN_NUMBER
  15. MxGetNextPage (
  16. IN PFN_NUMBER PagesNeeded
  17. );
  18. PFN_NUMBER
  19. MxPagesAvailable (
  20. VOID
  21. );
  22. VOID
  23. MxConvertToLargePage (
  24. IN PVOID VirtualAddress,
  25. IN PVOID EndVirtualAddress
  26. );
  27. VOID
  28. MxPopulatePageDirectories (
  29. IN PMMPTE StartPde,
  30. IN PMMPTE EndPde
  31. );
  32. #ifdef ALLOC_PRAGMA
  33. #pragma alloc_text(INIT,MiInitMachineDependent)
  34. #pragma alloc_text(INIT,MxGetNextPage)
  35. #pragma alloc_text(INIT,MxPagesAvailable)
  36. #pragma alloc_text(INIT,MxConvertToLargePage)
  37. #pragma alloc_text(INIT,MiReportPhysicalMemory)
  38. #pragma alloc_text(INIT,MxPopulatePageDirectories)
  39. #endif
  40. #define _1mbInPages (0x100000 >> PAGE_SHIFT)
  41. #define _4gbInPages (0x100000000 >> PAGE_SHIFT)
  42. #define MM_BIOS_START (0xA0000 >> PAGE_SHIFT)
  43. #define MM_BIOS_END (0xFFFFF >> PAGE_SHIFT)
  44. #define MM_LARGE_PAGE_MINIMUM ((255*1024*1024) >> PAGE_SHIFT)
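//
// Editor's note (illustrative, not part of the original source): with the
// AMD64 4KB page size (PAGE_SHIFT == 12) the constants above work out to
// _1mbInPages == 0x100 (256 pages), _4gbInPages == 0x100000 pages, and
// MM_LARGE_PAGE_MINIMUM == 0xFF00 pages, i.e. large page mappings are only
// considered on machines with more than 255mb of RAM.
//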
  45. #ifdef ALLOC_PRAGMA
  46. #pragma alloc_text(INIT,MiInitMachineDependent)
  47. #pragma alloc_text(INIT,MiRemoveLowPages)
  48. #endif
  49. extern KEVENT MiImageMappingPteEvent;
  50. #define MI_LOWMEM_MAGIC_BIT (0x80000000)
  51. //
  52. // Local data.
  53. //
  54. #ifdef ALLOC_DATA_PRAGMA
  55. #pragma data_seg("INITDATA")
  56. #endif
  57. PFN_NUMBER MxPfnAllocation;
  58. PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
  59. #ifdef ALLOC_DATA_PRAGMA
  60. #pragma data_seg()
  61. #endif
  62. MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
  63. typedef struct _MI_LARGE_VA_RANGES {
  64. PVOID VirtualAddress;
  65. PVOID EndVirtualAddress;
  66. } MI_LARGE_VA_RANGES, *PMI_LARGE_VA_RANGES;
  67. //
  68. // There are potentially 4 large page ranges:
  69. //
  70. // 1. PFN database
  71. // 2. Initial nonpaged pool
  72. // 3. Kernel code/data
  73. // 4. HAL code/data
  74. //
  75. #define MI_LARGE_PFN_DATABASE 0x1
  76. #define MI_LARGE_NONPAGED_POOL 0x2
  77. #define MI_LARGE_KERNEL_HAL 0x4
  78. #define MI_LARGE_ALL 0x7
  79. ULONG MxMapLargePages = MI_LARGE_ALL;
  80. #define MI_MAX_LARGE_PAGE_RANGES 4
  81. ULONG MiLargeVaRangeIndex;
  82. MI_LARGE_VA_RANGES MiLargeVaRanges[MI_MAX_LARGE_PAGE_RANGES];
  83. ULONG MiLargePageRangeIndex;
  84. MI_LARGE_PAGE_RANGES MiLargePageRanges[MI_MAX_LARGE_PAGE_RANGES];
  85. #define MM_PFN_MAPPED_BY_PDE (MM_VA_MAPPED_BY_PDE >> PAGE_SHIFT)
  86. PFN_NUMBER
  87. MxGetNextPage (
  88. IN PFN_NUMBER PagesNeeded
  89. )
  90. /*++
  91. Routine Description:
  92. This function returns the next physical page number from the largest
  93. free descriptor. If there are not enough physical pages left
  94. to satisfy the request then a bugcheck is executed since the system
  95. cannot be initialized.
  96. Arguments:
  97. PagesNeeded - Supplies the number of pages needed.
  98. Return Value:
  99. The base of the range of physically contiguous pages.
  100. Environment:
  101. Kernel mode, Phase 0 only.
  102. --*/
  103. {
  104. PFN_NUMBER PageFrameIndex;
  105. //
  106. // Examine the free descriptor to see if enough usable memory is available.
  107. //
  108. if (PagesNeeded > MxFreeDescriptor->PageCount) {
  109. KeBugCheckEx (INSTALL_MORE_MEMORY,
  110. MmNumberOfPhysicalPages,
  111. MxFreeDescriptor->PageCount,
  112. MxOldFreeDescriptor.PageCount,
  113. PagesNeeded);
  114. }
  115. PageFrameIndex = MxFreeDescriptor->BasePage;
  116. MxFreeDescriptor->BasePage += (ULONG) PagesNeeded;
  117. MxFreeDescriptor->PageCount -= (ULONG) PagesNeeded;
  118. return PageFrameIndex;
  119. }
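//
// Editor's illustrative sketch (hypothetical helper, not part of the
// original source, kept under #if 0): MxPagesAvailable and MxGetNextPage
// together form a simple bump allocator over the largest free loader
// descriptor. A Phase 0 caller that can tolerate a trimmed allocation
// checks availability first, because MxGetNextPage bugchecks with
// INSTALL_MORE_MEMORY when the descriptor cannot satisfy the request -
// the nonpaged pool sizing later in this file follows the same pattern.
//
#if 0
PFN_NUMBER
MxExampleCarve (
    IN PFN_NUMBER PagesWanted
    )
{
    PFN_NUMBER PagesLeft;

    //
    // See how many physically contiguous pages the descriptor still has.
    //
    PagesLeft = MxPagesAvailable ();

    if (PagesWanted > PagesLeft) {

        //
        // Trim the request rather than bugchecking inside MxGetNextPage.
        //
        PagesWanted = PagesLeft;
    }

    return MxGetNextPage (PagesWanted);
}
#endif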
  120. PFN_NUMBER
  121. MxPagesAvailable (
  122. VOID
  123. )
  124. /*++
  125. Routine Description:
  126. This function returns the number of pages available.
  127. Arguments:
  128. None.
  129. Return Value:
  130. The number of physically contiguous pages currently available.
  131. Environment:
  132. Kernel mode, Phase 0 only.
  133. --*/
  134. {
  135. return MxFreeDescriptor->PageCount;
  136. }
  137. VOID
  138. MxConvertToLargePage (
  139. IN PVOID VirtualAddress,
  140. IN PVOID EndVirtualAddress
  141. )
  142. /*++
  143. Routine Description:
  144. This function converts the backing for the supplied virtual address range
  145. to a large page mapping.
  146. Arguments:
  147. VirtualAddress - Supplies the virtual address to convert to a large page.
  148. EndVirtualAddress - Supplies the end virtual address to convert to a
  149. large page.
  150. Return Value:
  151. None.
  152. Environment:
  153. Kernel mode, Phase 1 only.
  154. --*/
  155. {
  156. ULONG i;
  157. MMPTE TempPde;
  158. PMMPTE PointerPde;
  159. PMMPTE LastPde;
  160. PMMPTE PointerPte;
  161. KIRQL OldIrql;
  162. PMMPFN Pfn1;
  163. PFN_NUMBER PageFrameIndex;
  164. LOGICAL ValidPteFound;
  165. PFN_NUMBER LargePageBaseFrame;
  166. ASSERT (MxMapLargePages != 0);
  167. PointerPde = MiGetPdeAddress (VirtualAddress);
  168. LastPde = MiGetPdeAddress (EndVirtualAddress);
  169. TempPde = ValidKernelPde;
  170. TempPde.u.Hard.LargePage = 1;
  171. TempPde.u.Hard.Global = 1;
  172. LOCK_PFN (OldIrql);
  173. for ( ; PointerPde <= LastPde; PointerPde += 1) {
  174. ASSERT (PointerPde->u.Hard.Valid == 1);
  175. if (PointerPde->u.Hard.LargePage == 1) {
  176. continue;
  177. }
  178. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  179. //
  180. // Here's a nasty little hack - the page table page mapping the kernel
  181. // and HAL (built by the loader) does not necessarily fill all the
  182. // page table entries (ie: any number of leading entries may be zero).
  183. //
  184. // To deal with this, walk forward until a nonzero entry is found
  185. // and re-index the large page based on this.
  186. //
  187. ValidPteFound = FALSE;
  188. LargePageBaseFrame = (ULONG)-1;
  189. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  190. ASSERT ((PageFrameIndex & (MM_PFN_MAPPED_BY_PDE - 1)) == 0);
  191. for (i = 0; i < PTE_PER_PAGE; i += 1) {
  192. ASSERT ((PointerPte->u.Long == ZeroKernelPte.u.Long) ||
  193. (ValidPteFound == FALSE) ||
  194. (PageFrameIndex == MI_GET_PAGE_FRAME_FROM_PTE (PointerPte)));
  195. if (PointerPte->u.Hard.Valid == 1) {
  196. if (ValidPteFound == FALSE) {
  197. ValidPteFound = TRUE;
  198. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  199. LargePageBaseFrame = PageFrameIndex - i;
  200. }
  201. }
  202. PointerPte += 1;
  203. PageFrameIndex += 1;
  204. }
  205. if (ValidPteFound == FALSE) {
  206. continue;
  207. }
  208. TempPde.u.Hard.PageFrameNumber = LargePageBaseFrame;
  209. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
  210. KeFlushSingleTb (MiGetVirtualAddressMappedByPte (PointerPde),
  211. TRUE,
  212. TRUE,
  213. (PHARDWARE_PTE)PointerPde,
  214. TempPde.u.Flush);
  215. KeFlushEntireTb (TRUE, TRUE);
  216. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  217. Pfn1->u2.ShareCount = 0;
  218. Pfn1->u3.e2.ReferenceCount = 1;
  219. Pfn1->u3.e1.PageLocation = StandbyPageList;
  220. MI_SET_PFN_DELETED (Pfn1);
  221. MiDecrementReferenceCount (PageFrameIndex);
  222. }
  223. UNLOCK_PFN (OldIrql);
  224. }
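//
// Editor's worked example (illustrative, not part of the original source):
// with 4KB pages a PDE maps PTE_PER_PAGE == 512 entries (2mb). If the
// loader left the first three PTEs of a kernel page table zero and entry
// i == 3 is the first valid one, mapping frame 0x803, then
// LargePageBaseFrame == 0x803 - 3 == 0x800, the frame PTE index 0 would
// have mapped, so the single large PDE written above spans exactly the
// physical range the original 4KB mappings implied. The inner loop ASSERT
// verifies that every valid PTE maps (base + index) as this requires.
//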
  225. VOID
  226. MiReportPhysicalMemory (
  227. VOID
  228. )
  229. /*++
  230. Routine Description:
  231. This routine is called during Phase 0 initialization once the
  232. MmPhysicalMemoryBlock has been constructed. Its job is to decide
  233. which large page ranges to enable later and also to construct a
  234. large page comparison list so any requests which are not fully cached
  235. can check this list in order to refuse conflicting requests.
  236. Arguments:
  237. None.
  238. Return Value:
  239. None.
  240. Environment:
  241. Kernel mode. Phase 0 only.
  242. This is called before any non-MmCached allocations are made.
  243. --*/
  244. {
  245. ULONG i, j;
  246. PMMPTE PointerPte;
  247. LOGICAL EntryFound;
  248. PFN_NUMBER count;
  249. PFN_NUMBER Page;
  250. PFN_NUMBER LastPage;
  251. PFN_NUMBER PageFrameIndex;
  252. PFN_NUMBER LastPageFrameIndex;
  253. //
  254. // Examine the physical memory block to see whether large pages should
  255. // be enabled. The key point is that all the physical pages within a
  256. // given large page range must have the same cache attributes (MmCached)
  257. // in order to maintain TB coherency. This can be done provided all
  258. // the pages within the large page range represent real RAM (as described
  259. // by the loader) so that memory management can control it. If any
  260. // portion of the large page range is not RAM, it is possible that it
  261. // may get used as noncached or writecombined device memory and
  262. // therefore large pages cannot be used.
  263. //
  264. if (MxMapLargePages == 0) {
  265. return;
  266. }
  267. for (i = 0; i < MiLargeVaRangeIndex; i += 1) {
  268. PointerPte = MiGetPteAddress (MiLargeVaRanges[i].VirtualAddress);
  269. ASSERT (PointerPte->u.Hard.Valid == 1);
  270. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  271. PointerPte = MiGetPteAddress (MiLargeVaRanges[i].EndVirtualAddress);
  272. ASSERT (PointerPte->u.Hard.Valid == 1);
  273. LastPageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  274. //
  275. // Round the start down to a page directory boundary and the end to
  276. // the last page directory entry before the next boundary.
  277. //
  278. PageFrameIndex &= ~(MM_PFN_MAPPED_BY_PDE - 1);
  279. LastPageFrameIndex |= (MM_PFN_MAPPED_BY_PDE - 1);
  280. EntryFound = FALSE;
  281. j = 0;
  282. do {
  283. count = MmPhysicalMemoryBlock->Run[j].PageCount;
  284. Page = MmPhysicalMemoryBlock->Run[j].BasePage;
  285. LastPage = Page + count;
  286. if ((PageFrameIndex >= Page) && (LastPageFrameIndex < LastPage)) {
  287. EntryFound = TRUE;
  288. break;
  289. }
  290. j += 1;
  291. } while (j != MmPhysicalMemoryBlock->NumberOfRuns);
  292. if (EntryFound == FALSE) {
  293. //
  294. // No entry was found that completely spans this large page range.
  295. // Zero it so this range will not be converted into large pages
  296. // later.
  297. //
  298. DbgPrint ("MM: Loader/HAL memory block indicates large pages cannot be used\n");
  299. MiLargeVaRanges[i].VirtualAddress = NULL;
  300. //
  301. // Don't use large pages for anything if any individual range
  302. // could not be used. This is because 2 separate ranges may
  303. // share a straddling large page. If the first range was unable
  304. // to use large pages, but the second one does ... then only part
  305. // of the first range will get large pages if we enable large
  306. pages for the second range. This would be very bad as we use
  307. // the MI_IS_PHYSICAL macro everywhere and assume the entire
  308. // range is in or out, so disable all large pages here instead.
  309. //
  310. MiLargeVaRangeIndex = 0;
  311. MiLargePageRangeIndex = 0;
  312. break;
  313. }
  314. else {
  315. //
  316. // Someday get clever and merge and sort ranges.
  317. //
  318. MiLargePageRanges[MiLargePageRangeIndex].StartFrame = PageFrameIndex;
  319. MiLargePageRanges[MiLargePageRangeIndex].LastFrame = LastPageFrameIndex;
  320. MiLargePageRangeIndex += 1;
  321. }
  322. }
  323. }
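//
// Editor's illustrative sketch (hypothetical helper, not part of the
// original source, kept under #if 0): the containment test applied above
// to each candidate large page range - every frame from StartFrame
// through LastFrame must lie inside a single MmPhysicalMemoryBlock run,
// otherwise part of the 2mb page could later be handed out as noncached
// or writecombined device memory.
//
#if 0
LOGICAL
MiExampleRangeIsAllRam (
    IN PFN_NUMBER StartFrame,
    IN PFN_NUMBER LastFrame
    )
{
    ULONG j;
    PFN_NUMBER Base;
    PFN_NUMBER Last;

    for (j = 0; j < MmPhysicalMemoryBlock->NumberOfRuns; j += 1) {

        Base = MmPhysicalMemoryBlock->Run[j].BasePage;
        Last = Base + MmPhysicalMemoryBlock->Run[j].PageCount;

        if ((StartFrame >= Base) && (LastFrame < Last)) {
            return TRUE;
        }
    }

    return FALSE;
}
#endif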
  324. LOGICAL
  325. MiMustFrameBeCached (
  326. IN PFN_NUMBER PageFrameIndex
  327. )
  328. /*++
  329. Routine Description:
  330. This routine checks whether the specified page frame must be mapped
  331. fully cached because it is already part of a large page which is fully
  332. cached. This must be detected otherwise we would be creating an
  333. incoherent overlapping TB entry as the same physical page would be
  334. mapped by 2 different TB entries with different cache attributes.
  335. Arguments:
  336. PageFrameIndex - Supplies the page frame index in question.
  337. Return Value:
  338. TRUE if the page must be mapped as fully cachable, FALSE if not.
  339. Environment:
  340. Kernel mode. IRQL of DISPATCH_LEVEL or below, PFN lock may be held.
  341. --*/
  342. {
  343. ULONG i;
  344. PMI_LARGE_PAGE_RANGES Range;
  345. Range = MiLargePageRanges;
  346. for (i = 0; i < MiLargePageRangeIndex; i += 1, Range += 1) {
  347. if ((PageFrameIndex >= Range->StartFrame) && (PageFrameIndex <= Range->LastFrame)) {
  348. return TRUE;
  349. }
  350. }
  351. return FALSE;
  352. }
  353. VOID
  354. MxPopulatePageDirectories (
  355. IN PMMPTE StartPde,
  356. IN PMMPTE EndPde
  357. )
  358. /*++
  359. Routine Description:
  360. This routine allocates page parents, directories and tables as needed.
  361. Note any new page tables needed to map the range get zero filled.
  362. Arguments:
  363. StartPde - Supplies the PDE to begin the population at.
  364. EndPde - Supplies the PDE to end the population at.
  365. Return Value:
  366. None.
  367. Environment:
  368. Kernel mode. Phase 0 initialization.
  369. --*/
  370. {
  371. PMMPTE StartPxe;
  372. PMMPTE StartPpe;
  373. MMPTE TempPte;
  374. LOGICAL First;
  375. First = TRUE;
  376. TempPte = ValidKernelPte;
  377. while (StartPde <= EndPde) {
  378. if (First == TRUE || MiIsPteOnPdeBoundary(StartPde)) {
  379. First = FALSE;
  380. StartPxe = MiGetPdeAddress(StartPde);
  381. if (StartPxe->u.Hard.Valid == 0) {
  382. TempPte.u.Hard.PageFrameNumber = MxGetNextPage (1);
  383. *StartPxe = TempPte;
  384. RtlZeroMemory (MiGetVirtualAddressMappedByPte (StartPxe),
  385. PAGE_SIZE);
  386. }
  387. StartPpe = MiGetPteAddress(StartPde);
  388. if (StartPpe->u.Hard.Valid == 0) {
  389. TempPte.u.Hard.PageFrameNumber = MxGetNextPage (1);
  390. *StartPpe = TempPte;
  391. RtlZeroMemory (MiGetVirtualAddressMappedByPte (StartPpe),
  392. PAGE_SIZE);
  393. }
  394. }
  395. if (StartPde->u.Hard.Valid == 0) {
  396. TempPte.u.Hard.PageFrameNumber = MxGetNextPage (1);
  397. *StartPde = TempPte;
  398. }
  399. StartPde += 1;
  400. }
  401. }
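//
// Editor's note (illustrative, not part of the original source): because
// the page tables are recursively self-mapped, applying the mapping macros
// to a PDE address climbs one level of the hierarchy -
// MiGetPteAddress(StartPde) is the PPE covering the same virtual range and
// MiGetPdeAddress(StartPde) is the PXE above that. This is why the loop
// above only recomputes StartPxe and StartPpe when StartPde first enters a
// new page table page (First or MiIsPteOnPdeBoundary).
//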
  402. VOID
  403. MiInitMachineDependent (
  404. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  405. )
  406. /*++
  407. Routine Description:
  408. This routine performs the necessary operations to enable virtual
  409. memory. This includes building the page directory parent pages and
  410. the page directories for the system, building page table pages to map
  411. the code section, the data section, the stack section and the trap handler.
  412. It also initializes the PFN database and populates the free list.
  413. Arguments:
  414. LoaderBlock - Supplies the address of the loader block.
  415. Return Value:
  416. None.
  417. Environment:
  418. Kernel mode.
  419. N.B. This routine uses memory from the loader block descriptors, but
  420. the descriptors themselves must be restored prior to return as our caller
  421. walks them to create the MmPhysicalMemoryBlock.
  422. --*/
  423. {
  424. PHYSICAL_ADDRESS MaxHotPlugMemoryAddress;
  425. PVOID va;
  426. PVOID SystemPteStart;
  427. ULONG UseGlobal;
  428. PFN_NUMBER LargestFreePfnStart;
  429. PFN_NUMBER FreePfnCount;
  430. PMMPTE BasePte;
  431. ULONG BasePage;
  432. ULONG PageCount;
  433. PFN_NUMBER PagesLeft;
  434. ULONG_PTR DirBase;
  435. LOGICAL First;
  436. PMMPFN BasePfn;
  437. PMMPFN BottomPfn;
  438. PMMPFN TopPfn;
  439. PFN_NUMBER i;
  440. PFN_NUMBER PdePageNumber;
  441. PFN_NUMBER FirstNonPagedPoolPage;
  442. PFN_NUMBER FirstPfnDatabasePage;
  443. PFN_NUMBER j;
  444. LOGICAL PfnInLargePages;
  445. PFN_NUMBER PxePage;
  446. PFN_NUMBER PpePage;
  447. PFN_NUMBER PdePage;
  448. PFN_NUMBER PtePage;
  449. PEPROCESS CurrentProcess;
  450. PFN_NUMBER MostFreePage;
  451. PFN_NUMBER MostFreeLowMem;
  452. PLIST_ENTRY NextMd;
  453. SIZE_T MaxPool;
  454. KIRQL OldIrql;
  455. MMPTE TempPte;
  456. MMPTE TempPde;
  457. PMMPTE PointerPde;
  458. PMMPTE PointerPte;
  459. PMMPTE LastPte;
  460. PMMPTE Pde;
  461. PMMPTE StartPxe;
  462. PMMPTE EndPxe;
  463. PMMPTE StartPpe;
  464. PMMPTE EndPpe;
  465. PMMPTE StartPde;
  466. PMMPTE EndPde;
  467. PMMPTE StartPte;
  468. PMMPTE EndPte;
  469. PMMPFN Pfn1;
  470. PFN_NUMBER PageFrameIndex;
  471. PMMPFN Pfn2;
  472. PMMPFN Pfn3;
  473. PMMPFN Pfn4;
  474. ULONG_PTR Range;
  475. SIZE_T MaximumNonPagedPoolInBytesLimit;
  476. PFN_NUMBER LargestFreePfnCount;
  477. PLDR_DATA_TABLE_ENTRY DataTableEntry;
  478. PLIST_ENTRY NextEntry;
  479. UCHAR Associativity;
  480. PVOID NonPagedPoolStartLow;
  481. PVOID VirtualAddress;
  482. PFN_NUMBER PagesNeeded;
  483. PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
  484. ULONG ReturnedLength;
  485. NTSTATUS status;
  486. if (InitializationPhase == 1) {
  487. //
  488. // If the host processor supports large pages, and the amount of
  489. // physical memory is greater than 255mb, then map the kernel image,
  490. // HAL, PFN database and initial nonpaged pool with large pages.
  491. //
  492. if (MxMapLargePages != 0) {
  493. for (i = 0; i < MiLargeVaRangeIndex; i += 1) {
  494. if (MiLargeVaRanges[i].VirtualAddress != NULL) {
  495. MxConvertToLargePage (MiLargeVaRanges[i].VirtualAddress,
  496. MiLargeVaRanges[i].EndVirtualAddress);
  497. }
  498. }
  499. }
  500. return;
  501. }
  502. ASSERT (InitializationPhase == 0);
  503. //
  504. // All AMD64 processors support PAT mode and global pages.
  505. //
  506. ASSERT (KeFeatureBits & KF_PAT);
  507. ASSERT (KeFeatureBits & KF_GLOBAL_PAGE);
  508. ASSERT (MxMapLargePages == MI_LARGE_ALL);
  509. FirstPfnDatabasePage = 0;
  510. PfnInLargePages = FALSE;
  511. MostFreePage = 0;
  512. MostFreeLowMem = 0;
  513. LargestFreePfnCount = 0;
  514. //
  515. // If the chip doesn't support large pages or the system is booted /3GB,
  516. // then disable large page support.
  517. //
  518. if ((KeFeatureBits & KF_LARGE_PAGE) == 0) {
  519. MxMapLargePages = 0;
  520. }
  521. //
  522. // This flag is registry-settable so check before overriding.
  523. //
  524. if (MmProtectFreedNonPagedPool == TRUE) {
  525. MxMapLargePages &= ~(MI_LARGE_PFN_DATABASE | MI_LARGE_NONPAGED_POOL);
  526. }
  527. #if 0
  528. //
  529. // Since the host processor supports global bits, then set the global
  530. // bit in the template kernel PTE and PDE entries.
  531. //
  532. ValidKernelPte.u.Long |= MM_PTE_GLOBAL_MASK;
  533. #else
  534. ValidKernelPte.u.Long = ValidKernelPteLocal.u.Long;
  535. ValidKernelPde.u.Long = ValidKernelPdeLocal.u.Long;
  536. #endif
  537. //
  538. // Note that the PAE mode of the processor does not support the
  539. // global bit in PDEs which map 4K page table pages.
  540. //
  541. TempPte = ValidKernelPte;
  542. TempPde = ValidKernelPde;
  543. //
  544. // Set the directory base for the system process.
  545. //
  546. PointerPte = MiGetPxeAddress (PXE_BASE);
  547. PdePageNumber = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
  548. DirBase = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte) << PAGE_SHIFT;
  549. PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = DirBase;
  550. KeSweepDcache (FALSE);
  551. //
  552. // Unmap the user memory space.
  553. //
  554. PointerPde = MiGetPxeAddress (0);
  555. LastPte = MiGetPxeAddress (MM_SYSTEM_RANGE_START);
  556. MiFillMemoryPte (PointerPde,
  557. (LastPte - PointerPde) * sizeof(MMPTE),
  558. ZeroKernelPte.u.Long);
  559. //
  560. // Get the lower bound of the free physical memory and the number of
  561. // physical pages by walking the memory descriptor lists.
  562. //
  563. MxFreeDescriptor = NULL;
  564. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  565. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  566. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  567. MEMORY_ALLOCATION_DESCRIPTOR,
  568. ListEntry);
  569. if ((MemoryDescriptor->MemoryType != LoaderFirmwarePermanent) &&
  570. (MemoryDescriptor->MemoryType != LoaderBBTMemory) &&
  571. (MemoryDescriptor->MemoryType != LoaderHALCachedMemory) &&
  572. (MemoryDescriptor->MemoryType != LoaderSpecialMemory)) {
  573. //
  574. // This check results in /BURNMEMORY chunks not being counted.
  575. //
  576. if (MemoryDescriptor->MemoryType != LoaderBad) {
  577. MmNumberOfPhysicalPages += MemoryDescriptor->PageCount;
  578. }
  579. if (MemoryDescriptor->BasePage < MmLowestPhysicalPage) {
  580. MmLowestPhysicalPage = MemoryDescriptor->BasePage;
  581. }
  582. if ((MemoryDescriptor->BasePage + MemoryDescriptor->PageCount) >
  583. MmHighestPhysicalPage) {
  584. MmHighestPhysicalPage =
  585. MemoryDescriptor->BasePage + MemoryDescriptor->PageCount - 1;
  586. }
  587. //
  588. // Locate the largest free descriptor.
  589. //
  590. if ((MemoryDescriptor->MemoryType == LoaderFree) ||
  591. (MemoryDescriptor->MemoryType == LoaderLoadedProgram) ||
  592. (MemoryDescriptor->MemoryType == LoaderFirmwareTemporary) ||
  593. (MemoryDescriptor->MemoryType == LoaderOsloaderStack)) {
  594. if (MemoryDescriptor->PageCount > MostFreePage) {
  595. MostFreePage = MemoryDescriptor->PageCount;
  596. MxFreeDescriptor = MemoryDescriptor;
  597. }
  598. }
  599. }
  600. NextMd = MemoryDescriptor->ListEntry.Flink;
  601. }
  602. //
  603. // This flag is registry-settable so check before overriding.
  604. //
  605. // Enabling special IRQL automatically disables mapping the kernel with
  606. // large pages so we can catch kernel and HAL code.
  607. //
  608. if (MmVerifyDriverBufferLength != (ULONG)-1) {
  609. MmLargePageMinimum = (ULONG)-2;
  610. }
  611. else if (MmLargePageMinimum == 0) {
  612. MmLargePageMinimum = MM_LARGE_PAGE_MINIMUM;
  613. }
  614. if (MmNumberOfPhysicalPages <= MmLargePageMinimum) {
  615. MxMapLargePages &= ~MI_LARGE_KERNEL_HAL;
  616. }
  617. //
  618. // MmDynamicPfn may have been initialized based on the registry to
  619. // a value representing the highest physical address in gigabytes.
  620. //
  621. MmDynamicPfn *= ((1024 * 1024 * 1024) / PAGE_SIZE);
  622. //
  623. // Retrieve highest hot plug memory range from the HAL if
  624. // available and not otherwise retrieved from the registry.
  625. //
  626. if (MmDynamicPfn == 0) {
  627. status = HalQuerySystemInformation(
  628. HalQueryMaxHotPlugMemoryAddress,
  629. sizeof(PHYSICAL_ADDRESS),
  630. (PPHYSICAL_ADDRESS) &MaxHotPlugMemoryAddress,
  631. &ReturnedLength);
  632. if (NT_SUCCESS(status)) {
  633. ASSERT (ReturnedLength == sizeof(PHYSICAL_ADDRESS));
  634. MmDynamicPfn = (PFN_NUMBER) (MaxHotPlugMemoryAddress.QuadPart / PAGE_SIZE);
  635. }
  636. }
  637. if (MmDynamicPfn != 0) {
  638. MmDynamicPfn *= ((1024 * 1024 * 1024) / PAGE_SIZE);
  639. MmHighestPossiblePhysicalPage = MI_DTC_MAX_PAGES - 1;
  640. if (MmDynamicPfn - 1 < MmHighestPossiblePhysicalPage) {
  641. if (MmDynamicPfn - 1 < MmHighestPhysicalPage) {
  642. MmDynamicPfn = MmHighestPhysicalPage + 1;
  643. }
  644. MmHighestPossiblePhysicalPage = MmDynamicPfn - 1;
  645. }
  646. }
  647. else {
  648. MmHighestPossiblePhysicalPage = MmHighestPhysicalPage;
  649. }
  650. //
  651. // Only machines with at least 5GB of physical memory get to use this.
  652. //
  653. if (strstr(LoaderBlock->LoadOptions, "NOLOWMEM")) {
  654. if (MmNumberOfPhysicalPages >= ((ULONGLONG)5 * 1024 * 1024 * 1024 / PAGE_SIZE)) {
  655. MiNoLowMemory = (PFN_NUMBER)((ULONGLONG)_4gb / PAGE_SIZE);
  656. }
  657. }
  658. if (MiNoLowMemory != 0) {
  659. MmMakeLowMemory = TRUE;
  660. }
  661. //
  662. // Save the original descriptor value as everything must be restored
  663. // prior to this function returning.
  664. //
  665. *(PMEMORY_ALLOCATION_DESCRIPTOR)&MxOldFreeDescriptor = *MxFreeDescriptor;
  666. if (MmNumberOfPhysicalPages < 2048) {
  667. KeBugCheckEx(INSTALL_MORE_MEMORY,
  668. MmNumberOfPhysicalPages,
  669. MmLowestPhysicalPage,
  670. MmHighestPhysicalPage,
  671. 0);
  672. }
  673. //
  674. // Compute the initial and maximum size of nonpaged pool. The initial
  675. // allocation of nonpaged pool is such that it is both virtually and
  676. // physically contiguous.
  677. //
  678. // If the size of the initial nonpaged pool was initialized from the
  679. // registry and is greater than 7/8 of physical memory, then force the
  680. // size of the initial nonpaged pool to be computed.
  681. //
  682. if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
  683. (7 * (MmNumberOfPhysicalPages >> 3))) {
  684. MmSizeOfNonPagedPoolInBytes = 0;
  685. }
  686. //
  687. // If the size of the initial nonpaged pool is less than the minimum
  688. // amount, then compute the size of initial nonpaged pool as the minimum
  689. // size up to 8mb and a computed amount for every 1mb thereafter.
  690. //
  691. if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) {
  692. MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
  693. if (MmNumberOfPhysicalPages > 1024) {
  694. MmSizeOfNonPagedPoolInBytes +=
  695. ((MmNumberOfPhysicalPages - 1024) / _1mbInPages) *
  696. MmMinAdditionNonPagedPoolPerMb;
  697. }
  698. }
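//
// Editor's worked example (illustrative): with 4KB pages _1mbInPages is
// 256, so a machine with 0x20000 physical pages (512mb) gets
// MmMinimumNonPagedPoolSize plus ((0x20000 - 1024) / 256) == 508
// increments of MmMinAdditionNonPagedPoolPerMb added to the initial
// nonpaged pool size; the first 4mb of RAM contributes only the minimum.
//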
  699. //
  700. // Limit initial nonpaged pool size to the maximum allowable size.
  701. //
  702. if (MmSizeOfNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL) {
  703. MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL;
  704. }
  705. MaximumNonPagedPoolInBytesLimit = 0;
  706. //
  707. // If the registry specifies a total nonpaged pool percentage cap, enforce
  708. // it here.
  709. //
  710. if (MmMaximumNonPagedPoolPercent != 0) {
  711. if (MmMaximumNonPagedPoolPercent < 5) {
  712. MmMaximumNonPagedPoolPercent = 5;
  713. }
  714. else if (MmMaximumNonPagedPoolPercent > 80) {
  715. MmMaximumNonPagedPoolPercent = 80;
  716. }
  717. //
  718. // Use the registry-expressed percentage value.
  719. //
  720. MaximumNonPagedPoolInBytesLimit =
  721. ((MmNumberOfPhysicalPages * MmMaximumNonPagedPoolPercent) / 100);
  722. MaximumNonPagedPoolInBytesLimit *= PAGE_SIZE;
  723. if (MaximumNonPagedPoolInBytesLimit < 6 * 1024 * 1024) {
  724. MaximumNonPagedPoolInBytesLimit = 6 * 1024 * 1024;
  725. }
  726. if (MmSizeOfNonPagedPoolInBytes > MaximumNonPagedPoolInBytesLimit) {
  727. MmSizeOfNonPagedPoolInBytes = MaximumNonPagedPoolInBytesLimit;
  728. }
  729. }
  730. //
  731. // Align the size of the initial nonpaged pool to page size boundary.
  732. //
  733. MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
  734. //
  735. // Calculate the maximum size of nonpaged pool.
  736. //
  737. if (MmMaximumNonPagedPoolInBytes == 0) {
  738. //
  739. // Calculate the size of nonpaged pool. If 8mb or less use the
  740. // minimum size, then for every MB above 8mb add extra pages.
  741. //
  742. MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
  743. //
  744. // Make sure enough expansion for PFN database exists.
  745. //
  746. MmMaximumNonPagedPoolInBytes +=
  747. ((ULONG_PTR)PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN)));
  748. //
  749. // Use the new formula for autosizing nonpaged pool on machines
  750. // with at least 512MB. The new formula allocates 1/2 as much nonpaged
  751. // pool per MB but scales much higher - machines with ~1.2GB or more
  752. // get 256MB of nonpaged pool. Note that the old formula gave machines
  753. // with 512MB of RAM 128MB of nonpaged pool so this behavior is
  754. // preserved with the new formula as well.
  755. //
  756. if (MmNumberOfPhysicalPages >= 0x1f000) {
  757. MmMaximumNonPagedPoolInBytes +=
  758. ((MmNumberOfPhysicalPages - 1024)/256) *
  759. (MmMaxAdditionNonPagedPoolPerMb / 2);
  760. if (MmMaximumNonPagedPoolInBytes < MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  761. MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL;
  762. }
  763. }
  764. else {
  765. MmMaximumNonPagedPoolInBytes +=
  766. ((MmNumberOfPhysicalPages - 1024)/256) *
  767. MmMaxAdditionNonPagedPoolPerMb;
  768. }
  769. if ((MmMaximumNonPagedPoolPercent != 0) &&
  770. (MmMaximumNonPagedPoolInBytes > MaximumNonPagedPoolInBytesLimit)) {
  771. MmMaximumNonPagedPoolInBytes = MaximumNonPagedPoolInBytesLimit;
  772. }
  773. }
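//
// Editor's note (illustrative): the 0x1f000 page threshold above is
// 126976 pages, i.e. 496mb with 4KB pages - the "at least 512MB" class of
// machine the comment describes. Such machines take the halved per-MB
// increment but are also raised to MM_MAX_ADDITIONAL_NONPAGED_POOL if the
// computation falls short.
//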
  774. //
  775. // Align the maximum size of nonpaged pool to page size boundary.
  776. //
  777. MmMaximumNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
  778. //
  779. // Compute the maximum size of nonpaged pool to include 16 additional
  780. // pages and enough space to map the PFN database.
  781. //
  782. MaxPool = MmSizeOfNonPagedPoolInBytes + (PAGE_SIZE * 16) +
  783. ((ULONG_PTR)PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN)));
  784. //
  785. // If the maximum size of nonpaged pool is less than the computed
  786. // maximum size of nonpaged pool, then set the maximum size of nonpaged
  787. // pool to the computed maximum size.
  788. //
  789. if (MmMaximumNonPagedPoolInBytes < MaxPool) {
  790. MmMaximumNonPagedPoolInBytes = MaxPool;
  791. }
  792. //
  793. // Limit maximum nonpaged pool to MM_MAX_ADDITIONAL_NONPAGED_POOL.
  794. //
  795. if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  796. MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL;
  797. }
  798. //
  799. // Get secondary color value from:
  800. //
  801. // (a) from the registry (already filled in) or
  802. // (b) from the PCR or
  803. // (c) default value.
  804. //
  805. if (MmSecondaryColors == 0) {
  806. Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
  807. MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
  808. if (Associativity != 0) {
  809. MmSecondaryColors /= Associativity;
  810. }
  811. }
  812. MmSecondaryColors = MmSecondaryColors >> PAGE_SHIFT;
  813. if (MmSecondaryColors == 0) {
  814. MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
  815. }
  816. else {
  817. //
  818. // Make sure the value is a power of two and within limits.
  819. //
  820. if (((MmSecondaryColors & (MmSecondaryColors - 1)) != 0) ||
  821. (MmSecondaryColors < MM_SECONDARY_COLORS_MIN) ||
  822. (MmSecondaryColors > MM_SECONDARY_COLORS_MAX)) {
  823. MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
  824. }
  825. }
  826. MmSecondaryColorMask = MmSecondaryColors - 1;
  827. //
  828. // Determine the number of bits in MmSecondaryColorMask. This
  829. // is the number of bits the Node color must be shifted
  830. // by before it is included in colors.
  831. //
  832. i = MmSecondaryColorMask;
  833. MmSecondaryColorNodeShift = 0;
  834. while (i != 0) {
  835. i >>= 1;
  836. MmSecondaryColorNodeShift += 1;
  837. }
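//
// Editor's worked example (illustrative): a 512KB, 8-way second level
// cache gives MmSecondaryColors = 512KB / 8 = 64KB, or 16 colors after the
// PAGE_SHIFT conversion; MmSecondaryColorMask is then 0xF and the loop
// above computes MmSecondaryColorNodeShift == 4.
//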
  838. //
  839. // Adjust the number of secondary colors by the number of nodes
  840. // in the machine. The secondary color mask is NOT adjusted
  841. // as it is used to control coloring within a node. The node
  842. // color is added to the color AFTER normal color calculations
  843. // are performed.
  844. //
  845. MmSecondaryColors *= KeNumberNodes;
  846. for (i = 0; i < KeNumberNodes; i += 1) {
  847. KeNodeBlock[i]->Color = (ULONG)i;
  848. KeNodeBlock[i]->MmShiftedColor = (ULONG)(i << MmSecondaryColorNodeShift);
  849. InitializeSListHead(&KeNodeBlock[i]->DeadStackList);
  850. }
  851. //
  852. // Add in the PFN database size (based on the number of pages required
  853. // from page zero to the highest page).
  854. //
  855. // Get the number of secondary colors and add the array for tracking
  856. // secondary colors to the end of the PFN database.
  857. //
  858. MxPfnAllocation = 1 + ((((MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN)) +
  859. (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2))
  860. >> PAGE_SHIFT);
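//
// Editor's worked example (illustrative; the structure size is an
// assumption, not taken from this file): on a 4gb machine with no dynamic
// PFN extension the highest possible page is about 0xFFFFF, so with a
// 48-byte MMPFN the PFN array alone is 0x100000 * 48 bytes = 48mb, i.e.
// 0x3000 pages, to which the two MMCOLOR_TABLES arrays and one extra page
// are added to form MxPfnAllocation.
//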
  861. //
  862. // Compute the starting address of nonpaged pool.
  863. //
  864. MmNonPagedPoolStart = (PCHAR)MmNonPagedPoolEnd - MmMaximumNonPagedPoolInBytes;
  865. MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
  866. MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
  867. //
  868. // Calculate the starting address for nonpaged system space rounded
  869. // down to a second level PDE mapping boundary.
  870. //
  871. MmNonPagedSystemStart = (PVOID)(((ULONG_PTR)MmNonPagedPoolStart -
  872. (((ULONG_PTR)MmNumberOfSystemPtes + 1) * PAGE_SIZE)) &
  873. (~PAGE_DIRECTORY2_MASK));
  874. if (MmNonPagedSystemStart < MM_LOWEST_NONPAGED_SYSTEM_START) {
  875. MmNonPagedSystemStart = MM_LOWEST_NONPAGED_SYSTEM_START;
  876. MmNumberOfSystemPtes = (ULONG)(((ULONG_PTR)MmNonPagedPoolStart -
  877. (ULONG_PTR)MmNonPagedSystemStart) >> PAGE_SHIFT)-1;
  878. ASSERT (MmNumberOfSystemPtes > 1000);
  879. }
  880. //
  881. // Snap the system PTE start address as page directories and tables
  882. // will be preallocated for this range.
  883. //
  884. SystemPteStart = (PVOID) MmNonPagedSystemStart;
  885. //
  886. // If special pool and/or the driver verifier is enabled, reserve
  887. // extra virtual address space for special pooling now. For now,
  888. // arbitrarily don't let it be larger than paged pool (128gb).
  889. //
  890. if ((MmVerifyDriverBufferLength != (ULONG)-1) ||
  891. ((MmSpecialPoolTag != 0) && (MmSpecialPoolTag != (ULONG)-1))) {
  892. if (MmNonPagedSystemStart > MM_LOWEST_NONPAGED_SYSTEM_START) {
  893. MaxPool = (ULONG_PTR)MmNonPagedSystemStart -
  894. (ULONG_PTR)MM_LOWEST_NONPAGED_SYSTEM_START;
  895. if (MaxPool > MM_MAX_PAGED_POOL) {
  896. MaxPool = MM_MAX_PAGED_POOL;
  897. }
  898. MmNonPagedSystemStart = (PVOID)((ULONG_PTR)MmNonPagedSystemStart - MaxPool);
  899. MmSpecialPoolStart = MmNonPagedSystemStart;
  900. MmSpecialPoolEnd = (PVOID)((ULONG_PTR)MmNonPagedSystemStart + MaxPool);
  901. }
  902. }
  903. //
  904. // Recompute the actual number of system PTEs.
  905. //
  906. MmNumberOfSystemPtes = (ULONG)(((ULONG_PTR)MmNonPagedPoolStart -
  907. (ULONG_PTR)SystemPteStart) >> PAGE_SHIFT) - 1;
  908. ASSERT(MmNumberOfSystemPtes > 1000);
  909. //
  910. // Set the global bit for all PDEs in system space.
  911. //
  912. StartPde = MiGetPdeAddress (MM_SYSTEM_SPACE_START);
  913. EndPde = MiGetPdeAddress (MM_SYSTEM_SPACE_END);
  914. First = TRUE;
  915. while (StartPde <= EndPde) {
  916. if (First == TRUE || MiIsPteOnPdeBoundary(StartPde)) {
  917. First = FALSE;
  918. StartPxe = MiGetPdeAddress(StartPde);
  919. if (StartPxe->u.Hard.Valid == 0) {
  920. StartPxe += 1;
  921. StartPpe = MiGetVirtualAddressMappedByPte (StartPxe);
  922. StartPde = MiGetVirtualAddressMappedByPte (StartPpe);
  923. continue;
  924. }
  925. StartPpe = MiGetPteAddress(StartPde);
  926. if (StartPpe->u.Hard.Valid == 0) {
  927. StartPpe += 1;
  928. StartPde = MiGetVirtualAddressMappedByPte (StartPpe);
  929. continue;
  930. }
  931. }
  932. TempPte = *StartPde;
  933. TempPte.u.Hard.Global = 1;
  934. *StartPde = TempPte;
  935. StartPde += 1;
  936. }
  937. //
  938. // Allocate page directory parents, directories and page table pages for
  939. // system PTEs and nonpaged pool.
  940. //
  941. TempPte = ValidKernelPte;
  942. StartPde = MiGetPdeAddress (SystemPteStart);
  943. EndPde = MiGetPdeAddress ((PCHAR)MmNonPagedPoolEnd - 1);
  944. MxPopulatePageDirectories (StartPde, EndPde);
  945. //
  946. // If the host processor supports large pages, and the amount of
  947. // physical memory is greater than 255mb, then map the kernel image and
  948. // HAL into a large page.
  949. //
  950. if (MxMapLargePages & MI_LARGE_KERNEL_HAL) {
  951. //
  952. // Add the kernel and HAL ranges to the large page ranges.
  953. //
  954. i = 0;
  955. NextEntry = LoaderBlock->LoadOrderListHead.Flink;
  956. for ( ; NextEntry != &LoaderBlock->LoadOrderListHead; NextEntry = NextEntry->Flink) {
  957. DataTableEntry = CONTAINING_RECORD (NextEntry,
  958. LDR_DATA_TABLE_ENTRY,
  959. InLoadOrderLinks);
  960. MiLargeVaRanges[MiLargeVaRangeIndex].VirtualAddress = DataTableEntry->DllBase;
  961. MiLargeVaRanges[MiLargeVaRangeIndex].EndVirtualAddress =
  962. (PVOID)((ULONG_PTR)DataTableEntry->DllBase + DataTableEntry->SizeOfImage - 1);
  963. MiLargeVaRangeIndex += 1;
  964. i += 1;
  965. if (i == 2) {
  966. break;
  967. }
  968. }
  969. }
  970. //
  971. // If the processor supports large pages and the descriptor has
  972. // enough contiguous pages for the entire PFN database, then use
  973. // large pages to map it. Regardless of large page support, put
  974. // the PFN database in low virtual memory just above the loaded images.
  975. //
  976. PagesLeft = MxPagesAvailable ();
  977. if ((MxMapLargePages & (MI_LARGE_PFN_DATABASE | MI_LARGE_NONPAGED_POOL)) &&
  978. (PagesLeft > MxPfnAllocation)) {
  979. //
  980. // Allocate the PFN database using large pages as there is enough
  981. // physically contiguous and decently aligned memory available.
  982. //
  983. PfnInLargePages = TRUE;
  984. FirstPfnDatabasePage = MxGetNextPage (MxPfnAllocation);
  985. MmPfnDatabase = (PMMPFN)(MM_KSEG0_BASE | MmBootImageSize);
  986. if ((FirstPfnDatabasePage & (MM_PFN_MAPPED_BY_PDE - 1)) > ((MmBootImageSize >> PAGE_SHIFT) & (MM_PFN_MAPPED_BY_PDE - 1))) {
  987. MmPfnDatabase = (PMMPFN) ((ULONG_PTR)MmPfnDatabase & ~(MM_VA_MAPPED_BY_PDE - 1));
  988. MmPfnDatabase = (PMMPFN) ((ULONG_PTR)MmPfnDatabase + (((FirstPfnDatabasePage & (MM_PFN_MAPPED_BY_PDE - 1))) << PAGE_SHIFT));
  989. }
  990. else {
  991. ASSERT (((ULONG_PTR)MmPfnDatabase & (MM_VA_MAPPED_BY_PDE - 1)) != 0);
  992. MmPfnDatabase = (PMMPFN) MI_ROUND_TO_SIZE (((ULONG_PTR)MmPfnDatabase),
  993. MM_VA_MAPPED_BY_PDE);
  994. MmPfnDatabase = (PMMPFN) ((ULONG_PTR)MmPfnDatabase + (((FirstPfnDatabasePage & (MM_PFN_MAPPED_BY_PDE - 1))) << PAGE_SHIFT));
  995. }
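//
// Editor's note (illustrative, not part of the original source): the
// adjustments above place MmPfnDatabase at or above
// MM_KSEG0_BASE | MmBootImageSize such that its offset from a 2mb virtual
// boundary equals FirstPfnDatabasePage's offset from a 2mb frame boundary.
// With virtual and physical offsets matching, every 2mb PDE covering the
// range has a naturally aligned base frame, which is what allows the PFN
// database (and the nonpaged pool that physically follows it) to be
// converted to large pages in Phase 1.
//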
  996. //
  997. // Add the PFN database range to the large page ranges.
  998. //
  999. MiLargeVaRanges[MiLargeVaRangeIndex].VirtualAddress = MmPfnDatabase;
  1000. MiLargeVaRanges[MiLargeVaRangeIndex].EndVirtualAddress =
  1001. (PVOID) (((ULONG_PTR)MmPfnDatabase + (MxPfnAllocation << PAGE_SHIFT)) - 1);
  1002. MiLargeVaRangeIndex += 1;
  1003. }
  1004. else {
  1005. MxMapLargePages &= ~(MI_LARGE_PFN_DATABASE | MI_LARGE_NONPAGED_POOL);
  1006. MmPfnDatabase = (PMMPFN)(MM_KSEG0_BASE | MmBootImageSize);
  1007. }
  1008. //
  1009. // The initial nonpaged pool immediately follows the PFN database.
  1010. //
  1011. // Since the PFN database and the initial nonpaged pool are physically
  1012. // adjacent, a single PXE is shared, thus reducing the number of pages
  1013. that might otherwise need to be marked as must-be-cachable.
  1014. //
  1015. // Calculate the correct initial nonpaged pool virtual address and
  1016. // maximum size now. Don't allocate pages for any other use at this
  1017. // point to guarantee that the PFN database and nonpaged pool are
  1018. // physically contiguous so large pages can be enabled.
  1019. //
  1020. NonPagedPoolStartLow = (PVOID)((ULONG_PTR)MmPfnDatabase + (MxPfnAllocation << PAGE_SHIFT));
  1021. //
  1022. // Allocate pages and fill in the PTEs for the initial nonpaged pool.
  1023. //
  1024. PagesNeeded = MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT;
  1025. //
  1026. // Don't ask for more than is reasonable both in terms of physical pages
  1027. // left and virtual space available.
  1028. //
  1029. PagesLeft = MxPagesAvailable ();
  1030. if (PagesNeeded > PagesLeft) {
  1031. PagesNeeded = PagesLeft;
  1032. }
  1033. if (MxMapLargePages & MI_LARGE_NONPAGED_POOL) {
  1034. //
  1035. // The PFN database has already been allocated (but not mapped).
  1036. // Shortly we will transition from the descriptors to the real PFN
  1037. // database so eat up the slush now.
  1038. //
  1039. VirtualAddress = (PVOID) ((ULONG_PTR)NonPagedPoolStartLow + (PagesNeeded << PAGE_SHIFT));
  1040. if (((ULONG_PTR)VirtualAddress & (MM_VA_MAPPED_BY_PDE - 1)) &&
  1041. (PagesLeft - PagesNeeded > MM_PFN_MAPPED_BY_PDE) &&
  1042. (MmSizeOfNonPagedPoolInBytes + MM_VA_MAPPED_BY_PDE < MM_MAX_INITIAL_NONPAGED_POOL)) {
  1043. //
  1044. // Expand the initial nonpaged pool to use the slush efficiently.
  1045. //
  1046. VirtualAddress = (PVOID) MI_ROUND_TO_SIZE ((ULONG_PTR)VirtualAddress, MM_VA_MAPPED_BY_PDE);
  1047. PagesNeeded = ((ULONG_PTR)VirtualAddress - (ULONG_PTR)NonPagedPoolStartLow) >> PAGE_SHIFT;
  1048. }
  1049. }
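//
// Editor's worked example (illustrative): if the initial nonpaged pool
// would otherwise end 1.25mb into a 2mb region and enough free pages and
// virtual space remain, the rounding above grows it by the remaining
// 0.75mb (192 pages of slush) so the final page table page backing the
// pool is completely used instead of leaving a partially filled 2mb
// mapping.
//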
  1050. //
  1051. // Update various globals since the size of initial pool may have
  1052. // changed.
  1053. //
  1054. MmSizeOfNonPagedPoolInBytes = PagesNeeded << PAGE_SHIFT;
  1055. //
  1056. // Allocate the actual pages for the initial nonpaged pool and map them in.
  1057. //
  1058. PageFrameIndex = MxGetNextPage (PagesNeeded);
  1059. FirstNonPagedPoolPage = PageFrameIndex;
  1060. if (MxMapLargePages & MI_LARGE_PFN_DATABASE) {
  1061. ASSERT (FirstNonPagedPoolPage == FirstPfnDatabasePage + MxPfnAllocation);
  1062. }
  1063. //
  1064. // Allocate the page table pages to map the PFN database and the
  1065. // initial nonpaged pool now. If the system switches to large
  1066. // pages in Phase 1, these pages will be discarded then.
  1067. //
  1068. StartPde = MiGetPdeAddress (MmPfnDatabase);
  1069. VirtualAddress = (PVOID) ((ULONG_PTR)NonPagedPoolStartLow + MmSizeOfNonPagedPoolInBytes - 1);
  1070. EndPde = MiGetPdeAddress (VirtualAddress);
  1071. MxPopulatePageDirectories (StartPde, EndPde);
  1072. MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)MmNonPagedPoolStart +
  1073. MmSizeOfNonPagedPoolInBytes);
  1074. //
  1075. // The virtual address, length and page tables to map the initial
  1076. // nonpaged pool are already allocated - just fill in the mappings.
  1077. //
  1078. MmNonPagedPoolStart = NonPagedPoolStartLow;
  1079. //
  1080. // Set the subsection base address to zero as the PTE format allows the
  1081. // complete address space to be spanned.
  1082. //
  1083. MmSubsectionBase = 0;
  1084. PointerPte = MiGetPteAddress (MmNonPagedPoolStart);
  1085. LastPte = MiGetPteAddress ((ULONG_PTR)MmNonPagedPoolStart +
  1086. MmSizeOfNonPagedPoolInBytes);
  1087. if (MxMapLargePages & (MI_LARGE_PFN_DATABASE | MI_LARGE_NONPAGED_POOL)) {
  1088. //
  1089. // Since every page table page needs to be filled, ensure PointerPte
  1090. // and LastPte span entire page table pages, and adjust
  1091. // PageFrameIndex to account for this.
  1092. //
  1093. if (!MiIsPteOnPdeBoundary(PointerPte)) {
  1094. PageFrameIndex -= (BYTE_OFFSET (PointerPte) / sizeof (MMPTE));
  1095. PointerPte = PAGE_ALIGN (PointerPte);
  1096. }
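//
// Editor's note (illustrative): backing PageFrameIndex up by the PTE's
// index within its page table keeps the invariant that entry i of the
// table maps frame (base + i) across the whole table, which is the
// relationship MxConvertToLargePage asserts when it later collapses these
// 4KB mappings into 2mb PDEs.
//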
  1097. if (!MiIsPteOnPdeBoundary(LastPte)) {
  1098. LastPte = (PMMPTE) (PAGE_ALIGN (LastPte)) + PTE_PER_PAGE;
  1099. }
  1100. //
  1101. // Add the initial nonpaged pool range to the large page ranges.
  1102. //
  1103. MiLargeVaRanges[MiLargeVaRangeIndex].VirtualAddress = MmNonPagedPoolStart;
  1104. MiLargeVaRanges[MiLargeVaRangeIndex].EndVirtualAddress =
  1105. (PVOID) ((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1);
  1106. MiLargeVaRangeIndex += 1;
  1107. }
  1108. while (PointerPte < LastPte) {
  1109. ASSERT (PointerPte->u.Hard.Valid == 0);
  1110. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1111. *PointerPte = TempPte;
  1112. PointerPte += 1;
  1113. PageFrameIndex += 1;
  1114. }
  1115. //
  1116. // There must be at least one page of system PTEs before the expanded
  1117. // nonpaged pool.
  1118. //
  1119. ASSERT (MiGetPteAddress(SystemPteStart) < MiGetPteAddress(MmNonPagedPoolExpansionStart));
  1120. //
  1121. // Non-paged pages now exist, build the pool structures.
  1122. //
  1123. MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
  1124. MiInitializeNonPagedPool ();
  1125. //
  1126. // Before nonpaged pool can be used, the PFN database must
  1127. // be built. This is due to the fact that the start and end of
  1128. // allocation bits for nonpaged pool are maintained in the
  1129. // PFN elements for the corresponding pages.
  1130. //
  1131. if (MxMapLargePages & MI_LARGE_PFN_DATABASE) {
  1132. //
  1133. // The physical pages to be used for the PFN database have already
  1134. // been allocated. Initialize their mappings now.
  1135. //
  1136. //
  1137. // Initialize the page table mappings (the directory mappings are
  1138. // already initialized) for the PFN database until the switch to large
  1139. // pages occurs in Phase 1.
  1140. //
  1141. PointerPte = MiGetPteAddress (MmPfnDatabase);
  1142. BasePte = MiGetVirtualAddressMappedByPte (MiGetPdeAddress (MmPfnDatabase));
  1143. LastPte = MiGetPteAddress ((ULONG_PTR)MmPfnDatabase + (MxPfnAllocation << PAGE_SHIFT));
  1144. if (!MiIsPteOnPdeBoundary(LastPte)) {
  1145. LastPte = MiGetVirtualAddressMappedByPte (MiGetPteAddress (LastPte) + 1);
  1146. }
  1147. PageFrameIndex = FirstPfnDatabasePage - (PointerPte - BasePte);
  1148. PointerPte = BasePte;
  1149. while (PointerPte < LastPte) {
  1150. ASSERT ((PointerPte->u.Hard.Valid == 0) ||
  1151. (PointerPte->u.Hard.PageFrameNumber == PageFrameIndex));
  1152. if (MiIsPteOnPdeBoundary(PointerPte)) {
  1153. ASSERT ((PageFrameIndex & (MM_PFN_MAPPED_BY_PDE - 1)) == 0);
  1154. }
  1155. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1156. *PointerPte = TempPte;
  1157. PointerPte += 1;
  1158. PageFrameIndex += 1;
  1159. }
  1160. RtlZeroMemory (MmPfnDatabase, MxPfnAllocation << PAGE_SHIFT);
  1161. }
  1162. else {
  1163. ULONG FreeNextPhysicalPage;
  1164. ULONG FreeNumberOfPages;
  1165. //
  1166. // Calculate the start of the PFN database (it starts at physical
  1167. // page zero, even if the lowest physical page is not zero).
  1168. //
  1169. ASSERT (MmPfnDatabase != NULL);
  1170. PointerPte = MiGetPteAddress (MmPfnDatabase);
  1171. //
  1172. // Go through the memory descriptors and for each physical page make
  1173. // sure the PFN database has a valid PTE to map it. This allows
  1174. // machines with sparse physical memory to have a minimal PFN database.
  1175. //
  1176. FreeNextPhysicalPage = MxFreeDescriptor->BasePage;
  1177. FreeNumberOfPages = MxFreeDescriptor->PageCount;
  1178. PagesLeft = 0;
  1179. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  1180. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  1181. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  1182. MEMORY_ALLOCATION_DESCRIPTOR,
  1183. ListEntry);
  1184. if ((MemoryDescriptor->MemoryType == LoaderFirmwarePermanent) ||
  1185. (MemoryDescriptor->MemoryType == LoaderBBTMemory) ||
  1186. (MemoryDescriptor->MemoryType == LoaderSpecialMemory)) {
  1187. //
  1188. // If the descriptor lies within the highest PFN database entry
  1189. // then create PFN pages for this range as they are needed
  1190. // to support \Device\PhysicalMemory.
  1191. //
  1192. if (MemoryDescriptor->BasePage > MmHighestPhysicalPage) {
  1193. NextMd = MemoryDescriptor->ListEntry.Flink;
  1194. continue;
  1195. }
  1196. if (MemoryDescriptor->BasePage + MemoryDescriptor->PageCount > MmHighestPhysicalPage + 1) {
  1197. MemoryDescriptor->PageCount = (ULONG)MmHighestPhysicalPage - MemoryDescriptor->BasePage + 1;
  1198. }
  1199. }
  1200. //
  1201. // Temporarily add back in the memory allocated since Phase 0
  1202. // began so PFN entries for it will be created and mapped.
  1203. //
  1204. // Note actual PFN entry allocations must be done carefully as
  1205. // memory from the descriptor itself could get used to map
  1206. // the PFNs for the descriptor !
  1207. //
  1208. if (MemoryDescriptor == MxFreeDescriptor) {
  1209. BasePage = MxOldFreeDescriptor.BasePage;
  1210. PageCount = MxOldFreeDescriptor.PageCount;
  1211. }
  1212. else {
  1213. BasePage = MemoryDescriptor->BasePage;
  1214. PageCount = MemoryDescriptor->PageCount;
  1215. }
  1216. PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(BasePage));
  1217. LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT(
  1218. BasePage + PageCount))) - 1);
  1219. while (PointerPte <= LastPte) {
  1220. if (PointerPte->u.Hard.Valid == 0) {
  1221. TempPte.u.Hard.PageFrameNumber = FreeNextPhysicalPage;
  1222. ASSERT (FreeNumberOfPages != 0);
  1223. FreeNextPhysicalPage += 1;
  1224. FreeNumberOfPages -= 1;
  1225. if (FreeNumberOfPages == 0) {
  1226. KeBugCheckEx (INSTALL_MORE_MEMORY,
  1227. MmNumberOfPhysicalPages,
  1228. FreeNumberOfPages,
  1229. MxOldFreeDescriptor.PageCount,
  1230. 1);
  1231. }
  1232. PagesLeft += 1;
  1233. *PointerPte = TempPte;
  1234. RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
  1235. PAGE_SIZE);
  1236. }
  1237. PointerPte += 1;
  1238. }
  1239. NextMd = MemoryDescriptor->ListEntry.Flink;
  1240. }
  1241. //
  1242. // Handle the BIOS range here as some machines have big gaps in
  1243. // their physical memory maps. Big meaning > 3.5mb from page 0x37
  1244. // up to page 0x350.
  1245. //
  1246. PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(MM_BIOS_START));
  1247. LastPte = MiGetPteAddress ((PCHAR)(MI_PFN_ELEMENT(MM_BIOS_END)));
  1248. while (PointerPte <= LastPte) {
  1249. if (PointerPte->u.Hard.Valid == 0) {
  1250. TempPte.u.Hard.PageFrameNumber = FreeNextPhysicalPage;
  1251. ASSERT (FreeNumberOfPages != 0);
  1252. FreeNextPhysicalPage += 1;
  1253. FreeNumberOfPages -= 1;
  1254. if (FreeNumberOfPages == 0) {
  1255. KeBugCheckEx (INSTALL_MORE_MEMORY,
  1256. MmNumberOfPhysicalPages,
  1257. FreeNumberOfPages,
  1258. MxOldFreeDescriptor.PageCount,
  1259. 1);
  1260. }
  1261. PagesLeft += 1;
  1262. *PointerPte = TempPte;
  1263. RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
  1264. PAGE_SIZE);
  1265. }
  1266. PointerPte += 1;
  1267. }
  1268. //
  1269. // Update the global counts - this would have been tricky to do while
  1270. // removing pages from them as we looped above.
  1271. //
  1272. // Later we will walk the memory descriptors and add pages to the free
  1273. // list in the PFN database.
  1274. //
  1275. // To do this correctly:
  1276. //
  1277. // The FreeDescriptor fields must be updated so the PFN database
  1278. // consumption isn't added to the freelist.
  1279. //
  1280. MxFreeDescriptor->BasePage = FreeNextPhysicalPage;
  1281. MxFreeDescriptor->PageCount = FreeNumberOfPages;
  1282. }
  1283. //
  1284. // Initialize support for colored pages.
  1285. //
  1286. MmFreePagesByColor[0] = (PMMCOLOR_TABLES)
  1287. &MmPfnDatabase[MmHighestPossiblePhysicalPage + 1];
  1288. MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
  1289. //
  1290. // Make sure the PTEs are mapped.
  1291. //
  1292. PointerPte = MiGetPteAddress (&MmFreePagesByColor[0][0]);
  1293. LastPte = MiGetPteAddress (
  1294. (PVOID)((PCHAR)&MmFreePagesByColor[1][MmSecondaryColors] - 1));
  1295. while (PointerPte <= LastPte) {
  1296. if (PointerPte->u.Hard.Valid == 0) {
  1297. TempPte.u.Hard.PageFrameNumber = MxGetNextPage (1);
  1298. *PointerPte = TempPte;
  1299. RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
  1300. PAGE_SIZE);
  1301. }
  1302. PointerPte += 1;
  1303. }
  1304. for (i = 0; i < MmSecondaryColors; i += 1) {
  1305. MmFreePagesByColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
  1306. MmFreePagesByColor[ZeroedPageList][i].Count = 0;
  1307. MmFreePagesByColor[FreePageList][i].Flink = MM_EMPTY_LIST;
  1308. MmFreePagesByColor[FreePageList][i].Count = 0;
  1309. }
  1310. //
  1311. // Add nonpaged pool to PFN database if mapped via large pages.
  1312. //
  1313. PointerPde = MiGetPdeAddress (PTE_BASE);
  1314. if (MxMapLargePages & MI_LARGE_NONPAGED_POOL) {
  1315. j = FirstNonPagedPoolPage;
  1316. Pfn1 = MI_PFN_ELEMENT (j);
  1317. i = MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT;
  1318. do {
  1319. PointerPde = MiGetPdeAddress ((ULONG_PTR)MmNonPagedPoolStart + ((j - FirstNonPagedPoolPage) << PAGE_SHIFT));
  1320. Pfn1->u4.PteFrame = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
  1321. Pfn1->PteAddress = (PMMPTE)(j << PAGE_SHIFT);
  1322. Pfn1->u2.ShareCount += 1;
  1323. Pfn1->u3.e2.ReferenceCount = 1;
  1324. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1325. Pfn1->u3.e1.CacheAttribute = MiCached;
  1326. MiDetermineNode(j, Pfn1);
  1327. j += 1;
  1328. Pfn1 += 1;
  1329. i -= 1;
  1330. } while (i != 0);
  1331. }
  1332. //
  1333. // Ensure the hyperspace and session spaces are not mapped so they don't
  1334. // get made global by the loops below.
  1335. //
  1336. ASSERT (MiGetPxeAddress (HYPER_SPACE)->u.Hard.Valid == 0);
  1337. ASSERT (MiGetPxeAddress (MM_SESSION_SPACE_DEFAULT)->u.Hard.Valid == 0);
  1338. //
  1339. // Go through the page table entries and for any page which is valid,
  1340. // update the corresponding PFN database element.
  1341. //
  1342. StartPxe = MiGetPxeAddress (NULL);
  1343. EndPxe = StartPxe + PXE_PER_PAGE;
  1344. for ( ; StartPxe < EndPxe; StartPxe += 1) {
  1345. if (StartPxe->u.Hard.Valid == 0) {
  1346. continue;
  1347. }
  1348. va = MiGetVirtualAddressMappedByPxe (StartPxe);
  1349. ASSERT (va >= MM_SYSTEM_RANGE_START);
  1350. if (MI_IS_PAGE_TABLE_ADDRESS (va)) {
  1351. UseGlobal = 0;
  1352. }
  1353. else {
  1354. UseGlobal = 1;
  1355. }
  1356. ASSERT (StartPxe->u.Hard.LargePage == 0);
  1357. ASSERT (StartPxe->u.Hard.Owner == 0);
  1358. ASSERT (StartPxe->u.Hard.Global == 0);
  1359. PxePage = MI_GET_PAGE_FRAME_FROM_PTE(StartPxe);
  1360. Pfn1 = MI_PFN_ELEMENT(PxePage);
  1361. Pfn1->u4.PteFrame = DirBase;
  1362. Pfn1->PteAddress = StartPxe;
  1363. Pfn1->u2.ShareCount += 1;
  1364. Pfn1->u3.e2.ReferenceCount = 1;
  1365. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1366. Pfn1->u3.e1.CacheAttribute = MiCached;
  1367. MiDetermineNode (PxePage, Pfn1);
  1368. StartPpe = MiGetVirtualAddressMappedByPte (StartPxe);
  1369. EndPpe = StartPpe + PPE_PER_PAGE;
  1370. for ( ; StartPpe < EndPpe; StartPpe += 1) {
  1371. if (StartPpe->u.Hard.Valid == 0) {
  1372. continue;
  1373. }
  1374. ASSERT (StartPpe->u.Hard.LargePage == 0);
  1375. ASSERT (StartPpe->u.Hard.Owner == 0);
  1376. ASSERT (StartPpe->u.Hard.Global == 0);
  1377. PpePage = MI_GET_PAGE_FRAME_FROM_PTE(StartPpe);
  1378. Pfn2 = MI_PFN_ELEMENT(PpePage);
  1379. Pfn1->u2.ShareCount += 1;
  1380. Pfn2->u4.PteFrame = PxePage;
  1381. Pfn2->PteAddress = StartPpe;
  1382. Pfn2->u2.ShareCount += 1;
  1383. Pfn2->u3.e2.ReferenceCount = 1;
  1384. Pfn2->u3.e1.PageLocation = ActiveAndValid;
  1385. Pfn2->u3.e1.CacheAttribute = MiCached;
  1386. MiDetermineNode (PpePage, Pfn2);
  1387. StartPde = MiGetVirtualAddressMappedByPte (StartPpe);
  1388. EndPde = StartPde + PDE_PER_PAGE;
  1389. for ( ; StartPde < EndPde; StartPde += 1) {
  1390. if (StartPde->u.Hard.Valid == 0) {
  1391. continue;
  1392. }
  1393. ASSERT (StartPde->u.Hard.LargePage == 0);
  1394. ASSERT (StartPde->u.Hard.Owner == 0);
  1395. StartPde->u.Hard.Global = UseGlobal;
  1396. PdePage = MI_GET_PAGE_FRAME_FROM_PTE(StartPde);
  1397. Pfn3 = MI_PFN_ELEMENT(PdePage);
  1398. Pfn2->u2.ShareCount += 1;
  1399. Pfn3->u4.PteFrame = PpePage;
  1400. Pfn3->PteAddress = StartPde;
  1401. Pfn3->u2.ShareCount += 1;
  1402. Pfn3->u3.e2.ReferenceCount = 1;
  1403. Pfn3->u3.e1.PageLocation = ActiveAndValid;
  1404. Pfn3->u3.e1.CacheAttribute = MiCached;
  1405. MiDetermineNode (PdePage, Pfn3);
  1406. StartPte = MiGetVirtualAddressMappedByPte (StartPde);
  1407. EndPte = StartPte + PDE_PER_PAGE;
  1408. for ( ; StartPte < EndPte; StartPte += 1) {
  1409. if (StartPte->u.Hard.Valid == 0) {
  1410. continue;
  1411. }
  1412. ASSERT (StartPte->u.Hard.LargePage == 0);
  1413. ASSERT (StartPte->u.Hard.Owner == 0);
  1414. StartPte->u.Hard.Global = UseGlobal;
  1415. PtePage = MI_GET_PAGE_FRAME_FROM_PTE(StartPte);
  1416. Pfn3->u2.ShareCount += 1;
  1417. if (PtePage > MmHighestPhysicalPage) {
  1418. continue;
  1419. }
  1420. Pfn4 = MI_PFN_ELEMENT(PtePage);
  1421. if ((MmIsAddressValid(Pfn4)) &&
  1422. MmIsAddressValid((PUCHAR)(Pfn4+1)-1)) {
  1423. Pfn4->u4.PteFrame = PdePage;
  1424. Pfn4->PteAddress = StartPte;
  1425. Pfn4->u2.ShareCount += 1;
  1426. Pfn4->u3.e2.ReferenceCount = 1;
  1427. Pfn4->u3.e1.PageLocation = ActiveAndValid;
  1428. Pfn4->u3.e1.CacheAttribute = MiCached;
  1429. MiDetermineNode (PtePage, Pfn4);
  1430. }
  1431. }
  1432. }
  1433. }
  1434. }
  1435. KeRaiseIrql (DISPATCH_LEVEL, &OldIrql);
  1436. KeFlushCurrentTb ();
  1437. KeLowerIrql (OldIrql);
  1438. //
  1439. // If the lowest physical page is zero and the page is still unused, mark
  1440. // it as in use. This is temporary as we want to find bugs where a physical
  1441. // page is specified as zero.
  1442. //
  1443. Pfn1 = &MmPfnDatabase[MmLowestPhysicalPage];
  1444. if ((MmLowestPhysicalPage == 0) && (Pfn1->u3.e2.ReferenceCount == 0)) {
  1445. ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
  1446. //
  1447. // Make the reference count non-zero and point it into a
  1448. // page directory.
  1449. //
  1450. Pde = MiGetPxeAddress(0xFFFFFFFFB0000000);
  1451. PdePage = MI_GET_PAGE_FRAME_FROM_PTE(Pde);
  1452. Pfn1->u4.PteFrame = PdePage;
  1453. Pfn1->PteAddress = Pde;
  1454. Pfn1->u2.ShareCount += 1;
  1455. Pfn1->u3.e2.ReferenceCount = 0xfff0;
  1456. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1457. Pfn1->u3.e1.CacheAttribute = MiCached;
  1458. MiDetermineNode(0, Pfn1);
  1459. }
1460. // End of the temporary handling of physical page zero.
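//
// A minimal sketch of the kind of misuse the reservation above is meant
// to expose.  Because frame zero is kept permanently referenced and is
// never handed out by the allocator, a zero frame number arriving from a
// caller can only come from an uninitialized or stale value.
// MiSanityCheckCallerFrame is a hypothetical helper, not part of this
// module.
//
#if 0
VOID
MiSanityCheckCallerFrame (
    IN PFN_NUMBER PageFrameIndex
    )
{
    //
    // Frame zero is reserved at initialization, so a caller presenting
    // it here has almost certainly passed an uninitialized frame number.
    //
    ASSERT (PageFrameIndex != 0);
}
#endif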
  1461. //
  1462. // Walk through the memory descriptors and add pages to the
  1463. // free list in the PFN database. Before doing this, adjust the
  1464. // two descriptors we used so they only contain memory that can be
1465. // freed now (i.e., any memory we removed from them earlier in this routine
1466. // without updating the descriptor must be accounted for now).
  1467. //
  1468. //
  1469. // We may have taken memory out of the MxFreeDescriptor - but
  1470. // that's ok because we wouldn't want to free that memory right now
  1471. // (or ever) anyway.
  1472. //
  1473. //
  1474. // Since the LoaderBlock memory descriptors are ordered
  1475. // from low physical memory address to high, walk it backwards so the
  1476. // high physical pages go to the front of the freelists. The thinking
  1477. // is that pages initially allocated by the system are less likely to be
  1478. // freed so don't waste memory below 16mb (or 4gb) that may be needed
  1479. // by ISA drivers later.
  1480. //
  1481. NextMd = LoaderBlock->MemoryDescriptorListHead.Blink;
  1482. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  1483. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  1484. MEMORY_ALLOCATION_DESCRIPTOR,
  1485. ListEntry);
  1486. i = MemoryDescriptor->PageCount;
  1487. PageFrameIndex = MemoryDescriptor->BasePage;
  1488. switch (MemoryDescriptor->MemoryType) {
  1489. case LoaderBad:
  1490. while (i != 0) {
  1491. MiInsertPageInList (&MmBadPageListHead, PageFrameIndex);
  1492. i -= 1;
  1493. PageFrameIndex += 1;
  1494. }
  1495. break;
  1496. case LoaderFree:
  1497. case LoaderLoadedProgram:
  1498. case LoaderFirmwareTemporary:
  1499. case LoaderOsloaderStack:
  1500. FreePfnCount = 0;
  1501. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1502. while (i != 0) {
  1503. if (Pfn1->u3.e2.ReferenceCount == 0) {
  1504. //
  1505. // Set the PTE address to the physical page for
  1506. // virtual address alignment checking.
  1507. //
  1508. Pfn1->PteAddress =
  1509. (PMMPTE)(PageFrameIndex << PTE_SHIFT);
  1510. MiDetermineNode(PageFrameIndex, Pfn1);
  1511. MiInsertPageInFreeList (PageFrameIndex);
  1512. FreePfnCount += 1;
  1513. }
1514. else {
1515. if (FreePfnCount > LargestFreePfnCount) {
1516. LargestFreePfnCount = FreePfnCount;
1517. LargestFreePfnStart = PageFrameIndex - FreePfnCount;
1518. }
1519. FreePfnCount = 0;
1520. }
  1521. Pfn1 += 1;
  1522. i -= 1;
  1523. PageFrameIndex += 1;
  1524. }
  1525. if (FreePfnCount > LargestFreePfnCount) {
  1526. LargestFreePfnCount = FreePfnCount;
  1527. LargestFreePfnStart = PageFrameIndex - FreePfnCount;
  1528. }
  1529. break;
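//
// The LoaderFree handling above also tracks the longest run of free
// frames seen in a descriptor.  The same run-length bookkeeping,
// restated in isolation so the reset rule is easy to see (IsPageFree
// and FrameCount are hypothetical stand-ins for the reference-count
// test and descriptor page count used above):
//
#if 0
{
    PFN_NUMBER Run = 0;
    PFN_NUMBER BestRun = 0;
    PFN_NUMBER BestStart = 0;
    PFN_NUMBER Frame;

    for (Frame = 0; Frame < FrameCount; Frame += 1) {
        if (IsPageFree (Frame)) {
            Run += 1;
        }
        else {
            if (Run > BestRun) {
                BestRun = Run;
                BestStart = Frame - Run;
            }
            Run = 0;          // every used frame ends the current run
        }
    }

    if (Run > BestRun) {      // a trailing run can also be the longest
        BestRun = Run;
        BestStart = FrameCount - Run;
    }
}
#endif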
  1530. case LoaderFirmwarePermanent:
  1531. case LoaderSpecialMemory:
  1532. case LoaderBBTMemory:
  1533. //
  1534. // If the descriptor lies within the highest PFN database entry
  1535. // then create PFN pages for this range. Note the PFN entries
  1536. // must be created to support \Device\PhysicalMemory.
  1537. //
  1538. if (MemoryDescriptor->BasePage <= MmHighestPhysicalPage) {
  1539. if (MemoryDescriptor->BasePage + MemoryDescriptor->PageCount > MmHighestPhysicalPage + 1) {
  1540. MemoryDescriptor->PageCount = (ULONG)MmHighestPhysicalPage - MemoryDescriptor->BasePage + 1;
  1541. i = MemoryDescriptor->PageCount;
  1542. }
  1543. }
  1544. else {
  1545. break;
  1546. }
  1547. //
  1548. // Fall through as these pages must be marked in use as they
  1549. // lie within the PFN limits and may be accessed through
  1550. // \Device\PhysicalMemory.
  1551. //
  1552. default:
  1553. PointerPte = MiGetPteAddress (KSEG0_BASE +
  1554. (PageFrameIndex << PAGE_SHIFT));
  1555. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1556. while (i != 0) {
  1557. //
  1558. // Set page as in use.
  1559. //
  1560. PointerPde = MiGetPdeAddress (KSEG0_BASE +
  1561. (PageFrameIndex << PAGE_SHIFT));
  1562. if (Pfn1->u3.e2.ReferenceCount == 0) {
  1563. Pfn1->u4.PteFrame = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
  1564. Pfn1->PteAddress = PointerPte;
  1565. Pfn1->u2.ShareCount += 1;
  1566. Pfn1->u3.e2.ReferenceCount = 1;
  1567. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1568. Pfn1->u3.e1.CacheAttribute = MiCached;
  1569. MiDetermineNode(PageFrameIndex, Pfn1);
  1570. if (MemoryDescriptor->MemoryType == LoaderXIPRom) {
  1571. Pfn1->u1.Flink = 0;
  1572. Pfn1->u2.ShareCount = 0;
  1573. Pfn1->u3.e2.ReferenceCount = 0;
  1574. Pfn1->u3.e1.PageLocation = 0;
  1575. Pfn1->u3.e1.CacheAttribute = MiCached;
  1576. Pfn1->u3.e1.Rom = 1;
  1577. Pfn1->u4.InPageError = 0;
  1578. Pfn1->u3.e1.PrototypePte = 1;
  1579. }
  1580. }
  1581. Pfn1 += 1;
  1582. i -= 1;
  1583. PageFrameIndex += 1;
  1584. PointerPte += 1;
  1585. }
  1586. break;
  1587. }
  1588. NextMd = MemoryDescriptor->ListEntry.Blink;
  1589. }
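//
// The walk above is the standard doubly linked list idiom, following
// Blink pointers so the descriptor covering the highest physical memory
// is processed first.  A generic sketch of just that traversal,
// assuming only the LIST_ENTRY embedded in each descriptor:
//
#if 0
{
    PLIST_ENTRY Entry;
    PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;

    Entry = LoaderBlock->MemoryDescriptorListHead.Blink;

    while (Entry != &LoaderBlock->MemoryDescriptorListHead) {

        Descriptor = CONTAINING_RECORD (Entry,
                                        MEMORY_ALLOCATION_DESCRIPTOR,
                                        ListEntry);

        //
        // Examine Descriptor->BasePage and Descriptor->PageCount here.
        //

        Entry = Entry->Blink;
    }
}
#endif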
  1590. if (PfnInLargePages == FALSE) {
  1591. //
  1592. // Indicate that the PFN database is allocated in nonpaged pool.
  1593. //
  1594. PointerPte = MiGetPteAddress (&MmPfnDatabase[MmLowestPhysicalPage]);
  1595. Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
  1596. Pfn1->u3.e1.StartOfAllocation = 1;
  1597. LastPte = MiGetPteAddress (&MmPfnDatabase[MmHighestPossiblePhysicalPage]);
  1598. while (PointerPte <= LastPte) {
  1599. Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
  1600. Pfn1->u2.ShareCount = 1;
  1601. Pfn1->u3.e2.ReferenceCount = 1;
  1602. PointerPte += 1;
  1603. }
  1604. //
  1605. // Set the end of the allocation.
  1606. //
  1607. PointerPte = MiGetPteAddress (&MmPfnDatabase[MmHighestPossiblePhysicalPage]);
  1608. Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
  1609. Pfn1->u3.e1.EndOfAllocation = 1;
  1610. }
  1611. else {
  1612. //
  1613. // The PFN database is allocated using large pages.
  1614. //
  1615. // Mark all PFN entries for the PFN pages in use.
  1616. //
  1617. PointerPte = MiGetPteAddress (MmPfnDatabase);
  1618. PageFrameIndex = (PFN_NUMBER)PointerPte->u.Hard.PageFrameNumber;
  1619. Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
  1620. i = MxPfnAllocation;
  1621. do {
  1622. Pfn1->PteAddress = (PMMPTE)(PageFrameIndex << PTE_SHIFT);
  1623. MiDetermineNode(PageFrameIndex, Pfn1);
  1624. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1625. Pfn1->u3.e1.CacheAttribute = MiCached;
  1626. Pfn1->u3.e2.ReferenceCount += 1;
  1627. PageFrameIndex += 1;
  1628. Pfn1 += 1;
  1629. i -= 1;
  1630. } while (i != 0);
  1631. //
  1632. // Scan the PFN database backward for pages that are completely zero.
  1633. // These pages are unused and can be added to the free list.
  1634. //
  1635. BottomPfn = MI_PFN_ELEMENT(MmHighestPhysicalPage);
  1636. do {
  1637. //
  1638. // Compute the address of the start of the page that is next
  1639. // lower in memory and scan backwards until that page address
  1640. // is reached or just crossed.
  1641. //
  1642. if (((ULONG_PTR)BottomPfn & (PAGE_SIZE - 1)) != 0) {
  1643. BasePfn = (PMMPFN)((ULONG_PTR)BottomPfn & ~(PAGE_SIZE - 1));
  1644. TopPfn = BottomPfn + 1;
  1645. }
  1646. else {
  1647. BasePfn = (PMMPFN)((ULONG_PTR)BottomPfn - PAGE_SIZE);
  1648. TopPfn = BottomPfn;
  1649. }
  1650. while (BottomPfn > BasePfn) {
  1651. BottomPfn -= 1;
  1652. }
  1653. //
  1654. // If the entire range over which the PFN entries span is
  1655. // completely zero and the PFN entry that maps the page is
  1656. // not in the range, then add the page to the appropriate
  1657. // free list.
  1658. //
  1659. Range = (ULONG_PTR)TopPfn - (ULONG_PTR)BottomPfn;
  1660. if (RtlCompareMemoryUlong((PVOID)BottomPfn, Range, 0) == Range) {
  1661. //
  1662. // Set the PTE address to the physical page for virtual
  1663. // address alignment checking.
  1664. //
  1665. PointerPte = MiGetPteAddress (BasePfn);
  1666. PageFrameIndex = (PFN_NUMBER)PointerPte->u.Hard.PageFrameNumber;
  1667. Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
  1668. ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
  1669. ASSERT (Pfn1->PteAddress == (PMMPTE)(PageFrameIndex << PTE_SHIFT));
  1670. Pfn1->u3.e2.ReferenceCount = 0;
  1671. Pfn1->PteAddress = (PMMPTE)(PageFrameIndex << PTE_SHIFT);
  1672. MiDetermineNode(PageFrameIndex, Pfn1);
  1673. MiInsertPageInFreeList (PageFrameIndex);
  1674. }
  1675. } while (BottomPfn > MmPfnDatabase);
  1676. }
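//
// The reclaim test above relies on RtlCompareMemoryUlong, which returns
// the number of bytes from the start of the buffer that match the given
// ULONG pattern, so comparing the return value against the length asks
// "is the whole buffer this pattern?".  A small sketch of the all-zero
// test in isolation (PageVa is a hypothetical page-aligned, page-sized
// buffer):
//
#if 0
{
    PVOID PageVa = NULL;    // hypothetical: set to the page to test
    BOOLEAN AllZero;

    AllZero = (BOOLEAN)
        (RtlCompareMemoryUlong (PageVa, PAGE_SIZE, 0) == PAGE_SIZE);
}
#endif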
  1677. //
  1678. // Adjust the memory descriptor to indicate that free pool has
  1679. // been used for nonpaged pool creation.
  1680. //
  1681. // N.B. This is required because the descriptors are walked upon
  1682. // return from this routine to create the MmPhysicalMemoryBlock.
  1683. //
  1684. *MxFreeDescriptor = *(PMEMORY_ALLOCATION_DESCRIPTOR)&MxOldFreeDescriptor;
  1685. //
  1686. // Initialize the nonpaged available PTEs for mapping I/O space
  1687. // and kernel stacks.
  1688. //
  1689. PointerPte = MiGetPteAddress (SystemPteStart);
  1690. ASSERT (((ULONG_PTR)PointerPte & (PAGE_SIZE - 1)) == 0);
  1691. MmNumberOfSystemPtes = (ULONG)(MiGetPteAddress(MmNonPagedPoolExpansionStart) - PointerPte - 1);
  1692. //
  1693. // Initialize the nonpaged pool.
  1694. //
  1695. InitializePool (NonPagedPool, 0);
  1696. //
  1697. // Initialize the system PTE pool now that nonpaged pool exists.
  1698. //
  1699. MiInitializeSystemPtes (PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
  1700. MmDebugPte = MiReserveSystemPtes (1, SystemPteSpace);
  1701. MmDebugPte->u.Long = 0;
  1702. MmDebugVa = MiGetVirtualAddressMappedByPte (MmDebugPte);
  1703. MmCrashDumpPte = MiReserveSystemPtes (16, SystemPteSpace);
  1704. MmCrashDumpVa = MiGetVirtualAddressMappedByPte (MmCrashDumpPte);
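//
// The debugger and crash dump PTEs reserved above follow the usual
// system PTE pattern: reserve a PTE, build a valid PTE for the desired
// physical frame, and address the page through the virtual address that
// PTE maps.  A condensed sketch of that pattern (TLB flushing and IRQL
// considerations omitted; PageFrameIndex stands for whatever frame the
// caller wants mapped):
//
#if 0
{
    PMMPTE MappingPte;
    PVOID MappingVa;
    MMPTE NewPte;

    MappingPte = MiReserveSystemPtes (1, SystemPteSpace);
    if (MappingPte != NULL) {
        NewPte = ValidKernelPte;
        NewPte.u.Hard.PageFrameNumber = PageFrameIndex;
        *MappingPte = NewPte;
        MappingVa = MiGetVirtualAddressMappedByPte (MappingPte);

        //
        // ... access the page through MappingVa ...
        //

        MiReleaseSystemPtes (MappingPte, 1, SystemPteSpace);
    }
}
#endif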
  1705. //
  1706. // Allocate a page directory and a pair of page table pages.
  1707. // Map the hyper space page directory page into the top level parent
  1708. // directory & the hyper space page table page into the page directory
  1709. // and map an additional page that will eventually be used for the
  1710. // working set list. Page tables after the first two are set up later
  1711. // on during individual process working set initialization.
  1712. //
  1713. // The working set list page will eventually be a part of hyper space.
  1714. // It is mapped into the second level page directory page so it can be
  1715. // zeroed and so it will be accounted for in the PFN database. Later
  1716. // the page will be unmapped, and its page frame number captured in the
  1717. // system process object.
  1718. //
  1719. TempPte = ValidKernelPte;
  1720. TempPte.u.Hard.Global = 0;
  1721. StartPxe = MiGetPxeAddress(HYPER_SPACE);
  1722. StartPpe = MiGetPpeAddress(HYPER_SPACE);
  1723. StartPde = MiGetPdeAddress(HYPER_SPACE);
  1724. LOCK_PFN (OldIrql);
  1725. if (StartPxe->u.Hard.Valid == 0) {
  1726. ASSERT (StartPxe->u.Long == 0);
  1727. TempPte.u.Hard.PageFrameNumber = MiRemoveAnyPage (0);
  1728. *StartPxe = TempPte;
  1729. RtlZeroMemory (MiGetVirtualAddressMappedByPte (StartPxe), PAGE_SIZE);
  1730. }
  1731. else {
  1732. ASSERT (StartPxe->u.Hard.Global == 0);
  1733. }
  1734. if (StartPpe->u.Hard.Valid == 0) {
  1735. ASSERT (StartPpe->u.Long == 0);
  1736. TempPte.u.Hard.PageFrameNumber = MiRemoveAnyPage (0);
  1737. *StartPpe = TempPte;
  1738. RtlZeroMemory (MiGetVirtualAddressMappedByPte (StartPpe), PAGE_SIZE);
  1739. }
  1740. else {
  1741. ASSERT (StartPpe->u.Hard.Global == 0);
  1742. }
  1743. TempPte.u.Hard.PageFrameNumber = MiRemoveAnyPage (0);
  1744. *StartPde = TempPte;
  1745. //
  1746. // Zero the hyper space page table page.
  1747. //
  1748. StartPte = MiGetPteAddress(HYPER_SPACE);
  1749. RtlZeroMemory(StartPte, PAGE_SIZE);
  1750. KeFlushCurrentTb();
  1751. PageFrameIndex = MiRemoveAnyPage (0);
  1752. UNLOCK_PFN (OldIrql);
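//
// Each paging level above is materialized with the same step: if the
// entry is not yet valid, take any free page, point the entry at it,
// and zero the new table through the recursive mapping.  The step in
// isolation (EntryPte is hypothetical and stands for whichever
// PXE/PPE/PDE is being filled in):
//
#if 0
{
    PMMPTE EntryPte;        // hypothetical: the PXE/PPE/PDE to check
    MMPTE ScratchPte;

    if (EntryPte->u.Hard.Valid == 0) {
        ScratchPte = ValidKernelPte;
        ScratchPte.u.Hard.Global = 0;
        ScratchPte.u.Hard.PageFrameNumber = MiRemoveAnyPage (0);
        *EntryPte = ScratchPte;
        RtlZeroMemory (MiGetVirtualAddressMappedByPte (EntryPte),
                       PAGE_SIZE);
    }
}
#endif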
  1753. //
  1754. // Hyper space now exists, set the necessary variables.
  1755. //
  1756. MmFirstReservedMappingPte = MiGetPteAddress (FIRST_MAPPING_PTE);
  1757. MmLastReservedMappingPte = MiGetPteAddress (LAST_MAPPING_PTE);
  1758. //
  1759. // Create zeroing PTEs for the zero page thread.
  1760. //
  1761. MiFirstReservedZeroingPte = MiReserveSystemPtes (NUMBER_OF_ZEROING_PTES + 1,
  1762. SystemPteSpace);
  1763. RtlZeroMemory (MiFirstReservedZeroingPte,
  1764. (NUMBER_OF_ZEROING_PTES + 1) * sizeof(MMPTE));
  1765. //
  1766. // Use the page frame number field of the first PTE as an
  1767. // offset into the available zeroing PTEs.
  1768. //
  1769. MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = NUMBER_OF_ZEROING_PTES;
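//
// The first reserved zeroing PTE is never used as a mapping; its
// PageFrameNumber field simply records how many of the following
// NUMBER_OF_ZEROING_PTES entries are still available.  A sketch of how
// a consumer might take one entry under that convention (illustrative
// only; locking and the actual zero-page-thread bookkeeping are
// omitted):
//
#if 0
{
    ULONG_PTR ZeroingIndex;
    PMMPTE ZeroingPte;

    ZeroingIndex = (ULONG_PTR) MiFirstReservedZeroingPte->u.Hard.PageFrameNumber;
    if (ZeroingIndex != 0) {
        ZeroingPte = MiFirstReservedZeroingPte + ZeroingIndex;
        MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = ZeroingIndex - 1;

        //
        // ... build a valid PTE in ZeroingPte and zero the page it maps ...
        //
    }
}
#endif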
  1770. //
  1771. // Create the VAD bitmap for this process.
  1772. //
  1773. PointerPte = MiGetPteAddress (VAD_BITMAP_SPACE);
  1774. //
  1775. // Note the global bit must be off for the bitmap data.
  1776. //
  1777. TempPte = ValidKernelPteLocal;
  1778. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1779. *PointerPte = TempPte;
  1780. //
  1781. // Point to the page we just created and zero it.
  1782. //
  1783. RtlZeroMemory (VAD_BITMAP_SPACE, PAGE_SIZE);
  1784. MiLastVadBit = (ULONG)((((ULONG_PTR) MI_64K_ALIGN (MM_HIGHEST_VAD_ADDRESS))) / X64K);
  1785. if (MiLastVadBit > PAGE_SIZE * 8 - 1) {
  1786. MiLastVadBit = PAGE_SIZE * 8 - 1;
  1787. }
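//
// The clamp above keeps the VAD bitmap within a single page: with one
// bit per 64KB of user address space, a 4KB page holds 4096 * 8 = 32768
// bits, which covers 32768 * 64KB = 2GB of address space, so any
// MM_HIGHEST_VAD_ADDRESS beyond that leaves MiLastVadBit capped at
// PAGE_SIZE * 8 - 1.
//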
  1788. KeInitializeEvent (&MiImageMappingPteEvent,
  1789. NotificationEvent,
  1790. FALSE);
  1791. //
  1792. // Initialize this process's memory management structures including
  1793. // the working set list.
  1794. //
  1795. CurrentProcess = PsGetCurrentProcess ();
  1796. //
  1797. // The PFN element for the page directory has already been initialized,
  1798. // zero the reference count and the share count so they won't be
  1799. // wrong.
  1800. //
  1801. Pfn1 = MI_PFN_ELEMENT (PdePageNumber);
  1802. LOCK_PFN (OldIrql);
  1803. Pfn1->u2.ShareCount = 0;
  1804. Pfn1->u3.e2.ReferenceCount = 0;
  1805. //
  1806. // Get a page for the working set list and zero it.
  1807. //
  1808. PageFrameIndex = MiRemoveAnyPage (0);
  1809. UNLOCK_PFN (OldIrql);
  1810. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1811. PointerPte = MiGetPteAddress (MmWorkingSetList);
  1812. *PointerPte = TempPte;
  1813. CurrentProcess->WorkingSetPage = PageFrameIndex;
  1814. KeFlushCurrentTb();
  1815. CurrentProcess->Vm.MaximumWorkingSetSize = (ULONG)MmSystemProcessWorkingSetMax;
  1816. CurrentProcess->Vm.MinimumWorkingSetSize = (ULONG)MmSystemProcessWorkingSetMin;
  1817. MmInitializeProcessAddressSpace (CurrentProcess, NULL, NULL, NULL);
  1818. //
  1819. // Ensure the secondary page structures are marked as in use.
  1820. //
  1821. ASSERT (MmFreePagesByColor[0] < (PMMCOLOR_TABLES)MM_KSEG2_BASE);
  1822. PointerPde = MiGetPdeAddress(MmFreePagesByColor[0]);
  1823. ASSERT (PointerPde->u.Hard.Valid == 1);
  1824. PointerPte = MiGetPteAddress(MmFreePagesByColor[0]);
  1825. ASSERT (PointerPte->u.Hard.Valid == 1);
  1826. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
  1827. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1828. LOCK_PFN (OldIrql);
  1829. if (Pfn1->u3.e2.ReferenceCount == 0) {
  1830. Pfn1->u4.PteFrame = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
  1831. Pfn1->PteAddress = PointerPte;
  1832. Pfn1->u2.ShareCount += 1;
  1833. Pfn1->u3.e2.ReferenceCount = 1;
  1834. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1835. Pfn1->u3.e1.CacheAttribute = MiCached;
  1836. MiDetermineNode(PageFrameIndex, Pfn1);
  1837. }
  1838. UNLOCK_PFN (OldIrql);
  1839. //
  1840. // Handle physical pages in BIOS memory range (640k to 1mb) by
  1841. // explicitly initializing them in the PFN database so that they
  1842. // can be handled properly when I/O is done to these pages (or virtual
  1843. // reads across processes).
  1844. //
  1845. Pfn1 = MI_PFN_ELEMENT (MM_BIOS_START);
  1846. Pfn2 = MI_PFN_ELEMENT (MM_BIOS_END);
  1847. PointerPte = MiGetPteAddress (MM_BIOS_START);
  1848. LOCK_PFN (OldIrql);
  1849. do {
  1850. if ((Pfn1->u2.ShareCount == 0) &&
  1851. (Pfn1->u3.e2.ReferenceCount == 0) &&
  1852. (Pfn1->PteAddress == 0)) {
  1853. //
  1854. // Set this as in use.
  1855. //
  1856. Pfn1->u3.e2.ReferenceCount = 1;
  1857. Pfn1->PteAddress = PointerPte;
  1858. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1859. Pfn1->u3.e1.CacheAttribute = MiNotMapped;
  1860. Pfn1->u3.e1.PageColor = 0;
  1861. }
  1862. Pfn1 += 1;
  1863. PointerPte += 1;
  1864. } while (Pfn1 <= Pfn2);
  1865. UNLOCK_PFN (OldIrql);
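//
// For reference, the BIOS range constants above reduce to small frame
// numbers with 4KB pages: MM_BIOS_START is 0xA0000 >> 12 = 0xA0 and
// MM_BIOS_END is 0xFFFFF >> 12 = 0xFF, so the loop touches
// 0xFF - 0xA0 + 1 = 96 frames, i.e. the 384KB between 640KB and 1MB.
//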
  1866. return;
  1867. }