Leaked source code of Windows Server 2003

  1. /*++
  2. Copyright (c) 1990 Microsoft Corporation
  3. Module Name:
  4. init386.c
  5. Abstract:
  6. This module contains the machine dependent initialization for the
  7. memory management component. It is specifically tailored to the
  8. INTEL x86 and PAE machines.
  9. Author:
  10. Lou Perazzoli (loup) 6-Jan-1990
  11. Landy Wang (landyw) 2-Jun-1997
  12. Revision History:
  13. --*/
  14. #include "mi.h"
  15. PFN_NUMBER
  16. MxGetNextPage (
  17. IN PFN_NUMBER PagesNeeded
  18. );
  19. PFN_NUMBER
  20. MxPagesAvailable (
  21. VOID
  22. );
  23. VOID
  24. MxConvertToLargePage (
  25. IN PVOID VirtualAddress,
  26. IN PVOID EndVirtualAddress
  27. );
  28. LOGICAL
  29. MiIsRegularMemory (
  30. IN PLOADER_PARAMETER_BLOCK LoaderBlock,
  31. IN PFN_NUMBER PageFrameIndex
  32. );
  33. #ifdef ALLOC_PRAGMA
  34. #pragma alloc_text(INIT,MiInitMachineDependent)
  35. #pragma alloc_text(INIT,MxGetNextPage)
  36. #pragma alloc_text(INIT,MxPagesAvailable)
  37. #pragma alloc_text(INIT,MxConvertToLargePage)
  38. #pragma alloc_text(INIT,MiReportPhysicalMemory)
  39. #pragma alloc_text(INIT,MiIsRegularMemory)
  40. #endif
  41. #define MM_LARGE_PAGE_MINIMUM ((255*1024*1024) >> PAGE_SHIFT)
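//
// Illustrative arithmetic (assuming the x86 PAGE_SHIFT of 12, i.e. 4K pages):
// (255*1024*1024) >> 12 == 65280, so large page mappings are only considered
// on machines with more than roughly 255MB (65280 page frames) of RAM.
//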
  42. extern ULONG MmLargeSystemCache;
  43. extern ULONG MmLargeStackSize;
  44. extern LOGICAL MmMakeLowMemory;
  45. extern LOGICAL MmPagedPoolMaximumDesired;
  46. #if defined(_X86PAE_)
  47. LOGICAL MiUseGlobalBitInLargePdes;
  48. PVOID MmHyperSpaceEnd;
  49. #endif
  50. extern KEVENT MiImageMappingPteEvent;
  51. //
  52. // Local data.
  53. //
  54. #ifdef ALLOC_DATA_PRAGMA
  55. #pragma data_seg("INITDATA")
  56. #endif
  57. ULONG MxPfnAllocation;
  58. PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
  59. #ifdef ALLOC_DATA_PRAGMA
  60. #pragma data_seg()
  61. #endif
  62. MEMORY_ALLOCATION_DESCRIPTOR MxOldFreeDescriptor;
  63. typedef struct _MI_LARGE_VA_RANGES {
  64. PVOID VirtualAddress;
  65. PVOID EndVirtualAddress;
  66. } MI_LARGE_VA_RANGES, *PMI_LARGE_VA_RANGES;
  67. //
  68. // There are potentially 4 large page ranges:
  69. //
  70. // 1. PFN database
  71. // 2. Initial nonpaged pool
  72. // 3. Kernel code/data
  73. // 4. HAL code/data
  74. //
  75. #define MI_LARGE_PFN_DATABASE 0x1
  76. #define MI_LARGE_NONPAGED_POOL 0x2
  77. #define MI_LARGE_KERNEL_HAL 0x4
  78. #define MI_LARGE_ALL 0x7
  79. ULONG MxMapLargePages = MI_LARGE_ALL;
  80. #define MI_MAX_LARGE_VA_RANGES 4
  81. ULONG MiLargeVaRangeIndex;
  82. MI_LARGE_VA_RANGES MiLargeVaRanges[MI_MAX_LARGE_VA_RANGES];
  83. #define MM_PFN_MAPPED_BY_PDE (MM_VA_MAPPED_BY_PDE >> PAGE_SHIFT)
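//
// Illustrative values (assuming the usual x86 geometry): a non-PAE PDE maps
// 4MB, so MM_PFN_MAPPED_BY_PDE is 1024 page frames; a PAE PDE maps 2MB,
// giving 512 frames per PDE.
//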
  84. PFN_NUMBER
  85. MxGetNextPage (
  86. IN PFN_NUMBER PagesNeeded
  87. )
  88. /*++
  89. Routine Description:
  90. This function returns the next physical page number from the
  91. largest free descriptor. If there are not enough physical pages left
  92. to satisfy the request then a bugcheck is executed since the system
  93. cannot be initialized.
  94. Arguments:
  95. PagesNeeded - Supplies the number of pages needed.
  96. Return Value:
  97. The base of the range of physically contiguous pages.
  98. Environment:
  99. Kernel mode, Phase 0 only.
  100. --*/
  101. {
  102. PFN_NUMBER PageFrameIndex;
  103. //
  104. // Examine the free descriptor to see if enough usable memory is available.
  105. //
  106. if (PagesNeeded > MxFreeDescriptor->PageCount) {
  107. KeBugCheckEx (INSTALL_MORE_MEMORY,
  108. MmNumberOfPhysicalPages,
  109. MxFreeDescriptor->PageCount,
  110. MxOldFreeDescriptor.PageCount,
  111. PagesNeeded);
  112. }
  113. PageFrameIndex = MxFreeDescriptor->BasePage;
  114. MxFreeDescriptor->BasePage += PagesNeeded;
  115. MxFreeDescriptor->PageCount -= PagesNeeded;
  116. return PageFrameIndex;
  117. }
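//
// Worked example with hypothetical descriptor values: if MxFreeDescriptor
// describes BasePage 0x300 with PageCount 0x5000, a request for 0x100 pages
// returns frame 0x300 and leaves the descriptor at BasePage 0x400,
// PageCount 0x4F00 - allocation is a simple bump of the largest free run.
//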
  118. PFN_NUMBER
  119. MxPagesAvailable (
  120. VOID
  121. )
  122. /*++
  123. Routine Description:
  124. This function returns the number of pages available.
  125. Arguments:
  126. None.
  127. Return Value:
  128. The number of physically contiguous pages currently available.
  129. Environment:
  130. Kernel mode, Phase 0 only.
  131. --*/
  132. {
  133. return MxFreeDescriptor->PageCount;
  134. }
  135. VOID
  136. MxConvertToLargePage (
  137. IN PVOID VirtualAddress,
  138. IN PVOID EndVirtualAddress
  139. )
  140. /*++
  141. Routine Description:
  142. This function converts the backing for the supplied virtual address range
  143. to a large page mapping.
  144. Arguments:
  145. VirtualAddress - Supplies the virtual address to convert to a large page.
  146. EndVirtualAddress - Supplies the end virtual address to convert to a
  147. large page.
  148. Return Value:
  149. None.
  150. Environment:
  151. Kernel mode, Phase 1 only.
  152. --*/
  153. {
  154. ULONG i;
  155. MMPTE TempPde;
  156. PMMPTE PointerPde;
  157. PMMPTE LastPde;
  158. PMMPTE PointerPte;
  159. KIRQL OldIrql;
  160. PMMPFN Pfn1;
  161. PFN_NUMBER PageFrameIndex;
  162. LOGICAL ValidPteFound;
  163. PFN_NUMBER LargePageBaseFrame;
  164. ASSERT (MxMapLargePages != 0);
  165. PointerPde = MiGetPdeAddress (VirtualAddress);
  166. LastPde = MiGetPdeAddress (EndVirtualAddress);
  167. TempPde = ValidKernelPde;
  168. TempPde.u.Hard.LargePage = 1;
  169. #if defined(_X86PAE_)
  170. if (MiUseGlobalBitInLargePdes == TRUE) {
  171. TempPde.u.Hard.Global = 1;
  172. }
  173. #endif
  174. LOCK_PFN (OldIrql);
  175. do {
  176. ASSERT (PointerPde->u.Hard.Valid == 1);
  177. if (PointerPde->u.Hard.LargePage == 1) {
  178. goto skip;
  179. }
  180. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  181. //
  182. // Here's a nasty little hack - the page table page mapping the kernel
  183. // and HAL (built by the loader) does not necessarily fill all the
  184. // page table entries (ie: any number of leading entries may be zero).
  185. //
  186. // To deal with this, walk forward until a nonzero entry is found
  187. // and re-index the large page based on this.
  188. //
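//
// Illustrative example: if the first valid PTE is found at index i == 3 and
// maps frame 0x403, then LargePageBaseFrame is 0x403 - 3 == 0x400, i.e. the
// large page is re-based to the PDE-aligned frame that the zeroed leading
// entries would otherwise have mapped.
//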
  189. ValidPteFound = FALSE;
  190. LargePageBaseFrame = (ULONG)-1;
  191. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  192. ASSERT ((PageFrameIndex & (MM_PFN_MAPPED_BY_PDE - 1)) == 0);
  193. for (i = 0; i < PTE_PER_PAGE; i += 1) {
  194. ASSERT ((PointerPte->u.Long == ZeroKernelPte.u.Long) ||
  195. (ValidPteFound == FALSE) ||
  196. (PageFrameIndex == MI_GET_PAGE_FRAME_FROM_PTE (PointerPte)));
  197. if (PointerPte->u.Hard.Valid == 1) {
  198. if (ValidPteFound == FALSE) {
  199. ValidPteFound = TRUE;
  200. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  201. LargePageBaseFrame = PageFrameIndex - i;
  202. }
  203. }
  204. PointerPte += 1;
  205. PageFrameIndex += 1;
  206. }
  207. if (ValidPteFound == FALSE) {
  208. goto skip;
  209. }
  210. TempPde.u.Hard.PageFrameNumber = LargePageBaseFrame;
  211. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPde);
  212. MI_WRITE_VALID_PTE_NEW_PAGE (PointerPde, TempPde);
  213. //
  214. // Update the idle process to use the large page mapping also as
  215. // the page table page is going to be freed.
  216. //
  217. MmSystemPagePtes [((ULONG_PTR)PointerPde &
  218. (PD_PER_SYSTEM * (sizeof(MMPTE) * PDE_PER_PAGE) - 1)) / sizeof(MMPTE)] = TempPde;
  219. KeFlushEntireTb (TRUE, TRUE);
  220. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  221. Pfn1->u2.ShareCount = 0;
  222. Pfn1->u3.e2.ReferenceCount = 1;
  223. Pfn1->u3.e1.PageLocation = StandbyPageList;
  224. MI_SET_PFN_DELETED (Pfn1);
  225. MiDecrementReferenceCount (Pfn1, PageFrameIndex);
  226. skip:
  227. PointerPde += 1;
  228. } while (PointerPde <= LastPde);
  229. UNLOCK_PFN (OldIrql);
  230. }
  231. VOID
  232. MiReportPhysicalMemory (
  233. VOID
  234. )
  235. /*++
  236. Routine Description:
  237. This routine is called during Phase 0 initialization once the
  238. MmPhysicalMemoryBlock has been constructed. Its job is to decide
  239. which large page ranges to enable later and also to construct a
  240. large page comparison list so any requests which are not fully cached
  241. can check this list in order to refuse conflicting requests.
  242. Arguments:
  243. None.
  244. Return Value:
  245. None.
  246. Environment:
  247. Kernel mode. Phase 0 only.
  248. This is called before any non-MmCached allocations are made.
  249. --*/
  250. {
  251. ULONG i, j;
  252. PMMPTE PointerPte;
  253. LOGICAL EntryFound;
  254. PFN_NUMBER count;
  255. PFN_NUMBER Page;
  256. PFN_NUMBER LastPage;
  257. PFN_NUMBER PageFrameIndex;
  258. PFN_NUMBER LastPageFrameIndex;
  259. PFN_NUMBER PageFrameIndex2;
  260. //
  261. // Examine the physical memory block to see whether large pages should
  262. // be enabled. The key point is that all the physical pages within a
  263. // given large page range must have the same cache attributes (MmCached)
  264. // in order to maintain TB coherency. This can be done provided all
  265. // the pages within the large page range represent real RAM (as described
  266. // by the loader) so that memory management can control it. If any
  267. // portion of the large page range is not RAM, it is possible that it
  268. // may get used as noncached or writecombined device memory and
  269. // therefore large pages cannot be used.
  270. //
  271. if (MxMapLargePages == 0) {
  272. return;
  273. }
  274. for (i = 0; i < MiLargeVaRangeIndex; i += 1) {
  275. PointerPte = MiGetPteAddress (MiLargeVaRanges[i].VirtualAddress);
  276. ASSERT (PointerPte->u.Hard.Valid == 1);
  277. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  278. PointerPte = MiGetPteAddress (MiLargeVaRanges[i].EndVirtualAddress);
  279. ASSERT (PointerPte->u.Hard.Valid == 1);
  280. LastPageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  281. //
  282. // Round the start down to a page directory boundary and the end to
  283. // the last page directory entry before the next boundary.
  284. //
  285. PageFrameIndex &= ~(MM_PFN_MAPPED_BY_PDE - 1);
  286. LastPageFrameIndex |= (MM_PFN_MAPPED_BY_PDE - 1);
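//
// Illustrative example (assuming MM_PFN_MAPPED_BY_PDE == 0x400): a start
// frame of 0x12345 rounds down to 0x12000 and an end frame of 0x12456
// rounds up to 0x127FF, so the full PDE-aligned span is compared against
// the physical memory runs below.
//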
  287. EntryFound = FALSE;
  288. j = 0;
  289. do {
  290. count = MmPhysicalMemoryBlock->Run[j].PageCount;
  291. Page = MmPhysicalMemoryBlock->Run[j].BasePage;
  292. LastPage = Page + count;
  293. if ((PageFrameIndex >= Page) && (LastPageFrameIndex < LastPage)) {
  294. EntryFound = TRUE;
  295. break;
  296. }
  297. j += 1;
  298. } while (j != MmPhysicalMemoryBlock->NumberOfRuns);
  299. if (EntryFound == FALSE) {
  300. //
  301. // No entry was found that completely spans this large page range.
  302. // Zero it so this range will not be converted into large pages
  303. // later.
  304. //
  305. DbgPrint ("MM: Loader/HAL memory block indicates large pages cannot be used for %p->%p\n",
  306. MiLargeVaRanges[i].VirtualAddress,
  307. MiLargeVaRanges[i].EndVirtualAddress);
  308. MiLargeVaRanges[i].VirtualAddress = NULL;
  309. //
  310. // Don't use large pages for anything if this chunk overlaps any
  311. // others in the request list. This is because 2 separate ranges
  312. // may share a straddling large page. If the first range was unable
  313. // to use large pages, but the second one does ... then only part
  314. // of the first range will get large pages if we enable large
  315. pages for the second range. This would be very bad as we use
  316. // the MI_IS_PHYSICAL macro everywhere and assume the entire
  317. // range is in or out, so disable all large pages here instead.
  318. //
  319. for (j = 0; j < MiLargeVaRangeIndex; j += 1) {
  320. //
  321. // Skip the range that is already being rejected.
  322. //
  323. if (i == j) {
  324. continue;
  325. }
  326. //
  327. // Skip any range which has already been removed.
  328. //
  329. if (MiLargeVaRanges[j].VirtualAddress == NULL) {
  330. continue;
  331. }
  332. PointerPte = MiGetPteAddress (MiLargeVaRanges[j].VirtualAddress);
  333. ASSERT (PointerPte->u.Hard.Valid == 1);
  334. PageFrameIndex2 = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  335. if ((PageFrameIndex2 >= PageFrameIndex) &&
  336. (PageFrameIndex2 <= LastPageFrameIndex)) {
  337. DbgPrint ("MM: Disabling large pages for all ranges due to overlap\n");
  338. goto RemoveAllRanges;
  339. }
  340. //
  341. // Since it is not possible for any request chunk to completely
  342. // encompass another one, checking only the start and end
  343. // addresses is sufficient.
  344. //
  345. PointerPte = MiGetPteAddress (MiLargeVaRanges[j].EndVirtualAddress);
  346. ASSERT (PointerPte->u.Hard.Valid == 1);
  347. PageFrameIndex2 = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  348. if ((PageFrameIndex2 >= PageFrameIndex) &&
  349. (PageFrameIndex2 <= LastPageFrameIndex)) {
  350. DbgPrint ("MM: Disabling large pages for all ranges due to overlap\n");
  351. goto RemoveAllRanges;
  352. }
  353. }
  354. //
  355. // No other ranges overlapped with this one, it is sufficient to
  356. // just disable this range and continue to attempt to use large
  357. // pages for any others.
  358. //
  359. continue;
  360. }
  361. MiAddCachedRange (PageFrameIndex, LastPageFrameIndex);
  362. }
  363. return;
  364. RemoveAllRanges:
  365. while (i != 0) {
  366. i -= 1;
  367. if (MiLargeVaRanges[i].VirtualAddress != NULL) {
  368. PointerPte = MiGetPteAddress (MiLargeVaRanges[i].VirtualAddress);
  369. ASSERT (PointerPte->u.Hard.Valid == 1);
  370. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  371. PointerPte = MiGetPteAddress (MiLargeVaRanges[i].EndVirtualAddress);
  372. ASSERT (PointerPte->u.Hard.Valid == 1);
  373. LastPageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  374. //
  375. // Round the start down to a page directory boundary and the end to
  376. // the last page directory entry before the next boundary.
  377. //
  378. PageFrameIndex &= ~(MM_PFN_MAPPED_BY_PDE - 1);
  379. LastPageFrameIndex |= (MM_PFN_MAPPED_BY_PDE - 1);
  380. MiRemoveCachedRange (PageFrameIndex, LastPageFrameIndex);
  381. }
  382. }
  383. MiLargeVaRangeIndex = 0;
  384. return;
  385. }
  386. LONG MiAddPtesCount;
  387. PMMPTE MiExtraPtes1Pointer;
  388. ULONG MiExtraPtes1;
  389. ULONG MiExtraPtes2;
  390. LOGICAL
  391. MiRecoverExtraPtes (
  392. VOID
  393. )
  394. /*++
  395. Routine Description:
  396. This routine is called to recover extra PTEs for the system PTE pool.
  397. These are not simply added earlier in Phase 0 because the system PTE
  398. allocator uses the low addresses first which would fragment these
  399. bigger ranges.
  400. Arguments:
  401. None.
  402. Return Value:
  403. TRUE if any PTEs were added, FALSE if not.
  404. Environment:
  405. Kernel mode.
  406. --*/
  407. {
  408. LOGICAL PtesAdded;
  409. PMMPTE PointerPte;
  410. ULONG OriginalAddPtesCount;
  411. //
  412. // Make sure the add is only done once as this is called multiple times.
  413. //
  414. OriginalAddPtesCount = InterlockedCompareExchange (&MiAddPtesCount, 1, 0);
  415. if (OriginalAddPtesCount != 0) {
  416. return FALSE;
  417. }
  418. PtesAdded = FALSE;
  419. if (MiExtraPtes1 != 0) {
  420. //
  421. // Add extra system PTEs to the pool.
  422. //
  423. MiAddSystemPtes (MiExtraPtes1Pointer, MiExtraPtes1, SystemPteSpace);
  424. PtesAdded = TRUE;
  425. }
  426. if (MiExtraPtes2 != 0) {
  427. //
  428. // Add extra system PTEs to the pool.
  429. //
  430. if (MM_SHARED_USER_DATA_VA > MiUseMaximumSystemSpace) {
  431. if (MiUseMaximumSystemSpaceEnd > MM_SHARED_USER_DATA_VA) {
  432. MiExtraPtes2 = BYTES_TO_PAGES(MM_SHARED_USER_DATA_VA - MiUseMaximumSystemSpace);
  433. }
  434. }
  435. if (MiExtraPtes2 != 0) {
  436. PointerPte = MiGetPteAddress (MiUseMaximumSystemSpace);
  437. MiAddSystemPtes (PointerPte, MiExtraPtes2, SystemPteSpace);
  438. }
  439. PtesAdded = TRUE;
  440. }
  441. return PtesAdded;
  442. }
  443. LOGICAL
  444. MiIsRegularMemory (
  445. IN PLOADER_PARAMETER_BLOCK LoaderBlock,
  446. IN PFN_NUMBER PageFrameIndex
  447. )
  448. /*++
  449. Routine Description:
  450. This routine checks whether the argument page frame index represents
  451. regular memory in the loader descriptor block. It is only used very
  452. early during Phase 0 init because the MmPhysicalMemoryBlock is not yet
  453. initialized.
  454. Arguments:
  455. LoaderBlock - Supplies a pointer to the firmware setup loader block.
  456. PageFrameIndex - Supplies the page frame index to check.
  457. Return Value:
  458. TRUE if the frame represents regular memory, FALSE if not.
  459. Environment:
  460. Kernel mode.
  461. --*/
  462. {
  463. PLIST_ENTRY NextMd;
  464. PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
  465. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  466. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  467. MemoryDescriptor = CONTAINING_RECORD (NextMd,
  468. MEMORY_ALLOCATION_DESCRIPTOR,
  469. ListEntry);
  470. if (PageFrameIndex >= MemoryDescriptor->BasePage) {
  471. if (PageFrameIndex < MemoryDescriptor->BasePage + MemoryDescriptor->PageCount) {
  472. if ((MemoryDescriptor->MemoryType == LoaderFirmwarePermanent) ||
  473. (MemoryDescriptor->MemoryType == LoaderBBTMemory) ||
  474. (MemoryDescriptor->MemoryType == LoaderSpecialMemory)) {
  475. //
  476. // This page lies in a memory descriptor for which we will
  477. // never create PFN entries, hence return FALSE.
  478. //
  479. break;
  480. }
  481. return TRUE;
  482. }
  483. }
  484. else {
  485. //
  486. // Since the loader memory list is sorted in ascending order,
  487. // the requested page must not be in the loader list at all.
  488. //
  489. break;
  490. }
  491. NextMd = MemoryDescriptor->ListEntry.Flink;
  492. }
  493. //
  494. // The final check before returning FALSE is to ensure that the requested
  495. // page wasn't one of the ones we used to normal-map the loader mappings,
  496. // etc.
  497. //
  498. if ((PageFrameIndex >= MxOldFreeDescriptor.BasePage) &&
  499. (PageFrameIndex < MxOldFreeDescriptor.BasePage + MxOldFreeDescriptor.PageCount)) {
  500. return TRUE;
  501. }
  502. return FALSE;
  503. }
  504. VOID
  505. MiInitMachineDependent (
  506. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  507. )
  508. /*++
  509. Routine Description:
  510. This routine performs the necessary operations to enable virtual
  511. memory. This includes building the page directory page, building
  512. page table pages to map the code section, the data section, the
  513. stack section and the trap handler.
  514. It also initializes the PFN database and populates the free list.
  515. Arguments:
  516. LoaderBlock - Supplies a pointer to the firmware setup loader block.
  517. Return Value:
  518. None.
  519. Environment:
  520. Kernel mode.
  521. N.B. This routine uses memory from the loader block descriptors, but
  522. the descriptors themselves must be restored prior to return as our caller
  523. walks them to create the MmPhysicalMemoryBlock.
  524. --*/
  525. {
  526. LOGICAL InitialNonPagedPoolSetViaRegistry;
  527. PHYSICAL_ADDRESS MaxHotPlugMemoryAddress;
  528. ULONG Bias;
  529. PMMPTE BasePte;
  530. PMMPFN BasePfn;
  531. PMMPFN BottomPfn;
  532. PMMPFN TopPfn;
  533. PFN_NUMBER FirstNonPagedPoolPage;
  534. PFN_NUMBER FirstPfnDatabasePage;
  535. LOGICAL PfnInLargePages;
  536. ULONG BasePage;
  537. ULONG PagesLeft;
  538. ULONG Range;
  539. ULONG PageCount;
  540. ULONG i, j;
  541. ULONG PdePageNumber;
  542. ULONG PdePage;
  543. ULONG PageFrameIndex;
  544. ULONG MaxPool;
  545. PEPROCESS CurrentProcess;
  546. ULONG DirBase;
  547. ULONG MostFreePage;
  548. ULONG MostFreeLowMem;
  549. PFN_NUMBER PagesNeeded;
  550. PLIST_ENTRY NextMd;
  551. PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
  552. MMPTE TempPde;
  553. MMPTE TempPte;
  554. PMMPTE PointerPde;
  555. PMMPTE PointerPte;
  556. PMMPTE LastPte;
  557. PMMPTE Pde;
  558. PMMPTE StartPde;
  559. PMMPTE EndPde;
  560. PMMPFN Pfn1;
  561. PMMPFN Pfn2;
  562. ULONG PdeCount;
  563. ULONG va;
  564. KIRQL OldIrql;
  565. PVOID VirtualAddress;
  566. PVOID NonPagedPoolStartVirtual;
  567. ULONG LargestFreePfnCount;
  568. ULONG LargestFreePfnStart;
  569. ULONG FreePfnCount;
  570. PVOID NonPagedPoolStartLow;
  571. LOGICAL ExtraSystemCacheViews;
  572. SIZE_T MaximumNonPagedPoolInBytesLimit;
  573. PKLDR_DATA_TABLE_ENTRY DataTableEntry;
  574. PLIST_ENTRY NextEntry;
  575. ULONG ReturnedLength;
  576. NTSTATUS status;
  577. UCHAR Associativity;
  578. ULONG NonPagedSystemStart;
  579. LOGICAL PagedPoolMaximumDesired;
  580. SIZE_T NumberOfBytes;
  581. if (InitializationPhase == 1) {
  582. //
  583. // If the kernel image has not been biased to allow for 3gb of user
  584. // space, *ALL* the booted processors support large pages, and the
  585. // number of physical pages is greater than the threshold, then map
  586. // the kernel image, HAL, PFN database and initial nonpaged pool
  587. // with large pages.
  588. //
  589. if ((KeFeatureBits & KF_LARGE_PAGE) && (MxMapLargePages != 0)) {
  590. for (i = 0; i < MiLargeVaRangeIndex; i += 1) {
  591. if (MiLargeVaRanges[i].VirtualAddress != NULL) {
  592. MxConvertToLargePage (MiLargeVaRanges[i].VirtualAddress,
  593. MiLargeVaRanges[i].EndVirtualAddress);
  594. }
  595. }
  596. }
  597. return;
  598. }
  599. ASSERT (InitializationPhase == 0);
  600. ASSERT (MxMapLargePages == MI_LARGE_ALL);
  601. PfnInLargePages = FALSE;
  602. ExtraSystemCacheViews = FALSE;
  603. MostFreePage = 0;
  604. MostFreeLowMem = 0;
  605. LargestFreePfnCount = 0;
  606. NonPagedPoolStartLow = NULL;
  607. PagedPoolMaximumDesired = FALSE;
  608. //
  609. // Initializing these is not needed for correctness, but without it
  610. // the compiler cannot compile this code W4 to check for use of
  611. // uninitialized variables.
  612. //
  613. LargestFreePfnStart = 0;
  614. FirstPfnDatabasePage = 0;
  615. MaximumNonPagedPoolInBytesLimit = 0;
  616. //
  617. // If the chip doesn't support large pages or the system is booted /3GB,
  618. // then disable large page support.
  619. //
  620. if (((KeFeatureBits & KF_LARGE_PAGE) == 0) || (MmVirtualBias != 0)) {
  621. MxMapLargePages = 0;
  622. }
  623. //
  624. // This flag is registry-settable so check before overriding.
  625. //
  626. if (MmProtectFreedNonPagedPool == TRUE) {
  627. MxMapLargePages &= ~(MI_LARGE_PFN_DATABASE | MI_LARGE_NONPAGED_POOL);
  628. }
  629. //
  630. // Sanitize this registry-specifiable large stack size. Note the registry
  631. // size is in 1K chunks, ie: 32 means 32k. Note also that the registry
  632. // setting does not include the guard page and we don't want to burden
  633. // administrators with knowing about it so we automatically subtract one
  634. // page from their request.
  635. //
  636. if (MmLargeStackSize > (KERNEL_LARGE_STACK_SIZE / 1024)) {
  637. //
  638. // No registry override or the override is too high.
  639. // Set it to the default.
  640. //
  641. MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
  642. }
  643. else {
  644. //
  645. // Convert registry override from 1K units to bytes. Note intelligent
  646. // choices are 16k or 32k because we bin those sizes in sysptes.
  647. //
  648. MmLargeStackSize *= 1024;
  649. MmLargeStackSize = MI_ROUND_TO_SIZE (MmLargeStackSize, PAGE_SIZE);
  650. MmLargeStackSize -= PAGE_SIZE;
  651. ASSERT (MmLargeStackSize <= KERNEL_LARGE_STACK_SIZE);
  652. ASSERT ((MmLargeStackSize & (PAGE_SIZE-1)) == 0);
  653. //
  654. // Don't allow a value that is too low either.
  655. //
  656. if (MmLargeStackSize < KERNEL_STACK_SIZE) {
  657. MmLargeStackSize = KERNEL_STACK_SIZE;
  658. }
  659. }
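//
// Worked example (assuming 4K pages): a registry value of 61 becomes
// 61 * 1024 == 62464 bytes, rounds up to 64K, and then has the guard page
// subtracted, leaving a 60K large stack.
//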
  660. //
  661. // If the host processor supports global bits, then set the global
  662. // bit in the template kernel PTE and PDE entries.
  663. //
  664. if (KeFeatureBits & KF_GLOBAL_PAGE) {
  665. ValidKernelPte.u.Long |= MM_PTE_GLOBAL_MASK;
  666. #if defined(_X86PAE_)
  667. //
  668. // Note that the PAE mode of the processor does not support the
  669. // global bit in PDEs which map 4K page table pages.
  670. //
  671. MiUseGlobalBitInLargePdes = TRUE;
  672. #else
  673. ValidKernelPde.u.Long |= MM_PTE_GLOBAL_MASK;
  674. #endif
  675. MmPteGlobal.u.Long = MM_PTE_GLOBAL_MASK;
  676. }
  677. TempPte = ValidKernelPte;
  678. TempPde = ValidKernelPde;
  679. //
  680. // Set the directory base for the system process.
  681. //
  682. PointerPte = MiGetPdeAddress (PDE_BASE);
  683. PdePageNumber = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
  684. CurrentProcess = PsGetCurrentProcess ();
  685. #if defined(_X86PAE_)
  686. PrototypePte.u.Soft.PageFileHigh = MI_PTE_LOOKUP_NEEDED;
  687. _asm {
  688. mov eax, cr3
  689. mov DirBase, eax
  690. }
  691. //
  692. // Note cr3 must be 32-byte aligned.
  693. //
  694. ASSERT ((DirBase & 0x1f) == 0);
  695. //
  696. // Initialize the PaeTop for this process right away.
  697. //
  698. RtlCopyMemory ((PVOID) &MiSystemPaeVa,
  699. (PVOID) (KSEG0_BASE | DirBase),
  700. sizeof (MiSystemPaeVa));
  701. CurrentProcess->PaeTop = &MiSystemPaeVa;
  702. #else
  703. DirBase = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte) << PAGE_SHIFT;
  704. #endif
  705. CurrentProcess->Pcb.DirectoryTableBase[0] = DirBase;
  706. KeSweepDcache (FALSE);
  707. //
  708. // Unmap the low 2Gb of memory.
  709. //
  710. PointerPde = MiGetPdeAddress (0);
  711. LastPte = MiGetPdeAddress (KSEG0_BASE);
  712. MiFillMemoryPte (PointerPde,
  713. LastPte - PointerPde,
  714. ZeroKernelPte.u.Long);
  715. //
  716. // Get the lower bound of the free physical memory and the
  717. // number of physical pages by walking the memory descriptor lists.
  718. //
  719. MxFreeDescriptor = NULL;
  720. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  721. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  722. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  723. MEMORY_ALLOCATION_DESCRIPTOR,
  724. ListEntry);
  725. if ((MemoryDescriptor->MemoryType != LoaderFirmwarePermanent) &&
  726. (MemoryDescriptor->MemoryType != LoaderBBTMemory) &&
  727. (MemoryDescriptor->MemoryType != LoaderHALCachedMemory) &&
  728. (MemoryDescriptor->MemoryType != LoaderSpecialMemory)) {
  729. //
  730. // This check results in /BURNMEMORY chunks not being counted.
  731. //
  732. if (MemoryDescriptor->MemoryType != LoaderBad) {
  733. MmNumberOfPhysicalPages += MemoryDescriptor->PageCount;
  734. }
  735. if (MemoryDescriptor->BasePage < MmLowestPhysicalPage) {
  736. MmLowestPhysicalPage = MemoryDescriptor->BasePage;
  737. }
  738. if ((MemoryDescriptor->BasePage + MemoryDescriptor->PageCount) >
  739. MmHighestPhysicalPage) {
  740. MmHighestPhysicalPage =
  741. MemoryDescriptor->BasePage + MemoryDescriptor->PageCount - 1;
  742. }
  743. //
  744. // Locate the largest free descriptor.
  745. //
  746. if ((MemoryDescriptor->MemoryType == LoaderFree) ||
  747. (MemoryDescriptor->MemoryType == LoaderLoadedProgram) ||
  748. (MemoryDescriptor->MemoryType == LoaderFirmwareTemporary) ||
  749. (MemoryDescriptor->MemoryType == LoaderOsloaderStack)) {
  750. if (MemoryDescriptor->PageCount > MostFreePage) {
  751. MostFreePage = MemoryDescriptor->PageCount;
  752. MxFreeDescriptor = MemoryDescriptor;
  753. }
  754. }
  755. }
  756. NextMd = MemoryDescriptor->ListEntry.Flink;
  757. }
  758. if (MmLargeSystemCache != 0) {
  759. ExtraSystemCacheViews = TRUE;
  760. }
  761. //
  762. // This flag is registry-settable so check before overriding.
  763. //
  764. // Enabling special IRQL automatically disables mapping the kernel with
  765. // large pages so we can catch kernel and HAL code.
  766. //
  767. if (MmVerifyDriverBufferLength != (ULONG)-1) {
  768. MmLargePageMinimum = (ULONG)-2;
  769. }
  770. else if (MmLargePageMinimum == 0) {
  771. MmLargePageMinimum = MM_LARGE_PAGE_MINIMUM;
  772. }
  773. //
  774. // Capture the registry-specified initial nonpaged pool setting as we
  775. // will modify the variable later.
  776. //
  777. if ((MmSizeOfNonPagedPoolInBytes != 0) ||
  778. (MmMaximumNonPagedPoolPercent != 0)) {
  779. InitialNonPagedPoolSetViaRegistry = TRUE;
  780. }
  781. else {
  782. InitialNonPagedPoolSetViaRegistry = FALSE;
  783. }
  784. if (MmNumberOfPhysicalPages <= MmLargePageMinimum) {
  785. MxMapLargePages = 0;
  786. //
  787. // Reduce the size of the initial nonpaged pool on small configurations
  788. // as RAM is precious (unless the registry has overridden it).
  789. //
  790. if ((MmNumberOfPhysicalPages <= MM_LARGE_PAGE_MINIMUM) &&
  791. (MmSizeOfNonPagedPoolInBytes == 0)) {
  792. MmSizeOfNonPagedPoolInBytes = 2*1024*1024;
  793. }
  794. }
  795. //
  796. // MmDynamicPfn may have been initialized based on the registry to
  797. // a value representing the highest physical address in gigabytes.
  798. //
  799. MmDynamicPfn *= ((1024 * 1024 * 1024) / PAGE_SIZE);
  800. //
  801. // Retrieve highest hot plug memory range from the HAL if
  802. // available and not otherwise retrieved from the registry.
  803. //
  804. if (MmDynamicPfn == 0) {
  805. status = HalQuerySystemInformation (HalQueryMaxHotPlugMemoryAddress,
  806. sizeof(PHYSICAL_ADDRESS),
  807. (PPHYSICAL_ADDRESS) &MaxHotPlugMemoryAddress,
  808. &ReturnedLength);
  809. if (NT_SUCCESS (status)) {
  810. ASSERT (ReturnedLength == sizeof(PHYSICAL_ADDRESS));
  811. MmDynamicPfn = (PFN_NUMBER) (MaxHotPlugMemoryAddress.QuadPart / PAGE_SIZE);
  812. }
  813. }
  814. if (MmDynamicPfn != 0) {
  815. if (MmVirtualBias != 0) {
  816. MmDynamicPfn = 0;
  817. }
  818. }
  819. if (MmDynamicPfn != 0) {
  820. #if defined(_X86PAE_)
  821. MmHighestPossiblePhysicalPage = MI_DTC_MAX_PAGES - 1;
  822. #else
  823. MmHighestPossiblePhysicalPage = MI_DEFAULT_MAX_PAGES - 1;
  824. #endif
  825. if (MmDynamicPfn - 1 < MmHighestPossiblePhysicalPage) {
  826. if (MmDynamicPfn - 1 < MmHighestPhysicalPage) {
  827. MmDynamicPfn = MmHighestPhysicalPage + 1;
  828. }
  829. MmHighestPossiblePhysicalPage = MmDynamicPfn - 1;
  830. }
  831. }
  832. else {
  833. MmHighestPossiblePhysicalPage = MmHighestPhysicalPage;
  834. }
  835. if (MmHighestPossiblePhysicalPage > 0x400000 - 1) {
  836. //
  837. // The PFN database is more than 112mb. Force it to come from the
  838. // 2GB->3GB virtual address range. Note the administrator cannot be
  839. // booting /3GB as when he does, the loader throws away memory
  840. // above the physical 16GB line, so this must be a hot-add
  841. // configuration. Since the loader has already put the system at
  842. // 3GB, the highest possible hot add page must be reduced now.
  843. //
  844. if (MmVirtualBias != 0) {
  845. MmHighestPossiblePhysicalPage = 0x400000 - 1;
  846. if (MmHighestPhysicalPage > MmHighestPossiblePhysicalPage) {
  847. MmHighestPhysicalPage = MmHighestPossiblePhysicalPage;
  848. }
  849. }
  850. else {
  851. //
  852. // The virtual space between 2 and 3GB virtual is best used
  853. // for system PTEs when this much physical memory is present.
  854. //
  855. ExtraSystemCacheViews = FALSE;
  856. }
  857. }
  858. //
  859. // Don't enable extra system cache views as virtual addresses are limited.
  860. // Only a kernel-verifier special case can trigger this.
  861. //
  862. if ((KernelVerifier == TRUE) &&
  863. (MmVirtualBias == 0) &&
  864. (MmNumberOfPhysicalPages <= MmLargePageMinimum) &&
  865. (MmHighestPossiblePhysicalPage > 0x100000)) {
  866. ExtraSystemCacheViews = FALSE;
  867. }
  868. #if defined(_X86PAE_)
  869. if (MmVirtualBias != 0) {
  870. //
  871. // User space is larger than 2GB, make extra room for the user space
  872. // working set list & associated hash tables.
  873. //
  874. MmSystemCacheWorkingSetList = (PMMWSL) ((ULONG_PTR)
  875. MmSystemCacheWorkingSetList + MM_SYSTEM_CACHE_WORKING_SET_3GB_DELTA);
  876. }
  877. MmHyperSpaceEnd = (PVOID)((ULONG_PTR)MmSystemCacheWorkingSetList - 1);
  878. MmSystemCacheWorkingSetListPte = MiGetPteAddress (MmSystemCacheWorkingSetList);
  879. //
  880. // Only PAE machines with at least 5GB of physical memory get to use this
  881. // and then only if they are NOT booted /3GB.
  882. //
  883. if (strstr(LoaderBlock->LoadOptions, "NOLOWMEM")) {
  884. if ((MmVirtualBias == 0) &&
  885. (MmNumberOfPhysicalPages >= 5 * 1024 * 1024 / 4)) {
  886. MiNoLowMemory = (PFN_NUMBER)((ULONGLONG)_4gb / PAGE_SIZE);
  887. }
  888. }
  889. if (MiNoLowMemory != 0) {
  890. MmMakeLowMemory = TRUE;
  891. }
  892. #endif
  893. //
  894. // Save the original descriptor value as everything must be restored
  895. // prior to this function returning.
  896. //
  897. *(PMEMORY_ALLOCATION_DESCRIPTOR)&MxOldFreeDescriptor = *MxFreeDescriptor;
  898. if (MmNumberOfPhysicalPages < 1100) {
  899. KeBugCheckEx (INSTALL_MORE_MEMORY,
  900. MmNumberOfPhysicalPages,
  901. MmLowestPhysicalPage,
  902. MmHighestPhysicalPage,
  903. 0);
  904. }
  905. //
  906. // Build non-paged pool using the physical pages following the
  907. // data page in which to build the pool from. Non-paged pool grows
  908. // from the high range of the virtual address space and expands
  909. // downward.
  910. //
  911. // At this time non-paged pool is constructed so virtual addresses
  912. // are also physically contiguous.
  913. //
  914. if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
  915. (7 * (MmNumberOfPhysicalPages >> 3))) {
  916. //
  917. // More than 7/8 of memory is allocated to nonpagedpool, reset to 0.
  918. //
  919. MmSizeOfNonPagedPoolInBytes = 0;
  920. if (MmMaximumNonPagedPoolPercent == 0) {
  921. InitialNonPagedPoolSetViaRegistry = FALSE;
  922. }
  923. }
  924. if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) {
  925. //
  926. // Calculate the size of nonpaged pool.
  927. // Use the minimum size, then for every MB above 4mb add extra
  928. // pages.
  929. //
  930. MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
  931. MmSizeOfNonPagedPoolInBytes +=
  932. ((MmNumberOfPhysicalPages - 1024)/256) *
  933. MmMinAdditionNonPagedPoolPerMb;
  934. }
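//
// Worked example (assuming the typical x86 values of 256K for
// MmMinimumNonPagedPoolSize and 32K for MmMinAdditionNonPagedPoolPerMb):
// a 128MB machine has 32768 pages, so the initial pool is
// 256K + ((32768 - 1024) / 256) * 32K == 256K + 124 * 32K, roughly 4.1MB.
//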
  935. if (MmSizeOfNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL) {
  936. MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL;
  937. }
  938. //
  939. // If the registry specifies a total nonpaged pool percentage cap, enforce
  940. // it here.
  941. //
  942. if (MmMaximumNonPagedPoolPercent != 0) {
  943. if (MmMaximumNonPagedPoolPercent < 5) {
  944. MmMaximumNonPagedPoolPercent = 5;
  945. }
  946. else if (MmMaximumNonPagedPoolPercent > 80) {
  947. MmMaximumNonPagedPoolPercent = 80;
  948. }
  949. //
  950. // Use the registry-expressed percentage value.
  951. //
  952. MaximumNonPagedPoolInBytesLimit =
  953. ((MmNumberOfPhysicalPages * MmMaximumNonPagedPoolPercent) / 100);
  954. //
  955. // Carefully set the maximum keeping in mind that maximum PAE
  956. // machines can have 16*1024*1024 pages so care must be taken
  957. // that multiplying by PAGE_SIZE doesn't overflow here.
  958. //
  959. if (MaximumNonPagedPoolInBytesLimit > ((MM_MAX_INITIAL_NONPAGED_POOL + MM_MAX_ADDITIONAL_NONPAGED_POOL) / PAGE_SIZE)) {
  960. MaximumNonPagedPoolInBytesLimit = MM_MAX_INITIAL_NONPAGED_POOL + MM_MAX_ADDITIONAL_NONPAGED_POOL;
  961. }
  962. else {
  963. MaximumNonPagedPoolInBytesLimit *= PAGE_SIZE;
  964. }
  965. if (MaximumNonPagedPoolInBytesLimit < 6 * 1024 * 1024) {
  966. MaximumNonPagedPoolInBytesLimit = 6 * 1024 * 1024;
  967. }
  968. if (MmSizeOfNonPagedPoolInBytes > MaximumNonPagedPoolInBytesLimit) {
  969. MmSizeOfNonPagedPoolInBytes = MaximumNonPagedPoolInBytesLimit;
  970. }
  971. }
  972. //
  973. // Align to page size boundary.
  974. //
  975. MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
  976. //
  977. // Calculate the maximum size of pool.
  978. //
  979. if (MmMaximumNonPagedPoolInBytes == 0) {
  980. //
  981. // Calculate the size of nonpaged pool. If 4mb or less use
  982. // the minimum size, then for every MB above 4mb add extra
  983. // pages.
  984. //
  985. MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
  986. //
  987. // Make sure enough expansion for the PFN database exists.
  988. //
  989. MmMaximumNonPagedPoolInBytes += (ULONG)PAGE_ALIGN (
  990. (MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN));
  991. //
  992. // Only use the new formula for autosizing nonpaged pool on machines
  993. // with at least 512MB. The new formula allocates 1/2 as much nonpaged
  994. // pool per MB but scales much higher - machines with ~1.2GB or more
  995. // get 256MB of nonpaged pool. Note that the old formula gave machines
  996. // with 512MB of RAM 128MB of nonpaged pool so this behavior is
  997. // preserved with the new formula as well.
  998. //
  999. if (MmNumberOfPhysicalPages >= 0x1f000) {
  1000. MmMaximumNonPagedPoolInBytes +=
  1001. ((MmNumberOfPhysicalPages - 1024)/256) *
  1002. (MmMaxAdditionNonPagedPoolPerMb / 2);
  1003. if (MmMaximumNonPagedPoolInBytes < MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  1004. MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL;
  1005. }
  1006. }
  1007. else {
  1008. MmMaximumNonPagedPoolInBytes +=
  1009. ((MmNumberOfPhysicalPages - 1024)/256) *
  1010. MmMaxAdditionNonPagedPoolPerMb;
  1011. }
  1012. if ((MmMaximumNonPagedPoolPercent != 0) &&
  1013. (MmMaximumNonPagedPoolInBytes > MaximumNonPagedPoolInBytesLimit)) {
  1014. MmMaximumNonPagedPoolInBytes = MaximumNonPagedPoolInBytesLimit;
  1015. }
  1016. }
  1017. MaxPool = MmSizeOfNonPagedPoolInBytes + PAGE_SIZE * 16 +
  1018. (ULONG)PAGE_ALIGN (
  1019. (MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN));
  1020. if (MmMaximumNonPagedPoolInBytes < MaxPool) {
  1021. MmMaximumNonPagedPoolInBytes = MaxPool;
  1022. }
  1023. //
  1024. // Systems that are booted /3GB have a 128MB nonpaged pool maximum,
  1025. //
  1026. // Systems that have a full 2GB system virtual address space can enjoy an
  1027. // extra 128MB of nonpaged pool in the upper GB of the address space.
  1028. //
  1029. MaxPool = MM_MAX_INITIAL_NONPAGED_POOL;
  1030. if (MmVirtualBias == 0) {
  1031. MaxPool += MM_MAX_ADDITIONAL_NONPAGED_POOL;
  1032. }
  1033. if (InitialNonPagedPoolSetViaRegistry == TRUE) {
  1034. MaxPool = MmSizeOfNonPagedPoolInBytes + MM_MAX_ADDITIONAL_NONPAGED_POOL;
  1035. }
  1036. if (MmMaximumNonPagedPoolInBytes > MaxPool) {
  1037. MmMaximumNonPagedPoolInBytes = MaxPool;
  1038. }
  1039. //
  1040. // Grow the initial nonpaged pool if necessary so that the overall pool
  1041. // will aggregate to the right size.
  1042. //
  1043. if ((MmMaximumNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL) &&
  1044. (InitialNonPagedPoolSetViaRegistry == FALSE)) {
  1045. if (MmSizeOfNonPagedPoolInBytes < MmMaximumNonPagedPoolInBytes - MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  1046. //
  1047. // Note the initial nonpaged pool can only be grown if there
  1048. // is a sufficient contiguous physical memory chunk it can
  1049. // be carved from immediately.
  1050. //
  1051. PagesLeft = MxPagesAvailable ();
  1052. if (((MmMaximumNonPagedPoolInBytes - MM_MAX_ADDITIONAL_NONPAGED_POOL) >> PAGE_SHIFT) + ((32 * 1024 * 1024) >> PAGE_SHIFT) < PagesLeft) {
  1053. MmSizeOfNonPagedPoolInBytes = MmMaximumNonPagedPoolInBytes - MM_MAX_ADDITIONAL_NONPAGED_POOL;
  1054. }
  1055. else {
  1056. //
  1057. // Since the initial nonpaged pool could not be grown, don't
  1058. // leave any excess in the expansion nonpaged pool as we
  1059. // cannot encode it into subsection format on non-pae
  1060. // machines.
  1061. //
  1062. if (MmMaximumNonPagedPoolInBytes > MmSizeOfNonPagedPoolInBytes + MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  1063. MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes + MM_MAX_ADDITIONAL_NONPAGED_POOL;
  1064. }
  1065. }
  1066. }
  1067. }
  1068. //
  1069. // Get secondary color value from:
  1070. //
  1071. // (a) from the registry (already filled in) or
  1072. // (b) from the PCR or
  1073. // (c) default value.
  1074. //
  1075. if (MmSecondaryColors == 0) {
  1076. Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
  1077. MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
  1078. if (Associativity != 0) {
  1079. MmSecondaryColors /= Associativity;
  1080. }
  1081. }
  1082. MmSecondaryColors = MmSecondaryColors >> PAGE_SHIFT;
  1083. if (MmSecondaryColors == 0) {
  1084. MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
  1085. }
  1086. else {
  1087. //
  1088. // Make sure the value is a power of two and within limits.
  1089. //
  1090. if (((MmSecondaryColors & (MmSecondaryColors - 1)) != 0) ||
  1091. (MmSecondaryColors < MM_SECONDARY_COLORS_MIN) ||
  1092. (MmSecondaryColors > MM_SECONDARY_COLORS_MAX)) {
  1093. MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
  1094. }
  1095. }
  1096. MmSecondaryColorMask = MmSecondaryColors - 1;
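//
// Illustrative example: a 512K, 8-way associative second level cache gives
// 512K / 8 == 64K, and 64K >> PAGE_SHIFT == 16 secondary colors (mask 0xF),
// a power of two within the allowed limits.
//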
  1097. #if defined(MI_MULTINODE)
  1098. //
  1099. // Determine the number of bits in MmSecondaryColorMask. This
  1100. // is the number of bits the Node color must be shifted
  1101. // by before it is included in colors.
  1102. //
  1103. i = MmSecondaryColorMask;
  1104. MmSecondaryColorNodeShift = 0;
  1105. while (i != 0) {
  1106. i >>= 1;
  1107. MmSecondaryColorNodeShift += 1;
  1108. }
  1109. //
  1110. // Adjust the number of secondary colors by the number of nodes
  1111. // in the machine. The secondary color mask is NOT adjusted
  1112. // as it is used to control coloring within a node. The node
  1113. // color is added to the color AFTER normal color calculations
  1114. // are performed.
  1115. //
  1116. MmSecondaryColors *= KeNumberNodes;
  1117. for (i = 0; i < KeNumberNodes; i += 1) {
  1118. KeNodeBlock[i]->Color = i;
  1119. KeNodeBlock[i]->MmShiftedColor = i << MmSecondaryColorNodeShift;
  1120. InitializeSListHead(&KeNodeBlock[i]->DeadStackList);
  1121. }
  1122. #endif
  1123. MiMaximumSystemCacheSizeExtra = 0;
  1124. //
  1125. // Add in the PFN database size (based on the number of pages required
  1126. // from page zero to the highest page).
  1127. //
  1128. // Get the number of secondary colors and add the array for tracking
  1129. // secondary colors to the end of the PFN database.
  1130. //
  1131. MxPfnAllocation = 1 + ((((MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN)) +
  1132. (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2))
  1133. >> PAGE_SHIFT);
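//
// Illustrative arithmetic (assuming an approximately 24-byte MMPFN on x86):
// a machine whose highest possible page is just below 4GB (0x100000 frames)
// needs about 0x100000 * 24 bytes == 24MB for the PFN database, i.e. 6144
// pages, plus a few pages for the two secondary color table arrays.
//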
  1134. if (MmVirtualBias == 0) {
  1135. MmNonPagedPoolStart = (PVOID)((ULONG)MmNonPagedPoolEnd
  1136. - MmMaximumNonPagedPoolInBytes
  1137. + MmSizeOfNonPagedPoolInBytes);
  1138. }
  1139. else {
  1140. MmNonPagedPoolStart = (PVOID)((ULONG) MmNonPagedPoolEnd -
  1141. (MmMaximumNonPagedPoolInBytes +
  1142. (MxPfnAllocation << PAGE_SHIFT)));
  1143. }
  1144. MmNonPagedPoolStart = (PVOID) PAGE_ALIGN (MmNonPagedPoolStart);
  1145. NonPagedPoolStartVirtual = MmNonPagedPoolStart;
  1146. //
  1147. // Allocate additional paged pool provided it can fit and either the
  1148. // user asked for it or we decide 460MB of PTE space is sufficient.
  1149. //
  1150. // Note at 64GB of RAM, the PFN database spans 464mb. Given that plus
  1151. // initial nonpaged pool at 128mb and space for the loader's highest
  1152. // page and session space, there may not be any room left to guarantee
  1153. // we will be able to allocate system PTEs out of the virtual address
  1154. // space below 3gb. So don't crimp for more than 64GB.
  1155. //
  1156. if ((MmVirtualBias == 0) &&
  1157. (MmHighestPossiblePhysicalPage <= 0x1000000)) {
  1158. if (((MmLargeStackSize <= (32 * 1024 - PAGE_SIZE)) && (MiUseMaximumSystemSpace != 0)) ||
  1159. ((MmSizeOfPagedPoolInBytes == (SIZE_T)-1) ||
  1160. ((MmSizeOfPagedPoolInBytes == 0) &&
  1161. (MmNumberOfPhysicalPages >= (1 * 1024 * 1024 * 1024 / PAGE_SIZE)) &&
  1162. (MiRequestedSystemPtes != (ULONG)-1)))) {
  1163. if ((ExpMultiUserTS == FALSE) || (MmSizeOfPagedPoolInBytes != 0)) {
  1164. PagedPoolMaximumDesired = TRUE;
  1165. MmPagedPoolMaximumDesired = TRUE;
  1166. }
  1167. else {
  1168. //
  1169. // This is a multiuser TS machine defaulting to
  1170. // autoconfiguration. These machines use approximately
  1171. // 3.25x PTEs compared to paged pool per session.
  1172. // If the stack size is halved, then 1.6x becomes the ratio.
  1173. //
  1174. // Estimate how many PTEs and paged pool virtual space
  1175. // will be available and divide it up now.
  1176. //
  1177. ULONG LowVa;
  1178. ULONG TotalVirtualSpace;
  1179. ULONG PagedPoolPortion;
  1180. ULONG PtePortion;
  1181. TotalVirtualSpace = (ULONG) NonPagedPoolStartVirtual - (ULONG) MM_PAGED_POOL_START;
  1182. LowVa = (MM_KSEG0_BASE | MmBootImageSize) + MxPfnAllocation * PAGE_SIZE + MmSizeOfNonPagedPoolInBytes;
  1183. if (LowVa < MiSystemViewStart) {
  1184. TotalVirtualSpace += (MiSystemViewStart - LowVa);
  1185. }
  1186. PtePortion = 77;
  1187. PagedPoolPortion = 100 - PtePortion;
  1188. //
  1189. // If the large stack size has been reduced, then adjust the
  1190. // ratio automatically as well.
  1191. //
  1192. if (MmLargeStackSize != KERNEL_LARGE_STACK_SIZE) {
  1193. PtePortion = (PtePortion * MmLargeStackSize) / KERNEL_LARGE_STACK_SIZE;
  1194. }
  1195. MmSizeOfPagedPoolInBytes = (TotalVirtualSpace / (PagedPoolPortion + PtePortion)) * PagedPoolPortion;
  1196. }
  1197. //
  1198. // Make sure we always allocate extra PTEs later as we have crimped
  1199. // the initial allocation here.
  1200. //
  1201. ExtraSystemCacheViews = FALSE;
  1202. MmNumberOfSystemPtes = 3000;
  1203. MiRequestedSystemPtes = (ULONG)-1;
  1204. }
  1205. }
  1206. //
  1207. // Calculate the starting PDE for the system PTE pool which is
  1208. // right below the nonpaged pool.
  1209. //
  1210. MmNonPagedSystemStart = (PVOID)(((ULONG)NonPagedPoolStartVirtual -
  1211. ((MmNumberOfSystemPtes + 1) * PAGE_SIZE)) &
  1212. (~PAGE_DIRECTORY_MASK));
  1213. if (MmNonPagedSystemStart < MM_LOWEST_NONPAGED_SYSTEM_START) {
  1214. MmNonPagedSystemStart = MM_LOWEST_NONPAGED_SYSTEM_START;
  1215. MmNumberOfSystemPtes = (((ULONG)NonPagedPoolStartVirtual -
  1216. (ULONG)MmNonPagedSystemStart) >> PAGE_SHIFT)-1;
  1217. ASSERT (MmNumberOfSystemPtes > 1000);
  1218. }
  1219. if (MmVirtualBias == 0) {
  1220. if ((MmSizeOfPagedPoolInBytes > ((ULONG) MmNonPagedSystemStart - (ULONG) MM_PAGED_POOL_START)) &&
  1221. (MmPagedPoolMaximumDesired == FALSE)) {
  1222. ULONG OldNonPagedSystemStart;
  1223. ULONG ExtraPtesNeeded;
  1224. ULONG InitialPagedPoolSize;
  1225. MmSizeOfPagedPoolInBytes = MI_ROUND_TO_SIZE (MmSizeOfPagedPoolInBytes, MM_VA_MAPPED_BY_PDE);
  1226. //
  1227. // Recalculate the starting PDE for the system PTE pool which is
  1228. // right below the nonpaged pool. Leave at least 3000 high
  1229. // system PTEs.
  1230. //
  1231. OldNonPagedSystemStart = (ULONG) MmNonPagedSystemStart;
  1232. NonPagedSystemStart = ((ULONG)NonPagedPoolStartVirtual -
  1233. ((3000 + 1) * PAGE_SIZE)) &
  1234. ~PAGE_DIRECTORY_MASK;
  1235. if (NonPagedSystemStart < (ULONG) MM_LOWEST_NONPAGED_SYSTEM_START) {
  1236. NonPagedSystemStart = (ULONG) MM_LOWEST_NONPAGED_SYSTEM_START;
  1237. }
  1238. InitialPagedPoolSize = NonPagedSystemStart - (ULONG) MM_PAGED_POOL_START;
  1239. if (MmSizeOfPagedPoolInBytes > InitialPagedPoolSize) {
  1240. MmSizeOfPagedPoolInBytes = InitialPagedPoolSize;
  1241. }
  1242. else {
  1243. NonPagedSystemStart = ((ULONG) MM_PAGED_POOL_START +
  1244. MmSizeOfPagedPoolInBytes);
  1245. ASSERT ((NonPagedSystemStart & PAGE_DIRECTORY_MASK) == 0);
  1246. ASSERT (NonPagedSystemStart >= (ULONG) MM_LOWEST_NONPAGED_SYSTEM_START);
  1247. }
  1248. ASSERT (NonPagedSystemStart >= OldNonPagedSystemStart);
  1249. ExtraPtesNeeded = (NonPagedSystemStart - OldNonPagedSystemStart) >> PAGE_SHIFT;
  1250. //
  1251. // Note the PagedPoolMaximumDesired local is deliberately not set
  1252. // because we don't want or need to delete PDEs later in this
  1253. // routine. The exact amount has been allocated here.
  1254. // The global MmPagedPoolMaximumDesired is set because other parts
  1255. // of memory management use it to finish sizing properly.
  1256. //
  1257. MmPagedPoolMaximumDesired = TRUE;
  1258. MmNonPagedSystemStart = (PVOID) NonPagedSystemStart;
  1259. MmNumberOfSystemPtes = (((ULONG)NonPagedPoolStartVirtual -
  1260. (ULONG)NonPagedSystemStart) >> PAGE_SHIFT)-1;
  1261. }
  1262. //
  1263. // If the kernel image has not been biased to allow for 3gb of user
  1264. // space, the host processor supports large pages, and the number of
  1265. // physical pages is greater than the threshold, then map the kernel
  1266. // image and HAL into a large page.
  1267. //
  1268. if (MxMapLargePages & MI_LARGE_KERNEL_HAL) {
  1269. //
  1270. // Add the kernel and HAL ranges to the large page ranges.
  1271. //
  1272. i = 0;
  1273. NextEntry = LoaderBlock->LoadOrderListHead.Flink;
  1274. for ( ; NextEntry != &LoaderBlock->LoadOrderListHead; NextEntry = NextEntry->Flink) {
  1275. DataTableEntry = CONTAINING_RECORD (NextEntry,
  1276. KLDR_DATA_TABLE_ENTRY,
  1277. InLoadOrderLinks);
  1278. MiLargeVaRanges[MiLargeVaRangeIndex].VirtualAddress = DataTableEntry->DllBase;
  1279. MiLargeVaRanges[MiLargeVaRangeIndex].EndVirtualAddress =
  1280. (PVOID)((ULONG_PTR)DataTableEntry->DllBase + DataTableEntry->SizeOfImage - 1);
  1281. MiLargeVaRangeIndex += 1;
  1282. i += 1;
  1283. if (i == 2) {
  1284. break;
  1285. }
  1286. }
  1287. }
  1288. //
  1289. // If the processor supports large pages and the descriptor has
  1290. // enough contiguous pages for the entire PFN database then use
  1291. // large pages to map it. Regardless of large page support, put
  1292. // the PFN database in low virtual memory just above the loaded images.
  1293. //
  1294. PagesLeft = MxPagesAvailable ();
  1295. if ((MxMapLargePages & (MI_LARGE_PFN_DATABASE | MI_LARGE_NONPAGED_POOL))&&
  1296. (PagesLeft > MxPfnAllocation + (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) + ((32 * 1024 * 1024) >> PAGE_SHIFT))) {
  1297. //
  1298. // Allocate the PFN database using large pages as there is enough
  1299. // physically contiguous and decently aligned memory available.
  1300. //
  1301. PfnInLargePages = TRUE;
  1302. FirstPfnDatabasePage = MxGetNextPage (MxPfnAllocation);
  1303. MmPfnDatabase = (PMMPFN)(MM_KSEG0_BASE | MmBootImageSize);
  1304. ASSERT (((ULONG_PTR)MmPfnDatabase & (MM_VA_MAPPED_BY_PDE - 1)) == 0);
  1305. MmPfnDatabase = (PMMPFN) ((ULONG_PTR)MmPfnDatabase + (((FirstPfnDatabasePage & (MM_PFN_MAPPED_BY_PDE - 1))) << PAGE_SHIFT));
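//
// Illustrative example: if the PFN database pages were allocated starting at
// physical frame 0x1234, the low bits within the large page (0x234 frames)
// are added to the PDE-aligned virtual base so the virtual and physical
// offsets agree and the whole range can later be mapped by large PDEs.
//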
  1306. //
  1307. // Add the PFN database range to the large page ranges.
  1308. //
  1309. MiLargeVaRanges[MiLargeVaRangeIndex].VirtualAddress = MmPfnDatabase;
  1310. MiLargeVaRanges[MiLargeVaRangeIndex].EndVirtualAddress =
  1311. (PVOID) (((ULONG_PTR)MmPfnDatabase + (MxPfnAllocation << PAGE_SHIFT)) - 1);
  1312. MiLargeVaRangeIndex += 1;
  1313. }
  1314. else {
  1315. MxMapLargePages &= ~(MI_LARGE_PFN_DATABASE | MI_LARGE_NONPAGED_POOL);
  1316. MmPfnDatabase = (PMMPFN)(MM_KSEG0_BASE | MmBootImageSize);
  1317. }
  1318. //
  1319. // The system is booted 2GB, initial nonpaged pool immediately
  1320. // follows the PFN database.
  1321. //
  1322. // Since the PFN database and the initial nonpaged pool are physically
  1323. // adjacent, a single PDE is shared, thus reducing the number of pages
  1324. // that otherwise might need to be marked as must-be-cachable.
  1325. //
  1326. // Calculate the correct initial nonpaged pool virtual address and
  1327. // maximum size now. Don't allocate pages for any other use at this
  1328. // point to guarantee that the PFN database and nonpaged pool are
  1329. // physically contiguous so large pages can be enabled.
  1330. //
  1331. MmNonPagedPoolStart = (PVOID)((ULONG_PTR)MmPfnDatabase + (MxPfnAllocation << PAGE_SHIFT));
  1332. //
  1333. // Systems with extremely large PFN databases (ie: spanning 64GB of RAM)
  1334. // and bumped session space sizes may require a reduction in the initial
  1335. // nonpaged pool size in order to fit.
  1336. //
  1337. NumberOfBytes = MiSystemViewStart - (ULONG_PTR) MmNonPagedPoolStart;
  1338. if (MmSizeOfNonPagedPoolInBytes > NumberOfBytes) {
  1339. MmMaximumNonPagedPoolInBytes -= (MmSizeOfNonPagedPoolInBytes - NumberOfBytes);
  1340. MmSizeOfNonPagedPoolInBytes = NumberOfBytes;
  1341. }
  1342. if (PagedPoolMaximumDesired == TRUE) {
  1343. //
  1344. // Maximum paged pool was requested. This means slice away most of
  1345. // the system PTEs being used at the high end of the virtual address
  1346. // space and use that address range for more paged pool instead.
  1347. //
  1348. ASSERT (MiIsVirtualAddressOnPdeBoundary (MmNonPagedSystemStart));
  1349. PointerPde = MiGetPdeAddress (NonPagedPoolStartVirtual);
  1350. PointerPde -= 2;
  1351. MmNonPagedSystemStart = MiGetVirtualAddressMappedByPde (PointerPde);
  1352. MmNumberOfSystemPtes = (((ULONG)MmNonPagedPoolStart -
  1353. (ULONG)MmNonPagedSystemStart) >> PAGE_SHIFT)-1;
  1354. }
  1355. }
  1356. else {
  1357. if ((MxPfnAllocation + 500) * PAGE_SIZE > MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes) {
  1358. //
  1359. // Recarve portions of the initial and expansion nonpaged pools
  1360. // so enough expansion PTEs will be available to map the PFN
  1361. // database on large memory systems that are booted /3GB.
  1362. //
  1363. if ((MxPfnAllocation + 500) * PAGE_SIZE < MmSizeOfNonPagedPoolInBytes) {
  1364. MmSizeOfNonPagedPoolInBytes -= ((MxPfnAllocation + 500) * PAGE_SIZE);
  1365. }
  1366. }
  1367. }
  1368. //
  1369. // Allocate pages and fill in the PTEs for the initial nonpaged pool.
  1370. //
  1371. PagesNeeded = MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT;
  1372. //
  1373. // Don't ask for more than is reasonable both in terms of physical pages
  1374. // left and virtual space available.
  1375. //
  1376. PagesLeft = MxPagesAvailable ();
  1377. if (PagesNeeded > PagesLeft) {
  1378. PagesNeeded = PagesLeft;
  1379. }
  1380. if (MxMapLargePages & MI_LARGE_NONPAGED_POOL) {
  1381. ASSERT (MmVirtualBias == 0);
  1382. //
  1383. // The PFN database has already been allocated (but not mapped).
  1384. // Shortly we will transition from the descriptors to the real PFN
  1385. // database so eat up the slush now.
  1386. //
  1387. VirtualAddress = (PVOID) ((ULONG_PTR)NonPagedPoolStartLow + (PagesNeeded << PAGE_SHIFT));
  1388. if (((ULONG_PTR)VirtualAddress & (MM_VA_MAPPED_BY_PDE - 1)) &&
  1389. (PagesLeft - PagesNeeded > MM_PFN_MAPPED_BY_PDE) &&
  1390. (MmSizeOfNonPagedPoolInBytes + MM_VA_MAPPED_BY_PDE < MM_MAX_INITIAL_NONPAGED_POOL)) {
  1391. //
  1392. // Expand the initial nonpaged pool to use the slush efficiently.
  1393. //
  1394. VirtualAddress = (PVOID) MI_ROUND_TO_SIZE ((ULONG_PTR)VirtualAddress, MM_VA_MAPPED_BY_PDE);
  1395. PagesNeeded = ((ULONG_PTR)VirtualAddress - (ULONG_PTR)NonPagedPoolStartLow) >> PAGE_SHIFT;
  1396. }
  1397. }
  1398. //
  1399. // Update various globals since the size of initial pool may have
  1400. // changed.
  1401. //
  1402. if (MmSizeOfNonPagedPoolInBytes != (PagesNeeded << PAGE_SHIFT)) {
  1403. MmMaximumNonPagedPoolInBytes -= (MmSizeOfNonPagedPoolInBytes - (PagesNeeded << PAGE_SHIFT));
  1404. MmSizeOfNonPagedPoolInBytes = PagesNeeded << PAGE_SHIFT;
  1405. }
  1406. MmMaximumNonPagedPoolInPages = (MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT);
  1407. //
  1408. // Allocate the actual pages for the initial nonpaged pool before
  1409. // any other Mx allocations as these will be sharing the large page
  1410. // with the PFN database when large pages are enabled.
  1411. //
  1412. PageFrameIndex = MxGetNextPage (PagesNeeded);
  1413. FirstNonPagedPoolPage = PageFrameIndex;
  1414. //
  1415. // Set up page table pages to map system PTEs and the expansion nonpaged
  1416. // pool. If the system was booted /3GB, then the initial nonpaged pool
  1417. // is mapped here as well.
  1418. //
  1419. StartPde = MiGetPdeAddress (MmNonPagedSystemStart);
  1420. EndPde = MiGetPdeAddress ((PVOID)((PCHAR)MmNonPagedPoolEnd - 1));
  1421. while (StartPde <= EndPde) {
  1422. ASSERT (StartPde->u.Hard.Valid == 0);
  1423. //
  1424. // Map in a page table page.
  1425. //
  1426. TempPde.u.Hard.PageFrameNumber = MxGetNextPage (1);
  1427. *StartPde = TempPde;
  1428. PointerPte = MiGetVirtualAddressMappedByPte (StartPde);
  1429. RtlZeroMemory (PointerPte, PAGE_SIZE);
  1430. StartPde += 1;
  1431. }
  1432. if (MmVirtualBias == 0) {
  1433. if (MxMapLargePages & MI_LARGE_PFN_DATABASE) {
  1434. ASSERT (FirstNonPagedPoolPage == FirstPfnDatabasePage + MxPfnAllocation);
  1435. }
  1436. //
  1437. // Allocate the page table pages to map the PFN database and the
  1438. // initial nonpaged pool now. If the system switches to large
  1439. // pages in Phase 1, these pages will be discarded then.
  1440. //
  1441. StartPde = MiGetPdeAddress (MmPfnDatabase);
  1442. VirtualAddress = (PVOID) ((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1);
  1443. EndPde = MiGetPdeAddress (VirtualAddress);
  1444. //
  1445. // Use any extra virtual address space between the top of initial
  1446. // nonpaged pool and session space for additional system PTEs or
  1447. // caching.
  1448. //
  1449. PointerPde = EndPde + 1;
  1450. EndPde = MiGetPdeAddress (MiSystemViewStart - 1);
  1451. if (PointerPde <= EndPde) {
  1452. //
1453. // There is available virtual space - consume everything up
1454. // to the system view area (which is always rounded to a page
1455. // directory boundary) to avoid wasting valuable virtual
1456. // address space.
  1457. //
  1458. MiExtraResourceStart = (ULONG) MiGetVirtualAddressMappedByPde (PointerPde);
  1459. MiExtraResourceEnd = MiSystemViewStart;
  1460. MiNumberOfExtraSystemPdes = EndPde - PointerPde + 1;
  1461. //
1462. // Mark the new range as system PTEs if maximum PTEs were requested,
1463. // Terminal Server is in application server mode, or driver
1464. // verification/special pool is enabled. Otherwise, if large system
1465. // caching was selected, use the range for the cache. Default to
1466. // PTEs if none of the above apply.
  1467. //
  1468. if ((MiRequestedSystemPtes == (ULONG)-1) ||
  1469. (ExpMultiUserTS == TRUE) ||
  1470. (MmVerifyDriverBufferLength != (ULONG)-1) ||
  1471. ((MmSpecialPoolTag != 0) && (MmSpecialPoolTag != (ULONG)-1))) {
  1472. ExtraSystemCacheViews = FALSE;
  1473. }
  1474. if (ExtraSystemCacheViews == TRUE) {
  1475. //
  1476. // The system is configured to favor large system caching,
  1477. // so share the remaining virtual address space between the
  1478. // system cache and system PTEs.
  1479. //
  1480. MiMaximumSystemCacheSizeExtra =
  1481. (MiNumberOfExtraSystemPdes * 5) / 6;
  1482. MiExtraPtes1 = MiNumberOfExtraSystemPdes -
  1483. MiMaximumSystemCacheSizeExtra;
  1484. MiExtraPtes1 *= (MM_VA_MAPPED_BY_PDE / PAGE_SIZE);
  1485. MiMaximumSystemCacheSizeExtra *= MM_VA_MAPPED_BY_PDE;
  1486. MiExtraPtes1Pointer = MiGetPteAddress (MiExtraResourceStart +
  1487. MiMaximumSystemCacheSizeExtra);
  1488. MiMaximumSystemCacheSizeExtra >>= PAGE_SHIFT;
  1489. }
  1490. else {
  1491. MiExtraPtes1 = BYTES_TO_PAGES(MiExtraResourceEnd - MiExtraResourceStart);
  1492. MiExtraPtes1Pointer = MiGetPteAddress (MiExtraResourceStart);
  1493. }
  1494. }
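//
// For illustration (non-PAE numbers assumed): the split above gives about
// 5/6 of the extra PDEs to the system cache and the rest to system PTEs.
// With 12 spare PDEs of 4MB each, 10 PDEs (40MB) would extend the cache
// and 2 PDEs (2 * 1024 = 2048 PTEs) would land in MiExtraPtes1.
//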
  1495. //
  1496. // Allocate and initialize the page table pages.
  1497. //
  1498. while (StartPde <= EndPde) {
  1499. ASSERT (StartPde->u.Hard.Valid == 0);
  1500. if (StartPde->u.Hard.Valid == 0) {
  1501. //
  1502. // Map in a page directory page.
  1503. //
  1504. TempPde.u.Hard.PageFrameNumber = MxGetNextPage (1);
  1505. *StartPde = TempPde;
  1506. PointerPte = MiGetVirtualAddressMappedByPte (StartPde);
  1507. RtlZeroMemory (PointerPte, PAGE_SIZE);
  1508. }
  1509. StartPde += 1;
  1510. }
  1511. if (MiUseMaximumSystemSpace != 0) {
  1512. //
  1513. // Use the 1GB->2GB virtual range for even more system PTEs.
  1514. // Note the shared user data PTE (and PDE) must be left user
1515. // accessible, but everything else is kernel mode only.
  1516. //
  1517. MiExtraPtes2 = BYTES_TO_PAGES(MiUseMaximumSystemSpaceEnd - MiUseMaximumSystemSpace);
  1518. StartPde = MiGetPdeAddress (MiUseMaximumSystemSpace);
  1519. EndPde = MiGetPdeAddress (MiUseMaximumSystemSpaceEnd);
  1520. while (StartPde < EndPde) {
  1521. ASSERT (StartPde->u.Hard.Valid == 0);
  1522. //
  1523. // Map in a page directory page.
  1524. //
  1525. TempPde.u.Hard.PageFrameNumber = MxGetNextPage (1);
  1526. *StartPde = TempPde;
  1527. PointerPte = MiGetVirtualAddressMappedByPte (StartPde);
  1528. RtlZeroMemory (PointerPte, PAGE_SIZE);
  1529. StartPde += 1;
  1530. MiMaximumSystemExtraSystemPdes += 1;
  1531. }
  1532. ASSERT (MiExtraPtes2 == MiMaximumSystemExtraSystemPdes * PTE_PER_PAGE);
  1533. }
  1534. //
  1535. // The virtual address, length and page tables to map the initial
  1536. // nonpaged pool are already allocated - just fill in the mappings.
  1537. //
  1538. MmSubsectionBase = (ULONG)MmNonPagedPoolStart;
  1539. PointerPte = MiGetPteAddress (MmNonPagedPoolStart);
  1540. LastPte = MiGetPteAddress ((ULONG)MmNonPagedPoolStart +
  1541. MmSizeOfNonPagedPoolInBytes);
  1542. if (MxMapLargePages & (MI_LARGE_PFN_DATABASE | MI_LARGE_NONPAGED_POOL)) {
  1543. //
  1544. // Since every page table page needs to be filled, ensure PointerPte
  1545. // and LastPte span entire page table pages, and adjust
  1546. // PageFrameIndex to account for this.
  1547. //
  1548. if (!MiIsPteOnPdeBoundary(PointerPte)) {
  1549. PageFrameIndex -= (BYTE_OFFSET (PointerPte) / sizeof (MMPTE));
  1550. PointerPte = PAGE_ALIGN (PointerPte);
  1551. }
  1552. if (!MiIsPteOnPdeBoundary(LastPte)) {
  1553. LastPte = (PMMPTE) (PAGE_ALIGN (LastPte)) + PTE_PER_PAGE;
  1554. }
  1555. //
  1556. // Add the initial nonpaged pool range to the large page ranges.
  1557. //
  1558. MiLargeVaRanges[MiLargeVaRangeIndex].VirtualAddress = MmNonPagedPoolStart;
  1559. MiLargeVaRanges[MiLargeVaRangeIndex].EndVirtualAddress =
  1560. (PVOID) ((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1);
  1561. MiLargeVaRangeIndex += 1;
  1562. }
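//
// For illustration (non-PAE numbers assumed): if MmNonPagedPoolStart began
// 1MB into a 4MB page table's span, PointerPte was backed up 256 PTEs to
// the start of that table and PageFrameIndex by the same amount, so the
// entire table receives valid, physically contiguous mappings. Keeping
// whole page tables filled lets the Phase 1 switch to large pages discard
// these small-page tables cleanly.
//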
  1563. MI_ADD_EXECUTE_TO_VALID_PTE_IF_PAE (TempPte);
  1564. while (PointerPte < LastPte) {
  1565. ASSERT (PointerPte->u.Hard.Valid == 0);
  1566. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1567. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  1568. PointerPte += 1;
  1569. PageFrameIndex += 1;
  1570. }
  1571. TempPte = ValidKernelPte;
  1572. MmNonPagedPoolExpansionStart = NonPagedPoolStartVirtual;
  1573. }
  1574. else {
  1575. PointerPte = MiGetPteAddress (MmNonPagedPoolStart);
  1576. LastPte = MiGetPteAddress((ULONG)MmNonPagedPoolStart +
  1577. MmSizeOfNonPagedPoolInBytes - 1);
  1578. ASSERT (PagesNeeded == (PFN_NUMBER)(LastPte - PointerPte + 1));
  1579. MI_ADD_EXECUTE_TO_VALID_PTE_IF_PAE (TempPte);
  1580. while (PointerPte <= LastPte) {
  1581. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1582. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  1583. PointerPte += 1;
  1584. PageFrameIndex += 1;
  1585. }
  1586. TempPte = ValidKernelPte;
  1587. MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)NonPagedPoolStartVirtual +
  1588. MmSizeOfNonPagedPoolInBytes);
  1589. //
  1590. // When booted /3GB, if /USERVA was specified then use any leftover
1591. // virtual space between 2GB and 3GB for extra system PTEs.
  1592. //
  1593. if (MiUseMaximumSystemSpace != 0) {
  1594. MiExtraPtes2 = BYTES_TO_PAGES(MiUseMaximumSystemSpaceEnd - MiUseMaximumSystemSpace);
  1595. StartPde = MiGetPdeAddress (MiUseMaximumSystemSpace);
  1596. EndPde = MiGetPdeAddress (MiUseMaximumSystemSpaceEnd);
  1597. while (StartPde < EndPde) {
  1598. ASSERT (StartPde->u.Hard.Valid == 0);
  1599. //
  1600. // Map in a page directory page.
  1601. //
  1602. TempPde.u.Hard.PageFrameNumber = MxGetNextPage (1);
  1603. *StartPde = TempPde;
  1604. PointerPte = MiGetVirtualAddressMappedByPte (StartPde);
  1605. RtlZeroMemory (PointerPte, PAGE_SIZE);
  1606. StartPde += 1;
  1607. MiMaximumSystemExtraSystemPdes += 1;
  1608. }
  1609. ASSERT (MiExtraPtes2 == MiMaximumSystemExtraSystemPdes * PTE_PER_PAGE);
  1610. }
  1611. }
  1612. //
  1613. // There must be at least one page of system PTEs before the expanded
  1614. // nonpaged pool.
  1615. //
  1616. ASSERT (MiGetPteAddress(MmNonPagedSystemStart) < MiGetPteAddress(MmNonPagedPoolExpansionStart));
  1617. //
  1618. // Non-paged pages now exist, build the pool structures.
  1619. //
  1620. MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
  1621. if (MmVirtualBias != 0) {
  1622. ULONG NonPagedVa;
  1623. NonPagedVa = (ULONG) MmNonPagedPoolEnd - (ULONG) MmNonPagedPoolExpansionStart;
  1624. ASSERT (NonPagedVa >= (MxPfnAllocation << PAGE_SHIFT));
  1625. //
  1626. // Add one to account for the system PTE top guard page VA.
  1627. //
  1628. NonPagedVa -= ((MxPfnAllocation + 1) << PAGE_SHIFT);
  1629. if (NonPagedVa > MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  1630. NonPagedVa = MM_MAX_ADDITIONAL_NONPAGED_POOL;
  1631. }
  1632. MmMaximumNonPagedPoolInBytes = NonPagedVa + (MxPfnAllocation << PAGE_SHIFT) + MmSizeOfNonPagedPoolInBytes;
  1633. MmMaximumNonPagedPoolInPages = (MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT);
  1634. }
  1635. MiInitializeNonPagedPool ();
  1636. MiInitializeNonPagedPoolThresholds ();
  1637. //
  1638. // Before nonpaged pool can be used, the PFN database must
  1639. // be built. This is due to the fact that the start and end of
  1640. // allocation bits for nonpaged pool are maintained in the
  1641. // PFN elements for the corresponding pages.
  1642. //
  1643. if (MxMapLargePages & MI_LARGE_PFN_DATABASE) {
  1644. //
  1645. // The physical pages to be used for the PFN database have already
  1646. // been allocated. Initialize their mappings now.
  1647. //
  1648. //
  1649. // Initialize the page table mappings (the directory mappings are
  1650. // already initialized) for the PFN database until the switch to large
  1651. // pages occurs in Phase 1.
  1652. //
  1653. PointerPte = MiGetPteAddress (MmPfnDatabase);
  1654. BasePte = MiGetVirtualAddressMappedByPte (MiGetPdeAddress (MmPfnDatabase));
  1655. LastPte = MiGetPteAddress ((ULONG_PTR)MmPfnDatabase + (MxPfnAllocation << PAGE_SHIFT));
  1656. if (!MiIsPteOnPdeBoundary(LastPte)) {
  1657. LastPte = MiGetVirtualAddressMappedByPte (MiGetPteAddress (LastPte) + 1);
  1658. }
  1659. PageFrameIndex = FirstPfnDatabasePage - (PointerPte - BasePte);
  1660. PointerPte = BasePte;
  1661. while (PointerPte < LastPte) {
  1662. ASSERT ((PointerPte->u.Hard.Valid == 0) ||
  1663. (PointerPte->u.Hard.PageFrameNumber == PageFrameIndex));
  1664. if (MiIsPteOnPdeBoundary(PointerPte)) {
  1665. ASSERT ((PageFrameIndex & (MM_PFN_MAPPED_BY_PDE - 1)) == 0);
  1666. }
  1667. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1668. if (PointerPte->u.Hard.Valid == 0) {
  1669. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  1670. }
  1671. else {
  1672. MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, TempPte);
  1673. }
  1674. PointerPte += 1;
  1675. PageFrameIndex += 1;
  1676. }
  1677. RtlZeroMemory (MmPfnDatabase, MxPfnAllocation << PAGE_SHIFT);
  1678. //
  1679. // The PFN database was allocated in large pages. Since space was left
  1680. // for it virtually (in the nonpaged pool expansion PTEs), remove this
  1681. // now unused space if it can cause PTE encoding to exceed the 27 bits.
  1682. //
  1683. if (MmTotalFreeSystemPtes[NonPagedPoolExpansion] >
  1684. (MM_MAX_ADDITIONAL_NONPAGED_POOL >> PAGE_SHIFT)) {
  1685. //
  1686. // Reserve the expanded pool PTEs so they cannot be used.
  1687. //
  1688. ULONG PfnDatabaseSpace;
  1689. PfnDatabaseSpace = MmTotalFreeSystemPtes[NonPagedPoolExpansion] -
  1690. (MM_MAX_ADDITIONAL_NONPAGED_POOL >> PAGE_SHIFT);
  1691. if (MiReserveSystemPtes (PfnDatabaseSpace, NonPagedPoolExpansion) == NULL) {
  1692. MiIssueNoPtesBugcheck (PfnDatabaseSpace, NonPagedPoolExpansion);
  1693. }
  1694. //
  1695. // Adjust the end of nonpaged pool to reflect this reservation.
  1696. // This is so the entire nonpaged pool expansion space is available
  1697. // not just for general purpose consumption, but also for subsection
  1698. // encoding into protoptes when subsections are allocated from the
  1699. // very end of the expansion range.
  1700. //
  1701. MmNonPagedPoolEnd = (PVOID)((PCHAR)MmNonPagedPoolEnd - PfnDatabaseSpace * PAGE_SIZE);
  1702. }
  1703. else {
  1704. //
  1705. // Allocate one more PTE just below the PFN database. This provides
  1706. // protection against the caller of the first real nonpaged
  1707. // expansion allocation in case he accidentally overruns his pool
  1708. // block. (We'll trap instead of corrupting the PFN database).
  1709. // This also allows us to freely increment in MiFreePoolPages
  1710. // without having to worry about a valid PTE just after the end of
  1711. // the highest nonpaged pool allocation.
  1712. //
  1713. if (MiReserveSystemPtes (1, NonPagedPoolExpansion) == NULL) {
  1714. MiIssueNoPtesBugcheck (1, NonPagedPoolExpansion);
  1715. }
  1716. }
  1717. }
  1718. else {
  1719. ULONG FreeNextPhysicalPage;
  1720. ULONG FreeNumberOfPages;
  1721. //
  1722. // Calculate the start of the PFN database (it starts at physical
  1723. // page zero, even if the lowest physical page is not zero).
  1724. //
  1725. if (MmVirtualBias == 0) {
  1726. ASSERT (MmPfnDatabase != NULL);
  1727. PointerPte = MiGetPteAddress (MmPfnDatabase);
  1728. }
  1729. else {
  1730. ASSERT (MxPagesAvailable () >= MxPfnAllocation);
  1731. PointerPte = MiReserveSystemPtes (MxPfnAllocation,
  1732. NonPagedPoolExpansion);
  1733. if (PointerPte == NULL) {
  1734. MiIssueNoPtesBugcheck (MxPfnAllocation, NonPagedPoolExpansion);
  1735. }
  1736. MmPfnDatabase = (PMMPFN)(MiGetVirtualAddressMappedByPte (PointerPte));
  1737. //
  1738. // Adjust the end of nonpaged pool to reflect the PFN database
  1739. // allocation. This is so the entire nonpaged pool expansion space
  1740. // is available not just for general purpose consumption, but also
  1741. // for subsection encoding into protoptes when subsections are
  1742. // allocated from the very beginning of the initial nonpaged pool
  1743. // range.
  1744. //
  1745. MmMaximumNonPagedPoolInBytes -= (MxPfnAllocation << PAGE_SHIFT);
  1746. MmMaximumNonPagedPoolInPages = (MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT);
  1747. MmNonPagedPoolEnd = (PVOID)MmPfnDatabase;
  1748. //
  1749. // Allocate one more PTE just below the PFN database. This provides
  1750. // protection against the caller of the first real nonpaged
  1751. // expansion allocation in case he accidentally overruns his pool
  1752. // block. (We'll trap instead of corrupting the PFN database).
  1753. // This also allows us to freely increment in MiFreePoolPages
  1754. // without having to worry about a valid PTE just after the end of
  1755. // the highest nonpaged pool allocation.
  1756. //
  1757. if (MiReserveSystemPtes (1, NonPagedPoolExpansion) == NULL) {
  1758. MiIssueNoPtesBugcheck (1, NonPagedPoolExpansion);
  1759. }
  1760. }
  1761. //
  1762. // Go through the memory descriptors and for each physical page make
  1763. // sure the PFN database has a valid PTE to map it. This allows
  1764. // machines with sparse physical memory to have a minimal PFN database.
  1765. //
  1766. FreeNextPhysicalPage = MxFreeDescriptor->BasePage;
  1767. FreeNumberOfPages = MxFreeDescriptor->PageCount;
  1768. PagesLeft = 0;
  1769. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  1770. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  1771. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  1772. MEMORY_ALLOCATION_DESCRIPTOR,
  1773. ListEntry);
  1774. if ((MemoryDescriptor->MemoryType == LoaderFirmwarePermanent) ||
  1775. (MemoryDescriptor->MemoryType == LoaderBBTMemory) ||
  1776. (MemoryDescriptor->MemoryType == LoaderSpecialMemory)) {
  1777. //
  1778. // Skip these ranges.
  1779. //
  1780. NextMd = MemoryDescriptor->ListEntry.Flink;
  1781. continue;
  1782. }
  1783. //
  1784. // Temporarily add back in the memory allocated since Phase 0
  1785. // began so PFN entries for it will be created and mapped.
  1786. //
  1787. // Note actual PFN entry allocations must be done carefully as
  1788. // memory from the descriptor itself could get used to map
1789. // the PFNs for the descriptor!
  1790. //
  1791. if (MemoryDescriptor == MxFreeDescriptor) {
  1792. BasePage = MxOldFreeDescriptor.BasePage;
  1793. PageCount = MxOldFreeDescriptor.PageCount;
  1794. }
  1795. else {
  1796. BasePage = MemoryDescriptor->BasePage;
  1797. PageCount = MemoryDescriptor->PageCount;
  1798. }
  1799. PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(BasePage));
  1800. LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT(
  1801. BasePage + PageCount))) - 1);
  1802. while (PointerPte <= LastPte) {
  1803. if (PointerPte->u.Hard.Valid == 0) {
  1804. TempPte.u.Hard.PageFrameNumber = FreeNextPhysicalPage;
  1805. ASSERT (FreeNumberOfPages != 0);
  1806. FreeNextPhysicalPage += 1;
  1807. FreeNumberOfPages -= 1;
  1808. if (FreeNumberOfPages == 0) {
  1809. KeBugCheckEx (INSTALL_MORE_MEMORY,
  1810. MmNumberOfPhysicalPages,
  1811. FreeNumberOfPages,
  1812. MxOldFreeDescriptor.PageCount,
  1813. 1);
  1814. }
  1815. PagesLeft += 1;
  1816. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  1817. RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
  1818. PAGE_SIZE);
  1819. }
  1820. PointerPte += 1;
  1821. }
  1822. NextMd = MemoryDescriptor->ListEntry.Flink;
  1823. }
  1824. //
  1825. // Update the global counts - this would have been tricky to do while
  1826. // removing pages from them as we looped above.
  1827. //
  1828. // Later we will walk the memory descriptors and add pages to the free
  1829. // list in the PFN database.
  1830. //
  1831. // To do this correctly:
  1832. //
  1833. // The FreeDescriptor fields must be updated so the PFN database
  1834. // consumption isn't added to the freelist.
  1835. //
  1836. MxFreeDescriptor->BasePage = FreeNextPhysicalPage;
  1837. MxFreeDescriptor->PageCount = FreeNumberOfPages;
  1838. }
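//
// For illustration: only PTEs covering PFN entries for pages present in
// some descriptor were materialized above, so sparse physical memory pays
// only for the database slices it actually needs. Assuming a PFN entry of
// roughly two dozen bytes, one 4K mapping page covers entries for on the
// order of 150-170 physical pages (roughly 600-700KB of RAM described per
// database page).
//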
  1839. #if defined (_X86PAE_)
  1840. for (i = 0; i < 32; i += 1) {
  1841. j = i & 7;
  1842. switch (j) {
  1843. case MM_READONLY:
  1844. case MM_READWRITE:
  1845. case MM_WRITECOPY:
  1846. MmProtectToPteMask[i] |= MmPaeMask;
  1847. break;
  1848. default:
  1849. break;
  1850. }
  1851. }
  1852. #endif
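//
// Note (assumption): MmPaeMask carries the PAE-only PTE bits (typically the
// no-execute bit when enabled), so the loop above folds it into the
// protection-to-PTE translation entries for the non-executable protections
// (read-only, read/write, write-copy) in each of the four 8-entry banks,
// leaving the executable protections alone.
//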
  1853. //
  1854. // Initialize support for colored pages.
  1855. //
  1856. MmFreePagesByColor[0] = (PMMCOLOR_TABLES)
  1857. &MmPfnDatabase[MmHighestPossiblePhysicalPage + 1];
  1858. MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
  1859. //
  1860. // Make sure the PTEs are mapped.
  1861. //
  1862. PointerPte = MiGetPteAddress (&MmFreePagesByColor[0][0]);
  1863. LastPte = MiGetPteAddress (
  1864. (PVOID)((PCHAR)&MmFreePagesByColor[1][MmSecondaryColors] - 1));
  1865. while (PointerPte <= LastPte) {
  1866. if (PointerPte->u.Hard.Valid == 0) {
  1867. TempPte.u.Hard.PageFrameNumber = MxGetNextPage (1);
  1868. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  1869. RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
  1870. PAGE_SIZE);
  1871. }
  1872. PointerPte += 1;
  1873. }
  1874. for (i = 0; i < MmSecondaryColors; i += 1) {
  1875. MmFreePagesByColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
  1876. MmFreePagesByColor[ZeroedPageList][i].Blink = (PVOID) MM_EMPTY_LIST;
  1877. MmFreePagesByColor[ZeroedPageList][i].Count = 0;
  1878. MmFreePagesByColor[FreePageList][i].Flink = MM_EMPTY_LIST;
  1879. MmFreePagesByColor[FreePageList][i].Blink = (PVOID) MM_EMPTY_LIST;
  1880. MmFreePagesByColor[FreePageList][i].Count = 0;
  1881. }
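//
// Each colored list starts out empty: MM_EMPTY_LIST is the "no page"
// sentinel for Flink/Blink and the count is zero. Pages are threaded onto
// the per-color zeroed and free lists as they are inserted into the free
// list later in this routine.
//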
  1882. //
  1883. // Add nonpaged pool to PFN database if mapped via large pages.
  1884. //
  1885. PointerPde = MiGetPdeAddress (PTE_BASE);
  1886. if ((MmNonPagedPoolStart < (PVOID)MM_SYSTEM_CACHE_END_EXTRA) &&
  1887. (MxMapLargePages & MI_LARGE_NONPAGED_POOL)) {
  1888. j = FirstNonPagedPoolPage;
  1889. Pfn1 = MI_PFN_ELEMENT (j);
  1890. i = MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT;
  1891. do {
  1892. PointerPde = MiGetPdeAddress ((ULONG_PTR)MmNonPagedPoolStart + ((j - FirstNonPagedPoolPage) << PAGE_SHIFT));
  1893. Pfn1->u4.PteFrame = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
  1894. Pfn1->PteAddress = (PMMPTE)(j << PAGE_SHIFT);
  1895. Pfn1->u2.ShareCount += 1;
  1896. Pfn1->u3.e2.ReferenceCount = 1;
  1897. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1898. Pfn1->u3.e1.CacheAttribute = MiCached;
  1899. MiDetermineNode (j, Pfn1);
  1900. j += 1;
  1901. Pfn1 += 1;
  1902. i -= 1;
  1903. } while (i != 0);
  1904. }
  1905. //
  1906. // Go through the page table entries and for any page which is valid,
  1907. // update the corresponding PFN database element.
  1908. //
  1909. Pde = MiGetPdeAddress (NULL);
  1910. va = 0;
  1911. PdeCount = PD_PER_SYSTEM * PDE_PER_PAGE;
  1912. for (i = 0; i < PdeCount; i += 1) {
  1913. //
1914. // If the kernel image has been biased to allow for 3GB of user
1915. // address space, then the first several MB of memory are
  1916. // double mapped to KSEG0_BASE and to ALTERNATE_BASE. Therefore,
  1917. // the KSEG0_BASE entries must be skipped.
  1918. //
  1919. if (MmVirtualBias != 0) {
  1920. if ((Pde >= MiGetPdeAddress(KSEG0_BASE)) &&
  1921. (Pde < MiGetPdeAddress(KSEG0_BASE + MmBootImageSize))) {
  1922. Pde += 1;
  1923. va += (ULONG)PDE_PER_PAGE * (ULONG)PAGE_SIZE;
  1924. continue;
  1925. }
  1926. }
  1927. if ((Pde->u.Hard.Valid == 1) && (Pde->u.Hard.LargePage == 0)) {
  1928. PdePage = MI_GET_PAGE_FRAME_FROM_PTE(Pde);
  1929. if (MiIsRegularMemory (LoaderBlock, PdePage)) {
  1930. Pfn1 = MI_PFN_ELEMENT(PdePage);
  1931. Pfn1->u4.PteFrame = PdePageNumber;
  1932. Pfn1->PteAddress = Pde;
  1933. Pfn1->u2.ShareCount += 1;
  1934. Pfn1->u3.e2.ReferenceCount = 1;
  1935. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1936. Pfn1->u3.e1.CacheAttribute = MiCached;
  1937. MiDetermineNode (PdePage, Pfn1);
  1938. }
  1939. else {
  1940. Pfn1 = NULL;
  1941. }
  1942. PointerPte = MiGetPteAddress (va);
  1943. //
  1944. // Set global bit.
  1945. //
  1946. TempPde.u.Long = MiDetermineUserGlobalPteMask (PointerPte) &
  1947. ~MM_PTE_ACCESS_MASK;
  1948. #if defined(_X86PAE_)
  1949. //
  1950. // Note that the PAE mode of the processor does not support the
  1951. // global bit in PDEs which map 4K page table pages.
  1952. //
  1953. TempPde.u.Hard.Global = 0;
  1954. #endif
  1955. Pde->u.Long |= TempPde.u.Long;
  1956. for (j = 0 ; j < PTE_PER_PAGE; j += 1) {
  1957. if (PointerPte->u.Hard.Valid == 1) {
  1958. PointerPte->u.Long |= MiDetermineUserGlobalPteMask (PointerPte) &
  1959. ~MM_PTE_ACCESS_MASK;
  1960. ASSERT (Pfn1 != NULL);
  1961. Pfn1->u2.ShareCount += 1;
  1962. if ((MiIsRegularMemory (LoaderBlock, (PFN_NUMBER) PointerPte->u.Hard.PageFrameNumber)) &&
  1963. ((va >= MM_KSEG2_BASE) &&
  1964. ((va < KSEG0_BASE + MmVirtualBias) ||
  1965. (va >= (KSEG0_BASE + MmVirtualBias + MmBootImageSize)))) ||
  1966. ((MmVirtualBias == 0) &&
  1967. (va >= (ULONG)MmNonPagedPoolStart) &&
  1968. (va < (ULONG)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes))) {
  1969. Pfn2 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
  1970. if (MmIsAddressValid(Pfn2) &&
  1971. MmIsAddressValid((PUCHAR)(Pfn2+1)-1)) {
  1972. Pfn2->u4.PteFrame = PdePage;
  1973. Pfn2->PteAddress = PointerPte;
  1974. Pfn2->u2.ShareCount += 1;
  1975. Pfn2->u3.e2.ReferenceCount = 1;
  1976. Pfn2->u3.e1.PageLocation = ActiveAndValid;
  1977. Pfn2->u3.e1.CacheAttribute = MiCached;
  1978. MiDetermineNode(
  1979. (PFN_NUMBER)PointerPte->u.Hard.PageFrameNumber,
  1980. Pfn2);
  1981. }
  1982. }
  1983. }
  1984. va += PAGE_SIZE;
  1985. PointerPte += 1;
  1986. }
  1987. }
  1988. else {
  1989. va += (ULONG)PDE_PER_PAGE * (ULONG)PAGE_SIZE;
  1990. }
  1991. Pde += 1;
  1992. }
  1993. KeFlushCurrentTb ();
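//
// The loop above rewrote live PDEs and PTEs in place (adding the global
// bit where appropriate), hence the KeFlushCurrentTb call to discard any
// stale translations cached without the new attributes.
//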
  1994. //
  1995. // If the lowest physical page is zero and the page is still unused, mark
  1996. // it as in use. This is because we want to find bugs where a physical
  1997. // page is specified as zero.
  1998. //
  1999. Pfn1 = &MmPfnDatabase[MmLowestPhysicalPage];
  2000. if ((MmLowestPhysicalPage == 0) && (Pfn1->u3.e2.ReferenceCount == 0)) {
  2001. ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
  2002. //
  2003. // Make the reference count non-zero and point it into a
  2004. // page directory.
  2005. //
  2006. Pde = MiGetPdeAddress (0xffffffff);
  2007. PdePage = MI_GET_PAGE_FRAME_FROM_PTE(Pde);
  2008. Pfn1->u4.PteFrame = PdePageNumber;
  2009. Pfn1->PteAddress = Pde;
  2010. Pfn1->u2.ShareCount += 1;
  2011. Pfn1->u3.e2.ReferenceCount = 0xfff0;
  2012. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  2013. Pfn1->u3.e1.CacheAttribute = MiCached;
  2014. MiDetermineNode (0, Pfn1);
  2015. }
  2016. //
  2017. // Walk through the memory descriptors and add pages to the
  2018. // free list in the PFN database. Before doing this, adjust the
  2019. // two descriptors we used so they only contain memory that can be
  2020. // freed now (ie: any memory we removed from them earlier in this routine
  2021. // without updating the descriptor for must be updated now).
  2022. //
  2023. //
  2024. // We may have taken memory out of the MxFreeDescriptor - but
  2025. // that's ok because we wouldn't want to free that memory right now
  2026. // (or ever) anyway.
  2027. //
  2028. //
  2029. // Since the LoaderBlock memory descriptors are ordered
  2030. // from low physical memory address to high, walk it backwards so the
  2031. // high physical pages go to the front of the freelists. The thinking
  2032. // is that pages initially allocated by the system are less likely to be
2033. // freed, so don't waste memory below 16MB (or 4GB) that may be needed
  2034. // by ISA drivers later.
  2035. //
  2036. NextMd = LoaderBlock->MemoryDescriptorListHead.Blink;
  2037. Bias = 0;
  2038. if (MmVirtualBias != 0) {
  2039. //
  2040. // This is nasty. You don't want to know. Cleanup needed.
  2041. //
  2042. Bias = ALTERNATE_BASE - KSEG0_BASE;
  2043. }
  2044. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  2045. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  2046. MEMORY_ALLOCATION_DESCRIPTOR,
  2047. ListEntry);
  2048. i = MemoryDescriptor->PageCount;
  2049. PageFrameIndex = MemoryDescriptor->BasePage;
  2050. //
  2051. // Ensure no frames are inserted beyond the end of the PFN
  2052. // database. This can happen for example if the system
  2053. // has > 16GB of RAM and is booted /3GB - the top of this
  2054. // routine reduces the highest physical page and then
  2055. // creates the PFN database. But the loader block still
  2056. // contains descriptions of the pages above 16GB.
  2057. //
  2058. if (PageFrameIndex > MmHighestPhysicalPage) {
  2059. NextMd = MemoryDescriptor->ListEntry.Blink;
  2060. continue;
  2061. }
  2062. if (PageFrameIndex + i > MmHighestPhysicalPage + 1) {
  2063. i = MmHighestPhysicalPage + 1 - PageFrameIndex;
  2064. MemoryDescriptor->PageCount = i;
  2065. if (i == 0) {
  2066. NextMd = MemoryDescriptor->ListEntry.Blink;
  2067. continue;
  2068. }
  2069. }
  2070. switch (MemoryDescriptor->MemoryType) {
  2071. case LoaderBad:
  2072. while (i != 0) {
  2073. MiInsertPageInList (&MmBadPageListHead, PageFrameIndex);
  2074. i -= 1;
  2075. PageFrameIndex += 1;
  2076. }
  2077. break;
  2078. case LoaderFree:
  2079. case LoaderLoadedProgram:
  2080. case LoaderFirmwareTemporary:
  2081. case LoaderOsloaderStack:
  2082. FreePfnCount = 0;
  2083. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  2084. LOCK_PFN (OldIrql);
  2085. while (i != 0) {
  2086. if (Pfn1->u3.e2.ReferenceCount == 0) {
  2087. //
  2088. // Set the PTE address to the physical page for
  2089. // virtual address alignment checking.
  2090. //
  2091. Pfn1->PteAddress =
  2092. (PMMPTE)(PageFrameIndex << PTE_SHIFT);
  2093. //
  2094. // No need to initialize Pfn1->u3.e1.CacheAttribute
  2095. // here as the freelist insertion will mark it as
  2096. // not-mapped.
  2097. //
  2098. MiDetermineNode (PageFrameIndex, Pfn1);
  2099. MiInsertPageInFreeList (PageFrameIndex);
  2100. FreePfnCount += 1;
  2101. }
  2102. else {
  2103. if (FreePfnCount > LargestFreePfnCount) {
  2104. LargestFreePfnCount = FreePfnCount;
  2105. LargestFreePfnStart = PageFrameIndex - FreePfnCount;
  2106. FreePfnCount = 0;
  2107. }
  2108. }
  2109. Pfn1 += 1;
  2110. i -= 1;
  2111. PageFrameIndex += 1;
  2112. }
  2113. UNLOCK_PFN (OldIrql);
  2114. if (FreePfnCount > LargestFreePfnCount) {
  2115. LargestFreePfnCount = FreePfnCount;
  2116. LargestFreePfnStart = PageFrameIndex - FreePfnCount;
  2117. }
  2118. break;
  2119. case LoaderFirmwarePermanent:
  2120. case LoaderSpecialMemory:
  2121. case LoaderBBTMemory:
  2122. //
  2123. // Skip these ranges.
  2124. //
  2125. break;
  2126. default:
  2127. PointerPte = MiGetPteAddress (KSEG0_BASE + Bias +
  2128. (PageFrameIndex << PAGE_SHIFT));
  2129. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  2130. while (i != 0) {
  2131. //
  2132. // Set page as in use.
  2133. //
  2134. PointerPde = MiGetPdeAddress (KSEG0_BASE + Bias +
  2135. (PageFrameIndex << PAGE_SHIFT));
  2136. if (Pfn1->u3.e2.ReferenceCount == 0) {
  2137. Pfn1->u4.PteFrame = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
  2138. Pfn1->PteAddress = PointerPte;
  2139. Pfn1->u2.ShareCount += 1;
  2140. Pfn1->u3.e2.ReferenceCount = 1;
  2141. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  2142. MiDetermineNode (PageFrameIndex, Pfn1);
  2143. if (MemoryDescriptor->MemoryType == LoaderXIPRom) {
  2144. Pfn1->u1.Flink = 0;
  2145. Pfn1->u2.ShareCount = 0;
  2146. Pfn1->u3.e2.ReferenceCount = 0;
  2147. Pfn1->u3.e1.PageLocation = 0;
  2148. Pfn1->u3.e1.Rom = 1;
  2149. Pfn1->u4.InPageError = 0;
  2150. Pfn1->u3.e1.PrototypePte = 1;
  2151. }
  2152. Pfn1->u3.e1.CacheAttribute = MiCached;
  2153. }
  2154. Pfn1 += 1;
  2155. i -= 1;
  2156. PageFrameIndex += 1;
  2157. PointerPte += 1;
  2158. }
  2159. break;
  2160. }
  2161. NextMd = MemoryDescriptor->ListEntry.Blink;
  2162. }
  2163. if (PfnInLargePages == FALSE) {
  2164. //
  2165. // Indicate that the PFN database is allocated in nonpaged pool.
  2166. //
  2167. PointerPte = MiGetPteAddress (&MmPfnDatabase[MmLowestPhysicalPage]);
  2168. Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
  2169. Pfn1->u3.e1.StartOfAllocation = 1;
  2170. if (MmVirtualBias == 0) {
  2171. LastPte = MiGetPteAddress (&MmPfnDatabase[MmHighestPossiblePhysicalPage]);
  2172. while (PointerPte <= LastPte) {
  2173. Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
  2174. Pfn1->u2.ShareCount = 1;
  2175. Pfn1->u3.e2.ReferenceCount = 1;
  2176. PointerPte += 1;
  2177. }
  2178. }
  2179. //
  2180. // Set the end of the allocation.
  2181. //
  2182. PointerPte = MiGetPteAddress (&MmPfnDatabase[MmHighestPossiblePhysicalPage]);
  2183. Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
  2184. Pfn1->u3.e1.EndOfAllocation = 1;
  2185. }
  2186. else {
  2187. //
  2188. // The PFN database is allocated using large pages.
  2189. //
  2190. // Mark all PFN entries for the PFN pages in use.
  2191. //
  2192. PointerPte = MiGetPteAddress (MmPfnDatabase);
  2193. PageFrameIndex = (PFN_NUMBER)PointerPte->u.Hard.PageFrameNumber;
  2194. Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
  2195. i = MxPfnAllocation;
  2196. do {
  2197. Pfn1->PteAddress = (PMMPTE)(PageFrameIndex << PTE_SHIFT);
  2198. Pfn1->u3.e1.CacheAttribute = MiCached;
  2199. MiDetermineNode (PageFrameIndex, Pfn1);
  2200. Pfn1->u3.e2.ReferenceCount += 1;
  2201. PageFrameIndex += 1;
  2202. Pfn1 += 1;
  2203. i -= 1;
  2204. } while (i != 0);
  2205. if (MmDynamicPfn == 0) {
  2206. //
  2207. // Scan the PFN database backward for pages that are completely
  2208. // zero. These pages are unused and can be added to the free list.
  2209. //
  2210. BottomPfn = MI_PFN_ELEMENT(MmHighestPhysicalPage);
  2211. do {
  2212. //
  2213. // Compute the address of the start of the page that is next
  2214. // lower in memory and scan backwards until that page address
  2215. // is reached or just crossed.
  2216. //
  2217. if (((ULONG)BottomPfn & (PAGE_SIZE - 1)) != 0) {
  2218. BasePfn = (PMMPFN)((ULONG)BottomPfn & ~(PAGE_SIZE - 1));
  2219. TopPfn = BottomPfn + 1;
  2220. }
  2221. else {
  2222. BasePfn = (PMMPFN)((ULONG)BottomPfn - PAGE_SIZE);
  2223. TopPfn = BottomPfn;
  2224. }
  2225. while (BottomPfn > BasePfn) {
  2226. BottomPfn -= 1;
  2227. }
  2228. //
  2229. // If the entire range over which the PFN entries span is
  2230. // completely zero and the PFN entry that maps the page is
  2231. // not in the range, then add the page to the appropriate
  2232. // free list.
  2233. //
  2234. Range = (ULONG)TopPfn - (ULONG)BottomPfn;
  2235. if (RtlCompareMemoryUlong((PVOID)BottomPfn, Range, 0) == Range) {
  2236. //
  2237. // Set the PTE address to the physical page for virtual
  2238. // address alignment checking.
  2239. //
  2240. PointerPte = MiGetPteAddress (BasePfn);
  2241. PageFrameIndex = (PFN_NUMBER)PointerPte->u.Hard.PageFrameNumber;
  2242. Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
  2243. ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
  2244. ASSERT (Pfn1->PteAddress == (PMMPTE)(PageFrameIndex << PTE_SHIFT));
  2245. Pfn1->u3.e2.ReferenceCount = 0;
  2246. Pfn1->PteAddress = (PMMPTE)(PageFrameIndex << PTE_SHIFT);
  2247. //
  2248. // No need to initialize Pfn1->u3.e1.CacheAttribute
  2249. // here as the freelist insertion will mark it as
  2250. // not-mapped.
  2251. //
  2252. MiDetermineNode (PageFrameIndex, Pfn1);
  2253. LOCK_PFN (OldIrql);
  2254. MiInsertPageInFreeList (PageFrameIndex);
  2255. UNLOCK_PFN (OldIrql);
  2256. }
  2257. } while (BottomPfn > MmPfnDatabase);
  2258. }
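//
// For illustration: RtlCompareMemoryUlong (BottomPfn, Range, 0) returns the
// number of bytes matching the ULONG pattern 0, so a return value equal to
// Range means every PFN entry in that stretch of the database is still
// untouched and the 4K page backing it can be handed to the free list.
//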
  2259. }
  2260. //
  2261. // Adjust the memory descriptor to indicate that free pool has
  2262. // been used for nonpaged pool creation.
  2263. //
  2264. // N.B. This is required because the descriptors are walked upon
  2265. // return from this routine to create the MmPhysicalMemoryBlock.
  2266. //
  2267. *MxFreeDescriptor = *(PMEMORY_ALLOCATION_DESCRIPTOR)&MxOldFreeDescriptor;
  2268. //
  2269. // Initialize the nonpaged pool.
  2270. //
  2271. InitializePool (NonPagedPool, 0);
  2272. //
  2273. // Initialize the system PTE pool now that nonpaged pool exists.
  2274. // This is used for mapping I/O space, driver images and kernel stacks.
  2275. // Note this expands the initial PTE allocation to use all possible
  2276. // available virtual space by reclaiming the initial nonpaged
  2277. // pool range (in non /3GB systems) because that range has already been
  2278. // moved into the 2GB virtual range.
  2279. //
  2280. PointerPte = MiGetPteAddress (MmNonPagedSystemStart);
  2281. ASSERT (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0);
  2282. MmNumberOfSystemPtes = MiGetPteAddress (NonPagedPoolStartVirtual) - PointerPte - 1;
  2283. MiInitializeSystemPtes (PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
  2284. if (MiExtraPtes1 != 0) {
  2285. //
  2286. // Increment the system PTEs (for autoconfiguration purposes) but
  2287. // don't actually add the PTEs till later (to prevent fragmentation).
  2288. //
  2289. MiIncrementSystemPtes (MiExtraPtes1);
  2290. }
  2291. if (MiExtraPtes2 != 0) {
  2292. //
  2293. // Add extra system PTEs to the pool.
  2294. //
  2295. if (MM_SHARED_USER_DATA_VA > MiUseMaximumSystemSpace) {
  2296. if (MiUseMaximumSystemSpaceEnd > MM_SHARED_USER_DATA_VA) {
  2297. MiExtraPtes2 = BYTES_TO_PAGES(MM_SHARED_USER_DATA_VA - MiUseMaximumSystemSpace);
  2298. }
  2299. }
  2300. else {
  2301. ASSERT (MmVirtualBias != 0);
  2302. }
  2303. if (MiExtraPtes2 != 0) {
  2304. //
  2305. // Increment the system PTEs (for autoconfiguration purposes) but
  2306. // don't actually add the PTEs till later (to prevent
  2307. // fragmentation).
  2308. //
  2309. MiIncrementSystemPtes (MiExtraPtes2);
  2310. }
  2311. }
  2312. //
  2313. // Recover the extra PTE ranges immediately if special pool is enabled
  2314. // so the special pool range can be made as large as possible by consuming
  2315. // these.
  2316. //
  2317. if ((MmVerifyDriverBufferLength != (ULONG)-1) ||
  2318. ((MmSpecialPoolTag != 0) && (MmSpecialPoolTag != (ULONG)-1))) {
  2319. MiRecoverExtraPtes ();
  2320. }
  2321. //
  2322. // Initialize memory management structures for this process.
  2323. //
  2324. // Build the working set list. This requires the creation of a PDE
  2325. // to map hyperspace and the page table page pointed to
  2326. // by the PDE must be initialized.
  2327. //
  2328. // Note we can't remove a zeroed page as hyperspace does not
  2329. // exist and we map non-zeroed pages into hyperspace to zero them.
  2330. //
  2331. TempPde = ValidKernelPdeLocal;
  2332. PointerPde = MiGetPdeAddress (HYPER_SPACE);
  2333. LOCK_PFN (OldIrql);
  2334. PageFrameIndex = MiRemoveAnyPage (0);
  2335. TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
  2336. MI_WRITE_VALID_PTE (PointerPde, TempPde);
  2337. #if defined (_X86PAE_)
  2338. PointerPde = MiGetPdeAddress((PVOID)((PCHAR)HYPER_SPACE + MM_VA_MAPPED_BY_PDE));
  2339. PageFrameIndex = MiRemoveAnyPage (0);
  2340. TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
  2341. MI_WRITE_VALID_PTE (PointerPde, TempPde);
  2342. //
  2343. // Point to the page table page we just created and zero it.
  2344. //
  2345. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  2346. RtlZeroMemory (PointerPte, PAGE_SIZE);
  2347. #endif
  2348. KeFlushCurrentTb();
  2349. UNLOCK_PFN (OldIrql);
  2350. //
  2351. // Point to the page table page we just created and zero it.
  2352. //
  2353. PointerPte = MiGetPteAddress(HYPER_SPACE);
  2354. RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
  2355. //
  2356. // Hyper space now exists, set the necessary variables.
  2357. //
  2358. MmFirstReservedMappingPte = MiGetPteAddress (FIRST_MAPPING_PTE);
  2359. MmLastReservedMappingPte = MiGetPteAddress (LAST_MAPPING_PTE);
  2360. MmFirstReservedMappingPte->u.Hard.PageFrameNumber = NUMBER_OF_MAPPING_PTES;
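//
// Note (assumption): as with the zeroing PTEs set up below, the page frame
// number field of the first reserved mapping PTE appears to be used as an
// offset/count into the available hyperspace mapping PTEs rather than as a
// real frame number.
//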
  2361. MmWorkingSetList = (PMMWSL) ((ULONG_PTR)VAD_BITMAP_SPACE + PAGE_SIZE);
  2362. //
  2363. // Create zeroing PTEs for the zero page thread.
  2364. //
  2365. MiFirstReservedZeroingPte = MiReserveSystemPtes (NUMBER_OF_ZEROING_PTES + 1,
  2366. SystemPteSpace);
  2367. RtlZeroMemory (MiFirstReservedZeroingPte,
  2368. (NUMBER_OF_ZEROING_PTES + 1) * sizeof(MMPTE));
  2369. //
  2370. // Use the page frame number field of the first PTE as an
  2371. // offset into the available zeroing PTEs.
  2372. //
  2373. MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = NUMBER_OF_ZEROING_PTES;
  2374. //
  2375. // Create the VAD bitmap for this process.
  2376. //
  2377. PointerPte = MiGetPteAddress (VAD_BITMAP_SPACE);
  2378. LOCK_PFN (OldIrql);
  2379. PageFrameIndex = MiRemoveAnyPage (0);
  2380. UNLOCK_PFN (OldIrql);
  2381. //
  2382. // Note the global bit must be off for the bitmap data.
  2383. //
  2384. TempPte = ValidKernelPteLocal;
  2385. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  2386. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  2387. //
  2388. // Point to the page we just created and zero it.
  2389. //
  2390. RtlZeroMemory (VAD_BITMAP_SPACE, PAGE_SIZE);
  2391. //
  2392. // If booted /3GB, then the bitmap needs to be 2K bigger, shift
  2393. // the working set accordingly as well.
  2394. //
  2395. // Note the 2K expansion portion of the bitmap is automatically
  2396. // carved out of the working set page allocated below.
  2397. //
  2398. if (MmVirtualBias != 0) {
  2399. MmWorkingSetList = (PMMWSL) ((ULONG_PTR)MmWorkingSetList + PAGE_SIZE / 2);
  2400. }
  2401. MiLastVadBit = (((ULONG_PTR) MI_64K_ALIGN (MM_HIGHEST_VAD_ADDRESS))) / X64K;
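//
// Arithmetic for the bitmap sizing: each bit covers 64K of user address
// space, so a 2GB user space needs 32768 bits (exactly one 4K page) while a
// /3GB user space needs 49152 bits (6K); this is why the bitmap grows by 2K
// and MmWorkingSetList was shifted up by half a page above.
//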
  2402. #if defined (_X86PAE_)
  2403. //
  2404. // Only bitmap the first 2GB of the PAE address space when booted /3GB.
  2405. // This is because PAE has twice as many pagetable pages as non-PAE which
  2406. // causes the MMWSL structure to be larger than 2K. If we bitmapped the
  2407. // entire user address space in this configuration then we'd need a 6K
  2408. // bitmap and this would cause the initial MMWSL structure to overflow
  2409. // into a second page. This would require a bunch of extra code throughout
  2410. // process support and other areas so just limit the bitmap for now.
  2411. //
  2412. if (MiLastVadBit > PAGE_SIZE * 8 - 1) {
  2413. ASSERT (MmVirtualBias != 0);
  2414. MiLastVadBit = PAGE_SIZE * 8 - 1;
  2415. MmWorkingSetList = (PMMWSL) ((ULONG_PTR)VAD_BITMAP_SPACE + PAGE_SIZE);
  2416. }
  2417. #endif
  2418. KeInitializeEvent (&MiImageMappingPteEvent,
  2419. NotificationEvent,
  2420. FALSE);
  2421. //
  2422. // Initialize this process's memory management structures including
  2423. // the working set list.
  2424. //
  2425. // The PFN element for the page directory has already been initialized,
  2426. // zero the reference count and the share count so they won't be
  2427. // wrong.
  2428. //
  2429. Pfn1 = MI_PFN_ELEMENT (PdePageNumber);
  2430. LOCK_PFN (OldIrql);
  2431. Pfn1->u2.ShareCount = 0;
  2432. Pfn1->u3.e2.ReferenceCount = 0;
  2433. #if defined (_X86PAE_)
  2434. PointerPte = MiGetPteAddress (PDE_BASE);
  2435. for (i = 0; i < PD_PER_SYSTEM; i += 1) {
  2436. PdePageNumber = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
  2437. Pfn1 = MI_PFN_ELEMENT (PdePageNumber);
  2438. Pfn1->u2.ShareCount = 0;
  2439. Pfn1->u3.e2.ReferenceCount = 0;
  2440. PointerPte += 1;
  2441. }
  2442. #endif
  2443. //
  2444. // Get a page for the working set list and zero it.
  2445. //
  2446. TempPte = ValidKernelPteLocal;
  2447. PageFrameIndex = MiRemoveAnyPage (0);
  2448. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  2449. PointerPte = MiGetPteAddress (MmWorkingSetList);
  2450. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  2451. //
  2452. // Note that when booted /3GB, MmWorkingSetList is not page aligned, so
  2453. // always start zeroing from the start of the page regardless.
  2454. //
  2455. RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte), PAGE_SIZE);
  2456. CurrentProcess->WorkingSetPage = PageFrameIndex;
  2457. #if defined (_X86PAE_)
  2458. MiPaeInitialize ();
  2459. #endif
  2460. KeFlushCurrentTb();
  2461. UNLOCK_PFN (OldIrql);
  2462. CurrentProcess->Vm.MaximumWorkingSetSize = MmSystemProcessWorkingSetMax;
  2463. CurrentProcess->Vm.MinimumWorkingSetSize = MmSystemProcessWorkingSetMin;
  2464. MmInitializeProcessAddressSpace (CurrentProcess, NULL, NULL, NULL);
  2465. //
  2466. // Ensure the secondary page structures are marked as in use.
  2467. //
  2468. if (MmVirtualBias == 0) {
  2469. ASSERT (MmFreePagesByColor[0] < (PMMCOLOR_TABLES)MM_SYSTEM_CACHE_END_EXTRA);
  2470. PointerPde = MiGetPdeAddress(MmFreePagesByColor[0]);
  2471. ASSERT (PointerPde->u.Hard.Valid == 1);
  2472. PointerPte = MiGetPteAddress(MmFreePagesByColor[0]);
  2473. ASSERT (PointerPte->u.Hard.Valid == 1);
  2474. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
  2475. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  2476. LOCK_PFN (OldIrql);
  2477. if (Pfn1->u3.e2.ReferenceCount == 0) {
  2478. Pfn1->u4.PteFrame = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
  2479. Pfn1->PteAddress = PointerPte;
  2480. Pfn1->u2.ShareCount += 1;
  2481. Pfn1->u3.e2.ReferenceCount = 1;
  2482. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  2483. Pfn1->u3.e1.CacheAttribute = MiCached;
  2484. MiDetermineNode (PageFrameIndex, Pfn1);
  2485. }
  2486. UNLOCK_PFN (OldIrql);
  2487. }
  2488. else if ((((ULONG)MmFreePagesByColor[0] & (PAGE_SIZE - 1)) == 0) &&
  2489. ((MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES)) < PAGE_SIZE)) {
  2490. PMMCOLOR_TABLES c;
  2491. c = MmFreePagesByColor[0];
  2492. MmFreePagesByColor[0] = ExAllocatePoolWithTag (NonPagedPool,
  2493. MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES),
  2494. ' mM');
  2495. if (MmFreePagesByColor[0] != NULL) {
  2496. MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
  2497. RtlCopyMemory (MmFreePagesByColor[0],
  2498. c,
  2499. MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES));
  2500. //
  2501. // Free the page.
  2502. //
  2503. PointerPte = MiGetPteAddress (c);
  2504. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  2505. ASSERT (c > (PMMCOLOR_TABLES)MM_SYSTEM_CACHE_END_EXTRA);
  2506. MI_WRITE_INVALID_PTE (PointerPte, ZeroKernelPte);
  2507. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  2508. LOCK_PFN (OldIrql);
  2509. ASSERT ((Pfn1->u2.ShareCount <= 1) && (Pfn1->u3.e2.ReferenceCount <= 1));
  2510. Pfn1->u2.ShareCount = 0;
  2511. Pfn1->u3.e2.ReferenceCount = 1;
  2512. MI_SET_PFN_DELETED (Pfn1);
  2513. #if DBG
  2514. Pfn1->u3.e1.PageLocation = StandbyPageList;
  2515. #endif
  2516. MiDecrementReferenceCount (Pfn1, PageFrameIndex);
  2517. UNLOCK_PFN (OldIrql);
  2518. KeFlushSingleTb (c, FALSE);
  2519. }
  2520. else {
  2521. MmFreePagesByColor[0] = c;
  2522. }
  2523. }
  2524. return;
  2525. }