Source code of Windows XP (NT5)
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1442 lines
45 KiB

  1. /*++
  2. Copyright (c) 1990 Microsoft Corporation
  3. Copyright (c) 1992 Digital Equipment Corporation
  4. Module Name:
  5. initalpha.c
  6. Abstract:
  7. This module contains the machine dependent initialization for the
  8. memory management component. It is specifically tailored to the
  9. ALPHA architecture.
  10. Author:
  11. Lou Perazzoli (loup) 3-Apr-1990
  12. Joe Notarangelo 23-Apr-1992 ALPHA version
  13. Revision History:
  14. Landy Wang (landyw) 02-June-1998 : Modifications for full 3-level 64-bit NT.
  15. --*/
  16. #include "mi.h"
  17. #include <inbv.h>
//
// Local definitions.
//

//
// Page counts corresponding to 1mb and 4gb of physical memory,
// expressed in units of the native page size.
//

#define _1mbInPages (0x100000 >> PAGE_SHIFT)
#define _4gbInPages (0x100000000 >> PAGE_SHIFT)

//
// Local data.
//

//
// Early-boot physical page allocation state used by MxGetNextPage:
// the fallback (largest free) memory descriptor, the next physical
// page frame number to hand out, and the number of pages remaining
// in the descriptor currently being consumed.
//

PMEMORY_ALLOCATION_DESCRIPTOR MxFreeDescriptor;
PFN_NUMBER MxNextPhysicalPage;
PFN_NUMBER MxNumberOfPages;
  29. PFN_NUMBER
  30. MxGetNextPage (
  31. VOID
  32. )
  33. /*++
  34. Routine Description:
  35. This function returns the next physical page number from either the
  36. largest low memory descritor or the largest free descriptor. If there
  37. are no physical pages left, then a bugcheck is executed since the
  38. system cannot be initialized.
  39. Arguments:
  40. LoaderBlock - Supplies the address of the loader block.
  41. Return Value:
  42. None.
  43. Environment:
  44. Kernel mode.
  45. --*/
  46. {
  47. //
  48. // If there are free pages left in the current descriptor, then
  49. // return the next physical page. Otherwise, attempt to switch
  50. // descriptors.
  51. //
  52. if (MxNumberOfPages != 0) {
  53. MxNumberOfPages -= 1;
  54. return MxNextPhysicalPage++;
  55. } else {
  56. //
  57. // If the current descriptor is not the largest free descriptor,
  58. // then switch to the largest free descriptor. Otherwise, bugcheck.
  59. //
  60. if (MxNextPhysicalPage ==
  61. (MxFreeDescriptor->BasePage + MxFreeDescriptor->PageCount)) {
  62. KeBugCheckEx(INSTALL_MORE_MEMORY,
  63. MmNumberOfPhysicalPages,
  64. MmLowestPhysicalPage,
  65. MmHighestPhysicalPage,
  66. 0);
  67. return 0;
  68. } else {
  69. MxNumberOfPages = MxFreeDescriptor->PageCount - 1;
  70. MxNextPhysicalPage = MxFreeDescriptor->BasePage;
  71. return MxNextPhysicalPage++;
  72. }
  73. }
  74. }
  75. VOID
  76. MiInitMachineDependent (
  77. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  78. )
  79. /*++
  80. Routine Description:
  81. This routine performs the necessary operations to enable virtual
  82. memory. This includes building the page directory parent pages and
  83. the page directories for the system, building page table pages to map
  84. the code section, the data section, the stack section and the trap handler.
  85. It also initializes the PFN database and populates the free list.
  86. Arguments:
  87. LoaderBlock - Supplies the address of the loader block.
  88. Return Value:
  89. None.
  90. Environment:
  91. Kernel mode.
  92. --*/
  93. {
  94. LOGICAL First;
  95. CHAR Buffer[256];
  96. PMMPFN BasePfn;
  97. PMMPFN BottomPfn;
  98. PMMPFN TopPfn;
  99. PFN_NUMBER i;
  100. ULONG j;
  101. PFN_NUMBER HighPage;
  102. PFN_NUMBER PagesLeft;
  103. PFN_NUMBER PageNumber;
  104. PFN_NUMBER PtePage;
  105. PFN_NUMBER PdePage;
  106. PFN_NUMBER PpePage;
  107. PFN_NUMBER FrameNumber;
  108. PFN_NUMBER PfnAllocation;
  109. PEPROCESS CurrentProcess;
  110. PVOID SpinLockPage;
  111. PFN_NUMBER MostFreePage;
  112. PFN_NUMBER MostFreeLowMem;
  113. PLIST_ENTRY NextMd;
  114. SIZE_T MaxPool;
  115. PFN_NUMBER NextPhysicalPage;
  116. KIRQL OldIrql;
  117. PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptorLowMem;
  118. PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
  119. MMPTE TempPte;
  120. PMMPTE PointerPde;
  121. PMMPTE PointerPte;
  122. PMMPTE LastPte;
  123. PMMPTE CacheStackPage;
  124. PMMPTE Pde;
  125. PMMPTE StartPpe;
  126. PMMPTE StartPde;
  127. PMMPTE StartPte;
  128. PMMPTE EndPpe;
  129. PMMPTE EndPde;
  130. PMMPTE EndPte;
  131. PMMPFN Pfn1;
  132. PMMPFN Pfn2;
  133. PULONG PointerLong;
  134. PMMFREE_POOL_ENTRY Entry;
  135. PVOID NonPagedPoolStartVirtual;
  136. ULONG Range;
  137. MostFreePage = 0;
  138. MostFreeLowMem = 0;
  139. FreeDescriptorLowMem = NULL;
  140. //
  141. // Get the lower bound of the free physical memory and the number of
  142. // physical pages by walking the memory descriptor lists. In addition,
  143. // find the memory descriptor with the most free pages that is within
  144. // the first 4gb of physical memory. This memory can be used to allocate
  145. // common buffers for use by PCI devices that cannot address more than
  146. // 32 bits. Also find the largest free memory descriptor.
  147. //
  148. //
  149. // When restoring a hibernation image, OS Loader needs to use "a few" extra
  150. // pages of LoaderFree memory.
  151. // This is not accounted for when reserving memory for hibernation below.
  152. // Start with a safety margin to allow for this plus modest future increase.
  153. //
  154. MmHiberPages = 96;
  155. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  156. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  157. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  158. MEMORY_ALLOCATION_DESCRIPTOR,
  159. ListEntry);
  160. HighPage = MemoryDescriptor->BasePage + MemoryDescriptor->PageCount - 1;
  161. //
  162. // This check results in /BURNMEMORY chunks not being counted.
  163. //
  164. if (MemoryDescriptor->MemoryType != LoaderBad) {
  165. MmNumberOfPhysicalPages += (PFN_COUNT)MemoryDescriptor->PageCount;
  166. }
  167. //
  168. // If the lowest page is lower than the lowest page encountered
  169. // so far, then set the new low page number.
  170. //
  171. if (MemoryDescriptor->BasePage < MmLowestPhysicalPage) {
  172. MmLowestPhysicalPage = MemoryDescriptor->BasePage;
  173. }
  174. //
  175. // If the highest page is greater than the highest page encountered
  176. // so far, then set the new high page number.
  177. //
  178. if (HighPage > MmHighestPhysicalPage) {
  179. MmHighestPhysicalPage = HighPage;
  180. }
  181. //
  182. // Locate the largest free block starting below 4GB and the largest
  183. // free block.
  184. //
  185. if ((MemoryDescriptor->MemoryType == LoaderFree) ||
  186. (MemoryDescriptor->MemoryType == LoaderLoadedProgram) ||
  187. (MemoryDescriptor->MemoryType == LoaderFirmwareTemporary) ||
  188. (MemoryDescriptor->MemoryType == LoaderOsloaderStack)) {
  189. //
  190. // Every page that will be used as free memory that is not already
  191. // marked as LoaderFree must be counted so a hibernate can reserve
  192. // the proper amount.
  193. //
  194. if (MemoryDescriptor->MemoryType != LoaderFree) {
  195. MmHiberPages += MemoryDescriptor->PageCount;
  196. }
  197. if ((MemoryDescriptor->PageCount > MostFreeLowMem) &&
  198. (MemoryDescriptor->BasePage < _4gbInPages) &&
  199. (HighPage < _4gbInPages)) {
  200. MostFreeLowMem = MemoryDescriptor->PageCount;
  201. FreeDescriptorLowMem = MemoryDescriptor;
  202. } else if (MemoryDescriptor->PageCount > MostFreePage) {
  203. MostFreePage = MemoryDescriptor->PageCount;
  204. MxFreeDescriptor = MemoryDescriptor;
  205. }
  206. } else if (MemoryDescriptor->MemoryType == LoaderOsloaderHeap) {
  207. //
  208. // We do not want to use this memory yet as it still has important
  209. // data structures in it. But we still want to account for this in
  210. // the hibernation pages
  211. //
  212. MmHiberPages += MemoryDescriptor->PageCount;
  213. }
  214. NextMd = MemoryDescriptor->ListEntry.Flink;
  215. }
  216. MmHighestPossiblePhysicalPage = MmHighestPhysicalPage;
  217. //
  218. // Perform sanity checks on the results of walking the memory
  219. // descriptors.
  220. //
  221. // If the number of physical pages is less that 1024 (i.e., 8mb), then
  222. // bugcheck. There is not enough memory to run the system.
  223. //
  224. if (MmNumberOfPhysicalPages < 1024) {
  225. KeBugCheckEx(INSTALL_MORE_MEMORY,
  226. MmNumberOfPhysicalPages,
  227. MmLowestPhysicalPage,
  228. MmHighestPhysicalPage,
  229. 0);
  230. }
  231. //
  232. // If there is no free descriptor below 4gb, then it is not possible to
  233. // run devices that only support 32 address bits. It is also highly
  234. // unlikely that the configuration data is correct so bugcheck.
  235. //
  236. if (FreeDescriptorLowMem == NULL) {
  237. InbvDisplayString("MmInit *** FATAL ERROR *** no free memory below 4gb\n");
  238. KeBugCheck(MEMORY_MANAGEMENT);
  239. }
  240. //
  241. // Set the initial nonpaged frame allocation parameters.
  242. //
  243. MxNextPhysicalPage = FreeDescriptorLowMem->BasePage;
  244. MxNumberOfPages = FreeDescriptorLowMem->PageCount;
  245. //
  246. // Compute the initial and maximum size of nonpaged pool. The initial
  247. // allocation of nonpaged pool is such that it is both virtually and
  248. // physically contiguous.
  249. //
  250. // If the size of the initial nonpaged pool was initialized from the
  251. // registry and is greater than 7/8 of physical memory, then force the
  252. // size of the initial nonpaged pool to be computed.
  253. //
  254. if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
  255. (7 * (MmNumberOfPhysicalPages >> 3))) {
  256. MmSizeOfNonPagedPoolInBytes = 0;
  257. }
  258. //
  259. // If the size of the initial nonpaged pool is less than the minimum
  260. // amount, then compute the size of initial nonpaged pool as the minimum
  261. // size up to 8mb and a computed amount for every 1mb thereafter.
  262. //
  263. if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) {
  264. MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
  265. if (MmNumberOfPhysicalPages > 1024) {
  266. MmSizeOfNonPagedPoolInBytes +=
  267. ((MmNumberOfPhysicalPages - 1024) / _1mbInPages) *
  268. MmMinAdditionNonPagedPoolPerMb;
  269. }
  270. }
  271. //
  272. // Align the size of the initial nonpaged pool to page size boundary.
  273. //
  274. MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
  275. //
  276. // Limit initial nonpaged pool size to the maximum allowable size.
  277. //
  278. if (MmSizeOfNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL) {
  279. MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL;
  280. }
  281. //
  282. // If the computed size of the initial nonpaged pool will not fit in the
  283. // largest low memory descriptor, then recompute the size of nonpaged pool
  284. // to be the size of the largest low memory descriptor. If the largest
  285. // low memory descriptor does not contain the minimum initial nonpaged
  286. // pool size, then the system cannot be booted.
  287. //
  288. if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > MxNumberOfPages) {
  289. //
  290. // Reserve all of low memory for nonpaged pool.
  291. //
  292. MmSizeOfNonPagedPoolInBytes = MxNumberOfPages << PAGE_SHIFT;
  293. if(MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) {
  294. InbvDisplayString("MmInit *** FATAL ERROR *** cannot allocate nonpaged pool\n");
  295. sprintf(Buffer,
  296. "Largest description = %d pages, require %d pages\n",
  297. MxNumberOfPages,
  298. MmMinimumNonPagedPoolSize >> PAGE_SHIFT);
  299. InbvDisplayString(Buffer);
  300. KeBugCheck(MEMORY_MANAGEMENT);
  301. }
  302. }
  303. //
  304. // Reserve the physically and virtually contiguous memory that maps
  305. // the initial nonpaged pool and set page frame allocation parameters.
  306. //
  307. MxNextPhysicalPage += (PFN_NUMBER)(MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT);
  308. MxNumberOfPages -= (PFN_NUMBER)(MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT);
  309. //
  310. // Calculate the maximum size of nonpaged pool.
  311. //
  312. if (MmMaximumNonPagedPoolInBytes == 0) {
  313. //
  314. // Calculate the size of nonpaged pool. If 8mb or less use the
  315. // minimum size, then for every MB above 8mb add extra pages.
  316. //
  317. MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
  318. //
  319. // Make sure enough expansion for PFN database exists.
  320. //
  321. MmMaximumNonPagedPoolInBytes +=
  322. ((ULONG_PTR)PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN)));
  323. //
  324. // If the number of physical pages is greater than 8mb, then compute
  325. // an additional amount for every 1mb thereafter.
  326. //
  327. if (MmNumberOfPhysicalPages > 1024) {
  328. MmMaximumNonPagedPoolInBytes +=
  329. ((MmNumberOfPhysicalPages - 1024) / _1mbInPages) *
  330. MmMaxAdditionNonPagedPoolPerMb;
  331. }
  332. //
  333. // If the maximum size of nonpaged pool is greater than the maximum
  334. // default size of nonpaged pool, then limit the maximum size of
  335. // onopaged pool to the maximum default size.
  336. //
  337. if (MmMaximumNonPagedPoolInBytes > MM_MAX_DEFAULT_NONPAGED_POOL) {
  338. MmMaximumNonPagedPoolInBytes = MM_MAX_DEFAULT_NONPAGED_POOL;
  339. }
  340. }
  341. //
  342. // Align the maximum size of nonpaged pool to page size boundary.
  343. //
  344. MmMaximumNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
  345. //
  346. // Compute the maximum size of nonpaged pool to include 16 additional
  347. // pages and enough space to map the PFN database.
  348. //
  349. MaxPool = MmSizeOfNonPagedPoolInBytes + (PAGE_SIZE * 16) +
  350. ((ULONG_PTR)PAGE_ALIGN((MmHighestPhysicalPage + 1) * sizeof(MMPFN)));
  351. //
  352. // If the maximum size of nonpaged pool is less than the computed
  353. // maximum size of nonpaged pool, then set the maximum size of nonpaged
  354. // pool to the computed maximum size.
  355. //
  356. if (MmMaximumNonPagedPoolInBytes < MaxPool) {
  357. MmMaximumNonPagedPoolInBytes = MaxPool;
  358. }
  359. //
  360. // Limit maximum nonpaged pool to MM_MAX_ADDITIONAL_NONPAGED_POOL.
  361. //
  362. if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  363. MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL;
  364. }
  365. //
  366. // Compute the starting address of nonpaged pool.
  367. //
  368. MmNonPagedPoolStart = (PCHAR)MmNonPagedPoolEnd - MmMaximumNonPagedPoolInBytes;
  369. NonPagedPoolStartVirtual = MmNonPagedPoolStart;
  370. //
  371. // Calculate the starting address for nonpaged system space rounded
  372. // down to a second level PDE mapping boundary.
  373. //
  374. MmNonPagedSystemStart = (PVOID)(((ULONG_PTR)MmNonPagedPoolStart -
  375. (((ULONG_PTR)MmNumberOfSystemPtes + 1) * PAGE_SIZE)) &
  376. (~PAGE_DIRECTORY2_MASK));
  377. //
  378. // Limit the starting address of system space to the lowest allowable
  379. // address for nonpaged system space.
  380. //
  381. if (MmNonPagedSystemStart < MM_LOWEST_NONPAGED_SYSTEM_START) {
  382. MmNonPagedSystemStart = MM_LOWEST_NONPAGED_SYSTEM_START;
  383. }
  384. //
  385. // Recompute the actual number of system PTEs.
  386. //
  387. MmNumberOfSystemPtes = (ULONG)(((ULONG_PTR)MmNonPagedPoolStart -
  388. (ULONG_PTR)MmNonPagedSystemStart) >> PAGE_SHIFT) - 1;
  389. ASSERT(MmNumberOfSystemPtes > 1000);
  390. //
  391. // Set the global bit for all PPEs and PDEs in system space.
  392. //
  393. StartPde = MiGetPdeAddress(MM_SYSTEM_SPACE_START);
  394. EndPde = MiGetPdeAddress(MM_SYSTEM_SPACE_END);
  395. First = TRUE;
  396. while (StartPde <= EndPde) {
  397. if (First == TRUE || MiIsPteOnPdeBoundary(StartPde)) {
  398. First = FALSE;
  399. StartPpe = MiGetPteAddress(StartPde);
  400. if (StartPpe->u.Hard.Valid == 0) {
  401. StartPpe += 1;
  402. StartPde = MiGetVirtualAddressMappedByPte (StartPpe);
  403. continue;
  404. }
  405. TempPte = *StartPpe;
  406. TempPte.u.Hard.Global = 1;
  407. *StartPpe = TempPte;
  408. }
  409. TempPte = *StartPde;
  410. TempPte.u.Hard.Global = 1;
  411. *StartPde = TempPte;
  412. StartPde += 1;
  413. }
  414. //
  415. // Reset the global bit for all PPE & PDEs in session space.
  416. //
  417. StartPde = MiGetPdeAddress(MmSessionBase);
  418. EndPde = MiGetPdeAddress(MiSessionSpaceEnd);
  419. First = TRUE;
  420. while (StartPde < EndPde) {
  421. if (First == TRUE || MiIsPteOnPdeBoundary(StartPde)) {
  422. First = FALSE;
  423. StartPpe = MiGetPteAddress(StartPde);
  424. if (StartPpe->u.Hard.Valid == 0) {
  425. StartPpe += 1;
  426. StartPde = MiGetVirtualAddressMappedByPte (StartPpe);
  427. continue;
  428. }
  429. TempPte = *StartPpe;
  430. TempPte.u.Hard.Global = 0;
  431. *StartPpe = TempPte;
  432. }
  433. TempPte = *StartPde;
  434. TempPte.u.Hard.Global = 0;
  435. *StartPde = TempPte;
  436. ASSERT(StartPde->u.Long == 0);
  437. StartPde += 1;
  438. }
  439. //
  440. // Allocate a page directory and a pair of page table pages.
  441. // Map the hyper space page directory page into the top level parent
  442. // directory & the hyper space page table page into the page directory
  443. // and map an additional page that will eventually be used for the
  444. // working set list. Page tables after the first two are set up later
  445. // on during individual process working set initialization.
  446. //
  447. // The working set list page will eventually be a part of hyper space.
  448. // It is mapped into the second level page directory page so it can be
  449. // zeroed and so it will be accounted for in the PFN database. Later
  450. // the page will be unmapped, and its page frame number captured in the
  451. // system process object.
  452. //
  453. TempPte = ValidKernelPte;
  454. TempPte.u.Hard.Global = 0;
  455. StartPde = MiGetPdeAddress(HYPER_SPACE);
  456. StartPpe = MiGetPteAddress(StartPde);
  457. if (StartPpe->u.Hard.Valid == 0) {
  458. ASSERT (StartPpe->u.Long == 0);
  459. TempPte.u.Hard.PageFrameNumber = MxGetNextPage();
  460. *StartPpe = TempPte;
  461. RtlZeroMemory (MiGetVirtualAddressMappedByPte (StartPpe),
  462. PAGE_SIZE);
  463. }
  464. TempPte.u.Hard.PageFrameNumber = MxGetNextPage();
  465. *StartPde = TempPte;
  466. //
  467. // Zero the hyper space page table page.
  468. //
  469. StartPte = MiGetPteAddress(HYPER_SPACE);
  470. RtlZeroMemory(StartPte, PAGE_SIZE);
  471. //
  472. // Allocate page directory and page table pages for
  473. // system PTEs and nonpaged pool.
  474. //
  475. TempPte = ValidKernelPte;
  476. StartPde = MiGetPdeAddress(MmNonPagedSystemStart);
  477. EndPde = MiGetPdeAddress(MmNonPagedPoolEnd);
  478. First = TRUE;
  479. while (StartPde <= EndPde) {
  480. if (First == TRUE || MiIsPteOnPdeBoundary(StartPde)) {
  481. First = FALSE;
  482. StartPpe = MiGetPteAddress(StartPde);
  483. if (StartPpe->u.Hard.Valid == 0) {
  484. TempPte.u.Hard.PageFrameNumber = MxGetNextPage();
  485. *StartPpe = TempPte;
  486. RtlZeroMemory (MiGetVirtualAddressMappedByPte (StartPpe),
  487. PAGE_SIZE);
  488. }
  489. }
  490. if (StartPde->u.Hard.Valid == 0) {
  491. TempPte.u.Hard.PageFrameNumber = MxGetNextPage();
  492. *StartPde = TempPte;
  493. }
  494. StartPde += 1;
  495. }
  496. //
  497. // Zero the PTEs that map the nonpaged region just before nonpaged pool.
  498. //
  499. StartPte = MiGetPteAddress(MmNonPagedSystemStart);
  500. EndPte = MiGetPteAddress(MmNonPagedPoolEnd);
  501. if (!MiIsPteOnPdeBoundary (EndPte)) {
  502. EndPte = (PMMPTE)((ULONG_PTR)PAGE_ALIGN (EndPte) + PAGE_SIZE);
  503. }
  504. RtlZeroMemory(StartPte, (ULONG_PTR)EndPte - (ULONG_PTR)StartPte);
  505. //
  506. // Fill in the PTEs to cover the initial nonpaged pool. The physical
  507. // page frames to cover this range were reserved earlier from the
  508. // largest low memory free descriptor. The initial allocation is both
  509. // physically and virtually contiguous.
  510. //
  511. StartPte = MiGetPteAddress(MmNonPagedPoolStart);
  512. EndPte = MiGetPteAddress((PCHAR)MmNonPagedPoolStart +
  513. MmSizeOfNonPagedPoolInBytes);
  514. PageNumber = FreeDescriptorLowMem->BasePage;
  515. #if 0
  516. ASSERT (MxFreeDescriptor == FreeDescriptorLowMem);
  517. MxNumberOfPages -= (EndPte - StartPte);
  518. MxNextPhysicalPage += (EndPte - StartPte);
  519. #endif
  520. while (StartPte < EndPte) {
  521. TempPte.u.Hard.PageFrameNumber = PageNumber;
  522. PageNumber += 1;
  523. *StartPte = TempPte;
  524. StartPte += 1;
  525. }
  526. //
  527. // Zero the remaining PTEs (if any) for the initial nonpaged pool up to
  528. // the end of the current page table page.
  529. //
  530. while (!MiIsPteOnPdeBoundary (StartPte)) {
  531. *StartPte = ZeroKernelPte;
  532. StartPte += 1;
  533. }
  534. //
  535. // Convert the starting nonpaged pool address to a 43-bit superpage
  536. // address and set the address of the initial nonpaged pool allocation.
  537. //
  538. PointerPte = MiGetPteAddress(MmNonPagedPoolStart);
  539. MmNonPagedPoolStart = KSEG_ADDRESS(PointerPte->u.Hard.PageFrameNumber);
  540. MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
  541. //
  542. // Set subsection base to the address to zero (the PTE format allows the
  543. // complete address space to be spanned) and the top subsection page.
  544. //
  545. MmSubsectionBase = 0;
  546. MmSubsectionTopPage = (KSEG2_BASE - KSEG0_BASE) >> PAGE_SHIFT;
  547. //
  548. // Initialize the pool structures in the nonpaged memory just mapped.
  549. //
  550. MmNonPagedPoolExpansionStart =
  551. (PCHAR)NonPagedPoolStartVirtual + MmSizeOfNonPagedPoolInBytes;
  552. MiInitializeNonPagedPool ();
  553. //
  554. // Before Nonpaged pool can be used, the PFN database must be built.
  555. // This is due to the fact that the start and ending allocation bits
  556. // for nonpaged pool are stored in the PFN elements for the corresponding
  557. // pages.
  558. //
  559. // Calculate the number of pages required from page zero to the highest
  560. // page.
  561. //
  562. // Get the number of secondary colors and add the array for tracking
  563. // secondary colors to the end of the PFN database.
  564. //
  565. if (MmSecondaryColors == 0) {
  566. MmSecondaryColors = PCR->SecondLevelCacheSize;
  567. }
  568. MmSecondaryColors = MmSecondaryColors >> PAGE_SHIFT;
  569. //
  570. // Make sure value is power of two and within limits.
  571. //
  572. if (((MmSecondaryColors & (MmSecondaryColors - 1)) != 0) ||
  573. (MmSecondaryColors < MM_SECONDARY_COLORS_MIN) ||
  574. (MmSecondaryColors > MM_SECONDARY_COLORS_MAX)) {
  575. MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
  576. }
  577. MmSecondaryColorMask = MmSecondaryColors - 1;
  578. #if defined(MI_MULTINODE)
  579. //
  580. // Determine number of bits in MmSecondayColorMask. This
  581. // is the number of bits the Node color must be shifted
  582. // by before it is included in colors.
  583. //
  584. i = MmSecondaryColorMask;
  585. MmSecondaryColorNodeShift = 0;
  586. while (i) {
  587. i >>= 1;
  588. MmSecondaryColorNodeShift++;
  589. }
  590. //
  591. // Adjust the number of secondary colors by the number of nodes
  592. // in the machine. The secondary color mask is NOT adjusted
  593. // as it is used to control coloring within a node. The node
  594. // color is added to the color AFTER normal color calculations
  595. // are performed.
  596. //
  597. MmSecondaryColors *= KeNumberNodes;
  598. for (i = 0; i < KeNumberNodes; i++) {
  599. KeNodeBlock[i]->Color = (ULONG)(i << MmSecondaryColorNodeShift);
  600. InitializeSListHead(&KeNodeBlock[i]->DeadStackList);
  601. }
  602. #endif
  603. PfnAllocation =
  604. 1 + ((((MmHighestPhysicalPage + 1) * sizeof(MMPFN)) +
  605. ((PFN_NUMBER)MmSecondaryColors * sizeof(MMCOLOR_TABLES) * 2)) >> PAGE_SHIFT);
  606. //
  607. // If the number of pages remaining in the current descriptor is
  608. // greater than the number of pages needed for the PFN database,
  609. // then allocate the PFN database from the current free descriptor.
  610. // Otherwise, allocate the PFN database virtually.
  611. //
  612. #ifndef PFN_CONSISTENCY
  613. if (MxNumberOfPages >= PfnAllocation) {
  614. //
  615. // Allocate the PFN database in the 43-bit superpage.
  616. //
  617. // Compute the address of the PFN by allocating the appropriate
  618. // number of pages from the end of the free descriptor.
  619. //
  620. HighPage = MxNextPhysicalPage + MxNumberOfPages;
  621. MmPfnDatabase = KSEG_ADDRESS(HighPage - PfnAllocation);
  622. RtlZeroMemory(MmPfnDatabase, PfnAllocation * PAGE_SIZE);
  623. //
  624. // Mark off the chunk of memory used for the PFN database from
  625. // either the largest low free memory descriptor or the largest
  626. // free memory descriptor.
  627. //
  628. // N.B. The PFN database size is subtracted from the appropriate
  629. // memory descriptor so it will not appear to be free when
  630. // the memory descriptors are scanned to initialize the PFN
  631. // database.
  632. //
  633. MxNumberOfPages -= PfnAllocation;
  634. if ((MxNextPhysicalPage >= FreeDescriptorLowMem->BasePage) &&
  635. (MxNextPhysicalPage < (FreeDescriptorLowMem->BasePage +
  636. FreeDescriptorLowMem->PageCount))) {
  637. FreeDescriptorLowMem->PageCount -= (PFN_COUNT)PfnAllocation;
  638. } else {
  639. MxFreeDescriptor->PageCount -= (PFN_COUNT)PfnAllocation;
  640. }
  641. //
  642. // Allocate one PTE at the very top of nonpaged pool. This provides
  643. // protection against the caller of the first real nonpaged expansion allocation in case he accidentally overruns his
  644. // pool block. (We'll trap instead of corrupting the PFN database).
  645. // This also allows us to freely increment in MiFreePoolPages without
  646. // having to worry about a valid PTE after the end of the highest
  647. // nonpaged pool allocation.
  648. //
  649. if (MiReserveSystemPtes(1, NonPagedPoolExpansion) == NULL) {
  650. MiIssueNoPtesBugcheck (1, NonPagedPoolExpansion);
  651. }
  652. } else {
  653. #endif // PFN_CONSISTENCY
  654. //
  655. // Calculate the start of the PFN database (it starts at physical
  656. // page zero, even if the lowest physical page is not zero).
  657. //
  658. PointerPte = MiReserveSystemPtes((ULONG)PfnAllocation,
  659. NonPagedPoolExpansion);
  660. if (PointerPte == NULL) {
  661. MiIssueNoPtesBugcheck ((ULONG)PfnAllocation, NonPagedPoolExpansion);
  662. }
  663. #if PFN_CONSISTENCY
  664. MiPfnStartPte = PointerPte;
  665. MiPfnPtes = PfnAllocation;
  666. #endif
  667. MmPfnDatabase = (PMMPFN)(MiGetVirtualAddressMappedByPte(PointerPte));
  668. //
  669. // Allocate one more PTE just below the PFN database. This provides
  670. // protection against the caller of the first real nonpaged
  671. // expansion allocation in case he accidentally overruns his pool
  672. // block. (We'll trap instead of corrupting the PFN database).
  673. // This also allows us to freely increment in MiFreePoolPages
  674. // without having to worry about a valid PTE just after the end of
  675. // the highest nonpaged pool allocation.
  676. //
  677. if (MiReserveSystemPtes(1, NonPagedPoolExpansion) == NULL) {
  678. MiIssueNoPtesBugcheck (1, NonPagedPoolExpansion);
  679. }
  680. //
  681. // Go through the memory descriptors and for each physical page
  682. // make the PFN database have a valid PTE to map it. This allows
  683. // machines with sparse physical memory to have a minimal PFN
  684. // database.
  685. //
  686. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  687. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  688. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  689. MEMORY_ALLOCATION_DESCRIPTOR,
  690. ListEntry);
  691. PointerPte = MiGetPteAddress(MI_PFN_ELEMENT(
  692. MemoryDescriptor->BasePage));
  693. HighPage = MemoryDescriptor->BasePage + MemoryDescriptor->PageCount;
  694. LastPte = MiGetPteAddress((PCHAR)MI_PFN_ELEMENT(HighPage) - 1);
  695. while (PointerPte <= LastPte) {
  696. if (PointerPte->u.Hard.Valid == 0) {
  697. TempPte.u.Hard.PageFrameNumber = MxGetNextPage();
  698. *PointerPte = TempPte;
  699. RtlZeroMemory(MiGetVirtualAddressMappedByPte(PointerPte),
  700. PAGE_SIZE);
  701. }
  702. PointerPte += 1;
  703. }
  704. NextMd = MemoryDescriptor->ListEntry.Flink;
  705. }
  706. #ifndef PFN_CONSISTENCY
  707. }
  708. #endif // PFN_CONSISTENCY
  709. //
  710. // Initialize support for colored pages.
  711. //
  712. MmFreePagesByColor[0] =
  713. (PMMCOLOR_TABLES)&MmPfnDatabase[MmHighestPhysicalPage + 1];
  714. MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
  715. //
  716. // Make sure the color table are mapped if they are not physically
  717. // allocated.
  718. //
  719. if (MI_IS_PHYSICAL_ADDRESS(MmFreePagesByColor[0]) == FALSE) {
  720. PointerPte = MiGetPteAddress(&MmFreePagesByColor[0][0]);
  721. LastPte =
  722. MiGetPteAddress((PCHAR)&MmFreePagesByColor[1][MmSecondaryColors] - 1);
  723. while (PointerPte <= LastPte) {
  724. if (PointerPte->u.Hard.Valid == 0) {
  725. TempPte.u.Hard.PageFrameNumber = MxGetNextPage();
  726. *PointerPte = TempPte;
  727. RtlZeroMemory(MiGetVirtualAddressMappedByPte(PointerPte),
  728. PAGE_SIZE);
  729. }
  730. PointerPte += 1;
  731. }
  732. }
  733. //
  734. // Initialize the secondary color free page listheads.
  735. //
  736. for (i = 0; i < MmSecondaryColors; i += 1) {
  737. MmFreePagesByColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
  738. MmFreePagesByColor[FreePageList][i].Flink = MM_EMPTY_LIST;
  739. }
  740. //
  741. // Go through the page table entries and for any page which is valid,
  742. // update the corresponding PFN database element.
  743. //
  744. // Add the level one page directory parent page to the PFN database.
  745. //
  746. PointerPde = (PMMPTE)PDE_SELFMAP;
  747. PpePage = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
  748. Pfn1 = MI_PFN_ELEMENT(PpePage);
  749. Pfn1->PteFrame = PpePage;
  750. Pfn1->PteAddress = PointerPde;
  751. Pfn1->u2.ShareCount += 1;
  752. Pfn1->u3.e2.ReferenceCount = 1;
  753. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  754. MiDetermineNode(PpePage, Pfn1);
  755. //
  756. // Add the pages which were used to construct the nonpaged part of the
  757. // system, hyper space, and the system process working set list to the
  758. // PFN database.
  759. //
  760. // The scan begins at the start of hyper space so the hyper space page
  761. // table page and the working set list page will be accounted for in
  762. // the PFN database.
  763. //
  764. StartPde = MiGetPdeAddress(HYPER_SPACE);
  765. EndPde = MiGetPdeAddress(NON_PAGED_SYSTEM_END);
  766. First = TRUE;
  767. while (StartPde <= EndPde) {
  768. if (First == TRUE || MiIsPteOnPdeBoundary(StartPde)) {
  769. First = FALSE;
  770. StartPpe = MiGetPteAddress(StartPde);
  771. if (StartPpe->u.Hard.Valid == 0) {
  772. StartPpe += 1;
  773. StartPde = MiGetVirtualAddressMappedByPte (StartPpe);
  774. continue;
  775. }
  776. PdePage = MI_GET_PAGE_FRAME_FROM_PTE(StartPpe);
  777. Pfn1 = MI_PFN_ELEMENT(PdePage);
  778. Pfn1->PteFrame = PpePage;
  779. Pfn1->PteAddress = StartPde;
  780. Pfn1->u2.ShareCount += 1;
  781. Pfn1->u3.e2.ReferenceCount = 1;
  782. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  783. MiDetermineNode(PdePage, Pfn1);
  784. }
  785. //
  786. // If the second level PDE entry is valid, then add the page to the
  787. // PFN database.
  788. //
  789. if (StartPde->u.Hard.Valid == 1) {
  790. PtePage = MI_GET_PAGE_FRAME_FROM_PTE(StartPde);
  791. Pfn1 = MI_PFN_ELEMENT(PtePage);
  792. Pfn1->PteFrame = PdePage;
  793. Pfn1->PteAddress = StartPde;
  794. Pfn1->u2.ShareCount += 1;
  795. Pfn1->u3.e2.ReferenceCount = 1;
  796. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  797. MiDetermineNode(PtePage, Pfn1);
  798. //
  799. // Scan the page table page for valid PTEs.
  800. //
  801. PointerPte = MiGetVirtualAddressMappedByPte(StartPde);
  802. if ((PointerPte < MiGetPteAddress (KSEG0_BASE)) ||
  803. (PointerPte >= MiGetPteAddress (KSEG2_BASE))) {
  804. for (j = 0 ; j < PTE_PER_PAGE; j += 1) {
  805. //
  806. // If the page table page is valid, then add the page
  807. // to the PFN database.
  808. //
  809. if (PointerPte->u.Hard.Valid == 1) {
  810. FrameNumber = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
  811. Pfn2 = MI_PFN_ELEMENT(FrameNumber);
  812. Pfn2->PteFrame = PtePage;
  813. Pfn2->PteAddress = (PMMPTE)KSEG_ADDRESS(PtePage) + j;
  814. Pfn2->u2.ShareCount += 1;
  815. Pfn2->u3.e2.ReferenceCount = 1;
  816. Pfn2->u3.e1.PageLocation = ActiveAndValid;
  817. MiDetermineNode(FrameNumber, Pfn2);
  818. }
  819. PointerPte += 1;
  820. }
  821. }
  822. }
  823. StartPde += 1;
  824. }
  825. //
  826. // If the lowest physical page is still unused, add it to the PFN
  827. // database by making its reference count nonzero and pointing
  828. // it to a second level page directory entry.
  829. //
  830. Pfn1 = &MmPfnDatabase[MmLowestPhysicalPage];
  831. if (Pfn1->u3.e2.ReferenceCount == 0) {
  832. Pde = MiGetPdeAddress(0xFFFFFFFFB0000000);
  833. PdePage = MI_GET_PAGE_FRAME_FROM_PTE(Pde);
  834. Pfn1->PteFrame = PdePage;
  835. Pfn1->PteAddress = Pde;
  836. Pfn1->u2.ShareCount += 1;
  837. Pfn1->u3.e2.ReferenceCount = 1;
  838. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  839. MiDetermineNode(Pfn1 - MmPfnDatabase, Pfn1);
  840. }
  841. //
  842. // Walk through the memory descriptors and add pages to the free list
  843. // in the PFN database as appropriate.
  844. //
  845. // Since the LoaderBlock memory descriptors are generally ordered
  846. // from low physical memory address to high, walk it backwards so the
  847. // high physical pages go to the front of the freelists. The thinking
  848. // is that pages initially allocated by the system are less likely to be
  849. // freed, so don't waste memory below 16mb (or 4gb) that may be needed
  850. // by drivers later.
  851. //
  852. NextMd = LoaderBlock->MemoryDescriptorListHead.Blink;
  853. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  854. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  855. MEMORY_ALLOCATION_DESCRIPTOR,
  856. ListEntry);
  857. //
  858. // Set the base page number and the number of pages and switch
  859. // on the memory type.
  860. //
  861. i = MemoryDescriptor->PageCount;
  862. NextPhysicalPage = MemoryDescriptor->BasePage;
  863. switch (MemoryDescriptor->MemoryType) {
  864. //
  865. // Bad pages are not usable and are placed on the bad
  866. // page list.
  867. //
  868. case LoaderBad:
  869. while (i != 0) {
  870. MiInsertPageInList (&MmBadPageListHead, NextPhysicalPage);
  871. i -= 1;
  872. NextPhysicalPage += 1;
  873. }
  874. break;
  875. //
  876. // Pages from descriptor types free, loaded program, firmware
  877. // temporary, and OS Loader stack are potentially free.
  878. //
  879. case LoaderFree:
  880. case LoaderLoadedProgram:
  881. case LoaderFirmwareTemporary:
  882. case LoaderOsloaderStack:
  883. Pfn1 = MI_PFN_ELEMENT(NextPhysicalPage);
  884. while (i != 0) {
  885. //
  886. // If the PFN database entry for the respective page
  887. // is not referenced, then insert the page in the free
  888. // page list.
  889. //
  890. if (Pfn1->u3.e2.ReferenceCount == 0) {
  891. Pfn1->PteAddress = KSEG_ADDRESS(NextPhysicalPage);
  892. MiDetermineNode(NextPhysicalPage, Pfn1);
  893. MiInsertPageInFreeList (NextPhysicalPage);
  894. }
  895. Pfn1 += 1;
  896. i -= 1;
  897. NextPhysicalPage += 1;
  898. }
  899. break;
  900. //
  901. // All the remaining memory descriptor types represent memory
  902. // that has been already allocated and is not available.
  903. //
  904. default:
  905. PointerPte = KSEG_ADDRESS(NextPhysicalPage);
  906. Pfn1 = MI_PFN_ELEMENT(NextPhysicalPage);
  907. while (i != 0) {
  908. //
  909. // Set the page in use.
  910. //
  911. Pfn1->PteFrame = PpePage;
  912. Pfn1->PteAddress = PointerPte;
  913. Pfn1->u2.ShareCount += 1;
  914. Pfn1->u3.e2.ReferenceCount = 1;
  915. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  916. MiDetermineNode(NextPhysicalPage, Pfn1);
  917. Pfn1 += 1;
  918. i -= 1;
  919. NextPhysicalPage += 1;
  920. PointerPte += 1;
  921. }
  922. break;
  923. }
  924. NextMd = MemoryDescriptor->ListEntry.Blink;
  925. }
  926. //
  927. // Everything has been accounted for except the PFN database.
  928. //
  929. if (MI_IS_PHYSICAL_ADDRESS(MmPfnDatabase) == FALSE) {
  930. //
  931. // The PFN database is allocated in virtual memory.
  932. //
  933. // Set the start and end of allocation in the PFN database.
  934. //
  935. Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmLowestPhysicalPage])->u.Hard.PageFrameNumber);
  936. Pfn1->u3.e1.StartOfAllocation = 1;
  937. Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmHighestPhysicalPage])->u.Hard.PageFrameNumber);
  938. Pfn1->u3.e1.EndOfAllocation = 1;
  939. } else {
  940. //
  941. // The PFN database is allocated in KSEG43.
  942. //
  943. // Mark all PFN entries for the PFN pages in use.
  944. //
  945. PageNumber = MI_CONVERT_PHYSICAL_TO_PFN(MmPfnDatabase);
  946. Pfn1 = MI_PFN_ELEMENT(PageNumber);
  947. do {
  948. Pfn1->PteAddress = KSEG_ADDRESS(PageNumber);
  949. MiDetermineNode(PageNumber, Pfn1);
  950. Pfn1->u3.e2.ReferenceCount += 1;
  951. PageNumber += 1;
  952. Pfn1 += 1;
  953. PfnAllocation -= 1;
  954. } while (PfnAllocation != 0);
  955. //
  956. // Scan the PFN database backward for pages that are completely zero.
  957. // These pages are unused and can be added to the free list.
  958. //
  959. BottomPfn = MI_PFN_ELEMENT(MmHighestPhysicalPage);
  960. do {
  961. //
  962. // Compute the address of the start of the page that is next
  963. // lower in memory and scan backwards until that page address
  964. // is reached or just crossed.
  965. //
  966. if (((ULONG_PTR)BottomPfn & (PAGE_SIZE - 1)) != 0) {
  967. BasePfn = (PMMPFN)((ULONG_PTR)BottomPfn & ~(PAGE_SIZE - 1));
  968. TopPfn = BottomPfn + 1;
  969. } else {
  970. BasePfn = (PMMPFN)((ULONG_PTR)BottomPfn - PAGE_SIZE);
  971. TopPfn = BottomPfn;
  972. }
  973. while (BottomPfn > BasePfn) {
  974. BottomPfn -= 1;
  975. }
  976. //
  977. // If the entire range over which the PFN entries span is
  978. // completely zero and the PFN entry that maps the page is
  979. // not in the range, then add the page to the appropriate
  980. // free list.
  981. //
  982. Range = (ULONG)((ULONG_PTR)TopPfn - (ULONG_PTR)BottomPfn);
  983. if (RtlCompareMemoryUlong((PVOID)BottomPfn, Range, 0) == Range) {
  984. //
  985. // Set the PTE address to the physical page for virtual
  986. // address alignment checking.
  987. //
  988. PageNumber = (PFN_NUMBER)(((ULONG_PTR)BasePfn - KSEG43_BASE) >> PAGE_SHIFT);
  989. Pfn1 = MI_PFN_ELEMENT(PageNumber);
  990. ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
  991. Pfn1->u3.e2.ReferenceCount = 0;
  992. PfnAllocation += 1;
  993. Pfn1->PteAddress = KSEG_ADDRESS(PageNumber);
  994. MiDetermineNode(PageNumber, Pfn1);
  995. MiInsertPageInFreeList (PageNumber);
  996. }
  997. } while (BottomPfn > MmPfnDatabase);
  998. }
  999. //
  1000. // Mark the nonpaged-must-succeed pool pages as allocated in nonpaged pool.
  1001. //
  1002. i = MmSizeOfNonPagedMustSucceed;
  1003. Pfn1 = MI_PFN_ELEMENT(MI_CONVERT_PHYSICAL_TO_PFN(MmNonPagedMustSucceed));
  1004. while (i != 0) {
  1005. Pfn1->u3.e1.StartOfAllocation = 1;
  1006. Pfn1->u3.e1.EndOfAllocation = 1;
  1007. i -= PAGE_SIZE;
  1008. Pfn1 += 1;
  1009. }
  1010. //
  1011. // Initialize the nonpaged pool.
  1012. //
  1013. InitializePool(NonPagedPool, 0);
  1014. //
  1015. // Recompute the number of system PTEs to include the virtual space
  1016. // occupied by the initial nonpaged pool allocation in KSEG43, and
  1017. // initialize the nonpaged available PTEs for mapping I/O space and
  1018. // kernel stacks.
  1019. //
  1020. PointerPte = MiGetPteAddress(MmNonPagedSystemStart);
  1021. MmNumberOfSystemPtes = (ULONG)(MiGetPteAddress(MmNonPagedPoolExpansionStart) - PointerPte - 1);
  1022. MiInitializeSystemPtes(PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
  1023. //
  1024. // Initialize memory management structures for the system process.
  1025. //
  1026. // Set the address of the first and last reserved PTE in hyper space.
  1027. //
  1028. MmFirstReservedMappingPte = MiGetPteAddress(FIRST_MAPPING_PTE);
  1029. MmLastReservedMappingPte = MiGetPteAddress(LAST_MAPPING_PTE);
  1030. //
  1031. // The PFN element for the page directory parent will be initialized
  1032. // a second time when the process address space is initialized. Therefore,
  1033. // the share count and the reference count must be set to zero.
  1034. //
  1035. Pfn1 = MI_PFN_ELEMENT(MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)PDE_SELFMAP));
  1036. Pfn1->u2.ShareCount = 0;
  1037. Pfn1->u3.e2.ReferenceCount = 0;
  1038. //
  1039. // The PFN element for the hyper space page directory page will be
  1040. // initialized a second time when the process address space is initialized.
  1041. // Therefore, the share count and the reference count must be set to zero.
  1042. //
  1043. PointerPte = MiGetPpeAddress(HYPER_SPACE);
  1044. Pfn1 = MI_PFN_ELEMENT(MI_GET_PAGE_FRAME_FROM_PTE(PointerPte));
  1045. Pfn1->u2.ShareCount = 0;
  1046. Pfn1->u3.e2.ReferenceCount = 0;
  1047. //
  1048. // The PFN elements for the hyper space page table page and working set list
  1049. // page will be initialized a second time when the process address space
  1050. // is initialized. Therefore, the share count and the reference count must be
  1051. // set to zero.
  1052. //
  1053. StartPde = MiGetPdeAddress(HYPER_SPACE);
  1054. Pfn1 = MI_PFN_ELEMENT(MI_GET_PAGE_FRAME_FROM_PTE(StartPde));
  1055. Pfn1->u2.ShareCount = 0;
  1056. Pfn1->u3.e2.ReferenceCount = 0;
  1057. //
  1058. // Save the page frame number of the working set page in the system
  1059. // process object and unmap the working set page from the second level
  1060. // page directory page.
  1061. //
  1062. LOCK_PFN(OldIrql);
  1063. FrameNumber = MiRemoveZeroPageIfAny (0);
  1064. if (FrameNumber == 0) {
  1065. FrameNumber = MiRemoveAnyPage (0);
  1066. UNLOCK_PFN (OldIrql);
  1067. MiZeroPhysicalPage (FrameNumber, 0);
  1068. LOCK_PFN (OldIrql);
  1069. Pfn1 = MI_PFN_ELEMENT(FrameNumber);
  1070. Pfn1->u2.ShareCount = 0;
  1071. Pfn1->u3.e2.ReferenceCount = 0;
  1072. }
  1073. CurrentProcess = PsGetCurrentProcess();
  1074. CurrentProcess->WorkingSetPage = FrameNumber;
  1075. PointerPte = MiGetVirtualAddressMappedByPte(EndPde);
  1076. UNLOCK_PFN(OldIrql);
  1077. //
  1078. // Initialize the system process memory management structures including
  1079. // the working set list.
  1080. //
  1081. PointerPte = MmFirstReservedMappingPte;
  1082. PointerPte->u.Hard.PageFrameNumber = NUMBER_OF_MAPPING_PTES;
  1083. CurrentProcess->Vm.MaximumWorkingSetSize = (ULONG)MmSystemProcessWorkingSetMax;
  1084. CurrentProcess->Vm.MinimumWorkingSetSize = (ULONG)MmSystemProcessWorkingSetMin;
  1085. MmInitializeProcessAddressSpace(CurrentProcess, NULL, NULL, NULL);
  1086. //
  1087. // Check to see if moving the secondary page structures to the end
  1088. // of the PFN database is a waste of memory. And if so, copy it
  1089. // to paged pool.
  1090. //
  1091. // If the PFN database ends on a page aligned boundary and the
  1092. // size of the two arrays is less than a page, free the page
  1093. // and allocate nonpagedpool for this.
  1094. //
  1095. if ((((ULONG_PTR)MmFreePagesByColor[0] & (PAGE_SIZE - 1)) == 0) &&
  1096. ((MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES)) < PAGE_SIZE)) {
  1097. PMMCOLOR_TABLES c;
  1098. c = MmFreePagesByColor[0];
  1099. MmFreePagesByColor[0] =
  1100. ExAllocatePoolWithTag(NonPagedPoolMustSucceed,
  1101. MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES),
  1102. ' mM');
  1103. MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
  1104. RtlCopyMemory (MmFreePagesByColor[0],
  1105. c,
  1106. MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES));
  1107. //
  1108. // Free the page.
  1109. //
  1110. if (!MI_IS_PHYSICAL_ADDRESS(c)) {
  1111. PointerPte = MiGetPteAddress(c);
  1112. FrameNumber = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
  1113. *PointerPte = ZeroKernelPte;
  1114. } else {
  1115. FrameNumber = MI_CONVERT_PHYSICAL_TO_PFN(c);
  1116. }
  1117. LOCK_PFN (OldIrql);
  1118. Pfn1 = MI_PFN_ELEMENT (FrameNumber);
  1119. ASSERT ((Pfn1->u3.e2.ReferenceCount <= 1) && (Pfn1->u2.ShareCount <= 1));
  1120. Pfn1->u2.ShareCount = 0;
  1121. Pfn1->u3.e2.ReferenceCount = 0;
  1122. MI_SET_PFN_DELETED(Pfn1);
  1123. #if DBG
  1124. Pfn1->u3.e1.PageLocation = StandbyPageList;
  1125. #endif //DBG
  1126. MiInsertPageInFreeList (FrameNumber);
  1127. UNLOCK_PFN (OldIrql);
  1128. }
  1129. return;
  1130. }