Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1474 lines
46 KiB

  1. /*++
  2. Copyright (c) 1990 Microsoft Corporation
  3. Copyright (c) 1992 Digital Equipment Corporation
  4. Module Name:
  5. inialpha.c
  6. Abstract:
  7. This module contains the machine dependent initialization for the
  8. memory management component. It is specifically tailored to the
  9. ALPHA architecture.
  10. Author:
  11. Lou Perazzoli (loup) 3-Apr-1990
  12. Joe Notarangelo 23-Apr-1992 ALPHA version
  13. Revision History:
  14. --*/
  15. #include "mi.h"
  16. #include <inbv.h>
  17. //
  18. // Local definitions
  19. //
  20. #define _1MB (0x100000)
  21. #define _16MB (0x1000000)
  22. #define _24MB (0x1800000)
  23. #define _32MB (0x2000000)
  24. SIZE_T MmExpandedNonPagedPoolInBytes;
  25. VOID
  26. MiInitMachineDependent (
  27. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  28. )
  29. /*++
  30. Routine Description:
  31. This routine performs the necessary operations to enable virtual
  32. memory. This includes building the page directory page, building
  33. page table pages to map the code section, the data section, the
  34. stack section and the trap handler.
  35. It also initializes the PFN database and populates the free list.
  36. Arguments:
  37. LoaderBlock - Supplies a pointer to the loader parameter block.
  38. Return Value:
  39. None.
  40. Environment:
  41. Kernel mode.
  42. --*/
  43. {
  44. PMMPFN BasePfn;
  45. PMMPFN BottomPfn;
  46. PMMPFN TopPfn;
  47. BOOLEAN PfnInKseg0;
  48. ULONG LowMemoryReserved;
  49. ULONG i, j;
  50. ULONG HighPage;
  51. ULONG PagesLeft;
  52. ULONG PageNumber;
  53. ULONG PdePageNumber;
  54. ULONG PdePage;
  55. ULONG PageFrameIndex;
  56. ULONG NextPhysicalPage;
  57. ULONG PfnAllocation;
  58. ULONG NumberOfPages;
  59. PEPROCESS CurrentProcess;
  60. PVOID SpinLockPage;
  61. ULONG MostFreePage;
  62. ULONG MostFreeLowMem;
  63. PLIST_ENTRY NextMd;
  64. ULONG MaxPool;
  65. KIRQL OldIrql;
  66. PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptor;
  67. PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptorLowMem;
  68. PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
  69. MMPTE TempPte;
  70. PMMPTE PointerPde;
  71. PMMPTE PointerPte;
  72. PMMPTE LastPte;
  73. PMMPTE CacheStackPage;
  74. PMMPTE Pde;
  75. PMMPTE StartPde;
  76. PMMPTE EndPde;
  77. PMMPFN Pfn1;
  78. PMMPFN Pfn2;
  79. PULONG PointerLong;
  80. CHAR Buffer[256];
  81. PMMFREE_POOL_ENTRY Entry;
  82. PVOID NonPagedPoolStartVirtual;
  83. ULONG Range;
  84. ULONG RemovedLowPage;
  85. ULONG RemovedLowCount;
  86. RemovedLowPage = 0;
  87. RemovedLowCount = 0;
  88. LowMemoryReserved = 0;
  89. MostFreePage = 0;
  90. MostFreeLowMem = 0;
  91. FreeDescriptor = NULL;
  92. FreeDescriptorLowMem = NULL;
  93. PointerPte = MiGetPdeAddress (PDE_BASE);
  94. PdePageNumber = PointerPte->u.Hard.PageFrameNumber;
  95. PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PointerPte->u.Long;
  96. KeSweepDcache (FALSE);
  97. //
  98. // Get the lower bound of the free physical memory and the
  99. // number of physical pages by walking the memory descriptor lists.
  100. // In addition, find the memory descriptor with the most free pages
  101. // that begins at a physical address less than 16MB. The 16 MB
  102. // boundary is necessary for allocating common buffers for use by
  103. // ISA devices that cannot address more than 24 bits.
  104. //
  105. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  106. //
  107. // When restoring a hibernation image, OS Loader needs to use "a few" extra
  108. // pages of LoaderFree memory.
  109. // This is not accounted for when reserving memory for hibernation below.
  110. // Start with a safety margin to allow for this plus modest future increase.
  111. //
  112. MmHiberPages = 96;
  113. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  114. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  115. MEMORY_ALLOCATION_DESCRIPTOR,
  116. ListEntry);
  117. HighPage = MemoryDescriptor->BasePage + MemoryDescriptor->PageCount-1;
  118. //
  119. // This check results in /BURNMEMORY chunks not being counted.
  120. //
  121. if (MemoryDescriptor->MemoryType != LoaderBad) {
  122. MmNumberOfPhysicalPages += MemoryDescriptor->PageCount;
  123. }
  124. if (MemoryDescriptor->BasePage < MmLowestPhysicalPage) {
  125. MmLowestPhysicalPage = MemoryDescriptor->BasePage;
  126. }
  127. if (HighPage > MmHighestPhysicalPage) {
  128. MmHighestPhysicalPage = HighPage;
  129. }
  130. //
  131. // Locate the largest free block starting below 16 megs
  132. // and the largest free block.
  133. //
  134. if ((MemoryDescriptor->MemoryType == LoaderFree) ||
  135. (MemoryDescriptor->MemoryType == LoaderLoadedProgram) ||
  136. (MemoryDescriptor->MemoryType == LoaderFirmwareTemporary) ||
  137. (MemoryDescriptor->MemoryType == LoaderOsloaderStack)) {
  138. //
  139. // Every page that will be used as free memory that is not already
  140. // marked as LoaderFree must be counted so a hibernate can reserve
  141. // the proper amount.
  142. //
  143. if (MemoryDescriptor->MemoryType != LoaderFree) {
  144. MmHiberPages += MemoryDescriptor->PageCount;
  145. }
  146. if ((MemoryDescriptor->PageCount > MostFreeLowMem) &&
  147. (MemoryDescriptor->BasePage < (_16MB >> PAGE_SHIFT)) &&
  148. (HighPage < MM_PAGES_IN_KSEG0)) {
  149. MostFreeLowMem = MemoryDescriptor->PageCount;
  150. FreeDescriptorLowMem = MemoryDescriptor;
  151. } else if (MemoryDescriptor->PageCount > MostFreePage) {
  152. MostFreePage = MemoryDescriptor->PageCount;
  153. FreeDescriptor = MemoryDescriptor;
  154. }
  155. } else if (MemoryDescriptor->MemoryType == LoaderOsloaderHeap) {
  156. //
  157. // We do not want to use this memory yet as it still has important
  158. // data structures in it. But we still want to account for this in
  159. // the hibernation pages
  160. //
  161. MmHiberPages += MemoryDescriptor->PageCount;
  162. }
  163. NextMd = MemoryDescriptor->ListEntry.Flink;
  164. }
  165. //
  166. // Perform sanity checks on the results of walking the memory
  167. // descriptors.
  168. //
  169. if (MmNumberOfPhysicalPages < 1024) {
  170. KeBugCheckEx (INSTALL_MORE_MEMORY,
  171. MmNumberOfPhysicalPages,
  172. MmLowestPhysicalPage,
  173. MmHighestPhysicalPage,
  174. 0);
  175. }
  176. if (FreeDescriptorLowMem == NULL){
  177. InbvDisplayString("MmInit *** FATAL ERROR *** no free descriptors that begin below physical address 16MB\n");
  178. KeBugCheck (MEMORY_MANAGEMENT);
  179. }
  180. if (MmDynamicPfn == TRUE) {
  181. //
  182. // Since a ~128mb PFN database is required to span the 32GB supported
  183. // by Alpha, require 256mb of memory to be present to support
  184. // this option.
  185. //
  186. if (MmNumberOfPhysicalPages >= (256 * 1024 * 1024) / PAGE_SIZE) {
  187. MmHighestPossiblePhysicalPage = 0x400000 - 1;
  188. }
  189. else {
  190. MmDynamicPfn = FALSE;
  191. }
  192. }
  193. else {
  194. MmHighestPossiblePhysicalPage = MmHighestPhysicalPage;
  195. }
  196. //
  197. // Used later to build nonpaged pool.
  198. //
  199. NextPhysicalPage = FreeDescriptorLowMem->BasePage;
  200. NumberOfPages = FreeDescriptorLowMem->PageCount;
  201. //
  202. // Build non-paged pool using the physical pages following the
  203. // data page in which to build the pool from. Non-paged pool grows
  204. // from the high range of the virtual address space and expands
  205. // downward.
  206. //
  207. // At this time non-paged pool is constructed so virtual addresses
  208. // are also physically contiguous.
  209. //
  210. if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
  211. (7 * (MmNumberOfPhysicalPages >> 3))) {
  212. //
  213. // More than 7/8 of memory allocated to nonpagedpool, reset to 0.
  214. //
  215. MmSizeOfNonPagedPoolInBytes = 0;
  216. }
  217. if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) {
  218. //
  219. // Calculate the size of nonpaged pool. Use the minimum size,
  220. // then for every MB above 8mb add extra pages.
  221. //
  222. MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
  223. MmSizeOfNonPagedPoolInBytes +=
  224. ((MmNumberOfPhysicalPages - 1024) /
  225. (_1MB >> PAGE_SHIFT) ) *
  226. MmMinAdditionNonPagedPoolPerMb;
  227. }
  228. //
  229. // Align to page size boundary.
  230. //
  231. MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
  232. //
  233. // Limit initial nonpaged pool size to MM_MAX_INITIAL_NONPAGED_POOL
  234. //
  235. if (MmSizeOfNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL) {
  236. MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL;
  237. }
  238. //
  239. // If the non-paged pool that we want to allocate will not fit in
  240. // the free memory descriptor that we have available then recompute
  241. // the size of non-paged pool to be the size of the free memory
  242. // descriptor. If the free memory descriptor cannot fit the
  243. // minimum non-paged pool size (MmMinimumNonPagedPoolSize) then we
  244. // cannot boot the operating system.
  245. //
  246. if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > NumberOfPages) {
  247. //
  248. // Reserve all of low memory for nonpaged pool.
  249. //
  250. MmSizeOfNonPagedPoolInBytes = NumberOfPages << PAGE_SHIFT;
  251. LowMemoryReserved = NextPhysicalPage;
  252. //
  253. // Switch to backup descriptor for all other allocations.
  254. //
  255. NextPhysicalPage = FreeDescriptor->BasePage;
  256. NumberOfPages = FreeDescriptor->PageCount;
  257. if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) {
  258. InbvDisplayString("MmInit *** FATAL ERROR *** cannot allocate non-paged pool\n");
  259. sprintf(Buffer,
  260. "Largest description = %d pages, require %d pages\n",
  261. NumberOfPages,
  262. MmMinimumNonPagedPoolSize >> PAGE_SHIFT);
  263. InbvDisplayString (Buffer );
  264. KeBugCheck (MEMORY_MANAGEMENT);
  265. }
  266. }
  267. //
  268. // Calculate the maximum size of pool.
  269. //
  270. if (MmMaximumNonPagedPoolInBytes == 0) {
  271. //
  272. // Calculate the size of nonpaged pool.
  273. // For every MB above 8mb add extra pages.
  274. //
  275. MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
  276. //
  277. // Make sure enough expansion for the PFN database exists.
  278. //
  279. MmMaximumNonPagedPoolInBytes += (ULONG)PAGE_ALIGN (
  280. MmHighestPhysicalPage * sizeof(MMPFN));
  281. MmMaximumNonPagedPoolInBytes +=
  282. ((MmNumberOfPhysicalPages - 1024) /
  283. (_1MB >> PAGE_SHIFT) ) *
  284. MmMaxAdditionNonPagedPoolPerMb;
  285. }
  286. MaxPool = MmSizeOfNonPagedPoolInBytes + PAGE_SIZE * 16 +
  287. (ULONG)PAGE_ALIGN (
  288. MmHighestPhysicalPage * sizeof(MMPFN));
  289. if (MmMaximumNonPagedPoolInBytes < MaxPool) {
  290. MmMaximumNonPagedPoolInBytes = MaxPool;
  291. }
  292. //
  293. // If the system is configured for maximum system PTEs then limit maximum
  294. // nonpaged pool to 128mb so the rest of the virtual address space can
  295. // be used for the PTEs. Also push as much nonpaged pool as possible
  296. // into kseg0 to free up more PTEs.
  297. //
  298. if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  299. ULONG InitialNonPagedPages;
  300. ULONG ExpansionPagesToMove;
  301. ULONG LowAvailPages;
  302. if (MiRequestedSystemPtes == (ULONG)-1) {
  303. MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL;
  304. if (LowMemoryReserved == 0) {
  305. InitialNonPagedPages = (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT);
  306. if (InitialNonPagedPages + 1024 < NumberOfPages) {
  307. LowAvailPages = NumberOfPages - 1024 - InitialNonPagedPages;
  308. ExpansionPagesToMove = (MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT) - InitialNonPagedPages;
  309. if (ExpansionPagesToMove > 32) {
  310. ExpansionPagesToMove -= 32;
  311. if (LowAvailPages > ExpansionPagesToMove) {
  312. LowAvailPages = ExpansionPagesToMove;
  313. }
  314. MmSizeOfNonPagedPoolInBytes += (LowAvailPages << PAGE_SHIFT);
  315. }
  316. }
  317. }
  318. if (MmSizeOfNonPagedPoolInBytes == MmMaximumNonPagedPoolInBytes) {
  319. ASSERT (MmSizeOfNonPagedPoolInBytes > (32 << PAGE_SHIFT));
  320. MmSizeOfNonPagedPoolInBytes -= (32 << PAGE_SHIFT);
  321. }
  322. }
  323. }
  324. //
  325. // Limit maximum nonpaged pool to MM_MAX_ADDITIONAL_NONPAGED_POOL.
  326. //
  327. if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  328. if (MmMaximumNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL + MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  329. MmMaximumNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL + MM_MAX_ADDITIONAL_NONPAGED_POOL;
  330. }
  331. if (LowMemoryReserved != 0) {
  332. if (MmMaximumNonPagedPoolInBytes > MmSizeOfNonPagedPoolInBytes + MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  333. MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes + MM_MAX_ADDITIONAL_NONPAGED_POOL;
  334. }
  335. MmExpandedNonPagedPoolInBytes = MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes;
  336. }
  337. else {
  338. if ((MM_MAX_INITIAL_NONPAGED_POOL >> PAGE_SHIFT) >= NumberOfPages) {
  339. //
  340. // Reserve all of low memory for nonpaged pool.
  341. //
  342. SIZE_T Diff;
  343. Diff = MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes;
  344. if (Diff > MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  345. Diff = MM_MAX_ADDITIONAL_NONPAGED_POOL;
  346. }
  347. MmSizeOfNonPagedPoolInBytes = NumberOfPages << PAGE_SHIFT;
  348. MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes + Diff;
  349. LowMemoryReserved = NextPhysicalPage;
  350. //
  351. // Switch to backup descriptor for all other allocations.
  352. //
  353. NextPhysicalPage = FreeDescriptor->BasePage;
  354. NumberOfPages = FreeDescriptor->PageCount;
  355. }
  356. else {
  357. MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL;
  358. //
  359. // The pages must be subtracted from the low descriptor so
  360. // they are not used for anything else or put on the freelist.
  361. // But they must be added back in later when initializing PFNs
  362. // for all the descriptor ranges.
  363. //
  364. RemovedLowCount = (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT);
  365. FreeDescriptorLowMem->PageCount -= RemovedLowCount;
  366. RemovedLowPage = FreeDescriptorLowMem->BasePage + FreeDescriptorLowMem->PageCount;
  367. NumberOfPages = FreeDescriptorLowMem->PageCount;
  368. }
  369. MmExpandedNonPagedPoolInBytes = MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes;
  370. if (MmExpandedNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) {
  371. MmExpandedNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL;
  372. }
  373. }
  374. }
  375. if (MmExpandedNonPagedPoolInBytes) {
  376. MmNonPagedPoolStart = (PVOID)((ULONG)MmNonPagedPoolEnd
  377. - MmExpandedNonPagedPoolInBytes);
  378. }
  379. else {
  380. MmNonPagedPoolStart = (PVOID)((ULONG)MmNonPagedPoolEnd
  381. - (MmMaximumNonPagedPoolInBytes - 1));
  382. }
  383. MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
  384. NonPagedPoolStartVirtual = MmNonPagedPoolStart;
  385. //
  386. // Calculate the starting PDE for the system PTE pool which is
  387. // right below the nonpaged pool.
  388. //
  389. MmNonPagedSystemStart = (PVOID)(((ULONG)MmNonPagedPoolStart -
  390. ((MmNumberOfSystemPtes + 1) * PAGE_SIZE)) &
  391. (~PAGE_DIRECTORY_MASK));
  392. if (MmNonPagedSystemStart < MM_LOWEST_NONPAGED_SYSTEM_START) {
  393. MmNonPagedSystemStart = MM_LOWEST_NONPAGED_SYSTEM_START;
  394. }
  395. MmNumberOfSystemPtes = (((ULONG)MmNonPagedPoolStart -
  396. (ULONG)MmNonPagedSystemStart) >> PAGE_SHIFT)-1;
  397. ASSERT (MmNumberOfSystemPtes > 1000);
  398. //
  399. // Set the global bit for all PDEs in system space.
  400. //
  401. StartPde = MiGetPdeAddress (MM_SYSTEM_SPACE_START);
  402. EndPde = MiGetPdeAddress (MM_SYSTEM_SPACE_END);
  403. while (StartPde <= EndPde) {
  404. if (StartPde->u.Hard.Global == 0) {
  405. TempPte = *StartPde;
  406. TempPte.u.Hard.Global = 1;
  407. *StartPde = TempPte;
  408. }
  409. StartPde += 1;
  410. }
  411. //
  412. // Clear the global bit for all session space addresses.
  413. //
  414. StartPde = MiGetPdeAddress (MmSessionBase);
  415. EndPde = MiGetPdeAddress (MI_SESSION_SPACE_END);
  416. while (StartPde < EndPde) {
  417. if (StartPde->u.Hard.Global == 1) {
  418. TempPte = *StartPde;
  419. TempPte.u.Hard.Global = 0;
  420. *StartPde = TempPte;
  421. }
  422. ASSERT (StartPde->u.Long == 0);
  423. StartPde += 1;
  424. }
  425. StartPde = MiGetPdeAddress (MmNonPagedSystemStart);
  426. EndPde = MiGetPdeAddress (MmNonPagedPoolEnd);
  427. ASSERT ((EndPde - StartPde) < (LONG)NumberOfPages);
  428. TempPte = ValidKernelPte;
  429. while (StartPde <= EndPde) {
  430. if (StartPde->u.Hard.Valid == 0) {
  431. //
  432. // Map in a page directory page.
  433. //
  434. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  435. NumberOfPages -= 1;
  436. NextPhysicalPage += 1;
  437. if (NumberOfPages == 0) {
  438. ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
  439. FreeDescriptor->PageCount));
  440. NextPhysicalPage = FreeDescriptor->BasePage;
  441. NumberOfPages = FreeDescriptor->PageCount;
  442. }
  443. *StartPde = TempPte;
  444. }
  445. StartPde += 1;
  446. }
  447. //
  448. // Zero the PTEs before non-paged pool.
  449. //
  450. StartPde = MiGetPteAddress (MmNonPagedSystemStart);
  451. PointerPte = MiGetPteAddress (MmNonPagedPoolStart);
  452. RtlZeroMemory (StartPde, (ULONG)PointerPte - (ULONG)StartPde);
  453. //
  454. // Fill in the PTEs for non-paged pool.
  455. //
  456. PointerPte = MiGetPteAddress(MmNonPagedPoolStart);
  457. LastPte = MiGetPteAddress((ULONG)MmNonPagedPoolStart +
  458. MmSizeOfNonPagedPoolInBytes - 1);
  459. if (MmExpandedNonPagedPoolInBytes == 0) {
  460. if (!LowMemoryReserved) {
  461. if (NumberOfPages < (ULONG)(LastPte - PointerPte + 1)) {
  462. //
  463. // Can't just switch descriptors here - the initial nonpaged
  464. // pool is always mapped via KSEG0 and is thus required to be
  465. // virtually and physically contiguous.
  466. //
  467. KeBugCheckEx (INSTALL_MORE_MEMORY,
  468. MmNumberOfPhysicalPages,
  469. NumberOfPages,
  470. LastPte - PointerPte + 1,
  471. 1);
  472. }
  473. while (PointerPte <= LastPte) {
  474. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  475. NextPhysicalPage += 1;
  476. NumberOfPages -= 1;
  477. ASSERT (NumberOfPages != 0);
  478. *PointerPte = TempPte;
  479. PointerPte += 1;
  480. }
  481. } else {
  482. ULONG ReservedPage = FreeDescriptorLowMem->BasePage;
  483. while (PointerPte <= LastPte) {
  484. TempPte.u.Hard.PageFrameNumber = ReservedPage;
  485. ReservedPage += 1;
  486. *PointerPte = TempPte;
  487. PointerPte += 1;
  488. }
  489. }
  490. LastPte = MiGetPteAddress ((ULONG)MmNonPagedPoolStart +
  491. MmMaximumNonPagedPoolInBytes - 1);
  492. }
  493. else {
  494. LastPte = MiGetPteAddress ((ULONG)MmNonPagedPoolStart +
  495. MmExpandedNonPagedPoolInBytes - 1);
  496. }
  497. //
  498. // Zero the remaining PTEs for non-paged pool maximum.
  499. //
  500. while (PointerPte <= LastPte) {
  501. *PointerPte = ZeroKernelPte;
  502. PointerPte += 1;
  503. }
  504. //
  505. // Zero the remaining PTEs (if any).
  506. //
  507. while (((ULONG)PointerPte & (PAGE_SIZE - 1)) != 0) {
  508. *PointerPte = ZeroKernelPte;
  509. PointerPte += 1;
  510. }
  511. if (MmExpandedNonPagedPoolInBytes) {
  512. if (LowMemoryReserved) {
  513. MmNonPagedPoolStart = (PVOID)((LowMemoryReserved << PAGE_SHIFT) |
  514. KSEG0_BASE);
  515. }
  516. else if (RemovedLowPage) {
  517. MmNonPagedPoolStart = (PVOID)((RemovedLowPage << PAGE_SHIFT) |
  518. KSEG0_BASE);
  519. }
  520. else {
  521. ASSERT (FALSE);
  522. }
  523. }
  524. else {
  525. PointerPte = MiGetPteAddress (MmNonPagedPoolStart);
  526. MmNonPagedPoolStart = (PVOID)((PointerPte->u.Hard.PageFrameNumber << PAGE_SHIFT) |
  527. KSEG0_BASE);
  528. }
  529. MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
  530. MmSubsectionBase = (ULONG)MmNonPagedPoolStart;
  531. if (MmExpandedNonPagedPoolInBytes == 0) {
  532. if (NextPhysicalPage < (MM_SUBSECTION_MAP >> PAGE_SHIFT)) {
  533. MmSubsectionBase = KSEG0_BASE;
  534. }
  535. }
  536. MmSubsectionTopPage = (((MmSubsectionBase & ~KSEG0_BASE) + MM_SUBSECTION_MAP) >> PAGE_SHIFT);
  537. //
  538. // Non-paged pages now exist, build the pool structures.
  539. //
  540. if (MmExpandedNonPagedPoolInBytes) {
  541. MmNonPagedPoolExpansionStart = (PVOID)NonPagedPoolStartVirtual;
  542. }
  543. else {
  544. MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)NonPagedPoolStartVirtual +
  545. MmSizeOfNonPagedPoolInBytes);
  546. }
  547. MiInitializeNonPagedPool ();
  548. //
  549. // Before Non-paged pool can be used, the PFN database must
  550. // be built. This is due to the fact that the start and end of
  551. // allocation bits for nonpaged pool are maintained in the
  552. // PFN elements for the corresponding pages.
  553. //
  554. //
  555. // Calculate the number of pages required from page zero to
  556. // the highest page.
  557. //
  558. // Get the number of secondary colors and add the array for tracking
  559. // secondary colors to the end of the PFN database.
  560. //
  561. if (MmSecondaryColors == 0) {
  562. MmSecondaryColors = PCR->SecondLevelCacheSize;
  563. }
  564. MmSecondaryColors = MmSecondaryColors >> PAGE_SHIFT;
  565. //
  566. // Make sure value is power of two and within limits.
  567. //
  568. if (((MmSecondaryColors & (MmSecondaryColors -1)) != 0) ||
  569. (MmSecondaryColors < MM_SECONDARY_COLORS_MIN) ||
  570. (MmSecondaryColors > MM_SECONDARY_COLORS_MAX)) {
  571. MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
  572. }
  573. MmSecondaryColorMask = MmSecondaryColors - 1;
  574. PfnAllocation = 1 + ((((MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN)) +
  575. (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2))
  576. >> PAGE_SHIFT);
  577. //
  578. // If the number of pages remaining in the current descriptor is
  579. // greater than the number of pages needed for the PFN database,
  580. // and the descriptor is for memory below 1 gig, then allocate the
  581. // PFN database from the current free descriptor.
  582. // Note: FW creates a new memory descriptor for any memory above 1GB.
  583. // Thus we don't need to worry if the highest page will go beyond 1GB for
  584. // this memory descriptor.
  585. //
  586. #ifndef PFN_CONSISTENCY
  587. if ((NumberOfPages >= PfnAllocation) &&
  588. (NextPhysicalPage + NumberOfPages <= MM_PAGES_IN_KSEG0)) {
  589. //
  590. // Allocate the PFN database in kseg0.
  591. //
  592. // Compute the address of the PFN by allocating the appropriate
  593. // number of pages from the end of the free descriptor.
  594. //
  595. PfnInKseg0 = TRUE;
  596. HighPage = NextPhysicalPage + NumberOfPages;
  597. MmPfnDatabase = (PMMPFN)(KSEG0_BASE |
  598. ((HighPage - PfnAllocation) << PAGE_SHIFT));
  599. RtlZeroMemory(MmPfnDatabase, PfnAllocation * PAGE_SIZE);
  600. //
  601. // Mark off the chunk of memory used for the PFN database.
  602. //
  603. NumberOfPages -= PfnAllocation;
  604. if (NextPhysicalPage >= FreeDescriptorLowMem->BasePage &&
  605. NextPhysicalPage < (FreeDescriptorLowMem->BasePage +
  606. FreeDescriptorLowMem->PageCount)) {
  607. //
  608. // We haven't used the other descriptor.
  609. //
  610. FreeDescriptorLowMem->PageCount -= PfnAllocation;
  611. } else {
  612. FreeDescriptor->PageCount -= PfnAllocation;
  613. }
  614. //
  615. // Allocate one PTE at the very top of the Mm virtual address space.
  616. // This provides protection against the caller of the first real
  617. // nonpaged expansion allocation in case he accidentally overruns his
  618. // pool block. (We'll trap instead of corrupting the crashdump PTEs).
  619. // This also allows us to freely increment in MiFreePoolPages
  620. // without having to worry about a valid PTE just after the end of
  621. // the highest nonpaged pool allocation.
  622. //
  623. if (MiReserveSystemPtes (1, NonPagedPoolExpansion) == NULL) {
  624. MiIssueNoPtesBugcheck (1, NonPagedPoolExpansion);
  625. }
  626. }
  627. else {
  628. #endif // PFN_CONSISTENCY
  629. //
  630. // Calculate the start of the Pfn database (it starts at physical
  631. // page zero, even if the lowest physical page is not zero).
  632. //
  633. PfnInKseg0 = FALSE;
  634. PointerPte = MiReserveSystemPtes (PfnAllocation, NonPagedPoolExpansion);
  635. if (PointerPte == NULL) {
  636. MiIssueNoPtesBugcheck (PfnAllocation, NonPagedPoolExpansion);
  637. }
  638. #if PFN_CONSISTENCY
  639. MiPfnStartPte = PointerPte;
  640. MiPfnPtes = PfnAllocation;
  641. #endif
  642. MmPfnDatabase = (PMMPFN)(MiGetVirtualAddressMappedByPte (PointerPte));
  643. //
  644. // Allocate one more PTE just below the PFN database. This provides
  645. // protection against the caller of the first real nonpaged
  646. // expansion allocation in case he accidentally overruns his pool
  647. // block. (We'll trap instead of corrupting the PFN database).
  648. // This also allows us to freely increment in MiFreePoolPages
  649. // without having to worry about a valid PTE just after the end of
  650. // the highest nonpaged pool allocation.
  651. //
  652. if (MiReserveSystemPtes (1, NonPagedPoolExpansion) == NULL) {
  653. MiIssueNoPtesBugcheck (1, NonPagedPoolExpansion);
  654. }
  655. //
  656. // Go through the memory descriptors and for each physical page
  657. // make sure the PFN database has a valid PTE to map it. This allows
  658. // machines with sparse physical memory to have a minimal PFN
  659. // database.
  660. //
  661. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  662. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  663. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  664. MEMORY_ALLOCATION_DESCRIPTOR,
  665. ListEntry);
  666. PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(
  667. MemoryDescriptor->BasePage));
  668. LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT(
  669. MemoryDescriptor->BasePage +
  670. MemoryDescriptor->PageCount))) - 1);
  671. //
  672. // If memory was temporarily removed to create the initial non
  673. // paged pool, account for it now so PFN entries are created for it.
  674. //
  675. if (MemoryDescriptor == FreeDescriptorLowMem) {
  676. if (RemovedLowPage) {
  677. ASSERT (MemoryDescriptor->BasePage + MemoryDescriptor->PageCount == RemovedLowPage);
  678. LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT(
  679. MemoryDescriptor->BasePage +
  680. RemovedLowCount +
  681. MemoryDescriptor->PageCount))) - 1);
  682. }
  683. }
  684. while (PointerPte <= LastPte) {
  685. if (PointerPte->u.Hard.Valid == 0) {
  686. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  687. NextPhysicalPage += 1;
  688. NumberOfPages -= 1;
  689. if (NumberOfPages == 0) {
  690. ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
  691. FreeDescriptor->PageCount));
  692. NextPhysicalPage = FreeDescriptor->BasePage;
  693. NumberOfPages = FreeDescriptor->PageCount;
  694. }
  695. *PointerPte = TempPte;
  696. RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
  697. PAGE_SIZE);
  698. }
  699. PointerPte += 1;
  700. }
  701. NextMd = MemoryDescriptor->ListEntry.Flink;
  702. }
  703. #ifndef PFN_CONSISTENCY
  704. }
  705. #endif // PFN_CONSISTENCY
  706. //
  707. // Initialize support for colored pages.
  708. //
  709. MmFreePagesByColor[0] = (PMMCOLOR_TABLES)
  710. &MmPfnDatabase[MmHighestPossiblePhysicalPage + 1];
  711. MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
  712. //
  713. // Make sure the PTEs are mapped.
  714. //
  715. if (!MI_IS_PHYSICAL_ADDRESS(MmFreePagesByColor[0])) {
  716. PointerPte = MiGetPteAddress (&MmFreePagesByColor[0][0]);
  717. LastPte = MiGetPteAddress (
  718. (PVOID)((PCHAR)&MmFreePagesByColor[1][MmSecondaryColors]-1));
  719. while (PointerPte <= LastPte) {
  720. if (PointerPte->u.Hard.Valid == 0) {
  721. TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
  722. NextPhysicalPage += 1;
  723. NumberOfPages -= 1;
  724. if (NumberOfPages == 0) {
  725. ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
  726. FreeDescriptor->PageCount));
  727. NextPhysicalPage = FreeDescriptor->BasePage;
  728. NumberOfPages = FreeDescriptor->PageCount;
  729. }
  730. *PointerPte = TempPte;
  731. RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
  732. PAGE_SIZE);
  733. }
  734. PointerPte += 1;
  735. }
  736. }
  737. for (i = 0; i < MmSecondaryColors; i += 1) {
  738. MmFreePagesByColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
  739. MmFreePagesByColor[FreePageList][i].Flink = MM_EMPTY_LIST;
  740. }
  741. #if MM_MAXIMUM_NUMBER_OF_COLORS > 1
  742. for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i += 1) {
  743. MmFreePagesByPrimaryColor[ZeroedPageList][i].ListName = ZeroedPageList;
  744. MmFreePagesByPrimaryColor[FreePageList][i].ListName = FreePageList;
  745. MmFreePagesByPrimaryColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
  746. MmFreePagesByPrimaryColor[FreePageList][i].Flink = MM_EMPTY_LIST;
  747. MmFreePagesByPrimaryColor[ZeroedPageList][i].Blink = MM_EMPTY_LIST;
  748. MmFreePagesByPrimaryColor[FreePageList][i].Blink = MM_EMPTY_LIST;
  749. }
  750. #endif
  751. //
  752. // Go through the page table entries and for any page which is
  753. // valid, update the corresponding PFN database element.
  754. //
  755. PointerPde = MiGetPdeAddress (PTE_BASE);
  756. PdePage = PointerPde->u.Hard.PageFrameNumber;
  757. Pfn1 = MI_PFN_ELEMENT(PdePage);
  758. Pfn1->PteFrame = PdePage;
  759. Pfn1->PteAddress = PointerPde;
  760. Pfn1->u2.ShareCount += 1;
  761. Pfn1->u3.e2.ReferenceCount = 1;
  762. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  763. Pfn1->u3.e1.PageColor =
  764. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (PointerPde));
  765. //
  766. // Add the pages which were used to construct nonpaged pool to
  767. // the PFN database.
  768. //
  769. Pde = MiGetPdeAddress (MmNonPagedSystemStart);
  770. EndPde = MiGetPdeAddress(NON_PAGED_SYSTEM_END);
  771. while (Pde <= EndPde) {
  772. if (Pde->u.Hard.Valid == 1) {
  773. PdePage = Pde->u.Hard.PageFrameNumber;
  774. Pfn1 = MI_PFN_ELEMENT(PdePage);
  775. Pfn1->PteFrame = PointerPde->u.Hard.PageFrameNumber;
  776. Pfn1->PteAddress = Pde;
  777. Pfn1->u2.ShareCount += 1;
  778. Pfn1->u3.e2.ReferenceCount = 1;
  779. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  780. Pfn1->u3.e1.PageColor =
  781. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pde));
  782. PointerPte = MiGetVirtualAddressMappedByPte (Pde);
  783. for (j = 0 ; j < PTE_PER_PAGE; j += 1) {
  784. if (PointerPte->u.Hard.Valid == 1) {
  785. PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
  786. Pfn2 = MI_PFN_ELEMENT(PageFrameIndex);
  787. Pfn2->PteFrame = PdePage;
  788. Pfn2->u2.ShareCount += 1;
  789. Pfn2->u3.e2.ReferenceCount = 1;
  790. Pfn2->u3.e1.PageLocation = ActiveAndValid;
  791. Pfn2->PteAddress =
  792. (PMMPTE)(KSEG0_BASE | (PageFrameIndex << PTE_SHIFT));
  793. Pfn2->u3.e1.PageColor =
  794. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn2->PteAddress));
  795. }
  796. PointerPte += 1;
  797. }
  798. }
  799. Pde += 1;
  800. }
  801. //
  802. // Handle the initial nonpaged pool on expanded systems.
  803. //
  804. if (MmExpandedNonPagedPoolInBytes) {
  805. PageFrameIndex = (((ULONG_PTR)MmNonPagedPoolStart & ~KSEG0_BASE) >> PAGE_SHIFT);
  806. Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
  807. j = PageFrameIndex + (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT);
  808. while (PageFrameIndex < j) {
  809. Pfn1->PteFrame = PdePage;
  810. Pfn1->u2.ShareCount += 1;
  811. Pfn1->u3.e2.ReferenceCount = 1;
  812. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  813. Pfn1->PteAddress =
  814. (PMMPTE)(KSEG0_BASE | (PageFrameIndex << PTE_SHIFT));
  815. Pfn1->u3.e1.PageColor =
  816. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn1->PteAddress));
  817. PageFrameIndex += 1;
  818. Pfn1 += 1;
  819. }
  820. }
  821. //
  822. // If page zero is still unused, mark it as in use. This is
  823. // temporary as we want to find bugs where a physical page
  824. // is specified as zero.
  825. //
  826. Pfn1 = &MmPfnDatabase[MmLowestPhysicalPage];
  827. if (Pfn1->u3.e2.ReferenceCount == 0) {
  828. //
  829. // Make the reference count non-zero and point it into a
  830. // page directory.
  831. //
  832. Pde = MiGetPdeAddress (0xb0000000);
  833. PdePage = Pde->u.Hard.PageFrameNumber;
  834. Pfn1->PteFrame = PdePageNumber;
  835. Pfn1->PteAddress = Pde;
  836. Pfn1->u2.ShareCount += 1;
  837. Pfn1->u3.e2.ReferenceCount = 1;
  838. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  839. Pfn1->u3.e1.PageColor =
  840. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pde));
  841. }
  842. // end of temporary set to physical page zero.
  843. //
  844. // Walk through the memory descriptors and add pages to the
  845. // free list in the PFN database.
  846. //
  847. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  848. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  849. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  850. MEMORY_ALLOCATION_DESCRIPTOR,
  851. ListEntry);
  852. i = MemoryDescriptor->PageCount;
  853. NextPhysicalPage = MemoryDescriptor->BasePage;
  854. switch (MemoryDescriptor->MemoryType) {
  855. case LoaderBad:
  856. while (i != 0) {
  857. MiInsertPageInList (&MmBadPageListHead, NextPhysicalPage);
  858. i -= 1;
  859. NextPhysicalPage += 1;
  860. }
  861. break;
  862. case LoaderFree:
  863. case LoaderLoadedProgram:
  864. case LoaderFirmwareTemporary:
  865. case LoaderOsloaderStack:
  866. Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
  867. while (i != 0) {
  868. if (Pfn1->u3.e2.ReferenceCount == 0) {
  869. //
  870. // Set the PTE address to the physical page for
  871. // virtual address alignment checking.
  872. //
  873. Pfn1->PteAddress =
  874. (PMMPTE)(NextPhysicalPage << PTE_SHIFT);
  875. Pfn1->u3.e1.PageColor =
  876. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn1->PteAddress));
  877. MiInsertPageInFreeList (NextPhysicalPage);
  878. }
  879. Pfn1 += 1;
  880. i -= 1;
  881. NextPhysicalPage += 1;
  882. }
  883. break;
  884. default:
  885. PointerPte = MiGetPteAddress (KSEG0_BASE |
  886. (NextPhysicalPage << PAGE_SHIFT));
  887. Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
  888. while (i != 0) {
  889. //
  890. // Set page as in use.
  891. //
  892. Pfn1->PteFrame = PdePageNumber;
  893. Pfn1->PteAddress = PointerPte;
  894. Pfn1->u2.ShareCount += 1;
  895. Pfn1->u3.e2.ReferenceCount = 1;
  896. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  897. Pfn1->u3.e1.PageColor =
  898. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (PointerPte));
  899. Pfn1 += 1;
  900. i -= 1;
  901. NextPhysicalPage += 1;
  902. PointerPte += 1;
  903. }
  904. break;
  905. }
  906. NextMd = MemoryDescriptor->ListEntry.Flink;
  907. }
  908. //
  909. // Indicate that the PFN database is allocated in NonPaged pool.
  910. //
  911. if (PfnInKseg0 == FALSE) {
  912. //
  913. // The PFN database is allocated in virtual memory
  914. //
  915. // Set the start and end of allocation.
  916. //
  917. Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmLowestPhysicalPage])->u.Hard.PageFrameNumber);
  918. Pfn1->u3.e1.StartOfAllocation = 1;
  919. Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmHighestPossiblePhysicalPage])->u.Hard.PageFrameNumber);
  920. Pfn1->u3.e1.EndOfAllocation = 1;
  921. } else {
  922. //
  923. // The PFN database is allocated in KSEG0.
  924. //
  925. // Mark all PFN entries for the PFN pages in use.
  926. //
  927. PageNumber = ((ULONG)MmPfnDatabase - KSEG0_BASE) >> PAGE_SHIFT;
  928. Pfn1 = MI_PFN_ELEMENT(PageNumber);
  929. do {
  930. Pfn1->PteAddress = (PMMPTE)(PageNumber << PTE_SHIFT);
  931. Pfn1->u3.e1.PageColor =
  932. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn1->PteAddress));
  933. Pfn1 += 1;
  934. PfnAllocation -= 1;
  935. } while (PfnAllocation != 0);
  936. //
  937. // Scan the PFN database backward for pages that are completely zero.
  938. // These pages are unused and can be added to the free list
  939. //
  940. if (MmDynamicPfn == FALSE) {
  941. BottomPfn = MI_PFN_ELEMENT(MmHighestPhysicalPage);
  942. do {
  943. //
  944. // Compute the address of the start of the page that is next
  945. // lower in memory and scan backwards until that page address
  946. // is reached or just crossed.
  947. //
  948. if (((ULONG)BottomPfn & (PAGE_SIZE - 1)) != 0) {
  949. BasePfn = (PMMPFN)((ULONG)BottomPfn & ~(PAGE_SIZE - 1));
  950. TopPfn = BottomPfn + 1;
  951. } else {
  952. BasePfn = (PMMPFN)((ULONG)BottomPfn - PAGE_SIZE);
  953. TopPfn = BottomPfn;
  954. }
  955. while (BottomPfn > BasePfn) {
  956. BottomPfn -= 1;
  957. }
  958. //
  959. // If the entire range over which the PFN entries span is
  960. // completely zero and the PFN entry that maps the page is
  961. // not in the range, then add the page to the appropriate
  962. // free list.
  963. //
  964. Range = (ULONG)TopPfn - (ULONG)BottomPfn;
  965. if (RtlCompareMemoryUlong((PVOID)BottomPfn, Range, 0) == Range) {
  966. //
  967. // Set the PTE address to the physical page for
  968. // virtual address alignment checking.
  969. //
  970. PageNumber = ((ULONG)BasePfn - KSEG0_BASE) >> PAGE_SHIFT;
  971. Pfn1 = MI_PFN_ELEMENT(PageNumber);
  972. ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
  973. PfnAllocation += 1;
  974. Pfn1->PteAddress = (PMMPTE)(PageNumber << PTE_SHIFT);
  975. Pfn1->u3.e1.PageColor =
  976. MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn1->PteAddress));
  977. MiInsertPageInFreeList (PageNumber);
  978. }
  979. } while (BottomPfn > MmPfnDatabase);
  980. }
  981. }
  982. //
  983. // Indicate that nonpaged pool must succeed is allocated in
  984. // nonpaged pool.
  985. //
  986. i = MmSizeOfNonPagedMustSucceed;
  987. Pfn1 = MI_PFN_ELEMENT(MI_CONVERT_PHYSICAL_TO_PFN (MmNonPagedMustSucceed));
  988. while ((LONG)i > 0) {
  989. Pfn1->u3.e1.StartOfAllocation = 1;
  990. Pfn1->u3.e1.EndOfAllocation = 1;
  991. i -= PAGE_SIZE;
  992. Pfn1 += 1;
  993. }
  994. //
  995. // Initialize the nonpaged pool.
  996. //
  997. InitializePool (NonPagedPool, 0);
  998. //
  999. // Initialize the nonpaged available PTEs for mapping I/O space
  1000. // and kernel stacks.
  1001. //
  1002. PointerPte = MiGetPteAddress (MmNonPagedSystemStart);
  1003. //
  1004. // Since the initial nonpaged pool must always reside in KSEG0 (many changes
  1005. // would be needed in this routine otherwise), reallocate the PTEs for it
  1006. // to the pagable system PTE pool now.
  1007. //
  1008. MmNumberOfSystemPtes = MiGetPteAddress(MmNonPagedPoolExpansionStart) - PointerPte - 1;
  1009. MiInitializeSystemPtes (PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
  1010. //
  1011. // Initialize memory management structures for this process.
  1012. //
  1013. //
  1014. // Build working set list. System initialization has created
  1015. // a PTE for hyperspace.
  1016. //
  1017. // Note, we can't remove a zeroed page as hyper space does not
  1018. // exist and we map non-zeroed pages into hyper space to zero.
  1019. //
  1020. PointerPte = MiGetPdeAddress(HYPER_SPACE);
  1021. ASSERT (PointerPte->u.Hard.Valid == 1);
  1022. PointerPte->u.Hard.Global = 0;
  1023. PointerPte->u.Hard.Write = 1;
  1024. PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
  1025. //
  1026. // Point to the page table page we just created and zero it.
  1027. //
  1028. PointerPte = MiGetPteAddress(HYPER_SPACE);
  1029. RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
  1030. //
  1031. // Hyper space now exists, set the necessary variables.
  1032. //
  1033. MmFirstReservedMappingPte = MiGetPteAddress (FIRST_MAPPING_PTE);
  1034. MmLastReservedMappingPte = MiGetPteAddress (LAST_MAPPING_PTE);
  1035. //
  1036. // Initialize this process's memory management structures including
  1037. // the working set list.
  1038. //
  1039. //
  1040. // The PFN element for the page directory has already been initialized,
  1041. // zero the reference count and the share count so they won't be
  1042. // wrong.
  1043. //
  1044. Pfn1 = MI_PFN_ELEMENT (PdePageNumber);
  1045. LOCK_PFN (OldIrql);
  1046. Pfn1->u2.ShareCount = 0;
  1047. Pfn1->u3.e2.ReferenceCount = 0;
  1048. //
  1049. // The PFN element for the PDE which maps hyperspace has already
  1050. // been initialized, zero the reference count and the share count
  1051. // so they won't be wrong.
  1052. //
  1053. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1054. Pfn1->u2.ShareCount = 0;
  1055. Pfn1->u3.e2.ReferenceCount = 0;
  1056. CurrentProcess = PsGetCurrentProcess ();
  1057. //
  1058. // Get a page for the working set list and map it into the Page
  1059. // directory at the page after hyperspace.
  1060. //
  1061. PointerPte = MiGetPteAddress (HYPER_SPACE);
  1062. PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE(PointerPte));
  1063. CurrentProcess->WorkingSetPage = PageFrameIndex;
  1064. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1065. PointerPde = MiGetPdeAddress (HYPER_SPACE) + 1;
  1066. //
  1067. // Assert that the double mapped pages have the same alignment.
  1068. //
  1069. ASSERT ((PointerPte->u.Long & (0xF << PTE_SHIFT)) ==
  1070. (PointerPde->u.Long & (0xF << PTE_SHIFT)));
  1071. *PointerPde = TempPte;
  1072. PointerPde->u.Hard.Global = 0;
  1073. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  1074. KeFillEntryTb ((PHARDWARE_PTE)PointerPde,
  1075. PointerPte,
  1076. TRUE);
  1077. RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
  1078. TempPte = *PointerPde;
  1079. TempPte.u.Hard.Valid = 0;
  1080. TempPte.u.Hard.Global = 0;
  1081. KeFlushSingleTb (PointerPte,
  1082. TRUE,
  1083. FALSE,
  1084. (PHARDWARE_PTE)PointerPde,
  1085. TempPte.u.Hard);
  1086. UNLOCK_PFN (OldIrql);
  1087. //
  1088. // Initialize hyperspace for this process.
  1089. //
  1090. PointerPte = MmFirstReservedMappingPte;
  1091. PointerPte->u.Hard.PageFrameNumber = NUMBER_OF_MAPPING_PTES;
  1092. CurrentProcess->Vm.MaximumWorkingSetSize = MmSystemProcessWorkingSetMax;
  1093. CurrentProcess->Vm.MinimumWorkingSetSize = MmSystemProcessWorkingSetMin;
  1094. MmInitializeProcessAddressSpace (CurrentProcess,
  1095. (PEPROCESS)NULL,
  1096. (PVOID)NULL,
  1097. (PVOID)NULL);
  1098. *PointerPde = ZeroKernelPte;
  1099. //
  1100. // Check to see if moving the secondary page structures to the end
  1101. // of the PFN database is a waste of memory. And if so, copy it
  1102. // to paged pool.
  1103. //
  1104. // If the PFN database ends on a page aligned boundary and the
  1105. // size of the two arrays is less than a page, free the page
  1106. // and allocate nonpagedpool for this.
  1107. //
  1108. if ((((ULONG)MmFreePagesByColor[0] & (PAGE_SIZE - 1)) == 0) &&
  1109. ((MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES)) < PAGE_SIZE)) {
  1110. PMMCOLOR_TABLES c;
  1111. c = MmFreePagesByColor[0];
  1112. MmFreePagesByColor[0] = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
  1113. MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES),
  1114. ' mM');
  1115. MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
  1116. RtlCopyMemory (MmFreePagesByColor[0],
  1117. c,
  1118. MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES));
  1119. //
  1120. // Free the page.
  1121. //
  1122. if (!MI_IS_PHYSICAL_ADDRESS(c)) {
  1123. PointerPte = MiGetPteAddress(c);
  1124. PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
  1125. *PointerPte = ZeroKernelPte;
  1126. } else {
  1127. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (c);
  1128. }
  1129. LOCK_PFN (OldIrql);
  1130. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1131. ASSERT ((Pfn1->u3.e2.ReferenceCount <= 1) && (Pfn1->u2.ShareCount <= 1));
  1132. Pfn1->u2.ShareCount = 0;
  1133. Pfn1->u3.e2.ReferenceCount = 0;
  1134. MI_SET_PFN_DELETED (Pfn1);
  1135. #if DBG
  1136. Pfn1->u3.e1.PageLocation = StandbyPageList;
  1137. #endif //DBG
  1138. MiInsertPageInFreeList (PageFrameIndex);
  1139. UNLOCK_PFN (OldIrql);
  1140. }
  1141. return;
  1142. }