Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

4812 lines
140 KiB

  1. /*++
  2. Copyright (c) 1989 Microsoft Corporation
  3. Module Name:
  4. mminit.c
  5. Abstract:
  6. This module contains the initialization for the memory management
  7. system.
  8. Author:
  9. Lou Perazzoli (loup) 20-Mar-1989
  10. Landy Wang (landyw) 02-Jun-1997
  11. Revision History:
  12. --*/
  13. #include "mi.h"
  14. PMMPTE MmSharedUserDataPte;
  15. extern ULONG_PTR MmSystemPtesStart[MaximumPtePoolTypes];
  16. extern PMMPTE MiSpecialPoolFirstPte;
  17. extern ULONG MmPagedPoolCommit;
  18. extern ULONG MmInPageSupportMinimum;
  19. extern PFN_NUMBER MiExpansionPoolPagesInitialCharge;
  20. extern ULONG MmAllocationPreference;
  21. extern PVOID BBTBuffer;
  22. extern PFN_COUNT BBTPagesToReserve;
  23. ULONG_PTR MmSubsectionBase;
  24. ULONG_PTR MmSubsectionTopPage;
  25. ULONG MmDataClusterSize;
  26. ULONG MmCodeClusterSize;
  27. PFN_NUMBER MmResidentAvailableAtInit;
  28. PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
  29. LIST_ENTRY MmLockConflictList;
  30. LIST_ENTRY MmProtectedPteList;
  31. KSPIN_LOCK MmProtectedPteLock;
  32. LOGICAL MmPagedPoolMaximumDesired = FALSE;
  33. #if defined (_MI_DEBUG_SUB)
  34. ULONG MiTrackSubs = 0x2000; // Set to nonzero to enable subsection tracking code.
  35. LONG MiSubsectionIndex;
  36. PMI_SUB_TRACES MiSubsectionTraces;
  37. #endif
  38. #if defined (_MI_DEBUG_DIRTY)
  39. ULONG MiTrackDirtys = 0x10000; // Set to nonzero to enable subsection tracking code.
  40. LONG MiDirtyIndex;
  41. PMI_DIRTY_TRACES MiDirtyTraces;
  42. #endif
  43. #if defined (_MI_DEBUG_DATA)
  44. ULONG MiTrackData = 0x10000; // Set to nonzero to enable data tracking code.
  45. LONG MiDataIndex;
  46. PMI_DATA_TRACES MiDataTraces;
  47. #endif
  48. VOID
  49. MiMapBBTMemory (
  50. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  51. );
  52. VOID
  53. MiEnablePagingTheExecutive(
  54. VOID
  55. );
  56. VOID
  57. MiEnablePagingOfDriverAtInit (
  58. IN PMMPTE PointerPte,
  59. IN PMMPTE LastPte
  60. );
  61. VOID
  62. MiBuildPagedPool (
  63. );
  64. VOID
  65. MiWriteProtectSystemImage (
  66. IN PVOID DllBase
  67. );
  68. VOID
  69. MiInitializePfnTracing (
  70. VOID
  71. );
  72. PFN_NUMBER
  73. MiPagesInLoaderBlock (
  74. IN PLOADER_PARAMETER_BLOCK LoaderBlock,
  75. IN PBOOLEAN IncludeType
  76. );
  77. #ifndef NO_POOL_CHECKS
  78. VOID
  79. MiInitializeSpecialPoolCriteria (
  80. IN VOID
  81. );
  82. #endif
  83. #ifdef _MI_MESSAGE_SERVER
  84. VOID
  85. MiInitializeMessageQueue (
  86. VOID
  87. );
  88. #endif
  89. static
  90. VOID
  91. MiMemoryLicense (
  92. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  93. );
  94. VOID
  95. MiInitializeCacheOverrides (
  96. VOID
  97. );
  98. //
  99. // The thresholds can be overridden by the registry.
  100. //
  101. PFN_NUMBER MmLowMemoryThreshold;
  102. PFN_NUMBER MmHighMemoryThreshold;
  103. PKEVENT MiLowMemoryEvent;
  104. PKEVENT MiHighMemoryEvent;
  105. NTSTATUS
  106. MiCreateMemoryEvent (
  107. IN PUNICODE_STRING EventName,
  108. OUT PKEVENT *Event
  109. );
  110. LOGICAL
  111. MiInitializeMemoryEvents (
  112. VOID
  113. );
  114. #ifdef ALLOC_PRAGMA
  115. #pragma alloc_text(INIT,MmInitSystem)
  116. #pragma alloc_text(INIT,MiMapBBTMemory)
  117. #pragma alloc_text(INIT,MmInitializeMemoryLimits)
  118. #pragma alloc_text(INIT,MmFreeLoaderBlock)
  119. #pragma alloc_text(INIT,MiBuildPagedPool)
  120. #pragma alloc_text(INIT,MiFindInitializationCode)
  121. #pragma alloc_text(INIT,MiEnablePagingTheExecutive)
  122. #pragma alloc_text(INIT,MiEnablePagingOfDriverAtInit)
  123. #pragma alloc_text(INIT,MiPagesInLoaderBlock)
  124. #pragma alloc_text(INIT,MiCreateMemoryEvent)
  125. #pragma alloc_text(INIT,MiInitializeMemoryEvents)
  126. #pragma alloc_text(INIT,MiInitializeCacheOverrides)
  127. #pragma alloc_text(INIT,MiMemoryLicense)
  128. #pragma alloc_text(PAGELK,MiFreeInitializationCode)
  129. #endif
  130. //
  131. // Default is a 300 second life span for modified mapped pages -
  132. // This can be overridden in the registry.
  133. //
  134. #ifdef ALLOC_DATA_PRAGMA
  135. #pragma data_seg("INITDATA")
  136. #endif
  137. ULONG MmModifiedPageLifeInSeconds = 300;
  138. #ifdef ALLOC_DATA_PRAGMA
  139. #pragma data_seg()
  140. #endif
  141. LARGE_INTEGER MiModifiedPageLife;
  142. BOOLEAN MiTimerPending = FALSE;
  143. KEVENT MiMappedPagesTooOldEvent;
  144. KDPC MiModifiedPageWriterTimerDpc;
  145. KTIMER MiModifiedPageWriterTimer;
  146. //
  147. // The following constants are based on the number PAGES not the
  148. // memory size. For convenience the number of pages is calculated
  149. // based on a 4k page size. Hence 12mb with 4k page is 3072.
  150. //
  151. #define MM_SMALL_SYSTEM ((13*1024*1024) / 4096)
  152. #define MM_MEDIUM_SYSTEM ((19*1024*1024) / 4096)
  153. #define MM_MIN_INITIAL_PAGED_POOL ((32*1024*1024) >> PAGE_SHIFT)
  154. #define MM_DEFAULT_IO_LOCK_LIMIT (2 * 1024 * 1024)
  155. extern WSLE_NUMBER MmMaximumWorkingSetSize;
  156. extern ULONG MmEnforceWriteProtection;
  157. extern CHAR MiPteStr[];
  158. extern LONG MiTrimInProgressCount;
  159. #if (_MI_PAGING_LEVELS < 3)
  160. PFN_NUMBER MmSystemPageDirectory[PD_PER_SYSTEM];
  161. PMMPTE MmSystemPagePtes;
  162. #endif
  163. ULONG MmTotalSystemCodePages;
  164. MM_SYSTEMSIZE MmSystemSize;
  165. ULONG MmLargeSystemCache;
  166. ULONG MmProductType;
  167. extern ULONG MiVerifyAllDrivers;
  168. LIST_ENTRY MmLoadedUserImageList;
  169. PPAGE_FAULT_NOTIFY_ROUTINE MmPageFaultNotifyRoutine;
  170. #if defined (_WIN64)
  171. #define MM_ALLOCATION_FRAGMENT (64 * 1024 * 1024)
  172. #else
  173. #define MM_ALLOCATION_FRAGMENT (64 * 1024)
  174. #endif
  175. //
  176. // Registry-settable.
  177. //
  178. SIZE_T MmAllocationFragment;
  179. #if defined(MI_MULTINODE)
  180. HALNUMAPAGETONODE
  181. MiNonNumaPageToNodeColor (
  182. IN PFN_NUMBER PageFrameIndex
  183. )
  184. /*++
  185. Routine Description:
  186. Return the node color of the page.
  187. Arguments:
  188. PageFrameIndex - Supplies the physical page number.
  189. Return Value:
  190. Node color is always zero in non-NUMA configurations.
  191. --*/
  192. {
  193. UNREFERENCED_PARAMETER (PageFrameIndex);
  194. return 0;
  195. }
  196. //
  197. // This node determination function pointer is initialized to return 0.
  198. //
  199. // Architecture-dependent initialization may repoint it to a HAL routine
  200. // for NUMA configurations.
  201. //
  202. PHALNUMAPAGETONODE MmPageToNode = MiNonNumaPageToNodeColor;
  203. VOID
  204. MiDetermineNode (
  205. IN PFN_NUMBER PageFrameIndex,
  206. IN PMMPFN Pfn
  207. )
  208. /*++
  209. Routine Description:
  210. This routine is called during initial freelist population or when
  211. physical memory is being hot-added. It then determines which node
  212. (in a multinode NUMA system) the physical memory resides in, and
  213. marks the PFN entry accordingly.
  214. N.B. The actual page to node determination is machine dependent
  215. and done by a routine in the chipset driver or the HAL, called
  216. via the MmPageToNode function pointer.
  217. Arguments:
  218. PageFrameIndex - Supplies the physical page number.
  219. Pfn - Supplies a pointer to the PFN database element.
  220. Return Value:
  221. None.
  222. Environment:
  223. None although typically this routine is called with the PFN
  224. database locked.
  225. --*/
  226. {
  227. ULONG Temp;
  228. ASSERT (Pfn == MI_PFN_ELEMENT(PageFrameIndex));
  229. Temp = MmPageToNode (PageFrameIndex);
  230. ASSERT (Temp < MAXIMUM_CCNUMA_NODES);
  231. Pfn->u3.e1.PageColor = Temp;
  232. }
  233. #endif
  234. BOOLEAN
  235. MmInitSystem (
  236. IN ULONG Phase,
  237. IN PLOADER_PARAMETER_BLOCK LoaderBlock
  238. )
  239. /*++
  240. Routine Description:
  241. This function is called during Phase 0, phase 1 and at the end
  242. of phase 1 ("phase 2") initialization.
  243. Phase 0 initializes the memory management paging functions,
  244. nonpaged and paged pool, the PFN database, etc.
  245. Phase 1 initializes the section objects, the physical memory
  246. object, and starts the memory management system threads.
  247. Phase 2 frees memory used by the OsLoader.
  248. Arguments:
  249. Phase - System initialization phase.
  250. LoaderBlock - Supplies a pointer to the system loader block.
  251. Return Value:
  252. Returns TRUE if the initialization was successful.
  253. Environment:
  254. Kernel Mode Only. System initialization.
  255. --*/
  256. {
  257. PEPROCESS Process;
  258. PSINGLE_LIST_ENTRY SingleListEntry;
  259. PFN_NUMBER NumberOfPages;
  260. HANDLE ThreadHandle;
  261. OBJECT_ATTRIBUTES ObjectAttributes;
  262. PMMPTE PointerPte;
  263. PMMPTE PointerPde;
  264. PMMPTE StartPde;
  265. PMMPTE EndPde;
  266. PMMPFN Pfn1;
  267. PFN_NUMBER i, j;
  268. PFN_NUMBER DeferredMdlEntries;
  269. PFN_NUMBER PageFrameIndex;
  270. PFN_NUMBER DirectoryFrameIndex;
  271. MMPTE TempPte;
  272. KIRQL OldIrql;
  273. PLIST_ENTRY NextEntry;
  274. PKLDR_DATA_TABLE_ENTRY DataTableEntry;
  275. ULONG MaximumSystemCacheSize;
  276. ULONG MaximumSystemCacheSizeTotal;
  277. PIMAGE_NT_HEADERS NtHeaders;
  278. ULONG_PTR SystemPteMultiplier;
  279. ULONG_PTR DefaultSystemViewSize;
  280. ULONG_PTR SessionEnd;
  281. SIZE_T SystemViewMax;
  282. SIZE_T HydraImageMax;
  283. SIZE_T HydraViewMax;
  284. SIZE_T HydraPoolMax;
  285. SIZE_T HydraSpaceUsedForSystemViews;
  286. BOOLEAN IncludeType[LoaderMaximum];
  287. LOGICAL AutosizingFragment;
  288. ULONG VerifierFlags;
  289. #if DBG
  290. MMPTE Pointer;
  291. #endif
  292. #if (_MI_PAGING_LEVELS >= 3)
  293. LOGICAL FirstPpe;
  294. PMMPTE StartPpe;
  295. #endif
  296. #if (_MI_PAGING_LEVELS >= 4)
  297. LOGICAL FirstPxe;
  298. PMMPTE StartPxe;
  299. #endif
  300. #if defined(_X86_)
  301. PCHAR ReducedUserVaOption;
  302. ULONG UserVaLimit;
  303. ULONG ReductionInBytes;
  304. #endif
  305. j = 0;
  306. PointerPde = NULL;
  307. //
  308. // Make sure structure alignment is okay.
  309. //
  310. if (Phase == 0) {
  311. MmThrottleTop = 450;
  312. MmThrottleBottom = 127;
  313. //
  314. // Set the highest user address, the system range start address, the
  315. // user probe address, and the virtual bias.
  316. //
  317. #if defined(_WIN64)
  318. MmHighestUserAddress = MI_HIGHEST_USER_ADDRESS;
  319. MmUserProbeAddress = MI_USER_PROBE_ADDRESS;
  320. MmSystemRangeStart = MI_SYSTEM_RANGE_START;
  321. #else
  322. MmHighestUserAddress = (PVOID)(KSEG0_BASE - 0x10000 - 1);
  323. MmUserProbeAddress = KSEG0_BASE - 0x10000;
  324. MmSystemRangeStart = (PVOID)KSEG0_BASE;
  325. #endif
  326. MiHighestUserPte = MiGetPteAddress (MmHighestUserAddress);
  327. MiHighestUserPde = MiGetPdeAddress (MmHighestUserAddress);
  328. #if (_MI_PAGING_LEVELS >= 4)
  329. MiHighestUserPpe = MiGetPpeAddress (MmHighestUserAddress);
  330. MiHighestUserPxe = MiGetPxeAddress (MmHighestUserAddress);
  331. #endif
  332. #if defined(_X86_) || defined(_AMD64_)
  333. MmBootImageSize = LoaderBlock->Extension->LoaderPagesSpanned;
  334. MmBootImageSize *= PAGE_SIZE;
  335. MmBootImageSize = MI_ROUND_TO_SIZE (MmBootImageSize,
  336. MM_VA_MAPPED_BY_PDE);
  337. ASSERT ((MmBootImageSize % MM_VA_MAPPED_BY_PDE) == 0);
  338. #endif
  339. #if defined(_X86_)
  340. MmVirtualBias = LoaderBlock->u.I386.VirtualBias;
  341. #endif
  342. //
  343. // Initialize system and Hydra mapped view sizes.
  344. //
  345. DefaultSystemViewSize = MM_SYSTEM_VIEW_SIZE;
  346. MmSessionSize = MI_SESSION_SPACE_DEFAULT_TOTAL_SIZE;
  347. SessionEnd = (ULONG_PTR) MM_SESSION_SPACE_DEFAULT_END;
  348. #define MM_MB_MAPPED_BY_PDE (MM_VA_MAPPED_BY_PDE / (1024*1024))
  349. //
  350. // A PDE of virtual space is the minimum system view size allowed.
  351. //
  352. if (MmSystemViewSize < (MM_VA_MAPPED_BY_PDE / (1024*1024))) {
  353. MmSystemViewSize = DefaultSystemViewSize;
  354. }
  355. else {
  356. //
  357. // The view size has been specified (in megabytes) by the registry.
  358. // Validate it.
  359. //
  360. if (MmVirtualBias == 0) {
  361. //
  362. // Round the system view size (in megabytes) to a PDE multiple.
  363. //
  364. MmSystemViewSize = MI_ROUND_TO_SIZE (MmSystemViewSize,
  365. MM_MB_MAPPED_BY_PDE);
  366. //
  367. // NT64 locates system views just after systemwide paged pool,
  368. // so the size of the system views are not limited by session
  369. // space. Arbitrarily make the maximum a PPE's worth.
  370. //
  371. //
  372. // NT32 shares system view VA space with session VA space due
  373. // to the shortage of virtual addresses. Thus increasing the
  374. // system view size means potentially decreasing the maximum
  375. // session space size.
  376. //
  377. SystemViewMax = (MI_SESSION_SPACE_MAXIMUM_TOTAL_SIZE) / (1024*1024);
  378. #if !defined(_WIN64)
  379. //
  380. // Ensure at least enough space is left for
  381. // the standard default session layout.
  382. //
  383. SystemViewMax -= (MmSessionSize / (1024*1024));
  384. #endif
  385. //
  386. // Note a view size of -1 will be rounded to zero. Treat -1
  387. // as requesting the maximum.
  388. //
  389. if ((MmSystemViewSize > SystemViewMax) ||
  390. (MmSystemViewSize == 0)) {
  391. MmSystemViewSize = SystemViewMax;
  392. }
  393. MmSystemViewSize *= (1024*1024);
  394. }
  395. else {
  396. MmSystemViewSize = DefaultSystemViewSize;
  397. }
  398. }
  399. #if defined(_WIN64)
  400. HydraSpaceUsedForSystemViews = 0;
  401. #else
  402. HydraSpaceUsedForSystemViews = MmSystemViewSize;
  403. #endif
  404. MiSessionImageEnd = SessionEnd;
  405. //
  406. // Select reasonable Hydra image, pool and view virtual sizes.
  407. // A PDE of virtual space is the minimum size allowed for each type.
  408. //
  409. if (MmVirtualBias == 0) {
  410. if (MmSessionImageSize < MM_MB_MAPPED_BY_PDE) {
  411. MmSessionImageSize = MI_SESSION_DEFAULT_IMAGE_SIZE;
  412. }
  413. else {
  414. //
  415. // The Hydra image size has been specified (in megabytes)
  416. // by the registry.
  417. //
  418. // Round it to a PDE multiple and validate it.
  419. //
  420. MmSessionImageSize = MI_ROUND_TO_SIZE (MmSessionImageSize,
  421. MM_MB_MAPPED_BY_PDE);
  422. HydraImageMax = (MI_SESSION_SPACE_MAXIMUM_TOTAL_SIZE - HydraSpaceUsedForSystemViews - (MmSessionSize - MI_SESSION_DEFAULT_IMAGE_SIZE)) / (1024*1024);
  423. //
  424. // Note a view size of -1 will be rounded to zero.
  425. // Treat -1 as requesting the maximum.
  426. //
  427. if ((MmSessionImageSize > HydraImageMax) ||
  428. (MmSessionImageSize == 0)) {
  429. MmSessionImageSize = HydraImageMax;
  430. }
  431. MmSessionImageSize *= (1024*1024);
  432. MmSessionSize -= MI_SESSION_DEFAULT_IMAGE_SIZE;
  433. MmSessionSize += MmSessionImageSize;
  434. }
  435. MiSessionImageStart = SessionEnd - MmSessionImageSize;
  436. //
  437. // The session image start and size has been established.
  438. //
  439. // Now initialize the session pool and view ranges which lie
  440. // virtually below it.
  441. //
  442. if (MmSessionViewSize < MM_MB_MAPPED_BY_PDE) {
  443. MmSessionViewSize = MI_SESSION_DEFAULT_VIEW_SIZE;
  444. }
  445. else {
  446. //
  447. // The Hydra view size has been specified (in megabytes)
  448. // by the registry. Validate it.
  449. //
  450. // Round the Hydra view size to a PDE multiple.
  451. //
  452. MmSessionViewSize = MI_ROUND_TO_SIZE (MmSessionViewSize,
  453. MM_MB_MAPPED_BY_PDE);
  454. HydraViewMax = (MI_SESSION_SPACE_MAXIMUM_TOTAL_SIZE - HydraSpaceUsedForSystemViews - (MmSessionSize - MI_SESSION_DEFAULT_VIEW_SIZE)) / (1024*1024);
  455. //
  456. // Note a view size of -1 will be rounded to zero.
  457. // Treat -1 as requesting the maximum.
  458. //
  459. if ((MmSessionViewSize > HydraViewMax) ||
  460. (MmSessionViewSize == 0)) {
  461. MmSessionViewSize = HydraViewMax;
  462. }
  463. MmSessionViewSize *= (1024*1024);
  464. MmSessionSize -= MI_SESSION_DEFAULT_VIEW_SIZE;
  465. MmSessionSize += MmSessionViewSize;
  466. }
  467. MiSessionViewStart = SessionEnd - MmSessionImageSize - MI_SESSION_SPACE_WS_SIZE - MI_SESSION_SPACE_STRUCT_SIZE - MmSessionViewSize;
  468. //
  469. // The session view start and size has been established.
  470. //
  471. // Now initialize the session pool start and size which lies
  472. // virtually just below it.
  473. //
  474. MiSessionPoolEnd = MiSessionViewStart;
  475. if (MmSessionPoolSize < MM_MB_MAPPED_BY_PDE) {
  476. #if !defined(_WIN64)
  477. //
  478. // Professional and below use systemwide paged pool for session
  479. // allocations (this decision is made in win32k.sys). Server
  480. // and above use real session pool and 16mb isn't enough to
  481. // play high end game applications, etc. Since we're not
  482. // booted /3GB, try for an additional 16mb now.
  483. //
  484. if ((MmSessionPoolSize == 0) && (MmProductType != 0x00690057)) {
  485. HydraPoolMax = MI_SESSION_SPACE_MAXIMUM_TOTAL_SIZE - HydraSpaceUsedForSystemViews - MmSessionSize;
  486. if (HydraPoolMax >= 2 * MI_SESSION_DEFAULT_POOL_SIZE) {
  487. MmSessionPoolSize = 2 * MI_SESSION_DEFAULT_POOL_SIZE;
  488. MmSessionSize -= MI_SESSION_DEFAULT_POOL_SIZE;
  489. MmSessionSize += MmSessionPoolSize;
  490. }
  491. else {
  492. MmSessionPoolSize = MI_SESSION_DEFAULT_POOL_SIZE;
  493. }
  494. }
  495. else
  496. #endif
  497. MmSessionPoolSize = MI_SESSION_DEFAULT_POOL_SIZE;
  498. }
  499. else {
  500. //
  501. // The Hydra pool size has been specified (in megabytes)
  502. // by the registry. Validate it.
  503. //
  504. // Round the Hydra pool size to a PDE multiple.
  505. //
  506. MmSessionPoolSize = MI_ROUND_TO_SIZE (MmSessionPoolSize,
  507. MM_MB_MAPPED_BY_PDE);
  508. HydraPoolMax = (MI_SESSION_SPACE_MAXIMUM_TOTAL_SIZE - HydraSpaceUsedForSystemViews - (MmSessionSize - MI_SESSION_DEFAULT_POOL_SIZE)) / (1024*1024);
  509. //
  510. // Note a view size of -1 will be rounded to zero.
  511. // Treat -1 as requesting the maximum.
  512. //
  513. if ((MmSessionPoolSize > HydraPoolMax) ||
  514. (MmSessionPoolSize == 0)) {
  515. MmSessionPoolSize = HydraPoolMax;
  516. }
  517. MmSessionPoolSize *= (1024*1024);
  518. MmSessionSize -= MI_SESSION_DEFAULT_POOL_SIZE;
  519. MmSessionSize += MmSessionPoolSize;
  520. }
  521. MiSessionPoolStart = MiSessionPoolEnd - MmSessionPoolSize;
  522. MmSessionBase = (ULONG_PTR) MiSessionPoolStart;
  523. #if defined (_WIN64)
  524. //
  525. // Session special pool immediately follows session regular pool
  526. // assuming the user has enabled either the verifier or special
  527. // pool.
  528. //
  529. if ((MmVerifyDriverBufferLength != (ULONG)-1) ||
  530. ((MmSpecialPoolTag != 0) && (MmSpecialPoolTag != (ULONG)-1))) {
  531. MmSessionSize = MI_SESSION_SPACE_MAXIMUM_TOTAL_SIZE;
  532. MmSessionSpecialPoolEnd = (PVOID) MiSessionPoolStart;
  533. MmSessionBase = MM_SESSION_SPACE_DEFAULT;
  534. MmSessionSpecialPoolStart = (PVOID) MmSessionBase;
  535. }
  536. #endif
  537. ASSERT (MmSessionBase + MmSessionSize == SessionEnd);
  538. MiSessionSpaceEnd = SessionEnd;
  539. MiSessionSpacePageTables = (ULONG)(MmSessionSize / MM_VA_MAPPED_BY_PDE);
  540. #if !defined (_WIN64)
  541. MiSystemViewStart = MmSessionBase - MmSystemViewSize;
  542. #endif
  543. }
  544. else {
  545. //
  546. // When booted /3GB, no size overrides are allowed due to the
  547. // already severely limited virtual address space.
  548. // Initialize the other Hydra variables after the system cache.
  549. //
  550. MmSessionViewSize = MI_SESSION_DEFAULT_VIEW_SIZE;
  551. MmSessionPoolSize = MI_SESSION_DEFAULT_POOL_SIZE;
  552. MmSessionImageSize = MI_SESSION_DEFAULT_IMAGE_SIZE;
  553. MiSessionImageStart = MiSessionImageEnd - MmSessionImageSize;
  554. }
  555. //
  556. // Set the highest section base address.
  557. //
  558. // N.B. In 32-bit systems this address must be 2gb or less even for
  559. // systems that run with 3gb enabled. Otherwise, it would not
  560. // be possible to map based sections identically in all processes.
  561. //
  562. MmHighSectionBase = ((PCHAR)MmHighestUserAddress - 0x800000);
  563. MaximumSystemCacheSize = (MM_SYSTEM_CACHE_END - MM_SYSTEM_CACHE_START) >> PAGE_SHIFT;
  564. #if defined(_X86_)
  565. //
  566. // If boot.ini specified a sane number of MB that the administrator
  567. // wants to use for user virtual address space then use it.
  568. //
  569. UserVaLimit = 0;
  570. ReducedUserVaOption = strstr(LoaderBlock->LoadOptions, "USERVA");
  571. if (ReducedUserVaOption != NULL) {
  572. ReducedUserVaOption = strstr(ReducedUserVaOption,"=");
  573. if (ReducedUserVaOption != NULL) {
  574. UserVaLimit = atol(ReducedUserVaOption+1);
  575. UserVaLimit = MI_ROUND_TO_SIZE (UserVaLimit, ((MM_VA_MAPPED_BY_PDE) / (1024*1024)));
  576. }
  577. //
  578. // Ignore the USERVA switch if the limit is too small.
  579. //
  580. if (UserVaLimit <= (2048 + 16)) {
  581. UserVaLimit = 0;
  582. }
  583. }
  584. if (MmVirtualBias != 0) {
  585. //
  586. // If the size of the boot image (likely due to a large registry)
  587. // overflows into where paged pool would normally start, then
  588. // move paged pool up now. This costs virtual address space (ie:
  589. // performance) but more importantly, allows the system to boot.
  590. //
  591. if (MmBootImageSize > 16 * 1024 * 1024) {
  592. MmPagedPoolStart = (PVOID)((PCHAR)MmPagedPoolStart + (MmBootImageSize - 16 * 1024 * 1024));
  593. ASSERT (((ULONG_PTR)MmPagedPoolStart % MM_VA_MAPPED_BY_PDE) == 0);
  594. }
  595. //
  596. // The system has been biased to an alternate base address to
  597. // allow 3gb of user address space, set the user probe address
  598. // and the maximum system cache size.
  599. //
  600. // If the system has been biased to an alternate base address to
  601. // allow 3gb of user address space, then set the user probe address
  602. // and the maximum system cache size.
  603. if ((UserVaLimit > 2048) && (UserVaLimit < 3072)) {
  604. //
  605. // Use any space between the maximum user virtual address
  606. // and the system for extra system PTEs.
  607. //
  608. // Convert input MB to bytes.
  609. //
  610. UserVaLimit -= 2048;
  611. UserVaLimit *= (1024*1024);
  612. //
  613. // Don't let the user specify a value which would cause us to
  614. // prematurely overwrite portions of the kernel & loader block.
  615. //
  616. if (UserVaLimit < MmBootImageSize) {
  617. UserVaLimit = MmBootImageSize;
  618. }
  619. }
  620. else {
  621. UserVaLimit = 0x40000000;
  622. }
  623. MmHighestUserAddress = ((PCHAR)MmHighestUserAddress + UserVaLimit);
  624. MmSystemRangeStart = ((PCHAR)MmSystemRangeStart + UserVaLimit);
  625. MmUserProbeAddress += UserVaLimit;
  626. MiMaximumWorkingSet += UserVaLimit >> PAGE_SHIFT;
  627. if (UserVaLimit != 0x40000000) {
  628. MiUseMaximumSystemSpace = (ULONG_PTR)MmSystemRangeStart;
  629. MiUseMaximumSystemSpaceEnd = 0xC0000000;
  630. }
  631. MiHighestUserPte = MiGetPteAddress (MmHighestUserAddress);
  632. MiHighestUserPde = MiGetPdeAddress (MmHighestUserAddress);
  633. //
  634. // Moving to 3GB means moving session space to just above
  635. // the system cache (and lowering the system cache max size
  636. // accordingly). Here's the visual:
  637. //
  638. // +------------------------------------+
  639. // C1000000 | System cache resides here |
  640. // | and grows upward. |
  641. // | | |
  642. // | | |
  643. // | \/ |
  644. // | |
  645. // +------------------------------------+
  646. // | Session space (Hydra). |
  647. // +------------------------------------+
  648. // | Systemwide global mapped views. |
  649. // +------------------------------------+
  650. // | |
  651. // | ^ |
  652. // | | |
  653. // | | |
  654. // | |
  655. // | Kernel, HAL & boot loaded images |
  656. // | grow downward from E1000000. |
  657. // | Total size is specified by |
  658. // | LoaderBlock->u.I386.BootImageSize. |
  659. // | Note only ntldrs after Build 2195 |
  660. // | are capable of loading the boot |
  661. // | images in descending order from |
  662. // | a hardcoded E1000000 on down. |
  663. // E1000000 +------------------------------------+
  664. //
  665. MaximumSystemCacheSize -= MmBootImageSize >> PAGE_SHIFT;
  666. MaximumSystemCacheSize -= MmSessionSize >> PAGE_SHIFT;
  667. MaximumSystemCacheSize -= MmSystemViewSize >> PAGE_SHIFT;
  668. MmSessionBase = (ULONG_PTR)(MM_SYSTEM_CACHE_START +
  669. (MaximumSystemCacheSize << PAGE_SHIFT));
  670. MiSystemViewStart = MmSessionBase + MmSessionSize;
  671. MiSessionPoolStart = MmSessionBase;
  672. MiSessionPoolEnd = MiSessionPoolStart + MmSessionPoolSize;
  673. MiSessionViewStart = MiSessionPoolEnd;
  674. MiSessionSpaceEnd = (ULONG_PTR)MmSessionBase + MmSessionSize;
  675. MiSessionSpacePageTables = MmSessionSize / MM_VA_MAPPED_BY_PDE;
  676. MiSessionImageEnd = MiSessionSpaceEnd;
  677. MiSessionImageStart = MiSessionImageEnd - MmSessionImageSize;
  678. }
  679. else if ((UserVaLimit >= 64) && (UserVaLimit < 2048)) {
  680. //
  681. // Convert input MB to bytes.
  682. //
  683. UserVaLimit *= (1024*1024);
  684. ReductionInBytes = 0x80000000 - UserVaLimit;
  685. MmHighestUserAddress = ((PCHAR)MmHighestUserAddress - ReductionInBytes);
  686. MmSystemRangeStart = ((PCHAR)MmSystemRangeStart - ReductionInBytes);
  687. MmUserProbeAddress -= ReductionInBytes;
  688. MiMaximumWorkingSet -= ReductionInBytes >> PAGE_SHIFT;
  689. MiUseMaximumSystemSpace = (ULONG_PTR)MmSystemRangeStart;
  690. MiUseMaximumSystemSpaceEnd = (ULONG_PTR)MiUseMaximumSystemSpace + ReductionInBytes;
  691. MmHighSectionBase = (PVOID)((PCHAR)MmHighSectionBase - ReductionInBytes);
  692. MiHighestUserPte = MiGetPteAddress (MmHighestUserAddress);
  693. MiHighestUserPde = MiGetPdeAddress (MmHighestUserAddress);
  694. }
  695. #else
  696. #if !defined (_WIN64)
  697. MaximumSystemCacheSize -= (MmSystemViewSize >> PAGE_SHIFT);
  698. #endif
  699. #endif
  700. //
  701. // Initialize some global session variables.
  702. //
  703. MmSessionSpace = (PMM_SESSION_SPACE)((ULONG_PTR)MmSessionBase + MmSessionSize - MmSessionImageSize - MI_SESSION_SPACE_STRUCT_SIZE);
  704. MiSessionImagePteStart = MiGetPteAddress ((PVOID) MiSessionImageStart);
  705. MiSessionImagePteEnd = MiGetPteAddress ((PVOID) MiSessionImageEnd);
  706. MiSessionBasePte = MiGetPteAddress ((PVOID)MmSessionBase);
  707. MiSessionSpaceWs = MiSessionViewStart + MmSessionViewSize;
  708. MiSessionLastPte = MiGetPteAddress ((PVOID)MiSessionSpaceEnd);
  709. #if DBG
  710. //
  711. // A few sanity checks to ensure things are as they should be.
  712. //
  713. if ((sizeof(CONTROL_AREA) % 8) != 0) {
  714. DbgPrint("control area list is not a quadword sized structure\n");
  715. }
  716. if ((sizeof(SUBSECTION) % 8) != 0) {
  717. DbgPrint("subsection list is not a quadword sized structure\n");
  718. }
  719. //
  720. // Some checks to make sure prototype PTEs can be placed in
  721. // either paged or nonpaged (prototype PTEs for paged pool are here)
  722. // can be put into PTE format.
  723. //
  724. PointerPte = (PMMPTE)MmPagedPoolStart;
  725. Pointer.u.Long = MiProtoAddressForPte (PointerPte);
  726. TempPte = Pointer;
  727. PointerPde = MiPteToProto(&TempPte);
  728. if (PointerPte != PointerPde) {
  729. DbgPrint("unable to map start of paged pool as prototype PTE %p %p\n",
  730. PointerPde,
  731. PointerPte);
  732. }
  733. PointerPte =
  734. (PMMPTE)((ULONG_PTR)MM_NONPAGED_POOL_END & ~((1 << PTE_SHIFT) - 1));
  735. Pointer.u.Long = MiProtoAddressForPte (PointerPte);
  736. TempPte = Pointer;
  737. PointerPde = MiPteToProto(&TempPte);
  738. if (PointerPte != PointerPde) {
  739. DbgPrint("unable to map end of nonpaged pool as prototype PTE %p %p\n",
  740. PointerPde,
  741. PointerPte);
  742. }
  743. PointerPte = (PMMPTE)(((ULONG_PTR)NON_PAGED_SYSTEM_END -
  744. 0x37000 + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
  745. for (j = 0; j < 20; j += 1) {
  746. Pointer.u.Long = MiProtoAddressForPte (PointerPte);
  747. TempPte = Pointer;
  748. PointerPde = MiPteToProto(&TempPte);
  749. if (PointerPte != PointerPde) {
  750. DbgPrint("unable to map end of nonpaged pool as prototype PTE %p %p\n",
  751. PointerPde,
  752. PointerPte);
  753. }
  754. PointerPte += 1;
  755. }
  756. PointerPte = (PMMPTE)(((ULONG_PTR)MM_NONPAGED_POOL_END - 0x133448) & ~(ULONG_PTR)7);
  757. Pointer.u.Long = MiGetSubsectionAddressForPte (PointerPte);
  758. TempPte = Pointer;
  759. PointerPde = (PMMPTE)MiGetSubsectionAddress(&TempPte);
  760. if (PointerPte != PointerPde) {
  761. DbgPrint("unable to map end of nonpaged pool as section PTE %p %p\n",
  762. PointerPde,
  763. PointerPte);
  764. MiFormatPte(&TempPte);
  765. }
  766. //
  767. // End of sanity checks.
  768. //
  769. #endif
  770. if (MmEnforceWriteProtection) {
  771. MiPteStr[0] = (CHAR)1;
  772. }
  773. InitializeListHead (&MmLoadedUserImageList);
  774. InitializeListHead (&MmLockConflictList);
  775. InitializeListHead (&MmProtectedPteList);
  776. KeInitializeSpinLock (&MmProtectedPteLock);
  777. MmCriticalSectionTimeout.QuadPart = Int32x32To64(
  778. MmCritsectTimeoutSeconds,
  779. -10000000);
  780. //
  781. // Initialize System Address Space creation mutex.
  782. //
  783. ExInitializeFastMutex (&MmSectionCommitMutex);
  784. ExInitializeFastMutex (&MmSectionBasedMutex);
  785. ExInitializeFastMutex (&MmDynamicMemoryMutex);
  786. KeInitializeMutant (&MmSystemLoadLock, FALSE);
  787. KeInitializeEvent (&MmAvailablePagesEvent, NotificationEvent, TRUE);
  788. KeInitializeEvent (&MmAvailablePagesEventMedium, NotificationEvent, TRUE);
  789. KeInitializeEvent (&MmAvailablePagesEventHigh, NotificationEvent, TRUE);
  790. KeInitializeEvent (&MmMappedFileIoComplete, NotificationEvent, FALSE);
  791. KeInitializeEvent (&MmZeroingPageEvent, SynchronizationEvent, FALSE);
  792. KeInitializeEvent (&MmCollidedFlushEvent, NotificationEvent, FALSE);
  793. KeInitializeEvent (&MmCollidedLockEvent, NotificationEvent, FALSE);
  794. KeInitializeEvent (&MiMappedPagesTooOldEvent, NotificationEvent, FALSE);
  795. KeInitializeDpc (&MiModifiedPageWriterTimerDpc,
  796. MiModifiedPageWriterTimerDispatch,
  797. NULL);
  798. KeInitializeTimerEx (&MiModifiedPageWriterTimer, SynchronizationTimer);
  799. MiModifiedPageLife.QuadPart = Int32x32To64(
  800. MmModifiedPageLifeInSeconds,
  801. -10000000);
  802. InitializeListHead (&MmWorkingSetExpansionHead.ListHead);
  803. InitializeSListHead (&MmDeadStackSListHead);
  804. InitializeSListHead (&MmEventCountSListHead);
  805. InitializeSListHead (&MmInPageSupportSListHead);
  806. MmZeroingPageThreadActive = FALSE;
  807. MiMemoryLicense (LoaderBlock);
  808. //
  809. // include all memory types ...
  810. //
  811. for (i = 0; i < LoaderMaximum; i += 1) {
  812. IncludeType[i] = TRUE;
  813. }
  814. //
  815. // ... expect these..
  816. //
  817. IncludeType[LoaderBad] = FALSE;
  818. IncludeType[LoaderFirmwarePermanent] = FALSE;
  819. IncludeType[LoaderSpecialMemory] = FALSE;
  820. IncludeType[LoaderBBTMemory] = FALSE;
  821. //
  822. // Compute number of pages in the system.
  823. //
  824. NumberOfPages = MiPagesInLoaderBlock (LoaderBlock, IncludeType);
  825. #if defined (_MI_MORE_THAN_4GB_)
  826. Mm64BitPhysicalAddress = TRUE;
  827. #endif
  828. //
  829. // When safebooting, don't enable special pool, the verifier or any
  830. // other options that track corruption regardless of registry settings.
  831. //
  832. if (strstr(LoaderBlock->LoadOptions, SAFEBOOT_LOAD_OPTION_A)) {
  833. MmVerifyDriverBufferLength = (ULONG)-1;
  834. MiVerifyAllDrivers = 0;
  835. MmVerifyDriverLevel = 0;
  836. MmDontVerifyRandomDrivers = TRUE;
  837. MmSpecialPoolTag = (ULONG)-1;
  838. MmSnapUnloads = FALSE;
  839. MmProtectFreedNonPagedPool = FALSE;
  840. MmEnforceWriteProtection = 0;
  841. MmTrackLockedPages = FALSE;
  842. MmTrackPtes = 0;
  843. #if defined (_WIN64)
  844. MmSessionSpecialPoolEnd = NULL;
  845. MmSessionSpecialPoolStart = NULL;
  846. #endif
  847. SharedUserData->SafeBootMode = TRUE;
  848. }
  849. else {
  850. MiTriageSystem (LoaderBlock);
  851. }
  852. SystemPteMultiplier = 0;
  853. if (MmNumberOfSystemPtes == 0) {
  854. #if defined (_WIN64)
  855. //
  856. // 64-bit NT is not constrained by virtual address space. No
  857. // tradeoffs between nonpaged pool, paged pool and system PTEs
  858. // need to be made. So just allocate PTEs on a linear scale as
  859. // a function of the amount of RAM.
  860. //
  861. // For example on a Hydra NT64, 4gb of RAM gets 128gb of PTEs.
  862. // The page table cost is the inversion of the multiplier based
  863. // on the PTE_PER_PAGE.
  864. //
  865. if (ExpMultiUserTS == TRUE) {
  866. SystemPteMultiplier = 32;
  867. }
  868. else {
  869. SystemPteMultiplier = 16;
  870. }
  871. if (NumberOfPages < 0x8000) {
  872. SystemPteMultiplier >>= 1;
  873. }
  874. #else
  875. if (NumberOfPages < MM_MEDIUM_SYSTEM) {
  876. MmNumberOfSystemPtes = MM_MINIMUM_SYSTEM_PTES;
  877. }
  878. else {
  879. MmNumberOfSystemPtes = MM_DEFAULT_SYSTEM_PTES;
  880. if (NumberOfPages > 8192) {
  881. MmNumberOfSystemPtes += MmNumberOfSystemPtes;
  882. //
  883. // Any reasonable Hydra machine gets the maximum.
  884. //
  885. if (ExpMultiUserTS == TRUE) {
  886. MmNumberOfSystemPtes = MM_MAXIMUM_SYSTEM_PTES;
  887. }
  888. }
  889. }
  890. #endif
  891. }
  892. else if (MmNumberOfSystemPtes == (ULONG)-1) {
  893. //
  894. // This registry setting indicates the maximum number of
  895. // system PTEs possible for this machine must be allocated.
  896. // Snap this for later reference.
  897. //
  898. MiRequestedSystemPtes = MmNumberOfSystemPtes;
  899. #if defined (_WIN64)
  900. SystemPteMultiplier = 256;
  901. #else
  902. MmNumberOfSystemPtes = MM_MAXIMUM_SYSTEM_PTES;
  903. #endif
  904. }
  905. if (SystemPteMultiplier != 0) {
  906. if (NumberOfPages * SystemPteMultiplier > MM_MAXIMUM_SYSTEM_PTES) {
  907. MmNumberOfSystemPtes = MM_MAXIMUM_SYSTEM_PTES;
  908. }
  909. else {
  910. MmNumberOfSystemPtes = (ULONG)(NumberOfPages * SystemPteMultiplier);
  911. }
  912. }
  913. if (MmNumberOfSystemPtes > MM_MAXIMUM_SYSTEM_PTES) {
  914. MmNumberOfSystemPtes = MM_MAXIMUM_SYSTEM_PTES;
  915. }
  916. if (MmNumberOfSystemPtes < MM_MINIMUM_SYSTEM_PTES) {
  917. MmNumberOfSystemPtes = MM_MINIMUM_SYSTEM_PTES;
  918. }
  919. if (MmHeapSegmentReserve == 0) {
  920. MmHeapSegmentReserve = 1024 * 1024;
  921. }
  922. if (MmHeapSegmentCommit == 0) {
  923. MmHeapSegmentCommit = PAGE_SIZE * 2;
  924. }
  925. if (MmHeapDeCommitTotalFreeThreshold == 0) {
  926. MmHeapDeCommitTotalFreeThreshold = 64 * 1024;
  927. }
  928. if (MmHeapDeCommitFreeBlockThreshold == 0) {
  929. MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
  930. }
  931. #ifndef NO_POOL_CHECKS
  932. MiInitializeSpecialPoolCriteria ();
  933. #endif
  934. //
  935. // If the registry indicates drivers are in the suspect list,
  936. // extra system PTEs need to be allocated to support special pool
  937. // for their allocations.
  938. //
  939. if ((MmVerifyDriverBufferLength != (ULONG)-1) ||
  940. ((MmSpecialPoolTag != 0) && (MmSpecialPoolTag != (ULONG)-1))) {
  941. MmNumberOfSystemPtes += MM_SPECIAL_POOL_PTES;
  942. }
  943. MmNumberOfSystemPtes += BBTPagesToReserve;
  944. #if defined(_X86_)
  945. //
  946. // The allocation preference key must be carefully managed. This is
  947. // because doing every allocation top-down can cause failures if
  948. // an ntdll process startup allocation (like the stack trace database)
  949. // gets a high address which then causes a subsequent system DLL rebase
  950. // collision.
  951. //
  952. // This is circumvented as follows:
  953. //
  954. // 1. For 32-bit machines, the allocation preference key is only
  955. // useful when booted /3GB as only then can this key help track
  956. // down apps with high virtual address bit sign extension problems.
  957. // In 3GB mode, the system DLLs are based just below 2GB so ntdll
  958. // would have to allocate more than 1GB of VA space before this
  959. // becomes a problem. So really the problem can only occur for
  960. // machines in 2GB mode and since the key doesn't help these
  961. // machines anyway, just turn it off in these cases.
  962. //
  963. // 2. For 64-bit machines, there is plenty of VA space above the
  964. // addresses system DLLs are based at so it is a non-issue.
  965. // EXCEPT for wow64 binaries which run in sandboxed 2GB address
  966. // spaces. Explicit checks are made to detect a wow64 process in
  967. // the Mm APIs which check this key and the key is ignored in
  968. // this case as it doesn't provide any sign extension help and
  969. // therefore we don't allow it to burn up any valuable VA space
  970. // which could cause a collision.
  971. //
  972. if (MmVirtualBias == 0) {
  973. MmAllocationPreference = 0;
  974. }
  975. #endif
  976. if (MmAllocationPreference != 0) {
  977. MmAllocationPreference = MEM_TOP_DOWN;
  978. }
  979. ExInitializeResourceLite (&MmSystemWsLock);
  980. MiInitializeDriverVerifierList (LoaderBlock);
  981. //
  982. // Set the initial commit page limit high enough so initial pool
  983. // allocations (which happen in the machine dependent init) can
  984. // succeed.
  985. //
  986. MmTotalCommitLimit = _2gb / PAGE_SIZE;
  987. MmTotalCommitLimitMaximum = MmTotalCommitLimit;
  988. //
  989. // Pick a reasonable size for the default prototype PTE allocation
  990. // chunk size. Make sure it's always a PAGE_SIZE multiple. The
  991. // registry entry is treated as the number of 1K chunks.
  992. //
  993. if (MmAllocationFragment == 0) {
  994. AutosizingFragment = TRUE;
  995. MmAllocationFragment = MM_ALLOCATION_FRAGMENT;
  996. #if !defined (_WIN64)
  997. if (NumberOfPages < 64 * 1024) {
  998. MmAllocationFragment = MM_ALLOCATION_FRAGMENT / 4;
  999. }
  1000. else if (NumberOfPages < 256 * 1024) {
  1001. MmAllocationFragment = MM_ALLOCATION_FRAGMENT / 2;
  1002. }
  1003. #endif
  1004. }
  1005. else {
  1006. //
  1007. // Convert the registry entry from 1K chunks into bytes.
  1008. // Then round it to a PAGE_SIZE multiple. Finally bound it
  1009. // reasonably.
  1010. //
  1011. AutosizingFragment = FALSE;
  1012. MmAllocationFragment *= 1024;
  1013. MmAllocationFragment = ROUND_TO_PAGES (MmAllocationFragment);
  1014. if (MmAllocationFragment > MM_ALLOCATION_FRAGMENT) {
  1015. MmAllocationFragment = MM_ALLOCATION_FRAGMENT;
  1016. }
  1017. else if (MmAllocationFragment < PAGE_SIZE) {
  1018. MmAllocationFragment = PAGE_SIZE;
  1019. }
  1020. }
  1021. MiInitializeIoTrackers ();
  1022. MiInitializeCacheOverrides ();
  1023. //
  1024. // Initialize the machine dependent portion of the hardware.
  1025. //
  1026. MiInitMachineDependent (LoaderBlock);
  1027. MmPhysicalMemoryBlock = MmInitializeMemoryLimits (LoaderBlock,
  1028. IncludeType,
  1029. NULL);
  1030. if (MmPhysicalMemoryBlock == NULL) {
  1031. KeBugCheckEx (INSTALL_MORE_MEMORY,
  1032. MmNumberOfPhysicalPages,
  1033. MmLowestPhysicalPage,
  1034. MmHighestPhysicalPage,
  1035. 0x100);
  1036. }
  1037. #if defined(_X86_)
  1038. MiReportPhysicalMemory ();
  1039. #endif
  1040. #if defined (_MI_MORE_THAN_4GB_)
  1041. if (MiNoLowMemory != 0) {
  1042. MiRemoveLowPages (0);
  1043. }
  1044. #endif
  1045. //
  1046. // Initialize listhead, spinlock and semaphore for
  1047. // segment dereferencing thread.
  1048. //
  1049. KeInitializeSpinLock (&MmDereferenceSegmentHeader.Lock);
  1050. InitializeListHead (&MmDereferenceSegmentHeader.ListHead);
  1051. KeInitializeSemaphore (&MmDereferenceSegmentHeader.Semaphore, 0, MAXLONG);
  1052. InitializeListHead (&MmUnusedSegmentList);
  1053. InitializeListHead (&MmUnusedSubsectionList);
  1054. KeInitializeEvent (&MmUnusedSegmentCleanup, NotificationEvent, FALSE);
  1055. MiInitializeCommitment ();
  1056. MiInitializePfnTracing ();
  1057. #if defined(_X86_)
  1058. //
  1059. // Virtual bias indicates the offset that needs to be added to
  1060. // 0x80000000 to get to the start of the loaded images. Update it
  1061. // now to indicate the offset to MmSessionBase as that is the lowest
  1062. // system address that process creation needs to make sure to duplicate.
  1063. //
  1064. // This is not done until after machine dependent initialization runs
  1065. // as that initialization relies on the original meaning of VirtualBias.
  1066. //
  1067. // Note if the system is booted with both /3GB & /USERVA, then system
  1068. // PTEs will be allocated below virtual 3GB and that will end up being
  1069. // the lowest system address the process creation needs to duplicate.
  1070. //
  1071. if (MmVirtualBias != 0) {
  1072. MmVirtualBias = (ULONG_PTR)MmSessionBase - CODE_START;
  1073. }
  1074. #endif
  1075. if (MmMirroring & MM_MIRRORING_ENABLED) {
  1076. #if defined (_WIN64)
  1077. //
  1078. // All page frame numbers must fit in 32 bits because the bitmap
  1079. // package is currently 32-bit.
  1080. //
  1081. // The bitmaps are deliberately not initialized as each mirroring
  1082. // must reinitialize them anyway.
  1083. //
  1084. if (MmHighestPossiblePhysicalPage + 1 < _4gb) {
  1085. #endif
  1086. MiCreateBitMap (&MiMirrorBitMap,
  1087. MmHighestPossiblePhysicalPage + 1,
  1088. NonPagedPool);
  1089. if (MiMirrorBitMap != NULL) {
  1090. MiCreateBitMap (&MiMirrorBitMap2,
  1091. MmHighestPossiblePhysicalPage + 1,
  1092. NonPagedPool);
  1093. if (MiMirrorBitMap2 == NULL) {
  1094. MiRemoveBitMap (&MiMirrorBitMap);
  1095. }
  1096. }
  1097. #if defined (_WIN64)
  1098. }
  1099. #endif
  1100. }
  1101. #if !defined (_WIN64)
  1102. if ((AutosizingFragment == TRUE) &&
  1103. (NumberOfPages >= 256 * 1024)) {
  1104. //
  1105. // This is a system with at least 1GB of RAM. Presumably it
  1106. // will be used to cache many files. Maybe we should factor in
  1107. // pool size here and adjust it accordingly.
  1108. //
  1109. MmAllocationFragment;
  1110. }
  1111. #endif
  1112. MiReloadBootLoadedDrivers (LoaderBlock);
  1113. #if defined (_MI_MORE_THAN_4GB_)
  1114. if (MiNoLowMemory != 0) {
  1115. MiRemoveLowPages (1);
  1116. }
  1117. #endif
  1118. MiInitializeVerifyingComponents (LoaderBlock);
  1119. //
  1120. // Setup the system size as small, medium, or large depending
  1121. // on memory available.
  1122. //
  1123. // For internal MM tuning, the following applies
  1124. //
  1125. // 12Mb is small
  1126. // 12-19 is medium
  1127. // > 19 is large
  1128. //
  1129. //
  1130. // For all other external tuning,
  1131. // < 19 is small
  1132. // 19 - 31 is medium for workstation
  1133. // 19 - 63 is medium for server
  1134. // >= 32 is large for workstation
  1135. // >= 64 is large for server
  1136. //
  1137. if (MmNumberOfPhysicalPages <= MM_SMALL_SYSTEM) {
  1138. MmSystemSize = MmSmallSystem;
  1139. MmMaximumDeadKernelStacks = 0;
  1140. MmModifiedPageMinimum = 40;
  1141. MmModifiedPageMaximum = 100;
  1142. MmDataClusterSize = 0;
  1143. MmCodeClusterSize = 1;
  1144. MmReadClusterSize = 2;
  1145. MmInPageSupportMinimum = 2;
  1146. }
  1147. else if (MmNumberOfPhysicalPages <= MM_MEDIUM_SYSTEM) {
  1148. MmSystemSize = MmSmallSystem;
  1149. MmMaximumDeadKernelStacks = 2;
  1150. MmModifiedPageMinimum = 80;
  1151. MmModifiedPageMaximum = 150;
  1152. MmSystemCacheWsMinimum += 100;
  1153. MmSystemCacheWsMaximum += 150;
  1154. MmDataClusterSize = 1;
  1155. MmCodeClusterSize = 2;
  1156. MmReadClusterSize = 4;
  1157. MmInPageSupportMinimum = 3;
  1158. }
  1159. else {
  1160. MmSystemSize = MmMediumSystem;
  1161. MmMaximumDeadKernelStacks = 5;
  1162. MmModifiedPageMinimum = 150;
  1163. MmModifiedPageMaximum = 300;
  1164. MmSystemCacheWsMinimum += 400;
  1165. MmSystemCacheWsMaximum += 800;
  1166. MmDataClusterSize = 3;
  1167. MmCodeClusterSize = 7;
  1168. MmReadClusterSize = 7;
  1169. MmInPageSupportMinimum = 4;
  1170. }
  1171. if (MmNumberOfPhysicalPages < ((24*1024*1024)/PAGE_SIZE)) {
  1172. MmSystemCacheWsMinimum = 32;
  1173. }
  1174. if (MmNumberOfPhysicalPages >= ((32*1024*1024)/PAGE_SIZE)) {
  1175. //
  1176. // If we are on a workstation, 32Mb and above are considered
  1177. // large systems.
  1178. //
  1179. if (MmProductType == 0x00690057) {
  1180. MmSystemSize = MmLargeSystem;
  1181. }
  1182. else {
  1183. //
  1184. // For servers, 64Mb and greater is a large system
  1185. //
  1186. if (MmNumberOfPhysicalPages >= ((64*1024*1024)/PAGE_SIZE)) {
  1187. MmSystemSize = MmLargeSystem;
  1188. }
  1189. }
  1190. }
  1191. if (MmNumberOfPhysicalPages > ((33*1024*1024)/PAGE_SIZE)) {
  1192. MmModifiedPageMinimum = 400;
  1193. MmModifiedPageMaximum = 800;
  1194. MmSystemCacheWsMinimum += 500;
  1195. MmSystemCacheWsMaximum += 900;
  1196. MmInPageSupportMinimum += 4;
  1197. }
  1198. if (MmNumberOfPhysicalPages > ((220*1024*1024)/PAGE_SIZE)) { // bump max cache size a bit more
  1199. if ( (LONG)MmSystemCacheWsMinimum < (LONG)((24*1024*1024) >> PAGE_SHIFT) &&
  1200. (LONG)MmSystemCacheWsMaximum < (LONG)((24*1024*1024) >> PAGE_SHIFT)){
  1201. MmSystemCacheWsMaximum = ((24*1024*1024) >> PAGE_SHIFT);
  1202. }
  1203. ASSERT ((LONG)MmSystemCacheWsMaximum > (LONG)MmSystemCacheWsMinimum);
  1204. }
  1205. else if (MmNumberOfPhysicalPages > ((110*1024*1024)/PAGE_SIZE)) { // bump max cache size a bit
  1206. if ( (LONG)MmSystemCacheWsMinimum < (LONG)((16*1024*1024) >> PAGE_SHIFT) &&
  1207. (LONG)MmSystemCacheWsMaximum < (LONG)((16*1024*1024) >> PAGE_SHIFT)){
  1208. MmSystemCacheWsMaximum = ((16*1024*1024) >> PAGE_SHIFT);
  1209. }
  1210. ASSERT ((LONG)MmSystemCacheWsMaximum > (LONG)MmSystemCacheWsMinimum);
  1211. }
  1212. if (NT_SUCCESS (MmIsVerifierEnabled (&VerifierFlags))) {
  1213. //
  1214. // The verifier is enabled so don't defer any MDL unlocks because
  1215. // without state, debugging driver bugs in this area is very
  1216. // difficult.
  1217. //
  1218. DeferredMdlEntries = 0;
  1219. }
  1220. else if (MmNumberOfPhysicalPages > ((255*1024*1024)/PAGE_SIZE)) {
  1221. DeferredMdlEntries = 32;
  1222. }
  1223. else if (MmNumberOfPhysicalPages > ((127*1024*1024)/PAGE_SIZE)) {
  1224. DeferredMdlEntries = 8;
  1225. }
  1226. else {
  1227. DeferredMdlEntries = 4;
  1228. }
  1229. #if defined(MI_MULTINODE)
  1230. for (i = 0; i < KeNumberNodes; i += 1) {
  1231. InitializeSListHead (&KeNodeBlock[i]->PfnDereferenceSListHead);
  1232. KeNodeBlock[i]->PfnDeferredList = NULL;
  1233. for (j = 0; j < DeferredMdlEntries; j += 1) {
  1234. SingleListEntry = ExAllocatePoolWithTag (NonPagedPool,
  1235. sizeof(MI_PFN_DEREFERENCE_CHUNK),
  1236. 'mDmM');
  1237. if (SingleListEntry != NULL) {
  1238. InterlockedPushEntrySList (&KeNodeBlock[i]->PfnDereferenceSListHead,
  1239. SingleListEntry);
  1240. }
  1241. }
  1242. }
  1243. #else
  1244. InitializeSListHead (&MmPfnDereferenceSListHead);
  1245. for (j = 0; j < DeferredMdlEntries; j += 1) {
  1246. SingleListEntry = ExAllocatePoolWithTag (NonPagedPool,
  1247. sizeof(MI_PFN_DEREFERENCE_CHUNK),
  1248. 'mDmM');
  1249. if (SingleListEntry != NULL) {
  1250. InterlockedPushEntrySList (&MmPfnDereferenceSListHead,
  1251. SingleListEntry);
  1252. }
  1253. }
  1254. #endif
  1255. ASSERT (SharedUserData->NumberOfPhysicalPages == 0);
  1256. SharedUserData->NumberOfPhysicalPages = (ULONG) MmNumberOfPhysicalPages;
  1257. //
  1258. // Determine if we are on an AS system (Winnt is not AS).
  1259. //
  1260. if (MmProductType == 0x00690057) {
  1261. SharedUserData->NtProductType = NtProductWinNt;
  1262. MmProductType = 0;
  1263. MmThrottleTop = 250;
  1264. MmThrottleBottom = 30;
  1265. }
  1266. else {
  1267. if (MmProductType == 0x0061004c) {
  1268. SharedUserData->NtProductType = NtProductLanManNt;
  1269. }
  1270. else {
  1271. SharedUserData->NtProductType = NtProductServer;
  1272. }
  1273. MmProductType = 1;
  1274. MmThrottleTop = 450;
  1275. MmThrottleBottom = 80;
  1276. MmMinimumFreePages = 81;
  1277. MmInPageSupportMinimum += 8;
  1278. }
  1279. MiAdjustWorkingSetManagerParameters ((LOGICAL)(MmProductType == 0 ? TRUE : FALSE));
  1280. //
  1281. // Set the ResidentAvailablePages to the number of available
  1282. // pages minus the fluid value.
  1283. //
  1284. MmResidentAvailablePages = MmAvailablePages - MM_FLUID_PHYSICAL_PAGES;
  1285. //
  1286. // Subtract off the size of future nonpaged pool expansion
  1287. // so that nonpaged pool will always be able to expand regardless of
  1288. // prior system load activity.
  1289. //
  1290. MmResidentAvailablePages -= MiExpansionPoolPagesInitialCharge;
  1291. //
  1292. // Subtract off the size of the system cache working set.
  1293. //
  1294. MmResidentAvailablePages -= MmSystemCacheWsMinimum;
  1295. MmResidentAvailableAtInit = MmResidentAvailablePages;
  1296. if (MmResidentAvailablePages < 0) {
  1297. #if DBG
  1298. DbgPrint("system cache working set too big\n");
  1299. #endif
  1300. return FALSE;
  1301. }
  1302. //
  1303. // Initialize spin lock for allowing working set expansion.
  1304. //
  1305. KeInitializeSpinLock (&MmExpansionLock);
  1306. ExInitializeFastMutex (&MmPageFileCreationLock);
  1307. //
  1308. // Initialize resource for extending sections.
  1309. //
  1310. ExInitializeResourceLite (&MmSectionExtendResource);
  1311. ExInitializeResourceLite (&MmSectionExtendSetResource);
  1312. //
  1313. // Build the system cache structures.
  1314. //
  1315. StartPde = MiGetPdeAddress (MmSystemCacheWorkingSetList);
  1316. PointerPte = MiGetPteAddress (MmSystemCacheWorkingSetList);
  1317. #if (_MI_PAGING_LEVELS >= 3)
  1318. TempPte = ValidKernelPte;
  1319. #if (_MI_PAGING_LEVELS >= 4)
  1320. StartPxe = MiGetPdeAddress(StartPde);
  1321. if (StartPxe->u.Hard.Valid == 0) {
  1322. //
  1323. // Map in a page directory parent page for the system cache working
  1324. // set. Note that we only populate one page table for this.
  1325. //
  1326. DirectoryFrameIndex = MiRemoveAnyPage(
  1327. MI_GET_PAGE_COLOR_FROM_PTE (StartPxe));
  1328. TempPte.u.Hard.PageFrameNumber = DirectoryFrameIndex;
  1329. *StartPxe = TempPte;
  1330. MiInitializePfn (DirectoryFrameIndex, StartPxe, 1);
  1331. MiFillMemoryPte (MiGetVirtualAddressMappedByPte(StartPxe),
  1332. PAGE_SIZE,
  1333. ZeroKernelPte.u.Long);
  1334. }
  1335. #endif
  1336. StartPpe = MiGetPteAddress(StartPde);
  1337. if (StartPpe->u.Hard.Valid == 0) {
  1338. //
  1339. // Map in a page directory page for the system cache working set.
  1340. // Note that we only populate one page table for this.
  1341. //
  1342. DirectoryFrameIndex = MiRemoveAnyPage(
  1343. MI_GET_PAGE_COLOR_FROM_PTE (StartPpe));
  1344. TempPte.u.Hard.PageFrameNumber = DirectoryFrameIndex;
  1345. *StartPpe = TempPte;
  1346. MiInitializePfn (DirectoryFrameIndex, StartPpe, 1);
  1347. MiFillMemoryPte (MiGetVirtualAddressMappedByPte(StartPpe),
  1348. PAGE_SIZE,
  1349. ZeroKernelPte.u.Long);
  1350. }
  1351. #if (_MI_PAGING_LEVELS >= 4)
  1352. //
  1353. // The shared user data is already initialized and it shares the
  1354. // page table page with the system cache working set list.
  1355. //
  1356. ASSERT (StartPde->u.Hard.Valid == 1);
  1357. #else
  1358. //
  1359. // Map in a page table page.
  1360. //
  1361. ASSERT (StartPde->u.Hard.Valid == 0);
  1362. PageFrameIndex = MiRemoveAnyPage(
  1363. MI_GET_PAGE_COLOR_FROM_PTE (StartPde));
  1364. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1365. MI_WRITE_VALID_PTE (StartPde, TempPte);
  1366. MiInitializePfn (PageFrameIndex, StartPde, 1);
  1367. MiFillMemoryPte (MiGetVirtualAddressMappedByPte (StartPde),
  1368. PAGE_SIZE,
  1369. ZeroKernelPte.u.Long);
  1370. #endif
  1371. StartPpe = MiGetPpeAddress(MmSystemCacheStart);
  1372. StartPde = MiGetPdeAddress(MmSystemCacheStart);
  1373. PointerPte = MiGetVirtualAddressMappedByPte (StartPde);
  1374. #else
  1375. #if !defined(_X86PAE_)
  1376. ASSERT ((StartPde + 1) == MiGetPdeAddress (MmSystemCacheStart));
  1377. #endif
  1378. #endif
  1379. MaximumSystemCacheSizeTotal = MaximumSystemCacheSize;
  1380. #if defined(_X86_)
  1381. MaximumSystemCacheSizeTotal += MiMaximumSystemCacheSizeExtra;
  1382. #endif
  1383. //
  1384. // Size the system cache based on the amount of physical memory.
  1385. //
  1386. i = (MmNumberOfPhysicalPages + 65) / 1024;
  1387. if (i >= 4) {
  1388. //
  1389. // System has at least 4032 pages. Make the system
  1390. // cache 128mb + 64mb for each additional 1024 pages.
  1391. //
  1392. MmSizeOfSystemCacheInPages = (PFN_COUNT)(
  1393. ((128*1024*1024) >> PAGE_SHIFT) +
  1394. ((i - 4) * ((64*1024*1024) >> PAGE_SHIFT)));
  1395. if (MmSizeOfSystemCacheInPages > MaximumSystemCacheSizeTotal) {
  1396. MmSizeOfSystemCacheInPages = MaximumSystemCacheSizeTotal;
  1397. }
  1398. }
  1399. MmSystemCacheEnd = (PVOID)(((PCHAR)MmSystemCacheStart +
  1400. MmSizeOfSystemCacheInPages * PAGE_SIZE) - 1);
  1401. #if defined(_X86_)
  1402. if (MmSizeOfSystemCacheInPages > MaximumSystemCacheSize) {
  1403. ASSERT (MiMaximumSystemCacheSizeExtra != 0);
  1404. MmSystemCacheEnd = (PVOID)(((PCHAR)MmSystemCacheStart +
  1405. MaximumSystemCacheSize * PAGE_SIZE) - 1);
  1406. MiSystemCacheStartExtra = (PVOID)MiExtraResourceStart;
  1407. MiSystemCacheEndExtra = (PVOID)(((PCHAR)MiSystemCacheStartExtra +
  1408. (MmSizeOfSystemCacheInPages - MaximumSystemCacheSize) * PAGE_SIZE) - 1);
  1409. }
  1410. else {
  1411. MiSystemCacheStartExtra = MmSystemCacheStart;
  1412. MiSystemCacheEndExtra = MmSystemCacheEnd;
  1413. }
  1414. #endif
  1415. EndPde = MiGetPdeAddress(MmSystemCacheEnd);
  1416. TempPte = ValidKernelPte;
  1417. #if (_MI_PAGING_LEVELS >= 4)
  1418. StartPxe = MiGetPxeAddress(MmSystemCacheStart);
  1419. if (StartPxe->u.Hard.Valid == 0) {
  1420. FirstPxe = TRUE;
  1421. FirstPpe = TRUE;
  1422. }
  1423. else {
  1424. FirstPxe = FALSE;
  1425. FirstPpe = (StartPpe->u.Hard.Valid == 0) ? TRUE : FALSE;
  1426. }
  1427. #elif (_MI_PAGING_LEVELS >= 3)
  1428. FirstPpe = (StartPpe->u.Hard.Valid == 0) ? TRUE : FALSE;
  1429. #else
  1430. DirectoryFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress(PDE_BASE));
  1431. #endif
  1432. LOCK_PFN (OldIrql);
  1433. while (StartPde <= EndPde) {
  1434. #if (_MI_PAGING_LEVELS >= 4)
  1435. if (FirstPxe == TRUE || MiIsPteOnPpeBoundary(StartPde)) {
  1436. FirstPxe = FALSE;
  1437. StartPxe = MiGetPdeAddress(StartPde);
  1438. //
  1439. // Map in a page directory page.
  1440. //
  1441. DirectoryFrameIndex = MiRemoveAnyPage(
  1442. MI_GET_PAGE_COLOR_FROM_PTE (StartPxe));
  1443. TempPte.u.Hard.PageFrameNumber = DirectoryFrameIndex;
  1444. MI_WRITE_VALID_PTE (StartPxe, TempPte);
  1445. MiInitializePfn (DirectoryFrameIndex,
  1446. StartPxe,
  1447. 1);
  1448. MiFillMemoryPte (MiGetVirtualAddressMappedByPte(StartPxe),
  1449. PAGE_SIZE,
  1450. ZeroKernelPte.u.Long);
  1451. }
  1452. #endif
  1453. #if (_MI_PAGING_LEVELS >= 3)
  1454. if (FirstPpe == TRUE || MiIsPteOnPdeBoundary(StartPde)) {
  1455. FirstPpe = FALSE;
  1456. StartPpe = MiGetPteAddress(StartPde);
  1457. //
  1458. // Map in a page directory page.
  1459. //
  1460. DirectoryFrameIndex = MiRemoveAnyPage(
  1461. MI_GET_PAGE_COLOR_FROM_PTE (StartPpe));
  1462. TempPte.u.Hard.PageFrameNumber = DirectoryFrameIndex;
  1463. MI_WRITE_VALID_PTE (StartPpe, TempPte);
  1464. MiInitializePfn (DirectoryFrameIndex,
  1465. StartPpe,
  1466. 1);
  1467. MiFillMemoryPte (MiGetVirtualAddressMappedByPte(StartPpe),
  1468. PAGE_SIZE,
  1469. ZeroKernelPte.u.Long);
  1470. }
  1471. #endif
  1472. ASSERT (StartPde->u.Hard.Valid == 0);
  1473. //
  1474. // Map in a page table page.
  1475. //
  1476. PageFrameIndex = MiRemoveAnyPage(
  1477. MI_GET_PAGE_COLOR_FROM_PTE (StartPde));
  1478. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1479. MI_WRITE_VALID_PTE (StartPde, TempPte);
  1480. MiInitializePfn (PageFrameIndex, StartPde, 1);
  1481. MiFillMemoryPte (MiGetVirtualAddressMappedByPte(StartPde),
  1482. PAGE_SIZE,
  1483. ZeroKernelPte.u.Long);
  1484. StartPde += 1;
  1485. }
  1486. UNLOCK_PFN (OldIrql);
  1487. //
  1488. // Initialize the system cache. Only set the large system cache if
  1489. // we have a large amount of physical memory.
  1490. //
  1491. if (MmLargeSystemCache != 0 && MmNumberOfPhysicalPages > 0x7FF0) {
  1492. if ((MmAvailablePages >
  1493. MmSystemCacheWsMaximum + ((64*1024*1024) >> PAGE_SHIFT))) {
  1494. MmSystemCacheWsMaximum =
  1495. MmAvailablePages - ((32*1024*1024) >> PAGE_SHIFT);
  1496. ASSERT ((LONG)MmSystemCacheWsMaximum > (LONG)MmSystemCacheWsMinimum);
  1497. }
  1498. }
  1499. if (MmSystemCacheWsMaximum > (MM_MAXIMUM_WORKING_SET - 5)) {
  1500. MmSystemCacheWsMaximum = MM_MAXIMUM_WORKING_SET - 5;
  1501. }
  1502. if (MmSystemCacheWsMaximum > MmSizeOfSystemCacheInPages) {
  1503. MmSystemCacheWsMaximum = MmSizeOfSystemCacheInPages;
  1504. if ((MmSystemCacheWsMinimum + 500) > MmSystemCacheWsMaximum) {
  1505. MmSystemCacheWsMinimum = MmSystemCacheWsMaximum - 500;
  1506. }
  1507. }
  1508. MiInitializeSystemCache ((ULONG)MmSystemCacheWsMinimum,
  1509. (ULONG)MmSystemCacheWsMaximum);
  1510. MmAttemptForCantExtend.Segment = NULL;
  1511. MmAttemptForCantExtend.RequestedExpansionSize = 1;
  1512. MmAttemptForCantExtend.ActualExpansion = 0;
  1513. MmAttemptForCantExtend.InProgress = FALSE;
  1514. MmAttemptForCantExtend.PageFileNumber = MI_EXTEND_ANY_PAGEFILE;
  1515. KeInitializeEvent (&MmAttemptForCantExtend.Event,
  1516. NotificationEvent,
  1517. FALSE);
  1518. //
  1519. // Now that we have booted far enough, replace the temporary
  1520. // commit limits with real ones: set the initial commit page
  1521. // limit to the number of available pages. This value is
  1522. // updated as paging files are created.
  1523. //
  1524. MmTotalCommitLimit = MmAvailablePages;
  1525. if (MmTotalCommitLimit > 1024) {
  1526. MmTotalCommitLimit -= 1024;
  1527. }
  1528. MmTotalCommitLimitMaximum = MmTotalCommitLimit;
  1529. //
  1530. // Set maximum working set size to 512 pages less than the
  1531. // total available memory.
  1532. //
  1533. MmMaximumWorkingSetSize = (WSLE_NUMBER)(MmAvailablePages - 512);
  1534. if (MmMaximumWorkingSetSize > (MM_MAXIMUM_WORKING_SET - 5)) {
  1535. MmMaximumWorkingSetSize = MM_MAXIMUM_WORKING_SET - 5;
  1536. }
  1537. //
  1538. // Create the modified page writer event.
  1539. //
  1540. KeInitializeEvent (&MmModifiedPageWriterEvent, NotificationEvent, FALSE);
  1541. //
  1542. // Build paged pool.
  1543. //
  1544. MiBuildPagedPool ();
  1545. //
  1546. // Initialize the loaded module list. This cannot be done until
  1547. // paged pool has been built.
  1548. //
  1549. if (MiInitializeLoadedModuleList (LoaderBlock) == FALSE) {
  1550. #if DBG
  1551. DbgPrint("Loaded module list initialization failed\n");
  1552. #endif
  1553. return FALSE;
  1554. }
  1555. //
  1556. // Initialize the unused segment threshold. Attempt to keep pool usage
  1557. // below this percentage (by trimming the cache) if pool requests
  1558. // can fail.
  1559. //
  1560. if (MmConsumedPoolPercentage == 0) {
  1561. MmConsumedPoolPercentage = 80;
  1562. }
  1563. else if (MmConsumedPoolPercentage < 5) {
  1564. MmConsumedPoolPercentage = 5;
  1565. }
  1566. else if (MmConsumedPoolPercentage > 100) {
  1567. MmConsumedPoolPercentage = 100;
  1568. }
  1569. //
  1570. // Add more system PTEs if this is a large memory system.
  1571. // Note that 64 bit systems can determine the right value at the
  1572. // beginning since there is no virtual address space crunch.
  1573. //
  1574. #if !defined (_WIN64)
  1575. if (MmNumberOfPhysicalPages > ((127*1024*1024) >> PAGE_SHIFT)) {
  1576. PMMPTE StartingPte;
  1577. PointerPde = MiGetPdeAddress ((PCHAR)MmPagedPoolEnd + 1);
  1578. StartingPte = MiGetPteAddress ((PCHAR)MmPagedPoolEnd + 1);
  1579. j = 0;
  1580. TempPte = ValidKernelPde;
  1581. LOCK_PFN (OldIrql);
  1582. while (PointerPde->u.Hard.Valid == 0) {
  1583. MiChargeCommitmentCantExpand (1, TRUE);
  1584. MM_TRACK_COMMIT (MM_DBG_COMMIT_EXTRA_SYSTEM_PTES, 1);
  1585. PageFrameIndex = MiRemoveZeroPage (
  1586. MI_GET_PAGE_COLOR_FROM_PTE (PointerPde));
  1587. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1588. MI_WRITE_VALID_PTE (PointerPde, TempPte);
  1589. MiInitializePfn (PageFrameIndex, PointerPde, 1);
  1590. PointerPde += 1;
  1591. StartingPte += PAGE_SIZE / sizeof(MMPTE);
  1592. j += PAGE_SIZE / sizeof(MMPTE);
  1593. }
  1594. UNLOCK_PFN (OldIrql);
  1595. if (j != 0) {
  1596. StartingPte = MiGetPteAddress ((PCHAR)MmPagedPoolEnd + 1);
  1597. MmNonPagedSystemStart = MiGetVirtualAddressMappedByPte (StartingPte);
  1598. MmNumberOfSystemPtes += j;
  1599. MiAddSystemPtes (StartingPte, j, SystemPteSpace);
  1600. MiIncrementSystemPtes (j);
  1601. }
  1602. }
  1603. #endif
  1604. #if defined (_MI_DEBUG_SUB)
  1605. if (MiTrackSubs != 0) {
  1606. MiSubsectionTraces = ExAllocatePoolWithTag (NonPagedPool,
  1607. MiTrackSubs * sizeof (MI_SUB_TRACES),
  1608. 'tCmM');
  1609. }
  1610. #endif
  1611. #if defined (_MI_DEBUG_DIRTY)
  1612. if (MiTrackDirtys != 0) {
  1613. MiDirtyTraces = ExAllocatePoolWithTag (NonPagedPool,
  1614. MiTrackDirtys * sizeof (MI_DIRTY_TRACES),
  1615. 'tCmM');
  1616. }
  1617. #endif
  1618. #if defined (_MI_DEBUG_DATA)
  1619. if (MiTrackData != 0) {
  1620. MiDataTraces = ExAllocatePoolWithTag (NonPagedPool,
  1621. MiTrackData * sizeof (MI_DATA_TRACES),
  1622. 'tCmM');
  1623. }
  1624. #endif
  1625. #if DBG
  1626. if (MmDebug & MM_DBG_DUMP_BOOT_PTES) {
  1627. MiDumpValidAddresses ();
  1628. MiDumpPfn ();
  1629. }
  1630. #endif
  1631. MmPageFaultNotifyRoutine = NULL;
  1632. #ifdef _MI_MESSAGE_SERVER
  1633. MiInitializeMessageQueue ();
  1634. #endif
  1635. return TRUE;
  1636. }
  1637. if (Phase == 1) {
  1638. #if DBG
  1639. MmDebug |= MM_DBG_CHECK_PFN_LOCK;
  1640. #endif
  1641. #if defined(_X86_) || defined(_AMD64_)
  1642. MiInitMachineDependent (LoaderBlock);
  1643. #endif
  1644. MiMapBBTMemory(LoaderBlock);
  1645. if (!MiSectionInitialization ()) {
  1646. return FALSE;
  1647. }
  1648. Process = PsGetCurrentProcess ();
  1649. if (Process->PhysicalVadList.Flink == NULL) {
  1650. InitializeListHead (&Process->PhysicalVadList);
  1651. }
  1652. #if defined(MM_SHARED_USER_DATA_VA)
  1653. //
  1654. // Create double mapped page between kernel and user mode.
  1655. // The PTE is deliberately allocated from paged pool so that
  1656. // it will always have a PTE itself instead of being superpaged.
  1657. // This way, checks throughout the fault handler can assume that
  1658. // the PTE can be checked without having to special case this.
  1659. //
  1660. MmSharedUserDataPte = ExAllocatePoolWithTag (PagedPool,
  1661. sizeof(MMPTE),
  1662. ' mM');
  1663. if (MmSharedUserDataPte == NULL) {
  1664. return FALSE;
  1665. }
  1666. PointerPte = MiGetPteAddress ((PVOID)KI_USER_SHARED_DATA);
  1667. ASSERT (PointerPte->u.Hard.Valid == 1);
  1668. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  1669. MI_MAKE_VALID_PTE (TempPte,
  1670. PageFrameIndex,
  1671. MM_READONLY,
  1672. PointerPte);
  1673. *MmSharedUserDataPte = TempPte;
  1674. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1675. LOCK_PFN (OldIrql);
  1676. Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  1677. UNLOCK_PFN (OldIrql);
  1678. #ifdef _X86_
  1679. if (MmHighestUserAddress < (PVOID) MM_SHARED_USER_DATA_VA) {
  1680. //
  1681. // Install the PTE mapping now as faults will not occur because the
  1682. // shared user data is in the system portion of the address space.
  1683. // Note the pagetable page has already been allocated and locked
  1684. // down.
  1685. //
  1686. //
  1687. // Make the mapping user accessible.
  1688. //
  1689. ASSERT (MmSharedUserDataPte->u.Hard.Owner == 0);
  1690. MmSharedUserDataPte->u.Hard.Owner = 1;
  1691. PointerPde = MiGetPdeAddress (MM_SHARED_USER_DATA_VA);
  1692. ASSERT (PointerPde->u.Hard.Owner == 0);
  1693. PointerPde->u.Hard.Owner = 1;
  1694. ASSERT (MiUseMaximumSystemSpace != 0);
  1695. PointerPte = MiGetPteAddress (MM_SHARED_USER_DATA_VA);
  1696. ASSERT (PointerPte->u.Hard.Valid == 0);
  1697. MI_WRITE_VALID_PTE (PointerPte, *MmSharedUserDataPte);
  1698. }
  1699. #endif
  1700. #endif
  1701. MiSessionWideInitializeAddresses ();
  1702. MiInitializeSessionWsSupport ();
  1703. MiInitializeSessionIds ();
  1704. //
  1705. // Start the modified page writer.
  1706. //
  1707. InitializeObjectAttributes (&ObjectAttributes, NULL, 0, NULL, NULL);
  1708. if (!NT_SUCCESS(PsCreateSystemThread(
  1709. &ThreadHandle,
  1710. THREAD_ALL_ACCESS,
  1711. &ObjectAttributes,
  1712. 0L,
  1713. NULL,
  1714. MiModifiedPageWriter,
  1715. NULL
  1716. ))) {
  1717. return FALSE;
  1718. }
  1719. ZwClose (ThreadHandle);
  1720. //
  1721. // Initialize the low and high memory events. This must be done
  1722. // before starting the working set manager.
  1723. //
  1724. if (MiInitializeMemoryEvents () == FALSE) {
  1725. return FALSE;
  1726. }
  1727. //
  1728. // Start the balance set manager.
  1729. //
  1730. // The balance set manager performs stack swapping and working
  1731. // set management and requires two threads.
  1732. //
  1733. KeInitializeEvent (&MmWorkingSetManagerEvent,
  1734. SynchronizationEvent,
  1735. FALSE);
  1736. InitializeObjectAttributes (&ObjectAttributes, NULL, 0, NULL, NULL);
  1737. if (!NT_SUCCESS(PsCreateSystemThread(
  1738. &ThreadHandle,
  1739. THREAD_ALL_ACCESS,
  1740. &ObjectAttributes,
  1741. 0L,
  1742. NULL,
  1743. KeBalanceSetManager,
  1744. NULL
  1745. ))) {
  1746. return FALSE;
  1747. }
  1748. ZwClose (ThreadHandle);
  1749. if (!NT_SUCCESS(PsCreateSystemThread(
  1750. &ThreadHandle,
  1751. THREAD_ALL_ACCESS,
  1752. &ObjectAttributes,
  1753. 0L,
  1754. NULL,
  1755. KeSwapProcessOrStack,
  1756. NULL
  1757. ))) {
  1758. return FALSE;
  1759. }
  1760. ZwClose (ThreadHandle);
  1761. #ifndef NO_POOL_CHECKS
  1762. MiInitializeSpecialPoolCriteria ();
  1763. #endif
  1764. #if defined(_X86_)
  1765. MiEnableKernelVerifier ();
  1766. #endif
  1767. ExAcquireResourceExclusiveLite (&PsLoadedModuleResource, TRUE);
  1768. NextEntry = PsLoadedModuleList.Flink;
  1769. for ( ; NextEntry != &PsLoadedModuleList; NextEntry = NextEntry->Flink) {
  1770. DataTableEntry = CONTAINING_RECORD(NextEntry,
  1771. KLDR_DATA_TABLE_ENTRY,
  1772. InLoadOrderLinks);
  1773. NtHeaders = RtlImageNtHeader(DataTableEntry->DllBase);
  1774. if ((NtHeaders->OptionalHeader.MajorOperatingSystemVersion >= 5) &&
  1775. (NtHeaders->OptionalHeader.MajorImageVersion >= 5)) {
  1776. DataTableEntry->Flags |= LDRP_ENTRY_NATIVE;
  1777. }
  1778. MiWriteProtectSystemImage (DataTableEntry->DllBase);
  1779. }
  1780. ExReleaseResourceLite (&PsLoadedModuleResource);
  1781. InterlockedDecrement (&MiTrimInProgressCount);
  1782. return TRUE;
  1783. }
  1784. if (Phase == 2) {
  1785. MiEnablePagingTheExecutive();
  1786. return TRUE;
  1787. }
  1788. return FALSE;
  1789. }
VOID
MiMapBBTMemory (
    IN PLOADER_PARAMETER_BLOCK LoaderBlock
    )

/*++

Routine Description:

    This function walks through the loader block's memory descriptor list
    and maps memory reserved for the BBT buffer into the system.

    The mapped PTEs are PDE-aligned and made user accessible.

Arguments:

    LoaderBlock - Supplies a pointer to the system loader block.

Return Value:

    None.

Environment:

    Kernel Mode Only.  System initialization.

--*/

{
    PVOID Va;
    PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
    PLIST_ENTRY NextMd;
    PFN_NUMBER NumberOfPagesMapped;
    PFN_NUMBER NumberOfPages;
    PFN_NUMBER PageFrameIndex;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE LastPde;
    MMPTE TempPte;

    //
    // Nothing to do unless the loader reserved BBT pages.  Note
    // BBTPagesToReserve is an unsigned PFN_COUNT, so the <= 0 test
    // is effectively an == 0 test.
    //

    if (BBTPagesToReserve <= 0) {
        return;
    }

    //
    // Request enough PTEs such that protection can be applied to the PDEs.
    // Round the request up to a whole page table's worth of PTEs and ask
    // for a PDE-aligned range so entire page directory entries can be
    // marked user-accessible below.
    //

    NumberOfPages = (BBTPagesToReserve + (PTE_PER_PAGE - 1)) & ~(PTE_PER_PAGE - 1);

    PointerPte = MiReserveAlignedSystemPtes ((ULONG)NumberOfPages,
                                             SystemPteSpace,
                                             MM_VA_MAPPED_BY_PDE);

    if (PointerPte == NULL) {

        //
        // No system PTEs are available - disable BBT buffering entirely.
        //

        BBTPagesToReserve = 0;
        return;
    }

    //
    // Allow user access to the buffer by setting the owner bit in every
    // page directory entry spanning the reserved range.
    //

    PointerPde = MiGetPteAddress (PointerPte);
    LastPde = MiGetPteAddress (PointerPte + NumberOfPages);

    ASSERT (LastPde != PointerPde);

    do {
        TempPte = *PointerPde;
        TempPte.u.Long |= MM_PTE_OWNER_MASK;
        MI_WRITE_VALID_PTE (PointerPde, TempPte);
        PointerPde += 1;
    } while (PointerPde < LastPde);

    //
    // The owner bits of live PDEs just changed, so flush stale
    // translations from all processors.
    //

    KeFlushEntireTb (TRUE, TRUE);

    Va = MiGetVirtualAddressMappedByPte (PointerPte);

    TempPte = ValidUserPte;

    NumberOfPagesMapped = 0;

    //
    // Walk the loader's memory descriptors, mapping each LoaderBBTMemory
    // run into the reserved PTEs until BBTPagesToReserve pages have been
    // mapped.
    //

    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;

    while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {

        MemoryDescriptor = CONTAINING_RECORD(NextMd,
                                             MEMORY_ALLOCATION_DESCRIPTOR,
                                             ListEntry);

        if (MemoryDescriptor->MemoryType == LoaderBBTMemory) {

            PageFrameIndex = MemoryDescriptor->BasePage;
            NumberOfPages = MemoryDescriptor->PageCount;

            //
            // Clip this run so the running total never exceeds the
            // number of pages reserved.
            //

            if (NumberOfPagesMapped + NumberOfPages > BBTPagesToReserve) {
                NumberOfPages = BBTPagesToReserve - NumberOfPagesMapped;
            }

            NumberOfPagesMapped += NumberOfPages;

            do {
                TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
                MI_WRITE_VALID_PTE (PointerPte, TempPte);
                PointerPte += 1;
                PageFrameIndex += 1;
                NumberOfPages -= 1;
            } while (NumberOfPages);

            if (NumberOfPagesMapped == BBTPagesToReserve) {
                break;
            }
        }

        NextMd = MemoryDescriptor->ListEntry.Flink;
    }

    //
    // NOTE(review): the full reserved range is zeroed here, but if fewer
    // than BBTPagesToReserve pages were mapped above the tail PTEs were
    // never written.  Presumably the loader always supplies enough
    // LoaderBBTMemory descriptors to cover the reservation - confirm.
    //

    RtlZeroMemory(Va, BBTPagesToReserve << PAGE_SHIFT);

    //
    // Tell BBT_Init how many pages were allocated.
    //

    if (NumberOfPagesMapped < BBTPagesToReserve) {
        BBTPagesToReserve = (ULONG)NumberOfPagesMapped;
    }

    //
    // The first ULONG of the buffer carries the page count for the
    // instrumentation code to read.
    //

    *(PULONG)Va = BBTPagesToReserve;

    //
    // At this point instrumentation code will detect the existence of
    // the buffer and initialize the structures.
    //

    BBTBuffer = Va;

    PERFINFO_MMINIT_START();
}
PPHYSICAL_MEMORY_DESCRIPTOR
MmInitializeMemoryLimits (
    IN PLOADER_PARAMETER_BLOCK LoaderBlock,
    IN PBOOLEAN IncludeType,
    IN OUT PPHYSICAL_MEMORY_DESCRIPTOR InputMemory OPTIONAL
    )

/*++

Routine Description:

    This function walks through the loader block's memory
    descriptor list and builds a list of contiguous physical
    memory blocks of the desired types.  Adjacent descriptors of
    included types are merged into single runs.

Arguments:

    LoaderBlock - Supplies a pointer the system loader block.

    IncludeType - Array of BOOLEANS of size LoaderMaximum.
                  TRUE means include this type of memory in return.

    InputMemory - If non-NULL, supplies the physical memory blocks to place
                  the search results in.  If NULL, pool is allocated to hold
                  the returned search results in - the caller must free this
                  pool.

Return Value:

    A pointer to the physical memory blocks for the requested search or NULL
    on failure.

Environment:

    Kernel Mode Only.  System initialization.

--*/

{
    PLIST_ENTRY NextMd;
    ULONG i;
    ULONG InitialAllocation;
    PFN_NUMBER NextPage;
    PFN_NUMBER TotalPages;
    PPHYSICAL_MEMORY_DESCRIPTOR Memory;
    PPHYSICAL_MEMORY_DESCRIPTOR Memory2;
    PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;

    InitialAllocation = 0;

    if (ARGUMENT_PRESENT (InputMemory)) {
        Memory = InputMemory;
    }
    else {

        //
        // The caller wants us to allocate the return result buffer.  Size it
        // by allocating the maximum possibly needed as this should not be
        // very big (relatively).  It is the caller's responsibility to free
        // this.  Obviously this option can only be requested after pool has
        // been initialized.
        //

        //
        // First pass: count every descriptor so the buffer is an upper
        // bound (one run per descriptor, before any merging).
        //

        NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;

        while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
            InitialAllocation += 1;
            MemoryDescriptor = CONTAINING_RECORD(NextMd,
                                                 MEMORY_ALLOCATION_DESCRIPTOR,
                                                 ListEntry);
            NextMd = MemoryDescriptor->ListEntry.Flink;
        }

        //
        // Note PHYSICAL_MEMORY_DESCRIPTOR already contains one embedded
        // run, hence the (InitialAllocation - 1) sizing.
        //

        Memory = ExAllocatePoolWithTag (NonPagedPool,
                                        sizeof(PHYSICAL_MEMORY_DESCRIPTOR) + sizeof(PHYSICAL_MEMORY_RUN) * (InitialAllocation - 1),
                                        'lMmM');

        if (Memory == NULL) {
            return NULL;
        }

        Memory->NumberOfRuns = InitialAllocation;
    }

    //
    // Walk through the memory descriptors and build the physical memory list.
    //

    i = 0;
    TotalPages = 0;

    //
    // NextPage tracks the page immediately after the previous included
    // run so physically adjacent descriptors can be merged; start it at
    // -1 so the first descriptor never matches.
    //

    NextPage = (PFN_NUMBER) -1;

    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;

    while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {

        MemoryDescriptor = CONTAINING_RECORD(NextMd,
                                             MEMORY_ALLOCATION_DESCRIPTOR,
                                             ListEntry);

        if (MemoryDescriptor->MemoryType < LoaderMaximum &&
            IncludeType [MemoryDescriptor->MemoryType]) {

            TotalPages += MemoryDescriptor->PageCount;

            //
            // Merge runs whenever possible.
            //

            if (MemoryDescriptor->BasePage == NextPage) {
                ASSERT (MemoryDescriptor->PageCount != 0);
                Memory->Run[i - 1].PageCount += MemoryDescriptor->PageCount;
                NextPage += MemoryDescriptor->PageCount;
            }
            else {
                Memory->Run[i].BasePage = MemoryDescriptor->BasePage;
                Memory->Run[i].PageCount = MemoryDescriptor->PageCount;
                NextPage = Memory->Run[i].BasePage + Memory->Run[i].PageCount;
                i += 1;
            }
        }
        NextMd = MemoryDescriptor->ListEntry.Flink;
    }

    ASSERT (i <= Memory->NumberOfRuns);

    if (i == 0) {

        //
        // Don't bother shrinking this as the caller will be freeing it
        // shortly as it is just an empty list.
        //

        Memory->Run[i].BasePage = 0;
        Memory->Run[i].PageCount = 0;

    }
    else if (!ARGUMENT_PRESENT (InputMemory)) {

        //
        // Shrink the buffer (if possible) now that the final size is known.
        // This is best-effort: if the smaller allocation fails, the
        // original (oversized but correct) buffer is returned instead.
        //

        if (InitialAllocation > i) {

            Memory2 = ExAllocatePoolWithTag (NonPagedPool,
                                             sizeof(PHYSICAL_MEMORY_DESCRIPTOR) + sizeof(PHYSICAL_MEMORY_RUN) * (i - 1),
                                             'lMmM');

            if (Memory2 != NULL) {

                RtlCopyMemory (Memory2->Run,
                               Memory->Run,
                               sizeof(PHYSICAL_MEMORY_RUN) * i);

                ExFreePool (Memory);

                Memory = Memory2;
            }
        }
    }

    //
    // Set the header fields last so both the shrink and no-shrink paths
    // end up consistent.
    //

    Memory->NumberOfRuns = i;
    Memory->NumberOfPages = TotalPages;

    return Memory;
}
  2009. PFN_NUMBER
  2010. MiPagesInLoaderBlock (
  2011. IN PLOADER_PARAMETER_BLOCK LoaderBlock,
  2012. IN PBOOLEAN IncludeType
  2013. )
  2014. /*++
  2015. Routine Description:
  2016. This function walks through the loader block's memory
  2017. descriptor list and returns the number of pages of the desired type.
  2018. Arguments:
  2019. LoaderBlock - Supplies a pointer the system loader block.
  2020. IncludeType - Array of BOOLEANS of size LoaderMaximum.
  2021. TRUE means include this type of memory in the returned count.
  2022. Return Value:
  2023. The number of pages of the requested type in the loader block list.
  2024. Environment:
  2025. Kernel Mode Only. System initialization.
  2026. --*/
  2027. {
  2028. PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
  2029. PLIST_ENTRY NextMd;
  2030. PFN_NUMBER TotalPages;
  2031. //
  2032. // Walk through the memory descriptors counting pages.
  2033. //
  2034. TotalPages = 0;
  2035. NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
  2036. while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
  2037. MemoryDescriptor = CONTAINING_RECORD(NextMd,
  2038. MEMORY_ALLOCATION_DESCRIPTOR,
  2039. ListEntry);
  2040. if (MemoryDescriptor->MemoryType < LoaderMaximum &&
  2041. IncludeType [MemoryDescriptor->MemoryType]) {
  2042. TotalPages += MemoryDescriptor->PageCount;
  2043. }
  2044. NextMd = MemoryDescriptor->ListEntry.Flink;
  2045. }
  2046. return TotalPages;
  2047. }
static
VOID
MiMemoryLicense (
    IN PLOADER_PARAMETER_BLOCK LoaderBlock
    )

/*++

Routine Description:

    This function walks through the loader block's memory descriptor list
    and based on the system's license, ensures only the proper amount of
    physical memory is used.  Descriptors beyond the licensed limit are
    removed from (or truncated in) the loader's list in place.

Arguments:

    LoaderBlock - Supplies a pointer to the system loader block.

Return Value:

    None.

Environment:

    Kernel Mode Only.  System initialization.

--*/

{
    PLIST_ENTRY NextMd;
    PFN_NUMBER TotalPagesAllowed;
    PFN_NUMBER PageCount;
    ULONG VirtualBias;
    PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;

    //
    // The default configuration gets a maximum of 4gb physical memory.
    // On PAE machines the system continues to operate in 8-byte PTE mode.
    //

    TotalPagesAllowed = MI_DEFAULT_MAX_PAGES;

    //
    // If properly licensed (ie: DataCenter) and booted without the
    // 3gb switch, then use all available physical memory.
    //

    if (ExVerifySuite(DataCenter) == TRUE) {

        //
        // Note MmVirtualBias has not yet been initialized at the time of the
        // first call to this routine, so use the LoaderBlock directly.
        //

#if defined(_X86_)
        VirtualBias = LoaderBlock->u.I386.VirtualBias;
#else
        VirtualBias = 0;
#endif

        if (VirtualBias == 0) {

            //
            // Limit the maximum physical memory to the amount we have
            // actually physically seen in a machine inhouse.
            //

            TotalPagesAllowed = MI_DTC_MAX_PAGES;
        }
        else {

            //
            // The system is booting /3gb, so don't use more than 16gb of
            // physical memory.  This ensures enough virtual space to map
            // the PFN database.
            //

            TotalPagesAllowed = MI_DTC_BOOTED_3GB_MAX_PAGES;
        }
    }
    else if ((MmProductType != 0x00690057) &&
             (ExVerifySuite(Enterprise) == TRUE)) {

        //
        // Enforce the Advanced Server physical memory limit.
        // On PAE machines the system continues to operate in 8-byte PTE mode.
        // (0x00690057 is the Unicode characters "Wi" - presumably the
        // registry ProductType prefix identifying a workstation; verify
        // against where MmProductType is assigned.)
        //

        TotalPagesAllowed = MI_ADS_MAX_PAGES;
    }
    else if (ExVerifySuite(Blade) == TRUE) {

        //
        // Enforce the Blade physical memory limit.
        //

        TotalPagesAllowed = MI_BLADE_MAX_PAGES;
    }

    //
    // Walk through the memory descriptors and remove or truncate descriptors
    // that exceed the maximum physical memory to be used.
    //

    PageCount = 0;

    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;

    while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {

        MemoryDescriptor = CONTAINING_RECORD(NextMd,
                                             MEMORY_ALLOCATION_DESCRIPTOR,
                                             ListEntry);

        //
        // Firmware, BBT, bad and special memory ranges are not usable
        // RAM so they never count against the license limit.
        //

        if ((MemoryDescriptor->MemoryType == LoaderFirmwarePermanent) ||
            (MemoryDescriptor->MemoryType == LoaderBBTMemory) ||
            (MemoryDescriptor->MemoryType == LoaderBad) ||
            (MemoryDescriptor->MemoryType == LoaderSpecialMemory)) {

            NextMd = MemoryDescriptor->ListEntry.Flink;
            continue;
        }

        PageCount += MemoryDescriptor->PageCount;

        if (PageCount <= TotalPagesAllowed) {
            NextMd = MemoryDescriptor->ListEntry.Flink;
            continue;
        }

        //
        // This descriptor needs to be removed or truncated.
        //

        if (PageCount - MemoryDescriptor->PageCount >= TotalPagesAllowed) {

            //
            // Completely remove this descriptor.
            //
            // Note since this only adjusts the links and since the entry is
            // not freed, it can still be safely referenced again below to
            // obtain the NextMd.  N.B.  This keeps the memory descriptors
            // sorted in ascending order.
            //

            RemoveEntryList (NextMd);
        }
        else {

            //
            // Truncate this descriptor.
            //

            ASSERT (PageCount - MemoryDescriptor->PageCount < TotalPagesAllowed);

            MemoryDescriptor->PageCount -= (ULONG)(PageCount - TotalPagesAllowed);
            PageCount = TotalPagesAllowed;
        }

        NextMd = MemoryDescriptor->ListEntry.Flink;
    }

    return;
}
VOID
MmFreeLoaderBlock (
    IN PLOADER_PARAMETER_BLOCK LoaderBlock
    )

/*++

Routine Description:

    This function is called as the last routine in phase 1 initialization.
    It frees memory used by the OsLoader (loader heap, registry data and
    NLS data runs), returning those pages to the PFN free list and
    crediting the freed pages back to the systemwide commit limits.

Arguments:

    LoaderBlock - Supplies a pointer to the system loader block.

Return Value:

    None.

Environment:

    Kernel Mode Only.  System initialization.

--*/

{
    PLIST_ENTRY NextMd;
    PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
    ULONG i;
    PFN_NUMBER NextPhysicalPage;
    PFN_NUMBER PagesFreed;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    PPHYSICAL_MEMORY_RUN RunBase;
    PPHYSICAL_MEMORY_RUN Runs;

    //
    // Count the descriptors so a temporary run array (worst case: one
    // run per descriptor) can be sized.
    //

    i = 0;

    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;

    while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
        i += 1;
        MemoryDescriptor = CONTAINING_RECORD(NextMd,
                                             MEMORY_ALLOCATION_DESCRIPTOR,
                                             ListEntry);
        NextMd = MemoryDescriptor->ListEntry.Flink;
    }

    RunBase = ExAllocatePoolWithTag (NonPagedPool,
                                     sizeof(PHYSICAL_MEMORY_RUN) * i,
                                     'lMmM');

    if (RunBase == NULL) {

        //
        // Best effort only - without the temporary array the loader
        // pages simply remain unreclaimed.
        //

        return;
    }

    Runs = RunBase;

    //
    //
    // Walk through the memory descriptors and add pages to the
    // free list in the PFN database.
    //

    NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;

    while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {

        MemoryDescriptor = CONTAINING_RECORD(NextMd,
                                             MEMORY_ALLOCATION_DESCRIPTOR,
                                             ListEntry);

        switch (MemoryDescriptor->MemoryType) {
            case LoaderOsloaderHeap:
            case LoaderRegistryData:
            case LoaderNlsData:
            //case LoaderMemoryData:  //this has page table and other stuff.

                //
                // Capture the data to temporary storage so we won't
                // free memory we are referencing.
                //

                Runs->BasePage = MemoryDescriptor->BasePage;
                Runs->PageCount = MemoryDescriptor->PageCount;
                Runs += 1;

                break;

            default:
                break;
        }
        NextMd = MemoryDescriptor->ListEntry.Flink;
    }

    PagesFreed = 0;

    LOCK_PFN (OldIrql);

    //
    // Process the captured runs from last to first, releasing each page
    // to the PFN free list.
    //

    if (Runs != RunBase) {
        Runs -= 1;
        do {

            i = (ULONG)Runs->PageCount;
            NextPhysicalPage = Runs->BasePage;

#if defined (_MI_MORE_THAN_4GB_)
            if (MiNoLowMemory != 0) {
                if (NextPhysicalPage < MiNoLowMemory) {

                    //
                    // Don't free this run as it is below the memory threshold
                    // configured for this system.
                    //

                    Runs -= 1;
                    continue;
                }
            }
#endif

            Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);

            PagesFreed += i;

            while (i != 0) {

                if (Pfn1->u3.e2.ReferenceCount == 0) {
                    if (Pfn1->u1.Flink == 0) {

                        //
                        // The page is not referenced and not already on a
                        // list - put it on the free list now.
                        //
                        // Set the PTE address to the physical page for
                        // virtual address alignment checking.
                        //

                        Pfn1->PteAddress =
                                        (PMMPTE)(NextPhysicalPage << PTE_SHIFT);

                        MiDetermineNode (NextPhysicalPage, Pfn1);
                        MiInsertPageInFreeList (NextPhysicalPage);
                    }
                }
                else {

                    if (NextPhysicalPage != 0) {

                        //
                        // Remove PTE and insert into the free list.  If it is
                        // a physical address within the PFN database, the PTE
                        // element does not exist and therefore cannot be updated.
                        //

                        if (!MI_IS_PHYSICAL_ADDRESS (
                                MiGetVirtualAddressMappedByPte (Pfn1->PteAddress))) {

                            //
                            // Not a physical address.
                            //

                            *(Pfn1->PteAddress) = ZeroPte;
                        }

                        MI_SET_PFN_DELETED (Pfn1);
                        MiDecrementShareCountOnly (NextPhysicalPage);
                    }
                }

                Pfn1 += 1;
                i -= 1;
                NextPhysicalPage += 1;
            }

            Runs -= 1;
        } while (Runs >= RunBase);
    }

    //
    // Since systemwide commitment was determined early in Phase 0 and
    // excluded the ranges just freed, add them back in now.
    //

    if (PagesFreed != 0) {
        InterlockedExchangeAddSizeT (&MmTotalCommitLimitMaximum, PagesFreed);
        InterlockedExchangeAddSizeT (&MmTotalCommitLimit, PagesFreed);
    }

#if defined(_X86_)

    if (MmVirtualBias != 0) {

        //
        // If the kernel has been biased to allow for 3gb of user address space,
        // then the first 16mb of memory is doubly mapped to KSEG0_BASE and to
        // ALTERNATE_BASE.  Therefore, the KSEG0_BASE entries must be unmapped.
        //

        PMMPTE Pde;
        ULONG NumberOfPdes;

        NumberOfPdes = MmBootImageSize / MM_VA_MAPPED_BY_PDE;

        Pde = MiGetPdeAddress((PVOID)KSEG0_BASE);

        for (i = 0; i < NumberOfPdes; i += 1) {
            MI_WRITE_INVALID_PTE (Pde, ZeroKernelPte);
            Pde += 1;
        }
    }
#endif

    //
    // Flush all processors' TBs so any stale mappings to the freed (and
    // on x86 /3gb, unmapped) ranges are discarded.
    //

    KeFlushEntireTb (TRUE, TRUE);

    UNLOCK_PFN (OldIrql);

    ExFreePool (RunBase);

    return;
}
  2326. VOID
  2327. MiBuildPagedPool (
  2328. VOID
  2329. )
  2330. /*++
  2331. Routine Description:
  2332. This function is called to build the structures required for paged
  2333. pool and initialize the pool. Once this routine is called, paged
  2334. pool may be allocated.
  2335. Arguments:
  2336. None.
  2337. Return Value:
  2338. None.
  2339. Environment:
  2340. Kernel Mode Only. System initialization.
  2341. --*/
  2342. {
  2343. SIZE_T Size;
  2344. PMMPTE PointerPte;
  2345. PMMPTE LastPte;
  2346. PMMPTE LastPde;
  2347. PMMPTE PointerPde;
  2348. MMPTE TempPte;
  2349. PFN_NUMBER PageFrameIndex;
  2350. PFN_NUMBER ContainingFrame;
  2351. SIZE_T AdditionalCommittedPages;
  2352. KIRQL OldIrql;
  2353. ULONG i;
  2354. #if (_MI_PAGING_LEVELS >= 4)
  2355. PMMPTE PointerPxe;
  2356. PMMPTE PointerPxeEnd;
  2357. #endif
  2358. #if (_MI_PAGING_LEVELS >= 3)
  2359. PVOID LastVa;
  2360. PMMPTE PointerPpe;
  2361. PMMPTE PointerPpeEnd;
  2362. #else
  2363. PMMPFN Pfn1;
  2364. #endif
  2365. i = 0;
  2366. AdditionalCommittedPages = 0;
  2367. #if (_MI_PAGING_LEVELS < 3)
  2368. //
  2369. // Double map system page directory page.
  2370. //
  2371. PointerPte = MiGetPteAddress(PDE_BASE);
  2372. for (i = 0 ; i < PD_PER_SYSTEM; i += 1) {
  2373. MmSystemPageDirectory[i] = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  2374. Pfn1 = MI_PFN_ELEMENT(MmSystemPageDirectory[i]);
  2375. Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  2376. PointerPte += 1;
  2377. }
  2378. //
  2379. // Was not mapped physically, map it virtually in system space.
  2380. //
  2381. PointerPte = MiReserveSystemPtes (PD_PER_SYSTEM, SystemPteSpace);
  2382. if (PointerPte == NULL) {
  2383. MiIssueNoPtesBugcheck (PD_PER_SYSTEM, SystemPteSpace);
  2384. }
  2385. MmSystemPagePtes = (PMMPTE)MiGetVirtualAddressMappedByPte (PointerPte);
  2386. TempPte = ValidKernelPde;
  2387. for (i = 0 ; i < PD_PER_SYSTEM; i += 1) {
  2388. TempPte.u.Hard.PageFrameNumber = MmSystemPageDirectory[i];
  2389. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  2390. PointerPte += 1;
  2391. }
  2392. #endif
  2393. if (MmPagedPoolMaximumDesired == TRUE) {
  2394. MmSizeOfPagedPoolInBytes =
  2395. ((PCHAR)MmNonPagedSystemStart - (PCHAR)MmPagedPoolStart);
  2396. }
  2397. else if (MmSizeOfPagedPoolInBytes == 0) {
  2398. //
  2399. // A size of 0 means size the pool based on physical memory.
  2400. //
  2401. MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
  2402. #if (_MI_PAGING_LEVELS >= 3)
  2403. MmSizeOfPagedPoolInBytes *= 2;
  2404. #endif
  2405. }
  2406. if (MmIsThisAnNtAsSystem()) {
  2407. if ((MmNumberOfPhysicalPages > ((24*1024*1024) >> PAGE_SHIFT)) &&
  2408. (MmSizeOfPagedPoolInBytes < MM_MINIMUM_PAGED_POOL_NTAS)) {
  2409. MmSizeOfPagedPoolInBytes = MM_MINIMUM_PAGED_POOL_NTAS;
  2410. }
  2411. }
  2412. if (MmSizeOfPagedPoolInBytes >
  2413. (ULONG_PTR)((PCHAR)MmNonPagedSystemStart - (PCHAR)MmPagedPoolStart)) {
  2414. MmSizeOfPagedPoolInBytes =
  2415. ((PCHAR)MmNonPagedSystemStart - (PCHAR)MmPagedPoolStart);
  2416. }
  2417. Size = BYTES_TO_PAGES(MmSizeOfPagedPoolInBytes);
  2418. if (Size < MM_MIN_INITIAL_PAGED_POOL) {
  2419. Size = MM_MIN_INITIAL_PAGED_POOL;
  2420. }
  2421. if (Size > (MM_MAX_PAGED_POOL >> PAGE_SHIFT)) {
  2422. Size = MM_MAX_PAGED_POOL >> PAGE_SHIFT;
  2423. }
  2424. #if defined (_WIN64)
  2425. //
  2426. // NT64 places system mapped views directly after paged pool. Ensure
  2427. // enough VA space is available.
  2428. //
  2429. if (Size + (MmSystemViewSize >> PAGE_SHIFT) > (MM_MAX_PAGED_POOL >> PAGE_SHIFT)) {
  2430. ASSERT (MmSizeOfPagedPoolInBytes > 2 * MmSystemViewSize);
  2431. MmSizeOfPagedPoolInBytes -= MmSystemViewSize;
  2432. Size = BYTES_TO_PAGES(MmSizeOfPagedPoolInBytes);
  2433. }
  2434. #endif
  2435. Size = (Size + (PTE_PER_PAGE - 1)) / PTE_PER_PAGE;
  2436. MmSizeOfPagedPoolInBytes = (ULONG_PTR)Size * PAGE_SIZE * PTE_PER_PAGE;
  2437. //
  2438. // Set size to the number of pages in the pool.
  2439. //
  2440. Size = Size * PTE_PER_PAGE;
  2441. //
  2442. // If paged pool is really nonpagable then limit the size based
  2443. // on how much physical memory is actually present. Disable this
  2444. // feature if not enough physical memory is present to do it.
  2445. //
  2446. if (MmDisablePagingExecutive & MM_PAGED_POOL_LOCKED_DOWN) {
  2447. Size = MmSizeOfPagedPoolInBytes / PAGE_SIZE;
  2448. if ((MI_NONPAGABLE_MEMORY_AVAILABLE() < 2048) ||
  2449. (MmAvailablePages < 2048)) {
  2450. Size = 0;
  2451. }
  2452. else {
  2453. if ((SPFN_NUMBER)(Size) > MI_NONPAGABLE_MEMORY_AVAILABLE() - 2048) {
  2454. Size = (MI_NONPAGABLE_MEMORY_AVAILABLE() - 2048);
  2455. }
  2456. if (Size > MmAvailablePages - 2048) {
  2457. Size = MmAvailablePages - 2048;
  2458. }
  2459. }
  2460. Size = ((Size * PAGE_SIZE) / MM_VA_MAPPED_BY_PDE) * MM_VA_MAPPED_BY_PDE;
  2461. if ((((Size / 5) * 4) >= MmSizeOfPagedPoolInBytes) &&
  2462. (Size >= MM_MIN_INITIAL_PAGED_POOL)) {
  2463. MmSizeOfPagedPoolInBytes = Size;
  2464. }
  2465. else {
  2466. MmDisablePagingExecutive &= ~MM_PAGED_POOL_LOCKED_DOWN;
  2467. }
  2468. Size = MmSizeOfPagedPoolInBytes >> PAGE_SHIFT;
  2469. }
  2470. ASSERT ((MmSizeOfPagedPoolInBytes + (PCHAR)MmPagedPoolStart) <=
  2471. (PCHAR)MmNonPagedSystemStart);
  2472. ASSERT64 ((MmSizeOfPagedPoolInBytes + (PCHAR)MmPagedPoolStart + MmSystemViewSize) <=
  2473. (PCHAR)MmNonPagedSystemStart);
  2474. MmPagedPoolEnd = (PVOID)(((PUCHAR)MmPagedPoolStart +
  2475. MmSizeOfPagedPoolInBytes) - 1);
  2476. MmPageAlignedPoolBase[PagedPool] = MmPagedPoolStart;
  2477. //
  2478. // Build page table page for paged pool.
  2479. //
  2480. PointerPde = MiGetPdeAddress (MmPagedPoolStart);
  2481. TempPte = ValidKernelPde;
  2482. #if (_MI_PAGING_LEVELS >= 3)
  2483. //
  2484. // Map in all the page directory pages to span all of paged pool.
  2485. // This removes the need for a system lookup directory.
  2486. //
  2487. LastVa = (PVOID)((PCHAR)MmPagedPoolEnd + MmSystemViewSize);
  2488. PointerPpe = MiGetPpeAddress (MmPagedPoolStart);
  2489. PointerPpeEnd = MiGetPpeAddress (LastVa);
  2490. MiSystemViewStart = (ULONG_PTR)MmPagedPoolEnd + 1;
  2491. PointerPde = MiGetPdeAddress (MmPagedPoolEnd) + 1;
  2492. LastPde = MiGetPdeAddress (LastVa);
  2493. LOCK_PFN (OldIrql);
  2494. #if (_MI_PAGING_LEVELS >= 4)
  2495. PointerPxe = MiGetPxeAddress (MmPagedPoolStart);
  2496. PointerPxeEnd = MiGetPxeAddress (LastVa);
  2497. while (PointerPxe <= PointerPxeEnd) {
  2498. if (PointerPxe->u.Hard.Valid == 0) {
  2499. PageFrameIndex = MiRemoveAnyPage(
  2500. MI_GET_PAGE_COLOR_FROM_PTE (PointerPxe));
  2501. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  2502. MI_WRITE_VALID_PTE (PointerPxe, TempPte);
  2503. MiInitializePfn (PageFrameIndex, PointerPxe, 1);
  2504. //
  2505. // Make all entries no access since the PDEs may not fill the page.
  2506. //
  2507. MiFillMemoryPte (MiGetVirtualAddressMappedByPte (PointerPxe),
  2508. PAGE_SIZE,
  2509. MM_KERNEL_NOACCESS_PTE);
  2510. MmResidentAvailablePages -= 1;
  2511. AdditionalCommittedPages += 1;
  2512. }
  2513. PointerPxe += 1;
  2514. }
  2515. #endif
  2516. while (PointerPpe <= PointerPpeEnd) {
  2517. if (PointerPpe->u.Hard.Valid == 0) {
  2518. PageFrameIndex = MiRemoveAnyPage(
  2519. MI_GET_PAGE_COLOR_FROM_PTE (PointerPpe));
  2520. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  2521. MI_WRITE_VALID_PTE (PointerPpe, TempPte);
  2522. MiInitializePfn (PageFrameIndex, PointerPpe, 1);
  2523. //
  2524. // Make all entries no access since the PDEs may not fill the page.
  2525. //
  2526. MiFillMemoryPte (MiGetVirtualAddressMappedByPte (PointerPpe),
  2527. PAGE_SIZE,
  2528. MM_KERNEL_NOACCESS_PTE);
  2529. MmResidentAvailablePages -= 1;
  2530. AdditionalCommittedPages += 1;
  2531. }
  2532. PointerPpe += 1;
  2533. }
  2534. //
  2535. // Initialize the system view page table pages.
  2536. //
  2537. MmResidentAvailablePages -= (LastPde - PointerPde + 1);
  2538. AdditionalCommittedPages += (LastPde - PointerPde + 1);
  2539. while (PointerPde <= LastPde) {
  2540. ASSERT (PointerPde->u.Hard.Valid == 0);
  2541. PageFrameIndex = MiRemoveAnyPage(
  2542. MI_GET_PAGE_COLOR_FROM_PTE (PointerPde));
  2543. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  2544. MI_WRITE_VALID_PTE (PointerPde, TempPte);
  2545. MiInitializePfn (PageFrameIndex, PointerPde, 1);
  2546. MiFillMemoryPte (MiGetVirtualAddressMappedByPte (PointerPde),
  2547. PAGE_SIZE,
  2548. ZeroKernelPte.u.Long);
  2549. PointerPde += 1;
  2550. }
  2551. UNLOCK_PFN (OldIrql);
  2552. PointerPde = MiGetPdeAddress (MmPagedPoolStart);
  2553. #endif
  2554. PointerPte = MiGetPteAddress (MmPagedPoolStart);
  2555. MmPagedPoolInfo.FirstPteForPagedPool = PointerPte;
  2556. MmPagedPoolInfo.LastPteForPagedPool = MiGetPteAddress (MmPagedPoolEnd);
  2557. MiFillMemoryPte (PointerPde,
  2558. sizeof(MMPTE) *
  2559. (1 + MiGetPdeAddress (MmPagedPoolEnd) - PointerPde),
  2560. MM_KERNEL_NOACCESS_PTE);
  2561. LOCK_PFN (OldIrql);
  2562. //
  2563. // Map in a page table page.
  2564. //
  2565. PageFrameIndex = MiRemoveAnyPage(
  2566. MI_GET_PAGE_COLOR_FROM_PTE (PointerPde));
  2567. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  2568. MI_WRITE_VALID_PTE (PointerPde, TempPte);
  2569. #if (_MI_PAGING_LEVELS >= 3)
  2570. ContainingFrame = MI_GET_PAGE_FRAME_FROM_PTE(MiGetPpeAddress (MmPagedPoolStart));
  2571. #else
  2572. ContainingFrame = MmSystemPageDirectory[(PointerPde - MiGetPdeAddress(0)) / PDE_PER_PAGE];
  2573. #endif
  2574. MiInitializePfnForOtherProcess (PageFrameIndex,
  2575. PointerPde,
  2576. ContainingFrame);
  2577. MiFillMemoryPte (PointerPte, PAGE_SIZE, MM_KERNEL_NOACCESS_PTE);
  2578. UNLOCK_PFN (OldIrql);
  2579. MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;
  2580. //
  2581. // Build bitmaps for paged pool.
  2582. //
  2583. MiCreateBitMap (&MmPagedPoolInfo.PagedPoolAllocationMap, Size, NonPagedPool);
  2584. RtlSetAllBits (MmPagedPoolInfo.PagedPoolAllocationMap);
  2585. //
  2586. // Indicate first page worth of PTEs are available.
  2587. //
  2588. RtlClearBits (MmPagedPoolInfo.PagedPoolAllocationMap, 0, PTE_PER_PAGE);
  2589. MiCreateBitMap (&MmPagedPoolInfo.EndOfPagedPoolBitmap, Size, NonPagedPool);
  2590. RtlClearAllBits (MmPagedPoolInfo.EndOfPagedPoolBitmap);
  2591. //
  2592. // If verifier is present then build the verifier paged pool bitmap.
  2593. //
  2594. if (MmVerifyDriverBufferLength != (ULONG)-1) {
  2595. MiCreateBitMap (&VerifierLargePagedPoolMap, Size, NonPagedPool);
  2596. RtlClearAllBits (VerifierLargePagedPoolMap);
  2597. }
  2598. //
  2599. // Initialize paged pool.
  2600. //
  2601. InitializePool (PagedPool, 0L);
  2602. //
  2603. // If paged pool is really nonpagable then allocate the memory now.
  2604. //
  2605. if (MmDisablePagingExecutive & MM_PAGED_POOL_LOCKED_DOWN) {
  2606. PointerPde = MiGetPdeAddress (MmPagedPoolStart);
  2607. PointerPde += 1;
  2608. LastPde = MiGetPdeAddress (MmPagedPoolEnd);
  2609. TempPte = ValidKernelPde;
  2610. PointerPte = MiGetPteAddress (MmPagedPoolStart);
  2611. LastPte = MiGetPteAddress (MmPagedPoolEnd);
  2612. ASSERT (MmPagedPoolCommit == 0);
  2613. MmPagedPoolCommit = (ULONG)(LastPte - PointerPte + 1);
  2614. ASSERT (MmPagedPoolInfo.PagedPoolCommit == 0);
  2615. MmPagedPoolInfo.PagedPoolCommit = MmPagedPoolCommit;
  2616. #if DBG
  2617. //
  2618. // Ensure no paged pool has been allocated yet.
  2619. //
  2620. for (i = 0; i < PTE_PER_PAGE; i += 1) {
  2621. ASSERT (!RtlCheckBit (MmPagedPoolInfo.PagedPoolAllocationMap, i));
  2622. }
  2623. while (i < MmSizeOfPagedPoolInBytes / PAGE_SIZE) {
  2624. ASSERT (RtlCheckBit (MmPagedPoolInfo.PagedPoolAllocationMap, i));
  2625. i += 1;
  2626. }
  2627. #endif
  2628. RtlClearAllBits (MmPagedPoolInfo.PagedPoolAllocationMap);
  2629. LOCK_PFN (OldIrql);
  2630. //
  2631. // Map in the page table pages.
  2632. //
  2633. MmResidentAvailablePages -= (LastPde - PointerPde + 1);
  2634. AdditionalCommittedPages += (LastPde - PointerPde + 1);
  2635. while (PointerPde <= LastPde) {
  2636. ASSERT (PointerPde->u.Hard.Valid == 0);
  2637. PageFrameIndex = MiRemoveAnyPage(
  2638. MI_GET_PAGE_COLOR_FROM_PTE (PointerPde));
  2639. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  2640. MI_WRITE_VALID_PTE (PointerPde, TempPte);
  2641. #if (_MI_PAGING_LEVELS >= 3)
  2642. ContainingFrame = MI_GET_PAGE_FRAME_FROM_PTE(MiGetPteAddress (PointerPde));
  2643. #else
  2644. ContainingFrame = MmSystemPageDirectory[(PointerPde - MiGetPdeAddress(0)) / PDE_PER_PAGE];
  2645. #endif
  2646. MiInitializePfnForOtherProcess (PageFrameIndex,
  2647. MiGetPteAddress (PointerPde),
  2648. ContainingFrame);
  2649. MiFillMemoryPte (MiGetVirtualAddressMappedByPte (PointerPde),
  2650. PAGE_SIZE,
  2651. MM_KERNEL_NOACCESS_PTE);
  2652. PointerPde += 1;
  2653. }
  2654. MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde;
  2655. TempPte = ValidKernelPte;
  2656. MI_SET_PTE_DIRTY (TempPte);
  2657. ASSERT (MmAvailablePages > (PFN_COUNT)(LastPte - PointerPte + 1));
  2658. ASSERT (MmResidentAvailablePages > (SPFN_NUMBER)(LastPte - PointerPte + 1));
  2659. MmResidentAvailablePages -= (LastPte - PointerPte + 1);
  2660. AdditionalCommittedPages += (LastPte - PointerPte + 1);
  2661. while (PointerPte <= LastPte) {
  2662. ASSERT (PointerPte->u.Hard.Valid == 0);
  2663. PageFrameIndex = MiRemoveAnyPage(
  2664. MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
  2665. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  2666. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  2667. MiInitializePfn (PageFrameIndex, PointerPte, 1);
  2668. PointerPte += 1;
  2669. }
  2670. UNLOCK_PFN (OldIrql);
  2671. }
  2672. //
  2673. // Since the commitment return path is lock free, the total committed
  2674. // page count must be atomically incremented.
  2675. //
  2676. InterlockedExchangeAddSizeT (&MmTotalCommittedPages, AdditionalCommittedPages);
  2677. MiInitializeSpecialPool (NonPagedPool);
  2678. //
  2679. // Allow mapping of views into system space.
  2680. //
  2681. MiInitializeSystemSpaceMap (NULL);
  2682. return;
  2683. }
VOID
MiFindInitializationCode (
    OUT PVOID *StartVa,
    OUT PVOID *EndVa
    )

/*++

Routine Description:

    This function locates the start and end of the kernel initialization
    code. This code resides in the "init" section of the kernel image.

    As a side effect, the discardable sections (INIT, relocation
    sections, and - depending on registry configuration - PAGEVRF* and
    PAGESPEC) of every boot-loaded module are freed here via
    MiFreeInitializationCode.  The one range that contains this very
    routine is NOT freed; it is returned to the caller instead, so the
    caller can free it later from code that does not live inside it.

Arguments:

    StartVa - Returns the starting address of the init section that
              contains this routine, or NULL if no such section exists.

    EndVa - Returns the ending address of that init section.  Only
            meaningful when *StartVa is non-NULL.

Return Value:

    None.

Environment:

    Kernel Mode Only.  End of system initialization.

--*/

{
    PKLDR_DATA_TABLE_ENTRY LdrDataTableEntry;
    PVOID CurrentBase;
    PVOID InitStart;
    PVOID InitEnd;
    PLIST_ENTRY Next;
    PIMAGE_NT_HEADERS NtHeader;
    PIMAGE_SECTION_HEADER SectionTableEntry;
    PIMAGE_SECTION_HEADER LastDiscard;
    LONG i;
    LOGICAL DiscardSection;
    PVOID MiFindInitializationCodeAddress;
    PKTHREAD CurrentThread;

    //
    // Capture this routine's real code address so the section that
    // contains it can be recognized (and preserved) below.
    //

    MiFindInitializationCodeAddress = MmGetProcedureAddress((PVOID)(ULONG_PTR)&MiFindInitializationCode);

#if defined(_IA64_)

    //
    // One more indirection is needed due to the PLABEL.
    //

    MiFindInitializationCodeAddress = (PVOID)(*((PULONGLONG)MiFindInitializationCodeAddress));
#endif

    *StartVa = NULL;

    //
    // Walk through the loader blocks looking for the base which
    // contains this routine.
    //

    CurrentThread = KeGetCurrentThread ();
    KeEnterCriticalRegionThread (CurrentThread);
    ExAcquireResourceExclusiveLite (&PsLoadedModuleResource, TRUE);

    Next = PsLoadedModuleList.Flink;

    while (Next != &PsLoadedModuleList) {

        LdrDataTableEntry = CONTAINING_RECORD (Next,
                                               KLDR_DATA_TABLE_ENTRY,
                                               InLoadOrderLinks);

        if (LdrDataTableEntry->SectionPointer != NULL) {

            //
            // This entry was loaded by MmLoadSystemImage so it's already
            // had its init section removed.
            //

            Next = Next->Flink;
            continue;
        }

        CurrentBase = (PVOID)LdrDataTableEntry->DllBase;
        NtHeader = RtlImageNtHeader(CurrentBase);

        //
        // The section table immediately follows the PE signature, the
        // file header and the optional header.
        //

        SectionTableEntry = (PIMAGE_SECTION_HEADER)((PCHAR)NtHeader +
                                sizeof(ULONG) +
                                sizeof(IMAGE_FILE_HEADER) +
                                NtHeader->FileHeader.SizeOfOptionalHeader);

        //
        // From the image header, locate the sections named 'INIT',
        // PAGEVRF* and PAGESPEC. INIT always goes, the others go depending
        // on registry configuration.  The 4 name bytes are compared as a
        // little-endian ULONG, hence the reversed literals below.
        //

        i = NtHeader->FileHeader.NumberOfSections;

        InitStart = NULL;

        while (i > 0) {

#if DBG
            if ((*(PULONG)SectionTableEntry->Name == 'tini') ||
                (*(PULONG)SectionTableEntry->Name == 'egap')) {
                DbgPrint("driver %wZ has lower case sections (init or pagexxx)\n",
                    &LdrDataTableEntry->FullDllName);
            }
#endif

            DiscardSection = FALSE;

            //
            // Free any INIT sections (or relocation sections that haven't
            // been already).  Note a driver may have a relocation section
            // but not have any INIT code.
            //

            if ((*(PULONG)SectionTableEntry->Name == 'TINI') ||
                ((SectionTableEntry->Characteristics & IMAGE_SCN_MEM_DISCARDABLE) != 0)) {
                DiscardSection = TRUE;
            }
            else if ((*(PULONG)SectionTableEntry->Name == 'EGAP') &&
                     (SectionTableEntry->Name[4] == 'V') &&
                     (SectionTableEntry->Name[5] == 'R') &&
                     (SectionTableEntry->Name[6] == 'F')) {

                //
                // Discard PAGEVRF* if no drivers are being instrumented.
                //

                if (MmVerifyDriverBufferLength == (ULONG)-1) {
                    DiscardSection = TRUE;
                }
            }
            else if ((*(PULONG)SectionTableEntry->Name == 'EGAP') &&
                     (*(PULONG)&SectionTableEntry->Name[4] == 'CEPS')) {

                //
                // Discard PAGESPEC special pool code if it's not enabled.
                //

                if (MiSpecialPoolFirstPte == NULL) {
                    DiscardSection = TRUE;
                }
            }

            if (DiscardSection == TRUE) {

                //
                // Round the start up and the end down to page boundaries
                // so only whole pages wholly contained in the section(s)
                // are freed.
                //

                InitStart = (PVOID)((PCHAR)CurrentBase + SectionTableEntry->VirtualAddress);
                InitEnd = (PVOID)((PCHAR)InitStart + SectionTableEntry->SizeOfRawData - 1);
                InitEnd = (PVOID)((PCHAR)PAGE_ALIGN ((PCHAR)InitEnd +
                            (NtHeader->OptionalHeader.SectionAlignment - 1)) - 1);
                InitStart = (PVOID)ROUND_TO_PAGES (InitStart);

                //
                // Check if more sections are discardable after this one so
                // even small INIT sections can be discarded.
                //

                if (i == 1) {
                    LastDiscard = SectionTableEntry;
                }
                else {
                    LastDiscard = NULL;
                    do {

                        //
                        // Coalesce each immediately-following discardable
                        // section into the same free range.
                        //

                        i -= 1;
                        SectionTableEntry += 1;

                        if ((SectionTableEntry->Characteristics &
                             IMAGE_SCN_MEM_DISCARDABLE) != 0) {

                            //
                            // Discard this too.
                            //

                            LastDiscard = SectionTableEntry;
                        }
                        else {
                            break;
                        }
                    } while (i > 1);
                }

                if (LastDiscard) {
                    InitEnd = (PVOID)(((PCHAR)CurrentBase +
                                       LastDiscard->VirtualAddress) +
                                      (LastDiscard->SizeOfRawData - 1));

                    //
                    // If this isn't the last section in the driver then the
                    // the next section is not discardable.  So the last
                    // section is not rounded down, but all others must be.
                    //

                    if (i != 1) {
                        InitEnd = (PVOID)((PCHAR)PAGE_ALIGN ((PCHAR)InitEnd +
                                    (NtHeader->OptionalHeader.SectionAlignment - 1)) - 1);
                    }
                }

                //
                // Never free past the end of the image itself.
                //

                if (InitEnd > (PVOID)((PCHAR)CurrentBase +
                                      LdrDataTableEntry->SizeOfImage)) {
                    InitEnd = (PVOID)(((ULONG_PTR)CurrentBase +
                                       (LdrDataTableEntry->SizeOfImage - 1)) |
                                      (PAGE_SIZE - 1));
                }

                if (InitStart <= InitEnd) {
                    if ((MiFindInitializationCodeAddress >= InitStart) &&
                        (MiFindInitializationCodeAddress <= InitEnd)) {

                        //
                        // This init section is in the kernel, don't free it
                        // now as it would free this code!
                        //

                        ASSERT (*StartVa == NULL);
                        *StartVa = InitStart;
                        *EndVa = InitEnd;
                    }
                    else {
                        MiFreeInitializationCode (InitStart, InitEnd);
                    }
                }
            }
            i -= 1;
            SectionTableEntry += 1;
        }
        Next = Next->Flink;
    }

    ExReleaseResourceLite (&PsLoadedModuleResource);
    KeLeaveCriticalRegionThread (CurrentThread);
    return;
}
VOID
MiFreeInitializationCode (
    IN PVOID StartVa,
    IN PVOID EndVa
    )

/*++

Routine Description:

    This function is called to delete the initialization code, returning
    the physical pages in [StartVa, EndVa] to the free list and crediting
    the freed pages back to the systemwide commit limits.

Arguments:

    StartVa - Supplies the starting address of the range to delete.

    EndVa - Supplies the ending address of the range to delete.

Return Value:

    None.

Environment:

    Kernel Mode Only.  Runs after system initialization.

--*/

{
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER PagesFreed;
    KIRQL OldIrql;

    ASSERT(ExPageLockHandle);

#if defined (_MI_MORE_THAN_4GB_)
    if (MiNoLowMemory != 0) {

        //
        // Don't free this range as the kernel is always below the memory
        // threshold configured for this system.
        //

        return;
    }
#endif

    PagesFreed = 0;

    //
    // Lock the section described by ExPageLockHandle resident while the
    // pages are being freed.  NOTE(review): presumably this covers the
    // pagable code executing below - confirm against the handle's setup.
    //

    MmLockPagableSectionByHandle(ExPageLockHandle);

    if (MI_IS_PHYSICAL_ADDRESS(StartVa)) {

        //
        // On certain architectures (e.g., MIPS) virtual addresses
        // may be physical and hence have no corresponding PTE.
        // Hand each physical page directly back to the free list.
        //

        LOCK_PFN (OldIrql);
        while (StartVa < EndVa) {

            PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (StartVa);
            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

            //
            // Drop the counts to zero and mark the PFN deleted before
            // putting the page on the free list.
            //

            Pfn1->u2.ShareCount = 0;
            Pfn1->u3.e2.ReferenceCount = 0;
            MI_SET_PFN_DELETED (Pfn1);
            MiInsertPageInFreeList (PageFrameIndex);
            StartVa = (PVOID)((PUCHAR)StartVa + PAGE_SIZE);
            PagesFreed += 1;
        }
        UNLOCK_PFN (OldIrql);
    }
    else {

        //
        // Virtually mapped range - delete it PTE by PTE.
        //

        PointerPte = MiGetPteAddress (StartVa);
        PagesFreed = MiDeleteSystemPagableVm (PointerPte,
                                              (PFN_NUMBER) (1 + MiGetPteAddress (EndVa) -
                                                  PointerPte),
                                              ZeroKernelPte,
                                              FALSE,
                                              NULL);
    }

    MmUnlockPagableImageSection(ExPageLockHandle);

    //
    // Since systemwide commitment was determined early in Phase 0 and
    // excluded the ranges just freed, add them back in now.
    //

    if (PagesFreed != 0) {

        //
        // Since systemwide commitment was determined early in Phase 0
        // and excluded the ranges just freed, increase the limits
        // accordingly now.  Note that there is no commitment to be
        // returned (as none was ever charged earlier) for boot
        // loaded drivers.
        //

        InterlockedExchangeAddSizeT (&MmTotalCommitLimitMaximum, PagesFreed);
        InterlockedExchangeAddSizeT (&MmTotalCommitLimit, PagesFreed);
    }
    return;
}
VOID
MiEnablePagingTheExecutive (
    VOID
    )

/*++

Routine Description:

    This function walks every boot-loaded (ntldr) kernel module and makes
    the pagable portions of its image - sections named PAGE* or .edata -
    actually pagable by handing each eligible PTE range to
    MiEnablePagingOfDriverAtInit.

    Modules loaded by MmLoadSystemImage are skipped (already pagable), as
    are physically-mapped images.  Some sections are conditionally
    exempted: PAGEKD is paged out only when the debugger is pitched,
    PAGEVRF* stays resident while driver verification is active, and
    PAGESPEC stays resident while special pool is enabled.

    NOTE(review): the previous header here described StartVa/EndVa
    parameters this VOID routine does not have - it was copied from
    MiFindInitializationCode.

Arguments:

    None.

Return Value:

    None.

Environment:

    Kernel Mode Only.  End of system initialization.

--*/

{
    KIRQL OldIrql;
    KIRQL OldIrqlWs;
    PVOID StartVa;
    PETHREAD CurrentThread;
    PLONG SectionLockCountPointer;
    PKLDR_DATA_TABLE_ENTRY LdrDataTableEntry;
    PVOID CurrentBase;
    PLIST_ENTRY Next;
    PIMAGE_NT_HEADERS NtHeader;
    PIMAGE_SECTION_HEADER StartSectionTableEntry;
    PIMAGE_SECTION_HEADER SectionTableEntry;
    LONG i;
    PMMPTE PointerPte;
    PMMPTE LastPte;
    PMMPTE SubsectionStartPte;
    PMMPTE SubsectionLastPte;
    LOGICAL PageSection;
    PVOID SectionBaseAddress;
    LOGICAL AlreadyLockedOnce;
    ULONG Waited;
    PEPROCESS CurrentProcess;

    //
    // Don't page kernel mode code if customer does not want it paged or if
    // this is a diskless remote boot client.
    //

    if (MmDisablePagingExecutive & MM_SYSTEM_CODE_LOCKED_DOWN) {
        return;
    }

#if defined(REMOTE_BOOT)
    if (IoRemoteBootClient && IoCscInitializationFailed) {
        return;
    }
#endif

    //
    // Initializing LastPte is not needed for correctness, but
    // without it the compiler cannot compile this code W4 to check
    // for use of uninitialized variables.
    //

    LastPte = NULL;

    //
    // Walk through the loader blocks looking for the base which
    // contains this routine.
    //

    CurrentThread = PsGetCurrentThread ();
    CurrentProcess = PsGetCurrentProcessByThread (CurrentThread);
    KeEnterCriticalRegionThread (&CurrentThread->Tcb);
    ExAcquireResourceExclusiveLite (&PsLoadedModuleResource, TRUE);

    Next = PsLoadedModuleList.Flink;

    while (Next != &PsLoadedModuleList) {

        LdrDataTableEntry = CONTAINING_RECORD (Next,
                                               KLDR_DATA_TABLE_ENTRY,
                                               InLoadOrderLinks);

        if (LdrDataTableEntry->SectionPointer != NULL) {

            //
            // This entry was loaded by MmLoadSystemImage so it's already paged.
            //

            Next = Next->Flink;
            continue;
        }

        CurrentBase = (PVOID)LdrDataTableEntry->DllBase;

        if (MI_IS_PHYSICAL_ADDRESS (CurrentBase)) {

            //
            // Mapped physically, can't be paged.
            //

            Next = Next->Flink;
            continue;
        }

        NtHeader = RtlImageNtHeader (CurrentBase);

        //
        // The restart label re-scans this module's entire section table
        // from scratch whenever the locks had to be dropped below.
        //

restart:

        StartSectionTableEntry = NULL;
        SectionTableEntry = (PIMAGE_SECTION_HEADER)((PCHAR)NtHeader +
                                sizeof(ULONG) +
                                sizeof(IMAGE_FILE_HEADER) +
                                NtHeader->FileHeader.SizeOfOptionalHeader);

        //
        // From the image header, locate the section named 'PAGE' or '.edata'.
        //

        i = NtHeader->FileHeader.NumberOfSections;

        PointerPte = NULL;

        while (i > 0) {

            SectionBaseAddress = SECTION_BASE_ADDRESS(SectionTableEntry);

            if ((PUCHAR)SectionBaseAddress ==
                    ((PUCHAR)CurrentBase + SectionTableEntry->VirtualAddress)) {

                AlreadyLockedOnce = TRUE;

                //
                // This subsection has already been locked down (and possibly
                // unlocked as well) at least once.  If it is NOT locked down
                // right now and the pages are not in the system working set
                // then include it in the chunk to be paged.
                //

                SectionLockCountPointer = SECTION_LOCK_COUNT_POINTER (SectionTableEntry);

                if (*SectionLockCountPointer == 0) {
                    SubsectionStartPte = MiGetPteAddress ((PVOID)(ROUND_TO_PAGES (
                                             (ULONG_PTR)CurrentBase +
                                             SectionTableEntry->VirtualAddress)));
                    SubsectionLastPte = MiGetPteAddress ((PVOID)((ULONG_PTR)CurrentBase +
                                            SectionTableEntry->VirtualAddress +
                                            (NtHeader->OptionalHeader.SectionAlignment - 1) +
                                            SectionTableEntry->SizeOfRawData -
                                            PAGE_SIZE));
                    if (SubsectionLastPte >= SubsectionStartPte) {
                        AlreadyLockedOnce = FALSE;
                    }
                }
            }
            else {
                AlreadyLockedOnce = FALSE;
            }

            //
            // Candidate sections are PAGE* or .edata (the 4 name bytes
            // compared as a little-endian ULONG) not locked down above.
            //

            PageSection = ((*(PULONG)SectionTableEntry->Name == 'EGAP') ||
                           (*(PULONG)SectionTableEntry->Name == 'ade.')) &&
                          (AlreadyLockedOnce == FALSE);

            if (*(PULONG)SectionTableEntry->Name == 'EGAP' &&
                SectionTableEntry->Name[4] == 'K' &&
                SectionTableEntry->Name[5] == 'D') {

                //
                // Only pageout PAGEKD if KdPitchDebugger is TRUE.
                //

                PageSection = KdPitchDebugger;
            }

            if ((*(PULONG)SectionTableEntry->Name == 'EGAP') &&
                (SectionTableEntry->Name[4] == 'V') &&
                (SectionTableEntry->Name[5] == 'R') &&
                (SectionTableEntry->Name[6] == 'F')) {

                //
                // Pageout PAGEVRF* if no drivers are being instrumented.
                //

                if (MmVerifyDriverBufferLength != (ULONG)-1) {
                    PageSection = FALSE;
                }
            }

            if ((*(PULONG)SectionTableEntry->Name == 'EGAP') &&
                (*(PULONG)&SectionTableEntry->Name[4] == 'CEPS')) {

                //
                // Pageout PAGESPEC special pool code if it's not enabled.
                //

                if (MiSpecialPoolFirstPte != NULL) {
                    PageSection = FALSE;
                }
            }

            if (PageSection) {

                //
                // This section is pagable, save away the start and end.
                //

                if (PointerPte == NULL) {

                    //
                    // Previous section was NOT pagable, get the start address.
                    //

                    ASSERT (StartSectionTableEntry == NULL);
                    StartSectionTableEntry = SectionTableEntry;
                    PointerPte = MiGetPteAddress ((PVOID)(ROUND_TO_PAGES (
                                     (ULONG_PTR)CurrentBase +
                                     SectionTableEntry->VirtualAddress)));
                }
                LastPte = MiGetPteAddress ((PVOID)((ULONG_PTR)CurrentBase +
                              SectionTableEntry->VirtualAddress +
                              (NtHeader->OptionalHeader.SectionAlignment - 1) +
                              SectionTableEntry->SizeOfRawData -
                              PAGE_SIZE));
            }
            else {

                //
                // This section is not pagable, if the previous section was
                // pagable, enable it.
                //

                if (PointerPte != NULL) {

                    ASSERT (StartSectionTableEntry != NULL);

                    LOCK_SYSTEM_WS (OldIrqlWs, CurrentThread);
                    LOCK_PFN (OldIrql);

                    //
                    // Make the header pages holding the accumulated
                    // section table entries resident so they can be
                    // examined while the locks are held.
                    //

                    StartVa = PAGE_ALIGN (StartSectionTableEntry);

                    while (StartVa < (PVOID) SectionTableEntry) {
                        Waited = MiMakeSystemAddressValidPfnSystemWs (StartVa);
                        if (Waited != 0) {

                            //
                            // Restart at the top as the locks were released.
                            //

                            UNLOCK_PFN (OldIrql);
                            UNLOCK_SYSTEM_WS (OldIrqlWs);
                            goto restart;
                        }
                        StartVa = (PVOID)((PCHAR)StartVa + PAGE_SIZE);
                    }

                    //
                    // Now that we're holding the proper locks, rewalk all
                    // the sections to make sure they weren't locked down
                    // after we checked above.
                    //

                    while (StartSectionTableEntry < SectionTableEntry) {
                        SectionBaseAddress = SECTION_BASE_ADDRESS(StartSectionTableEntry);
                        SectionLockCountPointer = SECTION_LOCK_COUNT_POINTER (StartSectionTableEntry);
                        if (((PUCHAR)SectionBaseAddress ==
                             ((PUCHAR)CurrentBase + StartSectionTableEntry->VirtualAddress)) &&
                            (*SectionLockCountPointer != 0)) {

                            //
                            // Restart at the top as the section has been
                            // explicitly locked by a driver since we first
                            // checked above.
                            //

                            UNLOCK_PFN (OldIrql);
                            UNLOCK_SYSTEM_WS (OldIrqlWs);
                            goto restart;
                        }
                        StartSectionTableEntry += 1;
                    }

                    MiEnablePagingOfDriverAtInit (PointerPte, LastPte);

                    UNLOCK_PFN (OldIrql);
                    UNLOCK_SYSTEM_WS (OldIrqlWs);

                    PointerPte = NULL;
                    StartSectionTableEntry = NULL;
                }
            }

            i -= 1;
            SectionTableEntry += 1;
        }

        //
        // Flush any pagable run still pending when the section table
        // ends - same protocol as the in-loop case above.
        //

        if (PointerPte != NULL) {

            ASSERT (StartSectionTableEntry != NULL);

            LOCK_SYSTEM_WS (OldIrqlWs, CurrentThread);
            LOCK_PFN (OldIrql);

            StartVa = PAGE_ALIGN (StartSectionTableEntry);

            while (StartVa < (PVOID) SectionTableEntry) {
                Waited = MiMakeSystemAddressValidPfnSystemWs (StartVa);
                if (Waited != 0) {

                    //
                    // Restart at the top as the locks were released.
                    //

                    UNLOCK_PFN (OldIrql);
                    UNLOCK_SYSTEM_WS (OldIrqlWs);
                    goto restart;
                }
                StartVa = (PVOID)((PCHAR)StartVa + PAGE_SIZE);
            }

            //
            // Now that we're holding the proper locks, rewalk all
            // the sections to make sure they weren't locked down
            // after we checked above.
            //

            while (StartSectionTableEntry < SectionTableEntry) {
                SectionBaseAddress = SECTION_BASE_ADDRESS(StartSectionTableEntry);
                SectionLockCountPointer = SECTION_LOCK_COUNT_POINTER (StartSectionTableEntry);
                if (((PUCHAR)SectionBaseAddress ==
                     ((PUCHAR)CurrentBase + StartSectionTableEntry->VirtualAddress)) &&
                    (*SectionLockCountPointer != 0)) {

                    //
                    // Restart at the top as the section has been
                    // explicitly locked by a driver since we first
                    // checked above.
                    //

                    UNLOCK_PFN (OldIrql);
                    UNLOCK_SYSTEM_WS (OldIrqlWs);
                    goto restart;
                }
                StartSectionTableEntry += 1;
            }

            MiEnablePagingOfDriverAtInit (PointerPte, LastPte);

            UNLOCK_PFN (OldIrql);
            UNLOCK_SYSTEM_WS (OldIrqlWs);
        }

        Next = Next->Flink;
    }

    ExReleaseResourceLite (&PsLoadedModuleResource);
    KeLeaveCriticalRegionThread (&CurrentThread->Tcb);
    return;
}
VOID
MiEnablePagingOfDriverAtInit (
    IN PMMPTE PointerPte,
    IN PMMPTE LastPte
    )

/*++

Routine Description:

    This routine marks the specified range of PTEs as pagable.  Each
    currently-valid page with no working set index is converted to a
    transition PTE marked modified, its share count is released, and the
    resident available page count is credited.

Arguments:

    PointerPte - Supplies the starting PTE.

    LastPte - Supplies the ending PTE.

Return Value:

    None.

Environment:

    Working set mutex AND PFN lock held.

--*/

{
    PVOID Base;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn;
    MMPTE TempPte;
    LOGICAL SessionAddress;

    MM_PFN_LOCK_ASSERT();

    Base = MiGetVirtualAddressMappedByPte (PointerPte);
    SessionAddress = MI_IS_SESSION_PTE (PointerPte);

    while (PointerPte <= LastPte) {

        //
        // The PTE must be carefully checked as drivers may call MmPageEntire
        // during their DriverEntry yet faults may occur prior to this routine
        // running which cause pages to already be resident and in the working
        // set at this point.  So checks for validity and wsindex must be
        // applied.
        //

        if (PointerPte->u.Hard.Valid == 1) {
            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
            Pfn = MI_PFN_ELEMENT (PageFrameIndex);
            ASSERT (Pfn->u2.ShareCount == 1);

            if (Pfn->u1.WsIndex == 0) {

                //
                // Set the working set index to zero.  This allows page table
                // pages to be brought back in with the proper WSINDEX.
                //

                MI_ZERO_WSINDEX (Pfn);

                //
                // Original PTE may need to be set for drivers loaded via
                // ntldr.
                //

                if (Pfn->OriginalPte.u.Long == 0) {
                    Pfn->OriginalPte.u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
                    Pfn->OriginalPte.u.Soft.Protection |= MM_EXECUTE;
                }

                //
                // Mark the page modified so its contents are preserved
                // (written to the paging file) when it is trimmed.
                //

                MI_SET_MODIFIED (Pfn, 1, 0x11);

                TempPte = *PointerPte;
                MI_MAKE_VALID_PTE_TRANSITION (TempPte,
                                              Pfn->OriginalPte.u.Soft.Protection);
                KeFlushSingleTb (Base,
                                 TRUE,
                                 TRUE,
                                 (PHARDWARE_PTE)PointerPte,
                                 TempPte.u.Flush);

                //
                // Flush the translation buffer and decrement the number of valid
                // PTEs within the containing page table page.  Note that for a
                // private page, the page table page is still needed because the
                // page is in transition.
                //

                MiDecrementShareCount (PageFrameIndex);
                MmResidentAvailablePages += 1;
                MmTotalSystemCodePages += 1;
            }
            else {

                //
                // This would need to be taken out of the WSLEs so skip it for
                // now and let the normal paging algorithms remove it if we
                // run into memory pressure.
                //

            }
        }

        Base = (PVOID)((PCHAR)Base + PAGE_SIZE);
        PointerPte += 1;
    }

    if (SessionAddress == TRUE) {

        //
        // Session space has no ASN - flush the entire TB.
        //

        MI_FLUSH_ENTIRE_SESSION_TB (TRUE, TRUE);
    }

    return;
}
  3315. MM_SYSTEMSIZE
  3316. MmQuerySystemSize(
  3317. VOID
  3318. )
  3319. {
  3320. //
  3321. // 12Mb is small
  3322. // 12-19 is medium
  3323. // > 19 is large
  3324. //
  3325. return MmSystemSize;
  3326. }
  3327. NTKERNELAPI
  3328. BOOLEAN
  3329. MmIsThisAnNtAsSystem(
  3330. VOID
  3331. )
  3332. {
  3333. return (BOOLEAN)MmProductType;
  3334. }
  3335. NTKERNELAPI
  3336. VOID
  3337. FASTCALL
  3338. MmSetPageFaultNotifyRoutine(
  3339. PPAGE_FAULT_NOTIFY_ROUTINE NotifyRoutine
  3340. )
  3341. {
  3342. MmPageFaultNotifyRoutine = NotifyRoutine;
  3343. }
  3344. #ifdef _MI_MESSAGE_SERVER
  3345. LIST_ENTRY MiMessageInfoListHead;
  3346. KSPIN_LOCK MiMessageLock;
  3347. KEVENT MiMessageEvent;
  3348. ULONG MiMessageCount;
  3349. VOID
  3350. MiInitializeMessageQueue (
  3351. VOID
  3352. )
  3353. {
  3354. MiMessageCount = 0;
  3355. InitializeListHead (&MiMessageInfoListHead);
  3356. KeInitializeSpinLock (&MiMessageLock);
  3357. //
  3358. // Use a synchronization event so the event's signal state is
  3359. // auto cleared on a successful wait.
  3360. //
  3361. KeInitializeEvent (&MiMessageEvent, SynchronizationEvent, FALSE);
  3362. }
  3363. LOGICAL
  3364. MiQueueMessage (
  3365. IN PVOID Message
  3366. )
  3367. {
  3368. KIRQL OldIrql;
  3369. LOGICAL Enqueued;
  3370. Enqueued = TRUE;
  3371. ExAcquireSpinLock (&MiMessageLock, &OldIrql);
  3372. if (MiMessageCount <= 500) {
  3373. InsertTailList (&MiMessageInfoListHead, (PLIST_ENTRY)Message);
  3374. MiMessageCount += 1;
  3375. }
  3376. else {
  3377. Enqueued = FALSE;
  3378. }
  3379. ExReleaseSpinLock (&MiMessageLock, OldIrql);
  3380. if (Enqueued == TRUE) {
  3381. KeSetEvent (&MiMessageEvent, 0, FALSE);
  3382. }
  3383. else {
  3384. ExFreePool (Message);
  3385. }
  3386. return Enqueued;
  3387. }
//
// N.B. The caller owns the returned message and must free its pool
// allocation when done with it.
//
  3391. PVOID
  3392. MiRemoveMessage (
  3393. VOID
  3394. )
  3395. {
  3396. PVOID Message;
  3397. KIRQL OldIrql;
  3398. NTSTATUS Status;
  3399. Message = NULL;
  3400. //
  3401. // N.B. waiting with a timeout and return so caller can support unload.
  3402. //
  3403. Status = KeWaitForSingleObject (&MiMessageEvent,
  3404. WrVirtualMemory,
  3405. KernelMode,
  3406. FALSE,
  3407. (PLARGE_INTEGER) &MmTwentySeconds);
  3408. if (Status != STATUS_TIMEOUT) {
  3409. ExAcquireSpinLock (&MiMessageLock, &OldIrql);
  3410. if (!IsListEmpty (&MiMessageInfoListHead)) {
  3411. Message = (PVOID)RemoveHeadList(&MiMessageInfoListHead);
  3412. MiMessageCount -= 1;
  3413. if (!IsListEmpty (&MiMessageInfoListHead)) {
  3414. //
  3415. // The list still contains entries so undo the event autoreset.
  3416. //
  3417. KeSetEvent (&MiMessageEvent, 0, FALSE);
  3418. }
  3419. }
  3420. ExReleaseSpinLock (&MiMessageLock, OldIrql);
  3421. }
  3422. return Message;
  3423. }
  3424. #endif
  3425. #define CONSTANT_UNICODE_STRING(s) { sizeof( s ) - sizeof( WCHAR ), sizeof( s ), s }
LOGICAL
MiInitializeMemoryEvents (
    VOID
    )

/*++

Routine Description:

    Computes the low/high memory thresholds (converting any
    registry-supplied values from megabytes to pages, otherwise deriving
    scaled defaults), creates the named LowMemoryCondition and
    HighMemoryCondition kernel events, and sets their initial signal
    state from the current page counts.

Return Value:

    TRUE on success, FALSE if either event could not be created.

--*/

{
    KIRQL OldIrql;
    NTSTATUS Status;
    UNICODE_STRING LowMem = CONSTANT_UNICODE_STRING(L"\\KernelObjects\\LowMemoryCondition");
    UNICODE_STRING HighMem = CONSTANT_UNICODE_STRING(L"\\KernelObjects\\HighMemoryCondition");

    //
    // The thresholds may be set in the registry, if so, they are interpreted
    // in megabytes so convert them to pages now.
    //
    // If the user modifies the registry to introduce his own values, don't
    // bother error checking them as they can't hurt the system regardless (bad
    // values just may result in events not getting signaled or staying
    // signaled when they shouldn't, but that's not fatal).
    //

    if (MmLowMemoryThreshold != 0) {
        MmLowMemoryThreshold *= ((1024 * 1024) / PAGE_SIZE);
    }
    else {

        //
        // Scale the threshold so on servers the low threshold is
        // approximately 32MB per 4GB, capping it at 64MB.
        //
        // (0x40000 pages == 1GB and 0x8000 pages == 128MB at 4K pages;
        // the shifts add a fraction of the pages above each knee.)
        //

        MmLowMemoryThreshold = MmPlentyFreePages;

        if (MmNumberOfPhysicalPages > 0x40000) {
            MmLowMemoryThreshold = (32 * 1024 * 1024) / PAGE_SIZE;
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x40000) >> 7);
        }
        else if (MmNumberOfPhysicalPages > 0x8000) {
            MmLowMemoryThreshold += ((MmNumberOfPhysicalPages - 0x8000) >> 5);
        }

        if (MmLowMemoryThreshold > (64 * 1024 * 1024) / PAGE_SIZE) {
            MmLowMemoryThreshold = (64 * 1024 * 1024) / PAGE_SIZE;
        }
    }

    if (MmHighMemoryThreshold != 0) {
        MmHighMemoryThreshold *= ((1024 * 1024) / PAGE_SIZE);
    }
    else {

        //
        // Default the high threshold to triple the low threshold.
        //

        MmHighMemoryThreshold = 3 * MmLowMemoryThreshold;
        ASSERT (MmHighMemoryThreshold > MmLowMemoryThreshold);
    }

    //
    // Never let the high threshold fall below the low one (possible with
    // registry-supplied values).
    //

    if (MmHighMemoryThreshold < MmLowMemoryThreshold) {
        MmHighMemoryThreshold = MmLowMemoryThreshold;
    }

    Status = MiCreateMemoryEvent (&LowMem, &MiLowMemoryEvent);
    if (!NT_SUCCESS (Status)) {
#if DBG
        DbgPrint ("MM: Memory event initialization failed %x\n", Status);
#endif
        return FALSE;
    }

    Status = MiCreateMemoryEvent (&HighMem, &MiHighMemoryEvent);
    if (!NT_SUCCESS (Status)) {
#if DBG
        DbgPrint ("MM: Memory event initialization failed %x\n", Status);
#endif
        return FALSE;
    }

    //
    // Initialize the event values.
    //

    LOCK_PFN (OldIrql);
    MiNotifyMemoryEvents ();
    UNLOCK_PFN (OldIrql);

    return TRUE;
}
  3496. extern POBJECT_TYPE ExEventObjectType;
  3497. NTSTATUS
  3498. MiCreateMemoryEvent (
  3499. IN PUNICODE_STRING EventName,
  3500. OUT PKEVENT *Event
  3501. )
  3502. {
  3503. PACL Dacl;
  3504. HANDLE EventHandle;
  3505. ULONG DaclLength;
  3506. NTSTATUS Status;
  3507. OBJECT_ATTRIBUTES ObjectAttributes;
  3508. SECURITY_DESCRIPTOR SecurityDescriptor;
  3509. Status = RtlCreateSecurityDescriptor (&SecurityDescriptor,
  3510. SECURITY_DESCRIPTOR_REVISION);
  3511. if (!NT_SUCCESS (Status)) {
  3512. return Status;
  3513. }
  3514. DaclLength = sizeof (ACL) + sizeof (ACCESS_ALLOWED_ACE) * 3 +
  3515. RtlLengthSid (SeLocalSystemSid) +
  3516. RtlLengthSid (SeAliasAdminsSid) +
  3517. RtlLengthSid (SeWorldSid);
  3518. Dacl = ExAllocatePoolWithTag (PagedPool, DaclLength, 'lcaD');
  3519. if (Dacl == NULL) {
  3520. return STATUS_INSUFFICIENT_RESOURCES;
  3521. }
  3522. Status = RtlCreateAcl (Dacl, DaclLength, ACL_REVISION);
  3523. if (!NT_SUCCESS (Status)) {
  3524. ExFreePool (Dacl);
  3525. return Status;
  3526. }
  3527. Status = RtlAddAccessAllowedAce (Dacl,
  3528. ACL_REVISION,
  3529. EVENT_ALL_ACCESS,
  3530. SeAliasAdminsSid);
  3531. if (!NT_SUCCESS (Status)) {
  3532. ExFreePool (Dacl);
  3533. return Status;
  3534. }
  3535. Status = RtlAddAccessAllowedAce (Dacl,
  3536. ACL_REVISION,
  3537. EVENT_ALL_ACCESS,
  3538. SeLocalSystemSid);
  3539. if (!NT_SUCCESS (Status)) {
  3540. ExFreePool (Dacl);
  3541. return Status;
  3542. }
  3543. Status = RtlAddAccessAllowedAce (Dacl,
  3544. ACL_REVISION,
  3545. SYNCHRONIZE|EVENT_QUERY_STATE|READ_CONTROL,
  3546. SeWorldSid);
  3547. if (!NT_SUCCESS (Status)) {
  3548. ExFreePool (Dacl);
  3549. return Status;
  3550. }
  3551. Status = RtlSetDaclSecurityDescriptor (&SecurityDescriptor,
  3552. TRUE,
  3553. Dacl,
  3554. FALSE);
  3555. if (!NT_SUCCESS (Status)) {
  3556. ExFreePool (Dacl);
  3557. return Status;
  3558. }
  3559. InitializeObjectAttributes (&ObjectAttributes,
  3560. EventName,
  3561. OBJ_KERNEL_HANDLE | OBJ_PERMANENT,
  3562. NULL,
  3563. &SecurityDescriptor);
  3564. Status = ZwCreateEvent (&EventHandle,
  3565. EVENT_ALL_ACCESS,
  3566. &ObjectAttributes,
  3567. NotificationEvent,
  3568. FALSE);
  3569. ExFreePool (Dacl);
  3570. if (NT_SUCCESS (Status)) {
  3571. Status = ObReferenceObjectByHandle (EventHandle,
  3572. EVENT_MODIFY_STATE,
  3573. ExEventObjectType,
  3574. KernelMode,
  3575. (PVOID *)Event,
  3576. NULL);
  3577. }
  3578. ZwClose (EventHandle);
  3579. return Status;
  3580. }
  3581. VOID
  3582. MiNotifyMemoryEvents (
  3583. VOID
  3584. )
  3585. // PFN lock is held.
  3586. {
  3587. if (MmAvailablePages <= MmLowMemoryThreshold) {
  3588. if (KeReadStateEvent (MiHighMemoryEvent) != 0) {
  3589. KeClearEvent (MiHighMemoryEvent);
  3590. }
  3591. if (KeReadStateEvent (MiLowMemoryEvent) == 0) {
  3592. KeSetEvent (MiLowMemoryEvent, 0, FALSE);
  3593. }
  3594. }
  3595. else if (MmAvailablePages < MmHighMemoryThreshold) {
  3596. //
  3597. // Gray zone, make sure both events are cleared.
  3598. //
  3599. if (KeReadStateEvent (MiHighMemoryEvent) != 0) {
  3600. KeClearEvent (MiHighMemoryEvent);
  3601. }
  3602. if (KeReadStateEvent (MiLowMemoryEvent) != 0) {
  3603. KeClearEvent (MiLowMemoryEvent);
  3604. }
  3605. }
  3606. else {
  3607. if (KeReadStateEvent (MiHighMemoryEvent) == 0) {
  3608. KeSetEvent (MiHighMemoryEvent, 0, FALSE);
  3609. }
  3610. if (KeReadStateEvent (MiLowMemoryEvent) != 0) {
  3611. KeClearEvent (MiLowMemoryEvent);
  3612. }
  3613. }
  3614. return;
  3615. }
  3616. VOID
  3617. MiInitializeCacheOverrides (
  3618. VOID
  3619. )
  3620. {
  3621. #if defined (_WIN64)
  3622. ULONG NumberOfBytes;
  3623. NTSTATUS Status;
  3624. HAL_PLATFORM_INFORMATION Information;
  3625. //
  3626. // Gather platform information from the HAL.
  3627. //
  3628. Status = HalQuerySystemInformation (HalPlatformInformation,
  3629. sizeof (Information),
  3630. &Information,
  3631. &NumberOfBytes);
  3632. if (!NT_SUCCESS (Status)) {
  3633. return;
  3634. }
  3635. //
  3636. // Apply mapping modifications based on platform information flags.
  3637. //
  3638. // It would be better if the platform returned what the new cachetype
  3639. // should be.
  3640. //
  3641. if (Information.PlatformFlags & HAL_PLATFORM_DISABLE_UC_MAIN_MEMORY) {
  3642. MI_SET_CACHETYPE_TRANSLATION (MmNonCached, 0, MiCached);
  3643. }
  3644. #endif
  3645. return;
  3646. }