Leaked source code of Windows Server 2003
/*++

Copyright (c) 1989  Microsoft Corporation

Module Name:

    sysptes.c

Abstract:

    This module contains the routines which reserve and release
    system wide PTEs reserved within the non paged portion of the
    system space.  These PTEs are used for mapping I/O devices
    and mapping kernel stacks for threads.

Author:

    Lou Perazzoli (loup) 6-Apr-1989
    Landy Wang (landyw) 02-June-1997

Revision History:

--*/

#include "mi.h"

VOID
MiFeedSysPtePool (
    IN ULONG Index
    );

ULONG
MiGetSystemPteListCount (
    IN ULONG ListSize
    );

VOID
MiPteSListExpansionWorker (
    IN PVOID Context
    );

#ifdef ALLOC_PRAGMA
#pragma alloc_text(INIT,MiInitializeSystemPtes)
#pragma alloc_text(PAGE,MiPteSListExpansionWorker)
#pragma alloc_text(MISYSPTE,MiReserveAlignedSystemPtes)
#pragma alloc_text(MISYSPTE,MiReserveSystemPtes)
#pragma alloc_text(MISYSPTE,MiFeedSysPtePool)
#pragma alloc_text(MISYSPTE,MiReleaseSystemPtes)
#pragma alloc_text(MISYSPTE,MiGetSystemPteListCount)
#endif

ULONG MmTotalSystemPtes;
ULONG MmTotalFreeSystemPtes[MaximumPtePoolTypes];
PMMPTE MmSystemPtesStart[MaximumPtePoolTypes];
PMMPTE MmSystemPtesEnd[MaximumPtePoolTypes];
ULONG MmPteFailures[MaximumPtePoolTypes];

PMMPTE MiPteStart;
PRTL_BITMAP MiPteStartBitmap;
PRTL_BITMAP MiPteEndBitmap;

extern KSPIN_LOCK MiPteTrackerLock;

ULONG MiSystemPteAllocationFailed;

#if defined(_IA64_)

//
// IA64 has an 8k page size.
//
// Mm cluster MDLs consume 8 pages.
// Small stacks consume 9 pages (including backing store and guard pages).
// Large stacks consume 22 pages (including backing store and guard pages).
//
// PTEs are binned at sizes 1, 2, 4, 8, 9 and 23.
//

#define MM_SYS_PTE_TABLES_MAX 6

//
// Make sure when changing MM_PTE_TABLE_LIMIT that you also increase the
// number of entries in MmSysPteTables.
//

#define MM_PTE_TABLE_LIMIT 23

ULONG MmSysPteIndex[MM_SYS_PTE_TABLES_MAX] = {1,2,4,8,9,MM_PTE_TABLE_LIMIT};

UCHAR MmSysPteTables[MM_PTE_TABLE_LIMIT+1] = {0,0,1,2,2,3,3,3,3,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5};

ULONG MmSysPteMinimumFree [MM_SYS_PTE_TABLES_MAX] = {100,50,30,20,20,20};

#elif defined (_AMD64_)

//
// AMD64 has a 4k page size.
// Small stacks consume 6 pages (including the guard page).
// Large stacks consume 16 pages (including the guard page).
//
// PTEs are binned at sizes 1, 2, 4, 6, 8, and 16.
//

#define MM_SYS_PTE_TABLES_MAX 6

#define MM_PTE_TABLE_LIMIT 16

ULONG MmSysPteIndex[MM_SYS_PTE_TABLES_MAX] = {1,2,4,6,8,MM_PTE_TABLE_LIMIT};

UCHAR MmSysPteTables[MM_PTE_TABLE_LIMIT+1] = {0,0,1,2,2,3,3,4,4,5,5,5,5,5,5,5,5};

ULONG MmSysPteMinimumFree [MM_SYS_PTE_TABLES_MAX] = {100,50,30,100,20,20};

#else

//
// x86 has a 4k page size.
// Small stacks consume 4 pages (including the guard page).
// Large stacks consume 16 pages (including the guard page).
//
// PTEs are binned at sizes 1, 2, 4, 8, and 16.
//

#define MM_SYS_PTE_TABLES_MAX 5

#define MM_PTE_TABLE_LIMIT 16

ULONG MmSysPteIndex[MM_SYS_PTE_TABLES_MAX] = {1,2,4,8,MM_PTE_TABLE_LIMIT};

UCHAR MmSysPteTables[MM_PTE_TABLE_LIMIT+1] = {0,0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4};

ULONG MmSysPteMinimumFree [MM_SYS_PTE_TABLES_MAX] = {100,50,30,20,20};

#endif
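//
// Illustrative sketch, not part of the original module: how the two tables
// above cooperate.  MmSysPteTables maps a request size to a bin index, and
// MmSysPteIndex maps that bin back to the rounded-up size actually granted.
// A user-mode rendition using the x86 tables (names are hypothetical):
//

#if 0
#include <stdio.h>

#define LIMIT 16

static const unsigned BinSize[5] = {1, 2, 4, 8, LIMIT};
static const unsigned char BinIndex[LIMIT + 1] =
                             {0,0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4};

int main (void)
{
    unsigned n;

    for (n = 1; n <= LIMIT; n += 1) {

        unsigned bin = BinIndex[n];

        //
        // Every request is rounded up to its bin size, e.g. a 5-PTE
        // request consumes an 8-PTE chunk.
        //

        printf ("request %2u -> bin %u (%2u PTEs)\n", n, bin, BinSize[bin]);
    }

    return 0;
}
#endif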
KSPIN_LOCK MiSystemPteSListHeadLock;
SLIST_HEADER MiSystemPteSListHead;

#define MM_MIN_SYSPTE_FREE 500
#define MM_MAX_SYSPTE_FREE 3000

ULONG MmSysPteListBySizeCount [MM_SYS_PTE_TABLES_MAX];

//
// Initial sizes for PTE lists.
//

#define MM_PTE_LIST_1  400
#define MM_PTE_LIST_2  100
#define MM_PTE_LIST_4   60
#define MM_PTE_LIST_6  100
#define MM_PTE_LIST_8   50
#define MM_PTE_LIST_9   50
#define MM_PTE_LIST_16  40
#define MM_PTE_LIST_18  40

PVOID MiSystemPteNBHead[MM_SYS_PTE_TABLES_MAX];
LONG MiSystemPteFreeCount[MM_SYS_PTE_TABLES_MAX];

#if defined(_WIN64)
#define MI_MAXIMUM_SLIST_PTE_PAGES 16
#else
#define MI_MAXIMUM_SLIST_PTE_PAGES 8
#endif

typedef struct _MM_PTE_SLIST_EXPANSION_WORK_CONTEXT {
    WORK_QUEUE_ITEM WorkItem;
    LONG Active;
    ULONG SListPages;
} MM_PTE_SLIST_EXPANSION_WORK_CONTEXT, *PMM_PTE_SLIST_EXPANSION_WORK_CONTEXT;

MM_PTE_SLIST_EXPANSION_WORK_CONTEXT MiPteSListExpand;

VOID
MiDumpSystemPtes (
    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
    );

ULONG
MiCountFreeSystemPtes (
    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
    );

PVOID
MiGetHighestPteConsumer (
    OUT PULONG_PTR NumberOfPtes
    );

VOID
MiCheckPteReserve (
    IN PMMPTE StartingPte,
    IN ULONG NumberOfPtes
    );

VOID
MiCheckPteRelease (
    IN PMMPTE StartingPte,
    IN ULONG NumberOfPtes
    );

extern ULONG MiCacheOverride[4];

#if DBG
extern PFN_NUMBER MiCurrentAdvancedPages;
extern PFN_NUMBER MiAdvancesGiven;
extern PFN_NUMBER MiAdvancesFreed;
#endif

PVOID
MiMapLockedPagesInUserSpace (
    IN PMDL MemoryDescriptorList,
    IN PVOID StartingVa,
    IN MEMORY_CACHING_TYPE CacheType,
    IN PVOID BaseVa
    );

VOID
MiUnmapLockedPagesInUserSpace (
    IN PVOID BaseAddress,
    IN PMDL MemoryDescriptorList
    );

VOID
MiInsertPteTracker (
    IN PMDL MemoryDescriptorList,
    IN ULONG Flags,
    IN LOGICAL IoMapping,
    IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
    IN PVOID MyCaller,
    IN PVOID MyCallersCaller
    );

VOID
MiRemovePteTracker (
    IN PMDL MemoryDescriptorList OPTIONAL,
    IN PVOID VirtualAddress,
    IN PFN_NUMBER NumberOfPtes
    );

LOGICAL
MiGetSystemPteAvailability (
    IN ULONG NumberOfPtes,
    IN MM_PAGE_PRIORITY Priority
    );

//
// Define inline functions to pack and unpack pointers in the platform
// specific non-blocking queue pointer structure.
//

typedef struct _PTE_SLIST {
    union {
        struct {
            SINGLE_LIST_ENTRY ListEntry;
        } Slist;
        NBQUEUE_BLOCK QueueBlock;
    } u1;
} PTE_SLIST, *PPTE_SLIST;

#if defined (_AMD64_)

typedef union _PTE_QUEUE_POINTER {
    struct {
        LONG64 PointerPte : 48;
        ULONG64 TimeStamp : 16;
    };
    LONG64 Data;
} PTE_QUEUE_POINTER, *PPTE_QUEUE_POINTER;

#elif defined(_X86_)

typedef union _PTE_QUEUE_POINTER {
    struct {
        LONG PointerPte;
        LONG TimeStamp;
    };
    LONG64 Data;
} PTE_QUEUE_POINTER, *PPTE_QUEUE_POINTER;

#elif defined(_IA64_)

typedef union _PTE_QUEUE_POINTER {
    struct {
        ULONG64 PointerPte : 45;
        ULONG64 Region : 3;
        ULONG64 TimeStamp : 16;
    };
    LONG64 Data;
} PTE_QUEUE_POINTER, *PPTE_QUEUE_POINTER;

#else
#error "no target architecture"
#endif

#if defined(_AMD64_)

__inline
VOID
PackPTEValue (
    IN PPTE_QUEUE_POINTER Entry,
    IN PMMPTE PointerPte,
    IN ULONG TimeStamp
    )
{
    Entry->PointerPte = (LONG64)PointerPte;
    Entry->TimeStamp = (LONG64)TimeStamp;
    return;
}

__inline
PMMPTE
UnpackPTEPointer (
    IN PPTE_QUEUE_POINTER Entry
    )
{
    return (PMMPTE)(Entry->PointerPte);
}

__inline
ULONG
MiReadTbFlushTimeStamp (
    VOID
    )
{
    return (KeReadTbFlushTimeStamp() & (ULONG)0xFFFF);
}

#elif defined(_X86_)

__inline
VOID
PackPTEValue (
    IN PPTE_QUEUE_POINTER Entry,
    IN PMMPTE PointerPte,
    IN ULONG TimeStamp
    )
{
    Entry->PointerPte = (LONG)PointerPte;
    Entry->TimeStamp = (LONG)TimeStamp;
    return;
}

__inline
PMMPTE
UnpackPTEPointer (
    IN PPTE_QUEUE_POINTER Entry
    )
{
    return (PMMPTE)(Entry->PointerPte);
}

__inline
ULONG
MiReadTbFlushTimeStamp (
    VOID
    )
{
    return (KeReadTbFlushTimeStamp());
}

#elif defined(_IA64_)

__inline
VOID
PackPTEValue (
    IN PPTE_QUEUE_POINTER Entry,
    IN PMMPTE PointerPte,
    IN ULONG TimeStamp
    )
{
    Entry->PointerPte = (ULONG64)PointerPte - PTE_BASE;
    Entry->TimeStamp = (ULONG64)TimeStamp;
    Entry->Region = (ULONG64)PointerPte >> 61;
    return;
}

__inline
PMMPTE
UnpackPTEPointer (
    IN PPTE_QUEUE_POINTER Entry
    )
{
    LONG64 Value;

    Value = (ULONG64)Entry->PointerPte + PTE_BASE;
    Value |= Entry->Region << 61;
    return (PMMPTE)(Value);
}

__inline
ULONG
MiReadTbFlushTimeStamp (
    VOID
    )
{
    return (KeReadTbFlushTimeStamp() & (ULONG)0xFFFF);
}

#else
#error "no target architecture"
#endif

__inline
ULONG
UnpackPTETimeStamp (
    IN PPTE_QUEUE_POINTER Entry
    )
{
    return (ULONG)(Entry->TimeStamp);
}
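//
// Illustrative sketch, not from the original source: the AMD64 packing above
// relies on canonical addresses having only 48 significant bits, leaving 16
// bits for the TB flush timestamp.  An equivalent shift-based user-mode
// rendition (assumes a 64-bit build and arithmetic right shift of signed
// values, which mainstream compilers provide):
//

#if 0
#include <assert.h>
#include <stdint.h>

static uint64_t Pack (void *Ptr, unsigned TimeStamp)
{
    return ((uint64_t)TimeStamp << 48) |
           ((uint64_t)(uintptr_t)Ptr & 0x0000FFFFFFFFFFFF);
}

static void *UnpackPointer (uint64_t Value)
{
    //
    // Shift left then arithmetic-shift right to sign-extend bit 47,
    // reconstructing a canonical 64-bit kernel address.
    //

    return (void *)(intptr_t)(((int64_t)(Value << 16)) >> 16);
}

static unsigned UnpackTimeStamp (uint64_t Value)
{
    return (unsigned)(Value >> 48);
}

int main (void)
{
    void *p = (void *)(uintptr_t)0xFFFFF78000000340;    // canonical kernel VA
    uint64_t v = Pack (p, 0xABCD);

    assert (UnpackPointer (v) == p);
    assert (UnpackTimeStamp (v) == 0xABCD);
    return 0;
}
#endif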
PMMPTE
MiReserveSystemPtes (
    IN ULONG NumberOfPtes,
    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
    )

/*++

Routine Description:

    This function locates the specified number of unused PTEs
    within the non paged portion of system space.

Arguments:

    NumberOfPtes - Supplies the number of PTEs to locate.

    SystemPtePoolType - Supplies the PTE type of the pool to expand, one of
                        SystemPteSpace or NonPagedPoolExpansion.

Return Value:

    Returns the address of the first PTE located.
    NULL if no system PTEs can be located.

Environment:

    Kernel mode, DISPATCH_LEVEL or below.

--*/

{
    PMMPTE PointerPte;
    ULONG Index;
    ULONG TimeStamp;
    PTE_QUEUE_POINTER Value;
#if DBG
    ULONG j;
    PMMPTE PointerFreedPte;
#endif

    if (SystemPtePoolType == SystemPteSpace) {

        if (NumberOfPtes <= MM_PTE_TABLE_LIMIT) {

            Index = MmSysPteTables [NumberOfPtes];

            ASSERT (NumberOfPtes <= MmSysPteIndex[Index]);

            if (ExRemoveHeadNBQueue (MiSystemPteNBHead[Index], (PULONG64)&Value) == TRUE) {

                InterlockedDecrement ((PLONG)&MmSysPteListBySizeCount[Index]);

                PointerPte = UnpackPTEPointer (&Value);
                TimeStamp = UnpackPTETimeStamp (&Value);

#if DBG
                PointerPte->u.List.NextEntry = 0xABCDE;

                if (MmDebug & MM_DBG_SYS_PTES) {
                    PointerFreedPte = PointerPte;
                    for (j = 0; j < MmSysPteIndex[Index]; j += 1) {
                        ASSERT (PointerFreedPte->u.Hard.Valid == 0);
                        PointerFreedPte += 1;
                    }
                }
#endif

                ASSERT (PointerPte >= MmSystemPtesStart[SystemPtePoolType]);
                ASSERT (PointerPte <= MmSystemPtesEnd[SystemPtePoolType]);

                if (MmSysPteListBySizeCount[Index] < MmSysPteMinimumFree[Index]) {
                    MiFeedSysPtePool (Index);
                }

                //
                // The last thing is to check whether the TB needs flushing.
                //

                if (TimeStamp == MiReadTbFlushTimeStamp ()) {
                    KeFlushEntireTb (TRUE, TRUE);
                }

                if (MmTrackPtes & 0x2) {
                    MiCheckPteReserve (PointerPte, MmSysPteIndex[Index]);
                }

                return PointerPte;
            }

            //
            // Fall through and go the long way to satisfy the PTE request.
            //

            NumberOfPtes = MmSysPteIndex [Index];
        }
    }

    PointerPte = MiReserveAlignedSystemPtes (NumberOfPtes,
                                             SystemPtePoolType,
                                             0);

    if (PointerPte == NULL) {
        MiSystemPteAllocationFailed += 1;
    }

#if DBG
    if (MmDebug & MM_DBG_SYS_PTES) {
        if (PointerPte != NULL) {
            PointerFreedPte = PointerPte;
            for (j = 0; j < NumberOfPtes; j += 1) {
                ASSERT (PointerFreedPte->u.Hard.Valid == 0);
                PointerFreedPte += 1;
            }
        }
    }
#endif

    return PointerPte;
}
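//
// Illustrative sketch, not from the original source: a hypothetical in-Mm
// caller of the routine above (MiReserveSystemPtes is internal to Mm, not a
// driver API).  The pattern is reserve, make each PTE valid, hand out the
// mapped virtual address, and later invalidate and release:
//

#if 0
PVOID
MiSketchMapSinglePage (
    IN PFN_NUMBER PageFrameIndex
    )
{
    PMMPTE PointerPte;
    MMPTE TempPte;

    PointerPte = MiReserveSystemPtes (1, SystemPteSpace);

    if (PointerPte == NULL) {
        return NULL;
    }

    TempPte = ValidKernelPte;
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
    MI_WRITE_VALID_PTE (PointerPte, TempPte);

    //
    // The teardown path would invalidate the PTE and then call
    // MiReleaseSystemPtes (PointerPte, 1, SystemPteSpace).
    //

    return MiGetVirtualAddressMappedByPte (PointerPte);
}
#endif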
VOID
MiFeedSysPtePool (
    IN ULONG Index
    )

/*++

Routine Description:

    This routine adds PTEs to the nonblocking queue lists.

Arguments:

    Index - Supplies the index for the nonblocking queue list to fill.

Return Value:

    None.

Environment:

    Kernel mode, internal to SysPtes.

--*/

{
    ULONG i;
    ULONG NumberOfPtes;
    PMMPTE PointerPte;

    if (MmTotalFreeSystemPtes[SystemPteSpace] < MM_MIN_SYSPTE_FREE) {
#if defined (_X86_)
        if (MiRecoverExtraPtes () == FALSE) {
            MiRecoverSpecialPtes (PTE_PER_PAGE);
        }
#endif
        return;
    }

    NumberOfPtes = MmSysPteIndex[Index];

    for (i = 0; i < 10 ; i += 1) {

        PointerPte = MiReserveAlignedSystemPtes (NumberOfPtes,
                                                 SystemPteSpace,
                                                 0);
        if (PointerPte == NULL) {
            return;
        }

        MiReleaseSystemPtes (PointerPte, NumberOfPtes, SystemPteSpace);
    }

    return;
}

PMMPTE
MiReserveAlignedSystemPtes (
    IN ULONG NumberOfPtes,
    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType,
    IN ULONG Alignment
    )

/*++

Routine Description:

    This function locates the specified number of unused PTEs
    within the non paged portion of system space.

Arguments:

    NumberOfPtes - Supplies the number of PTEs to locate.

    SystemPtePoolType - Supplies the PTE type of the pool to expand, one of
                        SystemPteSpace or NonPagedPoolExpansion.

    Alignment - Supplies the virtual address alignment for the address
                the returned PTE maps.  For example, if the value is 64K,
                the returned PTE will map an address on a 64K boundary.
                An alignment of zero means to align on a page boundary.

Return Value:

    Returns the address of the first PTE located.
    NULL if no system PTEs can be located.

Environment:

    Kernel mode, DISPATCH_LEVEL or below.

--*/

{
    PMMPTE PointerPte;
    PMMPTE PointerFollowingPte;
    PMMPTE Previous;
    ULONG_PTR SizeInSet;
    KIRQL OldIrql;
    ULONG MaskSize;
    ULONG NumberOfRequiredPtes;
    ULONG OffsetSum;
    ULONG PtesToObtainAlignment;
    PMMPTE NextSetPointer;
    ULONG_PTR LeftInSet;
    ULONG_PTR PteOffset;
    MMPTE_FLUSH_LIST PteFlushList;
    PVOID BaseAddress;
    ULONG j;

    MaskSize = (Alignment - 1) >> (PAGE_SHIFT - PTE_SHIFT);

    OffsetSum = (Alignment >> (PAGE_SHIFT - PTE_SHIFT));

#if defined (_X86_)
restart:
#endif

    //
    // The nonpaged PTE pool uses the invalid PTEs to define the pool
    // structure.  A global pointer points to the first free set
    // in the list, each free set contains the number free and a pointer
    // to the next free set.  The free sets are kept in an ordered list
    // such that the pointer to the next free set is always greater
    // than the address of the current free set.
    //
    // So as not to limit the size of this pool, two PTEs are used
    // to define a free region.  If the region is a single PTE, the
    // prototype field within the PTE is set indicating the set
    // consists of a single PTE.
    //
    // The page frame number field is used to define the next set
    // and the number free.  The two flavors are:
    //
    //                           o          V
    //                           n          l
    //                           e          d
    //  +-----------------------+-+----------+
    //  |      next set         |0|0        0|
    //  +-----------------------+-+----------+
    //  |  number in this set   |0|0        0|
    //  +-----------------------+-+----------+
    //
    //
    //  +-----------------------+-+----------+
    //  |      next set         |1|0        0|
    //  +-----------------------+-+----------+
    //  ...
    //

    //
    // Acquire the system space lock to synchronize access.
    //

    MiLockSystemSpace (OldIrql);

    PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];

    if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {

        //
        // End of list and none found.
        //

        MiUnlockSystemSpace (OldIrql);

#if defined (_X86_)
        if (MiRecoverExtraPtes () == TRUE) {
            goto restart;
        }

        if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
            goto restart;
        }
#endif

        MmPteFailures[SystemPtePoolType] += 1;
        return NULL;
    }

    Previous = PointerPte;

    PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;

    if (Alignment <= PAGE_SIZE) {

        //
        // Don't deal with alignment issues.
        //

        while (TRUE) {

            if (PointerPte->u.List.OneEntry) {

                if (NumberOfPtes == 1) {
                    goto ExactFit;
                }
                goto NextEntry;
            }

            PointerFollowingPte = PointerPte + 1;
            SizeInSet = (ULONG_PTR) PointerFollowingPte->u.List.NextEntry;

            if (NumberOfPtes < SizeInSet) {

                //
                // Get the PTEs from this set and reduce the size of the
                // set.  Note that the size of the current set cannot be 1.
                //

                if ((SizeInSet - NumberOfPtes) == 1) {

                    //
                    // Collapse to the single PTE format.
                    //

                    PointerPte->u.List.OneEntry = 1;
                }
                else {

                    //
                    // Get the required PTEs from the end of the set.
                    //

                    PointerFollowingPte->u.List.NextEntry = SizeInSet - NumberOfPtes;
                }

                MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;

#if DBG
                if (MmDebug & MM_DBG_SYS_PTES) {
                    ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
                            MiCountFreeSystemPtes (SystemPtePoolType));
                }
#endif

                //
                // Release the lock and flush the TB.
                //

                MiUnlockSystemSpace (OldIrql);

                PointerPte += (SizeInSet - NumberOfPtes);
                break;
            }

            if (NumberOfPtes == SizeInSet) {

ExactFit:

                //
                // Satisfy the request with this complete set and change
                // the list to reflect the fact that this set is gone.
                //

                Previous->u.List.NextEntry = PointerPte->u.List.NextEntry;

                MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;

#if DBG
                if (MmDebug & MM_DBG_SYS_PTES) {
                    ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
                            MiCountFreeSystemPtes (SystemPtePoolType));
                }
#endif

                //
                // Release the lock and flush the TB.
                //

                MiUnlockSystemSpace (OldIrql);
                break;
            }

NextEntry:

            //
            // Point to the next set and try again.
            //

            if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {

                //
                // End of list and none found.
                //

                MiUnlockSystemSpace (OldIrql);

#if defined (_X86_)
                if (MiRecoverExtraPtes () == TRUE) {
                    goto restart;
                }

                if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
                    goto restart;
                }
#endif

                MmPteFailures[SystemPtePoolType] += 1;
                return NULL;
            }

            Previous = PointerPte;
            PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
            ASSERT (PointerPte > Previous);
        }
    }
    else {

        //
        // Deal with the alignment issues.
        //

        while (TRUE) {

            if (PointerPte->u.List.OneEntry) {

                //
                // Initializing PointerFollowingPte is not needed for
                // correctness, but without it the compiler cannot compile
                // this code W4 to check for use of uninitialized variables.
                //

                PointerFollowingPte = NULL;
                SizeInSet = 1;
            }
            else {
                PointerFollowingPte = PointerPte + 1;
                SizeInSet = (ULONG_PTR) PointerFollowingPte->u.List.NextEntry;
            }

            PtesToObtainAlignment = (ULONG)
                (((OffsetSum - ((ULONG_PTR)PointerPte & MaskSize)) & MaskSize) >>
                    PTE_SHIFT);

            NumberOfRequiredPtes = NumberOfPtes + PtesToObtainAlignment;

            if (NumberOfRequiredPtes < SizeInSet) {

                //
                // Get the PTEs from this set and reduce the size of the
                // set.  Note that the size of the current set cannot be 1.
                //
                // This current block will be split into 2 blocks if
                // the PointerPte does not match the alignment.
                //

                //
                // Check to see if the first PTE is on the proper
                // alignment, if so, eliminate this block.
                //

                LeftInSet = SizeInSet - NumberOfRequiredPtes;

                //
                // Set up the new set at the end of this block.
                //

                NextSetPointer = PointerPte + NumberOfRequiredPtes;
                NextSetPointer->u.List.NextEntry =
                                          PointerPte->u.List.NextEntry;

                PteOffset = (ULONG_PTR)(NextSetPointer - MmSystemPteBase);

                if (PtesToObtainAlignment == 0) {
                    Previous->u.List.NextEntry += NumberOfRequiredPtes;
                }
                else {

                    //
                    // Point to the new set at the end of the block
                    // we are giving away.
                    //

                    PointerPte->u.List.NextEntry = PteOffset;

                    //
                    // Update the size of the current set.
                    //

                    if (PtesToObtainAlignment == 1) {

                        //
                        // Collapse to the single PTE format.
                        //

                        PointerPte->u.List.OneEntry = 1;
                    }
                    else {

                        //
                        // Set the set size in the next PTE.
                        //

                        PointerFollowingPte->u.List.NextEntry =
                                                        PtesToObtainAlignment;
                    }
                }

                //
                // Set up the new set at the end of the block.
                //

                if (LeftInSet == 1) {
                    NextSetPointer->u.List.OneEntry = 1;
                }
                else {
                    NextSetPointer->u.List.OneEntry = 0;
                    NextSetPointer += 1;
                    NextSetPointer->u.List.NextEntry = LeftInSet;
                }

                MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;

#if DBG
                if (MmDebug & MM_DBG_SYS_PTES) {
                    ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
                            MiCountFreeSystemPtes (SystemPtePoolType));
                }
#endif

                //
                // Release the lock and flush the TB.
                //

                MiUnlockSystemSpace (OldIrql);

                PointerPte += PtesToObtainAlignment;
                break;
            }

            if (NumberOfRequiredPtes == SizeInSet) {

                //
                // Satisfy the request with this complete set and change
                // the list to reflect the fact that this set is gone.
                //

                if (PtesToObtainAlignment == 0) {

                    //
                    // This block exactly satisfies the request.
                    //

                    Previous->u.List.NextEntry =
                                            PointerPte->u.List.NextEntry;
                }
                else {

                    //
                    // A portion at the start of this block remains.
                    //

                    if (PtesToObtainAlignment == 1) {

                        //
                        // Collapse to the single PTE format.
                        //

                        PointerPte->u.List.OneEntry = 1;
                    }
                    else {
                        PointerFollowingPte->u.List.NextEntry =
                                                        PtesToObtainAlignment;
                    }
                }

                MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;

#if DBG
                if (MmDebug & MM_DBG_SYS_PTES) {
                    ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
                            MiCountFreeSystemPtes (SystemPtePoolType));
                }
#endif

                //
                // Release the lock and flush the TB.
                //

                MiUnlockSystemSpace (OldIrql);

                PointerPte += PtesToObtainAlignment;
                break;
            }

            //
            // Point to the next set and try again.
            //

            if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {

                //
                // End of list and none found.
                //

                MiUnlockSystemSpace (OldIrql);

#if defined (_X86_)
                if (MiRecoverExtraPtes () == TRUE) {
                    goto restart;
                }

                if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
                    goto restart;
                }
#endif

                MmPteFailures[SystemPtePoolType] += 1;
                return NULL;
            }

            Previous = PointerPte;
            PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
            ASSERT (PointerPte > Previous);
        }
    }

    //
    // Flush the TB for dynamic mappings.
    //

    if (SystemPtePoolType == SystemPteSpace) {

        PteFlushList.Count = 0;
        Previous = PointerPte;
        BaseAddress = MiGetVirtualAddressMappedByPte (Previous);

        for (j = 0; j < NumberOfPtes; j += 1) {

            if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) {
                PteFlushList.FlushVa[PteFlushList.Count] = BaseAddress;
                PteFlushList.Count += 1;
            }

            //
            // PTEs being freed better be invalid.
            //

            ASSERT (Previous->u.Hard.Valid == 0);

            *Previous = ZeroKernelPte;
            BaseAddress = (PVOID)((PCHAR)BaseAddress + PAGE_SIZE);
            Previous += 1;
        }

        MiFlushPteList (&PteFlushList, TRUE);

        if (MmTrackPtes & 0x2) {
            MiCheckPteReserve (PointerPte, NumberOfPtes);
        }
    }

    return PointerPte;
}
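//
// Illustrative sketch, not from the original source: the alignment
// arithmetic above in isolation.  Assuming 4-byte PTEs (PTE_SHIFT == 2)
// and 4k pages (PAGE_SHIFT == 12), 16 contiguous PTEs map 64k of virtual
// space, so a 64k-aligned VA corresponds to a 64-byte-aligned PTE address.
// The user-mode check below verifies that PtesToObtainAlignment always
// skips forward to such a boundary:
//

#if 0
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PTE_SHIFT  2

int main (void)
{
    uint64_t Alignment = 0x10000;           // 64k
    uint64_t MaskSize = (Alignment - 1) >> (PAGE_SHIFT - PTE_SHIFT);
    uint64_t OffsetSum = Alignment >> (PAGE_SHIFT - PTE_SHIFT);
    uint64_t PteBase = 0xC0300000;          // hypothetical PTE area start
    uint64_t Pte;

    for (Pte = PteBase; Pte < PteBase + 0x1000; Pte += 4) {

        uint64_t Skip = ((OffsetSum - (Pte & MaskSize)) & MaskSize)
                                                            >> PTE_SHIFT;

        //
        // Skipping that many PTEs must land on a 64-byte-aligned PTE,
        // i.e. one whose mapped VA sits on a 64k boundary.
        //

        assert (((Pte + (Skip << PTE_SHIFT)) & MaskSize) == 0);
    }

    return 0;
}
#endif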
VOID
MiIssueNoPtesBugcheck (
    IN ULONG NumberOfPtes,
    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
    )

/*++

Routine Description:

    This function bugchecks when no PTEs are left.

Arguments:

    SystemPtePoolType - Supplies the PTE type of the pool that is empty.

    NumberOfPtes - Supplies the number of PTEs requested that failed.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    PVOID HighConsumer;
    ULONG_PTR HighPteUse;

    if (SystemPtePoolType == SystemPteSpace) {

        HighConsumer = MiGetHighestPteConsumer (&HighPteUse);

        if (HighConsumer != NULL) {
            KeBugCheckEx (DRIVER_USED_EXCESSIVE_PTES,
                          (ULONG_PTR)HighConsumer,
                          HighPteUse,
                          MmTotalFreeSystemPtes[SystemPtePoolType],
                          MmNumberOfSystemPtes);
        }
    }

    KeBugCheckEx (NO_MORE_SYSTEM_PTES,
                  (ULONG_PTR)SystemPtePoolType,
                  NumberOfPtes,
                  MmTotalFreeSystemPtes[SystemPtePoolType],
                  MmNumberOfSystemPtes);
}

VOID
MiPteSListExpansionWorker (
    IN PVOID Context
    )

/*++

Routine Description:

    This routine is the worker routine to add additional SLISTs for the
    system PTE nonblocking queues.

Arguments:

    Context - Supplies a pointer to the MM_PTE_SLIST_EXPANSION_WORK_CONTEXT.

Return Value:

    None.

Environment:

    Kernel mode, PASSIVE_LEVEL.

--*/

{
    ULONG i;
    ULONG SListEntries;
    PPTE_SLIST SListChunks;
    PMM_PTE_SLIST_EXPANSION_WORK_CONTEXT Expansion;

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);

    Expansion = (PMM_PTE_SLIST_EXPANSION_WORK_CONTEXT) Context;

    ASSERT (Expansion->Active == 1);

    if (Expansion->SListPages < MI_MAXIMUM_SLIST_PTE_PAGES) {

        //
        // Allocate another page of SLIST entries for the
        // nonblocking PTE queues.
        //

        SListChunks = (PPTE_SLIST) ExAllocatePoolWithTag (NonPagedPool,
                                                          PAGE_SIZE,
                                                          'PSmM');
        if (SListChunks != NULL) {

            //
            // Carve up the pages into SLIST entries (with no pool headers).
            //

            Expansion->SListPages += 1;

            SListEntries = PAGE_SIZE / sizeof (PTE_SLIST);

            for (i = 0; i < SListEntries; i += 1) {
                InterlockedPushEntrySList (&MiSystemPteSListHead,
                                           (PSLIST_ENTRY)SListChunks);
                SListChunks += 1;
            }
        }
    }

    ASSERT (Expansion->Active == 1);

    InterlockedExchange (&Expansion->Active, 0);
}
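//
// Illustrative sketch, not from the original source: the Active flag
// handshake used with this worker.  Whoever wins the 0 -> 1 compare
// exchange queues the expansion; every other racer sees 1 and backs off,
// so at most one work item is ever in flight.  A C11 user-mode rendition:
//

#if 0
#include <stdatomic.h>
#include <stdio.h>

static atomic_int Active;

static void QueueExpansionOnce (void)
{
    int Expected = 0;

    if (atomic_compare_exchange_strong (&Active, &Expected, 1)) {

        //
        // This caller won the race and queues the (simulated) work item.
        // The real worker stores 0 back into Active when it finishes.
        //

        printf ("expansion queued\n");
    }
    else {
        printf ("expansion already in progress\n");
    }
}

int main (void)
{
    QueueExpansionOnce ();      // queues
    QueueExpansionOnce ();      // no-op until the worker clears Active
    return 0;
}
#endif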
PVOID
MmMapLockedPagesSpecifyCache (
    IN PMDL MemoryDescriptorList,
    IN KPROCESSOR_MODE AccessMode,
    IN MEMORY_CACHING_TYPE CacheType,
    IN PVOID RequestedAddress,
    IN ULONG BugCheckOnFailure,
    IN MM_PAGE_PRIORITY Priority
    )

/*++

Routine Description:

    This function maps physical pages described by a memory descriptor
    list into the system virtual address space or the user portion of
    the virtual address space.

Arguments:

    MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
                           been updated by MmProbeAndLockPages.

    AccessMode - Supplies an indicator of where to map the pages;
                 KernelMode indicates that the pages should be mapped in the
                 system part of the address space, UserMode indicates the
                 pages should be mapped in the user part of the address space.

    CacheType - Supplies the type of cache mapping to use for the MDL.
                MmCached indicates "normal" kernel or user mappings.

    RequestedAddress - Supplies the base user address of the view.
                       This is only treated as an address if the AccessMode
                       is UserMode.  If the initial value of this argument
                       is not NULL, then the view will be allocated starting
                       at the specified virtual address rounded down to the
                       next 64k address boundary.  If the initial value of
                       this argument is NULL, then the operating system
                       will determine where to allocate the view.

                       If the AccessMode is KernelMode, then this argument is
                       treated as a bit field of attributes.

    BugCheckOnFailure - Supplies whether to bugcheck if the mapping cannot be
                        obtained.  This flag is only checked if the MDL's
                        MDL_MAPPING_CAN_FAIL is zero, which implies that the
                        default MDL behavior is to bugcheck.  This flag then
                        provides an additional avenue to avoid the bugcheck.
                        Done this way in order to provide WDM compatibility.

    Priority - Supplies an indication as to how important it is that this
               request succeed under low available PTE conditions.

Return Value:

    Returns the base address where the pages are mapped.  The base address
    has the same offset as the virtual address in the MDL.

    This routine will raise an exception if the processor mode is USER_MODE
    and quota limits or VM limits are exceeded.

Environment:

    Kernel mode.  DISPATCH_LEVEL or below if access mode is KernelMode,
    APC_LEVEL or below if access mode is UserMode.

--*/

{
    ULONG TimeStamp;
    PTE_QUEUE_POINTER Value;
    ULONG Index;
    KIRQL OldIrql;
    CSHORT IoMapping;
    PFN_NUMBER NumberOfPages;
    PPFN_NUMBER Page;
    PPFN_NUMBER LastPage;
    PMMPTE PointerPte;
    PVOID BaseVa;
    MMPTE TempPte;
    MMPTE DefaultPte;
    PVOID StartingVa;
    PVOID CallingAddress;
    PVOID CallersCaller;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn2;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // If this assert fires, the MiPlatformCacheAttributes array
    // initialization needs to be checked.
    //

    ASSERT (MmMaximumCacheType == 6);

    StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
                    MemoryDescriptorList->ByteOffset);

    ASSERT (MemoryDescriptorList->ByteCount != 0);
    ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);

    if (AccessMode == KernelMode) {

        Page = (PPFN_NUMBER) (MemoryDescriptorList + 1);
        NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
                                        MemoryDescriptorList->ByteCount);
        LastPage = Page + NumberOfPages;

        //
        // Map the pages into the system part of the address space as
        // kernel read/write.
        //

        ASSERT ((MemoryDescriptorList->MdlFlags & (
                        MDL_MAPPED_TO_SYSTEM_VA |
                        MDL_SOURCE_IS_NONPAGED_POOL |
                        MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);

        ASSERT ((MemoryDescriptorList->MdlFlags & (
                        MDL_PAGES_LOCKED |
                        MDL_PARTIAL)) != 0);

        //
        // Make sure there are enough PTEs of the requested size.
        // Try to ensure available PTEs inline when we're rich.
        // Otherwise go the long way.
        //

        if ((Priority != HighPagePriority) &&
            ((LONG)(NumberOfPages) > (LONG)MmTotalFreeSystemPtes[SystemPteSpace] - 2048) &&
            (MiGetSystemPteAvailability ((ULONG)NumberOfPages, Priority) == FALSE) &&
            (PsGetCurrentThread()->MemoryMaker == 0)) {

            return NULL;
        }

        IoMapping = MemoryDescriptorList->MdlFlags & MDL_IO_SPACE;

        CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, IoMapping);

        //
        // If a noncachable mapping is requested, none of the pages in the
        // requested MDL can reside in a large page.  Otherwise we would be
        // creating an incoherent overlapping TB entry as the same physical
        // page would be mapped by 2 different TB entries with different
        // cache attributes.
        //

        if (CacheAttribute != MiCached) {

            LOCK_PFN2 (OldIrql);

            do {

                if (*Page == MM_EMPTY_LIST) {
                    break;
                }

                PageFrameIndex = *Page;

                if (MI_PAGE_FRAME_INDEX_MUST_BE_CACHED (PageFrameIndex)) {

                    UNLOCK_PFN2 (OldIrql);

                    MiNonCachedCollisions += 1;

                    if (((MemoryDescriptorList->MdlFlags & MDL_MAPPING_CAN_FAIL) == 0) && (BugCheckOnFailure)) {

                        KeBugCheckEx (MEMORY_MANAGEMENT,
                                      0x1000,
                                      (ULONG_PTR)MemoryDescriptorList,
                                      (ULONG_PTR)PageFrameIndex,
                                      (ULONG_PTR)CacheAttribute);
                    }

                    return NULL;
                }

                Page += 1;

            } while (Page < LastPage);

            UNLOCK_PFN2 (OldIrql);

            Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
        }

        PointerPte = NULL;

        if (NumberOfPages <= MM_PTE_TABLE_LIMIT) {

            Index = MmSysPteTables [NumberOfPages];

            ASSERT (NumberOfPages <= MmSysPteIndex[Index]);

            if (ExRemoveHeadNBQueue (MiSystemPteNBHead[Index], (PULONG64)&Value) == TRUE) {

                InterlockedDecrement ((PLONG)&MmSysPteListBySizeCount[Index]);

                PointerPte = UnpackPTEPointer (&Value);

                ASSERT (PointerPte >= MmSystemPtesStart[SystemPteSpace]);
                ASSERT (PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

                TimeStamp = UnpackPTETimeStamp (&Value);

#if DBG
                PointerPte->u.List.NextEntry = 0xABCDE;

                if (MmDebug & MM_DBG_SYS_PTES) {
                    ULONG j;
                    for (j = 0; j < MmSysPteIndex[Index]; j += 1) {
                        ASSERT (PointerPte->u.Hard.Valid == 0);
                        PointerPte += 1;
                    }
                    PointerPte -= j;
                }
#endif

                ASSERT (PointerPte >= MmSystemPtesStart[SystemPteSpace]);
                ASSERT (PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

                if (MmSysPteListBySizeCount[Index] < MmSysPteMinimumFree[Index]) {
                    MiFeedSysPtePool (Index);
                }

                //
                // The last thing is to check whether the TB needs flushing.
                //

                if (TimeStamp == MiReadTbFlushTimeStamp ()) {
                    KeFlushEntireTb (TRUE, TRUE);
                }

                if (MmTrackPtes & 0x2) {
                    MiCheckPteReserve (PointerPte, MmSysPteIndex[Index]);
                }
            }
            else {

                //
                // Fall through and go the long way to satisfy the PTE request.
                //
            }
        }

        if (PointerPte == NULL) {

            PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages,
                                              SystemPteSpace);

            if (PointerPte == NULL) {

                if (((MemoryDescriptorList->MdlFlags & MDL_MAPPING_CAN_FAIL) == 0) &&
                    (BugCheckOnFailure)) {
                    MiIssueNoPtesBugcheck ((ULONG)NumberOfPages, SystemPteSpace);
                }

                //
                // Not enough system PTES are available.
                //

                return NULL;
            }
        }

        BaseVa = (PVOID)((PCHAR)MiGetVirtualAddressMappedByPte (PointerPte) +
                                MemoryDescriptorList->ByteOffset);

        TempPte = ValidKernelPte;

        MI_ADD_EXECUTE_TO_VALID_PTE_IF_PAE (TempPte);

        if (CacheAttribute != MiCached) {

            switch (CacheAttribute) {

                case MiNonCached:
                    MI_DISABLE_CACHING (TempPte);
                    break;

                case MiWriteCombined:
                    MI_SET_PTE_WRITE_COMBINE (TempPte);
                    break;

                default:
                    ASSERT (FALSE);
                    break;
            }

            MI_PREPARE_FOR_NONCACHED (CacheAttribute);
        }

        if (IoMapping == 0) {

            OldIrql = HIGH_LEVEL;

            DefaultPte = TempPte;

            do {

                if (*Page == MM_EMPTY_LIST) {
                    break;
                }

                ASSERT (PointerPte->u.Hard.Valid == 0);

                Pfn2 = MI_PFN_ELEMENT (*Page);
                ASSERT (Pfn2->u3.e2.ReferenceCount != 0);

                if (CacheAttribute == (MI_PFN_CACHE_ATTRIBUTE)Pfn2->u3.e1.CacheAttribute) {
                    TempPte.u.Hard.PageFrameNumber = *Page;
                    MI_WRITE_VALID_PTE (PointerPte, TempPte);
                }
                else {

                    TempPte = ValidKernelPte;

                    switch (Pfn2->u3.e1.CacheAttribute) {

                        case MiCached:

                            //
                            // The caller asked for a noncached or
                            // writecombined mapping, but the page is
                            // already mapped cached by someone else.
                            // Override the caller's request in order
                            // to keep the TB page attribute coherent.
                            //

                            MiCacheOverride[0] += 1;
                            break;

                        case MiNonCached:

                            //
                            // The caller asked for a cached or
                            // writecombined mapping, but the page is
                            // already mapped noncached by someone else.
                            // Override the caller's request in order to
                            // keep the TB page attribute coherent.
                            //

                            MiCacheOverride[1] += 1;
                            MI_DISABLE_CACHING (TempPte);
                            break;

                        case MiWriteCombined:

                            //
                            // The caller asked for a cached or noncached
                            // mapping, but the page is already mapped
                            // writecombined by someone else.  Override the
                            // caller's request in order to keep the TB page
                            // attribute coherent.
                            //

                            MiCacheOverride[2] += 1;
                            MI_SET_PTE_WRITE_COMBINE (TempPte);
                            break;

                        case MiNotMapped:

                            //
                            // This better be for a page allocated with
                            // MmAllocatePagesForMdl.  Otherwise it might be a
                            // page on the freelist which could subsequently be
                            // given out with a different attribute !
                            //

                            ASSERT ((Pfn2->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME) ||
                                    (Pfn2->PteAddress == (PVOID) (ULONG_PTR)(X64K | 0x1)));

                            if (OldIrql == HIGH_LEVEL) {
                                LOCK_PFN2 (OldIrql);
                            }

                            switch (CacheAttribute) {

                                case MiCached:
                                    Pfn2->u3.e1.CacheAttribute = MiCached;
                                    break;

                                case MiNonCached:
                                    Pfn2->u3.e1.CacheAttribute = MiNonCached;
                                    MI_DISABLE_CACHING (TempPte);
                                    break;

                                case MiWriteCombined:
                                    Pfn2->u3.e1.CacheAttribute = MiWriteCombined;
                                    MI_SET_PTE_WRITE_COMBINE (TempPte);
                                    break;

                                default:
                                    ASSERT (FALSE);
                                    break;
                            }

                            break;

                        default:
                            ASSERT (FALSE);
                            break;
                    }

                    TempPte.u.Hard.PageFrameNumber = *Page;
                    MI_WRITE_VALID_PTE (PointerPte, TempPte);

                    //
                    // We had to override the requested cache type for the
                    // current page, so reset the PTE for the next page back
                    // to the original entry requested cache type.
                    //

                    TempPte = DefaultPte;
                }

                Page += 1;
                PointerPte += 1;

            } while (Page < LastPage);

            if (OldIrql != HIGH_LEVEL) {
                UNLOCK_PFN2 (OldIrql);
            }
        }
        else {

            do {

                if (*Page == MM_EMPTY_LIST) {
                    break;
                }

                ASSERT (PointerPte->u.Hard.Valid == 0);

                TempPte.u.Hard.PageFrameNumber = *Page;
                MI_WRITE_VALID_PTE (PointerPte, TempPte);

                Page += 1;
                PointerPte += 1;

            } while (Page < LastPage);
        }

        MI_SWEEP_CACHE (CacheAttribute, BaseVa, NumberOfPages * PAGE_SIZE);

        ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);

        MemoryDescriptorList->MappedSystemVa = BaseVa;
        MemoryDescriptorList->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        if (MmTrackPtes & 0x1) {

            RtlGetCallersAddress (&CallingAddress, &CallersCaller);

            MiInsertPteTracker (MemoryDescriptorList,
                                0,
                                IoMapping,
                                CacheAttribute,
                                CallingAddress,
                                CallersCaller);
        }

        if ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) != 0) {
            MemoryDescriptorList->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        return BaseVa;
    }

    return MiMapLockedPagesInUserSpace (MemoryDescriptorList,
                                        StartingVa,
                                        CacheType,
                                        RequestedAddress);
}
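//
// Illustrative sketch, not from the original source: the usual driver-side
// call sequence for the export above.  Passing FALSE for BugCheckOnFailure
// (as the MmGetSystemAddressForMdlSafe macro does) yields a NULL return
// instead of the bugcheck path when system PTEs are scarce.  The helper
// name is hypothetical:
//

#if 0
PVOID
SketchMapMdl (
    IN PMDL Mdl
    )
{
    PVOID SystemVa;

    SystemVa = MmMapLockedPagesSpecifyCache (Mdl,
                                             KernelMode,
                                             MmCached,
                                             NULL,
                                             FALSE,
                                             NormalPagePriority);

    //
    // ... use the mapping, then tear it down with:
    //
    //     MmUnmapLockedPages (SystemVa, Mdl);
    //

    return SystemVa;
}
#endif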
VOID
MmUnmapLockedPages (
    IN PVOID BaseAddress,
    IN PMDL MemoryDescriptorList
    )

/*++

Routine Description:

    This routine unmaps locked pages which were previously mapped via
    an MmMapLockedPages call.

Arguments:

    BaseAddress - Supplies the base address where the pages were previously
                  mapped.

    MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
                           been updated by MmProbeAndLockPages.

Return Value:

    None.

Environment:

    Kernel mode.  DISPATCH_LEVEL or below if base address is within
    system space; APC_LEVEL or below if base address is user space.

    Note that in some instances the PFN lock is held by the caller.

--*/

{
    PFN_NUMBER NumberOfPages;
    PMMPTE PointerPte;
    PVOID StartingVa;
    PPFN_NUMBER Page;
    ULONG TimeStamp;
    PTE_QUEUE_POINTER Value;
    ULONG Index;
    PFN_NUMBER i;

    ASSERT (MemoryDescriptorList->ByteCount != 0);
    ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
    ASSERT (!MI_IS_PHYSICAL_ADDRESS (BaseAddress));

    if (BaseAddress > MM_HIGHEST_USER_ADDRESS) {

        StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
                        MemoryDescriptorList->ByteOffset);

        NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
                                        MemoryDescriptorList->ByteCount);

        ASSERT (NumberOfPages != 0);

        PointerPte = MiGetPteAddress (BaseAddress);

        //
        // Check to make sure the PTE address is within bounds.
        //

        ASSERT (PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT (PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

        ASSERT (MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

#if DBG
        i = NumberOfPages;
        Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);

        while (i != 0) {
            ASSERT (PointerPte->u.Hard.Valid == 1);
            ASSERT (*Page == MI_GET_PAGE_FRAME_FROM_PTE (PointerPte));
            if ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0) {
                PMMPFN Pfn3;
                Pfn3 = MI_PFN_ELEMENT (*Page);
                ASSERT (Pfn3->u3.e2.ReferenceCount != 0);
            }

            Page += 1;
            PointerPte += 1;
            i -= 1;
        }

        PointerPte -= NumberOfPages;
#endif

        if (MemoryDescriptorList->MdlFlags & MDL_FREE_EXTRA_PTES) {
            Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
            Page += NumberOfPages;
            ASSERT (*Page <= MiCurrentAdvancedPages);
            NumberOfPages += *Page;
            PointerPte -= *Page;
            ASSERT (PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT (PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
            BaseAddress = (PVOID)((PCHAR)BaseAddress - ((*Page) << PAGE_SHIFT));
#if DBG
            InterlockedExchangeAddSizeT (&MiCurrentAdvancedPages, 0 - *Page);
            MiAdvancesFreed += *Page;
#endif
        }

        if (MmTrackPtes != 0) {

            if (MmTrackPtes & 0x1) {
                MiRemovePteTracker (MemoryDescriptorList,
                                    BaseAddress,
                                    NumberOfPages);
            }

            if (MmTrackPtes & 0x2) {
                MiCheckPteRelease (PointerPte, (ULONG) NumberOfPages);
            }
        }

        MemoryDescriptorList->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                                            MDL_PARTIAL_HAS_BEEN_MAPPED |
                                            MDL_FREE_EXTRA_PTES);

        //
        // If it's a small request (most are), try to finish it inline.
        //

        if (NumberOfPages <= MM_PTE_TABLE_LIMIT) {

            Index = MmSysPteTables [NumberOfPages];

            ASSERT (NumberOfPages <= MmSysPteIndex [Index]);

            if (MmTotalFreeSystemPtes[SystemPteSpace] >= MM_MIN_SYSPTE_FREE) {

                //
                // Add to the pool if the size is less than 15 + the minimum.
                //

                i = MmSysPteMinimumFree[Index];

                if (MmTotalFreeSystemPtes[SystemPteSpace] >= MM_MAX_SYSPTE_FREE) {

                    //
                    // Lots of free PTEs, quadruple the limit.
                    //

                    i = i * 4;
                }

                i += 15;

                if (MmSysPteListBySizeCount[Index] <= i) {

                    //
                    // Zero PTEs, then encode the PTE pointer and the TB flush
                    // counter into Value.
                    //

                    MiZeroMemoryPte (PointerPte, NumberOfPages);

                    TimeStamp = KeReadTbFlushTimeStamp();

                    PackPTEValue (&Value, PointerPte, TimeStamp);

                    if (ExInsertTailNBQueue (MiSystemPteNBHead[Index], Value.Data) == TRUE) {
                        InterlockedIncrement ((PLONG)&MmSysPteListBySizeCount[Index]);
                        return;
                    }
                }
            }
        }

        if (MmTrackPtes & 0x2) {

            //
            // This release has already been updated in the tracking bitmaps
            // so mark it so that MiReleaseSystemPtes doesn't attempt to do
            // it also.
            //

            PointerPte = (PMMPTE) ((ULONG_PTR)PointerPte | 0x1);
        }

        MiReleaseSystemPtes (PointerPte, (ULONG)NumberOfPages, SystemPteSpace);
    }
    else {

        MiUnmapLockedPagesInUserSpace (BaseAddress, MemoryDescriptorList);
    }

    return;
}
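//
// Illustrative sketch, not from the original source: the low-bit tag the
// routine above passes to MiReleaseSystemPtes.  PTE addresses are at least
// 4-byte aligned, so bit 0 is free to carry out-of-band state ("tracking
// bitmaps already updated") without a second parameter:
//

#if 0
#include <assert.h>
#include <stdint.h>

int main (void)
{
    uintptr_t Pte = 0xC0300140;                     // hypothetical PTE address
    uintptr_t Tagged = Pte | 0x1;                   // mark "already tracked"

    assert (Tagged & 0x1);                          // callee sees the flag
    assert ((Tagged & ~(uintptr_t)0x1) == Pte);     // address recovered intact
    return 0;
}
#endif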
VOID
MiReleaseSystemPtes (
    IN PMMPTE StartingPte,
    IN ULONG NumberOfPtes,
    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
    )

/*++

Routine Description:

    This function releases the specified number of PTEs
    within the non paged portion of system space.

    Note that the PTEs must be invalid and the page frame number
    must have been set to zero.

Arguments:

    StartingPte - Supplies the address of the first PTE to release.

    NumberOfPtes - Supplies the number of PTEs to release.

    SystemPtePoolType - Supplies the PTE type of the pool to release PTEs to,
                        one of SystemPteSpace or NonPagedPoolExpansion.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    ULONG_PTR Size;
    ULONG i;
    ULONG_PTR PteOffset;
    PMMPTE PointerPte;
    PMMPTE PointerFollowingPte;
    PMMPTE NextPte;
    KIRQL OldIrql;
    ULONG Index;
    ULONG TimeStamp;
    PTE_QUEUE_POINTER Value;
    ULONG ExtensionInProgress;

    if ((MmTrackPtes & 0x2) && (SystemPtePoolType == SystemPteSpace)) {

        //
        // If the low bit is set, this range was never reserved and therefore
        // should not be validated during the release.
        //

        if ((ULONG_PTR)StartingPte & 0x1) {
            StartingPte = (PMMPTE) ((ULONG_PTR)StartingPte & ~0x1);
        }
        else {
            MiCheckPteRelease (StartingPte, NumberOfPtes);
        }
    }

    //
    // Check to make sure the PTE address is within bounds.
    //

    ASSERT (NumberOfPtes != 0);
    ASSERT (StartingPte >= MmSystemPtesStart[SystemPtePoolType]);
    ASSERT (StartingPte <= MmSystemPtesEnd[SystemPtePoolType]);

    //
    // Zero PTEs.
    //

    MiZeroMemoryPte (StartingPte, NumberOfPtes);

    if ((SystemPtePoolType == SystemPteSpace) &&
        (NumberOfPtes <= MM_PTE_TABLE_LIMIT)) {

        //
        // Encode the PTE pointer and the TB flush counter into Value.
        //

        TimeStamp = KeReadTbFlushTimeStamp();

        PackPTEValue (&Value, StartingPte, TimeStamp);

        Index = MmSysPteTables [NumberOfPtes];

        ASSERT (NumberOfPtes <= MmSysPteIndex [Index]);

        if (MmTotalFreeSystemPtes[SystemPteSpace] >= MM_MIN_SYSPTE_FREE) {

            //
            // Add to the pool if the size is less than 15 + the minimum.
            //

            i = MmSysPteMinimumFree[Index];

            if (MmTotalFreeSystemPtes[SystemPteSpace] >= MM_MAX_SYSPTE_FREE) {

                //
                // Lots of free PTEs, quadruple the limit.
                //

                i = i * 4;
            }

            i += 15;

            if (MmSysPteListBySizeCount[Index] <= i) {

                if (ExInsertTailNBQueue (MiSystemPteNBHead[Index], Value.Data) == TRUE) {
                    InterlockedIncrement ((PLONG)&MmSysPteListBySizeCount[Index]);
                    return;
                }

                //
                // No lookasides are left for inserting this PTE allocation
                // into the nonblocking queues.  Queue an extension to a
                // worker thread so it can be done in a deadlock-free
                // manner.
                //

                if (MiPteSListExpand.SListPages < MI_MAXIMUM_SLIST_PTE_PAGES) {

                    //
                    // If an extension is not in progress then queue one now.
                    //

                    ExtensionInProgress = InterlockedCompareExchange (&MiPteSListExpand.Active, 1, 0);

                    if (ExtensionInProgress == 0) {

                        ExInitializeWorkItem (&MiPteSListExpand.WorkItem,
                                              MiPteSListExpansionWorker,
                                              (PVOID)&MiPteSListExpand);

                        ExQueueWorkItem (&MiPteSListExpand.WorkItem, CriticalWorkQueue);
                    }
                }
            }
        }

        //
        // The insert failed - our lookaside list must be empty or we are
        // low on PTEs systemwide or we already had plenty on our list and
        // didn't try to insert.  Fall through to queue this the long way.
        //

        NumberOfPtes = MmSysPteIndex [Index];
    }

    //
    // Acquire system space spin lock to synchronize access.
    //

    PteOffset = (ULONG_PTR)(StartingPte - MmSystemPteBase);

    MiLockSystemSpace (OldIrql);

    MmTotalFreeSystemPtes[SystemPtePoolType] += NumberOfPtes;

    PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];

    while (TRUE) {

        NextPte = MmSystemPteBase + PointerPte->u.List.NextEntry;

        if (PteOffset < PointerPte->u.List.NextEntry) {

            //
            // Insert in the list at this point.  The
            // previous one should point to the new freed set and
            // the new freed set should point to the place
            // the previous set points to.
            //
            // Attempt to combine the clusters before we
            // insert.
            //
            // Locate the end of the current structure.
            //

            ASSERT (((StartingPte + NumberOfPtes) <= NextPte) ||
                    (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST));

            PointerFollowingPte = PointerPte + 1;

            if (PointerPte->u.List.OneEntry) {
                Size = 1;
            }
            else {
                Size = (ULONG_PTR) PointerFollowingPte->u.List.NextEntry;
            }

            if ((PointerPte + Size) == StartingPte) {

                //
                // We can combine the clusters.
                //

                NumberOfPtes += (ULONG)Size;
                PointerFollowingPte->u.List.NextEntry = NumberOfPtes;
                PointerPte->u.List.OneEntry = 0;

                //
                // Point the starting PTE to the beginning of
                // the new free set and try to combine with the
                // following free cluster.
                //

                StartingPte = PointerPte;
            }
            else {

                //
                // Can't combine with previous.  Make this PTE the
                // start of a cluster.
                //

                //
                // Point this cluster to the next cluster.
                //

                StartingPte->u.List.NextEntry = PointerPte->u.List.NextEntry;

                //
                // Point the current cluster to this cluster.
                //

                PointerPte->u.List.NextEntry = PteOffset;

                //
                // Set the size of this cluster.
                //

                if (NumberOfPtes == 1) {
                    StartingPte->u.List.OneEntry = 1;
                }
                else {
                    StartingPte->u.List.OneEntry = 0;

                    PointerFollowingPte = StartingPte + 1;
                    PointerFollowingPte->u.List.NextEntry = NumberOfPtes;
                }
            }

            //
            // Attempt to combine the newly created cluster with
            // the following cluster.
            //

            if ((StartingPte + NumberOfPtes) == NextPte) {

                //
                // Combine with following cluster.
                //

                //
                // Set the next cluster to the value contained in the
                // cluster we are merging into this one.
                //

                StartingPte->u.List.NextEntry = NextPte->u.List.NextEntry;
                StartingPte->u.List.OneEntry = 0;

                PointerFollowingPte = StartingPte + 1;

                if (NextPte->u.List.OneEntry) {
                    Size = 1;
                }
                else {
                    NextPte++;
                    Size = (ULONG_PTR) NextPte->u.List.NextEntry;
                }

                PointerFollowingPte->u.List.NextEntry = NumberOfPtes + Size;
            }

#if 0
            if (MmDebug & MM_DBG_SYS_PTES) {
                MiDumpSystemPtes(SystemPtePoolType);
            }
#endif

#if DBG
            if (MmDebug & MM_DBG_SYS_PTES) {
                ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
                        MiCountFreeSystemPtes (SystemPtePoolType));
            }
#endif

            MiUnlockSystemSpace (OldIrql);
            return;
        }

        //
        // Point to next freed cluster.
        //

        PointerPte = NextPte;
    }
}
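//
// Illustrative sketch, not from the original source: the "free list stored
// inside the free PTEs" encoding the routine above maintains.  A free run
// keeps its link (and the OneEntry flag) in its first slot and, unless the
// run is a single entry, its length in the second slot.  A tiny user-mode
// model of walking such a list:
//

#if 0
#include <stdio.h>

#define EMPTY 0xFFFFF

typedef struct {
    unsigned NextEntry;     // index of the next free run (u.List.NextEntry)
    unsigned OneEntry;      // run is exactly one slot long (u.List.OneEntry)
} SLOT;

static SLOT Table[32];

static void MarkFree (unsigned Start, unsigned Length, unsigned Next)
{
    Table[Start].NextEntry = Next;
    Table[Start].OneEntry = (Length == 1);

    if (Length > 1) {
        Table[Start + 1].NextEntry = Length;    // second slot holds the size
    }
}

int main (void)
{
    unsigned Run;

    MarkFree (2, 1, 10);        // single-slot run at 2, linked to run at 10
    MarkFree (10, 5, EMPTY);    // five-slot run at 10, end of the list

    for (Run = 2; Run != EMPTY; Run = Table[Run].NextEntry) {
        unsigned Size = Table[Run].OneEntry ? 1 : Table[Run + 1].NextEntry;
        printf ("free run at %u, %u slots\n", Run, Size);
    }

    return 0;
}
#endif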
  1609. VOID
  1610. MiReleaseSplitSystemPtes (
  1611. IN PMMPTE StartingPte,
  1612. IN ULONG NumberOfPtes,
  1613. IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
  1614. )
  1615. /*++
  1616. Routine Description:
  1617. This function releases the specified number of PTEs
  1618. within the non paged portion of system space.
  1619. Note that the PTEs must be invalid and the page frame number
  1620. must have been set to zero.
  1621. This portion is a split portion from a larger allocation so
  1622. careful updating of the tracking bitmaps must be done here.
  1623. Arguments:
  1624. StartingPte - Supplies the address of the first PTE to release.
  1625. NumberOfPtes - Supplies the number of PTEs to release.
  1626. SystemPtePoolType - Supplies the PTE type of the pool to release PTEs to,
  1627. one of SystemPteSpace or NonPagedPoolExpansion.
  1628. Return Value:
  1629. None.
  1630. Environment:
  1631. Kernel mode.
  1632. --*/
  1633. {
  1634. ULONG i;
  1635. ULONG StartBit;
  1636. KIRQL OldIrql;
  1637. PULONG StartBitMapBuffer;
  1638. PULONG EndBitMapBuffer;
  1639. PVOID VirtualAddress;
  1640. //
  1641. // Check to make sure the PTE address is within bounds.
  1642. //
  1643. ASSERT (NumberOfPtes != 0);
  1644. ASSERT (StartingPte >= MmSystemPtesStart[SystemPtePoolType]);
  1645. ASSERT (StartingPte <= MmSystemPtesEnd[SystemPtePoolType]);
  1646. if ((MmTrackPtes & 0x2) && (SystemPtePoolType == SystemPteSpace)) {
  1647. ASSERT (MmTrackPtes & 0x2);
  1648. VirtualAddress = MiGetVirtualAddressMappedByPte (StartingPte);
  1649. StartBit = (ULONG) (StartingPte - MiPteStart);
  1650. ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql);
  1651. //
  1652. // Verify start and size of allocation using the tracking bitmaps.
  1653. //
  1654. StartBitMapBuffer = MiPteStartBitmap->Buffer;
  1655. EndBitMapBuffer = MiPteEndBitmap->Buffer;
  1656. //
  1657. // All the start bits better be set.
  1658. //
  1659. for (i = StartBit; i < StartBit + NumberOfPtes; i += 1) {
  1660. ASSERT (MI_CHECK_BIT (StartBitMapBuffer, i) == 1);
  1661. }
  1662. if (StartBit != 0) {
  1663. if (RtlCheckBit (MiPteStartBitmap, StartBit - 1)) {
  1664. if (!RtlCheckBit (MiPteEndBitmap, StartBit - 1)) {
  1665. //
  1666. // In the middle of an allocation - update the previous
  1667. // so it ends here.
  1668. //
  1669. MI_SET_BIT (EndBitMapBuffer, StartBit - 1);
  1670. }
  1671. else {
  1672. //
  1673. // The range being freed is the start of an allocation.
  1674. //
  1675. }
  1676. }
  1677. }
  1678. //
  1679. // Unconditionally set the end bit (and clear any others) in case the
  1680. // split chunk crosses multiple allocations.
  1681. //
  1682. MI_SET_BIT (EndBitMapBuffer, StartBit + NumberOfPtes - 1);
  1683. ExReleaseSpinLock (&MiPteTrackerLock, OldIrql);
  1684. }
  1685. MiReleaseSystemPtes (StartingPte, NumberOfPtes, SystemPteSpace);
  1686. }

VOID
MiInitializeSystemPtes (
    IN PMMPTE StartingPte,
    IN PFN_NUMBER NumberOfPtes,
    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
    )

/*++

Routine Description:

    This routine initializes the system PTE pool.

Arguments:

    StartingPte - Supplies the address of the first PTE to put in the pool.

    NumberOfPtes - Supplies the number of PTEs to put in the pool.

    SystemPtePoolType - Supplies the PTE type of the pool to initialize, one
                        of SystemPteSpace or NonPagedPoolExpansion.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    ULONG i;
    ULONG TotalPtes;
    ULONG SListEntries;
    SIZE_T SListBytes;
    ULONG TotalChunks;
    PMMPTE PointerPte;
    PPTE_SLIST Chunk;
    PPTE_SLIST SListChunks;

    //
    // Set the base of the system PTE pool to this PTE.  This takes into
    // account that systems may have additional PTE pools below the PTE_BASE.
    //

    ASSERT64 (NumberOfPtes < _4gb);

    MmSystemPteBase = MI_PTE_BASE_FOR_LOWEST_KERNEL_ADDRESS;

    MmSystemPtesStart[SystemPtePoolType] = StartingPte;
    MmSystemPtesEnd[SystemPtePoolType] = StartingPte + NumberOfPtes - 1;

    //
    // If no PTEs were specified, make a valid chain by indicating that the
    // list is empty.
    //

    if (NumberOfPtes == 0) {
        MmFirstFreeSystemPte[SystemPtePoolType] = ZeroKernelPte;
        MmFirstFreeSystemPte[SystemPtePoolType].u.List.NextEntry =
                                                                MM_EMPTY_LIST;
        return;
    }

    //
    // Initialize the specified system PTE pool.
    //

    MiZeroMemoryPte (StartingPte, NumberOfPtes);

    //
    // The page frame field points to the next cluster.  As there is only
    // one cluster at initialization time, mark it as the last cluster.
    //

    StartingPte->u.List.NextEntry = MM_EMPTY_LIST;

    MmFirstFreeSystemPte[SystemPtePoolType] = ZeroKernelPte;
    MmFirstFreeSystemPte[SystemPtePoolType].u.List.NextEntry =
                                                StartingPte - MmSystemPteBase;

    //
    // If there is only one PTE in the pool, mark it as a one-entry PTE.
    // Otherwise store the cluster size in the following PTE.
    //

    if (NumberOfPtes == 1) {
        StartingPte->u.List.OneEntry = TRUE;
    }
    else {
        StartingPte += 1;
        MI_WRITE_INVALID_PTE (StartingPte, ZeroKernelPte);
        StartingPte->u.List.NextEntry = NumberOfPtes;
    }
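
    //
    // Illustration (hypothetical values, assuming an 8-PTE pool): the free
    // list is encoded in the PTEs themselves.  After the code above runs,
    // the single initial cluster looks like
    //
    //  PTE 0: u.List.NextEntry == MM_EMPTY_LIST  (no next cluster)
    //         u.List.OneEntry  == 0
    //  PTE 1: u.List.NextEntry == 8              (size of this cluster)
    //  PTEs 2 through 7: zeroed
    //
    // A one-PTE cluster has no room for a trailing size field, which is
    // why the OneEntry bit exists.
    //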

    //
    // Set the total number of free PTEs for the specified type.
    //

    MmTotalFreeSystemPtes[SystemPtePoolType] = (ULONG) NumberOfPtes;

    ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
            MiCountFreeSystemPtes (SystemPtePoolType));

    if (SystemPtePoolType == SystemPteSpace) {

        ULONG Lists[MM_SYS_PTE_TABLES_MAX] = {
#if defined(_IA64_)
            MM_PTE_LIST_1,
            MM_PTE_LIST_2,
            MM_PTE_LIST_4,
            MM_PTE_LIST_8,
            MM_PTE_LIST_9,
            MM_PTE_LIST_18
#elif defined(_AMD64_)
            MM_PTE_LIST_1,
            MM_PTE_LIST_2,
            MM_PTE_LIST_4,
            MM_PTE_LIST_6,
            MM_PTE_LIST_8,
            MM_PTE_LIST_16
#else
            MM_PTE_LIST_1,
            MM_PTE_LIST_2,
            MM_PTE_LIST_4,
            MM_PTE_LIST_8,
            MM_PTE_LIST_16
#endif
        };

        MmTotalSystemPtes = (ULONG) NumberOfPtes;

        TotalPtes = 0;
        TotalChunks = 0;

        KeInitializeSpinLock (&MiSystemPteSListHeadLock);
        InitializeSListHead (&MiSystemPteSListHead);

        for (i = 0; i < MM_SYS_PTE_TABLES_MAX; i += 1) {
            TotalPtes += (Lists[i] * MmSysPteIndex[i]);
            TotalChunks += Lists[i];
        }

        SListBytes = TotalChunks * sizeof (PTE_SLIST);
        SListBytes = MI_ROUND_TO_SIZE (SListBytes, PAGE_SIZE);
        SListEntries = (ULONG)(SListBytes / sizeof (PTE_SLIST));

        SListChunks = (PPTE_SLIST) ExAllocatePoolWithTag (NonPagedPool,
                                                          SListBytes,
                                                          'PSmM');

        if (SListChunks == NULL) {
            MiIssueNoPtesBugcheck (TotalPtes, SystemPteSpace);
        }

        ASSERT (MiPteSListExpand.Active == FALSE);
        ASSERT (MiPteSListExpand.SListPages == 0);

        MiPteSListExpand.SListPages = (ULONG)(SListBytes / PAGE_SIZE);
        ASSERT (MiPteSListExpand.SListPages != 0);

        //
        // Carve up the pages into SLIST entries (with no pool headers).
        //

        Chunk = SListChunks;
        for (i = 0; i < SListEntries; i += 1) {
            InterlockedPushEntrySList (&MiSystemPteSListHead,
                                       (PSLIST_ENTRY)Chunk);
            Chunk += 1;
        }

        //
        // Now that the SLIST is populated, initialize the nonblocking heads.
        //

        for (i = 0; i < MM_SYS_PTE_TABLES_MAX; i += 1) {
            MiSystemPteNBHead[i] = ExInitializeNBQueueHead (&MiSystemPteSListHead);

            if (MiSystemPteNBHead[i] == NULL) {
                MiIssueNoPtesBugcheck (TotalPtes, SystemPteSpace);
            }
        }

        if (MmTrackPtes & 0x2) {

            //
            // Allocate PTE mapping verification bitmaps.
            //

            ULONG BitmapSize;

#if defined(_WIN64)
            BitmapSize = (ULONG) MmNumberOfSystemPtes;
            MiPteStart = MmSystemPtesStart[SystemPteSpace];
#else
            MiPteStart = MiGetPteAddress (MmSystemRangeStart);
            BitmapSize = ((ULONG_PTR)PTE_TOP + 1) - (ULONG_PTR) MiPteStart;
            BitmapSize /= sizeof (MMPTE);
#endif

            MiCreateBitMap (&MiPteStartBitmap, BitmapSize, NonPagedPool);

            if (MiPteStartBitmap != NULL) {
                MiCreateBitMap (&MiPteEndBitmap, BitmapSize, NonPagedPool);

                if (MiPteEndBitmap == NULL) {
                    ExFreePool (MiPteStartBitmap);
                    MiPteStartBitmap = NULL;
                }
            }

            if ((MiPteStartBitmap != NULL) && (MiPteEndBitmap != NULL)) {
                RtlClearAllBits (MiPteStartBitmap);
                RtlClearAllBits (MiPteEndBitmap);
            }

            //
            // Disable tracking while the by-size lists are seeded below -
            // the binned releases all come from a single reservation and
            // would otherwise trip the verification.
            //

            MmTrackPtes &= ~0x2;
        }

        //
        // Initialize the by-size lists.
        //

        PointerPte = MiReserveSystemPtes (TotalPtes, SystemPteSpace);

        if (PointerPte == NULL) {
            MiIssueNoPtesBugcheck (TotalPtes, SystemPteSpace);
        }

        i = MM_SYS_PTE_TABLES_MAX;

        do {
            i -= 1;
            do {
                Lists[i] -= 1;
                MiReleaseSystemPtes (PointerPte,
                                     MmSysPteIndex[i],
                                     SystemPteSpace);
                PointerPte += MmSysPteIndex[i];
            } while (Lists[i] != 0);
        } while (i != 0);

        //
        // Turn tracking back on after the multiple releases of the binned
        // PTEs (which came from a single reservation) above.
        //

        if (MiPteStartBitmap != NULL) {
            MmTrackPtes |= 0x2;
        }
    }

    return;
}

VOID
MiIncrementSystemPtes (
    IN ULONG NumberOfPtes
    )

/*++

Routine Description:

    This routine increments the total number of PTEs.  This is done
    separately from actually adding the PTEs to the pool so that
    autoconfiguration can use the high number in advance of the PTEs
    actually getting added.

Arguments:

    NumberOfPtes - Supplies the number of PTEs to increment the total by.

Return Value:

    None.

Environment:

    Kernel mode.  Synchronization provided by the caller.

--*/

{
    MmTotalSystemPtes += NumberOfPtes;
}

VOID
MiAddSystemPtes (
    IN PMMPTE StartingPte,
    IN ULONG NumberOfPtes,
    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
    )

/*++

Routine Description:

    This routine adds newly created PTEs to the specified pool.

Arguments:

    StartingPte - Supplies the address of the first PTE to put in the pool.

    NumberOfPtes - Supplies the number of PTEs to put in the pool.

    SystemPtePoolType - Supplies the PTE type of the pool to expand, one of
                        SystemPteSpace or NonPagedPoolExpansion.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    PMMPTE EndingPte;

    ASSERT (SystemPtePoolType == SystemPteSpace);

    EndingPte = StartingPte + NumberOfPtes - 1;

    if (StartingPte < MmSystemPtesStart[SystemPtePoolType]) {
        MmSystemPtesStart[SystemPtePoolType] = StartingPte;
    }

    if (EndingPte > MmSystemPtesEnd[SystemPtePoolType]) {
        MmSystemPtesEnd[SystemPtePoolType] = EndingPte;
    }

    //
    // Set the low bit to signify this range was never reserved and therefore
    // should not be validated during the release.
    //

    if (MmTrackPtes & 0x2) {
        StartingPte = (PMMPTE) ((ULONG_PTR)StartingPte | 0x1);
    }

    MiReleaseSystemPtes (StartingPte, NumberOfPtes, SystemPtePoolType);
}
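
//
// N.B. (illustrative, not from the original comments): MMPTE entries are
// 4 or 8 bytes and naturally aligned, so the low bit of a PTE address is
// normally zero and can carry the "never reserved" flag in-band.
// MiReleaseSystemPtes is presumed to strip the bit and skip the release
// validation for ranges tagged this way.
//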

ULONG
MiGetSystemPteListCount (
    IN ULONG ListSize
    )

/*++

Routine Description:

    This routine returns the number of free entries on the list which
    covers the specified size.  The size must be less than or equal to the
    largest list index.

Arguments:

    ListSize - Supplies the number of PTEs needed.

Return Value:

    Number of free entries on the list which contains ListSize PTEs.

Environment:

    Kernel mode.

--*/

{
    ULONG Index;

    ASSERT (ListSize <= MM_PTE_TABLE_LIMIT);

    Index = MmSysPteTables [ListSize];

    return MmSysPteListBySizeCount[Index];
}
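
//
// Illustrative note: MmSysPteTables maps a raw request size to the index
// of the smallest bin that can satisfy it, so a request that falls between
// bin sizes is counted against the next larger bin - e.g. a 3-PTE request
// reports the free count of the 4-PTE bin.
//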

LOGICAL
MiGetSystemPteAvailability (
    IN ULONG NumberOfPtes,
    IN MM_PAGE_PRIORITY Priority
    )

/*++

Routine Description:

    This routine checks how many SystemPteSpace PTEs are available for the
    requested size.  If plenty are available, TRUE is returned.

    If we are reaching a low resource situation, the request is evaluated
    based on the argument priority.

Arguments:

    NumberOfPtes - Supplies the number of PTEs needed.

    Priority - Supplies the priority of the request.

Return Value:

    TRUE if the caller should allocate the PTEs, FALSE if not.

Environment:

    Kernel mode.

--*/

{
    ULONG Index;
    ULONG FreePtes;
    ULONG FreeBinnedPtes;

    ASSERT (Priority != HighPagePriority);

    FreePtes = MmTotalFreeSystemPtes[SystemPteSpace];

    if (NumberOfPtes <= MM_PTE_TABLE_LIMIT) {
        Index = MmSysPteTables [NumberOfPtes];
        FreeBinnedPtes = MmSysPteListBySizeCount[Index];

        if (FreeBinnedPtes > MmSysPteMinimumFree[Index]) {
            return TRUE;
        }

        if (FreeBinnedPtes != 0) {

            if (Priority == NormalPagePriority) {
                if (FreeBinnedPtes > 1 || FreePtes > 512) {
                    return TRUE;
                }

#if defined (_X86_)
                if (MiRecoverExtraPtes () == TRUE) {
                    return TRUE;
                }

                if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
                    return TRUE;
                }
#endif

                MmPteFailures[SystemPteSpace] += 1;
                return FALSE;
            }

            if (FreePtes > 2048) {
                return TRUE;
            }

#if defined (_X86_)
            if (MiRecoverExtraPtes () == TRUE) {
                return TRUE;
            }

            if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
                return TRUE;
            }
#endif

            MmPteFailures[SystemPteSpace] += 1;
            return FALSE;
        }
    }

    if (Priority == NormalPagePriority) {
        if ((LONG)NumberOfPtes < (LONG)FreePtes - 512) {
            return TRUE;
        }

#if defined (_X86_)
        if (MiRecoverExtraPtes () == TRUE) {
            return TRUE;
        }

        if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
            return TRUE;
        }
#endif

        MmPteFailures[SystemPteSpace] += 1;
        return FALSE;
    }

    if ((LONG)NumberOfPtes < (LONG)FreePtes - 2048) {
        return TRUE;
    }

#if defined (_X86_)
    if (MiRecoverExtraPtes () == TRUE) {
        return TRUE;
    }

    if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
        return TRUE;
    }
#endif

    MmPteFailures[SystemPteSpace] += 1;
    return FALSE;
}
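
//
// Usage sketch (hypothetical caller, compiled out): a low priority caller
// probes availability before reserving so it can back off cleanly under
// PTE pressure.  Note the check is advisory - no lock is held between the
// check and the reservation, so the reservation can still fail and must
// be handled.
//

#if 0
    if (MiGetSystemPteAvailability (NumberOfPtes, LowPagePriority) == TRUE) {
        PointerPte = MiReserveSystemPtes (NumberOfPtes, SystemPteSpace);
        if (PointerPte == NULL) {
            return STATUS_INSUFFICIENT_RESOURCES;
        }
    }
#endif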

VOID
MiCheckPteReserve (
    IN PMMPTE PointerPte,
    IN ULONG NumberOfPtes
    )

/*++

Routine Description:

    This function validates and records the reservation of the specified
    number of system space PTEs in the tracking bitmaps.

Arguments:

    PointerPte - Supplies the address of the first PTE to reserve.

    NumberOfPtes - Supplies the number of PTEs to reserve.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    ULONG i;
    KIRQL OldIrql;
    ULONG StartBit;
    PULONG StartBitMapBuffer;
    PULONG EndBitMapBuffer;
    PVOID VirtualAddress;

    ASSERT (MmTrackPtes & 0x2);

    VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);

    if (NumberOfPtes == 0) {
        KeBugCheckEx (SYSTEM_PTE_MISUSE,
                      0x200,
                      (ULONG_PTR) VirtualAddress,
                      0,
                      0);
    }

    StartBit = (ULONG) (PointerPte - MiPteStart);
    i = StartBit;

    StartBitMapBuffer = MiPteStartBitmap->Buffer;
    EndBitMapBuffer = MiPteEndBitmap->Buffer;

    ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql);

    //
    // No PTE in the range may already be marked as part of another
    // allocation.
    //

    for ( ; i < StartBit + NumberOfPtes; i += 1) {
        if (MI_CHECK_BIT (StartBitMapBuffer, i)) {
            KeBugCheckEx (SYSTEM_PTE_MISUSE,
                          0x201,
                          (ULONG_PTR) VirtualAddress,
                          (ULONG_PTR) VirtualAddress + ((i - StartBit) << PAGE_SHIFT),
                          NumberOfPtes);
        }
    }

    RtlSetBits (MiPteStartBitmap, StartBit, NumberOfPtes);

    //
    // Nor may any PTE in the range carry a stale end mark.
    //

    for (i = StartBit; i < StartBit + NumberOfPtes; i += 1) {
        if (MI_CHECK_BIT (EndBitMapBuffer, i)) {
            KeBugCheckEx (SYSTEM_PTE_MISUSE,
                          0x202,
                          (ULONG_PTR) VirtualAddress,
                          (ULONG_PTR) VirtualAddress + ((i - StartBit) << PAGE_SHIFT),
                          NumberOfPtes);
        }
    }

    MI_SET_BIT (EndBitMapBuffer, i - 1);

    ExReleaseSpinLock (&MiPteTrackerLock, OldIrql);
}
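
//
// Worked example (illustrative values): reserving 4 PTEs whose first PTE
// is at tracking bit 10 leaves the bitmaps as
//
//      bit:    10 11 12 13
//      start:   1  1  1  1
//      end:     0  0  0  1
//
// so MiCheckPteRelease can later recover the allocation size by scanning
// from the start bit to the first set end bit.
//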

VOID
MiCheckPteRelease (
    IN PMMPTE StartingPte,
    IN ULONG NumberOfPtes
    )

/*++

Routine Description:

    This function validates the release of the specified number of system
    space PTEs and clears the corresponding tracking bitmap bits.

Arguments:

    StartingPte - Supplies the address of the first PTE to release.

    NumberOfPtes - Supplies the number of PTEs to release.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    ULONG i;
    ULONG Index;
    ULONG StartBit;
    KIRQL OldIrql;
    ULONG CalculatedPtes;
    ULONG NumberOfPtesRoundedUp;
    PULONG StartBitMapBuffer;
    PULONG EndBitMapBuffer;
    PVOID VirtualAddress;
    PVOID LowestVirtualAddress;
    PVOID HighestVirtualAddress;

    ASSERT (MmTrackPtes & 0x2);

    VirtualAddress = MiGetVirtualAddressMappedByPte (StartingPte);
    LowestVirtualAddress = MiGetVirtualAddressMappedByPte (MmSystemPtesStart[SystemPteSpace]);
    HighestVirtualAddress = MiGetVirtualAddressMappedByPte (MmSystemPtesEnd[SystemPteSpace]);

    if (NumberOfPtes == 0) {
        KeBugCheckEx (SYSTEM_PTE_MISUSE,
                      0x300,
                      (ULONG_PTR) VirtualAddress,
                      (ULONG_PTR) LowestVirtualAddress,
                      (ULONG_PTR) HighestVirtualAddress);
    }

    if (StartingPte < MmSystemPtesStart[SystemPteSpace]) {
        KeBugCheckEx (SYSTEM_PTE_MISUSE,
                      0x301,
                      (ULONG_PTR) VirtualAddress,
                      (ULONG_PTR) LowestVirtualAddress,
                      (ULONG_PTR) HighestVirtualAddress);
    }

    if (StartingPte > MmSystemPtesEnd[SystemPteSpace]) {
        KeBugCheckEx (SYSTEM_PTE_MISUSE,
                      0x302,
                      (ULONG_PTR) VirtualAddress,
                      (ULONG_PTR) LowestVirtualAddress,
                      (ULONG_PTR) HighestVirtualAddress);
    }

    StartBit = (ULONG) (StartingPte - MiPteStart);

    ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql);

    //
    // Verify the start and size of the allocation using the tracking
    // bitmaps.
    //

    if (!RtlCheckBit (MiPteStartBitmap, StartBit)) {
        KeBugCheckEx (SYSTEM_PTE_MISUSE,
                      0x303,
                      (ULONG_PTR) VirtualAddress,
                      NumberOfPtes,
                      0);
    }

    if (StartBit != 0) {
        if (RtlCheckBit (MiPteStartBitmap, StartBit - 1)) {
            if (!RtlCheckBit (MiPteEndBitmap, StartBit - 1)) {

                //
                // In the middle of an allocation... bugcheck.
                //

                KeBugCheckEx (SYSTEM_PTE_MISUSE,
                              0x304,
                              (ULONG_PTR) VirtualAddress,
                              NumberOfPtes,
                              0);
            }
        }
    }

    //
    // Find the last allocated PTE to calculate the correct size.
    //

    EndBitMapBuffer = MiPteEndBitmap->Buffer;

    i = StartBit;
    while (!MI_CHECK_BIT (EndBitMapBuffer, i)) {
        i += 1;
    }

    CalculatedPtes = i - StartBit + 1;
    NumberOfPtesRoundedUp = NumberOfPtes;

    if (CalculatedPtes <= MM_PTE_TABLE_LIMIT) {
        Index = MmSysPteTables [NumberOfPtes];
        NumberOfPtesRoundedUp = MmSysPteIndex [Index];
    }

    if (CalculatedPtes != NumberOfPtesRoundedUp) {
        KeBugCheckEx (SYSTEM_PTE_MISUSE,
                      0x305,
                      (ULONG_PTR) VirtualAddress,
                      NumberOfPtes,
                      CalculatedPtes);
    }

    StartBitMapBuffer = MiPteStartBitmap->Buffer;

    for (i = StartBit; i < StartBit + CalculatedPtes; i += 1) {
        if (MI_CHECK_BIT (StartBitMapBuffer, i) == 0) {
            KeBugCheckEx (SYSTEM_PTE_MISUSE,
                          0x306,
                          (ULONG_PTR) VirtualAddress,
                          (ULONG_PTR) VirtualAddress + ((i - StartBit) << PAGE_SHIFT),
                          CalculatedPtes);
        }
    }

    RtlClearBits (MiPteStartBitmap, StartBit, CalculatedPtes);
    MI_CLEAR_BIT (EndBitMapBuffer, i - 1);

    ExReleaseSpinLock (&MiPteTrackerLock, OldIrql);
}
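
//
// Worked example (illustrative): a binned reservation for, say, 5 PTEs
// actually hands out the next bin size up.  On release, NumberOfPtes (5)
// is rounded up through MmSysPteTables and MmSysPteIndex to that same bin
// size, and the result must match the size recovered from the end bitmap
// (CalculatedPtes) - any mismatch is reported as SYSTEM_PTE_MISUSE 0x305.
//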

#if DBG

VOID
MiDumpSystemPtes (
    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
    )
{
    PMMPTE PointerPte;
    PMMPTE PointerNextPte;
    ULONG_PTR ClusterSize;
    PMMPTE EndOfCluster;

    PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];

    if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
        return;
    }

    PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;

    for (;;) {
        if (PointerPte->u.List.OneEntry) {
            ClusterSize = 1;
        }
        else {
            PointerNextPte = PointerPte + 1;
            ClusterSize = (ULONG_PTR) PointerNextPte->u.List.NextEntry;
        }

        EndOfCluster = PointerPte + (ClusterSize - 1);

        DbgPrint("System Pte at %p for %p entries (%p)\n",
                 PointerPte, ClusterSize, EndOfCluster);

        if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
            break;
        }

        PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
    }

    return;
}

ULONG
MiCountFreeSystemPtes (
    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
    )
{
    PMMPTE PointerPte;
    PMMPTE PointerNextPte;
    ULONG_PTR ClusterSize;
    ULONG_PTR FreeCount;

    PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];

    if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
        return 0;
    }

    FreeCount = 0;
    PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;

    for (;;) {
        if (PointerPte->u.List.OneEntry) {
            ClusterSize = 1;
        }
        else {
            PointerNextPte = PointerPte + 1;
            ClusterSize = (ULONG_PTR) PointerNextPte->u.List.NextEntry;
        }

        FreeCount += ClusterSize;

        if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
            break;
        }

        PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
    }

    return (ULONG)FreeCount;
}

#endif