Source code of Windows XP (NT5): sysptes.c

  1. /*++
  2. Copyright (c) 1989 Microsoft Corporation
  3. Module Name:
  4. sysptes.c
  5. Abstract:
  6. This module contains the routines which reserve and release
  7. system wide PTEs reserved within the non paged portion of the
  8. system space. These PTEs are used for mapping I/O devices
  9. and mapping kernel stacks for threads.
  10. Author:
  11. Lou Perazzoli (loup) 6-Apr-1989
  12. Landy Wang (landyw) 02-June-1997
  13. Revision History:
  14. --*/
  15. #include "mi.h"
  16. VOID
  17. MiFeedSysPtePool (
  18. IN ULONG Index
  19. );
  20. ULONG
  21. MiGetSystemPteListCount (
  22. IN ULONG ListSize
  23. );
  24. VOID
  25. MiPteSListExpansionWorker (
  26. IN PVOID Context
  27. );
  28. #ifdef ALLOC_PRAGMA
  29. #pragma alloc_text(INIT,MiInitializeSystemPtes)
  30. #pragma alloc_text(PAGE,MiPteSListExpansionWorker)
  31. #pragma alloc_text(MISYSPTE,MiReserveAlignedSystemPtes)
  32. #pragma alloc_text(MISYSPTE,MiReserveSystemPtes)
  33. #pragma alloc_text(MISYSPTE,MiFeedSysPtePool)
  34. #pragma alloc_text(MISYSPTE,MiReleaseSystemPtes)
  35. #pragma alloc_text(MISYSPTE,MiGetSystemPteListCount)
  36. #endif
  37. ULONG MmTotalSystemPtes;
  38. ULONG MmTotalFreeSystemPtes[MaximumPtePoolTypes];
  39. PMMPTE MmSystemPtesStart[MaximumPtePoolTypes];
  40. PMMPTE MmSystemPtesEnd[MaximumPtePoolTypes];
  41. ULONG MmPteFailures[MaximumPtePoolTypes];
  42. PMMPTE MiPteStart;
  43. PRTL_BITMAP MiPteStartBitmap;
  44. PRTL_BITMAP MiPteEndBitmap;
  45. extern KSPIN_LOCK MiPteTrackerLock;
  46. ULONG MiSystemPteAllocationFailed;
  47. #if defined(_IA64_)
  48. //
  49. // IA64 has an 8k page size.
  50. //
  51. // Mm cluster MDLs consume 8 pages.
  52. // Small stacks consume 9 pages (including backing store and guard pages).
  53. // Large stacks consume 22 pages (including backing store and guard pages).
  54. //
  55. // PTEs are binned at sizes 1, 2, 4, 8, 9 and 23.
  56. //
  57. #define MM_SYS_PTE_TABLES_MAX 6
  58. //
  59. // Make sure when changing MM_PTE_TABLE_LIMIT that you also increase the
  60. // number of entries in MmSysPteTables.
  61. //
  62. #define MM_PTE_TABLE_LIMIT 23
  63. ULONG MmSysPteIndex[MM_SYS_PTE_TABLES_MAX] = {1,2,4,8,9,MM_PTE_TABLE_LIMIT};
  64. UCHAR MmSysPteTables[MM_PTE_TABLE_LIMIT+1] = {0,0,1,2,2,3,3,3,3,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5};
  65. ULONG MmSysPteMinimumFree [MM_SYS_PTE_TABLES_MAX] = {100,50,30,20,20,20};
  66. #elif defined (_AMD64_)
  67. //
  68. // AMD64 has a 4k page size.
  69. // Small stacks consume 6 pages (including the guard page).
  70. // Large stacks consume 16 pages (including the guard page).
  71. //
  72. // PTEs are binned at sizes 1, 2, 4, 6, 8, and 16.
  73. //
  74. #define MM_SYS_PTE_TABLES_MAX 6
  75. #define MM_PTE_TABLE_LIMIT 16
  76. ULONG MmSysPteIndex[MM_SYS_PTE_TABLES_MAX] = {1,2,4,6,8,MM_PTE_TABLE_LIMIT};
  77. UCHAR MmSysPteTables[MM_PTE_TABLE_LIMIT+1] = {0,0,1,2,2,3,3,4,4,5,5,5,5,5,5,5,5};
  78. ULONG MmSysPteMinimumFree [MM_SYS_PTE_TABLES_MAX] = {100,50,30,100,20,20};
  79. #else
  80. //
  81. // x86 has a 4k page size.
  82. // Small stacks consume 4 pages (including the guard page).
  83. // Large stacks consume 16 pages (including the guard page).
  84. //
  85. // PTEs are binned at sizes 1, 2, 4, 8, and 16.
  86. //
  87. #define MM_SYS_PTE_TABLES_MAX 5
  88. #define MM_PTE_TABLE_LIMIT 16
  89. ULONG MmSysPteIndex[MM_SYS_PTE_TABLES_MAX] = {1,2,4,8,MM_PTE_TABLE_LIMIT};
  90. UCHAR MmSysPteTables[MM_PTE_TABLE_LIMIT+1] = {0,0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4};
  91. ULONG MmSysPteMinimumFree [MM_SYS_PTE_TABLES_MAX] = {100,50,30,20,20};
  92. #endif
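//
// Editor's sketch, not part of the original file: how a small request is
// rounded up to one of the bins above. MmSysPteTables maps a requested PTE
// count to a bin index and MmSysPteIndex gives that bin's size; the helper
// name below is hypothetical.
//
#if 0   // illustrative only, never compiled
static ULONG
MiSketchRoundToBin (
    IN ULONG NumberOfPtes
    )
{
    ULONG Index;

    ASSERT (NumberOfPtes <= MM_PTE_TABLE_LIMIT);

    Index = MmSysPteTables [NumberOfPtes];      // e.g. on x86, 5 -> bin index 3
    return MmSysPteIndex [Index];               // so a 5-PTE request becomes 8
}
#endif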
  93. KSPIN_LOCK MiSystemPteSListHeadLock;
  94. SLIST_HEADER MiSystemPteSListHead;
  95. #define MM_MIN_SYSPTE_FREE 500
  96. #define MM_MAX_SYSPTE_FREE 3000
  97. ULONG MmSysPteListBySizeCount [MM_SYS_PTE_TABLES_MAX];
  98. //
  99. // Initial sizes for PTE lists.
  100. //
  101. #define MM_PTE_LIST_1 400
  102. #define MM_PTE_LIST_2 100
  103. #define MM_PTE_LIST_4 60
  104. #define MM_PTE_LIST_6 100
  105. #define MM_PTE_LIST_8 50
  106. #define MM_PTE_LIST_9 50
  107. #define MM_PTE_LIST_16 40
  108. #define MM_PTE_LIST_18 40
  109. PVOID MiSystemPteNBHead[MM_SYS_PTE_TABLES_MAX];
  110. LONG MiSystemPteFreeCount[MM_SYS_PTE_TABLES_MAX];
  111. #if defined(_WIN64)
  112. #define MI_MAXIMUM_SLIST_PTE_PAGES 16
  113. #else
  114. #define MI_MAXIMUM_SLIST_PTE_PAGES 8
  115. #endif
  116. typedef struct _MM_PTE_SLIST_EXPANSION_WORK_CONTEXT {
  117. WORK_QUEUE_ITEM WorkItem;
  118. LONG Active;
  119. ULONG SListPages;
  120. } MM_PTE_SLIST_EXPANSION_WORK_CONTEXT, *PMM_PTE_SLIST_EXPANSION_WORK_CONTEXT;
  121. MM_PTE_SLIST_EXPANSION_WORK_CONTEXT MiPteSListExpand;
  122. VOID
  123. MiFeedSysPtePool (
  124. IN ULONG Index
  125. );
  126. VOID
  127. MiDumpSystemPtes (
  128. IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
  129. );
  130. ULONG
  131. MiCountFreeSystemPtes (
  132. IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
  133. );
  134. PVOID
  135. MiGetHighestPteConsumer (
  136. OUT PULONG_PTR NumberOfPtes
  137. );
  138. VOID
  139. MiCheckPteReserve (
  140. IN PMMPTE StartingPte,
  141. IN ULONG NumberOfPtes
  142. );
  143. VOID
  144. MiCheckPteRelease (
  145. IN PMMPTE StartingPte,
  146. IN ULONG NumberOfPtes
  147. );
  148. //
  149. // Define inline functions to pack and unpack pointers in the platform
  150. // specific non-blocking queue pointer structure.
  151. //
  152. typedef struct _PTE_SLIST {
  153. union {
  154. struct {
  155. SINGLE_LIST_ENTRY ListEntry;
  156. } Slist;
  157. NBQUEUE_BLOCK QueueBlock;
  158. } u1;
  159. } PTE_SLIST, *PPTE_SLIST;
  160. #if defined (_AMD64_)
  161. typedef union _PTE_QUEUE_POINTER {
  162. struct {
  163. LONG64 PointerPte : 48;
  164. LONG64 TimeStamp : 16;
  165. };
  166. LONG64 Data;
  167. } PTE_QUEUE_POINTER, *PPTE_QUEUE_POINTER;
  168. #elif defined(_X86_)
  169. typedef union _PTE_QUEUE_POINTER {
  170. struct {
  171. LONG PointerPte;
  172. LONG TimeStamp;
  173. };
  174. LONG64 Data;
  175. } PTE_QUEUE_POINTER, *PPTE_QUEUE_POINTER;
  176. #elif defined(_IA64_)
  177. typedef union _PTE_QUEUE_POINTER {
  178. struct {
  179. ULONG64 PointerPte : 45;
  180. ULONG64 Region : 3;
  181. ULONG64 TimeStamp : 16;
  182. };
  183. LONG64 Data;
  184. } PTE_QUEUE_POINTER, *PPTE_QUEUE_POINTER;
  185. #else
  186. #error "no target architecture"
  187. #endif
  188. #if defined(_AMD64_)
  189. __inline
  190. VOID
  191. PackPTEValue (
  192. IN PPTE_QUEUE_POINTER Entry,
  193. IN PMMPTE PointerPte,
  194. IN ULONG TimeStamp
  195. )
  196. {
  197. Entry->PointerPte = (LONG64)PointerPte;
  198. Entry->TimeStamp = (LONG64)TimeStamp;
  199. return;
  200. }
  201. __inline
  202. PMMPTE
  203. UnpackPTEPointer (
  204. IN PPTE_QUEUE_POINTER Entry
  205. )
  206. {
  207. return (PMMPTE)(Entry->PointerPte);
  208. }
  209. __inline
  210. ULONG
  211. MiReadTbFlushTimeStamp (
  212. VOID
  213. )
  214. {
  215. return (KeReadTbFlushTimeStamp() & (ULONG)0xFFFF);
  216. }
  217. #elif defined(_X86_)
  218. __inline
  219. VOID
  220. PackPTEValue (
  221. IN PPTE_QUEUE_POINTER Entry,
  222. IN PMMPTE PointerPte,
  223. IN ULONG TimeStamp
  224. )
  225. {
  226. Entry->PointerPte = (LONG)PointerPte;
  227. Entry->TimeStamp = (LONG)TimeStamp;
  228. return;
  229. }
  230. __inline
  231. PMMPTE
  232. UnpackPTEPointer (
  233. IN PPTE_QUEUE_POINTER Entry
  234. )
  235. {
  236. return (PMMPTE)(Entry->PointerPte);
  237. }
  238. __inline
  239. ULONG
  240. MiReadTbFlushTimeStamp (
  241. VOID
  242. )
  243. {
  244. return (KeReadTbFlushTimeStamp());
  245. }
  246. #elif defined(_IA64_)
  247. __inline
  248. VOID
  249. PackPTEValue (
  250. IN PPTE_QUEUE_POINTER Entry,
  251. IN PMMPTE PointerPte,
  252. IN ULONG TimeStamp
  253. )
  254. {
  255. Entry->PointerPte = (ULONG64)PointerPte - PTE_BASE;
  256. Entry->TimeStamp = (ULONG64)TimeStamp;
  257. Entry->Region = (ULONG64)PointerPte >> 61;
  258. return;
  259. }
  260. __inline
  261. PMMPTE
  262. UnpackPTEPointer (
  263. IN PPTE_QUEUE_POINTER Entry
  264. )
  265. {
  266. LONG64 Value;
  267. Value = (ULONG64)Entry->PointerPte + PTE_BASE;
  268. Value |= Entry->Region << 61;
  269. return (PMMPTE)(Value);
  270. }
  271. __inline
  272. ULONG
  273. MiReadTbFlushTimeStamp (
  274. VOID
  275. )
  276. {
  277. return (KeReadTbFlushTimeStamp() & (ULONG)0xFFFF);
  278. }
  279. #else
  280. #error "no target architecture"
  281. #endif
  282. __inline
  283. ULONG
  284. UnpackPTETimeStamp (
  285. IN PPTE_QUEUE_POINTER Entry
  286. )
  287. {
  288. return (ULONG)(Entry->TimeStamp);
  289. }
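//
// Editor's sketch, not part of the original file: the pack/unpack helpers
// above round-trip a PTE pointer and a TB flush timestamp through the 64-bit
// queue word. The function name and the pointer/timestamp values below are
// hypothetical.
//
#if 0   // illustrative only, never compiled
static VOID
MiSketchPackUnpack (
    VOID
    )
{
    PTE_QUEUE_POINTER Value;
    PMMPTE PointerPte = (PMMPTE) 0xC0300000;    // hypothetical PTE address
    ULONG TimeStamp = 0x1234;                   // hypothetical flush counter

    PackPTEValue (&Value, PointerPte, TimeStamp);

    ASSERT (UnpackPTEPointer (&Value) == PointerPte);
    ASSERT (UnpackPTETimeStamp (&Value) == TimeStamp);
}
#endif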
  290. PMMPTE
  291. MiReserveSystemPtes (
  292. IN ULONG NumberOfPtes,
  293. IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
  294. )
  295. /*++
  296. Routine Description:
  297. This function locates the specified number of unused PTEs
  298. within the non paged portion of system space.
  299. Arguments:
  300. NumberOfPtes - Supplies the number of PTEs to locate.
  301. SystemPtePoolType - Supplies the PTE type of the pool to expand, one of
  302. SystemPteSpace or NonPagedPoolExpansion.
  303. Return Value:
  304. Returns the address of the first PTE located.
  305. NULL if no system PTEs can be located.
  306. Environment:
  307. Kernel mode, DISPATCH_LEVEL or below.
  308. --*/
  309. {
  310. PMMPTE PointerPte;
  311. ULONG Index;
  312. ULONG TimeStamp;
  313. PTE_QUEUE_POINTER Value;
  314. #if DBG
  315. ULONG j;
  316. PMMPTE PointerFreedPte;
  317. #endif
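//
// Small SystemPteSpace requests (up to MM_PTE_TABLE_LIMIT PTEs) are rounded
// up to a bin size and satisfied from the per-size nonblocking queues below;
// larger requests, or a queue miss, fall through to the general free-list
// search in MiReserveAlignedSystemPtes.
//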
  318. if (SystemPtePoolType == SystemPteSpace) {
  319. if (NumberOfPtes <= MM_PTE_TABLE_LIMIT) {
  320. Index = MmSysPteTables [NumberOfPtes];
  321. ASSERT (NumberOfPtes <= MmSysPteIndex[Index]);
  322. if (ExRemoveHeadNBQueue (MiSystemPteNBHead[Index], (PULONG64)&Value) == TRUE) {
  323. InterlockedDecrement ((PLONG)&MmSysPteListBySizeCount[Index]);
  324. PointerPte = UnpackPTEPointer (&Value);
  325. TimeStamp = UnpackPTETimeStamp (&Value);
  326. #if DBG
  327. PointerPte->u.List.NextEntry = 0xABCDE;
  328. if (MmDebug & MM_DBG_SYS_PTES) {
  329. PointerFreedPte = PointerPte;
  330. for (j = 0; j < MmSysPteIndex[Index]; j += 1) {
  331. ASSERT (PointerFreedPte->u.Hard.Valid == 0);
  332. PointerFreedPte += 1;
  333. }
  334. }
  335. #endif
  336. ASSERT (PointerPte >= MmSystemPtesStart[SystemPtePoolType]);
  337. ASSERT (PointerPte <= MmSystemPtesEnd[SystemPtePoolType]);
  338. if (MmSysPteListBySizeCount[Index] < MmSysPteMinimumFree[Index]) {
  339. MiFeedSysPtePool (Index);
  340. }
  341. //
  342. // The last thing is to check whether the TB needs flushing.
  343. //
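//
// The timestamp packed with this entry is the TB flush counter captured when
// the PTEs were released. If it still matches the current counter, no TB
// flush has occurred since the release, so stale translations for these
// pages may still be cached and the entire TB is flushed before reuse.
//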
  344. if (TimeStamp == MiReadTbFlushTimeStamp()) {
  345. KeFlushEntireTb (TRUE, TRUE);
  346. }
  347. if (MmTrackPtes & 0x2) {
  348. MiCheckPteReserve (PointerPte, MmSysPteIndex[Index]);
  349. }
  350. return PointerPte;
  351. }
  352. //
  353. // Fall through and go the long way to satisfy the PTE request.
  354. //
  355. NumberOfPtes = MmSysPteIndex [Index];
  356. }
  357. }
  358. //
  359. // Acquire the system space lock to synchronize access to this
  360. // routine.
  361. //
  362. PointerPte = MiReserveAlignedSystemPtes (NumberOfPtes,
  363. SystemPtePoolType,
  364. 0);
  365. #if DBG
  366. if (MmDebug & MM_DBG_SYS_PTES) {
  367. if (PointerPte != NULL) {
  368. PointerFreedPte = PointerPte;
  369. for (j = 0; j < NumberOfPtes; j += 1) {
  370. ASSERT (PointerFreedPte->u.Hard.Valid == 0);
  371. PointerFreedPte += 1;
  372. }
  373. }
  374. }
  375. #endif
  376. if (PointerPte == NULL) {
  377. MiSystemPteAllocationFailed += 1;
  378. }
  379. return PointerPte;
  380. }
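//
// Editor's sketch, not part of the original file: the usual caller pattern
// for the routine above. The function name is hypothetical and the step that
// builds valid PTEs for the physical pages being mapped is elided.
//
#if 0   // illustrative only, never compiled
static VOID
MiSketchReserveRelease (
    VOID
    )
{
    PMMPTE PointerPte;
    PVOID BaseAddress;

    PointerPte = MiReserveSystemPtes (2, SystemPteSpace);

    if (PointerPte != NULL) {

        BaseAddress = MiGetVirtualAddressMappedByPte (PointerPte);

        //
        // ... build valid PTEs for the pages being mapped, use BaseAddress ...
        //

        MiReleaseSystemPtes (PointerPte, 2, SystemPteSpace);
    }
}
#endif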
  381. VOID
  382. MiFeedSysPtePool (
  383. IN ULONG Index
  384. )
  385. /*++
  386. Routine Description:
  387. This routine adds PTEs to the nonblocking queue lists.
  388. Arguments:
  389. Index - Supplies the index for the nonblocking queue list to fill.
  390. Return Value:
  391. None.
  392. Environment:
  393. Kernel mode, internal to SysPtes.
  394. --*/
  395. {
  396. ULONG i;
  397. PMMPTE PointerPte;
  398. if (MmTotalFreeSystemPtes[SystemPteSpace] < MM_MIN_SYSPTE_FREE) {
  399. #if defined (_X86_)
  400. if (MiRecoverExtraPtes () == FALSE) {
  401. MiRecoverSpecialPtes (PTE_PER_PAGE);
  402. }
  403. #endif
  404. return;
  405. }
  406. for (i = 0; i < 10 ; i += 1) {
  407. PointerPte = MiReserveAlignedSystemPtes (MmSysPteIndex [Index],
  408. SystemPteSpace,
  409. 0);
  410. if (PointerPte == NULL) {
  411. return;
  412. }
  413. MiReleaseSystemPtes (PointerPte,
  414. MmSysPteIndex [Index],
  415. SystemPteSpace);
  416. }
  417. return;
  418. }
  419. PMMPTE
  420. MiReserveAlignedSystemPtes (
  421. IN ULONG NumberOfPtes,
  422. IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType,
  423. IN ULONG Alignment
  424. )
  425. /*++
  426. Routine Description:
  427. This function locates the specified number of unused PTEs
  428. within the non paged portion of system space.
  429. Arguments:
  430. NumberOfPtes - Supplies the number of PTEs to locate.
  431. SystemPtePoolType - Supplies the PTE type of the pool to expand, one of
  432. SystemPteSpace or NonPagedPoolExpansion.
  433. Alignment - Supplies the virtual address alignment for the address
  434. the returned PTE maps. For example, if the value is 64K,
  435. the returned PTE will map an address on a 64K boundary.
  436. An alignment of zero means to align on a page boundary.
  437. Return Value:
  438. Returns the address of the first PTE located.
  439. NULL if no system PTEs can be located.
  440. Environment:
  441. Kernel mode, DISPATCH_LEVEL or below.
  442. --*/
  443. {
  444. PMMPTE PointerPte;
  445. PMMPTE PointerFollowingPte;
  446. PMMPTE Previous;
  447. ULONG_PTR SizeInSet;
  448. KIRQL OldIrql;
  449. ULONG MaskSize;
  450. ULONG NumberOfRequiredPtes;
  451. ULONG OffsetSum;
  452. ULONG PtesToObtainAlignment;
  453. PMMPTE NextSetPointer;
  454. ULONG_PTR LeftInSet;
  455. ULONG_PTR PteOffset;
  456. MMPTE_FLUSH_LIST PteFlushList;
  457. MaskSize = (Alignment - 1) >> (PAGE_SHIFT - PTE_SHIFT);
  458. OffsetSum = (Alignment >> (PAGE_SHIFT - PTE_SHIFT));
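//
// For example, on non-PAE x86 (PAGE_SHIFT 12, PTE_SHIFT 2) a 64K alignment
// request yields OffsetSum = 0x10000 >> 10 = 0x40 and MaskSize = 0x3F, the
// byte span of the 16 PTEs that map one 64K region. These values are used
// below to compute how many leading PTEs must be skipped to reach the
// requested alignment.
//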
  459. #if defined (_X86_)
  460. restart:
  461. #endif
  462. //
  463. // Initializing PointerFollowingPte is not needed for correctness,
  464. // but without it the compiler cannot compile this code W4 to
  465. // check for use of uninitialized variables.
  466. //
  467. PointerFollowingPte = NULL;
  468. //
  469. // The nonpaged PTE pool uses the invalid PTEs to define the pool
  470. // structure. A global pointer points to the first free set
  471. // in the list, each free set contains the number free and a pointer
  472. // to the next free set. The free sets are kept in an ordered list
  473. // such that the pointer to the next free set is always greater
  474. // than the address of the current free set.
  475. //
  476. // So as not to limit the size of this pool, two PTEs are used
  477. // to define a free region. If the region is a single PTE, the
  478. // prototype field within the PTE is set indicating the set
  479. // consists of a single PTE.
  480. //
  481. // The page frame number field is used to define the next set
  482. // and the number free. The two flavors are:
  483. //
  484. //                           O V
  485. //                           n l
  486. //                           e d
  487. //  +-----------------------+-+----------+
  488. //  |        next set       |0|0        0|
  489. //  +-----------------------+-+----------+
  490. //  |   number in this set  |0|0        0|
  491. //  +-----------------------+-+----------+
  492. //
  493. //
  494. //  +-----------------------+-+----------+
  495. //  |        next set       |1|0        0|
  496. //  +-----------------------+-+----------+
  497. //                  ...
  498. //
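//
// Concretely, a free set of five PTEs is described by its first two entries:
// the first has OneEntry == 0 and NextEntry == the MmSystemPteBase-relative
// offset of the next free set, and the second has NextEntry == 5, the number
// of PTEs in this set. A lone free PTE instead has OneEntry == 1 and no
// size entry.
//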
  499. //
  500. // Acquire the system space lock to synchronize access.
  501. //
  502. MiLockSystemSpace(OldIrql);
  503. PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];
  504. Previous = PointerPte;
  505. if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
  506. //
  507. // End of list and none found.
  508. //
  509. MiUnlockSystemSpace(OldIrql);
  510. #if defined (_X86_)
  511. if (MiRecoverExtraPtes () == TRUE) {
  512. goto restart;
  513. }
  514. if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
  515. goto restart;
  516. }
  517. #endif
  518. MmPteFailures[SystemPtePoolType] += 1;
  519. return NULL;
  520. }
  521. PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
  522. if (Alignment <= PAGE_SIZE) {
  523. //
  524. // Don't deal with alignment issues.
  525. //
  526. while (TRUE) {
  527. if (PointerPte->u.List.OneEntry) {
  528. SizeInSet = 1;
  529. }
  530. else {
  531. PointerFollowingPte = PointerPte + 1;
  532. SizeInSet = (ULONG_PTR) PointerFollowingPte->u.List.NextEntry;
  533. }
  534. if (NumberOfPtes < SizeInSet) {
  535. //
  536. // Get the PTEs from this set and reduce the size of the
  537. // set. Note that the size of the current set cannot be 1.
  538. //
  539. if ((SizeInSet - NumberOfPtes) == 1) {
  540. //
  541. // Collapse to the single PTE format.
  542. //
  543. PointerPte->u.List.OneEntry = 1;
  544. }
  545. else {
  546. PointerFollowingPte->u.List.NextEntry = SizeInSet - NumberOfPtes;
  547. //
  548. // Get the required PTEs from the end of the set.
  549. //
  550. #if 0
  551. if (MmDebug & MM_DBG_SYS_PTES) {
  552. MiDumpSystemPtes(SystemPtePoolType);
  553. PointerFollowingPte = PointerPte + (SizeInSet - NumberOfPtes);
  554. DbgPrint("allocated 0x%lx Ptes at %p\n",NumberOfPtes,PointerFollowingPte);
  555. }
  556. #endif //0
  557. }
  558. MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;
  559. #if DBG
  560. if (MmDebug & MM_DBG_SYS_PTES) {
  561. ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
  562. MiCountFreeSystemPtes (SystemPtePoolType));
  563. }
  564. #endif
  565. MiUnlockSystemSpace(OldIrql);
  566. PointerPte = PointerPte + (SizeInSet - NumberOfPtes);
  567. goto Flush;
  568. }
  569. if (NumberOfPtes == SizeInSet) {
  570. //
  571. // Satisfy the request with this complete set and change
  572. // the list to reflect the fact that this set is gone.
  573. //
  574. Previous->u.List.NextEntry = PointerPte->u.List.NextEntry;
  575. //
  576. // Release the system PTE lock.
  577. //
  578. #if 0
  579. if (MmDebug & MM_DBG_SYS_PTES) {
  580. MiDumpSystemPtes(SystemPtePoolType);
  581. PointerFollowingPte = PointerPte + (SizeInSet - NumberOfPtes);
  582. DbgPrint("allocated 0x%lx Ptes at %lx\n",NumberOfPtes,PointerFollowingPte);
  583. }
  584. #endif //0
  585. MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;
  586. #if DBG
  587. if (MmDebug & MM_DBG_SYS_PTES) {
  588. ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
  589. MiCountFreeSystemPtes (SystemPtePoolType));
  590. }
  591. #endif
  592. MiUnlockSystemSpace(OldIrql);
  593. goto Flush;
  594. }
  595. //
  596. // Point to the next set and try again
  597. //
  598. if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
  599. //
  600. // End of list and none found.
  601. //
  602. MiUnlockSystemSpace(OldIrql);
  603. #if defined (_X86_)
  604. if (MiRecoverExtraPtes () == TRUE) {
  605. goto restart;
  606. }
  607. if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
  608. goto restart;
  609. }
  610. #endif
  611. MmPteFailures[SystemPtePoolType] += 1;
  612. return NULL;
  613. }
  614. Previous = PointerPte;
  615. PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
  616. ASSERT (PointerPte > Previous);
  617. }
  618. }
  619. else {
  620. //
  621. // Deal with the alignment issues.
  622. //
  623. while (TRUE) {
  624. if (PointerPte->u.List.OneEntry) {
  625. SizeInSet = 1;
  626. }
  627. else {
  628. PointerFollowingPte = PointerPte + 1;
  629. SizeInSet = (ULONG_PTR) PointerFollowingPte->u.List.NextEntry;
  630. }
  631. PtesToObtainAlignment = (ULONG)
  632. (((OffsetSum - ((ULONG_PTR)PointerPte & MaskSize)) & MaskSize) >>
  633. PTE_SHIFT);
  634. NumberOfRequiredPtes = NumberOfPtes + PtesToObtainAlignment;
  635. if (NumberOfRequiredPtes < SizeInSet) {
  636. //
  637. // Get the PTEs from this set and reduce the size of the
  638. // set. Note that the size of the current set cannot be 1.
  639. //
  640. // This current block will be split into 2 blocks if
  641. // the PointerPte does not match the alignment.
  642. //
  643. //
  644. // Check to see if the first PTE is on the proper
  645. // alignment, if so, eliminate this block.
  646. //
  647. LeftInSet = SizeInSet - NumberOfRequiredPtes;
  648. //
  649. // Set up the new set at the end of this block.
  650. //
  651. NextSetPointer = PointerPte + NumberOfRequiredPtes;
  652. NextSetPointer->u.List.NextEntry =
  653. PointerPte->u.List.NextEntry;
  654. PteOffset = (ULONG_PTR)(NextSetPointer - MmSystemPteBase);
  655. if (PtesToObtainAlignment == 0) {
  656. Previous->u.List.NextEntry += NumberOfRequiredPtes;
  657. }
  658. else {
  659. //
  660. // Point to the new set at the end of the block
  661. // we are giving away.
  662. //
  663. PointerPte->u.List.NextEntry = PteOffset;
  664. //
  665. // Update the size of the current set.
  666. //
  667. if (PtesToObtainAlignment == 1) {
  668. //
  669. // Collapse to the single PTE format.
  670. //
  671. PointerPte->u.List.OneEntry = 1;
  672. }
  673. else {
  674. //
  675. // Set the set size in the next PTE.
  676. //
  677. PointerFollowingPte->u.List.NextEntry =
  678. PtesToObtainAlignment;
  679. }
  680. }
  681. //
  682. // Set up the new set at the end of the block.
  683. //
  684. if (LeftInSet == 1) {
  685. NextSetPointer->u.List.OneEntry = 1;
  686. }
  687. else {
  688. NextSetPointer->u.List.OneEntry = 0;
  689. NextSetPointer += 1;
  690. NextSetPointer->u.List.NextEntry = LeftInSet;
  691. }
  692. MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;
  693. #if DBG
  694. if (MmDebug & MM_DBG_SYS_PTES) {
  695. ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
  696. MiCountFreeSystemPtes (SystemPtePoolType));
  697. }
  698. #endif
  699. MiUnlockSystemSpace(OldIrql);
  700. PointerPte = PointerPte + PtesToObtainAlignment;
  701. goto Flush;
  702. }
  703. if (NumberOfRequiredPtes == SizeInSet) {
  704. //
  705. // Satisfy the request with this complete set and change
  706. // the list to reflect the fact that this set is gone.
  707. //
  708. if (PtesToObtainAlignment == 0) {
  709. //
  710. // This block exactly satisfies the request.
  711. //
  712. Previous->u.List.NextEntry =
  713. PointerPte->u.List.NextEntry;
  714. }
  715. else {
  716. //
  717. // A portion at the start of this block remains.
  718. //
  719. if (PtesToObtainAlignment == 1) {
  720. //
  721. // Collapse to the single PTE format.
  722. //
  723. PointerPte->u.List.OneEntry = 1;
  724. }
  725. else {
  726. PointerFollowingPte->u.List.NextEntry =
  727. PtesToObtainAlignment;
  728. }
  729. }
  730. MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;
  731. #if DBG
  732. if (MmDebug & MM_DBG_SYS_PTES) {
  733. ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
  734. MiCountFreeSystemPtes (SystemPtePoolType));
  735. }
  736. #endif
  737. MiUnlockSystemSpace(OldIrql);
  738. PointerPte = PointerPte + PtesToObtainAlignment;
  739. goto Flush;
  740. }
  741. //
  742. // Point to the next set and try again.
  743. //
  744. if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
  745. //
  746. // End of list and none found.
  747. //
  748. MiUnlockSystemSpace(OldIrql);
  749. #if defined (_X86_)
  750. if (MiRecoverExtraPtes () == TRUE) {
  751. goto restart;
  752. }
  753. if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
  754. goto restart;
  755. }
  756. #endif
  757. MmPteFailures[SystemPtePoolType] += 1;
  758. return NULL;
  759. }
  760. Previous = PointerPte;
  761. PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
  762. ASSERT (PointerPte > Previous);
  763. }
  764. }
  765. Flush:
  766. if (SystemPtePoolType == SystemPteSpace) {
  767. PVOID BaseAddress;
  768. ULONG j;
  769. PteFlushList.Count = 0;
  770. Previous = PointerPte;
  771. BaseAddress = MiGetVirtualAddressMappedByPte (Previous);
  772. for (j = 0; j < NumberOfPtes; j += 1) {
  773. if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) {
  774. PteFlushList.FlushPte[PteFlushList.Count] = Previous;
  775. PteFlushList.FlushVa[PteFlushList.Count] = BaseAddress;
  776. PteFlushList.Count += 1;
  777. }
  778. //
  779. // PTEs being freed better be invalid.
  780. //
  781. ASSERT (Previous->u.Hard.Valid == 0);
  782. *Previous = ZeroKernelPte;
  783. BaseAddress = (PVOID)((PCHAR)BaseAddress + PAGE_SIZE);
  784. Previous += 1;
  785. }
  786. MiFlushPteList (&PteFlushList, TRUE, ZeroKernelPte);
  787. if (MmTrackPtes & 0x2) {
  788. MiCheckPteReserve (PointerPte, NumberOfPtes);
  789. }
  790. }
  791. return PointerPte;
  792. }
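//
// Editor's sketch, not part of the original file: an aligned reservation.
// The function name is hypothetical and the 64K value is only an example; it
// asks for 16 PTEs whose mapped virtual address starts on a 64K boundary.
//
#if 0   // illustrative only, never compiled
static VOID
MiSketchAlignedReserve (
    VOID
    )
{
    PMMPTE PointerPte;

    PointerPte = MiReserveAlignedSystemPtes (16, SystemPteSpace, 0x10000);

    if (PointerPte != NULL) {

        ASSERT (((ULONG_PTR) MiGetVirtualAddressMappedByPte (PointerPte) &
                 (0x10000 - 1)) == 0);

        MiReleaseSystemPtes (PointerPte, 16, SystemPteSpace);
    }
}
#endif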
  793. VOID
  794. MiIssueNoPtesBugcheck (
  795. IN ULONG NumberOfPtes,
  796. IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
  797. )
  798. /*++
  799. Routine Description:
  800. This function bugchecks when no PTEs are left.
  801. Arguments:
  802. SystemPtePoolType - Supplies the PTE type of the pool that is empty.
  803. NumberOfPtes - Supplies the number of PTEs requested that failed.
  804. Return Value:
  805. None.
  806. Environment:
  807. Kernel mode.
  808. --*/
  809. {
  810. PVOID HighConsumer;
  811. ULONG_PTR HighPteUse;
  812. if (SystemPtePoolType == SystemPteSpace) {
  813. HighConsumer = MiGetHighestPteConsumer (&HighPteUse);
  814. if (HighConsumer != NULL) {
  815. KeBugCheckEx (DRIVER_USED_EXCESSIVE_PTES,
  816. (ULONG_PTR)HighConsumer,
  817. HighPteUse,
  818. MmTotalFreeSystemPtes[SystemPtePoolType],
  819. MmNumberOfSystemPtes);
  820. }
  821. }
  822. KeBugCheckEx (NO_MORE_SYSTEM_PTES,
  823. (ULONG_PTR)SystemPtePoolType,
  824. NumberOfPtes,
  825. MmTotalFreeSystemPtes[SystemPtePoolType],
  826. MmNumberOfSystemPtes);
  827. }
  828. VOID
  829. MiPteSListExpansionWorker (
  830. IN PVOID Context
  831. )
  832. /*++
  833. Routine Description:
  834. This routine is the worker routine that adds additional SLIST entries
  835. for the system PTE nonblocking queues.
  836. Arguments:
  837. Context - Supplies a pointer to the MM_PTE_SLIST_EXPANSION_WORK_CONTEXT.
  838. Return Value:
  839. None.
  840. Environment:
  841. Kernel mode, PASSIVE_LEVEL.
  842. --*/
  843. {
  844. ULONG i;
  845. ULONG SListEntries;
  846. PPTE_SLIST SListChunks;
  847. PMM_PTE_SLIST_EXPANSION_WORK_CONTEXT Expansion;
  848. ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);
  849. Expansion = (PMM_PTE_SLIST_EXPANSION_WORK_CONTEXT) Context;
  850. ASSERT (Expansion->Active == 1);
  851. if (Expansion->SListPages < MI_MAXIMUM_SLIST_PTE_PAGES) {
  852. //
  853. // Allocate another page of SLIST entries for the
  854. // nonblocking PTE queues.
  855. //
  856. SListChunks = (PPTE_SLIST) ExAllocatePoolWithTag (NonPagedPool,
  857. PAGE_SIZE,
  858. 'PSmM');
  859. if (SListChunks != NULL) {
  860. //
  861. // Carve up the page into SLIST entries (with no pool headers).
  862. //
  863. Expansion->SListPages += 1;
  864. SListEntries = PAGE_SIZE / sizeof (PTE_SLIST);
  865. for (i = 0; i < SListEntries; i += 1) {
  866. InterlockedPushEntrySList (&MiSystemPteSListHead,
  867. (PSINGLE_LIST_ENTRY)SListChunks);
  868. SListChunks += 1;
  869. }
  870. }
  871. }
  872. ASSERT (Expansion->Active == 1);
  873. InterlockedExchange (&Expansion->Active, 0);
  874. }
  875. VOID
  876. MiReleaseSystemPtes (
  877. IN PMMPTE StartingPte,
  878. IN ULONG NumberOfPtes,
  879. IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
  880. )
  881. /*++
  882. Routine Description:
  883. This function releases the specified number of PTEs
  884. within the non paged portion of system space.
  885. Note that the PTEs must be invalid and the page frame number
  886. must have been set to zero.
  887. Arguments:
  888. StartingPte - Supplies the address of the first PTE to release.
  889. NumberOfPtes - Supplies the number of PTEs to release.
  890. SystemPtePoolType - Supplies the PTE type of the pool to release PTEs to,
  891. one of SystemPteSpace or NonPagedPoolExpansion.
  892. Return Value:
  893. None.
  894. Environment:
  895. Kernel mode.
  896. --*/
  897. {
  898. ULONG_PTR Size;
  899. ULONG i;
  900. ULONG_PTR PteOffset;
  901. PMMPTE PointerPte;
  902. PMMPTE PointerFollowingPte;
  903. PMMPTE NextPte;
  904. KIRQL OldIrql;
  905. ULONG Index;
  906. ULONG TimeStamp;
  907. PTE_QUEUE_POINTER Value;
  908. ULONG ExtensionInProgress;
  909. //
  910. // Check to make sure the PTE address is within bounds.
  911. //
  912. ASSERT (NumberOfPtes != 0);
  913. ASSERT (StartingPte >= MmSystemPtesStart[SystemPtePoolType]);
  914. ASSERT (StartingPte <= MmSystemPtesEnd[SystemPtePoolType]);
  915. if ((MmTrackPtes & 0x2) && (SystemPtePoolType == SystemPteSpace)) {
  916. //
  917. // If the low bit is set, this range was never reserved and therefore
  918. // should not be validated during the release.
  919. //
  920. if ((ULONG_PTR)StartingPte & 0x1) {
  921. StartingPte = (PMMPTE) ((ULONG_PTR)StartingPte & ~0x1);
  922. }
  923. else {
  924. MiCheckPteRelease (StartingPte, NumberOfPtes);
  925. }
  926. }
  927. //
  928. // Zero PTEs.
  929. //
  930. MiFillMemoryPte (StartingPte,
  931. NumberOfPtes * sizeof (MMPTE),
  932. ZeroKernelPte.u.Long);
  933. if ((SystemPtePoolType == SystemPteSpace) &&
  934. (NumberOfPtes <= MM_PTE_TABLE_LIMIT)) {
  935. //
  936. // Encode the PTE pointer and the TB flush counter into Value.
  937. //
  938. TimeStamp = KeReadTbFlushTimeStamp();
  939. PackPTEValue (&Value, StartingPte, TimeStamp);
  940. Index = MmSysPteTables [NumberOfPtes];
  941. ASSERT (NumberOfPtes <= MmSysPteIndex [Index]);
  942. //
  943. // N.B. NumberOfPtes must be set here regardless so if this entry
  944. // is not inserted into the nonblocking list, the PTE count will still
  945. // be right when we go the long way.
  946. //
  947. NumberOfPtes = MmSysPteIndex [Index];
  948. if (MmTotalFreeSystemPtes[SystemPteSpace] >= MM_MIN_SYSPTE_FREE) {
  949. //
  950. // Add to the pool if the size is less than 15 + the minimum.
  951. //
  952. i = MmSysPteMinimumFree[Index];
  953. if (MmTotalFreeSystemPtes[SystemPteSpace] >= MM_MAX_SYSPTE_FREE) {
  954. //
  955. // Lots of free PTEs, quadruple the limit.
  956. //
  957. i = i * 4;
  958. }
  959. i += 15;
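//
// For example, the 1-PTE bin on x86 has a minimum free target of 100, so its
// nonblocking list is allowed to grow to roughly 115 entries, or 415 when
// system PTEs are plentiful.
//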
  960. if (MmSysPteListBySizeCount[Index] <= i) {
  961. if (ExInsertTailNBQueue (MiSystemPteNBHead[Index], Value.Data) == TRUE) {
  962. InterlockedIncrement ((PLONG)&MmSysPteListBySizeCount[Index]);
  963. return;
  964. }
  965. //
  966. // No lookasides are left for inserting this PTE allocation
  967. // into the nonblocking queues. Queue an extension to a
  968. // worker thread so it can be done in a deadlock-free
  969. // manner.
  970. //
  971. if (MiPteSListExpand.SListPages < MI_MAXIMUM_SLIST_PTE_PAGES) {
  972. //
  973. // If an extension is not in progress then queue one now.
  974. //
  975. ExtensionInProgress = InterlockedCompareExchange (&MiPteSListExpand.Active, 1, 0);
  976. if (ExtensionInProgress == 0) {
  977. ExInitializeWorkItem (&MiPteSListExpand.WorkItem,
  978. MiPteSListExpansionWorker,
  979. (PVOID)&MiPteSListExpand);
  980. ExQueueWorkItem (&MiPteSListExpand.WorkItem, CriticalWorkQueue);
  981. }
  982. }
  983. }
  984. }
  985. //
  986. // The insert failed - our lookaside list must be empty or we are
  987. // low on PTEs systemwide or we already had plenty on our list and
  988. // didn't try to insert. Fall through to queue this in the long way.
  989. //
  990. }
  991. //
  992. // Acquire system space spin lock to synchronize access.
  993. //
  994. PteOffset = (ULONG_PTR)(StartingPte - MmSystemPteBase);
  995. MiLockSystemSpace(OldIrql);
  996. MmTotalFreeSystemPtes[SystemPtePoolType] += NumberOfPtes;
  997. PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];
  998. while (TRUE) {
  999. NextPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
  1000. if (PteOffset < PointerPte->u.List.NextEntry) {
  1001. //
  1002. // Insert in the list at this point. The
  1003. // previous one should point to the new freed set and
  1004. // the new freed set should point to the place
  1005. // the previous set points to.
  1006. //
  1007. // Attempt to combine the clusters before we
  1008. // insert.
  1009. //
  1010. // Locate the end of the current structure.
  1011. //
  1012. ASSERT (((StartingPte + NumberOfPtes) <= NextPte) ||
  1013. (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST));
  1014. PointerFollowingPte = PointerPte + 1;
  1015. if (PointerPte->u.List.OneEntry) {
  1016. Size = 1;
  1017. }
  1018. else {
  1019. Size = (ULONG_PTR) PointerFollowingPte->u.List.NextEntry;
  1020. }
  1021. if ((PointerPte + Size) == StartingPte) {
  1022. //
  1023. // We can combine the clusters.
  1024. //
  1025. NumberOfPtes += (ULONG)Size;
  1026. PointerFollowingPte->u.List.NextEntry = NumberOfPtes;
  1027. PointerPte->u.List.OneEntry = 0;
  1028. //
  1029. // Point the starting PTE to the beginning of
  1030. // the new free set and try to combine with the
  1031. // following free cluster.
  1032. //
  1033. StartingPte = PointerPte;
  1034. }
  1035. else {
  1036. //
  1037. // Can't combine with previous. Make this Pte the
  1038. // start of a cluster.
  1039. //
  1040. //
  1041. // Point this cluster to the next cluster.
  1042. //
  1043. StartingPte->u.List.NextEntry = PointerPte->u.List.NextEntry;
  1044. //
  1045. // Point the current cluster to this cluster.
  1046. //
  1047. PointerPte->u.List.NextEntry = PteOffset;
  1048. //
  1049. // Set the size of this cluster.
  1050. //
  1051. if (NumberOfPtes == 1) {
  1052. StartingPte->u.List.OneEntry = 1;
  1053. }
  1054. else {
  1055. StartingPte->u.List.OneEntry = 0;
  1056. PointerFollowingPte = StartingPte + 1;
  1057. PointerFollowingPte->u.List.NextEntry = NumberOfPtes;
  1058. }
  1059. }
  1060. //
  1061. // Attempt to combine the newly created cluster with
  1062. // the following cluster.
  1063. //
  1064. if ((StartingPte + NumberOfPtes) == NextPte) {
  1065. //
  1066. // Combine with following cluster.
  1067. //
  1068. //
  1069. // Set the next cluster to the value contained in the
  1070. // cluster we are merging into this one.
  1071. //
  1072. StartingPte->u.List.NextEntry = NextPte->u.List.NextEntry;
  1073. StartingPte->u.List.OneEntry = 0;
  1074. PointerFollowingPte = StartingPte + 1;
  1075. if (NextPte->u.List.OneEntry) {
  1076. Size = 1;
  1077. }
  1078. else {
  1079. NextPte++;
  1080. Size = (ULONG_PTR) NextPte->u.List.NextEntry;
  1081. }
  1082. PointerFollowingPte->u.List.NextEntry = NumberOfPtes + Size;
  1083. }
  1084. #if 0
  1085. if (MmDebug & MM_DBG_SYS_PTES) {
  1086. MiDumpSystemPtes(SystemPtePoolType);
  1087. }
  1088. #endif
  1089. #if DBG
  1090. if (MmDebug & MM_DBG_SYS_PTES) {
  1091. ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
  1092. MiCountFreeSystemPtes (SystemPtePoolType));
  1093. }
  1094. #endif
  1095. MiUnlockSystemSpace(OldIrql);
  1096. return;
  1097. }
  1098. //
  1099. // Point to next freed cluster.
  1100. //
  1101. PointerPte = NextPte;
  1102. }
  1103. }
  1104. VOID
  1105. MiReleaseSplitSystemPtes (
  1106. IN PMMPTE StartingPte,
  1107. IN ULONG NumberOfPtes,
  1108. IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
  1109. )
  1110. /*++
  1111. Routine Description:
  1112. This function releases the specified number of PTEs
  1113. within the non paged portion of system space.
  1114. Note that the PTEs must be invalid and the page frame number
  1115. must have been set to zero.
  1116. This portion is a split portion from a larger allocation so
  1117. careful updating of the tracking bitmaps must be done here.
  1118. Arguments:
  1119. StartingPte - Supplies the address of the first PTE to release.
  1120. NumberOfPtes - Supplies the number of PTEs to release.
  1121. SystemPtePoolType - Supplies the PTE type of the pool to release PTEs to,
  1122. one of SystemPteSpace or NonPagedPoolExpansion.
  1123. Return Value:
  1124. None.
  1125. Environment:
  1126. Kernel mode.
  1127. --*/
  1128. {
  1129. ULONG i;
  1130. ULONG StartBit;
  1131. KIRQL OldIrql;
  1132. PULONG StartBitMapBuffer;
  1133. PULONG EndBitMapBuffer;
  1134. PVOID VirtualAddress;
  1135. //
  1136. // Check to make sure the PTE address is within bounds.
  1137. //
  1138. ASSERT (NumberOfPtes != 0);
  1139. ASSERT (StartingPte >= MmSystemPtesStart[SystemPtePoolType]);
  1140. ASSERT (StartingPte <= MmSystemPtesEnd[SystemPtePoolType]);
  1141. if ((MmTrackPtes & 0x2) && (SystemPtePoolType == SystemPteSpace)) {
  1142. ASSERT (MmTrackPtes & 0x2);
  1143. VirtualAddress = MiGetVirtualAddressMappedByPte (StartingPte);
  1144. StartBit = (ULONG) (StartingPte - MiPteStart);
  1145. ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql);
  1146. //
  1147. // Verify start and size of allocation using the tracking bitmaps.
  1148. //
  1149. StartBitMapBuffer = MiPteStartBitmap->Buffer;
  1150. EndBitMapBuffer = MiPteEndBitmap->Buffer;
  1151. //
  1152. // All the start bits better be set.
  1153. //
  1154. for (i = StartBit; i < StartBit + NumberOfPtes; i += 1) {
  1155. ASSERT (MI_CHECK_BIT (StartBitMapBuffer, i) == 1);
  1156. }
  1157. if (StartBit != 0) {
  1158. if (RtlCheckBit (MiPteStartBitmap, StartBit - 1)) {
  1159. if (!RtlCheckBit (MiPteEndBitmap, StartBit - 1)) {
  1160. //
  1161. // In the middle of an allocation - update the previous
  1162. // so it ends here.
  1163. //
  1164. MI_SET_BIT (EndBitMapBuffer, StartBit - 1);
  1165. }
  1166. else {
  1167. //
  1168. // The range being freed is the start of an allocation.
  1169. //
  1170. }
  1171. }
  1172. }
  1173. //
  1174. // Unconditionally set the end bit (and clear any others) in case the
  1175. // split chunk crosses multiple allocations.
  1176. //
  1177. MI_SET_BIT (EndBitMapBuffer, StartBit + NumberOfPtes - 1);
  1178. ExReleaseSpinLock (&MiPteTrackerLock, OldIrql);
  1179. }
  1180. MiReleaseSystemPtes (StartingPte, NumberOfPtes, SystemPteSpace);
  1181. }
  1182. VOID
  1183. MiInitializeSystemPtes (
  1184. IN PMMPTE StartingPte,
  1185. IN ULONG NumberOfPtes,
  1186. IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
  1187. )
  1188. /*++
  1189. Routine Description:
  1190. This routine initializes the system PTE pool.
  1191. Arguments:
  1192. StartingPte - Supplies the address of the first PTE to put in the pool.
  1193. NumberOfPtes - Supplies the number of PTEs to put in the pool.
  1194. SystemPtePoolType - Supplies the PTE type of the pool to initialize, one of
  1195. SystemPteSpace or NonPagedPoolExpansion.
  1196. Return Value:
  1197. none.
  1198. Environment:
  1199. Kernel mode.
  1200. --*/
  1201. {
  1202. ULONG i;
  1203. ULONG TotalPtes;
  1204. ULONG SListEntries;
  1205. SIZE_T SListBytes;
  1206. ULONG TotalChunks;
  1207. PMMPTE PointerPte;
  1208. PPTE_SLIST Chunk;
  1209. PPTE_SLIST SListChunks;
  1210. //
  1211. // Set the base of the system PTE pool to this PTE. This takes into
  1212. // account that systems may have additional PTE pools below the PTE_BASE.
  1213. //
  1214. MmSystemPteBase = MI_PTE_BASE_FOR_LOWEST_KERNEL_ADDRESS;
  1215. MmSystemPtesStart[SystemPtePoolType] = StartingPte;
  1216. MmSystemPtesEnd[SystemPtePoolType] = StartingPte + NumberOfPtes - 1;
  1217. //
  1218. // If there are no PTEs specified, then make a valid chain by indicating
  1219. // that the list is empty.
  1220. //
  1221. if (NumberOfPtes == 0) {
  1222. MmFirstFreeSystemPte[SystemPtePoolType] = ZeroKernelPte;
  1223. MmFirstFreeSystemPte[SystemPtePoolType].u.List.NextEntry =
  1224. MM_EMPTY_LIST;
  1225. return;
  1226. }
  1227. //
  1228. // Initialize the specified system PTE pool.
  1229. //
  1230. MiFillMemoryPte (StartingPte,
  1231. NumberOfPtes * sizeof (MMPTE),
  1232. ZeroKernelPte.u.Long);
  1233. //
  1234. // The page frame field points to the next cluster. As we only
  1235. // have one cluster at initialization time, mark it as the last
  1236. // cluster.
  1237. //
  1238. StartingPte->u.List.NextEntry = MM_EMPTY_LIST;
  1239. MmFirstFreeSystemPte[SystemPtePoolType] = ZeroKernelPte;
  1240. MmFirstFreeSystemPte[SystemPtePoolType].u.List.NextEntry =
  1241. StartingPte - MmSystemPteBase;
  1242. //
  1243. // If there is only one PTE in the pool, then mark it as a one entry
  1244. // PTE. Otherwise, store the cluster size in the following PTE.
  1245. //
  1246. if (NumberOfPtes == 1) {
  1247. StartingPte->u.List.OneEntry = TRUE;
  1248. }
  1249. else {
  1250. StartingPte += 1;
  1251. MI_WRITE_INVALID_PTE (StartingPte, ZeroKernelPte);
  1252. StartingPte->u.List.NextEntry = NumberOfPtes;
  1253. }
  1254. //
  1255. // Set the total number of free PTEs for the specified type.
  1256. //
  1257. MmTotalFreeSystemPtes[SystemPtePoolType] = NumberOfPtes;
  1258. ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
  1259. MiCountFreeSystemPtes (SystemPtePoolType));
  1260. if (SystemPtePoolType == SystemPteSpace) {
  1261. ULONG Lists[MM_SYS_PTE_TABLES_MAX] = {
  1262. #if defined(_IA64_)
  1263. MM_PTE_LIST_1,
  1264. MM_PTE_LIST_2,
  1265. MM_PTE_LIST_4,
  1266. MM_PTE_LIST_8,
  1267. MM_PTE_LIST_9,
  1268. MM_PTE_LIST_18
  1269. #elif defined(_AMD64_)
  1270. MM_PTE_LIST_1,
  1271. MM_PTE_LIST_2,
  1272. MM_PTE_LIST_4,
  1273. MM_PTE_LIST_6,
  1274. MM_PTE_LIST_8,
  1275. MM_PTE_LIST_16
  1276. #else
  1277. MM_PTE_LIST_1,
  1278. MM_PTE_LIST_2,
  1279. MM_PTE_LIST_4,
  1280. MM_PTE_LIST_8,
  1281. MM_PTE_LIST_16
  1282. #endif
  1283. };
  1284. MmTotalSystemPtes = NumberOfPtes;
  1285. TotalPtes = 0;
  1286. TotalChunks = 0;
  1287. KeInitializeSpinLock (&MiSystemPteSListHeadLock);
  1288. InitializeSListHead (&MiSystemPteSListHead);
  1289. for (i = 0; i < MM_SYS_PTE_TABLES_MAX ; i += 1) {
  1290. TotalPtes += (Lists[i] * MmSysPteIndex[i]);
  1291. TotalChunks += Lists[i];
  1292. }
  1293. SListBytes = TotalChunks * sizeof (PTE_SLIST);
  1294. SListBytes = MI_ROUND_TO_SIZE (SListBytes, PAGE_SIZE);
  1295. SListEntries = (ULONG)(SListBytes / sizeof (PTE_SLIST));
  1296. SListChunks = (PPTE_SLIST) ExAllocatePoolWithTag (NonPagedPool,
  1297. SListBytes,
  1298. 'PSmM');
  1299. if (SListChunks == NULL) {
  1300. MiIssueNoPtesBugcheck (TotalPtes, SystemPteSpace);
  1301. }
  1302. ASSERT (MiPteSListExpand.Active == FALSE);
  1303. ASSERT (MiPteSListExpand.SListPages == 0);
  1304. MiPteSListExpand.SListPages = (ULONG)(SListBytes / PAGE_SIZE);
  1305. ASSERT (MiPteSListExpand.SListPages != 0);
  1306. //
  1307. // Carve up the pages into SLIST entries (with no pool headers).
  1308. //
  1309. Chunk = SListChunks;
  1310. for (i = 0; i < SListEntries; i += 1) {
  1311. InterlockedPushEntrySList (&MiSystemPteSListHead,
  1312. (PSINGLE_LIST_ENTRY)Chunk);
  1313. Chunk += 1;
  1314. }
  1315. //
  1316. // Now that the SLIST is populated, initialize the nonblocking heads.
  1317. //
  1318. for (i = 0; i < MM_SYS_PTE_TABLES_MAX ; i += 1) {
  1319. MiSystemPteNBHead[i] = ExInitializeNBQueueHead (&MiSystemPteSListHead);
  1320. if (MiSystemPteNBHead[i] == NULL) {
  1321. MiIssueNoPtesBugcheck (TotalPtes, SystemPteSpace);
  1322. }
  1323. }
  1324. if (MmTrackPtes & 0x2) {
  1325. //
  1326. // Allocate PTE mapping verification bitmaps.
  1327. //
  1328. ULONG BitmapSize;
  1329. #if defined(_WIN64)
  1330. BitmapSize = MmNumberOfSystemPtes;
  1331. MiPteStart = MmSystemPtesStart[SystemPteSpace];
  1332. #else
  1333. MiPteStart = MiGetPteAddress (MmSystemRangeStart);
  1334. BitmapSize = ((ULONG_PTR)PTE_TOP + 1) - (ULONG_PTR) MiPteStart;
  1335. BitmapSize /= sizeof (MMPTE);
  1336. #endif
  1337. MiCreateBitMap (&MiPteStartBitmap, BitmapSize, NonPagedPool);
  1338. if (MiPteStartBitmap != NULL) {
  1339. MiCreateBitMap (&MiPteEndBitmap, BitmapSize, NonPagedPool);
  1340. if (MiPteEndBitmap == NULL) {
  1341. ExFreePool (MiPteStartBitmap);
  1342. MiPteStartBitmap = NULL;
  1343. }
  1344. }
  1345. if ((MiPteStartBitmap != NULL) && (MiPteEndBitmap != NULL)) {
  1346. RtlClearAllBits (MiPteStartBitmap);
  1347. RtlClearAllBits (MiPteEndBitmap);
  1348. }
  1349. MmTrackPtes &= ~0x2;
  1350. }
  1351. //
  1352. // Initialize the by size lists.
  1353. //
  1354. PointerPte = MiReserveSystemPtes (TotalPtes, SystemPteSpace);
  1355. if (PointerPte == NULL) {
  1356. MiIssueNoPtesBugcheck (TotalPtes, SystemPteSpace);
  1357. }
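//
// The loop below carves the single large reservation into MM_PTE_LIST_*
// chunks of each bin size and releases each chunk, which places it on the
// corresponding per-size nonblocking queue.
//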
  1358. i = MM_SYS_PTE_TABLES_MAX;
  1359. do {
  1360. i -= 1;
  1361. do {
  1362. Lists[i] -= 1;
  1363. MiReleaseSystemPtes (PointerPte,
  1364. MmSysPteIndex[i],
  1365. SystemPteSpace);
  1366. PointerPte += MmSysPteIndex[i];
  1367. } while (Lists[i] != 0);
  1368. } while (i != 0);
  1369. //
  1370. // Turn this on after the multiple releases of the binned PTEs (that
  1371. // came from a single reservation) above.
  1372. //
  1373. if (MiPteStartBitmap != NULL) {
  1374. MmTrackPtes |= 0x2;
  1375. }
  1376. }
  1377. return;
  1378. }
  1379. VOID
  1380. MiIncrementSystemPtes (
  1381. IN ULONG NumberOfPtes
  1382. )
  1383. /*++
  1384. Routine Description:
  1385. This routine increments the total number of PTEs. This is done
  1386. separately from actually adding the PTEs to the pool so that
  1387. autoconfiguration can use the high number in advance of the PTEs
  1388. actually getting added.
  1389. Arguments:
  1390. NumberOfPtes - Supplies the number of PTEs to increment the total by.
  1391. Return Value:
  1392. None.
  1393. Environment:
  1394. Kernel mode. Synchronization provided by the caller.
  1395. --*/
  1396. {
  1397. MmTotalSystemPtes += NumberOfPtes;
  1398. }
  1399. VOID
  1400. MiAddSystemPtes (
  1401. IN PMMPTE StartingPte,
  1402. IN ULONG NumberOfPtes,
  1403. IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
  1404. )
  1405. /*++
  1406. Routine Description:
  1407. This routine adds newly created PTEs to the specified pool.
  1408. Arguments:
  1409. StartingPte - Supplies the address of the first PTE to put in the pool.
  1410. NumberOfPtes - Supplies the number of PTEs to put in the pool.
  1411. SystemPtePoolType - Supplies the PTE type of the pool to expand, one of
  1412. SystemPteSpace or NonPagedPoolExpansion.
  1413. Return Value:
  1414. None.
  1415. Environment:
  1416. Kernel mode.
  1417. --*/
  1418. {
  1419. PMMPTE EndingPte;
  1420. ASSERT (SystemPtePoolType == SystemPteSpace);
  1421. EndingPte = StartingPte + NumberOfPtes - 1;
  1422. if (StartingPte < MmSystemPtesStart[SystemPtePoolType]) {
  1423. MmSystemPtesStart[SystemPtePoolType] = StartingPte;
  1424. }
  1425. if (EndingPte > MmSystemPtesEnd[SystemPtePoolType]) {
  1426. MmSystemPtesEnd[SystemPtePoolType] = EndingPte;
  1427. }
  1428. //
  1429. // Set the low bit to signify this range was never reserved and therefore
  1430. // should not be validated during the release.
  1431. //
  1432. if (MmTrackPtes & 0x2) {
  1433. StartingPte = (PMMPTE) ((ULONG_PTR)StartingPte | 0x1);
  1434. }
  1435. MiReleaseSystemPtes (StartingPte, NumberOfPtes, SystemPtePoolType);
  1436. }
  1437. ULONG
  1438. MiGetSystemPteListCount (
  1439. IN ULONG ListSize
  1440. )
  1441. /*++
  1442. Routine Description:
  1443. This routine returns the number of free entries of the list which
  1444. covers the specified size. The size must be less than or equal to the
  1445. largest list index.
  1446. Arguments:
  1447. ListSize - Supplies the number of PTEs needed.
  1448. Return Value:
  1449. Number of free entries on the list which contains ListSize PTEs.
  1450. Environment:
  1451. Kernel mode.
  1452. --*/
  1453. {
  1454. ULONG Index;
  1455. ASSERT (ListSize <= MM_PTE_TABLE_LIMIT);
  1456. Index = MmSysPteTables [ListSize];
  1457. return MmSysPteListBySizeCount[Index];
  1458. }
  1459. LOGICAL
  1460. MiGetSystemPteAvailability (
  1461. IN ULONG NumberOfPtes,
  1462. IN MM_PAGE_PRIORITY Priority
  1463. )
  1464. /*++
  1465. Routine Description:
  1466. This routine checks how many SystemPteSpace PTEs are available for the
  1467. requested size. If plenty are available then TRUE is returned.
  1468. If we are reaching a low resource situation, then the request is evaluated
  1469. based on the argument priority.
  1470. Arguments:
  1471. NumberOfPtes - Supplies the number of PTEs needed.
  1472. Priority - Supplies the priority of the request.
  1473. Return Value:
  1474. TRUE if the caller should allocate the PTEs, FALSE if not.
  1475. Environment:
  1476. Kernel mode.
  1477. --*/
  1478. {
  1479. ULONG Index;
  1480. ULONG FreePtes;
  1481. ULONG FreeBinnedPtes;
  1482. ASSERT (Priority != HighPagePriority);
  1483. FreePtes = MmTotalFreeSystemPtes[SystemPteSpace];
  1484. if (NumberOfPtes <= MM_PTE_TABLE_LIMIT) {
  1485. Index = MmSysPteTables [NumberOfPtes];
  1486. FreeBinnedPtes = MmSysPteListBySizeCount[Index];
  1487. if (FreeBinnedPtes > MmSysPteMinimumFree[Index]) {
  1488. return TRUE;
  1489. }
  1490. if (FreeBinnedPtes != 0) {
  1491. if (Priority == NormalPagePriority) {
  1492. if (FreeBinnedPtes > 1 || FreePtes > 512) {
  1493. return TRUE;
  1494. }
  1495. #if defined (_X86_)
  1496. if (MiRecoverExtraPtes () == TRUE) {
  1497. return TRUE;
  1498. }
  1499. if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
  1500. return TRUE;
  1501. }
  1502. #endif
  1503. MmPteFailures[SystemPteSpace] += 1;
  1504. return FALSE;
  1505. }
  1506. if (FreePtes > 2048) {
  1507. return TRUE;
  1508. }
  1509. #if defined (_X86_)
  1510. if (MiRecoverExtraPtes () == TRUE) {
  1511. return TRUE;
  1512. }
  1513. if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
  1514. return TRUE;
  1515. }
  1516. #endif
  1517. MmPteFailures[SystemPteSpace] += 1;
  1518. return FALSE;
  1519. }
  1520. }
  1521. if (Priority == NormalPagePriority) {
  1522. if ((LONG)NumberOfPtes < (LONG)FreePtes - 512) {
  1523. return TRUE;
  1524. }
  1525. #if defined (_X86_)
  1526. if (MiRecoverExtraPtes () == TRUE) {
  1527. return TRUE;
  1528. }
  1529. if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
  1530. return TRUE;
  1531. }
  1532. #endif
  1533. MmPteFailures[SystemPteSpace] += 1;
  1534. return FALSE;
  1535. }
  1536. if ((LONG)NumberOfPtes < (LONG)FreePtes - 2048) {
  1537. return TRUE;
  1538. }
  1539. #if defined (_X86_)
  1540. if (MiRecoverExtraPtes () == TRUE) {
  1541. return TRUE;
  1542. }
  1543. if (MiRecoverSpecialPtes (NumberOfPtes) == TRUE) {
  1544. return TRUE;
  1545. }
  1546. #endif
  1547. MmPteFailures[SystemPteSpace] += 1;
  1548. return FALSE;
  1549. }
  1550. VOID
  1551. MiCheckPteReserve (
  1552. IN PMMPTE PointerPte,
  1553. IN ULONG NumberOfPtes
  1554. )
  1555. /*++
  1556. Routine Description:
  1557. This function validates the reservation of the specified number of
  1558. system space PTEs and records it in the tracking bitmaps.
  1559. Arguments:
  1560. PointerPte - Supplies the address of the first PTE being reserved.
  1561. NumberOfPtes - Supplies the number of PTEs to reserve.
  1562. Return Value:
  1563. None.
  1564. Environment:
  1565. Kernel mode.
  1566. --*/
  1567. {
  1568. ULONG i;
  1569. KIRQL OldIrql;
  1570. ULONG StartBit;
  1571. PULONG StartBitMapBuffer;
  1572. PULONG EndBitMapBuffer;
  1573. PVOID VirtualAddress;
  1574. ASSERT (MmTrackPtes & 0x2);
  1575. VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
  1576. if (NumberOfPtes == 0) {
  1577. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1578. 0x200,
  1579. (ULONG_PTR) VirtualAddress,
  1580. 0,
  1581. 0);
  1582. }
  1583. StartBit = (ULONG) (PointerPte - MiPteStart);
  1584. i = StartBit;
  1585. StartBitMapBuffer = MiPteStartBitmap->Buffer;
  1586. EndBitMapBuffer = MiPteEndBitmap->Buffer;
  1587. ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql);
  1588. for ( ; i < StartBit + NumberOfPtes; i += 1) {
  1589. if (MI_CHECK_BIT (StartBitMapBuffer, i)) {
  1590. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1591. 0x201,
  1592. (ULONG_PTR) VirtualAddress,
  1593. (ULONG_PTR) VirtualAddress + ((i - StartBit) << PAGE_SHIFT),
  1594. NumberOfPtes);
  1595. }
  1596. }
  1597. RtlSetBits (MiPteStartBitmap, StartBit, NumberOfPtes);
  1598. for (i = StartBit; i < StartBit + NumberOfPtes; i += 1) {
  1599. if (MI_CHECK_BIT (EndBitMapBuffer, i)) {
  1600. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1601. 0x202,
  1602. (ULONG_PTR) VirtualAddress,
  1603. (ULONG_PTR) VirtualAddress + ((i - StartBit) << PAGE_SHIFT),
  1604. NumberOfPtes);
  1605. }
  1606. }
  1607. MI_SET_BIT (EndBitMapBuffer, i - 1);
  1608. ExReleaseSpinLock (&MiPteTrackerLock, OldIrql);
  1609. }
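//
// Together the two bitmaps encode each tracked allocation: MiCheckPteReserve
// above sets every start bit in the range and only the final end bit, and
// MiCheckPteRelease below recovers the allocation size by scanning forward
// from the start bit to the first set end bit. A release of the wrong size,
// or a reserve that overlaps an existing range, therefore bugchecks.
//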
  1610. VOID
  1611. MiCheckPteRelease (
  1612. IN PMMPTE StartingPte,
  1613. IN ULONG NumberOfPtes
  1614. )
  1615. /*++
  1616. Routine Description:
  1617. This function checks the release of the specified number of system
  1618. space PTEs.
  1619. Arguments:
  1620. StartingPte - Supplies the address of the first PTE to release.
  1621. NumberOfPtes - Supplies the number of PTEs to release.
  1622. Return Value:
  1623. None.
  1624. Environment:
  1625. Kernel mode.
  1626. --*/
  1627. {
  1628. ULONG i;
  1629. ULONG Index;
  1630. ULONG StartBit;
  1631. KIRQL OldIrql;
  1632. ULONG CalculatedPtes;
  1633. ULONG NumberOfPtesRoundedUp;
  1634. PULONG StartBitMapBuffer;
  1635. PULONG EndBitMapBuffer;
  1636. PVOID VirtualAddress;
  1637. PVOID LowestVirtualAddress;
  1638. PVOID HighestVirtualAddress;
  1639. ASSERT (MmTrackPtes & 0x2);
  1640. VirtualAddress = MiGetVirtualAddressMappedByPte (StartingPte);
  1641. LowestVirtualAddress = MiGetVirtualAddressMappedByPte (MmSystemPtesStart[SystemPteSpace]);
  1642. HighestVirtualAddress = MiGetVirtualAddressMappedByPte (MmSystemPtesEnd[SystemPteSpace]);
  1643. if (NumberOfPtes == 0) {
  1644. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1645. 0x300,
  1646. (ULONG_PTR) VirtualAddress,
  1647. (ULONG_PTR) LowestVirtualAddress,
  1648. (ULONG_PTR) HighestVirtualAddress);
  1649. }
  1650. if (StartingPte < MmSystemPtesStart[SystemPteSpace]) {
  1651. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1652. 0x301,
  1653. (ULONG_PTR) VirtualAddress,
  1654. (ULONG_PTR) LowestVirtualAddress,
  1655. (ULONG_PTR) HighestVirtualAddress);
  1656. }
  1657. if (StartingPte > MmSystemPtesEnd[SystemPteSpace]) {
  1658. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1659. 0x302,
  1660. (ULONG_PTR) VirtualAddress,
  1661. (ULONG_PTR) LowestVirtualAddress,
  1662. (ULONG_PTR) HighestVirtualAddress);
  1663. }
  1664. StartBit = (ULONG) (StartingPte - MiPteStart);
  1665. ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql);
  1666. //
  1667. // Verify start and size of allocation using the tracking bitmaps.
  1668. //
  1669. if (!RtlCheckBit (MiPteStartBitmap, StartBit)) {
  1670. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1671. 0x303,
  1672. (ULONG_PTR) VirtualAddress,
  1673. NumberOfPtes,
  1674. 0);
  1675. }
  1676. if (StartBit != 0) {
  1677. if (RtlCheckBit (MiPteStartBitmap, StartBit - 1)) {
  1678. if (!RtlCheckBit (MiPteEndBitmap, StartBit - 1)) {
  1679. //
  1680. // In the middle of an allocation... bugcheck.
  1681. //
  1682. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1683. 0x304,
  1684. (ULONG_PTR) VirtualAddress,
  1685. NumberOfPtes,
  1686. 0);
  1687. }
  1688. }
  1689. }
  1690. //
  1691. // Find the last allocated PTE to calculate the correct size.
  1692. //
  1693. EndBitMapBuffer = MiPteEndBitmap->Buffer;
  1694. i = StartBit;
  1695. while (!MI_CHECK_BIT (EndBitMapBuffer, i)) {
  1696. i += 1;
  1697. }
  1698. CalculatedPtes = i - StartBit + 1;
  1699. NumberOfPtesRoundedUp = NumberOfPtes;
  1700. if (CalculatedPtes <= MM_PTE_TABLE_LIMIT) {
  1701. Index = MmSysPteTables [NumberOfPtes];
  1702. NumberOfPtesRoundedUp = MmSysPteIndex [Index];
  1703. }
  1704. if (CalculatedPtes != NumberOfPtesRoundedUp) {
  1705. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1706. 0x305,
  1707. (ULONG_PTR) VirtualAddress,
  1708. NumberOfPtes,
  1709. CalculatedPtes);
  1710. }
  1711. StartBitMapBuffer = MiPteStartBitmap->Buffer;
  1712. for (i = StartBit; i < StartBit + CalculatedPtes; i += 1) {
  1713. if (MI_CHECK_BIT (StartBitMapBuffer, i) == 0) {
  1714. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1715. 0x306,
  1716. (ULONG_PTR) VirtualAddress,
  1717. (ULONG_PTR) VirtualAddress + ((i - StartBit) << PAGE_SHIFT),
  1718. CalculatedPtes);
  1719. }
  1720. }
  1721. RtlClearBits (MiPteStartBitmap, StartBit, CalculatedPtes);
  1722. MI_CLEAR_BIT (EndBitMapBuffer, i - 1);
  1723. ExReleaseSpinLock (&MiPteTrackerLock, OldIrql);
  1724. }
  1725. #if DBG
  1726. VOID
  1727. MiDumpSystemPtes (
  1728. IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
  1729. )
  1730. {
  1731. PMMPTE PointerPte;
  1732. PMMPTE PointerNextPte;
  1733. ULONG_PTR ClusterSize;
  1734. PMMPTE EndOfCluster;
  1735. PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];
  1736. if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
  1737. return;
  1738. }
  1739. PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
  1740. for (;;) {
  1741. if (PointerPte->u.List.OneEntry) {
  1742. ClusterSize = 1;
  1743. }
  1744. else {
  1745. PointerNextPte = PointerPte + 1;
  1746. ClusterSize = (ULONG_PTR) PointerNextPte->u.List.NextEntry;
  1747. }
  1748. EndOfCluster = PointerPte + (ClusterSize - 1);
  1749. DbgPrint("System Pte at %p for %p entries (%p)\n",
  1750. PointerPte, ClusterSize, EndOfCluster);
  1751. if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
  1752. break;
  1753. }
  1754. PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
  1755. }
  1756. return;
  1757. }
  1758. ULONG
  1759. MiCountFreeSystemPtes (
  1760. IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
  1761. )
  1762. {
  1763. PMMPTE PointerPte;
  1764. PMMPTE PointerNextPte;
  1765. ULONG_PTR ClusterSize;
  1766. ULONG_PTR FreeCount;
  1767. PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];
  1768. if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
  1769. return 0;
  1770. }
  1771. FreeCount = 0;
  1772. PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
  1773. for (;;) {
  1774. if (PointerPte->u.List.OneEntry) {
  1775. ClusterSize = 1;
  1776. }
  1777. else {
  1778. PointerNextPte = PointerPte + 1;
  1779. ClusterSize = (ULONG_PTR) PointerNextPte->u.List.NextEntry;
  1780. }
  1781. FreeCount += ClusterSize;
  1782. if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
  1783. break;
  1784. }
  1785. PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
  1786. }
  1787. return (ULONG)FreeCount;
  1788. }
  1789. #endif