Leaked source code of Windows Server 2003
  1. /*++
  2. Copyright (c) 1989 Microsoft Corporation
  3. Module Name:
  4. allocpag.c
  5. Abstract:
  6. This module contains the routines which allocate and deallocate
  7. one or more pages from paged or nonpaged pool.
  8. Author:
  9. Lou Perazzoli (loup) 6-Apr-1989
  10. Landy Wang (landyw) 02-June-1997
  11. Revision History:
  12. --*/
  13. #include "mi.h"
  14. #if DBG
  15. extern ULONG MiShowStuckPages;
  16. #endif
  17. PVOID
  18. MiFindContiguousMemoryInPool (
  19. IN PFN_NUMBER LowestPfn,
  20. IN PFN_NUMBER HighestPfn,
  21. IN PFN_NUMBER BoundaryPfn,
  22. IN PFN_NUMBER SizeInPages,
  23. IN PVOID CallingAddress
  24. );
  25. #ifdef ALLOC_PRAGMA
  26. #pragma alloc_text(INIT, MiInitializeNonPagedPool)
  27. #pragma alloc_text(INIT, MiInitializePoolEvents)
  28. #pragma alloc_text(INIT, MiSyncCachedRanges)
  29. #pragma alloc_text(PAGE, MmAvailablePoolInPages)
  30. #pragma alloc_text(PAGE, MiFindContiguousMemory)
  31. #pragma alloc_text(PAGELK, MiFindContiguousMemoryInPool)
  32. #pragma alloc_text(PAGELK, MiFindLargePageMemory)
  33. #pragma alloc_text(PAGELK, MiFreeLargePageMemory)
  34. #pragma alloc_text(PAGE, MiCheckSessionPoolAllocations)
  35. #pragma alloc_text(PAGE, MiSessionPoolVector)
  36. #pragma alloc_text(PAGE, MiSessionPoolMutex)
  37. #pragma alloc_text(PAGE, MiInitializeSessionPool)
  38. #pragma alloc_text(PAGE, MiFreeSessionPoolBitMaps)
  39. #pragma alloc_text(POOLMI, MiAllocatePoolPages)
  40. #pragma alloc_text(POOLMI, MiFreePoolPages)
  41. #endif
  42. ULONG MmPagedPoolCommit; // used by the debugger
  43. SLIST_HEADER MiNonPagedPoolSListHead;
  44. ULONG MiNonPagedPoolSListMaximum = 4;
  45. SLIST_HEADER MiPagedPoolSListHead;
  46. ULONG MiPagedPoolSListMaximum = 8;
  47. PFN_NUMBER MmAllocatedNonPagedPool;
  48. PFN_NUMBER MiStartOfInitialPoolFrame;
  49. PFN_NUMBER MiEndOfInitialPoolFrame;
  50. PVOID MmNonPagedPoolEnd0;
  51. PVOID MmNonPagedPoolExpansionStart;
  52. LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_LIST_HEADS];
  53. extern POOL_DESCRIPTOR NonPagedPoolDescriptor;
  54. extern PFN_NUMBER MmFreedExpansionPoolMaximum;
  55. extern KGUARDED_MUTEX MmPagedPoolMutex;
  56. #define MM_SMALL_ALLOCATIONS 4
  57. #if DBG
  58. ULONG MiClearCache;
  59. //
  60. // Set this to a nonzero (ie: 10000) value to cause every pool allocation to
  61. // be checked, and an ASSERT to fire if the allocation is larger than this value.
  62. //
  63. ULONG MmCheckRequestInPages = 0;
  64. //
  65. // Set this to a nonzero (ie: 0x23456789) value to cause this pattern to be
  66. // written into freed nonpaged pool pages.
  67. //
  68. ULONG MiFillFreedPool = 0;
  69. #endif
  70. PFN_NUMBER MiExpansionPoolPagesInUse;
  71. PFN_NUMBER MiExpansionPoolPagesInitialCharge;
  72. ULONG MmUnusedSegmentForceFreeDefault = 30;
  73. extern ULONG MmUnusedSegmentForceFree;
  74. //
  75. // For debugging purposes.
  76. //
  77. typedef enum _MM_POOL_TYPES {
  78. MmNonPagedPool,
  79. MmPagedPool,
  80. MmSessionPagedPool,
  81. MmMaximumPoolType
  82. } MM_POOL_TYPES;
  83. typedef enum _MM_POOL_PRIORITIES {
  84. MmHighPriority,
  85. MmNormalPriority,
  86. MmLowPriority,
  87. MmMaximumPoolPriority
  88. } MM_POOL_PRIORITIES;
  89. typedef enum _MM_POOL_FAILURE_REASONS {
  90. MmNonPagedNoPtes,
  91. MmPriorityTooLow,
  92. MmNonPagedNoPagesAvailable,
  93. MmPagedNoPtes,
  94. MmSessionPagedNoPtes,
  95. MmPagedNoPagesAvailable,
  96. MmSessionPagedNoPagesAvailable,
  97. MmPagedNoCommit,
  98. MmSessionPagedNoCommit,
  99. MmNonPagedNoResidentAvailable,
  100. MmNonPagedNoCommit,
  101. MmMaximumFailureReason
  102. } MM_POOL_FAILURE_REASONS;
  103. ULONG MmPoolFailures[MmMaximumPoolType][MmMaximumPoolPriority];
  104. ULONG MmPoolFailureReasons[MmMaximumFailureReason];
  105. typedef enum _MM_PREEMPTIVE_TRIMS {
  106. MmPreemptForNonPaged,
  107. MmPreemptForPaged,
  108. MmPreemptForNonPagedPriority,
  109. MmPreemptForPagedPriority,
  110. MmMaximumPreempt
  111. } MM_PREEMPTIVE_TRIMS;
  112. ULONG MmPreemptiveTrims[MmMaximumPreempt];
  113. VOID
  114. MiProtectFreeNonPagedPool (
  115. IN PVOID VirtualAddress,
  116. IN ULONG SizeInPages
  117. )
  118. /*++
  119. Routine Description:
  120. This function protects freed nonpaged pool.
  121. Arguments:
  122. VirtualAddress - Supplies the freed pool address to protect.
  123. SizeInPages - Supplies the size of the request in pages.
  124. Return Value:
  125. None.
  126. Environment:
  127. Kernel mode.
  128. --*/
  129. {
  130. MMPTE PteContents;
  131. PMMPTE PointerPte;
  132. PMMPTE LastPte;
  133. MMPTE_FLUSH_LIST PteFlushList;
  134. PteFlushList.Count = 0;
  135. //
  136. // Prevent anyone from touching the free non paged pool.
  137. //
  138. if (MI_IS_PHYSICAL_ADDRESS (VirtualAddress) == 0) {
  139. PointerPte = MiGetPteAddress (VirtualAddress);
  140. LastPte = PointerPte + SizeInPages;
  141. do {
  142. PteContents = *PointerPte;
  143. PteContents.u.Hard.Valid = 0;
  144. PteContents.u.Soft.Prototype = 1;
  145. MI_WRITE_INVALID_PTE (PointerPte, PteContents);
  146. if (PteFlushList.Count < MM_MAXIMUM_FLUSH_COUNT) {
  147. PteFlushList.FlushVa[PteFlushList.Count] = VirtualAddress;
  148. PteFlushList.Count += 1;
  149. }
  150. VirtualAddress = (PVOID)((PCHAR)VirtualAddress + PAGE_SIZE);
  151. PointerPte += 1;
  152. } while (PointerPte < LastPte);
  153. }
  154. if (PteFlushList.Count != 0) {
  155. MiFlushPteList (&PteFlushList, TRUE);
  156. }
  157. }
  158. LOGICAL
  159. MiUnProtectFreeNonPagedPool (
  160. IN PVOID VirtualAddress,
  161. IN ULONG SizeInPages
  162. )
  163. /*++
  164. Routine Description:
  165. This function unprotects freed nonpaged pool.
  166. Arguments:
  167. VirtualAddress - Supplies the freed pool address to unprotect.
  168. SizeInPages - Supplies the size of the request in pages - zero indicates
  169. to keep going until there are no more protected PTEs (ie: the
  170. caller doesn't know how many protected PTEs there are).
  171. Return Value:
  172. TRUE if pages were unprotected, FALSE if not.
  173. Environment:
  174. Kernel mode.
  175. --*/
  176. {
  177. PMMPTE PointerPte;
  178. MMPTE PteContents;
  179. ULONG PagesDone;
  180. PagesDone = 0;
  181. //
  182. // Unprotect the previously freed pool so it can be manipulated
  183. //
  184. if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress) == 0) {
  185. PointerPte = MiGetPteAddress((PVOID)VirtualAddress);
  186. PteContents = *PointerPte;
  187. while (PteContents.u.Hard.Valid == 0 && PteContents.u.Soft.Prototype == 1) {
  188. PteContents.u.Hard.Valid = 1;
  189. PteContents.u.Soft.Prototype = 0;
  190. MI_WRITE_VALID_PTE (PointerPte, PteContents);
  191. PagesDone += 1;
  192. if (PagesDone == SizeInPages) {
  193. break;
  194. }
  195. PointerPte += 1;
  196. PteContents = *PointerPte;
  197. }
  198. }
  199. if (PagesDone == 0) {
  200. return FALSE;
  201. }
  202. return TRUE;
  203. }
  204. VOID
  205. MiProtectedPoolInsertList (
  206. IN PLIST_ENTRY ListHead,
  207. IN PLIST_ENTRY Entry,
  208. IN LOGICAL InsertHead
  209. )
  210. /*++
  211. Routine Description:
  212. This function inserts the entry into the protected list.
  213. Arguments:
  214. ListHead - Supplies the list head to add onto.
  215. Entry - Supplies the list entry to insert.
  216. InsertHead - If TRUE, insert at the head otherwise at the tail.
  217. Return Value:
  218. None.
  219. Environment:
  220. Kernel mode.
  221. --*/
  222. {
  223. PVOID FreeFlink;
  224. PVOID FreeBlink;
  225. PVOID VirtualAddress;
  226. //
  227. // Either the flink or the blink may be pointing
  228. // at protected nonpaged pool. Unprotect now.
  229. //
  230. FreeFlink = (PVOID)0;
  231. FreeBlink = (PVOID)0;
  232. if (IsListEmpty(ListHead) == 0) {
  233. VirtualAddress = (PVOID)ListHead->Flink;
  234. if (MiUnProtectFreeNonPagedPool (VirtualAddress, 1) == TRUE) {
  235. FreeFlink = VirtualAddress;
  236. }
  237. }
  238. if (((PVOID)Entry == ListHead->Blink) == 0) {
  239. VirtualAddress = (PVOID)ListHead->Blink;
  240. if (MiUnProtectFreeNonPagedPool (VirtualAddress, 1) == TRUE) {
  241. FreeBlink = VirtualAddress;
  242. }
  243. }
  244. if (InsertHead == TRUE) {
  245. InsertHeadList (ListHead, Entry);
  246. }
  247. else {
  248. InsertTailList (ListHead, Entry);
  249. }
  250. if (FreeFlink) {
  251. //
  252. // Reprotect the flink.
  253. //
  254. MiProtectFreeNonPagedPool (FreeFlink, 1);
  255. }
  256. if (FreeBlink) {
  257. //
  258. // Reprotect the blink.
  259. //
  260. MiProtectFreeNonPagedPool (FreeBlink, 1);
  261. }
  262. }
  263. VOID
  264. MiProtectedPoolRemoveEntryList (
  265. IN PLIST_ENTRY Entry
  266. )
  267. /*++
  268. Routine Description:
  269. This function unlinks the list pointer from protected freed nonpaged pool.
  270. Arguments:
  271. Entry - Supplies the list entry to remove.
  272. Return Value:
  273. None.
  274. Environment:
  275. Kernel mode.
  276. --*/
  277. {
  278. PVOID FreeFlink;
  279. PVOID FreeBlink;
  280. PVOID VirtualAddress;
  281. //
  282. // Either the flink or the blink may be pointing
  283. // at protected nonpaged pool. Unprotect now.
  284. //
  285. FreeFlink = (PVOID)0;
  286. FreeBlink = (PVOID)0;
  287. if (IsListEmpty(Entry) == 0) {
  288. VirtualAddress = (PVOID)Entry->Flink;
  289. if (MiUnProtectFreeNonPagedPool (VirtualAddress, 1) == TRUE) {
  290. FreeFlink = VirtualAddress;
  291. }
  292. }
  293. if (((PVOID)Entry == Entry->Blink) == 0) {
  294. VirtualAddress = (PVOID)Entry->Blink;
  295. if (MiUnProtectFreeNonPagedPool (VirtualAddress, 1) == TRUE) {
  296. FreeBlink = VirtualAddress;
  297. }
  298. }
  299. RemoveEntryList (Entry);
  300. if (FreeFlink) {
  301. //
  302. // Reprotect the flink.
  303. //
  304. MiProtectFreeNonPagedPool (FreeFlink, 1);
  305. }
  306. if (FreeBlink) {
  307. //
  308. // Reprotect the blink.
  309. //
  310. MiProtectFreeNonPagedPool (FreeBlink, 1);
  311. }
  312. }
  313. VOID
  314. MiTrimSegmentCache (
  315. VOID
  316. )
  317. /*++
  318. Routine Description:
  319. This function initiates trimming of the segment cache.
  320. Arguments:
  321. None.
  322. Return Value:
  323. None.
  324. Environment:
  325. Kernel Mode Only.
  326. --*/
  327. {
  328. KIRQL OldIrql;
  329. LOGICAL SignalDereferenceThread;
  330. LOGICAL SignalSystemCache;
  331. SignalDereferenceThread = FALSE;
  332. SignalSystemCache = FALSE;
  333. LOCK_PFN2 (OldIrql);
  334. if (MmUnusedSegmentForceFree == 0) {
  335. if (!IsListEmpty(&MmUnusedSegmentList)) {
  336. SignalDereferenceThread = TRUE;
  337. MmUnusedSegmentForceFree = MmUnusedSegmentForceFreeDefault;
  338. }
  339. else {
  340. if (!IsListEmpty(&MmUnusedSubsectionList)) {
  341. SignalDereferenceThread = TRUE;
  342. MmUnusedSegmentForceFree = MmUnusedSegmentForceFreeDefault;
  343. }
  344. if (MiUnusedSubsectionPagedPool < 4 * PAGE_SIZE) {
  345. //
  346. // No unused segments and tossable subsection usage is low as
  347. // well. Start unmapping system cache views in an attempt
  348. // to get back the paged pool containing its prototype PTEs.
  349. //
  350. SignalSystemCache = TRUE;
  351. }
  352. }
  353. }
  354. UNLOCK_PFN2 (OldIrql);
  355. if (SignalSystemCache == TRUE) {
  356. if (CcHasInactiveViews() == TRUE) {
  357. if (SignalDereferenceThread == FALSE) {
  358. LOCK_PFN2 (OldIrql);
  359. if (MmUnusedSegmentForceFree == 0) {
  360. SignalDereferenceThread = TRUE;
  361. MmUnusedSegmentForceFree = MmUnusedSegmentForceFreeDefault;
  362. }
  363. UNLOCK_PFN2 (OldIrql);
  364. }
  365. }
  366. }
  367. if (SignalDereferenceThread == TRUE) {
  368. KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);
  369. }
  370. }
  371. POOL_TYPE
  372. MmDeterminePoolType (
  373. IN PVOID VirtualAddress
  374. )
  375. /*++
  376. Routine Description:
  377. This function determines which pool a virtual address resides within.
  378. Arguments:
  379. VirtualAddress - Supplies the virtual address to determine which pool
  380. it resides within.
  381. Return Value:
  382. Returns the POOL_TYPE (PagedPool, NonPagedPool, PagedPoolSession or
  383. NonPagedPoolSession).
  384. Environment:
  385. Kernel Mode Only.
  386. --*/
  387. {
  388. if ((VirtualAddress >= MmPagedPoolStart) &&
  389. (VirtualAddress <= MmPagedPoolEnd)) {
  390. return PagedPool;
  391. }
  392. if (MI_IS_SESSION_POOL_ADDRESS (VirtualAddress) == TRUE) {
  393. return PagedPoolSession;
  394. }
  395. return NonPagedPool;
  396. }
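//
// [Editorial illustration - not part of the original file.] A minimal sketch
// of how a caller holding an arbitrary pool address might use
// MmDeterminePoolType. The wrapper name below is hypothetical; only
// MmDeterminePoolType and the POOL_TYPE values come from the routine above.
//

LOGICAL
ExampleIsPagablePoolAddress (
    IN PVOID VirtualAddress
    )
{
    POOL_TYPE PoolType;

    PoolType = MmDeterminePoolType (VirtualAddress);

    //
    // Paged and session paged pool can be trimmed out of memory, so such
    // addresses may only be referenced at IRQL < DISPATCH_LEVEL.
    //

    if ((PoolType == PagedPool) || (PoolType == PagedPoolSession)) {
        return TRUE;
    }

    return FALSE;
}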
  397. PVOID
  398. MiSessionPoolVector (
  399. VOID
  400. )
  401. /*++
  402. Routine Description:
  403. This function returns the session pool descriptor for the current session.
  404. Arguments:
  405. None.
  406. Return Value:
  407. Pool descriptor.
  408. --*/
  409. {
  410. PAGED_CODE ();
  411. return (PVOID)&MmSessionSpace->PagedPool;
  412. }
  413. SIZE_T
  414. MmAvailablePoolInPages (
  415. IN POOL_TYPE PoolType
  416. )
  417. /*++
  418. Routine Description:
  419. This function returns the number of pages available for the given pool.
  420. Note that it does not account for any executive pool fragmentation.
  421. Arguments:
  422. PoolType - Supplies the type of pool to retrieve information about.
  423. Return Value:
  424. The number of full pool pages remaining.
  425. Environment:
  426. PASSIVE_LEVEL, no mutexes or locks held.
  427. --*/
  428. {
  429. SIZE_T FreePoolInPages;
  430. SIZE_T FreeCommitInPages;
  431. #if !DBG
  432. UNREFERENCED_PARAMETER (PoolType);
  433. #endif
  434. ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);
  435. ASSERT (PoolType == PagedPool);
  436. FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
  437. FreeCommitInPages = MmTotalCommitLimitMaximum - MmTotalCommittedPages;
  438. if (FreePoolInPages > FreeCommitInPages) {
  439. FreePoolInPages = FreeCommitInPages;
  440. }
  441. return FreePoolInPages;
  442. }
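//
// [Editorial illustration - not part of the original file.] A sketch of a
// hypothetical caller that consults MmAvailablePoolInPages before attempting
// a large transient paged pool allocation. The routine name and pool tag are
// invented; ExAllocatePoolWithTag is the standard executive pool interface.
//

PVOID
ExampleAllocateLargePagedBuffer (
    IN SIZE_T NumberOfBytes
    )
{
    SIZE_T FreePoolInPages;

    ASSERT (KeGetCurrentIrql () == PASSIVE_LEVEL);

    FreePoolInPages = MmAvailablePoolInPages (PagedPool);

    if (BYTES_TO_PAGES (NumberOfBytes) >= FreePoolInPages) {

        //
        // Not enough whole pages of paged pool remain - fail the request
        // rather than push the system toward pool exhaustion.
        //

        return NULL;
    }

    return ExAllocatePoolWithTag (PagedPool, NumberOfBytes, 'tseT');
}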
  443. LOGICAL
  444. MmResourcesAvailable (
  445. IN POOL_TYPE PoolType,
  446. IN SIZE_T NumberOfBytes,
  447. IN EX_POOL_PRIORITY Priority
  448. )
  449. /*++
  450. Routine Description:
  451. This function examines various resources to determine if this
  452. pool allocation should be allowed to proceed.
  453. Arguments:
  454. PoolType - Supplies the type of pool to retrieve information about.
  455. NumberOfBytes - Supplies the number of bytes to allocate.
  456. Priority - Supplies an indication as to how important it is that this
  457. request succeed under low available resource conditions.
  458. Return Value:
  459. TRUE if the pool allocation should be allowed to proceed, FALSE if not.
  460. --*/
  461. {
  462. KIRQL OldIrql;
  463. PFN_NUMBER NumberOfPages;
  464. SIZE_T FreePoolInBytes;
  465. LOGICAL Status;
  466. MM_POOL_PRIORITIES Index;
  467. ASSERT (Priority != HighPoolPriority);
  468. ASSERT ((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) == 0);
  469. NumberOfPages = BYTES_TO_PAGES (NumberOfBytes);
  470. if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
  471. FreePoolInBytes = ((MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool) << PAGE_SHIFT);
  472. }
  473. else if (PoolType & SESSION_POOL_MASK) {
  474. FreePoolInBytes = MmSessionPoolSize - (MmSessionSpace->PagedPoolInfo.AllocatedPagedPool << PAGE_SHIFT);
  475. }
  476. else {
  477. FreePoolInBytes = ((MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool) << PAGE_SHIFT);
  478. }
  479. Status = FALSE;
  480. //
  481. // Check available VA space.
  482. //
  483. if (Priority == NormalPoolPriority) {
  484. if ((SIZE_T)NumberOfBytes + 512*1024 > FreePoolInBytes) {
  485. if (PsGetCurrentThread()->MemoryMaker == 0) {
  486. goto nopool;
  487. }
  488. }
  489. }
  490. else {
  491. if ((SIZE_T)NumberOfBytes + 2*1024*1024 > FreePoolInBytes) {
  492. if (PsGetCurrentThread()->MemoryMaker == 0) {
  493. goto nopool;
  494. }
  495. }
  496. }
  497. //
  498. // Paged allocations (session and normal) can also fail for lack of commit.
  499. //
  500. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  501. if (MmTotalCommittedPages + NumberOfPages > MmTotalCommitLimitMaximum) {
  502. if (PsGetCurrentThread()->MemoryMaker == 0) {
  503. MiIssuePageExtendRequestNoWait (NumberOfPages);
  504. goto nopool;
  505. }
  506. }
  507. }
  508. //
  509. // If a substantial amount of free pool is still available, return TRUE now.
  510. //
  511. if (((SIZE_T)NumberOfBytes + 10*1024*1024 < FreePoolInBytes) ||
  512. (MmNumberOfPhysicalPages < 256 * 1024)) {
  513. return TRUE;
  514. }
  515. //
  516. // This pool allocation is permitted, but because we're starting to run low,
  517. // trigger a round of dereferencing in parallel before returning success.
  518. // Note this is only done on machines with at least 1GB of RAM as smaller
  519. // configuration machines will already trigger this due to physical page
  520. // consumption.
  521. //
  522. Status = TRUE;
  523. nopool:
  524. //
  525. // Running low on pool - if this request is not for session pool,
  526. // force unused segment trimming when appropriate.
  527. //
  528. if ((PoolType & SESSION_POOL_MASK) == 0) {
  529. if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
  530. MmPreemptiveTrims[MmPreemptForNonPagedPriority] += 1;
  531. OldIrql = KeAcquireQueuedSpinLock (LockQueueMmNonPagedPoolLock);
  532. KePulseEvent (MiLowNonPagedPoolEvent, 0, FALSE);
  533. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock,
  534. OldIrql);
  535. }
  536. else {
  537. MmPreemptiveTrims[MmPreemptForPagedPriority] += 1;
  538. KeAcquireGuardedMutex (&MmPagedPoolMutex);
  539. KePulseEvent (MiLowPagedPoolEvent, 0, FALSE);
  540. KeReleaseGuardedMutex (&MmPagedPoolMutex);
  541. }
  542. if (MI_UNUSED_SEGMENTS_SURPLUS()) {
  543. KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);
  544. }
  545. else {
  546. MiTrimSegmentCache ();
  547. }
  548. }
  549. if (Status == FALSE) {
  550. //
  551. // Log this failure for debugging purposes.
  552. //
  553. if (Priority == NormalPoolPriority) {
  554. Index = MmNormalPriority;
  555. }
  556. else {
  557. Index = MmLowPriority;
  558. }
  559. if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
  560. MmPoolFailures[MmNonPagedPool][Index] += 1;
  561. }
  562. else if (PoolType & SESSION_POOL_MASK) {
  563. MmPoolFailures[MmSessionPagedPool][Index] += 1;
  564. MmSessionSpace->SessionPoolAllocationFailures[0] += 1;
  565. }
  566. else {
  567. MmPoolFailures[MmPagedPool][Index] += 1;
  568. }
  569. MmPoolFailureReasons[MmPriorityTooLow] += 1;
  570. }
  571. return Status;
  572. }
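//
// [Editorial illustration - not part of the original file.] MmResourcesAvailable
// backs the priority-based allocation path, so a component that can tolerate
// failure under memory pressure expresses that by allocating with
// LowPoolPriority, as sketched below. The routine name and tag are invented;
// ExAllocatePoolWithTagPriority is the documented executive interface.
//

PVOID
ExampleAllocateOptionalCache (
    IN SIZE_T NumberOfBytes
    )
{
    //
    // Low priority requests are refused earlier than normal ones when free
    // pool or commit runs low (note the 2mb versus 512k cushions above), so
    // the caller must be prepared for a NULL return.
    //

    return ExAllocatePoolWithTagPriority (PagedPool,
                                          NumberOfBytes,
                                          'tseT',
                                          LowPoolPriority);
}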
  573. VOID
  574. MiFreeNonPagedPool (
  575. IN PVOID StartingAddress,
  576. IN PFN_NUMBER NumberOfPages
  577. )
  578. /*++
  579. Routine Description:
  580. This function releases virtually mapped nonpaged expansion pool.
  581. Arguments:
  582. StartingAddress - Supplies the starting address.
  583. NumberOfPages - Supplies the number of pages to free.
  584. Return Value:
  585. None.
  586. Environment:
  587. These functions are used by the internal Mm page allocation/free routines
  588. only and should not be called directly.
  589. Mutexes guarding the pool databases must be held when calling
  590. this function.
  591. --*/
  592. {
  593. PFN_NUMBER i;
  594. PMMPFN Pfn1;
  595. PMMPTE PointerPte;
  596. PFN_NUMBER ResAvailToReturn;
  597. PFN_NUMBER PageFrameIndex;
  598. PVOID FlushVa[MM_MAXIMUM_FLUSH_COUNT];
  599. MI_MAKING_MULTIPLE_PTES_INVALID (TRUE);
  600. PointerPte = MiGetPteAddress (StartingAddress);
  601. //
  602. // Return commitment.
  603. //
  604. MiReturnCommitment (NumberOfPages);
  605. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_NONPAGED_POOL_EXPANSION,
  606. NumberOfPages);
  607. ResAvailToReturn = 0;
  608. LOCK_PFN_AT_DPC ();
  609. if (MiExpansionPoolPagesInUse > MiExpansionPoolPagesInitialCharge) {
  610. ResAvailToReturn = MiExpansionPoolPagesInUse - MiExpansionPoolPagesInitialCharge;
  611. }
  612. MiExpansionPoolPagesInUse -= NumberOfPages;
  613. for (i = 0; i < NumberOfPages; i += 1) {
  614. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  615. //
  616. // Set the pointer to the PTE as empty so the page
  617. // is deleted when the reference count goes to zero.
  618. //
  619. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  620. ASSERT (Pfn1->u2.ShareCount == 1);
  621. Pfn1->u2.ShareCount = 0;
  622. MI_SET_PFN_DELETED (Pfn1);
  623. #if DBG
  624. Pfn1->u3.e1.PageLocation = StandbyPageList;
  625. #endif
  626. MiDecrementReferenceCount (Pfn1, PageFrameIndex);
  627. MI_WRITE_INVALID_PTE (PointerPte, ZeroKernelPte);
  628. PointerPte += 1;
  629. }
  630. //
  631. // The PFN lock is not needed for the TB flush - the caller either holds
  632. // the nonpaged pool lock or nothing, but regardless the address range
  633. // cannot be reused until the PTEs are released below.
  634. //
  635. UNLOCK_PFN_FROM_DPC ();
  636. if (NumberOfPages < MM_MAXIMUM_FLUSH_COUNT) {
  637. for (i = 0; i < NumberOfPages; i += 1) {
  638. FlushVa[i] = StartingAddress;
  639. StartingAddress = (PVOID)((PCHAR)StartingAddress + PAGE_SIZE);
  640. }
  641. KeFlushMultipleTb ((ULONG)NumberOfPages, &FlushVa[0], TRUE);
  642. }
  643. else {
  644. KeFlushEntireTb (TRUE, TRUE);
  645. }
  646. KeLowerIrql (DISPATCH_LEVEL);
  647. //
  648. // Generally there is no need to update resident available
  649. // pages at this time as it has all been done during initialization.
  650. // However, only some of the expansion pool was charged at init, so
  651. // calculate how much (if any) resident available page charge to return.
  652. //
  653. if (ResAvailToReturn > NumberOfPages) {
  654. ResAvailToReturn = NumberOfPages;
  655. }
  656. if (ResAvailToReturn != 0) {
  657. MI_INCREMENT_RESIDENT_AVAILABLE (ResAvailToReturn, MM_RESAVAIL_FREE_EXPANSION_NONPAGED_POOL);
  658. }
  659. PointerPte -= NumberOfPages;
  660. MiReleaseSystemPtes (PointerPte,
  661. (ULONG)NumberOfPages,
  662. NonPagedPoolExpansion);
  663. }
  664. LOGICAL
  665. MiFreeAllExpansionNonPagedPool (
  666. VOID
  667. )
  668. /*++
  669. Routine Description:
  670. This function releases all virtually mapped nonpaged expansion pool.
  671. Arguments:
  672. None.
  673. Return Value:
  674. TRUE if pages were freed, FALSE if not.
  675. Environment:
  676. Kernel mode.
  677. --*/
  678. {
  679. ULONG Index;
  680. KIRQL OldIrql;
  681. PLIST_ENTRY Entry;
  682. LOGICAL FreedPool;
  683. PMMFREE_POOL_ENTRY FreePageInfo;
  684. FreedPool = FALSE;
  685. OldIrql = KeAcquireQueuedSpinLock (LockQueueMmNonPagedPoolLock);
  686. for (Index = 0; Index < MI_MAX_FREE_LIST_HEADS; Index += 1) {
  687. Entry = MmNonPagedPoolFreeListHead[Index].Flink;
  688. while (Entry != &MmNonPagedPoolFreeListHead[Index]) {
  689. if (MmProtectFreedNonPagedPool == TRUE) {
  690. MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0);
  691. }
  692. //
  693. // The list is not empty, see if this one is virtually
  694. // mapped.
  695. //
  696. FreePageInfo = CONTAINING_RECORD(Entry,
  697. MMFREE_POOL_ENTRY,
  698. List);
  699. if ((!MI_IS_PHYSICAL_ADDRESS(FreePageInfo)) &&
  700. ((PVOID)FreePageInfo >= MmNonPagedPoolExpansionStart)) {
  701. if (MmProtectFreedNonPagedPool == FALSE) {
  702. RemoveEntryList (&FreePageInfo->List);
  703. }
  704. else {
  705. MiProtectedPoolRemoveEntryList (&FreePageInfo->List);
  706. }
  707. MmNumberOfFreeNonPagedPool -= FreePageInfo->Size;
  708. ASSERT ((LONG)MmNumberOfFreeNonPagedPool >= 0);
  709. FreedPool = TRUE;
  710. MiFreeNonPagedPool ((PVOID)FreePageInfo,
  711. FreePageInfo->Size);
  712. Index = (ULONG)-1;
  713. break;
  714. }
  715. Entry = FreePageInfo->List.Flink;
  716. if (MmProtectFreedNonPagedPool == TRUE) {
  717. MiProtectFreeNonPagedPool ((PVOID)FreePageInfo,
  718. (ULONG)FreePageInfo->Size);
  719. }
  720. }
  721. }
  722. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock, OldIrql);
  723. return FreedPool;
  724. }
  725. VOID
  726. MiMarkPoolLargeSession (
  727. IN PVOID VirtualAddress
  728. )
  729. /*++
  730. Routine Description:
  731. This function marks a NONPAGED pool allocation as being of
  732. type large session.
  733. Arguments:
  734. VirtualAddress - Supplies the virtual address of the pool allocation.
  735. Return Value:
  736. None.
  737. Environment:
  738. This function is used by the general pool allocation routines
  739. and should not be called directly.
  740. Kernel mode, IRQL <= DISPATCH_LEVEL.
  741. --*/
  742. {
  743. KIRQL OldIrql;
  744. PMMPFN Pfn1;
  745. PMMPTE PointerPte;
  746. PFN_NUMBER PageFrameIndex;
  747. ASSERT (PAGE_ALIGN (VirtualAddress) == VirtualAddress);
  748. if (MI_IS_PHYSICAL_ADDRESS (VirtualAddress)) {
  749. //
  750. // On certain architectures, virtual addresses
  751. // may be physical and hence have no corresponding PTE.
  752. //
  753. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (VirtualAddress);
  754. }
  755. else {
  756. PointerPte = MiGetPteAddress (VirtualAddress);
  757. ASSERT (PointerPte->u.Hard.Valid == 1);
  758. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  759. }
  760. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  761. LOCK_PFN2 (OldIrql);
  762. ASSERT (Pfn1->u3.e1.StartOfAllocation == 1);
  763. ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
  764. Pfn1->u3.e1.LargeSessionAllocation = 1;
  765. UNLOCK_PFN2 (OldIrql);
  766. return;
  767. }
  768. LOGICAL
  769. MiIsPoolLargeSession (
  770. IN PVOID VirtualAddress
  771. )
  772. /*++
  773. Routine Description:
  774. This function determines whether the argument nonpaged allocation was
  775. marked as a large session allocation.
  776. Arguments:
  777. VirtualAddress - Supplies the virtual address of the pool allocation.
  778. Return Value:
  779. TRUE if the allocation was marked as a large session allocation, FALSE if not.
  780. Environment:
  781. This function is used by the general pool allocation routines
  782. and should not be called directly.
  783. Kernel mode, IRQL <= DISPATCH_LEVEL.
  784. --*/
  785. {
  786. PMMPFN Pfn1;
  787. PMMPTE PointerPte;
  788. PFN_NUMBER PageFrameIndex;
  789. ASSERT (PAGE_ALIGN (VirtualAddress) == VirtualAddress);
  790. if (MI_IS_PHYSICAL_ADDRESS (VirtualAddress)) {
  791. //
  792. // On certain architectures, virtual addresses
  793. // may be physical and hence have no corresponding PTE.
  794. //
  795. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (VirtualAddress);
  796. }
  797. else {
  798. PointerPte = MiGetPteAddress (VirtualAddress);
  799. ASSERT (PointerPte->u.Hard.Valid == 1);
  800. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  801. }
  802. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  803. ASSERT (Pfn1->u3.e1.StartOfAllocation == 1);
  804. if (Pfn1->u3.e1.LargeSessionAllocation == 0) {
  805. return FALSE;
  806. }
  807. return TRUE;
  808. }
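//
// [Editorial note - assumption, not original source.] These two routines are
// presumably paired by the executive pool code: a whole-page nonpaged
// allocation made on behalf of a session is tagged at allocation time and
// checked at free time so the pages can be credited back to the owning
// session's nonpaged usage, roughly:
//
//      MiMarkPoolLargeSession (BaseVa);                // allocation path
//      ...
//      if (MiIsPoolLargeSession (StartingAddress)) {   // free path
//          ... adjust the session's nonpaged charge ...
//      }
//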
  809. PVOID
  810. MiAllocatePoolPages (
  811. IN POOL_TYPE PoolType,
  812. IN SIZE_T SizeInBytes
  813. )
  814. /*++
  815. Routine Description:
  816. This function allocates a set of pages from the specified pool
  817. and returns the starting virtual address to the caller.
  818. Arguments:
  819. PoolType - Supplies the type of pool from which to obtain pages.
  820. SizeInBytes - Supplies the size of the request in bytes. The actual
  821. size returned is rounded up to a page boundary.
  822. Return Value:
  823. Returns a pointer to the allocated pool, or NULL if no more pool is
  824. available.
  825. Environment:
  826. These functions are used by the general pool allocation routines
  827. and should not be called directly.
  828. Kernel mode, IRQL at DISPATCH_LEVEL.
  829. --*/
  830. {
  831. PFN_NUMBER SizeInPages;
  832. ULONG StartPosition;
  833. ULONG EndPosition;
  834. PMMPTE StartingPte;
  835. PMMPTE PointerPte;
  836. PMMPFN Pfn1;
  837. MMPTE TempPte;
  838. PFN_NUMBER PageFrameIndex;
  839. PVOID BaseVa;
  840. KIRQL OldIrql;
  841. PFN_NUMBER i;
  842. PFN_NUMBER j;
  843. PLIST_ENTRY Entry;
  844. PLIST_ENTRY ListHead;
  845. PLIST_ENTRY LastListHead;
  846. PMMFREE_POOL_ENTRY FreePageInfo;
  847. PMM_SESSION_SPACE SessionSpace;
  848. PMM_PAGED_POOL_INFO PagedPoolInfo;
  849. PVOID VirtualAddress;
  850. PVOID VirtualAddressSave;
  851. ULONG_PTR Index;
  852. ULONG PageTableCount;
  853. PFN_NUMBER FreePoolInPages;
  854. SizeInPages = BYTES_TO_PAGES (SizeInBytes);
  855. #if DBG
  856. if (MmCheckRequestInPages != 0) {
  857. ASSERT (SizeInPages < MmCheckRequestInPages);
  858. }
  859. #endif
  860. if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
  861. if ((SizeInPages == 1) &&
  862. (ExQueryDepthSList (&MiNonPagedPoolSListHead) != 0)) {
  863. BaseVa = InterlockedPopEntrySList (&MiNonPagedPoolSListHead);
  864. if (BaseVa != NULL) {
  865. if (PoolType & POOL_VERIFIER_MASK) {
  866. if (MI_IS_PHYSICAL_ADDRESS(BaseVa)) {
  867. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (BaseVa);
  868. }
  869. else {
  870. PointerPte = MiGetPteAddress(BaseVa);
  871. ASSERT (PointerPte->u.Hard.Valid == 1);
  872. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  873. }
  874. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  875. Pfn1->u4.VerifierAllocation = 1;
  876. }
  877. return BaseVa;
  878. }
  879. }
  880. Index = SizeInPages - 1;
  881. if (Index >= MI_MAX_FREE_LIST_HEADS) {
  882. Index = MI_MAX_FREE_LIST_HEADS - 1;
  883. }
  884. //
  885. // NonPaged pool is linked together through the pages themselves.
  886. //
  887. ListHead = &MmNonPagedPoolFreeListHead[Index];
  888. LastListHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_LIST_HEADS];
  889. OldIrql = KeAcquireQueuedSpinLock (LockQueueMmNonPagedPoolLock);
  890. do {
  891. Entry = ListHead->Flink;
  892. while (Entry != ListHead) {
  893. if (MmProtectFreedNonPagedPool == TRUE) {
  894. MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0);
  895. }
  896. //
  897. // The list is not empty, see if this one has enough space.
  898. //
  899. FreePageInfo = CONTAINING_RECORD(Entry,
  900. MMFREE_POOL_ENTRY,
  901. List);
  902. ASSERT (FreePageInfo->Signature == MM_FREE_POOL_SIGNATURE);
  903. if (FreePageInfo->Size >= SizeInPages) {
  904. //
  905. // This entry has sufficient space, remove
  906. // the pages from the end of the allocation.
  907. //
  908. FreePageInfo->Size -= SizeInPages;
  909. BaseVa = (PVOID)((PCHAR)FreePageInfo +
  910. (FreePageInfo->Size << PAGE_SHIFT));
  911. if (MmProtectFreedNonPagedPool == FALSE) {
  912. RemoveEntryList (&FreePageInfo->List);
  913. }
  914. else {
  915. MiProtectedPoolRemoveEntryList (&FreePageInfo->List);
  916. }
  917. if (FreePageInfo->Size != 0) {
  918. //
  919. // Insert any remainder into the correct list.
  920. //
  921. Index = (ULONG)(FreePageInfo->Size - 1);
  922. if (Index >= MI_MAX_FREE_LIST_HEADS) {
  923. Index = MI_MAX_FREE_LIST_HEADS - 1;
  924. }
  925. if (MmProtectFreedNonPagedPool == FALSE) {
  926. InsertTailList (&MmNonPagedPoolFreeListHead[Index],
  927. &FreePageInfo->List);
  928. }
  929. else {
  930. MiProtectedPoolInsertList (&MmNonPagedPoolFreeListHead[Index],
  931. &FreePageInfo->List,
  932. FALSE);
  933. MiProtectFreeNonPagedPool ((PVOID)FreePageInfo,
  934. (ULONG)FreePageInfo->Size);
  935. }
  936. }
  937. //
  938. // Adjust the number of free pages remaining in the pool.
  939. //
  940. MmNumberOfFreeNonPagedPool -= SizeInPages;
  941. ASSERT ((LONG)MmNumberOfFreeNonPagedPool >= 0);
  942. //
  943. // Mark start and end of allocation in the PFN database.
  944. //
  945. if (MI_IS_PHYSICAL_ADDRESS(BaseVa)) {
  946. //
  947. // On certain architectures, virtual addresses
  948. // may be physical and hence have no corresponding PTE.
  949. //
  950. PointerPte = NULL;
  951. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (BaseVa);
  952. }
  953. else {
  954. PointerPte = MiGetPteAddress(BaseVa);
  955. ASSERT (PointerPte->u.Hard.Valid == 1);
  956. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  957. }
  958. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  959. ASSERT (Pfn1->u3.e1.StartOfAllocation == 0);
  960. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  961. Pfn1->u3.e1.StartOfAllocation = 1;
  962. if (PoolType & POOL_VERIFIER_MASK) {
  963. Pfn1->u4.VerifierAllocation = 1;
  964. }
  965. //
  966. // Calculate the ending PTE's address.
  967. //
  968. if (SizeInPages != 1) {
  969. if (PointerPte == NULL) {
  970. Pfn1 += SizeInPages - 1;
  971. }
  972. else {
  973. PointerPte += SizeInPages - 1;
  974. ASSERT (PointerPte->u.Hard.Valid == 1);
  975. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  976. }
  977. }
  978. ASSERT (Pfn1->u3.e1.EndOfAllocation == 0);
  979. Pfn1->u3.e1.EndOfAllocation = 1;
  980. MmAllocatedNonPagedPool += SizeInPages;
  981. FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
  982. if (FreePoolInPages < MiHighNonPagedPoolThreshold) {
  983. //
  984. // Read the state directly instead of calling
  985. // KeReadStateEvent since we are holding the nonpaged
  986. // pool lock and want to keep instructions at a
  987. // minimum.
  988. //
  989. if (MiHighNonPagedPoolEvent->Header.SignalState != 0) {
  990. KeClearEvent (MiHighNonPagedPoolEvent);
  991. }
  992. if (FreePoolInPages <= MiLowNonPagedPoolThreshold) {
  993. if (MiLowNonPagedPoolEvent->Header.SignalState == 0) {
  994. KeSetEvent (MiLowNonPagedPoolEvent, 0, FALSE);
  995. }
  996. }
  997. }
  998. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock,
  999. OldIrql);
  1000. return BaseVa;
  1001. }
  1002. Entry = FreePageInfo->List.Flink;
  1003. if (MmProtectFreedNonPagedPool == TRUE) {
  1004. MiProtectFreeNonPagedPool ((PVOID)FreePageInfo,
  1005. (ULONG)FreePageInfo->Size);
  1006. }
  1007. }
  1008. ListHead += 1;
  1009. } while (ListHead < LastListHead);
  1010. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock, OldIrql);
  1011. //
  1012. // No more entries on the list, expand nonpaged pool if
  1013. // possible to satisfy this request.
  1014. //
  1015. // If pool is starting to run low then free some page cache up now.
  1016. // While this can never guarantee pool allocations will succeed,
  1017. // it does give allocators a better chance.
  1018. //
  1019. FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
  1020. if (FreePoolInPages < (3 * 1024 * 1024) / PAGE_SIZE) {
  1021. MmPreemptiveTrims[MmPreemptForNonPaged] += 1;
  1022. MiTrimSegmentCache ();
  1023. }
  1024. #if defined (_WIN64)
  1025. if (SizeInPages >= _4gb) {
  1026. return NULL;
  1027. }
  1028. #endif
  1029. //
  1030. // Try to find system PTEs to expand the pool into.
  1031. //
  1032. StartingPte = MiReserveSystemPtes ((ULONG)SizeInPages,
  1033. NonPagedPoolExpansion);
  1034. if (StartingPte == NULL) {
  1035. //
  1036. // There are no free physical PTEs to expand nonpaged pool.
  1037. //
  1038. // Check to see if there are too many unused segments lying
  1039. // around. If so, set an event so they get deleted.
  1040. //
  1041. if (MI_UNUSED_SEGMENTS_SURPLUS()) {
  1042. KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);
  1043. }
  1044. //
  1045. // If there are any cached expansion PTEs, free them now in
  1046. // an attempt to get enough contiguous VA for our caller.
  1047. //
  1048. if ((SizeInPages > 1) && (MmNumberOfFreeNonPagedPool != 0)) {
  1049. if (MiFreeAllExpansionNonPagedPool () == TRUE) {
  1050. StartingPte = MiReserveSystemPtes ((ULONG)SizeInPages,
  1051. NonPagedPoolExpansion);
  1052. }
  1053. }
  1054. if (StartingPte == NULL) {
  1055. MmPoolFailures[MmNonPagedPool][MmHighPriority] += 1;
  1056. MmPoolFailureReasons[MmNonPagedNoPtes] += 1;
  1057. //
  1058. // Running low on pool - force unused segment trimming.
  1059. //
  1060. MiTrimSegmentCache ();
  1061. return NULL;
  1062. }
  1063. }
  1064. //
  1065. // Charge commitment as nonpaged pool uses physical memory.
  1066. //
  1067. if (MiChargeCommitmentCantExpand (SizeInPages, FALSE) == FALSE) {
  1068. if (PsGetCurrentThread()->MemoryMaker == 1) {
  1069. MiChargeCommitmentCantExpand (SizeInPages, TRUE);
  1070. }
  1071. else {
  1072. MiReleaseSystemPtes (StartingPte,
  1073. (ULONG)SizeInPages,
  1074. NonPagedPoolExpansion);
  1075. MmPoolFailures[MmNonPagedPool][MmHighPriority] += 1;
  1076. MmPoolFailureReasons[MmNonPagedNoCommit] += 1;
  1077. MiTrimSegmentCache ();
  1078. return NULL;
  1079. }
  1080. }
  1081. PointerPte = StartingPte;
  1082. i = SizeInPages;
  1083. TempPte = ValidKernelPte;
  1084. MI_ADD_EXECUTE_TO_VALID_PTE_IF_PAE (TempPte);
  1085. OldIrql = KeAcquireQueuedSpinLock (LockQueueMmNonPagedPoolLock);
  1086. MmAllocatedNonPagedPool += SizeInPages;
  1087. LOCK_PFN_AT_DPC ();
  1088. //
  1089. // Make sure we have 1 more than the number of pages
  1090. // requested available.
  1091. //
  1092. if (MmAvailablePages <= SizeInPages) {
  1093. UNLOCK_PFN_FROM_DPC ();
  1094. //
  1095. // There are no free physical pages to expand nonpaged pool.
  1096. //
  1097. MmAllocatedNonPagedPool -= SizeInPages;
  1098. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock, OldIrql);
  1099. MmPoolFailureReasons[MmNonPagedNoPagesAvailable] += 1;
  1100. MmPoolFailures[MmNonPagedPool][MmHighPriority] += 1;
  1101. MiReturnCommitment (SizeInPages);
  1102. MiReleaseSystemPtes (StartingPte,
  1103. (ULONG)SizeInPages,
  1104. NonPagedPoolExpansion);
  1105. MiTrimSegmentCache ();
  1106. return NULL;
  1107. }
  1108. //
  1109. // Charge resident available pages now for any excess.
  1110. //
  1111. MiExpansionPoolPagesInUse += SizeInPages;
  1112. if (MiExpansionPoolPagesInUse > MiExpansionPoolPagesInitialCharge) {
  1113. j = MiExpansionPoolPagesInUse - MiExpansionPoolPagesInitialCharge;
  1114. if (j > SizeInPages) {
  1115. j = SizeInPages;
  1116. }
  1117. if (MI_NONPAGABLE_MEMORY_AVAILABLE() >= (SPFN_NUMBER)j) {
  1118. MI_DECREMENT_RESIDENT_AVAILABLE (j, MM_RESAVAIL_ALLOCATE_EXPANSION_NONPAGED_POOL);
  1119. }
  1120. else {
  1121. MiExpansionPoolPagesInUse -= SizeInPages;
  1122. UNLOCK_PFN_FROM_DPC ();
  1123. MmAllocatedNonPagedPool -= SizeInPages;
  1124. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock, OldIrql);
  1125. MmPoolFailureReasons[MmNonPagedNoResidentAvailable] += 1;
  1126. MmPoolFailures[MmNonPagedPool][MmHighPriority] += 1;
  1127. MiReturnCommitment (SizeInPages);
  1128. MiReleaseSystemPtes (StartingPte,
  1129. (ULONG)SizeInPages,
  1130. NonPagedPoolExpansion);
  1131. MiTrimSegmentCache ();
  1132. return NULL;
  1133. }
  1134. }
  1135. MM_TRACK_COMMIT (MM_DBG_COMMIT_NONPAGED_POOL_EXPANSION, SizeInPages);
  1136. //
  1137. // Expand the pool.
  1138. //
  1139. do {
  1140. PageFrameIndex = MiRemoveAnyPage (
  1141. MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
  1142. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1143. Pfn1->u3.e2.ReferenceCount = 1;
  1144. Pfn1->u2.ShareCount = 1;
  1145. Pfn1->PteAddress = PointerPte;
  1146. Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  1147. Pfn1->u4.PteFrame = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress(PointerPte));
  1148. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1149. Pfn1->u3.e1.CacheAttribute = MiCached;
  1150. Pfn1->u3.e1.LargeSessionAllocation = 0;
  1151. Pfn1->u4.VerifierAllocation = 0;
  1152. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1153. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  1154. PointerPte += 1;
  1155. SizeInPages -= 1;
  1156. } while (SizeInPages > 0);
  1157. Pfn1->u3.e1.EndOfAllocation = 1;
  1158. Pfn1 = MI_PFN_ELEMENT (StartingPte->u.Hard.PageFrameNumber);
  1159. Pfn1->u3.e1.StartOfAllocation = 1;
  1160. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  1161. if (PoolType & POOL_VERIFIER_MASK) {
  1162. Pfn1->u4.VerifierAllocation = 1;
  1163. }
  1164. UNLOCK_PFN_FROM_DPC ();
  1165. FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
  1166. if (FreePoolInPages < MiHighNonPagedPoolThreshold) {
  1167. //
  1168. // Read the state directly instead of calling
  1169. // KeReadStateEvent since we are holding the nonpaged
  1170. // pool lock and want to keep instructions at a
  1171. // minimum.
  1172. //
  1173. if (MiHighNonPagedPoolEvent->Header.SignalState != 0) {
  1174. KeClearEvent (MiHighNonPagedPoolEvent);
  1175. }
  1176. if (FreePoolInPages <= MiLowNonPagedPoolThreshold) {
  1177. if (MiLowNonPagedPoolEvent->Header.SignalState == 0) {
  1178. KeSetEvent (MiLowNonPagedPoolEvent, 0, FALSE);
  1179. }
  1180. }
  1181. }
  1182. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock, OldIrql);
  1183. BaseVa = MiGetVirtualAddressMappedByPte (StartingPte);
  1184. return BaseVa;
  1185. }
  1186. //
  1187. // Paged Pool.
  1188. //
  1189. if ((PoolType & SESSION_POOL_MASK) == 0) {
  1190. //
  1191. // If pool is starting to run low then free some page cache up now.
  1192. // While this can never guarantee pool allocations will succeed,
  1193. // it does give allocators a better chance.
  1194. //
  1195. FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
  1196. if (FreePoolInPages < (5 * 1024 * 1024) / PAGE_SIZE) {
  1197. MmPreemptiveTrims[MmPreemptForPaged] += 1;
  1198. MiTrimSegmentCache ();
  1199. }
  1200. #if DBG
  1201. if (MiClearCache != 0) {
  1202. LARGE_INTEGER CurrentTime;
  1203. KeQueryTickCount(&CurrentTime);
  1204. if ((CurrentTime.LowPart & MiClearCache) == 0) {
  1205. MmPreemptiveTrims[MmPreemptForPaged] += 1;
  1206. MiTrimSegmentCache ();
  1207. }
  1208. }
  1209. #endif
  1210. if ((SizeInPages == 1) &&
  1211. (ExQueryDepthSList (&MiPagedPoolSListHead) != 0)) {
  1212. BaseVa = InterlockedPopEntrySList (&MiPagedPoolSListHead);
  1213. if (BaseVa != NULL) {
  1214. return BaseVa;
  1215. }
  1216. }
  1217. SessionSpace = NULL;
  1218. PagedPoolInfo = &MmPagedPoolInfo;
  1219. KeAcquireGuardedMutex (&MmPagedPoolMutex);
  1220. }
  1221. else {
  1222. SessionSpace = SESSION_GLOBAL (MmSessionSpace);
  1223. PagedPoolInfo = &SessionSpace->PagedPoolInfo;
  1224. KeAcquireGuardedMutex (&SessionSpace->PagedPoolMutex);
  1225. }
  1226. StartPosition = RtlFindClearBitsAndSet (
  1227. PagedPoolInfo->PagedPoolAllocationMap,
  1228. (ULONG)SizeInPages,
  1229. PagedPoolInfo->PagedPoolHint
  1230. );
  1231. if ((StartPosition == NO_BITS_FOUND) &&
  1232. (PagedPoolInfo->PagedPoolHint != 0)) {
  1233. if (MI_UNUSED_SEGMENTS_SURPLUS()) {
  1234. KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);
  1235. }
  1236. //
  1237. // No free bits were found, check from the start of the bit map.
  1238. StartPosition = RtlFindClearBitsAndSet (
  1239. PagedPoolInfo->PagedPoolAllocationMap,
  1240. (ULONG)SizeInPages,
  1241. 0
  1242. );
  1243. }
  1244. if (StartPosition == NO_BITS_FOUND) {
  1245. //
  1246. // No room in pool - attempt to expand the paged pool.
  1247. //
  1248. StartPosition = (((ULONG)SizeInPages - 1) / PTE_PER_PAGE) + 1;
  1249. //
  1250. // Make sure there is enough space to create at least some
  1251. // page table pages. Note if we can create even one it's worth
  1252. // doing as there may be free space in the already existing pool
  1253. // (at the end) - and this can be concatenated with the expanded
  1254. // portion below into one big allocation.
  1255. //
  1256. if (PagedPoolInfo->NextPdeForPagedPoolExpansion >
  1257. MiGetPteAddress (PagedPoolInfo->LastPteForPagedPool)) {
  1258. NoVaSpaceLeft:
  1259. //
  1260. // Can't expand pool any more. If this request is not for session
  1261. // pool, force unused segment trimming when appropriate.
  1262. //
  1263. if (SessionSpace == NULL) {
  1264. KeReleaseGuardedMutex (&MmPagedPoolMutex);
  1265. MmPoolFailures[MmPagedPool][MmHighPriority] += 1;
  1266. MmPoolFailureReasons[MmPagedNoPtes] += 1;
  1267. //
  1268. // Running low on pool - force unused segment trimming.
  1269. //
  1270. MiTrimSegmentCache ();
  1271. return NULL;
  1272. }
  1273. KeReleaseGuardedMutex (&SessionSpace->PagedPoolMutex);
  1274. MmPoolFailures[MmSessionPagedPool][MmHighPriority] += 1;
  1275. MmPoolFailureReasons[MmSessionPagedNoPtes] += 1;
  1276. SessionSpace->SessionPoolAllocationFailures[1] += 1;
  1277. return NULL;
  1278. }
  1279. if (((StartPosition - 1) + PagedPoolInfo->NextPdeForPagedPoolExpansion) >
  1280. MiGetPteAddress (PagedPoolInfo->LastPteForPagedPool)) {
  1281. PageTableCount = (ULONG)(MiGetPteAddress (PagedPoolInfo->LastPteForPagedPool) - PagedPoolInfo->NextPdeForPagedPoolExpansion + 1);
  1282. ASSERT (PageTableCount < StartPosition);
  1283. StartPosition = PageTableCount;
  1284. }
  1285. else {
  1286. PageTableCount = StartPosition;
  1287. }
  1288. if (SessionSpace) {
  1289. TempPte = ValidKernelPdeLocal;
  1290. }
  1291. else {
  1292. TempPte = ValidKernelPde;
  1293. }
  1294. //
  1295. // Charge commitment for the pagetable pages for paged pool expansion.
  1296. //
  1297. if (MiChargeCommitmentCantExpand (PageTableCount, FALSE) == FALSE) {
  1298. if (PsGetCurrentThread()->MemoryMaker == 1) {
  1299. MiChargeCommitmentCantExpand (PageTableCount, TRUE);
  1300. }
  1301. else {
  1302. if (SessionSpace) {
  1303. KeReleaseGuardedMutex (&SessionSpace->PagedPoolMutex);
  1304. }
  1305. else {
  1306. KeReleaseGuardedMutex (&MmPagedPoolMutex);
  1307. }
  1308. MmPoolFailures[MmPagedPool][MmHighPriority] += 1;
  1309. MmPoolFailureReasons[MmPagedNoCommit] += 1;
  1310. MiTrimSegmentCache ();
  1311. return NULL;
  1312. }
  1313. }
  1314. EndPosition = (ULONG)((PagedPoolInfo->NextPdeForPagedPoolExpansion -
  1315. MiGetPteAddress(PagedPoolInfo->FirstPteForPagedPool)) *
  1316. PTE_PER_PAGE);
  1317. //
  1318. // Expand the pool.
  1319. //
  1320. PointerPte = PagedPoolInfo->NextPdeForPagedPoolExpansion;
  1321. VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
  1322. VirtualAddressSave = VirtualAddress;
  1323. LOCK_PFN (OldIrql);
  1324. //
  1325. // Make sure we have 1 more than the number of pages
  1326. // requested available.
  1327. //
  1328. if (MmAvailablePages <= PageTableCount) {
  1329. //
  1330. // There are no free physical pages to expand paged pool.
  1331. //
  1332. UNLOCK_PFN (OldIrql);
  1333. if (SessionSpace == NULL) {
  1334. KeReleaseGuardedMutex (&MmPagedPoolMutex);
  1335. MmPoolFailures[MmPagedPool][MmHighPriority] += 1;
  1336. MmPoolFailureReasons[MmPagedNoPagesAvailable] += 1;
  1337. }
  1338. else {
  1339. KeReleaseGuardedMutex (&SessionSpace->PagedPoolMutex);
  1340. MmPoolFailures[MmSessionPagedPool][MmHighPriority] += 1;
  1341. MmPoolFailureReasons[MmSessionPagedNoPagesAvailable] += 1;
  1342. SessionSpace->SessionPoolAllocationFailures[2] += 1;
  1343. }
  1344. MiReturnCommitment (PageTableCount);
  1345. return NULL;
  1346. }
  1347. MM_TRACK_COMMIT (MM_DBG_COMMIT_PAGED_POOL_PAGETABLE, PageTableCount);
  1348. //
  1349. // Update the count of available resident pages.
  1350. //
  1351. MI_DECREMENT_RESIDENT_AVAILABLE (PageTableCount, MM_RESAVAIL_ALLOCATE_PAGETABLES_FOR_PAGED_POOL);
  1352. //
  1353. // Allocate the page table pages for the pool expansion.
  1354. //
  1355. do {
  1356. ASSERT (PointerPte->u.Hard.Valid == 0);
  1357. PageFrameIndex = MiRemoveAnyPage (
  1358. MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
  1359. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1360. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  1361. //
  1362. // Map valid PDE into system (or session) address space.
  1363. //
  1364. #if (_MI_PAGING_LEVELS >= 3)
  1365. MiInitializePfn (PageFrameIndex, PointerPte, 1);
  1366. #else
  1367. if (SessionSpace) {
  1368. Index = (ULONG)(PointerPte - MiGetPdeAddress (MmSessionBase));
  1369. ASSERT (SessionSpace->PageTables[Index].u.Long == 0);
  1370. SessionSpace->PageTables[Index] = TempPte;
  1371. MiInitializePfnForOtherProcess (PageFrameIndex,
  1372. PointerPte,
  1373. SessionSpace->SessionPageDirectoryIndex);
  1374. MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_ALLOC1, 1);
  1375. }
  1376. else {
  1377. MmSystemPagePtes [((ULONG_PTR)PointerPte &
  1378. (PD_PER_SYSTEM * (sizeof(MMPTE) * PDE_PER_PAGE) - 1)) / sizeof(MMPTE)] = TempPte;
  1379. MiInitializePfnForOtherProcess (PageFrameIndex,
  1380. PointerPte,
  1381. MmSystemPageDirectory[(PointerPte - MiGetPdeAddress(0)) / PDE_PER_PAGE]);
  1382. }
  1383. #endif
  1384. PointerPte += 1;
  1385. VirtualAddress = (PVOID)((PCHAR)VirtualAddress + PAGE_SIZE);
  1386. StartPosition -= 1;
  1387. } while (StartPosition > 0);
  1388. UNLOCK_PFN (OldIrql);
  1389. //
  1390. // Clear the bitmap locations for the expansion area to indicate it
  1391. // is available for consumption.
  1392. //
  1393. RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap,
  1394. EndPosition,
  1395. (ULONG) PageTableCount * PTE_PER_PAGE);
  1396. //
  1397. // Denote where to start the next pool expansion.
  1398. //
  1399. PagedPoolInfo->NextPdeForPagedPoolExpansion += PageTableCount;
  1400. //
  1401. // Mark the PTEs for the expanded pool no-access.
  1402. //
  1403. MiFillMemoryPte (VirtualAddressSave,
  1404. PageTableCount * (PAGE_SIZE / sizeof (MMPTE)),
  1405. MM_KERNEL_NOACCESS_PTE);
  1406. if (SessionSpace) {
  1407. InterlockedExchangeAddSizeT (&SessionSpace->CommittedPages,
  1408. PageTableCount);
  1409. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_PAGETABLE_ALLOC, PageTableCount);
  1410. InterlockedExchangeAddSizeT (&SessionSpace->NonPagablePages,
  1411. PageTableCount);
  1412. }
  1413. //
  1414. // Start searching from the beginning of the bitmap as we may be
  1415. // able to coalesce an earlier entry and only use part of the expansion
  1416. // we just did. This is not only important to reduce fragmentation but
  1417. // in fact, required for the case where we could not expand enough
  1418. // to cover the entire allocation and thus, must coalesce backwards
  1419. // in order to satisfy the request.
  1420. //
  1421. StartPosition = RtlFindClearBitsAndSet (
  1422. PagedPoolInfo->PagedPoolAllocationMap,
  1423. (ULONG)SizeInPages,
  1424. 0);
  1425. if (StartPosition == NO_BITS_FOUND) {
  1426. goto NoVaSpaceLeft;
  1427. }
  1428. }
  1429. //
  1430. // This is paged pool, the start and end can't be saved
  1431. // in the PFN database as the page isn't always resident
  1432. // in memory. The ideal place to save the start and end
  1433. // would be in the prototype PTE, but there are no free
  1434. // bits. To solve this problem, a bitmap which parallels
  1435. // the allocation bitmap exists which contains set bits
  1436. // in the positions where an allocation ends. This
  1437. // allows pages to be deallocated with only their starting
  1438. // address.
  1439. //
  1440. // For sanity's sake, the starting address can be verified
  1441. // from the 2 bitmaps as well. If the page before the starting
  1442. // address is not allocated (bit is zero in allocation bitmap)
  1443. // then this page is obviously a start of an allocation block.
  1444. // If the page before is allocated and the other bit map does
  1445. // not indicate the previous page is the end of an allocation,
  1446. // then the starting address is wrong and a bug check should
  1447. // be issued.
  1448. //
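//
// [Editorial sketch - assumption, not original source.] Recovering an
// allocation's length from just its starting address with the paired
// bitmaps described above amounts to:
//
//      i = StartPosition;
//      while (RtlCheckBit (PagedPoolInfo->EndOfPagedPoolBitmap, i) == 0) {
//          i += 1;
//      }
//      NumberOfPages = i - StartPosition + 1;
//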
  1449. if (SizeInPages == 1) {
  1450. PagedPoolInfo->PagedPoolHint = StartPosition + (ULONG)SizeInPages;
  1451. }
  1452. //
  1453. // If paged pool has been configured as nonpagable, commitment has
  1454. // already been charged so just set the length and return the address.
  1455. //
  1456. if ((MmDisablePagingExecutive & MM_PAGED_POOL_LOCKED_DOWN) &&
  1457. (SessionSpace == NULL)) {
  1458. BaseVa = (PVOID)((PUCHAR)MmPageAlignedPoolBase[PagedPool] +
  1459. ((ULONG_PTR)StartPosition << PAGE_SHIFT));
  1460. #if DBG
  1461. PointerPte = MiGetPteAddress (BaseVa);
  1462. for (i = 0; i < SizeInPages; i += 1) {
  1463. ASSERT (PointerPte->u.Hard.Valid == 1);
  1464. PointerPte += 1;
  1465. }
  1466. #endif
  1467. EndPosition = StartPosition + (ULONG)SizeInPages - 1;
  1468. RtlSetBit (PagedPoolInfo->EndOfPagedPoolBitmap, EndPosition);
  1469. if (PoolType & POOL_VERIFIER_MASK) {
  1470. RtlSetBit (VerifierLargePagedPoolMap, StartPosition);
  1471. }
  1472. InterlockedExchangeAddSizeT (&PagedPoolInfo->AllocatedPagedPool,
  1473. SizeInPages);
  1474. FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
  1475. if (FreePoolInPages < MiHighPagedPoolThreshold) {
  1476. //
  1477. // Read the state directly instead of calling
  1478. // KeReadStateEvent since we are holding the paged
  1479. // pool mutex and want to keep instructions at a
  1480. // minimum.
  1481. //
  1482. if (MiHighPagedPoolEvent->Header.SignalState != 0) {
  1483. KeClearEvent (MiHighPagedPoolEvent);
  1484. }
  1485. if (FreePoolInPages <= MiLowPagedPoolThreshold) {
  1486. if (MiLowPagedPoolEvent->Header.SignalState == 0) {
  1487. KeSetEvent (MiLowPagedPoolEvent, 0, FALSE);
  1488. }
  1489. }
  1490. }
  1491. KeReleaseGuardedMutex (&MmPagedPoolMutex);
  1492. return BaseVa;
  1493. }
  1494. if (MiChargeCommitmentCantExpand (SizeInPages, FALSE) == FALSE) {
  1495. if (PsGetCurrentThread()->MemoryMaker == 1) {
  1496. MiChargeCommitmentCantExpand (SizeInPages, TRUE);
  1497. }
  1498. else {
  1499. RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap,
  1500. StartPosition,
  1501. (ULONG)SizeInPages);
  1502. //
  1503. // Could not commit the page(s), return NULL indicating
  1504. // no pool was allocated. Note that the lack of commit may be due
  1505. // to unused segments and the MmSharedCommit, prototype PTEs, etc
  1506. // associated with them. So force a reduction now.
  1507. //
  1508. if (SessionSpace == NULL) {
  1509. KeReleaseGuardedMutex (&MmPagedPoolMutex);
  1510. MmPoolFailures[MmPagedPool][MmHighPriority] += 1;
  1511. MmPoolFailureReasons[MmPagedNoCommit] += 1;
  1512. }
  1513. else {
  1514. KeReleaseGuardedMutex (&SessionSpace->PagedPoolMutex);
  1515. MmPoolFailures[MmSessionPagedPool][MmHighPriority] += 1;
  1516. MmPoolFailureReasons[MmSessionPagedNoCommit] += 1;
  1517. SessionSpace->SessionPoolAllocationFailures[3] += 1;
  1518. }
  1519. MiIssuePageExtendRequestNoWait (SizeInPages);
  1520. MiTrimSegmentCache ();
  1521. return NULL;
  1522. }
  1523. }
  1524. MM_TRACK_COMMIT (MM_DBG_COMMIT_PAGED_POOL_PAGES, SizeInPages);
  1525. EndPosition = StartPosition + (ULONG)SizeInPages - 1;
  1526. RtlSetBit (PagedPoolInfo->EndOfPagedPoolBitmap, EndPosition);
  1527. if (SessionSpace) {
  1528. KeReleaseGuardedMutex (&SessionSpace->PagedPoolMutex);
  1529. InterlockedExchangeAddSizeT (&SessionSpace->CommittedPages,
  1530. SizeInPages);
  1531. MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_COMMIT_PAGEDPOOL_PAGES, (ULONG)SizeInPages);
  1532. BaseVa = (PVOID)((PCHAR)SessionSpace->PagedPoolStart +
  1533. ((ULONG_PTR)StartPosition << PAGE_SHIFT));
  1534. InterlockedExchangeAddSizeT (&PagedPoolInfo->AllocatedPagedPool,
  1535. SizeInPages);
  1536. }
  1537. else {
  1538. if (PoolType & POOL_VERIFIER_MASK) {
  1539. RtlSetBit (VerifierLargePagedPoolMap, StartPosition);
  1540. }
  1541. InterlockedExchangeAddSizeT (&PagedPoolInfo->AllocatedPagedPool,
  1542. SizeInPages);
  1543. FreePoolInPages = MmSizeOfPagedPoolInPages - PagedPoolInfo->AllocatedPagedPool;
  1544. if (FreePoolInPages < MiHighPagedPoolThreshold) {
  1545. //
  1546. // Read the state directly instead of calling
  1547. // KeReadStateEvent since we are holding the paged
  1548. // pool mutex and want to keep instructions at a
  1549. // minimum.
  1550. //
  1551. if (MiHighPagedPoolEvent->Header.SignalState != 0) {
  1552. KeClearEvent (MiHighPagedPoolEvent);
  1553. }
  1554. if (FreePoolInPages <= MiLowPagedPoolThreshold) {
  1555. if (MiLowPagedPoolEvent->Header.SignalState == 0) {
  1556. KeSetEvent (MiLowPagedPoolEvent, 0, FALSE);
  1557. }
  1558. }
  1559. }
  1560. KeReleaseGuardedMutex (&MmPagedPoolMutex);
  1561. InterlockedExchangeAdd ((PLONG) &MmPagedPoolCommit, (LONG)SizeInPages);
  1562. BaseVa = (PVOID)((PUCHAR)MmPageAlignedPoolBase[PagedPool] +
  1563. ((ULONG_PTR)StartPosition << PAGE_SHIFT));
  1564. }
  1565. InterlockedExchangeAddSizeT (&PagedPoolInfo->PagedPoolCommit,
  1566. SizeInPages);
  1567. #if DBG
  1568. PointerPte = MiGetPteAddress (BaseVa);
  1569. for (i = 0; i < SizeInPages; i += 1) {
  1570. if (*(ULONG *)PointerPte != MM_KERNEL_NOACCESS_PTE) {
  1571. DbgPrint("MiAllocatePoolPages: PP not zero PTE (%p %p %p)\n",
  1572. BaseVa, PointerPte, *PointerPte);
  1573. DbgBreakPoint();
  1574. }
  1575. PointerPte += 1;
  1576. }
  1577. #endif
  1578. TempPte.u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
  1579. MI_ADD_EXECUTE_TO_INVALID_PTE_IF_PAE (TempPte);
  1580. PointerPte = MiGetPteAddress (BaseVa);
  1581. StartingPte = PointerPte + SizeInPages;
  1582. //
  1583. // Fill the PTEs inline instead of using MiFillMemoryPte because on
  1584. // most platforms MiFillMemoryPte degrades to a function call and
  1585. // typically only a small number of PTEs are filled here.
  1586. //
  1587. do {
  1588. MI_WRITE_INVALID_PTE (PointerPte, TempPte);
  1589. PointerPte += 1;
  1590. } while (PointerPte < StartingPte);
  1591. return BaseVa;
  1592. }
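//
// Illustrative sketch (not part of the original module): how a bitmap
// position handed back by the paged pool allocator translates into a
// virtual address, and how the last page of the run is recorded in a
// separate end-of-allocation bitmap so the free path can recover the
// length later. The MiSketch name, the plain ULONG bitmap and the
// PoolStart parameter are hypothetical simplifications, not the real
// RTL_BITMAP structures used above.
//
static
PVOID
MiSketchBitPositionToVa (
    IN PVOID PoolStart,
    IN ULONG StartPosition,
    IN ULONG SizeInPages,
    IN OUT PULONG EndOfAllocationBits
    )
{
    ULONG EndPosition;

    //
    // Remember the last page of the run so that a later free of the
    // starting address can scan forward to this bit to find the length.
    //

    EndPosition = StartPosition + SizeInPages - 1;
    EndOfAllocationBits[EndPosition / 32] |= ((ULONG)1 << (EndPosition % 32));

    //
    // Each bit represents one page, so the base address is simply the
    // pool start plus the bit position scaled by the page size.
    //

    return (PVOID)((PCHAR)PoolStart + ((ULONG_PTR)StartPosition << PAGE_SHIFT));
}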
  1593. ULONG
  1594. MiFreePoolPages (
  1595. IN PVOID StartingAddress
  1596. )
  1597. /*++
  1598. Routine Description:
  1599. This function returns a set of pages back to the pool from
  1600. which they were obtained. Once the pages have been deallocated
  1601. the region provided by the allocation becomes available for
  1602. allocation to other callers, i.e. any data in the region is now
  1603. trashed and cannot be referenced.
  1604. Arguments:
  1605. StartingAddress - Supplies the starting address which was returned
  1606. in a previous call to MiAllocatePoolPages.
  1607. Return Value:
  1608. Returns the number of pages deallocated.
  1609. Environment:
  1610. These functions are used by the general pool allocation routines
  1611. and should not be called directly.
  1612. --*/
  1613. {
  1614. KIRQL OldIrql;
  1615. ULONG StartPosition;
  1616. ULONG Index;
  1617. PFN_NUMBER i;
  1618. PFN_NUMBER NumberOfPages;
  1619. PMMPTE PointerPte;
  1620. PMMPTE StartPte;
  1621. PMMPFN Pfn1;
  1622. PMMPFN StartPfn;
  1623. PMMFREE_POOL_ENTRY Entry;
  1624. PMMFREE_POOL_ENTRY NextEntry;
  1625. PMMFREE_POOL_ENTRY LastEntry;
  1626. PMM_PAGED_POOL_INFO PagedPoolInfo;
  1627. PMM_SESSION_SPACE SessionSpace;
  1628. MMPTE LocalNoAccessPte;
  1629. PFN_NUMBER PagesFreed;
  1630. MMPFNENTRY OriginalPfnFlags;
  1631. ULONG_PTR VerifierAllocation;
  1632. PULONG BitMap;
  1633. PKGUARDED_MUTEX PoolMutex;
  1634. PFN_NUMBER FreePoolInPages;
  1635. #if DBG
  1636. PMMPTE DebugPte;
  1637. PMMPFN DebugPfn;
  1638. PMMPFN LastDebugPfn;
  1639. #endif
  1640. //
  1641. // Determine pool type based on the virtual address of the block
  1642. // to deallocate.
  1643. //
  1644. // This assumes paged pool is virtually contiguous.
  1645. //
  1646. if ((StartingAddress >= MmPagedPoolStart) &&
  1647. (StartingAddress <= MmPagedPoolEnd)) {
  1648. SessionSpace = NULL;
  1649. PagedPoolInfo = &MmPagedPoolInfo;
  1650. StartPosition = (ULONG)(((PCHAR)StartingAddress -
  1651. (PCHAR)MmPageAlignedPoolBase[PagedPool]) >> PAGE_SHIFT);
  1652. PoolMutex = &MmPagedPoolMutex;
  1653. }
  1654. else if (MI_IS_SESSION_POOL_ADDRESS (StartingAddress) == TRUE) {
  1655. SessionSpace = SESSION_GLOBAL (MmSessionSpace);
  1656. ASSERT (SessionSpace);
  1657. PagedPoolInfo = &SessionSpace->PagedPoolInfo;
  1658. StartPosition = (ULONG)(((PCHAR)StartingAddress -
  1659. (PCHAR)SessionSpace->PagedPoolStart) >> PAGE_SHIFT);
  1660. PoolMutex = &SessionSpace->PagedPoolMutex;
  1661. }
  1662. else {
  1663. if (StartingAddress < MM_SYSTEM_RANGE_START) {
  1664. KeBugCheckEx (BAD_POOL_CALLER,
  1665. 0x40,
  1666. (ULONG_PTR)StartingAddress,
  1667. (ULONG_PTR)MM_SYSTEM_RANGE_START,
  1668. 0);
  1669. }
  1670. StartPosition = (ULONG)(((PCHAR)StartingAddress -
  1671. (PCHAR)MmPageAlignedPoolBase[NonPagedPool]) >> PAGE_SHIFT);
  1672. //
  1673. // Check to ensure this page is really the start of an allocation.
  1674. //
  1675. if (MI_IS_PHYSICAL_ADDRESS (StartingAddress)) {
  1676. //
  1677. // On certain architectures, virtual addresses
  1678. // may be physical and hence have no corresponding PTE.
  1679. //
  1680. PointerPte = NULL;
  1681. Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (StartingAddress));
  1682. ASSERT (StartPosition < MmExpandedPoolBitPosition);
  1683. if ((StartingAddress < MmNonPagedPoolStart) ||
  1684. (StartingAddress >= MmNonPagedPoolEnd0)) {
  1685. KeBugCheckEx (BAD_POOL_CALLER,
  1686. 0x42,
  1687. (ULONG_PTR)StartingAddress,
  1688. 0,
  1689. 0);
  1690. }
  1691. }
  1692. else {
  1693. PointerPte = MiGetPteAddress (StartingAddress);
  1694. if (((StartingAddress >= MmNonPagedPoolExpansionStart) &&
  1695. (StartingAddress < MmNonPagedPoolEnd)) ||
  1696. ((StartingAddress >= MmNonPagedPoolStart) &&
  1697. (StartingAddress < MmNonPagedPoolEnd0))) {
  1698. NOTHING;
  1699. }
  1700. else {
  1701. KeBugCheckEx (BAD_POOL_CALLER,
  1702. 0x43,
  1703. (ULONG_PTR)StartingAddress,
  1704. 0,
  1705. 0);
  1706. }
  1707. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  1708. }
  1709. if (Pfn1->u3.e1.StartOfAllocation == 0) {
  1710. KeBugCheckEx (BAD_POOL_CALLER,
  1711. 0x41,
  1712. (ULONG_PTR) StartingAddress,
  1713. (ULONG_PTR) MI_PFN_ELEMENT_TO_INDEX (Pfn1),
  1714. MmHighestPhysicalPage);
  1715. }
  1716. ASSERT (Pfn1->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  1717. //
  1718. // Hang single page allocations off our slist header.
  1719. //
  1720. if ((Pfn1->u3.e1.EndOfAllocation == 1) &&
  1721. (Pfn1->u4.VerifierAllocation == 0) &&
  1722. (Pfn1->u3.e1.LargeSessionAllocation == 0) &&
  1723. (ExQueryDepthSList (&MiNonPagedPoolSListHead) < MiNonPagedPoolSListMaximum)) {
  1724. InterlockedPushEntrySList (&MiNonPagedPoolSListHead,
  1725. (PSLIST_ENTRY) StartingAddress);
  1726. return 1;
  1727. }
  1728. //
  1729. // The nonpaged pool being freed may be the target of a delayed unlock.
  1730. // Since these pages may be immediately released, force any pending
  1731. // delayed actions to occur now.
  1732. //
  1733. #if !defined(MI_MULTINODE)
  1734. if (MmPfnDeferredList != NULL) {
  1735. MiDeferredUnlockPages (0);
  1736. }
  1737. #else
  1738. //
  1739. // Each and every node's deferred list would have to be checked so
  1740. // we might as well go the long way and just call.
  1741. //
  1742. MiDeferredUnlockPages (0);
  1743. #endif
  1744. StartPfn = Pfn1;
  1745. OriginalPfnFlags = Pfn1->u3.e1;
  1746. VerifierAllocation = Pfn1->u4.VerifierAllocation;
  1747. #if DBG
  1748. if ((Pfn1->u3.e2.ReferenceCount > 1) &&
  1749. (Pfn1->u3.e1.WriteInProgress == 0)) {
  1750. DbgPrint ("MM: MiFreePoolPages - deleting pool locked for I/O %p\n",
  1751. Pfn1);
  1752. ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
  1753. }
  1754. #endif
  1755. //
  1756. // Find end of allocation and release the pages.
  1757. //
  1758. if (PointerPte == NULL) {
  1759. while (Pfn1->u3.e1.EndOfAllocation == 0) {
  1760. Pfn1 += 1;
  1761. #if DBG
  1762. if ((Pfn1->u3.e2.ReferenceCount > 1) &&
  1763. (Pfn1->u3.e1.WriteInProgress == 0)) {
  1764. DbgPrint ("MM:MiFreePoolPages - deleting pool locked for I/O %p\n", Pfn1);
  1765. ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
  1766. }
  1767. #endif
  1768. }
  1769. NumberOfPages = Pfn1 - StartPfn + 1;
  1770. }
  1771. else {
  1772. StartPte = PointerPte;
  1773. while (Pfn1->u3.e1.EndOfAllocation == 0) {
  1774. PointerPte += 1;
  1775. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  1776. #if DBG
  1777. if ((Pfn1->u3.e2.ReferenceCount > 1) &&
  1778. (Pfn1->u3.e1.WriteInProgress == 0)) {
  1779. DbgPrint ("MM:MiFreePoolPages - deleting pool locked for I/O %p\n", Pfn1);
  1780. ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
  1781. }
  1782. #endif
  1783. }
  1784. NumberOfPages = PointerPte - StartPte + 1;
  1785. }
  1786. if (VerifierAllocation != 0) {
  1787. VerifierFreeTrackedPool (StartingAddress,
  1788. NumberOfPages << PAGE_SHIFT,
  1789. NonPagedPool,
  1790. FALSE);
  1791. }
  1792. #if DBG
  1793. if (MiFillFreedPool != 0) {
  1794. RtlFillMemoryUlong (StartingAddress,
  1795. PAGE_SIZE * NumberOfPages,
  1796. MiFillFreedPool);
  1797. }
  1798. #endif
  1799. OldIrql = KeAcquireQueuedSpinLock (LockQueueMmNonPagedPoolLock);
  1800. StartPfn->u3.e1.StartOfAllocation = 0;
  1801. StartPfn->u3.e1.LargeSessionAllocation = 0;
  1802. StartPfn->u4.VerifierAllocation = 0;
  1803. MmAllocatedNonPagedPool -= NumberOfPages;
  1804. FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
  1805. if (FreePoolInPages > MiLowNonPagedPoolThreshold) {
  1806. //
  1807. // Read the state directly instead of calling
  1808. // KeReadStateEvent since we are holding the nonpaged
  1809. // pool lock and want to keep instructions at a
  1810. // minimum.
  1811. //
  1812. if (MiLowNonPagedPoolEvent->Header.SignalState != 0) {
  1813. KeClearEvent (MiLowNonPagedPoolEvent);
  1814. }
  1815. if (FreePoolInPages >= MiHighNonPagedPoolThreshold) {
  1816. if (MiHighNonPagedPoolEvent->Header.SignalState == 0) {
  1817. KeSetEvent (MiHighNonPagedPoolEvent, 0, FALSE);
  1818. }
  1819. }
  1820. }
  1821. Pfn1->u3.e1.EndOfAllocation = 0;
  1822. if (StartingAddress > MmNonPagedPoolExpansionStart) {
  1823. //
  1824. // This page was from the expanded pool, should
  1825. // it be freed?
  1826. //
  1827. // NOTE: all pages in the expanded pool area have PTEs
  1828. // so no physical address checks need to be performed.
  1829. //
  1830. if ((NumberOfPages > 3) ||
  1831. (MmNumberOfFreeNonPagedPool > MmFreedExpansionPoolMaximum) ||
  1832. ((MmResidentAvailablePages < 200) &&
  1833. (MiExpansionPoolPagesInUse > MiExpansionPoolPagesInitialCharge))) {
  1834. //
  1835. // Free these pages back to the free page list.
  1836. //
  1837. MiFreeNonPagedPool (StartingAddress, NumberOfPages);
  1838. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock, OldIrql);
  1839. return (ULONG)NumberOfPages;
  1840. }
  1841. }
  1842. //
  1843. // Add the pages to the list of free pages.
  1844. //
  1845. MmNumberOfFreeNonPagedPool += NumberOfPages;
  1846. //
  1847. // Check to see if the next allocation is free.
  1848. // We cannot walk off the end of nonpaged expansion
  1849. // pages as the highest expansion allocation is always
  1850. // virtual and guard-paged.
  1851. //
  1852. i = NumberOfPages;
  1853. ASSERT (MiEndOfInitialPoolFrame != 0);
  1854. if (MI_PFN_ELEMENT_TO_INDEX (Pfn1) == MiEndOfInitialPoolFrame) {
  1855. PointerPte += 1;
  1856. Pfn1 = NULL;
  1857. }
  1858. else if (PointerPte == NULL) {
  1859. Pfn1 += 1;
  1860. ASSERT ((PCHAR)StartingAddress + NumberOfPages < (PCHAR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes);
  1861. }
  1862. else {
  1863. PointerPte += 1;
  1864. ASSERT ((PCHAR)StartingAddress + NumberOfPages <= (PCHAR)MmNonPagedPoolEnd);
  1865. //
  1866. // Unprotect the previously freed pool so it can be merged.
  1867. //
  1868. if (MmProtectFreedNonPagedPool == TRUE) {
  1869. MiUnProtectFreeNonPagedPool (
  1870. (PVOID)MiGetVirtualAddressMappedByPte(PointerPte),
  1871. 0);
  1872. }
  1873. if (PointerPte->u.Hard.Valid == 1) {
  1874. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  1875. }
  1876. else {
  1877. Pfn1 = NULL;
  1878. }
  1879. }
  1880. if ((Pfn1 != NULL) && (Pfn1->u3.e1.StartOfAllocation == 0)) {
  1881. //
  1882. // This range of pages is free. Remove this entry
  1883. // from the list and add these pages to the current
  1884. // range being freed.
  1885. //
  1886. Entry = (PMMFREE_POOL_ENTRY)((PCHAR)StartingAddress
  1887. + (NumberOfPages << PAGE_SHIFT));
  1888. ASSERT (Entry->Signature == MM_FREE_POOL_SIGNATURE);
  1889. ASSERT (Entry->Owner == Entry);
  1890. #if DBG
  1891. if (PointerPte == NULL) {
  1892. ASSERT (MI_IS_PHYSICAL_ADDRESS(StartingAddress));
  1893. //
  1894. // On certain architectures, virtual addresses
  1895. // may be physical and hence have no corresponding PTE.
  1896. //
  1897. DebugPfn = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (Entry));
  1898. DebugPfn += Entry->Size;
  1899. if (MI_PFN_ELEMENT_TO_INDEX (DebugPfn - 1) != MiEndOfInitialPoolFrame) {
  1900. ASSERT (DebugPfn->u3.e1.StartOfAllocation == 1);
  1901. }
  1902. }
  1903. else {
  1904. DebugPte = PointerPte + Entry->Size;
  1905. if ((DebugPte-1)->u.Hard.Valid == 1) {
  1906. DebugPfn = MI_PFN_ELEMENT ((DebugPte-1)->u.Hard.PageFrameNumber);
  1907. if (MI_PFN_ELEMENT_TO_INDEX (DebugPfn) != MiEndOfInitialPoolFrame) {
  1908. if (DebugPte->u.Hard.Valid == 1) {
  1909. DebugPfn = MI_PFN_ELEMENT (DebugPte->u.Hard.PageFrameNumber);
  1910. ASSERT (DebugPfn->u3.e1.StartOfAllocation == 1);
  1911. }
  1912. }
  1913. }
  1914. }
  1915. #endif
  1916. i += Entry->Size;
  1917. if (MmProtectFreedNonPagedPool == FALSE) {
  1918. RemoveEntryList (&Entry->List);
  1919. }
  1920. else {
  1921. MiProtectedPoolRemoveEntryList (&Entry->List);
  1922. }
  1923. }
  1924. //
  1925. // Check to see if the previous page is the end of an allocation.
  1926. // If it is not the end of an allocation, it must be free and
  1927. // therefore this allocation can be tagged onto the end of
  1928. // that allocation.
  1929. //
  1930. // We cannot walk off the beginning of expansion pool because it is
  1931. // guard-paged. If the initial pool is superpaged instead, we are also
  1932. // safe as the must succeed pages always have EndOfAllocation set.
  1933. //
  1934. Entry = (PMMFREE_POOL_ENTRY)StartingAddress;
  1935. ASSERT (MiStartOfInitialPoolFrame != 0);
  1936. if (MI_PFN_ELEMENT_TO_INDEX (StartPfn) == MiStartOfInitialPoolFrame) {
  1937. Pfn1 = NULL;
  1938. }
  1939. else if (PointerPte == NULL) {
  1940. ASSERT (MI_IS_PHYSICAL_ADDRESS(StartingAddress));
  1941. ASSERT (StartingAddress != MmNonPagedPoolStart);
  1942. Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (
  1943. (PVOID)((PCHAR)Entry - PAGE_SIZE)));
  1944. }
  1945. else {
  1946. PointerPte -= NumberOfPages + 1;
  1947. //
  1948. // Unprotect the previously freed pool so it can be merged.
  1949. //
  1950. if (MmProtectFreedNonPagedPool == TRUE) {
  1951. MiUnProtectFreeNonPagedPool (
  1952. (PVOID)MiGetVirtualAddressMappedByPte(PointerPte),
  1953. 0);
  1954. }
  1955. if (PointerPte->u.Hard.Valid == 1) {
  1956. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  1957. }
  1958. else {
  1959. Pfn1 = NULL;
  1960. }
  1961. }
  1962. if (Pfn1 != NULL) {
  1963. if (Pfn1->u3.e1.EndOfAllocation == 0) {
  1964. //
  1965. // This range of pages is free, add these pages to
  1966. // this entry. The owner field points to the address
  1967. // of the list entry which is linked into the free pool
  1968. // pages list.
  1969. //
  1970. Entry = (PMMFREE_POOL_ENTRY)((PCHAR)StartingAddress - PAGE_SIZE);
  1971. ASSERT (Entry->Signature == MM_FREE_POOL_SIGNATURE);
  1972. Entry = Entry->Owner;
  1973. //
  1974. // Unprotect the previously freed pool so we can merge it
  1975. //
  1976. if (MmProtectFreedNonPagedPool == TRUE) {
  1977. MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0);
  1978. }
  1979. //
  1980. // If this entry became larger than MM_SMALL_ALLOCATIONS
  1981. // pages, move it to the tail of the list. This keeps the
  1982. // small allocations at the front of the list.
  1983. //
  1984. if (Entry->Size < MI_MAX_FREE_LIST_HEADS - 1) {
  1985. if (MmProtectFreedNonPagedPool == FALSE) {
  1986. RemoveEntryList (&Entry->List);
  1987. }
  1988. else {
  1989. MiProtectedPoolRemoveEntryList (&Entry->List);
  1990. }
  1991. //
  1992. // Add these pages to the previous entry.
  1993. //
  1994. Entry->Size += i;
  1995. Index = (ULONG)(Entry->Size - 1);
  1996. if (Index >= MI_MAX_FREE_LIST_HEADS) {
  1997. Index = MI_MAX_FREE_LIST_HEADS - 1;
  1998. }
  1999. if (MmProtectFreedNonPagedPool == FALSE) {
  2000. InsertTailList (&MmNonPagedPoolFreeListHead[Index],
  2001. &Entry->List);
  2002. }
  2003. else {
  2004. MiProtectedPoolInsertList (&MmNonPagedPoolFreeListHead[Index],
  2005. &Entry->List,
  2006. Entry->Size < MM_SMALL_ALLOCATIONS ?
  2007. TRUE : FALSE);
  2008. }
  2009. }
  2010. else {
  2011. //
  2012. // Add these pages to the previous entry.
  2013. //
  2014. Entry->Size += i;
  2015. }
  2016. }
  2017. }
  2018. if (Entry == (PMMFREE_POOL_ENTRY)StartingAddress) {
  2019. //
  2020. // This entry was not combined with the previous, insert it
  2021. // into the list.
  2022. //
  2023. Entry->Size = i;
  2024. Index = (ULONG)(Entry->Size - 1);
  2025. if (Index >= MI_MAX_FREE_LIST_HEADS) {
  2026. Index = MI_MAX_FREE_LIST_HEADS - 1;
  2027. }
  2028. if (MmProtectFreedNonPagedPool == FALSE) {
  2029. InsertTailList (&MmNonPagedPoolFreeListHead[Index],
  2030. &Entry->List);
  2031. }
  2032. else {
  2033. MiProtectedPoolInsertList (&MmNonPagedPoolFreeListHead[Index],
  2034. &Entry->List,
  2035. Entry->Size < MM_SMALL_ALLOCATIONS ?
  2036. TRUE : FALSE);
  2037. }
  2038. }
  2039. //
  2040. // Set the owner field in all these pages.
  2041. //
  2042. ASSERT (i != 0);
  2043. NextEntry = (PMMFREE_POOL_ENTRY)StartingAddress;
  2044. LastEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + (i << PAGE_SHIFT));
  2045. do {
  2046. NextEntry->Owner = Entry;
  2047. #if DBG
  2048. NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
  2049. #endif
  2050. NextEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + PAGE_SIZE);
  2051. } while (NextEntry != LastEntry);
  2052. #if DBG
  2053. NextEntry = Entry;
  2054. if (PointerPte == NULL) {
  2055. ASSERT (MI_IS_PHYSICAL_ADDRESS(StartingAddress));
  2056. DebugPfn = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (NextEntry));
  2057. LastDebugPfn = DebugPfn + Entry->Size;
  2058. for ( ; DebugPfn < LastDebugPfn; DebugPfn += 1) {
  2059. ASSERT ((DebugPfn->u3.e1.StartOfAllocation == 0) &&
  2060. (DebugPfn->u3.e1.EndOfAllocation == 0));
  2061. ASSERT (NextEntry->Owner == Entry);
  2062. NextEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + PAGE_SIZE);
  2063. }
  2064. }
  2065. else {
  2066. for (i = 0; i < Entry->Size; i += 1) {
  2067. DebugPte = MiGetPteAddress (NextEntry);
  2068. DebugPfn = MI_PFN_ELEMENT (DebugPte->u.Hard.PageFrameNumber);
  2069. ASSERT ((DebugPfn->u3.e1.StartOfAllocation == 0) &&
  2070. (DebugPfn->u3.e1.EndOfAllocation == 0));
  2071. ASSERT (NextEntry->Owner == Entry);
  2072. NextEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + PAGE_SIZE);
  2073. }
  2074. }
  2075. #endif
  2076. //
  2077. // Prevent anyone from accessing non paged pool after freeing it.
  2078. //
  2079. if (MmProtectFreedNonPagedPool == TRUE) {
  2080. MiProtectFreeNonPagedPool ((PVOID)Entry, (ULONG)Entry->Size);
  2081. }
  2082. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock, OldIrql);
  2083. return (ULONG)NumberOfPages;
  2084. }
  2085. //
  2086. // Paged pool. Need to verify start of allocation using
  2087. // end of allocation bitmap.
  2088. //
  2089. if (!RtlCheckBit (PagedPoolInfo->PagedPoolAllocationMap, StartPosition)) {
  2090. KeBugCheckEx (BAD_POOL_CALLER,
  2091. 0x50,
  2092. (ULONG_PTR)StartingAddress,
  2093. (ULONG_PTR)StartPosition,
  2094. MmSizeOfPagedPoolInBytes);
  2095. }
  2096. #if DBG
  2097. if (StartPosition > 0) {
  2098. KeAcquireGuardedMutex (PoolMutex);
  2099. if (RtlCheckBit (PagedPoolInfo->PagedPoolAllocationMap, StartPosition - 1)) {
  2100. if (!RtlCheckBit (PagedPoolInfo->EndOfPagedPoolBitmap, StartPosition - 1)) {
  2101. //
  2102. // In the middle of an allocation... bugcheck.
  2103. //
  2104. DbgPrint("paged pool in middle of allocation\n");
  2105. KeBugCheckEx (MEMORY_MANAGEMENT,
  2106. 0x41286,
  2107. (ULONG_PTR)PagedPoolInfo->PagedPoolAllocationMap,
  2108. (ULONG_PTR)PagedPoolInfo->EndOfPagedPoolBitmap,
  2109. StartPosition);
  2110. }
  2111. }
  2112. KeReleaseGuardedMutex (PoolMutex);
  2113. }
  2114. #endif
  2115. //
  2116. // Find the last allocated page and check to see if any
  2117. // of the pages being deallocated are in the paging file.
  2118. //
  2119. BitMap = PagedPoolInfo->EndOfPagedPoolBitmap->Buffer;
  2120. i = StartPosition;
  2121. while (!MI_CHECK_BIT (BitMap, i)) {
  2122. i += 1;
  2123. }
  2124. NumberOfPages = i - StartPosition + 1;
  2125. if (SessionSpace == NULL) {
  2126. if (VerifierLargePagedPoolMap != NULL) {
  2127. BitMap = VerifierLargePagedPoolMap->Buffer;
  2128. if (MI_CHECK_BIT (BitMap, StartPosition)) {
  2129. KeAcquireGuardedMutex (&MmPagedPoolMutex);
  2130. ASSERT (MI_CHECK_BIT (BitMap, StartPosition));
  2131. MI_CLEAR_BIT (BitMap, StartPosition);
  2132. KeReleaseGuardedMutex (&MmPagedPoolMutex);
  2133. VerifierFreeTrackedPool (StartingAddress,
  2134. NumberOfPages << PAGE_SHIFT,
  2135. PagedPool,
  2136. FALSE);
  2137. }
  2138. }
  2139. if ((NumberOfPages == 1) &&
  2140. (ExQueryDepthSList (&MiPagedPoolSListHead) < MiPagedPoolSListMaximum)) {
  2141. InterlockedPushEntrySList (&MiPagedPoolSListHead,
  2142. (PSLIST_ENTRY) StartingAddress);
  2143. return 1;
  2144. }
  2145. //
  2146. // If paged pool has been configured as nonpagable, only
  2147. // virtual address space is released.
  2148. //
  2149. if (MmDisablePagingExecutive & MM_PAGED_POOL_LOCKED_DOWN) {
  2150. KeAcquireGuardedMutex (&MmPagedPoolMutex);
  2151. //
  2152. // Clear the end of allocation bit in the bit map.
  2153. //
  2154. RtlClearBit (PagedPoolInfo->EndOfPagedPoolBitmap, (ULONG)i);
  2155. //
  2156. // Clear the allocation bits in the bit map.
  2157. //
  2158. RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap,
  2159. StartPosition,
  2160. (ULONG)NumberOfPages);
  2161. if (StartPosition < PagedPoolInfo->PagedPoolHint) {
  2162. PagedPoolInfo->PagedPoolHint = StartPosition;
  2163. }
  2164. InterlockedExchangeAddSizeT (&PagedPoolInfo->AllocatedPagedPool,
  2165. 0 - NumberOfPages);
  2166. FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
  2167. if (FreePoolInPages > MiLowPagedPoolThreshold) {
  2168. //
  2169. // Read the state directly instead of calling
  2170. // KeReadStateEvent since we are holding the paged
  2171. // pool mutex and want to keep instructions at a
  2172. // minimum.
  2173. //
  2174. if (MiLowPagedPoolEvent->Header.SignalState != 0) {
  2175. KeClearEvent (MiLowPagedPoolEvent);
  2176. }
  2177. if (FreePoolInPages >= MiHighPagedPoolThreshold) {
  2178. if (MiHighPagedPoolEvent->Header.SignalState == 0) {
  2179. KeSetEvent (MiHighPagedPoolEvent, 0, FALSE);
  2180. }
  2181. }
  2182. }
  2183. KeReleaseGuardedMutex (&MmPagedPoolMutex);
  2184. return (ULONG)NumberOfPages;
  2185. }
  2186. }
  2187. LocalNoAccessPte.u.Long = MM_KERNEL_NOACCESS_PTE;
  2188. PointerPte = PagedPoolInfo->FirstPteForPagedPool + StartPosition;
  2189. PagesFreed = MiDeleteSystemPagableVm (PointerPte,
  2190. NumberOfPages,
  2191. LocalNoAccessPte,
  2192. SessionSpace != NULL ? TRUE : FALSE,
  2193. NULL);
  2194. ASSERT (PagesFreed == NumberOfPages);
  2195. //
  2196. // Clear the end of allocation bit in the bit map.
  2197. //
  2198. BitMap = PagedPoolInfo->EndOfPagedPoolBitmap->Buffer;
  2199. KeAcquireGuardedMutex (PoolMutex);
  2200. MI_CLEAR_BIT (BitMap, i);
  2201. //
  2202. // Clear the allocation bits in the bit map.
  2203. //
  2204. RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap,
  2205. StartPosition,
  2206. (ULONG)NumberOfPages);
  2207. if (StartPosition < PagedPoolInfo->PagedPoolHint) {
  2208. PagedPoolInfo->PagedPoolHint = StartPosition;
  2209. }
  2210. if (SessionSpace) {
  2211. KeReleaseGuardedMutex (PoolMutex);
  2212. InterlockedExchangeAddSizeT (&PagedPoolInfo->AllocatedPagedPool,
  2213. 0 - NumberOfPages);
  2214. InterlockedExchangeAddSizeT (&SessionSpace->CommittedPages,
  2215. 0 - NumberOfPages);
  2216. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_COMMIT_POOL_FREED,
  2217. (ULONG)NumberOfPages);
  2218. }
  2219. else {
  2220. InterlockedExchangeAddSizeT (&PagedPoolInfo->AllocatedPagedPool,
  2221. 0 - NumberOfPages);
  2222. FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
  2223. if (FreePoolInPages > MiLowPagedPoolThreshold) {
  2224. //
  2225. // Read the state directly instead of calling
  2226. // KeReadStateEvent since we are holding the paged
  2227. // pool mutex and want to keep instructions at a
  2228. // minimum.
  2229. //
  2230. if (MiLowPagedPoolEvent->Header.SignalState != 0) {
  2231. KeClearEvent (MiLowPagedPoolEvent);
  2232. }
  2233. if (FreePoolInPages >= MiHighPagedPoolThreshold) {
  2234. if (MiHighPagedPoolEvent->Header.SignalState == 0) {
  2235. KeSetEvent (MiHighPagedPoolEvent, 0, FALSE);
  2236. }
  2237. }
  2238. }
  2239. KeReleaseGuardedMutex (PoolMutex);
  2240. InterlockedExchangeAdd ((PLONG) &MmPagedPoolCommit,
  2241. (LONG)(0 - NumberOfPages));
  2242. }
  2243. MiReturnCommitment (NumberOfPages);
  2244. InterlockedExchangeAddSizeT (&PagedPoolInfo->PagedPoolCommit,
  2245. 0 - NumberOfPages);
  2246. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_PAGED_POOL_PAGES, NumberOfPages);
  2247. return (ULONG)NumberOfPages;
  2248. }
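//
// Illustrative sketch (not part of the original module): recovering an
// allocation's length from the end-of-allocation bitmap on the free path.
// Starting at the bit for the first page, scan forward until a set bit is
// found; the run length is the distance plus one. The plain ULONG bitmap
// and the MiSketch name are hypothetical simplifications.
//
static
ULONG
MiSketchLengthFromEndBitmap (
    IN PULONG EndOfAllocationBits,
    IN ULONG StartPosition
    )
{
    ULONG i;

    i = StartPosition;

    //
    // The allocator always sets exactly one end bit per allocation, so
    // this scan terminates at the last page of the run being freed.
    //

    while ((EndOfAllocationBits[i / 32] & ((ULONG)1 << (i % 32))) == 0) {
        i += 1;
    }

    return i - StartPosition + 1;
}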
  2249. VOID
  2250. MiInitializePoolEvents (
  2251. VOID
  2252. )
  2253. /*++
  2254. Routine Description:
  2255. This function initializes the pool event states.
  2256. Arguments:
  2257. None.
  2258. Return Value:
  2259. None.
  2260. Environment:
  2261. Kernel mode, during initialization.
  2262. --*/
  2263. {
  2264. KIRQL OldIrql;
  2265. PFN_NUMBER FreePoolInPages;
  2266. //
  2267. // Initialize the paged events.
  2268. //
  2269. KeAcquireGuardedMutex (&MmPagedPoolMutex);
  2270. FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
  2271. if (FreePoolInPages >= MiHighPagedPoolThreshold) {
  2272. KeSetEvent (MiHighPagedPoolEvent, 0, FALSE);
  2273. }
  2274. else {
  2275. KeClearEvent (MiHighPagedPoolEvent);
  2276. }
  2277. if (FreePoolInPages <= MiLowPagedPoolThreshold) {
  2278. KeSetEvent (MiLowPagedPoolEvent, 0, FALSE);
  2279. }
  2280. else {
  2281. KeClearEvent (MiLowPagedPoolEvent);
  2282. }
  2283. KeReleaseGuardedMutex (&MmPagedPoolMutex);
  2284. //
  2285. // Initialize the nonpaged events.
  2286. //
  2287. OldIrql = KeAcquireQueuedSpinLock (LockQueueMmNonPagedPoolLock);
  2288. FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
  2289. if (FreePoolInPages >= MiHighNonPagedPoolThreshold) {
  2290. KeSetEvent (MiHighNonPagedPoolEvent, 0, FALSE);
  2291. }
  2292. else {
  2293. KeClearEvent (MiHighNonPagedPoolEvent);
  2294. }
  2295. if (FreePoolInPages <= MiLowNonPagedPoolThreshold) {
  2296. KeSetEvent (MiLowNonPagedPoolEvent, 0, FALSE);
  2297. }
  2298. else {
  2299. KeClearEvent (MiLowNonPagedPoolEvent);
  2300. }
  2301. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock, OldIrql);
  2302. return;
  2303. }
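//
// Illustrative sketch (not part of the original module): the low/high
// watermark scheme used for the pool notification events. The low event
// should be signaled while free pool is at or below the low threshold,
// the high event while free pool is at or above the high threshold, and
// both should be clear in the band between the two thresholds. The
// MiSketch name and the BOOLEAN out parameters are hypothetical.
//
static
VOID
MiSketchEvaluatePoolWatermarks (
    IN PFN_NUMBER FreePoolInPages,
    IN PFN_NUMBER LowThreshold,
    IN PFN_NUMBER HighThreshold,
    OUT PBOOLEAN SignalLowEvent,
    OUT PBOOLEAN SignalHighEvent
    )
{
    *SignalLowEvent = (BOOLEAN)(FreePoolInPages <= LowThreshold);
    *SignalHighEvent = (BOOLEAN)(FreePoolInPages >= HighThreshold);
}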
  2304. VOID
  2305. MiInitializeNonPagedPool (
  2306. VOID
  2307. )
  2308. /*++
  2309. Routine Description:
  2310. This function initializes the NonPaged pool.
2311. NonPaged pool is linked together through the pages themselves.
  2312. Arguments:
  2313. None.
  2314. Return Value:
  2315. None.
  2316. Environment:
  2317. Kernel mode, during initialization.
  2318. --*/
  2319. {
  2320. PFN_NUMBER PagesInPool;
  2321. PFN_NUMBER Size;
  2322. ULONG Index;
  2323. PMMFREE_POOL_ENTRY FreeEntry;
  2324. PMMFREE_POOL_ENTRY FirstEntry;
  2325. PMMPTE PointerPte;
  2326. PVOID EndOfInitialPool;
  2327. PFN_NUMBER PageFrameIndex;
  2328. PAGED_CODE();
  2329. //
  2330. // Initialize the slist heads for free pages (both paged & nonpaged).
  2331. //
  2332. InitializeSListHead (&MiPagedPoolSListHead);
  2333. InitializeSListHead (&MiNonPagedPoolSListHead);
  2334. if (MmNumberOfPhysicalPages >= (2*1024*((1024*1024)/PAGE_SIZE))) {
  2335. MiNonPagedPoolSListMaximum <<= 3;
  2336. MiPagedPoolSListMaximum <<= 3;
  2337. }
  2338. else if (MmNumberOfPhysicalPages >= (1*1024*((1024*1024)/PAGE_SIZE))) {
  2339. MiNonPagedPoolSListMaximum <<= 1;
  2340. MiPagedPoolSListMaximum <<= 1;
  2341. }
  2342. //
  2343. // If the verifier or special pool is enabled, then disable lookasides so
  2344. // driver bugs can be found more quickly.
  2345. //
  2346. if ((MmVerifyDriverBufferLength != (ULONG)-1) ||
  2347. (MmProtectFreedNonPagedPool == TRUE) ||
  2348. ((MmSpecialPoolTag != 0) && (MmSpecialPoolTag != (ULONG)-1))) {
  2349. MiNonPagedPoolSListMaximum = 0;
  2350. MiPagedPoolSListMaximum = 0;
  2351. }
  2352. //
  2353. // Initialize the list heads for free pages.
  2354. //
  2355. for (Index = 0; Index < MI_MAX_FREE_LIST_HEADS; Index += 1) {
  2356. InitializeListHead (&MmNonPagedPoolFreeListHead[Index]);
  2357. }
  2358. //
  2359. // Set up the non paged pool pages.
  2360. //
  2361. FreeEntry = (PMMFREE_POOL_ENTRY) MmNonPagedPoolStart;
  2362. FirstEntry = FreeEntry;
  2363. PagesInPool = BYTES_TO_PAGES (MmSizeOfNonPagedPoolInBytes);
  2364. //
  2365. // Set the location of expanded pool.
  2366. //
  2367. MmExpandedPoolBitPosition = (ULONG) BYTES_TO_PAGES (MmSizeOfNonPagedPoolInBytes);
  2368. MmNumberOfFreeNonPagedPool = PagesInPool;
  2369. Index = (ULONG)(MmNumberOfFreeNonPagedPool - 1);
  2370. if (Index >= MI_MAX_FREE_LIST_HEADS) {
  2371. Index = MI_MAX_FREE_LIST_HEADS - 1;
  2372. }
  2373. InsertHeadList (&MmNonPagedPoolFreeListHead[Index], &FreeEntry->List);
  2374. FreeEntry->Size = PagesInPool;
  2375. #if DBG
  2376. FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
  2377. #endif
  2378. FreeEntry->Owner = FirstEntry;
  2379. while (PagesInPool > 1) {
  2380. FreeEntry = (PMMFREE_POOL_ENTRY)((PCHAR)FreeEntry + PAGE_SIZE);
  2381. #if DBG
  2382. FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
  2383. #endif
  2384. FreeEntry->Owner = FirstEntry;
  2385. PagesInPool -= 1;
  2386. }
  2387. //
  2388. // Initialize the first nonpaged pool PFN.
  2389. //
  2390. if (MI_IS_PHYSICAL_ADDRESS(MmNonPagedPoolStart)) {
  2391. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (MmNonPagedPoolStart);
  2392. }
  2393. else {
  2394. PointerPte = MiGetPteAddress(MmNonPagedPoolStart);
  2395. ASSERT (PointerPte->u.Hard.Valid == 1);
  2396. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  2397. }
  2398. MiStartOfInitialPoolFrame = PageFrameIndex;
  2399. //
  2400. // Set the last nonpaged pool PFN so coalescing on free doesn't go
  2401. // past the end of the initial pool.
  2402. //
  2403. MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes);
  2404. EndOfInitialPool = (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1);
  2405. if (MI_IS_PHYSICAL_ADDRESS(EndOfInitialPool)) {
  2406. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (EndOfInitialPool);
  2407. }
  2408. else {
  2409. PointerPte = MiGetPteAddress(EndOfInitialPool);
  2410. ASSERT (PointerPte->u.Hard.Valid == 1);
  2411. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  2412. }
  2413. MiEndOfInitialPoolFrame = PageFrameIndex;
  2414. //
  2415. // Set up the system PTEs for nonpaged pool expansion.
  2416. //
  2417. PointerPte = MiGetPteAddress (MmNonPagedPoolExpansionStart);
  2418. ASSERT (PointerPte->u.Hard.Valid == 0);
  2419. #if defined (_WIN64)
  2420. Size = BYTES_TO_PAGES ((ULONG_PTR)MmNonPagedPoolEnd - (ULONG_PTR)MmNonPagedPoolExpansionStart);
  2421. #else
  2422. Size = BYTES_TO_PAGES (MmMaximumNonPagedPoolInBytes -
  2423. MmSizeOfNonPagedPoolInBytes);
  2424. #endif
  2425. //
  2426. // Insert a guard PTE at the top and bottom of expanded nonpaged pool.
  2427. //
  2428. Size -= 2;
  2429. PointerPte += 1;
  2430. ASSERT (MiExpansionPoolPagesInUse == 0);
  2431. //
  2432. // Initialize the nonpaged pool expansion resident available initial charge.
  2433. // Note that MmResidentAvailablePages & MmAvailablePages are not initialized
  2434. // yet, but this amount is subtracted when MmResidentAvailablePages is
  2435. // initialized later.
  2436. //
  2437. MiExpansionPoolPagesInitialCharge = Size;
  2438. if (Size > MmNumberOfPhysicalPages / 6) {
  2439. MiExpansionPoolPagesInitialCharge = MmNumberOfPhysicalPages / 6;
  2440. }
  2441. MiInitializeSystemPtes (PointerPte, Size, NonPagedPoolExpansion);
  2442. //
  2443. // A guard PTE is built at the top by our caller. This allows us to
  2444. // freely increment virtual addresses in MiFreePoolPages and just check
  2445. // for a blank PTE.
  2446. //
  2447. }
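//
// Illustrative sketch (not part of the original module): how a free run of
// nonpaged pool pages is mapped to one of the MI_MAX_FREE_LIST_HEADS list
// heads. A run of N pages goes on list N-1 so small requests can be
// satisfied without walking large entries, and any run too big for a
// dedicated head falls into the last (catch-all) list. The MiSketch name
// is hypothetical.
//
static
ULONG
MiSketchFreeListIndex (
    IN PFN_NUMBER SizeInPages
    )
{
    ULONG Index;

    Index = (ULONG)(SizeInPages - 1);

    if (Index >= MI_MAX_FREE_LIST_HEADS) {
        Index = MI_MAX_FREE_LIST_HEADS - 1;
    }

    return Index;
}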
  2448. VOID
  2449. MiCheckSessionPoolAllocations (
  2450. VOID
  2451. )
  2452. /*++
  2453. Routine Description:
  2454. Ensure that the current session has no pool allocations since it is about
  2455. to exit. All session allocations must be freed prior to session exit.
  2456. Arguments:
  2457. None.
  2458. Return Value:
  2459. None.
  2460. Environment:
  2461. Kernel mode.
  2462. --*/
  2463. {
  2464. SIZE_T i;
  2465. ULONG PagedAllocations;
  2466. ULONG NonPagedAllocations;
  2467. SIZE_T PagedBytes;
  2468. SIZE_T NonPagedBytes;
  2469. PMMPTE StartPde;
  2470. PMMPTE EndPde;
  2471. PMMPTE PointerPte;
  2472. PVOID VirtualAddress;
  2473. PPOOL_TRACKER_TABLE TrackTable;
  2474. PPOOL_TRACKER_TABLE TrackTableBase;
  2475. SIZE_T NumberOfEntries;
  2476. PAGED_CODE();
  2477. TrackTableBase = MiSessionPoolTrackTable ();
  2478. NumberOfEntries = MiSessionPoolTrackTableSize ();
  2479. //
2480. // Note the session pool descriptor TotalPages field is not reliable
2481. // for leak checking because nonpaged session allocations are converted
2482. // to global session allocations - thus when a small nonpaged session
2483. // allocation results in splitting a full page, the global nonpaged pool
2484. // descriptor (not the session pool descriptor) is charged, and must be
2485. // because of the remaining fragment.
  2486. //
  2487. //
  2488. // Make sure all the pool tracking entries are zeroed out.
  2489. //
  2490. PagedAllocations = 0;
  2491. NonPagedAllocations = 0;
  2492. PagedBytes = 0;
  2493. NonPagedBytes = 0;
  2494. TrackTable = TrackTableBase;
  2495. for (i = 0; i < NumberOfEntries; i += 1) {
  2496. PagedBytes += TrackTable->PagedBytes;
  2497. NonPagedBytes += TrackTable->NonPagedBytes;
  2498. PagedAllocations += (TrackTable->PagedAllocs - TrackTable->PagedFrees);
  2499. NonPagedAllocations += (TrackTable->NonPagedAllocs - TrackTable->NonPagedFrees);
  2500. TrackTable += 1;
  2501. }
  2502. if (PagedBytes != 0) {
  2503. //
  2504. // All page tables for this session's paged pool must be freed by now.
  2505. // Being here means they aren't - this is fatal. Force in any valid
  2506. // pages so that a debugger can show who the guilty party is.
  2507. //
  2508. StartPde = MiGetPdeAddress (MmSessionSpace->PagedPoolStart);
  2509. EndPde = MiGetPdeAddress (MmSessionSpace->PagedPoolEnd);
  2510. while (StartPde <= EndPde) {
  2511. if (StartPde->u.Long != 0 && StartPde->u.Long != MM_KERNEL_NOACCESS_PTE) {
  2512. //
  2513. // Hunt through the page table page for valid pages and force
  2514. // them in. Note this also forces in the page table page if
  2515. // it is not already.
  2516. //
  2517. PointerPte = MiGetVirtualAddressMappedByPte (StartPde);
  2518. for (i = 0; i < PTE_PER_PAGE; i += 1) {
  2519. if (PointerPte->u.Long != 0 && PointerPte->u.Long != MM_KERNEL_NOACCESS_PTE) {
  2520. VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
  2521. *(volatile UCHAR *)VirtualAddress = *(volatile UCHAR *)VirtualAddress;
  2522. }
  2523. PointerPte += 1;
  2524. }
  2525. }
  2526. StartPde += 1;
  2527. }
  2528. }
  2529. if ((NonPagedBytes != 0) || (PagedBytes != 0)) {
  2530. KeBugCheckEx (SESSION_HAS_VALID_POOL_ON_EXIT,
  2531. (ULONG_PTR)MmSessionSpace->SessionId,
  2532. PagedBytes,
  2533. NonPagedBytes,
  2534. #if defined (_WIN64)
2535. (((ULONG_PTR)NonPagedAllocations << 32) | (PagedAllocations))
  2536. #else
  2537. (NonPagedAllocations << 16) | (PagedAllocations)
  2538. #endif
  2539. );
  2540. }
  2541. #if DBG
  2542. TrackTable = TrackTableBase;
  2543. for (i = 0; i < NumberOfEntries; i += 1) {
  2544. ASSERT (TrackTable->NonPagedBytes == 0);
  2545. ASSERT (TrackTable->PagedBytes == 0);
  2546. ASSERT (TrackTable->NonPagedAllocs == TrackTable->NonPagedFrees);
  2547. ASSERT (TrackTable->PagedAllocs == TrackTable->PagedFrees);
  2548. if (TrackTable->Key == 0) {
  2549. ASSERT (TrackTable->NonPagedAllocs == 0);
  2550. ASSERT (TrackTable->PagedAllocs == 0);
  2551. }
  2552. TrackTable += 1;
  2553. }
  2554. ASSERT (MmSessionSpace->PagedPool.TotalPages == 0);
  2555. ASSERT (MmSessionSpace->PagedPool.TotalBigPages == 0);
  2556. ASSERT (MmSessionSpace->PagedPool.RunningAllocs ==
  2557. MmSessionSpace->PagedPool.RunningDeAllocs);
  2558. #endif
  2559. }
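//
// Illustrative sketch (not part of the original module): the leak check
// above reduces to summing outstanding allocations and bytes across the
// tracker table and failing if anything is still charged. The MiSketch
// name and the SKETCH_TRACKER structure are hypothetical stand-ins for
// the real POOL_TRACKER_TABLE entries.
//
typedef struct _SKETCH_TRACKER {
    SIZE_T PagedBytes;
    SIZE_T NonPagedBytes;
    ULONG PagedAllocs;
    ULONG PagedFrees;
    ULONG NonPagedAllocs;
    ULONG NonPagedFrees;
} SKETCH_TRACKER, *PSKETCH_TRACKER;

static
LOGICAL
MiSketchSessionPoolIsLeakFree (
    IN PSKETCH_TRACKER Table,
    IN SIZE_T NumberOfEntries
    )
{
    SIZE_T i;
    SIZE_T Bytes;
    ULONG Outstanding;

    Bytes = 0;
    Outstanding = 0;

    for (i = 0; i < NumberOfEntries; i += 1) {
        Bytes += Table[i].PagedBytes + Table[i].NonPagedBytes;
        Outstanding += (Table[i].PagedAllocs - Table[i].PagedFrees);
        Outstanding += (Table[i].NonPagedAllocs - Table[i].NonPagedFrees);
    }

    return (LOGICAL)((Bytes == 0) && (Outstanding == 0));
}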
  2560. NTSTATUS
  2561. MiInitializeAndChargePfn (
  2562. OUT PPFN_NUMBER PageFrameIndex,
  2563. IN PMMPTE PointerPde,
  2564. IN PFN_NUMBER ContainingPageFrame,
  2565. IN LOGICAL SessionAllocation
  2566. )
  2567. /*++
  2568. Routine Description:
  2569. Nonpaged wrapper to allocate, initialize and charge for a new page.
  2570. Arguments:
  2571. PageFrameIndex - Returns the page frame number which was initialized.
  2572. PointerPde - Supplies the pointer to the PDE to initialize.
  2573. ContainingPageFrame - Supplies the page frame number of the page
  2574. directory page which contains this PDE.
  2575. SessionAllocation - Supplies TRUE if this allocation is in session space,
  2576. FALSE otherwise.
  2577. Return Value:
  2578. Status of the page initialization.
  2579. --*/
  2580. {
  2581. MMPTE TempPte;
  2582. KIRQL OldIrql;
  2583. if (SessionAllocation == TRUE) {
  2584. TempPte = ValidKernelPdeLocal;
  2585. }
  2586. else {
  2587. TempPte = ValidKernelPde;
  2588. }
  2589. LOCK_PFN2 (OldIrql);
  2590. if ((MmAvailablePages < MM_MEDIUM_LIMIT) ||
  2591. (MI_NONPAGABLE_MEMORY_AVAILABLE() <= 1)) {
  2592. UNLOCK_PFN2 (OldIrql);
  2593. return STATUS_NO_MEMORY;
  2594. }
  2595. //
  2596. // Ensure no other thread handled this while this one waited. If one has,
  2597. // then return STATUS_RETRY so the caller knows to try again.
  2598. //
  2599. if (PointerPde->u.Hard.Valid == 1) {
  2600. UNLOCK_PFN2 (OldIrql);
  2601. return STATUS_RETRY;
  2602. }
  2603. MI_DECREMENT_RESIDENT_AVAILABLE (1, MM_RESAVAIL_ALLOCATE_SINGLE_PFN);
  2604. //
  2605. // Allocate and map in the page at the requested address.
  2606. //
  2607. *PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPde));
  2608. TempPte.u.Hard.PageFrameNumber = *PageFrameIndex;
  2609. MI_WRITE_VALID_PTE (PointerPde, TempPte);
  2610. MiInitializePfnForOtherProcess (*PageFrameIndex,
  2611. PointerPde,
  2612. ContainingPageFrame);
  2613. //
  2614. // This page will be locked into working set and assigned an index when
  2615. // the working set is set up on return.
  2616. //
  2617. ASSERT (MI_PFN_ELEMENT(*PageFrameIndex)->u1.WsIndex == 0);
  2618. UNLOCK_PFN2 (OldIrql);
  2619. return STATUS_SUCCESS;
  2620. }
  2621. VOID
  2622. MiSessionPageTableRelease (
  2623. IN PFN_NUMBER PageFrameIndex
  2624. )
  2625. /*++
  2626. Routine Description:
  2627. Nonpaged wrapper to release a session pool page table page.
  2628. Arguments:
2629. PageFrameIndex - Supplies the page frame number of the session pool page table page to release.
  2630. Return Value:
  2631. None.
  2632. --*/
  2633. {
  2634. KIRQL OldIrql;
  2635. PMMPFN Pfn1;
  2636. PMMPFN Pfn2;
  2637. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  2638. Pfn2 = MI_PFN_ELEMENT (Pfn1->u4.PteFrame);
  2639. MI_SET_PFN_DELETED (Pfn1);
  2640. LOCK_PFN (OldIrql);
  2641. ASSERT (MmSessionSpace->SessionPageDirectoryIndex == Pfn1->u4.PteFrame);
  2642. ASSERT (Pfn1->u2.ShareCount == 1);
  2643. MiDecrementShareCount (Pfn2, Pfn1->u4.PteFrame);
  2644. MiDecrementShareCount (Pfn1, PageFrameIndex);
  2645. UNLOCK_PFN (OldIrql);
  2646. MI_INCREMENT_RESIDENT_AVAILABLE (1, MM_RESAVAIL_FREE_SESSION_PAGE_TABLE);
  2647. }
  2648. NTSTATUS
  2649. MiInitializeSessionPool (
  2650. VOID
  2651. )
  2652. /*++
  2653. Routine Description:
  2654. Initialize the current session's pool structure.
  2655. Arguments:
  2656. None.
  2657. Return Value:
  2658. Status of the pool initialization.
  2659. Environment:
  2660. Kernel mode.
  2661. --*/
  2662. {
  2663. PMMPTE PointerPde, PointerPte;
  2664. PFN_NUMBER PageFrameIndex;
  2665. PPOOL_DESCRIPTOR PoolDescriptor;
  2666. PMM_SESSION_SPACE SessionGlobal;
  2667. PMM_PAGED_POOL_INFO PagedPoolInfo;
  2668. NTSTATUS Status;
  2669. #if (_MI_PAGING_LEVELS < 3)
  2670. ULONG Index;
  2671. #endif
  2672. #if DBG
  2673. PMMPTE StartPde;
  2674. PMMPTE EndPde;
  2675. #endif
  2676. PAGED_CODE ();
  2677. SessionGlobal = SESSION_GLOBAL(MmSessionSpace);
  2678. KeInitializeGuardedMutex (&SessionGlobal->PagedPoolMutex);
  2679. PoolDescriptor = &MmSessionSpace->PagedPool;
  2680. ExInitializePoolDescriptor (PoolDescriptor,
  2681. PagedPoolSession,
  2682. 0,
  2683. 0,
  2684. &SessionGlobal->PagedPoolMutex);
  2685. MmSessionSpace->PagedPoolStart = (PVOID)MiSessionPoolStart;
  2686. MmSessionSpace->PagedPoolEnd = (PVOID)(MiSessionPoolEnd -1);
  2687. PagedPoolInfo = &MmSessionSpace->PagedPoolInfo;
  2688. PagedPoolInfo->PagedPoolCommit = 0;
  2689. PagedPoolInfo->PagedPoolHint = 0;
  2690. PagedPoolInfo->AllocatedPagedPool = 0;
  2691. //
  2692. // Build the page table page for paged pool.
  2693. //
  2694. PointerPde = MiGetPdeAddress (MmSessionSpace->PagedPoolStart);
  2695. MmSessionSpace->PagedPoolBasePde = PointerPde;
  2696. PointerPte = MiGetPteAddress (MmSessionSpace->PagedPoolStart);
  2697. PagedPoolInfo->FirstPteForPagedPool = PointerPte;
  2698. PagedPoolInfo->LastPteForPagedPool = MiGetPteAddress (MmSessionSpace->PagedPoolEnd);
  2699. #if DBG
  2700. //
  2701. // Session pool better be unused.
  2702. //
  2703. StartPde = MiGetPdeAddress (MmSessionSpace->PagedPoolStart);
  2704. EndPde = MiGetPdeAddress (MmSessionSpace->PagedPoolEnd);
  2705. while (StartPde <= EndPde) {
  2706. ASSERT (StartPde->u.Long == 0);
  2707. StartPde += 1;
  2708. }
  2709. #endif
  2710. //
  2711. // Mark all PDEs as empty.
  2712. //
  2713. MiZeroMemoryPte (PointerPde,
  2714. (1 + MiGetPdeAddress (MmSessionSpace->PagedPoolEnd) - PointerPde));
  2715. if (MiChargeCommitment (1, NULL) == FALSE) {
  2716. MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_COMMIT);
  2717. return STATUS_NO_MEMORY;
  2718. }
  2719. Status = MiInitializeAndChargePfn (&PageFrameIndex,
  2720. PointerPde,
  2721. MmSessionSpace->SessionPageDirectoryIndex,
  2722. TRUE);
  2723. if (!NT_SUCCESS(Status)) {
  2724. MiReturnCommitment (1);
  2725. MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_RESIDENT);
  2726. return Status;
  2727. }
  2728. MM_TRACK_COMMIT (MM_DBG_COMMIT_SESSION_POOL_PAGE_TABLES, 1);
  2729. MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_ALLOC, 1);
  2730. #if (_MI_PAGING_LEVELS < 3)
  2731. Index = MiGetPdeSessionIndex (MmSessionSpace->PagedPoolStart);
  2732. ASSERT (MmSessionSpace->PageTables[Index].u.Long == 0);
  2733. MmSessionSpace->PageTables[Index] = *PointerPde;
  2734. #endif
  2735. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_POOL_CREATE, 1);
  2736. InterlockedExchangeAddSizeT (&MmSessionSpace->NonPagablePages, 1);
  2737. InterlockedExchangeAddSizeT (&MmSessionSpace->CommittedPages, 1);
  2738. MiFillMemoryPte (PointerPte, PAGE_SIZE / sizeof (MMPTE), MM_KERNEL_NOACCESS_PTE);
  2739. PagedPoolInfo->NextPdeForPagedPoolExpansion = PointerPde + 1;
  2740. //
  2741. // Initialize the bitmaps.
  2742. //
  2743. MiCreateBitMap (&PagedPoolInfo->PagedPoolAllocationMap,
  2744. MmSessionPoolSize >> PAGE_SHIFT,
  2745. NonPagedPool);
  2746. if (PagedPoolInfo->PagedPoolAllocationMap == NULL) {
  2747. MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_NONPAGED_POOL);
  2748. goto Failure;
  2749. }
  2750. //
  2751. // We start with all pages in the virtual address space as "busy", and
  2752. // clear bits to make pages available as we dynamically expand the pool.
  2753. //
2754. RtlSetAllBits (PagedPoolInfo->PagedPoolAllocationMap);
  2755. //
  2756. // Indicate first page worth of PTEs are available.
  2757. //
  2758. RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap, 0, PTE_PER_PAGE);
  2759. //
  2760. // Create the end of allocation range bitmap.
  2761. //
  2762. MiCreateBitMap (&PagedPoolInfo->EndOfPagedPoolBitmap,
  2763. MmSessionPoolSize >> PAGE_SHIFT,
  2764. NonPagedPool);
  2765. if (PagedPoolInfo->EndOfPagedPoolBitmap == NULL) {
  2766. MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_NONPAGED_POOL);
  2767. goto Failure;
  2768. }
  2769. RtlClearAllBits (PagedPoolInfo->EndOfPagedPoolBitmap);
  2770. return STATUS_SUCCESS;
  2771. Failure:
  2772. MiFreeSessionPoolBitMaps ();
  2773. MiSessionPageTableRelease (PageFrameIndex);
  2774. MI_WRITE_INVALID_PTE (PointerPde, ZeroKernelPte);
  2775. MI_FLUSH_SINGLE_SESSION_TB (MiGetVirtualAddressMappedByPte (PointerPde));
  2776. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_POOL_CREATE_FAILED, 1);
  2777. InterlockedExchangeAddSizeT (&MmSessionSpace->NonPagablePages, -1);
  2778. InterlockedExchangeAddSizeT (&MmSessionSpace->CommittedPages, -1);
  2779. MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_FREE_FAIL1, 1);
  2780. MiReturnCommitment (1);
  2781. MM_TRACK_COMMIT_REDUCTION (MM_DBG_COMMIT_SESSION_POOL_PAGE_TABLES, 1);
  2782. return STATUS_NO_MEMORY;
  2783. }
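//
// Illustrative sketch (not part of the original module): the session pool
// allocation bitmap starts fully set ("busy") and bits are cleared only as
// page tables are built, so the allocator can never hand out virtual
// addresses that have no page table behind them yet. The plain ULONG
// bitmap and the MiSketch name are hypothetical simplifications of the
// RTL bitmap usage above.
//
static
VOID
MiSketchInitializeExpansionBitmap (
    IN OUT PULONG BitmapBuffer,
    IN ULONG TotalBits,
    IN ULONG InitiallyAvailableBits
    )
{
    ULONG i;

    //
    // Mark every page as unavailable ...
    //

    for (i = 0; i < (TotalBits + 31) / 32; i += 1) {
        BitmapBuffer[i] = (ULONG)-1;
    }

    //
    // ... then expose only the range covered by the first page table.
    //

    for (i = 0; i < InitiallyAvailableBits; i += 1) {
        BitmapBuffer[i / 32] &= ~((ULONG)1 << (i % 32));
    }
}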
  2784. VOID
  2785. MiFreeSessionPoolBitMaps (
  2786. VOID
  2787. )
  2788. /*++
  2789. Routine Description:
  2790. Free the current session's pool bitmap structures.
  2791. Arguments:
  2792. None.
  2793. Return Value:
  2794. None.
  2795. Environment:
  2796. Kernel mode.
  2797. --*/
  2798. {
  2799. PAGED_CODE();
  2800. if (MmSessionSpace->PagedPoolInfo.PagedPoolAllocationMap ) {
  2801. ExFreePool (MmSessionSpace->PagedPoolInfo.PagedPoolAllocationMap);
  2802. MmSessionSpace->PagedPoolInfo.PagedPoolAllocationMap = NULL;
  2803. }
  2804. if (MmSessionSpace->PagedPoolInfo.EndOfPagedPoolBitmap ) {
  2805. ExFreePool (MmSessionSpace->PagedPoolInfo.EndOfPagedPoolBitmap);
  2806. MmSessionSpace->PagedPoolInfo.EndOfPagedPoolBitmap = NULL;
  2807. }
  2808. return;
  2809. }
  2810. #if DBG
  2811. #define MI_LOG_CONTIGUOUS 100
  2812. typedef struct _MI_CONTIGUOUS_ALLOCATORS {
  2813. PVOID BaseAddress;
  2814. SIZE_T NumberOfBytes;
  2815. PVOID CallingAddress;
  2816. } MI_CONTIGUOUS_ALLOCATORS, *PMI_CONTIGUOUS_ALLOCATORS;
  2817. ULONG MiContiguousIndex;
  2818. MI_CONTIGUOUS_ALLOCATORS MiContiguousAllocators[MI_LOG_CONTIGUOUS];
  2819. VOID
  2820. MiInsertContiguousTag (
  2821. IN PVOID BaseAddress,
  2822. IN SIZE_T NumberOfBytes,
  2823. IN PVOID CallingAddress
  2824. )
  2825. {
  2826. KIRQL OldIrql;
  2827. #if !DBG
  2828. if ((NtGlobalFlag & FLG_POOL_ENABLE_TAGGING) == 0) {
  2829. return;
  2830. }
  2831. #endif
  2832. OldIrql = KeAcquireQueuedSpinLock (LockQueueMmNonPagedPoolLock);
  2833. if (MiContiguousIndex >= MI_LOG_CONTIGUOUS) {
  2834. MiContiguousIndex = 0;
  2835. }
  2836. MiContiguousAllocators[MiContiguousIndex].BaseAddress = BaseAddress;
  2837. MiContiguousAllocators[MiContiguousIndex].NumberOfBytes = NumberOfBytes;
  2838. MiContiguousAllocators[MiContiguousIndex].CallingAddress = CallingAddress;
  2839. MiContiguousIndex += 1;
  2840. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock, OldIrql);
  2841. }
  2842. #else
  2843. #define MiInsertContiguousTag(a, b, c) (c) = (c)
  2844. #endif
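//
// Illustrative sketch (not part of the original module): the debug log
// above is a fixed-size circular buffer; once MI_LOG_CONTIGUOUS entries
// have been recorded the index wraps and the oldest record is overwritten.
// The MiSketch name and the explicit Log/LogIndex parameters are
// hypothetical.
//
#if DBG
static
VOID
MiSketchLogContiguousCaller (
    IN OUT PMI_CONTIGUOUS_ALLOCATORS Log,
    IN OUT PULONG LogIndex,
    IN PVOID BaseAddress,
    IN SIZE_T NumberOfBytes,
    IN PVOID CallingAddress
    )
{
    if (*LogIndex >= MI_LOG_CONTIGUOUS) {
        *LogIndex = 0;
    }

    Log[*LogIndex].BaseAddress = BaseAddress;
    Log[*LogIndex].NumberOfBytes = NumberOfBytes;
    Log[*LogIndex].CallingAddress = CallingAddress;
    *LogIndex += 1;
}
#endif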
  2845. PVOID
  2846. MiFindContiguousMemoryInPool (
  2847. IN PFN_NUMBER LowestPfn,
  2848. IN PFN_NUMBER HighestPfn,
  2849. IN PFN_NUMBER BoundaryPfn,
  2850. IN PFN_NUMBER SizeInPages,
  2851. IN PVOID CallingAddress
  2852. )
  2853. /*++
  2854. Routine Description:
  2855. This function searches nonpaged pool for contiguous pages to satisfy the
  2856. request. Note the pool address returned maps these pages as MmCached.
  2857. Arguments:
  2858. LowestPfn - Supplies the lowest acceptable physical page number.
  2859. HighestPfn - Supplies the highest acceptable physical page number.
  2860. BoundaryPfn - Supplies the page frame number multiple the allocation must
  2861. not cross. 0 indicates it can cross any boundary.
  2862. SizeInPages - Supplies the number of pages to allocate.
  2863. CallingAddress - Supplies the calling address of the allocator.
  2864. Return Value:
  2865. NULL - a contiguous range could not be found to satisfy the request.
  2866. NON-NULL - Returns a pointer (virtual address in the nonpaged portion
  2867. of the system) to the allocated physically contiguous
  2868. memory.
  2869. Environment:
  2870. Kernel mode, IRQL of APC_LEVEL or below.
  2871. --*/
  2872. {
  2873. PMMPTE PointerPte;
  2874. PMMPFN Pfn1;
  2875. PVOID BaseAddress;
  2876. PVOID BaseAddress2;
  2877. KIRQL OldIrql;
  2878. PMMFREE_POOL_ENTRY FreePageInfo;
  2879. PLIST_ENTRY Entry;
  2880. ULONG Index;
  2881. PFN_NUMBER BoundaryMask;
  2882. ULONG AllocationPosition;
  2883. PVOID Va;
  2884. PFN_NUMBER SpanInPages;
  2885. PFN_NUMBER SpanInPages2;
  2886. PFN_NUMBER FreePoolInPages;
  2887. PAGED_CODE ();
  2888. //
2889. // Initializing SpanInPages* is not needed for correctness,
2890. // but without it the compiler cannot compile this code at /W4
2891. // without warning about potentially uninitialized variables.
  2892. //
  2893. SpanInPages = 0;
  2894. SpanInPages2 = 0;
  2895. BaseAddress = NULL;
  2896. BoundaryMask = ~(BoundaryPfn - 1);
  2897. //
  2898. // A suitable pool page was not allocated via the pool allocator.
  2899. // Grab the pool lock and manually search for a page which meets
  2900. // the requirements.
  2901. //
  2902. MmLockPagableSectionByHandle (ExPageLockHandle);
  2903. //
  2904. // Trace through the page allocator's pool headers for a page which
  2905. // meets the requirements.
  2906. //
  2907. // NonPaged pool is linked together through the pages themselves.
  2908. //
  2909. Index = (ULONG)(SizeInPages - 1);
  2910. if (Index >= MI_MAX_FREE_LIST_HEADS) {
  2911. Index = MI_MAX_FREE_LIST_HEADS - 1;
  2912. }
  2913. OldIrql = KeAcquireQueuedSpinLock (LockQueueMmNonPagedPoolLock);
  2914. while (Index < MI_MAX_FREE_LIST_HEADS) {
  2915. Entry = MmNonPagedPoolFreeListHead[Index].Flink;
  2916. while (Entry != &MmNonPagedPoolFreeListHead[Index]) {
  2917. if (MmProtectFreedNonPagedPool == TRUE) {
  2918. MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0);
  2919. }
  2920. //
  2921. // The list is not empty, see if this one meets the physical
  2922. // requirements.
  2923. //
  2924. FreePageInfo = CONTAINING_RECORD(Entry,
  2925. MMFREE_POOL_ENTRY,
  2926. List);
  2927. ASSERT (FreePageInfo->Signature == MM_FREE_POOL_SIGNATURE);
  2928. if (FreePageInfo->Size >= SizeInPages) {
  2929. //
  2930. // This entry has sufficient space, check to see if the
  2931. // pages meet the physical requirements.
  2932. //
  2933. Va = MiCheckForContiguousMemory (PAGE_ALIGN(Entry),
  2934. FreePageInfo->Size,
  2935. SizeInPages,
  2936. LowestPfn,
  2937. HighestPfn,
  2938. BoundaryPfn,
  2939. MiCached);
  2940. if (Va != NULL) {
  2941. //
  2942. // These pages meet the requirements. The returned
  2943. // address may butt up on the end, the front or be
  2944. // somewhere in the middle. Split the Entry based
  2945. // on which case it is.
  2946. //
  2947. Entry = PAGE_ALIGN(Entry);
  2948. if (MmProtectFreedNonPagedPool == FALSE) {
  2949. RemoveEntryList (&FreePageInfo->List);
  2950. }
  2951. else {
  2952. MiProtectedPoolRemoveEntryList (&FreePageInfo->List);
  2953. }
  2954. //
  2955. // Adjust the number of free pages remaining in the pool.
  2956. // The TotalBigPages calculation appears incorrect for the
  2957. // case where we're splitting a block, but it's done this
  2958. // way because ExFreePool corrects it when we free the
2959. // fragment block below. Likewise for
2960. // MmAllocatedNonPagedPool and MmNumberOfFreeNonPagedPool,
2961. // which are corrected by MiFreePoolPages for the fragment.
  2962. //
  2963. InterlockedExchangeAdd ((PLONG)&NonPagedPoolDescriptor.TotalBigPages,
  2964. (LONG)FreePageInfo->Size);
  2965. InterlockedExchangeAddSizeT (&NonPagedPoolDescriptor.TotalBytes,
  2966. FreePageInfo->Size << PAGE_SHIFT);
  2967. MmAllocatedNonPagedPool += FreePageInfo->Size;
  2968. FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;
  2969. if (FreePoolInPages < MiHighNonPagedPoolThreshold) {
  2970. //
  2971. // Read the state directly instead of calling
  2972. // KeReadStateEvent since we are holding the nonpaged
  2973. // pool lock and want to keep instructions at a
  2974. // minimum.
  2975. //
  2976. if (MiHighNonPagedPoolEvent->Header.SignalState != 0) {
  2977. KeClearEvent (MiHighNonPagedPoolEvent);
  2978. }
  2979. if (FreePoolInPages <= MiLowNonPagedPoolThreshold) {
  2980. if (MiLowNonPagedPoolEvent->Header.SignalState == 0) {
  2981. KeSetEvent (MiLowNonPagedPoolEvent, 0, FALSE);
  2982. }
  2983. }
  2984. }
  2985. MmNumberOfFreeNonPagedPool -= FreePageInfo->Size;
  2986. ASSERT ((LONG)MmNumberOfFreeNonPagedPool >= 0);
  2987. if (Va == Entry) {
  2988. //
  2989. // Butted against the front.
  2990. //
  2991. AllocationPosition = 0;
  2992. }
  2993. else if (((PCHAR)Va + (SizeInPages << PAGE_SHIFT)) == ((PCHAR)Entry + (FreePageInfo->Size << PAGE_SHIFT))) {
  2994. //
  2995. // Butted against the end.
  2996. //
  2997. AllocationPosition = 2;
  2998. }
  2999. else {
  3000. //
  3001. // Somewhere in the middle.
  3002. //
  3003. AllocationPosition = 1;
  3004. }
  3005. //
  3006. // Pages are being removed from the front of
  3007. // the list entry and the whole list entry
  3008. // will be removed and then the remainder inserted.
  3009. //
  3010. //
  3011. // Mark start and end for the block at the top of the
  3012. // list.
  3013. //
  3014. if (MI_IS_PHYSICAL_ADDRESS(Va)) {
  3015. //
  3016. // On certain architectures, virtual addresses
  3017. // may be physical and hence have no corresponding PTE.
  3018. //
  3019. PointerPte = NULL;
  3020. Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (Va));
  3021. }
  3022. else {
  3023. PointerPte = MiGetPteAddress(Va);
  3024. ASSERT (PointerPte->u.Hard.Valid == 1);
  3025. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  3026. }
  3027. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  3028. ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
  3029. ASSERT (Pfn1->u3.e1.StartOfAllocation == 0);
  3030. Pfn1->u3.e1.StartOfAllocation = 1;
  3031. //
  3032. // Calculate the ending PFN address, note that since
  3033. // these pages are contiguous, just add to the PFN.
  3034. //
  3035. Pfn1 += SizeInPages - 1;
  3036. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  3037. ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
  3038. ASSERT (Pfn1->u3.e1.EndOfAllocation == 0);
  3039. Pfn1->u3.e1.EndOfAllocation = 1;
  3040. if (SizeInPages == FreePageInfo->Size) {
  3041. //
  3042. // Unlock the pool and return.
  3043. //
  3044. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock,
  3045. OldIrql);
  3046. BaseAddress = (PVOID)Va;
  3047. goto Done;
  3048. }
  3049. BaseAddress = NULL;
  3050. if (AllocationPosition != 2) {
  3051. //
  3052. // The end piece needs to be freed as the removal
  3053. // came from the front or the middle.
  3054. //
  3055. BaseAddress = (PVOID)((PCHAR)Va + (SizeInPages << PAGE_SHIFT));
  3056. SpanInPages = FreePageInfo->Size - SizeInPages -
  3057. (((ULONG_PTR)Va - (ULONG_PTR)Entry) >> PAGE_SHIFT);
  3058. //
  3059. // Mark start and end of the allocation in the PFN database.
  3060. //
  3061. if (PointerPte == NULL) {
  3062. //
  3063. // On certain architectures, virtual addresses
  3064. // may be physical and hence have no corresponding PTE.
  3065. //
  3066. Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (BaseAddress));
  3067. }
  3068. else {
  3069. PointerPte = MiGetPteAddress(BaseAddress);
  3070. ASSERT (PointerPte->u.Hard.Valid == 1);
  3071. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  3072. }
  3073. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  3074. ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
  3075. ASSERT (Pfn1->u3.e1.StartOfAllocation == 0);
  3076. Pfn1->u3.e1.StartOfAllocation = 1;
  3077. //
  3078. // Calculate the ending PTE's address, can't depend on
  3079. // these pages being physically contiguous.
  3080. //
  3081. if (PointerPte == NULL) {
  3082. Pfn1 += (SpanInPages - 1);
  3083. }
  3084. else {
  3085. PointerPte += (SpanInPages - 1);
  3086. ASSERT (PointerPte->u.Hard.Valid == 1);
  3087. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  3088. }
  3089. ASSERT (Pfn1->u3.e1.EndOfAllocation == 0);
  3090. Pfn1->u3.e1.EndOfAllocation = 1;
  3091. ASSERT (((ULONG_PTR)BaseAddress & (PAGE_SIZE -1)) == 0);
  3092. SpanInPages2 = SpanInPages;
  3093. }
  3094. BaseAddress2 = BaseAddress;
  3095. BaseAddress = NULL;
  3096. if (AllocationPosition != 0) {
  3097. //
  3098. // The front piece needs to be freed as the removal
  3099. // came from the middle or the end.
  3100. //
  3101. BaseAddress = (PVOID)Entry;
  3102. SpanInPages = ((ULONG_PTR)Va - (ULONG_PTR)Entry) >> PAGE_SHIFT;
  3103. //
  3104. // Mark start and end of the allocation in the PFN database.
  3105. //
  3106. if (PointerPte == NULL) {
  3107. //
  3108. // On certain architectures, virtual addresses
  3109. // may be physical and hence have no corresponding PTE.
  3110. //
  3111. Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (BaseAddress));
  3112. }
  3113. else {
  3114. PointerPte = MiGetPteAddress(BaseAddress);
  3115. ASSERT (PointerPte->u.Hard.Valid == 1);
  3116. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  3117. }
  3118. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  3119. ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
  3120. ASSERT (Pfn1->u3.e1.StartOfAllocation == 0);
  3121. Pfn1->u3.e1.StartOfAllocation = 1;
  3122. //
  3123. // Calculate the ending PTE's address, can't depend on
  3124. // these pages being physically contiguous.
  3125. //
  3126. if (PointerPte == NULL) {
  3127. Pfn1 += (SpanInPages - 1);
  3128. }
  3129. else {
  3130. PointerPte += (SpanInPages - 1);
  3131. ASSERT (PointerPte->u.Hard.Valid == 1);
  3132. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  3133. }
  3134. ASSERT (Pfn1->u3.e1.EndOfAllocation == 0);
  3135. Pfn1->u3.e1.EndOfAllocation = 1;
  3136. ASSERT (((ULONG_PTR)BaseAddress & (PAGE_SIZE -1)) == 0);
  3137. }
  3138. //
  3139. // Unlock the pool.
  3140. //
  3141. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock,
  3142. OldIrql);
  3143. //
  3144. // Free the split entry at BaseAddress back into the pool.
  3145. // Note that we have overcharged the pool - the entire free
  3146. // chunk has been billed. Here we return the piece we
  3147. // didn't use and correct the momentary overbilling.
  3148. //
  3149. // The start and end allocation bits of this split entry
3150. // which we just set up enable ExFreePool and its callees
  3151. // to correctly adjust the billing.
  3152. //
  3153. if (BaseAddress) {
  3154. ExInsertPoolTag ('tnoC',
  3155. BaseAddress,
  3156. SpanInPages << PAGE_SHIFT,
  3157. NonPagedPool);
  3158. ExFreePool (BaseAddress);
  3159. }
  3160. if (BaseAddress2) {
  3161. ExInsertPoolTag ('tnoC',
  3162. BaseAddress2,
  3163. SpanInPages2 << PAGE_SHIFT,
  3164. NonPagedPool);
  3165. ExFreePool (BaseAddress2);
  3166. }
  3167. BaseAddress = Va;
  3168. goto Done;
  3169. }
  3170. }
  3171. Entry = FreePageInfo->List.Flink;
  3172. if (MmProtectFreedNonPagedPool == TRUE) {
  3173. MiProtectFreeNonPagedPool ((PVOID)FreePageInfo,
  3174. (ULONG)FreePageInfo->Size);
  3175. }
  3176. }
  3177. Index += 1;
  3178. }
  3179. //
  3180. // No entry was found in free nonpaged pool that meets the requirements.
  3181. //
  3182. KeReleaseQueuedSpinLock (LockQueueMmNonPagedPoolLock, OldIrql);
  3183. Done:
  3184. MmUnlockPagableImageSection (ExPageLockHandle);
  3185. if (BaseAddress) {
  3186. MiInsertContiguousTag (BaseAddress,
  3187. SizeInPages << PAGE_SHIFT,
  3188. CallingAddress);
  3189. ExInsertPoolTag ('tnoC',
  3190. BaseAddress,
  3191. SizeInPages << PAGE_SHIFT,
  3192. NonPagedPool);
  3193. }
  3194. return BaseAddress;
  3195. }
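//
// Illustrative sketch (hypothetical, compiled out): the page arithmetic used
// by the split path above. Given a free chunk starting at Entry that spans
// ChunkPages, and a carve-out of SizeInPages starting at Va inside it, the
// front and end remainders are computed as shown; each nonzero remainder is
// given start/end allocation bits and handed back to ExFreePool, which
// corrects the momentary overbilling described above. The function name and
// parameters here are invented for illustration only.
//
#if 0
static VOID
MiSketchSplitRemainders (
    IN ULONG_PTR Entry,
    IN ULONG_PTR Va,
    IN PFN_NUMBER ChunkPages,
    IN PFN_NUMBER SizeInPages,
    OUT PPFN_NUMBER FrontPages,
    OUT PPFN_NUMBER EndPages
    )
{
    //
    // Pages preceding the carve-out (nonzero when the carve-out came from
    // the middle or the end of the chunk).
    //
    *FrontPages = (Va - Entry) >> PAGE_SHIFT;

    //
    // Pages following the carve-out (nonzero when the carve-out came from
    // the front or the middle of the chunk).
    //
    *EndPages = ChunkPages - SizeInPages - *FrontPages;
}
#endif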
  3196. PFN_NUMBER
  3197. MiFindContiguousPages (
  3198. IN PFN_NUMBER LowestPfn,
  3199. IN PFN_NUMBER HighestPfn,
  3200. IN PFN_NUMBER BoundaryPfn,
  3201. IN PFN_NUMBER SizeInPages,
  3202. IN MEMORY_CACHING_TYPE CacheType
  3203. )
  3204. /*++
  3205. Routine Description:
3206. This function searches the free, zeroed,
  3207. and standby lists for contiguous pages that satisfy the
  3208. request.
  3209. Note no virtual address space is used (thus nonpaged pool is not scanned).
  3210. A physical frame number (the caller can map it if he wants to) is returned.
  3211. Arguments:
  3212. LowestPfn - Supplies the lowest acceptable physical page number.
  3213. HighestPfn - Supplies the highest acceptable physical page number.
  3214. BoundaryPfn - Supplies the page frame number multiple the allocation must
  3215. not cross. 0 indicates it can cross any boundary.
  3216. SizeInPages - Supplies the number of pages to allocate.
  3217. CacheType - Supplies the type of cache mapping that will be used for the
  3218. memory.
  3219. Return Value:
  3220. 0 - a contiguous range could not be found to satisfy the request.
3221. Nonzero - Returns the base physical frame number of the allocated
  3222. physically contiguous memory.
  3223. Environment:
  3224. Kernel mode, IRQL of APC_LEVEL or below.
  3225. Note that in addition to being called at normal runtime, this routine
  3226. is also called during Phase 0 initialization before the loaded module
  3227. list has been initialized - therefore this routine cannot be made PAGELK
  3228. as we wouldn't know how to find it to ensure it was resident.
  3229. --*/
  3230. {
  3231. PMMPTE DummyPte;
  3232. PMMPFN Pfn1;
  3233. PMMPFN EndPfn;
  3234. KIRQL OldIrql;
  3235. ULONG start;
  3236. PFN_NUMBER count;
  3237. PFN_NUMBER Page;
  3238. PFN_NUMBER LastPage;
  3239. PFN_NUMBER found;
  3240. PFN_NUMBER BoundaryMask;
  3241. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  3242. ULONG RetryCount;
  3243. PAGED_CODE ();
  3244. ASSERT (SizeInPages != 0);
  3245. CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, 0);
  3246. BoundaryMask = ~(BoundaryPfn - 1);
  3247. Pfn1 = NULL;
  3248. DummyPte = MiGetPteAddress (MmNonPagedPoolExpansionStart);
  3249. //
  3250. // Manually search for a page range which meets the requirements.
  3251. //
  3252. KeAcquireGuardedMutex (&MmDynamicMemoryMutex);
  3253. //
  3254. // Charge commitment.
  3255. //
  3256. // Then search the PFN database for pages that meet the requirements.
  3257. //
  3258. if (MiChargeCommitmentCantExpand (SizeInPages, FALSE) == FALSE) {
  3259. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  3260. return 0;
  3261. }
  3262. //
  3263. // Charge resident available pages.
  3264. //
  3265. LOCK_PFN (OldIrql);
  3266. MiDeferredUnlockPages (MI_DEFER_PFN_HELD);
  3267. if ((SPFN_NUMBER)SizeInPages > MI_NONPAGABLE_MEMORY_AVAILABLE()) {
  3268. UNLOCK_PFN (OldIrql);
  3269. goto Failed;
  3270. }
  3271. //
  3272. // Systems utilizing memory compression may have more
  3273. // pages on the zero, free and standby lists than we
  3274. // want to give out. Explicitly check MmAvailablePages
  3275. // instead (and recheck whenever the PFN lock is released
  3276. // and reacquired).
  3277. //
  3278. if ((SPFN_NUMBER)SizeInPages > (SPFN_NUMBER)(MmAvailablePages - MM_HIGH_LIMIT)) {
  3279. UNLOCK_PFN (OldIrql);
  3280. goto Failed;
  3281. }
  3282. MI_DECREMENT_RESIDENT_AVAILABLE (SizeInPages, MM_RESAVAIL_ALLOCATE_CONTIGUOUS);
  3283. UNLOCK_PFN (OldIrql);
  3284. RetryCount = 4;
  3285. Retry:
  3286. start = 0;
  3287. found = 0;
  3288. do {
  3289. count = MmPhysicalMemoryBlock->Run[start].PageCount;
  3290. Page = MmPhysicalMemoryBlock->Run[start].BasePage;
  3291. //
  3292. // Close the gaps, then examine the range for a fit.
  3293. //
  3294. LastPage = Page + count;
  3295. if (LastPage - 1 > HighestPfn) {
  3296. LastPage = HighestPfn + 1;
  3297. }
  3298. if (Page < LowestPfn) {
  3299. Page = LowestPfn;
  3300. }
  3301. if ((count != 0) && (Page + SizeInPages <= LastPage)) {
  3302. //
  3303. // A fit may be possible in this run, check whether the pages
  3304. // are on the right list.
  3305. //
  3306. found = 0;
  3307. Pfn1 = MI_PFN_ELEMENT (Page);
  3308. for ( ; Page < LastPage; Page += 1, Pfn1 += 1) {
  3309. if ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
  3310. (Pfn1->u1.Flink != 0) &&
  3311. (Pfn1->u2.Blink != 0) &&
  3312. (Pfn1->u3.e2.ReferenceCount == 0) &&
  3313. ((CacheAttribute == MiCached) || (Pfn1->u4.MustBeCached == 0))) {
  3314. //
  3315. // Before starting a new run, ensure that it
  3316. // can satisfy the boundary requirements (if any).
  3317. //
  3318. if ((found == 0) && (BoundaryPfn != 0)) {
  3319. if (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask) != 0) {
  3320. //
  3321. // This run's physical address does not meet the
  3322. // requirements.
  3323. //
  3324. continue;
  3325. }
  3326. }
  3327. found += 1;
  3328. if (found == SizeInPages) {
  3329. //
  3330. // Lock the PFN database and see if the pages are
  3331. // still available for us. Note the invariant
  3332. // condition (boundary conformance) does not need
  3333. // to be checked again as it was already checked
  3334. // above.
  3335. //
  3336. Pfn1 -= (found - 1);
  3337. Page -= (found - 1);
  3338. LOCK_PFN (OldIrql);
  3339. do {
  3340. if ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
  3341. (Pfn1->u1.Flink != 0) &&
  3342. (Pfn1->u2.Blink != 0) &&
  3343. (Pfn1->u3.e2.ReferenceCount == 0) &&
  3344. ((CacheAttribute == MiCached) || (Pfn1->u4.MustBeCached == 0))) {
  3345. NOTHING; // Good page
  3346. }
  3347. else {
  3348. break;
  3349. }
  3350. found -= 1;
  3351. if (found == 0) {
  3352. //
  3353. // All the pages matched the criteria, keep the
  3354. // PFN lock, remove them and map them for our
  3355. // caller.
  3356. //
  3357. goto Success;
  3358. }
  3359. Pfn1 += 1;
  3360. Page += 1;
  3361. } while (TRUE);
  3362. UNLOCK_PFN (OldIrql);
  3363. //
  3364. // Restart the search at the first possible page.
  3365. //
  3366. found = 0;
  3367. }
  3368. }
  3369. else {
  3370. found = 0;
  3371. }
  3372. }
  3373. }
  3374. start += 1;
  3375. } while (start != MmPhysicalMemoryBlock->NumberOfRuns);
  3376. //
  3377. // The desired physical pages could not be allocated - try harder.
  3378. //
  3379. if (InitializationPhase == 0) {
  3380. goto Failed;
  3381. }
  3382. InterlockedIncrement (&MiDelayPageFaults);
  3383. //
  3384. // Attempt to move pages to the standby list. This is done with
  3385. // gradually increasing aggressiveness so as not to prematurely
  3386. // drain modified writes unless it's truly needed.
  3387. //
  3388. switch (RetryCount) {
  3389. case 4:
  3390. MmEmptyAllWorkingSets ();
  3391. break;
  3392. case 3:
  3393. MiFlushAllPages ();
  3394. KeDelayExecutionThread (KernelMode,
  3395. FALSE,
  3396. (PLARGE_INTEGER)&MmHalfSecond);
  3397. break;
  3398. case 2:
  3399. MmEmptyAllWorkingSets ();
  3400. MiFlushAllPages ();
  3401. KeDelayExecutionThread (KernelMode,
  3402. FALSE,
  3403. (PLARGE_INTEGER)&MmOneSecond);
  3404. break;
  3405. case 1:
  3406. //
  3407. // Purge the transition list as transition pages keep
  3408. // page tables from being taken and we are desperate.
  3409. //
  3410. MiPurgeTransitionList ();
  3411. //
  3412. // Empty all the working sets now that the
  3413. // transition list has been purged. This will put page tables
  3414. // on the modified list.
  3415. //
  3416. MmEmptyAllWorkingSets ();
  3417. //
  3418. // Write out modified pages (including newly trimmed page table
  3419. // pages).
  3420. //
  3421. MiFlushAllPages ();
  3422. //
  3423. // Give the writes a chance to complete so the modified pages
  3424. // can be marked clean and put on the transition list.
  3425. //
  3426. KeDelayExecutionThread (KernelMode,
  3427. FALSE,
  3428. (PLARGE_INTEGER)&MmOneSecond);
  3429. //
  3430. // Purge the transition list one last time to get the now-clean
  3431. // page table pages out.
  3432. //
  3433. MiPurgeTransitionList ();
  3434. //
  3435. // Finally get any straggling active pages onto the transition
  3436. // lists.
  3437. //
  3438. MmEmptyAllWorkingSets ();
  3439. MiFlushAllPages ();
  3440. break;
  3441. default:
  3442. break;
  3443. }
  3444. InterlockedDecrement (&MiDelayPageFaults);
  3445. if (RetryCount != 0) {
  3446. RetryCount -= 1;
  3447. goto Retry;
  3448. }
  3449. Failed:
  3450. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  3451. MiReturnCommitment (SizeInPages);
  3452. return 0;
  3453. Success:
  3454. ASSERT (start != MmPhysicalMemoryBlock->NumberOfRuns);
  3455. //
  3456. // A match has been found, remove these pages
  3457. // and return. The PFN lock is held.
  3458. //
  3459. //
  3460. // Systems utilizing memory compression may have more
  3461. // pages on the zero, free and standby lists than we
  3462. // want to give out. Explicitly check MmAvailablePages
  3463. // instead (and recheck whenever the PFN lock is
  3464. // released and reacquired).
  3465. //
  3466. if ((SPFN_NUMBER)SizeInPages > (SPFN_NUMBER)(MmAvailablePages - MM_HIGH_LIMIT)) {
  3467. UNLOCK_PFN (OldIrql);
  3468. MI_INCREMENT_RESIDENT_AVAILABLE (SizeInPages, MM_RESAVAIL_FREE_CONTIGUOUS);
  3469. MiReturnCommitment (SizeInPages);
  3470. goto Failed;
  3471. }
  3472. EndPfn = Pfn1 - SizeInPages + 1;
  3473. do {
  3474. if (Pfn1->u3.e1.PageLocation == StandbyPageList) {
  3475. MiUnlinkPageFromList (Pfn1);
  3476. ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
  3477. MiRestoreTransitionPte (Pfn1);
  3478. }
  3479. else {
  3480. MiUnlinkFreeOrZeroedPage (Pfn1);
  3481. }
  3482. Pfn1->u3.e2.ReferenceCount = 1;
  3483. Pfn1->u2.ShareCount = 1;
  3484. Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  3485. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  3486. Pfn1->u3.e1.CacheAttribute = CacheAttribute;
  3487. Pfn1->u3.e1.StartOfAllocation = 0;
  3488. Pfn1->u3.e1.EndOfAllocation = 0;
  3489. Pfn1->u3.e1.LargeSessionAllocation = 0;
  3490. Pfn1->u3.e1.PrototypePte = 0;
  3491. Pfn1->u4.VerifierAllocation = 0;
  3492. //
  3493. // Initialize PteAddress so an MiIdentifyPfn scan
  3494. // won't crash. The real value is put in after the loop.
  3495. //
  3496. Pfn1->PteAddress = DummyPte;
  3497. if (Pfn1 == EndPfn) {
  3498. break;
  3499. }
  3500. Pfn1 -= 1;
  3501. } while (TRUE);
  3502. Pfn1->u3.e1.StartOfAllocation = 1;
  3503. (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;
  3504. UNLOCK_PFN (OldIrql);
  3505. EndPfn = Pfn1 + SizeInPages;
  3506. ASSERT (EndPfn == MI_PFN_ELEMENT (Page + 1));
  3507. Page = Page - SizeInPages + 1;
  3508. ASSERT (Pfn1 == MI_PFN_ELEMENT (Page));
  3509. ASSERT (Page != 0);
  3510. MM_TRACK_COMMIT (MM_DBG_COMMIT_CONTIGUOUS_PAGES, SizeInPages);
  3511. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  3512. return Page;
  3513. }
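//
// Illustrative sketch (hypothetical, compiled out): the boundary test used
// in the scan above. Since BoundaryPfn is a power-of-two multiple, the first
// and last frames of a candidate run cross a boundary exactly when they
// differ in a bit at or above that multiple, i.e. when the XOR of the two
// frame numbers has a bit set under ~(BoundaryPfn - 1). For example, with
// BoundaryPfn == 0x10, a 4-page run at frame 0xE crosses (0xE ^ 0x11 has
// bit 4 set) while a 4-page run at frame 0x10 does not. The function name
// is invented for illustration only.
//
#if 0
static LOGICAL
MiSketchRunCrossesBoundary (
    IN PFN_NUMBER Page,
    IN PFN_NUMBER SizeInPages,
    IN PFN_NUMBER BoundaryPfn
    )
{
    PFN_NUMBER BoundaryMask;

    if (BoundaryPfn == 0) {
        return FALSE;           // 0 means the run may cross any boundary.
    }

    BoundaryMask = ~(BoundaryPfn - 1);

    return (LOGICAL) (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask) != 0);
}
#endif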
  3514. VOID
  3515. MiFreeContiguousPages (
  3516. IN PFN_NUMBER PageFrameIndex,
  3517. IN PFN_NUMBER SizeInPages
  3518. )
  3519. /*++
  3520. Routine Description:
  3521. This function frees the specified physical page range, returning both
  3522. commitment and resident available.
  3523. Arguments:
  3524. PageFrameIndex - Supplies the starting physical page number.
  3525. SizeInPages - Supplies the number of pages to free.
  3526. Return Value:
  3527. None.
  3528. Environment:
  3529. Kernel mode, IRQL of APC_LEVEL or below.
  3530. This is callable from MiReloadBootLoadedDrivers->MiUseDriverLargePages
  3531. during Phase 0. ExPageLockHandle and other variables won't exist at
  3532. this point, so don't get too fancy here.
  3533. --*/
  3534. {
  3535. KIRQL OldIrql;
  3536. PMMPFN Pfn1;
  3537. PMMPFN EndPfn;
  3538. ASSERT (KeGetCurrentIrql () <= APC_LEVEL);
  3539. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  3540. EndPfn = Pfn1 + SizeInPages;
  3541. LOCK_PFN2 (OldIrql);
  3542. Pfn1->u3.e1.StartOfAllocation = 0;
  3543. (EndPfn - 1)->u3.e1.EndOfAllocation = 0;
  3544. do {
  3545. MI_SET_PFN_DELETED (Pfn1);
  3546. MiDecrementShareCount (Pfn1, PageFrameIndex);
  3547. PageFrameIndex += 1;
  3548. Pfn1 += 1;
  3549. } while (Pfn1 < EndPfn);
  3550. UNLOCK_PFN2 (OldIrql);
  3551. MI_INCREMENT_RESIDENT_AVAILABLE (SizeInPages, MM_RESAVAIL_FREE_CONTIGUOUS);
  3552. MiReturnCommitment (SizeInPages);
  3553. return;
  3554. }
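//
// Illustrative sketch (hypothetical, compiled out): how a caller such as
// MiFindContiguousMemory below pairs these two routines - claim a run of
// frames, map it, and undo the claim if the mapping fails. The local
// variable names are invented for illustration only.
//
#if 0
{
    PFN_NUMBER Page;
    PVOID BaseAddress;
    PHYSICAL_ADDRESS PhysicalAddress;

    Page = MiFindContiguousPages (LowestPfn,
                                  HighestPfn,
                                  BoundaryPfn,
                                  SizeInPages,
                                  MmCached);

    if (Page != 0) {

        PhysicalAddress.QuadPart = (LONGLONG) Page << PAGE_SHIFT;

        BaseAddress = MmMapIoSpace (PhysicalAddress,
                                    SizeInPages << PAGE_SHIFT,
                                    MmCached);

        if (BaseAddress == NULL) {

            //
            // The claim must be undone - return both the commitment and
            // the resident available pages charged by the search.
            //
            MiFreeContiguousPages (Page, SizeInPages);
        }
    }
}
#endif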
  3555. PVOID
  3556. MiFindContiguousMemory (
  3557. IN PFN_NUMBER LowestPfn,
  3558. IN PFN_NUMBER HighestPfn,
  3559. IN PFN_NUMBER BoundaryPfn,
  3560. IN PFN_NUMBER SizeInPages,
  3561. IN MEMORY_CACHING_TYPE CacheType,
  3562. IN PVOID CallingAddress
  3563. )
  3564. /*++
  3565. Routine Description:
  3566. This function searches nonpaged pool and the free, zeroed,
  3567. and standby lists for contiguous pages that satisfy the
  3568. request.
  3569. Arguments:
  3570. LowestPfn - Supplies the lowest acceptable physical page number.
  3571. HighestPfn - Supplies the highest acceptable physical page number.
  3572. BoundaryPfn - Supplies the page frame number multiple the allocation must
  3573. not cross. 0 indicates it can cross any boundary.
  3574. SizeInPages - Supplies the number of pages to allocate.
  3575. CacheType - Supplies the type of cache mapping that will be used for the
  3576. memory.
  3577. CallingAddress - Supplies the calling address of the allocator.
  3578. Return Value:
  3579. NULL - a contiguous range could not be found to satisfy the request.
  3580. NON-NULL - Returns a pointer (virtual address in the nonpaged portion
  3581. of the system) to the allocated physically contiguous
  3582. memory.
  3583. Environment:
  3584. Kernel mode, IRQL of APC_LEVEL or below.
  3585. --*/
  3586. {
  3587. PMMPTE PointerPte;
  3588. PMMPFN Pfn1;
  3589. PMMPFN EndPfn;
  3590. PVOID BaseAddress;
  3591. PFN_NUMBER Page;
  3592. PHYSICAL_ADDRESS PhysicalAddress;
  3593. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  3594. PAGED_CODE ();
  3595. ASSERT (SizeInPages != 0);
  3596. CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, 0);
  3597. if (CacheAttribute == MiCached) {
  3598. BaseAddress = MiFindContiguousMemoryInPool (LowestPfn,
  3599. HighestPfn,
  3600. BoundaryPfn,
  3601. SizeInPages,
  3602. CallingAddress);
  3603. //
  3604. // An existing range of nonpaged pool satisfies the requirements
  3605. // so return it now.
  3606. //
  3607. if (BaseAddress != NULL) {
  3608. return BaseAddress;
  3609. }
  3610. }
  3611. //
  3612. // Suitable pool was not allocated via the pool allocator.
  3613. // Manually search for a page range which meets the requirements.
  3614. //
  3615. Page = MiFindContiguousPages (LowestPfn,
  3616. HighestPfn,
  3617. BoundaryPfn,
  3618. SizeInPages,
  3619. CacheType);
  3620. if (Page == 0) {
  3621. return NULL;
  3622. }
  3623. PhysicalAddress.QuadPart = Page;
  3624. PhysicalAddress.QuadPart = PhysicalAddress.QuadPart << PAGE_SHIFT;
  3625. BaseAddress = MmMapIoSpace (PhysicalAddress,
  3626. SizeInPages << PAGE_SHIFT,
  3627. CacheType);
  3628. if (BaseAddress == NULL) {
  3629. MiFreeContiguousPages (Page, SizeInPages);
  3630. return NULL;
  3631. }
  3632. Pfn1 = MI_PFN_ELEMENT (Page);
  3633. EndPfn = Pfn1 + SizeInPages;
  3634. PointerPte = MiGetPteAddress (BaseAddress);
  3635. do {
  3636. Pfn1->PteAddress = PointerPte;
  3637. Pfn1->u4.PteFrame = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress(PointerPte));
  3638. Pfn1 += 1;
  3639. PointerPte += 1;
  3640. } while (Pfn1 < EndPfn);
  3641. MM_TRACK_COMMIT (MM_DBG_COMMIT_CONTIGUOUS_PAGES, SizeInPages);
  3642. MiInsertContiguousTag (BaseAddress,
  3643. SizeInPages << PAGE_SHIFT,
  3644. CallingAddress);
  3645. return BaseAddress;
  3646. }
  3647. PFN_NUMBER
  3648. MiFindLargePageMemory (
  3649. IN PCOLORED_PAGE_INFO ColoredPageInfoBase,
  3650. IN PFN_NUMBER SizeInPages,
  3651. OUT PPFN_NUMBER OutZeroCount
  3652. )
  3653. /*++
  3654. Routine Description:
  3655. This function searches the free, zeroed, standby and modified lists
  3656. for contiguous pages to satisfy the request.
  3657. Note the caller must zero the pages on return if these are made visible
  3658. to the user.
  3659. Arguments:
  3660. ColoredPageInfoBase - Supplies the colored page info structure to hang
  3661. allocated pages off of. This allows the caller to
  3662. zero only pages that need zeroing, and to easily
  3663. do those in parallel.
  3664. SizeInPages - Supplies the number of pages to allocate.
  3665. OutZeroCount - Receives the number of pages that need to be zeroed.
  3666. Return Value:
  3667. 0 - a contiguous range could not be found to satisfy the request.
  3668. NON-0 - Returns the starting page frame number of the allocated physically
  3669. contiguous memory.
  3670. Environment:
  3671. Kernel mode, APCs disabled, AddressCreation mutex held.
  3672. The caller must bring in PAGELK.
  3673. The caller has already charged commitment for the range (typically by
  3674. virtue of the VAD insert) so no commit is charged here.
  3675. --*/
  3676. {
  3677. ULONG Color;
  3678. PFN_NUMBER ZeroCount;
  3679. LOGICAL NeedToZero;
  3680. PMMPTE DummyPte;
  3681. PMMPFN Pfn1;
  3682. PMMPFN EndPfn;
  3683. PMMPFN BoundaryPfn;
  3684. PVOID BaseAddress;
  3685. KIRQL OldIrql;
  3686. ULONG start;
  3687. PFN_NUMBER count;
  3688. PFN_NUMBER Page;
  3689. PFN_NUMBER NewPage;
  3690. PFN_NUMBER LastPage;
  3691. PFN_NUMBER found;
  3692. PFN_NUMBER BoundaryMask;
  3693. PCOLORED_PAGE_INFO ColoredPageInfo;
  3694. PAGED_CODE ();
  3695. #ifdef _X86_
  3696. ASSERT (KeFeatureBits & KF_LARGE_PAGE);
  3697. #endif
  3698. ASSERT (SizeInPages != 0);
  3699. BoundaryMask = (PFN_NUMBER) ((MM_MINIMUM_VA_FOR_LARGE_PAGE >> PAGE_SHIFT) - 1);
  3700. start = 0;
  3701. found = 0;
  3702. Pfn1 = NULL;
  3703. ZeroCount = 0;
  3704. BaseAddress = NULL;
  3705. DummyPte = MiGetPteAddress (MmNonPagedPoolExpansionStart);
  3706. //
  3707. // Charge resident available pages.
  3708. //
  3709. LOCK_PFN (OldIrql);
  3710. MiDeferredUnlockPages (MI_DEFER_PFN_HELD);
  3711. if ((SPFN_NUMBER)SizeInPages > MI_NONPAGABLE_MEMORY_AVAILABLE()) {
  3712. UNLOCK_PFN (OldIrql);
  3713. return 0;
  3714. }
  3715. //
  3716. // Systems utilizing memory compression may have more
  3717. // pages on the zero, free and standby lists than we
  3718. // want to give out. Explicitly check MmAvailablePages
  3719. // instead (and recheck whenever the PFN lock is released
  3720. // and reacquired).
  3721. //
  3722. if ((SPFN_NUMBER)SizeInPages > (SPFN_NUMBER)(MmAvailablePages - MM_HIGH_LIMIT)) {
  3723. UNLOCK_PFN (OldIrql);
  3724. return 0;
  3725. }
  3726. MI_DECREMENT_RESIDENT_AVAILABLE (SizeInPages, MM_RESAVAIL_ALLOCATE_LARGE_PAGES);
  3727. UNLOCK_PFN (OldIrql);
  3728. Page = 0;
  3729. //
  3730. // Search the PFN database for pages that meet the requirements.
  3731. //
  3732. KeAcquireGuardedMutex (&MmDynamicMemoryMutex);
  3733. for ( ; start != MmPhysicalMemoryBlock->NumberOfRuns; start += 1) {
  3734. count = MmPhysicalMemoryBlock->Run[start].PageCount;
  3735. Page = MmPhysicalMemoryBlock->Run[start].BasePage;
  3736. //
  3737. // Close the gaps, then examine the range for a fit.
  3738. //
  3739. LastPage = Page + count;
  3740. if ((Page & BoundaryMask) || (Page == 0)) {
  3741. NewPage = MI_ROUND_TO_SIZE (Page, (MM_MINIMUM_VA_FOR_LARGE_PAGE >> PAGE_SHIFT));
  3742. if (NewPage < Page) {
  3743. continue;
  3744. }
  3745. Page = NewPage;
  3746. if (Page == 0) {
  3747. Page = (MM_MINIMUM_VA_FOR_LARGE_PAGE >> PAGE_SHIFT);
  3748. }
  3749. if (Page >= LastPage) {
  3750. continue;
  3751. }
  3752. }
  3753. if (LastPage & BoundaryMask) {
  3754. LastPage &= ~BoundaryMask;
  3755. if (Page >= LastPage) {
  3756. continue;
  3757. }
  3758. }
  3759. if (Page + SizeInPages > LastPage) {
  3760. continue;
  3761. }
  3762. count = LastPage - Page + 1;
  3763. ASSERT (count != 0);
  3764. //
  3765. // A fit may be possible in this run, check whether the pages
  3766. // are on the right list.
  3767. //
  3768. found = 0;
  3769. Pfn1 = MI_PFN_ELEMENT (Page);
  3770. while (Page < LastPage) {
  3771. if ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
  3772. (Pfn1->u1.Flink != 0) &&
  3773. (Pfn1->u2.Blink != 0) &&
  3774. (Pfn1->u3.e2.ReferenceCount == 0)) {
  3775. found += 1;
  3776. if (found == SizeInPages) {
  3777. //
  3778. // Lock the PFN database and see if the pages are
  3779. // still available for us.
  3780. //
  3781. Pfn1 -= (found - 1);
  3782. Page -= (found - 1);
  3783. LOCK_PFN (OldIrql);
  3784. do {
  3785. if ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
  3786. (Pfn1->u1.Flink != 0) &&
  3787. (Pfn1->u2.Blink != 0) &&
  3788. (Pfn1->u3.e2.ReferenceCount == 0)) {
  3789. NOTHING; // Good page
  3790. }
  3791. else {
  3792. break;
  3793. }
  3794. found -= 1;
  3795. if (found == 0) {
  3796. //
  3797. // All the pages matched the criteria, keep the
  3798. // PFN lock, remove them and map them for our
  3799. // caller.
  3800. //
  3801. goto Done;
  3802. }
  3803. Pfn1 += 1;
  3804. Page += 1;
  3805. } while (TRUE);
  3806. #if DBG
  3807. if (MiShowStuckPages != 0) {
  3808. DbgPrint("MiFindLargePages : could not claim stolen PFN %p\n",
  3809. Page);
  3810. if (MiShowStuckPages & 0x8) {
  3811. DbgBreakPoint ();
  3812. }
  3813. }
  3814. #endif
  3815. UNLOCK_PFN (OldIrql);
  3816. //
  3817. // Restart the search at the first possible page.
  3818. //
  3819. found = 0;
  3820. }
  3821. }
  3822. else {
  3823. #if DBG
  3824. if (MiShowStuckPages != 0) {
  3825. DbgPrint("MiFindLargePages : could not claim PFN %p %x %x\n",
  3826. Page, Pfn1->u3.e1, Pfn1->u4.EntireFrame);
  3827. if (MiShowStuckPages & 0x8) {
  3828. DbgBreakPoint ();
  3829. }
  3830. }
  3831. #endif
  3832. found = 0;
  3833. }
  3834. Page += 1;
  3835. Pfn1 += 1;
  3836. if (found == 0) {
  3837. //
  3838. // The last page interrogated wasn't available so skip
  3839. // ahead to the next acceptable boundary.
  3840. //
  3841. NewPage = MI_ROUND_TO_SIZE (Page,
  3842. (MM_MINIMUM_VA_FOR_LARGE_PAGE >> PAGE_SHIFT));
  3843. if ((NewPage == 0) || (NewPage < Page) || (NewPage >= LastPage)) {
  3844. //
  3845. // Skip the rest of this entry.
  3846. //
  3847. Page = LastPage;
  3848. continue;
  3849. }
  3850. Page = NewPage;
  3851. Pfn1 = MI_PFN_ELEMENT (Page);
  3852. }
  3853. }
  3854. }
  3855. //
  3856. // The desired physical pages could not be allocated.
  3857. //
  3858. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  3859. MI_INCREMENT_RESIDENT_AVAILABLE (SizeInPages, MM_RESAVAIL_FREE_LARGE_PAGES);
  3860. return 0;
  3861. Done:
  3862. //
  3863. // A match has been found, remove these pages,
  3864. // map them and return. The PFN lock is held.
  3865. //
  3866. ASSERT (start != MmPhysicalMemoryBlock->NumberOfRuns);
  3867. ASSERT (Page - SizeInPages + 1 != 0);
  3868. //
  3869. // Systems utilizing memory compression may have more
  3870. // pages on the zero, free and standby lists than we
  3871. // want to give out. Explicitly check MmAvailablePages
  3872. // instead (and recheck whenever the PFN lock is
  3873. // released and reacquired).
  3874. //
  3875. if ((SPFN_NUMBER)SizeInPages > (SPFN_NUMBER)(MmAvailablePages - MM_HIGH_LIMIT)) {
  3876. UNLOCK_PFN (OldIrql);
  3877. MI_INCREMENT_RESIDENT_AVAILABLE (SizeInPages, MM_RESAVAIL_FREE_LARGE_PAGES);
  3878. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  3879. return 0;
  3880. }
  3881. EndPfn = Pfn1 - SizeInPages + 1;
  3882. BoundaryPfn = Pfn1 - (MM_VA_MAPPED_BY_PDE >> PAGE_SHIFT);
  3883. do {
  3884. NeedToZero = TRUE;
  3885. if (Pfn1->u3.e1.PageLocation == StandbyPageList) {
  3886. MiUnlinkPageFromList (Pfn1);
  3887. ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
  3888. MiRestoreTransitionPte (Pfn1);
  3889. }
  3890. else {
  3891. if (Pfn1->u3.e1.PageLocation == ZeroedPageList) {
  3892. NeedToZero = FALSE;
  3893. }
  3894. MiUnlinkFreeOrZeroedPage (Pfn1);
  3895. }
  3896. Pfn1->u3.e2.ReferenceCount = 1;
  3897. Pfn1->u2.ShareCount = 1;
  3898. MI_SET_PFN_DELETED(Pfn1);
  3899. Pfn1->u4.PteFrame = MI_MAGIC_AWE_PTEFRAME;
  3900. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  3901. Pfn1->u3.e1.CacheAttribute = MiCached;
  3902. Pfn1->u3.e1.StartOfAllocation = 0;
  3903. Pfn1->u3.e1.EndOfAllocation = 0;
  3904. Pfn1->u4.VerifierAllocation = 0;
  3905. Pfn1->u3.e1.LargeSessionAllocation = 0;
  3906. ASSERT (Pfn1->u4.AweAllocation == 0);
  3907. Pfn1->u4.AweAllocation = 1;
  3908. Pfn1->u3.e1.PrototypePte = 0;
  3909. //
  3910. // Add free and standby pages to the list of pages to be zeroed
  3911. // by our caller.
  3912. //
  3913. if (NeedToZero == TRUE) {
  3914. Color = MI_GET_COLOR_FROM_LIST_ENTRY (Page, Pfn1);
  3915. ColoredPageInfo = &ColoredPageInfoBase[Color];
  3916. Pfn1->OriginalPte.u.Long = (ULONG_PTR) ColoredPageInfo->PfnAllocation;
  3917. ColoredPageInfo->PfnAllocation = Pfn1;
  3918. ColoredPageInfo->PagesQueued += 1;
  3919. ZeroCount += 1;
  3920. }
  3921. else {
  3922. Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  3923. }
  3924. if (Pfn1 == EndPfn) {
  3925. break;
  3926. }
  3927. Pfn1 -= 1;
  3928. if (Pfn1 == BoundaryPfn) {
  3929. BoundaryPfn = Pfn1 - (MM_VA_MAPPED_BY_PDE >> PAGE_SHIFT);
  3930. }
  3931. } while (TRUE);
  3932. Pfn1->u3.e1.StartOfAllocation = 1;
  3933. (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;
  3934. UNLOCK_PFN (OldIrql);
  3935. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  3936. Page = Page - SizeInPages + 1;
  3937. ASSERT (Page != 0);
  3938. ASSERT (Pfn1 == MI_PFN_ELEMENT (Page));
  3939. MM_TRACK_COMMIT (MM_DBG_COMMIT_CHARGE_LARGE_PAGES, SizeInPages);
  3940. *OutZeroCount = ZeroCount;
  3941. return Page;
  3942. }
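//
// Illustrative sketch (hypothetical, compiled out): the frame alignment
// performed at the top of the scan above. A large page must start on a
// multiple of (MM_MINIMUM_VA_FOR_LARGE_PAGE >> PAGE_SHIFT) frames; assuming
// 4MB large pages and 4KB small pages that is 1024 frames, so a run
// beginning at frame 1500 is advanced to frame 2048 before being considered.
// The function name is invented for illustration only.
//
#if 0
static PFN_NUMBER
MiSketchRoundFrameToLargePage (
    IN PFN_NUMBER Page
    )
{
    PFN_NUMBER AlignmentInPages;

    AlignmentInPages = MM_MINIMUM_VA_FOR_LARGE_PAGE >> PAGE_SHIFT;

    return (Page + AlignmentInPages - 1) & ~(AlignmentInPages - 1);
}
#endif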
  3943. VOID
  3944. MiFreeLargePageMemory (
  3945. IN PFN_NUMBER PageFrameIndex,
  3946. IN PFN_NUMBER SizeInPages
  3947. )
  3948. /*++
  3949. Routine Description:
  3950. This function returns a contiguous large page allocation to the free
  3951. memory lists.
  3952. Arguments:
3953. PageFrameIndex - Supplies the starting page frame index to free.
  3954. SizeInPages - Supplies the number of pages to free.
  3955. Return Value:
  3956. None.
  3957. Environment:
  3958. Kernel mode, IRQL of APC_LEVEL or below.
  3959. The caller must bring in PAGELK.
  3960. --*/
  3961. {
  3962. PMMPFN Pfn1;
  3963. KIRQL OldIrql;
  3964. PKTHREAD CurrentThread;
  3965. PFN_NUMBER LastPageFrameIndex;
  3966. LONG EntryCount;
  3967. LONG OriginalCount;
  3968. PAGED_CODE ();
  3969. ASSERT (SizeInPages != 0);
  3970. LastPageFrameIndex = PageFrameIndex + SizeInPages;
  3971. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  3972. //
  3973. // The actual commitment for this range (and its page table pages, etc)
  3974. // is released when the vad is removed. Because we will release commitment
  3975. // below for each physical page, temporarily increase the charge now so
  3976. // it all balances out. Block user APCs so a suspend can't stop us.
  3977. //
  3978. CurrentThread = KeGetCurrentThread ();
  3979. KeEnterCriticalRegionThread (CurrentThread);
  3980. MiChargeCommitmentCantExpand (SizeInPages, TRUE);
  3981. LOCK_PFN (OldIrql);
  3982. do {
  3983. ASSERT (Pfn1->u2.ShareCount == 1);
  3984. ASSERT (Pfn1->u3.e1.PageLocation == ActiveAndValid);
  3985. ASSERT (Pfn1->u3.e1.CacheAttribute == MiCached);
  3986. ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
  3987. ASSERT (Pfn1->u3.e1.PrototypePte == 0);
  3988. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  3989. ASSERT (Pfn1->u4.AweAllocation == 1);
  3990. ASSERT (MI_IS_PFN_DELETED (Pfn1) == TRUE);
  3991. Pfn1->u3.e1.StartOfAllocation = 0;
  3992. Pfn1->u3.e1.EndOfAllocation = 0;
  3993. Pfn1->u2.ShareCount = 0;
  3994. #if DBG
  3995. Pfn1->u3.e1.PageLocation = StandbyPageList;
  3996. #endif
  3997. do {
  3998. EntryCount = Pfn1->AweReferenceCount;
  3999. ASSERT ((LONG)EntryCount > 0);
  4000. ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
  4001. OriginalCount = InterlockedCompareExchange (&Pfn1->AweReferenceCount,
  4002. EntryCount - 1,
  4003. EntryCount);
  4004. if (OriginalCount == EntryCount) {
  4005. //
  4006. // This thread can be racing against other threads
  4007. // calling MmUnlockPages. All threads can safely do
  4008. // interlocked decrements on the "AWE reference count".
  4009. // Whichever thread drives it to zero is responsible for
  4010. // decrementing the actual PFN reference count (which may
  4011. // be greater than 1 due to other non-AWE API calls being
  4012. // used on the same page). The thread that drives this
  4013. // reference count to zero must put the page on the actual
  4014. // freelist at that time and decrement various resident
  4015. // available and commitment counters also.
  4016. //
  4017. if (OriginalCount == 1) {
  4018. //
  4019. // This thread has driven the AWE reference count to
  4020. // zero so it must initiate a decrement of the PFN
  4021. // reference count (while holding the PFN lock), etc.
  4022. //
  4023. // This path should be the frequent one since typically
  4024. // I/Os complete before these types of pages are
  4025. // freed by the app.
  4026. //
  4027. // Note this routine returns resident available and
  4028. // commitment for the page.
  4029. //
  4030. MiDecrementReferenceCountForAwePage (Pfn1, TRUE);
  4031. }
  4032. break;
  4033. }
  4034. } while (TRUE);
  4035. //
  4036. // Nothing magic about the divisor here - just releasing the PFN lock
  4037. // periodically to allow other processors and DPCs a chance to execute.
  4038. //
  4039. if ((PageFrameIndex & 0xF) == 0) {
  4040. UNLOCK_PFN (OldIrql);
  4041. LOCK_PFN (OldIrql);
  4042. }
  4043. Pfn1 += 1;
  4044. PageFrameIndex += 1;
  4045. } while (PageFrameIndex < LastPageFrameIndex);
  4046. UNLOCK_PFN (OldIrql);
  4047. KeLeaveCriticalRegionThread (CurrentThread);
  4048. return;
  4049. }
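//
// Illustrative sketch (hypothetical, compiled out): the lock-free reference
// drop used in the loop above, generalized. Any number of threads may race
// here; each performs an interlocked decrement, and only the thread that
// observes the count at 1 (i.e. drives it to zero) performs the final
// cleanup - in the routine above that is MiDecrementReferenceCountForAwePage.
// The function name is invented for illustration only.
//
#if 0
static LOGICAL
MiSketchDropAweReference (
    IN OUT PLONG ReferenceCount
    )
{
    LONG EntryCount;
    LONG OriginalCount;

    do {
        EntryCount = *ReferenceCount;
        ASSERT (EntryCount > 0);
        OriginalCount = InterlockedCompareExchange (ReferenceCount,
                                                    EntryCount - 1,
                                                    EntryCount);
    } while (OriginalCount != EntryCount);

    //
    // TRUE means this caller took the count to zero and owns the cleanup.
    //
    return (LOGICAL) (OriginalCount == 1);
}
#endif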
  4050. LOGICAL
  4051. MmIsSessionAddress (
  4052. IN PVOID VirtualAddress
  4053. )
  4054. /*++
  4055. Routine Description:
  4056. This function returns TRUE if a session address is specified.
  4057. FALSE is returned if not.
  4058. Arguments:
  4059. VirtualAddress - Supplies the address in question.
  4060. Return Value:
  4061. See above.
  4062. Environment:
  4063. Kernel mode.
  4064. --*/
  4065. {
  4066. return MI_IS_SESSION_ADDRESS (VirtualAddress);
  4067. }
  4068. ULONG
  4069. MmGetSizeOfBigPoolAllocation (
  4070. IN PVOID StartingAddress
  4071. )
  4072. /*++
  4073. Routine Description:
  4074. This function returns the number of pages consumed by the argument
  4075. big pool allocation. It is assumed that the caller still owns the
  4076. allocation (and guarantees it cannot be freed from underneath us)
  4077. so this routine can run lock-free.
  4078. Arguments:
  4079. StartingAddress - Supplies the starting address which was returned
  4080. in a previous call to MiAllocatePoolPages.
  4081. Return Value:
  4082. Returns the number of pages allocated.
  4083. Environment:
4084. This function is used by the general pool free routines
  4085. and should not be called directly.
  4086. --*/
  4087. {
  4088. PMMPFN StartPfn;
  4089. PMMPFN Pfn1;
  4090. PMMPTE PointerPte;
  4091. PMMPTE StartPte;
  4092. ULONG StartPosition;
  4093. PFN_NUMBER i;
  4094. PFN_NUMBER NumberOfPages;
  4095. POOL_TYPE PoolType;
  4096. PMM_PAGED_POOL_INFO PagedPoolInfo;
  4097. PULONG BitMap;
  4098. #if DBG
  4099. PMM_SESSION_SPACE SessionSpace;
  4100. PKGUARDED_MUTEX PoolMutex;
  4101. #endif
  4102. if ((StartingAddress >= MmPagedPoolStart) &&
  4103. (StartingAddress <= MmPagedPoolEnd)) {
  4104. PoolType = PagedPool;
  4105. PagedPoolInfo = &MmPagedPoolInfo;
  4106. StartPosition = (ULONG)(((PCHAR)StartingAddress -
  4107. (PCHAR)MmPageAlignedPoolBase[PoolType]) >> PAGE_SHIFT);
  4108. #if DBG
  4109. PoolMutex = &MmPagedPoolMutex;
  4110. #endif
  4111. }
  4112. else if (MI_IS_SESSION_POOL_ADDRESS (StartingAddress) == TRUE) {
  4113. PoolType = PagedPool;
  4114. ASSERT (MmSessionSpace != NULL);
  4115. PagedPoolInfo = &MmSessionSpace->PagedPoolInfo;
  4116. StartPosition = (ULONG)(((PCHAR)StartingAddress -
  4117. (PCHAR)MmSessionSpace->PagedPoolStart) >> PAGE_SHIFT);
  4118. #if DBG
  4119. SessionSpace = SESSION_GLOBAL (MmSessionSpace);
  4120. PoolMutex = &SessionSpace->PagedPoolMutex;
  4121. #endif
  4122. }
  4123. else {
  4124. if (StartingAddress < MM_SYSTEM_RANGE_START) {
  4125. KeBugCheckEx (BAD_POOL_CALLER,
  4126. 0x44,
  4127. (ULONG_PTR)StartingAddress,
  4128. (ULONG_PTR)MM_SYSTEM_RANGE_START,
  4129. 0);
  4130. }
  4131. PoolType = NonPagedPool;
  4132. PagedPoolInfo = &MmPagedPoolInfo;
  4133. StartPosition = (ULONG)(((PCHAR)StartingAddress -
  4134. (PCHAR)MmPageAlignedPoolBase[PoolType]) >> PAGE_SHIFT);
  4135. //
  4136. // Check to ensure this page is really the start of an allocation.
  4137. //
  4138. if (MI_IS_PHYSICAL_ADDRESS (StartingAddress)) {
  4139. //
  4140. // On certain architectures, virtual addresses
  4141. // may be physical and hence have no corresponding PTE.
  4142. //
  4143. PointerPte = NULL;
  4144. Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (StartingAddress));
  4145. ASSERT (StartPosition < MmExpandedPoolBitPosition);
  4146. if ((StartingAddress < MmNonPagedPoolStart) ||
  4147. (StartingAddress >= MmNonPagedPoolEnd0)) {
  4148. KeBugCheckEx (BAD_POOL_CALLER,
  4149. 0x45,
  4150. (ULONG_PTR)StartingAddress,
  4151. 0,
  4152. 0);
  4153. }
  4154. }
  4155. else {
  4156. PointerPte = MiGetPteAddress (StartingAddress);
  4157. if (((StartingAddress >= MmNonPagedPoolExpansionStart) &&
  4158. (StartingAddress < MmNonPagedPoolEnd)) ||
  4159. ((StartingAddress >= MmNonPagedPoolStart) &&
  4160. (StartingAddress < MmNonPagedPoolEnd0))) {
  4161. NOTHING;
  4162. }
  4163. else {
  4164. KeBugCheckEx (BAD_POOL_CALLER,
  4165. 0x46,
  4166. (ULONG_PTR)StartingAddress,
  4167. 0,
  4168. 0);
  4169. }
  4170. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  4171. }
  4172. if (Pfn1->u3.e1.StartOfAllocation == 0) {
  4173. KeBugCheckEx (BAD_POOL_CALLER,
  4174. 0x47,
  4175. (ULONG_PTR) StartingAddress,
  4176. (ULONG_PTR) MI_PFN_ELEMENT_TO_INDEX (Pfn1),
  4177. MmHighestPhysicalPage);
  4178. }
  4179. StartPfn = Pfn1;
  4180. NumberOfPages = 0;
  4181. ASSERT (Pfn1->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  4182. //
  4183. // Find end of allocation.
  4184. //
  4185. if (PointerPte == NULL) {
  4186. while (Pfn1->u3.e1.EndOfAllocation == 0) {
  4187. Pfn1 += 1;
  4188. }
  4189. NumberOfPages = Pfn1 - StartPfn + 1;
  4190. }
  4191. else {
  4192. StartPte = PointerPte;
  4193. while (Pfn1->u3.e1.EndOfAllocation == 0) {
  4194. PointerPte += 1;
  4195. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  4196. }
  4197. NumberOfPages = PointerPte - StartPte + 1;
  4198. }
  4199. return (ULONG) NumberOfPages;
  4200. }
  4201. //
  4202. // Paged pool (global or session).
  4203. //
  4204. // Check to ensure this page is really the start of an allocation.
  4205. //
  4206. i = StartPosition;
  4207. //
4208. // Paged pool. Verify the page is allocated; the checked build below also
4209. // uses the end of allocation bitmap to confirm the start of allocation.
  4210. //
  4211. if (!RtlCheckBit (PagedPoolInfo->PagedPoolAllocationMap, StartPosition)) {
  4212. KeBugCheckEx (BAD_POOL_CALLER,
  4213. 0x48,
  4214. (ULONG_PTR)StartingAddress,
  4215. (ULONG_PTR)StartPosition,
  4216. MmSizeOfPagedPoolInBytes);
  4217. }
  4218. #if DBG
  4219. if (StartPosition > 0) {
  4220. KeAcquireGuardedMutex (PoolMutex);
  4221. if (RtlCheckBit (PagedPoolInfo->PagedPoolAllocationMap, StartPosition - 1)) {
  4222. if (!RtlCheckBit (PagedPoolInfo->EndOfPagedPoolBitmap, StartPosition - 1)) {
  4223. //
  4224. // In the middle of an allocation... bugcheck.
  4225. //
  4226. DbgPrint("paged pool in middle of allocation\n");
  4227. KeBugCheckEx (MEMORY_MANAGEMENT,
  4228. 0x41286,
  4229. (ULONG_PTR)PagedPoolInfo->PagedPoolAllocationMap,
  4230. (ULONG_PTR)PagedPoolInfo->EndOfPagedPoolBitmap,
  4231. StartPosition);
  4232. }
  4233. }
  4234. KeReleaseGuardedMutex (PoolMutex);
  4235. }
  4236. #endif
  4237. //
  4238. // Find the last allocated page.
  4239. //
  4240. BitMap = PagedPoolInfo->EndOfPagedPoolBitmap->Buffer;
  4241. while (!MI_CHECK_BIT (BitMap, i)) {
  4242. i += 1;
  4243. }
  4244. NumberOfPages = i - StartPosition + 1;
  4245. return (ULONG)NumberOfPages;
  4246. }
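//
// Illustrative sketch (hypothetical, compiled out): the end-of-allocation
// walk above advances through the bitmap one bit at a time until a set bit
// is found; the allocation size is then the distance from the start bit to
// that end bit, inclusive. Assuming a conventional ULONG-array bitmap layout
// (the actual MI_CHECK_BIT definition may differ), the scan looks like this.
// The function name is invented for illustration only.
//
#if 0
static PFN_NUMBER
MiSketchCountAllocationPages (
    IN PULONG BitMap,
    IN PFN_NUMBER StartPosition
    )
{
    PFN_NUMBER i;

    i = StartPosition;

    while (((BitMap[i / 32] >> (i % 32)) & 1) == 0) {
        i += 1;
    }

    return i - StartPosition + 1;
}
#endif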
  4247. //
  4248. // The number of large page ranges must always be larger than the number of
  4249. // translation register entries for the target platform.
  4250. //
  4251. #define MI_MAX_LARGE_PAGE_RANGES 64
  4252. typedef struct _MI_LARGE_PAGE_RANGES {
  4253. PFN_NUMBER StartFrame;
  4254. PFN_NUMBER LastFrame;
  4255. } MI_LARGE_PAGE_RANGES, *PMI_LARGE_PAGE_RANGES;
  4256. ULONG MiLargePageRangeIndex;
  4257. MI_LARGE_PAGE_RANGES MiLargePageRanges[MI_MAX_LARGE_PAGE_RANGES];
  4258. LOGICAL
  4259. MiMustFrameBeCached (
  4260. IN PFN_NUMBER PageFrameIndex
  4261. )
  4262. /*++
  4263. Routine Description:
  4264. This routine checks whether the specified page frame must be mapped
  4265. fully cached because it is already part of a large page which is fully
  4266. cached. This must be detected otherwise we would be creating an
  4267. incoherent overlapping TB entry as the same physical page would be
  4268. mapped by 2 different TB entries with different cache attributes.
  4269. Arguments:
  4270. PageFrameIndex - Supplies the page frame index in question.
  4271. Return Value:
  4272. TRUE if the page must be mapped as fully cachable, FALSE if not.
  4273. Environment:
  4274. Kernel mode. IRQL of DISPATCH_LEVEL or below.
4275. PFN lock must be held for the results to be relied on, but note callers will
  4276. sometimes call without it for a preliminary scan and then repeat it with
  4277. the lock held.
  4278. --*/
  4279. {
  4280. PMI_LARGE_PAGE_RANGES Range;
  4281. PMI_LARGE_PAGE_RANGES LastValidRange;
  4282. Range = MiLargePageRanges;
  4283. LastValidRange = MiLargePageRanges + MiLargePageRangeIndex;
  4284. while (Range < LastValidRange) {
  4285. if ((PageFrameIndex >= Range->StartFrame) &&
  4286. (PageFrameIndex <= Range->LastFrame)) {
  4287. return TRUE;
  4288. }
  4289. Range += 1;
  4290. }
  4291. return FALSE;
  4292. }
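//
// Illustrative sketch (hypothetical, compiled out): a caller that wants a
// noncached or writecombined mapping must skip any frame for which this
// routine returns TRUE, since mapping it with a conflicting attribute would
// create the incoherent overlapping TB entry described above. The contiguous
// allocation scans earlier in this module enforce the same constraint via
// the per-PFN MustBeCached bit. This is a loop-body fragment such a scan
// might contain; the names are invented for illustration only.
//
#if 0
if ((CacheAttribute != MiCached) && (MiMustFrameBeCached (PageFrameIndex))) {

    //
    // Skip this frame - it cannot satisfy a noncached or writecombined
    // request.
    //
    continue;
}
#endif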
  4293. VOID
  4294. MiSyncCachedRanges (
  4295. VOID
  4296. )
  4297. /*++
  4298. Routine Description:
4299. This routine searches the cached range list for PFN-mapped entries and ripples
  4300. the must-be-cached bits into each PFN entry.
  4301. Arguments:
  4302. None.
  4303. Return Value:
  4304. None.
  4305. Environment:
  4306. Kernel mode, PFN lock NOT held.
  4307. --*/
  4308. {
  4309. ULONG i;
  4310. KIRQL OldIrql;
  4311. PMMPFN Pfn1;
  4312. PMMPFN LastPfn;
  4313. PFN_NUMBER PageFrameIndex;
  4314. PFN_NUMBER LastPageFrameIndex;
  4315. for (i = 0; i < MiLargePageRangeIndex; i += 1) {
  4316. PageFrameIndex = MiLargePageRanges[i].StartFrame;
  4317. LastPageFrameIndex = MiLargePageRanges[i].LastFrame;
  4318. if (MI_IS_PFN (PageFrameIndex)) {
  4319. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  4320. LastPfn = MI_PFN_ELEMENT (LastPageFrameIndex);
  4321. LOCK_PFN (OldIrql);
  4322. while (Pfn1 <= LastPfn) {
  4323. Pfn1->u4.MustBeCached = 1;
  4324. Pfn1 += 1;
  4325. }
  4326. UNLOCK_PFN (OldIrql);
  4327. }
  4328. }
  4329. return;
  4330. }
  4331. LOGICAL
  4332. MiAddCachedRange (
  4333. IN PFN_NUMBER PageFrameIndex,
  4334. IN PFN_NUMBER LastPageFrameIndex
  4335. )
  4336. /*++
  4337. Routine Description:
  4338. This routine adds the specified page range to the "must be mapped
  4339. fully cached" list.
  4340. This is typically called with a range which is about to be mapped with
  4341. large pages fully cached and so no portion of the range can ever be
  4342. mapped noncached or writecombined otherwise we would be creating an
  4343. incoherent overlapping TB entry as the same physical page would be
  4344. mapped by 2 different TB entries with different cache attributes.
  4345. Arguments:
  4346. PageFrameIndex - Supplies the starting page frame index to insert.
  4347. LastPageFrameIndex - Supplies the last page frame index to insert.
  4348. Return Value:
  4349. TRUE if the range was successfully inserted, FALSE if not.
  4350. Environment:
  4351. Kernel mode, PFN lock NOT held.
  4352. --*/
  4353. {
  4354. KIRQL OldIrql;
  4355. PMMPFN Pfn1;
  4356. PMMPFN LastPfn;
  4357. if (MiLargePageRangeIndex >= MI_MAX_LARGE_PAGE_RANGES) {
  4358. return FALSE;
  4359. }
  4360. ASSERT (MiLargePageRanges[MiLargePageRangeIndex].StartFrame == 0);
  4361. ASSERT (MiLargePageRanges[MiLargePageRangeIndex].LastFrame == 0);
  4362. MiLargePageRanges[MiLargePageRangeIndex].StartFrame = PageFrameIndex;
  4363. MiLargePageRanges[MiLargePageRangeIndex].LastFrame = LastPageFrameIndex;
  4364. MiLargePageRangeIndex += 1;
  4365. if ((MiPfnBitMap.Buffer != NULL) && (MI_IS_PFN (PageFrameIndex))) {
  4366. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  4367. LastPfn = MI_PFN_ELEMENT (LastPageFrameIndex);
  4368. LOCK_PFN (OldIrql);
  4369. while (Pfn1 <= LastPfn) {
  4370. Pfn1->u4.MustBeCached = 1;
  4371. Pfn1 += 1;
  4372. }
  4373. UNLOCK_PFN (OldIrql);
  4374. }
  4375. return TRUE;
  4376. }
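//
// Illustrative sketch (hypothetical, compiled out): the intended pairing of
// MiAddCachedRange with MiRemoveCachedRange below - register the frame range
// before it is mapped with large, fully cached pages and unregister it when
// that mapping is torn down. FirstFrame and LastFrame are invented names.
//
#if 0
if (MiAddCachedRange (FirstFrame, LastFrame) == TRUE) {

    //
    // ... the range is now mapped with large, fully cached pages and no
    // frame in it may be mapped noncached or writecombined ...
    //

    //
    // When the large page mapping is removed:
    //
    MiRemoveCachedRange (FirstFrame, LastFrame);
}
#endif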
  4377. VOID
  4378. MiRemoveCachedRange (
  4379. IN PFN_NUMBER PageFrameIndex,
  4380. IN PFN_NUMBER LastPageFrameIndex
  4381. )
  4382. /*++
  4383. Routine Description:
  4384. This routine removes the specified page range from the "must be mapped
  4385. fully cached" list.
  4386. This is typically called with a range which was mapped with
  4387. large pages fully cached and so no portion of the range can ever be
  4388. mapped noncached or writecombined otherwise we would be creating an
  4389. incoherent overlapping TB entry as the same physical page would be
  4390. mapped by 2 different TB entries with different cache attributes.
  4391. The range is now being unmapped so we must also remove it from this list.
  4392. Arguments:
  4393. PageFrameIndex - Supplies the starting page frame index to remove.
  4394. LastPageFrameIndex - Supplies the last page frame index to remove.
  4395. Return Value:
  4396. None.
  4397. Environment:
  4398. Kernel mode, PFN lock NOT held.
  4399. --*/
  4400. {
  4401. ULONG i;
  4402. PMI_LARGE_PAGE_RANGES Range;
  4403. PMMPFN Pfn1;
  4404. PMMPFN LastPfn;
  4405. KIRQL OldIrql;
  4406. ASSERT (MiLargePageRangeIndex <= MI_MAX_LARGE_PAGE_RANGES);
  4407. Range = MiLargePageRanges;
  4408. for (i = 0; i < MiLargePageRangeIndex; i += 1, Range += 1) {
  4409. if ((PageFrameIndex == Range->StartFrame) &&
  4410. (LastPageFrameIndex == Range->LastFrame)) {
  4411. //
  4412. // Found it, slide everything else down to preserve any other
4413. // non zero ranges. Decrement the count of valid entries so that
4414. // searches don't need to walk the whole thing.
  4415. //
  4416. while (i < MI_MAX_LARGE_PAGE_RANGES - 1) {
  4417. *Range = *(Range + 1);
  4418. Range += 1;
  4419. i += 1;
  4420. }
  4421. Range->StartFrame = 0;
  4422. Range->LastFrame = 0;
  4423. MiLargePageRangeIndex -= 1;
  4424. if ((MiPfnBitMap.Buffer != NULL) && (MI_IS_PFN (PageFrameIndex))) {
  4425. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  4426. LastPfn = MI_PFN_ELEMENT (LastPageFrameIndex);
  4427. LOCK_PFN (OldIrql);
  4428. while (Pfn1 <= LastPfn) {
  4429. Pfn1->u4.MustBeCached = 0;
  4430. Pfn1 += 1;
  4431. }
  4432. UNLOCK_PFN (OldIrql);
  4433. }
  4434. return;
  4435. }
  4436. }
  4437. ASSERT (FALSE);
  4438. return;
  4439. }
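//
// Illustrative sketch (hypothetical, compiled out): the slide-down removal
// above, generalized to any fixed array holding Count valid entries. Entry i
// is removed by copying each later entry down one slot and clearing the
// vacated slot; the clearing matters because MiAddCachedRange above asserts
// that the slot it reuses is still zeroed. The function name is invented for
// illustration only.
//
#if 0
static VOID
MiSketchRemoveRangeEntry (
    IN OUT PMI_LARGE_PAGE_RANGES Ranges,
    IN ULONG Count,
    IN ULONG i
    )
{
    while (i + 1 < Count) {
        Ranges[i] = Ranges[i + 1];
        i += 1;
    }

    Ranges[i].StartFrame = 0;
    Ranges[i].LastFrame = 0;
}
#endif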