Source code of Windows XP (NT5)

/*++

Copyright (c) 1989  Microsoft Corporation

Module Name:

    allocpag.c

Abstract:

    This module contains the routines which allocate and deallocate
    one or more pages from paged or nonpaged pool.

Author:

    Lou Perazzoli (loup) 6-Apr-1989
    Landy Wang (landyw) 02-June-1997

Revision History:

--*/

#include "mi.h"

PVOID
MiFindContiguousMemoryInPool (
    IN PFN_NUMBER LowestPfn,
    IN PFN_NUMBER HighestPfn,
    IN PFN_NUMBER BoundaryPfn,
    IN PFN_NUMBER SizeInPages,
    IN PVOID CallingAddress
    );

#ifdef ALLOC_PRAGMA
#pragma alloc_text(INIT, MiInitializeNonPagedPool)
#pragma alloc_text(PAGE, MmAvailablePoolInPages)
#pragma alloc_text(PAGELK, MiFindContiguousMemory)
#pragma alloc_text(PAGELK, MiFindContiguousMemoryInPool)
#pragma alloc_text(PAGE, MiCheckSessionPoolAllocations)
#pragma alloc_text(PAGE, MiSessionPoolVector)
#pragma alloc_text(PAGE, MiSessionPoolMutex)
#pragma alloc_text(PAGE, MiInitializeSessionPool)
#pragma alloc_text(PAGE, MiFreeSessionPoolBitMaps)
#pragma alloc_text(POOLMI, MiAllocatePoolPages)
#pragma alloc_text(POOLMI, MiFreePoolPages)

#if DBG || (i386 && !FPO)
#pragma alloc_text(PAGELK, MmSnapShotPool)
#endif // DBG || (i386 && !FPO)
#endif

ULONG MmPagedPoolCommit; // used by the debugger

PFN_NUMBER MmAllocatedNonPagedPool;

PFN_NUMBER MiStartOfInitialPoolFrame;
PFN_NUMBER MiEndOfInitialPoolFrame;

PVOID MmNonPagedPoolEnd0;
PVOID MmNonPagedPoolExpansionStart;

LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_LIST_HEADS];

extern POOL_DESCRIPTOR NonPagedPoolDescriptor;

#define MM_SMALL_ALLOCATIONS 4

#if DBG

ULONG MiClearCache;

//
// Set this to a nonzero (ie: 10000) value to cause every pool allocation to
// be checked, and an ASSERT to fire if the allocation is larger than this
// value in pages.
//

ULONG MmCheckRequestInPages = 0;

//
// Set this to a nonzero (ie: 0x23456789) value to cause this pattern to be
// written into freed nonpaged pool pages.
//

ULONG MiFillFreedPool = 0;

#endif

PFN_NUMBER MiExpansionPoolPagesInUse;
PFN_NUMBER MiExpansionPoolPagesInitialCharge;

ULONG MmUnusedSegmentForceFreeDefault = 30;

extern ULONG MmUnusedSegmentForceFree;

//
// For debugging purposes.
//

typedef enum _MM_POOL_TYPES {
    MmNonPagedPool,
    MmPagedPool,
    MmSessionPagedPool,
    MmMaximumPoolType
} MM_POOL_TYPES;

typedef enum _MM_POOL_PRIORITIES {
    MmHighPriority,
    MmNormalPriority,
    MmLowPriority,
    MmMaximumPoolPriority
} MM_POOL_PRIORITIES;

typedef enum _MM_POOL_FAILURE_REASONS {
    MmNonPagedNoPtes,
    MmPriorityTooLow,
    MmNonPagedNoPagesAvailable,
    MmPagedNoPtes,
    MmSessionPagedNoPtes,
    MmPagedNoPagesAvailable,
    MmSessionPagedNoPagesAvailable,
    MmPagedNoCommit,
    MmSessionPagedNoCommit,
    MmNonPagedNoResidentAvailable,
    MmNonPagedNoCommit,
    MmMaximumFailureReason
} MM_POOL_FAILURE_REASONS;

ULONG MmPoolFailures[MmMaximumPoolType][MmMaximumPoolPriority];
ULONG MmPoolFailureReasons[MmMaximumFailureReason];

typedef enum _MM_PREEMPTIVE_TRIMS {
    MmPreemptForNonPaged,
    MmPreemptForPaged,
    MmPreemptForNonPagedPriority,
    MmPreemptForPagedPriority,
    MmMaximumPreempt
} MM_PREEMPTIVE_TRIMS;

ULONG MmPreemptiveTrims[MmMaximumPreempt];
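
//
// Illustrative sketch (editor's addition, not part of the original source):
// how the debugging counters above are indexed.  A failed normal-priority
// paged pool request, for example, bumps both a per-pool/per-priority
// failure count and a failure-reason bucket, exactly as the routines below
// do.  Compiled out; the helper name is hypothetical.
//

#if 0
VOID
MiExampleLogPagedPoolFailure (
    VOID
    )
{
    MmPoolFailures[MmPagedPool][MmNormalPriority] += 1;
    MmPoolFailureReasons[MmPagedNoCommit] += 1;
}
#endif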

VOID
MiProtectFreeNonPagedPool (
    IN PVOID VirtualAddress,
    IN ULONG SizeInPages
    )

/*++

Routine Description:

    This function protects freed nonpaged pool.

Arguments:

    VirtualAddress - Supplies the freed pool address to protect.

    SizeInPages - Supplies the size of the request in pages.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    ULONG i;
    MMPTE PteContents;
    PMMPTE PointerPte;

    //
    // Prevent anyone from touching the free nonpaged pool.
    //

    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress) == 0) {

        PointerPte = MiGetPteAddress (VirtualAddress);

        for (i = 0; i < SizeInPages; i += 1) {

            PteContents = *PointerPte;

            PteContents.u.Hard.Valid = 0;
            PteContents.u.Soft.Prototype = 1;

            KeFlushSingleTb (VirtualAddress,
                             TRUE,
                             TRUE,
                             (PHARDWARE_PTE)PointerPte,
                             PteContents.u.Flush);

            VirtualAddress = (PVOID)((PCHAR)VirtualAddress + PAGE_SIZE);
            PointerPte += 1;
        }
    }
}

LOGICAL
MiUnProtectFreeNonPagedPool (
    IN PVOID VirtualAddress,
    IN ULONG SizeInPages
    )

/*++

Routine Description:

    This function unprotects freed nonpaged pool.

Arguments:

    VirtualAddress - Supplies the freed pool address to unprotect.

    SizeInPages - Supplies the size of the request in pages - zero indicates
                  to keep going until there are no more protected PTEs (ie:
                  the caller doesn't know how many protected PTEs there are).

Return Value:

    TRUE if pages were unprotected, FALSE if not.

Environment:

    Kernel mode.

--*/

{
    PMMPTE PointerPte;
    MMPTE PteContents;
    ULONG PagesDone;

    PagesDone = 0;

    //
    // Unprotect the previously freed pool so it can be manipulated.
    //

    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress) == 0) {

        PointerPte = MiGetPteAddress((PVOID)VirtualAddress);

        PteContents = *PointerPte;

        while (PteContents.u.Hard.Valid == 0 && PteContents.u.Soft.Prototype == 1) {

            PteContents.u.Hard.Valid = 1;
            PteContents.u.Soft.Prototype = 0;

            MI_WRITE_VALID_PTE (PointerPte, PteContents);

            PagesDone += 1;

            if (PagesDone == SizeInPages) {
                break;
            }

            PointerPte += 1;
            PteContents = *PointerPte;
        }
    }

    if (PagesDone == 0) {
        return FALSE;
    }

    return TRUE;
}
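
//
// Illustrative sketch (editor's addition, not part of the original source):
// the unprotect/reprotect bracket that callers in this module place around
// any access to a freed-pool entry when the MmProtectFreedNonPagedPool
// debugging mode is enabled.  Compiled out; MiExampleInspectFreedEntry is
// a hypothetical helper.
//

#if 0
VOID
MiExampleInspectFreedEntry (
    IN PMMFREE_POOL_ENTRY FreePageInfo
    )
{
    //
    // Make the protected pages valid so they can be read.
    //

    if (MmProtectFreedNonPagedPool == TRUE) {
        MiUnProtectFreeNonPagedPool ((PVOID)FreePageInfo, 0);
    }

    ASSERT (FreePageInfo->Signature == MM_FREE_POOL_SIGNATURE);

    //
    // Reprotect the entry before anyone else can touch it.
    //

    if (MmProtectFreedNonPagedPool == TRUE) {
        MiProtectFreeNonPagedPool ((PVOID)FreePageInfo,
                                   (ULONG)FreePageInfo->Size);
    }
}
#endif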

VOID
MiProtectedPoolInsertList (
    IN PLIST_ENTRY ListHead,
    IN PLIST_ENTRY Entry,
    IN LOGICAL InsertHead
    )

/*++

Routine Description:

    This function inserts the entry into the protected list.

Arguments:

    ListHead - Supplies the list head to add onto.

    Entry - Supplies the list entry to insert.

    InsertHead - If TRUE, insert at the head otherwise at the tail.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    PVOID FreeFlink;
    PVOID FreeBlink;
    PVOID VirtualAddress;

    //
    // Either the flink or the blink may be pointing
    // at protected nonpaged pool.  Unprotect now.
    //

    FreeFlink = (PVOID)0;
    FreeBlink = (PVOID)0;

    if (IsListEmpty(ListHead) == 0) {
        VirtualAddress = (PVOID)ListHead->Flink;
        if (MiUnProtectFreeNonPagedPool (VirtualAddress, 1) == TRUE) {
            FreeFlink = VirtualAddress;
        }
    }

    if (((PVOID)Entry == ListHead->Blink) == 0) {
        VirtualAddress = (PVOID)ListHead->Blink;
        if (MiUnProtectFreeNonPagedPool (VirtualAddress, 1) == TRUE) {
            FreeBlink = VirtualAddress;
        }
    }

    if (InsertHead == TRUE) {
        InsertHeadList (ListHead, Entry);
    }
    else {
        InsertTailList (ListHead, Entry);
    }

    if (FreeFlink) {
        //
        // Reprotect the flink.
        //
        MiProtectFreeNonPagedPool (FreeFlink, 1);
    }

    if (FreeBlink) {
        //
        // Reprotect the blink.
        //
        MiProtectFreeNonPagedPool (FreeBlink, 1);
    }
}

VOID
MiProtectedPoolRemoveEntryList (
    IN PLIST_ENTRY Entry
    )

/*++

Routine Description:

    This function unlinks the list pointer from protected freed nonpaged pool.

Arguments:

    Entry - Supplies the list entry to remove.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    PVOID FreeFlink;
    PVOID FreeBlink;
    PVOID VirtualAddress;

    //
    // Either the flink or the blink may be pointing
    // at protected nonpaged pool.  Unprotect now.
    //

    FreeFlink = (PVOID)0;
    FreeBlink = (PVOID)0;

    if (IsListEmpty(Entry) == 0) {
        VirtualAddress = (PVOID)Entry->Flink;
        if (MiUnProtectFreeNonPagedPool (VirtualAddress, 1) == TRUE) {
            FreeFlink = VirtualAddress;
        }
    }

    if (((PVOID)Entry == Entry->Blink) == 0) {
        VirtualAddress = (PVOID)Entry->Blink;
        if (MiUnProtectFreeNonPagedPool (VirtualAddress, 1) == TRUE) {
            FreeBlink = VirtualAddress;
        }
    }

    RemoveEntryList (Entry);

    if (FreeFlink) {
        //
        // Reprotect the flink.
        //
        MiProtectFreeNonPagedPool (FreeFlink, 1);
    }

    if (FreeBlink) {
        //
        // Reprotect the blink.
        //
        MiProtectFreeNonPagedPool (FreeBlink, 1);
    }
}
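
//
// Illustrative sketch (editor's addition, not part of the original source):
// the callers later in this module pick between the plain list primitives
// and the protected-pool wrappers above based on whether the
// MmProtectFreedNonPagedPool debugging mode is on.  Compiled out.
//

#if 0
    if (MmProtectFreedNonPagedPool == FALSE) {
        RemoveEntryList (&FreePageInfo->List);
    }
    else {
        MiProtectedPoolRemoveEntryList (&FreePageInfo->List);
    }
#endif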

VOID
MiTrimSegmentCache (
    VOID
    )

/*++

Routine Description:

    This function initiates trimming of the segment cache.

Arguments:

    None.

Return Value:

    None.

Environment:

    Kernel Mode Only.

--*/

{
    KIRQL OldIrql;
    LOGICAL SignalDereferenceThread;
    LOGICAL SignalSystemCache;

    SignalDereferenceThread = FALSE;
    SignalSystemCache = FALSE;

    LOCK_PFN2 (OldIrql);

    if (MmUnusedSegmentForceFree == 0) {

        if (!IsListEmpty(&MmUnusedSegmentList)) {

            SignalDereferenceThread = TRUE;
            MmUnusedSegmentForceFree = MmUnusedSegmentForceFreeDefault;
        }
        else {
            if (!IsListEmpty(&MmUnusedSubsectionList)) {
                SignalDereferenceThread = TRUE;
                MmUnusedSegmentForceFree = MmUnusedSegmentForceFreeDefault;
            }

            if (MiUnusedSubsectionPagedPool < 4 * PAGE_SIZE) {

                //
                // No unused segments and tossable subsection usage is low as
                // well.  Start unmapping system cache views in an attempt
                // to get back the paged pool containing its prototype PTEs.
                //

                SignalSystemCache = TRUE;
            }
        }
    }

    UNLOCK_PFN2 (OldIrql);

    if (SignalSystemCache == TRUE) {

        if (CcHasInactiveViews() == TRUE) {

            if (SignalDereferenceThread == FALSE) {

                LOCK_PFN2 (OldIrql);

                if (MmUnusedSegmentForceFree == 0) {
                    SignalDereferenceThread = TRUE;
                    MmUnusedSegmentForceFree = MmUnusedSegmentForceFreeDefault;
                }

                UNLOCK_PFN2 (OldIrql);
            }
        }
    }

    if (SignalDereferenceThread == TRUE) {
        KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);
    }
}

POOL_TYPE
MmDeterminePoolType (
    IN PVOID VirtualAddress
    )

/*++

Routine Description:

    This function determines which pool a virtual address resides within.

Arguments:

    VirtualAddress - Supplies the virtual address to determine which pool
                     it resides within.

Return Value:

    Returns the POOL_TYPE (PagedPool, NonPagedPool, PagedPoolSession or
    NonPagedPoolSession).

Environment:

    Kernel Mode Only.

--*/

{
    if ((VirtualAddress >= MmPagedPoolStart) &&
        (VirtualAddress <= MmPagedPoolEnd)) {
        return PagedPool;
    }

    if (MI_IS_SESSION_POOL_ADDRESS (VirtualAddress) == TRUE) {
        return PagedPoolSession;
    }

    return NonPagedPool;
}
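
//
// Illustrative sketch (editor's addition, not part of the original source):
// a caller-side view of MmDeterminePoolType.  Compiled out;
// MiExampleIsPagableAddress is a hypothetical helper.
//

#if 0
LOGICAL
MiExampleIsPagableAddress (
    IN PVOID VirtualAddress
    )
{
    //
    // Both system paged pool and session paged pool may be paged out;
    // only nonpaged pool is always resident.
    //

    return (MmDeterminePoolType (VirtualAddress) != NonPagedPool);
}
#endif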

PVOID
MiSessionPoolVector (
    VOID
    )

/*++

Routine Description:

    This function returns the session pool descriptor for the current session.

Arguments:

    None.

Return Value:

    Pool descriptor.

--*/

{
    PAGED_CODE ();

    return (PVOID)&MmSessionSpace->PagedPool;
}

VOID
MiSessionPoolAllocated (
    IN PVOID VirtualAddress,
    IN SIZE_T NumberOfBytes,
    IN POOL_TYPE PoolType
    )

/*++

Routine Description:

    This function charges the new pool allocation for the current session.
    On session exit, this charge must be zero.

    Interlocks are used here despite the fact that synchronization is provided
    anyway by our caller.  This is so the path where the pool is freed can
    occur caller-lock-free.

Arguments:

    VirtualAddress - Supplies the allocated pool address.

    NumberOfBytes - Supplies the number of bytes allocated.

    PoolType - Supplies the type of the above pool allocation.

Return Value:

    None.

Environment:

    Called both from Mm and executive pool.

    Holding no pool resources when called from pool.  Unfortunately, pool
    resources are held when called from Mm.

--*/

{
#if !DBG
    UNREFERENCED_PARAMETER (VirtualAddress);
#endif

    if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
        ASSERT (KeGetCurrentIrql () <= DISPATCH_LEVEL);
        ASSERT (MI_IS_SESSION_POOL_ADDRESS(VirtualAddress) == FALSE);
        InterlockedExchangeAddSizeT (&MmSessionSpace->NonPagedPoolBytes,
                                     NumberOfBytes);
        InterlockedIncrement ((PLONG)&MmSessionSpace->NonPagedPoolAllocations);
    }
    else {
        ASSERT (KeGetCurrentIrql () <= APC_LEVEL);
        ASSERT (MI_IS_SESSION_POOL_ADDRESS(VirtualAddress) == TRUE);
        InterlockedExchangeAddSizeT (&MmSessionSpace->PagedPoolBytes,
                                     NumberOfBytes);
        InterlockedIncrement ((PLONG)&MmSessionSpace->PagedPoolAllocations);
    }
}

VOID
MiSessionPoolFreed (
    IN PVOID VirtualAddress,
    IN SIZE_T NumberOfBytes,
    IN POOL_TYPE PoolType
    )

/*++

Routine Description:

    This function returns the specified pool allocation for the current
    session.  On session exit, this charge must be zero.

Arguments:

    VirtualAddress - Supplies the pool address being freed.

    NumberOfBytes - Supplies the number of bytes being freed.

    PoolType - Supplies the type of the above pool allocation.

Return Value:

    None.

Environment:

    DISPATCH_LEVEL or below for nonpaged pool allocations,
    APC_LEVEL or below for paged pool.

--*/

{
#if !DBG
    UNREFERENCED_PARAMETER (VirtualAddress);
#endif

    if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
        ASSERT (KeGetCurrentIrql () <= DISPATCH_LEVEL);
        ASSERT (MI_IS_SESSION_POOL_ADDRESS(VirtualAddress) == FALSE);
        InterlockedExchangeAddSizeT (&MmSessionSpace->NonPagedPoolBytes,
                                     0 - NumberOfBytes);
        InterlockedDecrement ((PLONG)&MmSessionSpace->NonPagedPoolAllocations);
    }
    else {
        ASSERT (KeGetCurrentIrql () <= APC_LEVEL);
        ASSERT (MI_IS_SESSION_POOL_ADDRESS(VirtualAddress) == TRUE);
        InterlockedExchangeAddSizeT (&MmSessionSpace->PagedPoolBytes,
                                     0 - NumberOfBytes);
        InterlockedDecrement ((PLONG)&MmSessionSpace->PagedPoolAllocations);
    }
}

SIZE_T
MmAvailablePoolInPages (
    IN POOL_TYPE PoolType
    )

/*++

Routine Description:

    This function returns the number of pages available for the given pool.
    Note that it does not account for any executive pool fragmentation.

Arguments:

    PoolType - Supplies the type of pool to retrieve information about.

Return Value:

    The number of full pool pages remaining.

Environment:

    PASSIVE_LEVEL, no mutexes or locks held.

--*/

{
    SIZE_T FreePoolInPages;
    SIZE_T FreeCommitInPages;

#if !DBG
    UNREFERENCED_PARAMETER (PoolType);
#endif

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);
    ASSERT (PoolType == PagedPool);

    FreePoolInPages = (MmSizeOfPagedPoolInBytes >> PAGE_SHIFT) - MmPagedPoolInfo.AllocatedPagedPool;

    FreeCommitInPages = MmTotalCommitLimitMaximum - MmTotalCommittedPages;

    if (FreePoolInPages > FreeCommitInPages) {
        FreePoolInPages = FreeCommitInPages;
    }

    return FreePoolInPages;
}
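
//
// Illustrative worked example (editor's addition, not part of the original
// source): with a 192MB paged pool (49152 pages at a 4KB page size) of
// which 40000 pages are allocated, the pool itself has 9152 free pages;
// if the commit headroom (limit maximum minus committed pages) is only
// 6000 pages, the smaller figure, 6000, is what this routine returns.
//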

LOGICAL
MmResourcesAvailable (
    IN POOL_TYPE PoolType,
    IN SIZE_T NumberOfBytes,
    IN EX_POOL_PRIORITY Priority
    )

/*++

Routine Description:

    This function examines various resources to determine if this
    pool allocation should be allowed to proceed.

Arguments:

    PoolType - Supplies the type of pool to retrieve information about.

    NumberOfBytes - Supplies the number of bytes to allocate.

    Priority - Supplies an indication as to how important it is that this
               request succeed under low available resource conditions.

Return Value:

    TRUE if the pool allocation should be allowed to proceed, FALSE if not.

--*/

{
    PFN_NUMBER NumberOfPages;
    SIZE_T FreePoolInBytes;
    LOGICAL Status;
    MM_POOL_PRIORITIES Index;

    ASSERT (Priority != HighPoolPriority);
    ASSERT ((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) == 0);

    NumberOfPages = BYTES_TO_PAGES (NumberOfBytes);

    if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
        FreePoolInBytes = MmMaximumNonPagedPoolInBytes - (MmAllocatedNonPagedPool << PAGE_SHIFT);
    }
    else if (PoolType & SESSION_POOL_MASK) {
        FreePoolInBytes = MmSessionPoolSize - MmSessionSpace->PagedPoolBytes;
    }
    else {
        FreePoolInBytes = MmSizeOfPagedPoolInBytes - (MmPagedPoolInfo.AllocatedPagedPool << PAGE_SHIFT);
    }

    Status = FALSE;

    //
    // Check available VA space.
    //

    if (Priority == NormalPoolPriority) {
        if ((SIZE_T)NumberOfBytes + 512*1024 > FreePoolInBytes) {
            if (PsGetCurrentThread()->MemoryMaker == 0) {
                goto nopool;
            }
        }
    }
    else {
        if ((SIZE_T)NumberOfBytes + 2*1024*1024 > FreePoolInBytes) {
            if (PsGetCurrentThread()->MemoryMaker == 0) {
                goto nopool;
            }
        }
    }

    //
    // Paged allocations (session and normal) can also fail for lack of commit.
    //

    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
        if (MmTotalCommittedPages + NumberOfPages > MmTotalCommitLimitMaximum) {
            if (PsGetCurrentThread()->MemoryMaker == 0) {
                MiIssuePageExtendRequestNoWait (NumberOfPages);
                goto nopool;
            }
        }
    }

    //
    // If a substantial amount of free pool is still available, return TRUE now.
    //

    if (((SIZE_T)NumberOfBytes + 10*1024*1024 < FreePoolInBytes) ||
        (MmNumberOfPhysicalPages < 256 * 1024)) {
        return TRUE;
    }

    //
    // This pool allocation is permitted, but because we're starting to run low,
    // trigger a round of dereferencing in parallel before returning success.
    // Note this is only done on machines with at least 1GB of RAM as smaller
    // configuration machines will already trigger this due to physical page
    // consumption.
    //

    Status = TRUE;

nopool:

    //
    // Running low on pool - if this request is not for session pool,
    // force unused segment trimming when appropriate.
    //

    if ((PoolType & SESSION_POOL_MASK) == 0) {

        if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
            MmPreemptiveTrims[MmPreemptForNonPagedPriority] += 1;
        }
        else {
            MmPreemptiveTrims[MmPreemptForPagedPriority] += 1;
        }

        if (MI_UNUSED_SEGMENTS_SURPLUS()) {
            KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);
        }
        else {
            MiTrimSegmentCache ();
        }
    }

    if (Status == FALSE) {

        //
        // Log this failure for debugging purposes.
        //

        if (Priority == NormalPoolPriority) {
            Index = MmNormalPriority;
        }
        else {
            Index = MmLowPriority;
        }

        if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
            MmPoolFailures[MmNonPagedPool][Index] += 1;
        }
        else if (PoolType & SESSION_POOL_MASK) {
            MmPoolFailures[MmSessionPagedPool][Index] += 1;
            MmSessionSpace->SessionPoolAllocationFailures[0] += 1;
        }
        else {
            MmPoolFailures[MmPagedPool][Index] += 1;
        }

        MmPoolFailureReasons[MmPriorityTooLow] += 1;
    }

    return Status;
}
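
//
// Illustrative summary (editor's addition, not part of the original source)
// of the headroom thresholds applied above for threads that are not memory
// makers: a NormalPoolPriority request must leave at least 512KB of pool
// free and a lower-priority request at least 2MB, paged requests must also
// fit under the commit limit, and any request leaving more than 10MB of
// headroom (or any request on a machine with fewer than 256K physical
// pages, ie: under 1GB with 4KB pages) returns TRUE immediately without
// kicking off preemptive segment trimming.
//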

VOID
MiFreeNonPagedPool (
    IN PVOID StartingAddress,
    IN PFN_NUMBER NumberOfPages
    )

/*++

Routine Description:

    This function releases virtually mapped nonpaged expansion pool.

Arguments:

    StartingAddress - Supplies the starting address.

    NumberOfPages - Supplies the number of pages to free.

Return Value:

    None.

Environment:

    These functions are used by the internal Mm page allocation/free routines
    only and should not be called directly.

    Mutexes guarding the pool databases must be held when calling
    this function.

--*/

{
    PFN_NUMBER i;
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    PFN_NUMBER ResAvailToReturn;
    PFN_NUMBER PageFrameIndex;
    ULONG Count;
    PVOID FlushVa[MM_MAXIMUM_FLUSH_COUNT];

    MI_MAKING_MULTIPLE_PTES_INVALID (TRUE);

    Count = 0;

    PointerPte = MiGetPteAddress (StartingAddress);

    //
    // Return commitment.
    //

    MiReturnCommitment (NumberOfPages);

    MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_NONPAGED_POOL_EXPANSION,
                     NumberOfPages);

    ResAvailToReturn = 0;

    LOCK_PFN_AT_DPC ();

    if (MiExpansionPoolPagesInUse > MiExpansionPoolPagesInitialCharge) {
        ResAvailToReturn = MiExpansionPoolPagesInUse - MiExpansionPoolPagesInitialCharge;
    }

    MiExpansionPoolPagesInUse -= NumberOfPages;

    for (i = 0; i < NumberOfPages; i += 1) {

        PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);

        //
        // Set the pointer to the PTE as empty so the page
        // is deleted when the reference count goes to zero.
        //

        Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
        ASSERT (Pfn1->u2.ShareCount == 1);
        Pfn1->u2.ShareCount = 0;
        MI_SET_PFN_DELETED (Pfn1);
#if DBG
        Pfn1->u3.e1.PageLocation = StandbyPageList;
#endif //DBG
        MiDecrementReferenceCount (PageFrameIndex);

        if (Count != MM_MAXIMUM_FLUSH_COUNT) {
            FlushVa[Count] = StartingAddress;
            Count += 1;
        }

        MI_WRITE_INVALID_PTE (PointerPte, ZeroKernelPte);

        StartingAddress = (PVOID)((PCHAR)StartingAddress + PAGE_SIZE);
        PointerPte += 1;
    }

    //
    // Generally there is no need to update resident available
    // pages at this time as it has all been done during initialization.
    // However, only some of the expansion pool was charged at init, so
    // calculate how much (if any) resident available page charge to return.
    //

    if (ResAvailToReturn > NumberOfPages) {
        ResAvailToReturn = NumberOfPages;
    }

    if (ResAvailToReturn != 0) {
        MmResidentAvailablePages += ResAvailToReturn;
        MM_BUMP_COUNTER(23, ResAvailToReturn);
    }

    //
    // The PFN lock is not needed for the TB flush - the caller either holds
    // the nonpaged pool lock or nothing, but regardless the address range
    // cannot be reused until the PTEs are released below.
    //

    UNLOCK_PFN_FROM_DPC ();

    if (Count < MM_MAXIMUM_FLUSH_COUNT) {
        KeFlushMultipleTb (Count,
                           &FlushVa[0],
                           TRUE,
                           TRUE,
                           NULL,
                           *(PHARDWARE_PTE)&ZeroPte.u.Flush);
    }
    else {
        KeFlushEntireTb (TRUE, TRUE);
    }

    KeLowerIrql (DISPATCH_LEVEL);

    PointerPte -= NumberOfPages;

    MiReleaseSystemPtes (PointerPte,
                         (ULONG)NumberOfPages,
                         NonPagedPoolExpansion);
}
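
//
// Editor's note (not part of the original source): the flush above batches
// up to MM_MAXIMUM_FLUSH_COUNT individual virtual addresses for
// KeFlushMultipleTb; once a free spans more pages than that, flushing the
// entire TB is cheaper than flushing the pages one at a time.
//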

LOGICAL
MiFreeAllExpansionNonPagedPool (
    IN LOGICAL PoolLockHeld
    )

/*++

Routine Description:

    This function releases all virtually mapped nonpaged expansion pool.

Arguments:

    PoolLockHeld - Supplies TRUE if the nonpaged pool lock is already held,
                   FALSE if not.

Return Value:

    TRUE if pages were freed, FALSE if not.

Environment:

    Kernel mode.  NonPagedPool lock optionally may be held, PFN lock is NOT
    held.

--*/

{
    ULONG Index;
    KIRQL OldIrql;
    PLIST_ENTRY Entry;
    LOGICAL FreedPool;
    PMMFREE_POOL_ENTRY FreePageInfo;

    FreedPool = FALSE;

    if (PoolLockHeld == FALSE) {
        OldIrql = ExLockPool (NonPagedPool);
    }
    else {

        //
        // Initializing OldIrql is not needed for correctness, but without it
        // the compiler cannot compile this code W4 to check for use of
        // uninitialized variables.
        //

        OldIrql = PASSIVE_LEVEL;
    }

    for (Index = 0; Index < MI_MAX_FREE_LIST_HEADS; Index += 1) {

        Entry = MmNonPagedPoolFreeListHead[Index].Flink;

        while (Entry != &MmNonPagedPoolFreeListHead[Index]) {

            if (MmProtectFreedNonPagedPool == TRUE) {
                MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0);
            }

            //
            // The list is not empty, see if this one is virtually
            // mapped.
            //

            FreePageInfo = CONTAINING_RECORD(Entry,
                                             MMFREE_POOL_ENTRY,
                                             List);

            if ((!MI_IS_PHYSICAL_ADDRESS(FreePageInfo)) &&
                ((PVOID)FreePageInfo >= MmNonPagedPoolExpansionStart)) {

                if (MmProtectFreedNonPagedPool == FALSE) {
                    RemoveEntryList (&FreePageInfo->List);
                }
                else {
                    MiProtectedPoolRemoveEntryList (&FreePageInfo->List);
                }

                MmNumberOfFreeNonPagedPool -= FreePageInfo->Size;
                ASSERT ((LONG)MmNumberOfFreeNonPagedPool >= 0);

                FreedPool = TRUE;

                MiFreeNonPagedPool ((PVOID)FreePageInfo,
                                    FreePageInfo->Size);

                Index = (ULONG)-1;
                break;
            }

            Entry = FreePageInfo->List.Flink;

            if (MmProtectFreedNonPagedPool == TRUE) {
                MiProtectFreeNonPagedPool ((PVOID)FreePageInfo,
                                           (ULONG)FreePageInfo->Size);
            }
        }
    }

    if (PoolLockHeld == FALSE) {
        ExUnlockPool (NonPagedPool, OldIrql);
    }

    return FreedPool;
}
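
//
// Editor's note (not part of the original source): resetting Index to
// (ULONG)-1 before the break above makes the outer for loop increment it
// back to zero, so after each release the scan restarts from the first
// free list head rather than resuming from a link that may no longer be
// valid.
//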

PVOID
MiAllocatePoolPages (
    IN POOL_TYPE PoolType,
    IN SIZE_T SizeInBytes,
    IN ULONG IsLargeSessionAllocation
    )

/*++

Routine Description:

    This function allocates a set of pages from the specified pool
    and returns the starting virtual address to the caller.

Arguments:

    PoolType - Supplies the type of pool from which to obtain pages.

    SizeInBytes - Supplies the size of the request in bytes.  The actual
                  size returned is rounded up to a page boundary.

    IsLargeSessionAllocation - Supplies nonzero if the allocation is a single
                               large session allocation.  Zero otherwise.

Return Value:

    Returns a pointer to the allocated pool, or NULL if no more pool is
    available.

Environment:

    These functions are used by the general pool allocation routines
    and should not be called directly.

    Mutexes guarding the pool databases must be held when calling
    these functions.

    Kernel mode, IRQL at DISPATCH_LEVEL.

--*/

{
    PETHREAD Thread;
    PFN_NUMBER SizeInPages;
    ULONG StartPosition;
    ULONG EndPosition;
    PMMPTE StartingPte;
    PMMPTE PointerPte;
    PMMPFN Pfn1;
    MMPTE TempPte;
    PFN_NUMBER PageFrameIndex;
    PVOID BaseVa;
    KIRQL OldIrql;
    KIRQL SessionIrql;
    PFN_NUMBER i;
    PFN_NUMBER j;
    PLIST_ENTRY Entry;
    PLIST_ENTRY ListHead;
    PLIST_ENTRY LastListHead;
    PMMFREE_POOL_ENTRY FreePageInfo;
    PMM_SESSION_SPACE SessionSpace;
    PMM_PAGED_POOL_INFO PagedPoolInfo;
    PVOID VirtualAddress;
    PVOID VirtualAddressSave;
    ULONG Index;
    PMMPTE SessionPte;
    WSLE_NUMBER WsEntry;
    WSLE_NUMBER WsSwapEntry;
    ULONG PageTableCount;
    LOGICAL AddressIsPhysical;

    SizeInPages = BYTES_TO_PAGES (SizeInBytes);

#if DBG
    if (MmCheckRequestInPages != 0) {
        ASSERT (SizeInPages < MmCheckRequestInPages);
    }
#endif

    if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {

        Index = (ULONG)(SizeInPages - 1);

        if (Index >= MI_MAX_FREE_LIST_HEADS) {
            Index = MI_MAX_FREE_LIST_HEADS - 1;
        }

        //
        // NonPaged pool is linked together through the pages themselves.
        //

        ListHead = &MmNonPagedPoolFreeListHead[Index];
        LastListHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_LIST_HEADS];

        do {

            Entry = ListHead->Flink;

            while (Entry != ListHead) {

                if (MmProtectFreedNonPagedPool == TRUE) {
                    MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0);
                }

                //
                // The list is not empty, see if this one has enough space.
                //

                FreePageInfo = CONTAINING_RECORD(Entry,
                                                 MMFREE_POOL_ENTRY,
                                                 List);

                ASSERT (FreePageInfo->Signature == MM_FREE_POOL_SIGNATURE);

                if (FreePageInfo->Size >= SizeInPages) {

                    //
                    // This entry has sufficient space, remove
                    // the pages from the end of the allocation.
                    //

                    FreePageInfo->Size -= SizeInPages;

                    BaseVa = (PVOID)((PCHAR)FreePageInfo +
                                     (FreePageInfo->Size << PAGE_SHIFT));

                    if (MmProtectFreedNonPagedPool == FALSE) {
                        RemoveEntryList (&FreePageInfo->List);
                    }
                    else {
                        MiProtectedPoolRemoveEntryList (&FreePageInfo->List);
                    }

                    if (FreePageInfo->Size != 0) {

                        //
                        // Insert any remainder into the correct list.
                        //

                        Index = (ULONG)(FreePageInfo->Size - 1);

                        if (Index >= MI_MAX_FREE_LIST_HEADS) {
                            Index = MI_MAX_FREE_LIST_HEADS - 1;
                        }

                        if (MmProtectFreedNonPagedPool == FALSE) {
                            InsertTailList (&MmNonPagedPoolFreeListHead[Index],
                                            &FreePageInfo->List);
                        }
                        else {
                            MiProtectedPoolInsertList (&MmNonPagedPoolFreeListHead[Index],
                                                       &FreePageInfo->List,
                                                       FALSE);

                            MiProtectFreeNonPagedPool ((PVOID)FreePageInfo,
                                                       (ULONG)FreePageInfo->Size);
                        }
                    }

                    //
                    // Adjust the number of free pages remaining in the pool.
                    //

                    MmNumberOfFreeNonPagedPool -= SizeInPages;
                    ASSERT ((LONG)MmNumberOfFreeNonPagedPool >= 0);

                    //
                    // Mark start and end of allocation in the PFN database.
                    //

                    if (MI_IS_PHYSICAL_ADDRESS(BaseVa)) {

                        //
                        // On certain architectures, virtual addresses
                        // may be physical and hence have no corresponding PTE.
                        //

                        AddressIsPhysical = TRUE;
                        PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (BaseVa);

                        //
                        // Initializing PointerPte is not needed for correctness
                        // but without it the compiler cannot compile this code
                        // W4 to check for use of uninitialized variables.
                        //

                        PointerPte = NULL;
                    }
                    else {
                        AddressIsPhysical = FALSE;
                        PointerPte = MiGetPteAddress(BaseVa);
                        ASSERT (PointerPte->u.Hard.Valid == 1);
                        PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
                    }

                    Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

                    ASSERT (Pfn1->u3.e1.StartOfAllocation == 0);
                    ASSERT (Pfn1->u4.VerifierAllocation == 0);

                    Pfn1->u3.e1.StartOfAllocation = 1;

                    if (PoolType & POOL_VERIFIER_MASK) {
                        Pfn1->u4.VerifierAllocation = 1;
                    }

                    //
                    // Mark this as a large session allocation in the PFN
                    // database.
                    //

                    if (IsLargeSessionAllocation != 0) {
                        ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
                        Pfn1->u3.e1.LargeSessionAllocation = 1;

                        MiSessionPoolAllocated (BaseVa,
                                                SizeInPages << PAGE_SHIFT,
                                                NonPagedPool);
                    }

                    //
                    // Calculate the ending PTE's address.
                    //

                    if (SizeInPages != 1) {
                        if (AddressIsPhysical == TRUE) {
                            Pfn1 += SizeInPages - 1;
                        }
                        else {
                            PointerPte += SizeInPages - 1;
                            ASSERT (PointerPte->u.Hard.Valid == 1);
                            Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
                        }
                    }

                    ASSERT (Pfn1->u3.e1.EndOfAllocation == 0);
                    Pfn1->u3.e1.EndOfAllocation = 1;

                    MmAllocatedNonPagedPool += SizeInPages;

                    return BaseVa;
                }

                Entry = FreePageInfo->List.Flink;

                if (MmProtectFreedNonPagedPool == TRUE) {
                    MiProtectFreeNonPagedPool ((PVOID)FreePageInfo,
                                               (ULONG)FreePageInfo->Size);
                }
            }

            ListHead += 1;

        } while (ListHead < LastListHead);

        //
        // No more entries on the list, expand nonpaged pool if
        // possible to satisfy this request.
        //

        //
        // If pool is starting to run low then free some paged cache up now.
        // While this can never prevent pool allocations from failing, it does
        // give drivers a better chance to always see success.
        //

        if (MmMaximumNonPagedPoolInBytes - (MmAllocatedNonPagedPool << PAGE_SHIFT) < 3 * 1024 * 1024) {
            MmPreemptiveTrims[MmPreemptForNonPaged] += 1;
            MiTrimSegmentCache ();
        }

#if defined (_WIN64)
        if (SizeInPages >= _4gb) {
            return NULL;
        }
#endif

        //
        // Try to find system PTEs to expand the pool into.
        //

        StartingPte = MiReserveSystemPtes ((ULONG)SizeInPages,
                                           NonPagedPoolExpansion);

        if (StartingPte == NULL) {

            //
            // There are no free physical PTEs to expand nonpaged pool.
            //
            // Check to see if there are too many unused segments lying
            // around.  If so, set an event so they get deleted.
            //

            if (MI_UNUSED_SEGMENTS_SURPLUS()) {
                KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);
            }

            //
            // If there are any cached expansion PTEs, free them now in
            // an attempt to get enough contiguous VA for our caller.
            //

            if ((SizeInPages > 1) && (MmNumberOfFreeNonPagedPool != 0)) {

                if (MiFreeAllExpansionNonPagedPool (TRUE) == TRUE) {

                    StartingPte = MiReserveSystemPtes ((ULONG)SizeInPages,
                                                       NonPagedPoolExpansion);
                }
            }

            if (StartingPte == NULL) {

                MmPoolFailures[MmNonPagedPool][MmHighPriority] += 1;
                MmPoolFailureReasons[MmNonPagedNoPtes] += 1;

                //
                // Running low on pool - force unused segment trimming.
                //

                MiTrimSegmentCache ();

                return NULL;
            }
        }

        //
        // Charge commitment as nonpaged pool uses physical memory.
        //

        if (MiChargeCommitmentCantExpand (SizeInPages, FALSE) == FALSE) {

            if (PsGetCurrentThread()->MemoryMaker == 1) {
                MiChargeCommitmentCantExpand (SizeInPages, TRUE);
            }
            else {

                MiReleaseSystemPtes (StartingPte,
                                     (ULONG)SizeInPages,
                                     NonPagedPoolExpansion);

                MmPoolFailures[MmNonPagedPool][MmHighPriority] += 1;
                MmPoolFailureReasons[MmNonPagedNoCommit] += 1;

                MiTrimSegmentCache ();

                return NULL;
            }
        }

        PointerPte = StartingPte;
        TempPte = ValidKernelPte;

        i = SizeInPages;

        MmAllocatedNonPagedPool += SizeInPages;

        LOCK_PFN_AT_DPC ();

        //
        // Make sure we have 1 more than the number of pages
        // requested available.
        //

        if (MmAvailablePages <= SizeInPages) {

            UNLOCK_PFN_FROM_DPC ();

            //
            // There are no free physical pages to expand nonpaged pool.
            //

            MmPoolFailureReasons[MmNonPagedNoPagesAvailable] += 1;
            MmPoolFailures[MmNonPagedPool][MmHighPriority] += 1;

            MmAllocatedNonPagedPool -= SizeInPages;

            MiReturnCommitment (SizeInPages);

            MiReleaseSystemPtes (StartingPte,
                                 (ULONG)SizeInPages,
                                 NonPagedPoolExpansion);

            MiTrimSegmentCache ();

            return NULL;
        }

        //
        // Charge resident available pages now for any excess.
        //

        MiExpansionPoolPagesInUse += SizeInPages;

        if (MiExpansionPoolPagesInUse > MiExpansionPoolPagesInitialCharge) {

            j = MiExpansionPoolPagesInUse - MiExpansionPoolPagesInitialCharge;

            if (j > SizeInPages) {
                j = SizeInPages;
            }

            if (MI_NONPAGABLE_MEMORY_AVAILABLE() >= (SPFN_NUMBER)j) {
                MmResidentAvailablePages -= j;
                MM_BUMP_COUNTER(24, j);
            }
            else {

                MiExpansionPoolPagesInUse -= SizeInPages;

                UNLOCK_PFN_FROM_DPC ();

                MmPoolFailureReasons[MmNonPagedNoResidentAvailable] += 1;
                MmPoolFailures[MmNonPagedPool][MmHighPriority] += 1;

                MmAllocatedNonPagedPool -= SizeInPages;

                MiReturnCommitment (SizeInPages);

                MiReleaseSystemPtes (StartingPte,
                                     (ULONG)SizeInPages,
                                     NonPagedPoolExpansion);

                MiTrimSegmentCache ();

                return NULL;
            }
        }

        MM_TRACK_COMMIT (MM_DBG_COMMIT_NONPAGED_POOL_EXPANSION, SizeInPages);

        //
        // Expand the pool.
        //

        do {
            PageFrameIndex = MiRemoveAnyPage (
                                MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));

            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

            Pfn1->u3.e2.ReferenceCount = 1;
            Pfn1->u2.ShareCount = 1;
            Pfn1->PteAddress = PointerPte;
            Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
            Pfn1->u4.PteFrame = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress(PointerPte));
            Pfn1->u3.e1.PageLocation = ActiveAndValid;
            Pfn1->u3.e1.CacheAttribute = MiCached;
            Pfn1->u3.e1.LargeSessionAllocation = 0;
            Pfn1->u4.VerifierAllocation = 0;

            TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
            MI_WRITE_VALID_PTE (PointerPte, TempPte);

            PointerPte += 1;
            SizeInPages -= 1;

        } while (SizeInPages > 0);

        Pfn1->u3.e1.EndOfAllocation = 1;

        Pfn1 = MI_PFN_ELEMENT (StartingPte->u.Hard.PageFrameNumber);
        Pfn1->u3.e1.StartOfAllocation = 1;

        ASSERT (Pfn1->u4.VerifierAllocation == 0);

        if (PoolType & POOL_VERIFIER_MASK) {
            Pfn1->u4.VerifierAllocation = 1;
        }

        //
        // Mark this as a large session allocation in the PFN database.
        //

        ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);

        if (IsLargeSessionAllocation != 0) {
            Pfn1->u3.e1.LargeSessionAllocation = 1;
        }

        UNLOCK_PFN_FROM_DPC ();

        BaseVa = MiGetVirtualAddressMappedByPte (StartingPte);

        if (IsLargeSessionAllocation != 0) {
            MiSessionPoolAllocated(MiGetVirtualAddressMappedByPte (StartingPte),
                                   i << PAGE_SHIFT,
                                   NonPagedPool);
        }

        return BaseVa;
    }

    //
    // Paged Pool.
    //

    if ((PoolType & SESSION_POOL_MASK) == 0) {

        SessionSpace = NULL;
        PagedPoolInfo = &MmPagedPoolInfo;

        //
        // If pool is starting to run low then free some paged cache up now.
        // While this can never prevent pool allocations from failing, it does
        // give drivers a better chance to always see success.
        //

        if (MmSizeOfPagedPoolInBytes - (MmPagedPoolInfo.AllocatedPagedPool << PAGE_SHIFT) < 5 * 1024 * 1024) {
            MmPreemptiveTrims[MmPreemptForPaged] += 1;
            MiTrimSegmentCache ();
        }
#if DBG
        if (MiClearCache != 0) {
            MmPreemptiveTrims[MmPreemptForPaged] += 1;
            MiTrimSegmentCache ();
        }
#endif
    }
    else {
        SessionSpace = MmSessionSpace;
        PagedPoolInfo = &SessionSpace->PagedPoolInfo;
    }

    StartPosition = RtlFindClearBitsAndSet (
                        PagedPoolInfo->PagedPoolAllocationMap,
                        (ULONG)SizeInPages,
                        PagedPoolInfo->PagedPoolHint
                        );

    if ((StartPosition == NO_BITS_FOUND) &&
        (PagedPoolInfo->PagedPoolHint != 0)) {

        if (MI_UNUSED_SEGMENTS_SURPLUS()) {
            KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);
        }

        //
        // No free bits were found, check from the start of the bit map.
        //

        StartPosition = RtlFindClearBitsAndSet (
                            PagedPoolInfo->PagedPoolAllocationMap,
                            (ULONG)SizeInPages,
                            0
                            );
    }

    if (StartPosition == NO_BITS_FOUND) {

        //
        // No room in pool - attempt to expand the paged pool.
        //

        StartPosition = (((ULONG)SizeInPages - 1) / PTE_PER_PAGE) + 1;

        //
        // Make sure there is enough space to create the prototype PTEs.
        //

        if (((StartPosition - 1) + PagedPoolInfo->NextPdeForPagedPoolExpansion) >
            MiGetPteAddress (PagedPoolInfo->LastPteForPagedPool)) {

            //
            // Can't expand pool any more.  If this request is not for session
            // pool, force unused segment trimming when appropriate.
            //

            if (SessionSpace == NULL) {

                MmPoolFailures[MmPagedPool][MmHighPriority] += 1;
                MmPoolFailureReasons[MmPagedNoPtes] += 1;

                //
                // Running low on pool - force unused segment trimming.
                //

                MiTrimSegmentCache ();

                return NULL;
            }

            MmPoolFailures[MmSessionPagedPool][MmHighPriority] += 1;
            MmPoolFailureReasons[MmSessionPagedNoPtes] += 1;
            MmSessionSpace->SessionPoolAllocationFailures[1] += 1;

            return NULL;
        }

        PageTableCount = StartPosition;

        if (SessionSpace) {
            TempPte = ValidKernelPdeLocal;
        }
        else {
            TempPte = ValidKernelPde;
        }

        //
        // Charge commitment for the pagetable pages for paged pool expansion.
        //

        if (MiChargeCommitmentCantExpand (StartPosition, FALSE) == FALSE) {

            if (PsGetCurrentThread()->MemoryMaker == 1) {
                MiChargeCommitmentCantExpand (StartPosition, TRUE);
            }
            else {

                MmPoolFailures[MmPagedPool][MmHighPriority] += 1;
                MmPoolFailureReasons[MmPagedNoCommit] += 1;

                MiTrimSegmentCache ();

                return NULL;
            }
        }

        EndPosition = (ULONG)((PagedPoolInfo->NextPdeForPagedPoolExpansion -
                               MiGetPteAddress(PagedPoolInfo->FirstPteForPagedPool)) *
                              PTE_PER_PAGE);

        //
        // Expand the pool.
        //

        RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap,
                      EndPosition,
                      (ULONG) StartPosition * PTE_PER_PAGE);

        PointerPte = PagedPoolInfo->NextPdeForPagedPoolExpansion;
        VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
        VirtualAddressSave = VirtualAddress;

        PagedPoolInfo->NextPdeForPagedPoolExpansion += StartPosition;

        LOCK_PFN (OldIrql);

        //
        // Make sure we have 1 more than the number of pages
        // requested available.
        //

        if (MmAvailablePages <= StartPosition) {

            //
            // There are no free physical pages to expand paged pool.
            //

            UNLOCK_PFN (OldIrql);

            PagedPoolInfo->NextPdeForPagedPoolExpansion -= StartPosition;

            RtlSetBits (PagedPoolInfo->PagedPoolAllocationMap,
                        EndPosition,
                        (ULONG) StartPosition * PTE_PER_PAGE);

            MiReturnCommitment (StartPosition);

            if (SessionSpace == NULL) {
                MmPoolFailures[MmPagedPool][MmHighPriority] += 1;
                MmPoolFailureReasons[MmPagedNoPagesAvailable] += 1;
            }
            else {
                MmPoolFailures[MmSessionPagedPool][MmHighPriority] += 1;
                MmPoolFailureReasons[MmSessionPagedNoPagesAvailable] += 1;
                MmSessionSpace->SessionPoolAllocationFailures[2] += 1;
            }

            return NULL;
        }

        MM_TRACK_COMMIT (MM_DBG_COMMIT_PAGED_POOL_PAGETABLE, StartPosition);

        //
        // Update the count of available resident pages.
        //

        MmResidentAvailablePages -= StartPosition;
        MM_BUMP_COUNTER(1, StartPosition);

        //
        // Allocate the page table pages for the pool expansion.
        //

        do {
            ASSERT (PointerPte->u.Hard.Valid == 0);

            PageFrameIndex = MiRemoveAnyPage (
                                MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));

            TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
            MI_WRITE_VALID_PTE (PointerPte, TempPte);

            //
            // Map valid PDE into system (or session) address space.
            //

#if (_MI_PAGING_LEVELS >= 3)
            MiInitializePfn (PageFrameIndex, PointerPte, 1);
#else
            if (SessionSpace) {
                Index = (ULONG)(PointerPte - MiGetPdeAddress (MmSessionBase));
                ASSERT (MmSessionSpace->PageTables[Index].u.Long == 0);
                MmSessionSpace->PageTables[Index] = TempPte;

                MiInitializePfnForOtherProcess (PageFrameIndex,
                                                PointerPte,
                                                MmSessionSpace->SessionPageDirectoryIndex);

                MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_ALLOC1, 1);
            }
            else {
                MmSystemPagePtes [((ULONG_PTR)PointerPte &
                    (PD_PER_SYSTEM * (sizeof(MMPTE) * PDE_PER_PAGE) - 1)) / sizeof(MMPTE)] = TempPte;

                MiInitializePfnForOtherProcess (PageFrameIndex,
                                                PointerPte,
                                                MmSystemPageDirectory[(PointerPte - MiGetPdeAddress(0)) / PDE_PER_PAGE]);
            }
#endif

            KeFillEntryTb ((PHARDWARE_PTE) PointerPte, VirtualAddress, FALSE);

            PointerPte += 1;
            VirtualAddress = (PVOID)((PCHAR)VirtualAddress + PAGE_SIZE);
            StartPosition -= 1;

        } while (StartPosition > 0);

        UNLOCK_PFN (OldIrql);

        MiFillMemoryPte (VirtualAddressSave,
                         PageTableCount * PAGE_SIZE,
                         MM_KERNEL_NOACCESS_PTE);

        if (SessionSpace) {

            PointerPte -= PageTableCount;

            InterlockedExchangeAddSizeT (&MmSessionSpace->CommittedPages,
                                         PageTableCount);

            MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_PAGETABLE_ALLOC, PageTableCount);

            Thread = PsGetCurrentThread ();

            LOCK_SESSION_SPACE_WS (SessionIrql, Thread);

            MmSessionSpace->NonPagablePages += PageTableCount;

            do {
                Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);

                ASSERT (Pfn1->u1.Event == 0);
                Pfn1->u1.Event = (PVOID) Thread;

                SessionPte = MiGetVirtualAddressMappedByPte (PointerPte);

                MiAddValidPageToWorkingSet (SessionPte,
                                            PointerPte,
                                            Pfn1,
                                            0);

                WsEntry = MiLocateWsle (SessionPte,
                                        MmSessionSpace->Vm.VmWorkingSetList,
                                        Pfn1->u1.WsIndex);

                if (WsEntry >= MmSessionSpace->Vm.VmWorkingSetList->FirstDynamic) {

                    WsSwapEntry = MmSessionSpace->Vm.VmWorkingSetList->FirstDynamic;

                    if (WsEntry != MmSessionSpace->Vm.VmWorkingSetList->FirstDynamic) {

                        //
                        // Swap this entry with the one at first dynamic.
                        //

                        MiSwapWslEntries (WsEntry, WsSwapEntry, &MmSessionSpace->Vm);
                    }

                    MmSessionSpace->Vm.VmWorkingSetList->FirstDynamic += 1;
                }
                else {
                    WsSwapEntry = WsEntry;
                }

                //
                // Indicate that the page is locked.
                //

                MmSessionSpace->Wsle[WsSwapEntry].u1.e1.LockedInWs = 1;

                PointerPte += 1;
                PageTableCount -= 1;

            } while (PageTableCount > 0);

            UNLOCK_SESSION_SPACE_WS (SessionIrql);
        }

        StartPosition = RtlFindClearBitsAndSet (
                            PagedPoolInfo->PagedPoolAllocationMap,
                            (ULONG)SizeInPages,
                            EndPosition
                            );

        ASSERT (StartPosition != NO_BITS_FOUND);
    }

    //
    // This is paged pool, the start and end can't be saved
    // in the PFN database as the page isn't always resident
    // in memory.  The ideal place to save the start and end
    // would be in the prototype PTE, but there are no free
    // bits.  To solve this problem, a bitmap which parallels
    // the allocation bitmap exists which contains set bits
    // in the positions where an allocation ends.  This
    // allows pages to be deallocated with only their starting
    // address.
    //
    // For sanity's sake, the starting address can be verified
    // from the 2 bitmaps as well.  If the page before the starting
    // address is not allocated (bit is zero in allocation bitmap)
    // then this page is obviously a start of an allocation block.
    // If the page before is allocated and the other bit map does
    // not indicate the previous page is the end of an allocation,
    // then the starting address is wrong and a bug check should
    // be issued.
    //

    if (SizeInPages == 1) {
        PagedPoolInfo->PagedPoolHint = StartPosition + (ULONG)SizeInPages;
    }

    //
    // If paged pool has been configured as nonpagable, commitment has
    // already been charged so just set the length and return the address.
    //

    if ((MmDisablePagingExecutive & MM_PAGED_POOL_LOCKED_DOWN) &&
        (SessionSpace == NULL)) {

        BaseVa = (PVOID)((PUCHAR)MmPageAlignedPoolBase[PagedPool] +
                         ((ULONG_PTR)StartPosition << PAGE_SHIFT));

#if DBG
        PointerPte = MiGetPteAddress (BaseVa);

        for (i = 0; i < SizeInPages; i += 1) {
            ASSERT (PointerPte->u.Hard.Valid == 1);
            PointerPte += 1;
        }
#endif

        EndPosition = StartPosition + (ULONG)SizeInPages - 1;
        RtlSetBit (PagedPoolInfo->EndOfPagedPoolBitmap, EndPosition);

        if (PoolType & POOL_VERIFIER_MASK) {
            RtlSetBit (VerifierLargePagedPoolMap, StartPosition);
        }

        PagedPoolInfo->AllocatedPagedPool += SizeInPages;

        return BaseVa;
    }

    if (MiChargeCommitmentCantExpand (SizeInPages, FALSE) == FALSE) {

        if (PsGetCurrentThread()->MemoryMaker == 1) {
            MiChargeCommitmentCantExpand (SizeInPages, TRUE);
        }
        else {

            RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap,
                          StartPosition,
                          (ULONG)SizeInPages);

            //
            // Could not commit the page(s), return NULL indicating
            // no pool was allocated.  Note that the lack of commit may be due
            // to unused segments and the MmSharedCommit, prototype PTEs, etc
            // associated with them.  So force a reduction now.
            //

            MiIssuePageExtendRequestNoWait (SizeInPages);

            MiTrimSegmentCache ();

            if (SessionSpace == NULL) {
                MmPoolFailures[MmPagedPool][MmHighPriority] += 1;
                MmPoolFailureReasons[MmPagedNoCommit] += 1;
            }
            else {
                MmPoolFailures[MmSessionPagedPool][MmHighPriority] += 1;
                MmPoolFailureReasons[MmSessionPagedNoCommit] += 1;
                MmSessionSpace->SessionPoolAllocationFailures[3] += 1;
            }

            return NULL;
        }
    }

    MM_TRACK_COMMIT (MM_DBG_COMMIT_PAGED_POOL_PAGES, SizeInPages);

    if (SessionSpace) {
        InterlockedExchangeAddSizeT (&SessionSpace->CommittedPages,
                                     SizeInPages);
        MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_COMMIT_PAGEDPOOL_PAGES, (ULONG)SizeInPages);
        BaseVa = (PVOID)((PCHAR)SessionSpace->PagedPoolStart +
                         ((ULONG_PTR)StartPosition << PAGE_SHIFT));
    }
    else {
        MmPagedPoolCommit += (ULONG)SizeInPages;
        BaseVa = (PVOID)((PUCHAR)MmPageAlignedPoolBase[PagedPool] +
                         ((ULONG_PTR)StartPosition << PAGE_SHIFT));
    }

#if DBG
    PointerPte = MiGetPteAddress (BaseVa);

    for (i = 0; i < SizeInPages; i += 1) {

        if (*(ULONG *)PointerPte != MM_KERNEL_NOACCESS_PTE) {
            DbgPrint("MiAllocatePoolPages: PP not zero PTE (%p %p %p)\n",
                     BaseVa, PointerPte, *PointerPte);
            DbgBreakPoint();
        }
        PointerPte += 1;
    }
#endif

    PointerPte = MiGetPteAddress (BaseVa);

    MiFillMemoryPte (PointerPte,
                     SizeInPages * sizeof(MMPTE),
                     MM_KERNEL_DEMAND_ZERO_PTE);

    PagedPoolInfo->PagedPoolCommit += SizeInPages;

    EndPosition = StartPosition + (ULONG)SizeInPages - 1;
    RtlSetBit (PagedPoolInfo->EndOfPagedPoolBitmap, EndPosition);

    //
    // Mark this as a large session allocation in the PFN database.
    //

    if (IsLargeSessionAllocation != 0) {
        RtlSetBit (PagedPoolInfo->PagedPoolLargeSessionAllocationMap,
                   StartPosition);

        MiSessionPoolAllocated (BaseVa,
                                SizeInPages << PAGE_SHIFT,
                                PagedPool);
    }
    else if (PoolType & POOL_VERIFIER_MASK) {
        RtlSetBit (VerifierLargePagedPoolMap, StartPosition);
    }

    PagedPoolInfo->AllocatedPagedPool += SizeInPages;

    return BaseVa;
}
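
//
// Illustrative sketch (editor's addition, not part of the original source):
// the two-bitmap encoding described in the comment above.  With 4 pages
// allocated at bit position 10, the allocation bitmap has bits 10-13 set
// while the end-of-allocation bitmap has only bit 13 set, so a later free
// given just the starting address can recover the allocation length by
// scanning the end bitmap.  Compiled out; the positions are made up.
//

#if 0
    StartPosition = 10;
    SizeInPages = 4;

    RtlSetBits (PagedPoolInfo->PagedPoolAllocationMap,
                StartPosition,
                (ULONG)SizeInPages);

    EndPosition = StartPosition + (ULONG)SizeInPages - 1;
    RtlSetBit (PagedPoolInfo->EndOfPagedPoolBitmap, EndPosition);
#endif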
  1509. ULONG
  1510. MiFreePoolPages (
  1511. IN PVOID StartingAddress
  1512. )
  1513. /*++
  1514. Routine Description:
  1515. This function returns a set of pages back to the pool from
  1516. which they were obtained. Once the pages have been deallocated
  1517. the region provided by the allocation becomes available for
  1518. allocation to other callers, i.e. any data in the region is now
  1519. trashed and cannot be referenced.
  1520. Arguments:
  1521. StartingAddress - Supplies the starting address which was returned
  1522. in a previous call to MiAllocatePoolPages.
  1523. Return Value:
  1524. Returns the number of pages deallocated.
  1525. Environment:
  1526. These functions are used by the general pool allocation routines
  1527. and should not be called directly.
  1528. Mutexes guarding the pool databases must be held when calling
  1529. these functions.
  1530. --*/
  1531. {
  1532. ULONG StartPosition;
  1533. ULONG Index;
  1534. PFN_NUMBER i;
  1535. PFN_NUMBER NumberOfPages;
  1536. POOL_TYPE PoolType;
  1537. PMMPTE PointerPte;
  1538. PMMPTE StartPte;
  1539. PMMPFN Pfn1;
  1540. PMMPFN StartPfn;
  1541. PMMFREE_POOL_ENTRY Entry;
  1542. PMMFREE_POOL_ENTRY NextEntry;
  1543. PMMFREE_POOL_ENTRY LastEntry;
  1544. PMM_PAGED_POOL_INFO PagedPoolInfo;
  1545. PMM_SESSION_SPACE SessionSpace;
  1546. LOGICAL SessionAllocation;
  1547. LOGICAL AddressIsPhysical;
  1548. MMPTE LocalNoAccessPte;
  1549. PFN_NUMBER PagesFreed;
  1550. MMPFNENTRY OriginalPfnFlags;
  1551. ULONG_PTR VerifierAllocation;
  1552. PULONG BitMap;
  1553. #if DBG
  1554. PMMPTE DebugPte;
  1555. PMMPFN DebugPfn;
  1556. PMMPFN LastDebugPfn;
  1557. #endif
  1558. //
  1559. // Determine Pool type base on the virtual address of the block
  1560. // to deallocate.
  1561. //
  1562. // This assumes NonPagedPool starts at a higher virtual address
  1563. // then PagedPool.
  1564. //
  1565. if ((StartingAddress >= MmPagedPoolStart) &&
  1566. (StartingAddress <= MmPagedPoolEnd)) {
  1567. PoolType = PagedPool;
  1568. SessionSpace = NULL;
  1569. PagedPoolInfo = &MmPagedPoolInfo;
  1570. StartPosition = (ULONG)(((PCHAR)StartingAddress -
  1571. (PCHAR)MmPageAlignedPoolBase[PoolType]) >> PAGE_SHIFT);
  1572. }
  1573. else if (MI_IS_SESSION_POOL_ADDRESS (StartingAddress) == TRUE) {
  1574. PoolType = PagedPool;
  1575. SessionSpace = MmSessionSpace;
  1576. ASSERT (SessionSpace);
  1577. PagedPoolInfo = &SessionSpace->PagedPoolInfo;
  1578. StartPosition = (ULONG)(((PCHAR)StartingAddress -
  1579. (PCHAR)SessionSpace->PagedPoolStart) >> PAGE_SHIFT);
  1580. }
  1581. else {
  1582. if (StartingAddress < MM_SYSTEM_RANGE_START) {
  1583. KeBugCheckEx (BAD_POOL_CALLER,
  1584. 0x40,
  1585. (ULONG_PTR)StartingAddress,
  1586. (ULONG_PTR)MM_SYSTEM_RANGE_START,
  1587. 0);
  1588. }
  1589. PoolType = NonPagedPool;
  1590. SessionSpace = NULL;
  1591. PagedPoolInfo = &MmPagedPoolInfo;
  1592. StartPosition = (ULONG)(((PCHAR)StartingAddress -
  1593. (PCHAR)MmPageAlignedPoolBase[PoolType]) >> PAGE_SHIFT);
  1594. }
  1595. //
  1596. // Check to ensure this page is really the start of an allocation.
  1597. //
  1598. if (PoolType == NonPagedPool) {
  1599. //
  1600. // The nonpaged pool being freed may be the target of a delayed unlock.
  1601. // Since these pages may be immediately released, force any pending
  1602. // delayed actions to occur now.
  1603. //
  1604. #if !defined(MI_MULTINODE)
  1605. if (MmPfnDeferredList != NULL) {
  1606. MiDeferredUnlockPages (0);
  1607. }
  1608. #else
  1609. //
  1610. // Each and every node's deferred list would have to be checked so
  1611. // we might as well go the long way and just call.
  1612. //
  1613. MiDeferredUnlockPages (0);
  1614. #endif
  1615. if (MI_IS_PHYSICAL_ADDRESS (StartingAddress)) {
  1616. //
  1617. // On certain architectures, virtual addresses
  1618. // may be physical and hence have no corresponding PTE.
  1619. //
  1620. Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (StartingAddress));
  1621. ASSERT (StartPosition < MmExpandedPoolBitPosition);
  1622. AddressIsPhysical = TRUE;
  1623. //
// Initializing PointerPte & StartPte is not needed for correctness,
// but without it the compiler cannot compile this code at /W4 when
// checking for use of uninitialized variables.
  1627. //
  1628. PointerPte = NULL;
  1629. StartPte = NULL;
  1630. if ((StartingAddress < MmNonPagedPoolStart) ||
  1631. (StartingAddress >= MmNonPagedPoolEnd0)) {
  1632. KeBugCheckEx (BAD_POOL_CALLER,
  1633. 0x42,
  1634. (ULONG_PTR)StartingAddress,
  1635. 0,
  1636. 0);
  1637. }
  1638. }
  1639. else {
  1640. PointerPte = MiGetPteAddress (StartingAddress);
  1641. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  1642. AddressIsPhysical = FALSE;
  1643. StartPte = PointerPte;
  1644. if (((StartingAddress >= MmNonPagedPoolExpansionStart) &&
  1645. (StartingAddress < MmNonPagedPoolEnd)) ||
  1646. ((StartingAddress >= MmNonPagedPoolStart) &&
  1647. (StartingAddress < MmNonPagedPoolEnd0))) {
  1648. NOTHING;
  1649. }
  1650. else {
  1651. KeBugCheckEx (BAD_POOL_CALLER,
  1652. 0x43,
  1653. (ULONG_PTR)StartingAddress,
  1654. 0,
  1655. 0);
  1656. }
  1657. }
  1658. if (Pfn1->u3.e1.StartOfAllocation == 0) {
  1659. KeBugCheckEx (BAD_POOL_CALLER,
  1660. 0x41,
  1661. (ULONG_PTR)StartingAddress,
  1662. (ULONG_PTR)(Pfn1 - MmPfnDatabase),
  1663. MmHighestPhysicalPage);
  1664. }
  1665. StartPfn = Pfn1;
  1666. ASSERT (Pfn1->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  1667. OriginalPfnFlags = Pfn1->u3.e1;
  1668. VerifierAllocation = Pfn1->u4.VerifierAllocation;
  1669. Pfn1->u3.e1.StartOfAllocation = 0;
  1670. Pfn1->u3.e1.LargeSessionAllocation = 0;
  1671. Pfn1->u4.VerifierAllocation = 0;
  1672. #if DBG
  1673. if ((Pfn1->u3.e2.ReferenceCount > 1) &&
  1674. (Pfn1->u3.e1.WriteInProgress == 0)) {
  1675. DbgPrint ("MM: MiFreePoolPages - deleting pool locked for I/O %p\n",
  1676. Pfn1);
  1677. ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
  1678. }
  1679. #endif
  1680. //
  1681. // Find end of allocation and release the pages.
  1682. //
  1683. if (AddressIsPhysical == TRUE) {
  1684. while (Pfn1->u3.e1.EndOfAllocation == 0) {
  1685. Pfn1 += 1;
  1686. #if DBG
  1687. if ((Pfn1->u3.e2.ReferenceCount > 1) &&
  1688. (Pfn1->u3.e1.WriteInProgress == 0)) {
  1689. DbgPrint ("MM:MiFreePoolPages - deleting pool locked for I/O %p\n", Pfn1);
  1690. ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
  1691. }
  1692. #endif
  1693. }
  1694. NumberOfPages = Pfn1 - StartPfn + 1;
  1695. }
  1696. else {
  1697. while (Pfn1->u3.e1.EndOfAllocation == 0) {
  1698. PointerPte += 1;
  1699. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  1700. #if DBG
  1701. if ((Pfn1->u3.e2.ReferenceCount > 1) &&
  1702. (Pfn1->u3.e1.WriteInProgress == 0)) {
  1703. DbgPrint ("MM:MiFreePoolPages - deleting pool locked for I/O %p\n", Pfn1);
  1704. ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
  1705. }
  1706. #endif
  1707. }
  1708. NumberOfPages = PointerPte - StartPte + 1;
  1709. }
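//
// Example of the PFN bit encoding just walked: a 3-page nonpaged
// allocation sets StartOfAllocation on its first frame and
// EndOfAllocation on its third. The loop above recovered
// NumberOfPages == 3 by scanning forward from the start frame
// until it saw the end bit.
//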
  1710. MmAllocatedNonPagedPool -= NumberOfPages;
  1711. if (VerifierAllocation != 0) {
  1712. VerifierFreeTrackedPool (StartingAddress,
  1713. NumberOfPages << PAGE_SHIFT,
  1714. NonPagedPool,
  1715. FALSE);
  1716. }
  1717. if (OriginalPfnFlags.LargeSessionAllocation != 0) {
  1718. MiSessionPoolFreed (StartingAddress,
  1719. NumberOfPages << PAGE_SHIFT,
  1720. NonPagedPool);
  1721. }
  1722. Pfn1->u3.e1.EndOfAllocation = 0;
  1723. #if DBG
  1724. if (MiFillFreedPool != 0) {
  1725. RtlFillMemoryUlong (StartingAddress,
  1726. PAGE_SIZE * NumberOfPages,
  1727. MiFillFreedPool);
  1728. }
  1729. #endif
  1730. if (StartingAddress > MmNonPagedPoolExpansionStart) {
  1731. //
// This allocation came from the expansion pool. Decide whether to
// release the pages now: a large free, well-stocked pool free lists,
// or pressure on resident pages all favor returning the pages to the
// free page list rather than caching them on the pool free lists.
  1734. //
  1735. // NOTE: all pages in the expanded pool area have PTEs
  1736. // so no physical address checks need to be performed.
  1737. //
  1738. if ((NumberOfPages > 3) ||
  1739. (MmNumberOfFreeNonPagedPool > 5) ||
  1740. ((MmResidentAvailablePages < 200) &&
  1741. (MiExpansionPoolPagesInUse > MiExpansionPoolPagesInitialCharge))) {
  1742. //
  1743. // Free these pages back to the free page list.
  1744. //
  1745. MiFreeNonPagedPool (StartingAddress, NumberOfPages);
  1746. return (ULONG)NumberOfPages;
  1747. }
  1748. }
  1749. //
  1750. // Add the pages to the list of free pages.
  1751. //
  1752. MmNumberOfFreeNonPagedPool += NumberOfPages;
  1753. //
  1754. // Check to see if the next allocation is free.
  1755. // We cannot walk off the end of nonpaged expansion
  1756. // pages as the highest expansion allocation is always
  1757. // virtual and guard-paged.
  1758. //
  1759. i = NumberOfPages;
  1760. ASSERT (MiEndOfInitialPoolFrame != 0);
  1761. if ((PFN_NUMBER)(Pfn1 - MmPfnDatabase) == MiEndOfInitialPoolFrame) {
  1762. PointerPte += 1;
  1763. Pfn1 = NULL;
  1764. }
  1765. else if (AddressIsPhysical == TRUE) {
  1766. Pfn1 += 1;
  1767. ASSERT ((PCHAR)StartingAddress + NumberOfPages < (PCHAR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes);
  1768. }
  1769. else {
  1770. PointerPte += 1;
  1771. ASSERT ((PCHAR)StartingAddress + NumberOfPages <= (PCHAR)MmNonPagedPoolEnd);
  1772. //
  1773. // Unprotect the previously freed pool so it can be merged.
  1774. //
  1775. if (MmProtectFreedNonPagedPool == TRUE) {
  1776. MiUnProtectFreeNonPagedPool (
  1777. (PVOID)MiGetVirtualAddressMappedByPte(PointerPte),
  1778. 0);
  1779. }
  1780. if (PointerPte->u.Hard.Valid == 1) {
  1781. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  1782. }
  1783. else {
  1784. Pfn1 = NULL;
  1785. }
  1786. }
  1787. if ((Pfn1 != NULL) && (Pfn1->u3.e1.StartOfAllocation == 0)) {
  1788. //
  1789. // This range of pages is free. Remove this entry
  1790. // from the list and add these pages to the current
  1791. // range being freed.
  1792. //
  1793. Entry = (PMMFREE_POOL_ENTRY)((PCHAR)StartingAddress
  1794. + (NumberOfPages << PAGE_SHIFT));
  1795. ASSERT (Entry->Signature == MM_FREE_POOL_SIGNATURE);
  1796. ASSERT (Entry->Owner == Entry);
  1797. #if DBG
  1798. if (AddressIsPhysical == TRUE) {
  1799. ASSERT (MI_IS_PHYSICAL_ADDRESS(StartingAddress));
  1800. //
  1801. // On certain architectures, virtual addresses
  1802. // may be physical and hence have no corresponding PTE.
  1803. //
  1804. DebugPfn = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (Entry));
  1805. DebugPfn += Entry->Size;
  1806. if ((PFN_NUMBER)((DebugPfn - 1) - MmPfnDatabase) != MiEndOfInitialPoolFrame) {
  1807. ASSERT (DebugPfn->u3.e1.StartOfAllocation == 1);
  1808. }
  1809. }
  1810. else {
  1811. DebugPte = PointerPte + Entry->Size;
  1812. if ((DebugPte-1)->u.Hard.Valid == 1) {
  1813. DebugPfn = MI_PFN_ELEMENT ((DebugPte-1)->u.Hard.PageFrameNumber);
  1814. if ((PFN_NUMBER)(DebugPfn - MmPfnDatabase) != MiEndOfInitialPoolFrame) {
  1815. if (DebugPte->u.Hard.Valid == 1) {
  1816. DebugPfn = MI_PFN_ELEMENT (DebugPte->u.Hard.PageFrameNumber);
  1817. ASSERT (DebugPfn->u3.e1.StartOfAllocation == 1);
  1818. }
  1819. }
  1820. }
  1821. }
  1822. #endif
  1823. i += Entry->Size;
  1824. if (MmProtectFreedNonPagedPool == FALSE) {
  1825. RemoveEntryList (&Entry->List);
  1826. }
  1827. else {
  1828. MiProtectedPoolRemoveEntryList (&Entry->List);
  1829. }
  1830. }
  1831. //
  1832. // Check to see if the previous page is the end of an allocation.
  1833. // If it is not the end of an allocation, it must be free and
  1834. // therefore this allocation can be tagged onto the end of
  1835. // that allocation.
  1836. //
  1837. // We cannot walk off the beginning of expansion pool because it is
  1838. // guard-paged. If the initial pool is superpaged instead, we are also
// safe because the must-succeed pages always have EndOfAllocation set.
  1840. //
  1841. Entry = (PMMFREE_POOL_ENTRY)StartingAddress;
  1842. ASSERT (MiStartOfInitialPoolFrame != 0);
  1843. if ((PFN_NUMBER)(StartPfn - MmPfnDatabase) == MiStartOfInitialPoolFrame) {
  1844. Pfn1 = NULL;
  1845. }
  1846. else if (AddressIsPhysical == TRUE) {
  1847. ASSERT (MI_IS_PHYSICAL_ADDRESS(StartingAddress));
  1848. ASSERT (StartingAddress != MmNonPagedPoolStart);
  1849. Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (
  1850. (PVOID)((PCHAR)Entry - PAGE_SIZE)));
  1851. }
  1852. else {
  1853. PointerPte -= NumberOfPages + 1;
  1854. //
  1855. // Unprotect the previously freed pool so it can be merged.
  1856. //
  1857. if (MmProtectFreedNonPagedPool == TRUE) {
  1858. MiUnProtectFreeNonPagedPool (
  1859. (PVOID)MiGetVirtualAddressMappedByPte(PointerPte),
  1860. 0);
  1861. }
  1862. if (PointerPte->u.Hard.Valid == 1) {
  1863. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  1864. }
  1865. else {
  1866. Pfn1 = NULL;
  1867. }
  1868. }
  1869. if (Pfn1 != NULL) {
  1870. if (Pfn1->u3.e1.EndOfAllocation == 0) {
  1871. //
  1872. // This range of pages is free, add these pages to
  1873. // this entry. The owner field points to the address
  1874. // of the list entry which is linked into the free pool
  1875. // pages list.
  1876. //
  1877. Entry = (PMMFREE_POOL_ENTRY)((PCHAR)StartingAddress - PAGE_SIZE);
  1878. ASSERT (Entry->Signature == MM_FREE_POOL_SIGNATURE);
  1879. Entry = Entry->Owner;
  1880. //
  1881. // Unprotect the previously freed pool so we can merge it
  1882. //
  1883. if (MmProtectFreedNonPagedPool == TRUE) {
  1884. MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0);
  1885. }
  1886. //
  1887. // If this entry became larger than MM_SMALL_ALLOCATIONS
  1888. // pages, move it to the tail of the list. This keeps the
  1889. // small allocations at the front of the list.
  1890. //
  1891. if (Entry->Size < MI_MAX_FREE_LIST_HEADS - 1) {
  1892. if (MmProtectFreedNonPagedPool == FALSE) {
  1893. RemoveEntryList (&Entry->List);
  1894. }
  1895. else {
  1896. MiProtectedPoolRemoveEntryList (&Entry->List);
  1897. }
  1898. //
  1899. // Add these pages to the previous entry.
  1900. //
  1901. Entry->Size += i;
  1902. Index = (ULONG)(Entry->Size - 1);
  1903. if (Index >= MI_MAX_FREE_LIST_HEADS) {
  1904. Index = MI_MAX_FREE_LIST_HEADS - 1;
  1905. }
  1906. if (MmProtectFreedNonPagedPool == FALSE) {
  1907. InsertTailList (&MmNonPagedPoolFreeListHead[Index],
  1908. &Entry->List);
  1909. }
  1910. else {
  1911. MiProtectedPoolInsertList (&MmNonPagedPoolFreeListHead[Index],
  1912. &Entry->List,
  1913. Entry->Size < MM_SMALL_ALLOCATIONS ?
  1914. TRUE : FALSE);
  1915. }
  1916. }
  1917. else {
  1918. //
  1919. // Add these pages to the previous entry.
  1920. //
  1921. Entry->Size += i;
  1922. }
  1923. }
  1924. }
  1925. if (Entry == (PMMFREE_POOL_ENTRY)StartingAddress) {
  1926. //
  1927. // This entry was not combined with the previous, insert it
  1928. // into the list.
  1929. //
  1930. Entry->Size = i;
  1931. Index = (ULONG)(Entry->Size - 1);
  1932. if (Index >= MI_MAX_FREE_LIST_HEADS) {
  1933. Index = MI_MAX_FREE_LIST_HEADS - 1;
  1934. }
  1935. if (MmProtectFreedNonPagedPool == FALSE) {
  1936. InsertTailList (&MmNonPagedPoolFreeListHead[Index],
  1937. &Entry->List);
  1938. }
  1939. else {
  1940. MiProtectedPoolInsertList (&MmNonPagedPoolFreeListHead[Index],
  1941. &Entry->List,
  1942. Entry->Size < MM_SMALL_ALLOCATIONS ?
  1943. TRUE : FALSE);
  1944. }
  1945. }
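//
// Free list bucketing illustrated: a free run of N pages hangs off
// MmNonPagedPoolFreeListHead[min(N, MI_MAX_FREE_LIST_HEADS) - 1], so
// each small run size gets its own list and all larger runs share
// the final list. For example, with MI_MAX_FREE_LIST_HEADS == 4
// (hypothetical value), a 2-page run goes to list 1 and a 9-page
// run to list 3.
//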
  1946. //
  1947. // Set the owner field in all these pages.
  1948. //
  1949. ASSERT (i != 0);
  1950. NextEntry = (PMMFREE_POOL_ENTRY)StartingAddress;
  1951. LastEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + (i << PAGE_SHIFT));
  1952. do {
  1953. NextEntry->Owner = Entry;
  1954. #if DBG
  1955. NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
  1956. #endif
  1957. NextEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + PAGE_SIZE);
  1958. } while (NextEntry != LastEntry);
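//
// With the Owner fields set, coalescing is O(1) from any direction:
// given an arbitrary page of a free run, for example,
//
//     Head = ((PMMFREE_POOL_ENTRY)SomeFreePageVa)->Owner;
//
// yields the MMFREE_POOL_ENTRY that is actually linked into the
// free lists, without walking the run.
//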
  1959. #if DBG
  1960. NextEntry = Entry;
  1961. if (AddressIsPhysical == TRUE) {
  1962. ASSERT (MI_IS_PHYSICAL_ADDRESS(StartingAddress));
  1963. DebugPfn = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (NextEntry));
  1964. LastDebugPfn = DebugPfn + Entry->Size;
  1965. for ( ; DebugPfn < LastDebugPfn; DebugPfn += 1) {
  1966. ASSERT ((DebugPfn->u3.e1.StartOfAllocation == 0) &&
  1967. (DebugPfn->u3.e1.EndOfAllocation == 0));
  1968. ASSERT (NextEntry->Owner == Entry);
  1969. NextEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + PAGE_SIZE);
  1970. }
  1971. }
  1972. else {
  1973. for (i = 0; i < Entry->Size; i += 1) {
  1974. DebugPte = MiGetPteAddress (NextEntry);
  1975. DebugPfn = MI_PFN_ELEMENT (DebugPte->u.Hard.PageFrameNumber);
  1976. ASSERT ((DebugPfn->u3.e1.StartOfAllocation == 0) &&
  1977. (DebugPfn->u3.e1.EndOfAllocation == 0));
  1978. ASSERT (NextEntry->Owner == Entry);
  1979. NextEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + PAGE_SIZE);
  1980. }
  1981. }
  1982. #endif
  1983. //
// Prevent anyone from touching nonpaged pool after freeing it.
  1985. //
  1986. if (MmProtectFreedNonPagedPool == TRUE) {
  1987. MiProtectFreeNonPagedPool ((PVOID)Entry, (ULONG)Entry->Size);
  1988. }
  1989. return (ULONG)NumberOfPages;
  1990. }
  1991. //
  1992. // Paged pool. Need to verify start of allocation using
  1993. // end of allocation bitmap.
  1994. //
  1995. if (!RtlCheckBit (PagedPoolInfo->PagedPoolAllocationMap, StartPosition)) {
  1996. KeBugCheckEx (BAD_POOL_CALLER,
  1997. 0x50,
  1998. (ULONG_PTR)StartingAddress,
  1999. (ULONG_PTR)StartPosition,
  2000. MmSizeOfPagedPoolInBytes);
  2001. }
  2002. #if DBG
  2003. if (StartPosition > 0) {
  2004. if (RtlCheckBit (PagedPoolInfo->PagedPoolAllocationMap, StartPosition - 1)) {
  2005. if (!RtlCheckBit (PagedPoolInfo->EndOfPagedPoolBitmap, StartPosition - 1)) {
  2006. //
  2007. // In the middle of an allocation... bugcheck.
  2008. //
  2009. DbgPrint("paged pool in middle of allocation\n");
  2010. KeBugCheckEx (MEMORY_MANAGEMENT,
  2011. 0x41286,
  2012. (ULONG_PTR)PagedPoolInfo->PagedPoolAllocationMap,
  2013. (ULONG_PTR)PagedPoolInfo->EndOfPagedPoolBitmap,
  2014. StartPosition);
  2015. }
  2016. }
  2017. }
  2018. #endif
  2019. i = StartPosition;
  2020. PointerPte = PagedPoolInfo->FirstPteForPagedPool + i;
  2021. //
  2022. // Find the last allocated page and check to see if any
  2023. // of the pages being deallocated are in the paging file.
  2024. //
  2025. BitMap = PagedPoolInfo->EndOfPagedPoolBitmap->Buffer;
  2026. while (!MI_CHECK_BIT (BitMap, i)) {
  2027. i += 1;
  2028. }
  2029. NumberOfPages = i - StartPosition + 1;
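//
// Bitmap encoding example: a 3-page allocation at bit position 10
// sets PagedPoolAllocationMap bits 10..12 and EndOfPagedPoolBitmap
// bit 12 only. Freeing from StartPosition == 10, the scan above
// stops at i == 12, recovering NumberOfPages == 3.
//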
  2030. if (SessionSpace) {
  2031. //
// This exists purely to catch session pool leaks. It could be
// removed if every caller could be trusted to free its allocations.
  2034. //
  2035. BitMap = PagedPoolInfo->PagedPoolLargeSessionAllocationMap->Buffer;
  2036. if (MI_CHECK_BIT (BitMap, StartPosition)) {
  2037. MI_CLEAR_BIT (BitMap, StartPosition);
  2038. MiSessionPoolFreed (MiGetVirtualAddressMappedByPte (PointerPte),
  2039. NumberOfPages << PAGE_SHIFT,
  2040. PagedPool);
  2041. }
  2042. SessionAllocation = TRUE;
  2043. }
  2044. else {
  2045. SessionAllocation = FALSE;
  2046. if (VerifierLargePagedPoolMap) {
  2047. BitMap = VerifierLargePagedPoolMap->Buffer;
  2048. if (MI_CHECK_BIT (BitMap, StartPosition)) {
  2049. MI_CLEAR_BIT (BitMap, StartPosition);
  2050. VerifierFreeTrackedPool (MiGetVirtualAddressMappedByPte (PointerPte),
  2051. NumberOfPages << PAGE_SHIFT,
  2052. PagedPool,
  2053. FALSE);
  2054. }
  2055. }
  2056. //
  2057. // If paged pool has been configured as nonpagable, only
  2058. // virtual address space is released.
  2059. //
  2060. if (MmDisablePagingExecutive & MM_PAGED_POOL_LOCKED_DOWN) {
  2061. //
  2062. // Clear the end of allocation bit in the bit map.
  2063. //
  2064. RtlClearBit (PagedPoolInfo->EndOfPagedPoolBitmap, (ULONG)i);
  2065. PagedPoolInfo->AllocatedPagedPool -= NumberOfPages;
  2066. //
  2067. // Clear the allocation bits in the bit map.
  2068. //
  2069. RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap,
  2070. StartPosition,
  2071. (ULONG)NumberOfPages);
  2072. if (StartPosition < PagedPoolInfo->PagedPoolHint) {
  2073. PagedPoolInfo->PagedPoolHint = StartPosition;
  2074. }
  2075. return (ULONG)NumberOfPages;
  2076. }
  2077. }
  2078. LocalNoAccessPte.u.Long = MM_KERNEL_NOACCESS_PTE;
  2079. PagesFreed = MiDeleteSystemPagableVm (PointerPte,
  2080. NumberOfPages,
  2081. LocalNoAccessPte,
  2082. SessionAllocation,
  2083. NULL);
  2084. ASSERT (PagesFreed == NumberOfPages);
  2085. if (SessionSpace) {
  2086. InterlockedExchangeAddSizeT (&MmSessionSpace->CommittedPages,
  2087. 0 - NumberOfPages);
  2088. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_COMMIT_POOL_FREED,
  2089. (ULONG)NumberOfPages);
  2090. }
  2091. else {
  2092. MmPagedPoolCommit -= (ULONG)NumberOfPages;
  2093. }
  2094. MiReturnCommitment (NumberOfPages);
  2095. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_PAGED_POOL_PAGES, NumberOfPages);
  2096. //
  2097. // Clear the end of allocation bit in the bit map.
  2098. //
  2099. BitMap = PagedPoolInfo->EndOfPagedPoolBitmap->Buffer;
  2100. MI_CLEAR_BIT (BitMap, i);
  2101. PagedPoolInfo->PagedPoolCommit -= NumberOfPages;
  2102. PagedPoolInfo->AllocatedPagedPool -= NumberOfPages;
  2103. //
  2104. // Clear the allocation bits in the bit map.
  2105. //
  2106. RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap,
  2107. StartPosition,
  2108. (ULONG)NumberOfPages);
  2109. if (StartPosition < PagedPoolInfo->PagedPoolHint) {
  2110. PagedPoolInfo->PagedPoolHint = StartPosition;
  2111. }
  2112. return (ULONG)NumberOfPages;
  2113. }
  2114. VOID
  2115. MiInitializeNonPagedPool (
  2116. VOID
  2117. )
  2118. /*++
  2119. Routine Description:
  2120. This function initializes the NonPaged pool.
  2121. NonPaged Pool is linked together through the pages.
  2122. Arguments:
  2123. None.
  2124. Return Value:
  2125. None.
  2126. Environment:
  2127. Kernel mode, during initialization.
  2128. --*/
  2129. {
  2130. ULONG PagesInPool;
  2131. ULONG Size;
  2132. ULONG Index;
  2133. PMMFREE_POOL_ENTRY FreeEntry;
  2134. PMMFREE_POOL_ENTRY FirstEntry;
  2135. PMMPTE PointerPte;
  2136. PVOID EndOfInitialPool;
  2137. PFN_NUMBER PageFrameIndex;
  2138. PAGED_CODE();
  2139. //
  2140. // Initialize the list heads for free pages.
  2141. //
  2142. for (Index = 0; Index < MI_MAX_FREE_LIST_HEADS; Index += 1) {
  2143. InitializeListHead (&MmNonPagedPoolFreeListHead[Index]);
  2144. }
  2145. //
  2146. // Set up the non paged pool pages.
  2147. //
  2148. FreeEntry = (PMMFREE_POOL_ENTRY) MmNonPagedPoolStart;
  2149. FirstEntry = FreeEntry;
  2150. PagesInPool = BYTES_TO_PAGES (MmSizeOfNonPagedPoolInBytes);
  2151. //
  2152. // Set the location of expanded pool.
  2153. //
  2154. MmExpandedPoolBitPosition = BYTES_TO_PAGES (MmSizeOfNonPagedPoolInBytes);
  2155. MmNumberOfFreeNonPagedPool = PagesInPool;
  2156. Index = (ULONG)(MmNumberOfFreeNonPagedPool - 1);
  2157. if (Index >= MI_MAX_FREE_LIST_HEADS) {
  2158. Index = MI_MAX_FREE_LIST_HEADS - 1;
  2159. }
  2160. InsertHeadList (&MmNonPagedPoolFreeListHead[Index], &FreeEntry->List);
  2161. FreeEntry->Size = PagesInPool;
  2162. #if DBG
  2163. FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
  2164. #endif
  2165. FreeEntry->Owner = FirstEntry;
  2166. while (PagesInPool > 1) {
  2167. FreeEntry = (PMMFREE_POOL_ENTRY)((PCHAR)FreeEntry + PAGE_SIZE);
  2168. #if DBG
  2169. FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
  2170. #endif
  2171. FreeEntry->Owner = FirstEntry;
  2172. PagesInPool -= 1;
  2173. }
  2174. //
  2175. // Initialize the first nonpaged pool PFN.
  2176. //
  2177. if (MI_IS_PHYSICAL_ADDRESS(MmNonPagedPoolStart)) {
  2178. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (MmNonPagedPoolStart);
  2179. }
  2180. else {
  2181. PointerPte = MiGetPteAddress(MmNonPagedPoolStart);
  2182. ASSERT (PointerPte->u.Hard.Valid == 1);
  2183. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  2184. }
  2185. MiStartOfInitialPoolFrame = PageFrameIndex;
  2186. //
  2187. // Set the last nonpaged pool PFN so coalescing on free doesn't go
  2188. // past the end of the initial pool.
  2189. //
  2190. MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes);
  2191. EndOfInitialPool = (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1);
  2192. if (MI_IS_PHYSICAL_ADDRESS(EndOfInitialPool)) {
  2193. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (EndOfInitialPool);
  2194. }
  2195. else {
  2196. PointerPte = MiGetPteAddress(EndOfInitialPool);
  2197. ASSERT (PointerPte->u.Hard.Valid == 1);
  2198. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  2199. }
  2200. MiEndOfInitialPoolFrame = PageFrameIndex;
  2201. //
  2202. // Set up the system PTEs for nonpaged pool expansion.
  2203. //
  2204. PointerPte = MiGetPteAddress (MmNonPagedPoolExpansionStart);
  2205. ASSERT (PointerPte->u.Hard.Valid == 0);
  2206. Size = BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes -
  2207. MmSizeOfNonPagedPoolInBytes);
  2208. //
  2209. // Insert a guard PTE at the bottom of expanded nonpaged pool.
  2210. //
  2211. Size -= 1;
  2212. PointerPte += 1;
  2213. ASSERT (MiExpansionPoolPagesInUse == 0);
  2214. //
  2215. // Initialize the nonpaged pool expansion resident available initial charge.
  2216. // Note that MmResidentAvailablePages & MmAvailablePages are not initialized
  2217. // yet, but this amount is subtracted when MmResidentAvailablePages is
  2218. // initialized later.
  2219. //
  2220. MiExpansionPoolPagesInitialCharge = Size;
  2221. if (Size > MmNumberOfPhysicalPages / 6) {
  2222. MiExpansionPoolPagesInitialCharge = MmNumberOfPhysicalPages / 6;
  2223. }
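//
// Example: on a 128MB machine (32768 pages, assuming 4K pages), the
// expansion charge is capped at 32768 / 6 == 5461 pages, roughly
// 21MB, regardless of how large the expansion PTE range itself is.
//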
  2224. MiInitializeSystemPtes (PointerPte, Size, NonPagedPoolExpansion);
  2225. //
  2226. // A guard PTE is built at the top by our caller. This allows us to
  2227. // freely increment virtual addresses in MiFreePoolPages and just check
  2228. // for a blank PTE.
  2229. //
  2230. }
  2231. #if DBG || (i386 && !FPO)
  2232. //
  2233. // This only works on checked builds, because the TraceLargeAllocs array is
  2234. // kept in that case to keep track of page size pool allocations. Otherwise
  2235. // we will call ExpSnapShotPoolPages with a page size pool allocation containing
  2236. // arbitrary data and it will potentially go off in the weeds trying to
  2237. // interpret it as a suballocated pool page. Ideally, there would be another
  2238. // bit map that identified single page pool allocations so
  2239. // ExpSnapShotPoolPages would NOT be called for those.
  2240. //
  2241. NTSTATUS
  2242. MmSnapShotPool(
  2243. IN POOL_TYPE PoolType,
  2244. IN PMM_SNAPSHOT_POOL_PAGE SnapShotPoolPage,
  2245. IN PSYSTEM_POOL_INFORMATION PoolInformation,
  2246. IN ULONG Length,
  2247. IN OUT PULONG RequiredLength
  2248. )
  2249. {
  2250. ULONG Index;
  2251. NTSTATUS Status;
  2252. NTSTATUS xStatus;
  2253. PCHAR p, pStart;
  2254. ULONG Size;
  2255. ULONG BusyFlag;
  2256. ULONG CurrentPage, NumberOfPages;
  2257. PSYSTEM_POOL_ENTRY PoolEntryInfo;
  2258. PLIST_ENTRY Entry;
  2259. PMMFREE_POOL_ENTRY FreePageInfo;
  2260. ULONG StartPosition;
  2261. PMMPTE PointerPte;
  2262. PMMPFN Pfn1;
  2263. LOGICAL NeedsReprotect;
  2264. Status = STATUS_SUCCESS;
  2265. PoolEntryInfo = &PoolInformation->Entries[0];
  2266. if (PoolType == PagedPool) {
  2267. PoolInformation->TotalSize = (PCHAR)MmPagedPoolEnd -
  2268. (PCHAR)MmPagedPoolStart;
  2269. PoolInformation->FirstEntry = MmPagedPoolStart;
  2270. p = MmPagedPoolStart;
  2271. CurrentPage = 0;
  2272. while (p < (PCHAR)MmPagedPoolEnd) {
  2273. pStart = p;
  2274. BusyFlag = RtlCheckBit (MmPagedPoolInfo.PagedPoolAllocationMap, CurrentPage);
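//
// Accumulate a run of consecutive pages whose busy/free state
// matches BusyFlag; the run ends at a state change, at an
// end-of-allocation bit, or at the end of paged pool.
//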
while (!(BusyFlag ^ RtlCheckBit (MmPagedPoolInfo.PagedPoolAllocationMap, CurrentPage))) {
  2276. p += PAGE_SIZE;
  2277. if (RtlCheckBit (MmPagedPoolInfo.EndOfPagedPoolBitmap, CurrentPage)) {
  2278. CurrentPage += 1;
  2279. break;
  2280. }
  2281. CurrentPage += 1;
  2282. if (p > (PCHAR)MmPagedPoolEnd) {
  2283. break;
  2284. }
  2285. }
  2286. Size = (ULONG)(p - pStart);
  2287. if (BusyFlag) {
  2288. xStatus = (*SnapShotPoolPage)(pStart,
  2289. Size,
  2290. PoolInformation,
  2291. &PoolEntryInfo,
  2292. Length,
  2293. RequiredLength
  2294. );
  2295. if (xStatus != STATUS_COMMITMENT_LIMIT) {
  2296. Status = xStatus;
  2297. }
  2298. }
  2299. else {
  2300. PoolInformation->NumberOfEntries += 1;
  2301. *RequiredLength += sizeof (SYSTEM_POOL_ENTRY);
  2302. if (Length < *RequiredLength) {
  2303. Status = STATUS_INFO_LENGTH_MISMATCH;
  2304. }
  2305. else {
  2306. PoolEntryInfo->Allocated = FALSE;
  2307. PoolEntryInfo->Size = Size;
  2308. PoolEntryInfo->AllocatorBackTraceIndex = 0;
  2309. PoolEntryInfo->TagUlong = 0;
  2310. PoolEntryInfo += 1;
  2311. Status = STATUS_SUCCESS;
  2312. }
  2313. }
  2314. }
  2315. }
  2316. else if (PoolType == NonPagedPool) {
  2317. PoolInformation->TotalSize = MmSizeOfNonPagedPoolInBytes;
  2318. PoolInformation->FirstEntry = MmNonPagedPoolStart;
  2319. p = MmNonPagedPoolStart;
  2320. while (p < (PCHAR)MmNonPagedPoolEnd) {
  2321. //
  2322. // NonPaged pool is linked together through the pages themselves.
  2323. //
  2324. NeedsReprotect = FALSE;
  2325. FreePageInfo = NULL;
  2326. for (Index = 0; Index < MI_MAX_FREE_LIST_HEADS; Index += 1) {
  2327. Entry = MmNonPagedPoolFreeListHead[Index].Flink;
  2328. while (Entry != &MmNonPagedPoolFreeListHead[Index]) {
  2329. if (MmProtectFreedNonPagedPool == TRUE) {
  2330. MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0);
  2331. NeedsReprotect = TRUE;
  2332. }
  2333. FreePageInfo = CONTAINING_RECORD (Entry,
  2334. MMFREE_POOL_ENTRY,
  2335. List);
  2336. ASSERT (FreePageInfo->Signature == MM_FREE_POOL_SIGNATURE);
  2337. if (p == (PCHAR)FreePageInfo) {
  2338. Size = (ULONG)(FreePageInfo->Size << PAGE_SHIFT);
  2339. PoolInformation->NumberOfEntries += 1;
  2340. *RequiredLength += sizeof( SYSTEM_POOL_ENTRY );
  2341. if (Length < *RequiredLength) {
  2342. Status = STATUS_INFO_LENGTH_MISMATCH;
  2343. }
  2344. else {
  2345. PoolEntryInfo->Allocated = FALSE;
  2346. PoolEntryInfo->Size = Size;
  2347. PoolEntryInfo->AllocatorBackTraceIndex = 0;
  2348. PoolEntryInfo->TagUlong = 0;
  2349. PoolEntryInfo += 1;
  2350. Status = STATUS_SUCCESS;
  2351. }
  2352. p += Size;
  2353. Index = MI_MAX_FREE_LIST_HEADS;
  2354. break;
  2355. }
  2356. Entry = FreePageInfo->List.Flink;
  2357. if (NeedsReprotect == TRUE) {
  2358. MiProtectFreeNonPagedPool ((PVOID)FreePageInfo,
  2359. (ULONG)FreePageInfo->Size);
  2360. NeedsReprotect = FALSE;
  2361. }
  2362. }
  2363. }
  2364. StartPosition = BYTES_TO_PAGES((PCHAR)p -
  2365. (PCHAR)MmPageAlignedPoolBase[NonPagedPool]);
  2366. if (StartPosition >= MmExpandedPoolBitPosition) {
  2367. if (NeedsReprotect == TRUE) {
  2368. MiProtectFreeNonPagedPool ((PVOID)FreePageInfo,
  2369. (ULONG)FreePageInfo->Size);
  2370. }
  2371. break;
  2372. }
  2373. if (MI_IS_PHYSICAL_ADDRESS(p)) {
  2374. //
  2375. // On certain architectures, virtual addresses
  2376. // may be physical and hence have no corresponding PTE.
  2377. //
  2378. PointerPte = NULL;
  2379. Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (p));
  2380. }
  2381. else {
  2382. PointerPte = MiGetPteAddress (p);
  2383. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  2384. }
  2385. ASSERT (Pfn1->u3.e1.StartOfAllocation != 0);
  2386. //
  2387. // Find end of allocation and determine size.
  2388. //
  2389. NumberOfPages = 1;
  2390. while (Pfn1->u3.e1.EndOfAllocation == 0) {
  2391. NumberOfPages += 1;
  2392. if (PointerPte == NULL) {
  2393. Pfn1 += 1;
  2394. }
  2395. else {
  2396. PointerPte += 1;
  2397. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  2398. }
  2399. }
  2400. Size = NumberOfPages << PAGE_SHIFT;
  2401. xStatus = (*SnapShotPoolPage) (p,
  2402. Size,
  2403. PoolInformation,
  2404. &PoolEntryInfo,
  2405. Length,
  2406. RequiredLength);
  2407. if (NeedsReprotect == TRUE) {
  2408. MiProtectFreeNonPagedPool ((PVOID)FreePageInfo,
  2409. (ULONG)FreePageInfo->Size);
  2410. }
  2411. if (xStatus != STATUS_COMMITMENT_LIMIT) {
  2412. Status = xStatus;
  2413. }
  2414. p += Size;
  2415. }
  2416. }
  2417. else {
  2418. Status = STATUS_NOT_IMPLEMENTED;
  2419. }
  2420. return Status;
  2421. }
  2422. #endif // DBG || (i386 && !FPO)
  2423. VOID
  2424. MiCheckSessionPoolAllocations (
  2425. VOID
  2426. )
  2427. /*++
  2428. Routine Description:
  2429. Ensure that the current session has no pool allocations since it is about
  2430. to exit. All session allocations must be freed prior to session exit.
  2431. Arguments:
  2432. None.
  2433. Return Value:
  2434. None.
  2435. Environment:
  2436. Kernel mode.
  2437. --*/
  2438. {
  2439. ULONG i;
  2440. PMMPTE StartPde;
  2441. PMMPTE EndPde;
  2442. PMMPTE PointerPte;
  2443. PVOID VirtualAddress;
  2444. PAGED_CODE();
  2445. if (MmSessionSpace->NonPagedPoolBytes || MmSessionSpace->PagedPoolBytes) {
  2446. //
  2447. // All page tables for this session's paged pool must be freed by now.
  2448. // Being here means they aren't - this is fatal. Force in any valid
  2449. // pages so that a debugger can show who the guilty party is.
  2450. //
  2451. StartPde = MiGetPdeAddress (MmSessionSpace->PagedPoolStart);
  2452. EndPde = MiGetPdeAddress (MmSessionSpace->PagedPoolEnd);
  2453. while (StartPde <= EndPde) {
  2454. if (StartPde->u.Long != 0 && StartPde->u.Long != MM_KERNEL_NOACCESS_PTE) {
  2455. //
  2456. // Hunt through the page table page for valid pages and force
  2457. // them in. Note this also forces in the page table page if
// it is not already resident.
  2459. //
  2460. PointerPte = MiGetVirtualAddressMappedByPte (StartPde);
  2461. for (i = 0; i < PTE_PER_PAGE; i += 1) {
  2462. if (PointerPte->u.Long != 0 && PointerPte->u.Long != MM_KERNEL_NOACCESS_PTE) {
  2463. VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
  2464. *(volatile UCHAR *)VirtualAddress = *(volatile UCHAR *)VirtualAddress;
  2465. #if DBG
  2466. DbgPrint("MiCheckSessionPoolAllocations: Address %p still valid\n",
  2467. VirtualAddress);
  2468. #endif
  2469. }
  2470. PointerPte += 1;
  2471. }
  2472. }
  2473. StartPde += 1;
  2474. }
  2475. #if DBG
DbgPrint ("MiCheckSessionPoolAllocations: This exiting session (ID %d) is leaking pool!\n", MmSessionSpace->SessionId);
  2477. DbgPrint ("This means win32k.sys, rdpdd.sys, atmfd.sys or a video/font driver is broken\n");
  2478. DbgPrint ("%d nonpaged allocation leaks for %d bytes and %d paged allocation leaks for %d bytes\n",
  2479. MmSessionSpace->NonPagedPoolAllocations,
  2480. MmSessionSpace->NonPagedPoolBytes,
  2481. MmSessionSpace->PagedPoolAllocations,
  2482. MmSessionSpace->PagedPoolBytes);
  2483. #endif
  2484. KeBugCheckEx (SESSION_HAS_VALID_POOL_ON_EXIT,
  2485. (ULONG_PTR)MmSessionSpace->SessionId,
  2486. MmSessionSpace->PagedPoolBytes,
  2487. MmSessionSpace->NonPagedPoolBytes,
  2488. #if defined (_WIN64)
((ULONG_PTR)MmSessionSpace->NonPagedPoolAllocations << 32) |
  2490. (MmSessionSpace->PagedPoolAllocations)
  2491. #else
  2492. (MmSessionSpace->NonPagedPoolAllocations << 16) |
  2493. (MmSessionSpace->PagedPoolAllocations)
  2494. #endif
  2495. );
  2496. }
  2497. ASSERT (MmSessionSpace->NonPagedPoolAllocations == 0);
  2498. ASSERT (MmSessionSpace->PagedPoolAllocations == 0);
  2499. }
  2500. NTSTATUS
  2501. MiInitializeAndChargePfn (
  2502. OUT PPFN_NUMBER PageFrameIndex,
  2503. IN PMMPTE PointerPde,
  2504. IN PFN_NUMBER ContainingPageFrame,
  2505. IN LOGICAL SessionAllocation
  2506. )
  2507. /*++
  2508. Routine Description:
  2509. Nonpaged wrapper to allocate, initialize and charge for a new page.
  2510. Arguments:
  2511. PageFrameIndex - Returns the page frame number which was initialized.
  2512. PointerPde - Supplies the pointer to the PDE to initialize.
  2513. ContainingPageFrame - Supplies the page frame number of the page
  2514. directory page which contains this PDE.
  2515. SessionAllocation - Supplies TRUE if this allocation is in session space,
  2516. FALSE otherwise.
  2517. Return Value:
  2518. Status of the page initialization.
  2519. --*/
  2520. {
  2521. MMPTE TempPte;
  2522. KIRQL OldIrql;
  2523. if (SessionAllocation == TRUE) {
  2524. TempPte = ValidKernelPdeLocal;
  2525. }
  2526. else {
  2527. TempPte = ValidKernelPde;
  2528. }
  2529. LOCK_PFN2 (OldIrql);
  2530. if ((MmAvailablePages <= MM_MEDIUM_LIMIT) || (MI_NONPAGABLE_MEMORY_AVAILABLE() <= 1)) {
  2531. UNLOCK_PFN2 (OldIrql);
  2532. return STATUS_NO_MEMORY;
  2533. }
  2534. MmResidentAvailablePages -= 1;
  2535. MiEnsureAvailablePageOrWait (NULL, NULL);
  2536. //
  2537. // Ensure no other thread handled this while this one waited. If one has,
  2538. // then return STATUS_RETRY so the caller knows to try again.
  2539. //
  2540. if (PointerPde->u.Hard.Valid == 1) {
  2541. MmResidentAvailablePages += 1;
  2542. UNLOCK_PFN2 (OldIrql);
  2543. return STATUS_RETRY;
  2544. }
  2545. //
  2546. // Allocate and map in the page at the requested address.
  2547. //
  2548. *PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPde));
  2549. TempPte.u.Hard.PageFrameNumber = *PageFrameIndex;
  2550. MI_WRITE_VALID_PTE (PointerPde, TempPte);
  2551. MiInitializePfnForOtherProcess (*PageFrameIndex,
  2552. PointerPde,
  2553. ContainingPageFrame);
  2554. //
  2555. // This page will be locked into working set and assigned an index when
  2556. // the working set is set up on return.
  2557. //
  2558. ASSERT (MI_PFN_ELEMENT(*PageFrameIndex)->u1.WsIndex == 0);
  2559. UNLOCK_PFN2 (OldIrql);
  2560. return STATUS_SUCCESS;
  2561. }
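//
// Illustrative caller pattern only (excluded from the build; the
// variable names below are hypothetical): STATUS_RETRY means another
// thread materialized the page table page while this thread waited
// inside MiInitializeAndChargePfn, so the PDE is already valid and
// the caller simply proceeds with it.
//
#if 0
Status = MiInitializeAndChargePfn (&PageFrameIndex,
                                   PointerPde,
                                   ContainingFrame,
                                   SessionAllocation);

if (Status == STATUS_RETRY) {
    //
    // Another thread built this page table page while we waited;
    // the PDE is now valid, so just use it.
    //
}
else if (!NT_SUCCESS (Status)) {
    return Status;      // STATUS_NO_MEMORY - fail the expansion.
}
#endif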
  2562. VOID
  2563. MiSessionPageTableRelease (
  2564. IN PFN_NUMBER PageFrameIndex
  2565. )
  2566. /*++
  2567. Routine Description:
  2568. Nonpaged wrapper to release a session pool page table page.
  2569. Arguments:
PageFrameIndex - Supplies the page frame number of the page table page to release.
  2571. Return Value:
  2572. None.
  2573. --*/
  2574. {
  2575. KIRQL OldIrql;
  2576. PMMPFN Pfn1;
  2577. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  2578. LOCK_PFN (OldIrql);
  2579. ASSERT (MmSessionSpace->SessionPageDirectoryIndex == Pfn1->u4.PteFrame);
  2580. ASSERT (Pfn1->u2.ShareCount == 1);
  2581. MiDecrementShareAndValidCount (Pfn1->u4.PteFrame);
  2582. MI_SET_PFN_DELETED (Pfn1);
  2583. MiDecrementShareCountOnly (PageFrameIndex);
  2584. MmResidentAvailablePages += 1;
  2585. MM_BUMP_COUNTER(51, 1);
  2586. UNLOCK_PFN (OldIrql);
  2587. }
  2588. NTSTATUS
  2589. MiInitializeSessionPool (
  2590. VOID
  2591. )
  2592. /*++
  2593. Routine Description:
  2594. Initialize the current session's pool structure.
  2595. Arguments:
  2596. None.
  2597. Return Value:
  2598. Status of the pool initialization.
  2599. Environment:
  2600. Kernel mode.
  2601. --*/
  2602. {
  2603. PMMPTE PointerPde, PointerPte;
  2604. PFN_NUMBER PageFrameIndex;
  2605. PPOOL_DESCRIPTOR PoolDescriptor;
  2606. PMM_SESSION_SPACE SessionGlobal;
  2607. PMM_PAGED_POOL_INFO PagedPoolInfo;
  2608. MMPTE PreviousPte;
  2609. NTSTATUS Status;
  2610. #if (_MI_PAGING_LEVELS < 3)
  2611. ULONG Index;
  2612. #endif
  2613. #if DBG
  2614. PMMPTE StartPde;
  2615. PMMPTE EndPde;
  2616. #endif
  2617. PAGED_CODE ();
  2618. SessionGlobal = SESSION_GLOBAL(MmSessionSpace);
  2619. ExInitializeFastMutex (&SessionGlobal->PagedPoolMutex);
  2620. PoolDescriptor = &MmSessionSpace->PagedPool;
  2621. ExInitializePoolDescriptor (PoolDescriptor,
  2622. PagedPoolSession,
  2623. 0,
  2624. 0,
  2625. &SessionGlobal->PagedPoolMutex);
  2626. MmSessionSpace->PagedPoolStart = (PVOID)MiSessionPoolStart;
  2627. MmSessionSpace->PagedPoolEnd = (PVOID)(MiSessionPoolEnd -1);
  2628. PagedPoolInfo = &MmSessionSpace->PagedPoolInfo;
  2629. PagedPoolInfo->PagedPoolCommit = 0;
  2630. PagedPoolInfo->PagedPoolHint = 0;
  2631. PagedPoolInfo->AllocatedPagedPool = 0;
  2632. //
  2633. // Build the page table page for paged pool.
  2634. //
  2635. PointerPde = MiGetPdeAddress (MmSessionSpace->PagedPoolStart);
  2636. MmSessionSpace->PagedPoolBasePde = PointerPde;
  2637. PointerPte = MiGetPteAddress (MmSessionSpace->PagedPoolStart);
  2638. PagedPoolInfo->FirstPteForPagedPool = PointerPte;
  2639. PagedPoolInfo->LastPteForPagedPool = MiGetPteAddress (MmSessionSpace->PagedPoolEnd);
  2640. #if DBG
  2641. //
  2642. // Session pool better be unused.
  2643. //
  2644. StartPde = MiGetPdeAddress (MmSessionSpace->PagedPoolStart);
  2645. EndPde = MiGetPdeAddress (MmSessionSpace->PagedPoolEnd);
  2646. while (StartPde <= EndPde) {
  2647. ASSERT (StartPde->u.Long == 0);
  2648. StartPde += 1;
  2649. }
  2650. #endif
  2651. //
  2652. // Mark all PDEs as empty.
  2653. //
  2654. MiFillMemoryPte (PointerPde,
  2655. sizeof(MMPTE) *
  2656. (1 + MiGetPdeAddress (MmSessionSpace->PagedPoolEnd) - PointerPde),
  2657. ZeroKernelPte.u.Long);
  2658. if (MiChargeCommitment (1, NULL) == FALSE) {
  2659. MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_COMMIT);
  2660. return STATUS_NO_MEMORY;
  2661. }
  2662. Status = MiInitializeAndChargePfn (&PageFrameIndex,
  2663. PointerPde,
  2664. MmSessionSpace->SessionPageDirectoryIndex,
  2665. TRUE);
  2666. if (!NT_SUCCESS(Status)) {
  2667. MiReturnCommitment (1);
  2668. MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_RESIDENT);
  2669. return Status;
  2670. }
  2671. MM_TRACK_COMMIT (MM_DBG_COMMIT_SESSION_POOL_PAGE_TABLES, 1);
  2672. MM_BUMP_COUNTER(42, 1);
  2673. MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_ALLOC, 1);
  2674. KeFillEntryTb ((PHARDWARE_PTE) PointerPde, PointerPte, FALSE);
  2675. #if (_MI_PAGING_LEVELS < 3)
  2676. Index = MiGetPdeSessionIndex (MmSessionSpace->PagedPoolStart);
  2677. ASSERT (MmSessionSpace->PageTables[Index].u.Long == 0);
  2678. MmSessionSpace->PageTables[Index] = *PointerPde;
  2679. #endif
  2680. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_POOL_CREATE, 1);
  2681. MmSessionSpace->NonPagablePages += 1;
  2682. InterlockedExchangeAddSizeT (&MmSessionSpace->CommittedPages, 1);
  2683. MiFillMemoryPte (PointerPte, PAGE_SIZE, MM_KERNEL_NOACCESS_PTE);
  2684. PagedPoolInfo->NextPdeForPagedPoolExpansion = PointerPde + 1;
  2685. //
  2686. // Initialize the bitmaps.
  2687. //
  2688. MiCreateBitMap (&PagedPoolInfo->PagedPoolAllocationMap,
  2689. MmSessionPoolSize >> PAGE_SHIFT,
  2690. NonPagedPool);
  2691. if (PagedPoolInfo->PagedPoolAllocationMap == NULL) {
  2692. MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_NONPAGED_POOL);
  2693. goto Failure;
  2694. }
  2695. //
  2696. // We start with all pages in the virtual address space as "busy", and
  2697. // clear bits to make pages available as we dynamically expand the pool.
  2698. //
  2699. RtlSetAllBits( PagedPoolInfo->PagedPoolAllocationMap );
  2700. //
// Indicate that the first page's worth of PTEs is available.
  2702. //
  2703. RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap, 0, PTE_PER_PAGE);
  2704. //
  2705. // Create the end of allocation range bitmap.
  2706. //
  2707. MiCreateBitMap (&PagedPoolInfo->EndOfPagedPoolBitmap,
  2708. MmSessionPoolSize >> PAGE_SHIFT,
  2709. NonPagedPool);
  2710. if (PagedPoolInfo->EndOfPagedPoolBitmap == NULL) {
  2711. MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_NONPAGED_POOL);
  2712. goto Failure;
  2713. }
  2714. RtlClearAllBits (PagedPoolInfo->EndOfPagedPoolBitmap);
  2715. //
  2716. // Create the large session allocation bitmap.
  2717. //
  2718. MiCreateBitMap (&PagedPoolInfo->PagedPoolLargeSessionAllocationMap,
  2719. MmSessionPoolSize >> PAGE_SHIFT,
  2720. NonPagedPool);
  2721. if (PagedPoolInfo->PagedPoolLargeSessionAllocationMap == NULL) {
  2722. MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_NONPAGED_POOL);
  2723. goto Failure;
  2724. }
  2725. RtlClearAllBits (PagedPoolInfo->PagedPoolLargeSessionAllocationMap);
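//
// Sizing example (hypothetical 16MB session pool, 4K pages): each of
// the three bitmaps above holds 16MB >> 12 == 4096 bits. The
// allocation map starts fully set ("busy") with only the first
// PTE_PER_PAGE bits cleared; further bits are cleared only as
// NextPdeForPagedPoolExpansion maps in additional page tables.
//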
  2726. return STATUS_SUCCESS;
  2727. Failure:
  2728. MiFreeSessionPoolBitMaps ();
  2729. MiSessionPageTableRelease (PageFrameIndex);
  2730. MI_FLUSH_SINGLE_SESSION_TB (MiGetPteAddress(PointerPde),
  2731. TRUE,
  2732. TRUE,
  2733. (PHARDWARE_PTE)PointerPde,
  2734. ZeroKernelPte.u.Flush,
  2735. PreviousPte);
  2736. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_POOL_CREATE_FAILED, 1);
  2737. MmSessionSpace->NonPagablePages -= 1;
  2738. InterlockedExchangeAddSizeT (&MmSessionSpace->CommittedPages, -1);
  2739. MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_FREE_FAIL1, 1);
  2740. MiReturnCommitment (1);
  2741. MM_TRACK_COMMIT_REDUCTION (MM_DBG_COMMIT_SESSION_POOL_PAGE_TABLES, 1);
  2742. return STATUS_NO_MEMORY;
  2743. }
  2744. VOID
  2745. MiFreeSessionPoolBitMaps (
  2746. VOID
  2747. )
  2748. /*++
  2749. Routine Description:
  2750. Free the current session's pool bitmap structures.
  2751. Arguments:
  2752. None.
  2753. Return Value:
  2754. None.
  2755. Environment:
  2756. Kernel mode.
  2757. --*/
  2758. {
  2759. PAGED_CODE();
  2760. if (MmSessionSpace->PagedPoolInfo.PagedPoolAllocationMap ) {
  2761. ExFreePool (MmSessionSpace->PagedPoolInfo.PagedPoolAllocationMap);
  2762. MmSessionSpace->PagedPoolInfo.PagedPoolAllocationMap = NULL;
  2763. }
  2764. if (MmSessionSpace->PagedPoolInfo.EndOfPagedPoolBitmap ) {
  2765. ExFreePool (MmSessionSpace->PagedPoolInfo.EndOfPagedPoolBitmap);
  2766. MmSessionSpace->PagedPoolInfo.EndOfPagedPoolBitmap = NULL;
  2767. }
  2768. if (MmSessionSpace->PagedPoolInfo.PagedPoolLargeSessionAllocationMap) {
  2769. ExFreePool (MmSessionSpace->PagedPoolInfo.PagedPoolLargeSessionAllocationMap);
  2770. MmSessionSpace->PagedPoolInfo.PagedPoolLargeSessionAllocationMap = NULL;
  2771. }
  2772. }
  2773. #if DBG
  2774. #define MI_LOG_CONTIGUOUS 100
  2775. typedef struct _MI_CONTIGUOUS_ALLOCATORS {
  2776. PVOID BaseAddress;
  2777. SIZE_T NumberOfBytes;
  2778. PVOID CallingAddress;
  2779. } MI_CONTIGUOUS_ALLOCATORS, *PMI_CONTIGUOUS_ALLOCATORS;
  2780. ULONG MiContiguousIndex;
  2781. MI_CONTIGUOUS_ALLOCATORS MiContiguousAllocators[MI_LOG_CONTIGUOUS];
  2782. VOID
  2783. MiInsertContiguousTag (
  2784. IN PVOID BaseAddress,
  2785. IN SIZE_T NumberOfBytes,
  2786. IN PVOID CallingAddress
  2787. )
  2788. {
  2789. KIRQL OldIrql;
  2790. #if !DBG
  2791. if ((NtGlobalFlag & FLG_POOL_ENABLE_TAGGING) == 0) {
  2792. return;
  2793. }
  2794. #endif
  2795. OldIrql = ExLockPool (NonPagedPool);
  2796. if (MiContiguousIndex >= MI_LOG_CONTIGUOUS) {
  2797. MiContiguousIndex = 0;
  2798. }
  2799. MiContiguousAllocators[MiContiguousIndex].BaseAddress = BaseAddress;
  2800. MiContiguousAllocators[MiContiguousIndex].NumberOfBytes = NumberOfBytes;
  2801. MiContiguousAllocators[MiContiguousIndex].CallingAddress = CallingAddress;
  2802. MiContiguousIndex += 1;
  2803. ExUnlockPool (NonPagedPool, OldIrql);
  2804. }
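//
// MiContiguousAllocators is a simple ring: once MI_LOG_CONTIGUOUS
// (100) records have been written, MiContiguousIndex wraps to 0 and
// the oldest record is overwritten, so the array always holds the
// last 100 contiguous allocators for debugger inspection.
//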
  2805. #else
  2806. #define MiInsertContiguousTag(a, b, c) (c) = (c)
  2807. #endif
  2808. PVOID
  2809. MiFindContiguousMemoryInPool (
  2810. IN PFN_NUMBER LowestPfn,
  2811. IN PFN_NUMBER HighestPfn,
  2812. IN PFN_NUMBER BoundaryPfn,
  2813. IN PFN_NUMBER SizeInPages,
  2814. IN PVOID CallingAddress
  2815. )
  2816. /*++
  2817. Routine Description:
  2818. This function searches nonpaged pool for contiguous pages to satisfy the
  2819. request. Note the pool address returned maps these pages as MmCached.
  2820. Arguments:
  2821. LowestPfn - Supplies the lowest acceptable physical page number.
  2822. HighestPfn - Supplies the highest acceptable physical page number.
  2823. BoundaryPfn - Supplies the page frame number multiple the allocation must
  2824. not cross. 0 indicates it can cross any boundary.
  2825. SizeInPages - Supplies the number of pages to allocate.
  2826. CallingAddress - Supplies the calling address of the allocator.
  2827. Return Value:
  2828. NULL - a contiguous range could not be found to satisfy the request.
  2829. NON-NULL - Returns a pointer (virtual address in the nonpaged portion
  2830. of the system) to the allocated physically contiguous
  2831. memory.
  2832. Environment:
  2833. Kernel mode, IRQL of APC_LEVEL or below.
  2834. --*/
  2835. {
  2836. PMMPTE PointerPte;
  2837. PMMPFN Pfn1;
  2838. PVOID BaseAddress;
  2839. PVOID BaseAddress2;
  2840. KIRQL OldIrql;
  2841. PMMFREE_POOL_ENTRY FreePageInfo;
  2842. PLIST_ENTRY Entry;
  2843. ULONG Index;
  2844. PFN_NUMBER BoundaryMask;
  2845. ULONG AllocationPosition;
  2846. PVOID Va;
  2847. LOGICAL AddressIsPhysical;
  2848. PFN_NUMBER SpanInPages;
  2849. PFN_NUMBER SpanInPages2;
  2850. PAGED_CODE ();
  2851. //
// Initializing SpanInPages* is not needed for correctness,
// but without it the compiler cannot compile this code at /W4 when
// checking for use of uninitialized variables.
  2855. //
  2856. SpanInPages = 0;
  2857. SpanInPages2 = 0;
  2858. BaseAddress = NULL;
  2859. BoundaryMask = ~(BoundaryPfn - 1);
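//
// Boundary math example: with BoundaryPfn == 0x10 (a 64K boundary
// for 4K pages), BoundaryMask == ~0xF. A 4-page run at PFN 0x0E
// fails the later check because (0x0E ^ 0x11) & ~0xF == 0x10, i.e.
// the run crosses a 0x10-frame boundary; a run at PFN 0x10 passes
// because (0x10 ^ 0x13) & ~0xF == 0.
//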
  2860. //
// Search the nonpaged pool free lists directly: grab the pool lock
// and look for a free run whose pages meet the physical
// requirements.
  2864. //
  2865. MmLockPagableSectionByHandle (ExPageLockHandle);
  2866. OldIrql = ExLockPool (NonPagedPool);
  2867. //
  2868. // Trace through the page allocator's pool headers for a page which
  2869. // meets the requirements.
  2870. //
  2871. // NonPaged pool is linked together through the pages themselves.
  2872. //
  2873. Index = (ULONG)(SizeInPages - 1);
  2874. if (Index >= MI_MAX_FREE_LIST_HEADS) {
  2875. Index = MI_MAX_FREE_LIST_HEADS - 1;
  2876. }
  2877. while (Index < MI_MAX_FREE_LIST_HEADS) {
  2878. Entry = MmNonPagedPoolFreeListHead[Index].Flink;
  2879. while (Entry != &MmNonPagedPoolFreeListHead[Index]) {
  2880. if (MmProtectFreedNonPagedPool == TRUE) {
  2881. MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0);
  2882. }
  2883. //
  2884. // The list is not empty, see if this one meets the physical
  2885. // requirements.
  2886. //
  2887. FreePageInfo = CONTAINING_RECORD(Entry,
  2888. MMFREE_POOL_ENTRY,
  2889. List);
  2890. ASSERT (FreePageInfo->Signature == MM_FREE_POOL_SIGNATURE);
  2891. if (FreePageInfo->Size >= SizeInPages) {
  2892. //
  2893. // This entry has sufficient space, check to see if the
  2894. // pages meet the physical requirements.
  2895. //
  2896. Va = MiCheckForContiguousMemory (PAGE_ALIGN(Entry),
  2897. FreePageInfo->Size,
  2898. SizeInPages,
  2899. LowestPfn,
  2900. HighestPfn,
  2901. BoundaryPfn,
  2902. MiCached);
  2903. if (Va != NULL) {
  2904. //
  2905. // These pages meet the requirements. The returned
  2906. // address may butt up on the end, the front or be
  2907. // somewhere in the middle. Split the Entry based
  2908. // on which case it is.
  2909. //
  2910. Entry = PAGE_ALIGN(Entry);
  2911. if (MmProtectFreedNonPagedPool == FALSE) {
  2912. RemoveEntryList (&FreePageInfo->List);
  2913. }
  2914. else {
  2915. MiProtectedPoolRemoveEntryList (&FreePageInfo->List);
  2916. }
  2917. //
  2918. // Adjust the number of free pages remaining in the pool.
  2919. // The TotalBigPages calculation appears incorrect for the
  2920. // case where we're splitting a block, but it's done this
  2921. // way because ExFreePool corrects it when we free the
  2922. // fragment block below. Likewise for
  2923. // MmAllocatedNonPagedPool and MmNumberOfFreeNonPagedPool
  2924. // which is corrected by MiFreePoolPages for the fragment.
  2925. //
  2926. NonPagedPoolDescriptor.TotalBigPages += (ULONG)FreePageInfo->Size;
  2927. MmAllocatedNonPagedPool += FreePageInfo->Size;
  2928. MmNumberOfFreeNonPagedPool -= FreePageInfo->Size;
  2929. ASSERT ((LONG)MmNumberOfFreeNonPagedPool >= 0);
  2930. if (Va == Entry) {
  2931. //
  2932. // Butted against the front.
  2933. //
  2934. AllocationPosition = 0;
  2935. }
  2936. else if (((PCHAR)Va + (SizeInPages << PAGE_SHIFT)) == ((PCHAR)Entry + (FreePageInfo->Size << PAGE_SHIFT))) {
  2937. //
  2938. // Butted against the end.
  2939. //
  2940. AllocationPosition = 2;
  2941. }
  2942. else {
  2943. //
  2944. // Somewhere in the middle.
  2945. //
  2946. AllocationPosition = 1;
  2947. }
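//
// Split example: searching an 8-page free run at VA E for a 3-page
// fit that lands at E + 2 pages yields AllocationPosition == 1
// (middle). The code below then frees a 3-page tail fragment at
// E + 5 pages and a 2-page front fragment at E, leaving only the 3
// requested pages allocated.
//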
  2948. //
  2949. // Pages are being removed from the front of
  2950. // the list entry and the whole list entry
  2951. // will be removed and then the remainder inserted.
  2952. //
  2953. //
  2954. // Mark start and end for the block at the top of the
  2955. // list.
  2956. //
  2957. if (MI_IS_PHYSICAL_ADDRESS(Va)) {
  2958. //
  2959. // On certain architectures, virtual addresses
  2960. // may be physical and hence have no corresponding PTE.
  2961. //
  2962. AddressIsPhysical = TRUE;
  2963. Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (Va));
  2964. //
// Initializing PointerPte is not needed for correctness,
// but without it the compiler cannot compile this code at /W4 when
// checking for use of uninitialized variables.
  2968. //
  2969. PointerPte = NULL;
  2970. }
  2971. else {
  2972. AddressIsPhysical = FALSE;
  2973. PointerPte = MiGetPteAddress(Va);
  2974. ASSERT (PointerPte->u.Hard.Valid == 1);
  2975. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  2976. }
  2977. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  2978. ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
  2979. ASSERT (Pfn1->u3.e1.StartOfAllocation == 0);
  2980. Pfn1->u3.e1.StartOfAllocation = 1;
  2981. //
  2982. // Calculate the ending PFN address, note that since
  2983. // these pages are contiguous, just add to the PFN.
  2984. //
  2985. Pfn1 += SizeInPages - 1;
  2986. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  2987. ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
  2988. ASSERT (Pfn1->u3.e1.EndOfAllocation == 0);
  2989. Pfn1->u3.e1.EndOfAllocation = 1;
  2990. if (SizeInPages == FreePageInfo->Size) {
  2991. //
  2992. // Unlock the pool and return.
  2993. //
  2994. BaseAddress = (PVOID)Va;
  2995. ExUnlockPool (NonPagedPool, OldIrql);
  2996. goto Done;
  2997. }
  2998. BaseAddress = NULL;
  2999. if (AllocationPosition != 2) {
  3000. //
  3001. // The end piece needs to be freed as the removal
  3002. // came from the front or the middle.
  3003. //
  3004. BaseAddress = (PVOID)((PCHAR)Va + (SizeInPages << PAGE_SHIFT));
  3005. SpanInPages = FreePageInfo->Size - SizeInPages -
  3006. (((ULONG_PTR)Va - (ULONG_PTR)Entry) >> PAGE_SHIFT);
  3007. //
  3008. // Mark start and end of the allocation in the PFN database.
  3009. //
  3010. if (AddressIsPhysical == TRUE) {
  3011. //
  3012. // On certain architectures, virtual addresses
  3013. // may be physical and hence have no corresponding PTE.
  3014. //
  3015. Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (BaseAddress));
  3016. }
  3017. else {
  3018. PointerPte = MiGetPteAddress(BaseAddress);
  3019. ASSERT (PointerPte->u.Hard.Valid == 1);
  3020. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  3021. }
  3022. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  3023. ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
  3024. ASSERT (Pfn1->u3.e1.StartOfAllocation == 0);
  3025. Pfn1->u3.e1.StartOfAllocation = 1;
  3026. //
// Calculate the ending PTE's address; we can't depend on
// these pages being physically contiguous.
  3029. //
  3030. if (AddressIsPhysical == TRUE) {
  3031. Pfn1 += (SpanInPages - 1);
  3032. }
  3033. else {
  3034. PointerPte += (SpanInPages - 1);
  3035. ASSERT (PointerPte->u.Hard.Valid == 1);
  3036. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  3037. }
  3038. ASSERT (Pfn1->u3.e1.EndOfAllocation == 0);
  3039. Pfn1->u3.e1.EndOfAllocation = 1;
  3040. ASSERT (((ULONG_PTR)BaseAddress & (PAGE_SIZE -1)) == 0);
  3041. SpanInPages2 = SpanInPages;
  3042. }
  3043. BaseAddress2 = BaseAddress;
  3044. BaseAddress = NULL;
  3045. if (AllocationPosition != 0) {
  3046. //
  3047. // The front piece needs to be freed as the removal
  3048. // came from the middle or the end.
  3049. //
  3050. BaseAddress = (PVOID)Entry;
  3051. SpanInPages = ((ULONG_PTR)Va - (ULONG_PTR)Entry) >> PAGE_SHIFT;
  3052. //
  3053. // Mark start and end of the allocation in the PFN database.
  3054. //
  3055. if (AddressIsPhysical == TRUE) {
  3056. //
  3057. // On certain architectures, virtual addresses
  3058. // may be physical and hence have no corresponding PTE.
  3059. //
  3060. Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (BaseAddress));
  3061. }
  3062. else {
  3063. PointerPte = MiGetPteAddress(BaseAddress);
  3064. ASSERT (PointerPte->u.Hard.Valid == 1);
  3065. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  3066. }
  3067. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  3068. ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
  3069. ASSERT (Pfn1->u3.e1.StartOfAllocation == 0);
  3070. Pfn1->u3.e1.StartOfAllocation = 1;
  3071. //
// Calculate the ending PTE's address; we can't depend on
// these pages being physically contiguous.
  3074. //
  3075. if (AddressIsPhysical == TRUE) {
  3076. Pfn1 += (SpanInPages - 1);
  3077. }
  3078. else {
  3079. PointerPte += (SpanInPages - 1);
  3080. ASSERT (PointerPte->u.Hard.Valid == 1);
  3081. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  3082. }
  3083. ASSERT (Pfn1->u3.e1.EndOfAllocation == 0);
  3084. Pfn1->u3.e1.EndOfAllocation = 1;
  3085. ASSERT (((ULONG_PTR)BaseAddress & (PAGE_SIZE -1)) == 0);
  3086. }
  3087. //
  3088. // Unlock the pool.
  3089. //
  3090. ExUnlockPool (NonPagedPool, OldIrql);
  3091. //
  3092. // Free the split entry at BaseAddress back into the pool.
  3093. // Note that we have overcharged the pool - the entire free
  3094. // chunk has been billed. Here we return the piece we
  3095. // didn't use and correct the momentary overbilling.
  3096. //
  3097. // The start and end allocation bits of this split entry
  3098. // which we just set up enable ExFreePool and his callees
  3099. // to correctly adjust the billing.
  3100. //
  3101. if (BaseAddress) {
  3102. ExInsertPoolTag ('tnoC',
  3103. BaseAddress,
  3104. SpanInPages << PAGE_SHIFT,
  3105. NonPagedPool);
  3106. ExFreePool (BaseAddress);
  3107. }
  3108. if (BaseAddress2) {
  3109. ExInsertPoolTag ('tnoC',
  3110. BaseAddress2,
  3111. SpanInPages2 << PAGE_SHIFT,
  3112. NonPagedPool);
  3113. ExFreePool (BaseAddress2);
  3114. }
  3115. BaseAddress = Va;
  3116. goto Done;
  3117. }
  3118. }
  3119. Entry = FreePageInfo->List.Flink;
  3120. if (MmProtectFreedNonPagedPool == TRUE) {
  3121. MiProtectFreeNonPagedPool ((PVOID)FreePageInfo,
  3122. (ULONG)FreePageInfo->Size);
  3123. }
  3124. }
  3125. Index += 1;
  3126. }
  3127. //
  3128. // No entry was found in free nonpaged pool that meets the requirements.
  3129. //
  3130. ExUnlockPool (NonPagedPool, OldIrql);
  3131. Done:
  3132. MmUnlockPagableImageSection (ExPageLockHandle);
  3133. if (BaseAddress) {
  3134. MiInsertContiguousTag (BaseAddress,
  3135. SizeInPages << PAGE_SHIFT,
  3136. CallingAddress);
  3137. ExInsertPoolTag ('tnoC',
  3138. BaseAddress,
  3139. SizeInPages << PAGE_SHIFT,
  3140. NonPagedPool);
  3141. }
  3142. return BaseAddress;
  3143. }
PVOID
MiFindContiguousMemory (
    IN PFN_NUMBER LowestPfn,
    IN PFN_NUMBER HighestPfn,
    IN PFN_NUMBER BoundaryPfn,
    IN PFN_NUMBER SizeInPages,
    IN MEMORY_CACHING_TYPE CacheType,
    IN PVOID CallingAddress
    )

/*++

Routine Description:

    This function searches nonpaged pool and the free, zeroed,
    and standby lists for contiguous pages that satisfy the
    request.

Arguments:

    LowestPfn - Supplies the lowest acceptable physical page number.

    HighestPfn - Supplies the highest acceptable physical page number.

    BoundaryPfn - Supplies the page frame number multiple the allocation must
                  not cross.  0 indicates it can cross any boundary.

    SizeInPages - Supplies the number of pages to allocate.

    CacheType - Supplies the type of cache mapping that will be used for the
                memory.

    CallingAddress - Supplies the calling address of the allocator.

Return Value:

    NULL - a contiguous range could not be found to satisfy the request.

    NON-NULL - Returns a pointer (virtual address in the nonpaged portion
               of the system) to the allocated physically contiguous
               memory.

Environment:

    Kernel mode, IRQL of APC_LEVEL or below.

--*/

{
    PMMPTE PointerPte;
    PMMPTE DummyPte;
    PMMPFN StartPfn;
    PMMPFN Pfn1;
    PVOID BaseAddress;
    KIRQL OldIrql;
    ULONG start;
    PFN_NUMBER i;
    PFN_NUMBER count;
    PFN_NUMBER Page;
    PFN_NUMBER LastPage;
    PFN_NUMBER found;
    PFN_NUMBER BoundaryMask;
    PFN_NUMBER StartPage;
    PHYSICAL_ADDRESS PhysicalAddress;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    PAGED_CODE ();

    CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, 0);
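
    //
    // Nonpaged pool is mapped fully cached, so an existing pool range can
    // only be handed out when the caller also wants a cached mapping -
    // otherwise the same physical page would end up mapped with
    // conflicting cache attributes.
    //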
    if (CacheAttribute == MiCached) {

        BaseAddress = MiFindContiguousMemoryInPool (LowestPfn,
                                                    HighestPfn,
                                                    BoundaryPfn,
                                                    SizeInPages,
                                                    CallingAddress);

        //
        // An existing range of nonpaged pool satisfies the requirements
        // so return it now.
        //

        if (BaseAddress != NULL) {
            return BaseAddress;
        }
    }

    BaseAddress = NULL;

    BoundaryMask = ~(BoundaryPfn - 1);
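
    //
    // Note the mask arithmetic assumes BoundaryPfn is a power of two (or
    // zero, in which case the mask is never consulted because the check
    // below is guarded by BoundaryPfn != 0).  For example (illustrative
    // value): BoundaryPfn == 0x10 yields a mask of 0xFFFFFFF0, selecting
    // the bits that must not change across the run.
    //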

    //
    // A suitable pool page was not allocated via the pool allocator.
    // Grab the pool lock and manually search for a page which meets
    // the requirements.
    //

    MmLockPagableSectionByHandle (ExPageLockHandle);

    ExAcquireFastMutex (&MmDynamicMemoryMutex);

    //
    // Charge commitment.
    //
    // Then search the PFN database for pages that meet the requirements.
    //

    if (MiChargeCommitmentCantExpand (SizeInPages, FALSE) == FALSE) {
        goto Done;
    }

    start = 0;

    //
    // Charge resident available pages.
    //

    LOCK_PFN (OldIrql);

    MiDeferredUnlockPages (MI_DEFER_PFN_HELD);

    if ((SPFN_NUMBER)SizeInPages > MI_NONPAGABLE_MEMORY_AVAILABLE()) {
        UNLOCK_PFN (OldIrql);
        goto Done1;
    }

    //
    // Systems utilizing memory compression may have more
    // pages on the zero, free and standby lists than we
    // want to give out.  Explicitly check MmAvailablePages
    // instead (and recheck whenever the PFN lock is released
    // and reacquired).
    //

    if (SizeInPages > MmAvailablePages) {
        UNLOCK_PFN (OldIrql);
        goto Done1;
    }

    MmResidentAvailablePages -= SizeInPages;
    MM_BUMP_COUNTER(3, SizeInPages);

    UNLOCK_PFN (OldIrql);

    do {

        count = MmPhysicalMemoryBlock->Run[start].PageCount;
        Page = MmPhysicalMemoryBlock->Run[start].BasePage;

        //
        // Close the gaps, then examine the range for a fit.
        //

        LastPage = Page + count;

        if (LastPage - 1 > HighestPfn) {
            LastPage = HighestPfn + 1;
        }

        if (Page < LowestPfn) {
            Page = LowestPfn;
        }

        if ((count != 0) && (Page + SizeInPages <= LastPage)) {

            //
            // A fit may be possible in this run, check whether the pages
            // are on the right list.
            //

            found = 0;
            i = 0;

            Pfn1 = MI_PFN_ELEMENT (Page);
            LOCK_PFN (OldIrql);
            do {

                if ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
                    (Pfn1->u1.Flink != 0) &&
                    (Pfn1->u2.Blink != 0) &&
                    (Pfn1->u3.e2.ReferenceCount == 0) &&
                    ((CacheAttribute == MiCached) || (!MI_PAGE_FRAME_INDEX_MUST_BE_CACHED (Page)))) {

                    //
                    // Before starting a new run, ensure that it
                    // can satisfy the boundary requirements (if any).
                    //
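                    // The test below XORs the first and last frame numbers
                    // of the prospective run; any difference in the bits
                    // covered by BoundaryMask means the run straddles a
                    // BoundaryPfn multiple.  For example (illustrative
                    // values): BoundaryPfn == 0x10, Page == 0x0E and
                    // SizeInPages == 4 gives 0x0E ^ 0x11 == 0x1F, which
                    // intersects the 0xFFFFFFF0 mask, so the run crosses
                    // a boundary and is rejected.
                    //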
                    if ((found == 0) && (BoundaryPfn != 0)) {
                        if (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask) != 0) {

                            //
                            // This run's physical address does not meet the
                            // requirements.
                            //

                            goto NextPage;
                        }
                    }

                    found += 1;

                    if (found == SizeInPages) {

                        //
                        // A match has been found, remove these pages,
                        // map them and return.
                        //

                        //
                        // Systems utilizing memory compression may have more
                        // pages on the zero, free and standby lists than we
                        // want to give out.  Explicitly check MmAvailablePages
                        // instead (and recheck whenever the PFN lock is
                        // released and reacquired).
                        //

                        if (MmAvailablePages < SizeInPages) {
                            goto Failed;
                        }
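
                        //
                        // Page currently names the last frame of the run,
                        // so rewind to the first frame before unlinking
                        // the pages below.
                        //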
                        Page = 1 + Page - found;
                        StartPage = Page;

                        MM_TRACK_COMMIT (MM_DBG_COMMIT_CONTIGUOUS_PAGES, SizeInPages);

                        StartPfn = MI_PFN_ELEMENT (Page);
                        Pfn1 = StartPfn - 1;

                        DummyPte = MiGetPteAddress (MmNonPagedPoolExpansionStart);

                        do {
                            Pfn1 += 1;

                            if (Pfn1->u3.e1.PageLocation == StandbyPageList) {
                                MiUnlinkPageFromList (Pfn1);
                                MiRestoreTransitionPte (Page);
                            }
                            else {
                                MiUnlinkFreeOrZeroedPage (Page);
                            }

                            Pfn1->u3.e2.ReferenceCount = 1;
                            Pfn1->u2.ShareCount = 1;
                            Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
                            Pfn1->u3.e1.PageLocation = ActiveAndValid;
                            Pfn1->u3.e1.CacheAttribute = CacheAttribute;
                            Pfn1->u3.e1.StartOfAllocation = 0;
                            Pfn1->u3.e1.EndOfAllocation = 0;
                            Pfn1->u4.VerifierAllocation = 0;
                            Pfn1->u3.e1.LargeSessionAllocation = 0;
                            Pfn1->u3.e1.PrototypePte = 0;

                            //
                            // Initialize PteAddress so an MiIdentifyPfn scan
                            // won't crash.  The real value is put in after the
                            // loop.
                            //

                            Pfn1->PteAddress = DummyPte;

                            Page += 1;
                            found -= 1;
                        } while (found != 0);

                        StartPfn->u3.e1.StartOfAllocation = 1;
                        Pfn1->u3.e1.EndOfAllocation = 1;

                        UNLOCK_PFN (OldIrql);

                        LastPage = StartPage + SizeInPages;

                        PhysicalAddress.QuadPart = StartPage;
                        PhysicalAddress.QuadPart = PhysicalAddress.QuadPart << PAGE_SHIFT;

                        BaseAddress = MmMapIoSpace (PhysicalAddress,
                                                    SizeInPages << PAGE_SHIFT,
                                                    CacheType);

                        if (BaseAddress == NULL) {

                            //
                            // Release the actual pages.
                            //

                            LOCK_PFN2 (OldIrql);

                            StartPfn->u3.e1.StartOfAllocation = 0;
                            Pfn1->u3.e1.EndOfAllocation = 0;

                            do {
                                MI_SET_PFN_DELETED (StartPfn);
                                MiDecrementShareCount (StartPage);
                                StartPage += 1;
                                StartPfn += 1;
                            } while (StartPage < LastPage);

                            UNLOCK_PFN2 (OldIrql);

                            goto Failed;
                        }
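
                        //
                        // The mapping succeeded.  Point each PFN entry at
                        // its final PTE and record the page table frame so
                        // the PFN database fully describes the new mapping.
                        //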
                        PointerPte = MiGetPteAddress (BaseAddress);

                        do {
                            StartPfn->PteAddress = PointerPte;
                            StartPfn->u4.PteFrame = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress (PointerPte));
                            StartPfn += 1;
                            PointerPte += 1;
                        } while (StartPfn <= Pfn1);

                        goto Done;
                    }
                }
                else {
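
                    //
                    // This page cannot extend the current candidate run
                    // (wrong list, nonzero reference count or incompatible
                    // cache attribute), so the run length resets to zero
                    // and the search continues at the next page.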
                    //
                    // Nothing magic about the divisor here - just releasing
                    // the PFN lock periodically to give other processors
                    // and DPCs a chance to execute.
                    //

                    i += 1;

                    if ((i & 0xFFFF) == 0) {
                        UNLOCK_PFN (OldIrql);
                        found = 0;
                        LOCK_PFN (OldIrql);
                    }
                    else {
                        found = 0;
                    }
                }

NextPage:

                Page += 1;
                Pfn1 += 1;

            } while (Page < LastPage);

            UNLOCK_PFN (OldIrql);
        }

        start += 1;

    } while (start != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // The desired physical pages could not be allocated, so return the
    // resident available page and commitment charges taken above.
    //

Failed:

    ASSERT (BaseAddress == NULL);

    LOCK_PFN (OldIrql);

    MmResidentAvailablePages += SizeInPages;
    MM_BUMP_COUNTER(32, SizeInPages);

    UNLOCK_PFN (OldIrql);

Done1:

    MiReturnCommitment (SizeInPages);

Done:

    ExReleaseFastMutex (&MmDynamicMemoryMutex);

    MmUnlockPagableImageSection (ExPageLockHandle);

    if (BaseAddress != NULL) {
        MiInsertContiguousTag (BaseAddress,
                               SizeInPages << PAGE_SHIFT,
                               CallingAddress);
    }

    return BaseAddress;
}
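
//
// Illustrative (non-compiled) sketch only - not part of the original
// source.  It shows how a driver-level caller might exercise the search
// above through the documented MmAllocateContiguousMemorySpecifyCache
// interface, which funnels such requests into MiFindContiguousMemory.
// The 64KB size, 16MB ceiling and 64KB boundary are arbitrary example
// values.
//
#if 0
VOID
MiContiguousAllocationExample (
    VOID
    )
{
    PVOID Va;
    PHYSICAL_ADDRESS Lowest;
    PHYSICAL_ADDRESS Highest;
    PHYSICAL_ADDRESS Boundary;

    Lowest.QuadPart = 0;                    // any frame is acceptable ...
    Highest.QuadPart = 0xFFFFFF;            // ... below 16MB
    Boundary.QuadPart = 0x10000;            // must not cross a 64KB multiple

    Va = MmAllocateContiguousMemorySpecifyCache (0x10000,
                                                 Lowest,
                                                 Highest,
                                                 Boundary,
                                                 MmCached);

    if (Va != NULL) {

        //
        // ... use the physically contiguous buffer, then release it.
        //

        MmFreeContiguousMemorySpecifyCache (Va, 0x10000, MmCached);
    }
}
#endif
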
LOGICAL
MmIsSessionAddress (
    IN PVOID VirtualAddress
    )

/*++

Routine Description:

    This function returns TRUE if a session address is specified.
    FALSE is returned if not.

Arguments:

    VirtualAddress - Supplies the address in question.

Return Value:

    See above.

Environment:

    Kernel mode.

--*/

{
    return MI_IS_SESSION_ADDRESS (VirtualAddress);
}