Source code of Windows XP (NT5)


/*++
Copyright (c) 1989-1994 Microsoft Corporation
Module Name:
pool.c
Abstract:
This module implements the NT executive pool allocator.
Author:
Mark Lucovsky 16-Feb-1989
Lou Perazzoli 31-Aug-1991 (change from binary buddy)
David N. Cutler (davec) 27-May-1994
Landy Wang 17-Oct-1997
Environment:
Kernel mode only
Revision History:
--*/
#include "exp.h"
#pragma hdrstop
#undef ExAllocatePoolWithTag
#undef ExAllocatePool
#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag
#undef ExFreePool
#undef ExFreePoolWithTag
#if defined (_WIN64)
#define POOL_QUOTA_ENABLED (TRUE)
#else
#define POOL_QUOTA_ENABLED (PoolTrackTable == NULL)
#endif
//
// These bitfield definitions are based on EX_POOL_PRIORITY in inc\ex.h.
//
#define POOL_SPECIAL_POOL_BIT 0x8
#define POOL_SPECIAL_POOL_UNDERRUN_BIT 0x1
//
// We redefine the LIST_ENTRY macros to have each pointer biased
// by one so any rogue code using these pointers will access
// violate. See \nt\public\sdk\inc\ntrtl.h for the original
// definition of these macros.
//
// This is turned off in the shipping product.
//
#ifndef NO_POOL_CHECKS
#define DecodeLink(Link) ((PLIST_ENTRY)((ULONG_PTR)(Link) & ~1))
#define EncodeLink(Link) ((PLIST_ENTRY)((ULONG_PTR)(Link) | 1))
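//
// Editor's note - an illustrative sketch, not part of the original
// source: pool list links are always at least pointer-aligned, so the
// low bit of a real pointer is free to act as the bias. For example:
//
//     PLIST_ENTRY Flink = ListHead->Flink;        // encoded, e.g. 0x80001001
//     PLIST_ENTRY Real = DecodeLink (Flink);      // decoded, 0x80001000
//
// Code that forgets to decode follows the odd (misaligned) pointer and,
// as the comment above says, faults instead of silently walking and
// corrupting the list.
//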
#define PrivateInitializeListHead(ListHead) ( \
(ListHead)->Flink = (ListHead)->Blink = EncodeLink(ListHead))
#define PrivateIsListEmpty(ListHead) \
(DecodeLink((ListHead)->Flink) == (ListHead))
#define PrivateRemoveHeadList(ListHead) \
DecodeLink((ListHead)->Flink); \
{PrivateRemoveEntryList(DecodeLink((ListHead)->Flink))}
#define PrivateRemoveTailList(ListHead) \
DecodeLink((ListHead)->Blink); \
{PrivateRemoveEntryList(DecodeLink((ListHead)->Blink))}
#define PrivateRemoveEntryList(Entry) { \
PLIST_ENTRY _EX_Blink; \
PLIST_ENTRY _EX_Flink; \
_EX_Flink = DecodeLink((Entry)->Flink); \
_EX_Blink = DecodeLink((Entry)->Blink); \
_EX_Blink->Flink = EncodeLink(_EX_Flink); \
_EX_Flink->Blink = EncodeLink(_EX_Blink); \
}
#define CHECK_LIST(LIST) \
if ((DecodeLink(DecodeLink((LIST)->Flink)->Blink) != (LIST)) || \
(DecodeLink(DecodeLink((LIST)->Blink)->Flink) != (LIST))) { \
KeBugCheckEx (BAD_POOL_HEADER, \
3, \
(ULONG_PTR)LIST, \
(ULONG_PTR)DecodeLink(DecodeLink((LIST)->Flink)->Blink), \
(ULONG_PTR)DecodeLink(DecodeLink((LIST)->Blink)->Flink)); \
}
#define PrivateInsertTailList(ListHead,Entry) { \
PLIST_ENTRY _EX_Blink; \
PLIST_ENTRY _EX_ListHead; \
_EX_ListHead = (ListHead); \
CHECK_LIST(_EX_ListHead); \
_EX_Blink = DecodeLink(_EX_ListHead->Blink); \
(Entry)->Flink = EncodeLink(_EX_ListHead); \
(Entry)->Blink = EncodeLink(_EX_Blink); \
_EX_Blink->Flink = EncodeLink(Entry); \
_EX_ListHead->Blink = EncodeLink(Entry); \
CHECK_LIST(_EX_ListHead); \
}
#define PrivateInsertHeadList(ListHead,Entry) { \
PLIST_ENTRY _EX_Flink; \
PLIST_ENTRY _EX_ListHead; \
_EX_ListHead = (ListHead); \
CHECK_LIST(_EX_ListHead); \
_EX_Flink = DecodeLink(_EX_ListHead->Flink); \
(Entry)->Flink = EncodeLink(_EX_Flink); \
(Entry)->Blink = EncodeLink(_EX_ListHead); \
_EX_Flink->Blink = EncodeLink(Entry); \
_EX_ListHead->Flink = EncodeLink(Entry); \
CHECK_LIST(_EX_ListHead); \
}
#define CHECK_POOL_HEADER(LINE,ENTRY) { \
PPOOL_HEADER PreviousEntry; \
PPOOL_HEADER NextEntry; \
if ((ENTRY)->PreviousSize != 0) { \
PreviousEntry = (PPOOL_HEADER)((PPOOL_BLOCK)(ENTRY) - (ENTRY)->PreviousSize); \
if ((PreviousEntry->BlockSize != (ENTRY)->PreviousSize) || \
(DECODE_POOL_INDEX(PreviousEntry) != DECODE_POOL_INDEX(ENTRY))) { \
KeBugCheckEx(BAD_POOL_HEADER, 5, (ULONG_PTR)PreviousEntry, LINE, (ULONG_PTR)ENTRY); \
} \
} \
NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)(ENTRY) + (ENTRY)->BlockSize); \
if (!PAGE_END(NextEntry)) { \
if ((NextEntry->PreviousSize != (ENTRY)->BlockSize) || \
(DECODE_POOL_INDEX(NextEntry) != DECODE_POOL_INDEX(ENTRY))) { \
KeBugCheckEx(BAD_POOL_HEADER, 5, (ULONG_PTR)NextEntry, LINE, (ULONG_PTR)ENTRY); \
} \
} \
}
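//
// Editor's note (illustrative): CHECK_POOL_HEADER depends on the
// invariant that the blocks in a pool page tile it exactly. For three
// consecutive blocks A, B and C in one page this means:
//
//     B->PreviousSize == A->BlockSize
//     C->PreviousSize == B->BlockSize
//     DECODE_POOL_INDEX(A) == DECODE_POOL_INDEX(B) == DECODE_POOL_INDEX(C)
//
// A corrupted size or index in either neighbor breaks one of these
// equalities and raises the BAD_POOL_HEADER (code 5) bugcheck above.
//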
#define ASSERT_ALLOCATE_IRQL(_PoolType, _NumberOfBytes) \
if ((_PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { \
if (KeGetCurrentIrql() > APC_LEVEL) { \
KeBugCheckEx (BAD_POOL_CALLER, 8, KeGetCurrentIrql(), _PoolType, _NumberOfBytes); \
} \
} \
else { \
if (KeGetCurrentIrql() > DISPATCH_LEVEL) { \
KeBugCheckEx (BAD_POOL_CALLER, 8, KeGetCurrentIrql(), _PoolType, _NumberOfBytes); \
} \
}
#define ASSERT_FREE_IRQL(_PoolType, _P) \
if ((_PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { \
if (KeGetCurrentIrql() > APC_LEVEL) { \
KeBugCheckEx (BAD_POOL_CALLER, 9, KeGetCurrentIrql(), _PoolType, (ULONG_PTR)_P); \
} \
} \
else { \
if (KeGetCurrentIrql() > DISPATCH_LEVEL) { \
KeBugCheckEx (BAD_POOL_CALLER, 9, KeGetCurrentIrql(), _PoolType, (ULONG_PTR)_P); \
} \
}
#define ASSERT_POOL_NOT_FREE(_Entry) \
if ((_Entry->PoolType & POOL_TYPE_MASK) == 0) { \
KeBugCheckEx (BAD_POOL_CALLER, 6, __LINE__, (ULONG_PTR)_Entry, _Entry->Ulong1); \
}
#define ASSERT_POOL_TYPE_NOT_ZERO(_Entry) \
if (_Entry->PoolType == 0) { \
KeBugCheckEx(BAD_POOL_CALLER, 1, (ULONG_PTR)_Entry, (ULONG_PTR)(*(PULONG)_Entry), 0); \
}
#define CHECK_LOOKASIDE_LIST(LINE,LIST,ENTRY) {NOTHING;}
#else
#define DecodeLink(Link) ((PLIST_ENTRY)((ULONG_PTR)(Link)))
#define EncodeLink(Link) ((PLIST_ENTRY)((ULONG_PTR)(Link)))
#define PrivateInitializeListHead InitializeListHead
#define PrivateIsListEmpty IsListEmpty
#define PrivateRemoveHeadList RemoveHeadList
#define PrivateRemoveTailList RemoveTailList
#define PrivateRemoveEntryList RemoveEntryList
#define PrivateInsertTailList InsertTailList
#define PrivateInsertHeadList InsertHeadList
#define ASSERT_ALLOCATE_IRQL(_PoolType, _P) {NOTHING;}
#define ASSERT_FREE_IRQL(_PoolType, _P) {NOTHING;}
#define ASSERT_POOL_NOT_FREE(_Entry) {NOTHING;}
#define ASSERT_POOL_TYPE_NOT_ZERO(_Entry) {NOTHING;}
//
// The check list macros come in two flavors - there is one in the checked
// and free build that will bugcheck the system if a list is ill-formed, and
// there is one for the final shipping version that has all the checking
// disabled.
//
// The check lookaside list macros also come in two flavors and are used to
// verify that the lookaside lists are well formed.
//
// The check pool header macro (two flavors) verifies that the specified
// pool header matches the preceding and succeeding pool headers.
//
#define CHECK_LIST(LIST) {NOTHING;}
#define CHECK_POOL_HEADER(LINE,ENTRY) {NOTHING;}
#define CHECK_LOOKASIDE_LIST(LINE,LIST,ENTRY) {NOTHING;}
#define CHECK_POOL_PAGE(PAGE) \
{ \
PPOOL_HEADER P = (PPOOL_HEADER)(((ULONG_PTR)(PAGE)) & ~(PAGE_SIZE-1)); \
ULONG SIZE, LSIZE; \
LOGICAL FOUND=FALSE; \
LSIZE = 0; \
SIZE = 0; \
do { \
if (P == (PPOOL_HEADER)PAGE) { \
FOUND = TRUE; \
} \
if (P->PreviousSize != LSIZE) { \
DbgPrint("POOL: Inconsistent size: ( %lx) - %lx->%u != %u\n",\
PAGE, P, P->PreviousSize, LSIZE); \
DbgBreakPoint(); \
} \
LSIZE = P->BlockSize; \
SIZE += LSIZE; \
P = (PPOOL_HEADER)((PPOOL_BLOCK)P + LSIZE); \
} while ((SIZE < (PAGE_SIZE / POOL_SMALLEST_BLOCK)) && \
(PAGE_END(P) == FALSE)); \
if ((PAGE_END(P) == FALSE) || (FOUND == FALSE)) { \
DbgPrint("POOL: Inconsistent page: %lx\n",P); \
DbgBreakPoint(); \
} \
}
#endif
//
// Define forward referenced function prototypes.
//
NTSTATUS
ExpSnapShotPoolPages (
IN PVOID Address,
IN ULONG Size,
IN OUT PSYSTEM_POOL_INFORMATION PoolInformation,
IN OUT PSYSTEM_POOL_ENTRY *PoolEntryInfo,
IN ULONG Length,
IN OUT PULONG RequiredLength
);
#ifdef ALLOC_PRAGMA
PVOID
ExpAllocateStringRoutine (
IN SIZE_T NumberOfBytes
);
VOID
ExDeferredFreePool (
IN PPOOL_DESCRIPTOR PoolDesc
);
#pragma alloc_text(PAGE, ExpAllocateStringRoutine)
#pragma alloc_text(INIT, InitializePool)
#pragma alloc_text(PAGE, ExInitializePoolDescriptor)
#pragma alloc_text(PAGEVRFY, ExAllocatePoolSanityChecks)
#pragma alloc_text(PAGEVRFY, ExFreePoolSanityChecks)
#pragma alloc_text(POOLCODE, ExAllocatePoolWithTag)
#pragma alloc_text(POOLCODE, ExFreePool)
#pragma alloc_text(POOLCODE, ExFreePoolWithTag)
#pragma alloc_text(POOLCODE, ExDeferredFreePool)
#if DBG
#pragma alloc_text(PAGELK, ExSnapShotPool)
#pragma alloc_text(PAGELK, ExpSnapShotPoolPages)
#endif
#endif
#if defined (NT_UP)
#define USING_HOT_COLD_METRICS (ExpPoolFlags & EX_SEPARATE_HOT_PAGES_DURING_BOOT)
#else
#define USING_HOT_COLD_METRICS 0
#endif
#define EXP_MAXIMUM_POOL_FREES_PENDING 128
#define MAX_TRACKER_TABLE 1025
#define MAX_BIGPAGE_TABLE 4096
PPOOL_DESCRIPTOR ExpSessionPoolDescriptor;
ULONG FirstPrint;
#if defined (NT_UP)
KDPC ExpBootFinishedTimerDpc;
KTIMER ExpBootFinishedTimer;
VOID
ExpBootFinishedDispatch (
IN PKDPC Dpc,
IN PVOID DeferredContext,
IN PVOID SystemArgument1,
IN PVOID SystemArgument2
);
#endif
PPOOL_TRACKER_TABLE PoolTrackTable;
SIZE_T PoolTrackTableSize;
SIZE_T PoolTrackTableMask;
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
SIZE_T PoolBigPageTableSize;
SIZE_T PoolBigPageTableHash;
#define POOL_BIG_TABLE_ENTRY_FREE 0x1
ULONG PoolHitTag = 0xffffff0f;
#define POOLTAG_HASH(Key) ((40543*((((((((PUCHAR)&Key)[0]<<2)^((PUCHAR)&Key)[1])<<2)^((PUCHAR)&Key)[2])<<2)^((PUCHAR)&Key)[3]))>>2)
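//
// Editor's note (illustrative): POOLTAG_HASH mixes the four tag bytes,
// shifting the accumulator left two bits per byte, then scales by the
// constant 40543 and drops the low two bits. Expanded for bytes b0..b3:
//
//     h = (((((b0 << 2) ^ b1) << 2) ^ b2) << 2) ^ b3;
//     hash = (40543 * h) >> 2;
//
// Callers are expected to reduce the result with a table mask (e.g.
// PoolTrackTableMask or PoolBigPageTableHash) to form a table index.
//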
VOID
ExpInsertPoolTracker (
IN ULONG Key,
IN SIZE_T Size,
IN POOL_TYPE PoolType
);
VOID
ExpRemovePoolTracker (
IN ULONG Key,
IN ULONG Size,
IN POOL_TYPE PoolType
);
LOGICAL
ExpAddTagForBigPages (
IN PVOID Va,
IN ULONG Key,
IN ULONG NumberOfPages
);
ULONG
ExpFindAndRemoveTagBigPages (
IN PVOID Va,
IN PULONG BigPages
);
PVOID
ExpAllocateStringRoutine(
IN SIZE_T NumberOfBytes
)
{
return ExAllocatePoolWithTag(PagedPool,NumberOfBytes,'grtS');
}
BOOLEAN
ExOkayToLockRoutine(
IN PVOID Lock
)
{
UNREFERENCED_PARAMETER (Lock);
if (KeIsExecutingDpc()) {
return FALSE;
}
else {
return TRUE;
}
}
#ifdef ALLOC_DATA_PRAGMA
#pragma const_seg("PAGECONST")
#endif
const PRTL_ALLOCATE_STRING_ROUTINE RtlAllocateStringRoutine = ExpAllocateStringRoutine;
const PRTL_FREE_STRING_ROUTINE RtlFreeStringRoutine = (PRTL_FREE_STRING_ROUTINE)ExFreePool;
#ifdef ALLOC_DATA_PRAGMA
#pragma const_seg()
#endif
ULONG ExPoolFailures;
//
// Define macros to pack and unpack a pool index.
//
#define ENCODE_POOL_INDEX(POOLHEADER,INDEX) {(POOLHEADER)->PoolIndex = ((UCHAR)(INDEX));}
#define DECODE_POOL_INDEX(POOLHEADER) ((ULONG)((POOLHEADER)->PoolIndex))
//
// The allocated bit carefully overlays the unused cachealign bit in the type.
//
#define POOL_IN_USE_MASK 0x4
#define MARK_POOL_HEADER_FREED(POOLHEADER) {(POOLHEADER)->PoolType &= ~POOL_IN_USE_MASK;}
#define IS_POOL_HEADER_MARKED_ALLOCATED(POOLHEADER) ((POOLHEADER)->PoolType & POOL_IN_USE_MASK)
//
// The hotpage bit carefully overlays the raise bit in the type.
//
#define POOL_HOTPAGE_MASK POOL_RAISE_IF_ALLOCATION_FAILURE
//
// Define the number of paged pools. This value may be overridden at boot
// time.
//
ULONG ExpNumberOfPagedPools = NUMBER_OF_PAGED_POOLS;
ULONG ExpNumberOfNonPagedPools = 1;
//
// The pool descriptor for nonpaged pool is static.
// The pool descriptors for paged pool are dynamically allocated
// since there can be more than one paged pool. There is always one more
// paged pool descriptor than there are paged pools. This descriptor is
// used when a page allocation is done for a paged pool and is the first
// descriptor in the paged pool descriptor array.
//
POOL_DESCRIPTOR NonPagedPoolDescriptor;
#define EXP_MAXIMUM_POOL_NODES 16
PPOOL_DESCRIPTOR ExpNonPagedPoolDescriptor[EXP_MAXIMUM_POOL_NODES];
//
// The pool vector contains an array of pointers to pool descriptors. For
// nonpaged pool this is just a pointer to the nonpaged pool descriptor.
// For paged pool, this is a pointer to an array of pool descriptors.
// The pointer to the paged pool descriptor is duplicated so
// it can be found easily by the kernel debugger.
//
PPOOL_DESCRIPTOR PoolVector[NUMBER_OF_POOLS];
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[EXP_MAXIMUM_POOL_NODES];
PFAST_MUTEX ExpPagedPoolMutex;
volatile ULONG ExpPoolIndex = 1;
KSPIN_LOCK ExpTaggedPoolLock;
#if DBG
PSZ PoolTypeNames[MaxPoolType] = {
"NonPaged",
"Paged",
"NonPagedMustSucceed",
"NotUsed",
"NonPagedCacheAligned",
"PagedCacheAligned",
"NonPagedCacheAlignedMustS"
};
#endif //DBG
//
// Define paged and nonpaged pool lookaside descriptors.
//
GENERAL_LOOKASIDE ExpSmallNPagedPoolLookasideLists[POOL_SMALL_LISTS];
GENERAL_LOOKASIDE ExpSmallPagedPoolLookasideLists[POOL_SMALL_LISTS];
//
// LOCK_POOL is only used within this module.
//
#define ExpLockNonPagedPool(OldIrql) \
OldIrql = KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock)
#define ExpUnlockNonPagedPool(OldIrql) \
KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql)
#define LOCK_POOL(PoolDesc, LockHandle) { \
if ((PoolDesc->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { \
if (PoolDesc == &NonPagedPoolDescriptor) { \
ExpLockNonPagedPool (LockHandle.OldIrql); \
} \
else { \
ASSERT (ExpNumberOfNonPagedPools > 1); \
KeAcquireInStackQueuedSpinLock (PoolDesc->LockAddress, &LockHandle); \
} \
} \
else { \
ExAcquireFastMutex ((PFAST_MUTEX)PoolDesc->LockAddress); \
} \
}
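//
// Editor's note - an illustrative usage sketch, not part of the original
// source: LOCK_POOL and its UNLOCK_POOL counterpart (defined below) are
// always used as a bracketing pair with a caller-provided handle:
//
//     KLOCK_QUEUE_HANDLE LockHandle;
//
//     LOCK_POOL (PoolDesc, LockHandle);
//     // ... manipulate PoolDesc->ListHeads ...
//     UNLOCK_POOL (PoolDesc, LockHandle);
//
// Only the OldIrql field of the handle is used for the main nonpaged
// descriptor; the in-stack queued spinlock form is for per-node pools.
//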
KIRQL
ExLockPool (
IN POOL_TYPE PoolType
)
/*++
Routine Description:
This function locks the pool specified by pool type.
Arguments:
PoolType - Specifies the pool that should be locked.
Return Value:
The previous IRQL is returned as the function value.
--*/
{
KIRQL OldIrql;
//
// Nonpaged pool is protected by a spinlock, paged pool by a fast mutex.
//
// Always acquire the global main pool for our caller regardless of how
// many subpools this system is using.
//
if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
ExpLockNonPagedPool (OldIrql);
}
else {
ExAcquireFastMutex (ExpPagedPoolMutex);
OldIrql = (KIRQL)ExpPagedPoolMutex->OldIrql;
}
return OldIrql;
}
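//
// Editor's note - an illustrative usage sketch: external callers pair
// ExLockPool with ExUnlockPool (below), preserving the returned IRQL:
//
//     KIRQL OldIrql;
//
//     OldIrql = ExLockPool (NonPagedPool);
//     // ... examine or walk pool structures ...
//     ExUnlockPool (NonPagedPool, OldIrql);
//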
//
// UNLOCK_POOL is only used within this module.
//
#define UNLOCK_POOL(PoolDesc, LockHandle) { \
if ((PoolDesc->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { \
if (PoolDesc == &NonPagedPoolDescriptor) { \
ExpUnlockNonPagedPool (LockHandle.OldIrql); \
} \
else { \
ASSERT (ExpNumberOfNonPagedPools > 1); \
KeReleaseInStackQueuedSpinLock (&LockHandle); \
} \
} \
else { \
ExReleaseFastMutex ((PFAST_MUTEX)PoolDesc->LockAddress); \
} \
}
VOID
ExUnlockPool (
IN POOL_TYPE PoolType,
IN KIRQL LockHandle
)
/*++
Routine Description:
This function unlocks the pool specified by pool type.
Arguments:
PoolType - Specifies the pool that should be unlocked.
LockHandle - Specifies the lock handle from a previous call to ExLockPool.
Return Value:
None.
--*/
{
//
// Nonpaged pool is protected by a spinlock, paged pool by a fast mutex.
//
// Always release the global main pool for our caller regardless of how
// many subpools this system is using.
//
if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
ExpUnlockNonPagedPool (LockHandle);
}
else {
ExReleaseFastMutex (ExpPagedPoolMutex);
}
return;
}
VOID
ExInitializePoolDescriptor (
IN PPOOL_DESCRIPTOR PoolDescriptor,
IN POOL_TYPE PoolType,
IN ULONG PoolIndex,
IN ULONG Threshold,
IN PVOID PoolLock
)
/*++
Routine Description:
This function initializes a pool descriptor.
Note that this routine is called directly by the memory manager.
Arguments:
PoolDescriptor - Supplies a pointer to the pool descriptor.
PoolType - Supplies the type of the pool.
PoolIndex - Supplies the pool descriptor index.
Threshold - Supplies the threshold value for the specified pool.
PoolLock - Supplies a pointer to the lock for the specified pool.
Return Value:
None.
--*/
{
PLIST_ENTRY ListEntry;
PLIST_ENTRY LastListEntry;
//
// Initialize statistics fields, the pool type, the threshold value,
// and the lock address.
//
PoolDescriptor->PoolType = PoolType;
PoolDescriptor->PoolIndex = PoolIndex;
PoolDescriptor->RunningAllocs = 0;
PoolDescriptor->RunningDeAllocs = 0;
PoolDescriptor->TotalPages = 0;
PoolDescriptor->TotalBigPages = 0;
PoolDescriptor->Threshold = Threshold;
PoolDescriptor->LockAddress = PoolLock;
PoolDescriptor->PendingFrees = NULL;
PoolDescriptor->PendingFreeDepth = 0;
//
// Initialize the allocation listheads.
//
ListEntry = PoolDescriptor->ListHeads;
LastListEntry = ListEntry + POOL_LIST_HEADS;
while (ListEntry < LastListEntry) {
PrivateInitializeListHead (ListEntry);
ListEntry += 1;
}
if ((PoolType == PagedPoolSession) && (ExpSessionPoolDescriptor == NULL)) {
ExpSessionPoolDescriptor = (PPOOL_DESCRIPTOR) MiSessionPoolVector ();
}
return;
}
//
// FREE_CHECK_ERESOURCE - If enabled, causes each pool free to verify
// no active ERESOURCEs are in the pool block being freed.
//
// FREE_CHECK_KTIMER - If enabled, causes each pool free to verify no
// active KTIMERs are in the pool block being freed.
//
//
// Checking for resources in pool being freed is expensive as there can
// easily be thousands of resources, so don't do it by default but do
// leave the capability for individual systems to enable it.
//
//
// Runtime modifications to these flags must use interlocked sequences.
//
#if DBG && !defined(_AMD64_SIMULATOR_)
ULONG ExpPoolFlags = EX_CHECK_POOL_FREES_FOR_ACTIVE_TIMERS | \
EX_CHECK_POOL_FREES_FOR_ACTIVE_WORKERS;
#else
ULONG ExpPoolFlags = 0;
#endif
#define FREE_CHECK_ERESOURCE(Va, NumberOfBytes) \
if (ExpPoolFlags & EX_CHECK_POOL_FREES_FOR_ACTIVE_RESOURCES) { \
ExpCheckForResource(Va, NumberOfBytes); \
}
#define FREE_CHECK_KTIMER(Va, NumberOfBytes) \
if (ExpPoolFlags & EX_CHECK_POOL_FREES_FOR_ACTIVE_TIMERS) { \
KeCheckForTimer(Va, NumberOfBytes); \
}
#define FREE_CHECK_WORKER(Va, NumberOfBytes) \
if (ExpPoolFlags & EX_CHECK_POOL_FREES_FOR_ACTIVE_WORKERS) { \
ExpCheckForWorker(Va, NumberOfBytes); \
}
VOID
ExSetPoolFlags (
IN ULONG PoolFlag
)
/*++
Routine Description:
This procedure enables the specified pool flag(s).
Arguments:
PoolFlag - Supplies the pool flag(s) to enable.
Return Value:
None.
--*/
{
RtlInterlockedSetBits (&ExpPoolFlags, PoolFlag);
}
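//
// Editor's note - an illustrative usage sketch: the FREE_CHECK_* hooks
// above are armed at runtime. A system that wants every pool free
// checked for embedded ERESOURCEs, for instance, would call:
//
//     ExSetPoolFlags (EX_CHECK_POOL_FREES_FOR_ACTIVE_RESOURCES);
//
// after which FREE_CHECK_ERESOURCE expands to an ExpCheckForResource
// call on each freed block.
//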
VOID
InitializePool (
IN POOL_TYPE PoolType,
IN ULONG Threshold
)
/*++
Routine Description:
This procedure initializes a pool descriptor for the specified pool
type. Once initialized, the pool may be used for allocation and
deallocation.
This function should be called once for each base pool type during
system initialization.
Each pool descriptor contains an array of list heads for free
blocks. Each list head holds blocks which are a multiple of
the POOL_BLOCK_SIZE. The first element on the list [0] links
together free entries of size POOL_BLOCK_SIZE, the second element
[1] links together entries of POOL_BLOCK_SIZE * 2, the third
POOL_BLOCK_SIZE * 3, etc, up to the number of blocks which fit
into a page.
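As an illustrative example (editor's addition, assuming the x86
values of an 8-byte POOL_BLOCK_SIZE and an 8-byte pool header): a
24-byte request occupies 24 + 8 = 32 bytes, i.e. four block units,
and by the mapping above would be held on list head [3].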
Arguments:
PoolType - Supplies the type of pool being initialized (e.g.
nonpaged pool, paged pool...).
Threshold - Supplies the threshold value for the specified pool.
Return Value:
None.
--*/
{
ULONG i;
ULONG GlobalFlag;
PKSPIN_LOCK SpinLock;
PPOOL_TRACKER_BIG_PAGES p;
PPOOL_DESCRIPTOR Descriptor;
ULONG Index;
PFAST_MUTEX FastMutex;
SIZE_T Size;
ASSERT((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) == 0);
if (PoolType == NonPagedPool) {
//
// Initialize nonpaged pools.
//
GlobalFlag = NtGlobalFlag;
#if DBG
GlobalFlag |= FLG_POOL_ENABLE_TAGGING;
#endif
if (GlobalFlag & FLG_POOL_ENABLE_TAGGING) {
PoolTrackTableSize = MAX_TRACKER_TABLE;
PoolTrackTableMask = PoolTrackTableSize - 2;
PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
PoolTrackTableSize *
sizeof(POOL_TRACKER_TABLE),
FALSE);
RtlZeroMemory(PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
PoolBigPageTableSize = MAX_BIGPAGE_TABLE;
PoolBigPageTableHash = PoolBigPageTableSize - 1;
PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
PoolBigPageTableSize *
sizeof(POOL_TRACKER_BIG_PAGES),
FALSE);
RtlZeroMemory(PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
p = &PoolBigPageTable[0];
for (i = 0; i < PoolBigPageTableSize; i += 1, p += 1) {
p->Va = (PVOID) POOL_BIG_TABLE_ENTRY_FREE;
}
ExpInsertPoolTracker ('looP',
(ULONG) ROUND_TO_PAGES((PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES))),
NonPagedPool);
}
if (KeNumberNodes > 1) {
ExpNumberOfNonPagedPools = KeNumberNodes;
//
// Limit the number of pools to the number of bits in the PoolIndex.
//
if (ExpNumberOfNonPagedPools > 127) {
ExpNumberOfNonPagedPools = 127;
}
//
// Further limit the number of pools by our array of pointers.
//
if (ExpNumberOfNonPagedPools > EXP_MAXIMUM_POOL_NODES) {
ExpNumberOfNonPagedPools = EXP_MAXIMUM_POOL_NODES;
}
Size = sizeof(POOL_DESCRIPTOR) + sizeof(KLOCK_QUEUE_HANDLE);
for (Index = 0; Index < ExpNumberOfNonPagedPools; Index += 1) {
//
// Here's a thorny problem. We'd like to use
// MmAllocateIndependentPages but can't because we'd need
// system PTEs to map the pages with and PTEs are not
// available until nonpaged pool exists. So just use
// regular pool pages to hold the descriptors and spinlocks
// and hope they either a) happen to fall onto the right node
// or b) that these lines live in the local processor cache
// all the time anyway due to frequent usage.
//
Descriptor = (PPOOL_DESCRIPTOR) MiAllocatePoolPages (
NonPagedPool,
Size,
FALSE);
if (Descriptor == NULL) {
KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
Size,
(ULONG_PTR)-1,
(ULONG_PTR)-1,
(ULONG_PTR)-1);
}
ExpNonPagedPoolDescriptor[Index] = Descriptor;
SpinLock = (PKSPIN_LOCK)(Descriptor + 1);
KeInitializeSpinLock (SpinLock);
ExInitializePoolDescriptor (Descriptor,
NonPagedPool,
Index,
Threshold,
(PVOID)SpinLock);
}
}
//
// Initialize the spinlocks for nonpaged pool.
//
KeInitializeSpinLock (&ExpTaggedPoolLock);
//
// Initialize the nonpaged pool descriptor.
//
PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
ExInitializePoolDescriptor (&NonPagedPoolDescriptor,
NonPagedPool,
0,
Threshold,
NULL);
}
else {
//
// Allocate memory for the paged pool descriptors and fast mutexes.
//
if (KeNumberNodes > 1) {
ExpNumberOfPagedPools = KeNumberNodes;
//
// Limit the number of pools to the number of bits in the PoolIndex.
//
if (ExpNumberOfPagedPools > 127) {
ExpNumberOfPagedPools = 127;
}
}
//
// Further limit the number of pools by our array of pointers.
//
if (ExpNumberOfPagedPools > EXP_MAXIMUM_POOL_NODES) {
ExpNumberOfPagedPools = EXP_MAXIMUM_POOL_NODES;
}
//
// For NUMA systems, allocate both the pool descriptor and the
// associated lock from the local node for performance (even though
// it costs a little more memory).
//
// For non-NUMA systems, allocate everything together in one chunk
// to reduce memory consumption as there is no performance cost
// for doing it this way.
//
if (KeNumberNodes > 1) {
Size = sizeof(FAST_MUTEX) + sizeof(POOL_DESCRIPTOR);
for (Index = 0; Index < ExpNumberOfPagedPools + 1; Index += 1) {
ULONG Node;
if (Index == 0) {
Node = 0;
}
else {
Node = Index - 1;
}
Descriptor = (PPOOL_DESCRIPTOR) MmAllocateIndependentPages (
Size,
Node);
if (Descriptor == NULL) {
KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
Size,
(ULONG_PTR)-1,
(ULONG_PTR)-1,
(ULONG_PTR)-1);
}
ExpPagedPoolDescriptor[Index] = Descriptor;
FastMutex = (PFAST_MUTEX)(Descriptor + 1);
if (Index == 0) {
PoolVector[PagedPool] = Descriptor;
ExpPagedPoolMutex = FastMutex;
}
ExInitializeFastMutex (FastMutex);
ExInitializePoolDescriptor (Descriptor,
PagedPool,
Index,
Threshold,
(PVOID)FastMutex);
}
}
else {
Size = (ExpNumberOfPagedPools + 1) * (sizeof(FAST_MUTEX) + sizeof(POOL_DESCRIPTOR));
Descriptor = (PPOOL_DESCRIPTOR)ExAllocatePoolWithTag (NonPagedPool,
Size,
'looP');
if (Descriptor == NULL) {
KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
Size,
(ULONG_PTR)-1,
(ULONG_PTR)-1,
(ULONG_PTR)-1);
}
FastMutex = (PFAST_MUTEX)(Descriptor + ExpNumberOfPagedPools + 1);
PoolVector[PagedPool] = Descriptor;
ExpPagedPoolMutex = FastMutex;
for (Index = 0; Index < ExpNumberOfPagedPools + 1; Index += 1) {
ExInitializeFastMutex (FastMutex);
ExpPagedPoolDescriptor[Index] = Descriptor;
ExInitializePoolDescriptor (Descriptor,
PagedPool,
Index,
Threshold,
(PVOID)FastMutex);
Descriptor += 1;
FastMutex += 1;
}
}
if (PoolTrackTable) {
ExpInsertPoolTracker('looP',
(ULONG) ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
NonPagedPool);
}
#if defined (NT_UP)
if (MmNumberOfPhysicalPages < 32 * 1024) {
LARGE_INTEGER TwoMinutes;
//
// Set the flag to disable lookasides and use hot/cold page
// separation during bootup.
//
ExSetPoolFlags (EX_SEPARATE_HOT_PAGES_DURING_BOOT);
//
// Start a timer so the above behavior is disabled once bootup
// has finished.
//
KeInitializeTimer (&ExpBootFinishedTimer);
KeInitializeDpc (&ExpBootFinishedTimerDpc,
(PKDEFERRED_ROUTINE) ExpBootFinishedDispatch,
NULL);
TwoMinutes.QuadPart = Int32x32To64 (120, -10000000);
KeSetTimer (&ExpBootFinishedTimer,
TwoMinutes,
&ExpBootFinishedTimerDpc);
}
#endif
if (MmNumberOfPhysicalPages >= 127 * 1024) {
ExSetPoolFlags (EX_DELAY_POOL_FREES);
}
}
}
PVOID
VeAllocatePoolWithTagPriority (
IN POOL_TYPE PoolType,
IN SIZE_T NumberOfBytes,
IN ULONG Tag,
IN EX_POOL_PRIORITY Priority,
IN PVOID CallingAddress
);
PVOID
ExAllocatePoolWithTag (
IN POOL_TYPE PoolType,
IN SIZE_T NumberOfBytes,
IN ULONG Tag
)
/*++
Routine Description:
This function allocates a block of pool of the specified type and
returns a pointer to the allocated block. This function is used to
access both the page-aligned pools and the list head entries (less
than a page) pools.
If the number of bytes specifies a size that is too large to be
satisfied by the appropriate list, then the page-aligned pool
allocator is used. The allocated block will be page-aligned and a
page-sized multiple.
Otherwise, the appropriate pool list entry is used. The allocated
block will be 64-bit aligned, but will not be page aligned. The
pool allocator calculates the smallest number of POOL_BLOCK_SIZE
that can be used to satisfy the request. If there are no blocks
available of this size, then a block of the next larger block size
is allocated and split. One piece is placed back into the pool, and
the other piece is used to satisfy the request. If the allocator
reaches the page-sized block list, and nothing is there, the
page-aligned pool allocator is called. The page is split and added
to the pool.
Arguments:
PoolType - Supplies the type of pool to allocate. If the pool type
is one of the "MustSucceed" pool types, then this call will
succeed and return a pointer to allocated pool or bugcheck on failure.
For all other cases, if the system cannot allocate the requested amount
of memory, NULL is returned.
Valid pool types:
NonPagedPool
PagedPool
NonPagedPoolMustSucceed,
NonPagedPoolCacheAligned
PagedPoolCacheAligned
NonPagedPoolCacheAlignedMustS
Tag - Supplies the caller's identifying tag.
NumberOfBytes - Supplies the number of bytes to allocate.
Return Value:
NULL - The PoolType is not one of the "MustSucceed" pool types, and
not enough pool exists to satisfy the request.
NON-NULL - Returns a pointer to the allocated pool.
--*/
{
LOGICAL LockHeld;
PVOID Block;
PPOOL_HEADER Entry;
PGENERAL_LOOKASIDE LookasideList;
PPOOL_HEADER NextEntry;
PPOOL_HEADER SplitEntry;
KLOCK_QUEUE_HANDLE LockHandle;
PPOOL_DESCRIPTOR PoolDesc;
ULONG Index;
ULONG ListNumber;
ULONG NeededSize;
ULONG PoolIndex;
POOL_TYPE CheckType;
POOL_TYPE RequestType;
PLIST_ENTRY ListHead;
POOL_TYPE NewPoolType;
LOGICAL GlobalSpace;
ULONG IsLargeSessionAllocation;
PKPRCB Prcb;
ULONG NumberOfPages;
POOL_HEADER TempHeader;
POOL_HEADER TempHeader2;
ULONG RetryCount;
PVOID CallingAddress;
#if defined (_X86_)
PVOID CallersCaller;
#endif
#define CacheOverhead POOL_OVERHEAD
PERFINFO_EXALLOCATEPOOLWITHTAG_DECL();
ASSERT (NumberOfBytes != 0);
ASSERT_ALLOCATE_IRQL (PoolType, NumberOfBytes);
//
// Isolate the base pool type and select a pool from which to allocate
// the specified block size.
//
CheckType = PoolType & BASE_POOL_TYPE_MASK;
if (ExpPoolFlags & (EX_KERNEL_VERIFIER_ENABLED | EX_SPECIAL_POOL_ENABLED)) {
if (ExpPoolFlags & EX_KERNEL_VERIFIER_ENABLED) {
if ((PoolType & POOL_DRIVER_MASK) == 0) {
//
// Use the Driver Verifier pool framework. Note this will
// result in a recursive callback to this routine.
//
#if defined (_X86_)
RtlGetCallersAddress (&CallingAddress, &CallersCaller);
#else
CallingAddress = (PVOID)_ReturnAddress();
#endif
return VeAllocatePoolWithTagPriority (PoolType | POOL_DRIVER_MASK,
NumberOfBytes,
Tag,
HighPoolPriority,
CallingAddress);
}
PoolType &= ~POOL_DRIVER_MASK;
}
//
// Use special pool if there is a tag or size match.
//
if ((ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) &&
(MmUseSpecialPool (NumberOfBytes, Tag))) {
Entry = MmAllocateSpecialPool (NumberOfBytes,
Tag,
PoolType,
2);
if (Entry != NULL) {
return (PVOID)Entry;
}
}
}
//
// Only session paged pool allocations come from the per session
// pools. Nonpaged session pool allocations still come from global pool.
//
if (PoolType & SESSION_POOL_MASK) {
ASSERT (ExpSessionPoolDescriptor != NULL);
GlobalSpace = FALSE;
if (CheckType == NonPagedPool) {
PoolDesc = PoolVector[CheckType];
}
else {
PoolDesc = ExpSessionPoolDescriptor;
}
}
else {
PoolDesc = PoolVector[CheckType];
GlobalSpace = TRUE;
}
//
// Initializing LockHandle is not needed for correctness but without
// it the compiler cannot compile this code W4 to check for use of
// uninitialized variables.
//
LockHandle.OldIrql = 0;
//
// Check to determine if the requested block can be allocated from one
// of the pool lists or must be directly allocated from virtual memory.
//
if (NumberOfBytes > POOL_BUDDY_MAX) {
//
// The requested size is greater than the largest block maintained
// by allocation lists.
//
RetryCount = 0;
IsLargeSessionAllocation = (PoolType & SESSION_POOL_MASK);
RequestType = (PoolType & (BASE_POOL_TYPE_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK));
restart1:
LOCK_POOL(PoolDesc, LockHandle);
Entry = (PPOOL_HEADER) MiAllocatePoolPages (RequestType,
NumberOfBytes,
IsLargeSessionAllocation);
//
// Large session pool allocations are accounted for directly by
// the memory manager so no need to call MiSessionPoolAllocated here.
//
if (Entry != NULL) {
NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);
PoolDesc->TotalBigPages += NumberOfPages;
PoolDesc->RunningAllocs += 1;
UNLOCK_POOL(PoolDesc, LockHandle);
if ((PoolBigPageTable) && (IsLargeSessionAllocation == 0)) {
if (ExpAddTagForBigPages((PVOID)Entry,
Tag,
NumberOfPages) == FALSE) {
Tag = ' GIB';
}
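//
// Editor's note: ' GIB' is the byte sequence "BIG " as it
// appears in memory, so allocations that could not fit in the
// big-page table remain visible to pool tracking under a
// recognizable fallback tag.
//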
ExpInsertPoolTracker (Tag,
(ULONG) ROUND_TO_PAGES(NumberOfBytes),
PoolType);
}
}
else {
UNLOCK_POOL(PoolDesc, LockHandle);
RetryCount += 1;
//
// If there are deferred free blocks, free them now and retry.
//
if ((RetryCount == 1) && (ExpPoolFlags & EX_DELAY_POOL_FREES)) {
ExDeferredFreePool (PoolDesc);
goto restart1;
}
if (PoolType & MUST_SUCCEED_POOL_TYPE_MASK) {
KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
NumberOfBytes,
NonPagedPoolDescriptor.TotalPages,
NonPagedPoolDescriptor.TotalBigPages,
0);
}
ExPoolFailures += 1;
if (ExpPoolFlags & EX_PRINT_POOL_FAILURES) {
KdPrint(("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
NumberOfBytes,
PoolType));
if (ExpPoolFlags & EX_STOP_ON_POOL_FAILURES) {
DbgBreakPoint ();
}
}
if ((PoolType & POOL_RAISE_IF_ALLOCATION_FAILURE) != 0) {
ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
}
}
PERFINFO_BIGPOOLALLOC(PoolType, Tag, NumberOfBytes, Entry);
return Entry;
}
if (NumberOfBytes == 0) {
//
// Besides fragmenting pool, zero byte requests would not be handled
// in cases where the minimum pool block size is the same as the
// pool header size (no room for flink/blinks, etc).
//
#if DBG
KeBugCheckEx (BAD_POOL_CALLER, 0, 0, PoolType, Tag);
#else
NumberOfBytes = 1;
#endif
}
//
// The requested size is less than or equal to the size of the
// maximum block maintained by the allocation lists.
//
PERFINFO_POOLALLOC(PoolType, Tag, NumberOfBytes);
//
// Compute the index of the listhead for blocks of the requested size.
//
ListNumber = (ULONG)((NumberOfBytes + POOL_OVERHEAD + (POOL_SMALLEST_BLOCK - 1)) >> POOL_BLOCK_SHIFT);
NeededSize = ListNumber;
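//
// Editor's note (illustrative, assuming the x86 values: POOL_BLOCK_SHIFT
// of 3 with POOL_OVERHEAD and POOL_SMALLEST_BLOCK both 8): a 24-byte
// request yields (24 + 8 + 7) >> 3 == 4, i.e. four 8-byte pool blocks
// covering the header plus data, so NeededSize counts blocks, not bytes.
//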
if (CheckType == PagedPool) {
//
// If the requested pool block is a small block, then attempt to
// allocate the requested pool from the per processor lookaside
// list. If the attempt fails, then attempt to allocate from the
// system lookaside list. If the attempt fails, then select a
// pool to allocate from and allocate the block normally.
//
// Note session space allocations do not currently use lookaside lists.
//
// Also note that if hot/cold separation is enabled, allocations are
// not satisfied from lookaside lists as these are either :
//
// 1. cold references
//
// or
//
// 2. we are still booting on a small machine, thus keeping pool
// locality dense (to reduce the working set footprint thereby
// reducing page stealing) is a bigger win in terms of overall
// speed than trying to satisfy individual requests more quickly.
//
if ((GlobalSpace == TRUE) &&
(USING_HOT_COLD_METRICS == 0) &&
(NeededSize <= POOL_SMALL_LISTS)) {
Prcb = KeGetCurrentPrcb ();
LookasideList = Prcb->PPPagedLookasideList[NeededSize - 1].P;
LookasideList->TotalAllocates += 1;
CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, Entry);
Entry = (PPOOL_HEADER)
InterlockedPopEntrySList (&LookasideList->ListHead);
if (Entry == NULL) {
LookasideList = Prcb->PPPagedLookasideList[NeededSize - 1].L;
LookasideList->TotalAllocates += 1;
CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, Entry);
Entry = (PPOOL_HEADER)
InterlockedPopEntrySList (&LookasideList->ListHead);
}
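//
// Editor's note (illustrative): the sequence above is a two-level
// lookaside - first the per-processor (.P) list, then the shared
// backup (.L) list:
//
//     Entry = Pop (PerProcessor);     // usually uncontended
//     if (Entry == NULL) {
//         Entry = Pop (Shared);       // second chance before the
//     }                               // descriptor lists are searched
//
// Both pops are interlocked SList operations; the per-processor list
// simply sees far less cross-processor contention.
//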
if (Entry != NULL) {
CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, Entry);
Entry -= 1;
LookasideList->AllocateHits += 1;
NewPoolType = (PoolType & (BASE_POOL_TYPE_MASK | POOL_QUOTA_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK)) + 1;
NewPoolType |= POOL_IN_USE_MASK;
Entry->PoolType = (UCHAR)NewPoolType;
Entry->PoolTag = Tag;
ASSERT ((PoolType & SESSION_POOL_MASK) == 0);
if (PoolTrackTable != NULL) {
ExpInsertPoolTracker (Tag,
Entry->BlockSize << POOL_BLOCK_SHIFT,
PoolType);
}
//
// Zero out any back pointer to our internal structures
// to stop someone from corrupting us via an
// uninitialized pointer.
//
((PULONG_PTR)((PCHAR)Entry + CacheOverhead))[0] = 0;
PERFINFO_POOLALLOC_ADDR((PUCHAR)Entry + CacheOverhead);
return (PUCHAR)Entry + CacheOverhead;
}
}
//
// If there is more than one paged pool, then attempt to find
// one that can be immediately locked.
//
//
// N.B. The paged pool is selected in a round robin fashion using a
// simple counter. Note that the counter is incremented using a
// noninterlocked sequence, but the pool index is never allowed
// to get out of range.
//
if (GlobalSpace == TRUE) {
PVOID Lock;
if (USING_HOT_COLD_METRICS) {
if ((PoolType & POOL_COLD_ALLOCATION) == 0) {
//
// Hot allocations come from the first paged pool.
//
PoolIndex = 1;
}
else {
//
// Force cold allocations to come from the last paged pool.
//
PoolIndex = ExpNumberOfPagedPools;
}
}
else {
if (KeNumberNodes > 1) {
//
// Use the pool descriptor which contains memory local to
// the current processor even if we have to wait for it.
// While it is possible that the paged pool addresses in the
// local descriptor have been paged out, on large memory
// NUMA machines this should be less common.
//
Prcb = KeGetCurrentPrcb ();
PoolIndex = Prcb->ParentNode->Color;
if (PoolIndex < ExpNumberOfPagedPools) {
PoolIndex += 1;
PoolDesc = ExpPagedPoolDescriptor[PoolIndex];
RequestType = PoolType & (BASE_POOL_TYPE_MASK | SESSION_POOL_MASK);
RetryCount = 0;
goto restart2;
}
}
PoolIndex = 1;
if (ExpNumberOfPagedPools != PoolIndex) {
ExpPoolIndex += 1;
PoolIndex = ExpPoolIndex;
if (PoolIndex > ExpNumberOfPagedPools) {
PoolIndex = 1;
ExpPoolIndex = 1;
}
Index = PoolIndex;
do {
Lock = ExpPagedPoolDescriptor[PoolIndex]->LockAddress;
if (!ExIsFastMutexOwned((PFAST_MUTEX)Lock)) {
break;
}
PoolIndex += 1;
if (PoolIndex > ExpNumberOfPagedPools) {
PoolIndex = 1;
}
} while (PoolIndex != Index);
}
}
PoolDesc = ExpPagedPoolDescriptor[PoolIndex];
}
else {
//
// Only one paged pool is currently available per session.
//
PoolIndex = 0;
ASSERT (PoolDesc == ExpSessionPoolDescriptor);
ASSERT (PoolDesc->PoolIndex == 0);
}
}
else {
//
// If the requested pool block is a small block, then attempt to
// allocate the requested pool from the per processor lookaside
// list. If the attempt fails, then attempt to allocate from the
// system lookaside list. If the attempt fails, then select a
// pool to allocate from and allocate the block normally.
//
if ((GlobalSpace == TRUE) && (NeededSize <= POOL_SMALL_LISTS)) {
Prcb = KeGetCurrentPrcb();
LookasideList = Prcb->PPNPagedLookasideList[NeededSize - 1].P;
LookasideList->TotalAllocates += 1;
CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, 0);
Entry = (PPOOL_HEADER)
InterlockedPopEntrySList (&LookasideList->ListHead);
if (Entry == NULL) {
LookasideList = Prcb->PPNPagedLookasideList[NeededSize - 1].L;
LookasideList->TotalAllocates += 1;
CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, 0);
Entry = (PPOOL_HEADER)
InterlockedPopEntrySList (&LookasideList->ListHead);
}
if (Entry != NULL) {
CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, Entry);
Entry -= 1;
LookasideList->AllocateHits += 1;
NewPoolType = (PoolType & (BASE_POOL_TYPE_MASK | POOL_QUOTA_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK)) + 1;
NewPoolType |= POOL_IN_USE_MASK;
Entry->PoolType = (UCHAR)NewPoolType;
Entry->PoolTag = Tag;
if (PoolTrackTable != NULL) {
ExpInsertPoolTracker (Tag,
Entry->BlockSize << POOL_BLOCK_SHIFT,
PoolType);
}
//
// Zero out any back pointer to our internal structures
// to stop someone from corrupting us via an
// uninitialized pointer.
//
((PULONG_PTR)((PCHAR)Entry + CacheOverhead))[0] = 0;
PERFINFO_POOLALLOC_ADDR((PUCHAR)Entry + CacheOverhead);
return (PUCHAR)Entry + CacheOverhead;
}
}
if (ExpNumberOfNonPagedPools <= 1) {
PoolIndex = 0;
}
else {
//
// Use the pool descriptor which contains memory local to
// the current processor even if we have to contend for its lock.
//
Prcb = KeGetCurrentPrcb ();
PoolIndex = Prcb->ParentNode->Color;
if (PoolIndex >= ExpNumberOfNonPagedPools) {
PoolIndex = ExpNumberOfNonPagedPools - 1;
}
PoolDesc = ExpNonPagedPoolDescriptor[PoolIndex];
}
ASSERT(PoolIndex == PoolDesc->PoolIndex);
}
RequestType = PoolType & (BASE_POOL_TYPE_MASK | SESSION_POOL_MASK);
RetryCount = 0;
restart2:
ListHead = &PoolDesc->ListHeads[ListNumber];
//
// Walk the listheads looking for a free block.
//
LockHeld = FALSE;
do {
//
// If the list is not empty, then allocate a block from the
// selected list.
//
if (PrivateIsListEmpty(ListHead) == FALSE) {
if (LockHeld == FALSE) {
LockHeld = TRUE;
LOCK_POOL (PoolDesc, LockHandle);
if (PrivateIsListEmpty(ListHead)) {
//
// The block is no longer available - restart at the
// beginning to avoid fragmentation.
//
ListHead = &PoolDesc->ListHeads[ListNumber];
continue;
}
}
CHECK_LIST (ListHead);
Block = PrivateRemoveHeadList(ListHead);
CHECK_LIST (ListHead);
Entry = (PPOOL_HEADER)((PCHAR)Block - POOL_OVERHEAD);
ASSERT(Entry->BlockSize >= NeededSize);
ASSERT(DECODE_POOL_INDEX(Entry) == PoolIndex);
ASSERT(Entry->PoolType == 0);
if (Entry->BlockSize != NeededSize) {
//
// The selected block is larger than the allocation
// request. Split the block and insert the remaining
// fragment in the appropriate list.
//
// If the entry is at the start of a page, then take
// the allocation from the front of the block so as
// to minimize fragmentation. Otherwise, take the
// allocation from the end of the block which may
// also reduce fragmentation if the block is at the
// end of a page.
//
if (Entry->PreviousSize == 0) {
//
// The entry is at the start of a page.
//
SplitEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + NeededSize);
SplitEntry->BlockSize = (USHORT)(Entry->BlockSize - NeededSize);
SplitEntry->PreviousSize = (USHORT) NeededSize;
//
// If the allocated block is not at the end of a
// page, then adjust the size of the next block.
//
NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)SplitEntry + SplitEntry->BlockSize);
if (PAGE_END(NextEntry) == FALSE) {
NextEntry->PreviousSize = SplitEntry->BlockSize;
}
}
else {
//
// The entry is not at the start of a page.
//
SplitEntry = Entry;
Entry->BlockSize = (USHORT)(Entry->BlockSize - NeededSize);
Entry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + Entry->BlockSize);
Entry->PreviousSize = SplitEntry->BlockSize;
//
// If the allocated block is not at the end of a
// page, then adjust the size of the next block.
//
NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + NeededSize);
if (PAGE_END(NextEntry) == FALSE) {
NextEntry->PreviousSize = (USHORT) NeededSize;
}
}
//
// Set the size of the allocated entry, clear the pool
// type of the split entry, set the index of the split
// entry, and insert the split entry in the appropriate
// free list.
//
Entry->BlockSize = (USHORT) NeededSize;
ENCODE_POOL_INDEX(Entry, PoolIndex);
SplitEntry->PoolType = 0;
ENCODE_POOL_INDEX(SplitEntry, PoolIndex);
Index = SplitEntry->BlockSize;
CHECK_LIST(&PoolDesc->ListHeads[Index - 1]);
//
// Only insert split pool blocks which contain more than just
// a header as only those have room for a flink/blink !
// Note if the minimum pool block size is bigger than the
// header then there can be no blocks like this.
//
if ((POOL_OVERHEAD != POOL_SMALLEST_BLOCK) ||
(SplitEntry->BlockSize != 1)) {
PrivateInsertTailList(&PoolDesc->ListHeads[Index - 1], ((PLIST_ENTRY)((PCHAR)SplitEntry + POOL_OVERHEAD)));
CHECK_LIST(((PLIST_ENTRY)((PCHAR)SplitEntry + POOL_OVERHEAD)));
}
}
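//
// Editor's note (illustrative): for a free 8-block entry and a
// NeededSize of 3, the two cases above produce:
//
//     entry at page start:  [alloc 3][free 5 .......]   (take the front)
//     entry mid-page:       [....... free 5][alloc 3]   (take the back)
//
// biasing allocations toward the ends of the page to reduce
// fragmentation.
//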
Entry->PoolType = (UCHAR)(((PoolType & (BASE_POOL_TYPE_MASK | POOL_QUOTA_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK)) + 1) | POOL_IN_USE_MASK);
CHECK_POOL_HEADER(__LINE__, Entry);
PoolDesc->RunningAllocs += 1;
UNLOCK_POOL(PoolDesc, LockHandle);
Entry->PoolTag = Tag;
//
// Notify the memory manager of session pool allocations
// so leaked allocations can be caught on session exit.
//
if (PoolType & SESSION_POOL_MASK) {
MiSessionPoolAllocated(
(PVOID)((PCHAR)Entry + CacheOverhead),
(ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT),
PoolType);
}
else if (PoolTrackTable != NULL) {
ExpInsertPoolTracker (Tag,
Entry->BlockSize << POOL_BLOCK_SHIFT,
PoolType);
}
//
// Zero out any back pointer to our internal structures
// to stop someone from corrupting us via an
// uninitialized pointer.
//
((PULONGLONG)((PCHAR)Entry + CacheOverhead))[0] = 0;
PERFINFO_POOLALLOC_ADDR((PUCHAR)Entry + CacheOverhead);
return (PCHAR)Entry + CacheOverhead;
}
ListHead += 1;
} while (ListHead != &PoolDesc->ListHeads[POOL_LIST_HEADS]);
//
// A block of the desired size does not exist and there are
// no large blocks that can be split to satisfy the allocation.
// Attempt to expand the pool by allocating another page to be
// added to the pool.
//
// If a different (master) pool lock will be needed for the allocation
// of full pool pages, then get rid of the local pool lock now.
//
// Initialize TempHeader now to reduce lock hold times assuming the
// allocation will succeed.
//
if (LockHeld == TRUE) {
if (CheckType == PagedPool) {
if (GlobalSpace == TRUE) {
ExReleaseFastMutex ((PFAST_MUTEX)PoolDesc->LockAddress);
LockHeld = FALSE;
}
}
else if (CheckType == NonPagedPool) {
if (ExpNumberOfNonPagedPools > 1) {
KeReleaseInStackQueuedSpinLock (&LockHandle);
LockHeld = FALSE;
}
}
}
TempHeader.Ulong1 = 0;
TempHeader.PoolIndex = (UCHAR) PoolIndex;
TempHeader.BlockSize = (USHORT) NeededSize;
TempHeader.PoolType = (UCHAR)(((PoolType & (BASE_POOL_TYPE_MASK | POOL_QUOTA_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK)) + 1) | POOL_IN_USE_MASK);
TempHeader2.Ulong1 = 0;
Index = (PAGE_SIZE / sizeof(POOL_BLOCK)) - NeededSize;
TempHeader2.BlockSize = (USHORT) Index;
TempHeader2.PreviousSize = (USHORT) NeededSize;
TempHeader2.PoolIndex = (UCHAR) PoolIndex;
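//
// Editor's note (illustrative, assuming x86: a 4096-byte PAGE_SIZE and
// 8-byte POOL_BLOCKs): a fresh page holds 512 block units, so with a
// NeededSize of 4 the headers just built describe a 4-unit allocation
// (TempHeader) followed by a 508-unit free fragment (TempHeader2).
//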
  1434. //
  1435. // Pool header now initialized, try for a free page.
  1436. //
  1437. if (LockHeld == FALSE) {
  1438. LockHeld = TRUE;
  1439. if (CheckType == PagedPool) {
  1440. if (GlobalSpace == TRUE) {
  1441. ExAcquireFastMutex (ExpPagedPoolMutex);
  1442. }
  1443. else {
  1444. ExAcquireFastMutex (ExpSessionPoolDescriptor->LockAddress);
  1445. }
  1446. }
  1447. else {
  1448. ExpLockNonPagedPool (LockHandle.OldIrql);
  1449. }
  1450. }
  1451. Entry = (PPOOL_HEADER) MiAllocatePoolPages (RequestType, PAGE_SIZE, FALSE);
  1452. ASSERT (LockHeld == TRUE);
  1453. if (CheckType == PagedPool) {
  1454. if (GlobalSpace == TRUE) {
  1455. ExReleaseFastMutex (ExpPagedPoolMutex);
  1456. LockHeld = FALSE;
  1457. }
  1458. }
  1459. else if (CheckType == NonPagedPool) {
  1460. if (ExpNumberOfNonPagedPools > 1) {
  1461. ExpUnlockNonPagedPool (LockHandle.OldIrql);
  1462. LockHeld = FALSE;
  1463. }
  1464. }
  1465. if (Entry == NULL) {
  1466. if (LockHeld == TRUE) {
  1467. if (CheckType == NonPagedPool) {
  1468. if (ExpNumberOfNonPagedPools <= 1) {
  1469. ExpUnlockNonPagedPool (LockHandle.OldIrql);
  1470. }
  1471. }
  1472. else {
  1473. ExReleaseFastMutex (ExpSessionPoolDescriptor->LockAddress);
  1474. }
  1475. LockHeld = FALSE;
  1476. }
  1477. //
  1478. // If there are deferred free blocks, free them now and retry.
  1479. //
  1480. RetryCount += 1;
  1481. if ((RetryCount == 1) && (ExpPoolFlags & EX_DELAY_POOL_FREES)) {
  1482. ExDeferredFreePool (PoolDesc);
  1483. goto restart2;
  1484. }
  1485. if ((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) != 0) {
  1486. //
  1487. // Must succeed pool was requested so bugcheck.
  1488. //
  1489. KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
  1490. PAGE_SIZE,
  1491. NonPagedPoolDescriptor.TotalPages,
  1492. NonPagedPoolDescriptor.TotalBigPages,
  1493. 0);
  1494. }
  1495. //
  1496. // No more pool of the specified type is available.
  1497. //
  1498. ExPoolFailures += 1;
  1499. if (ExpPoolFlags & EX_PRINT_POOL_FAILURES) {
  1500. KdPrint(("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
  1501. NumberOfBytes,
  1502. PoolType));
  1503. if (ExpPoolFlags & EX_STOP_ON_POOL_FAILURES) {
  1504. DbgBreakPoint ();
  1505. }
  1506. }
  1507. if ((PoolType & POOL_RAISE_IF_ALLOCATION_FAILURE) != 0) {
  1508. ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
  1509. }
  1510. PERFINFO_POOLALLOC_ADDR(NULL);
  1511. return NULL;
  1512. }
  1513. //
  1514. // Split the allocated page and insert the remaining
  1515. // fragment in the appropriate listhead.
  1516. //
  1517. // Set the size of the allocated entry, clear the pool
  1518. // type of the split entry, set the index of the split
  1519. // entry, and insert the split entry in the appropriate
  1520. // free list.
  1521. //
  1522. PoolDesc->TotalPages += 1;
  1523. *Entry = TempHeader;
  1524. PERFINFO_ADDPOOLPAGE(CheckType, PoolIndex, Entry, PoolDesc);
  1525. SplitEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + NeededSize);
  1526. *SplitEntry = TempHeader2;
  1527. if (LockHeld == FALSE) {
  1528. LOCK_POOL (PoolDesc, LockHandle);
  1529. }
  1530. //
  1531. // Only insert split pool blocks which contain more than just
  1532. // a header as only those have room for a flink/blink !
  1533. // Note if the minimum pool block size is bigger than the
  1534. // header then there can be no blocks like this.
  1535. //
  1536. if ((POOL_OVERHEAD != POOL_SMALLEST_BLOCK) ||
  1537. (SplitEntry->BlockSize != 1)) {
  1538. CHECK_LIST(&PoolDesc->ListHeads[Index - 1]);
  1539. PrivateInsertTailList(&PoolDesc->ListHeads[Index - 1], ((PLIST_ENTRY)((PCHAR)SplitEntry + POOL_OVERHEAD)));
  1540. CHECK_LIST(((PLIST_ENTRY)((PCHAR)SplitEntry + POOL_OVERHEAD)));
  1541. }
  1542. CHECK_POOL_HEADER(__LINE__, Entry);
  1543. PoolDesc->RunningAllocs += 1;
  1544. UNLOCK_POOL (PoolDesc, LockHandle);
  1545. Block = (PVOID) ((PCHAR)Entry + CacheOverhead);
  1546. NeededSize <<= POOL_BLOCK_SHIFT;
  1547. Entry->PoolTag = Tag;
  1548. //
  1549. // Notify the memory manager of session pool allocations
  1550. // so leaked allocations can be caught on session exit.
  1551. //
  1552. if (PoolType & SESSION_POOL_MASK) {
  1553. MiSessionPoolAllocated (Block, NeededSize, PoolType);
  1554. }
  1555. else if (PoolTrackTable != NULL) {
  1556. ExpInsertPoolTracker (Tag, NeededSize, PoolType);
  1557. }
  1558. PERFINFO_POOLALLOC_ADDR (Block);
  1559. return Block;
  1560. }
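//
// Illustrative sketch (editorial example, not original code): the page-split
// arithmetic above, worked for a hypothetical request needing 14 POOL_BLOCKs
// on x86 (4K page, 8-byte POOL_BLOCK).
//
#if 0
    ULONG NeededSize = 14;          // hypothetical request size in POOL_BLOCKs
    ULONG Index;

    //
    // A page holds 4096 / 8 = 512 POOL_BLOCKs; the 512 - 14 = 498 block
    // remainder becomes the split entry, filed on ListHeads[Index - 1]
    // (free blocks of exactly N POOL_BLOCKs live on ListHeads[N - 1]).
    //
    Index = (PAGE_SIZE / sizeof (POOL_BLOCK)) - NeededSize;
#endif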
  1561. PVOID
  1562. ExAllocatePool (
  1563. IN POOL_TYPE PoolType,
  1564. IN SIZE_T NumberOfBytes
  1565. )
  1566. /*++
  1567. Routine Description:
  1568. This function allocates a block of pool of the specified type and
  1569. returns a pointer to the allocated block. This function is used to
  1570. access both the page-aligned pools, and the list head entries (less than
  1571. a page) pools.
  1572. If the number of bytes specifies a size that is too large to be
  1573. satisfied by the appropriate list, then the page-aligned
  1574. pool allocator is used. The allocated block will be page-aligned
  1575. and a page-sized multiple.
  1576. Otherwise, the appropriate pool list entry is used. The allocated
  1577. block will be 64-bit aligned, but will not be page aligned. The
  1578. pool allocator calculates the smallest number of POOL_BLOCK_SIZE
  1579. that can be used to satisfy the request. If there are no blocks
  1580. available of this size, then a block of the next larger block size
  1581. is allocated and split. One piece is placed back into the pool, and
  1582. the other piece is used to satisfy the request. If the allocator
1583. reaches the page-sized block list, and nothing is there, the
  1584. page-aligned pool allocator is called. The page is split and added
  1585. to the pool...
  1586. Arguments:
  1587. PoolType - Supplies the type of pool to allocate. If the pool type
  1588. is one of the "MustSucceed" pool types, then this call will
  1589. succeed and return a pointer to allocated pool or bugcheck on failure.
  1590. For all other cases, if the system cannot allocate the requested amount
  1591. of memory, NULL is returned.
  1592. Valid pool types:
  1593. NonPagedPool
  1594. PagedPool
1595. NonPagedPoolMustSucceed
  1596. NonPagedPoolCacheAligned
  1597. PagedPoolCacheAligned
  1598. NonPagedPoolCacheAlignedMustS
  1599. NumberOfBytes - Supplies the number of bytes to allocate.
  1600. Return Value:
  1601. NULL - The PoolType is not one of the "MustSucceed" pool types, and
  1602. not enough pool exists to satisfy the request.
  1603. NON-NULL - Returns a pointer to the allocated pool.
  1604. --*/
  1605. {
  1606. return ExAllocatePoolWithTag (PoolType,
  1607. NumberOfBytes,
  1608. 'enoN');
  1609. }
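//
// Illustrative usage sketch (editorial, not original code): the two calls
// below are equivalent; the default tag 'enoN' reads as "None" in tag dumps
// because tags are stored little-endian.
//
#if 0
    PVOID p1 = ExAllocatePool (PagedPool, 64);
    PVOID p2 = ExAllocatePoolWithTag (PagedPool, 64, 'enoN');
#endif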
  1610. PVOID
  1611. ExAllocatePoolWithTagPriority (
  1612. IN POOL_TYPE PoolType,
  1613. IN SIZE_T NumberOfBytes,
  1614. IN ULONG Tag,
  1615. IN EX_POOL_PRIORITY Priority
  1616. )
  1617. /*++
  1618. Routine Description:
  1619. This function allocates a block of pool of the specified type and
  1620. returns a pointer to the allocated block. This function is used to
  1621. access both the page-aligned pools, and the list head entries (less than
  1622. a page) pools.
  1623. If the number of bytes specifies a size that is too large to be
  1624. satisfied by the appropriate list, then the page-aligned
  1625. pool allocator is used. The allocated block will be page-aligned
  1626. and a page-sized multiple.
  1627. Otherwise, the appropriate pool list entry is used. The allocated
  1628. block will be 64-bit aligned, but will not be page aligned. The
  1629. pool allocator calculates the smallest number of POOL_BLOCK_SIZE
  1630. that can be used to satisfy the request. If there are no blocks
  1631. available of this size, then a block of the next larger block size
  1632. is allocated and split. One piece is placed back into the pool, and
  1633. the other piece is used to satisfy the request. If the allocator
1634. reaches the page-sized block list, and nothing is there, the
  1635. page-aligned pool allocator is called. The page is split and added
  1636. to the pool...
  1637. Arguments:
  1638. PoolType - Supplies the type of pool to allocate. If the pool type
  1639. is one of the "MustSucceed" pool types, then this call will
  1640. succeed and return a pointer to allocated pool or bugcheck on failure.
  1641. For all other cases, if the system cannot allocate the requested amount
  1642. of memory, NULL is returned.
  1643. Valid pool types:
  1644. NonPagedPool
  1645. PagedPool
1646. NonPagedPoolMustSucceed
  1647. NonPagedPoolCacheAligned
  1648. PagedPoolCacheAligned
  1649. NonPagedPoolCacheAlignedMustS
  1650. NumberOfBytes - Supplies the number of bytes to allocate.
  1651. Tag - Supplies the caller's identifying tag.
  1652. Priority - Supplies an indication as to how important it is that this
  1653. request succeed under low available pool conditions. This
  1654. can also be used to specify special pool.
  1655. Return Value:
  1656. NULL - The PoolType is not one of the "MustSucceed" pool types, and
  1657. not enough pool exists to satisfy the request.
  1658. NON-NULL - Returns a pointer to the allocated pool.
  1659. --*/
  1660. {
  1661. PVOID Entry;
  1662. if ((Priority & POOL_SPECIAL_POOL_BIT) && (NumberOfBytes <= POOL_BUDDY_MAX)) {
  1663. Entry = MmAllocateSpecialPool (NumberOfBytes,
  1664. Tag,
  1665. PoolType,
  1666. (Priority & POOL_SPECIAL_POOL_UNDERRUN_BIT) ? 1 : 0);
  1667. if (Entry != NULL) {
  1668. return Entry;
  1669. }
  1670. Priority &= ~(POOL_SPECIAL_POOL_BIT | POOL_SPECIAL_POOL_UNDERRUN_BIT);
  1671. }
  1672. //
  1673. // Pool and other resources can be allocated directly through the Mm
  1674. // without the pool code knowing - so always call the Mm for the
  1675. // up-to-date counters.
  1676. //
  1677. if ((Priority != HighPoolPriority) && ((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) == 0)) {
  1678. if (MmResourcesAvailable (PoolType, NumberOfBytes, Priority) == FALSE) {
  1679. return NULL;
  1680. }
  1681. }
  1682. //
  1683. // There is a window between determining whether to proceed and actually
  1684. // doing the allocation. In this window the pool may deplete. This is not
  1685. // worth closing at this time.
  1686. //
  1687. return ExAllocatePoolWithTag (PoolType, NumberOfBytes, Tag);
  1688. }
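//
// Illustrative usage sketch (editorial; the tag and recovery path are
// hypothetical): a caller that can tolerate failure passes a low priority,
// letting MmResourcesAvailable veto the request before any pool is consumed.
//
#if 0
    PVOID Buffer;

    Buffer = ExAllocatePoolWithTagPriority (NonPagedPool,
                                            256,
                                            'lpmX',
                                            LowPoolPriority);
    if (Buffer == NULL) {
        //
        // Degrade gracefully, e.g. trim a cache rather than fail the I/O.
        //
    }
#endif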
  1689. PVOID
  1690. ExAllocatePoolWithQuota (
  1691. IN POOL_TYPE PoolType,
  1692. IN SIZE_T NumberOfBytes
  1693. )
  1694. /*++
  1695. Routine Description:
  1696. This function allocates a block of pool of the specified type,
  1697. returns a pointer to the allocated block, and if the binary buddy
  1698. allocator was used to satisfy the request, charges pool quota to the
  1699. current process. This function is used to access both the
  1700. page-aligned pools, and the binary buddy.
  1701. If the number of bytes specifies a size that is too large to be
  1702. satisfied by the appropriate binary buddy pool, then the
  1703. page-aligned pool allocator is used. The allocated block will be
  1704. page-aligned and a page-sized multiple. No quota is charged to the
  1705. current process if this is the case.
  1706. Otherwise, the appropriate binary buddy pool is used. The allocated
  1707. block will be 64-bit aligned, but will not be page aligned. After
  1708. the allocation completes, an attempt will be made to charge pool
  1709. quota (of the appropriate type) to the current process object. If
  1710. the quota charge succeeds, then the pool block's header is adjusted
  1711. to point to the current process. The process object is not
  1712. dereferenced until the pool is deallocated and the appropriate
  1713. amount of quota is returned to the process. Otherwise, the pool is
1714. deallocated, and a "quota exceeded" condition is raised.
  1715. Arguments:
  1716. PoolType - Supplies the type of pool to allocate. If the pool type
  1717. is one of the "MustSucceed" pool types and sufficient quota
  1718. exists, then this call will always succeed and return a pointer
  1719. to allocated pool. Otherwise, if the system cannot allocate
  1720. the requested amount of memory a STATUS_INSUFFICIENT_RESOURCES
  1721. status is raised.
  1722. NumberOfBytes - Supplies the number of bytes to allocate.
  1723. Return Value:
  1724. NON-NULL - Returns a pointer to the allocated pool.
  1725. Unspecified - If insufficient quota exists to complete the pool
  1726. allocation, the return value is unspecified.
  1727. --*/
  1728. {
  1729. return ExAllocatePoolWithQuotaTag (PoolType, NumberOfBytes, 'enoN');
  1730. }
  1731. PVOID
  1732. ExAllocatePoolWithQuotaTag (
  1733. IN POOL_TYPE PoolType,
  1734. IN SIZE_T NumberOfBytes,
  1735. IN ULONG Tag
  1736. )
  1737. /*++
  1738. Routine Description:
  1739. This function allocates a block of pool of the specified type,
  1740. returns a pointer to the allocated block, and if the binary buddy
  1741. allocator was used to satisfy the request, charges pool quota to the
  1742. current process. This function is used to access both the
  1743. page-aligned pools, and the binary buddy.
  1744. If the number of bytes specifies a size that is too large to be
  1745. satisfied by the appropriate binary buddy pool, then the
  1746. page-aligned pool allocator is used. The allocated block will be
  1747. page-aligned and a page-sized multiple. No quota is charged to the
  1748. current process if this is the case.
  1749. Otherwise, the appropriate binary buddy pool is used. The allocated
  1750. block will be 64-bit aligned, but will not be page aligned. After
  1751. the allocation completes, an attempt will be made to charge pool
  1752. quota (of the appropriate type) to the current process object. If
  1753. the quota charge succeeds, then the pool block's header is adjusted
  1754. to point to the current process. The process object is not
  1755. dereferenced until the pool is deallocated and the appropriate
  1756. amount of quota is returned to the process. Otherwise, the pool is
1757. deallocated, and a "quota exceeded" condition is raised.
  1758. Arguments:
  1759. PoolType - Supplies the type of pool to allocate. If the pool type
  1760. is one of the "MustSucceed" pool types and sufficient quota
  1761. exists, then this call will always succeed and return a pointer
  1762. to allocated pool. Otherwise, if the system cannot allocate
  1763. the requested amount of memory a STATUS_INSUFFICIENT_RESOURCES
  1764. status is raised.
  1765. NumberOfBytes - Supplies the number of bytes to allocate.
  1766. Return Value:
  1767. NON-NULL - Returns a pointer to the allocated pool.
  1768. Unspecified - If insufficient quota exists to complete the pool
  1769. allocation, the return value is unspecified.
  1770. --*/
  1771. {
  1772. PVOID p;
  1773. PEPROCESS Process;
  1774. PPOOL_HEADER Entry;
  1775. LOGICAL IgnoreQuota;
  1776. LOGICAL RaiseOnQuotaFailure;
  1777. NTSTATUS Status;
  1778. IgnoreQuota = FALSE;
  1779. RaiseOnQuotaFailure = TRUE;
  1780. if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE) {
  1781. RaiseOnQuotaFailure = FALSE;
  1782. PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
  1783. }
  1784. if ((POOL_QUOTA_ENABLED == FALSE)
  1785. #if i386 && !FPO
  1786. || (NtGlobalFlag & FLG_KERNEL_STACK_TRACE_DB)
  1787. #endif // i386 && !FPO
  1788. ) {
  1789. IgnoreQuota = TRUE;
  1790. }
  1791. else {
  1792. PoolType = (POOL_TYPE)((UCHAR)PoolType + POOL_QUOTA_MASK);
  1793. }
  1794. p = ExAllocatePoolWithTag (PoolType, NumberOfBytes, Tag);
  1795. //
  1796. // Note - NULL is page aligned.
  1797. //
  1798. if (!PAGE_ALIGNED(p) && !IgnoreQuota) {
  1799. if ((ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) &&
  1800. (MmIsSpecialPoolAddress (p))) {
  1801. return p;
  1802. }
  1803. Entry = (PPOOL_HEADER)((PCH)p - POOL_OVERHEAD);
  1804. Process = PsGetCurrentProcess();
  1805. Entry->ProcessBilled = NULL;
  1806. if (Process != PsInitialSystemProcess) {
  1807. Status = PsChargeProcessPoolQuota (Process,
  1808. PoolType & BASE_POOL_TYPE_MASK,
  1809. (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT));
  1810. if (!NT_SUCCESS(Status)) {
  1811. //
  1812. // Back out the allocation.
  1813. //
  1814. ExFreePoolWithTag (p, Tag);
  1815. if (RaiseOnQuotaFailure) {
  1816. ExRaiseStatus (Status);
  1817. }
  1818. return NULL;
  1819. }
  1820. ObReferenceObject (Process);
  1821. Entry->ProcessBilled = Process;
  1822. }
  1823. }
  1824. else {
  1825. if ((p == NULL) && (RaiseOnQuotaFailure)) {
  1826. ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
  1827. }
  1828. }
  1829. return p;
  1830. }
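//
// Illustrative usage sketch (editorial; tag and sizes are hypothetical): by
// default a quota failure raises, so callers wrap the allocation in
// try/except; OR-ing in POOL_QUOTA_FAIL_INSTEAD_OF_RAISE converts the raise
// into a NULL return instead.
//
#if 0
    PVOID Buffer;

    try {
        Buffer = ExAllocatePoolWithQuotaTag (PagedPool, 512, 'lpmX');
    } except (EXCEPTION_EXECUTE_HANDLER) {
        Buffer = NULL;
    }

    Buffer = ExAllocatePoolWithQuotaTag (
                 (POOL_TYPE) (PagedPool | POOL_QUOTA_FAIL_INSTEAD_OF_RAISE),
                 512,
                 'lpmX');
#endif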
  1831. VOID
  1832. ExInsertPoolTag (
  1833. ULONG Tag,
  1834. PVOID Va,
  1835. SIZE_T NumberOfBytes,
  1836. POOL_TYPE PoolType
  1837. )
  1838. /*++
  1839. Routine Description:
  1840. This function inserts a pool tag in the tag table and increments the
  1841. number of allocates and updates the total allocation size.
  1842. This function also inserts the pool tag in the big page tag table.
  1843. N.B. This function is for use by memory management ONLY.
  1844. Arguments:
  1845. Tag - Supplies the tag used to insert an entry in the tag table.
  1846. Va - Supplies the allocated virtual address.
  1847. NumberOfBytes - Supplies the allocation size in bytes.
  1848. PoolType - Supplies the pool type.
  1849. Return Value:
  1850. None.
  1851. Environment:
  1852. No pool locks held so pool may be freely allocated here as needed.
  1853. --*/
  1854. {
  1855. ULONG NumberOfPages;
  1856. #if !DBG
  1857. UNREFERENCED_PARAMETER (PoolType);
  1858. #endif
  1859. ASSERT ((PoolType & SESSION_POOL_MASK) == 0);
  1860. if ((PoolBigPageTable) && (NumberOfBytes >= PAGE_SIZE)) {
  1861. NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);
  1862. if (ExpAddTagForBigPages((PVOID)Va, Tag, NumberOfPages) == FALSE) {
  1863. Tag = ' GIB';
  1864. }
  1865. }
  1866. if (PoolTrackTable != NULL) {
  1867. ExpInsertPoolTracker (Tag, NumberOfBytes, NonPagedPool);
  1868. }
  1869. }
  1870. VOID
  1871. ExRemovePoolTag (
  1872. ULONG Tag,
  1873. PVOID Va,
  1874. SIZE_T NumberOfBytes
  1875. )
  1876. /*++
  1877. Routine Description:
  1878. This function removes a pool tag from the tag table and increments the
  1879. number of frees and updates the total allocation size.
  1880. This function also removes the pool tag from the big page tag table.
  1881. N.B. This function is for use by memory management ONLY.
  1882. Arguments:
  1883. Tag - Supplies the tag used to remove an entry in the tag table.
  1884. Va - Supplies the allocated virtual address.
  1885. NumberOfBytes - Supplies the allocation size in bytes.
  1886. Return Value:
  1887. None.
  1888. Environment:
  1889. No pool locks held so pool may be freely allocated here as needed.
  1890. --*/
  1891. {
  1892. ULONG BigPages;
  1893. if ((PoolBigPageTable) && (NumberOfBytes >= PAGE_SIZE)) {
  1894. ExpFindAndRemoveTagBigPages (Va, &BigPages);
  1895. }
  1896. if (PoolTrackTable != NULL) {
  1897. ExpRemovePoolTracker(Tag, (ULONG)NumberOfBytes, NonPagedPool);
  1898. }
  1899. }
  1900. VOID
  1901. ExpInsertPoolTracker (
  1902. IN ULONG Key,
  1903. IN SIZE_T Size,
  1904. IN POOL_TYPE PoolType
  1905. )
  1906. /*++
  1907. Routine Description:
  1908. This function inserts a pool tag in the tag table and increments the
  1909. number of allocates and updates the total allocation size.
  1910. Arguments:
  1911. Key - Supplies the key value used to locate a matching entry in the
  1912. tag table.
  1913. Size - Supplies the allocation size.
  1914. PoolType - Supplies the pool type.
  1915. Return Value:
  1916. None.
  1917. Environment:
  1918. No pool locks held so pool may be freely allocated here as needed.
  1919. --*/
  1920. {
  1921. ULONG Hash;
  1922. ULONG OriginalKey;
  1923. ULONG OriginalHash;
  1924. ULONG Index;
  1925. KIRQL OldIrql;
  1926. KLOCK_QUEUE_HANDLE LockHandle;
  1927. ULONG BigPages;
  1928. LOGICAL HashedIt;
  1929. SIZE_T NewSize;
  1930. SIZE_T SizeInBytes;
  1931. SIZE_T NewSizeInBytes;
  1932. SIZE_T NewSizeMask;
  1933. PPOOL_TRACKER_TABLE OldTable;
  1934. PPOOL_TRACKER_TABLE NewTable;
  1935. //
  1936. // Ignore protected pool bit except for returned hash index.
  1937. //
  1938. Key &= ~PROTECTED_POOL;
  1939. if (Key == PoolHitTag) {
  1940. DbgBreakPoint();
  1941. }
  1942. retry:
  1943. //
  1944. // Compute hash index and search for pool tag.
  1945. //
  1946. Hash = POOLTAG_HASH(Key);
  1947. ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
  1948. Hash &= (ULONG)PoolTrackTableMask;
  1949. Index = Hash;
  1950. do {
  1951. if (PoolTrackTable[Hash].Key == Key) {
  1952. goto EntryFound;
  1953. }
  1954. if (PoolTrackTable[Hash].Key == 0 && Hash != PoolTrackTableSize - 1) {
  1955. PoolTrackTable[Hash].Key = Key;
  1956. goto EntryFound;
  1957. }
  1958. Hash = (Hash + 1) & (ULONG)PoolTrackTableMask;
  1959. } while (Hash != Index);
  1960. //
  1961. // No matching entry and no free entry was found.
1962. // If the overflow bucket has been used then expansion of the tracker table
1963. // is not allowed: a subsequent free of a tag could drive its counts negative,
1964. // because the original allocation was charged to the overflow bucket while a
1965. // newer allocation of the same tag may land in a distinct entry.
  1966. //
  1967. NewSize = ((PoolTrackTableSize - 1) << 1) + 1;
  1968. NewSizeInBytes = NewSize * sizeof(POOL_TRACKER_TABLE);
  1969. SizeInBytes = PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE);
  1970. if ((NewSizeInBytes > SizeInBytes) &&
  1971. (PoolTrackTable[PoolTrackTableSize - 1].Key == 0)) {
  1972. ExpLockNonPagedPool(LockHandle.OldIrql);
  1973. NewTable = MiAllocatePoolPages (NonPagedPool, NewSizeInBytes, FALSE);
  1974. ExpUnlockNonPagedPool(LockHandle.OldIrql);
  1975. if (NewTable != NULL) {
  1976. OldTable = (PVOID)PoolTrackTable;
  1977. RtlZeroMemory ((PVOID)NewTable, NewSizeInBytes);
  1978. //
  1979. // Rehash all the entries into the new table.
  1980. //
  1981. NewSizeMask = NewSize - 2;
  1982. for (OriginalHash = 0; OriginalHash < PoolTrackTableSize; OriginalHash += 1) {
  1983. OriginalKey = PoolTrackTable[OriginalHash].Key;
  1984. if (OriginalKey == 0) {
  1985. continue;
  1986. }
  1987. Hash = (ULONG) (POOLTAG_HASH(OriginalKey) & (ULONG)NewSizeMask);
  1988. Index = Hash;
  1989. HashedIt = FALSE;
  1990. do {
  1991. if (NewTable[Hash].Key == 0 && Hash != NewSize - 1) {
  1992. RtlCopyMemory ((PVOID)&NewTable[Hash],
  1993. (PVOID)&PoolTrackTable[OriginalHash],
  1994. sizeof(POOL_TRACKER_TABLE));
  1995. HashedIt = TRUE;
  1996. break;
  1997. }
  1998. Hash = (Hash + 1) & (ULONG)NewSizeMask;
  1999. } while (Hash != Index);
  2000. //
  2001. // No matching entry and no free entry was found, have to bail.
  2002. //
  2003. if (HashedIt == FALSE) {
  2004. KdPrint(("POOL:rehash of track table failed (%p, %p, %p %p)\n",
  2005. OldTable,
  2006. PoolTrackTableSize,
  2007. NewTable,
  2008. OriginalKey));
  2009. ExpLockNonPagedPool(LockHandle.OldIrql);
  2010. MiFreePoolPages (NewTable);
  2011. ExpUnlockNonPagedPool(LockHandle.OldIrql);
  2012. goto overflow;
  2013. }
  2014. }
  2015. PoolTrackTable = NewTable;
  2016. PoolTrackTableSize = NewSize;
  2017. PoolTrackTableMask = NewSizeMask;
  2018. ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
  2019. ExpLockNonPagedPool(LockHandle.OldIrql);
  2020. BigPages = MiFreePoolPages (OldTable);
  2021. ExpUnlockNonPagedPool(LockHandle.OldIrql);
  2022. ExpRemovePoolTracker ('looP',
  2023. BigPages * PAGE_SIZE,
  2024. NonPagedPool);
  2025. ExpInsertPoolTracker ('looP',
  2026. (ULONG) ROUND_TO_PAGES(NewSizeInBytes),
  2027. NonPagedPool);
  2028. goto retry;
  2029. }
  2030. }
  2031. overflow:
  2032. //
  2033. // Use the very last entry as a bit bucket for overflows.
  2034. //
  2035. Hash = (ULONG)PoolTrackTableSize - 1;
  2036. PoolTrackTable[Hash].Key = 'lfvO';
  2037. //
  2038. // Update pool tracker table entry.
  2039. //
  2040. EntryFound:
  2041. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  2042. PoolTrackTable[Hash].PagedAllocs += 1;
  2043. PoolTrackTable[Hash].PagedBytes += Size;
  2044. }
  2045. else {
  2046. PoolTrackTable[Hash].NonPagedAllocs += 1;
  2047. PoolTrackTable[Hash].NonPagedBytes += Size;
  2048. }
  2049. ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
  2050. return;
  2051. }
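//
// Editorial sketch (not original code): the probing scheme above, reduced
// to its essentials. Open addressing with linear probing over a
// power-of-two-plus-one table whose final slot is reserved as the shared
// overflow ("Ovfl") bucket and is therefore never handed out by the probe
// loop. ExpSketchFindTagSlot is a hypothetical name.
//
#if 0
ULONG
ExpSketchFindTagSlot (
    IN PPOOL_TRACKER_TABLE Table,
    IN SIZE_T TableSize,                // 2^n + 1; mask below is 2^n - 1
    IN SIZE_T TableMask,
    IN ULONG Key
    )
{
    ULONG Hash = POOLTAG_HASH (Key) & (ULONG) TableMask;
    ULONG Start = Hash;

    do {
        if (Table[Hash].Key == Key) {
            return Hash;                        // existing entry
        }
        if (Table[Hash].Key == 0 && Hash != TableSize - 1) {
            return Hash;                        // claimable free slot
        }
        Hash = (Hash + 1) & (ULONG) TableMask;
    } while (Hash != Start);

    return (ULONG) TableSize - 1;               // shared overflow bucket
}
#endif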
  2052. VOID
  2053. ExpRemovePoolTracker (
  2054. IN ULONG Key,
  2055. IN ULONG Size,
  2056. IN POOL_TYPE PoolType
  2057. )
  2058. /*++
  2059. Routine Description:
  2060. This function increments the number of frees and updates the total
  2061. allocation size.
  2062. Arguments:
  2063. Key - Supplies the key value used to locate a matching entry in the
  2064. tag table.
  2065. Size - Supplies the allocation size.
  2066. PoolType - Supplies the pool type.
  2067. Return Value:
  2068. None.
  2069. --*/
  2070. {
  2071. ULONG Hash;
  2072. ULONG Index;
  2073. KIRQL OldIrql;
  2074. //
  2075. // Ignore protected pool bit
  2076. //
  2077. Key &= ~PROTECTED_POOL;
  2078. if (Key == PoolHitTag) {
  2079. DbgBreakPoint();
  2080. }
  2081. //
  2082. // Compute hash index and search for pool tag.
  2083. //
  2084. Hash = POOLTAG_HASH(Key);
  2085. ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
  2086. Hash &= (ULONG)PoolTrackTableMask;
  2087. Index = Hash;
  2088. do {
  2089. if (PoolTrackTable[Hash].Key == Key) {
  2090. goto EntryFound;
  2091. }
  2092. if (PoolTrackTable[Hash].Key == 0 && Hash != PoolTrackTableSize - 1) {
  2093. KdPrint(("POOL: Unable to find tracker %lx, table corrupted\n", Key));
  2094. ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
  2095. return;
  2096. }
  2097. Hash = (Hash + 1) & (ULONG)PoolTrackTableMask;
  2098. } while (Hash != Index);
  2099. //
  2100. // No matching entry and no free entry was found.
  2101. //
  2102. Hash = (ULONG)PoolTrackTableSize - 1;
  2103. //
  2104. // Update pool tracker table entry.
  2105. //
  2106. EntryFound:
  2107. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  2108. PoolTrackTable[Hash].PagedBytes -= Size;
  2109. PoolTrackTable[Hash].PagedFrees += 1;
  2110. }
  2111. else {
  2112. PoolTrackTable[Hash].NonPagedBytes -= Size;
  2113. PoolTrackTable[Hash].NonPagedFrees += 1;
  2114. }
  2115. ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
  2116. return;
  2117. }
  2118. LOGICAL
  2119. ExpAddTagForBigPages (
  2120. IN PVOID Va,
  2121. IN ULONG Key,
  2122. IN ULONG NumberOfPages
  2123. )
  2124. /*++
  2125. Routine Description:
  2126. This function inserts a pool tag in the big page tag table.
  2127. Arguments:
  2128. Va - Supplies the allocated virtual address.
  2129. Key - Supplies the key value used to locate a matching entry in the
  2130. tag table.
  2131. NumberOfPages - Supplies the number of pages that were allocated.
  2132. Return Value:
  2133. TRUE if an entry was allocated, FALSE if not.
  2134. Environment:
  2135. No pool locks held so the table may be freely expanded here as needed.
  2136. --*/
  2137. {
  2138. ULONG i;
  2139. ULONG Hash;
  2140. ULONG BigPages;
  2141. PVOID OldTable;
  2142. LOGICAL Inserted;
  2143. KIRQL OldIrql;
  2144. KLOCK_QUEUE_HANDLE LockHandle;
  2145. SIZE_T SizeInBytes;
  2146. SIZE_T NewSizeInBytes;
  2147. PPOOL_TRACKER_BIG_PAGES NewTable;
  2148. PPOOL_TRACKER_BIG_PAGES p;
  2149. //
  2150. // The low bit of the address is set to indicate a free entry. The high
  2151. // bit cannot be used because in some configurations the high bit is not
2152. // set for all kernel-mode addresses.
  2153. //
  2154. ASSERT (((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
  2155. retry:
  2156. Inserted = TRUE;
  2157. Hash = (ULONG)(((ULONG_PTR)Va >> PAGE_SHIFT) & PoolBigPageTableHash);
  2158. ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
  2159. while (((ULONG_PTR)PoolBigPageTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE) == 0) {
  2160. Hash += 1;
  2161. if (Hash >= PoolBigPageTableSize) {
  2162. if (!Inserted) {
  2163. //
  2164. // Try to expand the tracker table.
  2165. //
  2166. SizeInBytes = PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES);
  2167. NewSizeInBytes = (SizeInBytes << 1);
  2168. if (NewSizeInBytes > SizeInBytes) {
  2169. ExpLockNonPagedPool(LockHandle.OldIrql);
  2170. NewTable = MiAllocatePoolPages (NonPagedPool,
  2171. NewSizeInBytes,
  2172. FALSE);
  2173. ExpUnlockNonPagedPool(LockHandle.OldIrql);
  2174. if (NewTable != NULL) {
  2175. OldTable = (PVOID)PoolBigPageTable;
  2176. RtlCopyMemory ((PVOID)NewTable,
  2177. OldTable,
  2178. SizeInBytes);
  2179. RtlZeroMemory ((PVOID)(NewTable + PoolBigPageTableSize),
  2180. NewSizeInBytes - SizeInBytes);
  2181. //
  2182. // Mark all the new entries as free. Note this loop
  2183. // uses the fact that the table size always doubles.
  2184. //
  2185. i = (ULONG)PoolBigPageTableSize;
  2186. p = &NewTable[i];
  2187. for (i = 0; i < PoolBigPageTableSize; i += 1, p += 1) {
  2188. p->Va = (PVOID) POOL_BIG_TABLE_ENTRY_FREE;
  2189. }
  2190. PoolBigPageTable = NewTable;
  2191. PoolBigPageTableSize <<= 1;
  2192. PoolBigPageTableHash = PoolBigPageTableSize - 1;
  2193. ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
  2194. ExpLockNonPagedPool(LockHandle.OldIrql);
  2195. BigPages = MiFreePoolPages (OldTable);
  2196. ExpUnlockNonPagedPool(LockHandle.OldIrql);
  2197. ExpRemovePoolTracker ('looP',
  2198. BigPages * PAGE_SIZE,
  2199. NonPagedPool);
  2200. ExpInsertPoolTracker ('looP',
  2201. (ULONG) ROUND_TO_PAGES(NewSizeInBytes),
  2202. NonPagedPool);
  2203. goto retry;
  2204. }
  2205. }
  2206. if (!FirstPrint) {
  2207. KdPrint(("POOL:unable to insert big page slot %lx\n",Key));
  2208. FirstPrint = TRUE;
  2209. }
  2210. ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
  2211. return FALSE;
  2212. }
  2213. Hash = 0;
  2214. Inserted = FALSE;
  2215. }
  2216. }
  2217. p = &PoolBigPageTable[Hash];
  2218. ASSERT (((ULONG_PTR)p->Va & POOL_BIG_TABLE_ENTRY_FREE) != 0);
  2219. ASSERT (((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
  2220. p->Va = Va;
  2221. p->Key = Key;
  2222. p->NumberOfPages = NumberOfPages;
  2223. ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
  2224. return TRUE;
  2225. }
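//
// Editorial sketch (not original code): expansion keeps the hash invariant
// because the table only ever doubles, so PoolBigPageTableHash stays
// (size - 1) with size a power of two, e.g. for a hypothetical 8-entry
// table:
//
#if 0
    PoolBigPageTableSize <<= 1;                         // 8 -> 16 entries
    PoolBigPageTableHash = PoolBigPageTableSize - 1;    // mask 0x7 -> 0xF
#endif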
  2226. ULONG
  2227. ExpFindAndRemoveTagBigPages (
  2228. IN PVOID Va,
  2229. IN PULONG BigPages
  2230. )
  2231. {
  2232. ULONG Hash;
  2233. LOGICAL Inserted;
  2234. KIRQL OldIrql;
  2235. ULONG ReturnKey;
  2236. Inserted = TRUE;
  2237. Hash = (ULONG)(((ULONG_PTR)Va >> PAGE_SHIFT) & PoolBigPageTableHash);
  2238. ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
  2239. while (PoolBigPageTable[Hash].Va != Va) {
  2240. Hash += 1;
  2241. if (Hash >= PoolBigPageTableSize) {
  2242. if (!Inserted) {
  2243. if (!FirstPrint) {
  2244. KdPrint(("POOL:unable to find big page slot %lx\n",Va));
  2245. FirstPrint = TRUE;
  2246. }
  2247. ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
  2248. *BigPages = 0;
  2249. return ' GIB';
  2250. }
  2251. Hash = 0;
  2252. Inserted = FALSE;
  2253. }
  2254. }
  2255. ASSERT (((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
  2256. PoolBigPageTable[Hash].Va =
  2257. (PVOID)((ULONG_PTR)PoolBigPageTable[Hash].Va | POOL_BIG_TABLE_ENTRY_FREE);
  2258. *BigPages = PoolBigPageTable[Hash].NumberOfPages;
  2259. ReturnKey = PoolBigPageTable[Hash].Key;
  2260. ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
  2261. return ReturnKey;
  2262. }
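//
// Editorial note in code form (not original code): a freed slot ORs the
// free bit into the stored Va rather than zeroing it, so the stale address
// remains readable in the debugger while the probe loops still treat the
// slot as free. Because big allocations are page aligned, bit 0 of a live
// Va is always clear.
//
#if 0
    p->Va = (PVOID) ((ULONG_PTR) p->Va | POOL_BIG_TABLE_ENTRY_FREE);
#endif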
  2263. const char ExpProtectedPoolBlockMessage[] =
  2264. "EX: Invalid attempt to free protected pool block %x (%c%c%c%c)\n";
  2265. VOID
  2266. ExFreePoolWithTag (
  2267. IN PVOID P,
  2268. IN ULONG TagToFree
  2269. )
  2270. /*++
  2271. Routine Description:
  2272. This function deallocates a block of pool. This function is used to
  2273. deallocate to both the page aligned pools and the buddy (less than
  2274. a page) pools.
  2275. If the address of the block being deallocated is page-aligned, then
  2276. the page-aligned pool deallocator is used.
  2277. Otherwise, the binary buddy pool deallocator is used. Deallocation
  2278. looks at the allocated block's pool header to determine the pool
  2279. type and block size being deallocated. If the pool was allocated
  2280. using ExAllocatePoolWithQuota, then after the deallocation is
  2281. complete, the appropriate process's pool quota is adjusted to reflect
  2282. the deallocation, and the process object is dereferenced.
  2283. Arguments:
  2284. P - Supplies the address of the block of pool being deallocated.
  2285. TagToFree - Supplies the tag of the block being freed.
  2286. Return Value:
  2287. None.
  2288. --*/
  2289. {
  2290. PVOID OldValue;
  2291. POOL_TYPE CheckType;
  2292. PPOOL_HEADER Entry;
  2293. ULONG BlockSize;
  2294. KLOCK_QUEUE_HANDLE LockHandle;
  2295. PPOOL_HEADER NextEntry;
  2296. POOL_TYPE PoolType;
  2297. POOL_TYPE EntryPoolType;
  2298. PPOOL_DESCRIPTOR PoolDesc;
  2299. PEPROCESS ProcessBilled;
  2300. LOGICAL Combined;
  2301. ULONG BigPages;
  2302. SIZE_T NumberOfBytes;
  2303. ULONG Tag;
  2304. PKPRCB Prcb;
  2305. PGENERAL_LOOKASIDE LookasideList;
  2306. PERFINFO_FREEPOOL(P);
  2307. //
  2308. // Initializing LockHandle is not needed for correctness but without
2309. // it the compiler cannot compile this code at /W4 to check for use of
  2310. // uninitialized variables.
  2311. //
  2312. LockHandle.OldIrql = 0;
  2313. if (ExpPoolFlags & (EX_CHECK_POOL_FREES_FOR_ACTIVE_TIMERS |
  2314. EX_CHECK_POOL_FREES_FOR_ACTIVE_WORKERS |
  2315. EX_CHECK_POOL_FREES_FOR_ACTIVE_RESOURCES |
  2316. EX_KERNEL_VERIFIER_ENABLED |
  2317. EX_VERIFIER_DEADLOCK_DETECTION_ENABLED |
  2318. EX_SPECIAL_POOL_ENABLED)) {
  2319. if (ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) {
  2320. if (MmIsSpecialPoolAddress (P)) {
  2321. if (ExpPoolFlags & EX_VERIFIER_DEADLOCK_DETECTION_ENABLED) {
  2322. VerifierDeadlockFreePool (P, PAGE_SIZE);
  2323. }
  2324. MmFreeSpecialPool (P);
  2325. return;
  2326. }
  2327. }
  2328. if (!PAGE_ALIGNED(P)) {
  2329. Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD);
  2330. ASSERT_POOL_NOT_FREE(Entry);
  2331. PoolType = (Entry->PoolType & POOL_TYPE_MASK) - 1;
  2332. CheckType = PoolType & BASE_POOL_TYPE_MASK;
  2333. ASSERT_FREE_IRQL(PoolType, P);
  2334. ASSERT_POOL_TYPE_NOT_ZERO(Entry);
  2335. if (!IS_POOL_HEADER_MARKED_ALLOCATED(Entry)) {
  2336. KeBugCheckEx (BAD_POOL_CALLER,
  2337. 7,
  2338. __LINE__,
  2339. (ULONG_PTR)Entry->Ulong1,
  2340. (ULONG_PTR)P);
  2341. }
  2342. NumberOfBytes = (SIZE_T)Entry->BlockSize << POOL_BLOCK_SHIFT;
  2343. if (ExpPoolFlags & EX_VERIFIER_DEADLOCK_DETECTION_ENABLED) {
  2344. VerifierDeadlockFreePool (P, NumberOfBytes);
  2345. }
  2346. if (Entry->PoolType & POOL_VERIFIER_MASK) {
  2347. VerifierFreeTrackedPool (P,
  2348. NumberOfBytes,
  2349. CheckType,
  2350. FALSE);
  2351. }
  2352. //
  2353. // Check if an ERESOURCE is currently active in this memory block.
  2354. //
  2355. FREE_CHECK_ERESOURCE (Entry, NumberOfBytes);
  2356. //
  2357. // Check if a KTIMER is currently active in this memory block.
  2358. //
  2359. FREE_CHECK_KTIMER (Entry, NumberOfBytes);
  2360. //
  2361. // Look for work items still queued.
  2362. //
  2363. FREE_CHECK_WORKER (Entry, NumberOfBytes);
  2364. }
  2365. }
  2366. //
  2367. // If the entry is page aligned, then free the block to the page aligned
  2368. // pool. Otherwise, free the block to the allocation lists.
  2369. //
  2370. if (PAGE_ALIGNED(P)) {
  2371. PoolType = MmDeterminePoolType(P);
  2372. ASSERT_FREE_IRQL(PoolType, P);
  2373. CheckType = PoolType & BASE_POOL_TYPE_MASK;
  2374. if (PoolType == PagedPoolSession) {
  2375. PoolDesc = ExpSessionPoolDescriptor;
  2376. }
  2377. else {
  2378. PoolDesc = PoolVector[PoolType];
  2379. }
  2380. if ((PoolTrackTable != NULL) && (PoolType != PagedPoolSession)) {
  2381. Tag = ExpFindAndRemoveTagBigPages (P, &BigPages);
  2382. if (Tag & PROTECTED_POOL) {
  2383. Tag &= ~PROTECTED_POOL;
  2384. TagToFree &= ~PROTECTED_POOL;
  2385. if (Tag != TagToFree) {
  2386. DbgPrint ((char*)ExpProtectedPoolBlockMessage,
  2387. P,
  2388. Tag,
  2389. Tag >> 8,
  2390. Tag >> 16,
  2391. Tag >> 24);
  2392. DbgBreakPoint ();
  2393. }
  2394. }
  2395. ExpRemovePoolTracker (Tag, BigPages * PAGE_SIZE, PoolType);
  2396. }
  2397. LOCK_POOL(PoolDesc, LockHandle);
  2398. PoolDesc->RunningDeAllocs += 1;
  2399. //
  2400. // Large session pool allocations are accounted for directly by
  2401. // the memory manager so no need to call MiSessionPoolFreed here.
  2402. //
  2403. BigPages = MiFreePoolPages (P);
  2404. if (ExpPoolFlags & (EX_CHECK_POOL_FREES_FOR_ACTIVE_TIMERS |
  2405. EX_CHECK_POOL_FREES_FOR_ACTIVE_WORKERS |
  2406. EX_CHECK_POOL_FREES_FOR_ACTIVE_RESOURCES |
  2407. EX_VERIFIER_DEADLOCK_DETECTION_ENABLED)) {
  2408. NumberOfBytes = (SIZE_T)BigPages << PAGE_SHIFT;
  2409. if (ExpPoolFlags & EX_VERIFIER_DEADLOCK_DETECTION_ENABLED) {
  2410. VerifierDeadlockFreePool (P, NumberOfBytes);
  2411. }
  2412. //
  2413. // Check if an ERESOURCE is currently active in this memory block.
  2414. //
  2415. FREE_CHECK_ERESOURCE (P, NumberOfBytes);
  2416. //
  2417. // Check if a KTIMER is currently active in this memory block.
  2418. //
  2419. FREE_CHECK_KTIMER (P, NumberOfBytes);
  2420. //
  2421. // Search worker queues for work items still queued.
  2422. //
  2423. FREE_CHECK_WORKER (P, NumberOfBytes);
  2424. }
  2425. PoolDesc->TotalBigPages -= BigPages;
  2426. UNLOCK_POOL(PoolDesc, LockHandle);
  2427. return;
  2428. }
  2429. //
  2430. // Align the entry address to a pool allocation boundary.
  2431. //
  2432. Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD);
  2433. BlockSize = Entry->BlockSize;
  2434. EntryPoolType = Entry->PoolType;
  2435. PoolType = (Entry->PoolType & POOL_TYPE_MASK) - 1;
  2436. CheckType = PoolType & BASE_POOL_TYPE_MASK;
  2437. ASSERT_POOL_NOT_FREE (Entry);
  2438. ASSERT_FREE_IRQL (PoolType, P);
  2439. ASSERT_POOL_TYPE_NOT_ZERO (Entry);
  2440. if (!IS_POOL_HEADER_MARKED_ALLOCATED(Entry)) {
  2441. KeBugCheckEx (BAD_POOL_CALLER,
  2442. 7,
  2443. __LINE__,
  2444. (ULONG_PTR)Entry->Ulong1,
  2445. (ULONG_PTR)P);
  2446. }
  2447. PoolDesc = PoolVector[CheckType];
  2448. MARK_POOL_HEADER_FREED (Entry);
  2449. if (EntryPoolType & SESSION_POOL_MASK) {
  2450. if (CheckType == PagedPool) {
  2451. PoolDesc = ExpSessionPoolDescriptor;
  2452. }
  2453. else if (ExpNumberOfNonPagedPools > 1) {
  2454. PoolDesc = ExpNonPagedPoolDescriptor[DECODE_POOL_INDEX(Entry)];
  2455. }
  2456. //
  2457. // All session space allocations have an index of 0 unless there
  2458. // are multiple nonpaged (session) pools.
  2459. //
  2460. ASSERT ((DECODE_POOL_INDEX(Entry) == 0) || (ExpNumberOfNonPagedPools > 1));
  2461. //
  2462. // This allocation was in session space, let the memory
  2463. // manager know to delete it so it won't be considered in use on
  2464. // session exit.
  2465. //
  2466. MiSessionPoolFreed (P,
  2467. BlockSize << POOL_BLOCK_SHIFT,
  2468. CheckType);
  2469. }
  2470. else if (CheckType == PagedPool) {
  2471. ASSERT ((DECODE_POOL_INDEX(Entry) != 0) &&
  2472. (DECODE_POOL_INDEX(Entry) <= ExpNumberOfPagedPools));
  2473. PoolDesc = ExpPagedPoolDescriptor[DECODE_POOL_INDEX(Entry)];
  2474. }
  2475. else {
  2476. ASSERT ((DECODE_POOL_INDEX(Entry) == 0) || (ExpNumberOfNonPagedPools > 1));
  2477. if (ExpNumberOfNonPagedPools > 1) {
  2478. PoolDesc = ExpNonPagedPoolDescriptor[DECODE_POOL_INDEX(Entry)];
  2479. }
  2480. }
  2481. //
  2482. // If pool tagging is enabled, then update the pool tracking database.
  2483. // Otherwise, check to determine if quota was charged when the pool
  2484. // block was allocated.
  2485. //
  2486. #if defined (_WIN64)
  2487. Tag = Entry->PoolTag;
  2488. if (Tag & PROTECTED_POOL) {
  2489. Tag &= ~PROTECTED_POOL;
  2490. TagToFree &= ~PROTECTED_POOL;
  2491. if (Tag != TagToFree) {
  2492. DbgPrint ((char*)ExpProtectedPoolBlockMessage,
  2493. P,
  2494. Tag,
  2495. Tag >> 8,
  2496. Tag >> 16,
  2497. Tag >> 24);
  2498. DbgBreakPoint ();
  2499. }
  2500. }
  2501. if (PoolTrackTable != NULL) {
  2502. if ((EntryPoolType & SESSION_POOL_MASK) == 0) {
  2503. ExpRemovePoolTracker (Tag,
  2504. BlockSize << POOL_BLOCK_SHIFT,
  2505. PoolType);
  2506. }
  2507. }
  2508. #else
  2509. if (PoolTrackTable != NULL) {
  2510. Tag = Entry->PoolTag;
  2511. if (Tag & PROTECTED_POOL) {
  2512. Tag &= ~PROTECTED_POOL;
  2513. TagToFree &= ~PROTECTED_POOL;
  2514. if (Tag != TagToFree) {
  2515. DbgPrint ((char*)ExpProtectedPoolBlockMessage,
  2516. P,
  2517. Tag,
  2518. Tag >> 8,
  2519. Tag >> 16,
  2520. Tag >> 24);
  2521. DbgBreakPoint ();
  2522. }
  2523. }
  2524. if ((EntryPoolType & SESSION_POOL_MASK) == 0) {
  2525. ExpRemovePoolTracker (Tag,
  2526. BlockSize << POOL_BLOCK_SHIFT,
  2527. PoolType);
  2528. }
  2529. EntryPoolType &= ~POOL_QUOTA_MASK;
  2530. }
  2531. #endif
  2532. if (EntryPoolType & POOL_QUOTA_MASK) {
  2533. ProcessBilled = Entry->ProcessBilled;
  2534. if (ProcessBilled != NULL) {
  2535. PsReturnPoolQuota (ProcessBilled,
  2536. PoolType & BASE_POOL_TYPE_MASK,
  2537. BlockSize << POOL_BLOCK_SHIFT);
  2538. ObDereferenceObject (ProcessBilled);
  2539. }
  2540. }
  2541. //
  2542. // If the pool block is a small block, then attempt to free the block
  2543. // to the single entry lookaside list. If the free attempt fails, then
  2544. // free the block by merging it back into the pool data structures.
  2545. //
  2546. if ((BlockSize <= POOL_SMALL_LISTS) &&
  2547. ((EntryPoolType & SESSION_POOL_MASK) == 0) &&
  2548. (USING_HOT_COLD_METRICS == 0)) {
  2549. //
  2550. // Attempt to free the small block to a per processor lookaside list.
  2551. //
  2552. Prcb = KeGetCurrentPrcb ();
  2553. if (CheckType == PagedPool) {
  2554. //
  2555. // Only free the small block to the current processor's
  2556. // lookaside list if the block is local to this node.
  2557. //
  2558. if (KeNumberNodes > 1) {
  2559. if (Prcb->ParentNode->Color != PoolDesc->PoolIndex - 1) {
  2560. goto NoLookaside;
  2561. }
  2562. }
  2563. LookasideList = Prcb->PPPagedLookasideList[BlockSize - 1].P;
  2564. LookasideList->TotalFrees += 1;
  2565. CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P);
  2566. if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth) {
  2567. LookasideList->FreeHits += 1;
  2568. InterlockedPushEntrySList (&LookasideList->ListHead,
  2569. (PSINGLE_LIST_ENTRY)P);
  2570. CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P);
  2571. return;
  2572. }
  2573. LookasideList = Prcb->PPPagedLookasideList[BlockSize - 1].L;
  2574. LookasideList->TotalFrees += 1;
  2575. CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P);
  2576. if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth) {
  2577. LookasideList->FreeHits += 1;
  2578. InterlockedPushEntrySList (&LookasideList->ListHead,
  2579. (PSINGLE_LIST_ENTRY)P);
  2580. CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P);
  2581. return;
  2582. }
  2583. }
  2584. else {
  2585. //
  2586. // Only free the small block to the current processor's
  2587. // lookaside list if the block is local to this node.
  2588. //
  2589. if (KeNumberNodes > 1) {
  2590. if (Prcb->ParentNode->Color != PoolDesc->PoolIndex) {
  2591. goto NoLookaside;
  2592. }
  2593. }
  2594. LookasideList = Prcb->PPNPagedLookasideList[BlockSize - 1].P;
  2595. LookasideList->TotalFrees += 1;
  2596. CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P);
  2597. if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth) {
  2598. LookasideList->FreeHits += 1;
  2599. InterlockedPushEntrySList (&LookasideList->ListHead,
  2600. (PSINGLE_LIST_ENTRY)P);
  2601. CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P);
  2602. return;
  2603. }
  2604. LookasideList = Prcb->PPNPagedLookasideList[BlockSize - 1].L;
  2605. LookasideList->TotalFrees += 1;
  2606. CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P);
  2607. if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth) {
  2608. LookasideList->FreeHits += 1;
  2609. InterlockedPushEntrySList (&LookasideList->ListHead,
  2610. (PSINGLE_LIST_ENTRY)P);
  2611. CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P);
  2612. return;
  2613. }
  2614. }
  2615. }
  2616. NoLookaside:
  2617. //
2618. // If the pool block release can be queued so that the pool mutex/spinlock
2619. // acquisition/release can be amortized, then do so. Note that "hot" blocks
  2620. // are generally in the lookasides above to provide fast reuse to take
  2621. // advantage of hardware caching.
  2622. //
  2623. if (ExpPoolFlags & EX_DELAY_POOL_FREES) {
  2624. if (PoolDesc->PendingFreeDepth >= EXP_MAXIMUM_POOL_FREES_PENDING) {
  2625. ExDeferredFreePool (PoolDesc);
  2626. }
  2627. //
  2628. // Push this entry on the deferred list.
  2629. //
  2630. do {
  2631. OldValue = PoolDesc->PendingFrees;
  2632. ((PSINGLE_LIST_ENTRY)P)->Next = OldValue;
  2633. } while (InterlockedCompareExchangePointer (
  2634. &PoolDesc->PendingFrees,
  2635. P,
  2636. OldValue) != OldValue);
  2637. InterlockedIncrement (&PoolDesc->PendingFreeDepth);
  2638. return;
  2639. }
  2640. Combined = FALSE;
  2641. LOCK_POOL(PoolDesc, LockHandle);
  2642. CHECK_POOL_HEADER(__LINE__, Entry);
  2643. PoolDesc->RunningDeAllocs += 1;
  2644. //
  2645. // Free the specified pool block.
  2646. //
  2647. // Check to see if the next entry is free.
  2648. //
  2649. ASSERT (BlockSize == Entry->BlockSize);
  2650. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + BlockSize);
  2651. if (PAGE_END(NextEntry) == FALSE) {
  2652. if (NextEntry->PoolType == 0) {
  2653. //
  2654. // This block is free, combine with the released block.
  2655. //
  2656. Combined = TRUE;
  2657. //
  2658. // If the split pool block contains only a header, then
  2659. // it was not inserted and therefore cannot be removed.
  2660. //
  2661. // Note if the minimum pool block size is bigger than the
  2662. // header then there can be no blocks like this.
  2663. //
  2664. if ((POOL_OVERHEAD != POOL_SMALLEST_BLOCK) ||
  2665. (NextEntry->BlockSize != 1)) {
  2666. CHECK_LIST(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  2667. PrivateRemoveEntryList(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  2668. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Flink));
  2669. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Blink));
  2670. }
  2671. Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
  2672. }
  2673. }
  2674. //
  2675. // Check to see if the previous entry is free.
  2676. //
  2677. if (Entry->PreviousSize != 0) {
  2678. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry - Entry->PreviousSize);
  2679. if (NextEntry->PoolType == 0) {
  2680. //
  2681. // This block is free, combine with the released block.
  2682. //
  2683. Combined = TRUE;
  2684. //
  2685. // If the split pool block contains only a header, then
  2686. // it was not inserted and therefore cannot be removed.
  2687. //
  2688. // Note if the minimum pool block size is bigger than the
  2689. // header then there can be no blocks like this.
  2690. //
  2691. if ((POOL_OVERHEAD != POOL_SMALLEST_BLOCK) ||
  2692. (NextEntry->BlockSize != 1)) {
  2693. CHECK_LIST(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  2694. PrivateRemoveEntryList(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  2695. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Flink));
  2696. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Blink));
  2697. }
  2698. NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
  2699. Entry = NextEntry;
  2700. }
  2701. }
  2702. //
  2703. // If the block being freed has been combined into a full page,
  2704. // then return the free page to memory management.
  2705. //
  2706. if (PAGE_ALIGNED(Entry) &&
  2707. (PAGE_END((PPOOL_BLOCK)Entry + Entry->BlockSize) != FALSE)) {
  2708. PoolDesc->TotalPages -= 1;
  2709. //
  2710. // If the pool type is paged pool, then the global paged pool mutex
  2711. // must be held during the free of the pool pages.
  2712. //
  2713. if ((PoolDesc->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
  2714. if (ExpNumberOfNonPagedPools > 1) {
  2715. KeReleaseInStackQueuedSpinLock (&LockHandle);
  2716. ExpLockNonPagedPool (LockHandle.OldIrql);
  2717. }
  2718. }
  2719. else {
  2720. if ((EntryPoolType & SESSION_POOL_MASK) == 0) {
  2721. ExReleaseFastMutex ((PFAST_MUTEX)PoolDesc->LockAddress);
  2722. ExAcquireFastMutex (ExpPagedPoolMutex);
  2723. }
  2724. }
  2725. PERFINFO_FREEPOOLPAGE(CheckType, Entry->PoolIndex, Entry, PoolDesc);
  2726. MiFreePoolPages (Entry);
  2727. if ((PoolDesc->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {
  2728. ExpUnlockNonPagedPool (LockHandle.OldIrql);
  2729. }
  2730. else if ((EntryPoolType & SESSION_POOL_MASK) == 0) {
  2731. ExReleaseFastMutex (ExpPagedPoolMutex);
  2732. }
  2733. else {
  2734. ExReleaseFastMutex ((PFAST_MUTEX)PoolDesc->LockAddress);
  2735. }
  2736. }
  2737. else {
  2738. //
  2739. // Insert this element into the list.
  2740. //
  2741. Entry->PoolType = 0;
  2742. BlockSize = Entry->BlockSize;
  2743. ASSERT (BlockSize != 1);
  2744. //
  2745. // If the freed block was combined with any other block, then
  2746. // adjust the size of the next block if necessary.
  2747. //
  2748. if (Combined != FALSE) {
  2749. //
  2750. // The size of this entry has changed, if this entry is
  2751. // not the last one in the page, update the pool block
  2752. // after this block to have a new previous allocation size.
  2753. //
  2754. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + BlockSize);
  2755. if (PAGE_END(NextEntry) == FALSE) {
  2756. NextEntry->PreviousSize = (USHORT) BlockSize;
  2757. }
  2758. }
  2759. //
  2760. // Always insert at the head in hopes of reusing cache lines.
  2761. //
  2762. PrivateInsertHeadList (&PoolDesc->ListHeads[BlockSize - 1],
  2763. ((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD)));
  2764. CHECK_LIST(((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD)));
  2765. UNLOCK_POOL(PoolDesc, LockHandle);
  2766. }
  2767. }
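//
// Editorial sketch (not original code): the boundary-tag coalescing used by
// the free path above, reduced to its essentials. BlockSize and PreviousSize
// (both in POOL_BLOCK units) reach each neighbor in O(1); free-list removal
// and fixing the follower's PreviousSize are omitted here for brevity.
// ExpSketchCoalesce is a hypothetical name.
//
#if 0
VOID
ExpSketchCoalesce (
    IN PPOOL_HEADER Entry
    )
{
    PPOOL_HEADER Neighbor;

    //
    // Merge forward: the next header sits BlockSize POOL_BLOCKs ahead.
    //
    Neighbor = (PPOOL_HEADER) ((PPOOL_BLOCK) Entry + Entry->BlockSize);
    if ((PAGE_END (Neighbor) == FALSE) && (Neighbor->PoolType == 0)) {
        Entry->BlockSize = Entry->BlockSize + Neighbor->BlockSize;
    }

    //
    // Merge backward: PreviousSize reaches the preceding header, which
    // absorbs this block if it is free.
    //
    if (Entry->PreviousSize != 0) {
        Neighbor = (PPOOL_HEADER) ((PPOOL_BLOCK) Entry - Entry->PreviousSize);
        if (Neighbor->PoolType == 0) {
            Neighbor->BlockSize = Neighbor->BlockSize + Entry->BlockSize;
            Entry = Neighbor;
        }
    }
}
#endif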
  2768. VOID
  2769. ExFreePool (
  2770. IN PVOID P
  2771. )
  2772. {
  2773. ExFreePoolWithTag(P, 0);
  2774. return;
  2775. }
  2776. VOID
  2777. ExDeferredFreePool (
  2778. IN PPOOL_DESCRIPTOR PoolDesc
  2779. )
  2780. /*++
  2781. Routine Description:
  2782. This routine frees a number of pool allocations at once to amortize the
  2783. synchronization overhead cost.
  2784. Arguments:
  2785. PoolDesc - Supplies the relevant pool descriptor.
  2786. Return Value:
  2787. None.
  2788. Environment:
  2789. Kernel mode. May be as high as APC_LEVEL for paged pool or DISPATCH_LEVEL
  2790. for nonpaged pool.
  2791. --*/
  2792. {
  2793. LONG ListCount;
  2794. KLOCK_QUEUE_HANDLE LockHandle;
  2795. POOL_TYPE CheckType;
  2796. PPOOL_HEADER Entry;
  2797. ULONG Index;
  2798. ULONG WholePageCount;
  2799. PPOOL_HEADER NextEntry;
  2800. ULONG PoolIndex;
  2801. LOGICAL Combined;
  2802. LOGICAL GlobalSpace;
  2803. PSINGLE_LIST_ENTRY SingleListEntry;
  2804. PSINGLE_LIST_ENTRY NextSingleListEntry;
  2805. PSINGLE_LIST_ENTRY FirstEntry;
  2806. PSINGLE_LIST_ENTRY LastEntry;
  2807. PSINGLE_LIST_ENTRY WholePages;
  2808. GlobalSpace = TRUE;
  2809. if (PoolDesc == ExpSessionPoolDescriptor) {
  2810. GlobalSpace = FALSE;
  2811. }
  2812. CheckType = PoolDesc->PoolType & BASE_POOL_TYPE_MASK;
  2813. //
  2814. // Initializing LockHandle is not needed for correctness but without
2815. // it the compiler cannot compile this code at /W4 to check for use of
  2816. // uninitialized variables.
  2817. //
  2818. LockHandle.OldIrql = 0;
  2819. ListCount = 0;
  2820. WholePages = NULL;
  2821. WholePageCount = 0;
  2822. LOCK_POOL(PoolDesc, LockHandle);
  2823. if (PoolDesc->PendingFrees == NULL) {
  2824. UNLOCK_POOL(PoolDesc, LockHandle);
  2825. return;
  2826. }
  2827. //
  2828. // Free each deferred pool entry until they're all done.
  2829. //
  2830. LastEntry = NULL;
  2831. do {
  2832. SingleListEntry = PoolDesc->PendingFrees;
  2833. FirstEntry = SingleListEntry;
  2834. do {
  2835. NextSingleListEntry = SingleListEntry->Next;
  2836. //
  2837. // Process the deferred entry.
  2838. //
  2839. ListCount += 1;
  2840. Entry = (PPOOL_HEADER)((PCHAR)SingleListEntry - POOL_OVERHEAD);
  2841. PoolIndex = DECODE_POOL_INDEX(Entry);
  2842. //
  2843. // Process the block.
  2844. //
  2845. Combined = FALSE;
  2846. CHECK_POOL_HEADER(__LINE__, Entry);
  2847. PoolDesc->RunningDeAllocs += 1;
  2848. //
  2849. // Free the specified pool block.
  2850. //
  2851. // Check to see if the next entry is free.
  2852. //
  2853. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + Entry->BlockSize);
  2854. if (PAGE_END(NextEntry) == FALSE) {
  2855. if (NextEntry->PoolType == 0) {
  2856. //
  2857. // This block is free, combine with the released block.
  2858. //
  2859. Combined = TRUE;
  2860. //
  2861. // If the split pool block contains only a header, then
  2862. // it was not inserted and therefore cannot be removed.
  2863. //
  2864. // Note if the minimum pool block size is bigger than the
  2865. // header then there can be no blocks like this.
  2866. //
  2867. if ((POOL_OVERHEAD != POOL_SMALLEST_BLOCK) ||
  2868. (NextEntry->BlockSize != 1)) {
  2869. CHECK_LIST(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  2870. PrivateRemoveEntryList(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  2871. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Flink));
  2872. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Blink));
  2873. }
  2874. Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
  2875. }
  2876. }
  2877. //
  2878. // Check to see if the previous entry is free.
  2879. //
  2880. if (Entry->PreviousSize != 0) {
  2881. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry - Entry->PreviousSize);
  2882. if (NextEntry->PoolType == 0) {
  2883. //
  2884. // This block is free, combine with the released block.
  2885. //
  2886. Combined = TRUE;
  2887. //
  2888. // If the split pool block contains only a header, then
  2889. // it was not inserted and therefore cannot be removed.
  2890. //
  2891. // Note if the minimum pool block size is bigger than the
  2892. // header then there can be no blocks like this.
  2893. //
  2894. if ((POOL_OVERHEAD != POOL_SMALLEST_BLOCK) ||
  2895. (NextEntry->BlockSize != 1)) {
  2896. CHECK_LIST(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  2897. PrivateRemoveEntryList(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  2898. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Flink));
  2899. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Blink));
  2900. }
  2901. NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
  2902. Entry = NextEntry;
  2903. }
  2904. }
  2905. //
  2906. // If the block being freed has been combined into a full page,
  2907. // then return the free page to memory management.
  2908. //
  2909. if (PAGE_ALIGNED(Entry) &&
  2910. (PAGE_END((PPOOL_BLOCK)Entry + Entry->BlockSize) != FALSE)) {
  2911. ((PSINGLE_LIST_ENTRY)Entry)->Next = WholePages;
  2912. WholePages = (PSINGLE_LIST_ENTRY) Entry;
  2913. WholePageCount += 1;
  2914. }
  2915. else {
  2916. //
  2917. // Insert this element into the list.
  2918. //
  2919. Entry->PoolType = 0;
  2920. ENCODE_POOL_INDEX(Entry, PoolIndex);
  2921. Index = Entry->BlockSize;
  2922. ASSERT (Index != 1);
  2923. //
  2924. // If the freed block was combined with any other block, then
  2925. // adjust the size of the next block if necessary.
  2926. //
  2927. if (Combined != FALSE) {
  2928. //
  2929. // The size of this entry has changed, if this entry is
  2930. // not the last one in the page, update the pool block
  2931. // after this block to have a new previous allocation size.
  2932. //
  2933. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + Index);
  2934. if (PAGE_END(NextEntry) == FALSE) {
  2935. NextEntry->PreviousSize = (USHORT) Index;
  2936. }
  2937. }
  2938. //
  2939. // Always insert at the head in hopes of reusing cache lines.
  2940. //
  2941. PrivateInsertHeadList(&PoolDesc->ListHeads[Index - 1], ((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD)));
  2942. CHECK_LIST(((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD)));
  2943. }
  2944. //
  2945. // March on to the next entry if there is one.
  2946. //
  2947. if (NextSingleListEntry == LastEntry) {
  2948. break;
  2949. }
  2950. SingleListEntry = NextSingleListEntry;
  2951. } while (TRUE);
  2952. if ((PoolDesc->PendingFrees == FirstEntry) &&
  2953. (InterlockedCompareExchangePointer (&PoolDesc->PendingFrees,
  2954. NULL,
  2955. FirstEntry) == FirstEntry)) {
  2956. break;
  2957. }
  2958. LastEntry = FirstEntry;
  2959. } while (TRUE);
  2960. if (WholePages != NULL) {
  2961. //
  2962. // If the pool type is paged pool, then the global paged pool mutex
  2963. // must be held during the free of the pool pages. Hence any
  2964. // full pages were batched up and are now dealt with in one go.
  2965. //
  2966. Entry = (PPOOL_HEADER) WholePages;
  2967. PoolDesc->TotalPages -= WholePageCount;
  2968. if (GlobalSpace == TRUE) {
  2969. if ((CheckType & BASE_POOL_TYPE_MASK) == PagedPool) {
  2970. ExReleaseFastMutex ((PFAST_MUTEX)PoolDesc->LockAddress);
  2971. ExAcquireFastMutex (ExpPagedPoolMutex);
  2972. }
  2973. else if (ExpNumberOfNonPagedPools > 1) {
  2974. KeReleaseInStackQueuedSpinLock (&LockHandle);
  2975. ExpLockNonPagedPool (LockHandle.OldIrql);
  2976. }
  2977. }
  2978. do {
  2979. NextEntry = (PPOOL_HEADER) (((PSINGLE_LIST_ENTRY)Entry)->Next);
  2980. PERFINFO_FREEPOOLPAGE(CheckType, PoolIndex, Entry, PoolDesc);
  2981. MiFreePoolPages (Entry);
  2982. Entry = NextEntry;
  2983. } while (Entry != NULL);
  2984. if (GlobalSpace == FALSE) {
  2985. ExReleaseFastMutex ((PFAST_MUTEX)PoolDesc->LockAddress);
  2986. }
  2987. else if ((CheckType & BASE_POOL_TYPE_MASK) == PagedPool) {
  2988. ExReleaseFastMutex (ExpPagedPoolMutex);
  2989. }
  2990. else {
  2991. ExpUnlockNonPagedPool (LockHandle.OldIrql);
  2992. }
  2993. }
  2994. else {
  2995. UNLOCK_POOL(PoolDesc, LockHandle);
  2996. }
  2997. InterlockedExchangeAdd (&PoolDesc->PendingFreeDepth, (0 - ListCount));
  2998. return;
  2999. }
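//
// Editorial sketch (not original code): the producer side of PendingFrees.
// ExFreePoolWithTag pushes with this compare-exchange loop; the drain above
// snapshots the chain head and detaches it only if it is unchanged, so any
// blocks pushed mid-drain are simply picked up on the next pass.
//
#if 0
    PVOID OldValue;

    do {
        OldValue = PoolDesc->PendingFrees;
        ((PSINGLE_LIST_ENTRY) P)->Next = OldValue;
    } while (InterlockedCompareExchangePointer (&PoolDesc->PendingFrees,
                                                P,
                                                OldValue) != OldValue);
#endif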
  3000. SIZE_T
  3001. ExQueryPoolBlockSize (
  3002. IN PVOID PoolBlock,
  3003. OUT PBOOLEAN QuotaCharged
  3004. )
  3005. /*++
  3006. Routine Description:
  3007. This function returns the size of the pool block.
  3008. Arguments:
  3009. PoolBlock - Supplies the address of the block of pool.
  3010. QuotaCharged - Supplies a BOOLEAN variable to receive whether or not the
  3011. pool block had quota charged.
  3012. NOTE: If the entry is bigger than a page, the value PAGE_SIZE is returned
  3013. rather than the correct number of bytes.
  3014. Return Value:
  3015. Size of pool block.
  3016. --*/
  3017. {
  3018. PPOOL_HEADER Entry;
  3019. SIZE_T size;
  3020. if ((ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) &&
  3021. (MmIsSpecialPoolAddress (PoolBlock))) {
  3022. *QuotaCharged = FALSE;
  3023. return MmQuerySpecialPoolBlockSize (PoolBlock);
  3024. }
  3025. if (PAGE_ALIGNED(PoolBlock)) {
  3026. *QuotaCharged = FALSE;
  3027. return PAGE_SIZE;
  3028. }
  3029. Entry = (PPOOL_HEADER)((PCHAR)PoolBlock - POOL_OVERHEAD);
  3030. size = (ULONG)((Entry->BlockSize << POOL_BLOCK_SHIFT) - POOL_OVERHEAD);
  3031. #ifdef _WIN64
  3032. *QuotaCharged = (BOOLEAN) (Entry->ProcessBilled != NULL);
  3033. #else
3034. if (PoolTrackTable != NULL) {
  3035. *QuotaCharged = FALSE;
  3036. }
  3037. else {
  3038. *QuotaCharged = (BOOLEAN) (Entry->ProcessBilled != NULL);
  3039. }
  3040. #endif
  3041. return size;
  3042. }
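//
// Illustrative usage sketch (editorial; Buffer is hypothetical): for small
// blocks the return value is the usable payload (header excluded); anything
// page aligned simply reports PAGE_SIZE.
//
#if 0
    BOOLEAN Charged;
    SIZE_T Usable;

    Usable = ExQueryPoolBlockSize (Buffer, &Charged);
#endif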
VOID
ExQueryPoolUsage(
    OUT PULONG PagedPoolPages,
    OUT PULONG NonPagedPoolPages,
    OUT PULONG PagedPoolAllocs,
    OUT PULONG PagedPoolFrees,
    OUT PULONG PagedPoolLookasideHits,
    OUT PULONG NonPagedPoolAllocs,
    OUT PULONG NonPagedPoolFrees,
    OUT PULONG NonPagedPoolLookasideHits
    )
{
    ULONG Index;
    PGENERAL_LOOKASIDE Lookaside;
    PLIST_ENTRY NextEntry;
    PPOOL_DESCRIPTOR pd;

    //
    // Sum all the paged pool usage.
    //

    *PagedPoolPages = 0;
    *PagedPoolAllocs = 0;
    *PagedPoolFrees = 0;

    for (Index = 0; Index < ExpNumberOfPagedPools + 1; Index += 1) {
        pd = ExpPagedPoolDescriptor[Index];
        *PagedPoolPages += pd->TotalPages + pd->TotalBigPages;
        *PagedPoolAllocs += pd->RunningAllocs;
        *PagedPoolFrees += pd->RunningDeAllocs;
    }

    //
    // Sum all the nonpaged pool usage.
    //

    pd = &NonPagedPoolDescriptor;

    *NonPagedPoolPages = pd->TotalPages + pd->TotalBigPages;
    *NonPagedPoolAllocs = pd->RunningAllocs;
    *NonPagedPoolFrees = pd->RunningDeAllocs;

    //
    // Sum all the lookaside hits for paged and nonpaged pool.
    //

    NextEntry = ExPoolLookasideListHead.Flink;
    while (NextEntry != &ExPoolLookasideListHead) {

        Lookaside = CONTAINING_RECORD(NextEntry,
                                      GENERAL_LOOKASIDE,
                                      ListEntry);

        if (Lookaside->Type == NonPagedPool) {
            *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
        }
        else {
            *PagedPoolLookasideHits += Lookaside->AllocateHits;
        }

        NextEntry = NextEntry->Flink;
    }

    return;
}
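
//
// Illustrative sketch (not part of the build): the lookaside walk above
// uses the standard CONTAINING_RECORD idiom to recover the enclosing
// structure from an embedded LIST_ENTRY.  With a hypothetical structure:
//

#if 0
typedef struct _WIDGET {
    ULONG Value;
    LIST_ENTRY ListEntry;           // embedded link, not necessarily first
} WIDGET, *PWIDGET;

VOID
SketchWalkWidgets (
    IN PLIST_ENTRY ListHead
    )
{
    PLIST_ENTRY NextEntry;
    PWIDGET Widget;

    for (NextEntry = ListHead->Flink;
         NextEntry != ListHead;
         NextEntry = NextEntry->Flink) {

        //
        // Subtract the offset of the embedded LIST_ENTRY to get back to
        // the start of the WIDGET, just as the loop above recovers each
        // GENERAL_LOOKASIDE from its ListEntry field.
        //

        Widget = CONTAINING_RECORD (NextEntry, WIDGET, ListEntry);
    }
}
#endif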
VOID
ExReturnPoolQuota (
    IN PVOID P
    )

/*++

Routine Description:

    This function returns quota charged to a subject process when the
    specified pool block was allocated.

Arguments:

    P - Supplies the address of the block of pool being deallocated.

Return Value:

    None.

--*/

{
    PPOOL_HEADER Entry;
    POOL_TYPE PoolType;
    PEPROCESS Process;

    //
    // Do nothing for special pool.  No quota was charged.
    //

    if ((ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) &&
        (MmIsSpecialPoolAddress (P))) {

        return;
    }

    //
    // Align the entry address to a pool allocation boundary.
    //

    Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD);

    //
    // If quota was charged, then return the appropriate quota to the
    // subject process.
    //

    if ((Entry->PoolType & POOL_QUOTA_MASK) && POOL_QUOTA_ENABLED) {

        PoolType = (Entry->PoolType & POOL_TYPE_MASK) - 1;
        Entry->PoolType &= ~POOL_QUOTA_MASK;
        Process = Entry->ProcessBilled;

        if (Process != NULL) {
            PsReturnPoolQuota(Process,
                              PoolType & BASE_POOL_TYPE_MASK,
                              (ULONG)Entry->BlockSize << POOL_BLOCK_SHIFT);
            ObDereferenceObject(Process);
        }
    }

    return;
}
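
//
// Note on the encoding above: the PoolType field in a pool header stores
// the POOL_TYPE value biased by one (zero is reserved to mean "free
// block"), and POOL_QUOTA_MASK is a flag bit OR'ed into the same field
// when quota was charged at allocation time.  Hence the sequence here:
// mask with POOL_TYPE_MASK and subtract one to recover the POOL_TYPE
// value, clear POOL_QUOTA_MASK from the header, and reduce the type with
// BASE_POOL_TYPE_MASK to PagedPool or NonPagedPool before calling
// PsReturnPoolQuota.
//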
#if DBG || (i386 && !FPO)

//
// Only works on checked builds or free x86 builds with FPO turned off.
// See comment in mm\allocpag.c.
//

NTSTATUS
ExpSnapShotPoolPages(
    IN PVOID Address,
    IN ULONG Size,
    IN OUT PSYSTEM_POOL_INFORMATION PoolInformation,
    IN OUT PSYSTEM_POOL_ENTRY *PoolEntryInfo,
    IN ULONG Length,
    IN OUT PULONG RequiredLength
    )
{
    NTSTATUS Status;
    CLONG i;
    PPOOL_HEADER p;
    PPOOL_TRACKER_BIG_PAGES PoolBig;
    LOGICAL ValidSplitBlock;
    ULONG EntrySize;
    KIRQL OldIrql;

    if (PAGE_ALIGNED(Address) && PoolBigPageTable) {

        ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);

        PoolBig = PoolBigPageTable;

        for (i = 0; i < PoolBigPageTableSize; i += 1, PoolBig += 1) {

            if (PoolBig->NumberOfPages == 0 || PoolBig->Va != Address) {
                continue;
            }

            PoolInformation->NumberOfEntries += 1;
            *RequiredLength += sizeof(SYSTEM_POOL_ENTRY);

            if (Length < *RequiredLength) {
                Status = STATUS_INFO_LENGTH_MISMATCH;
            }
            else {
                (*PoolEntryInfo)->Allocated = TRUE;
                (*PoolEntryInfo)->Size = PoolBig->NumberOfPages << PAGE_SHIFT;
                (*PoolEntryInfo)->AllocatorBackTraceIndex = 0;
                (*PoolEntryInfo)->ProcessChargedQuota = 0;
#if !DBG
                if (NtGlobalFlag & FLG_POOL_ENABLE_TAGGING)
#endif //!DBG
                (*PoolEntryInfo)->TagUlong = PoolBig->Key;
                (*PoolEntryInfo) += 1;
                Status = STATUS_SUCCESS;
            }

            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
            return Status;
        }

        ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
    }

    p = (PPOOL_HEADER)Address;
    ValidSplitBlock = FALSE;

    if (Size == PAGE_SIZE && p->PreviousSize == 0 && p->BlockSize != 0) {

        PPOOL_HEADER PoolAddress;
        PPOOL_HEADER EndPoolAddress;

        //
        // Validate all the pool links before we regard this as a page that
        // has been split into small pool blocks.
        //

        PoolAddress = p;
        EndPoolAddress = (PPOOL_HEADER)((PCHAR) p + PAGE_SIZE);

        do {
            EntrySize = PoolAddress->BlockSize << POOL_BLOCK_SHIFT;
            PoolAddress = (PPOOL_HEADER)((PCHAR)PoolAddress + EntrySize);

            if (PoolAddress == EndPoolAddress) {
                ValidSplitBlock = TRUE;
                break;
            }

            if (PoolAddress > EndPoolAddress) {
                break;
            }

            if (PoolAddress->PreviousSize != EntrySize) {
                break;
            }

        } while (EntrySize != 0);
    }

    if (ValidSplitBlock == TRUE) {

        p = (PPOOL_HEADER)Address;

        do {
            EntrySize = p->BlockSize << POOL_BLOCK_SHIFT;

            if (EntrySize == 0) {
                return STATUS_COMMITMENT_LIMIT;
            }

            PoolInformation->NumberOfEntries += 1;
            *RequiredLength += sizeof(SYSTEM_POOL_ENTRY);

            if (Length < *RequiredLength) {
                Status = STATUS_INFO_LENGTH_MISMATCH;
            }
            else {
                (*PoolEntryInfo)->Size = EntrySize;
                if (p->PoolType != 0) {
                    (*PoolEntryInfo)->Allocated = TRUE;
                    (*PoolEntryInfo)->AllocatorBackTraceIndex = 0;
                    (*PoolEntryInfo)->ProcessChargedQuota = 0;
#if !DBG
                    if (NtGlobalFlag & FLG_POOL_ENABLE_TAGGING)
#endif //!DBG
                    (*PoolEntryInfo)->TagUlong = p->PoolTag;
                }
                else {
                    (*PoolEntryInfo)->Allocated = FALSE;
                    (*PoolEntryInfo)->AllocatorBackTraceIndex = 0;
                    (*PoolEntryInfo)->ProcessChargedQuota = 0;
#if !defined(DBG) && !defined(_WIN64)
                    if (NtGlobalFlag & FLG_POOL_ENABLE_TAGGING)
#endif //!DBG && !_WIN64
                    (*PoolEntryInfo)->TagUlong = p->PoolTag;
                }
                (*PoolEntryInfo) += 1;
                Status = STATUS_SUCCESS;
            }

            p = (PPOOL_HEADER)((PCHAR)p + EntrySize);

        } while (PAGE_END(p) == FALSE);
    }
    else {

        PoolInformation->NumberOfEntries += 1;
        *RequiredLength += sizeof(SYSTEM_POOL_ENTRY);

        if (Length < *RequiredLength) {
            Status = STATUS_INFO_LENGTH_MISMATCH;
        }
        else {
            (*PoolEntryInfo)->Allocated = TRUE;
            (*PoolEntryInfo)->Size = Size;
            (*PoolEntryInfo)->AllocatorBackTraceIndex = 0;
            (*PoolEntryInfo)->ProcessChargedQuota = 0;
            (*PoolEntryInfo) += 1;
            Status = STATUS_SUCCESS;
        }
    }

    return Status;
}
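
//
// Illustrative sketch (not part of the build): the validation loop above
// treats a page as a chain of variable-sized blocks and accepts it only
// if the chain tiles the page exactly and every block's back link agrees
// with its predecessor's size.  A standalone model of that walk, with a
// hypothetical header that stores sizes directly in bytes (the real
// header stores them in POOL_BLOCK_SHIFT units), looks like this:
//

#if 0
typedef struct _SKETCH_HEADER {
    ULONG PreviousSize;             // byte size of the preceding block
    ULONG BlockSize;                // byte size of this block
} SKETCH_HEADER, *PSKETCH_HEADER;

LOGICAL
SketchIsValidSplitPage (
    IN PSKETCH_HEADER Page,
    IN ULONG PageSize
    )
{
    PSKETCH_HEADER Block;
    PSKETCH_HEADER End;
    ULONG EntrySize;

    Block = Page;
    End = (PSKETCH_HEADER)((PCHAR)Page + PageSize);

    do {
        EntrySize = Block->BlockSize;
        Block = (PSKETCH_HEADER)((PCHAR)Block + EntrySize);

        if (Block == End) {
            return TRUE;            // the blocks tile the page exactly
        }

        if (Block > End) {
            return FALSE;           // a block overruns the page
        }

        if (Block->PreviousSize != EntrySize) {
            return FALSE;           // forward and backward links disagree
        }

    } while (EntrySize != 0);       // a zero-sized block never advances

    return FALSE;
}
#endif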
NTSTATUS
ExSnapShotPool (
    IN POOL_TYPE PoolType,
    IN PSYSTEM_POOL_INFORMATION PoolInformation,
    IN ULONG Length,
    OUT PULONG ReturnLength OPTIONAL
    )
{
    ULONG Index;
    PVOID Lock;
    KLOCK_QUEUE_HANDLE LockHandle;
    PPOOL_DESCRIPTOR PoolDesc;
    ULONG RequiredLength;
    NTSTATUS Status;
    KLOCK_QUEUE_HANDLE LockHandles[EXP_MAXIMUM_POOL_NODES];

    RequiredLength = FIELD_OFFSET(SYSTEM_POOL_INFORMATION, Entries);

    if (Length < RequiredLength) {
        return STATUS_INFO_LENGTH_MISMATCH;
    }

    Status = STATUS_SUCCESS;

    //
    // Initializing PoolDesc is not needed for correctness but without
    // it the compiler cannot compile this code W4 to check for use of
    // uninitialized variables.
    //

    PoolDesc = NULL;

    //
    // If the pool type is paged, then lock all of the paged pools.
    // Otherwise, lock the nonpaged pool.
    //

    if (PoolType == PagedPool) {

        Index = 0;
        KeRaiseIrql(APC_LEVEL, &LockHandle.OldIrql);

        do {
            Lock = ExpPagedPoolDescriptor[Index]->LockAddress;
            ExAcquireFastMutex((PFAST_MUTEX)Lock);
            Index += 1;
        } while (Index < ExpNumberOfPagedPools);
    }
    else {

        ASSERT (PoolType == NonPagedPool);

        ExpLockNonPagedPool(LockHandle.OldIrql);

        if (ExpNumberOfNonPagedPools > 1) {
            Index = 0;
            do {
                Lock = ExpNonPagedPoolDescriptor[Index]->LockAddress;
                KeAcquireInStackQueuedSpinLock (Lock, &LockHandles[Index]);
                Index += 1;
            } while (Index < ExpNumberOfNonPagedPools);
        }
    }

    try {

        PoolInformation->EntryOverhead = POOL_OVERHEAD;
        PoolInformation->NumberOfEntries = 0;

        Status = MmSnapShotPool (PoolType,
                                 ExpSnapShotPoolPages,
                                 PoolInformation,
                                 Length,
                                 &RequiredLength);

    } except (EXCEPTION_EXECUTE_HANDLER) {

        //
        // Return success at this point even if the results
        // cannot be written.
        //

        NOTHING;
    }

    //
    // If the pool type is paged, then unlock all of the paged pools.
    // Otherwise, unlock the nonpaged pool.
    //

    if (PoolType == PagedPool) {

        Index = 0;
        do {
            Lock = ExpPagedPoolDescriptor[Index]->LockAddress;
            ExReleaseFastMutex ((PFAST_MUTEX)Lock);
            Index += 1;
        } while (Index < ExpNumberOfPagedPools);

        KeLowerIrql (LockHandle.OldIrql);
    }
    else {

        if (ExpNumberOfNonPagedPools > 1) {
            Index = 0;
            do {
                KeReleaseInStackQueuedSpinLock (&LockHandles[Index]);
                Index += 1;
            } while (Index < ExpNumberOfNonPagedPools);
        }

        //
        // Release the main nonpaged pool lock last so the IRQL does not
        // prematurely drop below APC_LEVEL, which would open a window where
        // a suspend APC could stop us.
        //

        ExpUnlockNonPagedPool (LockHandle.OldIrql);
    }

    if (ARGUMENT_PRESENT(ReturnLength)) {
        *ReturnLength = RequiredLength;
    }

    return Status;
}
#endif // DBG || (i386 && !FPO)

VOID
ExAllocatePoolSanityChecks(
    IN POOL_TYPE PoolType,
    IN SIZE_T NumberOfBytes
    )

/*++

Routine Description:

    This function performs sanity checks on the caller.

Return Value:

    None.

Environment:

    Only enabled as part of the driver verification package.

--*/

{
    if (NumberOfBytes == 0) {
        KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                      0x0,
                      KeGetCurrentIrql(),
                      PoolType,
                      NumberOfBytes);
    }

    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
        if (KeGetCurrentIrql() > APC_LEVEL) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x1,
                          KeGetCurrentIrql(),
                          PoolType,
                          NumberOfBytes);
        }
    }
    else {
        if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x2,
                          KeGetCurrentIrql(),
                          PoolType,
                          NumberOfBytes);
        }
    }
}
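
//
// Illustrative sketch (not part of the build): the checks above encode the
// standard IRQL rules for pool callers -- paged pool may only be touched
// at or below APC_LEVEL, nonpaged pool at or below DISPATCH_LEVEL.  A
// caller-side guard (hypothetical routine and tag) would look like this:
//

#if 0
PVOID
SketchAllocate (
    IN POOL_TYPE PoolType,
    IN SIZE_T NumberOfBytes
    )
{
    //
    // Mirror the verifier checks before calling the real allocator.
    //

    ASSERT (NumberOfBytes != 0);
    ASSERT (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
                (KeGetCurrentIrql() <= APC_LEVEL) :
                (KeGetCurrentIrql() <= DISPATCH_LEVEL));

    return ExAllocatePoolWithTag (PoolType, NumberOfBytes, 'hctS');
}
#endif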
VOID
ExFreePoolSanityChecks (
    IN PVOID P
    )

/*++

Routine Description:

    This function performs sanity checks on the caller.

Return Value:

    None.

Environment:

    Only enabled as part of the driver verification package.

--*/

{
    PPOOL_HEADER Entry;
    POOL_TYPE PoolType;
    PVOID StillQueued;

    if (P <= (PVOID)(MM_HIGHEST_USER_ADDRESS)) {
        KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                      0x10,
                      (ULONG_PTR)P,
                      0,
                      0);
    }

    if ((ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) &&
        (MmIsSpecialPoolAddress (P))) {

        KeCheckForTimer (P, PAGE_SIZE - BYTE_OFFSET (P));

        //
        // Check if an ERESOURCE is currently active in this memory block.
        //

        StillQueued = ExpCheckForResource(P, PAGE_SIZE - BYTE_OFFSET (P));

        if (StillQueued != NULL) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x17,
                          (ULONG_PTR)StillQueued,
                          (ULONG_PTR)-1,
                          (ULONG_PTR)P);
        }

        ExpCheckForWorker (P, PAGE_SIZE - BYTE_OFFSET (P)); // bugchecks inside

        return;
    }

    if (PAGE_ALIGNED(P)) {

        PoolType = MmDeterminePoolType(P);

        if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
            if (KeGetCurrentIrql() > APC_LEVEL) {
                KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                              0x11,
                              KeGetCurrentIrql(),
                              PoolType,
                              (ULONG_PTR)P);
            }
        }
        else {
            if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
                KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                              0x12,
                              KeGetCurrentIrql(),
                              PoolType,
                              (ULONG_PTR)P);
            }
        }

        //
        // Just check the first page.
        //

        KeCheckForTimer(P, PAGE_SIZE);

        //
        // Check if an ERESOURCE is currently active in this memory block.
        //

        StillQueued = ExpCheckForResource(P, PAGE_SIZE);

        if (StillQueued != NULL) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x17,
                          (ULONG_PTR)StillQueued,
                          PoolType,
                          (ULONG_PTR)P);
        }
    }
    else {

        if (((ULONG_PTR)P & (POOL_OVERHEAD - 1)) != 0) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x16,
                          __LINE__,
                          (ULONG_PTR)P,
                          0);
        }

        Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD);

        if ((Entry->PoolType & POOL_TYPE_MASK) == 0) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x13,
                          __LINE__,
                          (ULONG_PTR)Entry,
                          Entry->Ulong1);
        }

        PoolType = (Entry->PoolType & POOL_TYPE_MASK) - 1;

        if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
            if (KeGetCurrentIrql() > APC_LEVEL) {
                KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                              0x11,
                              KeGetCurrentIrql(),
                              PoolType,
                              (ULONG_PTR)P);
            }
        }
        else {
            if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
                KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                              0x12,
                              KeGetCurrentIrql(),
                              PoolType,
                              (ULONG_PTR)P);
            }
        }

        if (!IS_POOL_HEADER_MARKED_ALLOCATED(Entry)) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x14,
                          __LINE__,
                          (ULONG_PTR)Entry,
                          0);
        }

        KeCheckForTimer(Entry, (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT));

        //
        // Check if an ERESOURCE is currently active in this memory block.
        //

        StillQueued = ExpCheckForResource(Entry,
                                          (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT));

        if (StillQueued != NULL) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x17,
                          (ULONG_PTR)StillQueued,
                          PoolType,
                          (ULONG_PTR)P);
        }
    }
}
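
//
// Note on the alignment check above: masking with (POOL_OVERHEAD - 1)
// assumes POOL_OVERHEAD is a power of two, so the test cheaply rejects
// any pointer that is not on a pool-block boundary before the header is
// ever dereferenced.  For example, with an 8-byte overhead a free of an
// address ending in 0x4 bugchecks with code 0x16 rather than reading a
// misaligned, bogus header.
//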
#if defined (NT_UP)

VOID
ExpBootFinishedDispatch (
    IN PKDPC Dpc,
    IN PVOID DeferredContext,
    IN PVOID SystemArgument1,
    IN PVOID SystemArgument2
    )

/*++

Routine Description:

    This function is called when the system has booted into a shell.

    Its job is to disable various pool optimizations that were enabled to
    speed up booting and reduce the memory footprint on small machines.

Arguments:

    Dpc - Supplies a pointer to a control object of type DPC.

    DeferredContext - Optional deferred context; not used.

    SystemArgument1 - Optional argument 1; not used.

    SystemArgument2 - Optional argument 2; not used.

Return Value:

    None.

Environment:

    DISPATCH_LEVEL since this is called from a timer expiration.

--*/

{
    UNREFERENCED_PARAMETER (Dpc);
    UNREFERENCED_PARAMETER (DeferredContext);
    UNREFERENCED_PARAMETER (SystemArgument1);
    UNREFERENCED_PARAMETER (SystemArgument2);

    //
    // Pretty much all pages are "hot" after bootup.  Since bootup has
    // finished, use lookaside lists and stop trying to separate regular
    // allocations as well.
    //

    RtlInterlockedAndBitsDiscardReturn (&ExpPoolFlags,
                                        (ULONG)~EX_SEPARATE_HOT_PAGES_DURING_BOOT);
}
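
//
// Illustrative sketch (not part of the build): a deferred routine like
// ExpBootFinishedDispatch is typically armed during initialization with a
// kernel timer whose expiration queues the DPC at DISPATCH_LEVEL.  With
// hypothetical globals and an assumed 30-second delay, the arming
// sequence would look like this:
//

#if 0
KDPC SketchBootFinishedDpc;
KTIMER SketchBootFinishedTimer;

VOID
SketchArmBootFinishedTimer (
    VOID
    )
{
    LARGE_INTEGER DueTime;

    KeInitializeDpc (&SketchBootFinishedDpc, ExpBootFinishedDispatch, NULL);
    KeInitializeTimer (&SketchBootFinishedTimer);

    //
    // Relative due times are negative, in 100-nanosecond units;
    // fire once, 30 seconds from now.
    //

    DueTime.QuadPart = -30 * 1000 * 1000 * 10;

    KeSetTimer (&SketchBootFinishedTimer, DueTime, &SketchBootFinishedDpc);
}
#endif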
#endif