Leaked source code of Windows Server 2003

  1. /*++
  2. Copyright (c) 1989-1994 Microsoft Corporation
  3. Module Name:
  4. pool.c
  5. Abstract:
  6. This module implements the NT executive pool allocator.
  7. Author:
  8. Mark Lucovsky 16-Feb-1989
  9. Lou Perazzoli 31-Aug-1991 (change from binary buddy)
  10. David N. Cutler (davec) 27-May-1994
  11. Landy Wang 17-Oct-1997
  12. Environment:
  13. Kernel mode only
  14. Revision History:
  15. --*/
  16. #include "exp.h"
  17. #pragma hdrstop
  18. #undef ExAllocatePoolWithTag
  19. #undef ExAllocatePool
  20. #undef ExAllocatePoolWithQuota
  21. #undef ExAllocatePoolWithQuotaTag
  22. #undef ExFreePool
  23. #undef ExFreePoolWithTag
  24. //
  25. // These bitfield definitions are based on EX_POOL_PRIORITY in inc\ex.h.
  26. //
  27. #define POOL_SPECIAL_POOL_BIT 0x8
  28. #define POOL_SPECIAL_POOL_UNDERRUN_BIT 0x1
  29. #if defined (_WIN64)
30. #define InterlockedExchangeAddSizeT(a, b) InterlockedExchangeAdd64((PLONGLONG)(a), b)
  31. #else
  32. #define InterlockedExchangeAddSizeT(a, b) InterlockedExchangeAdd((PLONG)(a), b)
  33. #endif
  34. //
  35. // Define forward referenced function prototypes.
  36. //
  37. #ifdef ALLOC_PRAGMA
  38. PVOID
  39. ExpAllocateStringRoutine (
  40. IN SIZE_T NumberOfBytes
  41. );
  42. VOID
  43. ExDeferredFreePool (
  44. IN PPOOL_DESCRIPTOR PoolDesc
  45. );
  46. VOID
  47. ExpSeedHotTags (
  48. VOID
  49. );
  50. NTSTATUS
  51. ExGetSessionPoolTagInfo (
  52. IN PVOID SystemInformation,
  53. IN ULONG SystemInformationLength,
  54. IN OUT PULONG ReturnedEntries,
  55. IN OUT PULONG ActualEntries
  56. );
  57. NTSTATUS
  58. ExGetPoolTagInfo (
  59. IN PVOID SystemInformation,
  60. IN ULONG SystemInformationLength,
  61. IN OUT PULONG ReturnLength OPTIONAL
  62. );
  63. #pragma alloc_text(PAGE, ExpAllocateStringRoutine)
  64. #pragma alloc_text(INIT, InitializePool)
  65. #pragma alloc_text(INIT, ExpSeedHotTags)
  66. #pragma alloc_text(PAGE, ExInitializePoolDescriptor)
  67. #pragma alloc_text(PAGE, ExDrainPoolLookasideList)
  68. #pragma alloc_text(PAGE, ExCreatePoolTagTable)
  69. #pragma alloc_text(PAGE, ExGetSessionPoolTagInfo)
  70. #pragma alloc_text(PAGE, ExGetPoolTagInfo)
  71. #pragma alloc_text(PAGEVRFY, ExAllocatePoolSanityChecks)
  72. #pragma alloc_text(PAGEVRFY, ExFreePoolSanityChecks)
  73. #pragma alloc_text(POOLCODE, ExAllocatePoolWithTag)
  74. #pragma alloc_text(POOLCODE, ExFreePool)
  75. #pragma alloc_text(POOLCODE, ExFreePoolWithTag)
  76. #pragma alloc_text(POOLCODE, ExDeferredFreePool)
  77. #endif
  78. #if defined (NT_UP)
  79. #define USING_HOT_COLD_METRICS (ExpPoolFlags & EX_SEPARATE_HOT_PAGES_DURING_BOOT)
  80. #else
  81. #define USING_HOT_COLD_METRICS 0
  82. #endif
  83. #define EXP_MAXIMUM_POOL_FREES_PENDING 32
  84. PPOOL_DESCRIPTOR ExpSessionPoolDescriptor;
  85. PGENERAL_LOOKASIDE ExpSessionPoolLookaside;
  86. PPOOL_TRACKER_TABLE ExpSessionPoolTrackTable;
  87. SIZE_T ExpSessionPoolTrackTableSize;
  88. SIZE_T ExpSessionPoolTrackTableMask;
  89. PPOOL_TRACKER_BIG_PAGES ExpSessionPoolBigPageTable;
  90. SIZE_T ExpSessionPoolBigPageTableSize;
  91. SIZE_T ExpSessionPoolBigPageTableHash;
  92. ULONG ExpSessionPoolSmallLists;
  93. #if DBG
  94. ULONG ExpLargeSessionPoolUnTracked;
  95. #endif
  96. ULONG FirstPrint;
  97. extern SIZE_T MmSizeOfNonPagedPoolInBytes;
  98. #if defined (NT_UP)
  99. KDPC ExpBootFinishedTimerDpc;
  100. KTIMER ExpBootFinishedTimer;
  101. VOID
  102. ExpBootFinishedDispatch (
  103. IN PKDPC Dpc,
  104. IN PVOID DeferredContext,
  105. IN PVOID SystemArgument1,
  106. IN PVOID SystemArgument2
  107. );
  108. #else
  109. #if defined (_WIN64)
  110. #define MAXIMUM_PROCESSOR_TAG_TABLES 64 // Must be a power of 2.
  111. #else
  112. #define MAXIMUM_PROCESSOR_TAG_TABLES 32 // Must be a power of 2.
  113. #endif
  114. PPOOL_TRACKER_TABLE ExPoolTagTables[MAXIMUM_PROCESSOR_TAG_TABLES];
  115. #endif
  116. #define DEFAULT_TRACKER_TABLE 1024
  117. PPOOL_TRACKER_TABLE PoolTrackTable;
  118. //
  119. // Registry-overridable, but must be a power of 2.
  120. //
  121. SIZE_T PoolTrackTableSize;
  122. SIZE_T PoolTrackTableMask;
  123. PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
  124. SIZE_T PoolTrackTableExpansionSize;
  125. SIZE_T PoolTrackTableExpansionPages;
  126. #define DEFAULT_BIGPAGE_TABLE 4096
  127. PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
  128. //
  129. // Registry-overridable, but must be a power of 2.
  130. //
  131. SIZE_T PoolBigPageTableSize; // Must be a power of 2.
  132. SIZE_T PoolBigPageTableHash;
  133. #define POOL_BIG_TABLE_ENTRY_FREE 0x1
  134. ULONG PoolHitTag = 0xffffff0f;
  135. #define POOLTAG_HASH(Key,TableMask) (((40543*((((((((PUCHAR)&Key)[0]<<2)^((PUCHAR)&Key)[1])<<2)^((PUCHAR)&Key)[2])<<2)^((PUCHAR)&Key)[3]))>>2) & (ULONG)TableMask)
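//
// For illustration, an equivalent readable form of POOLTAG_HASH (not part
// of the original source, excluded from the build by #if 0): the four tag
// bytes are folded together with shift-and-xor, scaled by the prime 40543,
// and masked to the table size - which is why the tables must be sized as
// powers of 2.
//
#if 0
ULONG
ExpPoolTagHashSketch (
    IN ULONG Key,
    IN SIZE_T TableMask
    )
{
    PUCHAR b = (PUCHAR) &Key;
    ULONG Fold;

    Fold = ((((b[0] << 2) ^ b[1]) << 2) ^ b[2]);
    Fold = (Fold << 2) ^ b[3];
    return ((40543 * Fold) >> 2) & (ULONG) TableMask;
}
#endif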
  136. VOID
  137. ExpInsertPoolTracker (
  138. IN ULONG Key,
  139. IN SIZE_T NumberOfBytes,
  140. IN POOL_TYPE PoolType
  141. );
  142. VOID
  143. ExpInsertPoolTrackerExpansion (
  144. IN ULONG Key,
  145. IN SIZE_T NumberOfBytes,
  146. IN POOL_TYPE PoolType
  147. );
  148. VOID
  149. ExpRemovePoolTracker (
  150. IN ULONG Key,
  151. IN SIZE_T NumberOfBytes,
  152. IN POOL_TYPE PoolType
  153. );
  154. VOID
  155. ExpRemovePoolTrackerExpansion (
  156. IN ULONG Key,
  157. IN SIZE_T NumberOfBytes,
  158. IN POOL_TYPE PoolType
  159. );
  160. LOGICAL
  161. ExpAddTagForBigPages (
  162. IN PVOID Va,
  163. IN ULONG Key,
  164. IN ULONG NumberOfPages,
  165. IN POOL_TYPE PoolType
  166. );
  167. ULONG
  168. ExpFindAndRemoveTagBigPages (
  169. IN PVOID Va,
  170. OUT PULONG BigPages,
  171. IN POOL_TYPE PoolType
  172. );
  173. PVOID
  174. ExpAllocateStringRoutine (
  175. IN SIZE_T NumberOfBytes
  176. )
  177. {
  178. return ExAllocatePoolWithTag (PagedPool,NumberOfBytes,'grtS');
  179. }
  180. BOOLEAN
  181. ExOkayToLockRoutine (
  182. IN PVOID Lock
  183. )
  184. {
  185. UNREFERENCED_PARAMETER (Lock);
  186. if (KeIsExecutingDpc()) {
  187. return FALSE;
  188. }
  189. else {
  190. return TRUE;
  191. }
  192. }
  193. #ifdef ALLOC_DATA_PRAGMA
  194. #pragma const_seg("PAGECONST")
  195. #endif
  196. const PRTL_ALLOCATE_STRING_ROUTINE RtlAllocateStringRoutine = ExpAllocateStringRoutine;
  197. const PRTL_FREE_STRING_ROUTINE RtlFreeStringRoutine = (PRTL_FREE_STRING_ROUTINE)ExFreePool;
  198. #ifdef ALLOC_DATA_PRAGMA
  199. #pragma const_seg()
  200. #endif
  201. ULONG ExPoolFailures;
  202. //
  203. // Define macros to pack and unpack a pool index.
  204. //
  205. #define ENCODE_POOL_INDEX(POOLHEADER,INDEX) {(POOLHEADER)->PoolIndex = ((UCHAR)(INDEX));}
  206. #define DECODE_POOL_INDEX(POOLHEADER) ((ULONG)((POOLHEADER)->PoolIndex))
  207. //
  208. // The allocated bit carefully overlays the unused cachealign bit in the type.
  209. //
  210. #define POOL_IN_USE_MASK 0x4
  211. #define MARK_POOL_HEADER_FREED(POOLHEADER) {(POOLHEADER)->PoolType &= ~POOL_IN_USE_MASK;}
  212. #define IS_POOL_HEADER_MARKED_ALLOCATED(POOLHEADER) ((POOLHEADER)->PoolType & POOL_IN_USE_MASK)
  213. //
  214. // The hotpage bit carefully overlays the raise bit in the type.
  215. //
  216. #define POOL_HOTPAGE_MASK POOL_RAISE_IF_ALLOCATION_FAILURE
  217. //
  218. // Define the number of paged pools. This value may be overridden at boot
  219. // time.
  220. //
  221. ULONG ExpNumberOfPagedPools = NUMBER_OF_PAGED_POOLS;
  222. ULONG ExpNumberOfNonPagedPools = 1;
  223. //
  224. // The pool descriptor for nonpaged pool is static.
  225. // The pool descriptors for paged pool are dynamically allocated
  226. // since there can be more than one paged pool. There is always one more
  227. // paged pool descriptor than there are paged pools. This descriptor is
  228. // used when a page allocation is done for a paged pool and is the first
  229. // descriptor in the paged pool descriptor array.
  230. //
  231. POOL_DESCRIPTOR NonPagedPoolDescriptor;
  232. #define EXP_MAXIMUM_POOL_NODES 16
  233. PPOOL_DESCRIPTOR ExpNonPagedPoolDescriptor[EXP_MAXIMUM_POOL_NODES];
  234. //
  235. // The pool vector contains an array of pointers to pool descriptors. For
  236. // nonpaged pool this is just a pointer to the nonpaged pool descriptor.
  237. // For paged pool, this is a pointer to an array of pool descriptors.
  238. // The pointer to the paged pool descriptor is duplicated so
  239. // it can be found easily by the kernel debugger.
  240. //
  241. PPOOL_DESCRIPTOR PoolVector[NUMBER_OF_POOLS];
  242. PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[EXP_MAXIMUM_POOL_NODES + 1];
  243. PKGUARDED_MUTEX ExpPagedPoolMutex;
  244. volatile ULONG ExpPoolIndex = 1;
  245. KSPIN_LOCK ExpTaggedPoolLock;
  246. #if DBG
  247. LONG ExConcurrentQuotaPool;
  248. LONG ExConcurrentQuotaPoolMax;
  249. PSZ PoolTypeNames[MaxPoolType] = {
  250. "NonPaged",
  251. "Paged",
  252. "NonPagedMustSucceed",
  253. "NotUsed",
  254. "NonPagedCacheAligned",
  255. "PagedCacheAligned",
  256. "NonPagedCacheAlignedMustS"
  257. };
  258. #endif //DBG
  259. //
  260. // Define paged and nonpaged pool lookaside descriptors.
  261. //
  262. GENERAL_LOOKASIDE ExpSmallNPagedPoolLookasideLists[POOL_SMALL_LISTS];
  263. GENERAL_LOOKASIDE ExpSmallPagedPoolLookasideLists[POOL_SMALL_LISTS];
  264. #define LOCK_POOL(PoolDesc, LockHandle) { \
  265. if ((PoolDesc->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { \
  266. if (PoolDesc == &NonPagedPoolDescriptor) { \
  267. LockHandle.OldIrql = KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock); \
  268. } \
  269. else { \
  270. ASSERT (ExpNumberOfNonPagedPools > 1); \
  271. KeAcquireInStackQueuedSpinLock (PoolDesc->LockAddress, &LockHandle); \
  272. } \
  273. } \
  274. else { \
  275. KeAcquireGuardedMutex ((PKGUARDED_MUTEX)PoolDesc->LockAddress); \
  276. } \
  277. }
  278. #define UNLOCK_POOL(PoolDesc, LockHandle) { \
  279. if ((PoolDesc->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { \
  280. if (PoolDesc == &NonPagedPoolDescriptor) { \
  281. KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, LockHandle.OldIrql); \
  282. } \
  283. else { \
  284. ASSERT (ExpNumberOfNonPagedPools > 1); \
  285. KeReleaseInStackQueuedSpinLock (&LockHandle); \
  286. } \
  287. } \
  288. else { \
  289. KeReleaseGuardedMutex ((PKGUARDED_MUTEX)PoolDesc->LockAddress); \
  290. } \
  291. }
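//
// A minimal usage sketch for the two macros above (illustrative only, not
// part of the original source): the LockHandle is only consumed on the
// nonpaged spinlock paths; the guarded mutex path ignores it.
//
#if 0
{
    KLOCK_QUEUE_HANDLE LockHandle;

    LOCK_POOL (PoolDesc, LockHandle);

    //
    // ... manipulate PoolDesc->ListHeads while the pool is locked ...
    //

    UNLOCK_POOL (PoolDesc, LockHandle);
}
#endif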
  292. #ifndef NO_POOL_CHECKS
  293. //
  294. // We redefine the LIST_ENTRY macros to have each pointer biased
  295. // by one so any rogue code using these pointers will access
  296. // violate. See \nt\public\sdk\inc\ntrtl.h for the original
  297. // definition of these macros.
  298. //
  299. // This is turned off in the shipping product.
  300. //
  301. #define DecodeLink(Link) ((PLIST_ENTRY)((ULONG_PTR)(Link) & ~1))
  302. #define EncodeLink(Link) ((PLIST_ENTRY)((ULONG_PTR)(Link) | 1))
  303. #define PrivateInitializeListHead(ListHead) ( \
  304. (ListHead)->Flink = (ListHead)->Blink = EncodeLink(ListHead))
  305. #define PrivateIsListEmpty(ListHead) \
  306. (DecodeLink((ListHead)->Flink) == (ListHead))
  307. #define PrivateRemoveHeadList(ListHead) \
  308. DecodeLink((ListHead)->Flink); \
  309. {PrivateRemoveEntryList(DecodeLink((ListHead)->Flink))}
  310. #define PrivateRemoveTailList(ListHead) \
  311. DecodeLink((ListHead)->Blink); \
  312. {PrivateRemoveEntryList(DecodeLink((ListHead)->Blink))}
  313. #define PrivateRemoveEntryList(Entry) { \
  314. PLIST_ENTRY _EX_Blink; \
  315. PLIST_ENTRY _EX_Flink; \
  316. _EX_Flink = DecodeLink((Entry)->Flink); \
  317. _EX_Blink = DecodeLink((Entry)->Blink); \
  318. _EX_Blink->Flink = EncodeLink(_EX_Flink); \
  319. _EX_Flink->Blink = EncodeLink(_EX_Blink); \
  320. }
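//
// Illustration of the bias-by-one encoding above (not part of the original
// source): every Flink/Blink is stored with its low bit set, so rogue code
// that walks a freed block's list links without DecodeLink dereferences a
// misaligned pointer and faults immediately instead of corrupting pool.
//
#if 0
{
    LIST_ENTRY Head;
    PLIST_ENTRY Stored;

    PrivateInitializeListHead (&Head);
    Stored = Head.Flink;                    // Biased value, low bit set.
    ASSERT (DecodeLink (Stored) == &Head);  // Decode recovers the pointer.
    // Dereferencing Stored directly would access violate.
}
#endif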
  321. #define CHECK_LIST(LIST) \
  322. if ((DecodeLink(DecodeLink((LIST)->Flink)->Blink) != (LIST)) || \
  323. (DecodeLink(DecodeLink((LIST)->Blink)->Flink) != (LIST))) { \
  324. KeBugCheckEx (BAD_POOL_HEADER, \
  325. 3, \
  326. (ULONG_PTR)LIST, \
  327. (ULONG_PTR)DecodeLink(DecodeLink((LIST)->Flink)->Blink), \
  328. (ULONG_PTR)DecodeLink(DecodeLink((LIST)->Blink)->Flink)); \
  329. }
  330. #define PrivateInsertTailList(ListHead,Entry) { \
  331. PLIST_ENTRY _EX_Blink; \
  332. PLIST_ENTRY _EX_ListHead; \
  333. _EX_ListHead = (ListHead); \
  334. CHECK_LIST(_EX_ListHead); \
  335. _EX_Blink = DecodeLink(_EX_ListHead->Blink); \
  336. (Entry)->Flink = EncodeLink(_EX_ListHead); \
  337. (Entry)->Blink = EncodeLink(_EX_Blink); \
  338. _EX_Blink->Flink = EncodeLink(Entry); \
  339. _EX_ListHead->Blink = EncodeLink(Entry); \
  340. CHECK_LIST(_EX_ListHead); \
  341. }
  342. #define PrivateInsertHeadList(ListHead,Entry) { \
  343. PLIST_ENTRY _EX_Flink; \
  344. PLIST_ENTRY _EX_ListHead; \
  345. _EX_ListHead = (ListHead); \
  346. CHECK_LIST(_EX_ListHead); \
  347. _EX_Flink = DecodeLink(_EX_ListHead->Flink); \
  348. (Entry)->Flink = EncodeLink(_EX_Flink); \
  349. (Entry)->Blink = EncodeLink(_EX_ListHead); \
  350. _EX_Flink->Blink = EncodeLink(Entry); \
  351. _EX_ListHead->Flink = EncodeLink(Entry); \
  352. CHECK_LIST(_EX_ListHead); \
  353. }
  354. VOID
  355. FORCEINLINE
  356. ExCheckPoolHeader (
  357. IN PPOOL_HEADER Entry
  358. )
  359. {
  360. PPOOL_HEADER PreviousEntry;
  361. PPOOL_HEADER NextEntry;
  362. if (Entry->PreviousSize != 0) {
  363. PreviousEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry - Entry->PreviousSize);
  364. if (PAGE_ALIGN (Entry) != PAGE_ALIGN (PreviousEntry)) {
  365. KeBugCheckEx (BAD_POOL_HEADER,
  366. 6,
  367. (ULONG_PTR) PreviousEntry,
  368. __LINE__,
  369. (ULONG_PTR)Entry);
  370. }
  371. if ((PreviousEntry->BlockSize != Entry->PreviousSize) ||
  372. (DECODE_POOL_INDEX(PreviousEntry) != DECODE_POOL_INDEX(Entry))) {
  373. KeBugCheckEx (BAD_POOL_HEADER,
  374. 5,
  375. (ULONG_PTR) PreviousEntry,
  376. __LINE__,
  377. (ULONG_PTR)Entry);
  378. }
  379. }
  380. else if (!PAGE_ALIGNED (Entry)) {
  381. KeBugCheckEx (BAD_POOL_HEADER,
  382. 7,
  383. 0,
  384. __LINE__,
  385. (ULONG_PTR)Entry);
  386. }
  387. if (Entry->BlockSize == 0) {
  388. KeBugCheckEx (BAD_POOL_HEADER,
  389. 8,
  390. 0,
  391. __LINE__,
  392. (ULONG_PTR)Entry);
  393. }
  394. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + Entry->BlockSize);
  395. if (!PAGE_END(NextEntry)) {
  396. if (PAGE_ALIGN (Entry) != PAGE_ALIGN (NextEntry)) {
  397. KeBugCheckEx (BAD_POOL_HEADER,
  398. 9,
  399. (ULONG_PTR) NextEntry,
  400. __LINE__,
  401. (ULONG_PTR)Entry);
  402. }
  403. if ((NextEntry->PreviousSize != (Entry)->BlockSize) ||
  404. (DECODE_POOL_INDEX(NextEntry) != DECODE_POOL_INDEX(Entry))) {
  405. KeBugCheckEx (BAD_POOL_HEADER,
  406. 5,
  407. (ULONG_PTR) NextEntry,
  408. __LINE__,
  409. (ULONG_PTR)Entry);
  410. }
  411. }
  412. }
  413. #define CHECK_POOL_HEADER(ENTRY) ExCheckPoolHeader(ENTRY)
  414. #define ASSERT_ALLOCATE_IRQL(_PoolType, _NumberOfBytes) \
  415. if ((_PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { \
  416. if (KeGetCurrentIrql() > APC_LEVEL) { \
  417. KeBugCheckEx (BAD_POOL_CALLER, 8, KeGetCurrentIrql(), _PoolType, _NumberOfBytes); \
  418. } \
  419. } \
  420. else { \
  421. if (KeGetCurrentIrql() > DISPATCH_LEVEL) { \
  422. KeBugCheckEx (BAD_POOL_CALLER, 8, KeGetCurrentIrql(), _PoolType, _NumberOfBytes); \
  423. } \
  424. }
  425. #define ASSERT_FREE_IRQL(_PoolType, _P) \
  426. if ((_PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { \
  427. if (KeGetCurrentIrql() > APC_LEVEL) { \
  428. KeBugCheckEx (BAD_POOL_CALLER, 9, KeGetCurrentIrql(), _PoolType, (ULONG_PTR)_P); \
  429. } \
  430. } \
  431. else { \
  432. if (KeGetCurrentIrql() > DISPATCH_LEVEL) { \
433. KeBugCheckEx (BAD_POOL_CALLER, 9, KeGetCurrentIrql(), _PoolType, (ULONG_PTR)_P); \
  434. } \
  435. }
  436. #define ASSERT_POOL_NOT_FREE(_Entry) \
  437. if ((_Entry->PoolType & POOL_TYPE_MASK) == 0) { \
  438. KeBugCheckEx (BAD_POOL_CALLER, 6, __LINE__, (ULONG_PTR)_Entry, _Entry->Ulong1); \
  439. }
  440. #define ASSERT_POOL_TYPE_NOT_ZERO(_Entry) \
  441. if (_Entry->PoolType == 0) { \
  442. KeBugCheckEx(BAD_POOL_CALLER, 1, (ULONG_PTR)_Entry, (ULONG_PTR)(*(PULONG)_Entry), 0); \
  443. }
  444. #define CHECK_POOL_PAGE(PAGE) \
  445. { \
  446. PPOOL_HEADER P = (PPOOL_HEADER)PAGE_ALIGN(PAGE); \
  447. ULONG SIZE = 0; \
  448. LOGICAL FOUND=FALSE; \
  449. ASSERT (P->PreviousSize == 0); \
  450. do { \
  451. if (P == (PPOOL_HEADER)PAGE) { \
  452. FOUND = TRUE; \
  453. } \
  454. CHECK_POOL_HEADER(P); \
  455. SIZE += P->BlockSize; \
  456. P = (PPOOL_HEADER)((PPOOL_BLOCK)P + P->BlockSize); \
  457. } while ((SIZE < (PAGE_SIZE / POOL_SMALLEST_BLOCK)) && \
  458. (PAGE_END(P) == FALSE)); \
  459. if ((PAGE_END(P) == FALSE) || (FOUND == FALSE)) { \
  460. KeBugCheckEx (BAD_POOL_HEADER, 0xA, (ULONG_PTR) PAGE, __LINE__, (ULONG_PTR) P); \
  461. } \
  462. }
  463. #else
  464. #define DecodeLink(Link) ((PLIST_ENTRY)((ULONG_PTR)(Link)))
  465. #define EncodeLink(Link) ((PLIST_ENTRY)((ULONG_PTR)(Link)))
  466. #define PrivateInitializeListHead InitializeListHead
  467. #define PrivateIsListEmpty IsListEmpty
  468. #define PrivateRemoveHeadList RemoveHeadList
  469. #define PrivateRemoveTailList RemoveTailList
  470. #define PrivateRemoveEntryList RemoveEntryList
  471. #define PrivateInsertTailList InsertTailList
  472. #define PrivateInsertHeadList InsertHeadList
  473. #define ASSERT_ALLOCATE_IRQL(_PoolType, _P) {NOTHING;}
  474. #define ASSERT_FREE_IRQL(_PoolType, _P) {NOTHING;}
  475. #define ASSERT_POOL_NOT_FREE(_Entry) {NOTHING;}
  476. #define ASSERT_POOL_TYPE_NOT_ZERO(_Entry) {NOTHING;}
  477. //
478. // The check list macros come in two flavors - there is one in the checked
479. // and free build that will bugcheck the system if a list is ill-formed, and
480. // there is one for the final shipping version that has all the checks
481. // disabled.
482. //
483. // The check lookaside list macros also come in two flavors and are used to
484. // verify that the lookaside lists are well formed.
485. //
486. // The check pool header macro (two flavors) verifies that the specified
487. // pool header matches the preceding and succeeding pool headers.
  488. //
  489. #define CHECK_LIST(LIST) {NOTHING;}
  490. #define CHECK_POOL_HEADER(ENTRY) {NOTHING;}
  491. #define CHECK_POOL_PAGE(PAGE) {NOTHING;}
  492. #endif
  493. #define EX_FREE_POOL_BACKTRACE_LENGTH 8
  494. typedef struct _EX_FREE_POOL_TRACES {
  495. PETHREAD Thread;
  496. PVOID PoolAddress;
  497. POOL_HEADER PoolHeader;
  498. PVOID StackTrace [EX_FREE_POOL_BACKTRACE_LENGTH];
  499. } EX_FREE_POOL_TRACES, *PEX_FREE_POOL_TRACES;
  500. LONG ExFreePoolIndex;
  501. LONG ExFreePoolMask = 0x4000 - 1;
  502. PEX_FREE_POOL_TRACES ExFreePoolTraces;
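//
// A sketch (an assumption inferred from the index/mask pair above, not
// code from this file) of how a slot in this circular trace buffer would
// be claimed and filled when pool free tracing is enabled:
//
#if 0
{
    LONG Index;
    PEX_FREE_POOL_TRACES Trace;

    Index = InterlockedIncrement (&ExFreePoolIndex) & ExFreePoolMask;
    Trace = &ExFreePoolTraces[Index];
    Trace->Thread = PsGetCurrentThread ();
    RtlCaptureStackBackTrace (1,
                              EX_FREE_POOL_BACKTRACE_LENGTH,
                              Trace->StackTrace,
                              NULL);
}
#endif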
  503. VOID
  504. ExInitializePoolDescriptor (
  505. IN PPOOL_DESCRIPTOR PoolDescriptor,
  506. IN POOL_TYPE PoolType,
  507. IN ULONG PoolIndex,
  508. IN ULONG Threshold,
  509. IN PVOID PoolLock
  510. )
  511. /*++
  512. Routine Description:
  513. This function initializes a pool descriptor.
  514. Note that this routine is called directly by the memory manager.
  515. Arguments:
  516. PoolDescriptor - Supplies a pointer to the pool descriptor.
  517. PoolType - Supplies the type of the pool.
  518. PoolIndex - Supplies the pool descriptor index.
  519. Threshold - Supplies the threshold value for the specified pool.
  520. PoolLock - Supplies a pointer to the lock for the specified pool.
  521. Return Value:
  522. None.
  523. --*/
  524. {
  525. PLIST_ENTRY ListEntry;
  526. PLIST_ENTRY LastListEntry;
  527. PPOOL_TRACKER_BIG_PAGES p;
  528. PPOOL_TRACKER_BIG_PAGES pend;
  529. //
  530. // Initialize statistics fields, the pool type, the threshold value,
  531. // and the lock address.
  532. //
  533. PoolDescriptor->PoolType = PoolType;
  534. PoolDescriptor->PoolIndex = PoolIndex;
  535. PoolDescriptor->RunningAllocs = 0;
  536. PoolDescriptor->RunningDeAllocs = 0;
  537. PoolDescriptor->TotalPages = 0;
  538. PoolDescriptor->TotalBytes = 0;
  539. PoolDescriptor->TotalBigPages = 0;
  540. PoolDescriptor->Threshold = Threshold;
  541. PoolDescriptor->LockAddress = PoolLock;
  542. PoolDescriptor->PendingFrees = NULL;
  543. PoolDescriptor->PendingFreeDepth = 0;
  544. //
  545. // Initialize the allocation listheads.
  546. //
  547. ListEntry = PoolDescriptor->ListHeads;
  548. LastListEntry = ListEntry + POOL_LIST_HEADS;
  549. while (ListEntry < LastListEntry) {
  550. PrivateInitializeListHead (ListEntry);
  551. ListEntry += 1;
  552. }
  553. if (PoolType == PagedPoolSession) {
  554. if (ExpSessionPoolDescriptor == NULL) {
  555. ExpSessionPoolDescriptor = (PPOOL_DESCRIPTOR) MiSessionPoolVector ();
  556. ExpSessionPoolLookaside = MiSessionPoolLookaside ();
  557. ExpSessionPoolTrackTable = (PPOOL_TRACKER_TABLE) MiSessionPoolTrackTable ();
  558. ExpSessionPoolTrackTableSize = MiSessionPoolTrackTableSize ();
  559. ExpSessionPoolTrackTableMask = ExpSessionPoolTrackTableSize - 1;
  560. ExpSessionPoolBigPageTable = (PPOOL_TRACKER_BIG_PAGES) MiSessionPoolBigPageTable ();
  561. ExpSessionPoolBigPageTableSize = MiSessionPoolBigPageTableSize ();
  562. ExpSessionPoolBigPageTableHash = ExpSessionPoolBigPageTableSize - 1;
  563. ExpSessionPoolSmallLists = MiSessionPoolSmallLists ();
  564. }
  565. p = &ExpSessionPoolBigPageTable[0];
  566. pend = p + ExpSessionPoolBigPageTableSize;
  567. while (p < pend) {
  568. p->Va = (PVOID) POOL_BIG_TABLE_ENTRY_FREE;
  569. p += 1;
  570. }
  571. }
  572. return;
  573. }
  574. PVOID
  575. ExpDummyAllocate (
  576. IN POOL_TYPE PoolType,
  577. IN SIZE_T NumberOfBytes,
  578. IN ULONG Tag
  579. );
  580. VOID
  581. ExDrainPoolLookasideList (
  582. IN PPAGED_LOOKASIDE_LIST Lookaside
  583. )
  584. /*++
  585. Routine Description:
  586. This function drains the entries from the specified lookaside list.
  587. This is needed before deleting a pool lookaside list because the
  588. entries on the lookaside are already marked as free (by ExFreePoolWithTag)
  589. and so the normal lookaside deletion macros would hit false double free
  590. bugchecks if the list is not empty when the macros are called.
  591. Arguments:
  592. Lookaside - Supplies a pointer to a lookaside list structure.
  593. Return Value:
  594. None.
  595. --*/
  596. {
  597. PVOID Entry;
  598. PPOOL_HEADER PoolHeader;
  599. //
  600. // Remove all pool entries from the specified lookaside structure,
  601. // mark them as active, then free them.
  602. //
  603. Lookaside->L.Allocate = ExpDummyAllocate;
  604. while ((Entry = ExAllocateFromPagedLookasideList(Lookaside)) != NULL) {
  605. PoolHeader = (PPOOL_HEADER)Entry - 1;
  606. PoolHeader->PoolType = (USHORT)(Lookaside->L.Type + 1);
  607. PoolHeader->PoolType |= POOL_IN_USE_MASK;
  608. ExpInsertPoolTracker (PoolHeader->PoolTag,
  609. PoolHeader->BlockSize << POOL_BLOCK_SHIFT,
  610. Lookaside->L.Type);
  611. //
  612. // Set the depth to zero every time as a periodic scan may set it
613. // nonzero. This isn't worth interlocking as the list will deplete
614. // this way regardless.
  615. //
  616. Lookaside->L.Depth = 0;
  617. (Lookaside->L.Free)(Entry);
  618. }
  619. return;
  620. }
  621. //
  622. // FREE_CHECK_ERESOURCE - If enabled causes each free pool to verify
  623. // no active ERESOURCEs are in the pool block being freed.
  624. //
  625. // FREE_CHECK_KTIMER - If enabled causes each free pool to verify no
  626. // active KTIMERs are in the pool block being freed.
  627. //
  628. //
  629. // Checking for resources in pool being freed is expensive as there can
  630. // easily be thousands of resources, so don't do it by default but do
  631. // leave the capability for individual systems to enable it.
  632. //
  633. //
  634. // Runtime modifications to these flags must use interlocked sequences.
  635. //
  636. #if DBG && !defined(_AMD64_SIMULATOR_PERF_)
  637. ULONG ExpPoolFlags = EX_CHECK_POOL_FREES_FOR_ACTIVE_TIMERS | \
  638. EX_CHECK_POOL_FREES_FOR_ACTIVE_WORKERS;
  639. #else
  640. ULONG ExpPoolFlags = 0;
  641. #endif
  642. #define FREE_CHECK_ERESOURCE(Va, NumberOfBytes) \
  643. if (ExpPoolFlags & EX_CHECK_POOL_FREES_FOR_ACTIVE_RESOURCES) { \
  644. ExpCheckForResource(Va, NumberOfBytes); \
  645. }
  646. #define FREE_CHECK_KTIMER(Va, NumberOfBytes) \
  647. if (ExpPoolFlags & EX_CHECK_POOL_FREES_FOR_ACTIVE_TIMERS) { \
  648. KeCheckForTimer(Va, NumberOfBytes); \
  649. }
  650. #define FREE_CHECK_WORKER(Va, NumberOfBytes) \
  651. if (ExpPoolFlags & EX_CHECK_POOL_FREES_FOR_ACTIVE_WORKERS) { \
  652. ExpCheckForWorker(Va, NumberOfBytes); \
  653. }
  654. VOID
  655. ExSetPoolFlags (
  656. IN ULONG PoolFlag
  657. )
  658. /*++
  659. Routine Description:
  660. This procedure enables the specified pool flag(s).
  661. Arguments:
  662. PoolFlag - Supplies the pool flag(s) to enable.
  663. Return Value:
  664. None.
  665. --*/
  666. {
  667. RtlInterlockedSetBits (&ExpPoolFlags, PoolFlag);
  668. }
  669. VOID
  670. InitializePool (
  671. IN POOL_TYPE PoolType,
  672. IN ULONG Threshold
  673. )
  674. /*++
  675. Routine Description:
  676. This procedure initializes a pool descriptor for the specified pool
  677. type. Once initialized, the pool may be used for allocation and
  678. deallocation.
  679. This function should be called once for each base pool type during
  680. system initialization.
  681. Each pool descriptor contains an array of list heads for free
  682. blocks. Each list head holds blocks which are a multiple of
  683. the POOL_BLOCK_SIZE. The first element on the list [0] links
  684. together free entries of size POOL_BLOCK_SIZE, the second element
  685. [1] links together entries of POOL_BLOCK_SIZE * 2, the third
  686. POOL_BLOCK_SIZE * 3, etc, up to the number of blocks which fit
  687. into a page.
  688. Arguments:
  689. PoolType - Supplies the type of pool being initialized (e.g.
  690. nonpaged pool, paged pool...).
  691. Threshold - Supplies the threshold value for the specified pool.
  692. Return Value:
  693. None.
  694. --*/
  695. {
  696. ULONG i;
  697. PKSPIN_LOCK SpinLock;
  698. PPOOL_TRACKER_BIG_PAGES p;
  699. PPOOL_DESCRIPTOR Descriptor;
  700. ULONG Index;
  701. PKGUARDED_MUTEX GuardedMutex;
  702. SIZE_T NumberOfBytes;
  703. ASSERT((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) == 0);
  704. if (PoolType == NonPagedPool) {
  705. //
  706. // Initialize nonpaged pools.
  707. //
  708. // Ensure PoolTrackTableSize is a power of 2, then add 1 to it.
  709. //
  710. // Ensure PoolBigPageTableSize is a power of 2.
  711. //
  712. NumberOfBytes = PoolTrackTableSize;
  713. if (NumberOfBytes > MmSizeOfNonPagedPoolInBytes >> 8) {
  714. NumberOfBytes = MmSizeOfNonPagedPoolInBytes >> 8;
  715. }
  716. for (i = 0; i < 32; i += 1) {
  717. if (NumberOfBytes & 0x1) {
  718. ASSERT ((NumberOfBytes & ~0x1) == 0);
  719. if ((NumberOfBytes & ~0x1) == 0) {
  720. break;
  721. }
  722. }
  723. NumberOfBytes >>= 1;
  724. }
  725. if (i == 32) {
  726. PoolTrackTableSize = DEFAULT_TRACKER_TABLE;
  727. }
  728. else {
  729. PoolTrackTableSize = 1 << i;
  730. if (PoolTrackTableSize < 64) {
  731. PoolTrackTableSize = 64;
  732. }
  733. }
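//
// Worked example of the sanitization above (illustrative note): a registry
// override of 0x500 (1280) breaks out of the loop with i == 10, the
// position of its highest set bit, so it is rounded down to
// PoolTrackTableSize == 0x400 (1024). A value of 0 never sets the low bit,
// leaves i == 32 and falls back to DEFAULT_TRACKER_TABLE.
//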
  734. do {
  735. if (PoolTrackTableSize + 1 > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE))) {
  736. PoolTrackTableSize >>= 1;
  737. continue;
  738. }
  739. PoolTrackTable = MiAllocatePoolPages (NonPagedPool,
  740. (PoolTrackTableSize + 1) *
  741. sizeof(POOL_TRACKER_TABLE));
  742. if (PoolTrackTable != NULL) {
  743. break;
  744. }
  745. if (PoolTrackTableSize == 1) {
  746. KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
  747. NumberOfBytes,
  748. (ULONG_PTR)-1,
  749. (ULONG_PTR)-1,
  750. (ULONG_PTR)-1);
  751. }
  752. PoolTrackTableSize >>= 1;
  753. } while (TRUE);
  754. PoolTrackTableSize += 1;
  755. PoolTrackTableMask = PoolTrackTableSize - 2;
  756. #if !defined (NT_UP)
  757. ExPoolTagTables[0] = PoolTrackTable;
  758. #endif
  759. RtlZeroMemory (PoolTrackTable,
  760. PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
  761. ExpSeedHotTags ();
  762. //
  763. // Initialize the large allocation tag table.
  764. //
  765. NumberOfBytes = PoolBigPageTableSize;
  766. if (NumberOfBytes > MmSizeOfNonPagedPoolInBytes >> 8) {
  767. NumberOfBytes = MmSizeOfNonPagedPoolInBytes >> 8;
  768. }
  769. for (i = 0; i < 32; i += 1) {
  770. if (NumberOfBytes & 0x1) {
  771. ASSERT ((NumberOfBytes & ~0x1) == 0);
  772. if ((NumberOfBytes & ~0x1) == 0) {
  773. break;
  774. }
  775. }
  776. NumberOfBytes >>= 1;
  777. }
  778. if (i == 32) {
  779. PoolBigPageTableSize = DEFAULT_BIGPAGE_TABLE;
  780. }
  781. else {
  782. PoolBigPageTableSize = 1 << i;
  783. if (PoolBigPageTableSize < 64) {
  784. PoolBigPageTableSize = 64;
  785. }
  786. }
  787. do {
  788. if (PoolBigPageTableSize > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES))) {
  789. PoolBigPageTableSize >>= 1;
  790. continue;
  791. }
  792. PoolBigPageTable = MiAllocatePoolPages (NonPagedPool,
  793. PoolBigPageTableSize *
  794. sizeof(POOL_TRACKER_BIG_PAGES));
  795. if (PoolBigPageTable != NULL) {
  796. break;
  797. }
  798. if (PoolBigPageTableSize == 1) {
  799. KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
  800. NumberOfBytes,
  801. (ULONG_PTR)-1,
  802. (ULONG_PTR)-1,
  803. (ULONG_PTR)-1);
  804. }
  805. PoolBigPageTableSize >>= 1;
  806. } while (TRUE);
  807. PoolBigPageTableHash = PoolBigPageTableSize - 1;
  808. RtlZeroMemory (PoolBigPageTable,
  809. PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
  810. p = &PoolBigPageTable[0];
  811. for (i = 0; i < PoolBigPageTableSize; i += 1, p += 1) {
  812. p->Va = (PVOID) POOL_BIG_TABLE_ENTRY_FREE;
  813. }
  814. ExpInsertPoolTracker ('looP',
  815. ROUND_TO_PAGES(PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES)),
  816. NonPagedPool);
  817. if (KeNumberNodes > 1) {
  818. ExpNumberOfNonPagedPools = KeNumberNodes;
  819. //
  820. // Limit the number of pools to the number of bits in the PoolIndex.
  821. //
  822. if (ExpNumberOfNonPagedPools > 127) {
  823. ExpNumberOfNonPagedPools = 127;
  824. }
  825. //
  826. // Further limit the number of pools by our array of pointers.
  827. //
  828. if (ExpNumberOfNonPagedPools > EXP_MAXIMUM_POOL_NODES) {
  829. ExpNumberOfNonPagedPools = EXP_MAXIMUM_POOL_NODES;
  830. }
  831. NumberOfBytes = sizeof(POOL_DESCRIPTOR) + sizeof(KLOCK_QUEUE_HANDLE);
  832. for (Index = 0; Index < ExpNumberOfNonPagedPools; Index += 1) {
  833. //
  834. // Here's a thorny problem. We'd like to use
  835. // MmAllocateIndependentPages but can't because we'd need
  836. // system PTEs to map the pages with and PTEs are not
  837. // available until nonpaged pool exists. So just use
  838. // regular pool pages to hold the descriptors and spinlocks
  839. // and hope they either a) happen to fall onto the right node
  840. // or b) that these lines live in the local processor cache
  841. // all the time anyway due to frequent usage.
  842. //
  843. Descriptor = (PPOOL_DESCRIPTOR) MiAllocatePoolPages (
  844. NonPagedPool,
  845. NumberOfBytes);
  846. if (Descriptor == NULL) {
  847. KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
  848. NumberOfBytes,
  849. (ULONG_PTR)-1,
  850. (ULONG_PTR)-1,
  851. (ULONG_PTR)-1);
  852. }
  853. ExpNonPagedPoolDescriptor[Index] = Descriptor;
  854. SpinLock = (PKSPIN_LOCK)(Descriptor + 1);
  855. KeInitializeSpinLock (SpinLock);
  856. ExInitializePoolDescriptor (Descriptor,
  857. NonPagedPool,
  858. Index,
  859. Threshold,
  860. (PVOID)SpinLock);
  861. }
  862. }
  863. //
  864. // Initialize the spinlocks for nonpaged pool.
  865. //
  866. KeInitializeSpinLock (&ExpTaggedPoolLock);
  867. //
  868. // Initialize the nonpaged pool descriptor.
  869. //
  870. PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
  871. ExInitializePoolDescriptor (&NonPagedPoolDescriptor,
  872. NonPagedPool,
  873. 0,
  874. Threshold,
  875. NULL);
  876. }
  877. else {
  878. //
  879. // Allocate memory for the paged pool descriptors and fast mutexes.
  880. //
  881. if (KeNumberNodes > 1) {
  882. ExpNumberOfPagedPools = KeNumberNodes;
  883. //
  884. // Limit the number of pools to the number of bits in the PoolIndex.
  885. //
  886. if (ExpNumberOfPagedPools > 127) {
  887. ExpNumberOfPagedPools = 127;
  888. }
  889. }
  890. //
  891. // Further limit the number of pools by our array of pointers.
  892. //
  893. if (ExpNumberOfPagedPools > EXP_MAXIMUM_POOL_NODES) {
  894. ExpNumberOfPagedPools = EXP_MAXIMUM_POOL_NODES;
  895. }
  896. //
  897. // For NUMA systems, allocate both the pool descriptor and the
  898. // associated lock from the local node for performance (even though
  899. // it costs a little more memory).
  900. //
  901. // For non-NUMA systems, allocate everything together in one chunk
  902. // to reduce memory consumption as there is no performance cost
  903. // for doing it this way.
  904. //
  905. if (KeNumberNodes > 1) {
  906. NumberOfBytes = sizeof(KGUARDED_MUTEX) + sizeof(POOL_DESCRIPTOR);
  907. for (Index = 0; Index < ExpNumberOfPagedPools + 1; Index += 1) {
  908. ULONG Node;
  909. if (Index == 0) {
  910. Node = 0;
  911. }
  912. else {
  913. Node = Index - 1;
  914. }
  915. Descriptor = (PPOOL_DESCRIPTOR) MmAllocateIndependentPages (
  916. NumberOfBytes,
  917. Node);
  918. if (Descriptor == NULL) {
  919. KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
  920. NumberOfBytes,
  921. (ULONG_PTR)-1,
  922. (ULONG_PTR)-1,
  923. (ULONG_PTR)-1);
  924. }
  925. ExpPagedPoolDescriptor[Index] = Descriptor;
  926. GuardedMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
  927. if (Index == 0) {
  928. PoolVector[PagedPool] = Descriptor;
  929. ExpPagedPoolMutex = GuardedMutex;
  930. }
  931. KeInitializeGuardedMutex (GuardedMutex);
  932. ExInitializePoolDescriptor (Descriptor,
  933. PagedPool,
  934. Index,
  935. Threshold,
  936. (PVOID) GuardedMutex);
  937. }
  938. }
  939. else {
  940. NumberOfBytes = (ExpNumberOfPagedPools + 1) * (sizeof(KGUARDED_MUTEX) + sizeof(POOL_DESCRIPTOR));
  941. Descriptor = (PPOOL_DESCRIPTOR)ExAllocatePoolWithTag (NonPagedPool,
  942. NumberOfBytes,
  943. 'looP');
  944. if (Descriptor == NULL) {
  945. KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
  946. NumberOfBytes,
  947. (ULONG_PTR)-1,
  948. (ULONG_PTR)-1,
  949. (ULONG_PTR)-1);
  950. }
  951. GuardedMutex = (PKGUARDED_MUTEX)(Descriptor + ExpNumberOfPagedPools + 1);
  952. PoolVector[PagedPool] = Descriptor;
  953. ExpPagedPoolMutex = GuardedMutex;
  954. for (Index = 0; Index < ExpNumberOfPagedPools + 1; Index += 1) {
  955. KeInitializeGuardedMutex (GuardedMutex);
  956. ExpPagedPoolDescriptor[Index] = Descriptor;
  957. ExInitializePoolDescriptor (Descriptor,
  958. PagedPool,
  959. Index,
  960. Threshold,
  961. (PVOID) GuardedMutex);
  962. Descriptor += 1;
  963. GuardedMutex += 1;
  964. }
  965. }
  966. ExpInsertPoolTracker('looP',
  967. ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
  968. NonPagedPool);
  969. #if defined (NT_UP)
  970. if (MmNumberOfPhysicalPages < 32 * 1024) {
  971. LARGE_INTEGER TwoMinutes;
  972. //
  973. // Set the flag to disable lookasides and use hot/cold page
  974. // separation during bootup.
  975. //
  976. ExSetPoolFlags (EX_SEPARATE_HOT_PAGES_DURING_BOOT);
  977. //
  978. // Start a timer so the above behavior is disabled once bootup
  979. // has finished.
  980. //
  981. KeInitializeTimer (&ExpBootFinishedTimer);
  982. KeInitializeDpc (&ExpBootFinishedTimerDpc,
  983. (PKDEFERRED_ROUTINE) ExpBootFinishedDispatch,
  984. NULL);
  985. TwoMinutes.QuadPart = Int32x32To64 (120, -10000000);
  986. KeSetTimer (&ExpBootFinishedTimer,
  987. TwoMinutes,
  988. &ExpBootFinishedTimerDpc);
  989. }
  990. #endif
  991. if ((MmNumberOfPhysicalPages >= 127 * 1024) &&
  992. ((ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) == 0) &&
  993. (!NT_SUCCESS (MmIsVerifierEnabled (&i)))) {
  994. ExSetPoolFlags (EX_DELAY_POOL_FREES);
  995. }
  996. if ((ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) ||
  997. (NT_SUCCESS (MmIsVerifierEnabled (&i)))) {
  998. #if DBG
  999. //
  1000. // Ensure ExFreePoolMask is a power of 2 minus 1 (or zero).
  1001. //
  1002. if (ExFreePoolMask != 0) {
  1003. NumberOfBytes = ExFreePoolMask + 1;
  1004. ASSERT (NumberOfBytes != 0);
  1005. for (i = 0; i < 32; i += 1) {
  1006. if (NumberOfBytes & 0x1) {
  1007. ASSERT ((NumberOfBytes & ~0x1) == 0);
  1008. break;
  1009. }
  1010. NumberOfBytes >>= 1;
  1011. }
  1012. }
  1013. #endif
  1014. ExFreePoolTraces = MiAllocatePoolPages (NonPagedPool,
  1015. (ExFreePoolMask + 1) *
  1016. sizeof (EX_FREE_POOL_TRACES));
  1017. if (ExFreePoolTraces != NULL) {
  1018. RtlZeroMemory (ExFreePoolTraces,
  1019. (ExFreePoolMask + 1) * sizeof (EX_FREE_POOL_TRACES));
  1020. }
  1021. }
  1022. }
  1023. }
  1024. #if DBG
  1025. ULONG ExStopBadTags;
  1026. #endif
  1027. __forceinline
  1028. VOID
  1029. ExpInsertPoolTrackerInline (
  1030. IN ULONG Key,
  1031. IN SIZE_T NumberOfBytes,
  1032. IN POOL_TYPE PoolType
  1033. )
  1034. /*++
  1035. Routine Description:
  1036. This function inserts a pool tag in the tag table, increments the
  1037. number of allocates and updates the total allocation size.
  1038. Arguments:
  1039. Key - Supplies the key value used to locate a matching entry in the
  1040. tag table.
  1041. NumberOfBytes - Supplies the allocation size.
  1042. PoolType - Supplies the pool type.
  1043. Return Value:
  1044. None.
  1045. Environment:
1046. No pool locks held except during the rare case of expansion table growth,
1047. so pool may be freely allocated here as needed. In expansion table growth,
  1048. the tagged spinlock is held on entry, but we are guaranteed to find an
  1049. entry in the builtin table so a recursive acquire cannot occur.
  1050. --*/
  1051. {
  1052. ULONG Hash;
  1053. ULONG Index;
  1054. LONG OriginalKey;
  1055. KIRQL OldIrql;
  1056. PPOOL_TRACKER_TABLE TrackTable;
  1057. PPOOL_TRACKER_TABLE TrackTableEntry;
  1058. SIZE_T TrackTableMask;
  1059. SIZE_T TrackTableSize;
  1060. #if !defined (NT_UP)
  1061. ULONG Processor;
  1062. #endif
  1063. //
  1064. // Strip the protected pool bit.
  1065. //
  1066. Key &= ~PROTECTED_POOL;
  1067. if (Key == PoolHitTag) {
  1068. DbgBreakPoint();
  1069. }
  1070. #if DBG
  1071. if (ExStopBadTags) {
  1072. ASSERT (Key & 0xFFFFFF00);
  1073. }
  1074. #endif
  1075. //
  1076. // Compute the hash index and search (lock-free) for the pool tag
  1077. // in the builtin table.
  1078. //
  1079. if (PoolType & SESSION_POOL_MASK) {
  1080. TrackTable = ExpSessionPoolTrackTable;
  1081. TrackTableMask = ExpSessionPoolTrackTableMask;
  1082. TrackTableSize = ExpSessionPoolTrackTableSize;
  1083. }
  1084. else {
  1085. #if !defined (NT_UP)
  1086. //
  1087. // Use the current processor to pick a pool tag table to use. Note that
  1088. // in rare cases, this thread may context switch to another processor
  1089. // but the algorithms below will still be correct.
  1090. //
  1091. Processor = KeGetCurrentProcessorNumber ();
  1092. ASSERT (Processor < MAXIMUM_PROCESSOR_TAG_TABLES);
  1093. TrackTable = ExPoolTagTables[Processor];
  1094. #else
  1095. TrackTable = PoolTrackTable;
  1096. #endif
  1097. TrackTableMask = PoolTrackTableMask;
  1098. TrackTableSize = PoolTrackTableSize;
  1099. }
  1100. Hash = POOLTAG_HASH (Key, TrackTableMask);
  1101. Index = Hash;
  1102. do {
  1103. TrackTableEntry = &TrackTable[Hash];
  1104. if (TrackTableEntry->Key == Key) {
  1105. //
  1106. // Update the fields with interlocked operations as other
  1107. // threads may also have begun doing so by this point.
  1108. //
  1109. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  1110. InterlockedIncrement ((PLONG) &TrackTableEntry->PagedAllocs);
  1111. InterlockedExchangeAddSizeT (&TrackTableEntry->PagedBytes,
  1112. NumberOfBytes);
  1113. }
  1114. else {
  1115. InterlockedIncrement ((PLONG) &TrackTableEntry->NonPagedAllocs);
  1116. InterlockedExchangeAddSizeT (&TrackTableEntry->NonPagedBytes,
  1117. NumberOfBytes);
  1118. }
  1119. return;
  1120. }
  1121. if (TrackTableEntry->Key == 0) {
  1122. if (PoolType & SESSION_POOL_MASK) {
  1123. if (Hash == TrackTableSize - 1) {
  1124. Hash = 0;
  1125. if (Hash == Index) {
  1126. break;
  1127. }
  1128. }
  1129. else {
  1130. OriginalKey = InterlockedCompareExchange ((PLONG)&TrackTable[Hash].Key,
  1131. (LONG)Key,
  1132. 0);
  1133. }
  1134. //
  1135. // Either this thread has won the race and the requested tag
  1136. // is now in or some other thread won the race and took this
  1137. // slot (using this tag or a different one).
  1138. //
  1139. // Just fall through to common checks starting at this slot
  1140. // for both cases.
  1141. //
  1142. continue;
  1143. }
  1144. #if !defined (NT_UP)
  1145. if (PoolTrackTable[Hash].Key != 0) {
  1146. TrackTableEntry->Key = PoolTrackTable[Hash].Key;
  1147. continue;
  1148. }
  1149. #endif
  1150. if (Hash != PoolTrackTableSize - 1) {
  1151. //
  1152. // New entries cannot be created with an interlocked compare
  1153. // exchange because any new entry must reside at the same index
  1154. // in each processor's private PoolTrackTable. This is to make
  1155. // ExGetPoolTagInfo statistics gathering much simpler (faster).
  1156. //
  1157. ExAcquireSpinLock (&ExpTaggedPoolLock, &OldIrql);
  1158. if (PoolTrackTable[Hash].Key == 0) {
  1159. ASSERT (TrackTable[Hash].Key == 0);
  1160. PoolTrackTable[Hash].Key = Key;
  1161. TrackTableEntry->Key = Key;
  1162. }
  1163. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  1164. //
  1165. // Either this thread has won the race and the requested tag
  1166. // is now in or some other thread won the race and took this
  1167. // slot (using this tag or a different one).
  1168. //
  1169. // Just fall through to common checks starting at this slot
  1170. // for both cases.
  1171. //
  1172. continue;
  1173. }
  1174. }
  1175. Hash = (Hash + 1) & (ULONG)TrackTableMask;
  1176. if (Hash == Index) {
  1177. break;
  1178. }
  1179. } while (TRUE);
  1180. //
  1181. // No matching entry and no free entry was found.
  1182. //
  1183. // Use the expansion table instead.
  1184. //
  1185. ExpInsertPoolTrackerExpansion (Key, NumberOfBytes, PoolType);
  1186. }
  1187. __forceinline
  1188. VOID
  1189. ExpRemovePoolTrackerInline (
  1190. IN ULONG Key,
  1191. IN SIZE_T NumberOfBytes,
  1192. IN POOL_TYPE PoolType
  1193. )
  1194. /*++
  1195. Routine Description:
  1196. This function increments the number of frees and updates the total
  1197. allocation size.
  1198. Arguments:
  1199. Key - Supplies the key value used to locate a matching entry in the
  1200. tag table.
  1201. NumberOfBytes - Supplies the allocation size.
  1202. PoolType - Supplies the pool type.
  1203. Return Value:
  1204. None.
  1205. --*/
  1206. {
  1207. ULONG Hash;
  1208. ULONG Index;
  1209. PPOOL_TRACKER_TABLE TrackTable;
  1210. PPOOL_TRACKER_TABLE TrackTableEntry;
  1211. SIZE_T TrackTableMask;
  1212. SIZE_T TrackTableSize;
  1213. #if !defined (NT_UP)
  1214. ULONG Processor;
  1215. #endif
  1216. //
  1217. // Strip protected pool bit.
  1218. //
  1219. Key &= ~PROTECTED_POOL;
  1220. if (Key == PoolHitTag) {
  1221. DbgBreakPoint ();
  1222. }
  1223. //
  1224. // Compute the hash index and search (lock-free) for the pool tag
  1225. // in the builtin table.
  1226. //
  1227. if (PoolType & SESSION_POOL_MASK) {
  1228. TrackTable = ExpSessionPoolTrackTable;
  1229. TrackTableMask = ExpSessionPoolTrackTableMask;
  1230. TrackTableSize = ExpSessionPoolTrackTableSize;
  1231. }
  1232. else {
  1233. #if !defined (NT_UP)
  1234. //
  1235. // Use the current processor to pick a pool tag table to use. Note that
  1236. // in rare cases, this thread may context switch to another processor
  1237. // but the algorithms below will still be correct.
  1238. //
  1239. Processor = KeGetCurrentProcessorNumber ();
  1240. ASSERT (Processor < MAXIMUM_PROCESSOR_TAG_TABLES);
  1241. TrackTable = ExPoolTagTables[Processor];
  1242. #else
  1243. TrackTable = PoolTrackTable;
  1244. #endif
  1245. TrackTableMask = PoolTrackTableMask;
  1246. TrackTableSize = PoolTrackTableSize;
  1247. }
  1248. Hash = POOLTAG_HASH (Key, TrackTableMask);
  1249. Index = Hash;
  1250. do {
  1251. TrackTableEntry = &TrackTable[Hash];
  1252. if (TrackTableEntry->Key == Key) {
  1253. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  1254. InterlockedIncrement ((PLONG) &TrackTableEntry->PagedFrees);
  1255. InterlockedExchangeAddSizeT (&TrackTableEntry->PagedBytes,
  1256. 0 - NumberOfBytes);
  1257. }
  1258. else {
  1259. InterlockedIncrement ((PLONG) &TrackTableEntry->NonPagedFrees);
  1260. InterlockedExchangeAddSizeT (&TrackTableEntry->NonPagedBytes,
  1261. 0 - NumberOfBytes);
  1262. }
  1263. return;
  1264. }
  1265. //
  1266. // Since each processor's table is lazy updated, handle the case
  1267. // here where this processor's table still has no entry for the tag
  1268. // being freed because the allocation happened on a different
  1269. // processor.
  1270. //
  1271. if (TrackTableEntry->Key == 0) {
  1272. #if !defined (NT_UP)
  1273. if (((PoolType & SESSION_POOL_MASK) == 0) &&
  1274. (PoolTrackTable[Hash].Key != 0)) {
  1275. TrackTableEntry->Key = PoolTrackTable[Hash].Key;
  1276. continue;
  1277. }
  1278. #endif
  1279. ASSERT (Hash == TrackTableMask);
  1280. }
  1281. Hash = (Hash + 1) & (ULONG)TrackTableMask;
  1282. if (Hash == Index) {
  1283. break;
  1284. }
  1285. } while (TRUE);
  1286. //
  1287. // No matching entry and no free entry was found.
  1288. //
  1289. // Linear search through the expansion table. This is ok because
  1290. // the existence of an expansion table at all is extremely rare.
  1291. //
  1292. ExpRemovePoolTrackerExpansion (Key, NumberOfBytes, PoolType);
  1293. }
  1294. PVOID
  1295. VeAllocatePoolWithTagPriority (
  1296. IN POOL_TYPE PoolType,
  1297. IN SIZE_T NumberOfBytes,
  1298. IN ULONG Tag,
  1299. IN EX_POOL_PRIORITY Priority,
  1300. IN PVOID CallingAddress
  1301. );
  1302. PVOID
  1303. ExAllocatePoolWithTag (
  1304. IN POOL_TYPE PoolType,
  1305. IN SIZE_T NumberOfBytes,
  1306. IN ULONG Tag
  1307. )
  1308. /*++
  1309. Routine Description:
  1310. This function allocates a block of pool of the specified type and
  1311. returns a pointer to the allocated block. This function is used to
  1312. access both the page-aligned pools and the list head entries (less
  1313. than a page) pools.
  1314. If the number of bytes specifies a size that is too large to be
  1315. satisfied by the appropriate list, then the page-aligned pool
  1316. allocator is used. The allocated block will be page-aligned and a
  1317. page-sized multiple.
  1318. Otherwise, the appropriate pool list entry is used. The allocated
  1319. block will be 64-bit aligned, but will not be page aligned. The
  1320. pool allocator calculates the smallest number of POOL_BLOCK_SIZE
  1321. that can be used to satisfy the request. If there are no blocks
  1322. available of this size, then a block of the next larger block size
  1323. is allocated and split. One piece is placed back into the pool, and
  1324. the other piece is used to satisfy the request. If the allocator
  1325. reaches the paged-sized block list, and nothing is there, the
  1326. page-aligned pool allocator is called. The page is split and added
  1327. to the pool.
  1328. Arguments:
  1329. PoolType - Supplies the type of pool to allocate. If the pool type
1330. is one of the "MustSucceed" pool types, then this call will either
1331. return a pointer to allocated pool or bugcheck on failure.
  1332. For all other cases, if the system cannot allocate the requested amount
  1333. of memory, NULL is returned.
  1334. Valid pool types:
  1335. NonPagedPool
  1336. PagedPool
  1337. NonPagedPoolMustSucceed,
  1338. NonPagedPoolCacheAligned
  1339. PagedPoolCacheAligned
  1340. NonPagedPoolCacheAlignedMustSucceed
1341. NumberOfBytes - Supplies the number of bytes to allocate.
1342. Tag - Supplies the caller's identifying tag.
  1343. Return Value:
  1344. NULL - The PoolType is not one of the "MustSucceed" pool types, and
  1345. not enough pool exists to satisfy the request.
  1346. NON-NULL - Returns a pointer to the allocated pool.
  1347. --*/
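//
// Typical caller usage (an illustrative sketch, not from this file; the
// tag and size are arbitrary):
//
#if 0
{
    PVOID Buffer;

    Buffer = ExAllocatePoolWithTag (NonPagedPool, 128, 'lopS');
    if (Buffer != NULL) {

        // ... use Buffer at IRQL <= DISPATCH_LEVEL ...

        ExFreePoolWithTag (Buffer, 'lopS');
    }
}
#endif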
  1348. {
  1349. PKGUARDED_MUTEX Lock;
  1350. PVOID Block;
  1351. PPOOL_HEADER Entry;
  1352. PGENERAL_LOOKASIDE LookasideList;
  1353. PPOOL_HEADER NextEntry;
  1354. PPOOL_HEADER SplitEntry;
  1355. KLOCK_QUEUE_HANDLE LockHandle;
  1356. PPOOL_DESCRIPTOR PoolDesc;
  1357. ULONG Index;
  1358. ULONG ListNumber;
  1359. ULONG NeededSize;
  1360. ULONG PoolIndex;
  1361. POOL_TYPE CheckType;
  1362. POOL_TYPE RequestType;
  1363. PLIST_ENTRY ListHead;
  1364. POOL_TYPE NewPoolType;
  1365. PKPRCB Prcb;
  1366. ULONG NumberOfPages;
  1367. ULONG RetryCount;
  1368. PVOID CallingAddress;
  1369. #if defined (_X86_)
  1370. PVOID CallersCaller;
  1371. #endif
  1372. #define CacheOverhead POOL_OVERHEAD
  1373. PERFINFO_EXALLOCATEPOOLWITHTAG_DECL();
  1374. ASSERT (Tag != 0);
  1375. ASSERT (Tag != ' GIB');
  1376. ASSERT (NumberOfBytes != 0);
  1377. ASSERT_ALLOCATE_IRQL (PoolType, NumberOfBytes);
  1378. if (ExpPoolFlags & (EX_KERNEL_VERIFIER_ENABLED | EX_SPECIAL_POOL_ENABLED)) {
  1379. if (ExpPoolFlags & EX_KERNEL_VERIFIER_ENABLED) {
  1380. if ((PoolType & POOL_DRIVER_MASK) == 0) {
  1381. //
  1382. // Use the Driver Verifier pool framework. Note this will
  1383. // result in a recursive callback to this routine.
  1384. //
  1385. #if defined (_X86_)
  1386. RtlGetCallersAddress (&CallingAddress, &CallersCaller);
  1387. #else
  1388. CallingAddress = (PVOID)_ReturnAddress();
  1389. #endif
  1390. return VeAllocatePoolWithTagPriority (PoolType | POOL_DRIVER_MASK,
  1391. NumberOfBytes,
  1392. Tag,
  1393. HighPoolPriority,
  1394. CallingAddress);
  1395. }
  1396. PoolType &= ~POOL_DRIVER_MASK;
  1397. }
  1398. //
  1399. // Use special pool if there is a tag or size match.
  1400. //
  1401. if ((ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) &&
  1402. (MmUseSpecialPool (NumberOfBytes, Tag))) {
  1403. Entry = MmAllocateSpecialPool (NumberOfBytes,
  1404. Tag,
  1405. PoolType,
  1406. 2);
  1407. if (Entry != NULL) {
  1408. return (PVOID)Entry;
  1409. }
  1410. }
  1411. }
  1412. //
  1413. // Isolate the base pool type and select a pool from which to allocate
  1414. // the specified block size.
  1415. //
  1416. CheckType = PoolType & BASE_POOL_TYPE_MASK;
  1417. if ((PoolType & SESSION_POOL_MASK) == 0) {
  1418. PoolDesc = PoolVector[CheckType];
  1419. }
  1420. else {
  1421. PoolDesc = ExpSessionPoolDescriptor;
  1422. }
  1423. ASSERT (PoolDesc != NULL);
  1424. //
  1425. // Initializing LockHandle is not needed for correctness but without
  1426. // it the compiler cannot compile this code W4 to check for use of
  1427. // uninitialized variables.
  1428. //
  1429. LockHandle.OldIrql = 0;
  1430. //
  1431. // Check to determine if the requested block can be allocated from one
  1432. // of the pool lists or must be directly allocated from virtual memory.
  1433. //
  1434. if (NumberOfBytes > POOL_BUDDY_MAX) {
  1435. //
  1436. // The requested size is greater than the largest block maintained
  1437. // by allocation lists.
  1438. //
  1439. RequestType = (PoolType & (BASE_POOL_TYPE_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK));
  1440. Entry = (PPOOL_HEADER) MiAllocatePoolPages (RequestType,
  1441. NumberOfBytes);
  1442. if (Entry == NULL) {
  1443. //
  1444. // If there are deferred free blocks, free them now and retry.
  1445. //
  1446. if (ExpPoolFlags & EX_DELAY_POOL_FREES) {
  1447. ExDeferredFreePool (PoolDesc);
  1448. Entry = (PPOOL_HEADER) MiAllocatePoolPages (RequestType,
  1449. NumberOfBytes);
  1450. }
  1451. }
  1452. if (Entry == NULL) {
  1453. if (PoolType & MUST_SUCCEED_POOL_TYPE_MASK) {
  1454. KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
  1455. NumberOfBytes,
  1456. NonPagedPoolDescriptor.TotalPages,
  1457. NonPagedPoolDescriptor.TotalBigPages,
  1458. 0);
  1459. }
  1460. ExPoolFailures += 1;
  1461. if (ExpPoolFlags & EX_PRINT_POOL_FAILURES) {
  1462. KdPrint(("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
  1463. NumberOfBytes,
  1464. PoolType));
  1465. if (ExpPoolFlags & EX_STOP_ON_POOL_FAILURES) {
  1466. DbgBreakPoint ();
  1467. }
  1468. }
  1469. if ((PoolType & POOL_RAISE_IF_ALLOCATION_FAILURE) != 0) {
  1470. ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
  1471. }
  1472. return NULL;
  1473. }
  1474. NumberOfPages = (ULONG) BYTES_TO_PAGES (NumberOfBytes);
  1475. InterlockedExchangeAdd ((PLONG)&PoolDesc->TotalBigPages,
  1476. (LONG)NumberOfPages);
  1477. InterlockedExchangeAddSizeT (&PoolDesc->TotalBytes,
  1478. (SIZE_T)NumberOfPages << PAGE_SHIFT);
  1479. InterlockedIncrement ((PLONG)&PoolDesc->RunningAllocs);
  1480. //
  1481. // Mark the allocation as session-based so that when it is freed
  1482. // we can detect that the session pool descriptor is the one to
  1483. // be credited (not the global nonpaged descriptor).
  1484. //
  1485. if ((PoolType & SESSION_POOL_MASK) && (CheckType == NonPagedPool)) {
  1486. MiMarkPoolLargeSession (Entry);
  1487. }
  1488. //
  1489. // Note nonpaged session allocations get turned into global
  1490. // session allocations internally, so they must be added to the
  1491. // global tag tables. Paged session allocations go into their
  1492. // own tables.
  1493. //
  1494. if (ExpAddTagForBigPages ((PVOID)Entry,
  1495. Tag,
  1496. NumberOfPages,
  1497. PoolType) == FALSE) {
  1498. //
  1499. // Note that not being able to add the tag entry above
  1500. // implies 2 things: The allocation must now be tagged
  1501. // as BIG because the subsequent free also won't find it
  1502. // in the big page tag table and so it must use BIG when
  1503. // removing it from the PoolTrackTable. Also that the free
  1504. // must get the size from MiFreePoolPages since the
  1505. // big page tag table won't have the size in this case.
  1506. //
  1507. Tag = ' GIB';
  1508. }
  1509. ExpInsertPoolTracker (Tag,
  1510. ROUND_TO_PAGES(NumberOfBytes),
  1511. PoolType);
  1512. PERFINFO_BIGPOOLALLOC (PoolType, Tag, NumberOfBytes, Entry);
  1513. return Entry;
  1514. }
  1515. if (NumberOfBytes == 0) {
  1516. //
  1517. // Besides fragmenting pool, zero byte requests would not be handled
  1518. // in cases where the minimum pool block size is the same as the
  1519. // pool header size (no room for flink/blinks, etc).
  1520. //
  1521. #if DBG
  1522. KeBugCheckEx (BAD_POOL_CALLER, 0, 0, PoolType, Tag);
  1523. #else
  1524. NumberOfBytes = 1;
  1525. #endif
  1526. }
  1527. //
  1528. // The requested size is less than or equal to the size of the
  1529. // maximum block maintained by the allocation lists.
  1530. //
  1531. PERFINFO_POOLALLOC (PoolType, Tag, NumberOfBytes);
  1532. //
  1533. // Compute the index of the listhead for blocks of the requested size.
  1534. //
  1535. ListNumber = (ULONG)((NumberOfBytes + POOL_OVERHEAD + (POOL_SMALLEST_BLOCK - 1)) >> POOL_BLOCK_SHIFT);
  1536. NeededSize = ListNumber;
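//
// Worked example (assuming the x86 values POOL_OVERHEAD == 8,
// POOL_SMALLEST_BLOCK == 8 and POOL_BLOCK_SHIFT == 3): a 100 byte request
// computes (100 + 8 + 7) >> 3 == 14, so the block is carved from the
// listhead for 14 * 8 == 112 byte blocks - the smallest multiple of the
// block size that holds the caller's bytes plus the pool header.
//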
  1537. if (CheckType == PagedPool) {
  1538. //
  1539. // If the requested pool block is a small block, then attempt to
  1540. // allocate the requested pool from the per processor lookaside
  1541. // list. If the attempt fails, then attempt to allocate from the
  1542. // system lookaside list. If the attempt fails, then select a
  1543. // pool to allocate from and allocate the block normally.
  1544. //
  1545. // Also note that if hot/cold separation is enabled, allocations are
  1546. // not satisfied from lookaside lists as these are either :
  1547. //
  1548. // 1. cold references
  1549. //
  1550. // or
  1551. //
  1552. // 2. we are still booting on a small machine, thus keeping pool
  1553. // locality dense (to reduce the working set footprint thereby
  1554. // reducing page stealing) is a bigger win in terms of overall
  1555. // speed than trying to satisfy individual requests more quickly.
  1556. //
  1557. if ((PoolType & SESSION_POOL_MASK) == 0) {
  1558. //
  1559. // Check for prototype pool - always allocate it from its own
  1560. // pages as the sharecounts applied on these allocations by
  1561. // memory management make it more difficult to trim these pages.
1562. // This is an optimization so that other pageable allocation pages
1563. // (which are much easier to trim because their sharecount is
1564. // almost always only 1) don't end up being mostly resident because
1565. // of a single prototype pool allocation within it. Note this
  1566. // also makes it easier to remove specific pages for hot remove
  1567. // or callers that need contiguous physical memory.
  1568. //
  1569. if (PoolType & POOL_MM_ALLOCATION) {
  1570. PoolIndex = 0;
  1571. ASSERT (PoolDesc->PoolIndex == 0);
  1572. goto restart1;
  1573. }
  1574. if ((NeededSize <= POOL_SMALL_LISTS) &&
  1575. (USING_HOT_COLD_METRICS == 0)) {
  1576. Prcb = KeGetCurrentPrcb ();
  1577. LookasideList = Prcb->PPPagedLookasideList[NeededSize - 1].P;
  1578. LookasideList->TotalAllocates += 1;
  1579. Entry = (PPOOL_HEADER)
  1580. InterlockedPopEntrySList (&LookasideList->ListHead);
  1581. if (Entry == NULL) {
  1582. LookasideList = Prcb->PPPagedLookasideList[NeededSize - 1].L;
  1583. LookasideList->TotalAllocates += 1;
  1584. Entry = (PPOOL_HEADER)
  1585. InterlockedPopEntrySList (&LookasideList->ListHead);
  1586. }
  1587. if (Entry != NULL) {
  1588. Entry -= 1;
  1589. LookasideList->AllocateHits += 1;
  1590. NewPoolType = (PoolType & (BASE_POOL_TYPE_MASK | POOL_QUOTA_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK)) + 1;
  1591. NewPoolType |= POOL_IN_USE_MASK;
  1592. Entry->PoolType = (UCHAR)NewPoolType;
  1593. Entry->PoolTag = Tag;
  1594. ExpInsertPoolTrackerInline (Tag,
  1595. Entry->BlockSize << POOL_BLOCK_SHIFT,
  1596. PoolType);
  1597. //
  1598. // Zero out any back pointer to our internal structures
  1599. // to stop someone from corrupting us via an
  1600. // uninitialized pointer.
  1601. //
  1602. ((PULONG_PTR)((PCHAR)Entry + CacheOverhead))[0] = 0;
  1603. PERFINFO_POOLALLOC_ADDR((PUCHAR)Entry + CacheOverhead);
  1604. return (PUCHAR)Entry + CacheOverhead;
  1605. }
  1606. }
  1607. //
  1608. // If there is more than one paged pool, then attempt to find
  1609. // one that can be immediately locked.
  1610. //
  1611. //
  1612. // N.B. The paged pool is selected in a round robin fashion using a
  1613. // simple counter. Note that the counter is incremented using
1614. // a noninterlocked sequence, but the pool index is never
  1615. // allowed to get out of range.
  1616. //
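//
// For example, with ExpNumberOfPagedPools == 4 the counter hands out
// indices 1, 2, 3, 4, 1, ... A racy (noninterlocked) increment can at
// worst give two concurrent callers the same index, which is benign:
// the scan below walks forward from that index looking for a guarded
// mutex with no owner before settling for a contended descriptor.
//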
  1617. if (USING_HOT_COLD_METRICS) {
  1618. if ((PoolType & POOL_COLD_ALLOCATION) == 0) {
  1619. //
  1620. // Hot allocations come from the first paged pool.
  1621. //
  1622. PoolIndex = 1;
  1623. }
  1624. else {
  1625. //
  1626. // Force cold allocations to come from
  1627. // the last paged pool.
  1628. //
  1629. PoolIndex = ExpNumberOfPagedPools;
  1630. }
  1631. }
  1632. else {
  1633. if (KeNumberNodes > 1) {
  1634. //
  1635. // Use the pool descriptor which contains memory
  1636. // local to the current processor even if we have to
  1637. // wait for it. While it is possible that the
  1638. // paged pool addresses in the local descriptor
  1639. // have been paged out, on large memory
  1640. // NUMA machines this should be less common.
  1641. //
  1642. Prcb = KeGetCurrentPrcb ();
  1643. PoolIndex = Prcb->ParentNode->Color;
  1644. if (PoolIndex < ExpNumberOfPagedPools) {
  1645. PoolIndex += 1;
  1646. PoolDesc = ExpPagedPoolDescriptor[PoolIndex];
  1647. RequestType = PoolType & (BASE_POOL_TYPE_MASK | SESSION_POOL_MASK);
  1648. RetryCount = 0;
  1649. goto restart2;
  1650. }
  1651. }
  1652. PoolIndex = 1;
  1653. if (ExpNumberOfPagedPools != PoolIndex) {
  1654. ExpPoolIndex += 1;
  1655. PoolIndex = ExpPoolIndex;
  1656. if (PoolIndex > ExpNumberOfPagedPools) {
  1657. PoolIndex = 1;
  1658. ExpPoolIndex = 1;
  1659. }
  1660. Index = PoolIndex;
  1661. do {
  1662. Lock = (PKGUARDED_MUTEX) ExpPagedPoolDescriptor[PoolIndex]->LockAddress;
  1663. if (KeGetOwnerGuardedMutex (Lock) == NULL) {
  1664. break;
  1665. }
  1666. PoolIndex += 1;
  1667. if (PoolIndex > ExpNumberOfPagedPools) {
  1668. PoolIndex = 1;
  1669. }
  1670. } while (PoolIndex != Index);
  1671. }
  1672. }
  1673. PoolDesc = ExpPagedPoolDescriptor[PoolIndex];
  1674. }
  1675. else {
  1676. if (NeededSize <= ExpSessionPoolSmallLists) {
  1677. LookasideList = (PGENERAL_LOOKASIDE)(ULONG_PTR)(ExpSessionPoolLookaside + NeededSize - 1);
  1678. LookasideList->TotalAllocates += 1;
  1679. Entry = (PPOOL_HEADER)
  1680. InterlockedPopEntrySList (&LookasideList->ListHead);
  1681. if (Entry != NULL) {
  1682. Entry -= 1;
  1683. LookasideList->AllocateHits += 1;
  1684. NewPoolType = (PoolType & (BASE_POOL_TYPE_MASK | POOL_QUOTA_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK)) + 1;
  1685. NewPoolType |= POOL_IN_USE_MASK;
  1686. Entry->PoolType = (UCHAR)NewPoolType;
  1687. Entry->PoolTag = Tag;
  1688. ExpInsertPoolTrackerInline (Tag,
  1689. Entry->BlockSize << POOL_BLOCK_SHIFT,
  1690. PoolType);
  1691. //
  1692. // Zero out any back pointer to our internal structures
  1693. // to stop someone from corrupting us via an
  1694. // uninitialized pointer.
  1695. //
  1696. ((PULONG_PTR)((PCHAR)Entry + CacheOverhead))[0] = 0;
  1697. PERFINFO_POOLALLOC_ADDR((PUCHAR)Entry + CacheOverhead);
  1698. return (PUCHAR)Entry + CacheOverhead;
  1699. }
  1700. }
  1701. //
  1702. // Only one paged pool is available per session.
  1703. //
  1704. PoolIndex = 0;
  1705. ASSERT (PoolDesc == ExpSessionPoolDescriptor);
  1706. ASSERT (PoolDesc->PoolIndex == 0);
  1707. }
  1708. }
  1709. else {
  1710. //
  1711. // If the requested pool block is a small block, then attempt to
  1712. // allocate the requested pool from the per processor lookaside
  1713. // list. If the attempt fails, then attempt to allocate from the
  1714. // system lookaside list. If the attempt fails, then select a
  1715. // pool to allocate from and allocate the block normally.
  1716. //
  1717. // Only session paged pool allocations come from the per session pools.
  1718. // Nonpaged session pool allocations still come from global pool.
  1719. //
  1720. if (NeededSize <= POOL_SMALL_LISTS) {
  1721. Prcb = KeGetCurrentPrcb ();
  1722. LookasideList = Prcb->PPNPagedLookasideList[NeededSize - 1].P;
  1723. LookasideList->TotalAllocates += 1;
  1724. Entry = (PPOOL_HEADER)
  1725. InterlockedPopEntrySList (&LookasideList->ListHead);
  1726. if (Entry == NULL) {
  1727. LookasideList = Prcb->PPNPagedLookasideList[NeededSize - 1].L;
  1728. LookasideList->TotalAllocates += 1;
  1729. Entry = (PPOOL_HEADER)
  1730. InterlockedPopEntrySList (&LookasideList->ListHead);
  1731. }
  1732. if (Entry != NULL) {
  1733. Entry -= 1;
  1734. LookasideList->AllocateHits += 1;
  1735. NewPoolType = (PoolType & (BASE_POOL_TYPE_MASK | POOL_QUOTA_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK)) + 1;
  1736. NewPoolType |= POOL_IN_USE_MASK;
  1737. Entry->PoolType = (UCHAR)NewPoolType;
  1738. Entry->PoolTag = Tag;
  1739. ExpInsertPoolTrackerInline (Tag,
  1740. Entry->BlockSize << POOL_BLOCK_SHIFT,
  1741. PoolType);
  1742. //
  1743. // Zero out any back pointer to our internal structures
  1744. // to stop someone from corrupting us via an
  1745. // uninitialized pointer.
  1746. //
  1747. ((PULONG_PTR)((PCHAR)Entry + CacheOverhead))[0] = 0;
  1748. PERFINFO_POOLALLOC_ADDR((PUCHAR)Entry + CacheOverhead);
  1749. return (PUCHAR)Entry + CacheOverhead;
  1750. }
  1751. }
  1752. if (PoolType & SESSION_POOL_MASK) {
  1753. PoolDesc = PoolVector[CheckType];
  1754. }
  1755. if (ExpNumberOfNonPagedPools <= 1) {
  1756. PoolIndex = 0;
  1757. }
  1758. else {
  1759. //
  1760. // Use the pool descriptor which contains memory local to
  1761. // the current processor even if we have to contend for its lock.
  1762. //
  1763. Prcb = KeGetCurrentPrcb ();
  1764. PoolIndex = Prcb->ParentNode->Color;
  1765. if (PoolIndex >= ExpNumberOfNonPagedPools) {
  1766. PoolIndex = ExpNumberOfNonPagedPools - 1;
  1767. }
  1768. PoolDesc = ExpNonPagedPoolDescriptor[PoolIndex];
  1769. }
  1770. ASSERT(PoolIndex == PoolDesc->PoolIndex);
  1771. }
  1772. restart1:
  1773. RequestType = PoolType & (BASE_POOL_TYPE_MASK | SESSION_POOL_MASK);
  1774. RetryCount = 0;
  1775. restart2:
  1776. ListHead = &PoolDesc->ListHeads[ListNumber];
  1777. //
  1778. // Walk the listheads looking for a free block.
  1779. //
  1780. do {
  1781. //
  1782. // If the list is not empty, then allocate a block from the
  1783. // selected list.
  1784. //
  1785. if (PrivateIsListEmpty (ListHead) == FALSE) {
  1786. LOCK_POOL (PoolDesc, LockHandle);
  1787. if (PrivateIsListEmpty (ListHead)) {
  1788. //
  1789. // The block is no longer available, march on.
  1790. //
  1791. UNLOCK_POOL (PoolDesc, LockHandle);
  1792. ListHead += 1;
  1793. continue;
  1794. }
  1795. CHECK_LIST (ListHead);
  1796. Block = PrivateRemoveHeadList (ListHead);
  1797. CHECK_LIST (ListHead);
  1798. Entry = (PPOOL_HEADER)((PCHAR)Block - POOL_OVERHEAD);
  1799. CHECK_POOL_PAGE (Entry);
  1800. ASSERT(Entry->BlockSize >= NeededSize);
  1801. ASSERT(DECODE_POOL_INDEX(Entry) == PoolIndex);
  1802. ASSERT(Entry->PoolType == 0);
  1803. if (Entry->BlockSize != NeededSize) {
  1804. //
  1805. // The selected block is larger than the allocation
  1806. // request. Split the block and insert the remaining
  1807. // fragment in the appropriate list.
  1808. //
  1809. // If the entry is at the start of a page, then take
  1810. // the allocation from the front of the block so as
  1811. // to minimize fragmentation. Otherwise, take the
  1812. // allocation from the end of the block which may
  1813. // also reduce fragmentation if the block is at the
  1814. // end of a page.
  1815. //
  1816. if (Entry->PreviousSize == 0) {
  1817. //
  1818. // The entry is at the start of a page.
  1819. //
  1820. SplitEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + NeededSize);
  1821. SplitEntry->BlockSize = (USHORT)(Entry->BlockSize - NeededSize);
  1822. SplitEntry->PreviousSize = (USHORT) NeededSize;
  1823. //
  1824. // If the allocated block is not at the end of a
  1825. // page, then adjust the size of the next block.
  1826. //
  1827. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)SplitEntry + SplitEntry->BlockSize);
  1828. if (PAGE_END(NextEntry) == FALSE) {
  1829. NextEntry->PreviousSize = SplitEntry->BlockSize;
  1830. }
  1831. }
  1832. else {
  1833. //
  1834. // The entry is not at the start of a page.
  1835. //
  1836. SplitEntry = Entry;
  1837. Entry->BlockSize = (USHORT)(Entry->BlockSize - NeededSize);
  1838. Entry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + Entry->BlockSize);
  1839. Entry->PreviousSize = SplitEntry->BlockSize;
  1840. //
  1841. // If the allocated block is not at the end of a
  1842. // page, then adjust the size of the next block.
  1843. //
  1844. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + NeededSize);
  1845. if (PAGE_END(NextEntry) == FALSE) {
  1846. NextEntry->PreviousSize = (USHORT) NeededSize;
  1847. }
  1848. }
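//
// Worked example (in POOL_BLOCK units): splitting a 12-unit free entry
// for a 4-unit request. At the start of a page the caller receives the
// first 4 units and an 8-unit fragment is reinserted after it; in
// mid-page the caller instead receives the last 4 units so the fragment
// keeps the original start address and PreviousSize linkage.
//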
  1849. //
  1850. // Set the size of the allocated entry, clear the pool
  1851. // type of the split entry, set the index of the split
  1852. // entry, and insert the split entry in the appropriate
  1853. // free list.
  1854. //
  1855. Entry->BlockSize = (USHORT) NeededSize;
  1856. ENCODE_POOL_INDEX(Entry, PoolIndex);
  1857. SplitEntry->PoolType = 0;
  1858. ENCODE_POOL_INDEX(SplitEntry, PoolIndex);
  1859. Index = SplitEntry->BlockSize;
  1860. CHECK_LIST(&PoolDesc->ListHeads[Index - 1]);
  1861. //
  1862. // Only insert split pool blocks which contain more than just
  1863. // a header as only those have room for a flink/blink !
  1864. // Note if the minimum pool block size is bigger than the
  1865. // header then there can be no blocks like this.
  1866. //
  1867. if ((POOL_OVERHEAD != POOL_SMALLEST_BLOCK) ||
  1868. (SplitEntry->BlockSize != 1)) {
  1869. PrivateInsertTailList(&PoolDesc->ListHeads[Index - 1], ((PLIST_ENTRY)((PCHAR)SplitEntry + POOL_OVERHEAD)));
  1870. CHECK_LIST(((PLIST_ENTRY)((PCHAR)SplitEntry + POOL_OVERHEAD)));
  1871. }
  1872. }
  1873. Entry->PoolType = (UCHAR)(((PoolType & (BASE_POOL_TYPE_MASK | POOL_QUOTA_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK)) + 1) | POOL_IN_USE_MASK);
  1874. CHECK_POOL_PAGE (Entry);
  1875. UNLOCK_POOL(PoolDesc, LockHandle);
  1876. InterlockedIncrement ((PLONG)&PoolDesc->RunningAllocs);
  1877. InterlockedExchangeAddSizeT (&PoolDesc->TotalBytes,
  1878. Entry->BlockSize << POOL_BLOCK_SHIFT);
  1879. Entry->PoolTag = Tag;
  1880. ExpInsertPoolTrackerInline (Tag,
  1881. Entry->BlockSize << POOL_BLOCK_SHIFT,
  1882. PoolType);
  1883. //
  1884. // Zero out any back pointer to our internal structures
  1885. // to stop someone from corrupting us via an
  1886. // uninitialized pointer.
  1887. //
  1888. ((PULONGLONG)((PCHAR)Entry + CacheOverhead))[0] = 0;
  1889. PERFINFO_POOLALLOC_ADDR((PUCHAR)Entry + CacheOverhead);
  1890. return (PCHAR)Entry + CacheOverhead;
  1891. }
  1892. ListHead += 1;
  1893. } while (ListHead != &PoolDesc->ListHeads[POOL_LIST_HEADS]);
  1894. //
  1895. // A block of the desired size does not exist and there are
  1896. // no large blocks that can be split to satisfy the allocation.
  1897. // Attempt to expand the pool by allocating another page and
  1898. // adding it to the pool.
  1899. //
  1900. Entry = (PPOOL_HEADER) MiAllocatePoolPages (RequestType, PAGE_SIZE);
  1901. if (Entry == NULL) {
  1902. //
  1903. // If there are deferred free blocks, free them now and retry.
  1904. //
  1905. RetryCount += 1;
  1906. if ((RetryCount == 1) && (ExpPoolFlags & EX_DELAY_POOL_FREES)) {
  1907. ExDeferredFreePool (PoolDesc);
  1908. goto restart2;
  1909. }
  1910. if ((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) != 0) {
  1911. //
  1912. // Must succeed pool was requested so bugcheck.
  1913. //
  1914. KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
  1915. PAGE_SIZE,
  1916. NonPagedPoolDescriptor.TotalPages,
  1917. NonPagedPoolDescriptor.TotalBigPages,
  1918. 0);
  1919. }
  1920. //
  1921. // No more pool of the specified type is available.
  1922. //
  1923. ExPoolFailures += 1;
  1924. if (ExpPoolFlags & EX_PRINT_POOL_FAILURES) {
  1925. KdPrint(("EX: ExAllocatePool (%p, 0x%x) returning NULL\n",
  1926. NumberOfBytes,
  1927. PoolType));
  1928. if (ExpPoolFlags & EX_STOP_ON_POOL_FAILURES) {
  1929. DbgBreakPoint ();
  1930. }
  1931. }
  1932. if ((PoolType & POOL_RAISE_IF_ALLOCATION_FAILURE) != 0) {
  1933. ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
  1934. }
  1935. PERFINFO_POOLALLOC_ADDR (NULL);
  1936. return NULL;
  1937. }
  1938. //
  1939. // Initialize the pool header for the new allocation.
  1940. //
  1941. Entry->Ulong1 = 0;
  1942. Entry->PoolIndex = (UCHAR) PoolIndex;
  1943. Entry->BlockSize = (USHORT) NeededSize;
  1944. Entry->PoolType = (UCHAR)(((PoolType & (BASE_POOL_TYPE_MASK | POOL_QUOTA_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK)) + 1) | POOL_IN_USE_MASK);
  1945. SplitEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + NeededSize);
  1946. SplitEntry->Ulong1 = 0;
  1947. Index = (PAGE_SIZE / sizeof(POOL_BLOCK)) - NeededSize;
  1948. SplitEntry->BlockSize = (USHORT) Index;
  1949. SplitEntry->PreviousSize = (USHORT) NeededSize;
  1950. SplitEntry->PoolIndex = (UCHAR) PoolIndex;
  1951. //
  1952. // Split the allocated page and insert the remaining
  1953. // fragment in the appropriate listhead.
  1954. //
  1955. // Set the size of the allocated entry, clear the pool
  1956. // type of the split entry, set the index of the split
  1957. // entry, and insert the split entry in the appropriate
  1958. // free list.
  1959. //
  1960. //
  1961. // Note that if the request was for nonpaged session pool, we are
  1962. // not updating the session pool descriptor for this. Instead we
  1963. // are deliberately updating the global nonpaged pool descriptor
  1964. // because the rest of the fragment goes into global nonpaged pool.
  1965. // This is ok because the session pool descriptor TotalPages count
  1966. // is not relied upon.
  1967. //
  1968. // The individual pool tracking by tag, however, is critical and
  1969. // is properly maintained below (ie: session allocations are charged
  1970. // to the session tracking table and regular nonpaged allocations are
  1971. // charged to the global nonpaged tracking table).
  1972. //
  1973. InterlockedIncrement ((PLONG)&PoolDesc->TotalPages);
  1974. NeededSize <<= POOL_BLOCK_SHIFT;
  1975. InterlockedExchangeAddSizeT (&PoolDesc->TotalBytes, NeededSize);
  1976. PERFINFO_ADDPOOLPAGE(CheckType, PoolIndex, Entry, PoolDesc);
  1977. //
  1978. // Only insert split pool blocks which contain more than just
  1979. // a header as only those have room for a flink/blink !
  1980. // Note if the minimum pool block size is bigger than the
  1981. // header then there can be no blocks like this.
  1982. //
  1983. if ((POOL_OVERHEAD != POOL_SMALLEST_BLOCK) ||
  1984. (SplitEntry->BlockSize != 1)) {
  1985. //
  1986. // Now lock the pool and insert the fragment.
  1987. //
  1988. LOCK_POOL (PoolDesc, LockHandle);
  1989. CHECK_LIST(&PoolDesc->ListHeads[Index - 1]);
  1990. PrivateInsertTailList(&PoolDesc->ListHeads[Index - 1], ((PLIST_ENTRY)((PCHAR)SplitEntry + POOL_OVERHEAD)));
  1991. CHECK_LIST(((PLIST_ENTRY)((PCHAR)SplitEntry + POOL_OVERHEAD)));
  1992. CHECK_POOL_PAGE (Entry);
  1993. UNLOCK_POOL (PoolDesc, LockHandle);
  1994. }
  1995. else {
  1996. CHECK_POOL_PAGE (Entry);
  1997. }
  1998. InterlockedIncrement ((PLONG)&PoolDesc->RunningAllocs);
  1999. Block = (PVOID) ((PCHAR)Entry + CacheOverhead);
  2000. Entry->PoolTag = Tag;
  2001. ExpInsertPoolTrackerInline (Tag, NeededSize, PoolType);
  2002. PERFINFO_POOLALLOC_ADDR (Block);
  2003. return Block;
  2004. }
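//
// Illustrative sketch (not part of the build): the per-processor ("P")
// then shared ("L") lookaside pop pattern used above, reduced to plain
// single-threaded C with hypothetical names. The kernel pops lock-free
// SLIST heads via InterlockedPopEntrySList; an ordinary singly linked
// list stands in for that here purely to show the two-level fallback
// and the hit/miss accounting.
//
#if 0
typedef struct _SKETCH_ENTRY {
    struct _SKETCH_ENTRY *Next;
} SKETCH_ENTRY;

typedef struct _SKETCH_LOOKASIDE {
    SKETCH_ENTRY *ListHead;
    unsigned long TotalAllocates;
    unsigned long AllocateHits;
} SKETCH_LOOKASIDE;

static void *
SketchPopTwoLevel (SKETCH_LOOKASIDE *PerProcessor, SKETCH_LOOKASIDE *Shared)
{
    SKETCH_LOOKASIDE *Source = PerProcessor;
    SKETCH_ENTRY *Entry;

    Source->TotalAllocates += 1;
    Entry = Source->ListHead;
    if (Entry == NULL) {

        //
        // Per-processor miss - charge the shared list and retry there.
        //

        Source = Shared;
        Source->TotalAllocates += 1;
        Entry = Source->ListHead;
    }
    if (Entry == NULL) {
        return NULL;            // Both levels missed - take the slow path.
    }
    Source->ListHead = Entry->Next;
    Source->AllocateHits += 1;
    return Entry;
}
#endif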
  2005. PVOID
  2006. ExAllocatePool (
  2007. IN POOL_TYPE PoolType,
  2008. IN SIZE_T NumberOfBytes
  2009. )
  2010. /*++
  2011. Routine Description:
  2012. This function allocates a block of pool of the specified type and
  2013. returns a pointer to the allocated block. This function is used to
2014. access both the page-aligned pools and the list head entry (less than
  2015. a page) pools.
  2016. If the number of bytes specifies a size that is too large to be
  2017. satisfied by the appropriate list, then the page-aligned
  2018. pool allocator is used. The allocated block will be page-aligned
  2019. and a page-sized multiple.
  2020. Otherwise, the appropriate pool list entry is used. The allocated
  2021. block will be 64-bit aligned, but will not be page aligned. The
2022. pool allocator calculates the smallest number of POOL_BLOCK_SIZE units
  2023. that can be used to satisfy the request. If there are no blocks
  2024. available of this size, then a block of the next larger block size
  2025. is allocated and split. One piece is placed back into the pool, and
  2026. the other piece is used to satisfy the request. If the allocator
2027. reaches the page-sized block list, and nothing is there, the
  2028. page-aligned pool allocator is called. The page is split and added
  2029. to the pool...
  2030. Arguments:
  2031. PoolType - Supplies the type of pool to allocate. If the pool type
  2032. is one of the "MustSucceed" pool types, then this call will
  2033. succeed and return a pointer to allocated pool or bugcheck on failure.
  2034. For all other cases, if the system cannot allocate the requested amount
  2035. of memory, NULL is returned.
  2036. Valid pool types:
  2037. NonPagedPool
  2038. PagedPool
  2039. NonPagedPoolMustSucceed,
  2040. NonPagedPoolCacheAligned
  2041. PagedPoolCacheAligned
  2042. NonPagedPoolCacheAlignedMustS
  2043. NumberOfBytes - Supplies the number of bytes to allocate.
  2044. Return Value:
  2045. NULL - The PoolType is not one of the "MustSucceed" pool types, and
  2046. not enough pool exists to satisfy the request.
  2047. NON-NULL - Returns a pointer to the allocated pool.
  2048. --*/
  2049. {
  2050. return ExAllocatePoolWithTag (PoolType,
  2051. NumberOfBytes,
  2052. 'enoN');
  2053. }
  2054. PVOID
  2055. ExAllocatePoolWithTagPriority (
  2056. IN POOL_TYPE PoolType,
  2057. IN SIZE_T NumberOfBytes,
  2058. IN ULONG Tag,
  2059. IN EX_POOL_PRIORITY Priority
  2060. )
  2061. /*++
  2062. Routine Description:
  2063. This function allocates a block of pool of the specified type and
  2064. returns a pointer to the allocated block. This function is used to
2065. access both the page-aligned pools and the list head entry (less than
  2066. a page) pools.
  2067. If the number of bytes specifies a size that is too large to be
  2068. satisfied by the appropriate list, then the page-aligned
  2069. pool allocator is used. The allocated block will be page-aligned
  2070. and a page-sized multiple.
  2071. Otherwise, the appropriate pool list entry is used. The allocated
  2072. block will be 64-bit aligned, but will not be page aligned. The
2073. pool allocator calculates the smallest number of POOL_BLOCK_SIZE units
  2074. that can be used to satisfy the request. If there are no blocks
  2075. available of this size, then a block of the next larger block size
  2076. is allocated and split. One piece is placed back into the pool, and
  2077. the other piece is used to satisfy the request. If the allocator
2078. reaches the page-sized block list, and nothing is there, the
  2079. page-aligned pool allocator is called. The page is split and added
  2080. to the pool...
  2081. Arguments:
  2082. PoolType - Supplies the type of pool to allocate. If the pool type
  2083. is one of the "MustSucceed" pool types, then this call will
  2084. succeed and return a pointer to allocated pool or bugcheck on failure.
  2085. For all other cases, if the system cannot allocate the requested amount
  2086. of memory, NULL is returned.
  2087. Valid pool types:
  2088. NonPagedPool
  2089. PagedPool
  2090. NonPagedPoolMustSucceed,
  2091. NonPagedPoolCacheAligned
  2092. PagedPoolCacheAligned
  2093. NonPagedPoolCacheAlignedMustS
  2094. NumberOfBytes - Supplies the number of bytes to allocate.
  2095. Tag - Supplies the caller's identifying tag.
  2096. Priority - Supplies an indication as to how important it is that this
  2097. request succeed under low available pool conditions. This
  2098. can also be used to specify special pool.
  2099. Return Value:
  2100. NULL - The PoolType is not one of the "MustSucceed" pool types, and
  2101. not enough pool exists to satisfy the request.
  2102. NON-NULL - Returns a pointer to the allocated pool.
  2103. --*/
  2104. {
  2105. ULONG i;
  2106. ULONG Ratio;
  2107. PVOID Entry;
  2108. SIZE_T TotalBytes;
  2109. SIZE_T TotalFullPages;
  2110. POOL_TYPE CheckType;
  2111. PPOOL_DESCRIPTOR PoolDesc;
  2112. if ((Priority & POOL_SPECIAL_POOL_BIT) && (NumberOfBytes <= POOL_BUDDY_MAX)) {
  2113. Entry = MmAllocateSpecialPool (NumberOfBytes,
  2114. Tag,
  2115. PoolType,
  2116. (Priority & POOL_SPECIAL_POOL_UNDERRUN_BIT) ? 1 : 0);
  2117. if (Entry != NULL) {
  2118. return Entry;
  2119. }
  2120. Priority &= ~(POOL_SPECIAL_POOL_BIT | POOL_SPECIAL_POOL_UNDERRUN_BIT);
  2121. }
  2122. //
  2123. // Pool and other resources can be allocated directly through the Mm
  2124. // without the pool code knowing - so always call the Mm for the
  2125. // up-to-date counters.
  2126. //
  2127. if ((Priority != HighPoolPriority) &&
  2128. ((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) == 0)) {
  2129. if (MmResourcesAvailable (PoolType, NumberOfBytes, Priority) == FALSE) {
  2130. //
  2131. // The Mm does not have very many full pages left. Leave those
  2132. // for true high priority callers. But first see if this request
  2133. // is small, and if so, if there is a lot of fragmentation, then
  2134. // it is likely the request can be satisfied from pre-existing
  2135. // fragments.
  2136. //
  2137. if (NumberOfBytes > POOL_BUDDY_MAX) {
  2138. return NULL;
  2139. }
  2140. //
  2141. // Sum the pool descriptors.
  2142. //
  2143. CheckType = PoolType & BASE_POOL_TYPE_MASK;
  2144. if ((CheckType == NonPagedPool) ||
  2145. ((PoolType & SESSION_POOL_MASK) == 0)) {
  2146. PoolDesc = PoolVector[CheckType];
  2147. TotalBytes = 0;
  2148. TotalFullPages = 0;
  2149. if (CheckType == PagedPool) {
  2150. if (KeNumberNodes > 1) {
  2151. for (i = 0; i <= ExpNumberOfPagedPools; i += 1) {
  2152. PoolDesc = ExpPagedPoolDescriptor[i];
  2153. TotalFullPages += PoolDesc->TotalPages;
  2154. TotalFullPages += PoolDesc->TotalBigPages;
  2155. TotalBytes += PoolDesc->TotalBytes;
  2156. }
  2157. }
  2158. else {
  2159. for (i = 0; i <= ExpNumberOfPagedPools; i += 1) {
  2160. TotalFullPages += PoolDesc->TotalPages;
  2161. TotalFullPages += PoolDesc->TotalBigPages;
  2162. TotalBytes += PoolDesc->TotalBytes;
  2163. PoolDesc += 1;
  2164. }
  2165. }
  2166. }
  2167. else {
  2168. if (ExpNumberOfNonPagedPools == 1) {
  2169. TotalFullPages += PoolDesc->TotalPages;
  2170. TotalFullPages += PoolDesc->TotalBigPages;
  2171. TotalBytes += PoolDesc->TotalBytes;
  2172. }
  2173. else {
  2174. for (i = 0; i < ExpNumberOfNonPagedPools; i += 1) {
  2175. PoolDesc = ExpNonPagedPoolDescriptor[i];
  2176. TotalFullPages += PoolDesc->TotalPages;
  2177. TotalFullPages += PoolDesc->TotalBigPages;
  2178. TotalBytes += PoolDesc->TotalBytes;
  2179. }
  2180. }
  2181. }
  2182. }
  2183. else {
  2184. PoolDesc = ExpSessionPoolDescriptor;
  2185. TotalFullPages = PoolDesc->TotalPages;
  2186. TotalFullPages += PoolDesc->TotalBigPages;
  2187. TotalBytes = PoolDesc->TotalBytes;
  2188. }
  2189. //
  2190. // If the pages are more than 80% populated then don't assume
2191. // we're going to be able to satisfy this request via a fragment.
  2192. //
  2193. TotalFullPages |= 1; // Ensure we never divide by zero.
  2194. TotalBytes >>= PAGE_SHIFT;
  2195. //
  2196. // The additions above were performed lock free so we must handle
  2197. // slicing which can cause nonexact sums.
  2198. //
  2199. if (TotalBytes > TotalFullPages) {
  2200. TotalBytes = TotalFullPages;
  2201. }
  2202. Ratio = (ULONG)((TotalBytes * 100) / TotalFullPages);
  2203. if (Ratio >= 80) {
  2204. return NULL;
  2205. }
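//
// For example, 300 pages' worth of allocated bytes spread across 400
// committed pool pages yields a ratio of 75, so a small request is
// allowed to proceed on the expectation that an existing fragment can
// satisfy it without consuming one of the remaining full pages.
//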
  2206. }
  2207. }
  2208. //
  2209. // There is a window between determining whether to proceed and actually
  2210. // doing the allocation. In this window the pool may deplete. This is not
  2211. // worth closing at this time.
  2212. //
  2213. return ExAllocatePoolWithTag (PoolType, NumberOfBytes, Tag);
  2214. }
  2215. PVOID
  2216. ExAllocatePoolWithQuota (
  2217. IN POOL_TYPE PoolType,
  2218. IN SIZE_T NumberOfBytes
  2219. )
  2220. /*++
  2221. Routine Description:
  2222. This function allocates a block of pool of the specified type,
  2223. returns a pointer to the allocated block, and if the binary buddy
  2224. allocator was used to satisfy the request, charges pool quota to the
  2225. current process. This function is used to access both the
  2226. page-aligned pools, and the binary buddy.
  2227. If the number of bytes specifies a size that is too large to be
  2228. satisfied by the appropriate binary buddy pool, then the
  2229. page-aligned pool allocator is used. The allocated block will be
  2230. page-aligned and a page-sized multiple. No quota is charged to the
  2231. current process if this is the case.
  2232. Otherwise, the appropriate binary buddy pool is used. The allocated
  2233. block will be 64-bit aligned, but will not be page aligned. After
  2234. the allocation completes, an attempt will be made to charge pool
  2235. quota (of the appropriate type) to the current process object. If
  2236. the quota charge succeeds, then the pool block's header is adjusted
  2237. to point to the current process. The process object is not
  2238. dereferenced until the pool is deallocated and the appropriate
  2239. amount of quota is returned to the process. Otherwise, the pool is
2240. deallocated, and a "quota exceeded" condition is raised.
  2241. Arguments:
  2242. PoolType - Supplies the type of pool to allocate. If the pool type
  2243. is one of the "MustSucceed" pool types and sufficient quota
  2244. exists, then this call will always succeed and return a pointer
  2245. to allocated pool. Otherwise, if the system cannot allocate
  2246. the requested amount of memory a STATUS_INSUFFICIENT_RESOURCES
  2247. status is raised.
  2248. NumberOfBytes - Supplies the number of bytes to allocate.
  2249. Return Value:
  2250. NON-NULL - Returns a pointer to the allocated pool.
  2251. Unspecified - If insufficient quota exists to complete the pool
  2252. allocation, the return value is unspecified.
  2253. --*/
  2254. {
  2255. return ExAllocatePoolWithQuotaTag (PoolType, NumberOfBytes, 'enoN');
  2256. }
  2257. //
  2258. // The following assert macro is used to check that an input process object is
  2259. // really a PROCESS and not something else, like deallocated pool.
  2260. //
  2261. #define ASSERT_KPROCESS(P) { \
  2262. ASSERT(((PKPROCESS)(P))->Header.Type == ProcessObject); \
  2263. }
  2264. __forceinline
  2265. PEPROCESS
  2266. ExpGetBilledProcess (
  2267. IN PPOOL_HEADER Entry
  2268. )
  2269. {
  2270. PEPROCESS ProcessBilled;
  2271. if ((Entry->PoolType & POOL_QUOTA_MASK) == 0) {
  2272. return NULL;
  2273. }
  2274. #if defined(_WIN64)
  2275. ProcessBilled = Entry->ProcessBilled;
  2276. #else
  2277. ProcessBilled = * (PVOID *)((PCHAR)Entry + (Entry->BlockSize << POOL_BLOCK_SHIFT) - sizeof (PVOID));
  2278. #endif
  2279. if (ProcessBilled != NULL) {
  2280. if (((PKPROCESS)(ProcessBilled))->Header.Type != ProcessObject) {
  2281. KeBugCheckEx (BAD_POOL_CALLER,
  2282. 0xD,
  2283. (ULONG_PTR)(Entry + 1),
  2284. Entry->PoolTag,
  2285. (ULONG_PTR)ProcessBilled);
  2286. }
  2287. }
  2288. return ProcessBilled;
  2289. }
  2290. PVOID
  2291. ExAllocatePoolWithQuotaTag (
  2292. IN POOL_TYPE PoolType,
  2293. IN SIZE_T NumberOfBytes,
  2294. IN ULONG Tag
  2295. )
  2296. /*++
  2297. Routine Description:
  2298. This function allocates a block of pool of the specified type,
  2299. returns a pointer to the allocated block, and if the binary buddy
  2300. allocator was used to satisfy the request, charges pool quota to the
  2301. current process. This function is used to access both the
  2302. page-aligned pools, and the binary buddy.
  2303. If the number of bytes specifies a size that is too large to be
  2304. satisfied by the appropriate binary buddy pool, then the
  2305. page-aligned pool allocator is used. The allocated block will be
  2306. page-aligned and a page-sized multiple. No quota is charged to the
  2307. current process if this is the case.
  2308. Otherwise, the appropriate binary buddy pool is used. The allocated
  2309. block will be 64-bit aligned, but will not be page aligned. After
  2310. the allocation completes, an attempt will be made to charge pool
  2311. quota (of the appropriate type) to the current process object. If
  2312. the quota charge succeeds, then the pool block's header is adjusted
  2313. to point to the current process. The process object is not
  2314. dereferenced until the pool is deallocated and the appropriate
  2315. amount of quota is returned to the process. Otherwise, the pool is
2316. deallocated, and a "quota exceeded" condition is raised.
  2317. Arguments:
  2318. PoolType - Supplies the type of pool to allocate. If the pool type
  2319. is one of the "MustSucceed" pool types and sufficient quota
  2320. exists, then this call will always succeed and return a pointer
  2321. to allocated pool. Otherwise, if the system cannot allocate
  2322. the requested amount of memory a STATUS_INSUFFICIENT_RESOURCES
  2323. status is raised.
  2324. NumberOfBytes - Supplies the number of bytes to allocate.
  2325. Return Value:
  2326. NON-NULL - Returns a pointer to the allocated pool.
  2327. Unspecified - If insufficient quota exists to complete the pool
  2328. allocation, the return value is unspecified.
  2329. --*/
  2330. {
  2331. PVOID p;
  2332. PEPROCESS Process;
  2333. PPOOL_HEADER Entry;
  2334. LOGICAL RaiseOnQuotaFailure;
  2335. NTSTATUS Status;
  2336. #if DBG
  2337. LONG ConcurrentQuotaPool;
  2338. #endif
  2339. RaiseOnQuotaFailure = TRUE;
  2340. if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE) {
  2341. RaiseOnQuotaFailure = FALSE;
  2342. PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
  2343. }
  2344. PoolType = (POOL_TYPE)((UCHAR)PoolType + POOL_QUOTA_MASK);
  2345. Process = PsGetCurrentProcess ();
  2346. #if !defined(_WIN64)
  2347. //
  2348. // Add in room for the quota pointer at the end of the caller's allocation.
  2349. // Note for NT64, there is room in the pool header for both the tag and
  2350. // the quota pointer so no extra space is needed at the end.
  2351. //
  2352. // Only add in the quota pointer if doing so won't cause us to spill the
  2353. // allocation into a full page.
  2354. //
  2355. ASSERT (NumberOfBytes != 0);
  2356. if (NumberOfBytes <= PAGE_SIZE - POOL_OVERHEAD - sizeof (PVOID)) {
  2357. if (Process != PsInitialSystemProcess) {
  2358. NumberOfBytes += sizeof (PVOID);
  2359. }
  2360. else {
  2361. PoolType = (POOL_TYPE)((UCHAR)PoolType - POOL_QUOTA_MASK);
  2362. }
  2363. }
  2364. else {
  2365. //
  2366. // Turn off the quota bit prior to allocating if we're not charging
  2367. // as there's no room to put (or subsequently query for) a quota
  2368. // pointer.
  2369. //
  2370. PoolType = (POOL_TYPE)((UCHAR)PoolType - POOL_QUOTA_MASK);
  2371. }
  2372. #endif
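//
// On NT32 the resulting layout of a quota-charged block is therefore
// (a sketch):
//
//    +-------------+----------------------+--------------------+
//    | POOL_HEADER |     caller data      | PEPROCESS (quota)  |
//    +-------------+----------------------+--------------------+
//
// i.e. the billed process pointer occupies the last sizeof(PVOID) bytes
// of the block, which is exactly where ExpGetBilledProcess reads it back.
//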
  2373. p = ExAllocatePoolWithTag (PoolType, NumberOfBytes, Tag);
  2374. //
  2375. // Note - NULL is page aligned.
  2376. //
  2377. if (!PAGE_ALIGNED(p)) {
  2378. if ((ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) &&
  2379. (MmIsSpecialPoolAddress (p))) {
  2380. return p;
  2381. }
  2382. Entry = (PPOOL_HEADER)((PCH)p - POOL_OVERHEAD);
  2383. #if defined(_WIN64)
  2384. Entry->ProcessBilled = NULL;
  2385. #endif
  2386. if (Process != PsInitialSystemProcess) {
  2387. Status = PsChargeProcessPoolQuota (Process,
  2388. PoolType & BASE_POOL_TYPE_MASK,
  2389. (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT));
  2390. if (!NT_SUCCESS(Status)) {
  2391. //
  2392. // Back out the allocation.
  2393. //
  2394. #if !defined(_WIN64)
  2395. //
  2396. // The quota flag cannot be blindly cleared in NT32 because
  2397. // it's used to denote the allocation is larger (and the
  2398. // verifier finds its own header based on this).
  2399. //
  2400. // Instead of clearing the flag above, instead zero the quota
  2401. // pointer.
  2402. //
  2403. * (PVOID *)((PCHAR)Entry + (Entry->BlockSize << POOL_BLOCK_SHIFT) - sizeof (PVOID)) = NULL;
  2404. #endif
  2405. ExFreePoolWithTag (p, Tag);
  2406. if (RaiseOnQuotaFailure) {
  2407. ExRaiseStatus (Status);
  2408. }
  2409. return NULL;
  2410. }
  2411. #if DBG
  2412. ConcurrentQuotaPool = InterlockedIncrement (&ExConcurrentQuotaPool);
  2413. if (ConcurrentQuotaPool > ExConcurrentQuotaPoolMax) {
  2414. ExConcurrentQuotaPoolMax = ConcurrentQuotaPool;
  2415. }
  2416. #endif
  2417. #if defined(_WIN64)
  2418. Entry->ProcessBilled = Process;
  2419. #else
  2420. if ((UCHAR)PoolType & POOL_QUOTA_MASK) {
  2421. * (PVOID *)((PCHAR)Entry + (Entry->BlockSize << POOL_BLOCK_SHIFT) - sizeof (PVOID)) = Process;
  2422. }
  2423. #endif
  2424. ObReferenceObject (Process);
  2425. }
  2426. }
  2427. else {
  2428. if ((p == NULL) && (RaiseOnQuotaFailure)) {
  2429. ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
  2430. }
  2431. }
  2432. return p;
  2433. }
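//
// Illustrative sketch (not part of the build): the charge-then-rollback
// shape used above, as a self-contained user-mode analog with a
// hypothetical quota counter. The invariant is that the block is either
// fully billed or fully backed out - never returned half-charged.
//
#if 0
#include <stdlib.h>

static size_t SketchQuotaRemaining = 1 << 20;   // hypothetical process limit

static void *
SketchAllocateWithQuota (size_t NumberOfBytes)
{
    void *Block = malloc (NumberOfBytes);

    if (Block == NULL) {
        return NULL;
    }
    if (NumberOfBytes > SketchQuotaRemaining) {

        //
        // Back out the allocation - the kernel path frees the pool
        // block and may raise STATUS_INSUFFICIENT_RESOURCES here.
        //

        free (Block);
        return NULL;
    }
    SketchQuotaRemaining -= NumberOfBytes;
    return Block;
}
#endif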
  2434. VOID
  2435. ExInsertPoolTag (
  2436. ULONG Tag,
  2437. PVOID Va,
  2438. SIZE_T NumberOfBytes,
  2439. POOL_TYPE PoolType
  2440. )
  2441. /*++
  2442. Routine Description:
  2443. This function inserts a pool tag in the tag table and increments the
  2444. number of allocates and updates the total allocation size.
  2445. This function also inserts the pool tag in the big page tag table.
  2446. N.B. This function is for use by memory management ONLY.
  2447. Arguments:
  2448. Tag - Supplies the tag used to insert an entry in the tag table.
  2449. Va - Supplies the allocated virtual address.
  2450. NumberOfBytes - Supplies the allocation size in bytes.
  2451. PoolType - Supplies the pool type.
  2452. Return Value:
  2453. None.
  2454. Environment:
  2455. No pool locks held so pool may be freely allocated here as needed.
  2456. --*/
  2457. {
  2458. ULONG NumberOfPages;
  2459. #if !DBG
  2460. UNREFERENCED_PARAMETER (PoolType);
  2461. #endif
  2462. ASSERT ((PoolType & SESSION_POOL_MASK) == 0);
  2463. if (NumberOfBytes >= PAGE_SIZE) {
  2464. NumberOfPages = (ULONG) BYTES_TO_PAGES (NumberOfBytes);
  2465. if (ExpAddTagForBigPages((PVOID)Va, Tag, NumberOfPages, PoolType) == FALSE) {
  2466. Tag = ' GIB';
  2467. }
  2468. }
  2469. ExpInsertPoolTracker (Tag, NumberOfBytes, NonPagedPool);
  2470. }
  2471. VOID
  2472. ExpSeedHotTags (
  2473. VOID
  2474. )
  2475. /*++
  2476. Routine Description:
  2477. This function seeds well-known hot tags into the pool tag tracking table
  2478. when the table is first created. The goal is to increase the likelihood
  2479. that the hash generated for these tags always gets a direct hit.
  2480. Arguments:
  2481. None.
  2482. Return Value:
  2483. None.
  2484. Environment:
  2485. INIT time, no locks held.
  2486. --*/
  2487. {
  2488. ULONG i;
  2489. ULONG Key;
  2490. ULONG Hash;
  2491. ULONG Index;
  2492. PPOOL_TRACKER_TABLE TrackTable;
  2493. ULONG KeyList[] = {
  2494. ' oI',
  2495. ' laH',
  2496. 'PldM',
  2497. 'LooP',
  2498. 'tSbO',
  2499. ' prI',
  2500. 'bdDN',
  2501. 'LprI',
  2502. 'pOoI',
  2503. ' ldM',
  2504. 'eliF',
  2505. 'aVMC',
  2506. 'dSeS',
  2507. 'CFtN',
  2508. 'looP',
  2509. 'rPCT',
  2510. 'bNMC',
  2511. 'dTeS',
  2512. 'sFtN',
  2513. 'TPCT',
  2514. 'CPCT',
  2515. ' yeK',
  2516. 'qSbO',
  2517. 'mNoI',
  2518. 'aEoI',
  2519. 'cPCT',
  2520. 'aFtN',
  2521. '0ftN',
  2522. 'tceS',
  2523. 'SprI',
  2524. 'ekoT',
  2525. ' eS',
  2526. 'lCbO',
  2527. 'cScC',
  2528. 'lFtN',
  2529. 'cAeS',
  2530. 'mfSF',
  2531. 'kWcC',
  2532. 'miSF',
  2533. 'CdfA',
  2534. 'EdfA',
  2535. 'orSF',
  2536. 'nftN',
  2537. 'PRIU',
  2538. 'rFpN',
  2539. 'RFpN',
  2540. 'aPeS',
  2541. 'sUeS',
  2542. 'FpcA',
  2543. 'MpcA',
  2544. 'cSeS',
  2545. 'mNbO',
  2546. 'sFpN',
  2547. 'uLeS',
  2548. 'DPcS',
  2549. 'nevE',
  2550. 'vrqR',
  2551. 'ldaV',
  2552. ' pP',
  2553. 'SdaV',
  2554. ' daV',
  2555. 'LdaV',
  2556. 'FdaV',
  2557. //
  2558. // BIG is preseeded not because it is hot, but because allocations
  2559. // with this tag must be inserted successfully (ie: cannot be
  2560. // retagged into the Ovfl bucket) because we need a tag to account
  2561. // for them in the PoolTrackTable counting when freeing the pool.
  2562. //
  2563. ' GIB',
  2564. };
  2565. TrackTable = PoolTrackTable;
  2566. for (i = 0; i < sizeof (KeyList) / sizeof (ULONG); i += 1) {
  2567. Key = KeyList[i];
  2568. Hash = POOLTAG_HASH(Key,PoolTrackTableMask);
  2569. Index = Hash;
  2570. do {
  2571. ASSERT (TrackTable[Hash].Key != Key);
  2572. if ((TrackTable[Hash].Key == 0) &&
  2573. (Hash != PoolTrackTableSize - 1)) {
  2574. TrackTable[Hash].Key = Key;
  2575. break;
  2576. }
  2577. ASSERT (TrackTable[Hash].Key != Key);
  2578. Hash = (Hash + 1) & (ULONG)PoolTrackTableMask;
  2579. if (Hash == Index) {
  2580. break;
  2581. }
  2582. } while (TRUE);
  2583. }
  2584. }
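//
// Illustrative sketch (not part of the build): the seeding loop above is
// linear probing over a power-of-two table with the final slot reserved
// as the overflow bucket. A minimal user-mode rendition, substituting a
// hypothetical multiplicative hash for POOLTAG_HASH:
//
#if 0
#define SKETCH_TABLE_SIZE 128                   // must be a power of two

static unsigned long SketchKeys[SKETCH_TABLE_SIZE];

static int
SketchSeedTag (unsigned long Key)
{
    unsigned long Mask = SKETCH_TABLE_SIZE - 1;
    unsigned long Hash = (Key * 2654435761UL) & Mask;   // hypothetical hash
    unsigned long Index = Hash;

    do {
        if ((SketchKeys[Hash] == 0) &&
            (Hash != SKETCH_TABLE_SIZE - 1)) {  // last slot is the overflow bucket
            SketchKeys[Hash] = Key;
            return 1;                           // seeded at (or near) its hash
        }
        Hash = (Hash + 1) & Mask;
    } while (Hash != Index);

    return 0;                                   // table full - tag not seeded
}
#endif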
  2585. VOID
  2586. ExpInsertPoolTrackerExpansion (
  2587. IN ULONG Key,
  2588. IN SIZE_T NumberOfBytes,
  2589. IN POOL_TYPE PoolType
  2590. )
  2591. /*++
  2592. Routine Description:
  2593. This function inserts a pool tag in the expansion tag table (taking a
  2594. spinlock to do so), increments the number of allocates and updates
  2595. the total allocation size.
  2596. Arguments:
  2597. Key - Supplies the key value used to locate a matching entry in the
  2598. tag table.
  2599. NumberOfBytes - Supplies the allocation size.
  2600. PoolType - Supplies the pool type.
  2601. Return Value:
  2602. None.
  2603. Environment:
  2604. No pool locks held so pool may be freely allocated here as needed.
  2605. This routine is only called if ExpInsertPoolTracker encounters a full
  2606. builtin list.
  2607. --*/
  2608. {
  2609. ULONG Hash;
  2610. KIRQL OldIrql;
  2611. ULONG BigPages;
  2612. SIZE_T NewSize;
  2613. SIZE_T SizeInBytes;
  2614. SIZE_T NewSizeInBytes;
  2615. PPOOL_TRACKER_TABLE OldTable;
  2616. PPOOL_TRACKER_TABLE NewTable;
  2617. //
  2618. // The protected pool bit has already been stripped.
  2619. //
  2620. ASSERT ((Key & PROTECTED_POOL) == 0);
  2621. if (PoolType & SESSION_POOL_MASK) {
  2622. //
  2623. // Use the very last entry as a bit bucket for overflows.
  2624. //
  2625. NewTable = ExpSessionPoolTrackTable + ExpSessionPoolTrackTableSize - 1;
  2626. ASSERT ((NewTable->Key == 0) || (NewTable->Key == 'lfvO'));
  2627. NewTable->Key = 'lfvO';
  2628. //
  2629. // Update the fields with interlocked operations as other
  2630. // threads may also have begun doing so by this point.
  2631. //
  2632. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  2633. InterlockedIncrement ((PLONG) &NewTable->PagedAllocs);
  2634. InterlockedExchangeAddSizeT (&NewTable->PagedBytes,
  2635. NumberOfBytes);
  2636. }
  2637. else {
  2638. InterlockedIncrement ((PLONG) &NewTable->NonPagedAllocs);
  2639. InterlockedExchangeAddSizeT (&NewTable->NonPagedBytes,
  2640. NumberOfBytes);
  2641. }
  2642. return;
  2643. }
  2644. //
  2645. // Linear search through the expansion table. This is ok because
  2646. // the case of no free entries in the built-in table is extremely rare.
  2647. //
  2648. ExAcquireSpinLock (&ExpTaggedPoolLock, &OldIrql);
  2649. for (Hash = 0; Hash < PoolTrackTableExpansionSize; Hash += 1) {
  2650. if (PoolTrackTableExpansion[Hash].Key == Key) {
  2651. break;
  2652. }
  2653. if (PoolTrackTableExpansion[Hash].Key == 0) {
  2654. ASSERT (PoolTrackTable[PoolTrackTableSize - 1].Key == 0);
  2655. PoolTrackTableExpansion[Hash].Key = Key;
  2656. break;
  2657. }
  2658. }
  2659. if (Hash != PoolTrackTableExpansionSize) {
  2660. //
  2661. // The entry was found (or created). Update the other fields now.
  2662. //
  2663. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  2664. PoolTrackTableExpansion[Hash].PagedAllocs += 1;
  2665. PoolTrackTableExpansion[Hash].PagedBytes += NumberOfBytes;
  2666. }
  2667. else {
  2668. PoolTrackTableExpansion[Hash].NonPagedAllocs += 1;
  2669. PoolTrackTableExpansion[Hash].NonPagedBytes += NumberOfBytes;
  2670. }
  2671. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  2672. return;
  2673. }
  2674. //
  2675. // The entry was not found and the expansion table is full (or nonexistent).
  2676. // Try to allocate a larger expansion table now.
  2677. //
  2678. if (PoolTrackTable[PoolTrackTableSize - 1].Key != 0) {
  2679. //
  2680. // The overflow bucket has been used so expansion of the tracker table
  2681. // is not allowed because a subsequent free of a tag can go negative
  2682. // as the original allocation is in overflow and a newer allocation
  2683. // may be distinct.
  2684. //
  2685. //
  2686. // Use the very last entry as a bit bucket for overflows.
  2687. //
  2688. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  2689. Hash = (ULONG)PoolTrackTableSize - 1;
  2690. //
  2691. // Update the fields with interlocked operations as other
  2692. // threads may also have begun doing so by this point.
  2693. //
  2694. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  2695. InterlockedIncrement ((PLONG) &PoolTrackTable[Hash].PagedAllocs);
  2696. InterlockedExchangeAddSizeT (&PoolTrackTable[Hash].PagedBytes,
  2697. NumberOfBytes);
  2698. }
  2699. else {
  2700. InterlockedIncrement ((PLONG) &PoolTrackTable[Hash].NonPagedAllocs);
  2701. InterlockedExchangeAddSizeT (&PoolTrackTable[Hash].NonPagedBytes,
  2702. NumberOfBytes);
  2703. }
  2704. return;
  2705. }
  2706. SizeInBytes = PoolTrackTableExpansionSize * sizeof(POOL_TRACKER_TABLE);
  2707. //
  2708. // Use as much of the slush in the final page as possible.
  2709. //
  2710. NewSizeInBytes = (PoolTrackTableExpansionPages + 1) << PAGE_SHIFT;
  2711. NewSize = NewSizeInBytes / sizeof (POOL_TRACKER_TABLE);
  2712. NewSizeInBytes = NewSize * sizeof(POOL_TRACKER_TABLE);
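//
// For example (hypothetical sizes): with 4K pages and a 40-byte
// POOL_TRACKER_TABLE, growing from one expansion page to two gives
// 8192 / 40 == 204 entries, and NewSizeInBytes is trimmed back to
// 204 * 40 == 8160 so only the unusable 32-byte tail is wasted.
//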
  2713. NewTable = MiAllocatePoolPages (NonPagedPool, NewSizeInBytes);
  2714. if (NewTable != NULL) {
  2715. if (PoolTrackTableExpansion != NULL) {
  2716. //
  2717. // Copy all the existing entries into the new table.
  2718. //
  2719. RtlCopyMemory (NewTable,
  2720. PoolTrackTableExpansion,
  2721. SizeInBytes);
  2722. }
  2723. RtlZeroMemory ((PVOID)(NewTable + PoolTrackTableExpansionSize),
  2724. NewSizeInBytes - SizeInBytes);
  2725. OldTable = PoolTrackTableExpansion;
  2726. PoolTrackTableExpansion = NewTable;
  2727. PoolTrackTableExpansionSize = NewSize;
  2728. PoolTrackTableExpansionPages += 1;
  2729. //
  2730. // Recursively call ourself to insert the new table entry. This entry
  2731. // must be inserted before releasing the tagged spinlock because
  2732. // another thread may be further growing the table and as soon as we
  2733. // release the spinlock, that thread may grow and try to free our
  2734. // new table !
  2735. //
  2736. ExpInsertPoolTracker ('looP',
  2737. PoolTrackTableExpansionPages << PAGE_SHIFT,
  2738. NonPagedPool);
  2739. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  2740. //
  2741. // Free the old table if there was one.
  2742. //
  2743. if (OldTable != NULL) {
  2744. BigPages = MiFreePoolPages (OldTable);
  2745. ExpRemovePoolTracker ('looP',
  2746. (SIZE_T) BigPages * PAGE_SIZE,
  2747. NonPagedPool);
  2748. }
  2749. //
  2750. // Finally insert the caller's original allocation.
  2751. //
  2752. ExpInsertPoolTrackerExpansion (Key, NumberOfBytes, PoolType);
  2753. }
  2754. else {
  2755. //
  2756. // Use the very last entry as a bit bucket for overflows.
  2757. //
  2758. Hash = (ULONG)PoolTrackTableSize - 1;
  2759. ASSERT (PoolTrackTable[Hash].Key == 0);
  2760. PoolTrackTable[Hash].Key = 'lfvO';
  2761. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  2762. //
  2763. // Update the fields with interlocked operations as other
  2764. // threads may also have begun doing so by this point.
  2765. //
  2766. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  2767. InterlockedIncrement ((PLONG) &PoolTrackTable[Hash].PagedAllocs);
  2768. InterlockedExchangeAddSizeT (&PoolTrackTable[Hash].PagedBytes,
  2769. NumberOfBytes);
  2770. }
  2771. else {
  2772. InterlockedIncrement ((PLONG) &PoolTrackTable[Hash].NonPagedAllocs);
  2773. InterlockedExchangeAddSizeT (&PoolTrackTable[Hash].NonPagedBytes,
  2774. NumberOfBytes);
  2775. }
  2776. }
  2777. return;
  2778. }
  2779. VOID
  2780. ExpInsertPoolTracker (
  2781. IN ULONG Key,
  2782. IN SIZE_T NumberOfBytes,
  2783. IN POOL_TYPE PoolType
  2784. )
  2785. {
  2786. ExpInsertPoolTrackerInline (Key, NumberOfBytes, PoolType);
  2787. }
  2788. VOID
  2789. ExpRemovePoolTrackerExpansion (
  2790. IN ULONG Key,
  2791. IN SIZE_T NumberOfBytes,
  2792. IN POOL_TYPE PoolType
  2793. )
  2794. /*++
  2795. Routine Description:
  2796. This function increments the number of frees and updates the total
  2797. allocation size in the expansion table.
  2798. Arguments:
  2799. Key - Supplies the key value used to locate a matching entry in the
  2800. tag table.
  2801. NumberOfBytes - Supplies the allocation size.
  2802. PoolType - Supplies the pool type.
  2803. Return Value:
  2804. None.
  2805. --*/
  2806. {
  2807. ULONG Hash;
  2808. KIRQL OldIrql;
  2809. PPOOL_TRACKER_TABLE TrackTable;
  2810. #if !defined (NT_UP)
  2811. ULONG Processor;
  2812. #endif
  2813. //
  2814. // The protected pool bit has already been stripped.
  2815. //
  2816. ASSERT ((Key & PROTECTED_POOL) == 0);
  2817. if (PoolType & SESSION_POOL_MASK) {
  2818. //
  2819. // This entry must have been charged to the overflow bucket.
  2820. // Update the pool tracker table entry for it.
  2821. //
  2822. Hash = (ULONG)ExpSessionPoolTrackTableSize - 1;
  2823. TrackTable = ExpSessionPoolTrackTable;
  2824. goto OverflowEntry;
  2825. }
  2826. //
  2827. // Linear search through the expansion table. This is ok because
  2828. // the existence of an expansion table at all is extremely rare.
  2829. //
  2830. ExAcquireSpinLock (&ExpTaggedPoolLock, &OldIrql);
  2831. for (Hash = 0; Hash < PoolTrackTableExpansionSize; Hash += 1) {
  2832. if (PoolTrackTableExpansion[Hash].Key == Key) {
  2833. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  2834. ASSERT (PoolTrackTableExpansion[Hash].PagedAllocs != 0);
  2835. ASSERT (PoolTrackTableExpansion[Hash].PagedAllocs >=
  2836. PoolTrackTableExpansion[Hash].PagedFrees);
  2837. ASSERT (PoolTrackTableExpansion[Hash].PagedBytes >= NumberOfBytes);
  2838. PoolTrackTableExpansion[Hash].PagedFrees += 1;
  2839. PoolTrackTableExpansion[Hash].PagedBytes -= NumberOfBytes;
  2840. }
  2841. else {
  2842. ASSERT (PoolTrackTableExpansion[Hash].NonPagedAllocs != 0);
  2843. ASSERT (PoolTrackTableExpansion[Hash].NonPagedAllocs >=
  2844. PoolTrackTableExpansion[Hash].NonPagedFrees);
  2845. ASSERT (PoolTrackTableExpansion[Hash].NonPagedBytes >= NumberOfBytes);
  2846. PoolTrackTableExpansion[Hash].NonPagedFrees += 1;
  2847. PoolTrackTableExpansion[Hash].NonPagedBytes -= NumberOfBytes;
  2848. }
  2849. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  2850. return;
  2851. }
  2852. if (PoolTrackTableExpansion[Hash].Key == 0) {
  2853. break;
  2854. }
  2855. }
  2856. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  2857. //
  2858. // This entry must have been charged to the overflow bucket.
  2859. // Update the pool tracker table entry for it.
  2860. //
  2861. Hash = (ULONG)PoolTrackTableSize - 1;
  2862. #if !defined (NT_UP)
  2863. //
  2864. // Use the current processor to pick a pool tag table to use. Note that
  2865. // in rare cases, this thread may context switch to another processor but
  2866. // the algorithms below will still be correct.
  2867. //
  2868. Processor = KeGetCurrentProcessorNumber ();
  2869. ASSERT (Processor < MAXIMUM_PROCESSOR_TAG_TABLES);
  2870. TrackTable = ExPoolTagTables[Processor];
  2871. #else
  2872. TrackTable = PoolTrackTable;
  2873. #endif
  2874. OverflowEntry:
  2875. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  2876. ASSERT (TrackTable[Hash].PagedAllocs != 0);
  2877. ASSERT (TrackTable[Hash].PagedBytes >= NumberOfBytes);
  2878. InterlockedIncrement ((PLONG) &TrackTable[Hash].PagedFrees);
  2879. InterlockedExchangeAddSizeT (&TrackTable[Hash].PagedBytes,
  2880. 0 - NumberOfBytes);
  2881. }
  2882. else {
  2883. ASSERT (TrackTable[Hash].NonPagedAllocs != 0);
  2884. ASSERT (TrackTable[Hash].NonPagedBytes >= NumberOfBytes);
  2885. InterlockedIncrement ((PLONG) &TrackTable[Hash].NonPagedFrees);
  2886. InterlockedExchangeAddSizeT (&TrackTable[Hash].NonPagedBytes,
  2887. 0 - NumberOfBytes);
  2888. }
  2889. return;
  2890. }
  2891. VOID
  2892. ExpRemovePoolTracker (
  2893. IN ULONG Key,
  2894. IN SIZE_T NumberOfBytes,
  2895. IN POOL_TYPE PoolType
  2896. )
  2897. {
  2898. ExpRemovePoolTrackerInline (Key, NumberOfBytes, PoolType);
  2899. }
  2900. LOGICAL
  2901. ExpAddTagForBigPages (
  2902. IN PVOID Va,
  2903. IN ULONG Key,
  2904. IN ULONG NumberOfPages,
  2905. IN POOL_TYPE PoolType
  2906. )
  2907. /*++
  2908. Routine Description:
  2909. This function inserts a pool tag in the big page tag table.
  2910. Arguments:
  2911. Va - Supplies the allocated virtual address.
  2912. Key - Supplies the key value used to locate a matching entry in the
  2913. tag table.
  2914. NumberOfPages - Supplies the number of pages that were allocated.
  2915. PoolType - Supplies the type of the pool.
  2916. Return Value:
  2917. TRUE if an entry was allocated, FALSE if not.
  2918. Environment:
  2919. No pool locks held so the table may be freely expanded here as needed.
  2920. --*/
  2921. {
  2922. ULONG i;
  2923. ULONG Hash;
  2924. PVOID OldVa;
  2925. ULONG BigPages;
  2926. PVOID OldTable;
  2927. LOGICAL Inserted;
  2928. KIRQL OldIrql;
  2929. SIZE_T SizeInBytes;
  2930. SIZE_T NewSizeInBytes;
  2931. PPOOL_TRACKER_BIG_PAGES NewTable;
  2932. PPOOL_TRACKER_BIG_PAGES p;
  2933. //
  2934. // The low bit of the address is set to indicate a free entry. The high
  2935. // bit cannot be used because in some configurations the high bit is not
2936. // set for all kernel-mode addresses.
  2937. //
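//
// For example (a sketch, assuming PAGE_SHIFT == 12 and a 256-entry
// session table, so ExpSessionPoolBigPageTableHash == 0xFF): a big
// allocation at virtual address ...12345000 hashes to
// (...12345 & 0xFF) == 0x45, and collisions probe forward linearly
// with wraparound from there.
//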
  2938. ASSERT (((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
  2939. if (PoolType & SESSION_POOL_MASK) {
  2940. Hash = (ULONG)(((ULONG_PTR)Va >> PAGE_SHIFT) & ExpSessionPoolBigPageTableHash);
  2941. i = Hash;
  2942. do {
  2943. OldVa = ExpSessionPoolBigPageTable[Hash].Va;
  2944. if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
  2945. (InterlockedCompareExchangePointer (
  2946. &ExpSessionPoolBigPageTable[Hash].Va,
  2947. Va,
  2948. OldVa) == OldVa)) {
  2949. ExpSessionPoolBigPageTable[Hash].Key = Key;
  2950. ExpSessionPoolBigPageTable[Hash].NumberOfPages = NumberOfPages;
  2951. return TRUE;
  2952. }
  2953. Hash += 1;
  2954. if (Hash >= ExpSessionPoolBigPageTableSize) {
  2955. Hash = 0;
  2956. }
  2957. } while (Hash != i);
  2958. #if DBG
  2959. ExpLargeSessionPoolUnTracked += 1;
  2960. #endif
  2961. return FALSE;
  2962. }
  2963. retry:
  2964. Inserted = TRUE;
  2965. Hash = (ULONG)(((ULONG_PTR)Va >> PAGE_SHIFT) & PoolBigPageTableHash);
  2966. ExAcquireSpinLock (&ExpTaggedPoolLock, &OldIrql);
  2967. while (((ULONG_PTR)PoolBigPageTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE) == 0) {
  2968. Hash += 1;
  2969. if (Hash >= PoolBigPageTableSize) {
  2970. if (!Inserted) {
  2971. //
  2972. // Try to expand the tracker table.
  2973. //
  2974. SizeInBytes = PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES);
  2975. NewSizeInBytes = (SizeInBytes << 1);
  2976. if (NewSizeInBytes > SizeInBytes) {
  2977. NewTable = MiAllocatePoolPages (NonPagedPool,
  2978. NewSizeInBytes);
  2979. if (NewTable != NULL) {
  2980. OldTable = (PVOID)PoolBigPageTable;
  2981. RtlCopyMemory ((PVOID)NewTable,
  2982. OldTable,
  2983. SizeInBytes);
  2984. RtlZeroMemory ((PVOID)(NewTable + PoolBigPageTableSize),
  2985. NewSizeInBytes - SizeInBytes);
  2986. //
  2987. // Mark all the new entries as free. Note this loop
  2988. // uses the fact that the table size always doubles.
  2989. //
  2990. i = (ULONG)PoolBigPageTableSize;
  2991. p = &NewTable[i];
  2992. for (i = 0; i < PoolBigPageTableSize; i += 1, p += 1) {
  2993. p->Va = (PVOID) POOL_BIG_TABLE_ENTRY_FREE;
  2994. }
  2995. PoolBigPageTable = NewTable;
  2996. PoolBigPageTableSize <<= 1;
  2997. PoolBigPageTableHash = PoolBigPageTableSize - 1;
  2998. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  2999. BigPages = MiFreePoolPages (OldTable);
  3000. ExpRemovePoolTracker ('looP',
  3001. (SIZE_T) BigPages * PAGE_SIZE,
  3002. NonPagedPool);
  3003. ExpInsertPoolTracker ('looP',
  3004. ROUND_TO_PAGES(NewSizeInBytes),
  3005. NonPagedPool);
  3006. goto retry;
  3007. }
  3008. }
  3009. if (!FirstPrint) {
3010. KdPrint(("POOL:unable to insert big page slot 0x%x\n",Key));
  3011. FirstPrint = TRUE;
  3012. }
  3013. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  3014. return FALSE;
  3015. }
  3016. Hash = 0;
  3017. Inserted = FALSE;
  3018. }
  3019. }
  3020. p = &PoolBigPageTable[Hash];
  3021. ASSERT (((ULONG_PTR)p->Va & POOL_BIG_TABLE_ENTRY_FREE) != 0);
  3022. ASSERT (((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
  3023. p->Va = Va;
  3024. p->Key = Key;
  3025. p->NumberOfPages = NumberOfPages;
  3026. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  3027. return TRUE;
  3028. }
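//
// Illustrative sketch (not part of the build): the table above overloads
// the low bit of the page-aligned Va as the "free slot" marker, so no
// separate state field is needed and a freed entry's address stays
// readable for debugging. A hypothetical single-threaded rendition (the
// kernel claims slots with an interlocked compare-exchange instead):
//
#if 0
#include <stdint.h>

#define SKETCH_ENTRY_FREE 0x1                   // low bit marks a free slot

typedef struct _SKETCH_BIG_ENTRY {
    uintptr_t Va;                               // page aligned, so bit 0 is spare
    unsigned long Key;
} SKETCH_BIG_ENTRY;

static int
SketchClaimSlot (SKETCH_BIG_ENTRY *Entry, uintptr_t Va, unsigned long Key)
{
    if (Entry->Va & SKETCH_ENTRY_FREE) {
        Entry->Va = Va;                         // claim: low bit is now clear
        Entry->Key = Key;
        return 1;
    }
    return 0;                                   // occupied - probe the next slot
}

static void
SketchReleaseSlot (SKETCH_BIG_ENTRY *Entry)
{
    Entry->Va |= SKETCH_ENTRY_FREE;             // release, Va remains legible
}
#endif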
  3029. ULONG
  3030. ExpFindAndRemoveTagBigPages (
  3031. IN PVOID Va,
  3032. OUT PULONG BigPages,
  3033. IN POOL_TYPE PoolType
  3034. )
  3035. /*++
  3036. Routine Description:
  3037. This function removes a pool tag from the big page tag table.
  3038. Arguments:
  3039. Va - Supplies the allocated virtual address.
  3040. BigPages - Returns the number of pages that were allocated.
  3041. PoolType - Supplies the type of the pool.
  3042. Return Value:
3043. Returns the pool tag of the entry if it was found and removed. If no entry was found, *BigPages is set to 0 and ' GIB' is returned.
  3044. Environment:
3045. No pool locks held on entry. The tagged pool spinlock is acquired internally to search the global table.
  3046. --*/
  3047. {
  3048. ULONG Hash;
  3049. LOGICAL Inserted;
  3050. KIRQL OldIrql;
  3051. ULONG ReturnKey;
  3052. ASSERT (((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
  3053. if (PoolType & SESSION_POOL_MASK) {
  3054. Hash = (ULONG)(((ULONG_PTR)Va >> PAGE_SHIFT) & ExpSessionPoolBigPageTableHash);
  3055. ReturnKey = Hash;
  3056. do {
  3057. if (ExpSessionPoolBigPageTable[Hash].Va == Va) {
  3058. *BigPages = ExpSessionPoolBigPageTable[Hash].NumberOfPages;
  3059. ReturnKey = ExpSessionPoolBigPageTable[Hash].Key;
  3060. InterlockedOr ((PLONG) &ExpSessionPoolBigPageTable[Hash].Va,
  3061. POOL_BIG_TABLE_ENTRY_FREE);
  3062. return ReturnKey;
  3063. }
  3064. Hash += 1;
  3065. if (Hash >= ExpSessionPoolBigPageTableSize) {
  3066. Hash = 0;
  3067. }
  3068. } while (Hash != ReturnKey);
  3069. *BigPages = 0;
  3070. return ' GIB';
  3071. }
  3072. Inserted = TRUE;
  3073. Hash = (ULONG)(((ULONG_PTR)Va >> PAGE_SHIFT) & PoolBigPageTableHash);
  3074. ExAcquireSpinLock (&ExpTaggedPoolLock, &OldIrql);
  3075. while (PoolBigPageTable[Hash].Va != Va) {
  3076. Hash += 1;
  3077. if (Hash >= PoolBigPageTableSize) {
  3078. if (!Inserted) {
  3079. if (!FirstPrint) {
  3080. KdPrint(("POOL:unable to find big page slot %p\n",Va));
  3081. FirstPrint = TRUE;
  3082. }
  3083. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  3084. *BigPages = 0;
  3085. return ' GIB';
  3086. }
  3087. Hash = 0;
  3088. Inserted = FALSE;
  3089. }
  3090. }
  3091. PoolBigPageTable[Hash].Va =
  3092. (PVOID)((ULONG_PTR)PoolBigPageTable[Hash].Va | POOL_BIG_TABLE_ENTRY_FREE);
  3093. *BigPages = PoolBigPageTable[Hash].NumberOfPages;
  3094. ReturnKey = PoolBigPageTable[Hash].Key;
  3095. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  3096. return ReturnKey;
  3097. }
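//
// A note on the free-slot encoding used by both paths above: entries are
// never unlinked; a slot is retired by setting the low bit of its Va.
// That bit is otherwise always zero because pool addresses are at least
// pointer aligned. A minimal sketch, assuming the same encoding:
//
#include <stdint.h>
#include <assert.h>

#define ENTRY_FREE 0x1

static void *MarkEntryFree (void *Va)
{
    assert (((uintptr_t)Va & ENTRY_FREE) == 0); // real addresses are aligned
    return (void *)((uintptr_t)Va | ENTRY_FREE);
}

static int IsEntryFree (void *Va)
{
    return ((uintptr_t)Va & ENTRY_FREE) != 0;
}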
  3098. VOID
  3099. ExFreePoolWithTag (
  3100. IN PVOID P,
  3101. IN ULONG TagToFree
  3102. )
  3103. /*++
  3104. Routine Description:
3105. This function deallocates a block of pool. This function is used to
3106. deallocate to both the page aligned pools and the small block (less
3107. than a page) pools.
3108. If the address of the block being deallocated is page-aligned, then
3109. the page-aligned pool deallocator is used.
3110. Otherwise, the small block pool deallocator is used. Deallocation
  3111. looks at the allocated block's pool header to determine the pool
  3112. type and block size being deallocated. If the pool was allocated
  3113. using ExAllocatePoolWithQuota, then after the deallocation is
  3114. complete, the appropriate process's pool quota is adjusted to reflect
  3115. the deallocation, and the process object is dereferenced.
  3116. Arguments:
  3117. P - Supplies the address of the block of pool being deallocated.
  3118. TagToFree - Supplies the tag of the block being freed.
  3119. Return Value:
  3120. None.
  3121. --*/
  3122. {
  3123. PVOID OldValue;
  3124. POOL_TYPE CheckType;
  3125. PPOOL_HEADER Entry;
  3126. ULONG BlockSize;
  3127. KLOCK_QUEUE_HANDLE LockHandle;
  3128. PPOOL_HEADER NextEntry;
  3129. POOL_TYPE PoolType;
  3130. POOL_TYPE EntryPoolType;
  3131. PPOOL_DESCRIPTOR PoolDesc;
  3132. PEPROCESS ProcessBilled;
  3133. LOGICAL Combined;
  3134. ULONG BigPages;
  3135. ULONG BigPages2;
  3136. SIZE_T NumberOfBytes;
  3137. ULONG Tag;
  3138. PKPRCB Prcb;
  3139. PGENERAL_LOOKASIDE LookasideList;
  3140. PERFINFO_FREEPOOL(P);
  3141. //
  3142. // Initializing LockHandle is not needed for correctness but without
  3143. // it the compiler cannot compile this code W4 to check for use of
  3144. // uninitialized variables.
  3145. //
  3146. LockHandle.OldIrql = 0;
  3147. if (ExpPoolFlags & (EX_CHECK_POOL_FREES_FOR_ACTIVE_TIMERS |
  3148. EX_CHECK_POOL_FREES_FOR_ACTIVE_WORKERS |
  3149. EX_CHECK_POOL_FREES_FOR_ACTIVE_RESOURCES |
  3150. EX_KERNEL_VERIFIER_ENABLED |
  3151. EX_VERIFIER_DEADLOCK_DETECTION_ENABLED |
  3152. EX_SPECIAL_POOL_ENABLED)) {
  3153. if (ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) {
  3154. //
  3155. // Log all pool frees in this mode.
  3156. //
  3157. ULONG Hash;
  3158. ULONG Index;
  3159. LOGICAL SpecialPool;
  3160. PEX_FREE_POOL_TRACES Information;
  3161. SpecialPool = MmIsSpecialPoolAddress (P);
  3162. if (ExFreePoolTraces != NULL) {
  3163. Index = InterlockedIncrement (&ExFreePoolIndex);
  3164. Index &= ExFreePoolMask;
  3165. Information = &ExFreePoolTraces[Index];
  3166. Information->Thread = PsGetCurrentThread ();
  3167. Information->PoolAddress = P;
  3168. if (SpecialPool == TRUE) {
  3169. Information->PoolHeader = *(PPOOL_HEADER) PAGE_ALIGN (P);
  3170. }
  3171. else if (!PAGE_ALIGNED(P)) {
  3172. Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD);
  3173. Information->PoolHeader = *Entry;
  3174. }
  3175. else {
  3176. RtlZeroMemory (&Information->PoolHeader,
  3177. sizeof (POOL_HEADER));
  3178. Information->PoolHeader.Ulong1 = MmGetSizeOfBigPoolAllocation (P);
  3179. }
  3180. RtlZeroMemory (&Information->StackTrace[0],
  3181. EX_FREE_POOL_BACKTRACE_LENGTH * sizeof(PVOID));
  3182. RtlCaptureStackBackTrace (1,
  3183. EX_FREE_POOL_BACKTRACE_LENGTH,
  3184. Information->StackTrace,
  3185. &Hash);
  3186. }
  3187. if (SpecialPool == TRUE) {
  3188. if (ExpPoolFlags & EX_VERIFIER_DEADLOCK_DETECTION_ENABLED) {
  3189. VerifierDeadlockFreePool (P, PAGE_SIZE);
  3190. }
  3191. MmFreeSpecialPool (P);
  3192. return;
  3193. }
  3194. }
  3195. if (!PAGE_ALIGNED(P)) {
  3196. Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD);
  3197. ASSERT_POOL_NOT_FREE(Entry);
  3198. PoolType = (Entry->PoolType & POOL_TYPE_MASK) - 1;
  3199. CheckType = PoolType & BASE_POOL_TYPE_MASK;
  3200. ASSERT_FREE_IRQL(PoolType, P);
  3201. ASSERT_POOL_TYPE_NOT_ZERO(Entry);
  3202. if (!IS_POOL_HEADER_MARKED_ALLOCATED(Entry)) {
  3203. KeBugCheckEx (BAD_POOL_CALLER,
  3204. 7,
  3205. __LINE__,
  3206. (ULONG_PTR)Entry->Ulong1,
  3207. (ULONG_PTR)P);
  3208. }
  3209. NumberOfBytes = (SIZE_T)Entry->BlockSize << POOL_BLOCK_SHIFT;
  3210. if (ExpPoolFlags & EX_VERIFIER_DEADLOCK_DETECTION_ENABLED) {
  3211. VerifierDeadlockFreePool (P, NumberOfBytes - POOL_OVERHEAD);
  3212. }
  3213. if (Entry->PoolType & POOL_VERIFIER_MASK) {
  3214. VerifierFreeTrackedPool (P,
  3215. NumberOfBytes,
  3216. CheckType,
  3217. FALSE);
  3218. }
  3219. //
  3220. // Check if an ERESOURCE is currently active in this memory block.
  3221. //
  3222. FREE_CHECK_ERESOURCE (Entry, NumberOfBytes);
  3223. //
  3224. // Check if a KTIMER is currently active in this memory block.
  3225. //
  3226. FREE_CHECK_KTIMER (Entry, NumberOfBytes);
  3227. //
  3228. // Look for work items still queued.
  3229. //
  3230. FREE_CHECK_WORKER (Entry, NumberOfBytes);
  3231. }
  3232. }
  3233. //
  3234. // If the entry is page aligned, then free the block to the page aligned
  3235. // pool. Otherwise, free the block to the allocation lists.
  3236. //
  3237. if (PAGE_ALIGNED(P)) {
  3238. PoolType = MmDeterminePoolType (P);
  3239. ASSERT_FREE_IRQL(PoolType, P);
  3240. CheckType = PoolType & BASE_POOL_TYPE_MASK;
  3241. if (PoolType == PagedPoolSession) {
  3242. PoolDesc = ExpSessionPoolDescriptor;
  3243. }
  3244. else {
  3245. PoolDesc = PoolVector[PoolType];
  3246. if (CheckType == NonPagedPool) {
  3247. if (MiIsPoolLargeSession (P) == TRUE) {
  3248. PoolDesc = ExpSessionPoolDescriptor;
  3249. PoolType = NonPagedPoolSession;
  3250. }
  3251. }
  3252. }
  3253. Tag = ExpFindAndRemoveTagBigPages (P, &BigPages, PoolType);
  3254. if (BigPages == 0) {
  3255. //
  3256. // This means the allocator wasn't able to insert this
  3257. // entry into the big page tag table. This allocation must
3258. // have been re-tagged as BIG at the time; the problem here
  3259. // is that we don't know the size (or the real original tag).
  3260. //
  3261. // Ask Mm directly for the size.
  3262. //
  3263. BigPages = MmGetSizeOfBigPoolAllocation (P);
  3264. ASSERT (BigPages != 0);
  3265. ASSERT (Tag == ' GIB');
  3266. }
  3267. else if (Tag & PROTECTED_POOL) {
  3268. Tag &= ~PROTECTED_POOL;
  3269. TagToFree &= ~PROTECTED_POOL;
  3270. if (Tag != TagToFree) {
  3271. KeBugCheckEx (BAD_POOL_CALLER,
  3272. 0xA,
  3273. (ULONG_PTR)P,
  3274. Tag,
  3275. TagToFree);
  3276. }
  3277. }
  3278. NumberOfBytes = (SIZE_T)BigPages << PAGE_SHIFT;
  3279. ExpRemovePoolTracker (Tag, NumberOfBytes, PoolType);
  3280. if (ExpPoolFlags & (EX_CHECK_POOL_FREES_FOR_ACTIVE_TIMERS |
  3281. EX_CHECK_POOL_FREES_FOR_ACTIVE_WORKERS |
  3282. EX_CHECK_POOL_FREES_FOR_ACTIVE_RESOURCES |
  3283. EX_VERIFIER_DEADLOCK_DETECTION_ENABLED)) {
  3284. if (ExpPoolFlags & EX_VERIFIER_DEADLOCK_DETECTION_ENABLED) {
  3285. VerifierDeadlockFreePool (P, NumberOfBytes);
  3286. }
  3287. //
  3288. // Check if an ERESOURCE is currently active in this memory block.
  3289. //
  3290. FREE_CHECK_ERESOURCE (P, NumberOfBytes);
  3291. //
  3292. // Check if a KTIMER is currently active in this memory block.
  3293. //
  3294. FREE_CHECK_KTIMER (P, NumberOfBytes);
  3295. //
  3296. // Search worker queues for work items still queued.
  3297. //
  3298. FREE_CHECK_WORKER (P, NumberOfBytes);
  3299. }
  3300. InterlockedIncrement ((PLONG)&PoolDesc->RunningDeAllocs);
  3301. InterlockedExchangeAddSizeT (&PoolDesc->TotalBytes, 0 - NumberOfBytes);
  3302. BigPages2 = MiFreePoolPages (P);
  3303. ASSERT (BigPages == BigPages2);
  3304. InterlockedExchangeAdd ((PLONG)&PoolDesc->TotalBigPages, (LONG)(0 - BigPages2));
  3305. return;
  3306. }
  3307. //
  3308. // Align the entry address to a pool allocation boundary.
  3309. //
  3310. Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD);
  3311. BlockSize = Entry->BlockSize;
  3312. EntryPoolType = Entry->PoolType;
  3313. PoolType = (Entry->PoolType & POOL_TYPE_MASK) - 1;
  3314. CheckType = PoolType & BASE_POOL_TYPE_MASK;
  3315. ASSERT_POOL_NOT_FREE (Entry);
  3316. ASSERT_FREE_IRQL (PoolType, P);
  3317. ASSERT_POOL_TYPE_NOT_ZERO (Entry);
  3318. if (!IS_POOL_HEADER_MARKED_ALLOCATED(Entry)) {
  3319. KeBugCheckEx (BAD_POOL_CALLER,
  3320. 7,
  3321. __LINE__,
  3322. (ULONG_PTR)Entry->Ulong1,
  3323. (ULONG_PTR)P);
  3324. }
  3325. Tag = Entry->PoolTag;
  3326. if (Tag & PROTECTED_POOL) {
  3327. Tag &= ~PROTECTED_POOL;
  3328. TagToFree &= ~PROTECTED_POOL;
  3329. if (Tag != TagToFree) {
  3330. KeBugCheckEx (BAD_POOL_CALLER,
  3331. 0xA,
  3332. (ULONG_PTR)P,
  3333. Tag,
  3334. TagToFree);
  3335. }
  3336. }
  3337. PoolDesc = PoolVector[CheckType];
  3338. MARK_POOL_HEADER_FREED (Entry);
  3339. if (EntryPoolType & SESSION_POOL_MASK) {
  3340. if (CheckType == PagedPool) {
  3341. PoolDesc = ExpSessionPoolDescriptor;
  3342. }
  3343. else if (ExpNumberOfNonPagedPools > 1) {
  3344. PoolDesc = ExpNonPagedPoolDescriptor[DECODE_POOL_INDEX(Entry)];
  3345. }
  3346. //
  3347. // All session space allocations have an index of 0 unless there
  3348. // are multiple nonpaged (session) pools.
  3349. //
  3350. ASSERT ((DECODE_POOL_INDEX(Entry) == 0) || (ExpNumberOfNonPagedPools > 1));
  3351. }
  3352. else {
  3353. if (CheckType == PagedPool) {
  3354. ASSERT (DECODE_POOL_INDEX(Entry) <= ExpNumberOfPagedPools);
  3355. PoolDesc = ExpPagedPoolDescriptor[DECODE_POOL_INDEX(Entry)];
  3356. }
  3357. else {
  3358. ASSERT ((DECODE_POOL_INDEX(Entry) == 0) || (ExpNumberOfNonPagedPools > 1));
  3359. if (ExpNumberOfNonPagedPools > 1) {
  3360. PoolDesc = ExpNonPagedPoolDescriptor[DECODE_POOL_INDEX(Entry)];
  3361. }
  3362. }
  3363. }
  3364. //
  3365. // Update the pool tracking database.
  3366. //
  3367. ExpRemovePoolTrackerInline (Tag,
  3368. BlockSize << POOL_BLOCK_SHIFT,
  3369. EntryPoolType - 1);
  3370. //
  3371. // If quota was charged when the pool was allocated, release it now.
  3372. //
  3373. if (EntryPoolType & POOL_QUOTA_MASK) {
  3374. ProcessBilled = ExpGetBilledProcess (Entry);
  3375. if (ProcessBilled != NULL) {
  3376. ASSERT_KPROCESS(ProcessBilled);
  3377. PsReturnPoolQuota (ProcessBilled,
  3378. PoolType & BASE_POOL_TYPE_MASK,
  3379. BlockSize << POOL_BLOCK_SHIFT);
  3380. if (((PKPROCESS)(ProcessBilled))->Header.Type != ProcessObject) {
  3381. KeBugCheckEx (BAD_POOL_CALLER,
  3382. 0xB,
  3383. (ULONG_PTR)P,
  3384. Tag,
  3385. (ULONG_PTR)ProcessBilled);
  3386. }
  3387. ObDereferenceObject (ProcessBilled);
  3388. #if DBG
  3389. InterlockedDecrement (&ExConcurrentQuotaPool);
  3390. #endif
  3391. }
  3392. }
  3393. //
  3394. // If the pool block is a small block, then attempt to free the block
  3395. // to the single entry lookaside list. If the free attempt fails, then
  3396. // free the block by merging it back into the pool data structures.
  3397. //
  3398. if (((EntryPoolType & SESSION_POOL_MASK) == 0) ||
  3399. (CheckType == NonPagedPool)) {
  3400. if ((BlockSize <= POOL_SMALL_LISTS) && (USING_HOT_COLD_METRICS == 0)) {
  3401. //
  3402. // Try to free the small block to a per processor lookaside list.
  3403. //
  3404. Prcb = KeGetCurrentPrcb ();
  3405. if (CheckType == PagedPool) {
  3406. //
  3407. // Prototype pool is never put on general lookaside lists
  3408. // due to the sharecounts applied on these allocations when
3409. // they are in use (ie: the rest of this page consists of
  3410. // prototype allocations even though this allocation is being
  3411. // freed). Pages containing prototype allocations are much
  3412. // more difficult for memory management to trim (unlike the
  3413. // rest of paged pool) due to the sharecounts generally applied.
  3414. //
  3415. if (PoolDesc->PoolIndex == 0) {
  3416. goto NoLookaside;
  3417. }
  3418. //
  3419. // Only free the small block to the current processor's
  3420. // lookaside list if the block is local to this node.
  3421. //
  3422. if (KeNumberNodes > 1) {
  3423. if (Prcb->ParentNode->Color != PoolDesc->PoolIndex - 1) {
  3424. goto NoLookaside;
  3425. }
  3426. }
  3427. LookasideList = Prcb->PPPagedLookasideList[BlockSize - 1].P;
  3428. LookasideList->TotalFrees += 1;
  3429. if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth) {
  3430. LookasideList->FreeHits += 1;
  3431. InterlockedPushEntrySList (&LookasideList->ListHead,
  3432. (PSLIST_ENTRY)P);
  3433. return;
  3434. }
  3435. LookasideList = Prcb->PPPagedLookasideList[BlockSize - 1].L;
  3436. LookasideList->TotalFrees += 1;
  3437. if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth) {
  3438. LookasideList->FreeHits += 1;
  3439. InterlockedPushEntrySList (&LookasideList->ListHead,
  3440. (PSLIST_ENTRY)P);
  3441. return;
  3442. }
  3443. }
  3444. else {
  3445. //
  3446. // Only free the small block to the current processor's
  3447. // lookaside list if the block is local to this node.
  3448. //
  3449. if (KeNumberNodes > 1) {
  3450. if (Prcb->ParentNode->Color != PoolDesc->PoolIndex) {
  3451. goto NoLookaside;
  3452. }
  3453. }
  3454. LookasideList = Prcb->PPNPagedLookasideList[BlockSize - 1].P;
  3455. LookasideList->TotalFrees += 1;
  3456. if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth) {
  3457. LookasideList->FreeHits += 1;
  3458. InterlockedPushEntrySList (&LookasideList->ListHead,
  3459. (PSLIST_ENTRY)P);
  3460. return;
  3461. }
  3462. LookasideList = Prcb->PPNPagedLookasideList[BlockSize - 1].L;
  3463. LookasideList->TotalFrees += 1;
  3464. if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth) {
  3465. LookasideList->FreeHits += 1;
  3466. InterlockedPushEntrySList (&LookasideList->ListHead,
  3467. (PSLIST_ENTRY)P);
  3468. return;
  3469. }
  3470. }
  3471. }
  3472. }
  3473. else {
  3474. if (BlockSize <= ExpSessionPoolSmallLists) {
  3475. //
  3476. // Attempt to free the small block to the session lookaside list.
  3477. //
  3478. LookasideList = (PGENERAL_LOOKASIDE)(ULONG_PTR)(ExpSessionPoolLookaside + BlockSize - 1);
  3479. LookasideList->TotalFrees += 1;
  3480. if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth) {
  3481. LookasideList->FreeHits += 1;
  3482. InterlockedPushEntrySList (&LookasideList->ListHead,
  3483. (PSLIST_ENTRY)P);
  3484. return;
  3485. }
  3486. }
  3487. }
  3488. NoLookaside:
  3489. //
  3490. // If the pool block release can be queued so the pool mutex/spinlock
3491. // acquisition/release can be amortized, then do so. Note "hot" blocks
  3492. // are generally in the lookasides above to provide fast reuse to take
  3493. // advantage of hardware caching.
  3494. //
  3495. if (ExpPoolFlags & EX_DELAY_POOL_FREES) {
  3496. if (PoolDesc->PendingFreeDepth >= EXP_MAXIMUM_POOL_FREES_PENDING) {
  3497. ExDeferredFreePool (PoolDesc);
  3498. }
  3499. //
  3500. // Push this entry on the deferred list.
  3501. //
  3502. do {
  3503. OldValue = PoolDesc->PendingFrees;
  3504. ((PSINGLE_LIST_ENTRY)P)->Next = OldValue;
  3505. } while (InterlockedCompareExchangePointer (
  3506. &PoolDesc->PendingFrees,
  3507. P,
  3508. OldValue) != OldValue);
  3509. InterlockedIncrement (&PoolDesc->PendingFreeDepth);
  3510. return;
  3511. }
  3512. Combined = FALSE;
  3513. ASSERT (BlockSize == Entry->BlockSize);
  3514. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + BlockSize);
  3515. InterlockedIncrement ((PLONG)&PoolDesc->RunningDeAllocs);
  3516. InterlockedExchangeAddSizeT (&PoolDesc->TotalBytes, 0 - ((SIZE_T)BlockSize << POOL_BLOCK_SHIFT));
  3517. LOCK_POOL (PoolDesc, LockHandle);
  3518. CHECK_POOL_PAGE (Entry);
  3519. //
  3520. // Free the specified pool block.
  3521. //
  3522. // Check to see if the next entry is free.
  3523. //
  3524. if (PAGE_END(NextEntry) == FALSE) {
  3525. if (NextEntry->PoolType == 0) {
  3526. //
  3527. // This block is free, combine with the released block.
  3528. //
  3529. Combined = TRUE;
  3530. //
  3531. // If the split pool block contains only a header, then
  3532. // it was not inserted and therefore cannot be removed.
  3533. //
  3534. // Note if the minimum pool block size is bigger than the
  3535. // header then there can be no blocks like this.
  3536. //
  3537. if ((POOL_OVERHEAD != POOL_SMALLEST_BLOCK) ||
  3538. (NextEntry->BlockSize != 1)) {
  3539. CHECK_LIST(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  3540. PrivateRemoveEntryList(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  3541. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Flink));
  3542. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Blink));
  3543. }
  3544. Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
  3545. }
  3546. }
  3547. //
  3548. // Check to see if the previous entry is free.
  3549. //
  3550. if (Entry->PreviousSize != 0) {
  3551. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry - Entry->PreviousSize);
  3552. if (NextEntry->PoolType == 0) {
  3553. //
  3554. // This block is free, combine with the released block.
  3555. //
  3556. Combined = TRUE;
  3557. //
  3558. // If the split pool block contains only a header, then
  3559. // it was not inserted and therefore cannot be removed.
  3560. //
  3561. // Note if the minimum pool block size is bigger than the
  3562. // header then there can be no blocks like this.
  3563. //
  3564. if ((POOL_OVERHEAD != POOL_SMALLEST_BLOCK) ||
  3565. (NextEntry->BlockSize != 1)) {
  3566. CHECK_LIST(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  3567. PrivateRemoveEntryList(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  3568. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Flink));
  3569. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Blink));
  3570. }
  3571. NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
  3572. Entry = NextEntry;
  3573. }
  3574. }
  3575. //
  3576. // If the block being freed has been combined into a full page,
  3577. // then return the free page to memory management.
  3578. //
  3579. if (PAGE_ALIGNED(Entry) &&
  3580. (PAGE_END((PPOOL_BLOCK)Entry + Entry->BlockSize) != FALSE)) {
  3581. UNLOCK_POOL (PoolDesc, LockHandle);
  3582. InterlockedExchangeAdd ((PLONG)&PoolDesc->TotalPages, (LONG)-1);
  3583. PERFINFO_FREEPOOLPAGE(CheckType, Entry->PoolIndex, Entry, PoolDesc);
  3584. MiFreePoolPages (Entry);
  3585. }
  3586. else {
  3587. //
  3588. // Insert this element into the list.
  3589. //
  3590. Entry->PoolType = 0;
  3591. BlockSize = Entry->BlockSize;
  3592. ASSERT (BlockSize != 1);
  3593. //
  3594. // If the freed block was combined with any other block, then
  3595. // adjust the size of the next block if necessary.
  3596. //
  3597. if (Combined != FALSE) {
  3598. //
3599. // The size of this entry has changed; if this entry is
  3600. // not the last one in the page, update the pool block
  3601. // after this block to have a new previous allocation size.
  3602. //
  3603. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + BlockSize);
  3604. if (PAGE_END(NextEntry) == FALSE) {
  3605. NextEntry->PreviousSize = (USHORT) BlockSize;
  3606. }
  3607. }
  3608. //
  3609. // Always insert at the head in hopes of reusing cache lines.
  3610. //
  3611. PrivateInsertHeadList (&PoolDesc->ListHeads[BlockSize - 1],
  3612. ((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD)));
  3613. CHECK_LIST(((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD)));
  3614. UNLOCK_POOL(PoolDesc, LockHandle);
  3615. }
  3616. }
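//
// Typical driver-side usage of the interface implemented above. The tag
// passed to ExFreePoolWithTag must match the allocation tag (after
// PROTECTED_POOL is masked off), or the routine bugchecks with
// BAD_POOL_CALLER subcode 0xA as shown above. 'tseT' is an illustrative
// tag, not one the kernel reserves.
//
#include <ntddk.h>

NTSTATUS ExampleTaggedAllocation (VOID)
{
    PVOID Buffer;

    Buffer = ExAllocatePoolWithTag (NonPagedPool, 128, 'tseT');
    if (Buffer == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    //
    // ... use the 128-byte buffer ...
    //

    ExFreePoolWithTag (Buffer, 'tseT');     // tag must match the allocation
    return STATUS_SUCCESS;
}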
  3617. VOID
  3618. ExFreePool (
  3619. IN PVOID P
  3620. )
  3621. {
  3622. ExFreePoolWithTag (P, 0);
  3623. return;
  3624. }
  3625. VOID
  3626. ExDeferredFreePool (
  3627. IN PPOOL_DESCRIPTOR PoolDesc
  3628. )
  3629. /*++
  3630. Routine Description:
  3631. This routine frees a number of pool allocations at once to amortize the
  3632. synchronization overhead cost.
  3633. Arguments:
  3634. PoolDesc - Supplies the relevant pool descriptor.
  3635. Return Value:
  3636. None.
  3637. Environment:
  3638. Kernel mode. May be as high as APC_LEVEL for paged pool or DISPATCH_LEVEL
  3639. for nonpaged pool.
  3640. --*/
  3641. {
  3642. LONG ListCount;
  3643. KLOCK_QUEUE_HANDLE LockHandle;
  3644. POOL_TYPE CheckType;
  3645. PPOOL_HEADER Entry;
  3646. ULONG Index;
  3647. ULONG WholePageCount;
  3648. PPOOL_HEADER NextEntry;
  3649. ULONG PoolIndex;
  3650. LOGICAL Combined;
  3651. PSINGLE_LIST_ENTRY SingleListEntry;
  3652. PSINGLE_LIST_ENTRY NextSingleListEntry;
  3653. PSINGLE_LIST_ENTRY FirstEntry;
  3654. PSINGLE_LIST_ENTRY LastEntry;
  3655. PSINGLE_LIST_ENTRY WholePages;
  3656. CheckType = PoolDesc->PoolType & BASE_POOL_TYPE_MASK;
  3657. //
  3658. // Initializing LockHandle is not needed for correctness but without
  3659. // it the compiler cannot compile this code W4 to check for use of
  3660. // uninitialized variables.
  3661. //
  3662. LockHandle.OldIrql = 0;
  3663. ListCount = 0;
  3664. WholePages = NULL;
  3665. WholePageCount = 0;
  3666. LastEntry = NULL;
  3667. LOCK_POOL (PoolDesc, LockHandle);
  3668. if (PoolDesc->PendingFrees == NULL) {
  3669. UNLOCK_POOL (PoolDesc, LockHandle);
  3670. return;
  3671. }
  3672. //
  3673. // Free each deferred pool entry until they're all done.
  3674. //
  3675. do {
  3676. SingleListEntry = PoolDesc->PendingFrees;
  3677. FirstEntry = SingleListEntry;
  3678. do {
  3679. NextSingleListEntry = SingleListEntry->Next;
  3680. //
  3681. // Process the deferred entry.
  3682. //
  3683. ListCount += 1;
  3684. Entry = (PPOOL_HEADER)((PCHAR)SingleListEntry - POOL_OVERHEAD);
  3685. PoolIndex = DECODE_POOL_INDEX(Entry);
  3686. //
  3687. // Process the block.
  3688. //
  3689. Combined = FALSE;
  3690. CHECK_POOL_PAGE (Entry);
  3691. InterlockedIncrement ((PLONG)&PoolDesc->RunningDeAllocs);
  3692. InterlockedExchangeAddSizeT (&PoolDesc->TotalBytes,
  3693. 0 - ((SIZE_T)Entry->BlockSize << POOL_BLOCK_SHIFT));
  3694. //
  3695. // Free the specified pool block.
  3696. //
  3697. // Check to see if the next entry is free.
  3698. //
  3699. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + Entry->BlockSize);
  3700. if (PAGE_END(NextEntry) == FALSE) {
  3701. if (NextEntry->PoolType == 0) {
  3702. //
  3703. // This block is free, combine with the released block.
  3704. //
  3705. Combined = TRUE;
  3706. //
  3707. // If the split pool block contains only a header, then
  3708. // it was not inserted and therefore cannot be removed.
  3709. //
  3710. // Note if the minimum pool block size is bigger than the
  3711. // header then there can be no blocks like this.
  3712. //
  3713. if ((POOL_OVERHEAD != POOL_SMALLEST_BLOCK) ||
  3714. (NextEntry->BlockSize != 1)) {
  3715. CHECK_LIST(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  3716. PrivateRemoveEntryList(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  3717. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Flink));
  3718. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Blink));
  3719. }
  3720. Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
  3721. }
  3722. }
  3723. //
  3724. // Check to see if the previous entry is free.
  3725. //
  3726. if (Entry->PreviousSize != 0) {
  3727. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry - Entry->PreviousSize);
  3728. if (NextEntry->PoolType == 0) {
  3729. //
  3730. // This block is free, combine with the released block.
  3731. //
  3732. Combined = TRUE;
  3733. //
  3734. // If the split pool block contains only a header, then
  3735. // it was not inserted and therefore cannot be removed.
  3736. //
  3737. // Note if the minimum pool block size is bigger than the
  3738. // header then there can be no blocks like this.
  3739. //
  3740. if ((POOL_OVERHEAD != POOL_SMALLEST_BLOCK) ||
  3741. (NextEntry->BlockSize != 1)) {
  3742. CHECK_LIST(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  3743. PrivateRemoveEntryList(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)));
  3744. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Flink));
  3745. CHECK_LIST(DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Blink));
  3746. }
  3747. NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
  3748. Entry = NextEntry;
  3749. }
  3750. }
  3751. //
  3752. // If the block being freed has been combined into a full page,
  3753. // then return the free page to memory management.
  3754. //
  3755. if (PAGE_ALIGNED(Entry) &&
  3756. (PAGE_END((PPOOL_BLOCK)Entry + Entry->BlockSize) != FALSE)) {
  3757. ((PSINGLE_LIST_ENTRY)Entry)->Next = WholePages;
  3758. WholePages = (PSINGLE_LIST_ENTRY) Entry;
  3759. WholePageCount += 1;
  3760. }
  3761. else {
  3762. //
  3763. // Insert this element into the list.
  3764. //
  3765. Entry->PoolType = 0;
  3766. ENCODE_POOL_INDEX(Entry, PoolIndex);
  3767. Index = Entry->BlockSize;
  3768. ASSERT (Index != 1);
  3769. //
  3770. // If the freed block was combined with any other block, then
  3771. // adjust the size of the next block if necessary.
  3772. //
  3773. if (Combined != FALSE) {
  3774. //
3775. // The size of this entry has changed; if this entry is
  3776. // not the last one in the page, update the pool block
  3777. // after this block to have a new previous allocation size.
  3778. //
  3779. NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + Index);
  3780. if (PAGE_END(NextEntry) == FALSE) {
  3781. NextEntry->PreviousSize = (USHORT) Index;
  3782. }
  3783. }
  3784. //
  3785. // Always insert at the head in hopes of reusing cache lines.
  3786. //
  3787. PrivateInsertHeadList(&PoolDesc->ListHeads[Index - 1], ((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD)));
  3788. CHECK_LIST(((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD)));
  3789. }
  3790. //
  3791. // March on to the next entry if there is one.
  3792. //
  3793. if (NextSingleListEntry == LastEntry) {
  3794. break;
  3795. }
  3796. SingleListEntry = NextSingleListEntry;
  3797. } while (TRUE);
  3798. if ((PoolDesc->PendingFrees == FirstEntry) &&
  3799. (InterlockedCompareExchangePointer (&PoolDesc->PendingFrees,
  3800. NULL,
  3801. FirstEntry) == FirstEntry)) {
  3802. break;
  3803. }
  3804. LastEntry = FirstEntry;
  3805. } while (TRUE);
  3806. UNLOCK_POOL (PoolDesc, LockHandle);
  3807. if (WholePages != NULL) {
  3808. //
  3809. // If the pool type is paged pool, then the global paged pool mutex
  3810. // must be held during the free of the pool pages. Hence any
  3811. // full pages were batched up and are now dealt with in one go.
  3812. //
  3813. Entry = (PPOOL_HEADER) WholePages;
  3814. InterlockedExchangeAdd ((PLONG)&PoolDesc->TotalPages, 0 - WholePageCount);
  3815. do {
  3816. NextEntry = (PPOOL_HEADER) (((PSINGLE_LIST_ENTRY)Entry)->Next);
  3817. PERFINFO_FREEPOOLPAGE(CheckType, PoolIndex, Entry, PoolDesc);
  3818. MiFreePoolPages (Entry);
  3819. Entry = NextEntry;
  3820. } while (Entry != NULL);
  3821. }
  3822. InterlockedExchangeAdd (&PoolDesc->PendingFreeDepth, (0 - ListCount));
  3823. return;
  3824. }
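//
// A hedged sketch of the deferred-free protocol used above (pushed in
// ExFreePoolWithTag, drained in ExDeferredFreePool): frees are pushed
// onto a lock-free singly-linked list with a compare-exchange, and the
// drainer detaches work in bulk so the pool lock is paid once per batch
// rather than once per block. The Interlocked calls and SINGLE_LIST_ENTRY
// are the real Windows primitives; the PendingFrees/PendingFreeDepth
// globals stand in for the pool descriptor fields, and the simple
// exchange-to-NULL detach below is weaker than the kernel's re-check
// loop, which also catches entries pushed mid-drain.
//
#include <windows.h>

static PVOID volatile PendingFrees;     // head of the chain, or NULL
static LONG volatile PendingFreeDepth;

VOID DeferFree (PSINGLE_LIST_ENTRY Entry)
{
    PVOID OldValue;

    do {
        OldValue = PendingFrees;
        Entry->Next = (PSINGLE_LIST_ENTRY) OldValue;
    } while (InterlockedCompareExchangePointer (&PendingFrees,
                                                Entry,
                                                OldValue) != OldValue);
    InterlockedIncrement (&PendingFreeDepth);
}

PSINGLE_LIST_ENTRY DetachAllPendingFrees (VOID)
{
    return (PSINGLE_LIST_ENTRY) InterlockedExchangePointer (&PendingFrees,
                                                            NULL);
}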
  3825. SIZE_T
  3826. ExQueryPoolBlockSize (
  3827. IN PVOID PoolBlock,
  3828. OUT PBOOLEAN QuotaCharged
  3829. )
  3830. /*++
  3831. Routine Description:
  3832. This function returns the size of the pool block.
  3833. Arguments:
  3834. PoolBlock - Supplies the address of the block of pool.
  3835. QuotaCharged - Supplies a BOOLEAN variable to receive whether or not the
  3836. pool block had quota charged.
  3837. NOTE: If the entry is bigger than a page, the value PAGE_SIZE is returned
  3838. rather than the correct number of bytes.
  3839. Return Value:
  3840. Size of pool block.
  3841. --*/
  3842. {
  3843. PPOOL_HEADER Entry;
  3844. SIZE_T size;
  3845. if ((ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) &&
  3846. (MmIsSpecialPoolAddress (PoolBlock))) {
  3847. *QuotaCharged = FALSE;
  3848. return MmQuerySpecialPoolBlockSize (PoolBlock);
  3849. }
  3850. if (PAGE_ALIGNED(PoolBlock)) {
  3851. *QuotaCharged = FALSE;
  3852. return PAGE_SIZE;
  3853. }
  3854. Entry = (PPOOL_HEADER)((PCHAR)PoolBlock - POOL_OVERHEAD);
  3855. size = (ULONG)((Entry->BlockSize << POOL_BLOCK_SHIFT) - POOL_OVERHEAD);
  3856. if (ExpGetBilledProcess (Entry)) {
  3857. *QuotaCharged = TRUE;
  3858. }
  3859. else {
  3860. *QuotaCharged = FALSE;
  3861. }
  3862. return size;
  3863. }
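//
// A short usage sketch of the routine above, assuming 8-byte pool blocks
// (POOL_BLOCK_SHIFT == 3) and an 8-byte header as on 32-bit systems: a
// 100-byte request occupies 14 blocks (112 bytes including the header),
// so the query reports 112 - 8 = 104 usable bytes, slightly more than
// was requested. ExQueryPoolBlockSize is the exported kernel routine;
// its declaration lives in ntifs.h.
//
#include <ntifs.h>

VOID ExampleQueryBlockSize (VOID)
{
    BOOLEAN QuotaCharged;
    SIZE_T Usable;
    PVOID Buffer;

    Buffer = ExAllocatePoolWithTag (PagedPool, 100, 'tseT');
    if (Buffer != NULL) {
        Usable = ExQueryPoolBlockSize (Buffer, &QuotaCharged);
        ASSERT (Usable >= 100);         // never smaller than the request
        ExFreePoolWithTag (Buffer, 'tseT');
    }
}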
  3864. VOID
  3865. ExQueryPoolUsage (
  3866. OUT PULONG PagedPoolPages,
  3867. OUT PULONG NonPagedPoolPages,
  3868. OUT PULONG PagedPoolAllocs,
  3869. OUT PULONG PagedPoolFrees,
  3870. OUT PULONG PagedPoolLookasideHits,
  3871. OUT PULONG NonPagedPoolAllocs,
  3872. OUT PULONG NonPagedPoolFrees,
  3873. OUT PULONG NonPagedPoolLookasideHits
  3874. )
  3875. {
  3876. ULONG Index;
  3877. PGENERAL_LOOKASIDE Lookaside;
  3878. PLIST_ENTRY NextEntry;
  3879. PPOOL_DESCRIPTOR pd;
  3880. //
  3881. // Sum all the paged pool usage.
  3882. //
  3883. *PagedPoolPages = 0;
  3884. *PagedPoolAllocs = 0;
  3885. *PagedPoolFrees = 0;
  3886. for (Index = 0; Index < ExpNumberOfPagedPools + 1; Index += 1) {
  3887. pd = ExpPagedPoolDescriptor[Index];
  3888. *PagedPoolPages += pd->TotalPages + pd->TotalBigPages;
  3889. *PagedPoolAllocs += pd->RunningAllocs;
  3890. *PagedPoolFrees += pd->RunningDeAllocs;
  3891. }
  3892. //
  3893. // Sum all the nonpaged pool usage.
  3894. //
  3895. pd = &NonPagedPoolDescriptor;
  3896. *NonPagedPoolPages = pd->TotalPages + pd->TotalBigPages;
  3897. *NonPagedPoolAllocs = pd->RunningAllocs;
  3898. *NonPagedPoolFrees = pd->RunningDeAllocs;
  3899. //
3900. // Sum all the lookaside hits for paged and nonpaged pool. These accumulate into the caller's variables, which the caller must zero-initialize.
  3901. //
  3902. NextEntry = ExPoolLookasideListHead.Flink;
  3903. while (NextEntry != &ExPoolLookasideListHead) {
  3904. Lookaside = CONTAINING_RECORD(NextEntry,
  3905. GENERAL_LOOKASIDE,
  3906. ListEntry);
  3907. if (Lookaside->Type == NonPagedPool) {
  3908. *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
  3909. }
  3910. else {
  3911. *PagedPoolLookasideHits += Lookaside->AllocateHits;
  3912. }
  3913. NextEntry = NextEntry->Flink;
  3914. }
  3915. return;
  3916. }
  3917. VOID
  3918. ExReturnPoolQuota (
  3919. IN PVOID P
  3920. )
  3921. /*++
  3922. Routine Description:
  3923. This function returns quota charged to a subject process when the
  3924. specified pool block was allocated.
  3925. Arguments:
  3926. P - Supplies the address of the block of pool being deallocated.
  3927. Return Value:
  3928. None.
  3929. --*/
  3930. {
  3931. PPOOL_HEADER Entry;
  3932. POOL_TYPE PoolType;
  3933. PEPROCESS ProcessBilled;
  3934. //
  3935. // Do nothing for special pool. No quota was charged.
  3936. //
  3937. if ((ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) &&
  3938. (MmIsSpecialPoolAddress (P))) {
  3939. return;
  3940. }
  3941. //
  3942. // Align the entry address to a pool allocation boundary.
  3943. //
  3944. Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD);
  3945. //
  3946. // If quota was charged, then return the appropriate quota to the
  3947. // subject process.
  3948. //
  3949. if (Entry->PoolType & POOL_QUOTA_MASK) {
  3950. PoolType = (Entry->PoolType & POOL_TYPE_MASK) - 1;
  3951. ProcessBilled = ExpGetBilledProcess (Entry);
  3952. #if defined (_WIN64)
  3953. //
  3954. // This flag cannot be cleared in NT32 because it's used to denote the
  3955. // allocation is larger (and the verifier finds its own header
  3956. // based on this).
  3957. //
  3958. Entry->PoolType &= ~POOL_QUOTA_MASK;
  3959. #else
  3960. //
  3961. // Instead of clearing the flag above, instead zero the quota pointer.
  3962. //
  3963. * (PVOID *)((PCHAR)Entry + (Entry->BlockSize << POOL_BLOCK_SHIFT) - sizeof (PVOID)) = NULL;
  3964. #endif
  3965. if (ProcessBilled != NULL) {
  3966. ASSERT_KPROCESS(ProcessBilled);
  3967. PsReturnPoolQuota (ProcessBilled,
  3968. PoolType & BASE_POOL_TYPE_MASK,
  3969. (ULONG)Entry->BlockSize << POOL_BLOCK_SHIFT);
  3970. if (((PKPROCESS)(ProcessBilled))->Header.Type != ProcessObject) {
  3971. KeBugCheckEx (BAD_POOL_CALLER,
  3972. 0xC,
  3973. (ULONG_PTR)P,
  3974. Entry->PoolTag,
  3975. (ULONG_PTR)ProcessBilled);
  3976. }
  3977. ObDereferenceObject (ProcessBilled);
  3978. #if DBG
  3979. InterlockedDecrement (&ExConcurrentQuotaPool);
  3980. #endif
  3981. }
  3982. }
  3983. return;
  3984. }
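//
// An illustrative helper locating the per-allocation quota pointer the
// 32-bit path above nulls out: the billed-process pointer occupies the
// last pointer-sized slot of the allocation, immediately before the next
// block. This mirrors the expression in the routine above and is a
// sketch, not a kernel export.
//
PVOID *
ExpQuotaSlotSketch (
    IN PPOOL_HEADER Entry
    )
{
    return (PVOID *)((PCHAR)Entry +
                     ((SIZE_T)Entry->BlockSize << POOL_BLOCK_SHIFT) -
                     sizeof (PVOID));
}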
  3985. #if !defined (NT_UP)
  3986. PVOID
  3987. ExCreatePoolTagTable (
  3988. IN ULONG NewProcessorNumber,
  3989. IN UCHAR NodeNumber
  3990. )
  3991. {
  3992. SIZE_T NumberOfBytes;
  3993. PPOOL_TRACKER_TABLE NewTagTable;
  3994. ASSERT (KeGetCurrentIrql () == PASSIVE_LEVEL);
  3995. ASSERT (NewProcessorNumber < MAXIMUM_PROCESSOR_TAG_TABLES);
  3996. ASSERT (ExPoolTagTables[NewProcessorNumber] == NULL);
  3997. NumberOfBytes = (PoolTrackTableSize + 1) * sizeof(POOL_TRACKER_TABLE);
  3998. NewTagTable = MmAllocateIndependentPages (NumberOfBytes, NodeNumber);
  3999. if (NewTagTable != NULL) {
  4000. //
4001. // Just zero the table here; the tags are lazily filled as various pool
  4002. // allocations and frees occur. Note no memory barrier is needed
  4003. // because only this processor will read it except when an
  4004. // ExGetPoolTagInfo call occurs, and in that case, explicit memory
  4005. // barriers are used as needed.
  4006. //
  4007. RtlZeroMemory (NewTagTable,
  4008. PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
  4009. ExPoolTagTables[NewProcessorNumber] = NewTagTable;
  4010. }
  4011. return (PVOID) NewTagTable;
  4012. }
  4013. VOID
  4014. ExDeletePoolTagTable (
  4015. IN ULONG NewProcessorNumber
  4016. )
  4017. /*++
  4018. Routine Description:
  4019. This function deletes the tag table for the specified processor
  4020. number because the processor did not boot.
  4021. Arguments:
  4022. NewProcessorNumber - Supplies the processor number that did not boot.
  4023. Return Value:
  4024. None.
  4025. --*/
  4026. {
  4027. KIRQL OldIrql;
  4028. PVOID VirtualAddress;
  4029. SIZE_T NumberOfBytes;
  4030. ASSERT (KeGetCurrentIrql () == PASSIVE_LEVEL);
  4031. ASSERT (NewProcessorNumber < MAXIMUM_PROCESSOR_TAG_TABLES);
  4032. ASSERT (ExPoolTagTables[NewProcessorNumber] != NULL);
  4033. NumberOfBytes = (PoolTrackTableSize + 1) * sizeof(POOL_TRACKER_TABLE);
  4034. VirtualAddress = ExPoolTagTables[NewProcessorNumber];
  4035. //
  4036. // Raise to DISPATCH to prevent a race when attempting to hot-add a
  4037. // processor while a pool-usage query is active.
  4038. //
  4039. KeRaiseIrql (DISPATCH_LEVEL, &OldIrql);
  4040. ExPoolTagTables[NewProcessorNumber] = NULL;
  4041. KeLowerIrql (OldIrql);
  4042. MmFreeIndependentPages (VirtualAddress, NumberOfBytes);
  4043. return;
  4044. }
  4045. #endif
  4046. typedef struct _POOL_DPC_CONTEXT {
  4047. PPOOL_TRACKER_TABLE PoolTrackTable;
  4048. SIZE_T PoolTrackTableSize;
  4049. PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
  4050. SIZE_T PoolTrackTableSizeExpansion;
  4051. } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
  4052. VOID
  4053. ExpGetPoolTagInfoTarget (
  4054. IN PKDPC Dpc,
  4055. IN PVOID DeferredContext,
  4056. IN PVOID SystemArgument1,
  4057. IN PVOID SystemArgument2
  4058. )
  4059. /*++
  4060. Routine Description:
  4061. Called by all processors during a pool tag table query.
  4062. Arguments:
  4063. Dpc - Supplies a pointer to a control object of type DPC.
  4064. DeferredContext - Deferred context.
  4065. SystemArgument1 - Used to signal completion of this call.
  4066. SystemArgument2 - Used for internal lockstepping during this call.
  4067. Return Value:
  4068. None.
  4069. Environment:
  4070. DISPATCH_LEVEL since this is called from a DPC.
  4071. --*/
  4072. {
  4073. PPOOL_DPC_CONTEXT Context;
  4074. #if !defined (NT_UP)
  4075. ULONG i;
  4076. PPOOL_TRACKER_TABLE TrackerEntry;
  4077. PPOOL_TRACKER_TABLE LastTrackerEntry;
  4078. PPOOL_TRACKER_TABLE TargetTrackerEntry;
  4079. #endif
  4080. UNREFERENCED_PARAMETER (Dpc);
  4081. ASSERT (KeGetCurrentIrql () == DISPATCH_LEVEL);
  4082. Context = DeferredContext;
  4083. //
  4084. // Make sure all DPCs are running (ie: spinning at DISPATCH_LEVEL)
  4085. // to prevent any pool allocations or frees from happening until
  4086. // all the counters are snapped. Otherwise the counters could
  4087. // be misleading (ie: more frees than allocs, etc).
  4088. //
  4089. if (KeSignalCallDpcSynchronize (SystemArgument2)) {
  4090. //
  4091. // This processor (could be the caller or a target) is the final
  4092. // processor to enter the DPC spinloop. Snap the data now.
  4093. //
  4094. #if defined (NT_UP)
  4095. RtlCopyMemory ((PVOID)Context->PoolTrackTable,
  4096. (PVOID)PoolTrackTable,
  4097. Context->PoolTrackTableSize * sizeof (POOL_TRACKER_TABLE));
  4098. #else
  4099. RtlCopyMemory ((PVOID)Context->PoolTrackTable,
  4100. (PVOID)ExPoolTagTables[0],
  4101. Context->PoolTrackTableSize * sizeof (POOL_TRACKER_TABLE));
  4102. LastTrackerEntry = Context->PoolTrackTable + Context->PoolTrackTableSize;
  4103. for (i = 1; i < MAXIMUM_PROCESSOR_TAG_TABLES; i += 1) {
  4104. TargetTrackerEntry = ExPoolTagTables[i];
  4105. if (TargetTrackerEntry == NULL) {
  4106. continue;
  4107. }
  4108. TrackerEntry = Context->PoolTrackTable;
  4109. while (TrackerEntry != LastTrackerEntry) {
  4110. if (TargetTrackerEntry->Key != 0) {
  4111. ASSERT (TargetTrackerEntry->Key == TrackerEntry->Key);
  4112. TrackerEntry->NonPagedAllocs += TargetTrackerEntry->NonPagedAllocs;
  4113. TrackerEntry->NonPagedFrees += TargetTrackerEntry->NonPagedFrees;
  4114. TrackerEntry->NonPagedBytes += TargetTrackerEntry->NonPagedBytes;
  4115. TrackerEntry->PagedAllocs += TargetTrackerEntry->PagedAllocs;
  4116. TrackerEntry->PagedFrees += TargetTrackerEntry->PagedFrees;
  4117. TrackerEntry->PagedBytes += TargetTrackerEntry->PagedBytes;
  4118. }
  4119. TrackerEntry += 1;
  4120. TargetTrackerEntry += 1;
  4121. }
  4122. }
  4123. #endif
  4124. if (Context->PoolTrackTableSizeExpansion != 0) {
  4125. RtlCopyMemory ((PVOID)(Context->PoolTrackTableExpansion),
  4126. (PVOID)PoolTrackTableExpansion,
  4127. Context->PoolTrackTableSizeExpansion * sizeof (POOL_TRACKER_TABLE));
  4128. }
  4129. }
  4130. //
  4131. // Wait until everyone has got to this point before continuing.
  4132. //
  4133. KeSignalCallDpcSynchronize (SystemArgument2);
  4134. //
  4135. // Signal that all processing has been done.
  4136. //
  4137. KeSignalCallDpcDone (SystemArgument1);
  4138. return;
  4139. }
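//
// A hedged user-mode sketch of the aggregation above: each processor owns
// a private, identically indexed counter table, and a snapshot is the
// elementwise sum over every live table. The kernel earns its consistent
// view by first spinning every processor in the DPC above; this sketch
// simply assumes the caller has quiesced updates by other means. All
// names here are illustrative.
//
#include <stddef.h>

#define MAX_CPUS 64

typedef struct _TAG_COUNTERS {
    unsigned long Key;
    long long Allocs;
    long long Frees;
    long long Bytes;
} TAG_COUNTERS;

static TAG_COUNTERS *PerCpuTable[MAX_CPUS]; // NULL if the CPU never booted

static void SnapCounters (TAG_COUNTERS *Snapshot, size_t Entries)
{
    size_t Cpu;
    size_t i;
    TAG_COUNTERS *CpuTable;

    for (Cpu = 0; Cpu < MAX_CPUS; Cpu += 1) {
        CpuTable = PerCpuTable[Cpu];
        if (CpuTable == NULL) {
            continue;
        }
        for (i = 0; i < Entries; i += 1) {
            if (CpuTable[i].Key != 0) {     // lazily filled; 0 means unused
                Snapshot[i].Allocs += CpuTable[i].Allocs;
                Snapshot[i].Frees += CpuTable[i].Frees;
                Snapshot[i].Bytes += CpuTable[i].Bytes;
            }
        }
    }
}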
  4140. NTSTATUS
  4141. ExGetPoolTagInfo (
  4142. IN PVOID SystemInformation,
  4143. IN ULONG SystemInformationLength,
  4144. IN OUT PULONG ReturnLength OPTIONAL
  4145. )
  4146. /*++
  4147. Routine Description:
  4148. This function copies the system pool tag information to the supplied
  4149. USER space buffer. Note that the caller has already probed the USER
  4150. address and wrapped this routine inside a try-except.
  4151. Arguments:
  4152. SystemInformation - Supplies a user space buffer to copy the data to.
  4153. SystemInformationLength - Supplies the length of the user buffer.
  4154. ReturnLength - Receives the actual length of the data returned.
  4155. Return Value:
  4156. Various NTSTATUS codes.
  4157. --*/
  4158. {
  4159. SIZE_T NumberOfBytes;
  4160. SIZE_T NumberOfExpansionTableBytes;
  4161. ULONG totalBytes;
  4162. NTSTATUS status;
  4163. PSYSTEM_POOLTAG_INFORMATION taginfo;
  4164. PSYSTEM_POOLTAG poolTag;
  4165. PPOOL_TRACKER_TABLE PoolTrackInfo;
  4166. PPOOL_TRACKER_TABLE TrackerEntry;
  4167. PPOOL_TRACKER_TABLE LastTrackerEntry;
  4168. POOL_DPC_CONTEXT Context;
  4169. SIZE_T LocalTrackTableSize;
  4170. SIZE_T LocalTrackTableSizeExpansion;
  4171. ASSERT (KeGetCurrentIrql () == PASSIVE_LEVEL);
  4172. totalBytes = 0;
  4173. status = STATUS_SUCCESS;
  4174. taginfo = (PSYSTEM_POOLTAG_INFORMATION)SystemInformation;
  4175. poolTag = &taginfo->TagInfo[0];
  4176. totalBytes = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
  4177. taginfo->Count = 0;
  4178. LocalTrackTableSize = PoolTrackTableSize;
  4179. LocalTrackTableSizeExpansion = PoolTrackTableExpansionSize;
  4180. NumberOfBytes = LocalTrackTableSize * sizeof(POOL_TRACKER_TABLE);
  4181. NumberOfExpansionTableBytes = LocalTrackTableSizeExpansion * sizeof (POOL_TRACKER_TABLE);
  4182. PoolTrackInfo = (PPOOL_TRACKER_TABLE) ExAllocatePoolWithTag (
  4183. NonPagedPool,
  4184. NumberOfBytes + NumberOfExpansionTableBytes,
  4185. 'ofnI');
  4186. if (PoolTrackInfo == NULL) {
  4187. return STATUS_INSUFFICIENT_RESOURCES;
  4188. }
  4189. Context.PoolTrackTable = PoolTrackInfo;
  4190. Context.PoolTrackTableSize = PoolTrackTableSize;
  4191. Context.PoolTrackTableExpansion = (PoolTrackInfo + PoolTrackTableSize);
  4192. Context.PoolTrackTableSizeExpansion = PoolTrackTableExpansionSize;
  4193. KeGenericCallDpc (ExpGetPoolTagInfoTarget, &Context);
  4194. TrackerEntry = PoolTrackInfo;
  4195. LastTrackerEntry = PoolTrackInfo + (LocalTrackTableSize + LocalTrackTableSizeExpansion);
  4196. //
  4197. // Wrap the user space accesses with an exception handler so we can free the
  4198. // pool track info allocation if the user address was bogus.
  4199. //
  4200. try {
  4201. while (TrackerEntry < LastTrackerEntry) {
  4202. if (TrackerEntry->Key != 0) {
  4203. taginfo->Count += 1;
  4204. totalBytes += sizeof (SYSTEM_POOLTAG);
  4205. if (SystemInformationLength < totalBytes) {
  4206. status = STATUS_INFO_LENGTH_MISMATCH;
  4207. }
  4208. else {
  4209. ASSERT (TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
  4210. ASSERT (TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
  4211. poolTag->TagUlong = TrackerEntry->Key;
  4212. poolTag->PagedAllocs = TrackerEntry->PagedAllocs;
  4213. poolTag->PagedFrees = TrackerEntry->PagedFrees;
  4214. poolTag->PagedUsed = TrackerEntry->PagedBytes;
  4215. poolTag->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
  4216. poolTag->NonPagedFrees = TrackerEntry->NonPagedFrees;
  4217. poolTag->NonPagedUsed = TrackerEntry->NonPagedBytes;
  4218. poolTag += 1;
  4219. }
  4220. }
  4221. TrackerEntry += 1;
  4222. }
  4223. }
  4224. except (EXCEPTION_EXECUTE_HANDLER) {
  4225. status = GetExceptionCode ();
  4226. }
  4227. ExFreePool (PoolTrackInfo);
  4228. if (ARGUMENT_PRESENT(ReturnLength)) {
  4229. *ReturnLength = totalBytes;
  4230. }
  4231. return status;
  4232. }
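//
// The ReturnLength contract above supports the standard caller-side retry
// loop: query, and on STATUS_INFO_LENGTH_MISMATCH grow the buffer to the
// reported size and try again. A hedged user-mode sketch against
// NtQuerySystemInformation, assuming the classic SystemPoolTagInformation
// class value of 22 (link with ntdll.lib):
//
#include <windows.h>
#include <winternl.h>
#include <stdlib.h>

#ifndef STATUS_INFO_LENGTH_MISMATCH
#define STATUS_INFO_LENGTH_MISMATCH ((NTSTATUS)0xC0000004L)
#endif
#ifndef NT_SUCCESS
#define NT_SUCCESS(Status) (((NTSTATUS)(Status)) >= 0)
#endif

PVOID QueryPoolTags (VOID)
{
    ULONG Length = 0x10000;             // initial guess
    PVOID Buffer;
    NTSTATUS Status;

    for (;;) {
        Buffer = malloc (Length);
        if (Buffer == NULL) {
            return NULL;
        }
        Status = NtQuerySystemInformation ((SYSTEM_INFORMATION_CLASS)22,
                                           Buffer,
                                           Length,
                                           &Length);
        if (Status != STATUS_INFO_LENGTH_MISMATCH) {
            break;
        }
        free (Buffer);                  // Length now holds the needed size
    }
    if (!NT_SUCCESS (Status)) {
        free (Buffer);
        return NULL;
    }
    return Buffer;                      // caller frees
}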
  4233. NTSTATUS
  4234. ExGetSessionPoolTagInfo (
  4235. IN PVOID SystemInformation,
  4236. IN ULONG SystemInformationLength,
  4237. IN OUT PULONG ReturnedEntries,
  4238. IN OUT PULONG ActualEntries
  4239. )
  4240. /*++
  4241. Routine Description:
  4242. This function copies the current session's pool tag information to the
  4243. supplied system-mapped buffer.
  4244. Arguments:
  4245. SystemInformation - Supplies a system mapped buffer to copy the data to.
  4246. SystemInformationLength - Supplies the length of the buffer.
  4247. ReturnedEntries - Receives the actual number of entries returned.
  4248. ActualEntries - Receives the total number of entries.
  4249. This can be more than ReturnedEntries if the caller's
  4250. buffer is not large enough to hold all the data.
  4251. Return Value:
  4252. Various NTSTATUS codes.
  4253. --*/
  4254. {
  4255. ULONG totalBytes;
  4256. ULONG ActualCount;
  4257. ULONG ReturnedCount;
  4258. NTSTATUS status;
  4259. PSYSTEM_POOLTAG poolTag;
  4260. PPOOL_TRACKER_TABLE TrackerEntry;
  4261. PPOOL_TRACKER_TABLE LastTrackerEntry;
  4262. ASSERT (KeGetCurrentIrql () == PASSIVE_LEVEL);
  4263. totalBytes = 0;
  4264. ActualCount = 0;
  4265. ReturnedCount = 0;
  4266. status = STATUS_SUCCESS;
  4267. poolTag = (PSYSTEM_POOLTAG) SystemInformation;
  4268. //
  4269. // Capture the current session's pool information.
  4270. //
  4271. TrackerEntry = ExpSessionPoolTrackTable;
  4272. LastTrackerEntry = TrackerEntry + ExpSessionPoolTrackTableSize;
  4273. while (TrackerEntry < LastTrackerEntry) {
  4274. if (TrackerEntry->Key != 0) {
  4275. ActualCount += 1;
  4276. totalBytes += sizeof (SYSTEM_POOLTAG);
  4277. if (totalBytes > SystemInformationLength) {
  4278. status = STATUS_INFO_LENGTH_MISMATCH;
  4279. }
  4280. else {
  4281. ReturnedCount += 1;
  4282. poolTag->TagUlong = TrackerEntry->Key;
  4283. poolTag->PagedAllocs = TrackerEntry->PagedAllocs;
  4284. poolTag->PagedFrees = TrackerEntry->PagedFrees;
  4285. poolTag->PagedUsed = TrackerEntry->PagedBytes;
  4286. poolTag->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
  4287. poolTag->NonPagedFrees = TrackerEntry->NonPagedFrees;
  4288. poolTag->NonPagedUsed = TrackerEntry->NonPagedBytes;
  4289. //
  4290. // Session pool tag entries are updated with interlocked
  4291. // sequences so it is possible here that we can read one
  4292. // that is in the middle of being updated. Sanitize the
  4293. // data here so callers don't have to.
  4294. //
  4295. ASSERT ((SSIZE_T)poolTag->PagedUsed >= 0);
  4296. ASSERT ((SSIZE_T)poolTag->NonPagedUsed >= 0);
  4297. if (poolTag->PagedAllocs < poolTag->PagedFrees) {
  4298. poolTag->PagedAllocs = poolTag->PagedFrees;
  4299. }
  4300. if (poolTag->NonPagedAllocs < poolTag->NonPagedFrees) {
  4301. poolTag->NonPagedAllocs = poolTag->NonPagedFrees;
  4302. }
  4303. poolTag += 1;
  4304. }
  4305. }
  4306. TrackerEntry += 1;
  4307. }
  4308. *ReturnedEntries = ReturnedCount;
  4309. *ActualEntries = ActualCount;
  4310. return status;
  4311. }
  4312. NTSTATUS
  4313. ExGetBigPoolInfo (
  4314. IN PVOID SystemInformation,
  4315. IN ULONG SystemInformationLength,
  4316. IN OUT PULONG ReturnLength OPTIONAL
  4317. )
  4318. /*++
  4319. Routine Description:
  4320. This function copies the system big pool entry information to the supplied
  4321. USER space buffer. Note that the caller has already probed the USER
  4322. address and wrapped this routine inside a try-except.
  4323. PAGELK was not used for this function so that calling it causes minimal
  4324. disruption to actual memory usage.
  4325. Arguments:
  4326. SystemInformation - Supplies a user space buffer to copy the data to.
  4327. SystemInformationLength - Supplies the length of the user buffer.
4328. ReturnLength - Receives the actual length of the data returned.
  4329. Return Value:
  4330. Various NTSTATUS codes.
  4331. --*/
  4332. {
  4333. ULONG TotalBytes;
  4334. KIRQL OldIrql;
  4335. NTSTATUS Status;
  4336. PVOID NewTable;
  4337. PPOOL_TRACKER_BIG_PAGES SystemPoolEntry;
  4338. PPOOL_TRACKER_BIG_PAGES SystemPoolEntryEnd;
  4339. SIZE_T SnappedBigTableSize;
  4340. SIZE_T SnappedBigTableSizeInBytes;
  4341. PSYSTEM_BIGPOOL_ENTRY UserPoolEntry;
  4342. PSYSTEM_BIGPOOL_INFORMATION UserPoolInfo;
  4343. ASSERT (KeGetCurrentIrql () == PASSIVE_LEVEL);
  4344. NewTable = NULL;
  4345. Status = STATUS_SUCCESS;
  4346. UserPoolInfo = (PSYSTEM_BIGPOOL_INFORMATION)SystemInformation;
  4347. UserPoolEntry = &UserPoolInfo->AllocatedInfo[0];
  4348. TotalBytes = FIELD_OFFSET(SYSTEM_BIGPOOL_INFORMATION, AllocatedInfo);
  4349. UserPoolInfo->Count = 0;
  4350. do {
  4351. SnappedBigTableSize = PoolBigPageTableSize;
  4352. SnappedBigTableSizeInBytes =
  4353. SnappedBigTableSize * sizeof (POOL_TRACKER_BIG_PAGES);
  4354. if (NewTable != NULL) {
  4355. MiFreePoolPages (NewTable);
  4356. }
  4357. //
  4358. // Use MiAllocatePoolPages for the temporary buffer so we won't have
  4359. // to filter it out of the results before handing them back.
  4360. //
  4361. NewTable = MiAllocatePoolPages (NonPagedPool,
  4362. SnappedBigTableSizeInBytes);
  4363. if (NewTable == NULL) {
  4364. return STATUS_INSUFFICIENT_RESOURCES;
  4365. }
  4366. ExAcquireSpinLock (&ExpTaggedPoolLock, &OldIrql);
  4367. if (SnappedBigTableSize >= PoolBigPageTableSize) {
  4368. //
  4369. // Success - our table is big enough to hold everything.
  4370. //
  4371. break;
  4372. }
  4373. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  4374. } while (TRUE);
  4375. RtlCopyMemory (NewTable,
  4376. PoolBigPageTable,
  4377. PoolBigPageTableSize * sizeof (POOL_TRACKER_BIG_PAGES));
  4378. SnappedBigTableSize = PoolBigPageTableSize;
  4379. ExReleaseSpinLock (&ExpTaggedPoolLock, OldIrql);
  4380. SystemPoolEntry = NewTable;
  4381. SystemPoolEntryEnd = SystemPoolEntry + SnappedBigTableSize;
  4382. //
  4383. // Wrap the user space accesses with an exception handler so we can
  4384. // free the temp buffer if the user address was bogus.
  4385. //
  4386. try {
  4387. while (SystemPoolEntry < SystemPoolEntryEnd) {
  4388. if (((ULONG_PTR)SystemPoolEntry->Va & POOL_BIG_TABLE_ENTRY_FREE) == 0) {
  4389. //
  4390. // This entry is in use so capture it.
  4391. //
  4392. UserPoolInfo->Count += 1;
  4393. TotalBytes += sizeof (SYSTEM_BIGPOOL_ENTRY);
  4394. if (SystemInformationLength < TotalBytes) {
  4395. Status = STATUS_INFO_LENGTH_MISMATCH;
  4396. }
  4397. else {
  4398. UserPoolEntry->VirtualAddress = SystemPoolEntry->Va;
  4399. if (MmDeterminePoolType (SystemPoolEntry->Va) == NonPagedPool) {
  4400. UserPoolEntry->NonPaged = 1;
  4401. }
  4402. UserPoolEntry->TagUlong = SystemPoolEntry->Key & ~PROTECTED_POOL;
  4403. UserPoolEntry->SizeInBytes = SystemPoolEntry->NumberOfPages << PAGE_SHIFT;
  4404. UserPoolEntry += 1;
  4405. }
  4406. }
  4407. SystemPoolEntry += 1;
  4408. }
  4409. }
  4410. except (EXCEPTION_EXECUTE_HANDLER) {
  4411. Status = GetExceptionCode ();
  4412. }
  4413. MiFreePoolPages (NewTable);
  4414. if (ARGUMENT_PRESENT(ReturnLength)) {
  4415. *ReturnLength = TotalBytes;
  4416. }
  4417. return Status;
  4418. }
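//
// The allocate/lock/recheck loop above is a pattern worth noting: the
// snapshot buffer cannot be allocated while the spinlock guarding the
// table is held, so it is sized and allocated outside the lock, and the
// capacity is re-verified once the lock is taken in case the table grew
// in the window. A hedged user-mode sketch of the same shape, with a
// critical section standing in for the spinlock:
//
#include <windows.h>
#include <stdlib.h>
#include <string.h>

typedef struct _ENTRY { PVOID Va; ULONG Key; } ENTRY;

static CRITICAL_SECTION TableLock;
static ENTRY *SharedTable;
static SIZE_T SharedTableSize;  // may grow while TableLock is not held

ENTRY *SnapshotTable (SIZE_T *SnappedSize)
{
    ENTRY *Buffer = NULL;
    SIZE_T Size;

    for (;;) {
        Size = SharedTableSize;                 // unsynchronized read
        if (Buffer != NULL) {
            free (Buffer);                      // stale, too-small buffer
        }
        Buffer = malloc (Size * sizeof (ENTRY));
        if (Buffer == NULL) {
            return NULL;
        }
        EnterCriticalSection (&TableLock);
        if (Size >= SharedTableSize) {
            break;                              // still large enough
        }
        LeaveCriticalSection (&TableLock);      // table grew; go around
    }
    memcpy (Buffer, SharedTable, SharedTableSize * sizeof (ENTRY));
    *SnappedSize = SharedTableSize;
    LeaveCriticalSection (&TableLock);
    return Buffer;
}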
  4419. VOID
  4420. ExAllocatePoolSanityChecks (
  4421. IN POOL_TYPE PoolType,
  4422. IN SIZE_T NumberOfBytes
  4423. )
  4424. /*++
  4425. Routine Description:
  4426. This function performs sanity checks on the caller.
  4427. Return Value:
  4428. None.
  4429. Environment:
  4430. Only enabled as part of the driver verification package.
  4431. --*/
  4432. {
  4433. if (NumberOfBytes == 0) {
  4434. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4435. 0x0,
  4436. KeGetCurrentIrql(),
  4437. PoolType,
  4438. NumberOfBytes);
  4439. }
  4440. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  4441. if (KeGetCurrentIrql() > APC_LEVEL) {
  4442. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4443. 0x1,
  4444. KeGetCurrentIrql(),
  4445. PoolType,
  4446. NumberOfBytes);
  4447. }
  4448. }
  4449. else {
  4450. if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
  4451. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4452. 0x2,
  4453. KeGetCurrentIrql(),
  4454. PoolType,
  4455. NumberOfBytes);
  4456. }
  4457. }
  4458. }
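//
// The checks above encode the fundamental IRQL rules for pool: paged pool
// may only be touched at or below APC_LEVEL, nonpaged pool at or below
// DISPATCH_LEVEL. A small driver-side helper asserting the same invariant
// before allocating; illustrative only, not a kernel routine (PagedPool
// has the value 1, so the low bit selects the base pool type):
//
#include <ntddk.h>

PVOID
ExampleSafeAllocate (
    IN POOL_TYPE PoolType,
    IN SIZE_T NumberOfBytes,
    IN ULONG Tag
    )
{
    ASSERT (NumberOfBytes != 0);

    if ((PoolType & 1) == PagedPool) {
        ASSERT (KeGetCurrentIrql () <= APC_LEVEL);
    }
    else {
        ASSERT (KeGetCurrentIrql () <= DISPATCH_LEVEL);
    }
    return ExAllocatePoolWithTag (PoolType, NumberOfBytes, Tag);
}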
  4459. VOID
  4460. ExFreePoolSanityChecks (
  4461. IN PVOID P
  4462. )
  4463. /*++
  4464. Routine Description:
  4465. This function performs sanity checks on the caller.
  4466. Return Value:
  4467. None.
  4468. Environment:
  4469. Only enabled as part of the driver verification package.
  4470. --*/
  4471. {
  4472. PPOOL_HEADER Entry;
  4473. POOL_TYPE PoolType;
  4474. PVOID StillQueued;
  4475. if (P <= (PVOID)(MM_HIGHEST_USER_ADDRESS)) {
  4476. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4477. 0x10,
  4478. (ULONG_PTR)P,
  4479. 0,
  4480. 0);
  4481. }
  4482. if ((ExpPoolFlags & EX_SPECIAL_POOL_ENABLED) &&
  4483. (MmIsSpecialPoolAddress (P))) {
  4484. KeCheckForTimer (P, PAGE_SIZE - BYTE_OFFSET (P));
  4485. //
  4486. // Check if an ERESOURCE is currently active in this memory block.
  4487. //
  4488. StillQueued = ExpCheckForResource(P, PAGE_SIZE - BYTE_OFFSET (P));
  4489. if (StillQueued != NULL) {
  4490. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4491. 0x17,
  4492. (ULONG_PTR)StillQueued,
  4493. (ULONG_PTR)-1,
  4494. (ULONG_PTR)P);
  4495. }
  4496. ExpCheckForWorker (P, PAGE_SIZE - BYTE_OFFSET (P)); // bugchecks inside
  4497. return;
  4498. }
  4499. if (PAGE_ALIGNED(P)) {
  4500. PoolType = MmDeterminePoolType(P);
  4501. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  4502. if (KeGetCurrentIrql() > APC_LEVEL) {
  4503. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4504. 0x11,
  4505. KeGetCurrentIrql(),
  4506. PoolType,
  4507. (ULONG_PTR)P);
  4508. }
  4509. }
  4510. else {
  4511. if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
  4512. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4513. 0x12,
  4514. KeGetCurrentIrql(),
  4515. PoolType,
  4516. (ULONG_PTR)P);
  4517. }
  4518. }
  4519. //
  4520. // Just check the first page.
  4521. //
  4522. KeCheckForTimer(P, PAGE_SIZE);
  4523. //
  4524. // Check if an ERESOURCE is currently active in this memory block.
  4525. //
  4526. StillQueued = ExpCheckForResource(P, PAGE_SIZE);
  4527. if (StillQueued != NULL) {
  4528. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4529. 0x17,
  4530. (ULONG_PTR)StillQueued,
  4531. PoolType,
  4532. (ULONG_PTR)P);
  4533. }
  4534. }
  4535. else {
  4536. if (((ULONG_PTR)P & (POOL_OVERHEAD - 1)) != 0) {
  4537. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4538. 0x16,
  4539. __LINE__,
  4540. (ULONG_PTR)P,
  4541. 0);
  4542. }
  4543. Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD);
  4544. if ((Entry->PoolType & POOL_TYPE_MASK) == 0) {
  4545. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4546. 0x13,
  4547. __LINE__,
  4548. (ULONG_PTR)Entry,
  4549. Entry->Ulong1);
  4550. }
  4551. PoolType = (Entry->PoolType & POOL_TYPE_MASK) - 1;
  4552. if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
  4553. if (KeGetCurrentIrql() > APC_LEVEL) {
  4554. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4555. 0x11,
  4556. KeGetCurrentIrql(),
  4557. PoolType,
  4558. (ULONG_PTR)P);
  4559. }
  4560. }
  4561. else {
  4562. if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
  4563. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4564. 0x12,
  4565. KeGetCurrentIrql(),
  4566. PoolType,
  4567. (ULONG_PTR)P);
  4568. }
  4569. }
  4570. if (!IS_POOL_HEADER_MARKED_ALLOCATED(Entry)) {
  4571. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4572. 0x14,
  4573. __LINE__,
  4574. (ULONG_PTR)Entry,
  4575. 0);
  4576. }
  4577. KeCheckForTimer(Entry, (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT));
  4578. //
  4579. // Check if an ERESOURCE is currently active in this memory block.
  4580. //
  4581. StillQueued = ExpCheckForResource(Entry, (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT));
  4582. if (StillQueued != NULL) {
  4583. KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
  4584. 0x17,
  4585. (ULONG_PTR)StillQueued,
  4586. PoolType,
  4587. (ULONG_PTR)P);
  4588. }
  4589. }
  4590. }
  4591. #if defined (NT_UP)
  4592. VOID
  4593. ExpBootFinishedDispatch (
  4594. IN PKDPC Dpc,
  4595. IN PVOID DeferredContext,
  4596. IN PVOID SystemArgument1,
  4597. IN PVOID SystemArgument2
  4598. )
  4599. /*++
  4600. Routine Description:
  4601. This function is called when the system has booted into a shell.
4602. Its job is to disable various pool optimizations that are enabled to
  4603. speed up booting and reduce the memory footprint on small machines.
  4604. Arguments:
  4605. Dpc - Supplies a pointer to a control object of type DPC.
  4606. DeferredContext - Optional deferred context; not used.
  4607. SystemArgument1 - Optional argument 1; not used.
  4608. SystemArgument2 - Optional argument 2; not used.
  4609. Return Value:
  4610. None.
  4611. Environment:
  4612. DISPATCH_LEVEL since this is called from a timer expiration.
  4613. --*/
  4614. {
  4615. UNREFERENCED_PARAMETER (Dpc);
  4616. UNREFERENCED_PARAMETER (DeferredContext);
  4617. UNREFERENCED_PARAMETER (SystemArgument1);
  4618. UNREFERENCED_PARAMETER (SystemArgument2);
  4619. //
  4620. // Pretty much all pages are "hot" after bootup. Since bootup has finished,
  4621. // use lookaside lists and stop trying to separate regular allocations
  4622. // as well.
  4623. //
  4624. RtlInterlockedAndBitsDiscardReturn (&ExpPoolFlags, (ULONG)~EX_SEPARATE_HOT_PAGES_DURING_BOOT);
  4625. }
  4626. #endif