Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

753 lines
23 KiB

  1. /*++
  2. Copyright (c) 1999-2000 Microsoft Corporation
  3. Module Name:
  4. fsbpool.c
  5. Abstract:
  6. This file contains the implementation of fixed-size block pool.
  7. Author:
  8. Shaun Cox (shaunco) 10-Dec-1999
  9. --*/
  10. #include "precomp.h"
  11. #define FSB_SCAVENGE_PERIOD_IN_SECONDS 30
  12. #define FSB_MINIMUM_PAGE_LIFETIME_IN_SECONDS 20
  13. #if defined (_WIN64)
  14. #define APPROX_L2_CACHE_LINE_SIZE 128
  15. #else
  16. #define APPROX_L2_CACHE_LINE_SIZE 64
  17. #endif
  18. // The following structures are used in the single allocation that
  19. // a pool handle points to.
  20. // PoolHandle ---> [FSB_POOL_HEADER + FSB_CPU_POOL_HEADER for cpu 0 +
  21. // FSB_CPU_POOL_HEADER for cpu 1 + ...
  22. // FSB_CPU_POOL_HEADER for cpu N]
  23. //
  24. // FSB_POOL_HEADER is the data common to all CPUs for a given pool.
  25. //
typedef struct _FSB_POOL_HEADER
{
    // cache-line -----
    struct _FSB_POOL_HEADER_BASE
    {
        ULONG Tag;                      // pool tag used when allocating pages
        USHORT CallerBlockSize;         // caller's requested block size
        USHORT AlignedBlockSize;        // ALIGN_UP(CallerBlockSize, PVOID)
        USHORT BlocksPerPage;           // blocks that fit on a page after FSB_PAGE_HEADER
        USHORT FreeBlockLinkOffset;     // offset within a block of the free-list link pointer
        NDIS_BLOCK_INITIALIZER BuildFunction; // optional first-build initializer for each block
        KSPIN_LOCK Interlock;           // lock backing the ExInterlocked*SList operations
    };

    // Pad the header to a multiple of the L2 cache line size so the
    // per-cpu headers that follow it in the same allocation start on
    // their own cache line.
    UCHAR Alignment[APPROX_L2_CACHE_LINE_SIZE
        - (sizeof(struct _FSB_POOL_HEADER_BASE) % APPROX_L2_CACHE_LINE_SIZE)];
} FSB_POOL_HEADER, *PFSB_POOL_HEADER;

C_ASSERT(sizeof(FSB_POOL_HEADER) % APPROX_L2_CACHE_LINE_SIZE == 0);
  43. // FSB_CPU_POOL_HEADER is the data specific to a CPU for a given pool.
  44. //
typedef struct _FSB_CPU_POOL_HEADER
{
    // cache-line -----
    struct _FSB_CPU_POOL_HEADER_BASE
    {
        // The doubly-linked list of pages that make up this processor's pool.
        // These pages have one or more free blocks available.
        //
        LIST_ENTRY PageList;

        // The doubly-linked list of pages that are fully in use. This list
        // is separate from the above list so that we do not spend time walking
        // a very long list during FsbAllocate when many pages are fully used.
        //
        LIST_ENTRY UsedPageList;

        // The next scheduled time (in units of KeQueryTickCount()) for
        // scavenging this pool. The next scavenge will happen no earlier
        // than this.
        //
        LARGE_INTEGER NextScavengeTick;

        // The number of the processor that owns this pool.
        //
        ULONG OwnerCpu;

        // Lifetime statistics for this processor's pool, plus the peak
        // number of blocks/pages simultaneously in use.
        //
        ULONG TotalBlocksAllocated;
        ULONG TotalBlocksFreed;
        ULONG PeakBlocksInUse;
        ULONG TotalPagesAllocated;
        ULONG TotalPagesFreed;
        ULONG PeakPagesInUse;
    };

    // Pad to a cache-line multiple so adjacent per-cpu headers do not
    // share a cache line across processors.
    UCHAR Alignment[APPROX_L2_CACHE_LINE_SIZE
        - (sizeof(struct _FSB_CPU_POOL_HEADER_BASE) % APPROX_L2_CACHE_LINE_SIZE)];
} FSB_CPU_POOL_HEADER, *PFSB_CPU_POOL_HEADER;

C_ASSERT(sizeof(FSB_CPU_POOL_HEADER) % APPROX_L2_CACHE_LINE_SIZE == 0);
  78. // FSB_PAGE_HEADER is the data at the beginning of each allocated pool page
  79. // that describes the current state of the blocks on the page.
  80. //
typedef struct _FSB_PAGE_HEADER
{
    // cache-line -----

    // Back pointer to the owning cpu pool.
    //
    PFSB_CPU_POOL_HEADER Pool;

    // Linkage entry for the list of pages managed by the cpu pool.
    // (Either PageList or UsedPageList; see OnUsedPageList below.)
    //
    LIST_ENTRY PageLink;

    // Number of blocks built so far on this page. Blocks are built on
    // demand. When this number reaches Pool->BlocksPerPage, all blocks on
    // this page have been built.
    //
    USHORT BlocksBuilt;

    // Boolean indicator of whether or not this page is on the cpu pool's
    // used-page list. This is checked during FsbFree to see if the page
    // should be moved back to the normal page list.
    // (it is a USHORT, instead of BOOLEAN, for proper padding)
    //
    USHORT OnUsedPageList;

    // List of free blocks on this page.
    //
    SLIST_HEADER FreeList;

    // The value of KeQueryTickCount (normalized to units of seconds)
    // which represents the time after which this page can be freed back
    // to the system's pool. This time is only valid if the depth of
    // FreeList is Pool->BlocksPerPage. (i.e. this time is only valid if
    // the page is completely unused.)
    //
    LARGE_INTEGER LastUsedTick;
} FSB_PAGE_HEADER, *PFSB_PAGE_HEADER;
  112. // Get a pointer to the overall pool given a pointer to one of
  113. // the per-processor pools within it.
  114. //
  115. __inline
  116. PFSB_POOL_HEADER
  117. PoolFromCpuPool(
  118. IN PFSB_CPU_POOL_HEADER CpuPool
  119. )
  120. {
  121. return (PFSB_POOL_HEADER)(CpuPool - CpuPool->OwnerCpu) - 1;
  122. }
  123. __inline
  124. VOID
  125. ConvertSecondsToTicks(
  126. IN ULONG Seconds,
  127. OUT PLARGE_INTEGER Ticks
  128. )
  129. {
  130. // If the following assert fires, you need to cast Seconds below to
  131. // ULONGLONG so that 64 bit multiplication and division are used.
  132. // The current code assumes less than 430 seconds so that the
  133. // 32 multiplication below won't overflow.
  134. //
  135. ASSERT(Seconds < 430);
  136. Ticks->HighPart = 0;
  137. Ticks->LowPart = (Seconds * 10*1000*1000) / KeQueryTimeIncrement();
  138. }
  139. // Build the next block on the specified pool page.
  140. // This can only be called if not all of the blocks have been built yet.
  141. //
PUCHAR
FsbpBuildNextBlock(
    IN const FSB_POOL_HEADER* Pool,
    IN OUT PFSB_PAGE_HEADER Page
    )
{
    PUCHAR Block;

    ASSERT(Page->BlocksBuilt < Pool->BlocksPerPage);
    ASSERT((PAGE_SIZE - sizeof(FSB_PAGE_HEADER)) / Pool->AlignedBlockSize
                == Pool->BlocksPerPage);
    ASSERT(Pool->CallerBlockSize <= Pool->AlignedBlockSize);

    // Blocks are laid out contiguously immediately after the page header,
    // each occupying AlignedBlockSize bytes.
    Block = (PUCHAR)(Page + 1) + (Page->BlocksBuilt * Pool->AlignedBlockSize);
    ASSERT(PAGE_ALIGN(Block) == Page);

    // Let the caller perform its one-time initialization of the block,
    // if an initializer was supplied at pool creation.
    if (Pool->BuildFunction) {
        Pool->BuildFunction(Block, Pool->CallerBlockSize);
    }

    Page->BlocksBuilt++;
    return Block;
}
  161. // Allocate a new pool page and insert it at the head of the specified
  162. // CPU pool. Build the first block on the new page and return a pointer
  163. // to it.
  164. //
PUCHAR
FsbpAllocateNewPageAndBuildOneBlock(
    IN const FSB_POOL_HEADER* Pool,
    IN PFSB_CPU_POOL_HEADER CpuPool
    )
{
    PFSB_PAGE_HEADER Page;
    PUCHAR Block = NULL;
    ULONG PagesInUse;

    ASSERT(Pool);

    // One pool page == one system page. The page header lives at the
    // front; blocks are built on demand in the space that follows.
    Page = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, Pool->Tag);
    if (Page)
    {
        ASSERT(Page == PAGE_ALIGN(Page));

        // Only the header needs zeroing; the block area is initialized
        // lazily by FsbpBuildNextBlock.
        RtlZeroMemory(Page, sizeof(FSB_PAGE_HEADER));
        Page->Pool = CpuPool;
        ExInitializeSListHead(&Page->FreeList);

        // Insert the page at the head of the cpu's pool.
        //
        InsertHeadList(&CpuPool->PageList, &Page->PageLink);
        CpuPool->TotalPagesAllocated++;

        // Update the pool's statistics.
        //
        PagesInUse = CpuPool->TotalPagesAllocated - CpuPool->TotalPagesFreed;
        if (PagesInUse > CpuPool->PeakPagesInUse)
        {
            CpuPool->PeakPagesInUse = PagesInUse;
        }

        // Build (and return) the first block on the fresh page. This
        // cannot fail: a new page always has unbuilt blocks.
        Block = FsbpBuildNextBlock(Pool, Page);
        ASSERT(Block);
    }
    // NULL if the page allocation failed.
    return Block;
}
  198. // Free the specified pool page back to the system's pool.
  199. //
  200. VOID
  201. FsbpFreePage(
  202. IN PFSB_CPU_POOL_HEADER CpuPool,
  203. IN PFSB_PAGE_HEADER Page
  204. )
  205. {
  206. ASSERT(Page == PAGE_ALIGN(Page));
  207. ASSERT(Page->Pool == CpuPool);
  208. ExFreePool(Page);
  209. CpuPool->TotalPagesFreed++;
  210. ASSERT(CpuPool->TotalPagesFreed <= CpuPool->TotalPagesAllocated);
  211. }
  212. // Reclaim the memory consumed by completely unused pool pages belonging
  213. // to the specified per-processor pool.
  214. //
  215. // Caller IRQL: [DISPATCH_LEVEL]
  216. //
VOID
FsbpScavengePool(
    IN OUT PFSB_CPU_POOL_HEADER CpuPool
    )
{
    PFSB_POOL_HEADER Pool;
    PFSB_PAGE_HEADER Page;
    PLIST_ENTRY Scan;
    PLIST_ENTRY Next;
    LARGE_INTEGER Ticks;
    LARGE_INTEGER TicksDelta;

    // We must not only be at DISPATCH_LEVEL (or higher), we must also
    // be called on the processor that owns the specified pool.
    //
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
    ASSERT((ULONG)KeGetCurrentProcessorNumber() == CpuPool->OwnerCpu);

    Pool = PoolFromCpuPool(CpuPool);

    KeQueryTickCount(&Ticks);

    // Compute the next tick value which represents the earliest time
    // that we will scavenge this pool again.
    //
    ConvertSecondsToTicks(FSB_SCAVENGE_PERIOD_IN_SECONDS, &TicksDelta);
    CpuPool->NextScavengeTick.QuadPart = Ticks.QuadPart + TicksDelta.QuadPart;

    // Compute the tick value which represents the last point at which
    // it's okay to free a page.
    //
    ConvertSecondsToTicks(FSB_MINIMUM_PAGE_LIFETIME_IN_SECONDS, &TicksDelta);
    Ticks.QuadPart = Ticks.QuadPart - TicksDelta.QuadPart;

    // Free any page that is completely unused (its free-list depth equals
    // BlocksPerPage, so every block built on it has been returned) and
    // whose minimum lifetime has expired.
    //
    for (Scan = CpuPool->PageList.Flink;
         Scan != &CpuPool->PageList;
         Scan = Next)
    {
        Page = CONTAINING_RECORD(Scan, FSB_PAGE_HEADER, PageLink);
        ASSERT(Page == PAGE_ALIGN(Page));
        ASSERT(CpuPool == Page->Pool);
        ASSERT(!Page->OnUsedPageList);

        // Step to the next link before we possibly unlink this page.
        //
        Next = Scan->Flink;

        if ((Pool->BlocksPerPage == ExQueryDepthSList(&Page->FreeList)) &&
            (Ticks.QuadPart > Page->LastUsedTick.QuadPart))
        {
            RemoveEntryList(Scan);
            FsbpFreePage(CpuPool, Page);
        }
    }

    // Scan the used pages to see if they can be moved back to the normal
    // list. This can happen if too many frees by non-owning processors
    // are done. In that case, the pages get orphaned on the used-page
    // list after all of their blocks have been freed to the page.
    // Un-orphan them here.
    //
    for (Scan = CpuPool->UsedPageList.Flink;
         Scan != &CpuPool->UsedPageList;
         Scan = Next)
    {
        Page = CONTAINING_RECORD(Scan, FSB_PAGE_HEADER, PageLink);
        ASSERT(Page == PAGE_ALIGN(Page));
        ASSERT(CpuPool == Page->Pool);
        ASSERT(Page->OnUsedPageList);

        // Step to the next link before we possibly unlink this page.
        Next = Scan->Flink;

        if (0 != ExQueryDepthSList(&Page->FreeList))
        {
            RemoveEntryList(Scan);
            Page->OnUsedPageList = FALSE;
            InsertTailList(&CpuPool->PageList, Scan);
        }
    }
}
  287. // Creates a pool of fixed-size blocks built over non-paged pool. Each
  288. // block is BlockSize bytes long. If NULL is not returned,
  289. // FsbDestroyPool should be called at a later time to reclaim the
  290. // resources used by the pool.
  291. //
  292. // Arguments:
  293. // BlockSize - The size, in bytes, of each block.
  294. // FreeBlockLinkOffset - The offset, in bytes, from the beginning of a block
// that represents a pointer-sized storage location that the pool can
  296. // use to chain free blocks together. Most often this will be zero
  297. // (meaning use the first pointer-size bytes of the block.)
  298. // Tag - The pool tag to be used internally for calls to
  299. // ExAllocatePoolWithTag. This allows callers to track
  300. // memory consumption for different pools.
  301. // BuildFunction - An optional pointer to a function which initializes
  302. // blocks when they are first allocated by the pool. This allows the
  303. // caller to perform custom, on-demand initialization of each block.
  304. //
  305. // Returns the handle used to identify the pool.
  306. //
  307. // Caller IRQL: [PASSIVE_LEVEL, DISPATCH_LEVEL]
  308. //
HANDLE
FsbCreatePool(
    IN USHORT BlockSize,
    IN USHORT FreeBlockLinkOffset,
    IN ULONG Tag,
    IN NDIS_BLOCK_INITIALIZER BuildFunction OPTIONAL
    )
{
    SIZE_T Size;
    PFSB_POOL_HEADER Pool;
    PFSB_CPU_POOL_HEADER CpuPool;
    CCHAR NumberCpus = KeNumberProcessors;
    CCHAR i;

    // We need at least a pointer size worth of space to manage free
    // blocks and we don't impose any per-block overhead, so we borrow it
    // from the block itself.
    //
    ASSERT(BlockSize >= FreeBlockLinkOffset + sizeof(PVOID));

    // This implementation shouldn't be used if we are not going to get more
    // than about 8 blocks per page. Blocks bigger than this should probably
    // be allocated with multiple pages at a time.
    //
    ASSERT(BlockSize < PAGE_SIZE / 8);

    // Compute the size of our pool header allocation: the shared header
    // followed by one cache-aligned header per processor.
    //
    Size = sizeof(FSB_POOL_HEADER) + (sizeof(FSB_CPU_POOL_HEADER) * NumberCpus);

    // Allocate the pool header. (Tag reads 'Fsb ' in memory order.)
    //
    Pool = ExAllocatePoolWithTag(NonPagedPool, Size, ' bsF');
    if (Pool)
    {
        // Initialize the pool header fields.
        //
        RtlZeroMemory(Pool, Size);
        Pool->Tag = Tag;
        Pool->CallerBlockSize = BlockSize;
        Pool->AlignedBlockSize = (USHORT)ALIGN_UP(BlockSize, PVOID);
        Pool->BlocksPerPage = (PAGE_SIZE - sizeof(FSB_PAGE_HEADER))
                                / Pool->AlignedBlockSize;
        Pool->FreeBlockLinkOffset = FreeBlockLinkOffset;
        Pool->BuildFunction = BuildFunction;
        KeInitializeSpinLock(&Pool->Interlock);

        // Initialize the per-cpu pool headers.
        //
        CpuPool = (PFSB_CPU_POOL_HEADER)(Pool + 1);
        for (i = 0; i < NumberCpus; i++)
        {
            InitializeListHead(&CpuPool[i].PageList);
            InitializeListHead(&CpuPool[i].UsedPageList);
            CpuPool[i].OwnerCpu = i;
        }
    }

    // The returned handle is simply the pool header pointer (NULL if the
    // allocation failed).
    return Pool;
}
  363. // Destroys a pool of fixed-size blocks previously created by a call to
  364. // FsbCreatePool.
  365. //
  366. // Arguments:
  367. // PoolHandle - Handle which identifies the pool being destroyed.
  368. //
  369. // Caller IRQL: [PASSIVE_LEVEL, DISPATCH_LEVEL]
  370. //
  371. VOID
  372. FsbDestroyPool(
  373. IN HANDLE PoolHandle
  374. )
  375. {
  376. PFSB_POOL_HEADER Pool;
  377. PFSB_PAGE_HEADER Page;
  378. PFSB_CPU_POOL_HEADER CpuPool;
  379. PLIST_ENTRY Scan;
  380. PLIST_ENTRY Next;
  381. CCHAR NumberCpus = KeNumberProcessors;
  382. CCHAR i;
  383. Pool = (PFSB_POOL_HEADER)PoolHandle;
  384. if (!Pool)
  385. {
  386. return;
  387. }
  388. for (i = 0, CpuPool = (PFSB_CPU_POOL_HEADER)(Pool + 1);
  389. i < NumberCpus;
  390. i++, CpuPool++)
  391. {
  392. ASSERT(CpuPool->OwnerCpu == (ULONG)i);
  393. for (Scan = CpuPool->PageList.Flink;
  394. Scan != &CpuPool->PageList;
  395. Scan = Next)
  396. {
  397. Page = CONTAINING_RECORD(Scan, FSB_PAGE_HEADER, PageLink);
  398. ASSERT(Page == PAGE_ALIGN(Page));
  399. ASSERT(CpuPool == Page->Pool);
  400. ASSERT(!Page->OnUsedPageList);
  401. ASSERT(Page->BlocksBuilt <= Pool->BlocksPerPage);
  402. ASSERT(Page->BlocksBuilt == ExQueryDepthSList(&Page->FreeList));
  403. // Step to the next link before we free this page.
  404. //
  405. Next = Scan->Flink;
  406. RemoveEntryList(Scan);
  407. FsbpFreePage(CpuPool, Page);
  408. }
  409. ASSERT(IsListEmpty(&CpuPool->UsedPageList));
  410. ASSERT(CpuPool->TotalPagesAllocated == CpuPool->TotalPagesFreed);
  411. ASSERT(CpuPool->TotalBlocksAllocated == CpuPool->TotalBlocksFreed);
  412. }
  413. }
  414. // Returns a pointer to a block allocated from a pool. NULL is returned if
  415. // the request could not be granted. The returned pointer is guaranteed to
  416. // have 8 byte alignment.
  417. //
  418. // Arguments:
  419. // PoolHandle - Handle which identifies the pool being allocated from.
  420. //
  421. // Caller IRQL: [PASSIVE_LEVEL, DISPATCH_LEVEL]
  422. //
PUCHAR
FsbAllocate(
    IN HANDLE PoolHandle
    )
{
    PFSB_POOL_HEADER Pool;
    PFSB_CPU_POOL_HEADER CpuPool;
    PFSB_PAGE_HEADER Page;
    PSINGLE_LIST_ENTRY BlockLink;
    PUCHAR Block = NULL;
    KIRQL OldIrql;
    ULONG Cpu;
    LARGE_INTEGER Ticks;

    ASSERT(PoolHandle);
    Pool = (PFSB_POOL_HEADER)PoolHandle;

    // Raise IRQL before saving the processor number since there is chance
    // it could have changed if we saved it while at passive.
    //
    OldIrql = KeRaiseIrqlToDpcLevel();
    Cpu = KeGetCurrentProcessorNumber();
    CpuPool = (PFSB_CPU_POOL_HEADER)(Pool + 1) + Cpu;

    // See if the minimum time has passed since we last scavenged
    // the pool. If it has, we'll scavenge again. Normally, scavenging
    // should only be performed when we free. However, for the case when
    // the caller constantly frees on a non-owning processor, we'll
    // take this chance to do the scavenging.
    //
    KeQueryTickCount(&Ticks);
    if (Ticks.QuadPart > CpuPool->NextScavengeTick.QuadPart)
    {
        FsbpScavengePool(CpuPool);
    }

    if (!IsListEmpty(&CpuPool->PageList))
    {
        // Pages on the normal page list always have either a free block
        // or an unbuilt block available.
        Page = CONTAINING_RECORD(CpuPool->PageList.Flink, FSB_PAGE_HEADER, PageLink);
        ASSERT(Page == PAGE_ALIGN(Page));
        ASSERT(CpuPool == Page->Pool);
        ASSERT(!Page->OnUsedPageList);

        BlockLink = ExInterlockedPopEntrySList(&Page->FreeList, &Pool->Interlock);
        if (BlockLink)
        {
            // The free-list link lives FreeBlockLinkOffset bytes into the
            // block; back up to the block's true start.
            Block = (PUCHAR)BlockLink - Pool->FreeBlockLinkOffset;
        }
        else
        {
            // If there were no blocks on this page's free list, it had better
            // mean we haven't yet built all of the blocks on the page.
            // (Otherwise, what is a fully used page doing on the page list
            // and not on the used-page list?)
            //
            ASSERT(Page->BlocksBuilt < Pool->BlocksPerPage);
            Block = FsbpBuildNextBlock(Pool, Page);
            ASSERT(Block);
        }

        // Got a block. Now check to see if it was the last one on a fully
        // built page. If so, move the page to the used-page list.
        //
        if ((0 == ExQueryDepthSList(&Page->FreeList)) &&
            (Page->BlocksBuilt == Pool->BlocksPerPage))
        {
            PLIST_ENTRY PageLink;
            PageLink = RemoveHeadList(&CpuPool->PageList);
            InsertTailList(&CpuPool->UsedPageList, PageLink);
            Page->OnUsedPageList = TRUE;
            ASSERT(Page == CONTAINING_RECORD(PageLink, FSB_PAGE_HEADER, PageLink));
        }

        // Block is known non-NULL on this path, so jump straight into the
        // statistics-update body below, skipping the NULL re-check.
        ASSERT(Block);
        goto GotABlock;
    }
    else
    {
        // The page list is empty so we have to allocate and add a new page.
        //
        Block = FsbpAllocateNewPageAndBuildOneBlock(Pool, CpuPool);
    }

    // If we are returning a block, update the statistics.
    //
    if (Block)
    {
        ULONG BlocksInUse;
GotABlock:

        CpuPool->TotalBlocksAllocated++;
        BlocksInUse = CpuPool->TotalBlocksAllocated - CpuPool->TotalBlocksFreed;
        if (BlocksInUse > CpuPool->PeakBlocksInUse)
        {
            CpuPool->PeakBlocksInUse = BlocksInUse;
        }

        // Don't give anyone ideas about where this might point. I don't
        // want anyone trashing my pool because they thought this field
        // was valid for some reason.
        //
        ((PSINGLE_LIST_ENTRY)((PUCHAR)Block + Pool->FreeBlockLinkOffset))->Next = NULL;
    }

    KeLowerIrql(OldIrql);
    return Block;
}
  519. // Free a block back to the pool from which it was allocated.
  520. //
  521. // Arguments:
  522. // Block - A block returned from a prior call to FsbAllocate.
  523. //
  524. // Caller IRQL: [PASSIVE_LEVEL, DISPATCH_LEVEL]
  525. //
VOID
FsbFree(
    IN PUCHAR Block
    )
{
    PFSB_PAGE_HEADER Page;
    PFSB_CPU_POOL_HEADER CpuPool;
    PFSB_POOL_HEADER Pool;
    LARGE_INTEGER Ticks;
    LOGICAL PageIsPossiblyUnused;
    LOGICAL PageIsOnUsedPageList;
    LOGICAL Scavenge = FALSE;

    ASSERT(Block);

    // Get the address of the page that this block lives on. This is where
    // our page header is stored.
    //
    Page = PAGE_ALIGN(Block);

    // Follow the back pointer in the page header to locate the owning
    // cpu's pool. (This may not be the current processor's pool.)
    //
    CpuPool = Page->Pool;

    // Locate the pool header.
    //
    Pool = PoolFromCpuPool(CpuPool);

    // See if the minimum time has passed since we last scavenged
    // the pool. If it has, we'll scavenge again.
    //
    KeQueryTickCount(&Ticks);
    if (Ticks.QuadPart > CpuPool->NextScavengeTick.QuadPart)
    {
        Scavenge = TRUE;
    }

    // If this is the last block to be returned to this page, the page is
    // now unused. Note that since there is no synchronization beyond
    // ExInterlockedPush/PopSEntryList between allocate and free, we
    // cannot guarantee that it will remain unused even before the next
    // few instructions are executed.
    //
    PageIsPossiblyUnused = (ExQueryDepthSList(&Page->FreeList)
                                == (Pool->BlocksPerPage - 1));
    if (PageIsPossiblyUnused)
    {
        // Note the tick that this page was last used. This sets the
        // minimum time that this page will continue to live unless it
        // gets re-used.
        //
        Page->LastUsedTick.QuadPart = Ticks.QuadPart;
    }

    // If this page is on the used-page list, we'll put it back on the normal
    // page list (only after pushing the block back on the page's free list)
    // if, after raising IRQL, we are on the processor that owns this
    // pool.
    //
    PageIsOnUsedPageList = Page->OnUsedPageList;

    // Interlocked because non-owning processors may free blocks to this
    // cpu's pool concurrently.
    InterlockedIncrement(&CpuPool->TotalBlocksFreed);

    // Now return the block to the page's free list.
    //
    ExInterlockedPushEntrySList(
        &Page->FreeList,
        (PSINGLE_LIST_ENTRY)((PUCHAR)Block + Pool->FreeBlockLinkOffset),
        &Pool->Interlock);

    //
    // Warning: Now that the block is back on the page, one cannot reliably
    // dereference anything through 'Page' anymore. It may have just been
    // scavenged by its owning processor. This is not the case if the
    // page was on the used-page list (because scavenging doesn't affect
    // the used-page list). We saved off the value of Page->OnUsedPageList
    // before returning the block so we would not risk touching Page to get
    // this value only to find that it was false.
    //

    // If we need to move the page from the used-page list to the normal
    // page list, or if we need to scavenge, we need to be at DISPATCH_LEVEL
    // and be executing on the processor that owns this pool.
    // Find out if the CPU we are executing on right now owns this pool.
    // Note that if we are running at PASSIVE_LEVEL, the current CPU may
    // change over the duration of this function call, so this value is
    // not absolute over the life of the function.
    //
    if ((PageIsOnUsedPageList || Scavenge) &&
        ((ULONG)KeGetCurrentProcessorNumber() == CpuPool->OwnerCpu))
    {
        KIRQL OldIrql;

        OldIrql = KeRaiseIrqlToDpcLevel();

        // Now that we are at DISPATCH_LEVEL, perform the work if we are still
        // executing on the processor that owns the pool.
        //
        if ((ULONG)KeGetCurrentProcessorNumber() == CpuPool->OwnerCpu)
        {
            // If the page is still on the used-page list (meaning another
            // FsbFree didn't just sneak by), then put the page on the
            // normal list. Very important to do this after (not before)
            // returning the block to the free list because FsbAllocate
            // expects blocks to be available from pages on the page list.
            //
            if (PageIsOnUsedPageList && Page->OnUsedPageList)
            {
                RemoveEntryList(&Page->PageLink);
                Page->OnUsedPageList = FALSE;
                InsertTailList(&CpuPool->PageList, &Page->PageLink);
            }

            // Perform the scavenge if we previously noted we needed to do so.
            //
            if (Scavenge)
            {
                FsbpScavengePool(CpuPool);
            }
        }
        KeLowerIrql(OldIrql);
    }
}