Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

761 lines
23 KiB

  1. /*++
  2. Copyright (c) 1999-2000 Microsoft Corporation
  3. Module Name:
  4. fsbpool.c
  5. Abstract:
  6. This file contains the implementation of fixed-size block pool.
  7. Author:
  8. Shaun Cox (shaunco) 10-Dec-1999
  9. --*/
  10. #include "ntddk.h"
  11. #include "fsbpool.h"
  12. #define FSB_SCAVENGE_PERIOD_IN_SECONDS 30
  13. #define FSB_MINIMUM_PAGE_LIFETIME_IN_SECONDS 20
  14. #if defined (_WIN64)
  15. #define MAX_CACHE_LINE_SIZE 128
  16. #else
  17. #define MAX_CACHE_LINE_SIZE 64
  18. #endif
// The following structures are used in the single allocation that
// a pool handle points to.
// PoolHandle ---> [FSB_POOL_HEADER + FSB_CPU_POOL_HEADER for cpu 0 +
//                  FSB_CPU_POOL_HEADER for cpu 1 + ...
//                  FSB_CPU_POOL_HEADER for cpu N]
//
// FSB_POOL_HEADER is the data common to all CPUs for a given pool.
//
typedef struct _FSB_POOL_HEADER
{
// cache-line -----
    struct _FSB_POOL_HEADER_BASE
    {
        ULONG Tag;                  // pool tag used when allocating pages
        USHORT CallerBlockSize;     // caller's requested block size
        USHORT AlignedBlockSize;    // ALIGN_UP(CallerBlockSize, PVOID)
        USHORT BlocksPerPage;       // blocks that fit on a page after FSB_PAGE_HEADER
        USHORT FreeBlockLinkOffset; // offset into each block where the pool links free blocks
        PFSB_BUILDBLOCK_FUNCTION BuildFunction; // optional one-time per-block initializer
    };

    // Pad the header out to a whole number of cache lines so that the
    // per-CPU headers which immediately follow it in the same allocation
    // start on their own cache line (avoids false sharing).
    //
    UCHAR Alignment[MAX_CACHE_LINE_SIZE
            - (sizeof(struct _FSB_POOL_HEADER_BASE) % MAX_CACHE_LINE_SIZE)];
} FSB_POOL_HEADER, *PFSB_POOL_HEADER;

C_ASSERT(sizeof(FSB_POOL_HEADER) % MAX_CACHE_LINE_SIZE == 0);
// FSB_CPU_POOL_HEADER is the data specific to a CPU for a given pool.
//
typedef struct _FSB_CPU_POOL_HEADER
{
// cache-line -----
    struct _FSB_CPU_POOL_HEADER_BASE
    {
        // The doubly-linked list of pages that make up this processor's pool.
        // These pages have one or more free blocks available.
        //
        LIST_ENTRY PageList;

        // The doubly-linked list of pages that are fully in use.  This list
        // is separate from the above list so that we do not spend time walking
        // a very long list during FsbAllocate when many pages are fully used.
        //
        LIST_ENTRY UsedPageList;

        // The next scheduled time (in units of KeQueryTickCount()) for
        // scavenging this pool.  The next scavenge will happen no earlier
        // than this.
        //
        LARGE_INTEGER NextScavengeTick;

        // The number of the processor that owns this pool.
        //
        ULONG OwnerCpu;

        // Running statistics.  "Peak" values record high-water marks of
        // (Allocated - Freed) and are updated on the allocation paths.
        //
        ULONG TotalBlocksAllocated;
        ULONG TotalBlocksFreed;
        ULONG PeakBlocksInUse;
        ULONG TotalPagesAllocated;
        ULONG TotalPagesFreed;
        ULONG PeakPagesInUse;
    };

    // Pad each per-CPU header to a whole number of cache lines so that
    // different CPUs' headers never share a cache line.
    //
    UCHAR Alignment[MAX_CACHE_LINE_SIZE
            - (sizeof(struct _FSB_CPU_POOL_HEADER_BASE) % MAX_CACHE_LINE_SIZE)];
} FSB_CPU_POOL_HEADER, *PFSB_CPU_POOL_HEADER;

C_ASSERT(sizeof(FSB_CPU_POOL_HEADER) % MAX_CACHE_LINE_SIZE == 0);
// FSB_PAGE_HEADER is the data at the beginning of each allocated pool page
// that describes the current state of the blocks on the page.
//
typedef struct _FSB_PAGE_HEADER
{
// cache-line -----
    // Back pointer to the owning cpu pool.
    //
    PFSB_CPU_POOL_HEADER Pool;

    // Linkage entry for the list of pages managed by the cpu pool.
    //
    LIST_ENTRY PageLink;

    // Number of blocks built so far on this page.  Blocks are built on
    // demand.  When this number reaches Pool->BlocksPerPage, all blocks on
    // this page have been built.
    //
    USHORT BlocksBuilt;

    // Boolean indicator of whether or not this page is on the cpu pool's
    // used-page list.  This is checked during FsbFree to see if the page
    // should be moved back to the normal page list.
    // (it is a USHORT, instead of BOOLEAN, for proper padding)
    //
    USHORT OnUsedPageList;

    // List of free blocks on this page.
    //
    SLIST_HEADER FreeList;

    // The value of KeQueryTickCount (normalized to units of seconds)
    // which represents the time after which this page can be freed back
    // to the system's pool.  This time is only valid if the depth of
    // FreeList is Pool->BlocksPerPage.  (i.e. this time is only valid if
    // the page is completely unused.)
    //
    LARGE_INTEGER LastUsedTick;
} FSB_PAGE_HEADER, *PFSB_PAGE_HEADER;
  112. // Get a pointer to the overall pool given a pointer to one of
  113. // the per-processor pools within it.
  114. //
  115. __inline
  116. PFSB_POOL_HEADER
  117. PoolFromCpuPool(
  118. IN PFSB_CPU_POOL_HEADER CpuPool
  119. )
  120. {
  121. return (PFSB_POOL_HEADER)(CpuPool - CpuPool->OwnerCpu) - 1;
  122. }
  123. __inline
  124. VOID
  125. ConvertSecondsToTicks(
  126. IN ULONG Seconds,
  127. OUT PLARGE_INTEGER Ticks
  128. )
  129. {
  130. // If the following assert fires, you need to cast Seconds below to
  131. // ULONGLONG so that 64 bit multiplication and division are used.
  132. // The current code assumes less than 430 seconds so that the
  133. // 32 multiplication below won't overflow.
  134. //
  135. ASSERT(Seconds < 430);
  136. Ticks->HighPart = 0;
  137. Ticks->LowPart = (Seconds * 10*1000*1000) / KeQueryTimeIncrement();
  138. }
  139. // Build the next block on the specified pool page.
  140. // This can only be called if not all of the blocks have been built yet.
  141. //
  142. PUCHAR
  143. FsbpBuildNextBlock(
  144. IN const FSB_POOL_HEADER* Pool,
  145. IN OUT PFSB_PAGE_HEADER Page
  146. )
  147. {
  148. PUCHAR Block;
  149. ASSERT(Page->BlocksBuilt < Pool->BlocksPerPage);
  150. ASSERT((PAGE_SIZE - sizeof(FSB_PAGE_HEADER)) / Pool->AlignedBlockSize
  151. == Pool->BlocksPerPage);
  152. ASSERT(Pool->CallerBlockSize <= Pool->AlignedBlockSize);
  153. Block = (PUCHAR)(Page + 1) + (Page->BlocksBuilt * Pool->AlignedBlockSize);
  154. ASSERT(PAGE_ALIGN(Block) == Page);
  155. if (Pool->BuildFunction) {
  156. Pool->BuildFunction(Block, Pool->CallerBlockSize);
  157. }
  158. Page->BlocksBuilt++;
  159. return Block;
  160. }
  161. // Allocate a new pool page and insert it at the head of the specified
  162. // CPU pool. Build the first block on the new page and return a pointer
  163. // to it.
  164. //
  165. PUCHAR
  166. FsbpAllocateNewPageAndBuildOneBlock(
  167. IN const FSB_POOL_HEADER* Pool,
  168. IN PFSB_CPU_POOL_HEADER CpuPool
  169. )
  170. {
  171. PFSB_PAGE_HEADER Page;
  172. PUCHAR Block = NULL;
  173. ULONG PagesInUse;
  174. ASSERT(Pool);
  175. Page = ExAllocatePoolWithTagPriority(NonPagedPool, PAGE_SIZE, Pool->Tag,
  176. NormalPoolPriority);
  177. if (Page)
  178. {
  179. ASSERT(Page == PAGE_ALIGN(Page));
  180. RtlZeroMemory(Page, sizeof(FSB_PAGE_HEADER));
  181. Page->Pool = CpuPool;
  182. ExInitializeSListHead(&Page->FreeList);
  183. // Insert the page at the head of the cpu's pool.
  184. //
  185. InsertHeadList(&CpuPool->PageList, &Page->PageLink);
  186. CpuPool->TotalPagesAllocated++;
  187. // Update the pool's statistics.
  188. //
  189. PagesInUse = CpuPool->TotalPagesAllocated - CpuPool->TotalPagesFreed;
  190. if (PagesInUse > CpuPool->PeakPagesInUse)
  191. {
  192. CpuPool->PeakPagesInUse = PagesInUse;
  193. }
  194. Block = FsbpBuildNextBlock(Pool, Page);
  195. ASSERT(Block);
  196. }
  197. return Block;
  198. }
  199. // Free the specified pool page back to the system's pool.
  200. //
  201. VOID
  202. FsbpFreePage(
  203. IN PFSB_CPU_POOL_HEADER CpuPool,
  204. IN PFSB_PAGE_HEADER Page
  205. )
  206. {
  207. ASSERT(Page == PAGE_ALIGN(Page));
  208. ASSERT(Page->Pool == CpuPool);
  209. ExFreePool(Page);
  210. CpuPool->TotalPagesFreed++;
  211. ASSERT(CpuPool->TotalPagesFreed <= CpuPool->TotalPagesAllocated);
  212. }
// Reclaim the memory consumed by completely unused pool pages belonging
// to the specified per-processor pool.  Also re-schedules the next
// scavenge time and repatriates orphaned pages from the used-page list.
//
// Caller IRQL: [DISPATCH_LEVEL]
//
VOID
FsbpScavengePool(
    IN OUT PFSB_CPU_POOL_HEADER CpuPool
    )
{
    PFSB_POOL_HEADER Pool;
    PFSB_PAGE_HEADER Page;
    PLIST_ENTRY Scan;
    PLIST_ENTRY Next;
    LARGE_INTEGER Ticks;
    LARGE_INTEGER TicksDelta;

    // We must not only be at DISPATCH_LEVEL (or higher), we must also
    // be called on the processor that owns the specified pool.
    //
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
    ASSERT((ULONG)KeGetCurrentProcessorNumber() == CpuPool->OwnerCpu);

    Pool = PoolFromCpuPool(CpuPool);

    KeQueryTickCount(&Ticks);

    // Compute the next tick value which represents the earliest time
    // that we will scavenge this pool again.
    //
    ConvertSecondsToTicks(FSB_SCAVENGE_PERIOD_IN_SECONDS, &TicksDelta);
    CpuPool->NextScavengeTick.QuadPart = Ticks.QuadPart + TicksDelta.QuadPart;

    // Compute the tick value which represents the last point at which
    // it's okay to free a page.
    //
    ConvertSecondsToTicks(FSB_MINIMUM_PAGE_LIFETIME_IN_SECONDS, &TicksDelta);
    Ticks.QuadPart = Ticks.QuadPart - TicksDelta.QuadPart;

    // Walk the normal page list, freeing any page whose free list is
    // full (i.e. the page is completely unused) and whose minimum
    // lifetime has expired.
    //
    Scan = CpuPool->PageList.Flink;
    while (Scan != &CpuPool->PageList)
    {
        Page = CONTAINING_RECORD(Scan, FSB_PAGE_HEADER, PageLink);
        ASSERT(Page == PAGE_ALIGN(Page));
        ASSERT(CpuPool == Page->Pool);
        ASSERT(!Page->OnUsedPageList);

        // Step to the next link before we possibly unlink this page.
        //
        Next = Scan->Flink;

        if ((Pool->BlocksPerPage == ExQueryDepthSList(&Page->FreeList)) &&
            (Ticks.QuadPart > Page->LastUsedTick.QuadPart))
        {
            RemoveEntryList(Scan);
            FsbpFreePage(CpuPool, Page);
        }

        Scan = Next;
    }

    // Scan the used pages to see if they can be moved back to the normal
    // list.  This can happen if too many frees by non-owning processors
    // are done.  In that case, the pages get orphaned on the used-page
    // list after all of their blocks have been freed to the page.  Un-orphan
    // them here.
    //
    Scan = CpuPool->UsedPageList.Flink;
    while (Scan != &CpuPool->UsedPageList)
    {
        Page = CONTAINING_RECORD(Scan, FSB_PAGE_HEADER, PageLink);
        ASSERT(Page == PAGE_ALIGN(Page));
        ASSERT(CpuPool == Page->Pool);
        ASSERT(Page->OnUsedPageList);

        // Step to the next link before we possibly unlink this page.
        //
        Next = Scan->Flink;

        if (0 != ExQueryDepthSList(&Page->FreeList))
        {
            RemoveEntryList(Scan);
            Page->OnUsedPageList = FALSE;
            InsertTailList(&CpuPool->PageList, Scan);
        }

        Scan = Next;
    }
}
// Creates a pool of fixed-size blocks built over non-paged pool.  Each
// block is BlockSize bytes long.  If NULL is not returned,
// FsbDestroyPool should be called at a later time to reclaim the
// resources used by the pool.
//
// Arguments:
//  BlockSize - The size, in bytes, of each block.
//  FreeBlockLinkOffset - The offset, in bytes, from the beginning of a block
//      that represents a pointer-sized storage location that the pool can
//      use to chain free blocks together.  Most often this will be zero
//      (meaning use the first pointer-size bytes of the block.)
//  Tag - The pool tag to be used internally for calls to
//      ExAllocatePoolWithTag.  This allows callers to track
//      memory consumption for different pools.
//  BuildFunction - An optional pointer to a function which initializes
//      blocks when they are first allocated by the pool.  This allows the
//      caller to perform custom, on-demand initialization of each block.
//
// Returns the handle used to identify the pool.
//
// Caller IRQL: [PASSIVE_LEVEL, DISPATCH_LEVEL]
//
HANDLE
FsbCreatePool(
    IN USHORT BlockSize,
    IN USHORT FreeBlockLinkOffset,
    IN ULONG Tag,
    IN PFSB_BUILDBLOCK_FUNCTION BuildFunction OPTIONAL
    )
{
    SIZE_T Size;
    PFSB_POOL_HEADER Pool;
    PFSB_CPU_POOL_HEADER CpuPool;
#if MILLEN
    // Windows Millennium runs everything on a single processor.
    CCHAR NumberCpus = 1;
#else // MILLEN
    CCHAR NumberCpus = KeNumberProcessors;
#endif // !MILLEN
    CCHAR i;

    // We need at least a pointer size worth of space to manage free
    // blocks and we don't impose any per-block overhead, so we borrow it
    // from the block itself.
    //
    ASSERT(BlockSize >= FreeBlockLinkOffset + sizeof(PVOID));

    // This implementation shouldn't be used if we are not going to get more
    // than about 8 blocks per page.  Blocks bigger than this should probably
    // be allocated with multiple pages at a time.
    //
    ASSERT(BlockSize < PAGE_SIZE / 8);

    // Compute the size of our pool header allocation: one common header
    // followed by one cache-line-aligned header per processor.
    //
    Size = sizeof(FSB_POOL_HEADER) + (sizeof(FSB_CPU_POOL_HEADER) * NumberCpus);

    // Allocate the pool header.
    //
    Pool = ExAllocatePoolWithTag(NonPagedPool, Size, ' bsF');
    if (Pool)
    {
        // Initialize the pool header fields.
        //
        RtlZeroMemory(Pool, Size);
        Pool->Tag = Tag;
        Pool->CallerBlockSize = BlockSize;
        Pool->AlignedBlockSize = (USHORT)ALIGN_UP(BlockSize, PVOID);
        Pool->BlocksPerPage = (PAGE_SIZE - sizeof(FSB_PAGE_HEADER))
                                / Pool->AlignedBlockSize;
        Pool->FreeBlockLinkOffset = FreeBlockLinkOffset;
        Pool->BuildFunction = BuildFunction;

        // Initialize the per-cpu pool headers.  Pages are allocated
        // lazily, so both lists start empty.
        //
        CpuPool = (PFSB_CPU_POOL_HEADER)(Pool + 1);
        for (i = 0; i < NumberCpus; i++)
        {
            InitializeListHead(&CpuPool[i].PageList);
            InitializeListHead(&CpuPool[i].UsedPageList);
            CpuPool[i].OwnerCpu = i;
        }
    }

    return Pool;
}
// Destroys a pool of fixed-size blocks previously created by a call to
// FsbCreatePool.  All blocks must already have been returned via FsbFree;
// the asserts below verify the pool is fully quiesced.
//
// Arguments:
//  PoolHandle - Handle which identifies the pool being destroyed.
//
// Caller IRQL: [PASSIVE_LEVEL, DISPATCH_LEVEL]
//
VOID
FsbDestroyPool(
    IN HANDLE PoolHandle
    )
{
    PFSB_POOL_HEADER Pool;
    PFSB_PAGE_HEADER Page;
    PFSB_CPU_POOL_HEADER CpuPool;
    PLIST_ENTRY Scan;
    PLIST_ENTRY Next;
#if MILLEN
    // Windows Millennium runs everything on a single processor.
    CCHAR NumberCpus = 1;
#else // MILLEN
    CCHAR NumberCpus = KeNumberProcessors;
#endif // !MILLEN
    CCHAR i;

    Pool = (PFSB_POOL_HEADER)PoolHandle;
    if (!Pool)
    {
        return;
    }

    // Walk each per-cpu pool, returning every page on its page list to
    // the system.
    //
    for (i = 0, CpuPool = (PFSB_CPU_POOL_HEADER)(Pool + 1);
         i < NumberCpus;
         i++, CpuPool++)
    {
        ASSERT(CpuPool->OwnerCpu == (ULONG)i);

        for (Scan = CpuPool->PageList.Flink;
             Scan != &CpuPool->PageList;
             Scan = Next)
        {
            Page = CONTAINING_RECORD(Scan, FSB_PAGE_HEADER, PageLink);
            ASSERT(Page == PAGE_ALIGN(Page));
            ASSERT(CpuPool == Page->Pool);
            ASSERT(!Page->OnUsedPageList);

            // Every block that was built must be back on the free list,
            // otherwise callers still hold outstanding blocks.
            //
            ASSERT(Page->BlocksBuilt <= Pool->BlocksPerPage);
            ASSERT(Page->BlocksBuilt == ExQueryDepthSList(&Page->FreeList));

            // Step to the next link before we free this page.
            //
            Next = Scan->Flink;
            RemoveEntryList(Scan);
            FsbpFreePage(CpuPool, Page);
        }

        // A fully-used page would mean outstanding allocations; the
        // used-page list must be empty and the counters balanced.
        //
        ASSERT(IsListEmpty(&CpuPool->UsedPageList));
        ASSERT(CpuPool->TotalPagesAllocated == CpuPool->TotalPagesFreed);
        ASSERT(CpuPool->TotalBlocksAllocated == CpuPool->TotalBlocksFreed);
    }
}
// Returns a pointer to a block allocated from a pool.  NULL is returned if
// the request could not be granted.  The returned pointer is guaranteed to
// have 8 byte alignment.
//
// Arguments:
//  PoolHandle - Handle which identifies the pool being allocated from.
//
// Caller IRQL: [PASSIVE_LEVEL, DISPATCH_LEVEL]
//
PUCHAR
FsbAllocate(
    IN HANDLE PoolHandle
    )
{
    PFSB_POOL_HEADER Pool;
    PFSB_CPU_POOL_HEADER CpuPool;
    PFSB_PAGE_HEADER Page;
    PSINGLE_LIST_ENTRY BlockLink;
    PUCHAR Block = NULL;
    KIRQL OldIrql;
    ULONG Cpu;
    LARGE_INTEGER Ticks;

    ASSERT(PoolHandle);
    Pool = (PFSB_POOL_HEADER)PoolHandle;

    // Raise IRQL before saving the processor number since there is chance
    // it could have changed if we saved it while at passive.
    //
    OldIrql = KeRaiseIrqlToDpcLevel();
    Cpu = KeGetCurrentProcessorNumber();
    CpuPool = (PFSB_CPU_POOL_HEADER)(Pool + 1) + Cpu;

    // See if the minimum time has passed since we last scavenged
    // the pool.  If it has, we'll scavenge again.  Normally, scavenging
    // should only be performed when we free.  However, for the case when
    // the caller constantly frees on a non-owning processor, we'll
    // take this chance to do the scavenging.
    //
    KeQueryTickCount(&Ticks);
    if (Ticks.QuadPart > CpuPool->NextScavengeTick.QuadPart)
    {
        FsbpScavengePool(CpuPool);
    }

    if (!IsListEmpty(&CpuPool->PageList))
    {
        // Pages on the page list always have a free block available, or
        // unbuilt blocks remaining; try the free list first.
        //
        Page = CONTAINING_RECORD(CpuPool->PageList.Flink, FSB_PAGE_HEADER, PageLink);
        ASSERT(Page == PAGE_ALIGN(Page));
        ASSERT(CpuPool == Page->Pool);
        ASSERT(!Page->OnUsedPageList);

        BlockLink = InterlockedPopEntrySList(&Page->FreeList);
        if (BlockLink)
        {
            // The free list links blocks at FreeBlockLinkOffset bytes in;
            // back up to the start of the block.
            //
            Block = (PUCHAR)BlockLink - Pool->FreeBlockLinkOffset;
        }
        else
        {
            // If there were no blocks on this page's free list, it had better
            // mean we haven't yet built all of the blocks on the page.
            // (Otherwise, what is a fully used page doing on the page list
            // and not on the used-page list?)
            //
            ASSERT(Page->BlocksBuilt < Pool->BlocksPerPage);
            Block = FsbpBuildNextBlock(Pool, Page);
            ASSERT(Block);
        }

        // Got a block.  Now check to see if it was the last one on a fully
        // built page.  If so, move the page to the used-page list.
        //
        if ((0 == ExQueryDepthSList(&Page->FreeList)) &&
            (Page->BlocksBuilt == Pool->BlocksPerPage))
        {
            PLIST_ENTRY PageLink;
            PageLink = RemoveHeadList(&CpuPool->PageList);
            InsertTailList(&CpuPool->UsedPageList, PageLink);
            Page->OnUsedPageList = TRUE;
            ASSERT(Page == CONTAINING_RECORD(PageLink, FSB_PAGE_HEADER, PageLink));
        }

        ASSERT(Block);

        // Block is known non-NULL here; jump straight to the statistics
        // update, skipping the 'if (Block)' re-check below.
        //
        goto GotABlock;
    }
    else
    {
        // The page list is empty so we have to allocate and add a new page.
        //
        Block = FsbpAllocateNewPageAndBuildOneBlock(Pool, CpuPool);
    }

    // If we are returning a block, update the statistics.
    //
    if (Block)
    {
        ULONG BlocksInUse;
    GotABlock:

        CpuPool->TotalBlocksAllocated++;
        BlocksInUse = CpuPool->TotalBlocksAllocated - CpuPool->TotalBlocksFreed;
        if (BlocksInUse > CpuPool->PeakBlocksInUse)
        {
            CpuPool->PeakBlocksInUse = BlocksInUse;
        }

        // Don't give anyone ideas about where this might point.  I don't
        // want anyone trashing my pool because they thought this field
        // was valid for some reason.
        //
        ((PSINGLE_LIST_ENTRY)((PUCHAR)Block + Pool->FreeBlockLinkOffset))->Next = NULL;
    }

    KeLowerIrql(OldIrql);

    return Block;
}
// Free a block back to the pool from which it was allocated.  May be
// called from any processor; only the owning processor manipulates the
// pool's page lists.
//
// Arguments:
//  Block - A block returned from a prior call to FsbAllocate.
//
// Caller IRQL: [PASSIVE_LEVEL, DISPATCH_LEVEL]
//
VOID
FsbFree(
    IN PUCHAR Block
    )
{
    PFSB_PAGE_HEADER Page;
    PFSB_CPU_POOL_HEADER CpuPool;
    PFSB_POOL_HEADER Pool;
    LARGE_INTEGER Ticks;
    LOGICAL PageIsPossiblyUnused;
    LOGICAL PageIsOnUsedPageList;
    LOGICAL Scavenge = FALSE;

    ASSERT(Block);

    // Get the address of the page that this block lives on.  This is where
    // our page header is stored.
    //
    Page = PAGE_ALIGN(Block);

    // Follow the back pointer in the page header to locate the owning
    // cpu's pool.
    //
    CpuPool = Page->Pool;

    // Locate the pool header.
    //
    Pool = PoolFromCpuPool(CpuPool);

    // See if the minimum time has passed since we last scavenged
    // the pool.  If it has, we'll scavenge again.
    //
    KeQueryTickCount(&Ticks);
    if (Ticks.QuadPart > CpuPool->NextScavengeTick.QuadPart)
    {
        Scavenge = TRUE;
    }

    // If this is the last block to be returned to this page, the page is
    // now unused.  Note that since there is no synchronization beyond
    // InterlockedPush/PopSEntryList between allocate and free, we
    // cannot guarantee that it will remain unused even before the next
    // few instructions are executed.
    //
    PageIsPossiblyUnused = (ExQueryDepthSList(&Page->FreeList)
                                == (Pool->BlocksPerPage - 1));
    if (PageIsPossiblyUnused)
    {
        // Note the tick that this page was last used.  This sets the
        // minimum time that this page will continue to live unless it
        // gets re-used.
        //
        Page->LastUsedTick.QuadPart = Ticks.QuadPart;
    }

    // If this page is on the used-page list, we'll put it back on the normal
    // page list (only after pushing the block back on the page's free list)
    // if, after raising IRQL, we are on the processor that owns this
    // pool.
    //
    PageIsOnUsedPageList = Page->OnUsedPageList;

    // Interlocked because non-owning processors may free concurrently.
    //
    InterlockedIncrement(&CpuPool->TotalBlocksFreed);

    // Now return the block to the page's free list.
    //
    InterlockedPushEntrySList(
        &Page->FreeList,
        (PSINGLE_LIST_ENTRY)((PUCHAR)Block + Pool->FreeBlockLinkOffset));

    //
    // Warning: Now that the block is back on the page, one cannot reliably
    // dereference anything through 'Page' anymore.  It may have just been
    // scavenged by its owning processor.  This is not the case if the
    // page was on the used-page list (because scavenging doesn't affect
    // the used-page list).  We saved off the value of Page->OnUsedPageList
    // before returning the block so we would not risk touching Page to get
    // this value only to find that it was false.
    //
    // If we need to move the page from the used-page list to the normal
    // page list, or if we need to scavenge, we need to be at DISPATCH_LEVEL
    // and be executing on the processor that owns this pool.
    // Find out if the CPU we are executing on right now owns this pool.
    // Note that if we are running at PASSIVE_LEVEL, the current CPU may
    // change over the duration of this function call, so this value is
    // not absolute over the life of the function.
    //
    if ((PageIsOnUsedPageList || Scavenge) &&
        ((ULONG)KeGetCurrentProcessorNumber() == CpuPool->OwnerCpu))
    {
        KIRQL OldIrql;

        OldIrql = KeRaiseIrqlToDpcLevel();

        // Now that we are at DISPATCH_LEVEL, perform the work if we are still
        // executing on the processor that owns the pool.
        //
        if ((ULONG)KeGetCurrentProcessorNumber() == CpuPool->OwnerCpu)
        {
            // If the page is still on the used-page list (meaning another
            // FsbFree didn't just sneak by), then put the page on the
            // normal list.  Very important to do this after (not before)
            // returning the block to the free list because FsbAllocate expects
            // blocks to be available from pages on the page list.
            //
            if (PageIsOnUsedPageList && Page->OnUsedPageList)
            {
                RemoveEntryList(&Page->PageLink);
                Page->OnUsedPageList = FALSE;
                InsertTailList(&CpuPool->PageList, &Page->PageLink);
            }

            // Perform the scavenge if we previously noted we needed to do so.
            //
            if (Scavenge)
            {
                FsbpScavengePool(CpuPool);
            }
        }

        KeLowerIrql(OldIrql);
    }
}