Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

684 lines
17 KiB

  1. /*++
  2. Copyright (c) 1989 Microsoft Corporation
  3. Module Name:
  4. heapmgr.c
  5. Abstract:
  6. This module contains initialization and termination routines for
  7. server FSP heap, as well as debug routines for memory tracking.
  8. --*/
  9. #include "precomp.h"
  10. #include "heapmgr.tmh"
  11. #pragma hdrstop
  12. // Make the retry time 15 milli-seconds
  13. #define SRV_LOW_PRIORITY_RETRY_TIME -1*1000*10*15
#ifdef POOL_TAGGING

//
// Array correlating BlockType numbers to pool tags.
//
// Each tag is a four-character constant spelled backwards ('fbSL'
// shows up as "LSbf" in pool-tracking tools), indexed by
// (BlockType - 1) via TAG_FROM_TYPE below.
//
// *** This array must be maintained in concert with the BlockType
//     definitions in srvblock.h!
//
// NOTE(review): 'dcSL' is listed for both BlockTypeCommDevice and
// BlockTypeCachedDirectory, and 'rsSL' for both BlockTypeSearch and
// BlockTypeShareRemark. Pool-tag statistics for those pairs are
// therefore merged — presumably unintentional; confirm before
// changing, since the tag values are visible to debuggers.
//

ULONG SrvPoolTags[BlockTypeMax-1] = {
        'fbSL',                     // BlockTypeBuffer
        'ncSL',                     // BlockTypeConnection
        'peSL',                     // BlockTypeEndpoint
        'flSL',                     // BlockTypeLfcb
        'fmSL',                     // BlockTypeMfcb
        'frSL',                     // BlockTypeRfcb
        'rsSL',                     // BlockTypeSearch
        'csSL',                     // BlockTypeSearchCore
        'lbSL',                     // BlockTypeByteRangeLock for persistent handles
        'ssSL',                     // BlockTypeSession
        'hsSL',                     // BlockTypeShare
        'rtSL',                     // BlockTypeTransaction
        'ctSL',                     // BlockTypeTreeConnect
        'poSL',                     // BlockTypeOplockBreak
        'dcSL',                     // BlockTypeCommDevice
        'iwSL',                     // BlockTypeWorkContextInitial
        'nwSL',                     // BlockTypeWorkContextNormal
        'rwSL',                     // BlockTypeWorkContextRaw
        'swSL',                     // BlockTypeWorkContextSpecial
        'dcSL',                     // BlockTypeCachedDirectory (duplicates CommDevice tag)
        'bdSL',                     // BlockTypeDataBuffer
        'btSL',                     // BlockTypeTable
        'hnSL',                     // BlockTypeNonpagedHeader
        'cpSL',                     // BlockTypePagedConnection
        'rpSL',                     // BlockTypePagedRfcb
        'mpSL',                     // BlockTypePagedMfcb
        'itSL',                     // BlockTypeTimer
        'caSL',                     // BlockTypeAdminCheck
        'qwSL',                     // BlockTypeWorkQueue
        'fsSL',                     // BlockTypeDfs
        'rlSL',                     // BlockTypeLargeReadX
        'saSL',                     // BlockTypeAdapterStatus
        'rsSL',                     // BlockTypeShareRemark (duplicates Search tag)
        'dsSL',                     // BlockTypeShareSecurityDescriptor
        'ivSL',                     // BlockTypeVolumeInformation
        'nfSL',                     // BlockTypeFSName
        'inSL',                     // BlockTypeNameInfo
        'idSL',                     // BlockTypeDirectoryInfo
        'cdSL',                     // BlockTypeDirCache
        'imSL',                     // BlockTypeMisc
        'nsSL',                     // BlockTypeSnapShot
#ifdef INCLUDE_SMB_PERSISTENT
        'spSL',                     // BlockTypePersistentState
        'bpSL',                     // BlockTypePersistentBitMap
        'hpSL',                     // BlockTypePersistShareState
#endif
};

//
// Macro to map from block type to pool tag.
//
#define TAG_FROM_TYPE(_type) SrvPoolTags[(_type)-1]

#else

// Non-tagging builds: the argument position must still expand to
// something; 'ignoreme' is presumably defined/absorbed elsewhere —
// verify against the non-POOL_TAGGING build environment.
#define TAG_FROM_TYPE(_type) ignoreme

#endif // def POOL_TAGGING
  76. #ifdef ALLOC_PRAGMA
  77. #pragma alloc_text( PAGE, SrvAllocatePagedPool )
  78. #pragma alloc_text( PAGE, SrvFreePagedPool )
  79. #pragma alloc_text( PAGE, SrvClearLookAsideList )
  80. #endif
  81. #if 0
  82. NOT PAGEABLE -- SrvAllocateNonPagedPool
  83. NOT PAGEABLE -- SrvFreeNonPagedPool
  84. #endif
  85. extern LONG SrvMemoryAllocationRetries;
  86. extern LONG SrvMemoryAllocationRetriesSuccessful;
PVOID SRVFASTCALL
SrvInterlockedAllocate( PLOOK_ASIDE_LIST l, ULONG NumberOfBytes, PLONG statistics )

/*++

Routine Description:

    Lock-free allocation from a per-processor lookaside list.  Walks the
    list's slot array, atomically swapping each slot with NULL.  A block
    pulled out is returned to the caller if its RequestedSize is large
    enough; otherwise it is swapped back into the same slot.  Because
    another thread may have freed a block into that slot in the meantime,
    the second swap can hand back a *different* block, which gets the same
    size check; if it is still too small it is truly released with
    ExFreePool (and subtracted from *statistics, since freed blocks stay
    counted in the usage statistic while parked on the lookaside list).

Arguments:

    l - the lookaside list to allocate from.  NumberOfBytes selects
        either the large or small slot array (split at
        LOOK_ASIDE_SWITCHOVER).

    NumberOfBytes - minimum usable size the caller needs.

    statistics - optional usage counter to decrement when a parked block
        is really freed; may be NULL.

Return Value:

    Pointer to the usable memory just past the POOL_HEADER, or NULL if
    no parked block was big enough (caller falls back to pool
    allocation).

--*/

{
    PPOOL_HEADER newPool;
    PPOOL_HEADER *pentry = NumberOfBytes > LOOK_ASIDE_SWITCHOVER ?
                                l->LargeFreeList : l->SmallFreeList;
    PPOOL_HEADER *pend = pentry + LOOK_ASIDE_MAX_ELEMENTS;

    do {
        //
        // Exchange with the lookaside spot and see if we get anything
        //
        newPool = NULL;
        newPool = (PPOOL_HEADER)InterlockedExchangePointer( pentry, newPool );

        if( newPool == NULL ) {
            continue;
        }

        if( newPool->RequestedSize >= NumberOfBytes ) {
            //
            // The one we got is big enough!  Return it.
            //
            ++(l->AllocHit);
            return newPool + 1;     // usable memory starts after the header
        }

        //
        // It wasn't big enough, so put it back.
        //
        newPool = (PPOOL_HEADER)InterlockedExchangePointer( pentry, newPool );

        if( newPool == NULL ) {
            // Slot was empty when we swapped back in: our block is parked
            // again; move on to the next slot.
            continue;
        }

        //
        // Oops, somebody else freed some memory to this spot.  Can we use it?
        //
        if( newPool->RequestedSize >= NumberOfBytes ) {
            //
            // We can use it!
            //
            ++(l->AllocHit);
            return newPool + 1;
        }

        //
        // Can't use the memory -- so really free it and keep looking
        //
        if( statistics ) {
            InterlockedExchangeAdd(
                statistics,
                -(LONG)newPool->RequestedSize
            );
        }

        ExFreePool( newPool );

    } while( ++pentry < pend );

    // Nothing usable on the whole list.
    ++(l->AllocMiss);
    return NULL;
}
  141. PPOOL_HEADER SRVFASTCALL
  142. SrvInterlockedFree( PPOOL_HEADER block )
  143. {
  144. PPOOL_HEADER *pentry = block->FreeList;
  145. PPOOL_HEADER *pend = pentry + LOOK_ASIDE_MAX_ELEMENTS;
  146. do {
  147. block = (PPOOL_HEADER)InterlockedExchangePointer( pentry, block );
  148. } while( block != NULL && ++pentry < pend );
  149. return block;
  150. }
  151. VOID SRVFASTCALL
  152. SrvClearLookAsideList( PLOOK_ASIDE_LIST l, VOID (SRVFASTCALL *FreeRoutine )( PVOID ) )
  153. {
  154. PPOOL_HEADER *pentry, *pend, block;
  155. PAGED_CODE();
  156. //
  157. // Clear out the list of large chunks
  158. //
  159. pentry = l->LargeFreeList;
  160. pend = pentry + LOOK_ASIDE_MAX_ELEMENTS;
  161. do {
  162. block = NULL;
  163. block = (PPOOL_HEADER)InterlockedExchangePointer( pentry, block );
  164. if( block != NULL ) {
  165. block->FreeList = NULL;
  166. FreeRoutine( block + 1 );
  167. }
  168. } while( ++pentry < pend );
  169. //
  170. // Clear out the list of small chunks
  171. //
  172. pentry = l->SmallFreeList;
  173. pend = pentry + LOOK_ASIDE_MAX_ELEMENTS;
  174. do {
  175. block = NULL;
  176. block = (PPOOL_HEADER)InterlockedExchangePointer( pentry, block );
  177. if( block != NULL ) {
  178. block->FreeList = NULL;
  179. FreeRoutine( block + 1 );
  180. }
  181. } while( ++pentry < pend );
  182. }
PVOID SRVFASTCALL
SrvAllocateNonPagedPool (
    IN CLONG NumberOfBytes
#ifdef POOL_TAGGING
    , IN CLONG BlockType
#endif
    )

/*++

Routine Description:

    This routine allocates nonpaged pool in the server.  A check is
    made to ensure that the server's total nonpaged pool usage is below
    the configurable limit.

    Fast path: satisfy the request from the current processor's
    lookaside list (blocks parked there are already counted in
    CurrentNonPagedPoolUsage, so no statistics update is needed).
    Slow path: charge the statistics, enforce SrvMaxNonPagedPoolUsage,
    then allocate NumberOfBytes plus a POOL_HEADER from system pool.
    Requests larger than two pages are made at LowPoolPriority and, on
    failure at <= APC_LEVEL, retried once after a short delay.

Arguments:

    NumberOfBytes - the number of bytes to allocate.

    BlockType - the type of block (used to pass pool tag to allocator)

Return Value:

    PVOID - a pointer to the allocated memory or NULL if the memory could
        not be allocated.

--*/

{
    PPOOL_HEADER newPool;
    PPOOL_HEADER *FreeList = NULL;
    ULONG newUsage;
    BOOLEAN IsLowPriority = FALSE;
    LARGE_INTEGER interval;

#ifdef POOL_TAGGING
    ASSERT( BlockType > 0 && BlockType < BlockTypeMax );
#endif

    //
    // Pull this allocation off the per-processor free list if we can
    //
    if( SrvWorkQueues ) {

        PWORK_QUEUE queue = PROCESSOR_TO_QUEUE();

        if( NumberOfBytes <= queue->NonPagedPoolLookAsideList.MaxSize ) {

            newPool = SrvInterlockedAllocate(
                        &queue->NonPagedPoolLookAsideList,
                        NumberOfBytes,
                        (PLONG)&SrvStatistics.CurrentNonPagedPoolUsage
                      );

            if( newPool != NULL ) {
                // Already points past the header; already counted in usage.
                return newPool;
            }

            // Remember which slot array a fresh allocation should be
            // parked on when it is eventually freed.
            FreeList = NumberOfBytes > LOOK_ASIDE_SWITCHOVER ?
                        queue->NonPagedPoolLookAsideList.LargeFreeList :
                        queue->NonPagedPoolLookAsideList.SmallFreeList ;
        }
    }

    //
    // Account for this allocation in the statistics database and make
    // sure that this allocation will not put us over the limit of
    // nonpaged pool that we can allocate.
    //
    // Charge first, check after: InterlockedExchangeAdd returns the
    // value *before* the add, so NumberOfBytes is added back in to get
    // the post-charge usage.  On failure the charge is backed out.
    //
    newUsage = InterlockedExchangeAdd( (PLONG)&SrvStatistics.CurrentNonPagedPoolUsage,
                                       (LONG)NumberOfBytes );
    newUsage += NumberOfBytes;

    if ( newUsage > SrvMaxNonPagedPoolUsage ) {

        //
        // Count the failure, but do NOT log an event.  The scavenger
        // will log an event when it next wakes up.  This keeps us from
        // flooding the event log.
        //
        SrvNonPagedPoolLimitHitCount++;
        SrvStatistics.NonPagedPoolFailures++;

        InterlockedExchangeAdd( (PLONG)&SrvStatistics.CurrentNonPagedPoolUsage,
                                -(LONG)NumberOfBytes );
        return NULL;
    }

    // Approximate peak tracking -- unsynchronized read/update, so the
    // recorded peak may be slightly off under concurrency (statistics
    // only).
    if (SrvStatistics.CurrentNonPagedPoolUsage > SrvStatistics.PeakNonPagedPoolUsage) {
        SrvStatistics.PeakNonPagedPoolUsage = SrvStatistics.CurrentNonPagedPoolUsage;
    }

    //
    // Do the actual memory allocation.  Allocate extra space so that we
    // can store the size of the allocation for the free routine.
    //
    if( NumberOfBytes > 2*4096 )
    {
        // Large (> 2 pages) requests are made at low priority so they
        // fail early under memory pressure.
        IsLowPriority = TRUE;
    }

    newPool = ExAllocatePoolWithTagPriority(
                NonPagedPool,
                NumberOfBytes + sizeof(POOL_HEADER),
                TAG_FROM_TYPE(BlockType),
                IsLowPriority ? LowPoolPriority : NormalPoolPriority
              );

    if( (newPool == NULL) && IsLowPriority && (KeGetCurrentIrql() <= APC_LEVEL) )
    {
        // Relative (negative) timeout: 15 ms in 100 ns units.
        interval.QuadPart = SRV_LOW_PRIORITY_RETRY_TIME;

        InterlockedIncrement( &SrvMemoryAllocationRetries );

        // Wait and try again
        KeDelayExecutionThread( KernelMode, FALSE, &interval );

        newPool = ExAllocatePoolWithTagPriority(
                    NonPagedPool,
                    NumberOfBytes + sizeof(POOL_HEADER),
                    TAG_FROM_TYPE(BlockType),
                    LowPoolPriority
                  );

        if( newPool )
        {
            InterlockedIncrement( &SrvMemoryAllocationRetriesSuccessful );
        }
    }

    //
    // If the system couldn't satisfy the request, return NULL.
    //
    if ( newPool != NULL ) {

        //
        // Save the size of this block in the extra space we allocated.
        //
        newPool->RequestedSize = NumberOfBytes;
        newPool->FreeList = FreeList;

        //
        // Return a pointer to the memory after the size longword.
        //
        return (PVOID)( newPool + 1 );
    }

    //
    // Count the failure, but do NOT log an event.  The scavenger
    // will log an event when it next wakes up.  This keeps us from
    // flooding the event log.
    //
    SrvStatistics.NonPagedPoolFailures++;

    InterlockedExchangeAdd( (PLONG)&SrvStatistics.CurrentNonPagedPoolUsage,
                            -(LONG)NumberOfBytes );
    return NULL;

} // SrvAllocateNonPagedPool
  308. VOID SRVFASTCALL
  309. SrvFreeNonPagedPool (
  310. IN PVOID Address
  311. )
  312. /*++
  313. Routine Description:
  314. Frees the memory allocated by a call to SrvAllocateNonPagedPool.
  315. The statistics database is updated to reflect the current nonpaged
  316. pool usage.
  317. Arguments:
  318. Address - the address of allocated memory returned by
  319. SrvAllocateNonPagedPool.
  320. Return Value:
  321. None.
  322. --*/
  323. {
  324. PPOOL_HEADER actualBlock = (PPOOL_HEADER)Address - 1;
  325. //
  326. // See if we can stash this bit of memory away in the NonPagedPoolFreeList
  327. //
  328. if( actualBlock->FreeList ) {
  329. actualBlock = SrvInterlockedFree( actualBlock );
  330. }
  331. if( actualBlock != NULL ) {
  332. //
  333. // Update the nonpaged pool usage statistic.
  334. //
  335. InterlockedExchangeAdd(
  336. (PLONG)&SrvStatistics.CurrentNonPagedPoolUsage,
  337. -(LONG)actualBlock->RequestedSize
  338. );
  339. //
  340. // Free the pool and return.
  341. //
  342. ExFreePool( actualBlock );
  343. }
  344. } // SrvFreeNonPagedPool
PVOID SRVFASTCALL
SrvAllocatePagedPool (
    IN POOL_TYPE PoolType,
    IN CLONG NumberOfBytes
#ifdef POOL_TAGGING
    , IN CLONG BlockType
#endif
    )

/*++

Routine Description:

    This routine allocates Paged pool in the server.  A check is
    made to ensure that the server's total Paged pool usage is below
    the configurable limit.

    Mirrors SrvAllocateNonPagedPool: lookaside fast path first, then
    charge-check-allocate against SrvMaxPagedPoolUsage, with a single
    low-priority retry for large (> 2 page) requests.

Arguments:

    PoolType - the pool type to pass through to the system allocator.

    NumberOfBytes - the number of bytes to allocate.

    BlockType - the type of block (used to pass pool tag to allocator)

Return Value:

    PVOID - a pointer to the allocated memory or NULL if the memory could
        not be allocated.

--*/

{
    PPOOL_HEADER newPool;
    PPOOL_HEADER *FreeList = NULL;
    ULONG newUsage;
    BOOLEAN IsLowPriority = FALSE;
    LARGE_INTEGER interval;

    PAGED_CODE();

#ifdef POOL_TAGGING
    ASSERT( BlockType > 0 && BlockType < BlockTypeMax );
#endif

    //
    // Pull this allocation off the per-processor free list if we can
    //
    if( SrvWorkQueues ) {

        PWORK_QUEUE queue = PROCESSOR_TO_QUEUE();

        if( NumberOfBytes <= queue->PagedPoolLookAsideList.MaxSize ) {

            newPool = SrvInterlockedAllocate(
                        &queue->PagedPoolLookAsideList,
                        NumberOfBytes,
                        (PLONG)&SrvStatistics.CurrentPagedPoolUsage
                      );

            if( newPool != NULL ) {
                // Already points past the header; already counted in usage.
                return newPool;
            }

            // Remember which slot array a fresh allocation should be
            // parked on when it is eventually freed.
            FreeList = NumberOfBytes > LOOK_ASIDE_SWITCHOVER ?
                        queue->PagedPoolLookAsideList.LargeFreeList :
                        queue->PagedPoolLookAsideList.SmallFreeList ;
        }
    }

    //
    // Account for this allocation in the statistics database and make
    // sure that this allocation will not put us over the limit of
    // paged pool that we can allocate.
    //
    // Charge first, check after: InterlockedExchangeAdd returns the
    // value *before* the add.  On failure the charge is backed out.
    //
    newUsage = InterlockedExchangeAdd( (PLONG)&SrvStatistics.CurrentPagedPoolUsage,
                                       (LONG)NumberOfBytes );
    newUsage += NumberOfBytes;

    if ( newUsage > SrvMaxPagedPoolUsage ) {

        //
        // Count the failure, but do NOT log an event.  The scavenger
        // will log an event when it next wakes up.  This keeps us from
        // flooding the event log.
        //
        SrvPagedPoolLimitHitCount++;
        SrvStatistics.PagedPoolFailures++;

        InterlockedExchangeAdd( (PLONG)&SrvStatistics.CurrentPagedPoolUsage,
                                -(LONG)NumberOfBytes );
        return NULL;
    }

    // Approximate peak tracking -- unsynchronized (statistics only).
    if (SrvStatistics.CurrentPagedPoolUsage > SrvStatistics.PeakPagedPoolUsage ) {
        SrvStatistics.PeakPagedPoolUsage = SrvStatistics.CurrentPagedPoolUsage;
    }

    //
    // Do the actual memory allocation.  Allocate extra space so that we
    // can store the size of the allocation for the free routine.
    //
    if( NumberOfBytes > 2*4096 )
    {
        // Large (> 2 pages) requests are made at low priority so they
        // fail early under memory pressure.
        IsLowPriority = TRUE;
    }

    newPool = ExAllocatePoolWithTagPriority(
                PoolType,
                NumberOfBytes + sizeof(POOL_HEADER),
                TAG_FROM_TYPE(BlockType),
                IsLowPriority ? LowPoolPriority : NormalPoolPriority
              );

    if( (newPool == NULL) && IsLowPriority && (KeGetCurrentIrql() <= APC_LEVEL) )
    {
        // Relative (negative) timeout: 15 ms in 100 ns units.
        interval.QuadPart = SRV_LOW_PRIORITY_RETRY_TIME;

        InterlockedIncrement( &SrvMemoryAllocationRetries );

        // Wait and try again
        KeDelayExecutionThread( KernelMode, FALSE, &interval );

        newPool = ExAllocatePoolWithTagPriority(
                    PoolType,
                    NumberOfBytes + sizeof(POOL_HEADER),
                    TAG_FROM_TYPE(BlockType),
                    LowPoolPriority
                  );

        if( newPool )
        {
            InterlockedIncrement( &SrvMemoryAllocationRetriesSuccessful );
        }
    }

    if( newPool != NULL ) {

        newPool->FreeList = FreeList;
        newPool->RequestedSize = NumberOfBytes;

        //
        // Return a pointer to the memory after the POOL_HEADER
        //
        return newPool + 1;
    }

    //
    // If the system couldn't satisfy the request, return NULL.
    //
    // Count the failure, but do NOT log an event.  The scavenger
    // will log an event when it next wakes up.  This keeps us from
    // flooding the event log.
    //
    SrvStatistics.PagedPoolFailures++;

    InterlockedExchangeAdd( (PLONG)&SrvStatistics.CurrentPagedPoolUsage,
                            -(LONG)NumberOfBytes );
    return NULL;

} // SrvAllocatePagedPool
  468. VOID SRVFASTCALL
  469. SrvFreePagedPool (
  470. IN PVOID Address
  471. )
  472. /*++
  473. Routine Description:
  474. Frees the memory allocated by a call to SrvAllocatePagedPool.
  475. The statistics database is updated to reflect the current Paged
  476. pool usage. If this routine is change, look at scavengr.c
  477. Arguments:
  478. Address - the address of allocated memory returned by
  479. SrvAllocatePagedPool.
  480. Return Value:
  481. None.
  482. --*/
  483. {
  484. PPOOL_HEADER actualBlock = (PPOOL_HEADER)Address - 1;
  485. PAGED_CODE();
  486. ASSERT( actualBlock != NULL );
  487. //
  488. // See if we can stash this bit of memory away in the PagedPoolFreeList
  489. //
  490. if( actualBlock->FreeList ) {
  491. actualBlock = SrvInterlockedFree( actualBlock );
  492. }
  493. if( actualBlock != NULL ) {
  494. //
  495. // Update the Paged pool usage statistic.
  496. //
  497. ASSERT( SrvStatistics.CurrentPagedPoolUsage >= actualBlock->RequestedSize );
  498. InterlockedExchangeAdd(
  499. (PLONG)&SrvStatistics.CurrentPagedPoolUsage,
  500. -(LONG)actualBlock->RequestedSize
  501. );
  502. ASSERT( (LONG)SrvStatistics.CurrentPagedPoolUsage >= 0 );
  503. //
  504. // Free the pool and return.
  505. //
  506. ExFreePool( actualBlock );
  507. }
  508. } // SrvFreePagedPool