Source code of Windows XP (NT5): heappriv.h
/*++

Copyright (c) 1992  Microsoft Corporation

Module Name:

    heappriv.h

Abstract:

    Private include file used by the heap allocator (heap.c, heapdll.c and
    heapdbg.c)

Author:

    Steve Wood (stevewo) 25-Oct-1994

Revision History:

--*/

#ifndef _RTL_HEAP_PRIVATE_
#define _RTL_HEAP_PRIVATE_

#include "heappage.h"

//
// In private builds (PRERELEASE = 1) we allow using the new low fragmentation
// heap for processes that set the DisableLookaside registry key.  The main
// purpose is to allow testing of the new heap API.
//

#ifndef PRERELEASE
#define DISABLE_REGISTRY_TEST_HOOKS
#endif

//
// Disable FPO optimization so even retail builds get somewhat reasonable
// stack backtraces
//

#if i386
// #pragma optimize("y",off)
#endif

#if DBG
#define HEAPASSERT(exp) if (!(exp)) RtlAssert( #exp, __FILE__, __LINE__, NULL )
#else
#define HEAPASSERT(exp)
#endif

//
// Define the minimum lookaside list depth.
//

#define MINIMUM_LOOKASIDE_DEPTH 4

//
// This variable contains the fill pattern used for heap tail checking
//

extern const UCHAR CheckHeapFillPattern[ CHECK_HEAP_TAIL_SIZE ];

//
// Here are the locking routines for the heap (kernel and user)
//

#ifdef NTOS_KERNEL_RUNTIME

//
// Kernel mode heap uses the kernel resource package for locking
//

#define RtlInitializeLockRoutine(L) ExInitializeResourceLite((PERESOURCE)(L))
#define RtlAcquireLockRoutine(L)    ExAcquireResourceExclusiveLite((PERESOURCE)(L),TRUE)
#define RtlReleaseLockRoutine(L)    ExReleaseResourceLite((PERESOURCE)(L))
#define RtlDeleteLockRoutine(L)     ExDeleteResourceLite((PERESOURCE)(L))
#define RtlOkayToLockRoutine(L)     ExOkayToLockRoutineLite((PERESOURCE)(L))

#else // #ifdef NTOS_KERNEL_RUNTIME

//
// User mode heap uses the critical section package for locking
//

#ifndef PREALLOCATE_EVENT_MASK
#define PREALLOCATE_EVENT_MASK 0x80000000  // Defined only in dll\resource.c
#endif // PREALLOCATE_EVENT_MASK

#define RtlInitializeLockRoutine(L) RtlInitializeCriticalSectionAndSpinCount((PRTL_CRITICAL_SECTION)(L),(PREALLOCATE_EVENT_MASK | 4000))
#define RtlAcquireLockRoutine(L)    RtlEnterCriticalSection((PRTL_CRITICAL_SECTION)(L))
#define RtlReleaseLockRoutine(L)    RtlLeaveCriticalSection((PRTL_CRITICAL_SECTION)(L))
#define RtlDeleteLockRoutine(L)     RtlDeleteCriticalSection((PRTL_CRITICAL_SECTION)(L))
#define RtlOkayToLockRoutine(L)     NtdllOkayToLockRoutine((PVOID)(L))

#endif // #ifdef NTOS_KERNEL_RUNTIME
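//
// Illustrative usage sketch (an assumption, not part of the original header):
// both environments define the same four macro names, so heap code can be
// written once against them.  ExampleLockUsage is hypothetical; the macros
// cast the pointer to the appropriate lock type for the build.
//
#if 0
VOID
ExampleLockUsage( VOID )
{
    HEAP_LOCK Lock;     // ERESOURCE in kernel mode,
                        // RTL_CRITICAL_SECTION in user mode

    RtlInitializeLockRoutine( &Lock );
    RtlAcquireLockRoutine( &Lock );

    //
    // ... touch shared heap structures here ...
    //

    RtlReleaseLockRoutine( &Lock );
    RtlDeleteLockRoutine( &Lock );
}
#endif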
//
// Here are some debugging macros for the heap
//

#ifdef NTOS_KERNEL_RUNTIME

#define HEAP_DEBUG_FLAGS   0
#define DEBUG_HEAP(F)      FALSE
#define SET_LAST_STATUS(S) NOTHING;

#else // #ifdef NTOS_KERNEL_RUNTIME

#define HEAP_DEBUG_FLAGS (HEAP_VALIDATE_PARAMETERS_ENABLED |  \
                          HEAP_VALIDATE_ALL_ENABLED |         \
                          HEAP_CAPTURE_STACK_BACKTRACES |     \
                          HEAP_CREATE_ENABLE_TRACING |        \
                          HEAP_FLAG_PAGE_ALLOCS)

#define DEBUG_HEAP(F) ((F & HEAP_DEBUG_FLAGS) && !(F & HEAP_SKIP_VALIDATION_CHECKS))

#define SET_LAST_STATUS(S) {NtCurrentTeb()->LastErrorValue = RtlNtStatusToDosError( NtCurrentTeb()->LastStatusValue = (ULONG)(S) );}

#endif // #ifdef NTOS_KERNEL_RUNTIME
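//
// Illustrative sketch (an assumption about the callers in heap.c, not part
// of this header): the allocator entry points test DEBUG_HEAP against the
// caller's flags to decide whether to detour into the debug code path, and
// use SET_LAST_STATUS on failure paths.  ExampleAlloc is hypothetical, and
// RtlDebugAllocateHeap here names the assumed heapdbg.c detour.
//
#if 0
PVOID
ExampleAlloc( PVOID HeapHandle, ULONG Flags, SIZE_T Size )
{
    if (DEBUG_HEAP( Flags )) {

        return RtlDebugAllocateHeap( HeapHandle, Flags, Size );
    }

    SET_LAST_STATUS( STATUS_NO_MEMORY );    // failure path: records the TEB
                                            // LastStatusValue and the
                                            // translated LastErrorValue
    return NULL;
}
#endif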
//
// Here are the macros used for debug printing and breakpoints
//

#ifdef NTOS_KERNEL_RUNTIME

#define HeapDebugPrint( _x_ ) {DbgPrint _x_;}

#define HeapDebugBreak( _x_ ) {if (KdDebuggerEnabled) DbgBreakPoint();}

#else // #ifdef NTOS_KERNEL_RUNTIME

#define HeapDebugPrint( _x_ )                                       \
{                                                                   \
    PLIST_ENTRY _Module;                                            \
    PLDR_DATA_TABLE_ENTRY _Entry;                                   \
                                                                    \
    _Module = NtCurrentPeb()->Ldr->InLoadOrderModuleList.Flink;     \
    _Entry = CONTAINING_RECORD( _Module,                            \
                                LDR_DATA_TABLE_ENTRY,               \
                                InLoadOrderLinks);                  \
    DbgPrint("HEAP[%wZ]: ", &_Entry->BaseDllName);                  \
    DbgPrint _x_;                                                   \
}

#define HeapDebugBreak( _x_ )                                       \
{                                                                   \
    VOID RtlpBreakPointHeap( PVOID BadAddress );                    \
                                                                    \
    RtlpBreakPointHeap( (_x_) );                                    \
}

#endif // #ifdef NTOS_KERNEL_RUNTIME
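//
// Note that _x_ must be a complete, parenthesized DbgPrint argument list, so
// call sites use double parentheses, as RtlpRemoveFreeBlock does later in
// this file.  Block below is a hypothetical variable:
//
#if 0
    HeapDebugPrint(( "HEAP: Invalid block at %p\n", Block ));
    HeapDebugBreak( Block );
#endif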
//
// Virtual memory hook for virtual alloc functions
//

#ifdef NTOS_KERNEL_RUNTIME

#define RtlpHeapFreeVirtualMemory(P,A,S,F) \
    ZwFreeVirtualMemory(P,A,S,F)

#else // NTOS_KERNEL_RUNTIME

//
// The user mode call needs to go through the secmem virtual free as well,
// to update the per-heap memory counters
//

#define RtlpHeapFreeVirtualMemory(P,A,S,F) \
    RtlpSecMemFreeVirtualMemory(P,A,S,F)

#endif // NTOS_KERNEL_RUNTIME

//
// Implemented in heap.c
//

BOOLEAN
RtlpInitializeHeapSegment (
    IN PHEAP Heap,
    IN PHEAP_SEGMENT Segment,
    IN UCHAR SegmentIndex,
    IN ULONG Flags,
    IN PVOID BaseAddress,
    IN PVOID UnCommittedAddress,
    IN PVOID CommitLimitAddress
    );

PHEAP_FREE_ENTRY
RtlpCoalesceFreeBlocks (
    IN PHEAP Heap,
    IN PHEAP_FREE_ENTRY FreeBlock,
    IN OUT PSIZE_T FreeSize,
    IN BOOLEAN RemoveFromFreeList
    );

VOID
RtlpDeCommitFreeBlock (
    IN PHEAP Heap,
    IN PHEAP_FREE_ENTRY FreeBlock,
    IN SIZE_T FreeSize
    );

VOID
RtlpInsertFreeBlock (
    IN PHEAP Heap,
    IN PHEAP_FREE_ENTRY FreeBlock,
    IN SIZE_T FreeSize
    );

PHEAP_FREE_ENTRY
RtlpFindAndCommitPages (
    IN PHEAP Heap,
    IN PHEAP_SEGMENT Segment,
    IN OUT PSIZE_T Size,
    IN PVOID AddressWanted OPTIONAL
    );

PVOID
RtlAllocateHeapSlowly (
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN SIZE_T Size
    );

BOOLEAN
RtlFreeHeapSlowly (
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID BaseAddress
    );

SIZE_T
RtlpGetSizeOfBigBlock (
    IN PHEAP_ENTRY BusyBlock
    );

PHEAP_ENTRY_EXTRA
RtlpGetExtraStuffPointer (
    PHEAP_ENTRY BusyBlock
    );

BOOLEAN
RtlpCheckBusyBlockTail (
    IN PHEAP_ENTRY BusyBlock
    );

//
// Implemented in heapdll.c
//

VOID
RtlpAddHeapToProcessList (
    IN PHEAP Heap
    );

VOID
RtlpRemoveHeapFromProcessList (
    IN PHEAP Heap
    );

PHEAP_FREE_ENTRY
RtlpCoalesceHeap (
    IN PHEAP Heap
    );

BOOLEAN
RtlpCheckHeapSignature (
    IN PHEAP Heap,
    IN PCHAR Caller
    );

VOID
RtlDetectHeapLeaks();

//
// Implemented in heapdbg.c
//

BOOLEAN
RtlpValidateHeapEntry (
    IN PHEAP Heap,
    IN PHEAP_ENTRY BusyBlock,
    IN PCHAR Reason
    );

BOOLEAN
RtlpValidateHeap (
    IN PHEAP Heap,
    IN BOOLEAN AlwaysValidate
    );

VOID
RtlpUpdateHeapListIndex (
    USHORT OldIndex,
    USHORT NewIndex
    );

BOOLEAN
RtlpValidateHeapHeaders(
    IN PHEAP Heap,
    IN BOOLEAN Recompute
    );

#ifndef NTOS_KERNEL_RUNTIME

//
// Nondedicated free list optimization
//

#if DBG

//
// Define HEAP_VALIDATE_INDEX to activate validation of the index after each
// operation on the non-dedicated list.  This is only for debug testing, to
// make sure the list and the index stay consistent.
//

//#define HEAP_VALIDATE_INDEX

#endif // DBG

#define HEAP_FRONT_LOOKASIDE   1
#define HEAP_FRONT_LOWFRAGHEAP 2

#define RtlpGetLookasideHeap(H) \
    (((H)->FrontEndHeapType == HEAP_FRONT_LOOKASIDE) ? (H)->FrontEndHeap : NULL)

#define RtlpGetLowFragHeap(H) \
    (((H)->FrontEndHeapType == HEAP_FRONT_LOWFRAGHEAP) ? (H)->FrontEndHeap : NULL)

#define RtlpIsFrontHeapUnlocked(H) \
    ((H)->FrontHeapLockCount == 0)

#define RtlpLockFrontHeap(H)              \
    {                                     \
        (H)->FrontHeapLockCount += 1;     \
    }

#define RtlpUnlockFrontHeap(H)            \
    {                                     \
        (H)->FrontHeapLockCount -= 1;     \
    }

#define HEAP_INDEX_THRESHOLD 32

//
// Heap performance counter support
//

#define HEAP_OP_COUNT 2
#define HEAP_OP_ALLOC 0
#define HEAP_OP_FREE  1

//
// The time per operation is sampled once every (HEAP_SAMPLING_MASK + 1)
// operations
//

#define HEAP_SAMPLING_MASK  0x000001FF
#define HEAP_SAMPLING_COUNT 100
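//
// Worked through: HEAP_PERF_START_TIMER below samples when
// (Sequence++ & HEAP_SAMPLING_MASK) == 0, i.e. once every 0x200 == 512
// operations, and HEAP_PERF_STOP_TIMER folds each batch of
// HEAP_SAMPLING_COUNT == 100 samples into a running average kept in
// OperationTime[].
//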
typedef struct _HEAP_PERF_DATA {

    UINT64 CountFrequence;
    UINT64 OperationTime[HEAP_OP_COUNT];

    //
    // The data below are used only for sampling
    //

    ULONG  Sequence;
    UINT64 TempTime[HEAP_OP_COUNT];
    ULONG  TempCount[HEAP_OP_COUNT];

} HEAP_PERF_DATA, *PHEAP_PERF_DATA;

#define HEAP_PERF_DECLARE_TIMER() \
    UINT64 _HeapPerfStartTimer, _HeapPerfEndTimer;

#define HEAP_PERF_START_TIMER(H)                                            \
{                                                                           \
    PHEAP_INDEX HeapIndex = (PHEAP_INDEX)(H)->LargeBlocksIndex;             \
                                                                            \
    if ( (HeapIndex != NULL) &&                                             \
         (!((HeapIndex->PerfData.Sequence++) & HEAP_SAMPLING_MASK)) ) {     \
                                                                            \
        NtQueryPerformanceCounter( (PLARGE_INTEGER)&_HeapPerfStartTimer, NULL); \
                                                                            \
    } else {                                                                \
                                                                            \
        _HeapPerfStartTimer = 0;                                            \
    }                                                                       \
}

#define HEAP_PERF_STOP_TIMER(H,OP)                                          \
{                                                                           \
    if (_HeapPerfStartTimer) {                                              \
                                                                            \
        PHEAP_INDEX HeapIndex = (PHEAP_INDEX)(H)->LargeBlocksIndex;         \
                                                                            \
        NtQueryPerformanceCounter( (PLARGE_INTEGER)&_HeapPerfEndTimer, NULL); \
        HeapIndex->PerfData.TempTime[OP] += (_HeapPerfEndTimer - _HeapPerfStartTimer); \
                                                                            \
        if ((HeapIndex->PerfData.TempCount[OP]++) >= HEAP_SAMPLING_COUNT) { \
                                                                            \
            HeapIndex->PerfData.OperationTime[OP] =                         \
                HeapIndex->PerfData.TempTime[OP] / (HeapIndex->PerfData.TempCount[OP] - 1); \
                                                                            \
            HeapIndex->PerfData.TempCount[OP] = 0;                          \
            HeapIndex->PerfData.TempTime[OP] = 0;                           \
        }                                                                   \
    }                                                                       \
}
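//
// Illustrative usage sketch (an assumption, not code from this header): a
// timed operation brackets its body with the two macros, and
// HEAP_PERF_DECLARE_TIMER must appear among the local declarations of the
// same scope.  ExampleTimedAlloc is hypothetical.
//
#if 0
PVOID
ExampleTimedAlloc( PHEAP Heap, PVOID HeapHandle, ULONG Flags, SIZE_T Size )
{
    PVOID Block;

    HEAP_PERF_DECLARE_TIMER();

    HEAP_PERF_START_TIMER( Heap );

    Block = RtlAllocateHeapSlowly( HeapHandle, Flags, Size );

    HEAP_PERF_STOP_TIMER( Heap, HEAP_OP_ALLOC );

    return Block;
}
#endif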
#define RtlpRegisterOperation(H,S,Op)                                 \
{                                                                     \
    PHEAP_LOOKASIDE Lookaside;                                        \
                                                                      \
    if ( (Lookaside = (PHEAP_LOOKASIDE)RtlpGetLookasideHeap(H)) ) {   \
                                                                      \
        SIZE_T Index = (S) >> 10;                                     \
                                                                      \
        if (Index >= HEAP_MAXIMUM_FREELISTS) {                        \
                                                                      \
            Index = HEAP_MAXIMUM_FREELISTS - 1;                       \
        }                                                             \
                                                                      \
        Lookaside[Index].Counters[(Op)] += 1;                         \
    }                                                                 \
}

//
// The heap index structure
//

typedef struct _HEAP_INDEX {

    ULONG ArraySize;
    ULONG VirtualMemorySize;

    //
    // The timing counters are available only on heaps
    // with an index created
    //

    HEAP_PERF_DATA PerfData;

    LONG LargeBlocksCacheDepth;
    LONG LargeBlocksCacheMaxDepth;
    LONG LargeBlocksCacheMinDepth;
    LONG LargeBlocksCacheSequence;

    struct {
        ULONG Committs;
        ULONG Decommitts;
        LONG  LargestDepth;
        LONG  LargestRequiredDepth;
    } CacheStats;

    union {
        PULONG FreeListsInUseUlong;
        PUCHAR FreeListsInUseBytes;
    } u;

    PHEAP_FREE_ENTRY * FreeListHints;

} HEAP_INDEX, *PHEAP_INDEX;

//
// Macro for setting a bit in the freelist vector to indicate entries are
// present.
//

#define SET_INDEX_BIT( HeapIndex, AllocIndex )                  \
{                                                               \
    ULONG _Index_;                                              \
    ULONG _Bit_;                                                \
                                                                \
    _Index_ = (AllocIndex) >> 3;                                \
    _Bit_ = (1 << ((AllocIndex) & 7));                          \
                                                                \
    (HeapIndex)->u.FreeListsInUseBytes[ _Index_ ] |= _Bit_;     \
}

//
// Macro for clearing a bit in the freelist vector to indicate entries are
// not present.
//

#define CLEAR_INDEX_BIT( HeapIndex, AllocIndex )                \
{                                                               \
    ULONG _Index_;                                              \
    ULONG _Bit_;                                                \
                                                                \
    _Index_ = (AllocIndex) >> 3;                                \
    _Bit_ = (1 << ((AllocIndex) & 7));                          \
                                                                \
    (HeapIndex)->u.FreeListsInUseBytes[ _Index_ ] ^= _Bit_;     \
}
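//
// Worked example: AllocIndex == 19 gives _Index_ == 19 >> 3 == 2 and
// _Bit_ == 1 << (19 & 7) == 0x08, so FreeListsInUseBytes[2] |= 0x08.  Note
// that CLEAR_INDEX_BIT toggles with XOR rather than masking with AND-NOT,
// so it must only be used when the bit is known to be set.
//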
VOID
RtlpInitializeListIndex (
    IN PHEAP Heap
    );

PLIST_ENTRY
RtlpFindEntry (
    IN PHEAP Heap,
    IN ULONG Size
    );

VOID
RtlpUpdateIndexRemoveBlock (
    IN PHEAP Heap,
    IN PHEAP_FREE_ENTRY FreeEntry
    );

VOID
RtlpUpdateIndexInsertBlock (
    IN PHEAP Heap,
    IN PHEAP_FREE_ENTRY FreeEntry
    );

VOID
RtlpFlushCacheContents (
    IN PHEAP Heap
    );

extern LONG RtlpSequenceNumberTest;

#define RtlpCheckLargeCache(H)                                             \
{                                                                          \
    PHEAP_INDEX HeapIndex = (PHEAP_INDEX)(H)->LargeBlocksIndex;            \
                                                                           \
    if ((HeapIndex != NULL) &&                                             \
        (HeapIndex->LargeBlocksCacheSequence >= RtlpSequenceNumberTest)) { \
                                                                           \
        RtlpFlushCacheContents((H));                                       \
    }                                                                      \
}

VOID
RtlpFlushLargestCacheBlock (
    IN PHEAP Heap
    );

#ifdef HEAP_VALIDATE_INDEX

//
// The validation code for the index
//

BOOLEAN
RtlpValidateNonDedicatedList (
    IN PHEAP Heap
    );

#else // HEAP_VALIDATE_INDEX

#define RtlpValidateNonDedicatedList(H)

#endif // HEAP_VALIDATE_INDEX

#else // NTOS_KERNEL_RUNTIME

#define HEAP_PERF_DECLARE_TIMER()
#define HEAP_PERF_START_TIMER(H)
#define HEAP_PERF_STOP_TIMER(H,Op)
#define RtlpRegisterOperation(H,S,Op)

#define RtlpInitializeListIndex(H)
#define RtlpFindEntry(H,S) (NULL)
#define RtlpUpdateIndexRemoveBlock(H,F)
#define RtlpUpdateIndexInsertBlock(H,F)
#define RtlpCheckLargeCache(H)
#define RtlpValidateNonDedicatedList(H)

#endif // NTOS_KERNEL_RUNTIME

//
// An extra bitmap manipulation routine
//

#define RtlFindFirstSetRightMember(Set)                       \
    (((Set) & 0xFFFF) ?                                       \
        (((Set) & 0xFF) ?                                     \
            RtlpBitsClearLow[(Set) & 0xFF] :                  \
            RtlpBitsClearLow[((Set) >> 8) & 0xFF] + 8) :      \
        ((((Set) >> 16) & 0xFF) ?                             \
            RtlpBitsClearLow[ ((Set) >> 16) & 0xFF] + 16 :    \
            RtlpBitsClearLow[ (Set) >> 24] + 24)              \
    )
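//
// Worked example: for Set == 0x00000050 the low word and low byte are both
// nonzero, so the macro yields RtlpBitsClearLow[0x50] == 4, the index of the
// least significant set bit (0x50 == 0101 0000 binary, lowest set bit 4).
//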
//
// Macro for setting a bit in the freelist vector to indicate entries are
// present.
//

#define SET_FREELIST_BIT( H, FB )                                     \
{                                                                     \
    ULONG _Index_;                                                    \
    ULONG _Bit_;                                                      \
                                                                      \
    HEAPASSERT((FB)->Size < HEAP_MAXIMUM_FREELISTS);                  \
                                                                      \
    _Index_ = (FB)->Size >> 3;                                        \
    _Bit_ = (1 << ((FB)->Size & 7));                                  \
                                                                      \
    HEAPASSERT(((H)->u.FreeListsInUseBytes[ _Index_ ] & _Bit_) == 0); \
                                                                      \
    (H)->u.FreeListsInUseBytes[ _Index_ ] |= _Bit_;                   \
}

//
// Macro for clearing a bit in the freelist vector to indicate entries are
// not present.
//

#define CLEAR_FREELIST_BIT( H, FB )                                   \
{                                                                     \
    ULONG _Index_;                                                    \
    ULONG _Bit_;                                                      \
                                                                      \
    HEAPASSERT((FB)->Size < HEAP_MAXIMUM_FREELISTS);                  \
                                                                      \
    _Index_ = (FB)->Size >> 3;                                        \
    _Bit_ = (1 << ((FB)->Size & 7));                                  \
                                                                      \
    HEAPASSERT((H)->u.FreeListsInUseBytes[ _Index_ ] & _Bit_);        \
    HEAPASSERT(IsListEmpty(&(H)->FreeLists[ (FB)->Size ]));           \
                                                                      \
    (H)->u.FreeListsInUseBytes[ _Index_ ] ^= _Bit_;                   \
}

//
// This macro inserts a free block into the appropriate free list, including
// the [0] index list, with entry filling if necessary
//

#define RtlpInsertFreeBlockDirect( H, FB, SIZE )                          \
{                                                                         \
    PLIST_ENTRY _HEAD, _NEXT;                                             \
    PHEAP_FREE_ENTRY _FB1;                                                \
                                                                          \
    HEAPASSERT((FB)->Size == (SIZE));                                     \
    (FB)->Flags &= ~(HEAP_ENTRY_FILL_PATTERN |                            \
                     HEAP_ENTRY_EXTRA_PRESENT |                           \
                     HEAP_ENTRY_BUSY);                                    \
                                                                          \
    if ((H)->Flags & HEAP_FREE_CHECKING_ENABLED) {                        \
                                                                          \
        RtlFillMemoryUlong( (PCHAR)((FB) + 1),                            \
                            ((SIZE) << HEAP_GRANULARITY_SHIFT) -          \
                                sizeof( *(FB) ),                          \
                            FREE_HEAP_FILL );                             \
                                                                          \
        (FB)->Flags |= HEAP_ENTRY_FILL_PATTERN;                           \
    }                                                                     \
                                                                          \
    if ((SIZE) < HEAP_MAXIMUM_FREELISTS) {                                \
                                                                          \
        _HEAD = &(H)->FreeLists[ (SIZE) ];                                \
                                                                          \
        if (IsListEmpty(_HEAD)) {                                         \
                                                                          \
            SET_FREELIST_BIT( H, FB );                                    \
        }                                                                 \
                                                                          \
    } else {                                                              \
                                                                          \
        _HEAD = &(H)->FreeLists[ 0 ];                                     \
        _NEXT = (H)->LargeBlocksIndex ?                                   \
                    RtlpFindEntry(H, SIZE) :                              \
                    _HEAD->Flink;                                         \
                                                                          \
        while (_HEAD != _NEXT) {                                          \
                                                                          \
            _FB1 = CONTAINING_RECORD( _NEXT, HEAP_FREE_ENTRY, FreeList ); \
                                                                          \
            if ((SIZE) <= _FB1->Size) {                                   \
                                                                          \
                break;                                                    \
                                                                          \
            } else {                                                      \
                                                                          \
                _NEXT = _NEXT->Flink;                                     \
            }                                                             \
        }                                                                 \
                                                                          \
        _HEAD = _NEXT;                                                    \
    }                                                                     \
                                                                          \
    InsertTailList( _HEAD, &(FB)->FreeList );                             \
    RtlpUpdateIndexInsertBlock(H, FB);                                    \
    RtlpValidateNonDedicatedList(H);                                      \
}

//
// This version of RtlpInsertFreeBlockDirect does no filling.
//

#define RtlpFastInsertFreeBlockDirect( H, FB, SIZE )                \
{                                                                   \
    if ((SIZE) < HEAP_MAXIMUM_FREELISTS) {                          \
                                                                    \
        RtlpFastInsertDedicatedFreeBlockDirect( H, FB, SIZE );      \
                                                                    \
    } else {                                                        \
                                                                    \
        RtlpFastInsertNonDedicatedFreeBlockDirect( H, FB, SIZE );   \
    }                                                               \
}

//
// This version of RtlpInsertFreeBlockDirect only works for dedicated free
// lists and doesn't do any filling.
//

#define RtlpFastInsertDedicatedFreeBlockDirect( H, FB, SIZE )             \
{                                                                         \
    PLIST_ENTRY _HEAD;                                                    \
                                                                          \
    HEAPASSERT((FB)->Size == (SIZE));                                     \
                                                                          \
    if (!((FB)->Flags & HEAP_ENTRY_LAST_ENTRY)) {                         \
                                                                          \
        HEAPASSERT(((PHEAP_ENTRY)(FB) + (SIZE))->PreviousSize == (SIZE)); \
    }                                                                     \
                                                                          \
    (FB)->Flags &= HEAP_ENTRY_LAST_ENTRY;                                 \
                                                                          \
    _HEAD = &(H)->FreeLists[ (SIZE) ];                                    \
                                                                          \
    if (IsListEmpty(_HEAD)) {                                             \
                                                                          \
        SET_FREELIST_BIT( H, FB );                                        \
    }                                                                     \
                                                                          \
    InsertTailList( _HEAD, &(FB)->FreeList );                             \
}

//
// This version of RtlpInsertFreeBlockDirect only works for nondedicated free
// lists and doesn't do any filling.
//

#define RtlpFastInsertNonDedicatedFreeBlockDirect( H, FB, SIZE )          \
{                                                                         \
    PLIST_ENTRY _HEAD, _NEXT;                                             \
    PHEAP_FREE_ENTRY _FB1;                                                \
                                                                          \
    HEAPASSERT((FB)->Size == (SIZE));                                     \
                                                                          \
    if (!((FB)->Flags & HEAP_ENTRY_LAST_ENTRY)) {                         \
                                                                          \
        HEAPASSERT(((PHEAP_ENTRY)(FB) + (SIZE))->PreviousSize == (SIZE)); \
    }                                                                     \
                                                                          \
    (FB)->Flags &= (HEAP_ENTRY_LAST_ENTRY);                               \
                                                                          \
    _HEAD = &(H)->FreeLists[ 0 ];                                         \
    _NEXT = (H)->LargeBlocksIndex ?                                       \
                RtlpFindEntry(H, SIZE) :                                  \
                _HEAD->Flink;                                             \
                                                                          \
    while (_HEAD != _NEXT) {                                              \
                                                                          \
        _FB1 = CONTAINING_RECORD( _NEXT, HEAP_FREE_ENTRY, FreeList );     \
                                                                          \
        if ((SIZE) <= _FB1->Size) {                                       \
                                                                          \
            break;                                                        \
                                                                          \
        } else {                                                          \
                                                                          \
            _NEXT = _NEXT->Flink;                                         \
        }                                                                 \
    }                                                                     \
                                                                          \
    InsertTailList( _NEXT, &(FB)->FreeList );                             \
    RtlpUpdateIndexInsertBlock(H, FB);                                    \
    RtlpValidateNonDedicatedList(H);                                      \
}

//
// This macro removes a block from its free list, with fill checking if
// necessary
//

#define RtlpRemoveFreeBlock( H, FB )                                      \
{                                                                         \
    RtlpFastRemoveFreeBlock( H, FB )                                      \
                                                                          \
    if ((FB)->Flags & HEAP_ENTRY_FILL_PATTERN) {                          \
                                                                          \
        SIZE_T cb, cbEqual;                                               \
        PVOID p;                                                          \
                                                                          \
        cb = ((FB)->Size << HEAP_GRANULARITY_SHIFT) - sizeof( *(FB) );    \
                                                                          \
        if ((FB)->Flags & HEAP_ENTRY_EXTRA_PRESENT &&                     \
            cb > sizeof( HEAP_FREE_ENTRY_EXTRA )) {                       \
                                                                          \
            cb -= sizeof( HEAP_FREE_ENTRY_EXTRA );                        \
        }                                                                 \
                                                                          \
        cbEqual = RtlCompareMemoryUlong( (PCHAR)((FB) + 1),               \
                                         cb,                              \
                                         FREE_HEAP_FILL );                \
                                                                          \
        if (cbEqual != cb) {                                              \
                                                                          \
            HeapDebugPrint((                                              \
                "HEAP: Free Heap block %lx modified at %lx after it was freed\n", \
                (FB),                                                     \
                (PCHAR)((FB) + 1) + cbEqual ));                           \
                                                                          \
            HeapDebugBreak((FB));                                         \
        }                                                                 \
    }                                                                     \
}

//
// This version of RtlpRemoveFreeBlock does no fill checking
//

#define RtlpFastRemoveFreeBlock( H, FB )                            \
{                                                                   \
    PLIST_ENTRY _EX_Blink;                                          \
    PLIST_ENTRY _EX_Flink;                                          \
                                                                    \
    RtlpUpdateIndexRemoveBlock(H, FB);                              \
                                                                    \
    _EX_Flink = (FB)->FreeList.Flink;                               \
    _EX_Blink = (FB)->FreeList.Blink;                               \
                                                                    \
    _EX_Blink->Flink = _EX_Flink;                                   \
    _EX_Flink->Blink = _EX_Blink;                                   \
                                                                    \
    if ((_EX_Flink == _EX_Blink) &&                                 \
        ((FB)->Size < HEAP_MAXIMUM_FREELISTS)) {                    \
                                                                    \
        CLEAR_FREELIST_BIT( H, FB );                                \
    }                                                               \
                                                                    \
    RtlpValidateNonDedicatedList(H);                                \
}

//
// This version of RtlpRemoveFreeBlock only works for dedicated free lists
// (where we know that (FB)->Mask != 0) and doesn't do any fill checking
//

#define RtlpFastRemoveDedicatedFreeBlock( H, FB )                   \
{                                                                   \
    PLIST_ENTRY _EX_Blink;                                          \
    PLIST_ENTRY _EX_Flink;                                          \
                                                                    \
    _EX_Flink = (FB)->FreeList.Flink;                               \
    _EX_Blink = (FB)->FreeList.Blink;                               \
                                                                    \
    _EX_Blink->Flink = _EX_Flink;                                   \
    _EX_Flink->Blink = _EX_Blink;                                   \
                                                                    \
    if (_EX_Flink == _EX_Blink) {                                   \
                                                                    \
        CLEAR_FREELIST_BIT( H, FB );                                \
    }                                                               \
}

//
// This version of RtlpRemoveFreeBlock only works for nondedicated free lists
// (where we know that (FB)->Mask == 0) and doesn't do any fill checking
//

#define RtlpFastRemoveNonDedicatedFreeBlock( H, FB )                \
{                                                                   \
    RtlpUpdateIndexRemoveBlock(H, FB);                              \
    RemoveEntryList(&(FB)->FreeList);                               \
    RtlpValidateNonDedicatedList(H);                                \
}

//
// Heap tagging routines implemented in heapdll.c
//

#if DBG
#define IS_HEAP_TAGGING_ENABLED() (TRUE)
#else
#define IS_HEAP_TAGGING_ENABLED() (RtlGetNtGlobalFlags() & FLG_HEAP_ENABLE_TAGGING)
#endif // DBG

//
// ORDER IS IMPORTANT HERE...SEE RtlpUpdateTagEntry sources
//

typedef enum _HEAP_TAG_ACTION {
    AllocationAction,
    VirtualAllocationAction,
    FreeAction,
    VirtualFreeAction,
    ReAllocationAction,
    VirtualReAllocationAction
} HEAP_TAG_ACTION;

PWSTR
RtlpGetTagName (
    PHEAP Heap,
    USHORT TagIndex
    );

USHORT
RtlpUpdateTagEntry (
    PHEAP Heap,
    USHORT TagIndex,
    SIZE_T OldSize,     // Only valid for ReAllocation and Free actions
    SIZE_T NewSize,     // Only valid for ReAllocation and Allocation actions
    HEAP_TAG_ACTION Action
    );

VOID
RtlpResetTags (
    PHEAP Heap
    );

VOID
RtlpDestroyTags (
    PHEAP Heap
    );

//
// Define heap lookaside list allocation functions.
//

typedef struct _HEAP_LOOKASIDE {

    SLIST_HEADER ListHead;

    USHORT Depth;
    USHORT MaximumDepth;

    ULONG TotalAllocates;
    ULONG AllocateMisses;

    ULONG TotalFrees;
    ULONG FreeMisses;

    ULONG LastTotalAllocates;
    ULONG LastAllocateMisses;

    ULONG Counters[2];

} HEAP_LOOKASIDE, *PHEAP_LOOKASIDE;
NTKERNELAPI
VOID
RtlpInitializeHeapLookaside (
    IN PHEAP_LOOKASIDE Lookaside,
    IN USHORT Depth
    );

NTKERNELAPI
VOID
RtlpDeleteHeapLookaside (
    IN PHEAP_LOOKASIDE Lookaside
    );

VOID
RtlpAdjustHeapLookasideDepth (
    IN PHEAP_LOOKASIDE Lookaside
    );

NTKERNELAPI
PVOID
RtlpAllocateFromHeapLookaside (
    IN PHEAP_LOOKASIDE Lookaside
    );

NTKERNELAPI
BOOLEAN
RtlpFreeToHeapLookaside (
    IN PHEAP_LOOKASIDE Lookaside,
    IN PVOID Entry
    );
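//
// Illustrative sketch (an assumption about how heap.c consumes these, not
// part of this header): the lookaside front end is tried first and a miss
// falls through to the slow-path routine declared earlier; frees mirror this
// by falling back to RtlFreeHeapSlowly when RtlpFreeToHeapLookaside returns
// FALSE.  ExampleFrontEndAlloc and its size-class computation are
// hypothetical and simplified.
//
#if 0
PVOID
ExampleFrontEndAlloc( PHEAP Heap, PVOID HeapHandle, ULONG Flags, SIZE_T Size )
{
    PHEAP_LOOKASIDE Lookaside = (PHEAP_LOOKASIDE)RtlpGetLookasideHeap( Heap );
    SIZE_T Index = Size >> HEAP_GRANULARITY_SHIFT;  // simplified size class
    PVOID Block;

    Block = RtlpAllocateFromHeapLookaside( &Lookaside[Index] );

    if (Block == NULL) {

        Block = RtlAllocateHeapSlowly( HeapHandle, Flags, Size );   // miss
    }

    return Block;
}
#endif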
#ifndef NTOS_KERNEL_RUNTIME

//
// Low Fragmentation Heap data structures and internal APIs
//

//
// The memory barrier exists on IA64 only
//

#if defined(_IA64_)

#define RtlMemoryBarrier() __mf ()

#else // #if defined(_IA64_)

//
// On x86 and AMD64 the memory barrier is ignored (it expands to nothing)
//

#define RtlMemoryBarrier()

#endif // #if defined(_IA64_)

extern ULONG RtlpDisableHeapLookaside;

#define HEAP_ENABLE_LOW_FRAG_HEAP 8

typedef struct _BLOCK_ENTRY {
    HEAP_ENTRY;
    USHORT LinkOffset;
    USHORT Reserved2;
} BLOCK_ENTRY, *PBLOCK_ENTRY;

typedef struct _INTERLOCK_SEQ {
    union {
        struct {
            union {
                struct {
                    USHORT Depth;
                    USHORT FreeEntryOffset;
                };
                volatile ULONG OffsetAndDepth;
            };
            volatile ULONG Sequence;
        };
        volatile LONGLONG Exchg;
    };
} INTERLOCK_SEQ, *PINTERLOCK_SEQ;
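//
// Illustrative sketch (an assumption, not code from this header): the union
// layout lets the LFH read and update Depth, FreeEntryOffset and Sequence
// together with a single 64-bit compare-exchange on Exchg, with Sequence
// guarding against ABA reuse.  ExamplePopBlock, NextOffset, and the use of
// the _InterlockedCompareExchange64 intrinsic are all hypothetical.
//
#if 0
VOID
ExamplePopBlock( PHEAP_SUBSEGMENT SubSegment, USHORT NextOffset )
{
    INTERLOCK_SEQ Capture, NewValue;

    do {

        Capture.Exchg = SubSegment->AggregateExchg.Exchg;   // snapshot all 64 bits

        NewValue.Depth = Capture.Depth - 1;                 // e.g. pop one free block
        NewValue.FreeEntryOffset = NextOffset;              // offset of next free block
        NewValue.Sequence = Capture.Sequence + 1;           // defeats ABA reuse

    } while (_InterlockedCompareExchange64(
                 (volatile LONGLONG *)&SubSegment->AggregateExchg.Exchg,
                 NewValue.Exchg,
                 Capture.Exchg ) != Capture.Exchg);
}
#endif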
struct _HEAP_USERDATA_HEADER;

typedef struct _HEAP_SUBSEGMENT {

    PVOID Bucket;
    volatile struct _HEAP_USERDATA_HEADER * UserBlocks;
    INTERLOCK_SEQ AggregateExchg;

    union {
        struct {
            USHORT BlockSize;
            USHORT FreeThreshold;
            USHORT BlockCount;
            UCHAR SizeIndex;
            UCHAR AffinityIndex;
        };
        ULONG Alignment[2];
    };

    SINGLE_LIST_ENTRY SFreeListEntry;
    volatile ULONG Lock;

} HEAP_SUBSEGMENT, *PHEAP_SUBSEGMENT;

typedef struct _HEAP_USERDATA_HEADER {

    union {
        SINGLE_LIST_ENTRY SFreeListEntry;
        PHEAP_SUBSEGMENT SubSegment;
    };

    PVOID HeapHandle;
    ULONG_PTR SizeIndex;
    ULONG_PTR Signature;

} HEAP_USERDATA_HEADER, *PHEAP_USERDATA_HEADER;

#define HEAP_LFH_INDEX          0xFF
#define HEAP_LFH_IN_CONVERSION  0xFE

#define HEAP_NO_CACHE_BLOCK     0x800000
#define HEAP_LARGEST_LFH_BLOCK  0x4000

#define HEAP_LFH_USER_SIGNATURE 0xF0E0D0C0

#ifdef DISABLE_REGISTRY_TEST_HOOKS

#define RtlpIsLowFragHeapEnabled() FALSE

#else // DISABLE_REGISTRY_TEST_HOOKS

#define RtlpIsLowFragHeapEnabled() \
    ((RtlpDisableHeapLookaside & HEAP_ENABLE_LOW_FRAG_HEAP) != 0)

#endif // DISABLE_REGISTRY_TEST_HOOKS

ULONG
FORCEINLINE
RtlpGetAllocationUnits(
    PHEAP Heap,
    PHEAP_ENTRY Block
    )
{
    PHEAP_SUBSEGMENT SubSegment = (PHEAP_SUBSEGMENT)Block->SubSegment;

    RtlMemoryBarrier();

    if (Block->SegmentIndex == HEAP_LFH_INDEX) {

        ULONG ReturnSize = *((volatile USHORT *)&SubSegment->BlockSize);

        //
        // ISSUE: Work around the x86 compiler bug which eliminates the
        // second test if (Block->SegmentIndex == HEAP_LFH_INDEX)
        //

#if !defined(_WIN64)
        _asm {
            nop
        }
#endif

        RtlMemoryBarrier();

        if (Block->SegmentIndex == HEAP_LFH_INDEX) {

            return ReturnSize;
        }
    }

    if (Block->SegmentIndex == HEAP_LFH_IN_CONVERSION) {

        //
        // This should be a very rare case, hit only when the query happens
        // in the small window while the conversion code is setting the Size
        // and PrevSize fields.  Acquiring and releasing the heap lock makes
        // sure the conversion has completed, so we can read the block size
        // as for a regular block.
        //

        RtlLockHeap(Heap);
        RtlUnlockHeap(Heap);
    }

    return Block->Size;
}

VOID
FORCEINLINE
RtlpSetUnusedBytes(PHEAP Heap, PHEAP_ENTRY Block, SIZE_T UnusedBytes)
{
    if (UnusedBytes < 0xff) {

        Block->UnusedBytes = (UCHAR)(UnusedBytes);

    } else {

        PSIZE_T UnusedBytesULong = (PSIZE_T)(Block + RtlpGetAllocationUnits(Heap, Block));

        UnusedBytesULong -= 1;

        Block->UnusedBytes = 0xff;
        *UnusedBytesULong = UnusedBytes;
    }
}

SIZE_T
FORCEINLINE
RtlpGetUnusedBytes(PHEAP Heap, PHEAP_ENTRY Block)
{
    if (Block->UnusedBytes < 0xff) {

        return Block->UnusedBytes;

    } else {

        PSIZE_T UnusedBytesULong = (PSIZE_T)(Block + RtlpGetAllocationUnits(Heap, Block));

        UnusedBytesULong -= 1;

        return (*UnusedBytesULong);
    }
}
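//
// Layout note: when the unused byte count does not fit in the UCHAR field,
// Block->UnusedBytes holds the escape value 0xff and the exact SIZE_T count
// is stored in the last pointer-sized slot of the block, i.e. at
// ((PSIZE_T)(Block + RtlpGetAllocationUnits(Heap, Block))) - 1, which both
// routines above compute the same way.
//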
VOID
RtlpInitializeLowFragHeapManager();

HANDLE
FASTCALL
RtlpCreateLowFragHeap(
    HANDLE Heap
    );

VOID
FASTCALL
RtlpDestroyLowFragHeap(
    HANDLE LowFragHeapHandle
    );

PVOID
FASTCALL
RtlpLowFragHeapAlloc(
    HANDLE LowFragHeapHandle,
    SIZE_T BlockSize
    );

BOOLEAN
FASTCALL
RtlpLowFragHeapFree(
    HANDLE LowFragHeapHandle,
    PVOID p
    );

NTSTATUS
RtlpActivateLowFragmentationHeap(
    IN PVOID HeapHandle
    );

#else // NTOS_KERNEL_RUNTIME

//
// The kernel mode heap does not adjust the heap granularity, so the unused
// byte count always fits in the UCHAR field.  No need to check for overflow
// here.
//

ULONG
FORCEINLINE
RtlpGetAllocationUnits(
    PHEAP Heap,
    PHEAP_ENTRY Block
    )
{
    return Block->Size;
}

VOID
FORCEINLINE
RtlpSetUnusedBytes(PHEAP Heap, PHEAP_ENTRY Block, SIZE_T UnusedBytes)
{
    Block->UnusedBytes = (UCHAR)(UnusedBytes);
}

SIZE_T
FORCEINLINE
RtlpGetUnusedBytes(PHEAP Heap, PHEAP_ENTRY Block)
{
    return Block->UnusedBytes;
}

#endif // NTOS_KERNEL_RUNTIME

#endif // _RTL_HEAP_PRIVATE_