Leaked source code of Windows Server 2003

/*++

Copyright (c) 1992  Microsoft Corporation

Module Name:

    heappriv.h

Abstract:

    Private include file used by heap allocator (heap.c, heapdll.c and
    heapdbg.c)

Author:

    Steve Wood (stevewo) 25-Oct-1994

Revision History:

--*/

#ifndef _RTL_HEAP_PRIVATE_
#define _RTL_HEAP_PRIVATE_

#include "heappage.h"

//
// In private builds (PRERELEASE = 1) we allow using the new low fragmentation heap
// for processes that set the DisableLookaside registry key. The main purpose is to
// allow testing the new heap API.
//

#ifndef PRERELEASE
#define DISABLE_REGISTRY_TEST_HOOKS
#endif

//
// Disable FPO optimization so even retail builds get somewhat reasonable
// stack backtraces
//

#if i386
// #pragma optimize("y",off)
#endif

#if DBG
#define HEAPASSERT(exp) if (!(exp)) RtlAssert( #exp, __FILE__, __LINE__, NULL )
#else
#define HEAPASSERT(exp)
#endif

//
// Define Minimum lookaside list depth.
//

#define MINIMUM_LOOKASIDE_DEPTH 4

//
// This variable contains the fill pattern used for heap tail checking
//

extern const UCHAR CheckHeapFillPattern[ CHECK_HEAP_TAIL_SIZE ];

//
// Here are the locking routines for the heap (kernel and user)
//

#ifdef NTOS_KERNEL_RUNTIME

//
// Kernel mode heap uses the kernel resource package for locking
//

#define RtlInitializeLockRoutine(L) ExInitializeResourceLite((PERESOURCE)(L))
#define RtlAcquireLockRoutine(L) ExAcquireResourceExclusiveLite((PERESOURCE)(L),TRUE)
#define RtlReleaseLockRoutine(L) ExReleaseResourceLite((PERESOURCE)(L))
#define RtlDeleteLockRoutine(L) ExDeleteResourceLite((PERESOURCE)(L))
#define RtlOkayToLockRoutine(L) ExOkayToLockRoutineLite((PERESOURCE)(L))
#else // #ifdef NTOS_KERNEL_RUNTIME

//
// User mode heap uses the critical section package for locking
//

#ifndef PREALLOCATE_EVENT_MASK
#define PREALLOCATE_EVENT_MASK 0x80000000 // Defined only in dll\resource.c
#endif // PREALLOCATE_EVENT_MASK

#define RtlInitializeLockRoutine(L) RtlInitializeCriticalSectionAndSpinCount((PRTL_CRITICAL_SECTION)(L),(PREALLOCATE_EVENT_MASK | 4000))
#define RtlAcquireLockRoutine(L) RtlEnterCriticalSection((PRTL_CRITICAL_SECTION)(L))
#define RtlReleaseLockRoutine(L) RtlLeaveCriticalSection((PRTL_CRITICAL_SECTION)(L))
#define RtlDeleteLockRoutine(L) RtlDeleteCriticalSection((PRTL_CRITICAL_SECTION)(L))
#define RtlOkayToLockRoutine(L) NtdllOkayToLockRoutine((PVOID)(L))

#endif // #ifdef NTOS_KERNEL_RUNTIME
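
//
// Illustrative usage (added note, not part of the original file; the field
// name Heap->LockVariable is assumed from the public HEAP layout): callers
// serialize through these wrappers so the same call sites compile against
// an ERESOURCE in kernel mode and an RTL_CRITICAL_SECTION in user mode:
//
//     if (!(Flags & HEAP_NO_SERIALIZE)) {
//         RtlAcquireLockRoutine( Heap->LockVariable );
//     }
//
//     ... operate on the heap ...
//
//     RtlReleaseLockRoutine( Heap->LockVariable );
//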
//
// Here are some debugging macros for the heap
//

#ifdef NTOS_KERNEL_RUNTIME

#define HEAP_DEBUG_FLAGS 0
#define DEBUG_HEAP(F) FALSE
#define SET_LAST_STATUS(S) NOTHING;

#else // #ifdef NTOS_KERNEL_RUNTIME

#define HEAP_DEBUG_FLAGS (HEAP_VALIDATE_PARAMETERS_ENABLED | \
                          HEAP_VALIDATE_ALL_ENABLED | \
                          HEAP_CAPTURE_STACK_BACKTRACES | \
                          HEAP_CREATE_ENABLE_TRACING | \
                          HEAP_FLAG_PAGE_ALLOCS)

#define DEBUG_HEAP(F) ((F & HEAP_DEBUG_FLAGS) && !(F & HEAP_SKIP_VALIDATION_CHECKS))

#define SET_LAST_STATUS(S) {NtCurrentTeb()->LastErrorValue = RtlNtStatusToDosError( NtCurrentTeb()->LastStatusValue = (ULONG)(S) );}

#endif // #ifdef NTOS_KERNEL_RUNTIME
//
// Here are the macros used for debug printing and breakpoints
//

#ifdef NTOS_KERNEL_RUNTIME

#define HeapDebugPrint( _x_ ) {DbgPrint _x_;}

#define HeapDebugBreak( _x_ ) {if (KdDebuggerEnabled) DbgBreakPoint();}

#else // #ifdef NTOS_KERNEL_RUNTIME

#define HeapDebugPrint( _x_ ) \
{ \
    PLIST_ENTRY _Module; \
    PLDR_DATA_TABLE_ENTRY _Entry; \
 \
    _Module = NtCurrentPeb()->Ldr->InLoadOrderModuleList.Flink; \
    _Entry = CONTAINING_RECORD( _Module, \
                                LDR_DATA_TABLE_ENTRY, \
                                InLoadOrderLinks); \
    DbgPrint("HEAP[%wZ]: ", &_Entry->BaseDllName); \
    DbgPrint _x_; \
}

#define HeapDebugBreak( _x_ ) \
{ \
    VOID RtlpBreakPointHeap( PVOID BadAddress ); \
 \
    RtlpBreakPointHeap( (_x_) ); \
}

#endif // #ifdef NTOS_KERNEL_RUNTIME
//
// Virtual memory hook for virtual alloc functions
//

#ifdef NTOS_KERNEL_RUNTIME

#define RtlpHeapFreeVirtualMemory(P,A,S,F) \
    ZwFreeVirtualMemory(P,A,S,F)

#else // NTOS_KERNEL_RUNTIME

//
// The user-mode path must also go through the secmem virtual free
// so that the per-heap memory counters stay up to date
//

#define RtlpHeapFreeVirtualMemory(P,A,S,F) \
    RtlpSecMemFreeVirtualMemory(P,A,S,F)

#endif // NTOS_KERNEL_RUNTIME
ULONG
RtlpHeapExceptionFilter (
    NTSTATUS ExceptionCode
    );

//
// Implemented in heap.c
//

BOOLEAN
RtlpInitializeHeapSegment (
    IN PHEAP Heap,
    IN PHEAP_SEGMENT Segment,
    IN UCHAR SegmentIndex,
    IN ULONG Flags,
    IN PVOID BaseAddress,
    IN PVOID UnCommittedAddress,
    IN PVOID CommitLimitAddress
    );

PHEAP_FREE_ENTRY
RtlpCoalesceFreeBlocks (
    IN PHEAP Heap,
    IN PHEAP_FREE_ENTRY FreeBlock,
    IN OUT PSIZE_T FreeSize,
    IN BOOLEAN RemoveFromFreeList
    );

VOID
RtlpDeCommitFreeBlock (
    IN PHEAP Heap,
    IN PHEAP_FREE_ENTRY FreeBlock,
    IN SIZE_T FreeSize
    );

VOID
RtlpInsertFreeBlock (
    IN PHEAP Heap,
    IN PHEAP_FREE_ENTRY FreeBlock,
    IN SIZE_T FreeSize
    );

PHEAP_FREE_ENTRY
RtlpFindAndCommitPages (
    IN PHEAP Heap,
    IN PHEAP_SEGMENT Segment,
    IN OUT PSIZE_T Size,
    IN PVOID AddressWanted OPTIONAL
    );

PVOID
RtlAllocateHeapSlowly (
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN SIZE_T Size
    );

BOOLEAN
RtlFreeHeapSlowly (
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID BaseAddress
    );

SIZE_T
RtlpGetSizeOfBigBlock (
    IN PHEAP_ENTRY BusyBlock
    );

PHEAP_ENTRY_EXTRA
RtlpGetExtraStuffPointer (
    PHEAP_ENTRY BusyBlock
    );

BOOLEAN
RtlpCheckBusyBlockTail (
    IN PHEAP_ENTRY BusyBlock
    );

//
// Implemented in heapdll.c
//

VOID
RtlpAddHeapToProcessList (
    IN PHEAP Heap
    );

VOID
RtlpRemoveHeapFromProcessList (
    IN PHEAP Heap
    );

PHEAP_FREE_ENTRY
RtlpCoalesceHeap (
    IN PHEAP Heap
    );

BOOLEAN
RtlpCheckHeapSignature (
    IN PHEAP Heap,
    IN PCHAR Caller
    );

VOID
RtlDetectHeapLeaks();

//
// Implemented in heapdbg.c
//

BOOLEAN
RtlpValidateHeapEntry (
    IN PHEAP Heap,
    IN PHEAP_ENTRY BusyBlock,
    IN PCHAR Reason
    );

BOOLEAN
RtlpValidateHeap (
    IN PHEAP Heap,
    IN BOOLEAN AlwaysValidate
    );

VOID
RtlpUpdateHeapListIndex (
    USHORT OldIndex,
    USHORT NewIndex
    );

BOOLEAN
RtlpValidateHeapHeaders(
    IN PHEAP Heap,
    IN BOOLEAN Recompute
    );
#ifndef NTOS_KERNEL_RUNTIME

//
// Nondedicated free list optimization
//

#if DBG

//
// Define HEAP_VALIDATE_INDEX to activate a validation of the index
// after each operation with the non-dedicated list.
// This is only for debug testing, to make sure the list and the index
// stay consistent
//

//#define HEAP_VALIDATE_INDEX

#endif // DBG

#define HEAP_FRONT_LOOKASIDE 1
#define HEAP_FRONT_LOWFRAGHEAP 2

#define RtlpGetLookasideHeap(H) \
    (((H)->FrontEndHeapType == HEAP_FRONT_LOOKASIDE) ? (H)->FrontEndHeap : NULL)

#define RtlpGetLowFragHeap(H) \
    (((H)->FrontEndHeapType == HEAP_FRONT_LOWFRAGHEAP) ? (H)->FrontEndHeap : NULL)

#define RtlpIsFrontHeapUnlocked(H) \
    ((H)->FrontHeapLockCount == 0)

#define RtlpLockFrontHeap(H) \
{ \
    (H)->FrontHeapLockCount += 1; \
}

#define RtlpUnlockFrontHeap(H) \
{ \
    (H)->FrontHeapLockCount -= 1; \
}

#define HEAP_INDEX_THRESHOLD 32

//
// Heap performance counter support
//

#define HEAP_OP_COUNT 2
#define HEAP_OP_ALLOC 0
#define HEAP_OP_FREE 1

//
// The time per operation is sampled: only one call out of
// (HEAP_SAMPLING_MASK + 1) is actually timed
//

#define HEAP_SAMPLING_MASK 0x000001FF
#define HEAP_SAMPLING_COUNT 100
typedef struct _HEAP_PERF_DATA {

    UINT64 CountFrequence;
    UINT64 OperationTime[HEAP_OP_COUNT];

    //
    // The data below are only for sampling
    //

    ULONG Sequence;
    UINT64 TempTime[HEAP_OP_COUNT];
    ULONG TempCount[HEAP_OP_COUNT];

} HEAP_PERF_DATA, *PHEAP_PERF_DATA;

#define HEAP_PERF_DECLARE_TIMER() \
    UINT64 _HeapPerfStartTimer, _HeapPerfEndTimer;

#define HEAP_PERF_START_TIMER(H) \
{ \
    PHEAP_INDEX HeapIndex = (PHEAP_INDEX)(H)->LargeBlocksIndex; \
    if ( (HeapIndex != NULL) && \
         (!((HeapIndex->PerfData.Sequence++) & HEAP_SAMPLING_MASK)) ) { \
 \
        NtQueryPerformanceCounter( (PLARGE_INTEGER)&_HeapPerfStartTimer , NULL); \
    } else { \
        _HeapPerfStartTimer = 0; \
    } \
}

#define HEAP_PERF_STOP_TIMER(H,OP) \
{ \
    if (_HeapPerfStartTimer) { \
        PHEAP_INDEX HeapIndex = (PHEAP_INDEX)(H)->LargeBlocksIndex; \
 \
        NtQueryPerformanceCounter( (PLARGE_INTEGER)&_HeapPerfEndTimer , NULL); \
        HeapIndex->PerfData.TempTime[OP] += (_HeapPerfEndTimer - _HeapPerfStartTimer); \
 \
        if ((HeapIndex->PerfData.TempCount[OP]++) >= HEAP_SAMPLING_COUNT) { \
            HeapIndex->PerfData.OperationTime[OP] = HeapIndex->PerfData.TempTime[OP] / (HeapIndex->PerfData.TempCount[OP] - 1); \
 \
            HeapIndex->PerfData.TempCount[OP] = 0; \
            HeapIndex->PerfData.TempTime[OP] = 0; \
        } \
    } \
}
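
//
// Illustrative usage of the sampling timers (added note; the exact call
// sites live in heap.c). A timed path brackets the operation:
//
//     HEAP_PERF_DECLARE_TIMER();
//     HEAP_PERF_START_TIMER( Heap );
//     ... perform the allocation ...
//     HEAP_PERF_STOP_TIMER( Heap, HEAP_OP_ALLOC );
//
// With HEAP_SAMPLING_MASK == 0x1FF only one call in 512 reads the
// performance counter, keeping the measurement overhead negligible.
//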
#define RtlpRegisterOperation(H,S,Op) \
{ \
    PHEAP_LOOKASIDE Lookaside; \
 \
    if ( (Lookaside = (PHEAP_LOOKASIDE)RtlpGetLookasideHeap(H)) ) { \
 \
        SIZE_T Index = (S) >> 10; \
 \
        if (Index >= HEAP_MAXIMUM_FREELISTS) { \
 \
            Index = HEAP_MAXIMUM_FREELISTS - 1; \
        } \
 \
        Lookaside[Index].Counters[(Op)] += 1; \
    } \
}
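
//
// The (S) >> 10 above buckets the request size in 1 KB granules and bumps
// the per-bucket counter for the given operation (HEAP_OP_ALLOC or
// HEAP_OP_FREE): a 3000-byte request lands in slot 2, and anything at or
// beyond HEAP_MAXIMUM_FREELISTS KB folds into the last slot.
//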
//
// The heap index structure
//

typedef struct _HEAP_INDEX {

    ULONG ArraySize;
    ULONG VirtualMemorySize;

    //
    // The timing counters are available only on heaps
    // with an index created
    //

    HEAP_PERF_DATA PerfData;

    LONG LargeBlocksCacheDepth;
    LONG LargeBlocksCacheMaxDepth;
    LONG LargeBlocksCacheMinDepth;
    LONG LargeBlocksCacheSequence;

    struct {
        ULONG Committs;
        ULONG Decommitts;
        LONG LargestDepth;
        LONG LargestRequiredDepth;
    } CacheStats;

    union {
        PULONG FreeListsInUseUlong;
        PUCHAR FreeListsInUseBytes;
    } u;

    PHEAP_FREE_ENTRY * FreeListHints;

} HEAP_INDEX, *PHEAP_INDEX;
//
// Macro for setting a bit in the freelist vector to indicate entries are
// present.
//

#define SET_INDEX_BIT( HeapIndex, AllocIndex ) \
{ \
    ULONG _Index_; \
    ULONG _Bit_; \
 \
    _Index_ = (AllocIndex) >> 3; \
    _Bit_ = (1 << ((AllocIndex) & 7)); \
 \
    (HeapIndex)->u.FreeListsInUseBytes[ _Index_ ] |= _Bit_; \
}

//
// Macro for clearing a bit in the freelist vector to indicate entries are
// not present.
//

#define CLEAR_INDEX_BIT( HeapIndex, AllocIndex ) \
{ \
    ULONG _Index_; \
    ULONG _Bit_; \
 \
    _Index_ = (AllocIndex) >> 3; \
    _Bit_ = (1 << ((AllocIndex) & 7)); \
 \
    (HeapIndex)->u.FreeListsInUseBytes[ _Index_ ] ^= _Bit_; \
}
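
//
// Worked example (added for clarity): for AllocIndex == 11 both macros
// address byte 11 >> 3 == 1 with bit mask 1 << (11 & 7) == 0x08, i.e. bit 3
// of FreeListsInUseBytes[1]. Note CLEAR_INDEX_BIT uses XOR rather than
// AND-NOT, which only clears correctly because the bit is known to be set
// whenever the macro is invoked.
//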
VOID
RtlpInitializeListIndex (
    IN PHEAP Heap
    );

PLIST_ENTRY
RtlpFindEntry (
    IN PHEAP Heap,
    IN ULONG Size
    );

VOID
RtlpUpdateIndexRemoveBlock (
    IN PHEAP Heap,
    IN PHEAP_FREE_ENTRY FreeEntry
    );

VOID
RtlpUpdateIndexInsertBlock (
    IN PHEAP Heap,
    IN PHEAP_FREE_ENTRY FreeEntry
    );

VOID
RtlpFlushCacheContents (
    IN PHEAP Heap
    );

extern LONG RtlpSequenceNumberTest;

#define RtlpCheckLargeCache(H) \
{ \
    PHEAP_INDEX HeapIndex = (PHEAP_INDEX)(H)->LargeBlocksIndex; \
    if ((HeapIndex != NULL) && \
        (HeapIndex->LargeBlocksCacheSequence >= RtlpSequenceNumberTest)) { \
 \
        RtlpFlushCacheContents((H)); \
    } \
}
VOID
RtlpFlushLargestCacheBlock (
    IN PHEAP Heap
    );

#ifdef HEAP_VALIDATE_INDEX

//
// The validation code for index
//

BOOLEAN
RtlpValidateNonDedicatedList (
    IN PHEAP Heap
    );

#else // HEAP_VALIDATE_INDEX

#define RtlpValidateNonDedicatedList(H)

#endif // HEAP_VALIDATE_INDEX

#else // NTOS_KERNEL_RUNTIME

#define HEAP_PERF_DECLARE_TIMER()
#define HEAP_PERF_START_TIMER(H)
#define HEAP_PERF_STOP_TIMER(H,Op)
#define RtlpRegisterOperation(H,S,Op)
#define RtlpInitializeListIndex(H)
#define RtlpFindEntry(H,S) (NULL)
#define RtlpUpdateIndexRemoveBlock(H,F)
#define RtlpUpdateIndexInsertBlock(H,F)
#define RtlpCheckLargeCache(H)
#define RtlpValidateNonDedicatedList(H)

#endif // NTOS_KERNEL_RUNTIME

//
// An extra bitmap manipulation routine
//

#define RtlFindFirstSetRightMember(Set) \
    (((Set) & 0xFFFF) ? \
        (((Set) & 0xFF) ? \
            RtlpBitsClearLow[(Set) & 0xFF] : \
            RtlpBitsClearLow[((Set) >> 8) & 0xFF] + 8) : \
        ((((Set) >> 16) & 0xFF) ? \
            RtlpBitsClearLow[ ((Set) >> 16) & 0xFF] + 16 : \
            RtlpBitsClearLow[ (Set) >> 24] + 24) \
    )
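
//
// Added example: the macro returns the zero-based index of the least
// significant set bit by narrowing to the lowest nonzero byte and indexing
// the RtlpBitsClearLow lookup table. For Set == 0x0130 the low byte is
// 0x30, so the result is RtlpBitsClearLow[0x30] == 4.
//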
//
// Macro for setting a bit in the freelist vector to indicate entries are
// present.
//

#define SET_FREELIST_BIT( H, FB ) \
{ \
    ULONG _Index_; \
    ULONG _Bit_; \
 \
    HEAPASSERT((FB)->Size < HEAP_MAXIMUM_FREELISTS); \
 \
    _Index_ = (FB)->Size >> 3; \
    _Bit_ = (1 << ((FB)->Size & 7)); \
 \
    HEAPASSERT(((H)->u.FreeListsInUseBytes[ _Index_ ] & _Bit_) == 0); \
 \
    (H)->u.FreeListsInUseBytes[ _Index_ ] |= _Bit_; \
}

//
// Macro for clearing a bit in the freelist vector to indicate entries are
// not present.
//

#define CLEAR_FREELIST_BIT( H, FB ) \
{ \
    ULONG _Index_; \
    ULONG _Bit_; \
 \
    HEAPASSERT((FB)->Size < HEAP_MAXIMUM_FREELISTS); \
 \
    _Index_ = (FB)->Size >> 3; \
    _Bit_ = (1 << ((FB)->Size & 7)); \
 \
    HEAPASSERT((H)->u.FreeListsInUseBytes[ _Index_ ] & _Bit_); \
    HEAPASSERT(IsListEmpty(&(H)->FreeLists[ (FB)->Size ])); \
 \
    (H)->u.FreeListsInUseBytes[ _Index_ ] ^= _Bit_; \
}
//
// This macro inserts a free block into the appropriate free list including
// the [0] index list with entry filling if necessary
//

#define RtlpInsertFreeBlockDirect( H, FB, SIZE ) \
{ \
    PLIST_ENTRY _HEAD, _NEXT; \
    PHEAP_FREE_ENTRY _FB1; \
 \
    HEAPASSERT((FB)->Size == (SIZE)); \
    (FB)->Flags &= ~(HEAP_ENTRY_FILL_PATTERN | \
                     HEAP_ENTRY_EXTRA_PRESENT | \
                     HEAP_ENTRY_BUSY); \
 \
    if ((H)->Flags & HEAP_FREE_CHECKING_ENABLED) { \
 \
        RtlFillMemoryUlong( (PCHAR)((FB) + 1), \
                            ((SIZE) << HEAP_GRANULARITY_SHIFT) - \
                                sizeof( *(FB) ), \
                            FREE_HEAP_FILL ); \
 \
        (FB)->Flags |= HEAP_ENTRY_FILL_PATTERN; \
    } \
 \
    if ((SIZE) < HEAP_MAXIMUM_FREELISTS) { \
 \
        _HEAD = &(H)->FreeLists[ (SIZE) ]; \
 \
        if (IsListEmpty(_HEAD)) { \
 \
            SET_FREELIST_BIT( H, FB ); \
        } \
 \
    } else { \
 \
        _HEAD = &(H)->FreeLists[ 0 ]; \
        _NEXT = (H)->LargeBlocksIndex ? \
                    RtlpFindEntry(H, SIZE) : \
                    _HEAD->Flink; \
 \
        while (_HEAD != _NEXT) { \
 \
            _FB1 = CONTAINING_RECORD( _NEXT, HEAP_FREE_ENTRY, FreeList ); \
 \
            if ((SIZE) <= _FB1->Size) { \
 \
                break; \
 \
            } else { \
 \
                _NEXT = _NEXT->Flink; \
            } \
        } \
 \
        _HEAD = _NEXT; \
    } \
 \
    InsertTailList( _HEAD, &(FB)->FreeList ); \
    RtlpUpdateIndexInsertBlock(H, FB); \
    RtlpValidateNonDedicatedList(H); \
}
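
//
// Note on the non-dedicated path above: FreeLists[0] is kept sorted by
// ascending block size, so the scan stops at the first entry at least as
// large as SIZE and InsertTailList then links the new block just in front
// of it. When a large-blocks index exists, RtlpFindEntry supplies the
// starting point so the walk does not have to begin at the list head.
//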
//
// This version of RtlpInsertFreeBlockDirect does no filling.
//

#define RtlpFastInsertFreeBlockDirect( H, FB, SIZE ) \
{ \
    if ((SIZE) < HEAP_MAXIMUM_FREELISTS) { \
 \
        RtlpFastInsertDedicatedFreeBlockDirect( H, FB, SIZE ); \
 \
    } else { \
 \
        RtlpFastInsertNonDedicatedFreeBlockDirect( H, FB, SIZE ); \
    } \
}

//
// This version of RtlpInsertFreeBlockDirect only works for dedicated free
// lists and doesn't do any filling.
//

#define RtlpFastInsertDedicatedFreeBlockDirect( H, FB, SIZE ) \
{ \
    PLIST_ENTRY _HEAD; \
 \
    HEAPASSERT((FB)->Size == (SIZE)); \
 \
    if (!((FB)->Flags & HEAP_ENTRY_LAST_ENTRY)) { \
 \
        HEAPASSERT(((PHEAP_ENTRY)(FB) + (SIZE))->PreviousSize == (SIZE)); \
    } \
 \
    (FB)->Flags &= HEAP_ENTRY_LAST_ENTRY; \
 \
    _HEAD = &(H)->FreeLists[ (SIZE) ]; \
 \
    if (IsListEmpty(_HEAD)) { \
 \
        SET_FREELIST_BIT( H, FB ); \
    } \
 \
    InsertTailList( _HEAD, &(FB)->FreeList ); \
}

//
// This version of RtlpInsertFreeBlockDirect only works for nondedicated free
// lists and doesn't do any filling.
//

#define RtlpFastInsertNonDedicatedFreeBlockDirect( H, FB, SIZE ) \
{ \
    PLIST_ENTRY _HEAD, _NEXT; \
    PHEAP_FREE_ENTRY _FB1; \
 \
    HEAPASSERT((FB)->Size == (SIZE)); \
 \
    if (!((FB)->Flags & HEAP_ENTRY_LAST_ENTRY)) { \
 \
        HEAPASSERT(((PHEAP_ENTRY)(FB) + (SIZE))->PreviousSize == (SIZE)); \
    } \
 \
    (FB)->Flags &= (HEAP_ENTRY_LAST_ENTRY); \
 \
    _HEAD = &(H)->FreeLists[ 0 ]; \
    _NEXT = (H)->LargeBlocksIndex ? \
                RtlpFindEntry(H, SIZE) : \
                _HEAD->Flink; \
 \
    while (_HEAD != _NEXT) { \
 \
        _FB1 = CONTAINING_RECORD( _NEXT, HEAP_FREE_ENTRY, FreeList ); \
 \
        if ((SIZE) <= _FB1->Size) { \
 \
            break; \
 \
        } else { \
 \
            _NEXT = _NEXT->Flink; \
        } \
    } \
 \
    InsertTailList( _NEXT, &(FB)->FreeList ); \
    RtlpUpdateIndexInsertBlock(H, FB); \
    RtlpValidateNonDedicatedList(H); \
}
//
// This macro removes a block from its free list with fill checking if
// necessary
//

#define RtlpRemoveFreeBlock( H, FB ) \
{ \
    RtlpFastRemoveFreeBlock( H, FB ) \
 \
    if ((FB)->Flags & HEAP_ENTRY_FILL_PATTERN) { \
 \
        SIZE_T cb, cbEqual; \
        PVOID p; \
 \
        cb = ((FB)->Size << HEAP_GRANULARITY_SHIFT) - sizeof( *(FB) ); \
 \
        if ((FB)->Flags & HEAP_ENTRY_EXTRA_PRESENT && \
            cb > sizeof( HEAP_FREE_ENTRY_EXTRA )) { \
 \
            cb -= sizeof( HEAP_FREE_ENTRY_EXTRA ); \
        } \
 \
        cbEqual = RtlCompareMemoryUlong( (PCHAR)((FB) + 1), \
                                         cb, \
                                         FREE_HEAP_FILL ); \
 \
        if (cbEqual != cb) { \
 \
            HeapDebugPrint(( \
                "HEAP: Free Heap block %lx modified at %lx after it was freed\n", \
                (FB), \
                (PCHAR)((FB) + 1) + cbEqual )); \
 \
            HeapDebugBreak((FB)); \
        } \
    } \
}
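
//
// The fill check above is the counterpart of the fill done in
// RtlpInsertFreeBlockDirect: a block freed with HEAP_FREE_CHECKING_ENABLED
// is stamped with FREE_HEAP_FILL, so any byte that no longer matches when
// the block is removed for reuse indicates a write-after-free, and cbEqual
// (the count of bytes that still match) gives the offset of the first
// modification.
//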
#ifndef NTOS_KERNEL_RUNTIME

VOID
RtlpHeapReportCorruption (
    IN PVOID Address );

#else // NTOS_KERNEL_RUNTIME

#define RtlpHeapReportCorruption(__x__)

#endif // NTOS_KERNEL_RUNTIME

//
// This version of RtlpRemoveFreeBlock does no fill checking
//

#define RtlpFastRemoveFreeBlock( H, FB ) \
{ \
    PLIST_ENTRY _EX_Blink; \
    PLIST_ENTRY _EX_Flink; \
 \
    _EX_Flink = (FB)->FreeList.Flink; \
    _EX_Blink = (FB)->FreeList.Blink; \
 \
    if ( (_EX_Blink->Flink == _EX_Flink->Blink) && \
         (_EX_Blink->Flink == &(FB)->FreeList) ) { \
 \
        RtlpUpdateIndexRemoveBlock(H, FB); \
 \
        _EX_Blink->Flink = _EX_Flink; \
        _EX_Flink->Blink = _EX_Blink; \
 \
        if ((_EX_Flink == _EX_Blink) && \
            ((FB)->Size < HEAP_MAXIMUM_FREELISTS)) { \
 \
            CLEAR_FREELIST_BIT( H, FB ); \
        } \
        RtlpValidateNonDedicatedList(H); \
 \
    } else { \
 \
        RtlpHeapReportCorruption(&(FB)->FreeList); \
    } \
}

//
// This version of RtlpRemoveFreeBlock only works for dedicated free lists
// (where we know that (FB)->Mask != 0) and doesn't do any fill checking
//

#define RtlpFastRemoveDedicatedFreeBlock( H, FB ) \
{ \
    PLIST_ENTRY _EX_Blink; \
    PLIST_ENTRY _EX_Flink; \
 \
    _EX_Flink = (FB)->FreeList.Flink; \
    _EX_Blink = (FB)->FreeList.Blink; \
 \
    if ( (_EX_Blink->Flink == _EX_Flink->Blink) && \
         (_EX_Blink->Flink == &(FB)->FreeList) ) { \
 \
        _EX_Blink->Flink = _EX_Flink; \
        _EX_Flink->Blink = _EX_Blink; \
 \
    } else { \
 \
        RtlpHeapReportCorruption(&(FB)->FreeList); \
    } \
 \
    if (_EX_Flink == _EX_Blink) { \
 \
        CLEAR_FREELIST_BIT( H, FB ); \
    } \
}

BOOLEAN
FORCEINLINE
RtlpHeapRemoveEntryList(
    IN PLIST_ENTRY Entry
    )
{
    PLIST_ENTRY Blink;
    PLIST_ENTRY Flink;

    Flink = Entry->Flink;
    Blink = Entry->Blink;

    if ( (Blink->Flink == Flink->Blink) &&
         (Blink->Flink == Entry) ) {

        Blink->Flink = Flink;
        Flink->Blink = Blink;

    } else {

        RtlpHeapReportCorruption(Entry);
    }

    return (BOOLEAN)(Flink == Blink);
}
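
//
// All three unlink paths above apply the same "safe unlinking" check:
// before an entry is removed, both neighbors must still point back at it
// (Entry->Blink->Flink == Entry and equal to Entry->Flink->Blink). A
// corrupted LIST_ENTRY therefore reaches RtlpHeapReportCorruption instead
// of performing the unlink, blocking the classic arbitrary-write primitive
// that unlinking a forged entry would otherwise hand an attacker.
//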
//
// This version of RtlpRemoveFreeBlock only works for nondedicated free lists
// (where we know that (FB)->Mask == 0) and doesn't do any fill checking
//

#define RtlpFastRemoveNonDedicatedFreeBlock( H, FB ) \
{ \
    RtlpUpdateIndexRemoveBlock(H, FB); \
    RtlpHeapRemoveEntryList(&(FB)->FreeList); \
    RtlpValidateNonDedicatedList(H); \
}
//
// Heap tagging routines implemented in heapdll.c
//

#if DBG
#define IS_HEAP_TAGGING_ENABLED() (TRUE)
#else
#define IS_HEAP_TAGGING_ENABLED() (RtlGetNtGlobalFlags() & FLG_HEAP_ENABLE_TAGGING)
#endif // DBG

//
// ORDER IS IMPORTANT HERE...SEE RtlpUpdateTagEntry sources
//

typedef enum _HEAP_TAG_ACTION {
    AllocationAction,
    VirtualAllocationAction,
    FreeAction,
    VirtualFreeAction,
    ReAllocationAction,
    VirtualReAllocationAction
} HEAP_TAG_ACTION;

PWSTR
RtlpGetTagName (
    PHEAP Heap,
    USHORT TagIndex
    );

USHORT
RtlpUpdateTagEntry (
    PHEAP Heap,
    USHORT TagIndex,
    SIZE_T OldSize, // Only valid for ReAllocation and Free actions
    SIZE_T NewSize, // Only valid for ReAllocation and Allocation actions
    HEAP_TAG_ACTION Action
    );

VOID
RtlpResetTags (
    PHEAP Heap
    );

VOID
RtlpDestroyTags (
    PHEAP Heap
    );

//
// Define heap lookaside list allocation functions.
//

typedef struct _HEAP_LOOKASIDE {
    SLIST_HEADER ListHead;

    USHORT Depth;
    USHORT MaximumDepth;

    ULONG TotalAllocates;
    ULONG AllocateMisses;

    ULONG TotalFrees;
    ULONG FreeMisses;

    ULONG LastTotalAllocates;
    ULONG LastAllocateMisses;

    ULONG Counters[2];

} HEAP_LOOKASIDE, *PHEAP_LOOKASIDE;
NTKERNELAPI
VOID
RtlpInitializeHeapLookaside (
    IN PHEAP_LOOKASIDE Lookaside,
    IN USHORT Depth
    );

NTKERNELAPI
VOID
RtlpDeleteHeapLookaside (
    IN PHEAP_LOOKASIDE Lookaside
    );

VOID
RtlpAdjustHeapLookasideDepth (
    IN PHEAP_LOOKASIDE Lookaside
    );

NTKERNELAPI
PVOID
RtlpAllocateFromHeapLookaside (
    IN PHEAP_LOOKASIDE Lookaside
    );

NTKERNELAPI
BOOLEAN
RtlpFreeToHeapLookaside (
    IN PHEAP_LOOKASIDE Lookaside,
    IN PVOID Entry
    );
#ifndef NTOS_KERNEL_RUNTIME

#define HEAP_LFH_INDEX ((UCHAR)0xFF)

UCHAR
FORCEINLINE
RtlpGetSmallTagIndex(
    IN PHEAP Heap,
    IN PVOID HeapEntry )
{
    return ((PHEAP_ENTRY)HeapEntry)->SmallTagIndex ^
           ((UCHAR)((ULONG_PTR)HeapEntry >> HEAP_GRANULARITY_SHIFT) ^ Heap->Entry.SmallTagIndex);
}

VOID
FORCEINLINE
RtlpSetSmallTagIndex(
    IN PHEAP Heap,
    IN PVOID HeapEntry,
    IN UCHAR SmallTagIndex
    )
{
    ((PHEAP_ENTRY)HeapEntry)->SmallTagIndex = SmallTagIndex ^
        ((UCHAR)((ULONG_PTR)HeapEntry >> HEAP_GRANULARITY_SHIFT) ^ Heap->Entry.SmallTagIndex);
}
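
//
// The stored byte is the tag XOR-ed with an address-derived byte and the
// per-heap cookie in Heap->Entry.SmallTagIndex. Because (t ^ k) ^ k == t,
// the getter undoes the setter exactly; and when tagging is disabled the
// decoded value must come back as zero, which is what
// RtlpQuickValidateBlock below uses as a cheap header-corruption check.
//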
LOGICAL
FORCEINLINE
RtlpQuickValidateBlock(
    IN PHEAP Heap,
    IN PVOID HeapEntry )
{
    UCHAR SegmentIndex = ((PHEAP_ENTRY)HeapEntry)->SegmentIndex;

    if ( SegmentIndex < HEAP_LFH_INDEX ) {

#if DBG
        //
        // The following test is useful to detect cross-heap frees and
        // segment index corruption. However it requires fetching some
        // segment fields, and performance can degrade with the heap size
        //

        if ( (SegmentIndex >= HEAP_MAXIMUM_SEGMENTS)
                ||
             (Heap->Segments[SegmentIndex] == NULL)
                ||
             (HeapEntry < (PVOID)Heap->Segments[SegmentIndex])
                ||
             (HeapEntry >= (PVOID)Heap->Segments[SegmentIndex]->LastValidEntry)) {

            RtlpHeapReportCorruption(HeapEntry);

            return FALSE;
        }
#endif // DBG

        if (!IS_HEAP_TAGGING_ENABLED()) {

            if (RtlpGetSmallTagIndex(Heap, HeapEntry) != 0) {

                RtlpHeapReportCorruption(HeapEntry);

                return FALSE;
            }
        }
    }

    return TRUE;
}
//
// Low Fragmentation Heap data structures and internal APIs
//

//
// The memory barrier exists on IA64 only
//

#if defined(_IA64_)

#define RtlMemoryBarrier() __mf ()

#else // #if defined(_IA64_)

//
// On x86 and AMD64 ignore the memory barrier
//

#define RtlMemoryBarrier()

#endif // #if defined(_IA64_)

extern ULONG RtlpDisableHeapLookaside;
extern ULONG_PTR RtlpLFHKey;

#define HEAP_ENABLE_LOW_FRAG_HEAP 8

typedef struct _BLOCK_ENTRY {
    HEAP_ENTRY;
    USHORT LinkOffset;
    USHORT Reserved2;
} BLOCK_ENTRY, *PBLOCK_ENTRY;

typedef struct _INTERLOCK_SEQ {
    union {
        struct {
            union {
                struct {
                    USHORT Depth;
                    USHORT FreeEntryOffset;
                };
                volatile ULONG OffsetAndDepth;
            };
            volatile ULONG Sequence;
        };
        volatile LONGLONG Exchg;
    };
} INTERLOCK_SEQ, *PINTERLOCK_SEQ;
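
//
// The overlapping unions let the LFH view the same 8 bytes three ways:
// Depth and FreeEntryOffset as two USHORTs, the pair as a single ULONG
// (OffsetAndDepth), and the whole state plus Sequence as one LONGLONG
// (Exchg) that can be swapped atomically with a 64-bit interlocked
// compare-exchange, which is how subsegment state is updated lock-free.
//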
struct _HEAP_USERDATA_HEADER;

typedef struct _HEAP_SUBSEGMENT {

    PVOID Bucket;

    volatile struct _HEAP_USERDATA_HEADER * UserBlocks;

    INTERLOCK_SEQ AggregateExchg;

    union {
        struct {
            USHORT BlockSize;
            USHORT FreeThreshold;
            USHORT BlockCount;
            UCHAR SizeIndex;
            UCHAR AffinityIndex;
        };
        ULONG Alignment[2];
    };

    SINGLE_LIST_ENTRY SFreeListEntry;
    volatile ULONG Lock;

} HEAP_SUBSEGMENT, *PHEAP_SUBSEGMENT;

typedef struct _HEAP_USERDATA_HEADER {

    union {
        SINGLE_LIST_ENTRY SFreeListEntry;
        PHEAP_SUBSEGMENT SubSegment;
    };

    PVOID HeapHandle;
    ULONG_PTR SizeIndex;
    ULONG_PTR Signature;

} HEAP_USERDATA_HEADER, *PHEAP_USERDATA_HEADER;

#define HEAP_NO_CACHE_BLOCK 0x800000
#define HEAP_LARGEST_LFH_BLOCK 0x4000

#define HEAP_LFH_USER_SIGNATURE 0xF0E0D0C0

#ifdef DISABLE_REGISTRY_TEST_HOOKS

#define RtlpIsLowFragHeapEnabled() FALSE

#else //DISABLE_REGISTRY_TEST_HOOKS

#define RtlpIsLowFragHeapEnabled() \
    ((RtlpDisableHeapLookaside & HEAP_ENABLE_LOW_FRAG_HEAP) != 0)

#endif //DISABLE_REGISTRY_TEST_HOOKS

PHEAP_SUBSEGMENT
FORCEINLINE
RtlpGetSubSegment(
    PHEAP_ENTRY Block,
    ULONG_PTR Key
    )
{
    return (PHEAP_SUBSEGMENT)((ULONG_PTR)Block->SubSegmentCode ^
        (((ULONG_PTR)Block >> HEAP_GRANULARITY_SHIFT) ^ Key ^ RtlpLFHKey));
}

VOID
FORCEINLINE
RtlpSetSubSegment(
    PHEAP_ENTRY Block,
    PHEAP_SUBSEGMENT SubSegment,
    ULONG_PTR Key
    )
{
    Block->SubSegmentCode = (PVOID) (((ULONG_PTR)SubSegment) ^
        (((ULONG_PTR)Block >> HEAP_GRANULARITY_SHIFT) ^ Key ^ RtlpLFHKey));
}
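
//
// Like the small tag index, the subsegment pointer is never stored in the
// clear: it is XOR-ed with the block address (shifted to granularity
// units), a caller-supplied key, and the global RtlpLFHKey. Decoding with
// the same three values restores the pointer, while a forged or stale
// header decodes to garbage, making it harder to feed the LFH a crafted
// subsegment.
//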
ULONG
FORCEINLINE
RtlpGetAllocationUnits(
    PHEAP Heap,
    PHEAP_ENTRY Block
    )
{
    PHEAP_SUBSEGMENT SubSegment = RtlpGetSubSegment(Block, (ULONG_PTR)Heap);

    if (Block->SegmentIndex == HEAP_LFH_INDEX) {

        ULONG ReturnSize = *((volatile USHORT *)&SubSegment->BlockSize);

        return ReturnSize;
    }

    return Block->Size;
}

VOID
FORCEINLINE
RtlpSetUnusedBytes(PHEAP Heap, PHEAP_ENTRY Block, SIZE_T UnusedBytes)
{
    if (UnusedBytes < 0xff) {

        Block->UnusedBytes = (UCHAR)(UnusedBytes);

    } else {

        PSIZE_T UnusedBytesULong = (PSIZE_T)(Block + RtlpGetAllocationUnits(Heap, Block));

        UnusedBytesULong -= 1;

        Block->UnusedBytes = 0xff;
        *UnusedBytesULong = UnusedBytes;
    }
}

SIZE_T
FORCEINLINE
RtlpGetUnusedBytes(PHEAP Heap, PHEAP_ENTRY Block)
{
    if (Block->UnusedBytes < 0xff) {

        return Block->UnusedBytes;

    } else {

        PSIZE_T UnusedBytesULong = (PSIZE_T)(Block + RtlpGetAllocationUnits(Heap, Block));

        UnusedBytesULong -= 1;

        return (*UnusedBytesULong);
    }
}
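
//
// UnusedBytes is a single UCHAR in the block header, so 0xff acts as an
// escape value: when the real count does not fit, the full SIZE_T is
// spilled into the last pointer-sized slot of the block itself
// (Block + RtlpGetAllocationUnits(...) is one-past-the-end in HEAP_ENTRY
// units, and the PSIZE_T decrement steps back onto that final slot). The
// getter mirrors the same computation to read the value back.
//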
VOID
RtlpInitializeLowFragHeapManager();

HANDLE
FASTCALL
RtlpCreateLowFragHeap(
    HANDLE Heap
    );

VOID
FASTCALL
RtlpDestroyLowFragHeap(
    HANDLE LowFragHeapHandle
    );

PVOID
FASTCALL
RtlpLowFragHeapAlloc(
    HANDLE LowFragHeapHandle,
    SIZE_T BlockSize
    );

BOOLEAN
FASTCALL
RtlpLowFragHeapFree(
    HANDLE LowFragHeapHandle,
    PVOID p
    );

NTSTATUS
RtlpActivateLowFragmentationHeap(
    IN PVOID HeapHandle
    );

ULONG
FASTCALL
RtlpLowFragHeapMultipleAlloc(
    HANDLE LowFragHeapHandle,
    ULONG Flags,
    SIZE_T BlockSize,
    ULONG BlockCount,
    PVOID * Pointers
    );

ULONG
FASTCALL
RtlpLowFragHeapMultipleFree(
    HANDLE LowFragHeapHandle,
    ULONG Flags,
    ULONG BlockCount,
    PVOID * Pointers
    );
#else // NTOS_KERNEL_RUNTIME

//
// The kernel mode heap does not adjust the heap granularity,
// so the unused byte count always fits in the UCHAR.
// No need to check for overflow here
//

ULONG
FORCEINLINE
RtlpGetAllocationUnits(
    PHEAP Heap,
    PHEAP_ENTRY Block
    )
{
    return Block->Size;
}

VOID
FORCEINLINE
RtlpSetUnusedBytes(PHEAP Heap, PHEAP_ENTRY Block, SIZE_T UnusedBytes)
{
    Block->UnusedBytes = (UCHAR)(UnusedBytes);
}

SIZE_T
FORCEINLINE
RtlpGetUnusedBytes(PHEAP Heap, PHEAP_ENTRY Block)
{
    return Block->UnusedBytes;
}

UCHAR
FORCEINLINE
RtlpGetSmallTagIndex(
    IN PHEAP Heap,
    IN PVOID HeapEntry )
{
    return ((PHEAP_ENTRY)HeapEntry)->SmallTagIndex;
}

VOID
FORCEINLINE
RtlpSetSmallTagIndex(
    IN PHEAP Heap,
    IN PVOID HeapEntry,
    IN UCHAR SmallTagIndex
    )
{
    ((PHEAP_ENTRY)HeapEntry)->SmallTagIndex = SmallTagIndex;
}

#define RtlpQuickValidateBlock(_x_, _y_) (TRUE)

#endif // NTOS_KERNEL_RUNTIME

#endif // _RTL_HEAP_PRIVATE_