Leaked source code of Windows Server 2003

/*++
Copyright (c) 1989 Microsoft Corporation
Module Name:
    heap.c
Abstract:
    This module implements a heap allocator.
Author:
    Steve Wood (stevewo) 20-Sep-1989 (Adapted from URTL\alloc.c)
Revision History:
--*/

#include <ntos.h>
#include <ntrtl.h>
#include <nturtl.h>
#include "ntrtlp.h"
#include "heap.h"
#include "heappriv.h"
#include "NtdllTrc.h"
#include "wmiumkm.h"

#ifndef NTOS_KERNEL_RUNTIME

ULONG RtlpDisableHeapLookaside = 0;
LONG RtlpSequenceNumberTest = 1024;
LONG RtlpLargeListDepthLimit = 128;

#define HEAP_ACTIVATE_CACHE_THRESHOLD 256
#define HEAP_COMPAT_DISABLE_LOOKASIDES 1
#define HEAP_COMPAT_DISABLE_LARGECACHE 2

#endif

#define HEAP_REUSAGE_FACTOR 4

#if defined(_WIN64)
//
// Win64 heaps require an initial commit size of at least 8192. Note that
// this is NOT necessarily the size of a page.
//
#define MINIMUM_HEAP_COMMIT 8192
#else
#define MINIMUM_HEAP_COMMIT 4096
#endif

C_ASSERT((MINIMUM_HEAP_COMMIT % PAGE_SIZE) == 0);

//
// If any of these flags are set, the fast allocator punts
// to the slow do-everything allocator.
//
#define HEAP_SLOW_FLAGS (HEAP_DEBUG_FLAGS | \
                         HEAP_SETTABLE_USER_FLAGS | \
                         HEAP_NEED_EXTRA_FLAGS | \
                         HEAP_CREATE_ALIGN_16 | \
                         HEAP_FREE_CHECKING_ENABLED | \
                         HEAP_TAIL_CHECKING_ENABLED)
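
//
// Illustrative sketch (not part of the original source; it mirrors the
// test in RtlAllocateHeap later in this file): the fast allocator checks
// these flags once, up front, and punts to the slow path if any are set:
//
//     if (Flags & HEAP_SLOW_FLAGS) {
//         return RtlAllocateHeapSlowly( HeapHandle, Flags, Size );
//     }
//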
#if defined(ALLOC_DATA_PRAGMA) && defined(NTOS_KERNEL_RUNTIME)
#pragma const_seg("PAGECONST")
#endif

const UCHAR CheckHeapFillPattern[ CHECK_HEAP_TAIL_SIZE ] = {
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
#ifdef _WIN64
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
    CHECK_HEAP_TAIL_FILL,
#endif
    CHECK_HEAP_TAIL_FILL
};
//
// These are procedure prototypes exported by heapdbg.c
//

#ifndef NTOS_KERNEL_RUNTIME

PVOID
RtlDebugCreateHeap (
    IN ULONG Flags,
    IN PVOID HeapBase OPTIONAL,
    IN SIZE_T ReserveSize OPTIONAL,
    IN SIZE_T CommitSize OPTIONAL,
    IN PVOID Lock OPTIONAL,
    IN PRTL_HEAP_PARAMETERS Parameters OPTIONAL
    );

BOOLEAN
RtlDebugDestroyHeap (
    IN PVOID HeapHandle
    );

PVOID
RtlDebugAllocateHeap (
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN SIZE_T Size
    );

BOOLEAN
RtlDebugFreeHeap (
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID BaseAddress
    );

SIZE_T
RtlDebugSizeHeap (
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID BaseAddress
    );

NTSTATUS
RtlDebugZeroHeap (
    IN PVOID HeapHandle,
    IN ULONG Flags
    );

SIZE_T
GetUCBytes (
    IN PHEAP Heap,
    IN OUT SIZE_T *ReservedSpace,
    IN OUT PULONG NoOfUCRs
    );

#endif // NTOS_KERNEL_RUNTIME
//
// Local procedure prototypes
//

PHEAP_UNCOMMMTTED_RANGE
RtlpCreateUnCommittedRange (
    IN PHEAP_SEGMENT Segment
    );

VOID
RtlpDestroyUnCommittedRange (
    IN PHEAP_SEGMENT Segment,
    IN PHEAP_UNCOMMMTTED_RANGE UnCommittedRange
    );

VOID
RtlpInsertUnCommittedPages (
    IN PHEAP_SEGMENT Segment,
    IN ULONG_PTR Address,
    IN SIZE_T Size
    );

NTSTATUS
RtlpDestroyHeapSegment (
    IN PHEAP_SEGMENT Segment
    );

PHEAP_FREE_ENTRY
RtlpExtendHeap (
    IN PHEAP Heap,
    IN SIZE_T AllocationSize
    );

#if defined(ALLOC_PRAGMA) && defined(NTOS_KERNEL_RUNTIME)
#pragma alloc_text(PAGE, RtlCreateHeap)
#pragma alloc_text(PAGE, RtlDestroyHeap)
#pragma alloc_text(PAGE, RtlAllocateHeap)
#pragma alloc_text(PAGE, RtlAllocateHeapSlowly)
#pragma alloc_text(PAGE, RtlFreeHeapSlowly)
#pragma alloc_text(PAGE, RtlFreeHeap)
#pragma alloc_text(PAGE, RtlSizeHeap)
#pragma alloc_text(PAGE, RtlZeroHeap)
#pragma alloc_text(PAGE, RtlpGetExtraStuffPointer)
#pragma alloc_text(PAGE, RtlpCreateUnCommittedRange)
#pragma alloc_text(PAGE, RtlpDestroyUnCommittedRange)
#pragma alloc_text(PAGE, RtlpInsertUnCommittedPages)
#pragma alloc_text(PAGE, RtlpDestroyHeapSegment)
#pragma alloc_text(PAGE, RtlpExtendHeap)
#pragma alloc_text(PAGE, RtlpFindAndCommitPages)
#pragma alloc_text(PAGE, RtlpInitializeHeapSegment)
#pragma alloc_text(PAGE, RtlpCoalesceFreeBlocks)
#pragma alloc_text(PAGE, RtlpDeCommitFreeBlock)
#pragma alloc_text(PAGE, RtlpInsertFreeBlock)
#pragma alloc_text(PAGE, RtlpGetSizeOfBigBlock)
#pragma alloc_text(PAGE, RtlpCheckBusyBlockTail)
#pragma alloc_text(PAGE, RtlpHeapExceptionFilter)
#endif // ALLOC_PRAGMA
ULONG
RtlpHeapExceptionFilter (
    NTSTATUS ExceptionCode
    )

/*++
Routine Description:
    This routine is the exception filter used by heap operations.
Arguments:
    ExceptionCode - exception code
Return Value:
    EXCEPTION_CONTINUE_SEARCH for deadlock and stack overflow exceptions
    EXCEPTION_EXECUTE_HANDLER otherwise
--*/

{
    if ((ExceptionCode == STATUS_STACK_OVERFLOW)
            ||
        (ExceptionCode == STATUS_POSSIBLE_DEADLOCK)) {

        return EXCEPTION_CONTINUE_SEARCH;
    }

    return EXCEPTION_EXECUTE_HANDLER;
}
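
//
// Illustrative usage sketch (not from the original source; it mirrors the
// parameter-capture code in RtlCreateHeap below): this filter is meant
// for the except clause of heap routines that touch caller memory:
//
//     try {
//         ... touch caller-supplied memory ...
//     } except( RtlpHeapExceptionFilter(GetExceptionCode()) ) {
//         Status = GetExceptionCode();
//     }
//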
#ifndef NTOS_KERNEL_RUNTIME

VOID
RtlpHeapReportCorruption (
    IN PVOID Address )
{
    DbgPrint("Heap corruption detected at %p\n", Address );

    //
    // Break for this corruption if the system enables critical breaks
    //
    if (RtlGetNtGlobalFlags() & FLG_ENABLE_SYSTEM_CRIT_BREAKS) {
        DbgBreakPoint();
    }
}

#endif // NTOS_KERNEL_RUNTIME
PVOID
RtlCreateHeap (
    IN ULONG Flags,
    IN PVOID HeapBase OPTIONAL,
    IN SIZE_T ReserveSize OPTIONAL,
    IN SIZE_T CommitSize OPTIONAL,
    IN PVOID Lock OPTIONAL,
    IN PRTL_HEAP_PARAMETERS Parameters OPTIONAL
    )

/*++
Routine Description:
    This routine initializes a heap.
Arguments:
    Flags - Specifies optional attributes of the heap.
        Valid Flags Values:
        HEAP_NO_SERIALIZE - if set, then allocations and deallocations on
            this heap are NOT synchronized by these routines.
        HEAP_GROWABLE - if set, then the heap is a "sparse" heap where
            memory is committed only as necessary instead of
            being preallocated.
    HeapBase - if not NULL, this specifies the base address for memory
        to use as the heap. If NULL, memory is allocated by these routines.
    ReserveSize - if not zero, this specifies the amount of virtual address
        space to reserve for the heap.
    CommitSize - if not zero, this specifies the amount of virtual address
        space to commit for the heap. Must be less than ReserveSize. If
        zero, then defaults to one page.
    Lock - if not NULL, this parameter points to the resource lock to
        use. Only valid if HEAP_NO_SERIALIZE is NOT set.
    Parameters - optional heap parameters.
Return Value:
    PVOID - a pointer to be used in accessing the created heap.
--*/
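
//
// Illustrative caller-side sketch (not from the original source): a
// typical growable heap with default sizes, letting this routine
// allocate the memory and the lock itself:
//
//     PVOID HeapHandle = RtlCreateHeap( HEAP_GROWABLE, NULL, 0, 0, NULL, NULL );
//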
{
    ULONG_PTR HighestUserAddress;
    NTSTATUS Status;
    PHEAP Heap = NULL;
    PHEAP_SEGMENT Segment = NULL;
    PLIST_ENTRY FreeListHead;
    ULONG SizeOfHeapHeader;
    ULONG SegmentFlags;
    PVOID CommittedBase;
    PVOID UnCommittedBase;
    MEMORY_BASIC_INFORMATION MemoryInformation;
    SYSTEM_BASIC_INFORMATION SystemInformation;
    ULONG n;
    ULONG InitialCountOfUnusedUnCommittedRanges;
    SIZE_T MaximumHeapBlockSize;
    PVOID NextHeapHeaderAddress;
    PHEAP_UNCOMMMTTED_RANGE UnCommittedRange, *pp;
    RTL_HEAP_PARAMETERS TempParameters;
    ULONG GlobalFlag = RtlGetNtGlobalFlags();

#ifndef NTOS_KERNEL_RUNTIME
    PPEB Peb;
#else // NTOS_KERNEL_RUNTIME
    extern SIZE_T MmHeapSegmentReserve;
    extern SIZE_T MmHeapSegmentCommit;
    extern SIZE_T MmHeapDeCommitTotalFreeThreshold;
    extern SIZE_T MmHeapDeCommitFreeBlockThreshold;
#endif // NTOS_KERNEL_RUNTIME

    RTL_PAGED_CODE();

    //
    // Check if we should be using the page heap code. If not then turn
    // off any of the page heap flags before going on
    //

#ifdef DEBUG_PAGE_HEAP
    if ( RtlpDebugPageHeap && ( HeapBase == NULL ) && ( Lock == NULL )) {
        PVOID PageHeap;

        PageHeap = RtlpDebugPageHeapCreate(
            Flags,
            HeapBase,
            ReserveSize,
            CommitSize,
            Lock,
            Parameters );

        if (PageHeap != NULL) {
            return PageHeap;
        }

        //
        // A `-1' value signals a recursive call from the page heap
        // manager. We set this to null and continue creating
        // a normal heap. This small hack is required so that we
        // minimize the dependencies between the normal and the page
        // heap manager.
        //
        if ((SIZE_T)Parameters == (SIZE_T)-1) {
            Parameters = NULL;
        } else {
            //
            // If we get here it means page heap create returned null due to
            // a real error (out of memory or fault injection) and we have
            // to fail the call.
            //
            return NULL;
        }
    }

    Flags &= ~( HEAP_PROTECTION_ENABLED |
                HEAP_BREAK_WHEN_OUT_OF_VM |
                HEAP_NO_ALIGNMENT );
#endif // DEBUG_PAGE_HEAP

    //
    // If the caller does not want to skip heap validation checks then we
    // need to validate the rest of the flags by simply masking off those
    // flags that are not valid on a create heap call
    //
    if (!(Flags & HEAP_SKIP_VALIDATION_CHECKS)) {
        if (Flags & ~HEAP_CREATE_VALID_MASK) {
            HeapDebugPrint(( "Invalid flags (%08x) specified to RtlCreateHeap\n", Flags ));
            HeapDebugBreak( NULL );
            Flags &= HEAP_CREATE_VALID_MASK;
        }
    }

    //
    // The maximum heap block size is really 0x7f000 which is 0x80000 minus a
    // page. Maximum block size is 0xfe00 and granularity shift is 3.
    //
    MaximumHeapBlockSize = HEAP_MAXIMUM_BLOCK_SIZE << HEAP_GRANULARITY_SHIFT;
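
    //
    // Illustrative arithmetic (assumes HEAP_MAXIMUM_BLOCK_SIZE == 0xfe00
    // and HEAP_GRANULARITY_SHIFT == 3, per the comment above):
    //
    //     0xfe00 << 3 == 0x7f000 == 0x80000 - 0x1000 (one 4K page)
    //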
    //
    // Assume we're going to be successful until we're shown otherwise
    //
    Status = STATUS_SUCCESS;

    //
    // This part of the routine builds up a local variable containing all the
    // parameters used to initialize the heap. The first thing we do is zero
    // it out.
    //
    RtlZeroMemory( &TempParameters, sizeof( TempParameters ) );

    //
    // If our caller supplied the optional heap parameters then we'll
    // make sure the size is good and copy them over to our
    // local copy
    //
    if (ARGUMENT_PRESENT( Parameters )) {
        try {
            if (Parameters->Length == sizeof( *Parameters )) {
                RtlCopyMemory( &TempParameters, Parameters, sizeof( *Parameters ) );
            }
        } except( RtlpHeapExceptionFilter(GetExceptionCode()) ) {
            Status = GetExceptionCode();
        }

        if (!NT_SUCCESS( Status )) {
            return NULL;
        }
    }

    //
    // Set the parameter block to the local copy
    //
    Parameters = &TempParameters;

    //
    // If the nt global flags tell us to always do tail or free checking
    // or to disable coalescing then force those bits set in the user
    // specified flags
    //
    if (GlobalFlag & FLG_HEAP_ENABLE_TAIL_CHECK) {
        Flags |= HEAP_TAIL_CHECKING_ENABLED;
    }

    if (GlobalFlag & FLG_HEAP_ENABLE_FREE_CHECK) {
        Flags |= HEAP_FREE_CHECKING_ENABLED;
    }

    if (GlobalFlag & FLG_HEAP_DISABLE_COALESCING) {
        Flags |= HEAP_DISABLE_COALESCE_ON_FREE;
    }

#ifndef NTOS_KERNEL_RUNTIME
    //
    // In the non kernel case we also check if we should
    // validate parameters, validate all, or do stack backtraces
    //
    Peb = NtCurrentPeb();

    if (GlobalFlag & FLG_HEAP_VALIDATE_PARAMETERS) {
        Flags |= HEAP_VALIDATE_PARAMETERS_ENABLED;
    }

    if (GlobalFlag & FLG_HEAP_VALIDATE_ALL) {
        Flags |= HEAP_VALIDATE_ALL_ENABLED;
    }

    if (GlobalFlag & FLG_USER_STACK_TRACE_DB) {
        Flags |= HEAP_CAPTURE_STACK_BACKTRACES;
    }

    //
    // Also in the non kernel case the PEB will have some state
    // variables that we need to set if the user hasn't specified
    // otherwise
    //
    if (Parameters->SegmentReserve == 0) {
        Parameters->SegmentReserve = Peb->HeapSegmentReserve;
    }

    if (Parameters->SegmentCommit == 0) {
        Parameters->SegmentCommit = Peb->HeapSegmentCommit;
    }

    if (Parameters->DeCommitFreeBlockThreshold == 0) {
        Parameters->DeCommitFreeBlockThreshold = Peb->HeapDeCommitFreeBlockThreshold;
    }

    if (Parameters->DeCommitTotalFreeThreshold == 0) {
        Parameters->DeCommitTotalFreeThreshold = Peb->HeapDeCommitTotalFreeThreshold;
    }
#else // NTOS_KERNEL_RUNTIME
    //
    // In the kernel case Mm has some global variables that we set
    // into the parameters if the user hasn't specified otherwise
    //
    if (Parameters->SegmentReserve == 0) {
        Parameters->SegmentReserve = MmHeapSegmentReserve;
    }

    if (Parameters->SegmentCommit == 0) {
        Parameters->SegmentCommit = MmHeapSegmentCommit;
    }

    if (Parameters->DeCommitFreeBlockThreshold == 0) {
        Parameters->DeCommitFreeBlockThreshold = MmHeapDeCommitFreeBlockThreshold;
    }

    if (Parameters->DeCommitTotalFreeThreshold == 0) {
        Parameters->DeCommitTotalFreeThreshold = MmHeapDeCommitTotalFreeThreshold;
    }
#endif // NTOS_KERNEL_RUNTIME

    //
    // Get the highest user address
    //
    if (!NT_SUCCESS(ZwQuerySystemInformation(SystemBasicInformation,
                                             &SystemInformation,
                                             sizeof(SystemInformation),
                                             NULL))) {
        return NULL;
    }

    HighestUserAddress = SystemInformation.MaximumUserModeAddress;

    //
    // If the user hasn't said what the largest allocation size is then
    // we should compute it as the difference between the highest and lowest
    // address less one page
    //
    if (Parameters->MaximumAllocationSize == 0) {
        Parameters->MaximumAllocationSize = (HighestUserAddress -
                                             (ULONG_PTR)MM_LOWEST_USER_ADDRESS -
                                             PAGE_SIZE );
    }

    //
    // Set the virtual memory threshold to be non zero and not more than the
    // maximum heap block size of 0x7f000. If the user specified one that is
    // too large we automatically and silently drop it down.
    //
    if ((Parameters->VirtualMemoryThreshold == 0) ||
        (Parameters->VirtualMemoryThreshold > MaximumHeapBlockSize)) {

        Parameters->VirtualMemoryThreshold = MaximumHeapBlockSize;
    }
    //
    // The default commit size is MINIMUM_HEAP_COMMIT and the default
    // reserve size is 64 times that.
    //
    if (!ARGUMENT_PRESENT( CommitSize )) {
        CommitSize = MINIMUM_HEAP_COMMIT;

        if (!ARGUMENT_PRESENT( ReserveSize )) {
            ReserveSize = 64 * CommitSize;
        } else {
            ReserveSize = ROUND_UP_TO_POWER2( ReserveSize,
                                              MINIMUM_HEAP_COMMIT );
        }
    } else {
        //
        // The heap actually uses space that is reserved and committed
        // to store internal data structures (the LOCK,
        // the HEAP_PSEUDO_TAG, etc.). These structures can be larger than
        // 4K especially on a 64-bit build. So, make sure the commit
        // is at least 8K in length.
        //
        CommitSize = ROUND_UP_TO_POWER2(CommitSize, MINIMUM_HEAP_COMMIT);

        if (!ARGUMENT_PRESENT( ReserveSize )) {
            ReserveSize = ROUND_UP_TO_POWER2( CommitSize, 16 * PAGE_SIZE );
        } else {
            ReserveSize = ROUND_UP_TO_POWER2( ReserveSize,
                                              MINIMUM_HEAP_COMMIT );

            //
            // If the CommitSize is larger than the ReserveSize, adjust
            // it to the ReserveSize. ReserveSize is already rounded up to
            // MINIMUM_HEAP_COMMIT.
            //
            if ( CommitSize > ReserveSize ) {
                CommitSize = ReserveSize;
            }
        }
    }
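
    //
    // Illustrative arithmetic (assumes a 4K-page x86 build, so
    // MINIMUM_HEAP_COMMIT == 4096, and the usual rounding definition
    // ROUND_UP_TO_POWER2(x, n) == ((x + n - 1) & ~(n - 1))):
    //
    //     CommitSize == 0, ReserveSize == 0  ->  commit 4096, reserve 64 * 4096 == 256K
    //     CommitSize == 0x1234               ->  rounded up to 0x2000
    //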
#ifndef NTOS_KERNEL_RUNTIME
    //
    // In the non kernel case check if we are creating a debug heap;
    // the test checks that skip validation checks is false.
    //
    if (DEBUG_HEAP( Flags )) {
        return RtlDebugCreateHeap( Flags,
                                   HeapBase,
                                   ReserveSize,
                                   CommitSize,
                                   Lock,
                                   Parameters );
    }
#endif // NTOS_KERNEL_RUNTIME

    //
    // Compute the size of the heap which will be the
    // heap struct itself and if we are to serialize with
    // our own lock then add room for the lock. If the
    // user did not supply the lock then set the lock
    // variable to -1.
    //
    SizeOfHeapHeader = sizeof( HEAP );

    if (!(Flags & HEAP_NO_SERIALIZE)) {
        if (ARGUMENT_PRESENT( Lock )) {
            Flags |= HEAP_LOCK_USER_ALLOCATED;
        } else {
            SizeOfHeapHeader += sizeof( HEAP_LOCK );
            Lock = (PHEAP_LOCK)-1;
        }
    } else if (ARGUMENT_PRESENT( Lock )) {
        //
        // In this error case the caller said not to serialize but also fed us
        // a lock
        //
        return NULL;
    }
    //
    // See if the caller allocated the space for the heap.
    //
    if (ARGUMENT_PRESENT( HeapBase )) {
        //
        // The caller specified a heap base; now check if there is
        // a caller supplied commit routine
        //
        if (Parameters->CommitRoutine != NULL) {
            //
            // The caller specified a commit routine so the caller
            // also needs to have given us certain parameters and made
            // sure the heap is not growable. Otherwise it is an error
            //
            if ((Parameters->InitialCommit == 0) ||
                (Parameters->InitialReserve == 0) ||
                (Parameters->InitialCommit > Parameters->InitialReserve) ||
                (Flags & HEAP_GROWABLE)) {

                return NULL;
            }

            //
            // Set the committed base and the uncommitted base to the
            // proper pointers within the heap.
            //
            CommittedBase = HeapBase;
            UnCommittedBase = (PCHAR)CommittedBase + Parameters->InitialCommit;
            ReserveSize = Parameters->InitialReserve;

            //
            // Zero out a page of the heap where our first part goes
            //
            RtlZeroMemory( CommittedBase, Parameters->InitialCommit );
        } else {
            //
            // The user gave us the space but no commit routine,
            // so query the base to get its size
            //
            Status = ZwQueryVirtualMemory( NtCurrentProcess(),
                                           HeapBase,
                                           MemoryBasicInformation,
                                           &MemoryInformation,
                                           sizeof( MemoryInformation ),
                                           NULL );

            if (!NT_SUCCESS( Status )) {
                return NULL;
            }

            //
            // Make sure the user gave us a base address for this block
            // and that the memory is not free
            //
            if (MemoryInformation.BaseAddress != HeapBase) {
                return NULL;
            }

            if (MemoryInformation.State == MEM_FREE) {
                return NULL;
            }

            //
            // Set our commit base to the start of the range
            //
            CommittedBase = MemoryInformation.BaseAddress;

            //
            // If the memory is committed then
            // we can zero out a page worth
            //
            if (MemoryInformation.State == MEM_COMMIT) {
                RtlZeroMemory( CommittedBase, PAGE_SIZE );

                //
                // Set the commit size and uncommitted base according
                // to the start of the vm
                //
                CommitSize = MemoryInformation.RegionSize;
                UnCommittedBase = (PCHAR)CommittedBase + CommitSize;

                //
                // Find out if the uncommitted base is reserved and if so
                // then update the reserve size accordingly.
                //
                Status = ZwQueryVirtualMemory( NtCurrentProcess(),
                                               UnCommittedBase,
                                               MemoryBasicInformation,
                                               &MemoryInformation,
                                               sizeof( MemoryInformation ),
                                               NULL );

                ReserveSize = CommitSize;

                if ((NT_SUCCESS( Status )) &&
                    (MemoryInformation.State == MEM_RESERVE)) {

                    ReserveSize += MemoryInformation.RegionSize;
                }
            } else {
                //
                // The memory the user gave us is not committed so dummy
                // up these small numbers
                //
                CommitSize = MINIMUM_HEAP_COMMIT;
                UnCommittedBase = CommittedBase;
            }
        }

        //
        // The user gave us a base and we've just taken care of the committed
        // bookkeeping. So mark this segment as user supplied and set the
        // heap
        //
        SegmentFlags = HEAP_SEGMENT_USER_ALLOCATED;
        Heap = (PHEAP)HeapBase;
    } else {
        //
        // The user did not specify a heap base so we have to allocate the
        // vm here. First make sure the user did not give us a commit routine
        //
        if (Parameters->CommitRoutine != NULL) {
            return NULL;
        }

        //
        // Reserve the amount of virtual address space requested.
        //
        Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
                                          (PVOID *)&Heap,
                                          0,
                                          &ReserveSize,
                                          MEM_RESERVE,
                                          HEAP_PROTECTION );

        if (!NT_SUCCESS( Status )) {
            return NULL;
        }

        //
        // Indicate that this segment is not user supplied
        //
        SegmentFlags = 0;

        //
        // Set the default commit size to the minimum heap commit
        //
        if (!ARGUMENT_PRESENT( CommitSize )) {
            CommitSize = MINIMUM_HEAP_COMMIT;
        }

        //
        // Set the committed and uncommitted base to be the same; the
        // following code will actually commit the page for us
        //
        CommittedBase = Heap;
        UnCommittedBase = Heap;
    }
    //
    // At this point we have a heap pointer, committed base, uncommitted base,
    // segment flags, commit size, and reserve size. If the committed and
    // uncommitted base are the same then we need to commit the amount
    // specified by the commit size
    //
    if (CommittedBase == UnCommittedBase) {
        Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
                                          (PVOID *)&CommittedBase,
                                          0,
                                          &CommitSize,
                                          MEM_COMMIT,
                                          HEAP_PROTECTION );

        //
        // In the non successful case we need to back out any vm reservation
        // we did earlier
        //
        if (!NT_SUCCESS( Status )) {
            if (!ARGUMENT_PRESENT(HeapBase)) {
                //
                // Return the reserved virtual address space.
                //
                RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
                                           (PVOID *)&Heap,
                                           &ReserveSize,
                                           MEM_RELEASE );
            }

            return NULL;
        }

        //
        // The new uncommitted base is now adjusted above what we just
        // committed
        //
        UnCommittedBase = (PVOID)((PCHAR)UnCommittedBase + CommitSize);
    }

    //
    // At this point we have memory for the start of the heap committed and
    // ready to be initialized. So now we need to initialize the heap
    //

    //
    // Calculate the end of the heap header and make room for 8 uncommitted
    // range structures. Once we have the room for them then chain them
    // together and null terminate the chain
    //
    NextHeapHeaderAddress = Heap + 1;

    UnCommittedRange = (PHEAP_UNCOMMMTTED_RANGE)ROUND_UP_TO_POWER2( NextHeapHeaderAddress,
                                                                    sizeof( QUAD ) );

    InitialCountOfUnusedUnCommittedRanges = 8;

    SizeOfHeapHeader += InitialCountOfUnusedUnCommittedRanges * sizeof( *UnCommittedRange );

    //
    // What a hack: pp is really a pointer to the Next field of the
    // uncommitted range structure, so we set Next by storing through pp
    //
    pp = &Heap->UnusedUnCommittedRanges;

    while (InitialCountOfUnusedUnCommittedRanges--) {
        *pp = UnCommittedRange;
        pp = &UnCommittedRange->Next;
        UnCommittedRange += 1;
    }

    NextHeapHeaderAddress = UnCommittedRange;
    *pp = NULL;
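
    //
    // Illustrative layout (not from the original source): after the loop
    // above, the eight spare HEAP_UNCOMMMTTED_RANGE structures sit just
    // past the HEAP header, singly linked through their Next fields:
    //
    //     Heap->UnusedUnCommittedRanges -> [0] -> [1] -> ... -> [7] -> NULL
    //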
    //
    // Check if tagging is enabled in global flags. This check is always true
    // in a debug build.
    //
    // If tagging is enabled then make room for 129 pseudo tag heap entries,
    // which is one more than the number of free lists. Also point the heap
    // header to this array of pseudo tag entries.
    //
    if (IS_HEAP_TAGGING_ENABLED()) {
        Heap->PseudoTagEntries = (PHEAP_PSEUDO_TAG_ENTRY)ROUND_UP_TO_POWER2( NextHeapHeaderAddress,
                                                                             sizeof( QUAD ) );
        SizeOfHeapHeader += HEAP_NUMBER_OF_PSEUDO_TAG * sizeof( HEAP_PSEUDO_TAG_ENTRY );

        //
        // Update the next address with the number of pseudo tags
        // (The math is right here because Heap->PseudoTagEntries is of
        // type PHEAP_PSEUDO_TAG_ENTRY)
        //
        NextHeapHeaderAddress = Heap->PseudoTagEntries + HEAP_NUMBER_OF_PSEUDO_TAG;
    }

    //
    // Round the size of the heap header to the next 8 byte boundary
    //
    SizeOfHeapHeader = (ULONG) ROUND_UP_TO_POWER2( SizeOfHeapHeader,
                                                   HEAP_GRANULARITY );

    //
    // If the size of the heap header is larger than the native
    // page size, you have a problem. Further, if the CommitSize passed
    // in was smaller than the SizeOfHeapHeader, you may not even make it
    // this far before death...
    //
    // HeapDbgPrint() doesn't work for IA64 yet.
    //
    // HeapDbgPrint(("Size of the heap header is %u bytes, commit was %u bytes\n", SizeOfHeapHeader, (ULONG) CommitSize));
    //

    //
    // Fill in the heap header fields
    //
    Heap->Entry.Size = (USHORT)(SizeOfHeapHeader >> HEAP_GRANULARITY_SHIFT);
    Heap->Entry.Flags = HEAP_ENTRY_BUSY;

    Heap->Signature = HEAP_SIGNATURE;
    Heap->Flags = Flags;
    Heap->ForceFlags = (Flags & (HEAP_NO_SERIALIZE |
                                 HEAP_GENERATE_EXCEPTIONS |
                                 HEAP_ZERO_MEMORY |
                                 HEAP_REALLOC_IN_PLACE_ONLY |
                                 HEAP_VALIDATE_PARAMETERS_ENABLED |
                                 HEAP_VALIDATE_ALL_ENABLED |
                                 HEAP_TAIL_CHECKING_ENABLED |
                                 HEAP_CREATE_ALIGN_16 |
                                 HEAP_FREE_CHECKING_ENABLED));

    // Heap->FreeListsInUseTerminate = 0xFFFF;
    Heap->u2.DecommitCount = 0;
    Heap->HeaderValidateLength = (USHORT)((PCHAR)NextHeapHeaderAddress - (PCHAR)Heap);
    Heap->HeaderValidateCopy = NULL;
    Heap->Entry.SmallTagIndex = (UCHAR)USER_SHARED_DATA->TickCount.LowPart;

    //
    // Initialize the free lists to be all empty
    //
    FreeListHead = &Heap->FreeLists[ 0 ];
    n = HEAP_MAXIMUM_FREELISTS;

    while (n--) {
        InitializeListHead( FreeListHead );
        FreeListHead++;
    }
    //
    // Make it so that there are no big block allocations
    //
    InitializeListHead( &Heap->VirtualAllocdBlocks );

    //
    // Initialize the critical section that controls access to
    // the free list. If the lock variable is -1 then the caller
    // did not supply a lock so we need to make room for one
    // and initialize it.
    //
    if (Lock == (PHEAP_LOCK)-1) {
        Lock = (PHEAP_LOCK)NextHeapHeaderAddress;
        Status = RtlInitializeLockRoutine( Lock );

        if (!NT_SUCCESS( Status )) {
            if (!ARGUMENT_PRESENT(HeapBase)) {
                RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
                                           (PVOID *)&Heap,
                                           &ReserveSize,
                                           MEM_RELEASE );
            }

            return NULL;
        }

        NextHeapHeaderAddress = (PHEAP_LOCK)Lock + 1;
    }

    Heap->LockVariable = Lock;
    Heap->LastSegmentIndex = 0;

    //
    // Initialize the first segment for the heap
    //
    if (!RtlpInitializeHeapSegment( Heap,
                                    (PHEAP_SEGMENT)((PCHAR)Heap + SizeOfHeapHeader),
                                    0,
                                    SegmentFlags,
                                    CommittedBase,
                                    UnCommittedBase,
                                    (PCHAR)CommittedBase + ReserveSize )) {
        if (!ARGUMENT_PRESENT(HeapBase)) {
            RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
                                       (PVOID *)&Heap,
                                       &ReserveSize,
                                       MEM_RELEASE );
        }

        return NULL;
    }

    //
    // Fill in additional heap entry fields
    //
    Heap->ProcessHeapsListIndex = 0;
    Heap->SegmentReserve = Parameters->SegmentReserve;
    Heap->SegmentCommit = Parameters->SegmentCommit;
    Heap->DeCommitFreeBlockThreshold = Parameters->DeCommitFreeBlockThreshold >> HEAP_GRANULARITY_SHIFT;
    Heap->DeCommitTotalFreeThreshold = Parameters->DeCommitTotalFreeThreshold >> HEAP_GRANULARITY_SHIFT;
    Heap->MaximumAllocationSize = Parameters->MaximumAllocationSize;
    Heap->VirtualMemoryThreshold = (ULONG) (ROUND_UP_TO_POWER2( Parameters->VirtualMemoryThreshold,
                                                                HEAP_GRANULARITY ) >> HEAP_GRANULARITY_SHIFT);
    Heap->CommitRoutine = Parameters->CommitRoutine;

    //
    // We either align the heap at 16 or 8 byte boundaries. The AlignRound
    // and AlignMask are used to bring allocation sizes up to the next
    // boundary. The align round includes the heap header and the optional
    // check tail size
    //
    if (Flags & HEAP_CREATE_ALIGN_16) {
        Heap->AlignRound = 15 + sizeof( HEAP_ENTRY );
        Heap->AlignMask = ~((ULONG_PTR)15);
    } else {
        Heap->AlignRound = HEAP_GRANULARITY - 1 + sizeof( HEAP_ENTRY );
        Heap->AlignMask = ~((ULONG_PTR)HEAP_GRANULARITY - 1);
    }

    if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) {
        Heap->AlignRound += CHECK_HEAP_TAIL_SIZE;
    }
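
    //
    // Illustrative arithmetic (assumes 8 byte granularity and an 8 byte
    // HEAP_ENTRY, so AlignRound == 15 and AlignMask == ~7): a 13 byte
    // request without tail checking rounds to
    //
    //     (13 + 15) & ~7 == 24
    //
    // i.e. a 24 byte block including the 8 byte header.
    //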
#ifndef NTOS_KERNEL_RUNTIME
    //
    // In the non kernel case we need to add this heap to the process's heap
    // list
    //
    RtlpAddHeapToProcessList( Heap );

    //
    // Initialize the heap lookaside lists. This is only for the user mode
    // heap and the heap contains a pointer to the lookaside list array.
    // The array is sized the same as the dedicated free lists. First we
    // allocate space for the lookaside lists and then we initialize each
    // lookaside list.
    //
    // But if the caller asked for no serialization or for a non growable
    // heap then we won't enable the lookaside lists.
    //
    Heap->FrontEndHeap = NULL;
    Heap->FrontHeapLockCount = 0;
    Heap->FrontEndHeapType = 0;

    if ((!(Flags & HEAP_NO_SERIALIZE)) &&
        ( (Flags & HEAP_GROWABLE)) &&
        (!(RtlpDisableHeapLookaside & HEAP_COMPAT_DISABLE_LOOKASIDES))) {

        //
        // We do not allow creation of the cache heap if tags are enabled.
        // We use the tag field to sign our private blocks
        //
        if (RtlpIsLowFragHeapEnabled() &&
            !IS_HEAP_TAGGING_ENABLED()) {

            RtlpActivateLowFragmentationHeap(Heap);
        } else {
            ULONG i;

            Heap->FrontEndHeap = RtlAllocateHeap( Heap,
                                                  HEAP_ZERO_MEMORY,
                                                  sizeof(HEAP_LOOKASIDE) * HEAP_MAXIMUM_FREELISTS );

            if (Heap->FrontEndHeap != NULL) {
                Heap->FrontEndHeapType = HEAP_FRONT_LOOKASIDE;

                for (i = 0; i < HEAP_MAXIMUM_FREELISTS; i += 1) {
                    //
                    // N.B. we should call here the function:
                    //
                    //     RtlpInitializeHeapLookaside( &(((PHEAP_LOOKASIDE)(Heap->Lookaside))[i]), 32 );
                    //
                    // But for performance reasons, because most fields are 0,
                    // we've set the flag HEAP_ZERO_MEMORY in the allocation above
                    // and we'll initialize the only two NON-NULL fields: Depth and
                    // MaximumDepth. IceCap data showed that RtlCreateHeap spends
                    // ~30% of the time within these calls.
                    //
                    // N.B. This works based on the assumption that
                    // RtlInitializeSListHead zeroed the SLIST_HEADER structure
                    //
                    PHEAP_LOOKASIDE HeapLookaside = &(((PHEAP_LOOKASIDE)(Heap->FrontEndHeap))[i]);

                    HeapLookaside->Depth = MINIMUM_LOOKASIDE_DEPTH;
                    HeapLookaside->MaximumDepth = 256; //Depth;
                }
            }
        }
    }

    if( IsHeapLogging( Heap )) {
        PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
        PPERFINFO_TRACE_HEADER pEventHeader = NULL;
        USHORT ReqSize = sizeof(HEAP_EVENT_CREATE) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);

        AcquireBufferLocation(&pEventHeader, &pThreadLocalData, &ReqSize);

        if(pEventHeader && pThreadLocalData) {
            PHEAP_EVENT_CREATE pHeapEvent = (PHEAP_EVENT_CREATE)((SIZE_T)pEventHeader
                                            +(SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));

            pEventHeader->Packet.Size = (USHORT) ReqSize;
            pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_CREATE;

            pHeapEvent->HeapHandle = (PVOID)Heap;
            pHeapEvent->Flags = Flags;
            ReleaseBufferLocation(pThreadLocalData);
        }
    }
#endif // NTOS_KERNEL_RUNTIME

    //
    // And return the fully initialized heap to our caller
    //
    return (PVOID)Heap;
}
PVOID
RtlDestroyHeap (
    IN PVOID HeapHandle
    )

/*++
Routine Description:
    This routine is the opposite of RtlCreateHeap. It tears down an
    existing heap structure.
Arguments:
    HeapHandle - Supplies a pointer to the heap being destroyed
Return Value:
    PVOID - Returns null if the heap was destroyed completely and a
        pointer back to the heap if for some reason the heap could
        not be destroyed.
--*/
{
    PHEAP Heap = (PHEAP)HeapHandle;
    PHEAP_SEGMENT Segment;
    PHEAP_UCR_SEGMENT UCRSegments;
    PLIST_ENTRY Head, Next;
    PVOID BaseAddress;
    SIZE_T RegionSize;
    UCHAR SegmentIndex;
    PVOID LowFragmentationHeap;

    //
    // Validate that HeapAddress points to a HEAP structure.
    //
    RTL_PAGED_CODE();

    if (HeapHandle == NULL) {
        HeapDebugPrint(( "Ignoring RtlDestroyHeap( NULL )\n" ));

        return NULL;
    }

    //
    // Check if this is the debug version of heap using page allocation
    // with guard pages
    //
    IF_DEBUG_PAGE_HEAP_THEN_RETURN( HeapHandle,
                                    RtlpDebugPageHeapDestroy( HeapHandle ));

#ifndef NTOS_KERNEL_RUNTIME
    //
    // In the non kernel case check if this is the debug version of heap
    // and if so then call the debug version to do the teardown
    //
    if (DEBUG_HEAP( Heap->Flags )) {
        if (!RtlDebugDestroyHeap( HeapHandle )) {
            return HeapHandle;
        }
    }

    //
    // We are not allowed to destroy the process heap
    //
    if (HeapHandle == NtCurrentPeb()->ProcessHeap) {
        return HeapHandle;
    }

    if (LowFragmentationHeap = RtlpGetLowFragHeap(Heap)) {
        RtlpDestroyLowFragHeap(LowFragmentationHeap);
    }
#endif // NTOS_KERNEL_RUNTIME

    //
    // For every big allocation we remove it from the list and free the
    // vm
    //
    Head = &Heap->VirtualAllocdBlocks;
    Next = Head->Flink;

    while (Head != Next) {
        BaseAddress = CONTAINING_RECORD( Next, HEAP_VIRTUAL_ALLOC_ENTRY, Entry );
        Next = Next->Flink;
        RegionSize = 0;

        RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
                                   (PVOID *)&BaseAddress,
                                   &RegionSize,
                                   MEM_RELEASE );
    }
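
    //
    // Illustrative note (not from the original source): CONTAINING_RECORD
    // recovers the enclosing structure from a pointer to one of its fields.
    // With Next pointing at the Entry links of a big allocation,
    //
    //     CONTAINING_RECORD( Next, HEAP_VIRTUAL_ALLOC_ENTRY, Entry )
    //
    // is ((PCHAR)Next - FIELD_OFFSET(HEAP_VIRTUAL_ALLOC_ENTRY, Entry)),
    // the base of the entry, which here is also the base of the virtual
    // memory region being released (RegionSize == 0 with MEM_RELEASE
    // frees the whole region).
    //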
#ifndef NTOS_KERNEL_RUNTIME
    //
    // In the non kernel case we need to destroy any heap tags we have set up
    // and remove this heap from the process heap list
    //
    RtlpDestroyTags( Heap );
    RtlpRemoveHeapFromProcessList( Heap );
#endif // NTOS_KERNEL_RUNTIME

    //
    // If the heap is serialized, delete the critical section created
    // by RtlCreateHeap.
    //
    if (!(Heap->Flags & HEAP_NO_SERIALIZE)) {
        if (!(Heap->Flags & HEAP_LOCK_USER_ALLOCATED)) {
            (VOID)RtlDeleteLockRoutine( Heap->LockVariable );
        }

        Heap->LockVariable = NULL;
    }

    //
    // For every uncommitted segment we free its vm
    //
    UCRSegments = Heap->UCRSegments;
    Heap->UCRSegments = NULL;

    while (UCRSegments) {
        BaseAddress = UCRSegments;
        UCRSegments = UCRSegments->Next;
        RegionSize = 0;

        RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
                                   &BaseAddress,
                                   &RegionSize,
                                   MEM_RELEASE );
    }

#ifndef NTOS_KERNEL_RUNTIME
    //
    // Free the large block index, if we have one
    //
    if (Heap->LargeBlocksIndex) {
        PHEAP_INDEX HeapIndex = (PHEAP_INDEX)Heap->LargeBlocksIndex;

        //
        // Save the committed size for the index.
        //
        RegionSize = HeapIndex->VirtualMemorySize;
        Heap->LargeBlocksIndex = NULL;

        RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
                                   &HeapIndex,
                                   &RegionSize,
                                   MEM_RELEASE );
    }
#endif // NTOS_KERNEL_RUNTIME

    //
    // For every segment in the heap we call a worker routine to
    // destroy the segment
    //
    SegmentIndex = HEAP_MAXIMUM_SEGMENTS;

    while (SegmentIndex--) {
        Segment = Heap->Segments[ SegmentIndex ];

        if (Segment) {
            RtlpDestroyHeapSegment( Segment );
        }
    }

#ifndef NTOS_KERNEL_RUNTIME
    if( IsHeapLogging( HeapHandle ) ) {
        PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
        PPERFINFO_TRACE_HEADER pEventHeader = NULL;
        USHORT ReqSize = sizeof(NTDLL_EVENT_COMMON) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);

        AcquireBufferLocation(&pEventHeader, &pThreadLocalData, &ReqSize );

        if(pEventHeader && pThreadLocalData) {
            PNTDLL_EVENT_COMMON pHeapEvent = (PNTDLL_EVENT_COMMON)( (SIZE_T)pEventHeader
                                             + (SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));

            pEventHeader->Packet.Size = (USHORT) ReqSize;
            pEventHeader->Packet.HookId= PERFINFO_LOG_TYPE_HEAP_DESTROY;

            pHeapEvent->Handle = (PVOID)HeapHandle;
            ReleaseBufferLocation(pThreadLocalData);
        }
    }
#endif // NTOS_KERNEL_RUNTIME

    //
    // And we return to our caller
    //
    return NULL;
}
PVOID
RtlAllocateHeap (
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN SIZE_T Size
    )

/*++
Routine Description:
    This routine allocates memory of the specified size from the specified
    heap.
Arguments:
    HeapHandle - Supplies a pointer to an initialized heap structure
    Flags - Specifies the set of flags to use to control the allocation
    Size - Specifies the size, in bytes, of the allocation
Return Value:
    PVOID - returns a pointer to the newly allocated block
--*/
{
    PHEAP Heap = (PHEAP)HeapHandle;
    PULONG FreeListsInUse;
    ULONG FreeListsInUseUlong;
    SIZE_T AllocationSize;
    SIZE_T FreeSize, AllocationIndex;
    PLIST_ENTRY FreeListHead, Next;
    PHEAP_ENTRY BusyBlock;
    PHEAP_FREE_ENTRY FreeBlock, SplitBlock, SplitBlock2;
    ULONG InUseIndex;
    UCHAR FreeFlags;
    NTSTATUS Status;
    EXCEPTION_RECORD ExceptionRecord;
    PVOID ReturnValue = NULL;
    BOOLEAN LockAcquired = FALSE;
    SIZE_T BlockSize = 0;
    PVOID FrontEndHeap = NULL;

    HEAP_PERF_DECLARE_TIMER();

    RTL_PAGED_CODE();

    //
    // Take the caller's flags and add in the flags that we must forcibly set
    // in the heap
    //
    Flags |= Heap->ForceFlags;

    //
    // Check for special features that force us to call the slow, do-everything
    // version. We do everything slow for any of the following flags.
    //
    //     HEAP_SLOW_FLAGS defined as 0x6f030f60
    //
    //     HEAP_DEBUG_FLAGS, defined as 0x69020000 (heappriv.h)
    //
    //     HEAP_VALIDATE_PARAMETERS_ENABLED 0x40000000 (heap.h)
    //
    //     HEAP_VALIDATE_ALL_ENABLED 0x20000000 (heap.h)
    //
    //     HEAP_CAPTURE_STACK_BACKTRACES 0x08000000 (heap.h)
    //
    //     HEAP_CREATE_ENABLE_TRACING 0x00020000 (ntrtl.h winnt obsolete)
    //
    //     HEAP_FLAG_PAGE_ALLOCS 0x01000000 (heappage.h)
    //
    //     HEAP_SETTABLE_USER_FLAGS 0x00000E00 (ntrtl.h)
    //
    //     HEAP_NEED_EXTRA_FLAGS 0x0f000100 (heap.h)
    //
    //     HEAP_CREATE_ALIGN_16 0x00010000 (ntrtl.h winnt obsolete)
    //
    //     HEAP_FREE_CHECKING_ENABLED 0x00000040 (ntrtl.h winnt)
    //
    //     HEAP_TAIL_CHECKING_ENABLED 0x00000020 (ntrtl.h winnt)
    //
    // We also do everything slow if the size is greater than max long
    //
    if ((Flags & HEAP_SLOW_FLAGS) || (Size >= 0x80000000)) {
        ReturnValue = RtlAllocateHeapSlowly( HeapHandle, Flags, Size );

        if ( (ReturnValue == NULL) &&
             (Flags & HEAP_GENERATE_EXCEPTIONS) ) {
            //
            // Construct an exception record.
            //
            ExceptionRecord.ExceptionCode = STATUS_NO_MEMORY;
            ExceptionRecord.ExceptionRecord = (PEXCEPTION_RECORD)NULL;
            ExceptionRecord.NumberParameters = 1;
            ExceptionRecord.ExceptionFlags = 0;
            ExceptionRecord.ExceptionInformation[ 0 ] = Size;

            RtlRaiseException( &ExceptionRecord );
        }

        return ReturnValue;
    }
#ifndef NTOS_KERNEL_RUNTIME
    if ((FrontEndHeap = RtlpGetLowFragHeap(Heap))
            &&
        RtlpIsFrontHeapUnlocked(Heap)
            &&
        !(Flags & (HEAP_NO_CACHE_BLOCK | HEAP_NO_SERIALIZE))) {

        ReturnValue = RtlpLowFragHeapAlloc( FrontEndHeap, (Size ? Size : 1) );

        if (ReturnValue != NULL) {
            if (Flags & HEAP_ZERO_MEMORY) {
                RtlZeroMemory( ReturnValue, Size );
            }

            if( IsHeapLogging( HeapHandle ) ) {
                PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
                PPERFINFO_TRACE_HEADER pEventHeader = NULL;
                USHORT ReqSize = sizeof(HEAP_EVENT_ALLOC) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);

                AcquireBufferLocation(&pEventHeader, &pThreadLocalData, &ReqSize);

                if(pEventHeader && pThreadLocalData) {
                    PHEAP_EVENT_ALLOC pHeapEvent = (PHEAP_EVENT_ALLOC)((SIZE_T)pEventHeader
                                                   + (SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));

                    pEventHeader->Packet.Size = (USHORT) ReqSize;
                    pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_ALLOC;

                    pHeapEvent->HeapHandle = (PVOID)HeapHandle;
                    pHeapEvent->Size = Size;
                    pHeapEvent->Address = (PVOID)ReturnValue;
                    pHeapEvent->Source = MEMORY_FROM_LOWFRAG;
                    ReleaseBufferLocation(pThreadLocalData);
                }
            }

            return ReturnValue;
        }
    }
#endif // NTOS_KERNEL_RUNTIME
    //
    // At this point we know we are doing everything in this routine
    // and not taking the slow route.
    //
    // Round the requested size up to the allocation granularity. Note
    // that if the request is for 0 bytes, we still allocate memory, because
    // we add in an extra 1 byte to protect ourselves from mistakes.
    //
    // Allocation size will be either 16, 24, 32, ...
    // Allocation index will be 2, 3, 4, ...
    //
    // Note that allocation size 8 is skipped, as are indices 0 and 1
    //
    AllocationSize = ((Size ? Size : 1) + HEAP_GRANULARITY - 1 + sizeof( HEAP_ENTRY ))
                     & ~(HEAP_GRANULARITY -1);

#ifndef NTOS_KERNEL_RUNTIME
    //
    // Adjust the size to a page boundary to reduce virtual address fragmentation
    //
    if (FrontEndHeap
            &&
        (AllocationSize > HEAP_LARGEST_LFH_BLOCK)) {

        AllocationSize = ROUND_UP_TO_POWER2(AllocationSize, PAGE_SIZE);
    }
#endif // NTOS_KERNEL_RUNTIME

    AllocationIndex = AllocationSize >> HEAP_GRANULARITY_SHIFT;
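
    //
    // Illustrative arithmetic (assumes HEAP_GRANULARITY == 8 and an
    // 8 byte HEAP_ENTRY, per the values noted earlier in this file):
    //
    //     Size == 0  ->  (1 + 7 + 8) & ~7 == 16,  AllocationIndex == 2
    //     Size == 24 ->  (24 + 7 + 8) & ~7 == 32, AllocationIndex == 4
    //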
    //
    // If there is a lookaside list and the index is within limits then
    // try and allocate from the lookaside list. We'll actually capture
    // the lookaside pointer from the heap and only use the captured pointer.
    // This will take care of the condition where a walk or lock heap can
    // cause us to check for a non null pointer and then have it become null
    // when we read it again. If it is non null to start with then even if
    // the user walks or locks the heap via another thread the pointer is
    // still valid here so we can still try and do a lookaside list pop.
    //
#ifndef NTOS_KERNEL_RUNTIME
    {
        PHEAP_LOOKASIDE Lookaside = (PHEAP_LOOKASIDE)RtlpGetLookasideHeap(Heap);

        if ((Lookaside != NULL) &&
            RtlpIsFrontHeapUnlocked(Heap) &&
            (AllocationIndex < HEAP_MAXIMUM_FREELISTS)) {

            //
            // If the number of elapsed operations is 128 times the
            // lookaside depth then it is time to adjust the depth
            //
            if ((LONG)(Lookaside[AllocationIndex].TotalAllocates - Lookaside[AllocationIndex].LastTotalAllocates) >=
                (Lookaside[AllocationIndex].Depth * 128)) {

                RtlpAdjustHeapLookasideDepth(&(Lookaside[AllocationIndex]));
            }
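
            //
            // Illustrative arithmetic (not from the original source): with
            // Depth == 4, the depth is re-tuned once every 4 * 128 == 512
            // allocations from this size class.
            //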
            ReturnValue = RtlpAllocateFromHeapLookaside(&(Lookaside[AllocationIndex]));

            if (ReturnValue != NULL) {
                PHEAP_ENTRY xBusyBlock;

                xBusyBlock = ((PHEAP_ENTRY)ReturnValue) - 1;
                xBusyBlock->UnusedBytes = (UCHAR)(AllocationSize - Size);
                RtlpSetSmallTagIndex(Heap, xBusyBlock, 0);

                if (Flags & HEAP_ZERO_MEMORY) {
                    RtlZeroMemory( ReturnValue, Size );
                }

#ifndef NTOS_KERNEL_RUNTIME
                if( IsHeapLogging( HeapHandle ) && (TraceLevel & LOG_LOOKASIDE)) {
                    PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
                    PPERFINFO_TRACE_HEADER pEventHeader = NULL;
                    USHORT ReqSize = sizeof(HEAP_EVENT_ALLOC) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);

                    AcquireBufferLocation(&pEventHeader, &pThreadLocalData,&ReqSize);

                    if(pEventHeader && pThreadLocalData) {
                        PHEAP_EVENT_ALLOC pHeapEvent = (PHEAP_EVENT_ALLOC)( (SIZE_T)pEventHeader
                                                       + (SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));

                        pEventHeader->Packet.Size = (USHORT) ReqSize;
                        pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_ALLOC;

                        pHeapEvent->HeapHandle = (PVOID)HeapHandle;
                        pHeapEvent->Size = Size;
                        pHeapEvent->Address = (PVOID)ReturnValue;
                        pHeapEvent->Source = MEMORY_FROM_LOOKASIDE;
                        ReleaseBufferLocation(pThreadLocalData);
                    }
                }
#endif //NTOS_KERNEL_RUNTIME

                return ReturnValue;
            }
        }
    }
#endif // NTOS_KERNEL_RUNTIME
    try {
        HEAP_PERF_START_TIMER(Heap);

        //
        // Check if we need to serialize our access to the heap
        //
        if (!(Flags & HEAP_NO_SERIALIZE)) {
            //
            // Lock the free list.
            //
            RtlAcquireLockRoutine( Heap->LockVariable );
            LockAcquired = TRUE;
        }

        //
        // If the allocation index is less than the maximum free list size
        // then we can use the index to check the free list otherwise we have
        // to either pull the entry off of the [0] index list or allocate
        // memory directly for this request.
        //
        if (AllocationIndex < HEAP_MAXIMUM_FREELISTS) {
            //
            // With a size that matches a free list size grab the head
            // of the list and check if there is an available entry
            //
            FreeListHead = &Heap->FreeLists[ AllocationIndex ];

            if ( !IsListEmpty( FreeListHead )) {
                //
                // We're in luck: the list has an entry, so now get the free
                // entry, copy its flags, and remove it from the free list
                //
                FreeBlock = CONTAINING_RECORD( FreeListHead->Blink,
                                               HEAP_FREE_ENTRY,
                                               FreeList );

                FreeFlags = FreeBlock->Flags;
                RtlpFastRemoveDedicatedFreeBlock( Heap, FreeBlock );

                //
                // Adjust the total number of bytes free in the heap
                //
                Heap->TotalFreeSize -= AllocationIndex;

                //
                // Mark the block as busy and set the number of bytes
                // unused and the tag index. Also if it is the last entry
                // then keep that flag.
                //
                BusyBlock = (PHEAP_ENTRY)FreeBlock;
                BusyBlock->Flags = HEAP_ENTRY_BUSY | (FreeFlags & HEAP_ENTRY_LAST_ENTRY);
                RtlpSetUnusedBytes(Heap, BusyBlock, AllocationSize - Size);
                RtlpSetSmallTagIndex(Heap, BusyBlock, 0);
            } else {
                //
                // The free list that matches our request is empty
                //
                // Scan the free list in use vector to find the smallest
                // available free block large enough for our allocation.
                //

                //
                // Compute the index of the ULONG where the scan should begin
                //
                InUseIndex = (ULONG) (AllocationIndex >> 5);
                FreeListsInUse = &Heap->u.FreeListsInUseUlong[InUseIndex];

                //
                // Mask off the bits in the first ULONG that represent allocations
                // smaller than we need.
                //
                FreeListsInUseUlong = *FreeListsInUse++ & ~((1 << ((ULONG) AllocationIndex & 0x1f)) - 1);
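
                //
                // Illustrative arithmetic (not from the original source):
                // for AllocationIndex == 70,
                //
                //     InUseIndex == 70 >> 5 == 2              (third ULONG of the bitmap)
                //     mask == ~((1 << (70 & 0x1f)) - 1) == ~0x3f
                //
                // which clears the bits for indices 64..69, leaving only
                // free lists at index 70 and above visible to the scan.
                //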
                //
                // Begin unrolled loop to scan the bit vector.
                //
                switch (InUseIndex) {
                case 0:
                    if (FreeListsInUseUlong) {
                        FreeListHead = &Heap->FreeLists[0];
                        break;
                    }

                    FreeListsInUseUlong = *FreeListsInUse++;

                    //
                    // deliberate fallthrough to next ULONG
                    //
                case 1:
                    if (FreeListsInUseUlong) {
                        FreeListHead = &Heap->FreeLists[32];
                        break;
                    }

                    FreeListsInUseUlong = *FreeListsInUse++;

                    //
                    // deliberate fallthrough to next ULONG
                    //
                case 2:
                    if (FreeListsInUseUlong) {
                        FreeListHead = &Heap->FreeLists[64];
                        break;
                    }

                    FreeListsInUseUlong = *FreeListsInUse++;

                    //
                    // deliberate fallthrough to next ULONG
                    //
                case 3:
                    if (FreeListsInUseUlong) {
                        FreeListHead = &Heap->FreeLists[96];
                        break;
                    }

                    //
                    // deliberate fallthrough to non dedicated list
                    //
                default:
                    //
                    // No suitable entry on the free list was found.
                    //
                    goto LookInNonDedicatedList;
                }

                //
                // A free list has been found with a large enough allocation.
                // FreeListHead contains the base of the vector it was found in.
                // FreeListsInUseUlong contains the vector.
                //
                FreeListHead += RtlFindFirstSetRightMember( FreeListsInUseUlong );
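
                //
                // Illustrative example (assumes RtlFindFirstSetRightMember
                // returns the zero-based position of the least significant
                // set bit, as its use here implies): with
                // FreeListsInUseUlong == 0x28 (bits 3 and 5 set) it returns
                // 3, so the scan picks the smallest non-empty free list in
                // this ULONG's range.
                //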
  1405. //
  1406. // Grab the free block and remove it from the free list
  1407. //
  1408. FreeBlock = CONTAINING_RECORD( FreeListHead->Blink,
  1409. HEAP_FREE_ENTRY,
  1410. FreeList );
  1411. RtlpFastRemoveDedicatedFreeBlock( Heap, FreeBlock );
  1412. SplitFreeBlock:
  1413. //
  1414. // Save the blocks flags and decrement the amount of
  1415. // free space left in the heap
  1416. //
  1417. FreeFlags = FreeBlock->Flags;
  1418. Heap->TotalFreeSize -= FreeBlock->Size;
  1419. //
  1420. // Mark the block busy
  1421. //
  1422. BusyBlock = (PHEAP_ENTRY)FreeBlock;
  1423. BusyBlock->Flags = HEAP_ENTRY_BUSY;
  1424. //
  1425. // Compute the size (i.e., index) of the amount from this block
  1426. // that we don't need and can return to the free list
  1427. //
  1428. FreeSize = BusyBlock->Size - AllocationIndex;
  1429. //
  1430. // Finish setting up the rest of the new busy block
  1431. //
  1432. BusyBlock->Size = (USHORT)AllocationIndex;
  1433. RtlpSetUnusedBytes(Heap, BusyBlock, (AllocationSize - Size));
  1434. RtlpSetSmallTagIndex(Heap, BusyBlock, 0);
  1435. //
  1436. // Now if the size that we are going to free up is not zero
  1437. // then lets get to work and to the split.
  1438. //
  1439. if (FreeSize != 0) {
  1440. //
  1441. // But first we won't ever bother doing a split that only
  1442. // gives us 8 bytes back. So if free size is one then just
  1443. // bump up the size of the new busy block
  1444. //
  1445. if (FreeSize == 1) {
  1446. BusyBlock->Size += 1;
  1447. RtlpSetUnusedBytes(Heap, BusyBlock, AllocationSize + sizeof( HEAP_ENTRY ) - Size );
  1448. } else {
  1449. //
  1450. // Get a pointer to where the new free block will be.
  1451. // When we split a block the first part goes to the new
  1452. // busy block and the second part goes back to the free
  1453. // list
  1454. //
  1455. SplitBlock = (PHEAP_FREE_ENTRY)(BusyBlock + AllocationIndex);
  1456. //
  1457. // Reset the flags that we copied from the original free list
  1458. // header, and set it other size fields.
  1459. //
  1460. SplitBlock->Flags = FreeFlags;
  1461. SplitBlock->PreviousSize = (USHORT)AllocationIndex;
  1462. SplitBlock->SegmentIndex = BusyBlock->SegmentIndex;
  1463. SplitBlock->Size = (USHORT)FreeSize;
  1464. //
  1465. // If nothing else follows this entry then we will insert
  1466. // this into the corresponding free list (and update
  1467. // Segment->LastEntryInSegment)
  1468. //
  1469. if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) {
  1470. RtlpFastInsertFreeBlockDirect( Heap, SplitBlock, (USHORT)FreeSize);
  1471. Heap->TotalFreeSize += FreeSize;
  1472. } else {
  1473. //
  1474. // Otherwise we need to check the following block
  1475. // and if it is busy then update its previous size
  1476. // before inserting our new free block into the
  1477. // free list
  1478. //
  1479. SplitBlock2 = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize);
  1480. if (SplitBlock2->Flags & HEAP_ENTRY_BUSY) {
  1481. SplitBlock2->PreviousSize = (USHORT)FreeSize;
  1482. RtlpFastInsertFreeBlockDirect( Heap, SplitBlock, (USHORT)FreeSize );
  1483. Heap->TotalFreeSize += FreeSize;
  1484. } else {
  1485. //
  1486. // The following block is free so we'll merge
  1487. // these to blocks. by first merging the flags
  1488. //
  1489. SplitBlock->Flags = SplitBlock2->Flags;
  1490. //
  1491. // Removing the second block from its free list
  1492. //
  1493. RtlpFastRemoveFreeBlock( Heap, SplitBlock2 );
  1494. //
  1495. // Updating the free total number of free bytes
  1496. // in the heap and updating the size of the new
  1497. // free block
  1498. //
  1499. Heap->TotalFreeSize -= SplitBlock2->Size;
  1500. FreeSize += SplitBlock2->Size;
  1501. //
  1502. // If the new free block is still less than the
  1503. // maximum heap block size then we'll simply
  1504. // insert it back in the free list
  1505. //
  1506. if (FreeSize <= HEAP_MAXIMUM_BLOCK_SIZE) {
  1507. SplitBlock->Size = (USHORT)FreeSize;
  1508. //
  1509. // Again check if the new following block
  1510. // exists and if so then update is previous
  1511. // size
  1512. //
  1513. if (!(SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {
  1514. ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize))->PreviousSize = (USHORT)FreeSize;
  1515. }
  1516. //
  1517. // Insert the new free block into the free
  1518. // list and update the free heap size
  1519. //
  1520. RtlpFastInsertFreeBlockDirect( Heap, SplitBlock, (USHORT)FreeSize );
  1521. Heap->TotalFreeSize += FreeSize;
  1522. } else {
  1523. //
  1524. // The new free block is pretty large so we
  1525. // need to call a private routine to do the
  1526. // insert
  1527. //
  1528. RtlpInsertFreeBlock( Heap, SplitBlock, FreeSize );
  1529. }
  1530. }
  1531. }
  1532. //
  1533. // Now that free flags made it back into a free block
  1534. // we can zero out what we saved.
  1535. //
  1536. FreeFlags = 0;
  1537. //
  1538. // If splitblock now last, update LastEntryInSegment
  1539. //
  1540. if (SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY) {
  1541. PHEAP_SEGMENT Segment;
  1542. Segment = Heap->Segments[SplitBlock->SegmentIndex];
  1543. Segment->LastEntryInSegment = (PHEAP_ENTRY)SplitBlock;
  1544. }
  1545. }
  1546. }
  1547. //
  1548. // If there are no following entries then mark the new block as
  1549. // such
  1550. //
  1551. if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) {
  1552. BusyBlock->Flags |= HEAP_ENTRY_LAST_ENTRY;
  1553. }
  1554. }
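//
// A small worked illustration of the split above, in granule units
// (illustrative numbers, not from the source): a request with
// AllocationIndex 4 carved from a free block of Size 10 leaves
// FreeSize = 6; SplitBlock begins 4 heap entries past BusyBlock,
// records PreviousSize = 4, keeps the same SegmentIndex, and goes
// back on a free list, merging first with a free right neighbor
// if one exists.
//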
  1555. //
  1556. // Return the address of the user portion of the allocated block.
  1557. // This is the byte following the header.
  1558. //
  1559. ReturnValue = BusyBlock + 1;
  1560. BlockSize = BusyBlock->Size << HEAP_GRANULARITY_SHIFT;
  1561. //
  1562. // Release the lock before the zero memory call
  1563. //
  1564. if (LockAcquired) {
  1565. RtlReleaseLockRoutine( Heap->LockVariable );
  1566. LockAcquired = FALSE;
  1567. }
  1568. //
  1569. // If the flags indicate that we should zero memory then do it now
  1570. //
  1571. if (Flags & HEAP_ZERO_MEMORY) {
  1572. RtlZeroMemory( ReturnValue, Size );
  1573. }
  1574. //
  1575. // And return the allocated block to our caller
  1576. //
  1577. leave;
  1578. //
  1579. // Otherwise the allocation request is bigger than the last dedicated
  1580. // free list size. Now check if the size is within our threshold,
  1581. // meaning that it could be in the [0] free list
  1582. //
  1583. } else if (AllocationIndex <= Heap->VirtualMemoryThreshold) {
  1584. LookInNonDedicatedList:
  1585. //
  1586. // The following code cycles through the [0] free list until
  1587. // it finds a block that satisfies the request. The list
  1588. // is sorted so the search can be terminated early on success
  1589. //
  1590. FreeListHead = &Heap->FreeLists[0];
  1591. if (Heap->LargeBlocksIndex) {
  1592. //
  1593. // We can use the index to find the block very quickly
  1594. //
  1595. Next = RtlpFindEntry( Heap, (ULONG)AllocationIndex );
  1596. if ( FreeListHead != Next ) {
  1597. FreeBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  1598. if ( FreeBlock->Size >= AllocationIndex ) {
  1599. //
  1600. // We've found something that we can use so now remove
  1601. // it from the free list and go to where we treat splitting
  1602. // a free block. Note that the block we found here might
  1603. // actually be the exact size we need and that is why
  1604. // in the split free block case we have to consider having
  1605. // nothing free after the split
  1606. //
  1607. #ifndef NTOS_KERNEL_RUNTIME
  1608. if ((((PHEAP_INDEX)Heap->LargeBlocksIndex)->LargeBlocksCacheSequence)
  1609. &&
  1610. (AllocationIndex > Heap->DeCommitFreeBlockThreshold)
  1611. &&
  1612. (FreeBlock->Size > (AllocationIndex * HEAP_REUSAGE_FACTOR))) {
  1613. RtlpFlushLargestCacheBlock(Heap);
  1614. } else {
  1615. RtlpFastRemoveNonDedicatedFreeBlock( Heap, FreeBlock );
  1616. goto SplitFreeBlock;
  1617. }
  1618. #else // NTOS_KERNEL_RUNTIME
  1619. RtlpFastRemoveNonDedicatedFreeBlock( Heap, FreeBlock );
  1620. goto SplitFreeBlock;
  1621. #endif // NTOS_KERNEL_RUNTIME
  1622. }
  1623. }
  1624. } else {
  1625. //
  1626. // Check whether the largest block in the list is big enough for the request
  1627. //
  1628. Next = FreeListHead->Blink;
  1629. if (FreeListHead != Next) {
  1630. FreeBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  1631. if (FreeBlock->Size >= AllocationIndex) {
  1632. //
  1633. // Here we are sure there is at least a block here larger than
  1634. // the requested size. Start searching from the first block
  1635. //
  1636. Next = FreeListHead->Flink;
  1637. while (FreeListHead != Next) {
  1638. FreeBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  1639. if (FreeBlock->Size >= AllocationIndex) {
  1640. //
  1641. // We've found something that we can use so now remove
  1642. // it from the free list and go to where we treat splitting
  1643. // a free block. Note that the block we found here might
  1644. // actually be the exact size we need and that is why
  1645. // in the split free block case we have to consider having
  1646. // nothing free after the split
  1647. //
  1648. RtlpFastRemoveNonDedicatedFreeBlock( Heap, FreeBlock );
  1649. goto SplitFreeBlock;
  1650. }
  1651. Next = Next->Flink;
  1652. }
  1653. }
  1654. }
  1655. }
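//
// Note the user-mode heuristic above: if the index's best fit is more
// than HEAP_REUSAGE_FACTOR (4) times the request, and the request is
// already past the decommit threshold, we flush the largest cached
// block instead of carving up the big block, presumably to keep large
// cached blocks from being fragmented by small allocations.
//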
  1656. //
  1657. // The [0] list is either empty or everything is too small
  1658. // so now extend the heap which should get us something less
  1659. // than or equal to the virtual memory threshold
  1660. //
  1661. FreeBlock = RtlpExtendHeap( Heap, AllocationSize );
  1662. //
  1663. // And provided we got something we'll treat it just like the previous
  1664. // split free block cases
  1665. //
  1666. if (FreeBlock != NULL) {
  1667. RtlpFastRemoveNonDedicatedFreeBlock( Heap, FreeBlock );
  1668. goto SplitFreeBlock;
  1669. }
  1670. //
  1671. // We weren't able to extend the heap so we must be out of memory
  1672. //
  1673. Status = STATUS_NO_MEMORY;
  1674. //
  1675. // At this point the allocation is way too big for any of the free lists
  1676. // and we can only satisfy this request if the heap is growable
  1677. //
  1678. } else if (Heap->Flags & HEAP_GROWABLE) {
  1679. PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock;
  1680. VirtualAllocBlock = NULL;
  1681. //
  1682. // Compute how much memory we will need for this allocation which
  1683. // will include the allocation size plus a header, and then go
  1684. // get the committed memory
  1685. //
  1686. AllocationSize += FIELD_OFFSET( HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock );
  1687. Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
  1688. (PVOID *)&VirtualAllocBlock,
  1689. 0,
  1690. &AllocationSize,
  1691. MEM_COMMIT,
  1692. HEAP_PROTECTION );
  1693. if (NT_SUCCESS(Status)) {
  1694. //
  1695. // Just committed, already zero. Fill in the new block
  1696. // and insert it in the list of big allocations
  1697. //
  1698. VirtualAllocBlock->BusyBlock.Size = (USHORT)(AllocationSize - Size);
  1699. VirtualAllocBlock->BusyBlock.Flags = HEAP_ENTRY_VIRTUAL_ALLOC | HEAP_ENTRY_EXTRA_PRESENT | HEAP_ENTRY_BUSY;
  1700. VirtualAllocBlock->CommitSize = AllocationSize;
  1701. VirtualAllocBlock->ReserveSize = AllocationSize;
  1702. InsertTailList( &Heap->VirtualAllocdBlocks, (PLIST_ENTRY)VirtualAllocBlock );
  1703. //
  1704. // Return the address of the user portion of the allocated block.
  1705. // This is the byte following the header.
  1706. //
  1707. ReturnValue = (PHEAP_ENTRY)(VirtualAllocBlock + 1);
  1708. BlockSize = AllocationSize;
  1709. leave;
  1710. }
  1711. } else {
  1712. Status = STATUS_BUFFER_TOO_SMALL;
  1713. }
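//
// Note that for a virtually allocated block BusyBlock.Size does not
// hold a granule count the way normal heap entries do: it records
// (AllocationSize - Size), the count of unused bytes, while the true
// sizes live in the CommitSize and ReserveSize fields of the
// containing HEAP_VIRTUAL_ALLOC_ENTRY.
//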
  1714. //
  1715. // This is the error return.
  1716. //
  1717. if (Flags & HEAP_GENERATE_EXCEPTIONS) {
  1718. //
  1719. // Construct an exception record.
  1720. //
  1721. ExceptionRecord.ExceptionCode = STATUS_NO_MEMORY;
  1722. ExceptionRecord.ExceptionRecord = (PEXCEPTION_RECORD)NULL;
  1723. ExceptionRecord.NumberParameters = 1;
  1724. ExceptionRecord.ExceptionFlags = 0;
  1725. ExceptionRecord.ExceptionInformation[ 0 ] = AllocationSize;
  1726. RtlRaiseException( &ExceptionRecord );
  1727. }
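//
// A caller that passes HEAP_GENERATE_EXCEPTIONS is expected to guard
// the call; a minimal sketch of such a caller (illustrative only):
//
//     __try {
//         Buffer = RtlAllocateHeap( Heap, HEAP_GENERATE_EXCEPTIONS, 64 );
//     } __except( GetExceptionCode() == STATUS_NO_MEMORY ?
//                 EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH ) {
//         Buffer = NULL;
//     }
//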
  1728. SET_LAST_STATUS(Status);
  1729. ReturnValue = NULL;
  1730. } finally {
  1731. if (LockAcquired) {
  1732. RtlReleaseLockRoutine( Heap->LockVariable );
  1733. }
  1734. }
  1735. RtlpRegisterOperation(Heap, BlockSize, HEAP_OP_ALLOC);
  1736. HEAP_PERF_STOP_TIMER(Heap, HEAP_OP_ALLOC);
  1737. #ifndef NTOS_KERNEL_RUNTIME
  1738. if( IsHeapLogging( HeapHandle ) ) {
  1739. PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
  1740. PPERFINFO_TRACE_HEADER pEventHeader = NULL;
  1741. USHORT ReqSize = sizeof(HEAP_EVENT_ALLOC) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);
  1742. AcquireBufferLocation(&pEventHeader, &pThreadLocalData,&ReqSize);
  1743. if(pEventHeader && pThreadLocalData) {
  1744. PHEAP_EVENT_ALLOC pHeapEvent = (PHEAP_EVENT_ALLOC)( (SIZE_T)pEventHeader
  1745. + (SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));
  1746. pEventHeader->Packet.Size = (USHORT) ReqSize;
  1747. pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_ALLOC;
  1748. pHeapEvent->HeapHandle = (PVOID)HeapHandle;
  1749. pHeapEvent->Size = Size;
  1750. pHeapEvent->Address = (PVOID)ReturnValue;
  1751. pHeapEvent->Source = MEMORY_FROM_MAINPATH;
  1752. ReleaseBufferLocation(pThreadLocalData);
  1753. }
  1754. }
  1755. #endif //NTOS_KERNEL_RUNTIME
  1756. return ReturnValue;
  1757. }
  1758. PVOID
  1759. RtlAllocateHeapSlowly (
  1760. IN PVOID HeapHandle,
  1761. IN ULONG Flags,
  1762. IN SIZE_T Size
  1763. )
  1764. /*++
  1765. Routine Description:
  1766. This routine does the equivalent of Rtl Allocate Heap but it does it with
  1767. additional heap consistency checking logic and tagging.
  1768. Arguments:
  1769. HeapHandle - Supplies a pointer to an initialized heap structure
  1770. Flags - Specifies the set of flags to use to control the allocation
  1771. Size - Specifies the size, in bytes, of the allocation
  1772. Return Value:
  1773. PVOID - returns a pointer to the newly allocated block
  1774. --*/
  1775. {
  1776. PHEAP Heap = (PHEAP)HeapHandle;
  1777. BOOLEAN LockAcquired = FALSE;
  1778. PVOID ReturnValue = NULL;
  1779. PULONG FreeListsInUse;
  1780. ULONG FreeListsInUseUlong;
  1781. SIZE_T AllocationSize;
  1782. SIZE_T FreeSize, AllocationIndex;
  1783. UCHAR EntryFlags, FreeFlags;
  1784. PLIST_ENTRY FreeListHead, Next;
  1785. PHEAP_ENTRY BusyBlock;
  1786. PHEAP_FREE_ENTRY FreeBlock, SplitBlock, SplitBlock2;
  1787. PHEAP_ENTRY_EXTRA ExtraStuff;
  1788. NTSTATUS Status;
  1789. EXCEPTION_RECORD ExceptionRecord;
  1790. SIZE_T ZeroSize = 0;
  1791. SIZE_T BlockSize = 0;
  1792. HEAP_PERF_DECLARE_TIMER();
  1793. RTL_PAGED_CODE();
  1794. //
  1795. // Note that Flags has already been OR'd with Heap->ForceFlags.
  1796. //
  1797. #ifndef NTOS_KERNEL_RUNTIME
  1798. //
  1799. // In the non kernel case check if we should be using the debug version
  1800. // of heap allocation
  1801. //
  1802. if (DEBUG_HEAP( Flags )) {
  1803. return RtlDebugAllocateHeap( HeapHandle, Flags, Size );
  1804. }
  1805. #endif // NTOS_KERNEL_RUNTIME
  1806. //
  1807. // If the size is greater than the largest pointer-sized integer then say we can't allocate that
  1808. // much and return the error to our caller
  1809. //
  1810. if (Size > MAXINT_PTR) {
  1811. SET_LAST_STATUS( STATUS_NO_MEMORY );
  1812. return NULL;
  1813. }
  1814. //
  1815. // Round up the requested size to the allocation granularity. Note
  1816. // that if the request is for zero bytes we will still allocate memory.
  1817. //
  1818. // Allocation size will be either 16, 24, 32, ...
  1819. // Allocation index will be 2, 3, 4, ...
  1820. //
  1821. AllocationSize = ((Size ? Size : 1) + Heap->AlignRound) & Heap->AlignMask;
  1822. //
  1823. // Generate the flags needed for this heap entry. Mark it busy and add
  1824. // any user settable bits. Also if the input flag indicates any entry
  1825. // extra fields and we have a tag to use then make room for the extra
  1826. // fields in the heap entry
  1827. //
  1828. EntryFlags = (UCHAR)(HEAP_ENTRY_BUSY | ((Flags & HEAP_SETTABLE_USER_FLAGS) >> 4));
  1829. if ((Flags & HEAP_NEED_EXTRA_FLAGS) || (Heap->PseudoTagEntries != NULL)) {
  1830. EntryFlags |= HEAP_ENTRY_EXTRA_PRESENT;
  1831. AllocationSize += sizeof( HEAP_ENTRY_EXTRA );
  1832. }
  1833. AllocationIndex = AllocationSize >> HEAP_GRANULARITY_SHIFT;
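//
// Worked example, assuming the usual 8-byte granularity implied by
// the 16, 24, 32 progression above (HEAP_GRANULARITY_SHIFT == 3 with
// an 8-byte HEAP_ENTRY header folded into AlignRound):
//
//     Size = 100  ->  AllocationSize = (100 + 15) & ~7 = 112
//                 ->  AllocationIndex = 112 >> 3 = 14
//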
  1834. try {
  1835. HEAP_PERF_START_TIMER(Heap);
  1836. //
  1837. // Lock the free list.
  1838. //
  1839. if (!(Flags & HEAP_NO_SERIALIZE)) {
  1840. RtlAcquireLockRoutine( Heap->LockVariable );
  1841. LockAcquired = TRUE;
  1842. }
  1843. //
  1844. // Do all the actual heap work under the protection of a try-except clause
  1845. // to protect us from corruption
  1846. //
  1847. try {
  1848. //
  1849. // If the allocation index is less than the maximum free list size
  1850. // then we can use the index to check the free list otherwise we have
  1851. // to either pull the entry off of the [0] index list or allocate
  1852. // memory directly for this request.
  1853. //
  1854. if (AllocationIndex < HEAP_MAXIMUM_FREELISTS) {
  1855. //
  1856. // With a size that matches a free list size grab the head
  1857. // of the list and check if there is an available entry
  1858. //
  1859. FreeListHead = &Heap->FreeLists[ AllocationIndex ];
  1860. if ( !IsListEmpty( FreeListHead )) {
  1861. //
  1862. // We're in luck: the list has an entry, so now get the free
  1863. // entry, copy its flags, remove it from the free list
  1864. //
  1865. FreeBlock = CONTAINING_RECORD( FreeListHead->Flink,
  1866. HEAP_FREE_ENTRY,
  1867. FreeList );
  1868. FreeFlags = FreeBlock->Flags;
  1869. RtlpRemoveFreeBlock( Heap, FreeBlock );
  1870. //
  1871. // Adjust the total number of bytes free in the heap
  1872. //
  1873. Heap->TotalFreeSize -= AllocationIndex;
  1874. //
  1875. // Mark the block as busy and set the number of bytes
  1876. // unused and tag index. Also if it is the last entry
  1877. // then keep that flag.
  1878. //
  1879. BusyBlock = (PHEAP_ENTRY)FreeBlock;
  1880. BusyBlock->Flags = EntryFlags | (FreeFlags & HEAP_ENTRY_LAST_ENTRY);
  1881. RtlpSetUnusedBytes(Heap, BusyBlock, (AllocationSize - Size));
  1882. } else {
  1883. //
  1884. // The free list that matches our request is empty. We know
  1885. // that there are 128 free lists managed by a 4-ULONG bitmap.
  1886. // The next big if-else-if statement will decide which ULONG
  1887. // we tackle
  1888. //
  1889. // Check if the requested allocation index is within the first
  1890. // quarter of the free lists.
  1891. //
  1892. if (AllocationIndex < (HEAP_MAXIMUM_FREELISTS * 1) / 4) {
  1893. //
  1894. // Grab a pointer to the corresponding bitmap ULONG, and
  1895. // then get the bit we're actually interested in to be the
  1896. // first bit of the ULONG.
  1897. //
  1898. FreeListsInUse = &Heap->u.FreeListsInUseUlong[ 0 ];
  1899. FreeListsInUseUlong = *FreeListsInUse++ >> ((ULONG) AllocationIndex & 0x1F);
  1900. //
  1901. // If the remaining bitmap has any bits set then we know
  1902. // there is a non empty list that is larger than our
  1903. // requested index so find that bit and compute the list
  1904. // head of the next non empty list
  1905. //
  1906. if (FreeListsInUseUlong) {
  1907. FreeListHead += RtlFindFirstSetRightMember( FreeListsInUseUlong );
  1908. } else {
  1909. //
  1910. // The rest of the first ULONG is all zeros so we need
  1911. // to move to the second ULONG
  1912. //
  1913. FreeListsInUseUlong = *FreeListsInUse++;
  1914. //
  1915. // Check if the second ULONG has any bits set and if
  1916. // so then compute the list head of the next non empty
  1917. // list
  1918. //
  1919. if (FreeListsInUseUlong) {
  1920. FreeListHead += ((HEAP_MAXIMUM_FREELISTS * 1) / 4) -
  1921. (AllocationIndex & 0x1F) +
  1922. RtlFindFirstSetRightMember( FreeListsInUseUlong );
  1923. } else {
  1924. //
  1925. // Do the same test for the third ULONG
  1926. //
  1927. FreeListsInUseUlong = *FreeListsInUse++;
  1928. if (FreeListsInUseUlong) {
  1929. FreeListHead += ((HEAP_MAXIMUM_FREELISTS * 2) / 4) -
  1930. (AllocationIndex & 0x1F) +
  1931. RtlFindFirstSetRightMember( FreeListsInUseUlong );
  1932. } else {
  1933. //
  1934. // Repeat the test for the fourth ULONG, and if
  1935. // that one is also empty then we need to grab
  1936. // the allocation off of the [0] index list
  1937. //
  1938. FreeListsInUseUlong = *FreeListsInUse++;
  1939. if (FreeListsInUseUlong) {
  1940. FreeListHead += ((HEAP_MAXIMUM_FREELISTS * 3) / 4) -
  1941. (AllocationIndex & 0x1F) +
  1942. RtlFindFirstSetRightMember( FreeListsInUseUlong );
  1943. } else {
  1944. goto LookInNonDedicatedList;
  1945. }
  1946. }
  1947. }
  1948. }
  1949. //
  1950. // Otherwise check if the requested allocation index lies
  1951. // within the second quarter of the free lists. We repeat the
  1952. // test just like we did above on the second, third, and fourth
  1953. // bitmap ulongs.
  1954. //
  1955. } else if (AllocationIndex < (HEAP_MAXIMUM_FREELISTS * 2) / 4) {
  1956. FreeListsInUse = &Heap->u.FreeListsInUseUlong[ 1 ];
  1957. FreeListsInUseUlong = *FreeListsInUse++ >> ((ULONG) AllocationIndex & 0x1F);
  1958. if (FreeListsInUseUlong) {
  1959. FreeListHead += RtlFindFirstSetRightMember( FreeListsInUseUlong );
  1960. } else {
  1961. FreeListsInUseUlong = *FreeListsInUse++;
  1962. if (FreeListsInUseUlong) {
  1963. FreeListHead += ((HEAP_MAXIMUM_FREELISTS * 1) / 4) -
  1964. (AllocationIndex & 0x1F) +
  1965. RtlFindFirstSetRightMember( FreeListsInUseUlong );
  1966. } else {
  1967. FreeListsInUseUlong = *FreeListsInUse++;
  1968. if (FreeListsInUseUlong) {
  1969. FreeListHead += ((HEAP_MAXIMUM_FREELISTS * 2) / 4) -
  1970. (AllocationIndex & 0x1F) +
  1971. RtlFindFirstSetRightMember( FreeListsInUseUlong );
  1972. } else {
  1973. goto LookInNonDedicatedList;
  1974. }
  1975. }
  1976. }
  1977. //
  1978. // Otherwise check if the requested allocation index lies
  1979. // within the third quarter of the free lists. We repeat the
  1980. // test just like we did above on the third and fourth bitmap
  1981. // ULONGs
  1982. //
  1983. } else if (AllocationIndex < (HEAP_MAXIMUM_FREELISTS * 3) / 4) {
  1984. FreeListsInUse = &Heap->u.FreeListsInUseUlong[ 2 ];
  1985. FreeListsInUseUlong = *FreeListsInUse++ >> ((ULONG) AllocationIndex & 0x1F);
  1986. if (FreeListsInUseUlong) {
  1987. FreeListHead += RtlFindFirstSetRightMember( FreeListsInUseUlong );
  1988. } else {
  1989. FreeListsInUseUlong = *FreeListsInUse++;
  1990. if (FreeListsInUseUlong) {
  1991. FreeListHead += ((HEAP_MAXIMUM_FREELISTS * 1) / 4) -
  1992. (AllocationIndex & 0x1F) +
  1993. RtlFindFirstSetRightMember( FreeListsInUseUlong );
  1994. } else {
  1995. goto LookInNonDedicatedList;
  1996. }
  1997. }
  1998. //
  1999. // Lastly the requested allocation index must lie within the
  2000. // last quarter of the free lists. We repeat the test just
  2001. // like we did above on the fourth ULONG
  2002. //
  2003. } else {
  2004. FreeListsInUse = &Heap->u.FreeListsInUseUlong[ 3 ];
  2005. FreeListsInUseUlong = *FreeListsInUse++ >> ((ULONG) AllocationIndex & 0x1F);
  2006. if (FreeListsInUseUlong) {
  2007. FreeListHead += RtlFindFirstSetRightMember( FreeListsInUseUlong );
  2008. } else {
  2009. goto LookInNonDedicatedList;
  2010. }
  2011. }
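//
// The unrolled cascade above is just "find the first set bit at or
// after AllocationIndex" over the 128-bit FreeListsInUse map. An
// equivalent (but slower) sketch of the same search, for reference
// only:
//
//     ULONG Bit = (ULONG)AllocationIndex;
//     while ((Bit < HEAP_MAXIMUM_FREELISTS) &&
//            !(Heap->u.FreeListsInUseUlong[Bit / 32] & (1UL << (Bit % 32)))) {
//         Bit += 1;
//     }
//     // Bit < HEAP_MAXIMUM_FREELISTS: first non-empty list that fits;
//     // otherwise fall through to the non-dedicated [0] list
//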
  2012. //
  2013. // At this point the free list head points to a non empty free
  2014. // list that is of greater size than we need.
  2015. //
  2016. FreeBlock = CONTAINING_RECORD( FreeListHead->Flink,
  2017. HEAP_FREE_ENTRY,
  2018. FreeList );
  2019. SplitFreeBlock:
  2020. //
  2021. // Remember the flags that go with this block and remove it
  2022. // from its list
  2023. //
  2024. FreeFlags = FreeBlock->Flags;
  2025. RtlpRemoveFreeBlock( Heap, FreeBlock );
  2026. //
  2027. // Adjust the amount free in the heap
  2028. //
  2029. Heap->TotalFreeSize -= FreeBlock->Size;
  2030. //
  2031. // Mark the block busy
  2032. //
  2033. BusyBlock = (PHEAP_ENTRY)FreeBlock;
  2034. BusyBlock->Flags = EntryFlags;
  2035. //
  2036. // Compute the size (i.e., index) of the amount from this
  2037. // block that we don't need and can return to the free list
  2038. //
  2039. FreeSize = BusyBlock->Size - AllocationIndex;
  2040. //
  2041. // Finish setting up the rest of the new busy block
  2042. //
  2043. BusyBlock->Size = (USHORT)AllocationIndex;
  2044. RtlpSetUnusedBytes(Heap, BusyBlock, ((AllocationSize - Size)));
  2045. //
  2046. // Now if the size that we are going to free up is not zero
  2047. // then let's get to work and do the split.
  2048. //
  2049. if (FreeSize != 0) {
  2050. //
  2051. // But first we won't ever bother doing a split that only
  2052. // gives us 8 bytes back. So if free size is one then
  2053. // just bump up the size of the new busy block
  2054. //
  2055. if (FreeSize == 1) {
  2056. BusyBlock->Size += 1;
  2057. RtlpSetUnusedBytes(Heap, BusyBlock, AllocationSize + sizeof( HEAP_ENTRY ) - Size);
  2058. } else {
  2059. //
  2060. // Get a pointer to where the new free block will be.
  2061. // When we split a block the first part goes to the
  2062. // new busy block and the second part goes back to the
  2063. // free list
  2064. //
  2065. SplitBlock = (PHEAP_FREE_ENTRY)(BusyBlock + AllocationIndex);
  2066. //
  2067. // Reset the flags that we copied from the original
  2068. // free list header, and set its other size fields.
  2069. //
  2070. SplitBlock->Flags = FreeFlags;
  2071. SplitBlock->PreviousSize = (USHORT)AllocationIndex;
  2072. SplitBlock->SegmentIndex = BusyBlock->SegmentIndex;
  2073. SplitBlock->Size = (USHORT)FreeSize;
  2074. //
  2075. // If nothing else follows this entry then we will
  2076. // insert this into the corresponding free list
  2077. //
  2078. if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) {
  2079. RtlpInsertFreeBlockDirect( Heap, SplitBlock, (USHORT)FreeSize );
  2080. Heap->TotalFreeSize += FreeSize;
  2081. } else {
  2082. //
  2083. // Otherwise we need to check the following block
  2084. // and if it is busy then update its previous size
  2085. // before inserting our new free block into the
  2086. // free list
  2087. //
  2088. SplitBlock2 = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize);
  2089. if (SplitBlock2->Flags & HEAP_ENTRY_BUSY) {
  2090. SplitBlock2->PreviousSize = (USHORT)FreeSize;
  2091. RtlpInsertFreeBlockDirect( Heap, SplitBlock, (USHORT)FreeSize );
  2092. Heap->TotalFreeSize += FreeSize;
  2093. } else {
  2094. //
  2095. // The following block is free so we'll merge
  2096. // these two blocks, starting by merging the flags
  2097. //
  2098. SplitBlock->Flags = SplitBlock2->Flags;
  2099. //
  2100. // Removing the second block from its free
  2101. // list
  2102. //
  2103. RtlpRemoveFreeBlock( Heap, SplitBlock2 );
  2104. //
  2105. // Update the total number of free
  2106. // bytes in the heap and update the size of
  2107. // the new free block
  2108. //
  2109. Heap->TotalFreeSize -= SplitBlock2->Size;
  2110. FreeSize += SplitBlock2->Size;
  2111. //
  2112. // If the new free block is still less than
  2113. // the maximum heap block size then we'll
  2114. // simply insert it back in the free list
  2115. //
  2116. if (FreeSize <= HEAP_MAXIMUM_BLOCK_SIZE) {
  2117. SplitBlock->Size = (USHORT)FreeSize;
  2118. //
  2119. // Again check if the new following block
  2120. // exists and if so then update its
  2121. // previous size
  2122. //
  2123. if (!(SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {
  2124. ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize))->PreviousSize = (USHORT)FreeSize;
  2125. }
  2126. //
  2127. // Insert the new free block into the free
  2128. // list and update the free heap size
  2129. //
  2130. RtlpInsertFreeBlockDirect( Heap, SplitBlock, (USHORT)FreeSize );
  2131. Heap->TotalFreeSize += FreeSize;
  2132. } else {
  2133. //
  2134. // The new free block is pretty large so
  2135. // we need to call a private routine to do
  2136. // the insert
  2137. //
  2138. RtlpInsertFreeBlock( Heap, SplitBlock, FreeSize );
  2139. }
  2140. }
  2141. }
  2142. //
  2143. // Now that the free flags have made it back into a free block
  2144. // we can zero out what we saved.
  2145. //
  2146. FreeFlags = 0;
  2147. //
  2148. // If SplitBlock is now the last entry, update LastEntryInSegment
  2149. //
  2150. if (SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY) {
  2151. PHEAP_SEGMENT Segment;
  2152. Segment = Heap->Segments[SplitBlock->SegmentIndex];
  2153. Segment->LastEntryInSegment = (PHEAP_ENTRY)SplitBlock;
  2154. }
  2155. }
  2156. }
  2157. //
  2158. // If there are no following entries then mark the new block
  2159. // as such
  2160. //
  2161. if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) {
  2162. BusyBlock->Flags |= HEAP_ENTRY_LAST_ENTRY;
  2163. }
  2164. }
  2165. //
  2166. // Return the address of the user portion of the allocated block.
  2167. // This is the byte following the header.
  2168. //
  2169. ReturnValue = BusyBlock + 1;
  2170. BlockSize = BusyBlock->Size << HEAP_GRANULARITY_SHIFT;
  2171. //
  2172. // If the flags indicate that we should zero memory then
  2173. // remember how much to zero. We'll do the zeroing later
  2174. //
  2175. if (Flags & HEAP_ZERO_MEMORY) {
  2176. ZeroSize = Size;
  2177. //
  2178. // Otherwise if the flags indicate that we should fill the heap then
  2179. // do it now.
  2180. //
  2181. } else if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) {
  2182. RtlFillMemoryUlong( (PCHAR)(BusyBlock + 1), Size & ~0x3, ALLOC_HEAP_FILL );
  2183. }
  2184. //
  2185. // If the flags indicate that we should do tail checking then copy
  2186. // the fill pattern right after the heap block.
  2187. //
  2188. if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) {
  2189. RtlFillMemory( (PCHAR)ReturnValue + Size,
  2190. CHECK_HEAP_TAIL_SIZE,
  2191. CHECK_HEAP_TAIL_FILL );
  2192. BusyBlock->Flags |= HEAP_ENTRY_FILL_PATTERN;
  2193. }
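//
// The fill bytes written above act as a canary: HEAP_ENTRY_FILL_PATTERN
// records that CHECK_HEAP_TAIL_SIZE bytes of CHECK_HEAP_TAIL_FILL
// follow the user data, so the checking paths in this module can
// detect an overrun past the requested size when the block is next
// examined or freed.
//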
  2194. RtlpSetSmallTagIndex(Heap, BusyBlock, 0);
  2195. //
  2196. // If the flags indicate that there is an extra block present then
  2197. // we'll fill it in
  2198. //
  2199. if (BusyBlock->Flags & HEAP_ENTRY_EXTRA_PRESENT) {
  2200. ExtraStuff = RtlpGetExtraStuffPointer( BusyBlock );
  2201. RtlZeroMemory( ExtraStuff, sizeof( *ExtraStuff ));
  2202. #ifndef NTOS_KERNEL_RUNTIME
  2203. //
  2204. // In the non kernel case the tagging goes in either the extra
  2205. // stuff or the busy block's small tag index
  2206. //
  2207. if (IS_HEAP_TAGGING_ENABLED()) {
  2208. ExtraStuff->TagIndex = RtlpUpdateTagEntry( Heap,
  2209. (USHORT)((Flags & HEAP_TAG_MASK) >> HEAP_TAG_SHIFT),
  2210. 0,
  2211. BusyBlock->Size,
  2212. AllocationAction );
  2213. }
  2214. } else if (IS_HEAP_TAGGING_ENABLED()) {
  2215. RtlpSetSmallTagIndex( Heap,
  2216. BusyBlock,
  2217. (UCHAR)RtlpUpdateTagEntry( Heap,
  2218. (USHORT)((Flags & HEAP_SMALL_TAG_MASK) >> HEAP_TAG_SHIFT),
  2219. 0,
  2220. BusyBlock->Size,
  2221. AllocationAction ));
  2222. #endif // NTOS_KERNEL_RUNTIME
  2223. }
  2224. //
  2225. // Return the address of the user portion of the allocated block.
  2226. // This is the byte following the header.
  2227. //
  2228. leave;
  2229. //
  2230. // Otherwise the allocation request is bigger than the last dedicated
  2231. // free list size. Now check if the size is within our threshold,
  2232. // meaning that it could be in the [0] free list
  2233. //
  2234. } else if (AllocationIndex <= Heap->VirtualMemoryThreshold) {
  2235. LookInNonDedicatedList:
  2236. //
  2237. // The following code cycles through the [0] free list until
  2238. // it finds a block that satisfies the request. The list
  2239. // is sorted so the search can be terminated early on success
  2240. //
  2241. FreeListHead = &Heap->FreeLists[ 0 ];
  2242. if (Heap->LargeBlocksIndex) {
  2243. Next = RtlpFindEntry(Heap, (ULONG)AllocationIndex);
  2244. if (FreeListHead != Next) {
  2245. FreeBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  2246. if (FreeBlock->Size >= AllocationIndex) {
  2247. //
  2248. // We've found something that we can use so now remove
  2249. // it from the free list and go to where we treat splitting
  2250. // a free block. Note that the block we found here might
  2251. // actually be the exact size we need and that is why
  2252. // in the split free block case we have to consider having
  2253. // nothing free after the split
  2254. //
  2255. #ifndef NTOS_KERNEL_RUNTIME
  2256. if ((((PHEAP_INDEX)Heap->LargeBlocksIndex)->LargeBlocksCacheSequence)
  2257. &&
  2258. (AllocationIndex > Heap->DeCommitFreeBlockThreshold)
  2259. &&
  2260. (FreeBlock->Size > (AllocationIndex * HEAP_REUSAGE_FACTOR))) {
  2261. RtlpFlushLargestCacheBlock(Heap);
  2262. } else {
  2263. goto SplitFreeBlock;
  2264. }
  2265. #else // NTOS_KERNEL_RUNTIME
  2266. goto SplitFreeBlock;
  2267. #endif // NTOS_KERNEL_RUNTIME
  2268. }
  2269. }
  2270. } else {
  2271. Next = FreeListHead->Flink;
  2272. while (FreeListHead != Next) {
  2273. FreeBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  2274. if (FreeBlock->Size >= AllocationIndex) {
  2275. //
  2276. // We've found something that we can use so now go to
  2277. // where we treat splitting a free block. Note that
  2278. // the block we found here might actually be the exact
  2279. // size we need and that is why in the split free block
  2280. // case we have to consider having nothing free after the
  2281. // split
  2282. //
  2283. goto SplitFreeBlock;
  2284. } else {
  2285. Next = Next->Flink;
  2286. }
  2287. }
  2288. }
  2289. //
  2290. // The [0] list is either empty or everything is too small
  2291. // so now extend the heap which should get us something less
  2292. // than or equal to the virtual memory threshold
  2293. //
  2294. FreeBlock = RtlpExtendHeap( Heap, AllocationSize );
  2295. //
  2296. // And provided we got something we'll treat it just like the
  2297. // previous split free block cases
  2298. //
  2299. if (FreeBlock != NULL) {
  2300. goto SplitFreeBlock;
  2301. }
  2302. //
  2303. // We weren't able to extend the heap so we must be out of memory
  2304. //
  2305. Status = STATUS_NO_MEMORY;
  2306. //
  2307. // At this point the allocation is way too big for any of the free
  2308. // lists and we can only satisfy this request if the heap is growable
  2309. //
  2310. } else if (Heap->Flags & HEAP_GROWABLE) {
  2311. PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock;
  2312. VirtualAllocBlock = NULL;
  2313. //
  2314. // Compute how much memory we will need for this allocation which
  2315. // will include the allocation size plus a header, and then go
  2316. // get the committed memory
  2317. //
  2318. AllocationSize += FIELD_OFFSET( HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock );
  2319. Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
  2320. (PVOID *)&VirtualAllocBlock,
  2321. 0,
  2322. &AllocationSize,
  2323. MEM_COMMIT,
  2324. HEAP_PROTECTION );
  2325. if (NT_SUCCESS( Status )) {
  2326. //
  2327. // Just committed, already zero. Fill in the new block
  2328. // and insert it in the list of big allocations
  2329. //
  2330. VirtualAllocBlock->BusyBlock.Size = (USHORT)(AllocationSize - Size);
  2331. VirtualAllocBlock->BusyBlock.Flags = EntryFlags | HEAP_ENTRY_VIRTUAL_ALLOC | HEAP_ENTRY_EXTRA_PRESENT;
  2332. VirtualAllocBlock->CommitSize = AllocationSize;
  2333. VirtualAllocBlock->ReserveSize = AllocationSize;
  2334. #ifndef NTOS_KERNEL_RUNTIME
  2335. //
  2336. // In the non kernel case see if we need to add heap tagging
  2337. //
  2338. if (IS_HEAP_TAGGING_ENABLED()) {
  2339. VirtualAllocBlock->ExtraStuff.TagIndex =
  2340. RtlpUpdateTagEntry( Heap,
  2341. (USHORT)((Flags & HEAP_SMALL_TAG_MASK) >> HEAP_TAG_SHIFT),
  2342. 0,
  2343. VirtualAllocBlock->CommitSize >> HEAP_GRANULARITY_SHIFT,
  2344. VirtualAllocationAction );
  2345. }
  2346. #endif // NTOS_KERNEL_RUNTIME
  2347. InsertTailList( &Heap->VirtualAllocdBlocks, (PLIST_ENTRY)VirtualAllocBlock );
  2348. //
  2349. // Return the address of the user portion of the allocated
  2350. // block. This is the byte following the header.
  2351. //
  2352. ReturnValue = (PHEAP_ENTRY)(VirtualAllocBlock + 1);
  2353. BlockSize = AllocationSize;
  2354. leave;
  2355. }
  2356. //
  2357. // Otherwise we have an error condition
  2358. //
  2359. } else {
  2360. Status = STATUS_BUFFER_TOO_SMALL;
  2361. }
  2362. SET_LAST_STATUS( Status );
  2363. if (Flags & HEAP_GENERATE_EXCEPTIONS) {
  2364. //
  2365. // Construct an exception record.
  2366. //
  2367. ExceptionRecord.ExceptionCode = STATUS_NO_MEMORY;
  2368. ExceptionRecord.ExceptionRecord = (PEXCEPTION_RECORD)NULL;
  2369. ExceptionRecord.NumberParameters = 1;
  2370. ExceptionRecord.ExceptionFlags = 0;
  2371. ExceptionRecord.ExceptionInformation[ 0 ] = AllocationSize;
  2372. RtlRaiseException( &ExceptionRecord );
  2373. }
  2374. } except( GetExceptionCode() == STATUS_NO_MEMORY ? EXCEPTION_CONTINUE_SEARCH :
  2375. EXCEPTION_EXECUTE_HANDLER ) {
  2376. SET_LAST_STATUS( GetExceptionCode() );
  2377. }
  2378. //
  2379. // Check if there is anything to zero out
  2380. //
  2381. if ( ZeroSize ) {
  2382. RtlZeroMemory( ReturnValue, ZeroSize );
  2383. }
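//
// Note: unlike the fast path, which releases the heap lock before
// zeroing, this slow path zeroes while the lock is still held; the
// lock is only dropped in the finally clause below.
//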
  2384. } finally {
  2385. if (LockAcquired) {
  2386. RtlReleaseLockRoutine( Heap->LockVariable );
  2387. }
  2388. }
  2389. //
  2390. // And return to our caller
  2391. //
  2392. RtlpRegisterOperation(Heap, BlockSize, HEAP_OP_ALLOC);
  2393. HEAP_PERF_STOP_TIMER(Heap, HEAP_OP_ALLOC);
  2394. if(ReturnValue) {
  2395. #ifndef NTOS_KERNEL_RUNTIME
  2396. if( IsHeapLogging( HeapHandle ) ) {
  2397. PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
  2398. PPERFINFO_TRACE_HEADER pEventHeader = NULL;
  2399. USHORT ReqSize = sizeof(HEAP_EVENT_ALLOC) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);
  2400. AcquireBufferLocation(&pEventHeader, &pThreadLocalData, &ReqSize);
  2401. if(pEventHeader && pThreadLocalData) {
  2402. PHEAP_EVENT_ALLOC pHeapEvent = (PHEAP_EVENT_ALLOC)( (SIZE_T)pEventHeader
  2403. + (SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));
  2404. pEventHeader->Packet.Size = (USHORT) ReqSize;
  2405. pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_ALLOC;
  2406. pHeapEvent->HeapHandle = (PVOID)HeapHandle;
  2407. pHeapEvent->Size = Size;
  2408. pHeapEvent->Address = (PVOID)ReturnValue;
  2409. pHeapEvent->Source = MEMORY_FROM_SLOWPATH;
  2410. ReleaseBufferLocation(pThreadLocalData);
  2411. }
  2412. }
  2413. #endif // NTOS_KERNEL_RUNTIME
  2414. }
  2415. return ReturnValue;
  2416. }
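//
// Typical call pattern against the allocator above (illustrative
// only; HeapHandle would come from RtlCreateHeap):
//
//     Buffer = RtlAllocateHeap( HeapHandle, HEAP_ZERO_MEMORY, 128 );
//     if (Buffer == NULL) {
//         // failed: the thread's last status holds the reason,
//         // typically STATUS_NO_MEMORY
//     }
//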
  2417. BOOLEAN
  2418. RtlFreeHeap (
  2419. IN PVOID HeapHandle,
  2420. IN ULONG Flags,
  2421. IN PVOID BaseAddress
  2422. )
  2423. /*++
  2424. Routine Description:
  2425. This routine returns a previously allocated block back to its heap
  2426. Arguments:
  2427. HeapHandle - Supplies a pointer to the owning heap structure
  2428. Flags - Specifies the set of flags to use in the deallocation
  2429. BaseAddress - Supplies a pointer to the block being freed
  2430. Return Value:
  2431. BOOLEAN - TRUE if the block was properly freed and FALSE otherwise
  2432. --*/
  2433. {
  2434. NTSTATUS Status;
  2435. PHEAP Heap = (PHEAP)HeapHandle;
  2436. PHEAP_ENTRY BusyBlock;
  2437. PHEAP_ENTRY_EXTRA ExtraStuff;
  2438. SIZE_T FreeSize;
  2439. BOOLEAN LockAcquired = FALSE;
  2440. BOOLEAN ReturnValue = TRUE;
  2441. SIZE_T BlockSize;
  2442. PVOID FrontHeap = NULL;
  2443. HEAP_PERF_DECLARE_TIMER();
  2444. RTL_PAGED_CODE();
  2445. //
  2446. // First check if the address we're given is null and if so then
  2447. // there is really nothing to do so just return success
  2448. //
  2449. if (BaseAddress == NULL) {
  2450. return TRUE;
  2451. }
  2452. #ifndef NTOS_KERNEL_RUNTIME
  2453. if (FrontHeap = RtlpGetLowFragHeap(Heap)) {
  2454. //
  2455. // We can do everything in this routine. So now back up to get
  2456. // a pointer to the start of the block
  2457. //
  2458. BusyBlock = (PHEAP_ENTRY)BaseAddress - 1;
  2459. if (BusyBlock->SegmentIndex >= HEAP_LFH_INDEX) {
  2460. if (RtlpLowFragHeapFree( FrontHeap, BaseAddress)) {
  2461. #ifndef NTOS_KERNEL_RUNTIME
  2462. if( IsHeapLogging( HeapHandle ) ) {
  2463. PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
  2464. PPERFINFO_TRACE_HEADER pEventHeader = NULL;
  2465. USHORT ReqSize = sizeof(HEAP_EVENT_FREE) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);
  2466. AcquireBufferLocation(&pEventHeader, &pThreadLocalData,&ReqSize);
  2467. if(pEventHeader && pThreadLocalData) {
  2468. PHEAP_EVENT_FREE pHeapEvent = (PHEAP_EVENT_FREE)( (SIZE_T)pEventHeader
  2469. + (SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));
  2470. pEventHeader->Packet.Size = (USHORT) ReqSize;
  2471. pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_FREE;
  2472. pHeapEvent->HeapHandle = (PVOID)HeapHandle;
  2473. pHeapEvent->Address = (PVOID)BaseAddress;
  2474. pHeapEvent->Source = MEMORY_FROM_LOWFRAG;
  2475. ReleaseBufferLocation(pThreadLocalData);
  2476. }
  2477. }
  2478. #endif //NTOS_KERNEL_RUNTIME
  2479. return TRUE;
  2480. }
  2481. }
  2482. }
  2483. #endif // NTOS_KERNEL_RUNTIME
  2484. //
  2485. // Complement the input flags with those enforced by the heap
  2486. //
  2487. Flags |= Heap->ForceFlags;
  2488. //
  2489. // Now check if we should go the slow route
  2490. //
  2491. if (Flags & HEAP_SLOW_FLAGS) {
  2492. return RtlFreeHeapSlowly(HeapHandle, Flags, BaseAddress);
  2493. }
  2494. //
  2495. // We can do everything in this routine. So now back up to get
  2496. // a pointer to the start of the block
  2497. //
  2498. BusyBlock = (PHEAP_ENTRY)BaseAddress - 1;
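//
// Block layout assumed by the arithmetic above:
//
//     BusyBlock          BaseAddress (the pointer the caller holds)
//     |                  |
//     v                  v
//     +--- HEAP_ENTRY ---+--- user data ... ---+-- unused bytes --+
//
// so stepping back one HEAP_ENTRY from the user pointer lands on the
// block header.
//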
  2499. //
  2500. // Protect ourselves from mistakes by refusing to free blocks
  2501. // that do not have the busy bit set.
  2502. //
  2503. // Also refuse to free blocks that are not eight-byte aligned.
  2504. // The specific mistake in this case is Office95, which likes
  2505. // to free a random pointer when you start Word95 from a desktop
  2506. // shortcut.
  2507. //
  2508. // As further insurance against mistakes, check the segment index
  2509. // to make sure it is less than HEAP_MAXIMUM_SEGMENTS (16). This
  2510. // should fix all the dorks who have ASCII or Unicode where the
  2511. // heap header is supposed to be.
  2512. //
  2513. try {
  2514. if ((((ULONG_PTR)BaseAddress & 0x7) != 0) ||
  2515. (!(BusyBlock->Flags & HEAP_ENTRY_BUSY)) ||
  2516. (BusyBlock->SegmentIndex >= HEAP_MAXIMUM_SEGMENTS)) {
  2517. //
  2518. // Not a busy block, or it's not aligned or the segment is
  2519. // too big, meaning it's corrupt
  2520. //
  2521. SET_LAST_STATUS( STATUS_INVALID_PARAMETER );
  2522. return FALSE;
  2523. }
  2524. } except( RtlpHeapExceptionFilter(GetExceptionCode()) ) {
  2525. SET_LAST_STATUS( STATUS_INVALID_PARAMETER );
  2526. return FALSE;
  2527. }
  2528. BlockSize = BusyBlock->Size << HEAP_GRANULARITY_SHIFT;
  2529. //
  2530. // If there is a lookaside list and the block is not a big allocation
  2531. // and the index is for a dedicated list then free the block to the
  2532. // lookaside list. We'll actually capture
  2533. // the lookaside pointer from the heap and only use the captured pointer.
  2534. // This will take care of the condition where a walk or lock heap can
  2535. // cause us to check for a non null pointer and then have it become null
  2536. // when we read it again. If it is non null to start with then even if
  2537. // the user walks or locks the heap via another thread the pointer is
  2538. // still valid here so we can still try to do a lookaside list push
  2539. //
  2540. #ifndef NTOS_KERNEL_RUNTIME
  2541. if (!(BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC)
  2542. &&
  2543. !RtlpQuickValidateBlock(Heap, BusyBlock)) {
  2544. SET_LAST_STATUS( STATUS_INVALID_PARAMETER );
  2545. return FALSE;
  2546. }
  2547. if ( !(BusyBlock->Flags & HEAP_ENTRY_SETTABLE_FLAGS) ) {
  2548. PHEAP_LOOKASIDE Lookaside = (PHEAP_LOOKASIDE)RtlpGetLookasideHeap(Heap);
  2549. if ((Lookaside != NULL) &&
  2550. RtlpIsFrontHeapUnlocked(Heap) &&
  2551. (!(BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC)) &&
  2552. ((FreeSize = BusyBlock->Size) < HEAP_MAXIMUM_FREELISTS)) {
  2553. if (RtlpFreeToHeapLookaside( &Lookaside[FreeSize], BaseAddress)) {
  2554. if( IsHeapLogging( HeapHandle ) && (TraceLevel & LOG_LOOKASIDE)) {
  2555. PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
  2556. PPERFINFO_TRACE_HEADER pEventHeader = NULL;
  2557. USHORT ReqSize = sizeof(HEAP_EVENT_FREE) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);
  2558. AcquireBufferLocation(&pEventHeader, &pThreadLocalData,&ReqSize);
  2559. if(pEventHeader && pThreadLocalData) {
  2560. PHEAP_EVENT_FREE pHeapEvent = (PHEAP_EVENT_FREE)( (SIZE_T)pEventHeader
  2561. + (SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));
  2562. pEventHeader->Packet.Size = (USHORT) ReqSize;
  2563. pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_FREE;
  2564. pHeapEvent->HeapHandle = (PVOID)HeapHandle;
  2565. pHeapEvent->Address = (PVOID)BaseAddress;
  2566. pHeapEvent->Source = MEMORY_FROM_LOOKASIDE;
  2567. ReleaseBufferLocation(pThreadLocalData);
  2568. }
  2569. }
  2570. return TRUE;
  2571. }
  2572. }
  2573. }
  2574. #endif // NTOS_KERNEL_RUNTIME
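//
// The per-size-class lookaside used above gives small frees a path
// that never takes the heap lock: blocks with an index below
// HEAP_MAXIMUM_FREELISTS are pushed onto Lookaside[FreeSize], from
// which the matching allocation fast path can pop them directly,
// skipping the free list bookkeeping entirely.
//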
  2575. try {
  2576. HEAP_PERF_START_TIMER(Heap);
  2577. //
  2578. // Check if we need to lock the heap
  2579. //
  2580. if (!(Flags & HEAP_NO_SERIALIZE)) {
  2581. RtlAcquireLockRoutine( Heap->LockVariable );
  2582. LockAcquired = TRUE;
  2583. }
  2584. //
  2585. // Check if this is not a virtual block allocation meaning
  2586. // that it is part of the heap free list structure and not
  2587. // one huge allocation that we got from vm
  2588. //
  2589. if (!(BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC)) {
  2590. //
  2591. // This block is not a big allocation so we need to
  2592. // get its size and coalesce the blocks. Note that
  2593. // the user mode heap does this conditionally on a heap
  2594. // flag. The coalesce function returns the newly formed
  2595. // free block and the new size.
  2596. //
  2597. FreeSize = BusyBlock->Size;
  2598. #ifdef NTOS_KERNEL_RUNTIME
  2599. BusyBlock = (PHEAP_ENTRY)RtlpCoalesceFreeBlocks( Heap,
  2600. (PHEAP_FREE_ENTRY)BusyBlock,
  2601. &FreeSize,
  2602. FALSE );
  2603. #else // NTOS_KERNEL_RUNTIME
  2604. if (!(Heap->Flags & HEAP_DISABLE_COALESCE_ON_FREE)) {
  2605. BusyBlock = (PHEAP_ENTRY)RtlpCoalesceFreeBlocks( Heap,
  2606. (PHEAP_FREE_ENTRY)BusyBlock,
  2607. &FreeSize,
  2608. FALSE );
  2609. }
  2610. #endif // NTOS_KERNEL_RUNTIME
  2611. //
  2612. // Check for a small allocation that can go on a freelist
  2613. // first; these should never trigger a decommit.
  2614. //
  2615. HEAPASSERT(HEAP_MAXIMUM_FREELISTS < Heap->DeCommitFreeBlockThreshold);
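//
// In outline, the policy implemented below is:
//
//     FreeSize < HEAP_MAXIMUM_FREELISTS            -> dedicated list
//     FreeSize < DeCommitFreeBlockThreshold, or
//     total free < DeCommitTotalFreeThreshold      -> [0] list / split
//     otherwise                                    -> decommit pages
//
// (user mode adds a special case, below, for page-sized blocks that
// border uncommitted ranges). For example, with an illustrative
// decommit threshold of four pages, a coalesced one-page block stays
// on a free list while a sixteen-page block is handed back to vm.
//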
  2616. //
  2617. // If the allocation fits on a free list then insert it on
  2618. // the appropriate free list. If the block is not the last
  2619. // entry then make sure that the next block knows our correct
  2620. // size, and update the heap free space counter.
  2621. //
  2622. if (FreeSize < HEAP_MAXIMUM_FREELISTS) {
  2623. RtlpFastInsertDedicatedFreeBlockDirect( Heap,
  2624. (PHEAP_FREE_ENTRY)BusyBlock,
  2625. (USHORT)FreeSize );
  2626. if (!(BusyBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {
  2627. HEAPASSERT((BusyBlock + FreeSize)->PreviousSize == (USHORT)FreeSize);
  2628. }
  2629. Heap->TotalFreeSize += FreeSize;
  2630. //
  2631. // Otherwise the block is too big for one of the dedicated free lists so
  2632. // see if the free size is under the decommit threshold by itself
  2633. // or the total free in the heap is under the decommit threshold; if so
  2634. // we'll put this into a free list
  2635. //
  2636. } else if ((FreeSize < Heap->DeCommitFreeBlockThreshold) ||
  2637. ((Heap->TotalFreeSize + FreeSize) < Heap->DeCommitTotalFreeThreshold)) {
  2638. #ifndef NTOS_KERNEL_RUNTIME
  2639. //
  2640. // If the block is at least one page and has uncommitted ranges around
  2641. // it, force the decommit to reduce VA fragmentation
  2642. //
  2643. if (((Heap->TotalFreeSize + FreeSize) > Heap->DeCommitTotalFreeThreshold)
  2644. &&
  2645. !(RtlpDisableHeapLookaside & HEAP_COMPAT_DISABLE_LARGECACHE)
  2646. &&
  2647. (FreeSize >= (PAGE_SIZE >> HEAP_GRANULARITY_SHIFT))
  2648. &&
  2649. ((BusyBlock->PreviousSize == 0) || (BusyBlock->Flags & HEAP_ENTRY_LAST_ENTRY))) {
  2650. //
  2651. // Check if the block can go into the [0] index free list, and if
  2652. // so then do the insert, make sure the following block, if
  2653. // needed, knows our correct size, and update the heap's free space
  2654. // counter
  2655. //
  2656. RtlpDeCommitFreeBlock( Heap, (PHEAP_FREE_ENTRY)BusyBlock, FreeSize );
  2657. } else
  2658. #endif //NTOS_KERNEL_RUNTIME
  2659. if (FreeSize <= (ULONG)HEAP_MAXIMUM_BLOCK_SIZE) {
  2660. RtlpFastInsertNonDedicatedFreeBlockDirect( Heap,
  2661. (PHEAP_FREE_ENTRY)BusyBlock,
  2662. (USHORT)FreeSize );
  2663. if (!(BusyBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {
  2664. HEAPASSERT((BusyBlock + FreeSize)->PreviousSize == (USHORT)FreeSize);
  2665. }
  2666. Heap->TotalFreeSize += FreeSize;
  2667. } else {
  2668. //
  2669. // The block is too big to go on a free list in its
  2670. // entirety but we don't want to decommit anything so
  2671. // simply call a worker routine to hack up the block
  2672. // into pieces that will fit on the free lists.
  2673. //
  2674. RtlpInsertFreeBlock( Heap, (PHEAP_FREE_ENTRY)BusyBlock, FreeSize );
  2675. }
  2676. //
  2677. // Otherwise the block is too big for any lists and we should decommit
  2678. // the block
  2679. //
  2680. } else {
  2681. RtlpDeCommitFreeBlock( Heap, (PHEAP_FREE_ENTRY)BusyBlock, FreeSize );
  2682. }
  2683. } else {
  2684. //
  2685. // This is a big virtual block allocation. To free it we only have to
  2686. // remove it from the heap's list of virtually allocated blocks, unlock
  2687. // the heap, and return the block to vm
  2688. //
  2689. PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock;
  2690. VirtualAllocBlock = CONTAINING_RECORD( BusyBlock, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock );
  2691. RtlpHeapRemoveEntryList( &VirtualAllocBlock->Entry );
  2692. //
  2693. // Release lock here as there is no reason to hold it across
  2694. // the system call.
  2695. //
  2696. if (LockAcquired) {
  2697. RtlReleaseLockRoutine( Heap->LockVariable );
  2698. LockAcquired = FALSE;
  2699. }
  2700. FreeSize = 0;
  2701. Status = RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
  2702. (PVOID *)&VirtualAllocBlock,
  2703. &FreeSize,
  2704. MEM_RELEASE );
  2705. //
  2706. // Check if we had trouble freeing the block back to vm
  2707. // and return an error if necessary
  2708. //
  2709. if (!NT_SUCCESS( Status )) {
  2710. SET_LAST_STATUS( Status );
  2711. ReturnValue = FALSE;
  2712. }
  2713. }
  2714. } finally {
  2715. if (LockAcquired) {
  2716. RtlReleaseLockRoutine( Heap->LockVariable );
  2717. }
  2718. }
  2719. //
  2720. // The block was freed successfully so return success to our
  2721. // caller
  2722. //
  2723. RtlpRegisterOperation(Heap, BlockSize, HEAP_OP_FREE);
  2724. HEAP_PERF_STOP_TIMER(Heap, HEAP_OP_FREE);
  2725. #ifndef NTOS_KERNEL_RUNTIME
  2726. if( IsHeapLogging( HeapHandle ) ) {
  2727. PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
  2728. PPERFINFO_TRACE_HEADER pEventHeader = NULL;
  2729. USHORT ReqSize = sizeof(HEAP_EVENT_FREE) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);
  2730. AcquireBufferLocation(&pEventHeader, &pThreadLocalData,&ReqSize);
  2731. if(pEventHeader && pThreadLocalData) {
  2732. PHEAP_EVENT_FREE pHeapEvent = (PHEAP_EVENT_FREE)( (SIZE_T)pEventHeader
  2733. + (SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));
  2734. pEventHeader->Packet.Size = (USHORT) ReqSize;
  2735. pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_FREE;
  2736. pHeapEvent->HeapHandle = (PVOID)HeapHandle;
  2737. pHeapEvent->Address = (PVOID)BaseAddress;
  2738. pHeapEvent->Source = MEMORY_FROM_MAINPATH;
  2739. ReleaseBufferLocation(pThreadLocalData);
  2740. }
  2741. }
  2742. #endif //NTOS_KERNEL_RUNTIME
  2743. return ReturnValue;
  2744. }
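//
// Caller-visible behavior of RtlFreeHeap above, in brief (sketch):
//
//     RtlFreeHeap( HeapHandle, 0, NULL );            // no-op, TRUE
//     RtlFreeHeap( HeapHandle, 0, Block );           // TRUE on success
//     RtlFreeHeap( HeapHandle, 0, (PCHAR)Block + 1 );
//                                            // misaligned: FALSE, with
//                                            // the thread's last status
//                                            // set to STATUS_INVALID_PARAMETER
//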
  2745. BOOLEAN
  2746. RtlFreeHeapSlowly (
  2747. IN PVOID HeapHandle,
  2748. IN ULONG Flags,
  2749. IN PVOID BaseAddress
  2750. )
  2751. /*++
  2752. Routine Description:
  2753. This routine returns a previously allocated block back to its heap.
  2754. It is the slower version of Rtl Free Heap and does more checking and
  2755. tagging control.
  2756. Arguments:
  2757. HeapHandle - Supplies a pointer to the owning heap structure
  2758. Flags - Specifies the set of flags to use in the deallocation
  2759. BaseAddress - Supplies a pointer to the block being freed
  2760. Return Value:
  2761. BOOLEAN - TRUE if the block was properly freed and FALSE otherwise
  2762. --*/
  2763. {
  2764. NTSTATUS Status;
  2765. PHEAP Heap = (PHEAP)HeapHandle;
  2766. PHEAP_ENTRY BusyBlock;
  2767. PHEAP_ENTRY_EXTRA ExtraStuff;
  2768. SIZE_T FreeSize;
  2769. BOOLEAN Result;
  2770. BOOLEAN LockAcquired = FALSE;
  2771. SIZE_T BlockSize;
  2772. #ifndef NTOS_KERNEL_RUNTIME
  2773. USHORT TagIndex;
  2774. #endif // NTOS_KERNEL_RUNTIME
  2775. HEAP_PERF_DECLARE_TIMER();
  2776. RTL_PAGED_CODE();
  2777. //
  2778. // Note that Flags has already been OR'd with Heap->ForceFlags.
  2779. //
  2780. #ifndef NTOS_KERNEL_RUNTIME
  2781. //
  2782. // In the non kernel case see if we should be calling the debug version to
  2783. // free the heap
  2784. //
  2785. if (DEBUG_HEAP( Flags )) {
  2786. return RtlDebugFreeHeap( HeapHandle, Flags, BaseAddress );
  2787. }
  2788. #endif // NTOS_KERNEL_RUNTIME
  2789. //
  2790. // Until we figure out otherwise we'll assume that this call will fail
  2791. //
  2792. Result = FALSE;
  2793. try {
  2794. HEAP_PERF_START_TIMER(Heap);
  2795. //
  2796. // Lock the heap
  2797. //
  2798. if (!(Flags & HEAP_NO_SERIALIZE)) {
  2799. RtlAcquireLockRoutine( Heap->LockVariable );
  2800. LockAcquired = TRUE;
  2801. }
  2802. try {
  2803. //
  2804. // Back up to get a pointer to the start of the block
  2805. //
  2806. BusyBlock = (PHEAP_ENTRY)BaseAddress - 1;
  2807. BlockSize = BusyBlock->Size << HEAP_GRANULARITY_SHIFT;
  2808. //
  2809. // Protect ourselves from mistakes by refusing to free blocks
  2810. // that do not have the busy bit set.
  2811. //
  2812. // Also refuse to free blocks that are not eight-byte aligned.
  2813. // The specific mistake in this case is Office95, which likes
  2814. // to free a random pointer when you start Word95 from a desktop
  2815. // shortcut.
  2816. //
  2817. // As further insurance against mistakes, check the segment index
  2818. // to make sure it is less than HEAP_MAXIMUM_SEGMENTS (16). This
  2819. // should fix all the dorks who have ASCII or Unicode where the
  2820. // heap header is supposed to be.
  2821. //
  2822. // Note that this test is just the opposite of the test used in
  2823. // Rtl Free Heap
  2824. //
  2825. if ((BusyBlock->Flags & HEAP_ENTRY_BUSY) &&
  2826. (((ULONG_PTR)BaseAddress & 0x7) == 0) &&
  2827. (BusyBlock->SegmentIndex < HEAP_MAXIMUM_SEGMENTS)) {
  2828. //
  2829. // Check if this is a virtual block allocation
  2830. //
  2831. if (BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) {
  2832. PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock;
  2833. //
  2834. // This is a big virtual block allocation. To free it
  2835. // we only have to remove it from the heap's list of
  2836. // virtually allocated blocks, unlock the heap, and return
  2837. // the block to vm
  2838. //
  2839. VirtualAllocBlock = CONTAINING_RECORD( BusyBlock, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock );
  2840. RtlpHeapRemoveEntryList( &VirtualAllocBlock->Entry );
  2841. #ifndef NTOS_KERNEL_RUNTIME
  2842. //
  2843. // In the non kernel case see if we need to free the tag
  2844. //
  2845. if (IS_HEAP_TAGGING_ENABLED()) {
  2846. RtlpUpdateTagEntry( Heap,
  2847. VirtualAllocBlock->ExtraStuff.TagIndex,
  2848. VirtualAllocBlock->CommitSize >> HEAP_GRANULARITY_SHIFT,
  2849. 0,
  2850. VirtualFreeAction );
  2851. }
  2852. #endif // NTOS_KERNEL_RUNTIME
  2853. FreeSize = 0;
  2854. Status = RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
  2855. (PVOID *)&VirtualAllocBlock,
  2856. &FreeSize,
  2857. MEM_RELEASE );
  2858. //
  2859. // Check if everything worked okay; if we had trouble freeing
  2860. // the block back to vm, return an error if necessary.
  2861. //
  2862. if (NT_SUCCESS( Status )) {
  2863. #ifndef NTOS_KERNEL_RUNTIME
  2864. if( IsHeapLogging( HeapHandle ) ) {
  2865. PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
  2866. PPERFINFO_TRACE_HEADER pEventHeader = NULL;
  2867. USHORT ReqSize = sizeof(HEAP_EVENT_FREE) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);
  2868. AcquireBufferLocation(&pEventHeader, &pThreadLocalData,&ReqSize);
  2869. if(pEventHeader && pThreadLocalData) {
  2870. PHEAP_EVENT_FREE pHeapEvent = (PHEAP_EVENT_FREE)( (SIZE_T)pEventHeader
  2871. +(SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));
  2872. pEventHeader->Packet.Size = (USHORT) ReqSize;
  2873. pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_FREE;
  2874. pHeapEvent->HeapHandle = (PVOID)HeapHandle;
  2875. pHeapEvent->Address = (PVOID)BaseAddress;
  2876. pHeapEvent->Source = MEMORY_FROM_SLOWPATH;
  2877. ReleaseBufferLocation(pThreadLocalData);
  2878. }
  2879. }
  2880. #endif //NTOS_KERNEL_RUNTIME
  2881. Result = TRUE;
  2882. } else {
  2883. SET_LAST_STATUS( Status );
  2884. }
  2885. } else if (RtlpQuickValidateBlock(Heap, BusyBlock)) {
  2886. //
  2887. // This block is not a big allocation so we need to
  2888. // get its size and coalesce the blocks. Note that
  2889. // the user mode heap does this conditionally on a heap
  2890. // flag. The coalesce function returns the newly formed
  2891. // free block and the new size.
  2892. //
  2893. #ifndef NTOS_KERNEL_RUNTIME
  2894. //
  2895. // First in the non kernel case remove any tagging we might
  2896. // have been using. Note that the tag will either be in
  2897. // the heap header, or in the extra block if present
  2898. //
  2899. if (IS_HEAP_TAGGING_ENABLED()) {
  2900. if (BusyBlock->Flags & HEAP_ENTRY_EXTRA_PRESENT) {
  2901. ExtraStuff = (PHEAP_ENTRY_EXTRA)(BusyBlock + BusyBlock->Size - 1);
  2902. TagIndex = RtlpUpdateTagEntry( Heap,
  2903. ExtraStuff->TagIndex,
  2904. BusyBlock->Size,
  2905. 0,
  2906. FreeAction );
  2907. } else {
  2908. TagIndex = RtlpUpdateTagEntry( Heap,
  2909. RtlpGetSmallTagIndex( Heap, BusyBlock),
  2910. BusyBlock->Size,
  2911. 0,
  2912. FreeAction );
  2913. }
  2914. } else {
  2915. TagIndex = 0;
  2916. }
  2917. #endif // NTOS_KERNEL_RUNTIME
  2918. //
  2919. // This is the size of the block we are freeing
  2920. //
  2921. FreeSize = BusyBlock->Size;
  2922. #ifndef NTOS_KERNEL_RUNTIME
  2923. //
  2924. // In the non kernel case see if we should coalesce on free
  2925. //
  2926. if (!(Heap->Flags & HEAP_DISABLE_COALESCE_ON_FREE)) {
  2927. #endif // NTOS_KERNEL_RUNTIME
  2928. //
  2929. // In the kernel case and in the tested user mode case we
  2930. // now coalesce free blocks
  2931. //
  2932. BusyBlock = (PHEAP_ENTRY)RtlpCoalesceFreeBlocks( Heap, (PHEAP_FREE_ENTRY)BusyBlock, &FreeSize, FALSE );
  2933. #ifndef NTOS_KERNEL_RUNTIME
  2934. }
  2935. #endif // NTOS_KERNEL_RUNTIME
  2936. //
  2937. // If the block should not be decommitted then try to put it
  2938. // on a free list
  2939. //
  2940. if ((FreeSize < Heap->DeCommitFreeBlockThreshold) ||
  2941. ((Heap->TotalFreeSize + FreeSize) < Heap->DeCommitTotalFreeThreshold)) {
  2942. if (FreeSize <= (ULONG)HEAP_MAXIMUM_BLOCK_SIZE) {
  2943. //
  2944. // It can fit on a dedicated free list so insert it there
  2945. //
  2946. RtlpInsertFreeBlockDirect( Heap, (PHEAP_FREE_ENTRY)BusyBlock, (USHORT)FreeSize );
  2947. //
  2948. // If there is a following entry then make sure the
  2949. // sizes agree
  2950. //
  2951. if (!(BusyBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {
  2952. HEAPASSERT((BusyBlock + FreeSize)->PreviousSize == (USHORT)FreeSize);
  2953. }
  2954. //
  2955. // Update the heap with the amount of free space
  2956. // available
  2957. //
  2958. Heap->TotalFreeSize += FreeSize;
  2959. } else {
  2960. //
  2961. // The block goes on the non dedicated free list
  2962. //
  2963. RtlpInsertFreeBlock( Heap, (PHEAP_FREE_ENTRY)BusyBlock, FreeSize );
  2964. }
  2965. #ifndef NTOS_KERNEL_RUNTIME
  2966. //
  2967. // In the non kernel case see if there was a tag and if
  2968. // so then update the entry to show that it's been freed
  2969. //
  2970. if (TagIndex != 0) {
  2971. PHEAP_FREE_ENTRY_EXTRA FreeExtra;
  2972. BusyBlock->Flags |= HEAP_ENTRY_EXTRA_PRESENT;
  2973. FreeExtra = (PHEAP_FREE_ENTRY_EXTRA)(BusyBlock + BusyBlock->Size) - 1;
  2974. FreeExtra->TagIndex = TagIndex;
  2975. FreeExtra->FreeBackTraceIndex = 0;
  2976. if (Heap->Flags & HEAP_CAPTURE_STACK_BACKTRACES) {
  2977. FreeExtra->FreeBackTraceIndex = (USHORT)RtlLogStackBackTrace();
  2978. }
  2979. }
  2980. #endif // NTOS_KERNEL_RUNTIME
  2981. } else {
  2982. //
  2983. // Otherwise the block is big enough to decommit so have a
  2984. // worker routine do the decommit
  2985. //
  2986. RtlpDeCommitFreeBlock( Heap, (PHEAP_FREE_ENTRY)BusyBlock, FreeSize );
  2987. }
  2988. #ifndef NTOS_KERNEL_RUNTIME
  2989. if( IsHeapLogging( HeapHandle ) ) {
  2990. PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
  2991. PPERFINFO_TRACE_HEADER pEventHeader = NULL;
  2992. USHORT ReqSize = sizeof(HEAP_EVENT_FREE) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);
  2993. AcquireBufferLocation(&pEventHeader, &pThreadLocalData,&ReqSize);
  2994. if(pEventHeader && pThreadLocalData) {
  2995. PHEAP_EVENT_FREE pHeapEvent = (PHEAP_EVENT_FREE)( (SIZE_T)pEventHeader
  2996. +(SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));
  2997. pEventHeader->Packet.Size = (USHORT) ReqSize;
  2998. pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_FREE;
  2999. pHeapEvent->HeapHandle = (PVOID)HeapHandle;
  3000. pHeapEvent->Address = (PVOID)BaseAddress;
  3001. pHeapEvent->Source = MEMORY_FROM_SLOWPATH;
  3002. ReleaseBufferLocation(pThreadLocalData);
  3003. }
  3004. }
  3005. #endif //NTOS_KERNEL_RUNTIME
  3006. //
  3007. // And say the free worked fine
  3008. //
  3009. Result = TRUE;
  3010. } else {
  3011. SET_LAST_STATUS( STATUS_INVALID_PARAMETER );
  3012. }
  3013. } else {
  3014. //
3015. // Not a busy block, or it's not aligned, or the segment is
3016. // too big, meaning it's corrupt
  3017. //
  3018. SET_LAST_STATUS( STATUS_INVALID_PARAMETER );
  3019. }
  3020. } except( RtlpHeapExceptionFilter(GetExceptionCode()) ) {
  3021. SET_LAST_STATUS( GetExceptionCode() );
  3022. Result = FALSE;
  3023. }
  3024. } finally {
  3025. //
  3026. // Unlock the heap
  3027. //
  3028. if (LockAcquired) {
  3029. RtlReleaseLockRoutine( Heap->LockVariable );
  3030. }
  3031. }
  3032. //
  3033. // And return to our caller
  3034. //
  3035. RtlpRegisterOperation(Heap, BlockSize, HEAP_OP_FREE);
  3036. HEAP_PERF_STOP_TIMER(Heap, HEAP_OP_FREE);
  3037. return Result;
  3038. }
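//
// A minimal illustrative sketch (plain C, not part of this module) of
// the decommit decision made in the free path above. The SKETCH_*
// names are invented stand-ins for the heap fields used there; all
// sizes are in heap allocation units.
//
#include <stddef.h>

typedef struct _SKETCH_HEAP {
    size_t TotalFreeSize;              // current free space, in units
    size_t DeCommitFreeBlockThreshold; // per-block decommit trigger
    size_t DeCommitTotalFreeThreshold; // heap wide decommit trigger
} SKETCH_HEAP;

//
// Returns nonzero if a freed block of FreeSize units should stay on a
// free list, and zero if it is big enough to be decommitted.
//
int
SketchKeepOnFreeList (
    const SKETCH_HEAP *Heap,
    size_t FreeSize
    )
{
    return (FreeSize < Heap->DeCommitFreeBlockThreshold) ||
           ((Heap->TotalFreeSize + FreeSize) < Heap->DeCommitTotalFreeThreshold);
}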
  3039. SIZE_T
  3040. RtlSizeHeap (
  3041. IN PVOID HeapHandle,
  3042. IN ULONG Flags,
  3043. IN PVOID BaseAddress
  3044. )
  3045. /*++
  3046. Routine Description:
  3047. This routine returns the size, in bytes, of the indicated block
  3048. of heap storage. The size only includes the number of bytes the
  3049. original caller used to allocate the block and not any unused
  3050. bytes at the end of the block.
  3051. Arguments:
  3052. HeapHandle - Supplies a pointer to the heap that owns the block
  3053. being queried
  3054. Flags - Supplies a set of flags used to allocate the block
  3055. BaseAddress - Supplies the address of the block being queried
  3056. Return Value:
  3057. SIZE_T - returns the size, in bytes, of the queried block, or -1
  3058. if the block is not in use.
  3059. --*/
  3060. {
  3061. PHEAP Heap = (PHEAP)HeapHandle;
  3062. PHEAP_ENTRY BusyBlock;
  3063. SIZE_T BusySize;
  3064. //
3065. // Complement the input flags with those enforced by the heap
  3066. //
  3067. Flags |= Heap->ForceFlags;
  3068. //
  3069. // Check if this is the nonkernel debug version of heap
  3070. //
  3071. #ifndef NTOS_KERNEL_RUNTIME
  3072. if (DEBUG_HEAP( Flags )) {
  3073. return RtlDebugSizeHeap( HeapHandle, Flags, BaseAddress );
  3074. }
  3075. #endif // NTOS_KERNEL_RUNTIME
  3076. //
  3077. // No lock is required since nothing is modified and nothing
3078. // outside the busy block is read. Back up to get a pointer
  3079. // to the heap entry
  3080. //
  3081. BusyBlock = (PHEAP_ENTRY)BaseAddress - 1;
  3082. //
  3083. // If the block is not in use then the answer is -1 and
  3084. // we'll set the error status for the user mode thread
  3085. //
  3086. if (!(BusyBlock->Flags & HEAP_ENTRY_BUSY)) {
  3087. BusySize = -1;
  3088. SET_LAST_STATUS( STATUS_INVALID_PARAMETER );
  3089. //
  3090. // Otherwise if the block is from our large allocation then
  3091. // we'll get the result from that routine
  3092. //
  3093. } else if (BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) {
  3094. BusySize = RtlpGetSizeOfBigBlock( BusyBlock );
  3095. //
  3096. // Otherwise the block must be one that we can handle so
  3097. // calculate its block size and then subtract what's not being
  3098. // used by the caller.
  3099. //
  3100. // Note: This includes the heap entry header in its calculation.
  3101. //
  3102. } else {
  3103. BusySize = (((SIZE_T)RtlpGetAllocationUnits(Heap, BusyBlock)) << HEAP_GRANULARITY_SHIFT) -
  3104. RtlpGetUnusedBytes(Heap, BusyBlock);
  3105. }
  3106. //
  3107. // And return to our caller
  3108. //
  3109. return BusySize;
  3110. }
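//
// A minimal illustrative sketch (plain C, not part of this module) of
// the size computation above, assuming an 8 byte allocation unit
// (HEAP_GRANULARITY_SHIFT == 3 on 32 bit builds). As the note above
// says, the block's unit count covers the header too, and the unused
// byte count accounts for the header plus any padding, so the
// subtraction recovers exactly the caller's original request.
//
#include <stddef.h>

#define SKETCH_GRANULARITY_SHIFT 3 // 8 byte allocation units (assumed)

size_t
SketchUserSize (
    size_t AllocationUnits, // header + user data + padding, in units
    size_t UnusedBytes      // header + padding, in bytes
    )
{
    return (AllocationUnits << SKETCH_GRANULARITY_SHIFT) - UnusedBytes;
}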
  3111. NTSTATUS
  3112. RtlZeroHeap (
  3113. IN PVOID HeapHandle,
  3114. IN ULONG Flags
  3115. )
  3116. /*++
  3117. Routine Description:
3118. This routine zeroes (or fills in) all the free blocks in a heap.
  3119. It does not touch big allocations.
  3120. Arguments:
  3121. HeapHandle - Supplies a pointer to the heap being zeroed
3122. Flags - Supplies a set of heap flags to complement those already
  3123. set in the heap
  3124. Return Value:
  3125. NTSTATUS - An appropriate status code
  3126. --*/
  3127. {
  3128. PHEAP Heap = (PHEAP)HeapHandle;
  3129. NTSTATUS Status;
  3130. BOOLEAN LockAcquired = FALSE;
  3131. PHEAP_SEGMENT Segment;
  3132. ULONG SegmentIndex;
  3133. PHEAP_ENTRY CurrentBlock;
  3134. PHEAP_FREE_ENTRY FreeBlock;
  3135. SIZE_T Size;
  3136. PHEAP_UNCOMMMTTED_RANGE UnCommittedRange;
  3137. RTL_PAGED_CODE();
  3138. //
3139. // Complement the input flags with those enforced by the heap
  3140. //
  3141. Flags |= Heap->ForceFlags;
  3142. //
  3143. // Check if this is the nonkernel debug version of heap
  3144. //
  3145. #ifndef NTOS_KERNEL_RUNTIME
  3146. if (DEBUG_HEAP( Flags )) {
  3147. return RtlDebugZeroHeap( HeapHandle, Flags );
  3148. }
  3149. #endif // NTOS_KERNEL_RUNTIME
  3150. //
  3151. // Unless something happens otherwise we'll assume that we'll
  3152. // be successful
  3153. //
  3154. Status = STATUS_SUCCESS;
  3155. try {
  3156. //
  3157. // Lock the heap
  3158. //
  3159. if (!(Flags & HEAP_NO_SERIALIZE)) {
  3160. RtlAcquireLockRoutine( Heap->LockVariable );
  3161. LockAcquired = TRUE;
  3162. }
  3163. try {
  3164. //
3165. // Zero fill all the free blocks in all the segments
  3166. //
  3167. for (SegmentIndex=0; SegmentIndex<HEAP_MAXIMUM_SEGMENTS; SegmentIndex++) {
  3168. Segment = Heap->Segments[ SegmentIndex ];
  3169. if (!Segment) {
  3170. continue;
  3171. }
  3172. UnCommittedRange = Segment->UnCommittedRanges;
  3173. CurrentBlock = Segment->FirstEntry;
  3174. //
  3175. // With the current segment we'll zoom through the
  3176. // blocks until we reach the end
  3177. //
  3178. while (CurrentBlock < Segment->LastValidEntry) {
  3179. Size = CurrentBlock->Size << HEAP_GRANULARITY_SHIFT;
  3180. //
  3181. // If the block is not in use then we'll either zero
  3182. // it or fill it.
  3183. //
  3184. if (!(CurrentBlock->Flags & HEAP_ENTRY_BUSY)) {
  3185. FreeBlock = (PHEAP_FREE_ENTRY)CurrentBlock;
  3186. if ((Heap->Flags & HEAP_FREE_CHECKING_ENABLED) &&
  3187. (CurrentBlock->Flags & HEAP_ENTRY_FILL_PATTERN)) {
  3188. RtlFillMemoryUlong( FreeBlock + 1,
  3189. Size - sizeof( *FreeBlock ),
  3190. FREE_HEAP_FILL );
  3191. } else {
  3192. RtlFillMemoryUlong( FreeBlock + 1,
  3193. Size - sizeof( *FreeBlock ),
  3194. 0 );
  3195. }
  3196. }
  3197. //
3198. // If the following entry is uncommitted then we need to
3199. // skip over it. This code strongly implies that the
3200. // uncommitted range list is in perfect sync with the
3201. // blocks in the segment
  3202. //
  3203. if (CurrentBlock->Flags & HEAP_ENTRY_LAST_ENTRY) {
  3204. CurrentBlock += CurrentBlock->Size;
  3205. //
3206. // Check if we've reached the end of the segment
  3207. // and should just break out of the while loop
  3208. //
  3209. // "break;" would probably be more clear here
  3210. //
  3211. if (UnCommittedRange == NULL) {
  3212. CurrentBlock = Segment->LastValidEntry;
  3213. //
  3214. // Otherwise skip over the uncommitted range
  3215. //
  3216. } else {
  3217. CurrentBlock = (PHEAP_ENTRY)
  3218. ((PCHAR)UnCommittedRange->Address + UnCommittedRange->Size);
  3219. UnCommittedRange = UnCommittedRange->Next;
  3220. }
  3221. //
  3222. // Otherwise the next block exists so advance to it
  3223. //
  3224. } else {
  3225. CurrentBlock += CurrentBlock->Size;
  3226. }
  3227. }
  3228. }
  3229. } except( RtlpHeapExceptionFilter(GetExceptionCode()) ) {
  3230. Status = GetExceptionCode();
  3231. }
  3232. } finally {
  3233. //
  3234. // Unlock the heap
  3235. //
  3236. if (LockAcquired) {
  3237. RtlReleaseLockRoutine( Heap->LockVariable );
  3238. }
  3239. }
  3240. return Status;
  3241. }
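//
// A minimal illustrative sketch (plain C, not part of this module) of
// the segment walk used above. The SKETCH_* types are invented,
// simplified stand-ins; one SKETCH_ENTRY is one allocation unit here,
// and the sketch assumes (as the comment above does) that the
// uncommitted range list is in perfect sync with the blocks.
//
#include <stddef.h>
#include <stdint.h>

#define SKETCH_LAST_ENTRY 0x10

typedef struct _SKETCH_ENTRY {
    uint16_t Size;  // block size in allocation units
    uint16_t Flags; // SKETCH_LAST_ENTRY marks a block before a gap
} SKETCH_ENTRY;

typedef struct _SKETCH_UCR {
    uintptr_t Address;        // start of the uncommitted gap
    size_t Size;              // gap size in bytes
    struct _SKETCH_UCR *Next; // sorted by ascending address
} SKETCH_UCR;

void
SketchWalkSegment (
    SKETCH_ENTRY *Block,
    SKETCH_ENTRY *LastValid,
    SKETCH_UCR *Ucr
    )
{
    while (Block < LastValid) {
        // ... visit Block here (zero it, validate it, etc.) ...
        if (Block->Flags & SKETCH_LAST_ENTRY) {
            if (Ucr == NULL) {
                Block = LastValid; // no more gaps, the segment is done
            } else {
                Block = (SKETCH_ENTRY *)(Ucr->Address + Ucr->Size);
                Ucr = Ucr->Next;   // resume right after the gap
            }
        } else {
            Block += Block->Size;  // next physically adjacent block
        }
    }
}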
  3242. //
  3243. // Local Support Routine
  3244. //
  3245. PHEAP_UNCOMMMTTED_RANGE
  3246. RtlpCreateUnCommittedRange (
  3247. IN PHEAP_SEGMENT Segment
  3248. )
  3249. /*++
  3250. Routine Description:
3251. This routine adds a new uncommitted range structure to the specified heap
  3252. segment. This routine works by essentially doing a pop of the stack of
  3253. unused uncommitted range structures located off the heap structure. If
  3254. the stack is empty then we'll create some more before doing the pop.
  3255. Arguments:
  3256. Segment - Supplies the heap segment being modified
  3257. Return Value:
  3258. PHEAP_UNCOMMITTED_RANGE - returns a pointer to the newly created
  3259. uncommitted range structure
  3260. --*/
  3261. {
  3262. NTSTATUS Status;
  3263. PVOID FirstEntry, LastEntry;
  3264. PHEAP_UNCOMMMTTED_RANGE UnCommittedRange, *pp;
  3265. SIZE_T ReserveSize, CommitSize;
  3266. PHEAP_UCR_SEGMENT UCRSegment;
  3267. RTL_PAGED_CODE();
  3268. //
  3269. // Get a pointer to the unused uncommitted range structures for
  3270. // the specified heap
  3271. //
  3272. pp = &Segment->Heap->UnusedUnCommittedRanges;
  3273. //
  3274. // If the list is null then we need to allocate some more to
  3275. // put on the list
  3276. //
  3277. if (*pp == NULL) {
  3278. //
  3279. // Get the next uncommitted range segment from the heap
  3280. //
  3281. UCRSegment = Segment->Heap->UCRSegments;
  3282. //
  3283. // If there are no more uncommitted range segments or
3284. // the segment's committed and reserved sizes are equal (meaning
  3285. // it's all used up) then we need to allocate another uncommitted
  3286. // range segment
  3287. //
  3288. if ((UCRSegment == NULL) ||
  3289. (UCRSegment->CommittedSize == UCRSegment->ReservedSize)) {
  3290. //
  3291. // We'll reserve 16 pages of memory and commit at this
  3292. // time one page of it.
  3293. //
  3294. ReserveSize = PAGE_SIZE * 16;
  3295. UCRSegment = NULL;
  3296. Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
  3297. &UCRSegment,
  3298. 0,
  3299. &ReserveSize,
  3300. MEM_RESERVE,
  3301. PAGE_READWRITE );
  3302. if (!NT_SUCCESS( Status )) {
  3303. return NULL;
  3304. }
  3305. CommitSize = PAGE_SIZE;
  3306. Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
  3307. &UCRSegment,
  3308. 0,
  3309. &CommitSize,
  3310. MEM_COMMIT,
  3311. PAGE_READWRITE );
  3312. if (!NT_SUCCESS( Status )) {
  3313. RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
  3314. &UCRSegment,
  3315. &ReserveSize,
  3316. MEM_RELEASE );
  3317. return NULL;
  3318. }
  3319. //
  3320. // Add this new segment to the front of the UCR segments
  3321. //
  3322. UCRSegment->Next = Segment->Heap->UCRSegments;
  3323. Segment->Heap->UCRSegments = UCRSegment;
  3324. //
3325. // Set the segment's commit and reserve sizes
  3326. //
  3327. UCRSegment->ReservedSize = ReserveSize;
  3328. UCRSegment->CommittedSize = CommitSize;
  3329. //
  3330. // Point to the first free spot in the segment
  3331. //
  3332. FirstEntry = (PCHAR)(UCRSegment + 1);
  3333. } else {
  3334. //
3335. // We have an existing UCR segment with available space.
  3336. // So now try and commit another PAGE_SIZE bytes. When we are done
  3337. // FirstEntry will point to the newly committed space
  3338. //
  3339. CommitSize = PAGE_SIZE;
  3340. FirstEntry = (PCHAR)UCRSegment + UCRSegment->CommittedSize;
  3341. Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
  3342. &FirstEntry,
  3343. 0,
  3344. &CommitSize,
  3345. MEM_COMMIT,
  3346. PAGE_READWRITE );
  3347. if (!NT_SUCCESS( Status )) {
  3348. return NULL;
  3349. }
  3350. //
  3351. // And update the amount committed in the segment
  3352. //
  3353. UCRSegment->CommittedSize += CommitSize;
  3354. }
  3355. //
3356. // At this point the UCR segment exists and FirstEntry points to the
3357. // start of the available committed space. We'll make LastEntry
  3358. // point to the end of the committed space
  3359. //
  3360. LastEntry = (PCHAR)UCRSegment + UCRSegment->CommittedSize;
  3361. //
3362. // Now the task is to push all of this new space onto the
  3363. // unused uncommitted range list off the heap, then we can
  3364. // do a regular pop
  3365. //
  3366. UnCommittedRange = (PHEAP_UNCOMMMTTED_RANGE)FirstEntry;
  3367. pp = &Segment->Heap->UnusedUnCommittedRanges;
  3368. while ((PCHAR)UnCommittedRange < (PCHAR)LastEntry) {
  3369. *pp = UnCommittedRange;
  3370. pp = &UnCommittedRange->Next;
  3371. UnCommittedRange += 1;
  3372. }
  3373. //
  3374. // Null terminate the list
  3375. //
  3376. *pp = NULL;
  3377. //
3378. // And have Pp point to the new top of the list
  3379. //
  3380. pp = &Segment->Heap->UnusedUnCommittedRanges;
  3381. }
  3382. //
3383. // At this point Pp points to a non-empty list of unused uncommitted
  3384. // range structures. So we pop the list and return the top to our caller
  3385. //
  3386. UnCommittedRange = *pp;
  3387. *pp = UnCommittedRange->Next;
  3388. return UnCommittedRange;
  3389. }
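//
// A minimal illustrative sketch (plain C, not part of this module) of
// the carve-then-pop pattern above: a freshly committed region is
// sliced into nodes that are threaded onto a singly linked free list,
// after which an ordinary pop services the request. SKETCH_NODE is an
// invented stand-in for the uncommitted range structure.
//
#include <stddef.h>

typedef struct _SKETCH_NODE {
    struct _SKETCH_NODE *Next;
    // ... payload fields ...
} SKETCH_NODE;

SKETCH_NODE *
SketchCarveAndPop (
    SKETCH_NODE **List, // the free list to refill
    char *First,        // start of the committed space
    char *Last          // end of the committed space
    )
{
    SKETCH_NODE *Node = (SKETCH_NODE *)First;
    SKETCH_NODE **pp = List;

    while ((char *)(Node + 1) <= Last) { // push each whole node
        *pp = Node;
        pp = &Node->Next;
        Node += 1;
    }
    *pp = NULL;                          // terminate the list

    Node = *List;                        // then do a regular pop
    if (Node != NULL) {
        *List = Node->Next;
    }
    return Node;
}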
  3390. //
  3391. // Local Support Routine
  3392. //
  3393. VOID
  3394. RtlpDestroyUnCommittedRange (
  3395. IN PHEAP_SEGMENT Segment,
  3396. IN PHEAP_UNCOMMMTTED_RANGE UnCommittedRange
  3397. )
  3398. /*++
  3399. Routine Description:
  3400. This routine returns an uncommitted range structure back to the unused
  3401. uncommitted range list
  3402. Arguments:
  3403. Segment - Supplies any segment in the heap being modified. Most likely but
  3404. not necessarily the segment containing the uncommitted range structure
  3405. UnCommittedRange - Supplies a pointer to the uncommitted range structure
  3406. being decommissioned.
  3407. Return Value:
  3408. None.
  3409. --*/
  3410. {
  3411. RTL_PAGED_CODE();
  3412. //
  3413. // This routine simply does a "push" of the uncommitted range structure
  3414. // onto the heap's stack of unused uncommitted ranges
  3415. //
  3416. UnCommittedRange->Next = Segment->Heap->UnusedUnCommittedRanges;
  3417. Segment->Heap->UnusedUnCommittedRanges = UnCommittedRange;
  3418. //
3419. // For safety's sake we'll also zero out the fields in the decommissioned
  3420. // structure
  3421. //
  3422. UnCommittedRange->Address = 0;
  3423. UnCommittedRange->Size = 0;
  3424. //
  3425. // And return to our caller
  3426. //
  3427. return;
  3428. }
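//
// A minimal illustrative sketch (plain C, not part of this module) of
// the recycle path above: returning a node is a push onto the same
// singly linked stack, with the fields scrubbed for safety. The
// SKETCH_RANGE type is an invented stand-in.
//
#include <stddef.h>
#include <stdint.h>

typedef struct _SKETCH_RANGE {
    uintptr_t Address;
    size_t Size;
    struct _SKETCH_RANGE *Next;
} SKETCH_RANGE;

void
SketchRecycleRange (
    SKETCH_RANGE **FreeList,
    SKETCH_RANGE *Range
    )
{
    Range->Next = *FreeList; // push onto the unused stack
    *FreeList = Range;
    Range->Address = 0;      // scrub the decommissioned fields
    Range->Size = 0;
}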
  3429. //
  3430. // Local Support Routine
  3431. //
  3432. VOID
  3433. RtlpInsertUnCommittedPages (
  3434. IN PHEAP_SEGMENT Segment,
  3435. IN ULONG_PTR Address,
  3436. IN SIZE_T Size
  3437. )
  3438. /*++
  3439. Routine Description:
  3440. This routine adds the specified range to the list of uncommitted pages
  3441. in the segment. When done the information will hang off the segments
  3442. uncommitted ranges list.
  3443. Arguments:
  3444. Segment - Supplies a segment whose uncommitted range is being modified
  3445. Address - Supplies the base (start) address for the uncommitted range
  3446. Size - Supplies the size, in bytes, of the uncommitted range
  3447. Return Value:
  3448. None.
  3449. --*/
  3450. {
  3451. PHEAP_UNCOMMMTTED_RANGE UnCommittedRange, *pp;
  3452. RTL_PAGED_CODE();
  3453. //
3454. // Get a pointer to the front of the segment's uncommitted range list
  3455. // The list is sorted by ascending address
  3456. //
  3457. pp = &Segment->UnCommittedRanges;
  3458. //
  3459. // While we haven't reached the end of the list we'll zoom through
  3460. // trying to find a fit
  3461. //
  3462. while (UnCommittedRange = *pp) {
  3463. //
3464. // If the address we want is less than what we're pointing at then
  3465. // we've found where this new entry goes
  3466. //
  3467. if (UnCommittedRange->Address > Address) {
  3468. //
  3469. // If the new block matches right up to the existing block
  3470. // then we can simply backup the existing block and add
  3471. // to its size
  3472. //
  3473. if ((Address + Size) == UnCommittedRange->Address) {
  3474. UnCommittedRange->Address = Address;
  3475. UnCommittedRange->Size += Size;
  3476. //
  3477. // Check if we need to update our notion of what the
  3478. // largest uncommitted range is
  3479. //
  3480. if (UnCommittedRange->Size > Segment->LargestUnCommittedRange) {
  3481. Segment->LargestUnCommittedRange = UnCommittedRange->Size;
  3482. }
  3483. //
  3484. // And return to our caller
  3485. //
  3486. return;
  3487. }
  3488. //
  3489. // Pp is the address of the block right before us, and *Pp is the
  3490. // address of the block right after us. So now fall out to where
  3491. // the insertion takes place.
  3492. //
  3493. break;
  3494. //
  3495. // Otherwise if this existing block stops right where the new block
  3496. // starts then we get to modify this entry.
  3497. //
  3498. } else if ((UnCommittedRange->Address + UnCommittedRange->Size) == Address) {
  3499. //
  3500. // Remember the starting address and compute the new larger size
  3501. //
  3502. Address = UnCommittedRange->Address;
  3503. Size += UnCommittedRange->Size;
  3504. //
  3505. // Remove this entry from the list and then return it to the
  3506. // unused uncommitted list
  3507. //
  3508. *pp = UnCommittedRange->Next;
  3509. RtlpDestroyUnCommittedRange( Segment, UnCommittedRange );
  3510. //
  3511. // Modify the segment counters and largest size state. The next
  3512. // time through the loop should hit the first case above where
3513. // we'll either merge with a block following us or add a new
  3514. // entry
  3515. //
  3516. Segment->NumberOfUnCommittedRanges -= 1;
  3517. if (Size > Segment->LargestUnCommittedRange) {
  3518. Segment->LargestUnCommittedRange = Size;
  3519. }
  3520. //
3521. // Otherwise we'll continue searching down the list
  3522. //
  3523. } else {
  3524. pp = &UnCommittedRange->Next;
  3525. }
  3526. }
  3527. //
  3528. // If we reach this point that means we've either fallen off the end of the
  3529. // list, or the list is empty, or we've located the spot where a new uncommitted
  3530. // range structure belongs. So allocate a new uncommitted range structure,
  3531. // and make sure we got one.
  3532. //
  3533. // Pp is the address of the block right before us and *Pp is the address of the
  3534. // block right after us
  3535. //
  3536. UnCommittedRange = RtlpCreateUnCommittedRange( Segment );
  3537. if (UnCommittedRange == NULL) {
  3538. HeapDebugPrint(( "Abandoning uncommitted range (%p for %x)\n", Address, Size ));
  3539. // HeapDebugBreak( NULL );
  3540. return;
  3541. }
  3542. //
  3543. // Fill in the new uncommitted range structure
  3544. //
  3545. UnCommittedRange->Address = Address;
  3546. UnCommittedRange->Size = Size;
  3547. //
  3548. // Insert it in the list for the segment
  3549. //
  3550. UnCommittedRange->Next = *pp;
  3551. *pp = UnCommittedRange;
  3552. //
  3553. // Update the segment counters and notion of the largest uncommitted range
  3554. //
  3555. Segment->NumberOfUnCommittedRanges += 1;
  3556. if (Size >= Segment->LargestUnCommittedRange) {
  3557. Segment->LargestUnCommittedRange = Size;
  3558. }
  3559. //
  3560. // And return to our caller
  3561. //
  3562. return;
  3563. }
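//
// A minimal illustrative sketch (plain C, not part of this module) of
// the sorted insert with coalescing performed above. The list is kept
// sorted by ascending address; a range that abuts a neighbor is merged
// into it, otherwise a new node is linked in place. SketchNewRange is
// a malloc based stand-in for RtlpCreateUnCommittedRange, and the
// sketch leaks the node absorbed in the predecessor merge (the real
// code recycles it).
//
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct _SKETCH_RANGE {
    uintptr_t Address;
    size_t Size;
    struct _SKETCH_RANGE *Next; // sorted by ascending Address
} SKETCH_RANGE;

static SKETCH_RANGE *
SketchNewRange (
    void
    )
{
    return (SKETCH_RANGE *)malloc( sizeof( SKETCH_RANGE ) );
}

void
SketchInsertRange (
    SKETCH_RANGE **pp, // head of the sorted list
    uintptr_t Address,
    size_t Size
    )
{
    SKETCH_RANGE *Range;

    while ((Range = *pp) != NULL) {
        if (Range->Address > Address) {
            if (Address + Size == Range->Address) {
                Range->Address = Address; // merge with the follower
                Range->Size += Size;
                return;
            }
            break;                        // insert right before Range
        } else if (Range->Address + Range->Size == Address) {
            Address = Range->Address;     // absorb the predecessor and
            Size += Range->Size;          // retry, which merges with a
            *pp = Range->Next;            // follower or inserts below
        } else {
            pp = &Range->Next;
        }
    }

    Range = SketchNewRange();
    if (Range != NULL) {
        Range->Address = Address;
        Range->Size = Size;
        Range->Next = *pp;
        *pp = Range;
    }
}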
  3564. //
  3565. // Declared in heappriv.h
  3566. //
  3567. PHEAP_FREE_ENTRY
  3568. RtlpFindAndCommitPages (
  3569. IN PHEAP Heap,
  3570. IN PHEAP_SEGMENT Segment,
  3571. IN OUT PSIZE_T Size,
  3572. IN PVOID AddressWanted OPTIONAL
  3573. )
  3574. /*++
  3575. Routine Description:
  3576. This function searches the supplied segment for an uncommitted range that
  3577. satisfies the specified size. It commits the range and returns a heap entry
  3578. for the range.
  3579. Arguments:
  3580. Heap - Supplies the heap being manipulated
  3581. Segment - Supplies the segment being searched
  3582. Size - Supplies the size of what we need to look for, on return it contains
3583. the size of what we've just found and committed.
  3584. AddressWanted - Optionally gives an address where we would like the pages
  3585. based. If supplied the entry must start at this address
  3586. Return Value:
  3587. PHEAP_FREE_ENTRY - Returns a pointer to the newly committed range that
  3588. satisfies the given size requirement, or NULL if we could not find
  3589. something large enough and/or based at the address wanted.
  3590. --*/
  3591. {
  3592. NTSTATUS Status;
  3593. PHEAP_ENTRY FirstEntry, LastEntry, PreviousLastEntry;
  3594. PHEAP_UNCOMMMTTED_RANGE PreviousUnCommittedRange, UnCommittedRange, *pp;
  3595. ULONG_PTR Address;
  3596. SIZE_T Length;
  3597. RTL_PAGED_CODE();
  3598. //
3599. // What the outer loop does is cycle through the uncommitted ranges
3600. // stored in the specified segment
  3601. //
  3602. PreviousUnCommittedRange = NULL;
  3603. pp = &Segment->UnCommittedRanges;
  3604. while (UnCommittedRange = *pp) {
  3605. //
3606. // Check for the best of all worlds, where the size of this current
3607. // uncommitted range satisfies our size request and either the user
3608. // didn't specify an address or the addresses match
  3609. //
  3610. if ((UnCommittedRange->Size >= *Size) &&
  3611. (!ARGUMENT_PRESENT( AddressWanted ) || (UnCommittedRange->Address == (ULONG_PTR)AddressWanted ))) {
  3612. //
  3613. // Calculate an address
  3614. //
  3615. Address = UnCommittedRange->Address;
  3616. //
  3617. // Commit the memory. If the heap doesn't have a commit
  3618. // routine then use the default mm supplied routine.
  3619. //
  3620. if (Heap->CommitRoutine != NULL) {
  3621. Status = (Heap->CommitRoutine)( Heap,
  3622. (PVOID *)&Address,
  3623. Size );
  3624. } else {
  3625. #ifndef NTOS_KERNEL_RUNTIME
  3626. //
3627. // If we have a small uncommitted range left, adjust the size to
3628. // take that block too
  3629. //
  3630. if (!(RtlpDisableHeapLookaside & HEAP_COMPAT_DISABLE_LARGECACHE)
  3631. &&
  3632. ( (UnCommittedRange->Size - (*Size)) <= (((SIZE_T)Heap->DeCommitFreeBlockThreshold) << HEAP_GRANULARITY_SHIFT) )
  3633. &&
  3634. (UnCommittedRange->Size < (((SIZE_T)Heap->VirtualMemoryThreshold) << HEAP_GRANULARITY_SHIFT)) ) {
  3635. *Size = UnCommittedRange->Size;
  3636. }
  3637. #endif // NTOS_KERNEL_RUNTIME
  3638. #ifdef _WIN64
  3639. //
3640. // This is for Wow64 processes. This is needed to return PAGE_SIZE
  3641. // aligned sizes.
  3642. //
  3643. *Size = ROUND_UP_TO_POWER2 (*Size, PAGE_SIZE);
  3644. #endif
  3645. Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
  3646. (PVOID *)&Address,
  3647. 0,
  3648. Size,
  3649. MEM_COMMIT,
  3650. HEAP_PROTECTION );
  3651. }
  3652. if (!NT_SUCCESS( Status )) {
  3653. return NULL;
  3654. }
  3655. //
  3656. // At this point we have some committed memory, with Address and Size
  3657. // giving us the necessary details
  3658. //
  3659. // Update the number of uncommitted pages in the segment and if necessary
  3660. // mark down the largest uncommitted range
  3661. //
  3662. Segment->NumberOfUnCommittedPages -= (ULONG) (*Size / PAGE_SIZE);
  3663. if (Segment->LargestUnCommittedRange == UnCommittedRange->Size) {
  3664. Segment->LargestUnCommittedRange = 0;
  3665. }
  3666. //
  3667. // First entry is the start of the newly committed range
  3668. //
  3669. FirstEntry = (PHEAP_ENTRY)Address;
  3670. //
  3671. // We want last entry to point to the last real entry before
  3672. // this newly committed spot. To do this we start by
  3673. // setting last entry to either the first entry for the
  3674. // segment or (if we can do better), to right after the last
  3675. // uncommitted range we examined. Either way it points to
  3676. // some committed range
  3677. //
  3678. if ((Segment->LastEntryInSegment->Flags & HEAP_ENTRY_LAST_ENTRY) &&
  3679. (ULONG_PTR)(Segment->LastEntryInSegment + Segment->LastEntryInSegment->Size) == UnCommittedRange->Address) {
  3680. LastEntry = Segment->LastEntryInSegment;
  3681. } else {
  3682. if (PreviousUnCommittedRange == NULL) {
  3683. LastEntry = Segment->FirstEntry;
  3684. } else {
  3685. LastEntry = (PHEAP_ENTRY)(PreviousUnCommittedRange->Address +
  3686. PreviousUnCommittedRange->Size);
  3687. }
  3688. //
  3689. // Now we zoom through the entries until we find the one
  3690. // marked last
  3691. //
  3692. while (!(LastEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) {
  3693. PreviousLastEntry = LastEntry;
  3694. LastEntry += LastEntry->Size;
  3695. if (((PCHAR)LastEntry >= (PCHAR)Segment->LastValidEntry) || (LastEntry->Size == 0)) {
  3696. //
  3697. // Check for the situation where the last entry in the
  3698. // segment isn't marked as a last entry but does put
3699. // us right where we have the new committed range
  3700. //
  3701. if (LastEntry == (PHEAP_ENTRY)Address) {
  3702. LastEntry = PreviousLastEntry;
  3703. break;
  3704. }
  3705. HeapDebugPrint(( "Heap missing last entry in committed range near %p\n", PreviousLastEntry ));
  3706. HeapDebugBreak( PreviousLastEntry );
  3707. return NULL;
  3708. }
  3709. }
  3710. }
  3711. //
  3712. // Turn off the last bit on this entry because what's following
  3713. // is no longer uncommitted
  3714. //
  3715. LastEntry->Flags &= ~HEAP_ENTRY_LAST_ENTRY;
  3716. //
3717. // Shrink the uncommitted range by the size we've committed
  3718. //
  3719. UnCommittedRange->Address += *Size;
  3720. UnCommittedRange->Size -= *Size;
  3721. //
  3722. // Now if the size is zero then we've committed everything that there
  3723. // was in the range. Otherwise make sure the first entry of what
  3724. // we've just committed knows that an uncommitted range follows.
  3725. //
  3726. if (UnCommittedRange->Size == 0) {
  3727. //
3728. // This uncommitted range is about to vanish. Based on whether the
3729. // range is the last one in the segment we know how to
3730. // mark the committed range as being last or not.
  3731. //
  3732. if (UnCommittedRange->Address == (ULONG_PTR)Segment->LastValidEntry) {
  3733. FirstEntry->Flags = HEAP_ENTRY_LAST_ENTRY;
  3734. Segment->LastEntryInSegment = FirstEntry;
  3735. } else {
  3736. FirstEntry->Flags = 0;
  3737. Segment->LastEntryInSegment = Segment->FirstEntry;
  3738. }
  3739. //
  3740. // Remove this zero sized range from the uncommitted range
  3741. // list, and update the segment counters
  3742. //
  3743. *pp = UnCommittedRange->Next;
  3744. RtlpDestroyUnCommittedRange( Segment, UnCommittedRange );
  3745. Segment->NumberOfUnCommittedRanges -= 1;
  3746. } else {
  3747. //
  3748. // Otherwise the range is not empty so we know what we committed
  3749. // is immediately followed by an uncommitted range
  3750. //
  3751. FirstEntry->Flags = HEAP_ENTRY_LAST_ENTRY;
  3752. Segment->LastEntryInSegment = FirstEntry;
  3753. }
  3754. //
3755. // Update the fields in the first entry, and optionally
3756. // the following entry.
  3757. //
  3758. FirstEntry->SegmentIndex = LastEntry->SegmentIndex;
  3759. FirstEntry->Size = (USHORT)(*Size >> HEAP_GRANULARITY_SHIFT);
  3760. FirstEntry->PreviousSize = LastEntry->Size;
  3761. if (!(FirstEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) {
  3762. (FirstEntry + FirstEntry->Size)->PreviousSize = FirstEntry->Size;
  3763. }
  3764. //
  3765. // Now if we adjusted the largest uncommitted range to zero then
3766. // we need to go back and find the largest uncommitted range.
3767. // To do that we simply zoom down the uncommitted range list
  3768. // remembering the largest one
  3769. //
  3770. if (Segment->LargestUnCommittedRange == 0) {
  3771. UnCommittedRange = Segment->UnCommittedRanges;
  3772. while (UnCommittedRange != NULL) {
  3773. if (UnCommittedRange->Size >= Segment->LargestUnCommittedRange) {
  3774. Segment->LargestUnCommittedRange = UnCommittedRange->Size;
  3775. }
  3776. UnCommittedRange = UnCommittedRange->Next;
  3777. }
  3778. }
  3779. #ifndef NTOS_KERNEL_RUNTIME
  3780. if(IsHeapLogging( Heap ) ) {
  3781. PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
  3782. PPERFINFO_TRACE_HEADER pEventHeader = NULL;
  3783. USHORT ReqSize = sizeof(HEAP_EVENT_EXPANSION) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);
  3784. AcquireBufferLocation(&pEventHeader, &pThreadLocalData,&ReqSize);
  3785. if(pEventHeader && pThreadLocalData) {
  3786. SIZE_T UCBytes = 0;
  3787. PHEAP_EVENT_EXPANSION pHeapEvent = (PHEAP_EVENT_EXPANSION)( (SIZE_T)pEventHeader
  3788. + (SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));
  3789. pEventHeader->Packet.Size = (USHORT) ReqSize;
  3790. pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_EXTEND;
  3791. pHeapEvent->HeapHandle = (PVOID)Heap;
  3792. pHeapEvent->CommittedSize = *Size;
  3793. pHeapEvent->Address = (PVOID)FirstEntry;
  3794. pHeapEvent->FreeSpace = Heap->TotalFreeSize;
  3795. pHeapEvent->ReservedSpace = 0;
  3796. pHeapEvent->CommittedSpace = 0;
  3797. pHeapEvent->NoOfUCRs = 0;
  3798. UCBytes = GetUCBytes(Heap, &pHeapEvent->ReservedSpace, &pHeapEvent->NoOfUCRs);
  3799. pHeapEvent->ReservedSpace *= PAGE_SIZE;
  3800. pHeapEvent->CommittedSpace = pHeapEvent->ReservedSpace - UCBytes;
  3801. ReleaseBufferLocation(pThreadLocalData);
  3802. }
  3803. }
  3804. #endif //NTOS_KERNEL_RUNTIME
  3805. //
  3806. // And return the heap entry to our caller
  3807. //
  3808. return (PHEAP_FREE_ENTRY)FirstEntry;
  3809. } else {
  3810. //
3811. // Otherwise the current uncommitted range is too small or
  3812. // doesn't have the right address so go to the next uncommitted
  3813. // range entry
  3814. //
  3815. PreviousUnCommittedRange = UnCommittedRange;
  3816. pp = &UnCommittedRange->Next;
  3817. }
  3818. }
  3819. //
  3820. // At this point we did not find an uncommitted range entry that satisfied
  3821. // our requirements either because of size and/or address. So return null
  3822. // to tell the user we didn't find anything.
  3823. //
  3824. return NULL;
  3825. }
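//
// A minimal illustrative sketch (plain C, not part of this module) of
// the first fit scan above. SketchCommit is a trivial stand-in for
// the ZwAllocateVirtualMemory(MEM_COMMIT) call; a real version would
// sometimes fail, and the scan simply gives up when it does.
//
#include <stddef.h>
#include <stdint.h>

typedef struct _SKETCH_RANGE {
    uintptr_t Address;
    size_t Size;
    struct _SKETCH_RANGE *Next;
} SKETCH_RANGE;

static int
SketchCommit (
    uintptr_t Address,
    size_t Size
    )
{
    (void)Address; (void)Size;
    return 0; // stand-in: pretend the commit always succeeds
}

//
// Walk the range list, commit from the first range big enough, and
// shrink that range in place. Returns the committed address, or 0.
//
uintptr_t
SketchFindAndCommit (
    SKETCH_RANGE *Range,
    size_t Size
    )
{
    for (; Range != NULL; Range = Range->Next) {
        if (Range->Size >= Size) {
            uintptr_t Address = Range->Address;
            if (SketchCommit( Address, Size ) != 0) {
                return 0;
            }
            Range->Address += Size; // shrink the range in place;
            Range->Size -= Size;    // empty ranges get unlinked
            return Address;
        }
    }
    return 0;
}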
  3826. //
  3827. // Declared in heappriv.h
  3828. //
  3829. BOOLEAN
  3830. RtlpInitializeHeapSegment (
  3831. IN PHEAP Heap,
  3832. IN PHEAP_SEGMENT Segment,
  3833. IN UCHAR SegmentIndex,
  3834. IN ULONG Flags,
  3835. IN PVOID BaseAddress,
  3836. IN PVOID UnCommittedAddress,
  3837. IN PVOID CommitLimitAddress
  3838. )
  3839. /*++
  3840. Routine Description:
  3841. This routines initializes the internal structures for a heap segment.
  3842. The caller supplies the heap and the memory for the segment being
  3843. initialized
  3844. Arguments:
  3845. Heap - Supplies the address of the heap owning this segment
  3846. Segment - Supplies a pointer to the segment being initialized
3847. SegmentIndex - Supplies the segment index within the heap that this
  3848. new segment is being assigned
  3849. Flags - Supplies flags controlling the initialization of the segment
  3850. Valid flags are:
  3851. HEAP_SEGMENT_USER_ALLOCATED
  3852. BaseAddress - Supplies the base address for the segment
3853. UnCommittedAddress - Supplies the address where the uncommitted range starts
  3854. CommitLimitAddress - Supplies the top address available to the segment
  3855. Return Value:
  3856. BOOLEAN - TRUE if the initialization is successful and FALSE otherwise
  3857. --*/
  3858. {
  3859. NTSTATUS Status;
  3860. PHEAP_ENTRY FirstEntry;
  3861. USHORT PreviousSize, Size;
  3862. ULONG NumberOfPages;
  3863. ULONG NumberOfCommittedPages;
  3864. ULONG NumberOfUnCommittedPages;
  3865. SIZE_T CommitSize;
  3866. ULONG GlobalFlag = RtlGetNtGlobalFlags();
  3867. RTL_PAGED_CODE();
  3868. //
  3869. // Compute the total number of pages possible in this segment
  3870. //
  3871. NumberOfPages = (ULONG) (((PCHAR)CommitLimitAddress - (PCHAR)BaseAddress) / PAGE_SIZE);
  3872. //
  3873. // First entry points to the first possible segment entry after
  3874. // the segment header
  3875. //
  3876. FirstEntry = (PHEAP_ENTRY)ROUND_UP_TO_POWER2( Segment + 1,
  3877. HEAP_GRANULARITY );
  3878. //
  3879. // Now if the heap is equal to the base address for the segment which
3880. // is the case for segment zero then the previous size is the
  3881. // heap header. Otherwise there isn't a previous entry
  3882. //
  3883. if ((PVOID)Heap == BaseAddress) {
  3884. PreviousSize = Heap->Entry.Size;
  3885. } else {
  3886. PreviousSize = 0;
  3887. }
  3888. //
  3889. // Compute the index size of the segment header
  3890. //
  3891. Size = (USHORT)(((PCHAR)FirstEntry - (PCHAR)Segment) >> HEAP_GRANULARITY_SHIFT);
  3892. //
  3893. // If the first available heap entry is not committed and
3894. // it is beyond the commit limit then we cannot initialize
  3895. //
  3896. if ((PCHAR)(FirstEntry + 1) >= (PCHAR)UnCommittedAddress) {
  3897. if ((PCHAR)(FirstEntry + 1) >= (PCHAR)CommitLimitAddress) {
  3898. return FALSE;
  3899. }
  3900. //
3901. // Not enough of the segment has been committed so we
  3902. // will commit enough now to handle the first entry
  3903. //
  3904. CommitSize = (PCHAR)(FirstEntry + 1) - (PCHAR)UnCommittedAddress;
  3905. #ifdef _WIN64
  3906. //
3907. // This is for Wow64 processes. This is needed to return PAGE_SIZE
  3908. // aligned sizes.
  3909. //
  3910. CommitSize = ROUND_UP_TO_POWER2 (CommitSize, PAGE_SIZE);
  3911. #endif
  3912. Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
  3913. (PVOID *)&UnCommittedAddress,
  3914. 0,
  3915. &CommitSize,
  3916. MEM_COMMIT,
  3917. HEAP_PROTECTION );
  3918. if (!NT_SUCCESS( Status )) {
  3919. return FALSE;
  3920. }
  3921. //
  3922. // Because we had to commit some memory we need to adjust
3923. // the uncommitted address
  3924. //
  3925. UnCommittedAddress = (PVOID)((PCHAR)UnCommittedAddress + CommitSize);
  3926. }
  3927. //
  3928. // At this point we know there is enough memory committed to handle the
  3929. // segment header and one heap entry
  3930. //
3931. // Now compute the number of uncommitted pages and the number of committed
  3932. // pages
  3933. //
  3934. NumberOfUnCommittedPages = (ULONG)(((PCHAR)CommitLimitAddress - (PCHAR)UnCommittedAddress) / PAGE_SIZE);
  3935. NumberOfCommittedPages = NumberOfPages - NumberOfUnCommittedPages;
  3936. //
  3937. // Initialize the heap segment heap entry. We
  3938. // calculated earlier if there was a previous entry
  3939. //
  3940. Segment->Entry.PreviousSize = PreviousSize;
  3941. Segment->Entry.Size = Size;
  3942. Segment->Entry.Flags = HEAP_ENTRY_BUSY;
  3943. Segment->Entry.SegmentIndex = SegmentIndex;
  3944. #if !NTOS_KERNEL_RUNTIME
  3945. //
3946. // In the non kernel case see if we need to capture the caller's stack
  3947. // backtrace
  3948. //
  3949. if (GlobalFlag & FLG_USER_STACK_TRACE_DB) {
  3950. Segment->AllocatorBackTraceIndex = (USHORT)RtlLogStackBackTrace();
  3951. }
  3952. #endif // !NTOS_KERNEL_RUNTIME
  3953. //
3954. // Now initialize the heap segment
  3955. //
  3956. Segment->Signature = HEAP_SEGMENT_SIGNATURE;
  3957. Segment->Flags = Flags;
  3958. Segment->Heap = Heap;
  3959. Segment->BaseAddress = BaseAddress;
  3960. Segment->FirstEntry = FirstEntry;
  3961. Segment->LastValidEntry = (PHEAP_ENTRY)((PCHAR)BaseAddress + (NumberOfPages * PAGE_SIZE));
  3962. Segment->NumberOfPages = NumberOfPages;
  3963. Segment->NumberOfUnCommittedPages = NumberOfUnCommittedPages;
  3964. //
  3965. // If there are uncommitted pages then we need to insert them
  3966. // into the uncommitted ranges list
  3967. //
  3968. if (NumberOfUnCommittedPages) {
  3969. RtlpInsertUnCommittedPages( Segment,
  3970. (ULONG_PTR)UnCommittedAddress,
  3971. NumberOfUnCommittedPages * PAGE_SIZE );
  3972. //
  3973. // Test if we successfully created the uncommitted range within the segment
  3974. //
  3975. if (Segment->NumberOfUnCommittedRanges == 0) {
  3976. HeapDebugPrint(( "Failed to initialize a new segment (%p)\n", Segment ));
  3977. //
3978. // We don't need to decommit the extra memory committed earlier because
3979. // the caller of this function will do this for the entire reserved size
  3980. //
  3981. return FALSE;
  3982. }
  3983. }
  3984. //
  3985. // Have the containing heap point to this segment via the specified index
  3986. //
  3987. Heap->Segments[ SegmentIndex ] = Segment;
  3988. if (Heap->LastSegmentIndex < SegmentIndex) {
  3989. Heap->LastSegmentIndex = SegmentIndex;
  3990. }
  3991. //
  3992. // Initialize the first free heap entry after the heap segment header and
  3993. // put it in the free list. This first entry will be for whatever is left
  3994. // of the committed range
  3995. //
  3996. PreviousSize = Segment->Entry.Size;
  3997. FirstEntry->Flags = HEAP_ENTRY_LAST_ENTRY;
  3998. Segment->LastEntryInSegment = FirstEntry;
  3999. FirstEntry->PreviousSize = PreviousSize;
  4000. FirstEntry->SegmentIndex = SegmentIndex;
  4001. RtlpInsertFreeBlock( Heap,
  4002. (PHEAP_FREE_ENTRY)FirstEntry,
  4003. (PHEAP_ENTRY)UnCommittedAddress - FirstEntry);
  4004. //
  4005. // And return to our caller
  4006. //
  4007. return TRUE;
  4008. }
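//
// A minimal illustrative sketch (plain C, not part of this module) of
// the header arithmetic above, assuming 8 byte allocation units
// (HEAP_GRANULARITY == 8, HEAP_GRANULARITY_SHIFT == 3 on 32 bit
// builds). The segment header's Entry.Size is simply the distance
// from the segment base to the first usable entry, in units.
//
#include <stddef.h>
#include <stdint.h>

#define SKETCH_GRANULARITY ((uintptr_t)8) // assumed unit size
#define SKETCH_GRANULARITY_SHIFT 3

static uintptr_t
SketchRoundUpToUnit (
    uintptr_t Address
    )
{
    return (Address + SKETCH_GRANULARITY - 1) & ~(SKETCH_GRANULARITY - 1);
}

uint16_t
SketchHeaderUnits (
    uintptr_t SegmentBase, // segment (and header) base address
    size_t HeaderBytes     // size of the segment header, in bytes
    )
{
    uintptr_t FirstEntry = SketchRoundUpToUnit( SegmentBase + HeaderBytes );
    return (uint16_t)((FirstEntry - SegmentBase) >> SKETCH_GRANULARITY_SHIFT);
}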
  4009. //
  4010. // Local Support Routine
  4011. //
  4012. NTSTATUS
  4013. RtlpDestroyHeapSegment (
  4014. IN PHEAP_SEGMENT Segment
  4015. )
  4016. /*++
  4017. Routine Description:
  4018. This routine removes an existing heap segment. After the call it
  4019. is as if the segment never existed
  4020. Arguments:
  4021. Segment - Supplies a pointer to the heap segment being destroyed
  4022. Return Value:
  4023. NTSTATUS - An appropriate status value
  4024. --*/
  4025. {
  4026. PVOID BaseAddress;
  4027. SIZE_T BytesToFree;
  4028. RTL_PAGED_CODE();
  4029. //
  4030. // We actually only have work to do if the segment is not
  4031. // user allocated. If the segment is user allocated then
4032. // we'll assume the user knows how to get rid of the memory
  4033. //
  4034. if (!(Segment->Flags & HEAP_SEGMENT_USER_ALLOCATED)) {
  4035. BaseAddress = Segment->BaseAddress;
  4036. BytesToFree = 0;
  4037. //
  4038. // Free all the virtual memory for the segment and return
  4039. // to our caller.
  4040. //
  4041. return RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
  4042. (PVOID *)&BaseAddress,
  4043. &BytesToFree,
  4044. MEM_RELEASE );
  4045. } else {
  4046. //
  4047. // User allocated segments are a noop
  4048. //
  4049. return STATUS_SUCCESS;
  4050. }
  4051. }
  4052. //
  4053. // Local Support Routine
  4054. //
  4055. PHEAP_FREE_ENTRY
  4056. RtlpExtendHeap (
  4057. IN PHEAP Heap,
  4058. IN SIZE_T AllocationSize
  4059. )
  4060. /*++
  4061. Routine Description:
  4062. This routine is used to extend the amount of committed memory in a heap
  4063. Arguments:
  4064. Heap - Supplies the heap being modified
  4065. AllocationSize - Supplies the size, in bytes, that we need to extend the
  4066. heap
  4067. Return Value:
  4068. PHEAP_FREE_ENTRY - Returns a pointer to the newly created heap entry
  4069. of the specified size, or NULL if we weren't able to extend the heap
  4070. --*/
  4071. {
  4072. NTSTATUS Status;
  4073. PHEAP_SEGMENT Segment;
  4074. PHEAP_FREE_ENTRY FreeBlock;
  4075. UCHAR SegmentIndex, EmptySegmentIndex;
  4076. ULONG NumberOfPages;
  4077. SIZE_T CommitSize;
  4078. SIZE_T ReserveSize;
  4079. SIZE_T FreeSize;
  4080. RTL_PAGED_CODE();
  4081. #ifndef NTOS_KERNEL_RUNTIME
  4082. if (Heap->LargeBlocksIndex) {
  4083. PHEAP_INDEX HeapIndex = (PHEAP_INDEX)Heap->LargeBlocksIndex;
  4084. if (HeapIndex->LargeBlocksCacheMaxDepth < RtlpLargeListDepthLimit) {
  4085. HeapIndex->LargeBlocksCacheMaxDepth += 1;
  4086. }
  4087. HeapIndex->CacheStats.Committs += 1;
  4088. }
  4089. #endif // NTOS_KERNEL_RUNTIME
  4090. //
4091. // Compute the number of pages needed to hold this extension,
4092. // and then compute the real free size, still in bytes, based on
  4093. // the page count
  4094. //
  4095. NumberOfPages = (ULONG) ((AllocationSize + PAGE_SIZE - 1) / PAGE_SIZE);
  4096. FreeSize = NumberOfPages * PAGE_SIZE;
  4097. //
  4098. // For every segment we're either going to look for an existing
  4099. // heap segment that we can get some pages out of or we will
  4100. // identify a free heap segment index where we'll try and create a new
  4101. // segment
  4102. //
  4103. EmptySegmentIndex = (UCHAR)(Heap->LastSegmentIndex + 1);
  4104. for (SegmentIndex=0; SegmentIndex <= Heap->LastSegmentIndex; SegmentIndex++) {
  4105. #ifndef NTOS_KERNEL_RUNTIME
  4106. if ((RtlpGetLowFragHeap(Heap) != NULL)
  4107. &&
  4108. (AllocationSize > HEAP_LARGEST_LFH_BLOCK)) {
  4109. //
  4110. // Search backward for large blocks. This will group the
  4111. // large allocations into upper segments, and small allocations
4112. // into lower index segments. It helps reduce fragmentation
  4113. //
  4114. Segment = Heap->Segments[ Heap->LastSegmentIndex - SegmentIndex ];
  4115. } else {
  4116. Segment = Heap->Segments[ SegmentIndex ];
  4117. }
  4118. #else // NTOS_KERNEL_RUNTIME
  4119. Segment = Heap->Segments[ SegmentIndex ];
  4120. #endif // NTOS_KERNEL_RUNTIME
  4121. //
4122. // If the segment exists and the number of uncommitted pages will
4123. // satisfy our request and the largest uncommitted range will
4124. // also satisfy our request then we'll try this segment
  4125. //
  4126. // Note that this second test seems unnecessary given that
  4127. // the largest uncommitted range is also being tested
  4128. //
  4129. if ((Segment) &&
  4130. (NumberOfPages <= Segment->NumberOfUnCommittedPages) &&
  4131. (FreeSize <= Segment->LargestUnCommittedRange)) {
  4132. //
  4133. // Looks like a good segment so try and commit the
  4134. // amount we need
  4135. //
  4136. FreeBlock = RtlpFindAndCommitPages( Heap,
  4137. Segment,
  4138. &FreeSize,
  4139. NULL );
  4140. //
4141. // If we were successful then we will coalesce it with adjacent
4142. // free blocks and put it in the free list, then return
4143. // the free block
  4144. //
  4145. if (FreeBlock != NULL) {
  4146. //
  4147. // RtlpCoalesceFreeBlocks needs the free size in heap units.
  4148. // We'll shift with the granularity before calling the coalesce.
  4149. //
  4150. FreeSize = FreeSize >> HEAP_GRANULARITY_SHIFT;
  4151. FreeBlock = RtlpCoalesceFreeBlocks( Heap, FreeBlock, &FreeSize, FALSE );
  4152. RtlpInsertFreeBlock( Heap, FreeBlock, FreeSize );
  4153. return FreeBlock;
  4154. }
  4155. }
  4156. }
  4157. //
  4158. // At this point we weren't able to get the memory from an existing
  4159. // heap segment so now check if we found an unused segment index
  4160. // and if we're allowed to grow the heap.
  4161. //
  4162. if ((EmptySegmentIndex != HEAP_MAXIMUM_SEGMENTS) &&
  4163. (Heap->Flags & HEAP_GROWABLE)) {
  4164. Segment = NULL;
  4165. //
  4166. // Calculate a reserve size for the new segment, we might
  4167. // need to fudge it up if the allocation size we're going for
  4168. // right now is already beyond the default reserve size
  4169. //
  4170. if ((AllocationSize + PAGE_SIZE) > Heap->SegmentReserve) {
  4171. ReserveSize = AllocationSize + PAGE_SIZE;
  4172. } else {
  4173. ReserveSize = Heap->SegmentReserve;
  4174. }
  4175. #if defined(_WIN64)
  4176. //
  4177. // Limit the size of the segments to 2 GBytes
  4178. //
  4179. #define HEAP_MAX_SEGMENT_SIZE 0x80000000
  4180. if (ReserveSize >= HEAP_MAX_SEGMENT_SIZE) {
  4181. ReserveSize = HEAP_MAX_SEGMENT_SIZE;
  4182. }
  4183. #endif
  4184. //
  4185. // Try and reserve some vm
  4186. //
  4187. Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
  4188. (PVOID *)&Segment,
  4189. 0,
  4190. &ReserveSize,
  4191. MEM_RESERVE,
  4192. HEAP_PROTECTION );
  4193. //
  4194. // If we get back status no memory then we should trim back the
4195. // request to something reasonable and try again. We'll halve
4196. // the amount until it either succeeds or until we reach
  4197. // the allocation size. In the latter case we are really
  4198. // out of memory.
  4199. //
  4200. while ((!NT_SUCCESS( Status )) && (ReserveSize != (AllocationSize + PAGE_SIZE))) {
  4201. ReserveSize = ReserveSize / 2;
  4202. if( ReserveSize < (AllocationSize + PAGE_SIZE) ) {
  4203. ReserveSize = (AllocationSize + PAGE_SIZE);
  4204. }
  4205. Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
  4206. (PVOID *)&Segment,
  4207. 0,
  4208. &ReserveSize,
  4209. MEM_RESERVE,
  4210. HEAP_PROTECTION );
  4211. }
  4212. if (NT_SUCCESS( Status )) {
  4213. //
  4214. // Adjust the heap state information
  4215. //
  4216. Heap->SegmentReserve += ReserveSize;
  4217. //
  4218. // Compute the commit size to be either the default, or if
  4219. // that's not big enough then make it big enough to handle
  4220. // this current request
  4221. //
  4222. if ((AllocationSize + PAGE_SIZE) > Heap->SegmentCommit) {
  4223. CommitSize = AllocationSize + PAGE_SIZE;
  4224. } else {
  4225. CommitSize = Heap->SegmentCommit;
  4226. }
  4227. #ifdef _WIN64
  4228. //
4229. // This is for Wow64 processes. This is needed to return PAGE_SIZE
  4230. // aligned sizes.
  4231. //
  4232. CommitSize = ROUND_UP_TO_POWER2 (CommitSize, PAGE_SIZE);
  4233. #endif
  4234. //
  4235. // Try and commit the memory
  4236. //
  4237. Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
  4238. (PVOID *)&Segment,
  4239. 0,
  4240. &CommitSize,
  4241. MEM_COMMIT,
  4242. HEAP_PROTECTION );
  4243. //
  4244. // If the commit is successful but we were not able to
4245. // initialize the heap segment then still make the status
4246. // an error value
  4247. //
  4248. if (NT_SUCCESS( Status ) &&
  4249. !RtlpInitializeHeapSegment( Heap,
  4250. Segment,
  4251. EmptySegmentIndex,
  4252. 0,
  4253. Segment,
  4254. (PCHAR)Segment + CommitSize,
  4255. (PCHAR)Segment + ReserveSize)) {
  4256. Status = STATUS_NO_MEMORY;
  4257. }
  4258. //
  4259. // If we've been successful so far then we're done and we
  4260. // can return the first entry in the segment to our caller
  4261. //
  4262. if (NT_SUCCESS(Status)) {
  4263. #ifndef NTOS_KERNEL_RUNTIME
  4264. if(IsHeapLogging( Heap ) ) {
  4265. PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
  4266. PPERFINFO_TRACE_HEADER pEventHeader = NULL;
  4267. USHORT ReqSize = sizeof(HEAP_EVENT_EXPANSION) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);
  4268. AcquireBufferLocation(&pEventHeader, &pThreadLocalData,&ReqSize);
  4269. if(pEventHeader && pThreadLocalData) {
  4270. SIZE_T UCBytes = 0;
  4271. PHEAP_EVENT_EXPANSION pHeapEvent = (PHEAP_EVENT_EXPANSION)( (SIZE_T)pEventHeader
  4272. + (SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));
  4273. pEventHeader->Packet.Size = (USHORT) ReqSize;
  4274. pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_EXTEND;
  4275. pHeapEvent->HeapHandle = (PVOID)Heap;
  4276. pHeapEvent->CommittedSize = CommitSize;
  4277. pHeapEvent->Address = (PVOID)Segment->FirstEntry;
  4278. pHeapEvent->FreeSpace = Heap->TotalFreeSize;
  4279. pHeapEvent->ReservedSpace = 0;
  4280. pHeapEvent->CommittedSpace = 0;
  4281. pHeapEvent->NoOfUCRs = 0;
  4282. UCBytes = GetUCBytes(Heap, &pHeapEvent->ReservedSpace, &pHeapEvent->NoOfUCRs);
  4283. pHeapEvent->ReservedSpace *= PAGE_SIZE;
  4284. pHeapEvent->CommittedSpace = pHeapEvent->ReservedSpace - UCBytes;
  4285. ReleaseBufferLocation(pThreadLocalData);
  4286. }
  4287. }
  4288. #endif //NTOS_KERNEL_RUNTIME
  4289. return (PHEAP_FREE_ENTRY)Segment->FirstEntry;
  4290. }
  4291. //
  4292. // Otherwise either the commit or heap segment initialization failed
  4293. // so we'll release the memory which will also decommit it if necessary
  4294. //
  4295. RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
  4296. (PVOID *)&Segment,
  4297. &ReserveSize,
  4298. MEM_RELEASE );
  4299. }
  4300. }
  4301. #ifndef NTOS_KERNEL_RUNTIME
  4302. //
4303. // In the non kernel case, if we disabled coalescing on free, then what we'll
  4304. // do as a last resort is coalesce the heap and see if a block comes out
  4305. // that we can use
  4306. //
  4307. if (Heap->Flags & HEAP_DISABLE_COALESCE_ON_FREE) {
  4308. FreeBlock = RtlpCoalesceHeap( Heap );
  4309. if ((FreeBlock != NULL) && (FreeBlock->Size >= AllocationSize)) {
  4310. return FreeBlock;
  4311. }
  4312. }
  4313. #endif // NTOS_KERNEL_RUNTIME
  4314. //
4315. // Either the heap cannot grow or we're out of resources of some type
  4316. // so we're going to return null
  4317. //
  4318. return NULL;
  4319. }
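//
// A minimal illustrative sketch (plain C, not part of this module) of
// the reservation back-off loop above: try the preferred size, then
// halve it (never dropping below the minimum that still satisfies the
// request) until a reservation succeeds or the minimum itself fails.
// SketchReserve is a malloc based stand-in for the
// ZwAllocateVirtualMemory(MEM_RESERVE) call.
//
#include <stddef.h>
#include <stdlib.h>

static int
SketchReserve (
    void **Base,
    size_t Size
    )
{
    *Base = malloc( Size ); // stand-in for an address space reservation
    return (*Base != NULL) ? 0 : -1;
}

void *
SketchReserveWithBackoff (
    size_t Preferred,
    size_t Minimum
    )
{
    void *Base = NULL;
    size_t Size = (Preferred > Minimum) ? Preferred : Minimum;

    while (SketchReserve( &Base, Size ) != 0) {
        if (Size == Minimum) {
            return NULL; // even the minimum failed, truly out of memory
        }
        Size /= 2;
        if (Size < Minimum) {
            Size = Minimum;
        }
    }
    return Base;
}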
  4320. //
  4321. // Declared in heappriv.h
  4322. //
  4323. PHEAP_FREE_ENTRY
  4324. RtlpCoalesceFreeBlocks (
  4325. IN PHEAP Heap,
  4326. IN PHEAP_FREE_ENTRY FreeBlock,
  4327. IN OUT PSIZE_T FreeSize,
  4328. IN BOOLEAN RemoveFromFreeList
  4329. )
  4330. /*++
  4331. Routine Description:
  4332. This routine coalesces the free block together.
  4333. Arguments:
  4334. Heap - Supplies a pointer to the heap being manipulated
  4335. FreeBlock - Supplies a pointer to the free block that we want coalesced
4336. FreeSize - Supplies the size, in heap units, of the free block. On return it
4337. contains the size, in heap units, of the newly coalesced free block
  4338. RemoveFromFreeList - Indicates if the input free block is already on a
4339. free list and needs to be removed before coalescing
  4340. Return Value:
  4341. PHEAP_FREE_ENTRY - returns a pointer to the newly coalesced free block
  4342. --*/
  4343. {
  4344. PHEAP_FREE_ENTRY FreeBlock1, NextFreeBlock;
  4345. RTL_PAGED_CODE();
  4346. //
  4347. // Point to the preceding block
  4348. //
  4349. FreeBlock1 = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeBlock - FreeBlock->PreviousSize);
  4350. //
  4351. // Check if there is a preceding block, and if it is free, and the two sizes
4352. // put together will still fit on a free list.
  4353. //
  4354. if ((FreeBlock1 != FreeBlock) &&
  4355. !(FreeBlock1->Flags & HEAP_ENTRY_BUSY) &&
  4356. ((*FreeSize + FreeBlock1->Size) <= HEAP_MAXIMUM_BLOCK_SIZE)) {
  4357. //
  4358. // We are going to merge ourselves with the preceding block
  4359. //
  4360. HEAPASSERT(FreeBlock->PreviousSize == FreeBlock1->Size);
  4361. //
  4362. // Check if we need to remove the input block from the free list
  4363. //
  4364. if (RemoveFromFreeList) {
  4365. RtlpRemoveFreeBlock( Heap, FreeBlock );
  4366. Heap->TotalFreeSize -= FreeBlock->Size;
  4367. //
4368. // We've removed it so we don't have to do it again
  4369. //
  4370. RemoveFromFreeList = FALSE;
  4371. }
  4372. //
  4373. // Remove the preceding block from its free list
  4374. //
  4375. RtlpRemoveFreeBlock( Heap, FreeBlock1 );
  4376. //
  4377. // Copy over the last entry flag if necessary from what we're freeing
  4378. // to the preceding block
  4379. //
  4380. FreeBlock1->Flags = FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY;
  4381. if( FreeBlock1->Flags & HEAP_ENTRY_LAST_ENTRY ) {
  4382. PHEAP_SEGMENT Segment;
  4383. Segment = Heap->Segments[FreeBlock1->SegmentIndex];
  4384. Segment->LastEntryInSegment = (PHEAP_ENTRY)FreeBlock1;
  4385. }
  4386. //
  4387. // Point to the preceding block, and adjust the sizes for the
  4388. // new free block. It is the total of both blocks.
  4389. //
  4390. FreeBlock = FreeBlock1;
  4391. *FreeSize += FreeBlock1->Size;
  4392. Heap->TotalFreeSize -= FreeBlock1->Size;
  4393. FreeBlock->Size = (USHORT)*FreeSize;
  4394. //
  4395. // Check if we need to update the previous size of the next
  4396. // entry
  4397. //
  4398. if (!(FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {
  4399. ((PHEAP_ENTRY)FreeBlock + *FreeSize)->PreviousSize = (USHORT)*FreeSize;
  4400. }
  4401. }
  4402. //
  4403. // Check if there is a following block.
  4404. //
  4405. if (!(FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {
  4406. //
  4407. // There is a following block so now get a pointer to it
  4408. // and check if it is free and if putting the two blocks together
  4409. // still fits on a free list
  4410. //
  4411. NextFreeBlock = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeBlock + *FreeSize);
  4412. if (!(NextFreeBlock->Flags & HEAP_ENTRY_BUSY) &&
  4413. ((*FreeSize + NextFreeBlock->Size) <= HEAP_MAXIMUM_BLOCK_SIZE)) {
  4414. //
  4415. // We are going to merge ourselves with the following block
  4416. //
  4417. HEAPASSERT(*FreeSize == NextFreeBlock->PreviousSize);
  4418. //
  4419. // Check if we need to remove the input block from the free list
  4420. //
  4421. if (RemoveFromFreeList) {
  4422. RtlpRemoveFreeBlock( Heap, FreeBlock );
  4423. Heap->TotalFreeSize -= FreeBlock->Size;
  4424. }
  4425. //
  4426. // Copy up the last entry flag if necessary from the following
  4427. // block to our input block
  4428. //
  4429. FreeBlock->Flags = NextFreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY;
  4430. if( FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY ) {
  4431. PHEAP_SEGMENT Segment;
  4432. Segment = Heap->Segments[FreeBlock->SegmentIndex];
  4433. Segment->LastEntryInSegment = (PHEAP_ENTRY)FreeBlock;
  4434. }
  4435. //
  4436. // Remove the following block from its free list
  4437. //
  4438. RtlpRemoveFreeBlock( Heap, NextFreeBlock );
  4439. //
  4440. // Adjust the size for the newly combined block
  4441. //
  4442. *FreeSize += NextFreeBlock->Size;
  4443. Heap->TotalFreeSize -= NextFreeBlock->Size;
  4444. FreeBlock->Size = (USHORT)*FreeSize;
  4445. //
  4446. // Check if we need to update the previous size of the next block
  4447. //
  4448. if (!(FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {
  4449. ((PHEAP_ENTRY)FreeBlock + *FreeSize)->PreviousSize = (USHORT)*FreeSize;
  4450. }
  4451. }
  4452. }
  4453. //
  4454. // And return the free block to our caller
  4455. //
  4456. return FreeBlock;
  4457. }
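//
// A minimal illustrative sketch (plain C, not part of this module) of
// the boundary tag coalescing above, over an invented 8 byte entry
// whose Size and PreviousSize are in allocation units, so entry
// pointer arithmetic moves in whole units. Free list unlinking and
// the maximum block size checks are elided.
//
#include <stdint.h>

#define SKETCH_BUSY 0x01
#define SKETCH_LAST 0x10

typedef struct _SKETCH_ENTRY {
    uint16_t Size;         // this block, in units
    uint16_t PreviousSize; // the lower neighbor, in units
    uint8_t Flags;
    uint8_t Pad[3];        // pad the sketch entry to one 8 byte unit
} SKETCH_ENTRY;

SKETCH_ENTRY *
SketchCoalesce (
    SKETCH_ENTRY *Block // a block that has just become free
    )
{
    SKETCH_ENTRY *Prev = Block - Block->PreviousSize;

    if ((Prev != Block) && !(Prev->Flags & SKETCH_BUSY)) {
        Prev->Size += Block->Size;              // absorb Block downward
        Prev->Flags |= (Block->Flags & SKETCH_LAST);
        Block = Prev;
    }

    if (!(Block->Flags & SKETCH_LAST)) {
        SKETCH_ENTRY *Next = Block + Block->Size;
        if (!(Next->Flags & SKETCH_BUSY)) {
            Block->Size += Next->Size;          // absorb the follower
            Block->Flags |= (Next->Flags & SKETCH_LAST);
        }
    }

    if (!(Block->Flags & SKETCH_LAST)) {        // fix the boundary tag
        (Block + Block->Size)->PreviousSize = Block->Size;
    }
    return Block;
}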
  4458. //
  4459. // Declared in heappriv.h
  4460. //
  4461. VOID
  4462. RtlpDeCommitFreeBlock (
  4463. IN PHEAP Heap,
  4464. IN PHEAP_FREE_ENTRY FreeBlock,
  4465. IN SIZE_T FreeSize
  4466. )
  4467. /*++
  4468. Routine Description:
  4469. This routine takes a free block and decommits it. This is usually called
  4470. because the block is beyond the decommit threshold
  4471. Arguments:
  4472. Heap - Supplies a pointer to the heap being manipulated
  4473. FreeBlock - Supplies a pointer to the block being decommitted
  4474. FreeSize - Supplies the size, in heap units, of the free block being decommitted
  4475. Return Value:
  4476. None.
  4477. --*/
  4478. {
  4479. NTSTATUS Status;
  4480. ULONG_PTR DeCommitAddress;
  4481. SIZE_T DeCommitSize;
  4482. USHORT LeadingFreeSize, TrailingFreeSize;
  4483. PHEAP_SEGMENT Segment;
  4484. PHEAP_FREE_ENTRY LeadingFreeBlock, TrailingFreeBlock;
  4485. PHEAP_ENTRY LeadingBusyBlock, TrailingBusyBlock;
  4486. PHEAP_UNCOMMMTTED_RANGE UnCommittedRange;
  4487. PHEAP_FREE_ENTRY LeadingBlockToDecommit = NULL, TrailingBlockToDecommit = NULL;
  4488. RTL_PAGED_CODE();
  4489. //
4490. // If the heap has a user specified commit routine then we won't really
4491. // decommit anything; instead we'll call a worker routine to chop it up
  4492. // into pieces that will fit on the free lists
  4493. //
  4494. if (Heap->CommitRoutine != NULL) {
  4495. RtlpInsertFreeBlock( Heap, FreeBlock, FreeSize );
  4496. return;
  4497. }
  4498. //
  4499. // Get a pointer to the owning segment
  4500. //
  4501. Segment = Heap->Segments[ FreeBlock->SegmentIndex ];
  4502. //
  4503. // The leading busy block identifies the preceding in use block before
  4504. // what we are trying to decommit. It is only used if what we are trying
4505. // to decommit is right on a page boundary and then it is the block right
  4506. // before us if it exists.
  4507. //
  4508. // The leading free block is used to identify whatever space is needed
4509. // to round up the caller's specified address to a page address. If the
  4510. // caller already gave us a page aligned address then the free block
  4511. // address is identical to what the caller supplied.
  4512. //
  4513. LeadingBusyBlock = NULL;
  4514. LeadingFreeBlock = FreeBlock;
  4515. //
4516. // Make sure the block we are trying to decommit starts on the next full
  4517. // page boundary. The leading free size is the size of whatever it takes
  4518. // to round up the free block to the next page specified in units of
  4519. // heap entries.
  4520. //
  4521. DeCommitAddress = ROUND_UP_TO_POWER2( LeadingFreeBlock, PAGE_SIZE );
  4522. LeadingFreeSize = (USHORT)((PHEAP_ENTRY)DeCommitAddress - (PHEAP_ENTRY)LeadingFreeBlock);
  4523. //
4524. // If the leading free size only has space for one heap entry then we'll
  4525. // bump it up to include the next page, because we don't want to leave
  4526. // anything that small laying around. Otherwise if we have a preceding
  4527. // block and the leading free size is zero then identify the preceding
  4528. // block as the leading busy block
  4529. //
  4530. if (LeadingFreeSize == 1) {
  4531. DeCommitAddress += PAGE_SIZE;
  4532. LeadingFreeSize += PAGE_SIZE >> HEAP_GRANULARITY_SHIFT;
  4533. } else if (LeadingFreeBlock->PreviousSize != 0) {
  4534. if (DeCommitAddress == (ULONG_PTR)LeadingFreeBlock) {
  4535. LeadingBusyBlock = (PHEAP_ENTRY)LeadingFreeBlock - LeadingFreeBlock->PreviousSize;
  4536. }
  4537. }
  4538. //
  4539. // The trailing busy block identifies the block immediately after the one
  4540. // we are trying to decommit provided what we are decommitting ends right
  4541. // on a page boundary otherwise the trailing busy block stays null and
  4542. // the trailing free block value is used.
  4543. //
  4544. TrailingBusyBlock = NULL;
  4545. TrailingFreeBlock = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeBlock + FreeSize);
  4546. //
  4547. // Make sure the block we are trying to decommit ends on a page boundary.
  4548. //
  4549. // And compute how many heap entries we had to backup to make it land on a
  4550. // page boundary.
  4551. //
  4552. DeCommitSize = ROUND_DOWN_TO_POWER2( (ULONG_PTR)TrailingFreeBlock, PAGE_SIZE );
  4553. TrailingFreeSize = (USHORT)((PHEAP_ENTRY)TrailingFreeBlock - (PHEAP_ENTRY)DeCommitSize);
  4554. //
4555. // If the trailing free size is exactly one heap entry in size then we will
4556. // nibble off a bit more from the decommit size, because free blocks of
4557. // exactly one heap entry in size are useless. Otherwise if we actually
  4558. // ended on a page boundary and there is a block after us then indicate
  4559. // that we have a trailing busy block
  4560. //
  4561. if (TrailingFreeSize == (sizeof( HEAP_ENTRY ) >> HEAP_GRANULARITY_SHIFT)) {
  4562. DeCommitSize -= PAGE_SIZE;
  4563. TrailingFreeSize += PAGE_SIZE >> HEAP_GRANULARITY_SHIFT;
  4564. } else if ((TrailingFreeSize == 0) && !(FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {
  4565. TrailingBusyBlock = (PHEAP_ENTRY)TrailingFreeBlock;
  4566. }
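//
// For illustration only, with a 4KB page: if the block being freed
// ends at 0x00345FD0, DeCommitSize rounds down to 0x00345000 and
// TrailingFreeSize is (0x345FD0 - 0x345000) / 8 == 0x1FA heap entries.
//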
  4567. //
  4568. // Now adjust the trailing free block to compensate for the trailing free size
  4569. // we just computed.
  4570. //
  4571. TrailingFreeBlock = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)TrailingFreeBlock - TrailingFreeSize);
  4572. //
4573. // Right now DeCommitSize is really a pointer. If it points beyond
4574. // the decommit address then make the size really be just the byte count
4575. // to decommit. Otherwise the decommit size is zero.
  4576. //
  4577. if (DeCommitSize > DeCommitAddress) {
  4578. DeCommitSize -= DeCommitAddress;
  4579. } else {
  4580. DeCommitSize = 0;
  4581. }
  4582. //
  4583. // Now check if we still have something to decommit
  4584. //
  4585. if (DeCommitSize != 0) {
  4586. #ifndef NTOS_KERNEL_RUNTIME
  4587. //
  4588. // We do not try to push the large block to the
4589. // cache if it already has uncommitted ranges around
  4590. //
  4591. if ( (FreeBlock->PreviousSize != 0)
  4592. &&
  4593. !(FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY) ) {
  4594. if (Heap->LargeBlocksIndex == NULL) {
  4595. Heap->u2.DecommitCount += 1;
  4596. if ( (Heap->u2.DecommitCount == HEAP_ACTIVATE_CACHE_THRESHOLD) &&
  4597. (Heap->Flags & HEAP_GROWABLE) &&
  4598. !(RtlpDisableHeapLookaside & HEAP_COMPAT_DISABLE_LARGECACHE) ) {
  4599. RtlpInitializeListIndex( Heap );
  4600. }
  4601. } else {
  4602. PHEAP_INDEX HeapIndex = (PHEAP_INDEX)Heap->LargeBlocksIndex;
  4603. //
  4604. // Check if the cache is locked for flushing
  4605. //
  4606. if ((HeapIndex->LargeBlocksCacheSequence != 0)
  4607. &&
  4608. ( (LeadingFreeBlock->PreviousSize != 0)
  4609. ||
  4610. (TrailingFreeSize != 0) ) ) {
  4611. if (HeapIndex->LargeBlocksCacheDepth < HeapIndex->LargeBlocksCacheMaxDepth) {
  4612. //
4613. // The cache still has room, so keep the block committed: take our
4614. // leading free block and put it on a free list
  4615. //
  4616. RtlpInsertFreeBlock( Heap, LeadingFreeBlock, FreeSize );
  4617. RtlpCheckLargeCache(Heap);
  4618. return;
  4619. //
4620. // Check whether the block being deleted is the only one
4621. // between two uncommitted ranges. If not, we'll decommit the largest block from the list
  4622. //
  4623. } else {
  4624. PLIST_ENTRY Head, Next;
  4625. PHEAP_FREE_ENTRY LargestFreeBlock;
  4626. //
4627. // We have too many cached blocks. We need to decommit one.
4628. // To reduce virtual address fragmentation we need to decommit the
4629. // largest block available
  4630. //
  4631. Head = &Heap->FreeLists[ 0 ];
  4632. Next = Head->Blink;
  4633. if (Head != Next) {
  4634. //
4635. // Grab the largest block, which sits at the tail of the list
  4636. //
  4637. LargestFreeBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  4638. //
4639. // Even if we found a larger block in the list, unfavorable
4640. // alignment can make it produce less uncommitted space. We'll free the largest one instead of
4641. // the current block only if its size is significantly bigger (by at least one page)
  4642. //
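//
// For illustration only, with a 4KB page and 8-byte granularity
// (PAGE_SIZE >> HEAP_GRANULARITY_SHIFT == 512): when decommitting a
// 600-unit block, the largest cached block is flushed in its place
// only if that block exceeds 600 + 512 == 1112 heap units.
//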
  4643. if (LargestFreeBlock->Size > (FreeSize + (PAGE_SIZE >> HEAP_GRANULARITY_SHIFT))) {
  4644. //
4645. // If we have a larger block in the list
4646. // we'll insert this one into the list and we'll decommit
4647. // the largest one
  4648. //
  4649. RtlpInsertFreeBlock( Heap, LeadingFreeBlock, FreeSize );
  4650. RtlpFlushLargestCacheBlock(Heap);
  4651. RtlpCheckLargeCache(Heap);
  4652. return;
  4653. }
  4654. }
  4655. }
  4656. }
  4657. HeapIndex->CacheStats.Decommitts += 1;
  4658. // HeapDebugPrint(("Decommitting size %ld\n", DeCommitSize));
  4659. }
  4660. }
  4661. #endif // NTOS_KERNEL_RUNTIME
  4662. //
  4663. // Before freeing the memory to MM we have to be sure we can create
  4664. // a PHEAP_UNCOMMMTTED_RANGE later. So we do it right now
  4665. //
  4666. UnCommittedRange = RtlpCreateUnCommittedRange(Segment);
  4667. if (UnCommittedRange == NULL) {
4668. HeapDebugPrint(( "Failed creating uncommitted range (%p for %x)\n", DeCommitAddress, DeCommitSize ));
  4669. //
4670. // We can't track the uncommitted range, so skip the decommit and simply
4671. // add the leading free block to the free list
  4672. //
  4673. RtlpInsertFreeBlock( Heap, LeadingFreeBlock, FreeSize );
  4674. return;
  4675. }
  4676. //
  4677. // Decommit the memory
  4678. //
  4679. Status = RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
  4680. (PVOID *)&DeCommitAddress,
  4681. &DeCommitSize,
  4682. MEM_DECOMMIT );
  4683. //
  4684. // Push back the UnCommittedRange structure. Now the insert cannot fail
  4685. //
  4686. RtlpDestroyUnCommittedRange( Segment, UnCommittedRange );
  4687. if (NT_SUCCESS( Status )) {
  4688. //
  4689. // Insert information regarding the pages we just decommitted
4690. // to the list of uncommitted pages in the segment
  4691. //
  4692. RtlpInsertUnCommittedPages( Segment,
  4693. DeCommitAddress,
  4694. DeCommitSize );
  4695. //
  4696. // Adjust the segments count of uncommitted pages
  4697. //
  4698. Segment->NumberOfUnCommittedPages += (ULONG)(DeCommitSize / PAGE_SIZE);
  4699. //
4700. // If we have a leading free block then mark its proper state,
  4701. // update the heap, and put it on the free list
  4702. //
  4703. if (LeadingFreeSize != 0) {
  4704. SIZE_T TempSize;
  4705. LeadingFreeBlock->Flags = HEAP_ENTRY_LAST_ENTRY;
  4706. TempSize = LeadingFreeBlock->Size = LeadingFreeSize;
  4707. Segment->LastEntryInSegment = (PHEAP_ENTRY)LeadingFreeBlock;
  4708. LeadingFreeBlock = RtlpCoalesceFreeBlocks( Heap,
  4709. LeadingFreeBlock,
  4710. &TempSize,
  4711. FALSE );
  4712. if (LeadingFreeBlock->Size < Heap->DeCommitFreeBlockThreshold) {
  4713. Heap->TotalFreeSize += LeadingFreeBlock->Size;
  4714. RtlpInsertFreeBlockDirect( Heap, LeadingFreeBlock, LeadingFreeBlock->Size );
  4715. } else {
  4716. LeadingBlockToDecommit = LeadingFreeBlock;
  4717. }
  4718. //
  4719. // Otherwise if we actually have a leading busy block then
  4720. // make sure the busy block knows we're uncommitted
  4721. //
  4722. } else if (LeadingBusyBlock != NULL) {
  4723. LeadingBusyBlock->Flags |= HEAP_ENTRY_LAST_ENTRY;
  4724. Segment->LastEntryInSegment = LeadingBusyBlock;
  4725. } else if ((Segment->LastEntryInSegment >= (PHEAP_ENTRY)DeCommitAddress)
  4726. &&
  4727. ((PCHAR)Segment->LastEntryInSegment < ((PCHAR)DeCommitAddress + DeCommitSize))) {
  4728. Segment->LastEntryInSegment = Segment->FirstEntry;
  4729. }
  4730. //
4731. // If there is a trailing free block then set its state,
  4732. // update the heap, and insert it on a free list
  4733. //
  4734. if (TrailingFreeSize != 0) {
  4735. SIZE_T TempSize;
  4736. TrailingFreeBlock->PreviousSize = 0;
  4737. TrailingFreeBlock->SegmentIndex = Segment->Entry.SegmentIndex;
  4738. TrailingFreeBlock->Flags = 0;
  4739. TempSize = TrailingFreeBlock->Size = TrailingFreeSize;
  4740. ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)TrailingFreeBlock + TrailingFreeSize))->PreviousSize = (USHORT)TrailingFreeSize;
  4741. TrailingFreeBlock = RtlpCoalesceFreeBlocks( Heap,
  4742. TrailingFreeBlock,
  4743. &TempSize,
  4744. FALSE );
  4745. if (TrailingFreeBlock->Size < Heap->DeCommitFreeBlockThreshold) {
  4746. RtlpInsertFreeBlockDirect( Heap, TrailingFreeBlock, TrailingFreeBlock->Size );
  4747. Heap->TotalFreeSize += TrailingFreeBlock->Size;
  4748. } else {
  4749. TrailingBlockToDecommit = TrailingFreeBlock;
  4750. }
  4751. //
  4752. // Otherwise if we actually have a succeeding block then
4753. // let it know we are uncommitted
  4754. //
  4755. } else if (TrailingBusyBlock != NULL) {
  4756. TrailingBusyBlock->PreviousSize = 0;
  4757. }
  4758. #ifndef NTOS_KERNEL_RUNTIME
  4759. if( IsHeapLogging( Heap ) ) {
  4760. PPERFINFO_TRACE_HEADER pEventHeader = NULL;
  4761. PTHREAD_LOCAL_DATA pThreadLocalData = NULL;
  4762. USHORT ReqSize = sizeof(HEAP_EVENT_CONTRACTION) + FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data);
  4763. AcquireBufferLocation(&pEventHeader, &pThreadLocalData,&ReqSize);
  4764. if(pEventHeader && pThreadLocalData) {
  4765. SIZE_T UCBytes = 0;
  4766. PHEAP_EVENT_CONTRACTION pHeapEvent = (PHEAP_EVENT_CONTRACTION)( (SIZE_T)pEventHeader
  4767. + (SIZE_T)FIELD_OFFSET(PERFINFO_TRACE_HEADER, Data ));
  4768. pEventHeader->Packet.Size = (USHORT) ReqSize;
  4769. pEventHeader->Packet.HookId = PERFINFO_LOG_TYPE_HEAP_CONTRACT;
  4770. pHeapEvent->HeapHandle = (PVOID)Heap;
  4771. pHeapEvent->DeCommitAddress = (PVOID)DeCommitAddress;
  4772. pHeapEvent->DeCommitSize = DeCommitSize;
  4773. pHeapEvent->FreeSpace = Heap->TotalFreeSize;
  4774. pHeapEvent->ReservedSpace = 0;
  4775. pHeapEvent->CommittedSpace = 0;
  4776. pHeapEvent->NoOfUCRs = 0;
  4777. UCBytes = GetUCBytes(Heap, &pHeapEvent->ReservedSpace, &pHeapEvent->NoOfUCRs);
  4778. pHeapEvent->ReservedSpace *= PAGE_SIZE;
  4779. pHeapEvent->CommittedSpace = pHeapEvent->ReservedSpace - UCBytes;
  4780. ReleaseBufferLocation(pThreadLocalData);
  4781. }
  4782. }
  4783. #endif // NTOS_KERNEL_RUNTIME
  4784. } else {
  4785. //
  4786. // We weren't successful in the decommit so now simply
  4787. // add the leading free block to the free list
  4788. //
  4789. RtlpInsertFreeBlock( Heap, LeadingFreeBlock, FreeSize );
  4790. }
  4791. } else {
  4792. //
4793. // There is nothing left to decommit, so take our leading free block
4794. // and put it on a free list
  4795. //
  4796. RtlpInsertFreeBlock( Heap, LeadingFreeBlock, FreeSize );
  4797. }
  4798. #ifndef NTOS_KERNEL_RUNTIME
  4799. if ( (LeadingBlockToDecommit != NULL)
  4800. ||
  4801. (TrailingBlockToDecommit != NULL)){
  4802. PHEAP_INDEX HeapIndex = (PHEAP_INDEX)Heap->LargeBlocksIndex;
  4803. LONG PreviousSequence = 0;
  4804. if (HeapIndex) {
  4805. PreviousSequence = HeapIndex->LargeBlocksCacheSequence;
  4806. //
  4807. // Lock the cache for the next two decommits
  4808. //
  4809. HeapIndex->LargeBlocksCacheSequence = 0;
  4810. }
  4811. #endif // NTOS_KERNEL_RUNTIME
  4812. if (LeadingBlockToDecommit) {
  4813. RtlpDeCommitFreeBlock( Heap,
  4814. LeadingBlockToDecommit,
  4815. LeadingBlockToDecommit->Size
  4816. );
  4817. }
  4818. if (TrailingBlockToDecommit) {
  4819. RtlpDeCommitFreeBlock( Heap,
  4820. TrailingBlockToDecommit,
  4821. TrailingBlockToDecommit->Size
  4822. );
  4823. }
  4824. #ifndef NTOS_KERNEL_RUNTIME
  4825. if (HeapIndex) {
  4826. //
  4827. // Unlock the large block cache
  4828. //
  4829. HeapIndex->LargeBlocksCacheSequence = PreviousSequence;
  4830. }
  4831. }
  4832. //
  4833. // At this point the free block to decommit is inserted
  4834. // into the free lists. So it's safe now to check the large
4835. // block list and flush what is no longer necessary.
  4836. //
  4837. RtlpCheckLargeCache(Heap);
  4838. #endif // NTOS_KERNEL_RUNTIME
  4839. //
  4840. // And return to our caller
  4841. //
  4842. return;
  4843. }
  4844. //
  4845. // Declared in heappriv.h
  4846. //
  4847. VOID
  4848. RtlpInsertFreeBlock (
  4849. IN PHEAP Heap,
  4850. IN PHEAP_FREE_ENTRY FreeBlock,
  4851. IN SIZE_T FreeSize
  4852. )
  4853. /*++
  4854. Routine Description:
4855. This routine takes a piece of committed memory and adds it to
4856. the appropriate free lists for the heap. If necessary this
4857. routine will divide up the free block into sizes that fit
4858. on the free lists
  4859. Arguments:
  4860. Heap - Supplies a pointer to the owning heap
  4861. FreeBlock - Supplies a pointer to the block being freed
4862. FreeSize - Supplies the size, in heap units, of the block being freed
  4863. Return Value:
  4864. None.
  4865. --*/
  4866. {
  4867. USHORT PreviousSize, Size;
  4868. UCHAR Flags;
  4869. UCHAR SegmentIndex;
  4870. PHEAP_SEGMENT Segment;
  4871. RTL_PAGED_CODE();
  4872. //
  4873. // Get the size of the previous block, the index of the segment
  4874. // containing this block, and the flags specific to the block
  4875. //
  4876. PreviousSize = FreeBlock->PreviousSize;
  4877. SegmentIndex = FreeBlock->SegmentIndex;
  4878. Segment = Heap->Segments[ SegmentIndex ];
  4879. Flags = FreeBlock->Flags;
  4880. //
  4881. // Adjust the total amount free in the heap
  4882. //
  4883. Heap->TotalFreeSize += FreeSize;
  4884. //
  4885. // Now, while there is still something left to add to the free list
  4886. // we'll process the information
  4887. //
  4888. while (FreeSize != 0) {
  4889. //
  4890. // If the size is too big for our free lists then we'll
  4891. // chop it down.
  4892. //
  4893. if (FreeSize > (ULONG)HEAP_MAXIMUM_BLOCK_SIZE) {
  4894. Size = HEAP_MAXIMUM_BLOCK_SIZE;
  4895. //
  4896. // This little adjustment is so that we don't have a remainder
  4897. // that is too small to be useful on the next iteration
  4898. // through the loop
  4899. //
  4900. if (FreeSize == ((ULONG)HEAP_MAXIMUM_BLOCK_SIZE + 1)) {
  4901. Size -= 16;
  4902. }
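//
// For illustration only: if FreeSize is HEAP_MAXIMUM_BLOCK_SIZE + 1,
// trimming the first chunk by 16 units leaves a 17-unit remainder for
// the next pass instead of a useless 1-unit sliver.
//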
  4903. //
  4904. // Guarantee that Last entry does not get set in this
  4905. // block.
  4906. //
  4907. FreeBlock->Flags = 0;
  4908. } else {
  4909. Size = (USHORT)FreeSize;
  4910. //
  4911. // This could propagate the last entry flag
  4912. //
  4913. FreeBlock->Flags = Flags;
  4914. }
  4915. //
  4916. // Update the block sizes and then insert this
  4917. // block into a free list
  4918. //
  4919. FreeBlock->PreviousSize = PreviousSize;
  4920. FreeBlock->SegmentIndex = SegmentIndex;
  4921. FreeBlock->Size = Size;
  4922. RtlpInsertFreeBlockDirect( Heap, FreeBlock, Size );
  4923. //
  4924. // Note the size of what we just freed, and then update
  4925. // our state information for the next time through the
  4926. // loop
  4927. //
  4928. PreviousSize = Size;
  4929. FreeSize -= Size;
  4930. //
  4931. // Update the last entry in segment, if necessary
  4932. //
  4933. if (FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY) {
  4934. PHEAP_SEGMENT xSegment;
  4935. xSegment = Heap->Segments[ FreeBlock->SegmentIndex ];
  4936. xSegment->LastEntryInSegment = (PHEAP_ENTRY)FreeBlock;
  4937. }
  4938. FreeBlock = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeBlock + Size);
  4939. //
  4940. // Check if we're done with the free block based on the
4941. // segment information, otherwise go back up and check the size.
4942. // Note that this means that we can get called with a very
  4943. // large size and still work.
  4944. //
  4945. if ((PHEAP_ENTRY)FreeBlock >= Segment->LastValidEntry) {
  4946. return;
  4947. }
  4948. }
  4949. //
  4950. // If the block we're freeing did not think it was the last entry
  4951. // then tell the next block our real size.
  4952. //
  4953. if (!(Flags & HEAP_ENTRY_LAST_ENTRY)) {
  4954. FreeBlock->PreviousSize = PreviousSize;
  4955. }
  4956. //
  4957. // And return to our caller
  4958. //
  4959. return;
  4960. }
  4961. //
  4962. // Declared in heappriv.h
  4963. //
  4964. PHEAP_ENTRY_EXTRA
  4965. RtlpGetExtraStuffPointer (
  4966. PHEAP_ENTRY BusyBlock
  4967. )
  4968. /*++
  4969. Routine Description:
  4970. This routine calculates where the extra stuff record will be given
  4971. the busy block and returns a pointer to it. The caller must have
  4972. already checked that the entry extra field is present
  4973. Arguments:
  4974. BusyBlock - Supplies the busy block whose extra stuff we are seeking
  4975. Return Value:
  4976. PHEAP_ENTRY_EXTRA - returns a pointer to the extra stuff record.
  4977. --*/
  4978. {
  4979. ULONG AllocationIndex;
  4980. RTL_PAGED_CODE();
  4981. //
  4982. // On big blocks the extra stuff is automatically part of the
  4983. // block
  4984. //
  4985. if (BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) {
  4986. PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock;
  4987. VirtualAllocBlock = CONTAINING_RECORD( BusyBlock, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock );
  4988. return &VirtualAllocBlock->ExtraStuff;
  4989. } else {
  4990. //
  4991. // On non big blocks the extra stuff follows immediately after
  4992. // the allocation itself.
  4993. //
4994. // We do some funny math here: because the busy block
4995. // stride is 8 bytes, we know we can advance it by its allocation
4996. // index minus one to get to the end of the allocation
  4997. //
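//
// For illustration only, assuming 8-byte HEAP_ENTRY and
// HEAP_ENTRY_EXTRA records: a busy block whose allocation index is 8
// spans 64 bytes, and (BusyBlock + 8 - 1) points at the final 8-byte
// slot, which holds the extra stuff record.
//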
  4998. AllocationIndex = BusyBlock->Size;
  4999. return (PHEAP_ENTRY_EXTRA)(BusyBlock + AllocationIndex - 1);
  5000. }
  5001. }
  5002. //
  5003. // Declared in heappriv.h
  5004. //
  5005. SIZE_T
  5006. RtlpGetSizeOfBigBlock (
  5007. IN PHEAP_ENTRY BusyBlock
  5008. )
  5009. /*++
  5010. Routine Description:
  5011. This routine returns the size, in bytes, of the big allocation block
  5012. Arguments:
  5013. BusyBlock - Supplies a pointer to the block being queried
  5014. Return Value:
  5015. SIZE_T - Returns the size, in bytes, that was allocated to the big
  5016. block
  5017. --*/
  5018. {
  5019. PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock;
  5020. RTL_PAGED_CODE();
  5021. //
  5022. // Get a pointer to the block header itself
  5023. //
  5024. VirtualAllocBlock = CONTAINING_RECORD( BusyBlock, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock );
  5025. //
  5026. // The size allocated to the block is actually the difference between the
5027. // commit size stored in the virtual alloc block and the size stored
5028. // in the block.
  5029. //
  5030. return VirtualAllocBlock->CommitSize - BusyBlock->Size;
  5031. }
  5032. //
  5033. // Declared in heappriv.h
  5034. //
  5035. BOOLEAN
  5036. RtlpCheckBusyBlockTail (
  5037. IN PHEAP_ENTRY BusyBlock
  5038. )
  5039. /*++
  5040. Routine Description:
  5041. This routine checks to see if the bytes beyond the user specified
  5042. allocation have been modified. It does this by checking for a tail
  5043. fill pattern
  5044. Arguments:
  5045. BusyBlock - Supplies the heap block being queried
  5046. Return Value:
  5047. BOOLEAN - TRUE if the tail is still okay and FALSE otherwise
  5048. --*/
  5049. {
  5050. PCHAR Tail;
  5051. SIZE_T Size, cbEqual;
  5052. RTL_PAGED_CODE();
  5053. //
  5054. // Compute the user allocated size of the input heap block
  5055. //
  5056. if (BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) {
  5057. Size = RtlpGetSizeOfBigBlock( BusyBlock );
  5058. } else {
  5059. Size = (BusyBlock->Size << HEAP_GRANULARITY_SHIFT) - BusyBlock->UnusedBytes;
  5060. }
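//
// For illustration only, assuming UnusedBytes counts the 8-byte entry
// header as well as the slack: a block with a Size field of 4 spans
// 32 bytes; if the user requested 20 bytes then UnusedBytes is 12 and
// the computed Size is (4 << 3) - 12 == 20 user bytes.
//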
  5061. //
  5062. // Compute a pointer to the tail of the input block. This would
  5063. // be the space right after the user allocated portion
  5064. //
  5065. Tail = (PCHAR)(BusyBlock + 1) + Size;
  5066. //
  5067. // Check if the tail fill pattern is still there
  5068. //
  5069. cbEqual = RtlCompareMemory( Tail,
  5070. CheckHeapFillPattern,
  5071. CHECK_HEAP_TAIL_SIZE );
  5072. //
  5073. // If the number we get back isn't equal to the tail size then
  5074. // someone modified the block beyond its user specified allocation
  5075. // size
  5076. //
  5077. if (cbEqual != CHECK_HEAP_TAIL_SIZE) {
  5078. //
  5079. // Do some debug printing
  5080. //
  5081. HeapDebugPrint(( "Heap block at %p modified at %p past requested size of %lx\n",
  5082. BusyBlock,
  5083. Tail + cbEqual,
  5084. Size ));
  5085. HeapDebugBreak( BusyBlock );
  5086. //
  5087. // And tell our caller there was an error
  5088. //
  5089. return FALSE;
  5090. } else {
  5091. //
  5092. // And return to our caller that the tail is fine
  5093. //
  5094. return TRUE;
  5095. }
  5096. }
  5097. //
  5098. // Nondedicated free list optimization
  5099. // The index is active only in the USER MODE HEAP
  5100. //
  5101. #ifndef NTOS_KERNEL_RUNTIME
  5102. //
  5103. // RtlpSizeToAllocIndex is used to convert from a size (in heap allocation units)
  5104. // to an index into the array
  5105. //
  5106. #define RtlpSizeToAllocIndex(HI,S) \
  5107. (( (ULONG)((S) - HEAP_MAXIMUM_FREELISTS) >= (HI)->ArraySize) ? ((HI)->ArraySize - 1) : \
  5108. ((S) - HEAP_MAXIMUM_FREELISTS))
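//
// For illustration only, taking HEAP_MAXIMUM_FREELISTS to be 128 and
// an index whose ArraySize is 1024 (the values discussed below):
//
// RtlpSizeToAllocIndex( HeapIndex, 130 ) == 2
// RtlpSizeToAllocIndex( HeapIndex, 1151 ) == 1023
// RtlpSizeToAllocIndex( HeapIndex, 40000 ) == 1023 (clamped)
//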
  5109. VOID
  5110. RtlpInitializeListIndex(
  5111. IN PHEAP Heap
  5112. )
  5113. /*++
  5114. Routine Description:
5115. This routine initializes the index for large blocks. It can be called any time
  5116. during the execution. The function assumes the heap lock is acquired.
  5117. Arguments:
  5118. Heap - Supplies a pointer to the heap being manipulated
  5119. Return Value:
  5120. None
  5121. --*/
  5122. {
  5123. PHEAP_INDEX HeapIndex = NULL;
  5124. ULONG i;
  5125. SIZE_T CommitSize;
  5126. NTSTATUS Status;
  5127. ULONG_PTR ArraySize;
  5128. UINT64 _HeapPerfStartTimer;
  5129. //
  5130. // Check if we already have an index
  5131. //
  5132. if ( Heap->LargeBlocksIndex == NULL) {
  5133. //
5134. // Determine the number of entries in the index.
5135. // For a heap with high usage, most of the blocks in the
5136. // non-dedicated list should be smaller than the block
5137. // decommit threshold + one page
  5138. //
  5139. ArraySize = Heap->DeCommitFreeBlockThreshold + (PAGE_SIZE >> HEAP_GRANULARITY_SHIFT) - HEAP_MAXIMUM_FREELISTS;
  5140. //
5141. // The statement below is just a sanity round up of the array size.
  5142. // Basically for current heap constants this is not necessary:
  5143. // DeCommitFreeBlockThreshold == 512
  5144. // PAGE_SIZE >> HEAP_GRANULARITY_SHIFT == 512
  5145. // So the ArraySize == 1024 is 32 aligned
  5146. //
  5147. ArraySize = ROUND_UP_TO_POWER2( ArraySize, 32 );
  5148. //
  5149. // Determine the amount of memory we need from OS
  5150. //
  5151. CommitSize = sizeof(HEAP_INDEX) +
  5152. ArraySize * sizeof(PHEAP_FREE_ENTRY) +
  5153. ArraySize / 8;
  5154. CommitSize = ROUND_UP_TO_POWER2( CommitSize, PAGE_SIZE );
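//
// For illustration only, assuming 32-bit pointers, a 4KB page, and
// ArraySize == 1024: the hints array takes 1024 * 4 == 4096 bytes and
// the bitmap 1024 / 8 == 128 bytes, so CommitSize rounds up to two
// pages (0x2000), since sizeof(HEAP_INDEX) is small.
//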
  5155. Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
  5156. (PVOID *)&HeapIndex,
  5157. 0,
  5158. &CommitSize,
  5159. MEM_RESERVE | MEM_COMMIT,
  5160. PAGE_READWRITE
  5161. );
  5162. if ( NT_SUCCESS(Status) ) {
  5163. //
5164. // Here the allocation succeeded. We need to
  5165. // initialize the index structures
  5166. //
  5167. PLIST_ENTRY Head, Next;
  5168. //
  5169. // Initialize the array fields
  5170. //
  5171. HeapIndex->ArraySize = (ULONG)ArraySize;
  5172. HeapIndex->VirtualMemorySize = (ULONG)CommitSize;
  5173. //
  5174. // The FreeListHints will be immediately after the index structure
  5175. //
  5176. HeapIndex->FreeListHints = (PHEAP_FREE_ENTRY *)((PUCHAR)HeapIndex + sizeof(HEAP_INDEX));
  5177. //
  5178. // The bitmap is placed after the array with hints to
  5179. // free blocks
  5180. //
  5181. HeapIndex->u.FreeListsInUseBytes = (PUCHAR)(HeapIndex->FreeListHints + ArraySize);
  5182. HeapIndex->LargeBlocksCacheDepth = 0;
  5183. if (RtlpDisableHeapLookaside & HEAP_COMPAT_DISABLE_LARGECACHE) {
  5184. HeapIndex->LargeBlocksCacheSequence = 0;
  5185. } else {
  5186. HeapIndex->LargeBlocksCacheSequence = 1;
  5187. }
  5188. //
  5189. // Save the original non-dedicated list from the heap
  5190. //
  5191. Head = &Heap->FreeLists[ 0 ];
  5192. Next = Head->Flink;
  5193. //
  5194. // Walk the non-dedicated list and insert each block found
  5195. // there into the new structures
  5196. //
  5197. while (Head != Next) {
  5198. PHEAP_FREE_ENTRY FreeEntry;
  5199. ULONG AllocIndex;
  5200. //
  5201. // Get the free block from the old list
  5202. //
  5203. FreeEntry = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  5204. //
  5205. // Save the next link. The insertion into the new
  5206. // structure will destroy it
  5207. //
  5208. Next = Next->Flink;
  5209. //
  5210. // Insert the block into the large blocks array
  5211. //
  5212. AllocIndex = RtlpSizeToAllocIndex( HeapIndex, FreeEntry->Size );
  5213. if ( !HeapIndex->FreeListHints[ AllocIndex ] ) {
  5214. HeapIndex->FreeListHints[ AllocIndex ] = FreeEntry;
  5215. SET_INDEX_BIT( HeapIndex, AllocIndex );
  5216. }
  5217. if (AllocIndex == (HeapIndex->ArraySize - 1)) {
  5218. HeapIndex->LargeBlocksCacheDepth += 1;
  5219. }
  5220. }
  5221. HeapIndex->LargeBlocksCacheMaxDepth = HeapIndex->LargeBlocksCacheDepth;
  5222. HeapIndex->LargeBlocksCacheMinDepth = HeapIndex->LargeBlocksCacheDepth;
  5223. HeapIndex->CacheStats.Committs = 0;
  5224. HeapIndex->CacheStats.Decommitts = 0;
  5225. HeapIndex->CacheStats.LargestDepth = HeapIndex->LargeBlocksCacheDepth;
  5226. HeapIndex->CacheStats.LargestRequiredDepth = 0;
  5227. NtQueryPerformanceCounter( (PLARGE_INTEGER)&_HeapPerfStartTimer , (PLARGE_INTEGER)&HeapIndex->PerfData.CountFrequence);
  5228. //
5229. // Initialize the LargeBlocksIndex with the newly created structure
  5230. //
  5231. Heap->LargeBlocksIndex = HeapIndex;
  5232. //
  5233. // validate the index if HEAP_VALIDATE_INDEX is defined
  5234. // (Debug - test only)
  5235. //
  5236. RtlpValidateNonDedicatedList( Heap );
  5237. }
  5238. }
  5239. }
  5240. PLIST_ENTRY
  5241. RtlpFindEntry (
  5242. IN PHEAP Heap,
  5243. IN ULONG Size
  5244. )
  5245. /*++
  5246. Routine Description:
5247. This function searches the non-dedicated list for the first block
5248. greater than or equal to the given size.
  5249. Arguments:
  5250. Heap - Supplies a pointer to the heap being manipulated
  5251. Size - The size in heap units we're looking for
  5252. Return Value:
5253. Returns the list entry for the block which matches the search criteria.
5254. If the search fails it simply returns the non-dedicated list header.
  5255. --*/
  5256. {
  5257. PHEAP_INDEX HeapIndex = NULL;
  5258. ULONG LookupBitmapUlongIndex;
  5259. ULONG LastValidIndex;
  5260. ULONG CrtBitmapUlong;
  5261. PULONG UlongArray;
  5262. PHEAP_FREE_ENTRY FreeEntry = NULL;
  5263. PLIST_ENTRY Head, Next;
  5264. PHEAP_FREE_ENTRY LastBlock, FirstBlock;
  5265. ULONG AllocIndex;
  5266. Head = &Heap->FreeLists[0];
  5267. Next = Head->Blink;
  5268. //
  5269. // Check if the list is empty. Return the list head if it is.
  5270. //
  5271. if (Head == Next) {
  5272. return Head;
  5273. }
  5274. //
5275. // Check whether the largest block in the free list is smaller
5276. // than the requested size.
  5277. //
  5278. LastBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  5279. if (LastBlock->Size < Size) {
  5280. //
  5281. // we don't have a block available for our request
  5282. //
  5283. return Head;
  5284. }
  5285. //
5286. // If the requested size is smaller than or equal to the first free block we'll
5287. // return the first block in the list w/o searching the index
  5288. //
  5289. FirstBlock = CONTAINING_RECORD( Head->Flink, HEAP_FREE_ENTRY, FreeList );
  5290. if (Size <= FirstBlock->Size) {
  5291. //
  5292. // Return the first block then.
  5293. //
  5294. return Head->Flink;
  5295. }
  5296. //
  5297. // At this point we have a block that must be somewhere in
  5298. // the middle of the list. We'll use the index to locate it.
  5299. //
  5300. HeapIndex = (PHEAP_INDEX)Heap->LargeBlocksIndex;
  5301. AllocIndex = RtlpSizeToAllocIndex(HeapIndex, Size);
  5302. //
5303. // We'll try the last sublist first
  5304. //
  5305. if ( AllocIndex == (HeapIndex->ArraySize - 1) ) {
  5306. FreeEntry = HeapIndex->FreeListHints[ AllocIndex ];
  5307. Next = &FreeEntry->FreeList;
  5308. while ( Head != Next ) {
  5309. FreeEntry = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  5310. if (FreeEntry->Size >= Size) {
  5311. return &FreeEntry->FreeList;
  5312. }
  5313. Next = Next->Flink;
  5314. }
  5315. }
  5316. //
  5317. // Calculate the starting index into the bitmap array
  5318. //
  5319. LookupBitmapUlongIndex = AllocIndex >> 5;
  5320. //
  5321. // Determine the last index into the ULONG bitmap where the
  5322. // lookup must stop
  5323. //
  5324. LastValidIndex = (HeapIndex->ArraySize >> 5) - 1;
  5325. UlongArray = HeapIndex->u.FreeListsInUseUlong + LookupBitmapUlongIndex;
  5326. CrtBitmapUlong = *UlongArray;
  5327. //
  5328. // Mask off the bits in the first ULONG that represent allocations
  5329. // smaller than we need.
  5330. //
  5331. CrtBitmapUlong = CrtBitmapUlong & ~((1 << ((ULONG) Size & 0x1f)) - 1);
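//
// For illustration only, taking HEAP_MAXIMUM_FREELISTS to be 128 (a
// multiple of 32, so Size and AllocIndex agree mod 32): Size == 200
// gives AllocIndex == 72 and LookupBitmapUlongIndex == 2, and the
// mask ~((1 << (200 & 0x1f)) - 1) == ~0xFF clears the bits for
// indices 64..71, leaving only hints for index 72 and above in this
// ULONG.
//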
  5332. //
5333. // Loop through the ULONG bitmap until we find something
5334. // non-empty
  5335. //
  5336. while ( !CrtBitmapUlong &&
  5337. (LookupBitmapUlongIndex <= LastValidIndex) ) {
  5338. CrtBitmapUlong = *(++UlongArray);
  5339. LookupBitmapUlongIndex++;
  5340. }
  5341. //
5342. // Sanity check that we found something.
5343. // The tests against the smallest and largest blocks above should
5344. // guarantee that the loop found something
  5345. //
  5346. if ( !CrtBitmapUlong ) {
  5347. HeapDebugPrint(( "Index not found into the bitmap %08lx\n", Size ));
  5348. // DbgBreakPoint();
  5349. return Head;
  5350. }
  5351. //
5352. // Determine the position within the bitmap where the bit is set.
  5353. // This is the index into the hints array
  5354. //
  5355. LookupBitmapUlongIndex = (LookupBitmapUlongIndex << 5) +
  5356. RtlFindFirstSetRightMember( CrtBitmapUlong );
  5357. //
  5358. // Return the list entry for the block we found
  5359. //
  5360. FreeEntry = HeapIndex->FreeListHints[ LookupBitmapUlongIndex ];
  5361. return &FreeEntry->FreeList;
  5362. }
  5363. VOID
  5364. RtlpFlushLargestCacheBlock (
  5365. IN PHEAP Heap
  5366. )
  5367. {
  5368. PHEAP_INDEX HeapIndex = (PHEAP_INDEX)Heap->LargeBlocksIndex;
  5369. if ((HeapIndex != NULL) &&
  5370. (HeapIndex->LargeBlocksCacheSequence != 0) ) {
  5371. PLIST_ENTRY Head, Next;
  5372. PHEAP_FREE_ENTRY FreeBlock;
  5373. Head = &Heap->FreeLists[ 0 ];
  5374. Next = Head->Blink;
  5375. if (Head != Next) {
  5376. ULONG PrevSeq = HeapIndex->LargeBlocksCacheSequence;
  5377. //
  5378. // Lock the cache operations
  5379. //
  5380. HeapIndex->LargeBlocksCacheSequence = 0;
  5381. FreeBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  5382. RtlpFastRemoveNonDedicatedFreeBlock( Heap, FreeBlock );
  5383. FreeBlock->Flags |= HEAP_ENTRY_BUSY;
  5384. Heap->TotalFreeSize -= FreeBlock->Size;
  5385. RtlpDeCommitFreeBlock( Heap, (PHEAP_FREE_ENTRY)FreeBlock, FreeBlock->Size );
  5386. //
  5387. // Unlock the cache
  5388. //
  5389. HeapIndex->LargeBlocksCacheSequence = PrevSeq;
  5390. RtlpValidateNonDedicatedList(Heap);
  5391. }
  5392. }
  5393. }
  5394. VOID
  5395. RtlpFlushCacheContents (
  5396. IN PHEAP Heap
  5397. )
  5398. {
  5399. PHEAP_INDEX HeapIndex = (PHEAP_INDEX)Heap->LargeBlocksIndex;
  5400. if ((HeapIndex != NULL) &&
  5401. (HeapIndex->LargeBlocksCacheSequence != 0) ) {
  5402. LONG NewDepth = HeapIndex->LargeBlocksCacheMaxDepth - HeapIndex->LargeBlocksCacheMinDepth;
  5403. if ( (HeapIndex->LargeBlocksCacheDepth > NewDepth) ) {
  5404. PLIST_ENTRY Head, Next;
  5405. LIST_ENTRY ListToFree;
  5406. PHEAP_FREE_ENTRY FreeBlock;
  5407. LONG BlocksToFree = HeapIndex->LargeBlocksCacheDepth - NewDepth;
  5408. LONG RemainingBlocks = HeapIndex->LargeBlocksCacheDepth;
  5409. if (HeapIndex->LargeBlocksCacheMaxDepth > HeapIndex->CacheStats.LargestDepth) {
  5410. HeapIndex->CacheStats.LargestDepth = HeapIndex->LargeBlocksCacheMaxDepth;
  5411. }
  5412. if (NewDepth > HeapIndex->CacheStats.LargestRequiredDepth) {
  5413. HeapIndex->CacheStats.LargestRequiredDepth = NewDepth;
  5414. }
  5415. //
  5416. // Get the last hint for this specific size from the index
  5417. //
  5418. FreeBlock = HeapIndex->FreeListHints[ HeapIndex->ArraySize - 1 ];
  5419. if (FreeBlock == NULL) {
  5420. DbgPrint("No free blocks in the cache but the depth is not 0 %ld\n",
  5421. HeapIndex->LargeBlocksCacheDepth);
  5422. return;
  5423. }
  5424. //
  5425. // Lock the cache operations
  5426. //
  5427. HeapIndex->LargeBlocksCacheSequence = 0;
  5428. Head = &Heap->FreeLists[ 0 ];
  5429. Next = &FreeBlock->FreeList;
  5430. InitializeListHead(&ListToFree);
  5431. while (Head != Next) {
  5432. FreeBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  5433. Next = Next->Flink;
  5434. if ( (((SIZE_T)FreeBlock->Size) << HEAP_GRANULARITY_SHIFT) > Heap->DeCommitFreeBlockThreshold) {
  5435. if ((FreeBlock->Flags & HEAP_ENTRY_SETTABLE_FLAG3)
  5436. ||
  5437. (BlocksToFree >= RemainingBlocks) ) {
  5438. RtlpFastRemoveNonDedicatedFreeBlock( Heap, FreeBlock );
  5439. InsertTailList(&ListToFree, &FreeBlock->FreeList);
  5440. FreeBlock->Flags |= HEAP_ENTRY_BUSY;
  5441. Heap->TotalFreeSize -= FreeBlock->Size;
  5442. BlocksToFree -= 1;
  5443. } else {
  5444. FreeBlock->Flags |= HEAP_ENTRY_SETTABLE_FLAG3;
  5445. }
  5446. }
  5447. RemainingBlocks -= 1;
  5448. }
  5449. Head = &ListToFree;
  5450. Next = ListToFree.Flink;
  5451. while (Head != Next) {
  5452. FreeBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  5453. RtlpHeapRemoveEntryList(&FreeBlock->FreeList);
  5454. Next = ListToFree.Flink;
  5455. RtlpDeCommitFreeBlock( Heap, (PHEAP_FREE_ENTRY)FreeBlock, FreeBlock->Size );
  5456. }
  5457. }
  5458. HeapIndex->LargeBlocksCacheMaxDepth = HeapIndex->LargeBlocksCacheDepth;
  5459. HeapIndex->LargeBlocksCacheMinDepth = HeapIndex->LargeBlocksCacheDepth;
  5460. HeapIndex->LargeBlocksCacheSequence = 1;
  5461. RtlpValidateNonDedicatedList(Heap);
  5462. }
  5463. }
  5464. VOID
  5465. RtlpUpdateIndexRemoveBlock (
  5466. IN PHEAP Heap,
  5467. IN PHEAP_FREE_ENTRY FreeEntry
  5468. )
  5469. /*++
  5470. Routine Description:
5471. This function is called each time a free block is removed from the
5472. non-dedicated list. It updates the heap index to reflect the change.
5473. NOTE : This function must be called BEFORE the free entry is
5474. actually removed from the non-dedicated list
  5475. Arguments:
  5476. Heap - Supplies a pointer to the heap being manipulated
  5477. FreeEntry - The removed free block
  5478. Return Value:
  5479. None
  5480. --*/
  5481. {
  5482. PHEAP_INDEX HeapIndex = (PHEAP_INDEX)Heap->LargeBlocksIndex;
  5483. ULONG Size = FreeEntry->Size;
  5484. //
  5485. // It updates the index only for large sizes
  5486. // (over 1024 bytes)
  5487. //
  5488. if (Size >= HEAP_MAXIMUM_FREELISTS) {
  5489. //
5490. // Update the length of the non-dedicated list.
5491. // This happens even if the index isn't created
  5492. //
  5493. Heap->NonDedicatedListLength -= 1;
  5494. //
  5495. // If we have an index, we need to update the index structures
  5496. //
  5497. if (HeapIndex) {
  5498. PHEAP_FREE_ENTRY PrevBlock;
  5499. ULONG AllocIndex = RtlpSizeToAllocIndex( HeapIndex, Size );
  5500. PLIST_ENTRY Flink = FreeEntry->FreeList.Flink;
  5501. //
5502. // Get the next block in the list. Set it to NULL
  5503. // if this is the last element here
  5504. //
  5505. PHEAP_FREE_ENTRY NextFreeBlock = (Flink == &Heap->FreeLists[ 0 ]) ?
  5506. NULL :
  5507. CONTAINING_RECORD( Flink, HEAP_FREE_ENTRY, FreeList );
  5508. //
  5509. // Get the last hint for this specific size from the index
  5510. //
  5511. PrevBlock = HeapIndex->FreeListHints[ AllocIndex ];
  5512. if ( PrevBlock == FreeEntry) {
  5513. //
5514. // The free block being removed is actually the hint for that
5515. // specific size, so we need to update the hint
  5516. //
  5517. if (AllocIndex < (HeapIndex->ArraySize - 1)) {
  5518. //
5519. // If the next block has the same size as the current one
  5520. // we need to update only the hint pointer
  5521. //
  5522. if ( NextFreeBlock &&
  5523. (NextFreeBlock->Size == Size) ) {
  5524. HeapIndex->FreeListHints[ AllocIndex ] = NextFreeBlock;
  5525. } else {
  5526. //
5527. // There is no other block with this size, so we need to
5528. // set the hint to NULL and clear the appropriate bit
  5529. //
  5530. HeapIndex->FreeListHints[ AllocIndex ] = NULL;
  5531. CLEAR_INDEX_BIT( HeapIndex, AllocIndex );
  5532. }
  5533. } else {
  5534. //
5535. // We are here because this is the last hint in the array.
5536. // This sublist may contain free blocks with different sizes.
  5537. //
  5538. if (NextFreeBlock) {
  5539. //
5540. // We have another block larger than this one,
5541. // so we move the hint to that block
  5542. //
  5543. HeapIndex->FreeListHints[ AllocIndex ] = NextFreeBlock;
  5544. } else {
  5545. //
5546. // This was the last block within the non-dedicated list.
  5547. // Clear the hint pointer and the appropriate bit.
  5548. //
  5549. HeapIndex->FreeListHints[ AllocIndex ] = NULL;
  5550. CLEAR_INDEX_BIT( HeapIndex, AllocIndex );
  5551. }
  5552. }
  5553. }
  5554. if (AllocIndex == (HeapIndex->ArraySize - 1)) {
  5555. HeapIndex->LargeBlocksCacheDepth -= 1;
  5556. if (HeapIndex->LargeBlocksCacheDepth < 0) {
5557. DbgPrint("Invalid Cache depth\n");
  5558. }
  5559. if (HeapIndex->LargeBlocksCacheSequence != 0) {
  5560. HeapIndex->LargeBlocksCacheSequence += 1;
  5561. if (HeapIndex->LargeBlocksCacheDepth < HeapIndex->LargeBlocksCacheMinDepth) {
  5562. HeapIndex->LargeBlocksCacheMinDepth = HeapIndex->LargeBlocksCacheDepth;
  5563. }
  5564. }
  5565. }
  5566. }
  5567. }
  5568. }
  5569. VOID
  5570. RtlpUpdateIndexInsertBlock (
  5571. IN PHEAP Heap,
  5572. IN PHEAP_FREE_ENTRY FreeEntry
  5573. )
  5574. /*++
  5575. Routine Description:
5576. This function is called each time a free block is inserted into the
5577. non-dedicated list. It updates the heap index to reflect the change.
5578. NOTE : This function must be called AFTER the free entry is
5579. actually inserted into the non-dedicated list
  5580. Arguments:
  5581. Heap - Supplies a pointer to the heap being manipulated
5582. FreeEntry - The newly inserted free block
  5583. Return Value:
  5584. None
  5585. --*/
  5586. {
  5587. PHEAP_INDEX HeapIndex = (PHEAP_INDEX)Heap->LargeBlocksIndex;
  5588. //
  5589. // we only have something to do if the size is over the dedicated list
  5590. // range
  5591. //
  5592. if ( FreeEntry->Size >= HEAP_MAXIMUM_FREELISTS ) {
  5593. //
  5594. // Update the non-dedicated list length
  5595. //
  5596. Heap->NonDedicatedListLength += 1;
  5597. if ( HeapIndex ) {
  5598. //
5599. // We have an index for this list. We need to do some
5600. // extra work to maintain it
  5601. //
  5602. PHEAP_FREE_ENTRY PrevBlock;
  5603. ULONG AllocIndex = RtlpSizeToAllocIndex( HeapIndex, FreeEntry->Size );
  5604. //
  5605. // Get the original hint stored into the index
  5606. //
  5607. PrevBlock = HeapIndex->FreeListHints[ AllocIndex ];
  5608. //
  5609. // If the hint before was NULL, or we are adding a new
5610. // block smaller than or equal to the previous one, we need
  5611. // to update the hint pointer
  5612. //
  5613. if ( (!PrevBlock) ||
  5614. (FreeEntry->Size <= PrevBlock->Size) ) {
  5615. HeapIndex->FreeListHints[ AllocIndex ] = FreeEntry;
  5616. }
  5617. //
  5618. // If this is the first time we set a hint for that size
5619. // we need to set the corresponding bit in the bitmap
  5620. //
  5621. if ( !PrevBlock ) {
  5622. SET_INDEX_BIT( HeapIndex, AllocIndex );
  5623. }
  5624. if ( AllocIndex == (HeapIndex->ArraySize - 1) ) {
  5625. HeapIndex->LargeBlocksCacheDepth += 1;
  5626. if (HeapIndex->LargeBlocksCacheSequence != 0) {
  5627. HeapIndex->LargeBlocksCacheSequence += 1;
  5628. if (HeapIndex->LargeBlocksCacheDepth > HeapIndex->LargeBlocksCacheMaxDepth) {
  5629. HeapIndex->LargeBlocksCacheMaxDepth = HeapIndex->LargeBlocksCacheDepth;
  5630. }
  5631. }
  5632. }
  5633. } else if ( Heap->NonDedicatedListLength >= HEAP_INDEX_THRESHOLD ) {
  5634. //
5635. // We don't have an index, but we have enough blocks in the
5636. // non-dedicated list. We need to create an index right now
5637. // to speed up further searches
  5638. //
  5639. RtlpInitializeListIndex( Heap );
  5640. }
  5641. }
  5642. }
  5643. //
  5644. // Additional debug - test code
  5645. //
  5646. #ifdef HEAP_VALIDATE_INDEX
  5647. BOOLEAN
  5648. RtlpGetBitState(
  5649. IN PHEAP_INDEX HeapIndex,
  5650. IN ULONG Bit
  5651. )
  5652. /*++
  5653. Routine Description:
  5654. Utility routine which tests the given bit from the bitmap
  5655. Arguments:
5656. HeapIndex - Supplies a pointer to the heap index being manipulated
  5657. Bit - The bit to be tested
  5658. Return Value:
5659. TRUE if the bit is 1 and FALSE otherwise
  5660. --*/
  5661. {
  5662. ULONG _Index_;
  5663. ULONG _Bit_;
  5664. _Index_ = Bit >> 3;
  5665. _Bit_ = (1 << (Bit & 7));
  5666. return (((HeapIndex)->u.FreeListsInUseBytes[ _Index_ ] & _Bit_)) != 0;
  5667. }
  5668. BOOLEAN
  5669. RtlpValidateNonDedicatedList (
  5670. IN PHEAP Heap
  5671. )
  5672. /*++
  5673. Routine Description:
5674. Utility routine which validates the index and non-dedicated list
5675. structures
  5676. Arguments:
5677. Heap - Supplies a pointer to the heap being validated
  5678. Return Value:
5679. TRUE if validation succeeds
  5680. --*/
  5681. {
  5682. PHEAP_INDEX HeapIndex = NULL;
  5683. PLIST_ENTRY Head, Next;
  5684. ULONG PreviousSize = 0;
  5685. ULONG PreviousIndex = 0;
  5686. LONG LargeBlocksCount = 0;
  5687. HeapIndex = (PHEAP_INDEX)Heap->LargeBlocksIndex;
  5688. //
  5689. // we only do validation if we have a heap index created
  5690. //
  5691. if (HeapIndex) {
  5692. Head = &Heap->FreeLists[ 0 ];
  5693. Next = Head->Flink;
  5694. //
  5695. // Loop through the free blocks placed into the non-dedicated list
  5696. //
  5697. while (Head != Next) {
  5698. PHEAP_FREE_ENTRY FreeEntry;
  5699. ULONG AllocIndex;
  5700. //
5701. // Get the next free block from the list
  5702. //
  5703. FreeEntry = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList );
  5704. //
  5705. // Test if the blocks are in the proper order (ascending)
  5706. //
  5707. if (PreviousSize > FreeEntry->Size) {
  5708. HeapDebugPrint(( "Invalid Block order %p - %08lx, %08lx\n",
  5709. FreeEntry,
  5710. FreeEntry->Size,
  5711. PreviousSize ));
  5712. DbgBreakPoint();
  5713. }
  5714. //
  5715. // Get the appropriate index for the current block
  5716. //
  5717. AllocIndex = RtlpSizeToAllocIndex( HeapIndex, FreeEntry->Size );
  5718. if (AllocIndex == (HeapIndex->ArraySize - 1)) {
  5719. LargeBlocksCount += 1;
  5720. }
  5721. if (PreviousSize != FreeEntry->Size) {
  5722. ULONG i;
  5723. //
  5724. // We are here only for the first block of a given size
  5725. //
  5726. //
  5727. // We need to have all hints NULL between two adjacent
  5728. // free blocks of different sizes
  5729. //
  5730. for (i = PreviousIndex + 1; i < AllocIndex; i++) {
  5731. //
5732. // Report an error if there is a hint, but that block doesn't
5733. // exist in the non-dedicated list
  5734. //
  5735. if (HeapIndex->FreeListHints[i]) {
  5736. DbgPrint( "Free block missing %lx, %08lx\n",
  5737. i,
  5738. HeapIndex->FreeListHints[i]
  5739. );
  5740. DbgBreakPoint();
  5741. }
  5742. //
5743. // Report an error if there is a bit set for a size
5744. // not present in the non-dedicated list
  5745. //
  5746. if ( RtlpGetBitState(HeapIndex, i) ) {
  5747. DbgPrint("Invalid bit state. Must be 0 %lx\n", i);
  5748. DbgBreakPoint();
  5749. }
  5750. }
  5751. //
  5752. // we are here for the first block of this size. So the hint
  5753. // should point to this block
  5754. //
  5755. if ( (AllocIndex < HeapIndex->ArraySize - 1) &&
  5756. (HeapIndex->FreeListHints[ AllocIndex ] != FreeEntry)) {
  5757. DbgPrint( "Invalid index %lx for block %08lx (%08lx)\n",
  5758. AllocIndex,
  5759. HeapIndex->FreeListHints[AllocIndex],
  5760. FreeEntry);
  5761. DbgBreakPoint();
  5762. }
  5763. //
  5764. // We have a block into the non-dedicated list so we need to have
  5765. // the appropriate bit set
  5766. //
  5767. if ( !RtlpGetBitState( HeapIndex, AllocIndex ) ) {
  5768. DbgPrint("Invalid bit state. Must be 1 %lx\n", i);
  5769. DbgBreakPoint();
  5770. }
  5771. }
  5772. //
5773. // Advance to the next block in the list and remember
5774. // the current size and index for the next iteration
  5775. //
  5776. Next = Next->Flink;
  5777. PreviousSize = FreeEntry->Size;
  5778. PreviousIndex = AllocIndex;
  5779. }
  5780. if (LargeBlocksCount != HeapIndex->LargeBlocksCacheDepth) {
  5781. DbgPrint("Invalid Cache depth %ld. Should be %ld\n",
  5782. HeapIndex->LargeBlocksCacheDepth,
  5783. LargeBlocksCount);
  5784. }
  5785. }
  5786. return TRUE;
  5787. }
  5788. #endif // HEAP_VALIDATE_INDEX
  5789. #endif // NTOS_KERNEL_RUNTIME
  5790. #ifndef NTOS_KERNEL_RUNTIME
  5791. SIZE_T
  5792. GetUCBytes(
  5793. IN PHEAP Heap,
  5794. IN OUT SIZE_T *ReservedSpace,
  5795. IN OUT PULONG NoOfUCRs
  5796. )
  5797. /*++
  5798. Routine Description:
5799. Utility routine which computes the uncommitted bytes in a heap
  5800. Arguments:
  5801. Heap - Supplies a pointer to the heap
5802. ReservedSpace - Receives the reserved space, in pages
5803. NoOfUCRs - Receives the number of uncommitted ranges
  5804. Return Value:
5805. SIZE_T - Total number of uncommitted bytes.
  5806. --*/
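//
// For illustration only: a segment with NumberOfPages == 256 and
// NumberOfUnCommittedPages == 64 adds 256 to *ReservedSpace (counted
// in pages; the contraction-event code above multiplies it by
// PAGE_SIZE) and 64 * PAGE_SIZE bytes to the returned UCBytes.
//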
  5807. {
  5808. SIZE_T UCBytes = 0;
  5809. PHEAP_SEGMENT pSegment;
  5810. LONG SegmentIndex;
  5811. *NoOfUCRs = 0;
  5812. for (SegmentIndex=0; SegmentIndex<HEAP_MAXIMUM_SEGMENTS; SegmentIndex++) {
  5813. pSegment = Heap->Segments[ SegmentIndex ];
  5814. if (pSegment) {
  5815. (*ReservedSpace) += pSegment->NumberOfPages;
  5816. (*NoOfUCRs) += pSegment->NumberOfUnCommittedRanges;
  5817. UCBytes += pSegment->NumberOfUnCommittedPages * PAGE_SIZE;
  5818. }
  5819. }
  5820. return UCBytes;
  5821. }
  5822. #endif // NTOS_KERNEL_RUNTIME
  5823. #if defined(ALLOC_DATA_PRAGMA) && defined(NTOS_KERNEL_RUNTIME)
  5824. #pragma const_seg()
  5825. #endif