Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

6761 lines
174 KiB

  1. /*++
  2. Copyright (c) 1994-2000 Microsoft Corporation
  3. Module Name:
  4. heappage.c
  5. Abstract:
  6. Implementation of NT RtlHeap family of APIs for debugging
  7. applications with heap usage bugs. Each allocation returned to
  8. the calling app is placed at the end of a virtual page such that
  9. the following virtual page is protected (ie, NO_ACCESS).
  10. So, when the errant app attempts to reference or modify memory
  11. beyond the allocated portion of a heap block, an access violation
  12. is immediately caused. This facilitates debugging the app
  13. because the access violation occurs at the exact point in the
  14. app where the heap corruption or abuse would occur. Note that
  15. significantly more memory (pagefile) is required to run an app
  16. using this heap implementation as opposed to the retail heap
  17. manager.
  18. Author:
  19. Tom McGuire (TomMcg) 06-Jan-1995
  20. Silviu Calinoiu (SilviuC) 22-Feb-2000
  21. Revision History:
  22. --*/
  23. #include "ntrtlp.h"
  24. #include "heappage.h" // external interface (hooks) to debug heap manager
  25. #include "heappagi.h"
  26. #include "heappriv.h"
  27. int __cdecl sprintf(char *, const char *, ...);
  28. //
  29. // Remainder of entire file is wrapped with #ifdef DEBUG_PAGE_HEAP so that
  30. // it will compile away to nothing if DEBUG_PAGE_HEAP is not defined in
  31. // heappage.h
  32. //
  33. #ifdef DEBUG_PAGE_HEAP
  34. //
  35. // Page size
  36. //
  37. #if defined(_X86_)
  38. #ifndef PAGE_SIZE
  39. #define PAGE_SIZE 0x1000
  40. #endif
  41. #define USER_ALIGNMENT 8
  42. #elif defined(_IA64_)
  43. #ifndef PAGE_SIZE
  44. #define PAGE_SIZE 0x2000
  45. #endif
  46. #define USER_ALIGNMENT 16
  47. #elif defined(_AMD64_)
  48. #ifndef PAGE_SIZE
  49. #define PAGE_SIZE 0x1000
  50. #endif
  51. #define USER_ALIGNMENT 16
  52. #else
  53. #error // platform not defined
  54. #endif
  55. //
  56. // Few constants
  57. //
  58. #define DPH_HEAP_SIGNATURE 0xFFEEDDCC
  59. #define FILL_BYTE 0xEE
  60. #define HEAD_FILL_SIZE 0x10
  61. #define RESERVE_SIZE 0x100000
  62. #define VM_UNIT_SIZE 0x10000
  63. #define POOL_SIZE 0x4000
  64. #define INLINE __inline
  65. #define MIN_FREE_LIST_LENGTH 8
  66. //
  67. // Few macros
  68. //
  69. #define ROUNDUP2( x, n ) ((( x ) + (( n ) - 1 )) & ~(( n ) - 1 ))
  70. #if INTERNAL_DEBUG
  71. #define DEBUG_CODE( a ) a
  72. #else
  73. #define DEBUG_CODE( a )
  74. #endif
  75. #define RETAIL_ASSERT( a ) ( (a) ? TRUE : \
  76. RtlpDebugPageHeapAssert( "Page heap: assert: (" #a ")\n" ))
  77. #define DEBUG_ASSERT( a ) DEBUG_CODE( RETAIL_ASSERT( a ))
  78. #define HEAP_HANDLE_FROM_ROOT( HeapRoot ) \
  79. ((PVOID)(((PCHAR)(HeapRoot)) - PAGE_SIZE ))
  80. #define IF_GENERATE_EXCEPTION( Flags, Status ) { \
  81. if (( Flags ) & HEAP_GENERATE_EXCEPTIONS ) \
  82. RtlpDebugPageHeapException((ULONG)(Status)); \
  83. }
  84. #define OUT_OF_VM_BREAK( Flags, szText ) { \
  85. if (( Flags ) & HEAP_BREAK_WHEN_OUT_OF_VM ) \
  86. RtlpDebugPageHeapBreak(( szText )); \
  87. }
  88. //
  89. // List manipulation macros
  90. //
  91. #define ENQUEUE_HEAD( Node, Head, Tail ) { \
  92. (Node)->pNextAlloc = (Head); \
  93. if ((Head) == NULL ) \
  94. (Tail) = (Node); \
  95. (Head) = (Node); \
  96. }
  97. #define ENQUEUE_TAIL( Node, Head, Tail ) { \
  98. if ((Tail) == NULL ) \
  99. (Head) = (Node); \
  100. else \
  101. (Tail)->pNextAlloc = (Node); \
  102. (Tail) = (Node); \
  103. }
  104. #define DEQUEUE_NODE( Node, Prev, Head, Tail ) { \
  105. PVOID Next = (Node)->pNextAlloc; \
  106. if ((Head) == (Node)) \
  107. (Head) = Next; \
  108. if ((Tail) == (Node)) \
  109. (Tail) = (Prev); \
  110. if ((Prev) != (NULL)) \
  111. (Prev)->pNextAlloc = Next; \
  112. }
  113. //
  114. // Bias/unbias pointer
  115. //
  116. #define BIAS_POINTER(p) ((PVOID)((ULONG_PTR)(p) | (ULONG_PTR)0x01))
  117. #define UNBIAS_POINTER(p) ((PVOID)((ULONG_PTR)(p) & ~((ULONG_PTR)0x01)))
  118. #define IS_BIASED_POINTER(p) ((PVOID)((ULONG_PTR)(p) & (ULONG_PTR)0x01))
  119. //
  120. // Scramble/unscramble
  121. //
  122. // We scramble heap pointers in the header blocks in order to make them
  123. // look as kernel pointers and cause an AV if used. This is not totally
  124. // accurate on IA64 but still likely to cause an AV.
  125. //
  126. #if defined(_WIN64)
  127. #define SCRAMBLE_VALUE ((ULONG_PTR)0x8000000000000000)
  128. #else
  129. #define SCRAMBLE_VALUE ((ULONG_PTR)0x80000000)
  130. #endif
  131. #define SCRAMBLE_POINTER(P) ((PVOID)((ULONG_PTR)(P) ^ SCRAMBLE_VALUE))
  132. #define UNSCRAMBLE_POINTER(P) ((PVOID)((ULONG_PTR)(P) ^ SCRAMBLE_VALUE))
  133. //
  134. // Protect/Unprotect heap structures macros
  135. //
  136. // The Protect/Unprotect functions are #if zeroed for now because there is
  137. // an issue to be resolved when destroying a heap. At that moment we need
  138. // to modify the global list of heaps and for this we need to touch the
  139. // heap structure for another heap. In order to do this we need to unprotect
  140. // and later protect it and for that we need to acquire the lock of that heap.
  141. // But this is prone to causing deadlocks. Until we will find a smart scheme
  142. // for doing this we will disable the whole /protect feature. Note also that
  143. // the same problem exists in the heap create code path where we have to update
  144. // the global list of heaps too.
  145. //
  146. // The best fix for this would be to move the fwrd/bwrd pointers for the heap
  147. // list from the DPH_HEAP_ROOT structure into the special R/W page that stores
  148. // the heap lock (needs to be always R/W).
  149. //
  150. #define PROTECT_HEAP_STRUCTURES( HeapRoot ) { \
  151. if ((HeapRoot)->HeapFlags & HEAP_PROTECTION_ENABLED ) { \
  152. RtlpDebugPageHeapProtectStructures( (HeapRoot) ); \
  153. } \
  154. } \
  155. #define UNPROTECT_HEAP_STRUCTURES( HeapRoot ) { \
  156. if ((HeapRoot)->HeapFlags & HEAP_PROTECTION_ENABLED ) { \
  157. RtlpDebugPageHeapUnProtectStructures( (HeapRoot) ); \
  158. } \
  159. } \
  160. //
  161. // RtlpDebugPageHeap
  162. //
  163. // Global variable that marks that page heap is enabled. It is set
  164. // in \nt\base\ntdll\ldrinit.c by reading the GlobalFlag registry
  165. // value (system wide or per process one) and checking if the
  166. // FLG_HEAP_PAGE_ALLOCS is set.
  167. //
  168. BOOLEAN RtlpDebugPageHeap;
  169. //
  170. // Internal version used to figure out what are people running
  171. // in various VBLs.
  172. //
  173. PCHAR RtlpDphVersion = "01/30/2001";
  174. //
  175. // Page heaps list manipulation.
  176. //
  177. // We maintain a list of all page heaps in the process to support
  178. // APIs like GetProcessHeaps. The list is also useful for debug
  179. // extensions that need to iterate the heaps. The list is protected
  180. // by RtlpDphHeapListCriticalSection lock.
  181. //
  182. BOOLEAN RtlpDphHeapListHasBeenInitialized;
  183. RTL_CRITICAL_SECTION RtlpDphHeapListCriticalSection;
  184. PDPH_HEAP_ROOT RtlpDphHeapListHead;
  185. PDPH_HEAP_ROOT RtlpDphHeapListTail;
  186. ULONG RtlpDphHeapListCount;
  187. //
  188. // `RtlpDebugPageHeapGlobalFlags' stores the global page heap flags.
  189. // The value of this variable is copied into the per heap
  190. // flags (ExtraFlags field) during heap creation.
  191. //
  192. // The initial value is so that by default we use page heap only with
  193. // normal allocations. This way if system wide global flag for page
  194. // heap is set the machine will still boot. After that we can enable
  195. // page heap with "sudden death" for specific processes. The most useful
  196. // flags for this case would be:
  197. //
  198. // PAGE_HEAP_ENABLE_PAGE_HEAP |
  199. // PAGE_HEAP_COLLECT_STACK_TRACES ;
  200. //
  201. // If no flags specified the default is page heap light with
  202. // stack trace collection.
  203. //
  204. ULONG RtlpDphGlobalFlags = PAGE_HEAP_COLLECT_STACK_TRACES;
  205. //
  206. // Page heap global flags.
  207. //
  208. // These values are read from registry in \nt\base\ntdll\ldrinit.c.
  209. //
  210. ULONG RtlpDphSizeRangeStart;
  211. ULONG RtlpDphSizeRangeEnd;
  212. ULONG RtlpDphDllRangeStart;
  213. ULONG RtlpDphDllRangeEnd;
  214. ULONG RtlpDphRandomProbability;
  215. WCHAR RtlpDphTargetDlls [512];
  216. UNICODE_STRING RtlpDphTargetDllsUnicode;
  217. //
  218. // If not zero controls the probability with which
  219. // allocations will be failed on purpose by page heap
  220. // manager. Timeout represents the initial period during
  221. // process initialization when faults are not allowed.
  222. //
  223. ULONG RtlpDphFaultProbability;
  224. ULONG RtlpDphFaultTimeOut;
  225. //
  226. // This variable offers volatile fault injection.
  227. // It can be set/reset from debugger to disable/enable
  228. // fault injection.
  229. //
  230. ULONG RtlpDphDisableFaults;
  231. //
  232. // `RtlpDphDebugLevel' controls debug messages in the code.
  233. //
  234. #define DPH_DEBUG_INTERNAL_VALIDATION 0x0001
  235. #define DPH_DEBUG_RESERVED_2 0x0002
  236. #define DPH_DEBUG_RESERVED_4 0x0004
  237. #define DPH_DEBUG_RESERVED_8 0x0008
  238. #define DPH_DEBUG_DECOMMIT_RANGES 0x0010
  239. #define DPH_DEBUG_SLOW_CHECKS 0x0080
  240. #define DPH_DEBUG_SHOW_VM_LIMITS 0x0100
  241. ULONG RtlpDphDebugLevel;
  242. //
  243. // Threshold for delaying a free operation in the normal heap.
  244. // If we get over this limit we start actually freeing blocks.
  245. //
  246. SIZE_T RtlpDphDelayedFreeCacheSize = 256 * PAGE_SIZE;
  247. //
  248. // Process wide trace database and the maximum size it can
  249. // grow to.
  250. //
  251. SIZE_T RtlpDphTraceDatabaseMaximumSize = 256 * PAGE_SIZE;
  252. PRTL_TRACE_DATABASE RtlpDphTraceDatabase;
  253. //
  254. // Support for normal heap allocations
  255. //
  256. // In order to make better use of memory available page heap will
  257. // allocate some of the block into a normal NT heap that it manages.
  258. // We will call these blocks "normal blocks" as opposed to "page blocks".
  259. //
  260. // All normal blocks have the requested size increased by DPH_BLOCK_INFORMATION.
  261. // The address returned is of course of the first byte after the block
  262. // info structure. Upon free, blocks are checked for corruption and
  263. // then released into the normal heap.
  264. //
  265. // All these normal heap functions are called with the page heap
  266. // lock acquired.
  267. //
  268. PVOID
  269. RtlpDphNormalHeapAllocate (
  270. PDPH_HEAP_ROOT Heap,
  271. ULONG Flags,
  272. SIZE_T Size
  273. );
  274. BOOLEAN
  275. RtlpDphNormalHeapFree (
  276. PDPH_HEAP_ROOT Heap,
  277. ULONG Flags,
  278. PVOID Block
  279. );
  280. PVOID
  281. RtlpDphNormalHeapReAllocate (
  282. PDPH_HEAP_ROOT Heap,
  283. ULONG Flags,
  284. PVOID OldBlock,
  285. SIZE_T Size
  286. );
  287. SIZE_T
  288. RtlpDphNormalHeapSize (
  289. PDPH_HEAP_ROOT Heap,
  290. ULONG Flags,
  291. PVOID Block
  292. );
  293. BOOLEAN
  294. RtlpDphNormalHeapSetUserFlags(
  295. IN PDPH_HEAP_ROOT Heap,
  296. IN ULONG Flags,
  297. IN PVOID Address,
  298. IN ULONG UserFlagsReset,
  299. IN ULONG UserFlagsSet
  300. );
  301. BOOLEAN
  302. RtlpDphNormalHeapSetUserValue(
  303. IN PDPH_HEAP_ROOT Heap,
  304. IN ULONG Flags,
  305. IN PVOID Address,
  306. IN PVOID UserValue
  307. );
  308. BOOLEAN
  309. RtlpDphNormalHeapGetUserInfo(
  310. IN PDPH_HEAP_ROOT Heap,
  311. IN ULONG Flags,
  312. IN PVOID Address,
  313. OUT PVOID* UserValue,
  314. OUT PULONG UserFlags
  315. );
  316. BOOLEAN
  317. RtlpDphNormalHeapValidate(
  318. IN PDPH_HEAP_ROOT Heap,
  319. IN ULONG Flags,
  320. IN PVOID Address
  321. );
  322. //
  323. // Support for DPH_BLOCK_INFORMATION management
  324. //
  325. // This header information prefixes both the normal and page heap
  326. // blocks.
  327. //
  328. #define DPH_CONTEXT_GENERAL 0
  329. #define DPH_CONTEXT_FULL_PAGE_HEAP_FREE 1
  330. #define DPH_CONTEXT_FULL_PAGE_HEAP_REALLOC 2
  331. #define DPH_CONTEXT_FULL_PAGE_HEAP_DESTROY 3
  332. #define DPH_CONTEXT_NORMAL_PAGE_HEAP_FREE 4
  333. #define DPH_CONTEXT_NORMAL_PAGE_HEAP_REALLOC 5
  334. #define DPH_CONTEXT_NORMAL_PAGE_HEAP_SETFLAGS 6
  335. #define DPH_CONTEXT_NORMAL_PAGE_HEAP_SETVALUE 7
  336. #define DPH_CONTEXT_NORMAL_PAGE_HEAP_GETINFO 8
  337. #define DPH_CONTEXT_DELAYED_FREE 9
  338. #define DPH_CONTEXT_DELAYED_DESTROY 10
  339. VOID
  340. RtlpDphReportCorruptedBlock (
  341. PVOID Heap,
  342. ULONG Context,
  343. PVOID Block,
  344. ULONG Reason
  345. );
  346. BOOLEAN
  347. RtlpDphIsNormalHeapBlock (
  348. PDPH_HEAP_ROOT Heap,
  349. PVOID Block,
  350. PULONG Reason,
  351. BOOLEAN CheckPattern
  352. );
  353. BOOLEAN
  354. RtlpDphIsNormalFreeHeapBlock (
  355. PVOID Block,
  356. PULONG Reason,
  357. BOOLEAN CheckPattern
  358. );
  359. BOOLEAN
  360. RtlpDphIsPageHeapBlock (
  361. PDPH_HEAP_ROOT Heap,
  362. PVOID Block,
  363. PULONG Reason,
  364. BOOLEAN CheckPattern
  365. );
  366. BOOLEAN
  367. RtlpDphWriteNormalHeapBlockInformation (
  368. PDPH_HEAP_ROOT Heap,
  369. PVOID Block,
  370. SIZE_T RequestedSize,
  371. SIZE_T ActualSize
  372. );
  373. BOOLEAN
  374. RtlpDphWritePageHeapBlockInformation (
  375. PDPH_HEAP_ROOT Heap,
  376. PVOID Block,
  377. SIZE_T RequestedSize,
  378. SIZE_T ActualSize
  379. );
  380. BOOLEAN
  381. RtlpDphGetBlockSizeFromCorruptedBlock (
  382. PVOID Block,
  383. PSIZE_T Size
  384. );
  385. //
  386. // Delayed free queue (of normal heap allocations) management
  387. //
  388. VOID
  389. RtlpDphInitializeDelayedFreeQueue (
  390. );
  391. VOID
  392. RtlpDphAddToDelayedFreeQueue (
  393. PDPH_BLOCK_INFORMATION Info
  394. );
  395. BOOLEAN
  396. RtlpDphNeedToTrimDelayedFreeQueue (
  397. PSIZE_T TrimSize
  398. );
  399. VOID
  400. RtlpDphTrimDelayedFreeQueue (
  401. SIZE_T TrimSize,
  402. ULONG Flags
  403. );
  404. VOID
  405. RtlpDphFreeDelayedBlocksFromHeap (
  406. PVOID PageHeap,
  407. PVOID NormalHeap
  408. );
  409. //
  410. // Decision normal heap vs. page heap
  411. //
  412. BOOLEAN
  413. RtlpDphShouldAllocateInPageHeap (
  414. PDPH_HEAP_ROOT Heap,
  415. SIZE_T Size
  416. );
  417. BOOLEAN
  418. RtlpDphVmLimitCanUsePageHeap (
  419. );
  420. //
  421. // Stack trace detection for trace database.
  422. //
  423. PRTL_TRACE_BLOCK
  424. RtlpDphLogStackTrace (
  425. ULONG FramesToSkip
  426. );
  427. //
  428. // Page heap general support functions
  429. //
  430. VOID
  431. RtlpDebugPageHeapBreak(
  432. IN PCH Text
  433. );
  434. BOOLEAN
  435. RtlpDebugPageHeapAssert(
  436. IN PCH Text
  437. );
  438. VOID
  439. RtlpDebugPageHeapEnterCritSect(
  440. IN PDPH_HEAP_ROOT HeapRoot,
  441. IN ULONG Flags
  442. );
  443. INLINE
  444. VOID
  445. RtlpDebugPageHeapLeaveCritSect(
  446. IN PDPH_HEAP_ROOT HeapRoot
  447. );
  448. VOID
  449. RtlpDebugPageHeapException(
  450. IN ULONG ExceptionCode
  451. );
  452. PVOID
  453. RtlpDebugPageHeapPointerFromHandle(
  454. IN PVOID HeapHandle
  455. );
  456. PCCH
  457. RtlpDebugPageHeapProtectionText(
  458. IN ULONG Access,
  459. IN OUT PCHAR Buffer
  460. );
  461. //
  462. // Virtual memory manipulation functions
  463. //
  464. BOOLEAN
  465. RtlpDebugPageHeapRobustProtectVM(
  466. IN PVOID VirtualBase,
  467. IN SIZE_T VirtualSize,
  468. IN ULONG NewAccess,
  469. IN BOOLEAN Recursion
  470. );
  471. INLINE
  472. BOOLEAN
  473. RtlpDebugPageHeapProtectVM(
  474. IN PVOID VirtualBase,
  475. IN SIZE_T VirtualSize,
  476. IN ULONG NewAccess
  477. );
  478. INLINE
  479. PVOID
  480. RtlpDebugPageHeapAllocateVM(
  481. IN SIZE_T nSize
  482. );
  483. INLINE
  484. BOOLEAN
  485. RtlpDebugPageHeapReleaseVM(
  486. IN PVOID pVirtual
  487. );
  488. INLINE
  489. BOOLEAN
  490. RtlpDebugPageHeapCommitVM(
  491. IN PVOID pVirtual,
  492. IN SIZE_T nSize
  493. );
  494. INLINE
  495. BOOLEAN
  496. RtlpDebugPageHeapDecommitVM(
  497. IN PVOID pVirtual,
  498. IN SIZE_T nSize
  499. );
  500. //
  501. // Target dlls logic
  502. //
  503. // RtlpDphTargetDllsLoadCallBack is called in ntdll\ldrapi.c
  504. // (LdrpLoadDll) whenever a new dll is loaded in the process
  505. // space.
  506. //
  507. VOID
  508. RtlpDphTargetDllsLogicInitialize (
  509. );
  510. VOID
  511. RtlpDphTargetDllsLoadCallBack (
  512. PUNICODE_STRING Name,
  513. PVOID Address,
  514. ULONG Size
  515. );
  516. const WCHAR *
  517. RtlpDphIsDllTargeted (
  518. const WCHAR * Name
  519. );
  520. //
  521. // Internal heap validation
  522. //
  523. VOID
  524. RtlpDphInternalValidatePageHeap (
  525. PDPH_HEAP_ROOT Heap,
  526. PUCHAR ExemptAddress,
  527. SIZE_T ExemptSize
  528. );
  529. VOID
  530. RtlpDphValidateInternalLists (
  531. PDPH_HEAP_ROOT Heap
  532. );
  533. //
  534. // Fault injection logic
  535. //
  536. BOOLEAN
  537. RtlpDphShouldFaultInject (
  538. );
  539. //
  540. // Free delayed cache internal checking
  541. //
  542. VOID
  543. RtlpDphCheckFreeDelayedCache (
  544. PVOID CheckBlock,
  545. SIZE_T CheckSize
  546. );
  547. //
  548. // Defined in \base\ntdll\resource.c
  549. //
  550. VOID
  551. RtlpCheckForCriticalSectionsInMemoryRange(
  552. IN PVOID StartAddress,
  553. IN SIZE_T RegionSize,
  554. IN PVOID Information
  555. );
  556. /////////////////////////////////////////////////////////////////////
  557. ///////////////////////////////// Page heap general support functions
  558. /////////////////////////////////////////////////////////////////////
//
// Print a diagnostic message and break into the debugger.
// Used by the page heap for fatal/assert-style notifications.
//
// NOTE(review): Text is passed to DbgPrint as the format string, so it
// must not contain user-controlled '%' sequences.
//
VOID
RtlpDebugPageHeapBreak(
    IN PCH Text
    )
{
    DbgPrint( Text );
    DbgBreakPoint();
}
//
// Assertion-failure helper used by the RETAIL_ASSERT macro: prints the
// failed-assert text, breaks into the debugger, and always returns FALSE
// so the macro's ternary expression yields FALSE on failure.
//
BOOLEAN
RtlpDebugPageHeapAssert(
    IN PCH Text
    )
{
    RtlpDebugPageHeapBreak( Text );
    return FALSE;
}
//
// Acquire the page heap's critical section.
//
// If the caller specified HEAP_NO_SERIALIZE we still take the lock, but
// first detect (and report via verifier stops) multithreaded use of a
// heap that claimed it needed no serialization. HeapRoot->FirstThread
// records the first thread ever to enter this heap so later threads can
// be compared against it.
//
VOID
RtlpDebugPageHeapEnterCritSect(
    IN PDPH_HEAP_ROOT HeapRoot,
    IN ULONG Flags
    )
{
    // Remember the first thread that ever touched this heap (benign race:
    // only used for the diagnostic comparison below).
    if (HeapRoot->FirstThread == NULL) {
        HeapRoot->FirstThread = NtCurrentTeb()->ClientId.UniqueThread;
    }

    if (Flags & HEAP_NO_SERIALIZE) {

        //
        // If current thread has a different ID than the first thread
        // that got into this heap then we break. Avoid this check if
        // this allocation comes from Global/Local Heap APIs because
        // they lock the heap in a separate call and then they call
        // NT heap APIs with no_serialize flag set.
        //
        // Note. We avoid this check if we do not have the specific flag
        // on. This is so because MPheap-like heaps can give false
        // positives.
        //
        if ((HeapRoot->ExtraFlags & PAGE_HEAP_CHECK_NO_SERIALIZE_ACCESS)) {
            if (RtlpDebugPageHeapPointerFromHandle(RtlProcessHeap()) != HeapRoot) {
                if (HeapRoot->FirstThread != NtCurrentTeb()->ClientId.UniqueThread) {
                    VERIFIER_STOP (APPLICATION_VERIFIER_UNSYNCHRONIZED_ACCESS,
                                   "multithreaded access in HEAP_NO_SERIALIZE heap",
                                   HeapRoot, "Heap handle",
                                   HeapRoot->FirstThread, "First thread that used the heap",
                                   NtCurrentTeb()->ClientId.UniqueThread, "Current thread using the heap",
                                   1, "/no_sync option used");
                }
            }
        }

        if (! RtlTryEnterCriticalSection( HeapRoot->HeapCritSect )) {

            // nRemoteLockAcquired != 0 means a debugger/remote operation
            // legitimately holds the lock, so do not report a stop then.
            if (HeapRoot->nRemoteLockAcquired == 0) {

                //
                // Another thread owns the CritSect. This is an application
                // bug since multithreaded access to heap was attempted with
                // the HEAP_NO_SERIALIZE flag specified.
                //
                VERIFIER_STOP (APPLICATION_VERIFIER_UNSYNCHRONIZED_ACCESS,
                               "multithreaded access in HEAP_NO_SERIALIZE heap",
                               HeapRoot, "Heap handle",
                               HeapRoot->HeapCritSect->OwningThread, "Thread owning heap lock",
                               NtCurrentTeb()->ClientId.UniqueThread, "Current thread trying to acquire the heap lock",
                               0, "");

                //
                // In the interest of allowing the errant app to continue,
                // we'll force serialization and continue.
                //
                HeapRoot->HeapFlags &= ~HEAP_NO_SERIALIZE;
            }

            // Block until the owner releases the lock.
            RtlEnterCriticalSection( HeapRoot->HeapCritSect );
        }
    }
    else {
        // Normal serialized path.
        RtlEnterCriticalSection( HeapRoot->HeapCritSect );
    }
}
//
// Release the page heap's critical section acquired by
// RtlpDebugPageHeapEnterCritSect.
//
INLINE
VOID
RtlpDebugPageHeapLeaveCritSect(
    IN PDPH_HEAP_ROOT HeapRoot
    )
{
    RtlLeaveCriticalSection( HeapRoot->HeapCritSect );
}
  642. VOID
  643. RtlpDebugPageHeapException(
  644. IN ULONG ExceptionCode
  645. )
  646. {
  647. EXCEPTION_RECORD ER;
  648. ER.ExceptionCode = ExceptionCode;
  649. ER.ExceptionFlags = 0;
  650. ER.ExceptionRecord = NULL;
  651. ER.ExceptionAddress = RtlpDebugPageHeapException;
  652. ER.NumberParameters = 0;
  653. RtlRaiseException( &ER );
  654. }
//
// Translate a heap handle (as returned to the application) into the
// internal DPH_HEAP_ROOT pointer.
//
// The page heap places its DPH_HEAP_ROOT one page after the fake PHEAP
// header it hands out (see HEAP_HANDLE_FROM_ROOT), so the reverse mapping
// is handle + PAGE_SIZE, validated by the DPH_HEAP_SIGNATURE field.
// The probe is wrapped in SEH because a bogus handle may point at
// unmapped memory. Returns NULL (after a verifier stop) for a handle
// that is not a page heap handle.
//
PVOID
RtlpDebugPageHeapPointerFromHandle(
    IN PVOID HeapHandle
    )
{
    try {
        if (((PHEAP)(HeapHandle))->ForceFlags & HEAP_FLAG_PAGE_ALLOCS) {
            PDPH_HEAP_ROOT HeapRoot = (PVOID)(((PCHAR)(HeapHandle)) + PAGE_SIZE );
            if (HeapRoot->Signature == DPH_HEAP_SIGNATURE) {
                return HeapRoot;
            }
        }
    }
    except( EXCEPTION_EXECUTE_HANDLER ) {
        // Fall through to the bad-handle report below.
    }

    VERIFIER_STOP (APPLICATION_VERIFIER_BAD_HEAP_HANDLE,
                   "heap handle with incorrect signature",
                   HeapHandle, "Heap handle",
                   0, "", 0, "", 0, "");

    return NULL;
}
  676. PCCH
  677. RtlpDebugPageHeapProtectionText(
  678. IN ULONG Access,
  679. IN OUT PCHAR Buffer
  680. )
  681. {
  682. switch (Access) {
  683. case PAGE_NOACCESS: return "PAGE_NOACCESS";
  684. case PAGE_READONLY: return "PAGE_READONLY";
  685. case PAGE_READWRITE: return "PAGE_READWRITE";
  686. case PAGE_WRITECOPY: return "PAGE_WRITECOPY";
  687. case PAGE_EXECUTE: return "PAGE_EXECUTE";
  688. case PAGE_EXECUTE_READ: return "PAGE_EXECUTE_READ";
  689. case PAGE_EXECUTE_READWRITE: return "PAGE_EXECUTE_READWRITE";
  690. case PAGE_EXECUTE_WRITECOPY: return "PAGE_EXECUTE_WRITECOPY";
  691. case PAGE_GUARD: return "PAGE_GUARD";
  692. case 0: return "UNKNOWN";
  693. default: sprintf( Buffer, "0x%08X", Access );
  694. return Buffer;
  695. }
  696. }
  697. /////////////////////////////////////////////////////////////////////
  698. /////////////////////////////// Virtual memory manipulation functions
  699. /////////////////////////////////////////////////////////////////////
//
// Change the protection of [VirtualBase, VirtualBase + VirtualSize) to
// NewAccess. If the whole-range ZwProtectVirtualMemory call fails and
// this is the top-level invocation (Recursion == FALSE), retry page by
// page by calling ourselves recursively with Recursion == TRUE.
// Returns TRUE only if every page was protected successfully.
//
BOOLEAN
RtlpDebugPageHeapRobustProtectVM(
    IN PVOID VirtualBase,
    IN SIZE_T VirtualSize,
    IN ULONG NewAccess,
    IN BOOLEAN Recursion
    )
{
    // ZwProtectVirtualMemory rounds and updates its base/size arguments
    // in place, so work on copies to preserve the originals for the
    // page-by-page retry path.
    PVOID CopyOfVirtualBase = VirtualBase;
    SIZE_T CopyOfVirtualSize = VirtualSize;
    ULONG OldAccess;
    NTSTATUS Status;

    Status = ZwProtectVirtualMemory(
        NtCurrentProcess(),
        &CopyOfVirtualBase,
        &CopyOfVirtualSize,
        NewAccess,
        &OldAccess
        );

    if (NT_SUCCESS( Status ))
        return TRUE;

    if (! Recursion) {

        //
        // We failed to change the protection on a range of memory.
        // This can happen if the range of memory spans more than
        // one adjacent block allocated by separate calls to
        // ZwAllocateVirtualMemory. It also seems to fail occasionally
        // for reasons unknown to me, but always when attempting to
        // change the protection on more than one page in a single call.
        // So, fall back to changing pages individually in this range.
        // This should be rare, so it should not be a performance problem.
        //

        // Round the end up and the start down to page boundaries so the
        // loop covers every page the original range touched.
        PCHAR VirtualExtent = (PCHAR)ROUNDUP2((ULONG_PTR)((PCHAR)VirtualBase + VirtualSize ), PAGE_SIZE );
        PCHAR VirtualPage = (PCHAR)((ULONG_PTR)VirtualBase & ~( PAGE_SIZE - 1 ));
        BOOLEAN SuccessAll = TRUE;
        BOOLEAN SuccessOne;

        while (VirtualPage < VirtualExtent) {
            SuccessOne = RtlpDebugPageHeapRobustProtectVM(
                VirtualPage,
                PAGE_SIZE,
                NewAccess,
                TRUE
                );
            if (! SuccessOne) {
                SuccessAll = FALSE;
            }
            VirtualPage += PAGE_SIZE;
        }

        return SuccessAll; // TRUE if all succeeded, FALSE if any failed
    }
    else {

        // Single-page (recursive) attempt failed: dump the old and new
        // protections to aid diagnosis, then report failure.
        MEMORY_BASIC_INFORMATION mbi;
        CHAR OldProtectionText[ 12 ]; // big enough for "0x12345678"
        CHAR NewProtectionText[ 12 ]; // big enough for "0x12345678"

        mbi.Protect = 0; // in case ZwQueryVirtualMemory fails

        ZwQueryVirtualMemory(
            NtCurrentProcess(),
            VirtualBase,
            MemoryBasicInformation,
            &mbi,
            sizeof( mbi ),
            NULL
            );

        DbgPrint(
            "Page heap: Failed changing VM at %08X size 0x%X\n"
            " from %s to %s (Status %08X)\n",
            VirtualBase,
            VirtualSize,
            RtlpDebugPageHeapProtectionText( mbi.Protect, OldProtectionText ),
            RtlpDebugPageHeapProtectionText( NewAccess, NewProtectionText ),
            Status
            );
    }

    return FALSE;
}
//
// Convenience wrapper: change protection on a VM range, with automatic
// page-by-page fallback handled inside RtlpDebugPageHeapRobustProtectVM
// (Recursion == FALSE selects the top-level, retrying behavior).
//
INLINE
BOOLEAN
RtlpDebugPageHeapProtectVM(
    IN PVOID VirtualBase,
    IN SIZE_T VirtualSize,
    IN ULONG NewAccess
    )
{
    return RtlpDebugPageHeapRobustProtectVM( VirtualBase, VirtualSize, NewAccess, FALSE );
}
  785. INLINE
  786. PVOID
  787. RtlpDebugPageHeapAllocateVM(
  788. IN SIZE_T nSize
  789. )
  790. {
  791. NTSTATUS Status;
  792. PVOID pVirtual;
  793. pVirtual = NULL;
  794. Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
  795. &pVirtual,
  796. 0,
  797. &nSize,
  798. MEM_COMMIT,
  799. PAGE_NOACCESS );
  800. return NT_SUCCESS( Status ) ? pVirtual : NULL;
  801. }
  802. INLINE
  803. BOOLEAN
  804. RtlpDebugPageHeapReleaseVM(
  805. IN PVOID pVirtual
  806. )
  807. {
  808. SIZE_T nSize = 0;
  809. return NT_SUCCESS( RtlpHeapFreeVirtualMemory( NtCurrentProcess(),
  810. &pVirtual,
  811. &nSize,
  812. MEM_RELEASE ));
  813. }
//
// Commit the pages overlapping [pVirtual, pVirtual + nSize), one page at
// a time with PAGE_NOACCESS protection. If any page fails to commit
// (e.g. low-memory conditions), every page committed so far is rolled
// back (decommitted) and FALSE is returned; TRUE on full success.
//
INLINE
BOOLEAN
RtlpDebugPageHeapCommitVM(
    IN PVOID pVirtual,
    IN SIZE_T nSize
    )
{
    PCHAR pStart, pEnd, pCurrent;
    NTSTATUS Status;
    SIZE_T CommitSize;
    BOOLEAN Failed = FALSE;

    // Round start down and end down to page boundaries; the loop commits
    // whole pages in [pStart, pEnd).
    pStart = (PCHAR)((ULONG_PTR)pVirtual & ~(PAGE_SIZE - 1));
    pEnd = (PCHAR)(((ULONG_PTR)pVirtual + nSize) & ~(PAGE_SIZE - 1));

    for (pCurrent = pStart; pCurrent < pEnd; pCurrent += PAGE_SIZE) {

        CommitSize = PAGE_SIZE;

        Status = ZwAllocateVirtualMemory(
            NtCurrentProcess(),
            &pCurrent,
            0,
            &CommitSize,
            MEM_COMMIT,
            PAGE_NOACCESS);

        if (! NT_SUCCESS(Status)) {

            //
            // The call can fail in low memory conditions. In this case we
            // try to recover and will probably fail the original allocation.
            //
            if ((RtlpDphDebugLevel & DPH_DEBUG_DECOMMIT_RANGES)) {
                VERIFIER_STOP (APPLICATION_VERIFIER_INTERNAL_WARNING,
                               "page heap failed to commit memory",
                               pCurrent, "",
                               CommitSize, "",
                               Status, "",
                               0, "");
            }

            Failed = TRUE;
            break;
        }
    }

    if (Failed) {

        //
        // We need to roll back whatever succeeded: walk backwards from the
        // page before the failing one, decommitting each page.
        //
        for (pCurrent -= PAGE_SIZE; pCurrent >= pStart && pCurrent < pEnd; pCurrent -= PAGE_SIZE) {

            CommitSize = PAGE_SIZE;

            Status = RtlpHeapFreeVirtualMemory(
                NtCurrentProcess(),
                &pCurrent,
                &CommitSize,
                MEM_DECOMMIT);

            if (! NT_SUCCESS(Status)) {

                //
                // There is no valid reason known to me for a correct free
                // operation failure. So, in this case we make a little bit
                // of fuss about it.
                //
                VERIFIER_STOP (APPLICATION_VERIFIER_INTERNAL_WARNING,
                               "page heap failed to decommit memory",
                               pCurrent, "",
                               CommitSize, "",
                               Status, "",
                               0, "");
            }
        }
    }

    if (Failed) {
        return FALSE;
    }
    else {
        return TRUE;
    }
}
  885. INLINE
  886. BOOLEAN
  887. RtlpDebugPageHeapDecommitVM(
  888. IN PVOID pVirtual,
  889. IN SIZE_T nSize
  890. )
  891. {
  892. PCHAR pStart, pEnd, pCurrent;
  893. NTSTATUS Status;
  894. SIZE_T DecommitSize;
  895. BOOLEAN Failed = FALSE;
  896. pStart = (PCHAR)((ULONG_PTR)pVirtual & ~(PAGE_SIZE - 1));
  897. pEnd = (PCHAR)(((ULONG_PTR)pVirtual + nSize) & ~(PAGE_SIZE - 1));
  898. for (pCurrent = pStart; pCurrent < pEnd; pCurrent += PAGE_SIZE) {
  899. DecommitSize = PAGE_SIZE;
  900. Status = RtlpHeapFreeVirtualMemory(
  901. NtCurrentProcess(),
  902. &pCurrent,
  903. &DecommitSize,
  904. MEM_DECOMMIT);
  905. if (! NT_SUCCESS(Status)) {
  906. //
  907. // There is now valid reason known to me for a correct free operation
  908. // failure. So, in this case we make a little bit of fuss about it.
  909. //
  910. VERIFIER_STOP (APPLICATION_VERIFIER_INTERNAL_WARNING,
  911. "page heap failed to commit memory",
  912. pCurrent, "",
  913. DecommitSize, "",
  914. Status, "",
  915. 0, "");
  916. Failed = TRUE;
  917. }
  918. }
  919. if (Failed) {
  920. return FALSE;
  921. }
  922. else {
  923. return TRUE;
  924. }
  925. }
  926. /////////////////////////////////////////////////////////////////////
  927. //////////////////////////////////////// Internal page heap functions
  928. /////////////////////////////////////////////////////////////////////
  929. PDPH_HEAP_BLOCK
  930. RtlpDebugPageHeapTakeNodeFromUnusedList(
  931. IN PDPH_HEAP_ROOT pHeap
  932. )
  933. {
  934. PDPH_HEAP_BLOCK pNode = pHeap->pUnusedNodeListHead;
  935. PDPH_HEAP_BLOCK pPrev = NULL;
  936. //
  937. // UnusedNodeList is LIFO with most recent entry at head of list.
  938. //
  939. if (pNode) {
  940. DEQUEUE_NODE( pNode, pPrev, pHeap->pUnusedNodeListHead, pHeap->pUnusedNodeListTail );
  941. pHeap->nUnusedNodes -= 1;
  942. }
  943. return pNode;
  944. }
  945. VOID
  946. RtlpDebugPageHeapReturnNodeToUnusedList(
  947. IN PDPH_HEAP_ROOT pHeap,
  948. IN PDPH_HEAP_BLOCK pNode
  949. )
  950. {
  951. //
  952. // UnusedNodeList is LIFO with most recent entry at head of list.
  953. //
  954. ENQUEUE_HEAD( pNode, pHeap->pUnusedNodeListHead, pHeap->pUnusedNodeListTail );
  955. pHeap->nUnusedNodes += 1;
  956. }
  957. PDPH_HEAP_BLOCK
  958. RtlpDebugPageHeapFindBusyMem(
  959. IN PDPH_HEAP_ROOT pHeap,
  960. IN PVOID pUserMem,
  961. OUT PDPH_HEAP_BLOCK *pPrevAlloc
  962. )
  963. {
  964. PDPH_HEAP_BLOCK pNode = pHeap->pBusyAllocationListHead;
  965. PDPH_HEAP_BLOCK pPrev = NULL;
  966. while (pNode != NULL) {
  967. if (pNode->pUserAllocation == pUserMem) {
  968. if (pPrevAlloc)
  969. *pPrevAlloc = pPrev;
  970. return pNode;
  971. }
  972. pPrev = pNode;
  973. pNode = pNode->pNextAlloc;
  974. }
  975. return NULL;
  976. }
  977. VOID
  978. RtlpDebugPageHeapRemoveFromAvailableList(
  979. IN PDPH_HEAP_ROOT pHeap,
  980. IN PDPH_HEAP_BLOCK pNode,
  981. IN PDPH_HEAP_BLOCK pPrev
  982. )
  983. {
  984. DEQUEUE_NODE( pNode, pPrev, pHeap->pAvailableAllocationListHead, pHeap->pAvailableAllocationListTail );
  985. pHeap->nAvailableAllocations -= 1;
  986. pHeap->nAvailableAllocationBytesCommitted -= pNode->nVirtualBlockSize;
  987. }
  988. VOID
  989. RtlpDebugPageHeapPlaceOnFreeList(
  990. IN PDPH_HEAP_ROOT pHeap,
  991. IN PDPH_HEAP_BLOCK pAlloc
  992. )
  993. {
  994. //
  995. // FreeAllocationList is stored FIFO to enhance finding
  996. // reference-after-freed bugs by keeping previously freed
  997. // allocations on the free list as long as possible.
  998. //
  999. pAlloc->pNextAlloc = NULL;
  1000. ENQUEUE_TAIL( pAlloc, pHeap->pFreeAllocationListHead, pHeap->pFreeAllocationListTail );
  1001. pHeap->nFreeAllocations += 1;
  1002. pHeap->nFreeAllocationBytesCommitted += pAlloc->nVirtualBlockSize;
  1003. }
VOID
RtlpDebugPageHeapRemoveFromFreeList(
    IN PDPH_HEAP_ROOT pHeap,
    IN PDPH_HEAP_BLOCK pNode,
    IN PDPH_HEAP_BLOCK pPrev
    )
{
    //
    // Unlink pNode from the free list (pPrev is its predecessor, NULL
    // when pNode is the head) and update the free-list accounting.
    //
    DEQUEUE_NODE( pNode, pPrev, pHeap->pFreeAllocationListHead, pHeap->pFreeAllocationListTail );
    pHeap->nFreeAllocations -= 1;
    pHeap->nFreeAllocationBytesCommitted -= pNode->nVirtualBlockSize;

    // The node is leaving the free list; its captured stack trace no
    // longer describes a live free block, so drop the reference.
    pNode->StackTrace = NULL;
}
VOID
RtlpDebugPageHeapPlaceOnVirtualList(
    IN PDPH_HEAP_ROOT pHeap,
    IN PDPH_HEAP_BLOCK pNode
    )
{
    //
    // VirtualStorageList is LIFO so that releasing VM blocks will
    // occur in exact reverse order.
    //
    ENQUEUE_HEAD( pNode, pHeap->pVirtualStorageListHead, pHeap->pVirtualStorageListTail );

    // Track both the number of reserved VM ranges and their total size.
    pHeap->nVirtualStorageRanges += 1;
    pHeap->nVirtualStorageBytes += pNode->nVirtualBlockSize;
}
VOID
RtlpDebugPageHeapPlaceOnBusyList(
    IN PDPH_HEAP_ROOT pHeap,
    IN PDPH_HEAP_BLOCK pNode
    )
{
    //
    // BusyAllocationList is LIFO to achieve better temporal locality
    // of reference (older allocations are farther down the list).
    //
    ENQUEUE_HEAD( pNode, pHeap->pBusyAllocationListHead, pHeap->pBusyAllocationListTail );

    // Busy accounting tracks both committed bytes and the (smaller)
    // user-accessible portion of each block.
    pHeap->nBusyAllocations += 1;
    pHeap->nBusyAllocationBytesCommitted += pNode->nVirtualBlockSize;
    pHeap->nBusyAllocationBytesAccessible += pNode->nVirtualAccessSize;
}
VOID
RtlpDebugPageHeapRemoveFromBusyList(
    IN PDPH_HEAP_ROOT pHeap,
    IN PDPH_HEAP_BLOCK pNode,
    IN PDPH_HEAP_BLOCK pPrev
    )
{
    //
    // Unlink pNode from the busy-allocation list (pPrev is its
    // predecessor, NULL when pNode is the head) and reverse the
    // accounting performed by RtlpDebugPageHeapPlaceOnBusyList.
    //
    DEQUEUE_NODE( pNode, pPrev, pHeap->pBusyAllocationListHead, pHeap->pBusyAllocationListTail );
    pHeap->nBusyAllocations -= 1;
    pHeap->nBusyAllocationBytesCommitted -= pNode->nVirtualBlockSize;
    pHeap->nBusyAllocationBytesAccessible -= pNode->nVirtualAccessSize;
}
  1057. PDPH_HEAP_BLOCK
  1058. RtlpDebugPageHeapSearchAvailableMemListForBestFit(
  1059. IN PDPH_HEAP_ROOT pHeap,
  1060. IN SIZE_T nSize,
  1061. OUT PDPH_HEAP_BLOCK *pPrevAvailNode
  1062. )
  1063. {
  1064. PDPH_HEAP_BLOCK pAvail, pFound, pAvailPrev, pFoundPrev;
  1065. SIZE_T nAvail, nFound;
  1066. nFound = 0x7FFFFFFF;
  1067. pFound = NULL;
  1068. pFoundPrev = NULL;
  1069. pAvailPrev = NULL;
  1070. pAvail = pHeap->pAvailableAllocationListHead;
  1071. while (( pAvail != NULL ) && ( nFound > nSize )) {
  1072. nAvail = pAvail->nVirtualBlockSize;
  1073. if (( nAvail >= nSize ) && ( nAvail < nFound )) {
  1074. nFound = nAvail;
  1075. pFound = pAvail;
  1076. pFoundPrev = pAvailPrev;
  1077. }
  1078. pAvailPrev = pAvail;
  1079. pAvail = pAvail->pNextAlloc;
  1080. }
  1081. *pPrevAvailNode = pFoundPrev;
  1082. return pFound;
  1083. }
//
// Counters for # times coalesce operations got rejected
// to avoid cross-VAD issues.
//
// Index usage (see RtlpDphSameVirtualRegion):
//   [0] merged   - VAD query showed both nodes share an AllocationBase
//   [1] rejected - nodes belong to different VADs
//   [2] merged   - fast path: both nodes within the same 64K VM unit
//   [3] rejected - ZwQueryVirtualMemory failed for one of the nodes
//
LONG RtlpDphCoalesceStatistics [4];

//
// Rounds pointer/value P down to the nearest multiple of Sz.
// Sz must be a power of two for the mask arithmetic to be valid.
//
#define ALIGN_TO_SIZE(P, Sz) (((ULONG_PTR)(P)) & ~((ULONG_PTR)(Sz) - 1))
BOOLEAN
RtlpDphSameVirtualRegion (
    IN PDPH_HEAP_BLOCK Left,
    IN PDPH_HEAP_BLOCK Right
    )
/*++

Routine description:

    This function tries to figure out if two nodes are part of the
    same VAD. The function is used during coalescing in order to avoid
    merging together blocks from different VADs. If we do not do this
    we will break applications that do GDI calls.

    Returns TRUE when both blocks are known to belong to the same
    virtual allocation region, FALSE otherwise (including when the
    query itself fails -- we conservatively refuse to coalesce).

    SilviuC: this can be done differently if we keep the VAD address in
    every node and make sure to propagate the value when nodes get split.
    Then this function will just be a comparison of the two values.

--*/
{
    PVOID LeftRegion;
    MEMORY_BASIC_INFORMATION MemoryInfo;
    NTSTATUS Status;
    SIZE_T ReturnLength;

    //
    // If blocks are in the same 64K chunk we are okay.
    // (Fast path that avoids two system calls.)
    //
    if (ALIGN_TO_SIZE(Left->pVirtualBlock, VM_UNIT_SIZE)
        == ALIGN_TO_SIZE(Right->pVirtualBlock, VM_UNIT_SIZE)) {

        InterlockedIncrement (&(RtlpDphCoalesceStatistics[2]));
        return TRUE;
    }

    //
    // Call query() to find out what is the start address of the
    // VAD for each node.
    //
    Status = ZwQueryVirtualMemory (NtCurrentProcess(),
                                   Left->pVirtualBlock,
                                   MemoryBasicInformation,
                                   &MemoryInfo,
                                   sizeof MemoryInfo,
                                   &ReturnLength);

    if (! NT_SUCCESS(Status)) {
        // Query failed: refuse coalescing to stay on the safe side.
        InterlockedIncrement (&(RtlpDphCoalesceStatistics[3]));
        return FALSE;
    }

    LeftRegion = MemoryInfo.AllocationBase;

    Status = ZwQueryVirtualMemory (NtCurrentProcess(),
                                   Right->pVirtualBlock,
                                   MemoryBasicInformation,
                                   &MemoryInfo,
                                   sizeof MemoryInfo,
                                   &ReturnLength);

    if (! NT_SUCCESS(Status)) {
        // Query failed: refuse coalescing to stay on the safe side.
        InterlockedIncrement (&(RtlpDphCoalesceStatistics[3]));
        return FALSE;
    }

    //
    // Same AllocationBase means both blocks come from the same VAD.
    //
    if (LeftRegion == MemoryInfo.AllocationBase) {
        InterlockedIncrement (&(RtlpDphCoalesceStatistics[0]));
        return TRUE;
    }
    else {
        InterlockedIncrement (&(RtlpDphCoalesceStatistics[1]));
        return FALSE;
    }
}
VOID
RtlpDebugPageHeapCoalesceNodeIntoAvailable(
    IN PDPH_HEAP_ROOT pHeap,
    IN PDPH_HEAP_BLOCK pNode
    )
/*++

Routine Description:

    Inserts pNode into the available list, which is kept sorted by
    virtual address, merging it with the preceding and/or following
    entry when they are virtually adjacent AND belong to the same
    virtual region (see RtlpDphSameVirtualRegion).  Merged-away nodes
    are returned to the unused-node list.

--*/
{
    PDPH_HEAP_BLOCK pPrev;
    PDPH_HEAP_BLOCK pNext;
    PUCHAR pVirtual;
    SIZE_T nVirtual;

    pPrev = NULL;
    pNext = pHeap->pAvailableAllocationListHead;
    pVirtual = pNode->pVirtualBlock;
    nVirtual = pNode->nVirtualBlockSize;

    // Account for the incoming block up front; the merge paths below
    // decrement nAvailableAllocations again when a node is absorbed.
    pHeap->nAvailableAllocationBytesCommitted += nVirtual;
    pHeap->nAvailableAllocations += 1;

    //
    // Walk list to insertion point.
    //
    while (( pNext ) && ( pNext->pVirtualBlock < pVirtual )) {
        pPrev = pNext;
        pNext = pNext->pNextAlloc;
    }

    if (pPrev) {

        if (((pPrev->pVirtualBlock + pPrev->nVirtualBlockSize) == pVirtual) &&
            RtlpDphSameVirtualRegion (pPrev, pNode)) {

            //
            // pPrev and pNode are adjacent, so simply add size of
            // pNode entry to pPrev entry.
            //
            pPrev->nVirtualBlockSize += nVirtual;
            RtlpDebugPageHeapReturnNodeToUnusedList( pHeap, pNode );
            pHeap->nAvailableAllocations--;

            // Continue below with the merged entry playing pNode's role
            // so a further merge with pNext remains possible.
            pNode = pPrev;
            pVirtual = pPrev->pVirtualBlock;
            nVirtual = pPrev->nVirtualBlockSize;
        }
        else {

            //
            // pPrev and pNode are not adjacent, so insert the pNode
            // block into the list after pPrev.
            //
            pNode->pNextAlloc = pPrev->pNextAlloc;
            pPrev->pNextAlloc = pNode;
        }
    }
    else {

        //
        // pNode should be inserted at head of list.
        //
        pNode->pNextAlloc = pHeap->pAvailableAllocationListHead;
        pHeap->pAvailableAllocationListHead = pNode;
    }

    if (pNext) {

        if (((pVirtual + nVirtual) == pNext->pVirtualBlock) &&
            RtlpDphSameVirtualRegion (pNode, pNext)) {

            //
            // pNode and pNext are adjacent, so simply add size of
            // pNext entry to pNode entry and remove pNext entry
            // from the list.
            //
            pNode->nVirtualBlockSize += pNext->nVirtualBlockSize;
            pNode->pNextAlloc = pNext->pNextAlloc;

            // If pNext was the tail, the merged node becomes the tail.
            if (pHeap->pAvailableAllocationListTail == pNext) {
                pHeap->pAvailableAllocationListTail = pNode;
            }

            RtlpDebugPageHeapReturnNodeToUnusedList( pHeap, pNext );
            pHeap->nAvailableAllocations--;
        }
    }
    else {

        //
        // pNode is tail of list.
        //
        pHeap->pAvailableAllocationListTail = pNode;
    }
}
VOID
RtlpDebugPageHeapCoalesceFreeIntoAvailable(
    IN PDPH_HEAP_ROOT pHeap,
    IN ULONG nLeaveOnFreeList
    )
/*++

Routine Description:

    Moves entries from the head of the (FIFO) free list onto the
    sorted/coalesced available list until only nLeaveOnFreeList
    entries remain on the free list.  The entries left behind are
    the most recently freed ones, preserved to help catch
    reference-after-free bugs.

--*/
{
    PDPH_HEAP_BLOCK pNode = pHeap->pFreeAllocationListHead;
    SIZE_T nFree = pHeap->nFreeAllocations;
    PDPH_HEAP_BLOCK pNext;

    DEBUG_ASSERT( nFree >= nLeaveOnFreeList );

    while (( pNode ) && ( nFree-- > nLeaveOnFreeList )) {

        pNext = pNode->pNextAlloc;  // preserve next pointer across shuffling

        // pNode is always the current head, hence pPrev == NULL.
        RtlpDebugPageHeapRemoveFromFreeList( pHeap, pNode, NULL );
        RtlpDebugPageHeapCoalesceNodeIntoAvailable( pHeap, pNode );

        pNode = pNext;
    }

    DEBUG_ASSERT(( nFree = (volatile SIZE_T)( pHeap->nFreeAllocations )) >= nLeaveOnFreeList );
    DEBUG_ASSERT(( pNode != NULL ) || ( nFree == 0 ));
}
//
// Forward declaration: RtlpDebugPageHeapFindAvailableMem (below) may
// need to grow the heap's virtual reservation; the definition follows
// later in this file.
//
BOOLEAN
RtlpDebugPageHeapGrowVirtual(
    IN PDPH_HEAP_ROOT pHeap,
    IN SIZE_T nSize
    );
PDPH_HEAP_BLOCK
RtlpDebugPageHeapFindAvailableMem(
    IN PDPH_HEAP_ROOT pHeap,
    IN SIZE_T nSize,
    OUT PDPH_HEAP_BLOCK *pPrevAvailNode,
    IN BOOLEAN bGrowVirtual
    )
/*++

Routine Description:

    Finds an available block of at least nSize bytes using a staged
    strategy: (1) best-fit search of the available list; (2) repeatedly
    coalesce 3/4 of the free list into the available list and retry;
    (3) if bGrowVirtual, reserve more VM and retry; (4) as a last
    resort, coalesce the entire free list (sacrificing the
    reference-after-free window) and retry.  With the
    PAGE_HEAP_SMART_MEMORY_USAGE flag the returned range is also
    committed before returning.

    Returns the found node (with *pPrevAvailNode set to its list
    predecessor by the best-fit search), or NULL on failure.

--*/
{
    PDPH_HEAP_BLOCK pAvail;
    ULONG nLeaveOnFreeList;

    //
    // If we use uncommitted ranges it is really important to
    // call FindAvailableMemory only with page aligned sizes.
    //
    if ((pHeap->ExtraFlags & PAGE_HEAP_SMART_MEMORY_USAGE)) {
        DEBUG_ASSERT ((nSize & ~(PAGE_SIZE - 1)) == nSize);
    }

    //
    // First search existing AvailableList for a "best-fit" block
    // (the smallest block that will satisfy the request).
    //
    pAvail = RtlpDebugPageHeapSearchAvailableMemListForBestFit(
                 pHeap,
                 nSize,
                 pPrevAvailNode
                 );

    while (( pAvail == NULL ) && ( pHeap->nFreeAllocations > MIN_FREE_LIST_LENGTH )) {

        //
        // Failed to find sufficient memory on AvailableList. Coalesce
        // 3/4 of the FreeList memory to the AvailableList and try again.
        // Continue this until we have sufficient memory in AvailableList,
        // or the FreeList length is reduced to MIN_FREE_LIST_LENGTH entries.
        // We don't shrink the FreeList length below MIN_FREE_LIST_LENGTH
        // entries to preserve the most recent MIN_FREE_LIST_LENGTH entries
        // for reference-after-freed purposes.
        //
        nLeaveOnFreeList = pHeap->nFreeAllocations / 4;

        if (nLeaveOnFreeList < MIN_FREE_LIST_LENGTH)
            nLeaveOnFreeList = MIN_FREE_LIST_LENGTH;

        RtlpDebugPageHeapCoalesceFreeIntoAvailable( pHeap, nLeaveOnFreeList );

        pAvail = RtlpDebugPageHeapSearchAvailableMemListForBestFit(
                     pHeap,
                     nSize,
                     pPrevAvailNode
                     );
    }

    if (( pAvail == NULL ) && ( bGrowVirtual )) {

        //
        // After coalescing FreeList into AvailableList, still don't have
        // enough memory (large enough block) to satisfy request, so we
        // need to allocate more VM.
        //
        if (RtlpDebugPageHeapGrowVirtual( pHeap, nSize )) {

            pAvail = RtlpDebugPageHeapSearchAvailableMemListForBestFit(
                         pHeap,
                         nSize,
                         pPrevAvailNode
                         );

            if (pAvail == NULL) {

                //
                // Failed to satisfy request with more VM. If remainder
                // of free list combined with available list is larger
                // than the request, we might still be able to satisfy
                // the request by merging all of the free list onto the
                // available list. Note we lose our MIN_FREE_LIST_LENGTH
                // reference-after-freed insurance in this case, but it
                // is a rare case, and we'd prefer to satisfy the allocation.
                //
                if (( pHeap->nFreeAllocationBytesCommitted +
                      pHeap->nAvailableAllocationBytesCommitted ) >= nSize) {

                    RtlpDebugPageHeapCoalesceFreeIntoAvailable( pHeap, 0 );

                    pAvail = RtlpDebugPageHeapSearchAvailableMemListForBestFit(
                                 pHeap,
                                 nSize,
                                 pPrevAvailNode
                                 );
                }
            }
        }
    }

    //
    // If we use uncommitted ranges we need to commit the memory
    // range now. Note that the memory will be committed but
    // the protection on it will be N/A.
    //
    if (pAvail && (pHeap->ExtraFlags & PAGE_HEAP_SMART_MEMORY_USAGE)) {

        BOOLEAN Success;

        //
        // (SilviuC): The memory here might be already committed if we use
        // it for the first time. Whenever we allocate virtual memory to grow
        // the heap we commit it. This is the reason the consumption does not
        // decrease as spectacular as we expected. We will need to fix it in
        // the future. It affects 0x43 flags.
        //
        Success = RtlpDebugPageHeapCommitVM (pAvail->pVirtualBlock, nSize);

        if (!Success) {

            //
            // We did not manage to commit memory for this block. This
            // can happen in low memory conditions. We will return null.
            // There is no need to do anything with the node we obtained.
            // It is already in the Available list where it should be anyway.
            //
            return NULL;
        }
    }

    return pAvail;
}
VOID
RtlpDebugPageHeapPlaceOnPoolList(
    IN PDPH_HEAP_ROOT pHeap,
    IN PDPH_HEAP_BLOCK pNode
    )
{
    //
    // NodePoolList is FIFO.
    //
    pNode->pNextAlloc = NULL;
    ENQUEUE_TAIL( pNode, pHeap->pNodePoolListHead, pHeap->pNodePoolListTail );

    // Track both total pool bytes and the number of pools.
    pHeap->nNodePoolBytes += pNode->nVirtualBlockSize;
    pHeap->nNodePools += 1;
}
VOID
RtlpDebugPageHeapAddNewPool(
    IN PDPH_HEAP_ROOT pHeap,
    IN PVOID pVirtual,
    IN SIZE_T nSize,
    IN BOOLEAN bAddToPoolList
    )
/*++

Routine Description:

    Carves a committed memory range into DPH_HEAP_BLOCK tracking nodes,
    links them onto the tail of the unused-node list, and (optionally)
    records the range itself on the node-pool list using one of the
    freshly created nodes.

--*/
{
    PDPH_HEAP_BLOCK pNode, pFirst;
    ULONG n, nCount;

    //
    // Assume pVirtual points to committed block of nSize bytes.
    //
    pFirst = pVirtual;
    nCount = (ULONG)(nSize / sizeof( DPH_HEAP_BLOCK ));

    // Chain the nCount nodes together in place; after the loop pNode
    // points at the last node of the chain.
    for (n = nCount - 1, pNode = pFirst; n > 0; pNode++, n--)
        pNode->pNextAlloc = pNode + 1;

    pNode->pNextAlloc = NULL;

    //
    // Now link this list into the tail of the UnusedNodeList
    //
    // Note: ENQUEUE_TAIL links pFirst after the current tail; the tail
    // pointer is then explicitly advanced to the last node of the new
    // chain (pNode), since the macro only knows about single entries.
    //
    ENQUEUE_TAIL( pFirst, pHeap->pUnusedNodeListHead, pHeap->pUnusedNodeListTail );
    pHeap->pUnusedNodeListTail = pNode;
    pHeap->nUnusedNodes += nCount;

    if (bAddToPoolList) {

        //
        // Now add an entry on the PoolList by taking a node from the
        // UnusedNodeList, which should be guaranteed to be non-empty
        // since we just added new nodes to it.
        //
        pNode = RtlpDebugPageHeapTakeNodeFromUnusedList( pHeap );
        DEBUG_ASSERT( pNode != NULL );
        pNode->pVirtualBlock = pVirtual;
        pNode->nVirtualBlockSize = nSize;
        RtlpDebugPageHeapPlaceOnPoolList( pHeap, pNode );
    }
}
PDPH_HEAP_BLOCK
RtlpDebugPageHeapAllocateNode(
    IN PDPH_HEAP_ROOT pHeap
    )
/*++

Routine Description:

    Returns a DPH_HEAP_BLOCK tracking node from the unused-node list,
    replenishing the pool first if the list is empty.  Replenishing is
    done carefully to avoid recursion: memory is taken from the
    available list with bGrowVirtual == FALSE, and only if that fails
    is fresh VM reserved directly.  Returns NULL when no node can be
    produced (out of VM).

--*/
{
    PDPH_HEAP_BLOCK pNode, pPrev, pReturn;
    PUCHAR pVirtual;
    SIZE_T nVirtual;
    SIZE_T nRequest;

    // Guard (debug only) against re-entering this routine, which would
    // indicate the recursion this code is structured to avoid.
    DEBUG_ASSERT( ! pHeap->InsideAllocateNode );
    DEBUG_CODE( pHeap->InsideAllocateNode = TRUE );

    pReturn = NULL;

    if (pHeap->pUnusedNodeListHead == NULL) {

        //
        // We're out of nodes -- allocate new node pool
        // from AvailableList. Set bGrowVirtual to FALSE
        // since growing virtual will require new nodes, causing
        // recursion. Note that simply calling FindAvailableMem
        // might return some nodes to the pUnusedNodeList, even if
        // the call fails, so we'll check that the UnusedNodeList
        // is still empty before we try to use or allocate more
        // memory.
        //
        nRequest = POOL_SIZE;

        pNode = RtlpDebugPageHeapFindAvailableMem(
                    pHeap,
                    nRequest,
                    &pPrev,
                    FALSE
                    );

        if (( pHeap->pUnusedNodeListHead == NULL ) && ( pNode == NULL )) {

            //
            // Reduce request size to PAGE_SIZE and see if
            // we can find at least a page on the available
            // list.
            //
            nRequest = PAGE_SIZE;

            pNode = RtlpDebugPageHeapFindAvailableMem(
                        pHeap,
                        nRequest,
                        &pPrev,
                        FALSE
                        );
        }

        if (pHeap->pUnusedNodeListHead == NULL) {

            if (pNode == NULL) {

                //
                // Insufficient memory on Available list. Try allocating a
                // new virtual block.
                //
                nRequest = POOL_SIZE;
                nVirtual = RESERVE_SIZE;

                pVirtual = RtlpDebugPageHeapAllocateVM( nVirtual );

                if (pVirtual == NULL) {

                    //
                    // Unable to allocate full RESERVE_SIZE block,
                    // so reduce request to single VM unit (64K)
                    // and try again.
                    //
                    nVirtual = VM_UNIT_SIZE;

                    pVirtual = RtlpDebugPageHeapAllocateVM( nVirtual );

                    if (pVirtual == NULL) {

                        //
                        // Can't allocate any VM.
                        //
                        goto EXIT;
                    }
                }
            }
            else {
                // Found memory on the available list: take it off the
                // list and use it as the backing range for the pool.
                RtlpDebugPageHeapRemoveFromAvailableList( pHeap, pNode, pPrev );
                pVirtual = pNode->pVirtualBlock;
                nVirtual = pNode->nVirtualBlockSize;
            }

            //
            // We now have allocated VM referenced by pVirtual,nVirtual.
            // Make nRequest portion of VM accessible for new node pool.
            //
            if (! RtlpDebugPageHeapProtectVM( pVirtual, nRequest, PAGE_READWRITE )) {

                if (pNode == NULL) {
                    // The VM was freshly reserved above: release it.
                    RtlpDebugPageHeapReleaseVM( pVirtual );
                }
                else {
                    // The range came from the available list: put it back.
                    RtlpDebugPageHeapCoalesceNodeIntoAvailable( pHeap, pNode );
                }

                goto EXIT;
            }

            //
            // Now we have accessible memory for new pool. Add the
            // new memory to the pool. If the new memory came from
            // AvailableList versus fresh VM, zero the memory first.
            //
            if (pNode != NULL) {
                RtlZeroMemory( pVirtual, nRequest );
            }

            RtlpDebugPageHeapAddNewPool( pHeap, pVirtual, nRequest, TRUE );

            //
            // If any memory remaining, put it on available list.
            //
            if (pNode == NULL) {

                //
                // Memory came from new VM -- add appropriate list entries
                // for new VM and add remainder of VM to free list.
                //
                pNode = RtlpDebugPageHeapTakeNodeFromUnusedList( pHeap );
                DEBUG_ASSERT( pNode != NULL );
                pNode->pVirtualBlock = pVirtual;
                pNode->nVirtualBlockSize = nVirtual;
                RtlpDebugPageHeapPlaceOnVirtualList( pHeap, pNode );

                pNode = RtlpDebugPageHeapTakeNodeFromUnusedList( pHeap );
                DEBUG_ASSERT( pNode != NULL );
                pNode->pVirtualBlock = pVirtual + nRequest;
                pNode->nVirtualBlockSize = nVirtual - nRequest;
                RtlpDebugPageHeapCoalesceNodeIntoAvailable( pHeap, pNode );
            }
            else {

                if (pNode->nVirtualBlockSize > nRequest) {
                    // Trim the used portion off the front of the block
                    // and return the remainder to the available list.
                    pNode->pVirtualBlock += nRequest;
                    pNode->nVirtualBlockSize -= nRequest;
                    RtlpDebugPageHeapCoalesceNodeIntoAvailable( pHeap, pNode );
                }
                else {

                    //
                    // Used up entire available block -- return node to
                    // unused list.
                    //
                    RtlpDebugPageHeapReturnNodeToUnusedList( pHeap, pNode );
                }
            }
        }
    }

    pReturn = RtlpDebugPageHeapTakeNodeFromUnusedList( pHeap );
    DEBUG_ASSERT( pReturn != NULL );

EXIT:

    DEBUG_CODE( pHeap->InsideAllocateNode = FALSE );
    return pReturn;
}
BOOLEAN
RtlpDebugPageHeapGrowVirtual(
    IN PDPH_HEAP_ROOT pHeap,
    IN SIZE_T nSize
    )
/*++

Routine Description:

    Reserves a new chunk of virtual memory for the heap, at least
    nSize bytes (rounded up to VM_UNIT_SIZE) and preferably
    RESERVE_SIZE.  On success the new range is recorded on the
    virtual-storage list and coalesced into the available list.
    Returns TRUE on success, FALSE if nodes or VM could not be
    obtained (all partial work is rolled back).

--*/
{
    PDPH_HEAP_BLOCK pVirtualNode;
    PDPH_HEAP_BLOCK pAvailNode;
    PVOID pVirtual;
    SIZE_T nVirtual;

    // Obtain both tracking nodes before reserving VM so failure
    // handling stays simple.
    pVirtualNode = RtlpDebugPageHeapAllocateNode( pHeap );

    if (pVirtualNode == NULL) {
        return FALSE;
    }

    pAvailNode = RtlpDebugPageHeapAllocateNode( pHeap );

    if (pAvailNode == NULL) {
        RtlpDebugPageHeapReturnNodeToUnusedList( pHeap, pVirtualNode );
        return FALSE;
    }

    nSize = ROUNDUP2( nSize, VM_UNIT_SIZE );
    nVirtual = ( nSize > RESERVE_SIZE ) ? nSize : RESERVE_SIZE;
    pVirtual = RtlpDebugPageHeapAllocateVM( nVirtual );

    // If the preferred RESERVE_SIZE reservation failed, fall back to
    // the minimum that satisfies the request.
    if (( pVirtual == NULL ) && ( nSize < RESERVE_SIZE )) {
        nVirtual = nSize;
        pVirtual = RtlpDebugPageHeapAllocateVM( nVirtual );
    }

    if (pVirtual == NULL) {
        // Roll back: return both nodes to the unused list.
        RtlpDebugPageHeapReturnNodeToUnusedList( pHeap, pVirtualNode );
        RtlpDebugPageHeapReturnNodeToUnusedList( pHeap, pAvailNode );
        return FALSE;
    }

    // One node records the VM reservation itself, the other describes
    // the same range as memory available for allocation.
    pVirtualNode->pVirtualBlock = pVirtual;
    pVirtualNode->nVirtualBlockSize = nVirtual;
    RtlpDebugPageHeapPlaceOnVirtualList( pHeap, pVirtualNode );

    pAvailNode->pVirtualBlock = pVirtual;
    pAvailNode->nVirtualBlockSize = nVirtual;
    RtlpDebugPageHeapCoalesceNodeIntoAvailable( pHeap, pAvailNode );

    return TRUE;
}
VOID
RtlpDebugPageHeapProtectStructures(
    IN PDPH_HEAP_ROOT pHeap
    )
/*++

Routine Description:

    Intended to re-protect (PAGE_READONLY) the page heap's own metadata
    pools once the last unprotect reference is released.  The entire
    body is currently compiled out (#if 0), so this routine is a no-op
    as built.

--*/
{
#if 0
    PDPH_HEAP_BLOCK pNode;

    //
    // Assume CritSect is owned so we're the only thread twiddling
    // the protection.
    //
    DEBUG_ASSERT( pHeap->HeapFlags & HEAP_PROTECTION_ENABLED );

    if (--pHeap->nUnProtectionReferenceCount == 0) {

        pNode = pHeap->pNodePoolListHead;

        while (pNode != NULL) {

            RtlpDebugPageHeapProtectVM( pNode->pVirtualBlock,
                                        pNode->nVirtualBlockSize,
                                        PAGE_READONLY );

            pNode = pNode->pNextAlloc;
        }
    }

    //
    // Protect the main NT heap structure associated with page heap.
    // Nobody should touch this outside of page heap code paths.
    //
    RtlpDebugPageHeapProtectVM (pHeap->NormalHeap,
                                PAGE_SIZE,
                                PAGE_READONLY);
#endif
}
VOID
RtlpDebugPageHeapUnProtectStructures(
    IN PDPH_HEAP_ROOT pHeap
    )
/*++

Routine Description:

    Intended to make the page heap's metadata pools writable
    (PAGE_READWRITE) and bump the unprotect reference count.  The
    entire body is currently compiled out (#if 0), so this routine is
    a no-op as built.

--*/
{
#if 0
    PDPH_HEAP_BLOCK pNode;

    DEBUG_ASSERT( pHeap->HeapFlags & HEAP_PROTECTION_ENABLED );

    if (pHeap->nUnProtectionReferenceCount == 0) {

        pNode = pHeap->pNodePoolListHead;

        while (pNode != NULL) {

            RtlpDebugPageHeapProtectVM( pNode->pVirtualBlock,
                                        pNode->nVirtualBlockSize,
                                        PAGE_READWRITE );

            pNode = pNode->pNextAlloc;
        }
    }

    //
    // Unprotect the main NT heap structure associated with page heap.
    //
    RtlpDebugPageHeapProtectVM (pHeap->NormalHeap,
                                PAGE_SIZE,
                                PAGE_READWRITE);

    pHeap->nUnProtectionReferenceCount += 1;
#endif
}
VOID
RtlpDphPreProcessing (
    PDPH_HEAP_ROOT Heap,
    ULONG Flags
    )
/*++

Routine Description:

    Standard prologue for page heap operations: acquire the heap's
    critical section, verify list integrity (debug builds), and make
    the heap metadata writable.  Must be paired with
    RtlpDphPostProcessing on every exit path.

--*/
{
    RtlpDebugPageHeapEnterCritSect (Heap, Flags);
    DEBUG_CODE (RtlpDebugPageHeapVerifyIntegrity (Heap));

    UNPROTECT_HEAP_STRUCTURES (Heap);

#if 0
    RtlpDphValidateInternalLists (Heap);
#endif
}
VOID
RtlpDphPostProcessing (
    PDPH_HEAP_ROOT Heap
    )
/*++

Routine Description:

    Standard epilogue for page heap operations: re-protect the heap
    metadata, verify list integrity (debug builds), and release the
    heap's critical section.  Mirrors RtlpDphPreProcessing.

--*/
{
#if 0
    RtlpDphValidateInternalLists (Heap);
#endif

    PROTECT_HEAP_STRUCTURES (Heap);
    DEBUG_CODE (RtlpDebugPageHeapVerifyIntegrity (Heap));

    RtlpDebugPageHeapLeaveCritSect (Heap);
}
/////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////// Exception management
/////////////////////////////////////////////////////////////////////

//
// Indices into RtlpDphException[] for the categories of exceptions
// counted by RtlpDphUnexpectedExceptionFilter.
//
#define EXN_STACK_OVERFLOW 0
#define EXN_NO_MEMORY 1
#define EXN_ACCESS_VIOLATION 2
#define EXN_IGNORE_AV 3
#define EXN_OTHER 4

// Per-category exception counters (only the first five slots are
// used; the array is sized 8, presumably for future categories --
// TODO confirm).
ULONG RtlpDphException[8];
ULONG
RtlpDphUnexpectedExceptionFilter (
    ULONG ExceptionCode,
    PVOID ExceptionRecord,
    PDPH_HEAP_ROOT Heap,
    BOOLEAN IgnoreAccessViolations
    )
/*++

Routine Description:

    This routine is the exception filter used by page heap operations. The role
    of the function is to bring the page heap in a consistent state (unlock
    heap lock, protect page heap metadata, etc.) if an exception has been raised.

    The exception can be raised for legitimate reasons (e.g. STATUS_NO_MEMORY
    from HeapAlloc()) or because there is some sort of corruption.

    Legitimate exceptions do not cause breaks but an unrecognized exception will
    cause a break. The break is continuable at least with respect to page heap.

Arguments:

    ExceptionCode - exception code

    ExceptionRecord - structure with pointers to .exr and .cxr

    Heap - heap in which code was executing at the time of exception

    IgnoreAccessViolations - sometimes we want to ignore this (e.g. HeapSize).

Return Value:

    Always EXCEPTION_CONTINUE_SEARCH. The philosophy of this exception filter
    function is that if we get an exception we bring back page heap in a consistent
    state and then let the exception go to the next exception handler.

Environment:

    Called within page heap APIs if an exception is raised.

--*/
{
    if (ExceptionCode == STATUS_NO_MEMORY) {

        //
        // Underlying NT heap functions can legitimately raise this
        // exception.
        //
        InterlockedIncrement (&(RtlpDphException[EXN_NO_MEMORY]));
    }
    else if (Heap != NULL && ExceptionCode == STATUS_STACK_OVERFLOW) {

        //
        // We go to the next exception handler for stack overflows.
        //
        InterlockedIncrement (&(RtlpDphException[EXN_STACK_OVERFLOW]));
    }
    else if (ExceptionCode == STATUS_ACCESS_VIOLATION) {

        if (IgnoreAccessViolations == FALSE) {

            // An AV inside heap code paths is treated as corruption
            // and reported via a verifier stop (continuable).
            VERIFIER_STOP (APPLICATION_VERIFIER_UNEXPECTED_EXCEPTION,
                           "unexpected exception raised in heap code path",
                           Heap, "Heap handle involved",
                           ExceptionCode, "Exception code",
                           ExceptionRecord, "Exception record (.exr on 1st word, .cxr on 2nd word)",
                           0, "");

            InterlockedIncrement (&(RtlpDphException[EXN_ACCESS_VIOLATION]));
        }
        else {
            InterlockedIncrement (&(RtlpDphException[EXN_IGNORE_AV]));
        }
    }
    else {

        //
        // Any other exceptions will go to the next exception handler.
        //
        InterlockedIncrement (&(RtlpDphException[EXN_OTHER]));
    }

    //
    // NOTE(review): Heap is passed straight to RtlpDphPostProcessing
    // even though the STATUS_STACK_OVERFLOW branch above guards on
    // Heap != NULL, implying NULL is possible here -- confirm that
    // RtlpDphPostProcessing tolerates a NULL heap.
    //
    RtlpDphPostProcessing (Heap);

    return EXCEPTION_CONTINUE_SEARCH;
}
//
// Breaks (debug builds only) when a code path believed unreachable
// is executed; compiles to nothing in free builds.
//
#if DBG
#define ASSERT_UNEXPECTED_CODE_PATH() ASSERT(0 && "unexpected code path")
#else
#define ASSERT_UNEXPECTED_CODE_PATH()
#endif
  1747. /////////////////////////////////////////////////////////////////////
  1748. //////////////////////////////////////////// Internal debug functions
  1749. /////////////////////////////////////////////////////////////////////
  1750. #if INTERNAL_DEBUG
VOID
RtlpDebugPageHeapVerifyList(
    IN PDPH_HEAP_BLOCK pListHead,
    IN PDPH_HEAP_BLOCK pListTail,
    IN SIZE_T nExpectedLength,
    IN SIZE_T nExpectedVirtual,
    IN PCCH pListName
    )
/*++

Routine Description:

    Internal-debug consistency check for one of the heap's singly
    linked node lists.  Detects cycles (pTest advances two links per
    one link of pNode, so it catches up with pNode iff the list is
    circular), a wrong tail pointer, and mismatches between the
    walked length / total virtual size and the expected counters.
    Passing 0xFFFFFFFF for nExpectedLength or nExpectedVirtual skips
    that particular check.  Any failure prints and breaks into the
    debugger.

--*/
{
    PDPH_HEAP_BLOCK pPrev = NULL;
    PDPH_HEAP_BLOCK pNode = pListHead;
    PDPH_HEAP_BLOCK pTest = pListHead ? pListHead->pNextAlloc : NULL;
    ULONG nNode = 0;
    SIZE_T nSize = 0;

    while (pNode) {

        if (pNode == pTest) {
            DbgPrint( "Page heap: Internal %s list is circular\n", pListName );
            DbgBreakPoint ();
            return;
        }

        nNode += 1;
        nSize += pNode->nVirtualBlockSize;

        // Advance the fast pointer by two links (cycle detection).
        if (pTest) {
            pTest = pTest->pNextAlloc;
            if (pTest) {
                pTest = pTest->pNextAlloc;
            }
        }

        pPrev = pNode;
        pNode = pNode->pNextAlloc;
    }

    if (pPrev != pListTail) {
        DbgPrint( "Page heap: Internal %s list has incorrect tail pointer\n", pListName );
        DbgBreakPoint ();
    }

    if (( nExpectedLength != 0xFFFFFFFF ) && ( nExpectedLength != nNode )) {
        DbgPrint( "Page heap: Internal %s list has incorrect length\n", pListName );
        DbgBreakPoint ();
    }

    if (( nExpectedVirtual != 0xFFFFFFFF ) && ( nExpectedVirtual != nSize )) {
        DbgPrint( "Page heap: Internal %s list has incorrect virtual size\n", pListName );
        DbgBreakPoint ();
    }
}
VOID
RtlpDebugPageHeapVerifyIntegrity(
    IN PDPH_HEAP_ROOT pHeap
    )
/*++

Routine Description:

    Runs RtlpDebugPageHeapVerifyList over every internal list of the
    heap (virtual storage, busy, free, available, unused-node, and
    node-pool), checking each against its stored length/size counters.
    The unused-node list passes 0xFFFFFFFF for the virtual size, i.e.
    that check is skipped for it.

--*/
{
    RtlpDebugPageHeapVerifyList(
        pHeap->pVirtualStorageListHead,
        pHeap->pVirtualStorageListTail,
        pHeap->nVirtualStorageRanges,
        pHeap->nVirtualStorageBytes,
        "VIRTUAL"
        );

    RtlpDebugPageHeapVerifyList(
        pHeap->pBusyAllocationListHead,
        pHeap->pBusyAllocationListTail,
        pHeap->nBusyAllocations,
        pHeap->nBusyAllocationBytesCommitted,
        "BUSY"
        );

    RtlpDebugPageHeapVerifyList(
        pHeap->pFreeAllocationListHead,
        pHeap->pFreeAllocationListTail,
        pHeap->nFreeAllocations,
        pHeap->nFreeAllocationBytesCommitted,
        "FREE"
        );

    RtlpDebugPageHeapVerifyList(
        pHeap->pAvailableAllocationListHead,
        pHeap->pAvailableAllocationListTail,
        pHeap->nAvailableAllocations,
        pHeap->nAvailableAllocationBytesCommitted,
        "AVAILABLE"
        );

    RtlpDebugPageHeapVerifyList(
        pHeap->pUnusedNodeListHead,
        pHeap->pUnusedNodeListTail,
        pHeap->nUnusedNodes,
        0xFFFFFFFF,
        "FREENODE"
        );

    RtlpDebugPageHeapVerifyList(
        pHeap->pNodePoolListHead,
        pHeap->pNodePoolListTail,
        pHeap->nNodePools,
        pHeap->nNodePoolBytes,
        "NODEPOOL"
        );
}
  1843. #endif // #if INTERNAL_DEBUG
  1844. /////////////////////////////////////////////////////////////////////
  1845. ///////////////////////////// Exported page heap management functions
  1846. /////////////////////////////////////////////////////////////////////
  1847. //
  1848. // Here's where the exported interface functions are defined.
  1849. //
  1850. #if (( DPH_CAPTURE_STACK_TRACE ) && ( i386 ) && ( FPO ))
  1851. #pragma optimize( "y", off ) // disable FPO for consistent stack traces
  1852. #endif
//
// RtlpDebugPageHeapCreate
//
// Creates a page heap. Returns the heap handle (the address of the
// read-only "fake" retail HEAP page) on success, or NULL to tell the
// caller to fall back to a normal heap (or on genuine failure, in
// which case an exception may also be raised if Flags request it).
//
// Flags       - standard HEAP_* creation flags.
// HeapBase    - must be NULL; user-supplied heap memory is not supported.
// ReserveSize - ignored (heap is always growable with its own thresholds).
// CommitSize  - ignored.
// Lock        - must be NULL; user-supplied locks are not supported.
// Parameters  - normally ignored; the special value -1 marks a recursive
//               call (see below) and forces a NULL return.
//
PVOID
RtlpDebugPageHeapCreate(
    IN ULONG Flags,
    IN PVOID HeapBase OPTIONAL,
    IN SIZE_T ReserveSize OPTIONAL,
    IN SIZE_T CommitSize OPTIONAL,
    IN PVOID Lock OPTIONAL,
    IN PRTL_HEAP_PARAMETERS Parameters OPTIONAL
    )
{
    SYSTEM_BASIC_INFORMATION SystemInfo;
    PDPH_HEAP_BLOCK Node;
    PDPH_HEAP_ROOT HeapRoot;
    PVOID HeapHandle;
    PUCHAR pVirtual;
    SIZE_T nVirtual;
    SIZE_T Size;
    NTSTATUS Status;

    // NOTE(review): HeapHandle, Size and Status are declared but never
    // referenced in this routine.

    //
    // If `Parameters' is -1 then this is a recursive call to
    // RtlpDebugPageHeapCreate and we will return NULL so that
    // the normal heap manager will create a normal heap.
    // I agree this is a hack but we need this so that we maintain
    // a very loose dependency between the normal and page heap
    // manager.
    //

    if ((SIZE_T)Parameters == (SIZE_T)-1) {
        return NULL;
    }

    //
    // We don't handle heaps where HeapBase is already allocated
    // from user or where Lock is provided by user.
    //

    DEBUG_ASSERT( HeapBase == NULL );
    DEBUG_ASSERT( Lock == NULL );

    if (( HeapBase != NULL ) || ( Lock != NULL ))
        return NULL;

    //
    // Note that we simply ignore ReserveSize, CommitSize, and
    // Parameters as we always have a growable heap with our
    // own thresholds, etc.
    //
    // Sanity-check that compile-time constants match the running
    // system's page size and VM allocation granularity.
    //

    ZwQuerySystemInformation( SystemBasicInformation,
                              &SystemInfo,
                              sizeof( SystemInfo ),
                              NULL );

    RETAIL_ASSERT( SystemInfo.PageSize == PAGE_SIZE );
    RETAIL_ASSERT( SystemInfo.AllocationGranularity == VM_UNIT_SIZE );
    DEBUG_ASSERT(( PAGE_SIZE + POOL_SIZE + PAGE_SIZE ) < VM_UNIT_SIZE );

    //
    // Reserve the initial virtual region; if the large reservation
    // fails, retry with the minimal unit size before giving up.
    //

    nVirtual = RESERVE_SIZE;
    pVirtual = RtlpDebugPageHeapAllocateVM( nVirtual );

    if (pVirtual == NULL) {

        nVirtual = VM_UNIT_SIZE;
        pVirtual = RtlpDebugPageHeapAllocateVM( nVirtual );

        if (pVirtual == NULL) {
            OUT_OF_VM_BREAK( Flags, "Page heap: Insufficient memory to create heap\n" );
            IF_GENERATE_EXCEPTION( Flags, STATUS_NO_MEMORY );
            return NULL;
        }
    }

    //
    // Commit/unprotect the leading pages that hold the fake HEAP,
    // the DPH_HEAP_ROOT + node pool, and the critical section page.
    //

    if (! RtlpDebugPageHeapProtectVM( pVirtual, PAGE_SIZE + POOL_SIZE + PAGE_SIZE, PAGE_READWRITE )) {
        RtlpDebugPageHeapReleaseVM( pVirtual );
        IF_GENERATE_EXCEPTION( Flags, STATUS_NO_MEMORY );
        return NULL;
    }

    //
    // Out of our initial allocation, the initial page is the fake
    // retail HEAP structure. The second page begins our DPH_HEAP
    // structure followed by (POOL_SIZE-sizeof(DPH_HEAP)) bytes for
    // the initial pool. The next page contains our CRIT_SECT
    // variable, which must always be READWRITE. Beyond that, the
    // remainder of the virtual allocation is placed on the available
    // list.
    //
    // |_____|___________________|_____|__ _ _ _ _ _ _ _ _ _ _ _ _ __|
    //
    // ^pVirtual
    //
    // ^FakeRetailHEAP
    //
    // ^HeapRoot
    //
    // ^InitialNodePool
    //
    // ^CRITICAL_SECTION
    //
    // ^AvailableSpace
    //
    //
    //
    // Our DPH_HEAP structure starts at the page following the
    // fake retail HEAP structure pointed to by the "heap handle".
    // For the fake HEAP structure, we'll fill it with 0xEEEEEEEE
    // except for the Heap->Flags and Heap->ForceFlags fields,
    // which we must set to include our HEAP_FLAG_PAGE_ALLOCS flag,
    // and then we'll make the whole page read-only.
    //

    RtlFillMemory( pVirtual, PAGE_SIZE, FILL_BYTE );

    ((PHEAP)pVirtual)->Flags = Flags | HEAP_FLAG_PAGE_ALLOCS;
    ((PHEAP)pVirtual)->ForceFlags = Flags | HEAP_FLAG_PAGE_ALLOCS;

    if (! RtlpDebugPageHeapProtectVM( pVirtual, PAGE_SIZE, PAGE_READONLY )) {
        RtlpDebugPageHeapReleaseVM( pVirtual );
        IF_GENERATE_EXCEPTION( Flags, STATUS_NO_MEMORY );
        return NULL;
    }

    HeapRoot = (PDPH_HEAP_ROOT)( pVirtual + PAGE_SIZE );

    HeapRoot->Signature = DPH_HEAP_SIGNATURE;
    HeapRoot->HeapFlags = Flags;
    HeapRoot->HeapCritSect = (PVOID)((PCHAR)HeapRoot + POOL_SIZE );

    //
    // Copy the page heap global flags into per heap flags.
    //

    HeapRoot->ExtraFlags = RtlpDphGlobalFlags;

    //
    // If page heap meta data protection was requested we transfer
    // the bit into the HeapFlags field.
    //

    if ((HeapRoot->ExtraFlags & PAGE_HEAP_PROTECT_META_DATA)) {
        HeapRoot->HeapFlags |= HEAP_PROTECTION_ENABLED;
    }

    //
    // If the PAGE_HEAP_UNALIGNED_ALLOCATIONS bit is set
    // in ExtraFlags we will set the HEAP_NO_ALIGNMENT flag
    // in the HeapFlags. This last bit controls if allocations
    // will be aligned or not. The reason we do this transfer is
    // that ExtraFlags can be set from the registry whereas the
    // normal HeapFlags cannot.
    //

    if ((HeapRoot->ExtraFlags & PAGE_HEAP_UNALIGNED_ALLOCATIONS)) {
        HeapRoot->HeapFlags |= HEAP_NO_ALIGNMENT;
    }

    //
    // Initialize the seed for the random generator used to decide
    // from where should we make allocations if random decision
    // flag is on.
    //

    {
        LARGE_INTEGER PerformanceCounter;

        PerformanceCounter.LowPart = 0xABCDDCBA;

        NtQueryPerformanceCounter (
            &PerformanceCounter,
            NULL);

        HeapRoot->Seed = PerformanceCounter.LowPart;
    }

    //
    // Create the normal heap associated with the page heap.
    // The last parameter value (-1) is very important because
    // it stops the recursive call into page heap create.
    //
    // Note that it is very important to reset the NO_SERIALIZE
    // bit because normal heap operations can happen in random
    // threads when the free delayed cache gets trimmed.
    //

    HeapRoot->NormalHeap = RtlCreateHeap (
        Flags & (~HEAP_NO_SERIALIZE),
        HeapBase,
        ReserveSize,
        CommitSize,
        Lock,
        (PRTL_HEAP_PARAMETERS)-1 );

    if (HeapRoot->NormalHeap == NULL) {
        RtlpDebugPageHeapReleaseVM( pVirtual );
        IF_GENERATE_EXCEPTION( Flags, STATUS_NO_MEMORY );
        return NULL;
    }

    //
    // Initialize heap lock.
    //

    RtlInitializeCriticalSection( HeapRoot->HeapCritSect );

    //
    // On the page that contains our DPH_HEAP structure, use
    // the remaining memory beyond the DPH_HEAP structure as
    // pool for allocating heap nodes.
    //

    RtlpDebugPageHeapAddNewPool( HeapRoot,
                                 HeapRoot + 1,
                                 POOL_SIZE - sizeof( DPH_HEAP_ROOT ),
                                 FALSE
                                 );

    //
    // Make initial PoolList entry by taking a node from the
    // UnusedNodeList, which should be guaranteed to be non-empty
    // since we just added new nodes to it.
    //

    Node = RtlpDebugPageHeapAllocateNode( HeapRoot );
    DEBUG_ASSERT( Node != NULL );
    Node->pVirtualBlock = (PVOID)HeapRoot;
    Node->nVirtualBlockSize = POOL_SIZE;
    RtlpDebugPageHeapPlaceOnPoolList( HeapRoot, Node );

    //
    // Make VirtualStorageList entry for initial VM allocation
    //

    Node = RtlpDebugPageHeapAllocateNode( HeapRoot );
    DEBUG_ASSERT( Node != NULL );
    Node->pVirtualBlock = pVirtual;
    Node->nVirtualBlockSize = nVirtual;
    RtlpDebugPageHeapPlaceOnVirtualList( HeapRoot, Node );

    //
    // Make AvailableList entry containing remainder of initial VM
    // and add to (create) the AvailableList.
    //

    Node = RtlpDebugPageHeapAllocateNode( HeapRoot );
    DEBUG_ASSERT( Node != NULL );
    Node->pVirtualBlock = pVirtual + ( PAGE_SIZE + POOL_SIZE + PAGE_SIZE );
    Node->nVirtualBlockSize = nVirtual - ( PAGE_SIZE + POOL_SIZE + PAGE_SIZE );
    RtlpDebugPageHeapCoalesceNodeIntoAvailable( HeapRoot, Node );

    //
    // Get heap creation stack trace.
    //

    HeapRoot->CreateStackTrace = RtlpDphLogStackTrace(1);

    //
    // Initialize heap internal structure protection.
    //

    HeapRoot->nUnProtectionReferenceCount = 1; // initialize

    //
    // If this is the first heap creation in this process, then we
    // need to initialize the process heap list critical section,
    // the global delayed free queue for normal blocks and the
    // trace database.
    //

    if (! RtlpDphHeapListHasBeenInitialized) {

        RtlpDphHeapListHasBeenInitialized = TRUE;

        RtlInitializeCriticalSection( &RtlpDphHeapListCriticalSection );
        RtlpDphInitializeDelayedFreeQueue ();

        //
        // Do not make fuss if the trace database creation fails.
        // This is something we can live with.
        //
        // The number of buckets is chosen to be a prime not too
        // close to a power of two (Knuth says so). Three possible
        // values are: 1567, 3089, 6263.
        //

        RtlpDphTraceDatabase = RtlTraceDatabaseCreate (
            6263,
            RtlpDphTraceDatabaseMaximumSize,
            0,
            0,
            NULL);

#if DBG
        if (RtlpDphTraceDatabase == NULL) {
            DbgPrint ("Page heap: warning: failed to create trace database for %p",
                      HeapRoot);
        }
#endif

        //
        // Create the Unicode string containing the target dlls.
        // If no target dlls have been specified the string will
        // be initialized with the empty string.
        //

        RtlInitUnicodeString (
            &RtlpDphTargetDllsUnicode,
            RtlpDphTargetDlls);

        //
        // Initialize the target dlls logic
        //

        RtlpDphTargetDllsLogicInitialize ();
    }

    //
    // Add this heap entry to the process heap linked list.
    //

    RtlEnterCriticalSection( &RtlpDphHeapListCriticalSection );

    if (RtlpDphHeapListHead == NULL) {
        RtlpDphHeapListHead = HeapRoot;
        RtlpDphHeapListTail = HeapRoot;
    }
    else {
        // Tail's meta data may be read-only; unprotect around the link update.
        HeapRoot->pPrevHeapRoot = RtlpDphHeapListTail;
        UNPROTECT_HEAP_STRUCTURES(RtlpDphHeapListTail);
        RtlpDphHeapListTail->pNextHeapRoot = HeapRoot;
        PROTECT_HEAP_STRUCTURES(RtlpDphHeapListTail);
        RtlpDphHeapListTail = HeapRoot;
    }

    PROTECT_HEAP_STRUCTURES( HeapRoot ); // now protected

    RtlpDphHeapListCount += 1;

    RtlLeaveCriticalSection( &RtlpDphHeapListCriticalSection );

    DEBUG_CODE( RtlpDebugPageHeapVerifyIntegrity( HeapRoot ));

#if 0 // ISSUE: SilviuC: use DbgPrintEx instead.
    DbgPrint( "Page heap: process 0x%X created heap @ %p (%p, flags 0x%X)\n",
              NtCurrentTeb()->ClientId.UniqueProcess,
              HEAP_HANDLE_FROM_ROOT( HeapRoot ),
              HeapRoot->NormalHeap,
              HeapRoot->ExtraFlags);
#endif

    if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION)) {
        RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0);
    }

    return HEAP_HANDLE_FROM_ROOT( HeapRoot ); // same as pVirtual
}
//
// RtlpDebugPageHeapAllocate
//
// Allocates a block from a page heap. The returned pointer is placed
// so the end of the user block abuts a NOACCESS guard page (or, when
// PAGE_HEAP_CATCH_BACKWARD_OVERRUNS is set, so the block starts one
// page past the beginning of the region instead). Small allocations
// may be diverted to the associated normal heap unless the handle is
// biased (see IS_BIASED_POINTER), which forces page heap placement.
//
// HeapHandle - page heap handle (possibly biased).
// Flags      - HEAP_* flags, merged with the heap's own flags.
// Size       - user-requested byte count.
//
// Returns the user pointer, or NULL on failure (after optionally
// raising STATUS_NO_MEMORY if Flags request exceptions).
//
PVOID
RtlpDebugPageHeapAllocate(
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN SIZE_T Size
    )
{
    PDPH_HEAP_ROOT HeapRoot;
    PDPH_HEAP_BLOCK pAvailNode;
    PDPH_HEAP_BLOCK pPrevAvailNode;
    PDPH_HEAP_BLOCK pBusyNode;
    SIZE_T nBytesAllocate;
    SIZE_T nBytesAccess;
    SIZE_T nActual;
    PVOID pVirtual;
    PVOID pReturn;
    PUCHAR pBlockHeader;
    ULONG Reason;
    BOOLEAN ForcePageHeap = FALSE;

    //
    // Reject extreme size requests.
    //

#if defined(_IA64_)
    if (Size > 0x8000000000000000) {
#else
    if (Size > 0x80000000) {
#endif

        VERIFIER_STOP (APPLICATION_VERIFIER_EXTREME_SIZE_REQUEST,
                       "extreme size request",
                       HeapHandle, "Heap handle",
                       Size, "Size requested",
                       0, "",
                       0, "");

        return NULL;
    }

    //
    // Check if it is time to do fault injection.
    //

    if (RtlpDphShouldFaultInject ()) {
        return NULL;
    }

    //
    // Check if we have a biased heap pointer which signals
    // a forced page heap allocation (no normal heap).
    //

    if (IS_BIASED_POINTER(HeapHandle)) {
        HeapHandle = UNBIAS_POINTER(HeapHandle);
        ForcePageHeap = TRUE;
    }

    HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );

    if (HeapRoot == NULL)
        return NULL;

    //
    // Get the heap lock, unprotect heap structures, etc.
    //

    RtlpDphPreProcessing (HeapRoot, Flags);

    try {

        //
        // We cannot validate the heap when a forced allocation into page heap
        // is requested due to accounting problems. Allocate is called in this way
        // from ReAllocate while the old node (just about to be freed) is in limbo
        // and is not accounted in any internal structure.
        //

        if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION) && !ForcePageHeap) {
            RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0);
        }

        Flags |= HeapRoot->HeapFlags;

        //
        // Figure out if we need to minimize memory impact. This
        // might trigger an allocation in the normal heap.
        //

        if (! ForcePageHeap) {

            if (! (RtlpDphShouldAllocateInPageHeap (HeapRoot, Size))) {

                pReturn = RtlpDphNormalHeapAllocate (
                    HeapRoot,
                    Flags,
                    Size);

                goto EXIT;
            }
        }

        //
        // Check the heap a little bit on checked builds.
        //

        DEBUG_CODE( RtlpDebugPageHeapVerifyIntegrity( HeapRoot ));

        pReturn = NULL;

        //
        // Validate requested size so we don't overflow
        // while rounding up size computations. We do this
        // after we've acquired the critsect so we can still
        // catch serialization problems.
        //

        if (Size > 0x7FFF0000) {
            OUT_OF_VM_BREAK( Flags, "Page heap: Invalid allocation size\n" );
            goto EXIT;
        }

        //
        // Determine number of pages needed for READWRITE portion
        // of allocation and add an extra page for the NO_ACCESS
        // memory beyond the READWRITE page(s).
        //

        nBytesAccess = ROUNDUP2( Size + sizeof(DPH_BLOCK_INFORMATION), PAGE_SIZE );
        nBytesAllocate = nBytesAccess + PAGE_SIZE;

        //
        // RtlpDebugPageHeapFindAvailableMem will first attempt to satisfy
        // the request from memory on the Available list. If that fails,
        // it will coalesce some of the Free list memory into the Available
        // list and try again. If that still fails, new VM is allocated and
        // added to the Available list. If that fails, the function will
        // finally give up and return NULL.
        //

        pAvailNode = RtlpDebugPageHeapFindAvailableMem(
            HeapRoot,
            nBytesAllocate,
            &pPrevAvailNode,
            TRUE
            );

        if (pAvailNode == NULL) {
            OUT_OF_VM_BREAK( Flags, "Page heap: Unable to allocate virtual memory\n" );
            goto EXIT;
        }

        //
        // Now can't call AllocateNode until pAvailNode is
        // adjusted and/or removed from Avail list since AllocateNode
        // might adjust the Avail list.
        //

        pVirtual = pAvailNode->pVirtualBlock;

        if (nBytesAccess > 0) {

            // With backward-overrun catching the guard page is at the
            // front, so the READWRITE window starts one page in.
            if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
                if (! RtlpDebugPageHeapProtectVM( (PUCHAR)pVirtual + PAGE_SIZE, nBytesAccess, PAGE_READWRITE )) {
                    goto EXIT;
                }
            }
            else {
                if (! RtlpDebugPageHeapProtectVM( pVirtual, nBytesAccess, PAGE_READWRITE )) {
                    goto EXIT;
                }
            }
        }

        //
        // If we use uncommitted ranges we need to decommit the protection
        // page at the end. Backward overruns flag disables smart memory
        // usage flag.
        //

        if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
            // nothing
        }
        else {

            if ((HeapRoot->ExtraFlags & PAGE_HEAP_SMART_MEMORY_USAGE)) {
                RtlpDebugPageHeapDecommitVM (
                    (PCHAR)pVirtual + nBytesAccess,
                    PAGE_SIZE);
            }
        }

        //
        // pAvailNode (still on avail list) points to block large enough
        // to satisfy request, but it might be large enough to split
        // into two blocks -- one for request, remainder leave on
        // avail list.
        //

        if (pAvailNode->nVirtualBlockSize > nBytesAllocate) {

            //
            // Adjust pVirtualBlock and nVirtualBlock size of existing
            // node in avail list. The node will still be in correct
            // address space order on the avail list. This saves having
            // to remove and then re-add node to avail list. Note since
            // we're changing sizes directly, we need to adjust the
            // avail and busy list counters manually.
            //
            // Note: since we're leaving at least one page on the
            // available list, we are guaranteed that AllocateNode
            // will not fail.
            //

            pAvailNode->pVirtualBlock += nBytesAllocate;
            pAvailNode->nVirtualBlockSize -= nBytesAllocate;
            HeapRoot->nAvailableAllocationBytesCommitted -= nBytesAllocate;

            pBusyNode = RtlpDebugPageHeapAllocateNode( HeapRoot );

            DEBUG_ASSERT( pBusyNode != NULL );

            pBusyNode->pVirtualBlock = pVirtual;
            pBusyNode->nVirtualBlockSize = nBytesAllocate;
        }
        else {

            //
            // Entire avail block is needed, so simply remove it from avail list.
            //

            RtlpDebugPageHeapRemoveFromAvailableList( HeapRoot, pAvailNode, pPrevAvailNode );

            pBusyNode = pAvailNode;
        }

        //
        // Now pBusyNode points to our committed virtual block.
        //
        // Compute the actual (aligned) user size and position the user
        // pointer: normally flush against the trailing guard page,
        // or just past the leading guard page for backward overruns.
        //

        if (HeapRoot->HeapFlags & HEAP_NO_ALIGNMENT)
            nActual = Size;
        else
            nActual = ROUNDUP2( Size, USER_ALIGNMENT );

        pBusyNode->nVirtualAccessSize = nBytesAccess;
        pBusyNode->nUserRequestedSize = Size;
        pBusyNode->nUserActualSize = nActual;

        if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
            pBusyNode->pUserAllocation = pBusyNode->pVirtualBlock
                + PAGE_SIZE;
        }
        else {
            pBusyNode->pUserAllocation = pBusyNode->pVirtualBlock
                + pBusyNode->nVirtualAccessSize
                - nActual;
        }

        pBusyNode->UserValue = NULL;
        pBusyNode->UserFlags = Flags & HEAP_SETTABLE_USER_FLAGS;

        //
        // RtlpDebugPageHeapAllocate gets called from RtlDebugAllocateHeap,
        // which gets called from RtlAllocateHeapSlowly, which gets called
        // from RtlAllocateHeap. To keep from wasting lots of stack trace
        // storage, we'll skip the bottom 3 entries, leaving RtlAllocateHeap
        // as the first recorded entry.
        //

        if ((HeapRoot->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) {

            pBusyNode->StackTrace = RtlpDphLogStackTrace(3);

            if (pBusyNode->StackTrace) {
                RtlTraceDatabaseLock (RtlpDphTraceDatabase);
                pBusyNode->StackTrace->UserCount += 1;
                pBusyNode->StackTrace->UserSize += pBusyNode->nUserRequestedSize;
                pBusyNode->StackTrace->UserContext = HeapRoot;
                RtlTraceDatabaseUnlock (RtlpDphTraceDatabase);
            }
        }
        else {
            pBusyNode->StackTrace = NULL;
        }

        RtlpDebugPageHeapPlaceOnBusyList( HeapRoot, pBusyNode );

        pReturn = pBusyNode->pUserAllocation;

        //
        // For requests the specify HEAP_ZERO_MEMORY, we'll fill the
        // user-requested portion of the block with zeros. For requests
        // that don't specify HEAP_ZERO_MEMORY, we fill the whole user block
        // with DPH_PAGE_BLOCK_INFIX.
        //

        if ((Flags & HEAP_ZERO_MEMORY)) {
            RtlZeroMemory( pBusyNode->pUserAllocation, Size );
        }
        else {
            RtlFillMemory( pBusyNode->pUserAllocation, Size, DPH_PAGE_BLOCK_INFIX);
        }

        //
        // Write the DPH_BLOCK_INFORMATION header just before the user
        // area (not applicable in backward-overrun mode where the page
        // before the block is the guard).
        //

        if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
            // nothing
        }
        else {
            RtlpDphWritePageHeapBlockInformation (
                HeapRoot,
                pBusyNode->pUserAllocation,
                Size,
                nBytesAccess);
        }
    }
    except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
                                              _exception_info(),
                                              HeapRoot,
                                              FALSE)) {

        //
        // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
        //

        ASSERT_UNEXPECTED_CODE_PATH ();
    }

    EXIT:

    //
    // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
    //

    RtlpDphPostProcessing (HeapRoot);

    if (pReturn == NULL) {
        IF_GENERATE_EXCEPTION (Flags, STATUS_NO_MEMORY);
    }

    return pReturn;
}
//
// RtlpDebugPageHeapFree
//
// Frees a block previously returned by the page heap. If the address
// is not found among the page heap's busy blocks the free is forwarded
// to the associated normal heap. On a page heap free the block's pages
// are re-protected NOACCESS (and optionally decommitted) so any later
// touch of the freed memory faults immediately.
//
// HeapHandle - page heap handle.
// Flags      - HEAP_* flags, merged with the heap's own flags.
// Address    - user pointer to free; NULL is accepted and ignored.
//
// Returns TRUE on success; FALSE on failure (after optionally raising
// STATUS_ACCESS_VIOLATION if Flags request exceptions).
//
BOOLEAN
RtlpDebugPageHeapFree(
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID Address
    )
{
    PDPH_HEAP_ROOT HeapRoot;
    PDPH_HEAP_BLOCK Node, Prev;
    BOOLEAN Success;
    PCH p;
    ULONG Reason;

    // NOTE(review): local `p' is declared but never used in this routine.

    //
    // Skip over null frees. These are valid in C++.
    //

    if (Address == NULL) {
        return TRUE;
    }

    HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
    if (HeapRoot == NULL)
        return FALSE;

    //
    // Get the heap lock, unprotect heap structures, etc.
    //

    RtlpDphPreProcessing (HeapRoot, Flags);

    try {

        if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION)) {
            RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0);
        }

        Flags |= HeapRoot->HeapFlags;

        Success = FALSE;

        Node = RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, &Prev );

        if (Node == NULL) {

            //
            // No wonder we did not find the block in the page heap
            // structures because the block was probably allocated
            // from the normal heap. Or there is a real bug.
            // If there is a bug NormalHeapFree will break into debugger.
            //

            Success = RtlpDphNormalHeapFree (
                HeapRoot,
                Flags,
                Address);

            goto EXIT;
        }

        //
        // Check if there are any orphan critical sections in the block to be freed.
        //

        RtlpCheckForCriticalSectionsInMemoryRange (Address,
                                                   Node->nUserRequestedSize,
                                                   NULL);

        //
        // If tail was allocated, make sure filler not overwritten
        //

        if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {

            // Leading guard page layout: protect the READWRITE window
            // that starts one page into the virtual block.
            if (Node->nVirtualAccessSize > 0) {
                RtlpDebugPageHeapProtectVM( Node->pVirtualBlock + PAGE_SIZE,
                                            Node->nVirtualAccessSize,
                                            PAGE_NOACCESS );
            }
        }
        else {

            // Verify the block header/fill patterns before tearing the
            // block down; report corruption if the check fails.
            if (! (RtlpDphIsPageHeapBlock (HeapRoot, Address, &Reason, TRUE))) {
                RtlpDphReportCorruptedBlock (HeapRoot,
                                             DPH_CONTEXT_FULL_PAGE_HEAP_FREE,
                                             Address,
                                             Reason);
            }

            if (Node->nVirtualAccessSize > 0) {

                //
                // Mark the block as freed. The information is gone if we
                // will decommit the region but will remain if smart memory
                // flag is not set and can help debug failures.
                //

                {
                    PDPH_BLOCK_INFORMATION Info = (PDPH_BLOCK_INFORMATION)(Node->pUserAllocation);
                    Info -= 1;
                    Info->StartStamp -= 1;
                    Info->EndStamp -= 1;
                }

                RtlpDebugPageHeapProtectVM( Node->pVirtualBlock,
                                            Node->nVirtualAccessSize,
                                            PAGE_NOACCESS );
            }
        }

        RtlpDebugPageHeapRemoveFromBusyList( HeapRoot, Node, Prev );

        //
        // If we use uncommitted ranges we need to decommit the memory
        // range now for the allocation. Note that the next page (guard)
        // was already decommitted when we allocated the block.
        //

        if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
            // nothing
        }
        else {

            if ((HeapRoot->ExtraFlags & PAGE_HEAP_SMART_MEMORY_USAGE)) {
                RtlpDebugPageHeapDecommitVM (
                    Node->pVirtualBlock,
                    Node->nVirtualAccessSize);
            }
        }

        RtlpDebugPageHeapPlaceOnFreeList( HeapRoot, Node );

        //
        // RtlpDebugPageHeapFree gets called from RtlDebugFreeHeap, which
        // gets called from RtlFreeHeapSlowly, which gets called from
        // RtlFreeHeap. To keep from wasting lots of stack trace storage,
        // we'll skip the bottom 3 entries, leaving RtlFreeHeap as the
        // first recorded entry.
        //

        if ((HeapRoot->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) {

            // Undo this block's contribution to the allocation trace
            // accounting, then record the free stack trace on the node.
            if (Node->StackTrace) {

                RtlTraceDatabaseLock (RtlpDphTraceDatabase);

                if (Node->StackTrace->UserCount > 0) {
                    Node->StackTrace->UserCount -= 1;
                }

                if (Node->StackTrace->UserSize >= Node->nUserRequestedSize) {
                    Node->StackTrace->UserSize -= Node->nUserRequestedSize;
                }

                RtlTraceDatabaseUnlock (RtlpDphTraceDatabase);
            }

            Node->StackTrace = RtlpDphLogStackTrace(3);
        }
        else {
            Node->StackTrace = NULL;
        }

        Success = TRUE;
    }
    except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
                                              _exception_info(),
                                              HeapRoot,
                                              FALSE)) {

        //
        // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
        //

        ASSERT_UNEXPECTED_CODE_PATH ();
    }

    EXIT:

    //
    // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
    //

    RtlpDphPostProcessing (HeapRoot);

    if (! Success) {
        IF_GENERATE_EXCEPTION( Flags, STATUS_ACCESS_VIOLATION );
    }

    return Success;
}
  2559. PVOID
  2560. RtlpDebugPageHeapReAllocate(
  2561. IN PVOID HeapHandle,
  2562. IN ULONG Flags,
  2563. IN PVOID Address,
  2564. IN SIZE_T Size
  2565. )
  2566. {
  2567. PDPH_HEAP_ROOT HeapRoot;
  2568. PDPH_HEAP_BLOCK OldNode, OldPrev, NewNode;
  2569. PVOID NewAddress;
  2570. PUCHAR p;
  2571. SIZE_T CopyDataSize;
  2572. ULONG SaveFlags;
  2573. BOOLEAN ReallocInNormalHeap = FALSE;
  2574. ULONG Reason;
  2575. BOOLEAN ForcePageHeap = FALSE;
  2576. BOOLEAN OriginalAllocationInPageHeap = FALSE;
  2577. //
  2578. // Reject extreme size requests.
  2579. //
  2580. #if defined(_IA64_)
  2581. if (Size > 0x8000000000000000) {
  2582. #else
  2583. if (Size > 0x80000000) {
  2584. #endif
  2585. VERIFIER_STOP (APPLICATION_VERIFIER_EXTREME_SIZE_REQUEST,
  2586. "extreme size request",
  2587. HeapHandle, "Heap handle",
  2588. Size, "Size requested",
  2589. 0, "",
  2590. 0, "");
  2591. return NULL;
  2592. }
  2593. //
  2594. // Check if it is time to do fault injection.
  2595. //
  2596. if (RtlpDphShouldFaultInject ()) {
  2597. return NULL;
  2598. }
  2599. //
  2600. // Check if we have a biased heap pointer which signals
  2601. // a forced page heap allocation (no normal heap).
  2602. //
  2603. if (IS_BIASED_POINTER(HeapHandle)) {
  2604. HeapHandle = UNBIAS_POINTER(HeapHandle);
  2605. ForcePageHeap = TRUE;
  2606. }
  2607. HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
  2608. if (HeapRoot == NULL)
  2609. return NULL;
  2610. //
  2611. // Get the heap lock, unprotect heap structures, etc.
  2612. //
  2613. RtlpDphPreProcessing (HeapRoot, Flags);
  2614. try {
  2615. if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION)) {
  2616. RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0);
  2617. }
  2618. Flags |= HeapRoot->HeapFlags;
  2619. NewAddress = NULL;
  2620. //
  2621. // Check Flags for non-moveable reallocation and fail it
  2622. // unconditionally. Apps that specify this flag should be
  2623. // prepared to deal with failure anyway.
  2624. //
  2625. if (Flags & HEAP_REALLOC_IN_PLACE_ONLY) {
  2626. goto EXIT;
  2627. }
  2628. //
  2629. // Validate requested size so we don't overflow
  2630. // while rounding up size computations. We do this
  2631. // after we've acquired the critsect so we can still
  2632. // catch serialization problems.
  2633. //
  2634. if (Size > 0x7FFF0000) {
  2635. OUT_OF_VM_BREAK( Flags, "Page heap: Invalid allocation size\n" );
  2636. goto EXIT;
  2637. }
  2638. OldNode = RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, &OldPrev );
  2639. if (OldNode) {
  2640. OriginalAllocationInPageHeap = TRUE;
  2641. }
  2642. if (OldNode == NULL) {
  2643. //
  2644. // No wonder we did not find the block in the page heap
  2645. // structures because the block was probably allocated
  2646. // from the normal heap. Or there is a real bug. If there
  2647. // is a bug NormalHeapReAllocate will break into debugger.
  2648. //
  2649. NewAddress = RtlpDphNormalHeapReAllocate (
  2650. HeapRoot,
  2651. Flags,
  2652. Address,
  2653. Size);
  2654. goto EXIT;
  2655. }
  2656. //
  2657. // If tail was allocated, make sure filler not overwritten
  2658. //
  2659. if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
  2660. // nothing
  2661. }
  2662. else {
  2663. if (! (RtlpDphIsPageHeapBlock (HeapRoot, Address, &Reason, TRUE))) {
  2664. RtlpDphReportCorruptedBlock (HeapRoot,
  2665. DPH_CONTEXT_FULL_PAGE_HEAP_REALLOC,
  2666. Address,
  2667. Reason);
  2668. }
  2669. }
  2670. //
  2671. // Before allocating a new block, remove the old block from
  2672. // the busy list. When we allocate the new block, the busy
  2673. // list pointers will change, possibly leaving our acquired
  2674. // Prev pointer invalid.
  2675. //
  2676. RtlpDebugPageHeapRemoveFromBusyList( HeapRoot, OldNode, OldPrev );
  2677. //
  2678. // Allocate new memory for new requested size. Use try/except
  2679. // to trap exception if Flags caused out-of-memory exception.
  2680. //
  2681. try {
  2682. if (!ForcePageHeap && !(RtlpDphShouldAllocateInPageHeap (HeapRoot, Size))) {
  2683. NewAddress = RtlpDphNormalHeapAllocate (
  2684. HeapRoot,
  2685. Flags,
  2686. Size);
  2687. ReallocInNormalHeap = TRUE;
  2688. }
  2689. else {
  2690. //
  2691. // Force the allocation in page heap by biasing
  2692. // the heap handle. Validate the heap here since when we use
  2693. // biased pointers validation inside Allocate is disabled.
  2694. //
  2695. if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION)) {
  2696. RtlpDphInternalValidatePageHeap (HeapRoot, OldNode->pVirtualBlock, OldNode->nVirtualBlockSize);
  2697. }
  2698. NewAddress = RtlpDebugPageHeapAllocate(
  2699. BIAS_POINTER(HeapHandle),
  2700. Flags,
  2701. Size);
  2702. //
  2703. // When we get back from the page heap call we will get
  2704. // back read only meta data that we need to make read write.
  2705. //
  2706. UNPROTECT_HEAP_STRUCTURES( HeapRoot );
  2707. if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION)) {
  2708. RtlpDphInternalValidatePageHeap (HeapRoot, OldNode->pVirtualBlock, OldNode->nVirtualBlockSize);
  2709. }
  2710. ReallocInNormalHeap = FALSE;
  2711. }
  2712. }
  2713. except( EXCEPTION_EXECUTE_HANDLER ) {
  2714. }
  2715. //
  2716. // We managed to make a new allocation (normal or page heap).
  2717. // Now we need to copy from old to new all sorts of stuff
  2718. // (contents, user flags/values).
  2719. //
  2720. if (NewAddress) {
  2721. //
  2722. // Copy old block contents into the new node.
  2723. //
  2724. CopyDataSize = OldNode->nUserRequestedSize;
  2725. if (CopyDataSize > Size) {
  2726. CopyDataSize = Size;
  2727. }
  2728. if (CopyDataSize > 0) {
  2729. RtlCopyMemory(
  2730. NewAddress,
  2731. Address,
  2732. CopyDataSize
  2733. );
  2734. }
  2735. //
  2736. // If new allocation was done in page heap we need to detect the new node
  2737. // and copy over user flags/values.
  2738. //
  2739. if (! ReallocInNormalHeap) {
  2740. NewNode = RtlpDebugPageHeapFindBusyMem( HeapRoot, NewAddress, NULL );
  2741. //
  2742. // This block could not be in normal heap therefore from this
  2743. // respect the call above should always succeed.
  2744. //
  2745. DEBUG_ASSERT( NewNode != NULL );
  2746. NewNode->UserValue = OldNode->UserValue;
  2747. NewNode->UserFlags = ( Flags & HEAP_SETTABLE_USER_FLAGS ) ?
  2748. ( Flags & HEAP_SETTABLE_USER_FLAGS ) :
  2749. OldNode->UserFlags;
  2750. }
  2751. //
  2752. // We need to cover the case where old allocation was in page heap.
  2753. // In this case we still need to cleanup the old node and
  2754. // insert it back in free list. Actually the way the code is written
  2755. // we take this code path only if original allocation was in page heap.
  2756. // This is the reason for the assert.
  2757. //
  2758. RETAIL_ASSERT (OriginalAllocationInPageHeap);
  2759. if (OriginalAllocationInPageHeap) {
  2760. if (OldNode->nVirtualAccessSize > 0) {
  2761. RtlpDebugPageHeapProtectVM( OldNode->pVirtualBlock,
  2762. OldNode->nVirtualAccessSize,
  2763. PAGE_NOACCESS );
  2764. }
  2765. //
  2766. // If we use uncommitted ranges we need to decommit the memory
  2767. // range now. Note that the next page (guard) was already decommitted
  2768. // when we made the allocation.
  2769. //
  2770. if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
  2771. // nothing
  2772. }
  2773. else {
  2774. if ((HeapRoot->ExtraFlags & PAGE_HEAP_SMART_MEMORY_USAGE)) {
  2775. RtlpDebugPageHeapDecommitVM (
  2776. OldNode->pVirtualBlock,
  2777. OldNode->nVirtualAccessSize);
  2778. }
  2779. }
  2780. RtlpDebugPageHeapPlaceOnFreeList( HeapRoot, OldNode );
  2781. //
  2782. // RtlpDebugPageHeapReAllocate gets called from RtlDebugReAllocateHeap,
  2783. // which gets called from RtlReAllocateHeap. To keep from wasting
  2784. // lots of stack trace storage, we'll skip the bottom 2 entries,
  2785. // leaving RtlReAllocateHeap as the first recorded entry in the
  2786. // freed stack trace.
  2787. //
  2788. // Note. For realloc we need to do the accounting for free in the
  2789. // trace block. The accounting for alloc is done in the real
  2790. // alloc operation which always happens for page heap reallocs.
  2791. //
  2792. if ((HeapRoot->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) {
  2793. if (OldNode->StackTrace) {
  2794. RtlTraceDatabaseLock (RtlpDphTraceDatabase);
  2795. if (OldNode->StackTrace->UserCount > 0) {
  2796. OldNode->StackTrace->UserCount -= 1;
  2797. }
  2798. if (OldNode->StackTrace->UserSize >= OldNode->nUserRequestedSize) {
  2799. OldNode->StackTrace->UserSize -= OldNode->nUserRequestedSize;
  2800. }
  2801. RtlTraceDatabaseUnlock (RtlpDphTraceDatabase);
  2802. }
  2803. OldNode->StackTrace = RtlpDphLogStackTrace(2);
  2804. }
  2805. else {
  2806. OldNode->StackTrace = NULL;
  2807. }
  2808. }
  2809. }
  2810. else {
  2811. //
  2812. // Failed to allocate a new block. Return old block to busy list.
  2813. //
  2814. if (OriginalAllocationInPageHeap) {
  2815. RtlpDebugPageHeapPlaceOnBusyList( HeapRoot, OldNode );
  2816. }
  2817. }
  2818. }
  2819. except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
  2820. _exception_info(),
  2821. HeapRoot,
  2822. FALSE)) {
  2823. //
  2824. // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
  2825. //
  2826. ASSERT_UNEXPECTED_CODE_PATH ();
  2827. }
  2828. EXIT:
  2829. //
  2830. // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
  2831. //
  2832. RtlpDphPostProcessing (HeapRoot);
  2833. if (NewAddress == NULL) {
  2834. IF_GENERATE_EXCEPTION( Flags, STATUS_NO_MEMORY );
  2835. }
  2836. return NewAddress;
  2837. }
  2838. #if (( DPH_CAPTURE_STACK_TRACE ) && ( i386 ) && ( FPO ))
  2839. #pragma optimize( "", on ) // restore original optimizations
  2840. #endif
//
// RtlpDebugPageHeapDestroy
//
// Destroys a page heap: flushes delayed-free blocks belonging to the
// associated normal heap, checks every busy allocation for corruption
// and leaked critical sections, unlinks the heap from the process-wide
// page heap list, and releases all virtual memory owned by the heap.
// Always returns NULL (matching RtlDestroyHeap's convention).
// Destroying the process heap is refused with a verifier stop.
//
PVOID
RtlpDebugPageHeapDestroy(
    IN PVOID HeapHandle
    )
{
    PDPH_HEAP_ROOT HeapRoot;
    PDPH_HEAP_ROOT PrevHeapRoot;
    PDPH_HEAP_ROOT NextHeapRoot;
    PDPH_HEAP_BLOCK Node;
    PDPH_HEAP_BLOCK Next;
    ULONG Flags;
    PUCHAR p;
    ULONG Reason;
    PVOID NormalHeap;

    //
    // Refuse to destroy the process heap; this is always an
    // application bug worth stopping on.
    //
    if (HeapHandle == RtlProcessHeap()) {
        VERIFIER_STOP (APPLICATION_VERIFIER_DESTROY_PROCESS_HEAP,
                       "attempt to destroy process heap",
                       HeapHandle, "Process heap handle",
                       0, "", 0, "", 0, "");
        return NULL;
    }

    HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
    if (HeapRoot == NULL)
        return NULL;

    // silviuc: should find another way to detect destroy while using
    // Flags = HeapRoot->HeapFlags | HEAP_NO_SERIALIZE;
    Flags = HeapRoot->HeapFlags;

    //
    // Get the heap lock, unprotect heap structures, etc.
    //
    RtlpDphPreProcessing (HeapRoot, Flags);

    try {

        //
        // Save normal heap pointer for later.
        //
        NormalHeap = HeapRoot->NormalHeap;

        //
        // Free all blocks in the delayed free queue that belong to the
        // normal heap just about to be destroyed. Note that this is
        // not a bug. The application freed the blocks correctly but
        // we delayed the free operation.
        //
        RtlpDphFreeDelayedBlocksFromHeap (HeapRoot, NormalHeap);

        //
        // Walk all busy allocations and check for tail fill corruption
        //
        Node = HeapRoot->pBusyAllocationListHead;
        while (Node) {

            //
            // In backward-overruns mode there is no suffix pattern to
            // verify, so the header check is skipped.
            //
            if (! (HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
                if (! (RtlpDphIsPageHeapBlock (HeapRoot, Node->pUserAllocation, &Reason, TRUE))) {
                    RtlpDphReportCorruptedBlock (HeapRoot,
                                                 DPH_CONTEXT_FULL_PAGE_HEAP_DESTROY,
                                                 Node->pUserAllocation,
                                                 Reason);
                }
            }

            //
            // Check if this active block contains a critical section. Since the
            // block will be freed this will leak a critical section. Unfortunately
            // we cannot do the same check for light page heap blocks due to the
            // loose interaction between page heap and NT heap (we want to keep it
            // this way to avoid compatibility issues).
            //
            RtlpCheckForCriticalSectionsInMemoryRange (Node->pUserAllocation,
                                                       Node->nUserRequestedSize,
                                                       NULL);

            //
            // Move to next node.
            //
            Node = Node->pNextAlloc;
        }

        //
        // Remove this heap entry from the process heap linked list.
        // Neighbors' metadata pages must be unprotected around the
        // pointer updates because heap structures are normally read-only.
        //
        RtlEnterCriticalSection( &RtlpDphHeapListCriticalSection );

        if (HeapRoot->pPrevHeapRoot) {
            UNPROTECT_HEAP_STRUCTURES( HeapRoot->pPrevHeapRoot );
            HeapRoot->pPrevHeapRoot->pNextHeapRoot = HeapRoot->pNextHeapRoot;
            PROTECT_HEAP_STRUCTURES( HeapRoot->pPrevHeapRoot );
        }
        else {
            RtlpDphHeapListHead = HeapRoot->pNextHeapRoot;
        }

        if (HeapRoot->pNextHeapRoot) {
            UNPROTECT_HEAP_STRUCTURES( HeapRoot->pNextHeapRoot );
            HeapRoot->pNextHeapRoot->pPrevHeapRoot = HeapRoot->pPrevHeapRoot;
            PROTECT_HEAP_STRUCTURES( HeapRoot->pNextHeapRoot );
        }
        else {
            RtlpDphHeapListTail = HeapRoot->pPrevHeapRoot;
        }

        RtlpDphHeapListCount -= 1;
        RtlLeaveCriticalSection( &RtlpDphHeapListCriticalSection );

        //
        // Must release critical section before deleting it; otherwise,
        // checked build Teb->CountOfOwnedCriticalSections gets out of sync.
        //
        RtlLeaveCriticalSection( HeapRoot->HeapCritSect );
        RtlDeleteCriticalSection( HeapRoot->HeapCritSect );

        //
        // This is weird. A virtual block might contain storage for
        // one of the nodes necessary to walk this list. In fact,
        // we're guaranteed that the root node contains at least one
        // virtual alloc node.
        //
        // Each time we alloc new VM, we make that the head of the
        // of the VM list, like a LIFO structure. I think we're ok
        // because no VM list node should be on a subsequently alloc'd
        // VM -- only a VM list entry might be on its own memory (as
        // is the case for the root node). We read pNode->pNextAlloc
        // before releasing the VM in case pNode existed on that VM.
        // I think this is safe -- as long as the VM list is LIFO and
        // we don't do any list reorganization.
        //
        Node = HeapRoot->pVirtualStorageListHead;
        while (Node) {
            Next = Node->pNextAlloc;
            if (! RtlpDebugPageHeapReleaseVM( Node->pVirtualBlock )) {
                VERIFIER_STOP (APPLICATION_VERIFIER_INTERNAL_ERROR,
                               "unable to release virtual memory",
                               0, "", 0, "", 0, "", 0, "");
            }
            Node = Next;
        }

        //
        // Destroy normal heap. Note that this will not make a recursive
        // call into this function because this is not a page heap and
        // code in NT heap manager will detect this.
        //
        RtlDestroyHeap (NormalHeap);
    }
    except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
                                              _exception_info(),
                                              NULL,
                                              FALSE)) {
        //
        // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
        //
        ASSERT_UNEXPECTED_CODE_PATH ();
    }

    //
    // That's it. All the VM, including the root node, should now
    // be released. RtlDestroyHeap always returns NULL.
    //
    // Note: no RtlpDphPostProcessing here -- the heap lock and all
    // heap structures were torn down inside the try body.
    //
#if 0 // ISSUE: SilviuC: use DbgPrintEx instead.
    DbgPrint( "Page heap: process 0x%X destroyed heap @ %p (%p)\n",
              HandleToUlong(NtCurrentTeb()->ClientId.UniqueProcess),
              HeapRoot,
              NormalHeap);
#endif
    return NULL;
}
//
// RtlpDebugPageHeapSize
//
// Returns the user-requested size of a busy heap block. If the block
// is not found among the page heap busy allocations the query is
// forwarded to the light (normal) heap side. On failure returns
// (SIZE_T)-1 and optionally raises STATUS_ACCESS_VIOLATION if the
// combined flags request exception generation.
//
SIZE_T
RtlpDebugPageHeapSize(
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID Address
    )
{
    PDPH_HEAP_ROOT HeapRoot;
    PDPH_HEAP_BLOCK Node;
    SIZE_T Size;

    // (SIZE_T)-1 is the sentinel for "lookup failed"; the unsigned
    // wraparound is intentional and is tested again at EXIT.
    Size = -1;

    HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
    if (HeapRoot == NULL) {
        return Size;
    }

    Flags |= HeapRoot->HeapFlags;

    //
    // Get the heap lock, unprotect heap structures, etc.
    //
    RtlpDphPreProcessing (HeapRoot, Flags);

    try {
        Node = RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, NULL );
        if (Node == NULL) {
            //
            // No wonder we did not find the block in the page heap
            // structures because the block was probably allocated
            // from the normal heap. Or there is a real bug. If there
            // is a bug NormalHeapSize will break into debugger.
            //
            Size = RtlpDphNormalHeapSize (
                HeapRoot,
                Flags,
                Address);
            goto EXIT;
        }
        else {
            Size = Node->nUserRequestedSize;
        }
    }
    except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
                                              _exception_info(),
                                              HeapRoot,
                                              TRUE)) {
        //
        // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
        //
        ASSERT_UNEXPECTED_CODE_PATH ();
    }

    EXIT:

    //
    // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
    //
    RtlpDphPostProcessing (HeapRoot);

    if (Size == -1) {
        IF_GENERATE_EXCEPTION( Flags, STATUS_ACCESS_VIOLATION );
    }

    return Size;
}
  3051. ULONG
  3052. RtlpDebugPageHeapGetProcessHeaps(
  3053. ULONG NumberOfHeaps,
  3054. PVOID *ProcessHeaps
  3055. )
  3056. {
  3057. PDPH_HEAP_ROOT HeapRoot;
  3058. ULONG Count;
  3059. //
  3060. // Although we'd expect GetProcessHeaps never to be called
  3061. // before at least the very first heap creation, we should
  3062. // still be safe and initialize the critical section if
  3063. // necessary.
  3064. //
  3065. if (! RtlpDphHeapListHasBeenInitialized) {
  3066. RtlpDphHeapListHasBeenInitialized = TRUE;
  3067. RtlInitializeCriticalSection( &RtlpDphHeapListCriticalSection );
  3068. }
  3069. RtlEnterCriticalSection( &RtlpDphHeapListCriticalSection );
  3070. if (RtlpDphHeapListCount <= NumberOfHeaps) {
  3071. for (HeapRoot = RtlpDphHeapListHead, Count = 0;
  3072. HeapRoot != NULL;
  3073. HeapRoot = HeapRoot->pNextHeapRoot, Count += 1) {
  3074. *ProcessHeaps++ = HEAP_HANDLE_FROM_ROOT( HeapRoot );
  3075. }
  3076. if (Count != RtlpDphHeapListCount) {
  3077. VERIFIER_STOP (APPLICATION_VERIFIER_UNKNOWN_ERROR,
  3078. "process heap list count is wrong",
  3079. Count, "Actual count",
  3080. RtlpDphHeapListCount, "Page heap count",
  3081. 0, "",
  3082. 0, "");
  3083. }
  3084. }
  3085. else {
  3086. //
  3087. // User's buffer is too small. Return number of entries
  3088. // necessary for subsequent call to succeed. Buffer
  3089. // remains untouched.
  3090. //
  3091. Count = RtlpDphHeapListCount;
  3092. }
  3093. RtlLeaveCriticalSection( &RtlpDphHeapListCriticalSection );
  3094. return Count;
  3095. }
  3096. ULONG
  3097. RtlpDebugPageHeapCompact(
  3098. IN PVOID HeapHandle,
  3099. IN ULONG Flags
  3100. )
  3101. {
  3102. PDPH_HEAP_ROOT HeapRoot;
  3103. HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
  3104. if (HeapRoot == NULL)
  3105. return 0;
  3106. Flags |= HeapRoot->HeapFlags;
  3107. RtlpDebugPageHeapEnterCritSect( HeapRoot, Flags );
  3108. //
  3109. // Don't do anything, but we did want to acquire the critsect
  3110. // in case this was called with HEAP_NO_SERIALIZE while another
  3111. // thread is in the heap code.
  3112. //
  3113. RtlpDebugPageHeapLeaveCritSect( HeapRoot );
  3114. return 0;
  3115. }
  3116. BOOLEAN
  3117. RtlpDebugPageHeapValidate(
  3118. IN PVOID HeapHandle,
  3119. IN ULONG Flags,
  3120. IN PVOID Address
  3121. )
  3122. {
  3123. PDPH_HEAP_ROOT HeapRoot;
  3124. PDPH_HEAP_BLOCK Node;
  3125. BOOLEAN Result = FALSE;
  3126. HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
  3127. if (HeapRoot == NULL)
  3128. return FALSE;
  3129. Flags |= HeapRoot->HeapFlags;
  3130. //
  3131. // Get the heap lock, unprotect heap structures, etc.
  3132. //
  3133. RtlpDphPreProcessing (HeapRoot, Flags);
  3134. try {
  3135. Node = Address ? RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, NULL ) : NULL;
  3136. if (Node == NULL) {
  3137. Result = RtlpDphNormalHeapValidate (
  3138. HeapRoot,
  3139. Flags,
  3140. Address);
  3141. }
  3142. }
  3143. except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
  3144. _exception_info(),
  3145. HeapRoot,
  3146. TRUE)) {
  3147. //
  3148. // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
  3149. //
  3150. ASSERT_UNEXPECTED_CODE_PATH ();
  3151. }
  3152. //
  3153. // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
  3154. //
  3155. RtlpDphPostProcessing (HeapRoot);
  3156. if (Address) {
  3157. if (Node) {
  3158. return TRUE;
  3159. }
  3160. else {
  3161. return Result;
  3162. }
  3163. }
  3164. else {
  3165. return TRUE;
  3166. }
  3167. }
  3168. NTSTATUS
  3169. RtlpDebugPageHeapWalk(
  3170. IN PVOID HeapHandle,
  3171. IN OUT PRTL_HEAP_WALK_ENTRY Entry
  3172. )
  3173. {
  3174. #if DBG
  3175. DbgPrint ("Page heap: warning: failing HeapWalk call with STATUS_NOT_IMPLEMENTED.\n");
  3176. #endif
  3177. return STATUS_NOT_IMPLEMENTED;
  3178. }
  3179. BOOLEAN
  3180. RtlpDebugPageHeapLock(
  3181. IN PVOID HeapHandle
  3182. )
  3183. {
  3184. PDPH_HEAP_ROOT HeapRoot;
  3185. HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
  3186. if (HeapRoot == NULL) {
  3187. return FALSE;
  3188. }
  3189. RtlpDebugPageHeapEnterCritSect( HeapRoot, HeapRoot->HeapFlags );
  3190. return TRUE;
  3191. }
  3192. BOOLEAN
  3193. RtlpDebugPageHeapUnlock(
  3194. IN PVOID HeapHandle
  3195. )
  3196. {
  3197. PDPH_HEAP_ROOT HeapRoot;
  3198. HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
  3199. if (HeapRoot == NULL) {
  3200. return FALSE;
  3201. }
  3202. RtlpDebugPageHeapLeaveCritSect( HeapRoot );
  3203. return TRUE;
  3204. }
  3205. BOOLEAN
  3206. RtlpDebugPageHeapSetUserValue(
  3207. IN PVOID HeapHandle,
  3208. IN ULONG Flags,
  3209. IN PVOID Address,
  3210. IN PVOID UserValue
  3211. )
  3212. {
  3213. PDPH_HEAP_ROOT HeapRoot;
  3214. PDPH_HEAP_BLOCK Node;
  3215. BOOLEAN Success;
  3216. Success = FALSE;
  3217. HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
  3218. if ( HeapRoot == NULL )
  3219. return Success;
  3220. Flags |= HeapRoot->HeapFlags;
  3221. //
  3222. // Get the heap lock, unprotect heap structures, etc.
  3223. //
  3224. RtlpDphPreProcessing (HeapRoot, Flags);
  3225. try {
  3226. Node = RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, NULL );
  3227. if ( Node == NULL ) {
  3228. //
  3229. // If we cannot find the node in page heap structures it might be
  3230. // because it has been allocated from normal heap.
  3231. //
  3232. Success = RtlpDphNormalHeapSetUserValue (
  3233. HeapRoot,
  3234. Flags,
  3235. Address,
  3236. UserValue);
  3237. goto EXIT;
  3238. }
  3239. else {
  3240. Node->UserValue = UserValue;
  3241. Success = TRUE;
  3242. }
  3243. }
  3244. except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
  3245. _exception_info(),
  3246. HeapRoot,
  3247. FALSE)) {
  3248. //
  3249. // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
  3250. //
  3251. ASSERT_UNEXPECTED_CODE_PATH ();
  3252. }
  3253. EXIT:
  3254. //
  3255. // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
  3256. //
  3257. RtlpDphPostProcessing (HeapRoot);
  3258. return Success;
  3259. }
  3260. BOOLEAN
  3261. RtlpDebugPageHeapGetUserInfo(
  3262. IN PVOID HeapHandle,
  3263. IN ULONG Flags,
  3264. IN PVOID Address,
  3265. OUT PVOID* UserValue,
  3266. OUT PULONG UserFlags
  3267. )
  3268. {
  3269. PDPH_HEAP_ROOT HeapRoot;
  3270. PDPH_HEAP_BLOCK Node;
  3271. BOOLEAN Success;
  3272. Success = FALSE;
  3273. HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
  3274. if ( HeapRoot == NULL )
  3275. return Success;
  3276. Flags |= HeapRoot->HeapFlags;
  3277. //
  3278. // Get the heap lock, unprotect heap structures, etc.
  3279. //
  3280. RtlpDphPreProcessing (HeapRoot, Flags);
  3281. try {
  3282. Node = RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, NULL );
  3283. if ( Node == NULL ) {
  3284. //
  3285. // If we cannot find the node in page heap structures it might be
  3286. // because it has been allocated from normal heap.
  3287. //
  3288. Success = RtlpDphNormalHeapGetUserInfo (
  3289. HeapRoot,
  3290. Flags,
  3291. Address,
  3292. UserValue,
  3293. UserFlags);
  3294. goto EXIT;
  3295. }
  3296. else {
  3297. if ( UserValue != NULL )
  3298. *UserValue = Node->UserValue;
  3299. if ( UserFlags != NULL )
  3300. *UserFlags = Node->UserFlags;
  3301. Success = TRUE;
  3302. }
  3303. }
  3304. except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
  3305. _exception_info(),
  3306. HeapRoot,
  3307. FALSE)) {
  3308. //
  3309. // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
  3310. //
  3311. ASSERT_UNEXPECTED_CODE_PATH ();
  3312. }
  3313. EXIT:
  3314. //
  3315. // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
  3316. //
  3317. RtlpDphPostProcessing (HeapRoot);
  3318. return Success;
  3319. }
  3320. BOOLEAN
  3321. RtlpDebugPageHeapSetUserFlags(
  3322. IN PVOID HeapHandle,
  3323. IN ULONG Flags,
  3324. IN PVOID Address,
  3325. IN ULONG UserFlagsReset,
  3326. IN ULONG UserFlagsSet
  3327. )
  3328. {
  3329. PDPH_HEAP_ROOT HeapRoot;
  3330. PDPH_HEAP_BLOCK Node;
  3331. BOOLEAN Success;
  3332. Success = FALSE;
  3333. HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
  3334. if ( HeapRoot == NULL )
  3335. return Success;
  3336. Flags |= HeapRoot->HeapFlags;
  3337. //
  3338. // Get the heap lock, unprotect heap structures, etc.
  3339. //
  3340. RtlpDphPreProcessing (HeapRoot, Flags);
  3341. try {
  3342. Node = RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, NULL );
  3343. if ( Node == NULL ) {
  3344. //
  3345. // If we cannot find the node in page heap structures it might be
  3346. // because it has been allocated from normal heap.
  3347. //
  3348. Success = RtlpDphNormalHeapSetUserFlags (
  3349. HeapRoot,
  3350. Flags,
  3351. Address,
  3352. UserFlagsReset,
  3353. UserFlagsSet);
  3354. goto EXIT;
  3355. }
  3356. else {
  3357. Node->UserFlags &= ~( UserFlagsReset );
  3358. Node->UserFlags |= UserFlagsSet;
  3359. Success = TRUE;
  3360. }
  3361. }
  3362. except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
  3363. _exception_info(),
  3364. HeapRoot,
  3365. FALSE)) {
  3366. //
  3367. // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
  3368. //
  3369. ASSERT_UNEXPECTED_CODE_PATH ();
  3370. }
  3371. EXIT:
  3372. //
  3373. // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
  3374. //
  3375. RtlpDphPostProcessing (HeapRoot);
  3376. return Success;
  3377. }
  3378. BOOLEAN
  3379. RtlpDebugPageHeapSerialize(
  3380. IN PVOID HeapHandle
  3381. )
  3382. {
  3383. PDPH_HEAP_ROOT HeapRoot;
  3384. HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
  3385. if ( HeapRoot == NULL )
  3386. return FALSE;
  3387. //
  3388. // Get the heap lock, unprotect heap structures, etc.
  3389. //
  3390. RtlpDphPreProcessing (HeapRoot, 0);
  3391. HeapRoot->HeapFlags &= ~HEAP_NO_SERIALIZE;
  3392. //
  3393. // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
  3394. //
  3395. RtlpDphPostProcessing (HeapRoot);
  3396. return TRUE;
  3397. }
  3398. NTSTATUS
  3399. RtlpDebugPageHeapExtend(
  3400. IN PVOID HeapHandle,
  3401. IN ULONG Flags,
  3402. IN PVOID Base,
  3403. IN SIZE_T Size
  3404. )
  3405. {
  3406. return STATUS_SUCCESS;
  3407. }
  3408. NTSTATUS
  3409. RtlpDebugPageHeapZero(
  3410. IN PVOID HeapHandle,
  3411. IN ULONG Flags
  3412. )
  3413. {
  3414. return STATUS_SUCCESS;
  3415. }
  3416. NTSTATUS
  3417. RtlpDebugPageHeapReset(
  3418. IN PVOID HeapHandle,
  3419. IN ULONG Flags
  3420. )
  3421. {
  3422. return STATUS_SUCCESS;
  3423. }
  3424. NTSTATUS
  3425. RtlpDebugPageHeapUsage(
  3426. IN PVOID HeapHandle,
  3427. IN ULONG Flags,
  3428. IN OUT PRTL_HEAP_USAGE Usage
  3429. )
  3430. {
  3431. PDPH_HEAP_ROOT HeapRoot;
  3432. //
  3433. // Partial implementation since this information is kind of meaningless.
  3434. //
  3435. HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
  3436. if ( HeapRoot == NULL )
  3437. return STATUS_INVALID_PARAMETER;
  3438. if ( Usage->Length != sizeof( RTL_HEAP_USAGE ))
  3439. return STATUS_INFO_LENGTH_MISMATCH;
  3440. memset( Usage, 0, sizeof( RTL_HEAP_USAGE ));
  3441. Usage->Length = sizeof( RTL_HEAP_USAGE );
  3442. //
  3443. // Get the heap lock, unprotect heap structures, etc.
  3444. //
  3445. RtlpDphPreProcessing (HeapRoot, Flags);
  3446. try {
  3447. Usage->BytesAllocated = HeapRoot->nBusyAllocationBytesAccessible;
  3448. Usage->BytesCommitted = HeapRoot->nVirtualStorageBytes;
  3449. Usage->BytesReserved = HeapRoot->nVirtualStorageBytes;
  3450. Usage->BytesReservedMaximum = HeapRoot->nVirtualStorageBytes;
  3451. }
  3452. except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
  3453. _exception_info(),
  3454. HeapRoot,
  3455. FALSE)) {
  3456. //
  3457. // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
  3458. //
  3459. ASSERT_UNEXPECTED_CODE_PATH ();
  3460. }
  3461. //
  3462. // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
  3463. //
  3464. RtlpDphPostProcessing (HeapRoot);
  3465. return STATUS_SUCCESS;
  3466. }
  3467. BOOLEAN
  3468. RtlpDebugPageHeapIsLocked(
  3469. IN PVOID HeapHandle
  3470. )
  3471. {
  3472. PDPH_HEAP_ROOT HeapRoot;
  3473. HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
  3474. if ( HeapRoot == NULL )
  3475. return FALSE;
  3476. if ( RtlTryEnterCriticalSection( HeapRoot->HeapCritSect )) {
  3477. RtlLeaveCriticalSection( HeapRoot->HeapCritSect );
  3478. return FALSE;
  3479. }
  3480. else {
  3481. return TRUE;
  3482. }
  3483. }
/////////////////////////////////////////////////////////////////////
/////////////////////////// Page heap vs. normal heap decision making
/////////////////////////////////////////////////////////////////////

//
// Running counters of how allocations were routed:
// 0 - full page heap
// 1 - light page heap
//
// Updated with InterlockedIncrement in RtlpDphShouldAllocateInPageHeap;
// diagnostic only.
//
LONG RtlpDphBlockDistribution[2];
  3492. BOOLEAN
  3493. RtlpDphShouldAllocateInPageHeap (
  3494. PDPH_HEAP_ROOT HeapRoot,
  3495. SIZE_T Size
  3496. )
  3497. /*++
  3498. Routine Description:
  3499. This routine decides if the current allocation should be made in full
  3500. page heap or light page heap.
  3501. Parameters:
  3502. HeapRoot - heap descriptor for the current allocation request.
  3503. Size - size of the current allocation request.
  3504. Return Value:
  3505. True if this should be a full page heap allocation and false otherwise.
  3506. --*/
  3507. {
  3508. SYSTEM_PERFORMANCE_INFORMATION PerfInfo;
  3509. NTSTATUS Status;
  3510. ULONG Random;
  3511. ULONG Percentage;
  3512. //
  3513. // If page heap is not enabled => normal heap.
  3514. //
  3515. if (! (HeapRoot->ExtraFlags & PAGE_HEAP_ENABLE_PAGE_HEAP)) {
  3516. InterlockedIncrement (&(RtlpDphBlockDistribution[1]));
  3517. return FALSE;
  3518. }
  3519. //
  3520. // If call not generated from one of the target dlls => normal heap
  3521. // We do this check up front to avoid the slow path where we check
  3522. // if VM limits have been hit.
  3523. //
  3524. else if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_DLL_NAMES)) {
  3525. //
  3526. // We return false. The calls generated from target
  3527. // dlls will never get into this function and therefore
  3528. // we just return false signalling that we do not want
  3529. // page heap verification for the rest of the world.
  3530. //
  3531. InterlockedIncrement (&(RtlpDphBlockDistribution[1]));
  3532. return FALSE;
  3533. }
  3534. //
  3535. // Check memory availability. If we tend to exhaust virtual space
  3536. // or page file then we will go to the normal heap.
  3537. //
  3538. else if (RtlpDphVmLimitCanUsePageHeap() == FALSE) {
  3539. InterlockedIncrement (&(RtlpDphBlockDistribution[1]));
  3540. return FALSE;
  3541. }
  3542. //
  3543. // If in size range => page heap
  3544. //
  3545. else if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_SIZE_RANGE)) {
  3546. if (Size >= RtlpDphSizeRangeStart && Size <= RtlpDphSizeRangeEnd) {
  3547. InterlockedIncrement (&(RtlpDphBlockDistribution[0]));
  3548. return TRUE;
  3549. }
  3550. else {
  3551. InterlockedIncrement (&(RtlpDphBlockDistribution[1]));
  3552. return FALSE;
  3553. }
  3554. }
  3555. //
  3556. // If in dll range => page heap
  3557. //
  3558. else if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_DLL_RANGE)) {
  3559. PVOID StackTrace[32];
  3560. ULONG Count;
  3561. ULONG Index;
  3562. ULONG Hash;
  3563. Count = RtlCaptureStackBackTrace (
  3564. 1,
  3565. 32,
  3566. StackTrace,
  3567. &Hash);
  3568. //
  3569. // (SilviuC): should read DllRange as PVOIDs
  3570. //
  3571. for (Index = 0; Index < Count; Index += 1) {
  3572. if (PtrToUlong(StackTrace[Index]) >= RtlpDphDllRangeStart
  3573. && PtrToUlong(StackTrace[Index]) <= RtlpDphDllRangeEnd) {
  3574. InterlockedIncrement (&(RtlpDphBlockDistribution[0]));
  3575. return TRUE;
  3576. }
  3577. }
  3578. InterlockedIncrement (&(RtlpDphBlockDistribution[1]));
  3579. return FALSE;
  3580. }
  3581. //
  3582. // If randomly decided => page heap
  3583. //
  3584. else if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_RANDOM_DECISION)) {
  3585. Random = RtlRandom (& (HeapRoot->Seed));
  3586. if ((Random % 100) < RtlpDphRandomProbability) {
  3587. InterlockedIncrement (&(RtlpDphBlockDistribution[0]));
  3588. return TRUE;
  3589. }
  3590. else {
  3591. InterlockedIncrement (&(RtlpDphBlockDistribution[1]));
  3592. return FALSE;
  3593. }
  3594. }
  3595. //
  3596. // For all other cases we will allocate in the page heap.
  3597. //
  3598. else {
  3599. InterlockedIncrement (&(RtlpDphBlockDistribution[0]));
  3600. return TRUE;
  3601. }
  3602. }
//
// Vm limit related globals.
//
// RtlpDphVmLimitNoPageHeap: 0 while full page heap allocations are
//     allowed, 1 after a VM limit has been hit (accessed only via
//     Interlocked* operations in RtlpDphVmLimitCanUsePageHeap).
// RtlpDphVmLimitHits: diagnostic counters; [0] counts virtual-space
//     limit hits, [1] counts pagefile-commit limit hits.
//
LONG RtlpDphVmLimitNoPageHeap;
LONG RtlpDphVmLimitHits[2];

// One megabyte, used to express the VM thresholds below.
#define SIZE_1_MB 0x100000
  3609. BOOLEAN
  3610. RtlpDphVmLimitCanUsePageHeap (
  3611. )
  3612. /*++
  3613. Routine Description:
  3614. This routine decides if we have good conditions for a full page heap
  3615. allocation to be successful. It checks two things: the pagefile commit
  3616. available on the system and the virtual space available in the current
  3617. process. Since full page heap uses at least 2 pages for each allocation
  3618. it can potentially exhaust both these resources. The current criteria are:
  3619. (1) if less than 32Mb of pagefile commit are left we switch to light
  3620. page heap
  3621. (2) if less than 128Mb of empty virtual space is left we switch to light
  3622. page heap
  3623. Parameters:
  3624. None.
  3625. Return Value:
  3626. True if full page heap allocations are allowed and false otherwise.
  3627. --*/
  3628. {
  3629. SYSTEM_PERFORMANCE_INFORMATION PerfInfo;
  3630. SYSTEM_BASIC_INFORMATION MemInfo;
  3631. VM_COUNTERS VmCounters;
  3632. NTSTATUS Status;
  3633. LONG Value;
  3634. LONG Calls;
  3635. ULONGLONG Total;
  3636. //
  3637. // Find if full page heap is currently allowed.
  3638. //
  3639. Value = InterlockedCompareExchange (&RtlpDphVmLimitNoPageHeap,
  3640. 0,
  3641. 0);
  3642. //
  3643. // Query system for page file availability etc.
  3644. //
  3645. Status = NtQuerySystemInformation (SystemPerformanceInformation,
  3646. &PerfInfo,
  3647. sizeof(PerfInfo),
  3648. NULL);
  3649. if (!NT_SUCCESS(Status)) {
  3650. return FALSE;
  3651. }
  3652. Status = NtQuerySystemInformation (SystemBasicInformation,
  3653. &MemInfo,
  3654. sizeof(MemInfo),
  3655. NULL);
  3656. if (!NT_SUCCESS(Status)) {
  3657. return FALSE;
  3658. }
  3659. Status = NtQueryInformationProcess (NtCurrentProcess(),
  3660. ProcessVmCounters,
  3661. &VmCounters,
  3662. sizeof(VM_COUNTERS),
  3663. NULL);
  3664. if (!NT_SUCCESS(Status)) {
  3665. return FALSE;
  3666. }
  3667. //
  3668. // First check that we have enough virtual space left in the process.
  3669. // If less than 128Mb are left we will disable full page heap allocs.
  3670. //
  3671. Total = (MemInfo.MaximumUserModeAddress - MemInfo.MinimumUserModeAddress);
  3672. if (Total - VmCounters.VirtualSize < 128 * SIZE_1_MB) {
  3673. if (Value == 0) {
  3674. if ((RtlpDphDebugLevel & DPH_DEBUG_SHOW_VM_LIMITS)) {
  3675. DbgPrint ("Page heap: pid 0x%X: vm limit: vspace: disabling full page heap \n",
  3676. HandleToUlong(NtCurrentTeb()->ClientId.UniqueProcess));
  3677. }
  3678. }
  3679. InterlockedIncrement (&(RtlpDphVmLimitHits[0]));
  3680. InterlockedExchange (&RtlpDphVmLimitNoPageHeap, 1);
  3681. return FALSE;
  3682. }
  3683. //
  3684. // Next check for page file availability. If less than 32Mb are
  3685. // available for commit we disable full page heap. Note that
  3686. // CommitLimit does not reflect future pagefile extension potential.
  3687. // Therefore pageheap will scale down even if the pagefile has not
  3688. // been extended to its maximum.
  3689. //
  3690. Total = PerfInfo.CommitLimit - PerfInfo.CommittedPages;
  3691. Total *= MemInfo.PageSize;
  3692. if (Total - VmCounters.PagefileUsage < 32 * SIZE_1_MB) {
  3693. if (Value == 0) {
  3694. if ((RtlpDphDebugLevel & DPH_DEBUG_SHOW_VM_LIMITS)) {
  3695. DbgPrint ("Page heap: pid 0x%X: vm limit: pfile: disabling full page heap \n",
  3696. HandleToUlong(NtCurrentTeb()->ClientId.UniqueProcess));
  3697. }
  3698. }
  3699. InterlockedIncrement (&(RtlpDphVmLimitHits[1]));
  3700. InterlockedExchange (&RtlpDphVmLimitNoPageHeap, 1);
  3701. return FALSE;
  3702. }
  3703. if (Value == 1) {
  3704. if ((RtlpDphDebugLevel & DPH_DEBUG_SHOW_VM_LIMITS)) {
  3705. DbgPrint ("Page heap: pid 0x%X: vm limit: reenabling full page heap \n",
  3706. HandleToUlong(NtCurrentTeb()->ClientId.UniqueProcess));
  3707. }
  3708. InterlockedExchange (&RtlpDphVmLimitNoPageHeap, 0);
  3709. }
  3710. return TRUE;
  3711. }
  3712. /////////////////////////////////////////////////////////////////////
  3713. //////////////////////////////////// DPH_BLOCK_INFORMATION management
  3714. /////////////////////////////////////////////////////////////////////
VOID
RtlpDphReportCorruptedBlock (
    PVOID Heap,
    ULONG Context,
    PVOID Block,
    ULONG Reason
    )
/*++

Routine Description:

    Raises the appropriate verifier stop(s) for a corrupted heap block.
    The routine first attempts to read the block header and the block
    size (both under protection, since the block may be unreadable),
    then issues one VERIFIER_STOP per bit set in `Reason', and finally
    always issues a generic "corrupted heap block" stop.

Arguments:

    Heap - heap handle used in the failing call.

    Context - DPH_CONTEXT_XXX value identifying the operation during
        which the corruption was detected (not referenced by the
        current implementation).

    Block - user pointer of the corrupted heap block.

    Reason - bit mask of DPH_ERROR_XXX flags describing the corruption.

Return Value:

    None.

--*/
{
    SIZE_T Size;
    DPH_BLOCK_INFORMATION Info;
    BOOLEAN InfoRead = FALSE;
    BOOLEAN SizeRead = FALSE;

    //
    // Copy the block header locally under SEH protection because the
    // block (and therefore its header) may be completely inaccessible.
    //
    try {
        RtlCopyMemory (&Info, (PDPH_BLOCK_INFORMATION)Block - 1, sizeof Info);
        InfoRead = TRUE;
    }
    except (EXCEPTION_EXECUTE_HANDLER) {
    }

    //
    // Try to recover the requested size from the (possibly corrupted)
    // header so the stops below can report it.
    //
    if (RtlpDphGetBlockSizeFromCorruptedBlock (Block, &Size)) {
        SizeRead = TRUE;
    }

    //
    // If we did not even manage to read the entire block header
    // report exception. If we managed to read the header we will let it
    // run through the other messages and only in the end report exception.
    //
    if (!InfoRead && (Reason & DPH_ERROR_RAISED_EXCEPTION)) {

        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "exception raised while verifying block header",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       0, "");
    }

    if ((Reason & DPH_ERROR_DOUBLE_FREE)) {

        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "block already freed",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       0, "");
    }

    if ((Reason & DPH_ERROR_CORRUPTED_INFIX_PATTERN)) {

        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "corrupted infix pattern for freed block",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       0, "");
    }

    if ((Reason & DPH_ERROR_CORRUPTED_HEAP_POINTER)) {

        //
        // For a wrong heap pointer also report the heap that the header
        // claims owns the block (pointer is stored scrambled).
        //
        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "corrupted heap pointer or using wrong heap",
                       Heap, "Heap used in the call",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       (InfoRead ? (UNSCRAMBLE_POINTER(Info.Heap)) : 0), "Heap owning the block");
    }

    if ((Reason & DPH_ERROR_CORRUPTED_SUFFIX_PATTERN)) {

        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "corrupted suffix pattern",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       0, "");
    }

    if ((Reason & DPH_ERROR_CORRUPTED_PREFIX_PATTERN)) {

        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "corrupted prefix pattern",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       0, "");
    }

    if ((Reason & DPH_ERROR_CORRUPTED_START_STAMP)) {

        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "corrupted start stamp",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       (InfoRead ? Info.StartStamp : 0), "Corrupted stamp");
    }

    if ((Reason & DPH_ERROR_CORRUPTED_END_STAMP)) {

        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "corrupted end stamp",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       (InfoRead ? Info.EndStamp : 0), "Corrupted stamp");
    }

    if ((Reason & DPH_ERROR_RAISED_EXCEPTION)) {

        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "exception raised while verifying block",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       0, "");
    }

    //
    // Catch all case.
    //
    VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                   "corrupted heap block",
                   Heap, "Heap handle",
                   Block, "Heap block",
                   (SizeRead ? Size : 0), "Block size",
                   0, "");
}
  3823. BOOLEAN
  3824. RtlpDphIsPageHeapBlock (
  3825. PDPH_HEAP_ROOT Heap,
  3826. PVOID Block,
  3827. PULONG Reason,
  3828. BOOLEAN CheckPattern
  3829. )
  3830. {
  3831. PDPH_BLOCK_INFORMATION Info;
  3832. BOOLEAN Corrupted = FALSE;
  3833. PUCHAR Current;
  3834. PUCHAR FillStart;
  3835. PUCHAR FillEnd;
  3836. DEBUG_ASSERT (Reason != NULL);
  3837. *Reason = 0;
  3838. try {
  3839. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  3840. //
  3841. // Start checking ...
  3842. //
  3843. if (Info->StartStamp != DPH_PAGE_BLOCK_START_STAMP_ALLOCATED) {
  3844. *Reason |= DPH_ERROR_CORRUPTED_START_STAMP;
  3845. Corrupted = TRUE;
  3846. if (Info->StartStamp == DPH_PAGE_BLOCK_START_STAMP_FREE) {
  3847. *Reason |= DPH_ERROR_DOUBLE_FREE;
  3848. }
  3849. }
  3850. if (Info->EndStamp != DPH_PAGE_BLOCK_END_STAMP_ALLOCATED) {
  3851. *Reason |= DPH_ERROR_CORRUPTED_END_STAMP;
  3852. Corrupted = TRUE;
  3853. }
  3854. if (Info->Heap != Heap) {
  3855. *Reason |= DPH_ERROR_CORRUPTED_HEAP_POINTER;
  3856. Corrupted = TRUE;
  3857. }
  3858. //
  3859. // Check the block suffix byte pattern.
  3860. //
  3861. if (CheckPattern) {
  3862. FillStart = (PUCHAR)Block + Info->RequestedSize;
  3863. FillEnd = (PUCHAR)ROUNDUP2((ULONG_PTR)FillStart, PAGE_SIZE);
  3864. for (Current = FillStart; Current < FillEnd; Current++) {
  3865. if (*Current != DPH_PAGE_BLOCK_SUFFIX) {
  3866. *Reason |= DPH_ERROR_CORRUPTED_SUFFIX_PATTERN;
  3867. Corrupted = TRUE;
  3868. break;
  3869. }
  3870. }
  3871. }
  3872. }
  3873. except (EXCEPTION_EXECUTE_HANDLER) {
  3874. *Reason |= DPH_ERROR_RAISED_EXCEPTION;
  3875. Corrupted = TRUE;
  3876. }
  3877. if (Corrupted) {
  3878. return FALSE;
  3879. }
  3880. else {
  3881. return TRUE;
  3882. }
  3883. }
  3884. BOOLEAN
  3885. RtlpDphIsNormalHeapBlock (
  3886. PDPH_HEAP_ROOT Heap,
  3887. PVOID Block,
  3888. PULONG Reason,
  3889. BOOLEAN CheckPattern
  3890. )
  3891. {
  3892. PDPH_BLOCK_INFORMATION Info;
  3893. BOOLEAN Corrupted = FALSE;
  3894. PUCHAR Current;
  3895. PUCHAR FillStart;
  3896. PUCHAR FillEnd;
  3897. DEBUG_ASSERT (Reason != NULL);
  3898. *Reason = 0;
  3899. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  3900. try {
  3901. if (UNSCRAMBLE_POINTER(Info->Heap) != Heap) {
  3902. *Reason |= DPH_ERROR_CORRUPTED_HEAP_POINTER;
  3903. Corrupted = TRUE;
  3904. }
  3905. if (Info->StartStamp != DPH_NORMAL_BLOCK_START_STAMP_ALLOCATED) {
  3906. *Reason |= DPH_ERROR_CORRUPTED_START_STAMP;
  3907. Corrupted = TRUE;
  3908. if (Info->StartStamp == DPH_NORMAL_BLOCK_START_STAMP_FREE) {
  3909. *Reason |= DPH_ERROR_DOUBLE_FREE;
  3910. }
  3911. }
  3912. if (Info->EndStamp != DPH_NORMAL_BLOCK_END_STAMP_ALLOCATED) {
  3913. *Reason |= DPH_ERROR_CORRUPTED_END_STAMP;
  3914. Corrupted = TRUE;
  3915. }
  3916. //
  3917. // Check the block suffix byte pattern.
  3918. //
  3919. if (CheckPattern) {
  3920. FillStart = (PUCHAR)Block + Info->RequestedSize;
  3921. FillEnd = FillStart + USER_ALIGNMENT;
  3922. for (Current = FillStart; Current < FillEnd; Current++) {
  3923. if (*Current != DPH_NORMAL_BLOCK_SUFFIX) {
  3924. *Reason |= DPH_ERROR_CORRUPTED_SUFFIX_PATTERN;
  3925. Corrupted = TRUE;
  3926. break;
  3927. }
  3928. }
  3929. }
  3930. }
  3931. except (EXCEPTION_EXECUTE_HANDLER) {
  3932. *Reason |= DPH_ERROR_RAISED_EXCEPTION;
  3933. Corrupted = TRUE;
  3934. }
  3935. if (Corrupted) {
  3936. return FALSE;
  3937. }
  3938. else {
  3939. return TRUE;
  3940. }
  3941. }
  3942. BOOLEAN
  3943. RtlpDphIsNormalFreeHeapBlock (
  3944. PVOID Block,
  3945. PULONG Reason,
  3946. BOOLEAN CheckPattern
  3947. )
  3948. {
  3949. PDPH_BLOCK_INFORMATION Info;
  3950. BOOLEAN Corrupted = FALSE;
  3951. PUCHAR Current;
  3952. PUCHAR FillStart;
  3953. PUCHAR FillEnd;
  3954. DEBUG_ASSERT (Reason != NULL);
  3955. *Reason = 0;
  3956. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  3957. try {
  3958. //
  3959. // If heap pointer is null we will just ignore this field.
  3960. // This can happen during heap destroy operations where
  3961. // the page heap got destroyed but the normal heap is still
  3962. // alive.
  3963. //
  3964. if (Info->StartStamp != DPH_NORMAL_BLOCK_START_STAMP_FREE) {
  3965. *Reason |= DPH_ERROR_CORRUPTED_START_STAMP;
  3966. Corrupted = TRUE;
  3967. }
  3968. if (Info->EndStamp != DPH_NORMAL_BLOCK_END_STAMP_FREE) {
  3969. *Reason |= DPH_ERROR_CORRUPTED_END_STAMP;
  3970. Corrupted = TRUE;
  3971. }
  3972. //
  3973. // Check the block suffix byte pattern.
  3974. //
  3975. if (CheckPattern) {
  3976. FillStart = (PUCHAR)Block + Info->RequestedSize;
  3977. FillEnd = FillStart + USER_ALIGNMENT;
  3978. for (Current = FillStart; Current < FillEnd; Current++) {
  3979. if (*Current != DPH_NORMAL_BLOCK_SUFFIX) {
  3980. *Reason |= DPH_ERROR_CORRUPTED_SUFFIX_PATTERN;
  3981. Corrupted = TRUE;
  3982. break;
  3983. }
  3984. }
  3985. }
  3986. //
  3987. // Check the block infix byte pattern.
  3988. //
  3989. if (CheckPattern) {
  3990. FillStart = (PUCHAR)Block;
  3991. FillEnd = FillStart
  3992. + ((Info->RequestedSize > USER_ALIGNMENT) ? USER_ALIGNMENT : Info->RequestedSize);
  3993. for (Current = FillStart; Current < FillEnd; Current++) {
  3994. if (*Current != DPH_FREE_BLOCK_INFIX) {
  3995. *Reason |= DPH_ERROR_CORRUPTED_INFIX_PATTERN;
  3996. Corrupted = TRUE;
  3997. break;
  3998. }
  3999. }
  4000. }
  4001. }
  4002. except (EXCEPTION_EXECUTE_HANDLER) {
  4003. *Reason |= DPH_ERROR_RAISED_EXCEPTION;
  4004. Corrupted = TRUE;
  4005. }
  4006. if (Corrupted) {
  4007. return FALSE;
  4008. }
  4009. else {
  4010. return TRUE;
  4011. }
  4012. }
  4013. BOOLEAN
  4014. RtlpDphWritePageHeapBlockInformation (
  4015. PDPH_HEAP_ROOT Heap,
  4016. PVOID Block,
  4017. SIZE_T RequestedSize,
  4018. SIZE_T ActualSize
  4019. )
  4020. {
  4021. PDPH_BLOCK_INFORMATION Info;
  4022. PUCHAR FillStart;
  4023. PUCHAR FillEnd;
  4024. ULONG Hash;
  4025. //
  4026. // Size and stamp information
  4027. //
  4028. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  4029. Info->Heap = Heap;
  4030. Info->RequestedSize = RequestedSize;
  4031. Info->ActualSize = ActualSize;
  4032. Info->StartStamp = DPH_PAGE_BLOCK_START_STAMP_ALLOCATED;
  4033. Info->EndStamp = DPH_PAGE_BLOCK_END_STAMP_ALLOCATED;
  4034. //
  4035. // Fill the block suffix pattern.
  4036. // We fill up to USER_ALIGNMENT bytes.
  4037. //
  4038. FillStart = (PUCHAR)Block + RequestedSize;
  4039. FillEnd = (PUCHAR)ROUNDUP2((ULONG_PTR)FillStart, PAGE_SIZE);
  4040. RtlFillMemory (FillStart, FillEnd - FillStart, DPH_PAGE_BLOCK_SUFFIX);
  4041. //
  4042. // Capture stack trace
  4043. //
  4044. if ((Heap->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) {
  4045. Info->StackTrace = RtlpDphLogStackTrace (3);
  4046. }
  4047. else {
  4048. Info->StackTrace = NULL;
  4049. }
  4050. //
  4051. // Call the old logging function (SteveWo's trace database).
  4052. // We do this so that tools that are used for leak detection
  4053. // (e.g. umdh) will work even if page heap is enabled.
  4054. // If the trace database was not created this function will
  4055. // return immediately.
  4056. //
  4057. Info->TraceIndex = RtlLogStackBackTrace ();
  4058. return TRUE;
  4059. }
  4060. BOOLEAN
  4061. RtlpDphWriteNormalHeapBlockInformation (
  4062. PDPH_HEAP_ROOT Heap,
  4063. PVOID Block,
  4064. SIZE_T RequestedSize,
  4065. SIZE_T ActualSize
  4066. )
  4067. {
  4068. PDPH_BLOCK_INFORMATION Info;
  4069. PUCHAR FillStart;
  4070. PUCHAR FillEnd;
  4071. ULONG Hash;
  4072. ULONG Reason;
  4073. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  4074. //
  4075. // Size and stamp information
  4076. //
  4077. Info->Heap = SCRAMBLE_POINTER(Heap);
  4078. Info->RequestedSize = RequestedSize;
  4079. Info->ActualSize = ActualSize;
  4080. Info->StartStamp = DPH_NORMAL_BLOCK_START_STAMP_ALLOCATED;
  4081. Info->EndStamp = DPH_NORMAL_BLOCK_END_STAMP_ALLOCATED;
  4082. Info->FreeQueue.Blink = NULL;
  4083. Info->FreeQueue.Flink = NULL;
  4084. //
  4085. // Fill the block suffix pattern.
  4086. // We fill only USER_ALIGNMENT bytes.
  4087. //
  4088. FillStart = (PUCHAR)Block + RequestedSize;
  4089. FillEnd = FillStart + USER_ALIGNMENT;
  4090. RtlFillMemory (FillStart, FillEnd - FillStart, DPH_NORMAL_BLOCK_SUFFIX);
  4091. //
  4092. // Capture stack trace
  4093. //
  4094. if ((Heap->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) {
  4095. Info->StackTrace = RtlpDphLogStackTrace (4);
  4096. if (Info->StackTrace) {
  4097. RtlTraceDatabaseLock (RtlpDphTraceDatabase);
  4098. ((PRTL_TRACE_BLOCK)(Info->StackTrace))->UserCount += 1;
  4099. ((PRTL_TRACE_BLOCK)(Info->StackTrace))->UserSize += RequestedSize;
  4100. ((PRTL_TRACE_BLOCK)(Info->StackTrace))->UserContext = Heap;
  4101. RtlTraceDatabaseUnlock (RtlpDphTraceDatabase);
  4102. }
  4103. }
  4104. else {
  4105. Info->StackTrace = NULL;
  4106. }
  4107. //
  4108. // Call the old logging function (SteveWo's trace database).
  4109. // We do this so that tools that are used for leak detection
  4110. // (e.g. umdh) will work even if page heap is enabled.
  4111. // If the trace database was not created this function will
  4112. // return immediately.
  4113. //
  4114. Info->TraceIndex = RtlLogStackBackTrace ();
  4115. return TRUE;
  4116. }
  4117. BOOLEAN
  4118. RtlpDphGetBlockSizeFromCorruptedBlock (
  4119. PVOID Block,
  4120. PSIZE_T Size
  4121. )
  4122. //
  4123. // This function gets called from RtlpDphReportCorruptedBlock only.
  4124. // It tries to extract a size for the block when an error is reported.
  4125. // If it cannot get the size it will return false.
  4126. //
  4127. {
  4128. PDPH_BLOCK_INFORMATION Info;
  4129. BOOLEAN Success = FALSE;
  4130. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  4131. try {
  4132. if (Info->StartStamp == DPH_NORMAL_BLOCK_START_STAMP_FREE
  4133. || Info->StartStamp == DPH_NORMAL_BLOCK_START_STAMP_ALLOCATED
  4134. || Info->StartStamp == DPH_PAGE_BLOCK_START_STAMP_FREE
  4135. || Info->StartStamp == DPH_NORMAL_BLOCK_START_STAMP_ALLOCATED) {
  4136. *Size = Info->RequestedSize;
  4137. Success = TRUE;
  4138. }
  4139. else {
  4140. Success = FALSE;
  4141. }
  4142. }
  4143. except (EXCEPTION_EXECUTE_HANDLER) {
  4144. Success = FALSE;
  4145. }
  4146. return Success;
  4147. }
  4148. /////////////////////////////////////////////////////////////////////
  4149. /////////////////////////////// Normal heap allocation/free functions
  4150. /////////////////////////////////////////////////////////////////////
  4151. PVOID
  4152. RtlpDphNormalHeapAllocate (
  4153. PDPH_HEAP_ROOT Heap,
  4154. ULONG Flags,
  4155. SIZE_T Size
  4156. )
  4157. {
  4158. PVOID Block;
  4159. PDPH_BLOCK_INFORMATION Info;
  4160. ULONG Hash;
  4161. SIZE_T ActualSize;
  4162. SIZE_T RequestedSize;
  4163. ULONG Reason;
  4164. //
  4165. // Reject extreme size requests.
  4166. //
  4167. #if defined(_IA64_)
  4168. if (Size > 0x8000000000000000) {
  4169. #else
  4170. if (Size > 0x80000000) {
  4171. #endif
  4172. VERIFIER_STOP (APPLICATION_VERIFIER_EXTREME_SIZE_REQUEST,
  4173. "extreme size request",
  4174. Heap, "Heap handle",
  4175. Size, "Size requested",
  4176. 0, "",
  4177. 0, "");
  4178. return NULL;
  4179. }
  4180. RequestedSize = Size;
  4181. ActualSize = Size + sizeof(DPH_BLOCK_INFORMATION) + USER_ALIGNMENT;
  4182. //
  4183. // We need to reset the NO_SERIALIZE flag because a free operation can be
  4184. // active in another thread due to free delayed cache trimming. If the
  4185. // allocation operation will raise an exception (e.g. OUT_OF_MEMORY) we are
  4186. // safe to let it go here. It will be caught by the exception handler
  4187. // established in the main page heap entry (RtlpDebugPageHeapAlloc).
  4188. //
  4189. Block = RtlAllocateHeap (
  4190. Heap->NormalHeap,
  4191. Flags & (~HEAP_NO_SERIALIZE),
  4192. ActualSize);
  4193. if (Block == NULL) {
  4194. //
  4195. // (SilviuC): If we have memory pressure we might want
  4196. // to trim the delayed free queues. We do not do this
  4197. // right now because the threshold is kind of small and there
  4198. // are many benefits in keeping this cache around.
  4199. //
  4200. return NULL;
  4201. }
  4202. RtlpDphWriteNormalHeapBlockInformation (
  4203. Heap,
  4204. (PDPH_BLOCK_INFORMATION)Block + 1,
  4205. RequestedSize,
  4206. ActualSize);
  4207. if (! (Flags & HEAP_ZERO_MEMORY)) {
  4208. RtlFillMemory ((PDPH_BLOCK_INFORMATION)Block + 1,
  4209. RequestedSize,
  4210. DPH_NORMAL_BLOCK_INFIX);
  4211. }
  4212. return (PVOID)((PDPH_BLOCK_INFORMATION)Block + 1);
  4213. }
  4214. BOOLEAN
  4215. RtlpDphNormalHeapFree (
  4216. PDPH_HEAP_ROOT Heap,
  4217. ULONG Flags,
  4218. PVOID Block
  4219. )
  4220. {
  4221. PDPH_BLOCK_INFORMATION Info;
  4222. BOOLEAN Success;
  4223. ULONG Reason;
  4224. ULONG Hash;
  4225. SIZE_T TrimSize;
  4226. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  4227. if (! RtlpDphIsNormalHeapBlock(Heap, Block, &Reason, TRUE)) {
  4228. RtlpDphReportCorruptedBlock (Heap,
  4229. DPH_CONTEXT_NORMAL_PAGE_HEAP_FREE,
  4230. Block,
  4231. Reason);
  4232. return FALSE;
  4233. }
  4234. //
  4235. // Check if there are any orphan critical sections in the block to be freed.
  4236. //
  4237. RtlpCheckForCriticalSectionsInMemoryRange (Block,
  4238. Info->RequestedSize,
  4239. NULL);
  4240. //
  4241. // Save the free stack trace.
  4242. //
  4243. if ((Heap->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) {
  4244. if (Info->StackTrace) {
  4245. RtlTraceDatabaseLock (RtlpDphTraceDatabase);
  4246. ((PRTL_TRACE_BLOCK)(Info->StackTrace))->UserCount -= 1;
  4247. ((PRTL_TRACE_BLOCK)(Info->StackTrace))->UserSize -= Info->RequestedSize;
  4248. RtlTraceDatabaseUnlock (RtlpDphTraceDatabase);
  4249. }
  4250. Info->StackTrace = RtlpDphLogStackTrace (3);
  4251. }
  4252. else {
  4253. Info->StackTrace = NULL;
  4254. }
  4255. //
  4256. // Mark the block as freed.
  4257. //
  4258. Info->StartStamp -= 1;
  4259. Info->EndStamp -= 1;
  4260. //
  4261. // Wipe out all the information in the block so that it cannot
  4262. // be used while free. The pattern looks like a kernel pointer
  4263. // and if we are lucky enough the buggy code might use a value
  4264. // from the block as a pointer and instantly access violate.
  4265. //
  4266. RtlFillMemory (Info + 1,
  4267. Info->RequestedSize,
  4268. DPH_FREE_BLOCK_INFIX);
  4269. //
  4270. // Add block to the delayed free queue.
  4271. //
  4272. RtlpDphAddToDelayedFreeQueue (Info);
  4273. //
  4274. // If we are over the threshold we need to really free
  4275. // some of the guys.
  4276. //
  4277. Success = TRUE;
  4278. if (RtlpDphNeedToTrimDelayedFreeQueue(&TrimSize)) {
  4279. RtlpDphTrimDelayedFreeQueue (TrimSize, Flags);
  4280. }
  4281. return Success;
  4282. }
  4283. PVOID
  4284. RtlpDphNormalHeapReAllocate (
  4285. PDPH_HEAP_ROOT Heap,
  4286. ULONG Flags,
  4287. PVOID OldBlock,
  4288. SIZE_T Size
  4289. )
  4290. {
  4291. PVOID Block;
  4292. PDPH_BLOCK_INFORMATION Info;
  4293. ULONG Hash;
  4294. SIZE_T CopySize;
  4295. ULONG Reason;
  4296. //
  4297. // Reject extreme size requests.
  4298. //
  4299. #if defined(_IA64_)
  4300. if (Size > 0x8000000000000000) {
  4301. #else
  4302. if (Size > 0x80000000) {
  4303. #endif
  4304. VERIFIER_STOP (APPLICATION_VERIFIER_EXTREME_SIZE_REQUEST,
  4305. "extreme size request",
  4306. Heap, "Heap handle",
  4307. Size, "Size requested",
  4308. 0, "",
  4309. 0, "");
  4310. return NULL;
  4311. }
  4312. Info = (PDPH_BLOCK_INFORMATION)OldBlock - 1;
  4313. if (! RtlpDphIsNormalHeapBlock(Heap, OldBlock, &Reason, TRUE)) {
  4314. RtlpDphReportCorruptedBlock (Heap,
  4315. DPH_CONTEXT_NORMAL_PAGE_HEAP_REALLOC,
  4316. OldBlock,
  4317. Reason);
  4318. return NULL;
  4319. }
  4320. Block = RtlpDphNormalHeapAllocate (Heap, Flags, Size);
  4321. if (Block == NULL) {
  4322. return NULL;
  4323. }
  4324. //
  4325. // Copy old block stuff into the new block and then
  4326. // free old block.
  4327. //
  4328. if (Size < Info->RequestedSize) {
  4329. CopySize = Size;
  4330. }
  4331. else {
  4332. CopySize = Info->RequestedSize;
  4333. }
  4334. RtlCopyMemory (Block, OldBlock, CopySize);
  4335. //
  4336. // Free the old guy.
  4337. //
  4338. RtlpDphNormalHeapFree (Heap, Flags, OldBlock);
  4339. return Block;
  4340. }
  4341. SIZE_T
  4342. RtlpDphNormalHeapSize (
  4343. PDPH_HEAP_ROOT Heap,
  4344. ULONG Flags,
  4345. PVOID Block
  4346. )
  4347. {
  4348. PDPH_BLOCK_INFORMATION Info;
  4349. SIZE_T Result;
  4350. ULONG Reason;
  4351. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  4352. if (! RtlpDphIsNormalHeapBlock(Heap, Block, &Reason, FALSE)) {
  4353. //
  4354. // We cannot stop here for a wrong block.
  4355. // The users might use this function to validate
  4356. // if a block belongs to the heap or not. However
  4357. // they should use HeapValidate for that.
  4358. //
  4359. #if DBG
  4360. DbgPrint ("Page heap: warning: HeapSize called with "
  4361. "invalid block @ %p (reason %0X) \n", Block, Reason);
  4362. #endif
  4363. return (SIZE_T)-1;
  4364. }
  4365. Result = RtlSizeHeap (
  4366. Heap->NormalHeap,
  4367. Flags,
  4368. Info);
  4369. if (Result == (SIZE_T)-1) {
  4370. return Result;
  4371. }
  4372. else {
  4373. return Result - sizeof(*Info) - USER_ALIGNMENT;
  4374. }
  4375. }
  4376. BOOLEAN
  4377. RtlpDphNormalHeapSetUserFlags(
  4378. IN PDPH_HEAP_ROOT Heap,
  4379. IN ULONG Flags,
  4380. IN PVOID Address,
  4381. IN ULONG UserFlagsReset,
  4382. IN ULONG UserFlagsSet
  4383. )
  4384. {
  4385. BOOLEAN Success;
  4386. ULONG Reason;
  4387. if (! RtlpDphIsNormalHeapBlock(Heap, Address, &Reason, FALSE)) {
  4388. RtlpDphReportCorruptedBlock (Heap,
  4389. DPH_CONTEXT_NORMAL_PAGE_HEAP_SETFLAGS,
  4390. Address,
  4391. Reason);
  4392. return FALSE;
  4393. }
  4394. Success = RtlSetUserFlagsHeap (
  4395. Heap->NormalHeap,
  4396. Flags,
  4397. (PDPH_BLOCK_INFORMATION)Address - 1,
  4398. UserFlagsReset,
  4399. UserFlagsSet);
  4400. return Success;
  4401. }
  4402. BOOLEAN
  4403. RtlpDphNormalHeapSetUserValue(
  4404. IN PDPH_HEAP_ROOT Heap,
  4405. IN ULONG Flags,
  4406. IN PVOID Address,
  4407. IN PVOID UserValue
  4408. )
  4409. {
  4410. BOOLEAN Success;
  4411. ULONG Reason;
  4412. if (! RtlpDphIsNormalHeapBlock(Heap, Address, &Reason, FALSE)) {
  4413. RtlpDphReportCorruptedBlock (Heap,
  4414. DPH_CONTEXT_NORMAL_PAGE_HEAP_SETVALUE,
  4415. Address,
  4416. Reason);
  4417. return FALSE;
  4418. }
  4419. Success = RtlSetUserValueHeap (
  4420. Heap->NormalHeap,
  4421. Flags,
  4422. (PDPH_BLOCK_INFORMATION)Address - 1,
  4423. UserValue);
  4424. return Success;
  4425. }
  4426. BOOLEAN
  4427. RtlpDphNormalHeapGetUserInfo(
  4428. IN PDPH_HEAP_ROOT Heap,
  4429. IN ULONG Flags,
  4430. IN PVOID Address,
  4431. OUT PVOID* UserValue,
  4432. OUT PULONG UserFlags
  4433. )
  4434. {
  4435. BOOLEAN Success;
  4436. ULONG Reason;
  4437. if (! RtlpDphIsNormalHeapBlock(Heap, Address, &Reason, FALSE)) {
  4438. RtlpDphReportCorruptedBlock (Heap,
  4439. DPH_CONTEXT_NORMAL_PAGE_HEAP_GETINFO,
  4440. Address,
  4441. Reason);
  4442. return FALSE;
  4443. }
  4444. Success = RtlGetUserInfoHeap (
  4445. Heap->NormalHeap,
  4446. Flags,
  4447. (PDPH_BLOCK_INFORMATION)Address - 1,
  4448. UserValue,
  4449. UserFlags);
  4450. return Success;
  4451. }
  4452. BOOLEAN
  4453. RtlpDphNormalHeapValidate(
  4454. IN PDPH_HEAP_ROOT Heap,
  4455. IN ULONG Flags,
  4456. IN PVOID Address
  4457. )
  4458. {
  4459. BOOLEAN Success;
  4460. ULONG Reason;
  4461. if (Address == NULL) {
  4462. //
  4463. // Validation for the whole heap.
  4464. //
  4465. Success = RtlValidateHeap (
  4466. Heap->NormalHeap,
  4467. Flags,
  4468. Address);
  4469. }
  4470. else {
  4471. //
  4472. // Validation for a heap block.
  4473. //
  4474. if (! RtlpDphIsNormalHeapBlock(Heap, Address, &Reason, TRUE)) {
  4475. //
  4476. // We cannot break in this case because the function might indeed
  4477. // be called with invalid block. On checked builds we print a
  4478. // warning just in case the invalid block was not intended.
  4479. //
  4480. #if DBG
  4481. DbgPrint ("Page heap: warning: validate called with "
  4482. "invalid block @ %p (reason %0X) \n", Address, Reason);
  4483. #endif
  4484. return FALSE;
  4485. }
  4486. Success = RtlValidateHeap (
  4487. Heap->NormalHeap,
  4488. Flags,
  4489. (PDPH_BLOCK_INFORMATION)Address - 1);
  4490. }
  4491. return Success;
  4492. }
  4493. /////////////////////////////////////////////////////////////////////
  4494. ////////////////////////////////// Delayed free queue for normal heap
  4495. /////////////////////////////////////////////////////////////////////
//
// Delayed free queue (process-wide) for light page heap. Freed blocks
// are parked here, wiped with a fill pattern, and only released to the
// NT heap when the cache grows past its threshold — this is what
// catches use-after-free on normal-heap blocks.
//

// Guards the queue and both counters below.
RTL_CRITICAL_SECTION RtlpDphDelayedFreeQueueLock;

// Sum of ActualSize for all blocks currently parked in the queue.
SIZE_T RtlpDphMemoryUsedByDelayedFreeBlocks;

// Number of blocks currently parked in the queue.
SIZE_T RtlpDphNumberOfDelayedFreeBlocks;

// The queue itself (linked through DPH_BLOCK_INFORMATION.FreeQueue).
LIST_ENTRY RtlpDphDelayedFreeQueue;
  4500. VOID
  4501. RtlpDphInitializeDelayedFreeQueue (
  4502. )
  4503. {
  4504. RtlInitializeCriticalSection (&RtlpDphDelayedFreeQueueLock);
  4505. InitializeListHead (&RtlpDphDelayedFreeQueue);
  4506. RtlpDphMemoryUsedByDelayedFreeBlocks = 0;
  4507. RtlpDphNumberOfDelayedFreeBlocks = 0;
  4508. }
  4509. VOID
  4510. RtlpDphAddToDelayedFreeQueue (
  4511. PDPH_BLOCK_INFORMATION Info
  4512. )
  4513. {
  4514. RtlEnterCriticalSection (&RtlpDphDelayedFreeQueueLock);
  4515. InsertTailList (&(RtlpDphDelayedFreeQueue), &(Info->FreeQueue));
  4516. RtlpDphMemoryUsedByDelayedFreeBlocks += Info->ActualSize;
  4517. RtlpDphNumberOfDelayedFreeBlocks += 1;
  4518. RtlLeaveCriticalSection (&RtlpDphDelayedFreeQueueLock);
  4519. }
  4520. BOOLEAN
  4521. RtlpDphNeedToTrimDelayedFreeQueue (
  4522. PSIZE_T TrimSize
  4523. )
  4524. {
  4525. BOOLEAN Result;
  4526. RtlEnterCriticalSection (&RtlpDphDelayedFreeQueueLock);
  4527. if (RtlpDphMemoryUsedByDelayedFreeBlocks > RtlpDphDelayedFreeCacheSize) {
  4528. *TrimSize = RtlpDphMemoryUsedByDelayedFreeBlocks - RtlpDphDelayedFreeCacheSize;
  4529. if (*TrimSize < PAGE_SIZE) {
  4530. *TrimSize = PAGE_SIZE;
  4531. }
  4532. Result = TRUE;
  4533. }
  4534. else {
  4535. Result = FALSE;
  4536. }
  4537. RtlLeaveCriticalSection (&RtlpDphDelayedFreeQueueLock);
  4538. return Result;
  4539. }
VOID
RtlpDphTrimDelayedFreeQueue (
    SIZE_T TrimSize,
    ULONG Flags
    )
/*++

Routine Description:

    This routine trims the delayed free queue (global per process).
    If trim size is zero it will trim up to a global threshold
    (RtlpDphDelayedFreeCacheSize) otherwise uses `TrimSize'.

    Note. This function might become a little bit of a bottleneck
    because it is called by every free operation. Because of this
    it is better to always call RtlpDphNeedToTrimDelayedFreeQueue
    first.

Arguments:

    TrimSize: amount to trim (in bytes). If zero it trims down to
    a global threshold.

    Flags: flags for free operation.

Return Value:

    None.

Environment:

    Called from RtlpDphNormalXxx (normal heap management) routines.

--*/
{
    ULONG Reason;
    SIZE_T CurrentTrimmed = 0;
    PDPH_BLOCK_INFORMATION QueueBlock;
    PLIST_ENTRY ListEntry;

    //
    // The queue lock is held for the whole trimming loop, including the
    // RtlFreeHeap calls below.
    //
    RtlEnterCriticalSection (&RtlpDphDelayedFreeQueueLock);

    //
    // TrimSize == 0 means "trim down to the global threshold".
    //
    if (TrimSize == 0) {
        if (RtlpDphMemoryUsedByDelayedFreeBlocks > RtlpDphDelayedFreeCacheSize) {
            TrimSize = RtlpDphMemoryUsedByDelayedFreeBlocks - RtlpDphDelayedFreeCacheSize;
        }
    }

    //
    // Pop blocks from the head (oldest first) until the goal is met or
    // the queue is empty.
    //
    while (TRUE) {

        //
        // Did we achieve our trimming goal?
        //
        if (CurrentTrimmed >= TrimSize) {
            break;
        }

        //
        // The list can get empty since we remove blocks from it.
        //
        if (IsListEmpty(&RtlpDphDelayedFreeQueue)) {
            break;
        }

        ListEntry = RemoveHeadList (&RtlpDphDelayedFreeQueue);
        QueueBlock = CONTAINING_RECORD (ListEntry, DPH_BLOCK_INFORMATION, FreeQueue);

        //
        // Report (but do not abort on) any tampering detected while the
        // block sat in the queue: stamps, suffix and infix patterns.
        //
        if (! RtlpDphIsNormalFreeHeapBlock(QueueBlock + 1, &Reason, TRUE)) {

            RtlpDphReportCorruptedBlock (NULL,
                                         DPH_CONTEXT_DELAYED_FREE,
                                         QueueBlock + 1,
                                         Reason);
        }

        RtlpDphMemoryUsedByDelayedFreeBlocks -= QueueBlock->ActualSize;
        RtlpDphNumberOfDelayedFreeBlocks -= 1;
        CurrentTrimmed += QueueBlock->ActualSize;

        //
        // Retire the "free" stamps before handing the memory back to
        // the NT heap.
        //
        QueueBlock->StartStamp -= 1;
        QueueBlock->EndStamp -= 1;

        //
        // We protect against any mishaps when we call into NT heap. Note that we
        // cannot use the original flags used for free because this free operation
        // may happen in another thread. Plus we do not want unsynchronized access
        // anyway.
        //
        try {
            RtlFreeHeap (((PDPH_HEAP_ROOT)(UNSCRAMBLE_POINTER(QueueBlock->Heap)))->NormalHeap,
                         0,
                         QueueBlock);
        }
        except (EXCEPTION_EXECUTE_HANDLER) {
        }
    }

    RtlLeaveCriticalSection (&RtlpDphDelayedFreeQueueLock);
}
  4616. VOID
  4617. RtlpDphFreeDelayedBlocksFromHeap (
  4618. PVOID PageHeap,
  4619. PVOID NormalHeap
  4620. )
  4621. {
  4622. ULONG Reason;
  4623. PDPH_BLOCK_INFORMATION Block;
  4624. PLIST_ENTRY Current;
  4625. PLIST_ENTRY Next;
  4626. RtlEnterCriticalSection (&RtlpDphDelayedFreeQueueLock);
  4627. for (Current = RtlpDphDelayedFreeQueue.Flink;
  4628. Current != &RtlpDphDelayedFreeQueue;
  4629. Current = Next) {
  4630. Next = Current->Flink;
  4631. Block = CONTAINING_RECORD (Current, DPH_BLOCK_INFORMATION, FreeQueue);
  4632. if (UNSCRAMBLE_POINTER(Block->Heap) != PageHeap) {
  4633. continue;
  4634. }
  4635. //
  4636. // We need to delete this block;
  4637. //
  4638. RemoveEntryList (Current);
  4639. Block = CONTAINING_RECORD (Current, DPH_BLOCK_INFORMATION, FreeQueue);
  4640. //
  4641. // Prevent probing of this field during RtlpDphIsNormalFreeBlock.
  4642. //
  4643. Block->Heap = 0;
  4644. //
  4645. // Check if the block about to be freed was touched.
  4646. //
  4647. if (! RtlpDphIsNormalFreeHeapBlock(Block + 1, &Reason, TRUE)) {
  4648. RtlpDphReportCorruptedBlock (PageHeap,
  4649. DPH_CONTEXT_DELAYED_DESTROY,
  4650. Block + 1,
  4651. Reason);
  4652. }
  4653. RtlpDphMemoryUsedByDelayedFreeBlocks -= Block->ActualSize;
  4654. RtlpDphNumberOfDelayedFreeBlocks -= 1;
  4655. //
  4656. // (SilviuC): ISSUE: Not sure what flags to use here because the flags from the original
  4657. // call have been lost (we do not store them somewhere in the delayed queue).
  4658. // Zero should work though. The safest fix would be to add a new field in
  4659. // DPH_BLOCK_INFORMATION that stores the flags used during the original free
  4660. // and uses them again here.
  4661. //
  4662. Block->StartStamp -= 1;
  4663. Block->EndStamp -= 1;
  4664. //
  4665. // We protect against any mishaps when we call into NT heap. Note that we
  4666. // cannot use the original flags used for free because this free operation
  4667. // may happen in another thread. Plus we do not want unsynchronized access
  4668. // anyway.
  4669. //
  4670. try {
  4671. RtlFreeHeap (NormalHeap,
  4672. 0,
  4673. Block);
  4674. }
  4675. except (EXCEPTION_EXECUTE_HANDLER) {
  4676. }
  4677. }
  4678. RtlLeaveCriticalSection (&RtlpDphDelayedFreeQueueLock);
  4679. }
  4680. /////////////////////////////////////////////////////////////////////
  4681. /////////////////////////////////////////////// Stack trace detection
  4682. /////////////////////////////////////////////////////////////////////
  4683. PRTL_TRACE_BLOCK
  4684. RtlpDphLogStackTrace (
  4685. ULONG FramesToSkip
  4686. )
  4687. {
  4688. PVOID Trace [DPH_MAX_STACK_LENGTH];
  4689. ULONG Hash;
  4690. ULONG Count;
  4691. PRTL_TRACE_BLOCK Block;
  4692. BOOLEAN Result;
  4693. Count = RtlCaptureStackBackTrace (
  4694. 1 + FramesToSkip,
  4695. DPH_MAX_STACK_LENGTH,
  4696. Trace,
  4697. &Hash);
  4698. if (Count == 0 || RtlpDphTraceDatabase == NULL) {
  4699. return NULL;
  4700. }
  4701. Result = RtlTraceDatabaseAdd (
  4702. RtlpDphTraceDatabase,
  4703. Count,
  4704. Trace,
  4705. &Block);
  4706. if (Result == FALSE) {
  4707. return NULL;
  4708. }
  4709. else {
  4710. return Block;
  4711. }
  4712. }
  4713. /////////////////////////////////////////////////////////////////////
  4714. /////////////////////////////////////////////////// Target dlls logic
  4715. /////////////////////////////////////////////////////////////////////
//
// Registry of "target dlls" (dlls specifically selected for page heap
// verification). The list is guarded by the critical section and must
// not be touched before RtlpDphTargetDllsLogicInitialize sets the
// initialized flag.
//
RTL_CRITICAL_SECTION RtlpDphTargetDllsLock;
LIST_ENTRY RtlpDphTargetDllsList;
BOOLEAN RtlpDphTargetDllsInitialized;

//
// Descriptor for one loaded target dll, linked into
// RtlpDphTargetDllsList.
//
typedef struct _DPH_TARGET_DLL {
    LIST_ENTRY List;          // links into RtlpDphTargetDllsList
    UNICODE_STRING Name;      // heap-allocated copy of the dll name
    PVOID StartAddress;       // image base address
    PVOID EndAddress;         // image base + size
} DPH_TARGET_DLL, * PDPH_TARGET_DLL;
VOID
RtlpDphTargetDllsLogicInitialize (
    )
/*++

Routine Description:

    One-time initialization of the target dll registry: the lock, the
    (empty) list, and finally the initialized flag. The flag is set last
    so that RtlpDphTargetDllsLoadCallBack bails out until the lock and
    list are usable.

--*/
{
    RtlInitializeCriticalSection (&RtlpDphTargetDllsLock);
    InitializeListHead (&RtlpDphTargetDllsList);
    RtlpDphTargetDllsInitialized = TRUE;
}
  4733. VOID
  4734. RtlpDphTargetDllsLoadCallBack (
  4735. PUNICODE_STRING Name,
  4736. PVOID Address,
  4737. ULONG Size
  4738. )
  4739. //
  4740. // This function is not called right now but it will get called
  4741. // from \base\ntdll\ldrapi.c whenever a dll gets loaded. This
  4742. // gives page heap the opportunity to update per dll data structures
  4743. // that are not used right now for anything.
  4744. //
  4745. {
  4746. PDPH_TARGET_DLL Descriptor;
  4747. //
  4748. // Get out if we are in some weird condition.
  4749. //
  4750. if (! RtlpDphTargetDllsInitialized) {
  4751. return;
  4752. }
  4753. if (! RtlpDphIsDllTargeted (Name->Buffer)) {
  4754. return;
  4755. }
  4756. Descriptor = RtlAllocateHeap (RtlProcessHeap(), 0, sizeof *Descriptor);
  4757. if (Descriptor == NULL) {
  4758. return;
  4759. }
  4760. if (! RtlCreateUnicodeString (&(Descriptor->Name), Name->Buffer)) {
  4761. RtlFreeHeap (RtlProcessHeap(), 0, Descriptor);
  4762. return;
  4763. }
  4764. Descriptor->StartAddress = Address;
  4765. Descriptor->EndAddress = (PUCHAR)Address + Size;
  4766. RtlEnterCriticalSection (&RtlpDphTargetDllsLock);
  4767. InsertTailList (&(RtlpDphTargetDllsList), &(Descriptor->List));
  4768. RtlLeaveCriticalSection (&RtlpDphTargetDllsLock);
  4769. //
  4770. // Print a message if a target dll has been identified.
  4771. //
  4772. DbgPrint("Page heap: loaded target dll %ws [%p - %p]\n",
  4773. Descriptor->Name.Buffer,
  4774. Descriptor->StartAddress,
  4775. Descriptor->EndAddress);
  4776. }
  4777. const WCHAR *
  4778. RtlpDphIsDllTargeted (
  4779. const WCHAR * Name
  4780. )
  4781. {
  4782. const WCHAR * All;
  4783. ULONG I, J;
  4784. All = RtlpDphTargetDllsUnicode.Buffer;
  4785. for (I = 0; All[I]; I += 1) {
  4786. for (J = 0; All[I+J] && Name[J]; J += 1) {
  4787. if (RtlUpcaseUnicodeChar(All[I+J]) != RtlUpcaseUnicodeChar(Name[J])) {
  4788. break;
  4789. }
  4790. }
  4791. if (Name[J]) {
  4792. continue;
  4793. }
  4794. else {
  4795. // we got to the end of string
  4796. return &(All[I]);
  4797. }
  4798. }
  4799. return NULL;
  4800. }
  4801. /////////////////////////////////////////////////////////////////////
  4802. /////////////////////////////////////////////////// Validation checks
  4803. /////////////////////////////////////////////////////////////////////
  4804. PDPH_HEAP_BLOCK
  4805. RtlpDphSearchBlockInList (
  4806. PDPH_HEAP_BLOCK List,
  4807. PUCHAR Address
  4808. )
  4809. {
  4810. PDPH_HEAP_BLOCK Current;
  4811. for (Current = List; Current; Current = Current->pNextAlloc) {
  4812. if (Current->pVirtualBlock == Address) {
  4813. return Current;
  4814. }
  4815. }
  4816. return NULL;
  4817. }
//
// Stack traces of the two most recent internal validation runs. Kept so
// that, when a validation failure breaks into the debugger, both the
// current and the previous validation call stacks are available.
//
PVOID RtlpDphLastValidationStack;
PVOID RtlpDphCurrentValidationStack;
VOID
RtlpDphInternalValidatePageHeap (
    PDPH_HEAP_ROOT Heap,
    PUCHAR ExemptAddress,
    SIZE_T ExemptSize
    )
/*++

Routine Description:

    Walks every virtual memory range owned by the page heap and checks
    that each page is accounted for by one of the heap's internal lists
    (busy, free, available, node pool). Pages belonging to the heap
    header region or to the caller-supplied exempt region are skipped.
    Any page not covered by a list node is reported as leaked and, if
    any such page was found, the function breaks into the debugger.

Arguments:

    Heap - page heap root to validate.
    ExemptAddress - start of a region temporarily outside all lists
        (e.g. a block in the middle of an operation); may be NULL.
    ExemptSize - size in bytes of the exempt region.

--*/
{
    PDPH_HEAP_BLOCK Range;
    PDPH_HEAP_BLOCK Node;
    PUCHAR Address;
    BOOLEAN FoundLeak;

    // Record this validation's stack trace, preserving the previous one
    // for post-mortem debugging.
    RtlpDphLastValidationStack = RtlpDphCurrentValidationStack;
    RtlpDphCurrentValidationStack = RtlpDphLogStackTrace (0);

    FoundLeak = FALSE;

    for (Range = Heap->pVirtualStorageListHead;
         Range != NULL;
         Range = Range->pNextAlloc) {

        Address = Range->pVirtualBlock;

        while (Address < Range->pVirtualBlock + Range->nVirtualBlockSize) {

            //
            // Ignore DPH_HEAP_ROOT structures.
            // NOTE(review): the window checked is [Heap - PAGE_SIZE,
            // Heap + 5 * PAGE_SIZE); presumably this covers the guard
            // page plus the heap header pages -- confirm against the
            // heap creation code.
            //

            if ((Address >= (PUCHAR)Heap - PAGE_SIZE) && (Address < (PUCHAR)Heap + 5 * PAGE_SIZE)) {
                Address += PAGE_SIZE;
                continue;
            }

            //
            // Ignore exempt region (temporarily out of all structures).
            //

            if ((Address >= ExemptAddress) && (Address < ExemptAddress + ExemptSize)) {
                Address += PAGE_SIZE;
                continue;
            }

            //
            // If some list node starts at this address, jump over that
            // node's whole virtual extent and continue scanning.
            //

            Node = RtlpDphSearchBlockInList (Heap->pBusyAllocationListHead, Address);

            if (Node) {
                Address += Node->nVirtualBlockSize;
                continue;
            }

            Node = RtlpDphSearchBlockInList (Heap->pFreeAllocationListHead, Address);

            if (Node) {
                Address += Node->nVirtualBlockSize;
                continue;
            }

            Node = RtlpDphSearchBlockInList (Heap->pAvailableAllocationListHead, Address);

            if (Node) {
                Address += Node->nVirtualBlockSize;
                continue;
            }

            Node = RtlpDphSearchBlockInList (Heap->pNodePoolListHead, Address);

            if (Node) {
                Address += Node->nVirtualBlockSize;
                continue;
            }

            // No list claims this page: report it and advance one page.
            DbgPrint ("Block @ %p has been leaked \n", Address);
            FoundLeak = TRUE;

            Address += PAGE_SIZE;
        }
    }

    if (FoundLeak) {
        DbgPrint ("Page heap: Last stack @ %p, Current stack @ %p \n",
                  RtlpDphLastValidationStack,
                  RtlpDphCurrentValidationStack);
        DbgBreakPoint ();
    }
}
  4885. VOID
  4886. RtlpDphValidateInternalLists (
  4887. PDPH_HEAP_ROOT Heap
  4888. )
  4889. /*++
  4890. Routine Description:
  4891. This routine is called to validate the busy and free lists of a page heap
  4892. if /protect bit is enabled. In the wbemstress lab we have seen a corruption
  4893. of the busy list with the start of the busy list pointing towards the end of
  4894. the free list. This is the reason we touch very carefully the nodes that are
  4895. in the busy list.
  4896. --*/
  4897. {
  4898. PDPH_HEAP_BLOCK StartNode;
  4899. PDPH_HEAP_BLOCK EndNode;
  4900. PDPH_HEAP_BLOCK Node;
  4901. ULONG NumberOfBlocks;
  4902. PDPH_BLOCK_INFORMATION Block;
  4903. //
  4904. // Nothing to do if /protect is not enabled.
  4905. //
  4906. if (! (Heap->ExtraFlags & PAGE_HEAP_PROTECT_META_DATA)) {
  4907. return;
  4908. }
  4909. RtlpDphLastValidationStack = RtlpDphCurrentValidationStack;
  4910. RtlpDphCurrentValidationStack = RtlpDphLogStackTrace (0);
  4911. StartNode = Heap->pBusyAllocationListHead;
  4912. EndNode = Heap->pBusyAllocationListTail;
  4913. try {
  4914. //
  4915. // Sanity checks.
  4916. //
  4917. if (Heap->nBusyAllocations == 0) {
  4918. return;
  4919. }
  4920. if (StartNode == NULL || StartNode->pVirtualBlock == NULL) {
  4921. DbgPrint ("Page heap: corruption detected: %u: \n", __LINE__);
  4922. DbgBreakPoint ();
  4923. }
  4924. if (EndNode == NULL || EndNode->pVirtualBlock == NULL) {
  4925. DbgPrint ("Page heap: corruption detected: %u: \n", __LINE__);
  4926. DbgBreakPoint ();
  4927. }
  4928. //
  4929. // First check if StartNode is also in the free list. This was the typical
  4930. // corruption pattern that I have seen in the past.
  4931. //
  4932. if (RtlpDphSearchBlockInList (Heap->pFreeAllocationListHead, StartNode->pVirtualBlock)) {
  4933. DbgPrint ("Page heap: corruption detected: %u: \n", __LINE__);
  4934. DbgPrint ("Corruption detected: %u: \n", __LINE__);
  4935. DbgBreakPoint ();
  4936. }
  4937. //
  4938. // Make sure that we have in the busy list exactly the number of blocks we think
  4939. // we should have.
  4940. //
  4941. NumberOfBlocks = 0;
  4942. for (Node = StartNode; Node != NULL; Node = Node->pNextAlloc) {
  4943. NumberOfBlocks += 1;
  4944. }
  4945. if (NumberOfBlocks != Heap->nBusyAllocations) {
  4946. DbgPrint ("Page heap: corruption detected: %u: \n", __LINE__);
  4947. DbgBreakPoint ();
  4948. }
  4949. //
  4950. // Take all nodes in the busy list and make sure they seem to be allocated, that is
  4951. // they have the required pattern. This is skipped if we have the /backwards option
  4952. // enabled since in this case we do not put magic patterns.
  4953. //
  4954. if (! (Heap->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
  4955. for (Node = StartNode; Node != NULL; Node = Node->pNextAlloc) {
  4956. Block = (PDPH_BLOCK_INFORMATION)(Node->pUserAllocation) - 1;
  4957. if (Block->StartStamp != DPH_PAGE_BLOCK_START_STAMP_ALLOCATED) {
  4958. DbgPrint ("Page heap: corruption detected: wrong stamp for node %p \n", Node);
  4959. DbgBreakPoint ();
  4960. }
  4961. }
  4962. }
  4963. }
  4964. except (EXCEPTION_EXECUTE_HANDLER) {
  4965. DbgPrint ("Page heap: corruption detected: exception raised \n");
  4966. DbgBreakPoint ();
  4967. }
  4968. }
  4969. /////////////////////////////////////////////////////////////////////
  4970. /////////////////////////////////////////////// Fault injection logic
  4971. /////////////////////////////////////////////////////////////////////
//
// Fault injection state.
//
BOOLEAN RtlpDphFaultSeedInitialized;        // random seed has been set up
BOOLEAN RtlpDphFaultProcessEnoughStarted;   // process has run long enough to inject
ULONG RtlpDphFaultInjectionDisabled;        // >0 while some caller has injection disabled
ULONG RtlpDphFaultSeed;                     // seed for RtlRandom
ULONG RtlpDphFaultSuccessRate;              // count of calls where we did not inject
ULONG RtlpDphFaultFailureRate;              // count of calls where we did inject

//
// Circular buffer of stack traces captured at each injected fault.
// NO_OF_FAULT_STACKS must remain a power of two (the index is masked).
//
#define NO_OF_FAULT_STACKS 128
PVOID RtlpDphFaultStacks [NO_OF_FAULT_STACKS];
ULONG RtlpDphFaultStacksIndex;

//
// Minimum process lifetime before injection starts, in 100ns units.
//
#define ENOUGH_TIME ((DWORDLONG)(5 * 1000 * 1000 * 10)) // 5 secs
LARGE_INTEGER RtlpDphFaultStartTime;
LARGE_INTEGER RtlpDphFaultCurrentTime;
BOOLEAN
RtlpDphShouldFaultInject (
    )
/*++

Routine Description:

    Decides whether the current allocation request should be failed
    artificially (fault injection). Injection happens with probability
    RtlpDphFaultProbability / 10000, and only after the process has been
    running long enough (ENOUGH_TIME plus the configured time-out) and
    while no caller has disabled injection.

Return Value:

    TRUE if the caller should simulate an allocation failure,
    FALSE otherwise.

--*/
{
    ULONG Index;
    DWORDLONG Delta;

    if (RtlpDphFaultProbability == 0) {
        return FALSE;
    }

    if (RtlpDphDisableFaults != 0) {
        return FALSE;
    }

    //
    // Make sure we do not fault inject if at least one guy
    // requested our mercy by calling RtlpDphDisableFaultInjection.
    // The add/decrement pair leaves the counter unchanged; the add's
    // return value tells us whether it was already positive.
    //

    if (InterlockedExchangeAdd (&RtlpDphFaultInjectionDisabled, 1) > 0) {
        InterlockedDecrement (&RtlpDphFaultInjectionDisabled);
        return FALSE;
    }
    else {
        InterlockedDecrement (&RtlpDphFaultInjectionDisabled);
    }

    //
    // Make sure we do not fault while the process is getting
    // initialized. In principle we should deal with these bugs
    // also but it is not really a priority right now.
    //

    if (RtlpDphFaultProcessEnoughStarted == FALSE) {

        if ((DWORDLONG)(RtlpDphFaultStartTime.QuadPart) == 0) {
            // First call: just record the start time, never inject.
            NtQuerySystemTime (&RtlpDphFaultStartTime);
            return FALSE;
        }
        else {
            NtQuerySystemTime (&RtlpDphFaultCurrentTime);

            Delta = (DWORDLONG)(RtlpDphFaultCurrentTime.QuadPart)
                - (DWORDLONG)(RtlpDphFaultStartTime.QuadPart);

            if (Delta < ENOUGH_TIME) {
                return FALSE;
            }

            // Also honor the configurable time-out (seconds -> 100ns units).
            if (Delta <= ((DWORDLONG)RtlpDphFaultTimeOut * 1000 * 1000 * 10)) {
                return FALSE;
            }

            DbgPrint( "Page heap: enabling fault injection for process 0x%X \n",
                      HandleToUlong(NtCurrentTeb()->ClientId.UniqueProcess));

            RtlpDphFaultProcessEnoughStarted = TRUE;
        }
    }

    //
    // Initialize the seed if we need to.
    //

    if (RtlpDphFaultSeedInitialized == FALSE) {

        LARGE_INTEGER PerformanceCounter;

        PerformanceCounter.LowPart = 0xABCDDCBA;

        NtQueryPerformanceCounter (
            &PerformanceCounter,
            NULL);

        RtlpDphFaultSeed = PerformanceCounter.LowPart;
        RtlpDphFaultSeedInitialized = TRUE;
    }

    if ((RtlRandom(&RtlpDphFaultSeed) % 10000) < RtlpDphFaultProbability) {

        // Injecting: remember the stack trace in the circular buffer.
        Index = InterlockedExchangeAdd (&RtlpDphFaultStacksIndex, 1);
        Index &= (NO_OF_FAULT_STACKS - 1);

        RtlpDphFaultStacks[Index] = RtlpDphLogStackTrace (2);

        RtlpDphFaultFailureRate += 1;

        return TRUE;
    }
    else {

        RtlpDphFaultSuccessRate += 1;

        return FALSE;
    }
}
  5056. ULONG RtlpDphFaultInjectionDisabled;
VOID
RtlpDphDisableFaultInjection (
    )
/*++

Routine Description:

    Disables fault injection for all threads by incrementing the global
    disable counter (checked by RtlpDphShouldFaultInject). Calls nest;
    each call must be balanced by RtlpDphEnableFaultInjection.

--*/
{
    InterlockedIncrement (&RtlpDphFaultInjectionDisabled);
}
VOID
RtlpDphEnableFaultInjection (
    )
/*++

Routine Description:

    Re-enables fault injection by decrementing the global disable
    counter. Balances a previous call to RtlpDphDisableFaultInjection.

--*/
{
    InterlockedDecrement (&RtlpDphFaultInjectionDisabled);
}
  5069. /////////////////////////////////////////////////////////////////////
  5070. ////////////////////////////////////////////////////////// Debug code
  5071. /////////////////////////////////////////////////////////////////////
#if INTERNAL_DEBUG

//
// Stack trace of the most recent delayed-free cache check, kept for
// debugging.
//
PVOID RtlpDphLastCheckTrace [16];

VOID
RtlpDphCheckFreeDelayedCache (
    PVOID CheckBlock,
    SIZE_T CheckSize
    )
/*++

Routine Description:

    Internal-debug consistency check of the delayed free queue. Verifies
    that the region [CheckBlock, CheckBlock + CheckSize) does not overlap
    any queued (already freed) block, and that every queued block still
    has its free-block pattern and its heap-entry busy bit set. Breaks
    into the debugger on any inconsistency.

Arguments:

    CheckBlock - start of the region being checked (e.g. a block about
        to be handed out).
    CheckSize - size of the region in bytes.

--*/
{
    ULONG Reason;
    PDPH_BLOCK_INFORMATION Block;
    PLIST_ENTRY Current;
    PLIST_ENTRY Next;
    ULONG Hash;

    // Queue not initialized yet: nothing to check.
    if (RtlpDphDelayedFreeQueue.Flink == NULL) {
        return;
    }

    RtlEnterCriticalSection (&RtlpDphDelayedFreeQueueLock);

    for (Current = RtlpDphDelayedFreeQueue.Flink;
         Current != &RtlpDphDelayedFreeQueue;
         Current = Next) {

        Next = Current->Flink;

        // The queued entry must not live inside the region being checked.
        if (Current >= (PLIST_ENTRY)CheckBlock &&
            Current < (PLIST_ENTRY)((SIZE_T)CheckBlock + CheckSize)) {

            DbgPrint ("Page heap: block %p contains freed block %p \n", CheckBlock, Current);
            DbgBreakPoint ();
        }

        Block = CONTAINING_RECORD (Current, DPH_BLOCK_INFORMATION, FreeQueue);

        // The Heap field is stored scrambled while the block sits in the
        // queue; unscramble it for the checks below and re-scramble after.
        Block->Heap = UNSCRAMBLE_POINTER(Block->Heap);

        //
        // Check if the block about to be freed was touched.
        //

        if (! RtlpDphIsNormalFreeHeapBlock(Block + 1, &Reason, FALSE)) {

            RtlpDphReportCorruptedBlock (NULL,
                                         DPH_CONTEXT_DELAYED_FREE,
                                         Block + 1,
                                         Reason);
        }

        //
        // Check busy bit
        //

        if ((((PHEAP_ENTRY)Block - 1)->Flags & HEAP_ENTRY_BUSY) == 0) {

            DbgPrint ("Page heap: block %p has busy bit reset \n", Block);
            DbgBreakPoint ();
        }

        Block->Heap = SCRAMBLE_POINTER(Block->Heap);
    }

    // Record who performed the last successful check.
    RtlZeroMemory (RtlpDphLastCheckTrace,
                   sizeof RtlpDphLastCheckTrace);

    RtlCaptureStackBackTrace (0,
                              16,
                              RtlpDphLastCheckTrace,
                              &Hash);

    RtlLeaveCriticalSection (&RtlpDphDelayedFreeQueueLock);
}

#endif // #if INTERNAL_DEBUG
  5127. /////////////////////////////////////////////////////////////////////
  5128. ///////////////////////////////////// Page heap global initialization
  5129. /////////////////////////////////////////////////////////////////////
#if 0

//
// NOTE: this whole region is compiled out (#if 0) -- it is kept only as
// reference for a possible future first-chance AV filter.
//
// It is not worth intercepting exceptions before anybody else. On one hand
// it is useful to detect if there is code that hides problems behind try/except's
// but on the other hand the code to detect if it is a legitimate AV or a
// bad one is fairly complicated and we have chances of getting double exceptions
// and other weird issues. We better leave all this business to the debugger if
// for instance it is configured to catch first chance exceptions.
//

LONG
NTAPI
RtlpDphPageHeapExceptionFilter (
    struct _EXCEPTION_POINTERS * ExnInfo
    )
//
// Vectored exception handler that reports first-chance access
// violations (above the first 64K) via a verifier stop when a debugger
// is attached; always continues the normal search afterwards.
//
{
    PEXCEPTION_RECORD Exn;
    HANDLE CurrentThread;

    Exn = ExnInfo->ExceptionRecord;

    if (Exn->ExceptionCode == STATUS_ACCESS_VIOLATION) {

        if (NtCurrentPeb()->BeingDebugged) {

            if (Exn->NumberParameters > 1) {

                //
                // We skip over AV's in the first 64K. This skips C++ issues
                // where people free(0). This happens in Java VM stuff for instance.
                //

                if (Exn->ExceptionInformation[1] > 0x10000) {

                    VERIFIER_STOP (APPLICATION_VERIFIER_ACCESS_VIOLATION
                                   | APPLICATION_VERIFIER_DO_NOT_BREAK,
                                   "first chance access violation (address, .exr, .cxr)",
                                   Exn->ExceptionInformation[1],
                                   ExnInfo->ExceptionRecord,
                                   ExnInfo->ContextRecord,
                                   0);
                }
            }
        }
    }

    return EXCEPTION_CONTINUE_SEARCH;
}

BOOLEAN
RtlpDphInitializePageHeapPackage (
    )
//
// Registers the first-chance AV filter above as a vectored exception
// handler. Returns FALSE if registration fails.
//
{
    //
    // SilviuC: we do not establish a first chance AV filter for now.
    //

    PVOID Handler;

    Handler = RtlAddVectoredExceptionHandler (1, RtlpDphPageHeapExceptionFilter);

    if (Handler == NULL) {
        DbgPrint ("Page heap: failed to establish an exception filter \n");
        return FALSE;
    }

    return TRUE;
}

#endif // #if 0
  5185. #endif // DEBUG_PAGE_HEAP
  5186. //
  5187. // End of module
  5188. //