Leaked source code of windows server 2003
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

7241 lines
195 KiB

  1. /*++
  2. Copyright (c) 1994-2000 Microsoft Corporation
  3. Module Name:
  4. heappage.c
  5. Abstract:
  6. Implementation of NT RtlHeap family of APIs for debugging
  7. applications with heap usage bugs. Each allocation returned to
  8. the calling app is placed at the end of a virtual page such that
  9. the following virtual page is protected (ie, NO_ACCESS).
  10. So, when the errant app attempts to reference or modify memory
  11. beyond the allocated portion of a heap block, an access violation
  12. is immediately caused. This facilitates debugging the app
  13. because the access violation occurs at the exact point in the
  14. app where the heap corruption or abuse would occur. Note that
  15. significantly more memory (pagefile) is required to run an app
  16. using this heap implementation as opposed to the retail heap
  17. manager.
  18. Author:
  19. Tom McGuire (TomMcg) 06-Jan-1995
  20. Silviu Calinoiu (SilviuC) 22-Feb-2000
  21. Revision History:
  22. --*/
  23. #include "ntrtlp.h"
  24. #include "heappage.h"
  25. #include "heappagi.h"
  26. #include "heappriv.h"
  27. //
  28. // Remainder of entire file is wrapped with #ifdef DEBUG_PAGE_HEAP so that
  29. // it will compile away to nothing if DEBUG_PAGE_HEAP is not defined in
  30. // heappage.h
  31. //
  32. #ifdef DEBUG_PAGE_HEAP
  33. //
  34. // Page size
  35. //
  36. #if defined(_X86_)
  37. #ifndef PAGE_SIZE
  38. #define PAGE_SIZE 0x1000
  39. #endif
  40. #define USER_ALIGNMENT 8
  41. #elif defined(_IA64_)
  42. #ifndef PAGE_SIZE
  43. #define PAGE_SIZE 0x2000
  44. #endif
  45. #define USER_ALIGNMENT 16
  46. #elif defined(_AMD64_)
  47. #ifndef PAGE_SIZE
  48. #define PAGE_SIZE 0x1000
  49. #endif
  50. #define USER_ALIGNMENT 16
  51. #else
  52. #error // platform not defined
  53. #endif
  54. //
  55. // Few constants
  56. //
  57. #define DPH_HEAP_SIGNATURE 0xFFEEDDCC
  58. #define FILL_BYTE 0xEE
  59. #define HEAD_FILL_SIZE 0x10
  60. #define RESERVE_SIZE ((ULONG_PTR)0x100000)
  61. #define VM_UNIT_SIZE ((ULONG_PTR)0x10000)
  62. #define POOL_SIZE ((ULONG_PTR)0x4000)
  63. #define INLINE __inline
  64. #define MIN_FREE_LIST_LENGTH 128
  65. #if defined(_WIN64)
  66. #define EXTREME_SIZE_REQUEST (ULONG_PTR)(0x8000000000000000 - RESERVE_SIZE)
  67. #else
  68. #define EXTREME_SIZE_REQUEST (ULONG_PTR)(0x80000000 - RESERVE_SIZE)
  69. #endif
  70. //
  71. // Functions from stktrace.c to manipulate traces in the trace database.
  72. //
  73. PVOID
  74. RtlpGetStackTraceAddress (
  75. USHORT Index
  76. );
  77. USHORT
  78. RtlpLogStackBackTraceEx(
  79. ULONG FramesToSkip
  80. );
  81. //
  82. // Few macros
  83. //
  84. #define ROUNDUP2( x, n ) ((( x ) + (( n ) - 1 )) & ~(( n ) - 1 ))
  85. #define HEAP_HANDLE_FROM_ROOT( HeapRoot ) \
  86. ((PVOID)(((PCHAR)(HeapRoot)) - PAGE_SIZE ))
  87. #define IF_GENERATE_EXCEPTION( Flags, Status ) { \
  88. if (( Flags ) & HEAP_GENERATE_EXCEPTIONS ) \
  89. RtlpDphRaiseException((ULONG)(Status)); \
  90. }
  91. #define OUT_OF_VM_BREAK( Flags, szText ) { \
  92. if (( Flags ) & HEAP_BREAK_WHEN_OUT_OF_VM ) { \
  93. DbgPrintEx (DPFLTR_VERIFIER_ID, \
  94. DPFLTR_ERROR_LEVEL, \
  95. (szText)); \
  96. DbgBreakPoint (); \
  97. } \
  98. }
  99. #define PROCESS_ID() HandleToUlong(NtCurrentTeb()->ClientId.UniqueProcess)
  100. //
  101. // List manipulation macros
  102. //
  103. #define ENQUEUE_HEAD( Node, Head, Tail ) { \
  104. (Node)->pNextAlloc = (Head); \
  105. if ((Head) == NULL ) \
  106. (Tail) = (Node); \
  107. (Head) = (Node); \
  108. }
  109. #define ENQUEUE_TAIL( Node, Head, Tail ) { \
  110. if ((Tail) == NULL ) \
  111. (Head) = (Node); \
  112. else \
  113. (Tail)->pNextAlloc = (Node); \
  114. (Tail) = (Node); \
  115. }
  116. #define DEQUEUE_NODE( Node, Prev, Head, Tail ) { \
  117. PVOID Next = (Node)->pNextAlloc; \
  118. if ((Head) == (Node)) \
  119. (Head) = Next; \
  120. if ((Tail) == (Node)) \
  121. (Tail) = (Prev); \
  122. if ((Prev) != (NULL)) \
  123. (Prev)->pNextAlloc = Next; \
  124. }
  125. //
  126. // Bias/unbias pointer
  127. //
  128. #define BIAS_POINTER(p) ((PVOID)((ULONG_PTR)(p) | (ULONG_PTR)0x01))
  129. #define UNBIAS_POINTER(p) ((PVOID)((ULONG_PTR)(p) & ~((ULONG_PTR)0x01)))
  130. #define IS_BIASED_POINTER(p) ((PVOID)((ULONG_PTR)(p) & (ULONG_PTR)0x01))
  131. //
  132. // Scramble/unscramble
  133. //
  134. // We scramble heap pointers in the header blocks in order to make them
  135. // look as kernel pointers and cause an AV if used. This is not totally
  136. // accurate on IA64 but still likely to cause an AV.
  137. //
  138. #if defined(_WIN64)
  139. #define SCRAMBLE_VALUE ((ULONG_PTR)0x8000000000000000)
  140. #else
  141. #define SCRAMBLE_VALUE ((ULONG_PTR)0x80000000)
  142. #endif
  143. #define SCRAMBLE_POINTER(P) ((PVOID)((ULONG_PTR)(P) ^ SCRAMBLE_VALUE))
  144. #define UNSCRAMBLE_POINTER(P) ((PVOID)((ULONG_PTR)(P) ^ SCRAMBLE_VALUE))
  145. //
  146. // Protect/Unprotect heap structures macros
  147. //
  148. // The Protect/Unprotect functions are #if zeroed for now because there is
  149. // an issue to be resolved when destroying a heap. At that moment we need
  150. // to modify the global list of heaps and for this we need to touch the
  151. // heap structure for another heap. In order to do this we need to unprotect
  152. // and later protect it and for that we need to acquire the lock of that heap.
  153. // But this is prone to causing deadlocks. Until we will find a smart scheme
  154. // for doing this we will disable the whole /protect feature. Note also that
  155. // the same problem exists in the heap create code path where we have to update
  156. // the global list of heaps too.
  157. //
  158. // The best fix for this would be to move the fwrd/bwrd pointers for the heap
  159. // list from the DPH_HEAP_ROOT structure into the special R/W page that stores
  160. // the heap lock (needs to be always R/W).
  161. //
  162. #define PROTECT_HEAP_STRUCTURES( HeapRoot ) { \
  163. if ((HeapRoot)->HeapFlags & HEAP_PROTECTION_ENABLED ) { \
  164. RtlpDphProtectHeapStructures( (HeapRoot) ); \
  165. } \
  166. } \
  167. #define UNPROTECT_HEAP_STRUCTURES( HeapRoot ) { \
  168. if ((HeapRoot)->HeapFlags & HEAP_PROTECTION_ENABLED ) { \
  169. RtlpDphUnprotectHeapStructures( (HeapRoot) ); \
  170. } \
  171. } \
  172. //
  173. // RtlpDebugPageHeap
  174. //
  175. // Global variable that marks that page heap is enabled. It is set
  176. // in \nt\base\ntdll\ldrinit.c by reading the GlobalFlag registry
  177. // value (system wide or per process one) and checking if the
  178. // FLG_HEAP_PAGE_ALLOCS is set.
  179. //
  180. BOOLEAN RtlpDebugPageHeap;
  181. //
  182. // Per process verifier flags.
  183. //
  184. extern ULONG AVrfpVerifierFlags;
  185. //
  186. // Statistics
  187. //
  188. ULONG RtlpDphCounter [32];
  189. #define BUMP_COUNTER(cnt) InterlockedIncrement((PLONG)(&(RtlpDphCounter[cnt])))
  190. #define CNT_RESERVE_VM_FAILURES 0
  191. #define CNT_COMMIT_VM_FAILURES 1
  192. #define CNT_DECOMMIT_VM_FAILURES 2
  193. #define CNT_RELEASE_VM_FAILURES 3
  194. #define CNT_PROTECT_VM_FAILURES 4
  195. #define CNT_PAGE_HEAP_CREATE_FAILURES 5
  196. #define CNT_NT_HEAP_CREATE_FAILURES 6
  197. #define CNT_INITIALIZE_CS_FAILURES 7
  198. #define CNT_TRACEDB_CREATE_FAILURES 8
  199. #define CNT_TRACE_ADD_FAILURES 9
  200. #define CNT_TRACE_CAPTURE_FAILURES 10
  201. #define CNT_ALLOCS_FILLED 11
  202. #define CNT_ALLOCS_ZEROED 12
  203. #define CNT_HEAP_WALK_CALLS 13
  204. #define CNT_HEAP_GETUSERINFO_CALLS 14
  205. #define CNT_HEAP_SETUSERFLAGS_CALLS 15
  206. #define CNT_HEAP_SETUSERVALUE_CALLS 16
  207. #define CNT_HEAP_SIZE_CALLS 17
  208. #define CNT_HEAP_VALIDATE_CALLS 18
  209. #define CNT_HEAP_GETPROCESSHEAPS_CALLS 19
  210. #define CNT_COALESCE_SUCCESSES 20
  211. #define CNT_COALESCE_FAILURES 21
  212. #define CNT_COALESCE_QUERYVM_FAILURES 22
  213. #define CNT_REALLOC_IN_PLACE_SMALLER 23
  214. #define CNT_REALLOC_IN_PLACE_BIGGER 24
  215. #define CNT_MAX_INDEX 31
  216. //
  217. // Breakpoints for various conditions.
  218. //
  219. ULONG RtlpDphBreakOptions;
  220. #define BRK_ON_RESERVE_VM_FAILURE 0x0001
  221. #define BRK_ON_COMMIT_VM_FAILURE 0x0002
  222. #define BRK_ON_RELEASE_VM_FAILURE 0x0004
  223. #define BRK_ON_DECOMMIT_VM_FAILURE 0x0008
  224. #define BRK_ON_PROTECT_VM_FAILURE 0x0010
  225. #define BRK_ON_QUERY_VM_FAILURE 0x0020
  226. #define BRK_ON_EXTREME_SIZE_REQUEST 0x0040
  227. #define BRK_ON_NULL_FREE 0x0080
  228. #define SHOULD_BREAK(flg) ((RtlpDphBreakOptions & (flg)))
  229. //
  230. // Debug options.
  231. //
  232. ULONG RtlpDphDebugOptions;
  233. #define DBG_INTERNAL_VALIDATION 0x0001
  234. #define DBG_SHOW_VM_LIMITS 0x0002
  235. #define DBG_SHOW_PAGE_CREATE_DESTROY 0x0004
  236. #define DEBUG_OPTION(flg) ((RtlpDphDebugOptions & (flg)))
  237. //
  238. // Page heaps list manipulation.
  239. //
  240. // We maintain a list of all page heaps in the process to support
  241. // APIs like GetProcessHeaps. The list is also useful for debug
  242. // extensions that need to iterate the heaps. The list is protected
  243. // by RtlpDphPageHeapListLock lock.
  244. //
  245. BOOLEAN RtlpDphPageHeapListInitialized;
  246. RTL_CRITICAL_SECTION RtlpDphPageHeapListLock;
  247. ULONG RtlpDphPageHeapListLength;
  248. LIST_ENTRY RtlpDphPageHeapList;
  249. //
  250. // `RtlpDebugPageHeapGlobalFlags' stores the global page heap flags.
  251. // The value of this variable is copied into the per heap
  252. // flags (ExtraFlags field) during heap creation.
  253. //
  254. // The initial value is so that by default we use page heap only with
  255. // normal allocations. This way if system wide global flag for page
  256. // heap is set the machine will still boot. After that we can enable
  257. // page heap with "sudden death" for specific processes. The most useful
  258. // flags for this case would be:
  259. //
  260. // PAGE_HEAP_ENABLE_PAGE_HEAP |
  261. // PAGE_HEAP_COLLECT_STACK_TRACES ;
  262. //
  263. // If no flags specified the default is page heap light with
  264. // stack trace collection.
  265. //
  266. ULONG RtlpDphGlobalFlags = PAGE_HEAP_COLLECT_STACK_TRACES;
  267. //
  268. // Page heap global flags.
  269. //
  270. // These values are read from registry in \nt\base\ntdll\ldrinit.c.
  271. //
  272. ULONG RtlpDphSizeRangeStart;
  273. ULONG RtlpDphSizeRangeEnd;
  274. ULONG RtlpDphDllRangeStart;
  275. ULONG RtlpDphDllRangeEnd;
  276. ULONG RtlpDphRandomProbability;
  277. WCHAR RtlpDphTargetDlls [512];
  278. UNICODE_STRING RtlpDphTargetDllsUnicode;
  279. //
  280. // If not zero controls the probability with which
  281. // allocations will be failed on purpose by page heap
  282. // manager. Timeout represents the initial period during
  283. // process initialization when faults are not allowed.
  284. //
  285. ULONG RtlpDphFaultProbability;
  286. ULONG RtlpDphFaultTimeOut;
  287. //
  288. // This variable offers volatile fault injection.
  289. // It can be set/reset from debugger to disable/enable
  290. // fault injection.
  291. //
  292. ULONG RtlpDphDisableFaults;
  293. //
  294. // Threshold for delaying a free operation in the normal heap.
  295. // If we get over this limit we start actually freeing blocks.
  296. //
  297. SIZE_T RtlpDphDelayedFreeCacheSize = 1024 * PAGE_SIZE;
  298. //
  299. // Support for normal heap allocations
  300. //
  301. // In order to make better use of memory available page heap will
  302. // allocate some of the block into a normal NT heap that it manages.
  303. // We will call these blocks "normal blocks" as opposed to "page blocks".
  304. //
  305. // All normal blocks have the requested size increased by DPH_BLOCK_INFORMATION.
  306. // The address returned is of course of the first byte after the block
  307. // info structure. Upon free, blocks are checked for corruption and
  308. // then released into the normal heap.
  309. //
  310. // All these normal heap functions are called with the page heap
  311. // lock acquired.
  312. //
  313. PVOID
  314. RtlpDphNormalHeapAllocate (
  315. PDPH_HEAP_ROOT Heap,
  316. PVOID NtHeap,
  317. ULONG Flags,
  318. SIZE_T Size
  319. );
  320. BOOLEAN
  321. RtlpDphNormalHeapFree (
  322. PDPH_HEAP_ROOT Heap,
  323. PVOID NtHeap,
  324. ULONG Flags,
  325. PVOID Block
  326. );
  327. PVOID
  328. RtlpDphNormalHeapReAllocate (
  329. PDPH_HEAP_ROOT Heap,
  330. PVOID NtHeap,
  331. ULONG Flags,
  332. PVOID OldBlock,
  333. SIZE_T Size
  334. );
  335. SIZE_T
  336. RtlpDphNormalHeapSize (
  337. PDPH_HEAP_ROOT Heap,
  338. PVOID NtHeap,
  339. ULONG Flags,
  340. PVOID Block
  341. );
  342. BOOLEAN
  343. RtlpDphNormalHeapSetUserFlags(
  344. IN PDPH_HEAP_ROOT Heap,
  345. PVOID NtHeap,
  346. IN ULONG Flags,
  347. IN PVOID Address,
  348. IN ULONG UserFlagsReset,
  349. IN ULONG UserFlagsSet
  350. );
  351. BOOLEAN
  352. RtlpDphNormalHeapSetUserValue(
  353. IN PDPH_HEAP_ROOT Heap,
  354. PVOID NtHeap,
  355. IN ULONG Flags,
  356. IN PVOID Address,
  357. IN PVOID UserValue
  358. );
  359. BOOLEAN
  360. RtlpDphNormalHeapGetUserInfo(
  361. IN PDPH_HEAP_ROOT Heap,
  362. PVOID NtHeap,
  363. IN ULONG Flags,
  364. IN PVOID Address,
  365. OUT PVOID* UserValue,
  366. OUT PULONG UserFlags
  367. );
  368. BOOLEAN
  369. RtlpDphNormalHeapValidate(
  370. IN PDPH_HEAP_ROOT Heap,
  371. PVOID NtHeap,
  372. IN ULONG Flags,
  373. IN PVOID Address
  374. );
  375. //
  376. // Support for DPH_BLOCK_INFORMATION management
  377. //
  378. // This header information prefixes both the normal and page heap
  379. // blocks.
  380. //
  381. #define DPH_CONTEXT_GENERAL 0
  382. #define DPH_CONTEXT_FULL_PAGE_HEAP_FREE 1
  383. #define DPH_CONTEXT_FULL_PAGE_HEAP_REALLOC 2
  384. #define DPH_CONTEXT_FULL_PAGE_HEAP_DESTROY 3
  385. #define DPH_CONTEXT_NORMAL_PAGE_HEAP_FREE 4
  386. #define DPH_CONTEXT_NORMAL_PAGE_HEAP_REALLOC 5
  387. #define DPH_CONTEXT_NORMAL_PAGE_HEAP_SETFLAGS 6
  388. #define DPH_CONTEXT_NORMAL_PAGE_HEAP_SETVALUE 7
  389. #define DPH_CONTEXT_NORMAL_PAGE_HEAP_GETINFO 8
  390. #define DPH_CONTEXT_DELAYED_FREE 9
  391. #define DPH_CONTEXT_DELAYED_DESTROY 10
  392. VOID
  393. RtlpDphReportCorruptedBlock (
  394. PVOID Heap,
  395. ULONG Context,
  396. PVOID Block,
  397. ULONG Reason
  398. );
  399. BOOLEAN
  400. RtlpDphIsNormalHeapBlock (
  401. PDPH_HEAP_ROOT Heap,
  402. PVOID Block,
  403. PULONG Reason,
  404. BOOLEAN CheckPattern
  405. );
  406. BOOLEAN
  407. RtlpDphIsNormalFreeHeapBlock (
  408. PVOID Block,
  409. PULONG Reason,
  410. BOOLEAN CheckPattern
  411. );
  412. BOOLEAN
  413. RtlpDphIsPageHeapBlock (
  414. PDPH_HEAP_ROOT Heap,
  415. PVOID Block,
  416. PULONG Reason,
  417. BOOLEAN CheckPattern
  418. );
  419. BOOLEAN
  420. RtlpDphWriteNormalHeapBlockInformation (
  421. PDPH_HEAP_ROOT Heap,
  422. PVOID Block,
  423. SIZE_T RequestedSize,
  424. SIZE_T ActualSize
  425. );
  426. BOOLEAN
  427. RtlpDphWritePageHeapBlockInformation (
  428. PDPH_HEAP_ROOT Heap,
  429. ULONG HeapFlags,
  430. PVOID Block,
  431. SIZE_T RequestedSize,
  432. SIZE_T ActualSize
  433. );
  434. BOOLEAN
  435. RtlpDphGetBlockSizeFromCorruptedBlock (
  436. PVOID Block,
  437. PSIZE_T Size
  438. );
  439. //
  440. // Delayed free queue (of normal heap allocations) management
  441. //
  442. NTSTATUS
  443. RtlpDphInitializeDelayedFreeQueue (
  444. VOID
  445. );
  446. VOID
  447. RtlpDphAddToDelayedFreeQueue (
  448. PDPH_BLOCK_INFORMATION Info
  449. );
  450. BOOLEAN
  451. RtlpDphNeedToTrimDelayedFreeQueue (
  452. PSIZE_T TrimSize
  453. );
  454. VOID
  455. RtlpDphTrimDelayedFreeQueue (
  456. SIZE_T TrimSize,
  457. ULONG Flags
  458. );
  459. VOID
  460. RtlpDphFreeDelayedBlocksFromHeap (
  461. PVOID PageHeap,
  462. PVOID NormalHeap
  463. );
  464. //
  465. // Decision normal heap vs. page heap
  466. //
  467. BOOLEAN
  468. RtlpDphShouldAllocateInPageHeap (
  469. PDPH_HEAP_ROOT Heap,
  470. SIZE_T Size
  471. );
  472. BOOLEAN
  473. RtlpDphVmLimitCanUsePageHeap (
  474. );
  475. //
  476. // Stack trace detection for trace database.
  477. //
  478. PVOID
  479. RtlpDphLogStackTrace (
  480. ULONG FramesToSkip
  481. );
  482. //
  483. // Page heap general support functions
  484. //
  485. VOID
  486. RtlpDphEnterCriticalSection(
  487. IN PDPH_HEAP_ROOT HeapRoot,
  488. IN ULONG Flags
  489. );
  490. INLINE
  491. VOID
  492. RtlpDphLeaveCriticalSection(
  493. IN PDPH_HEAP_ROOT HeapRoot
  494. );
  495. VOID
  496. RtlpDphRaiseException(
  497. IN ULONG ExceptionCode
  498. );
  499. PVOID
  500. RtlpDphPointerFromHandle(
  501. IN PVOID HeapHandle
  502. );
  503. //
  504. // Virtual memory manipulation functions
  505. //
  506. BOOLEAN
  507. RtlpDebugPageHeapRobustProtectVM(
  508. IN PVOID VirtualBase,
  509. IN SIZE_T VirtualSize,
  510. IN ULONG NewAccess,
  511. IN BOOLEAN Recursion
  512. );
  513. INLINE
  514. BOOLEAN
  515. RtlpDebugPageHeapProtectVM(
  516. IN PVOID VirtualBase,
  517. IN SIZE_T VirtualSize,
  518. IN ULONG NewAccess
  519. );
  520. INLINE
  521. PVOID
  522. RtlpDebugPageHeapAllocateVM(
  523. IN SIZE_T nSize
  524. );
  525. INLINE
  526. BOOLEAN
  527. RtlpDebugPageHeapReleaseVM(
  528. IN PVOID pVirtual
  529. );
  530. INLINE
  531. BOOLEAN
  532. RtlpDebugPageHeapCommitVM(
  533. IN PVOID pVirtual,
  534. IN SIZE_T nSize
  535. );
  536. INLINE
  537. BOOLEAN
  538. RtlpDebugPageHeapDecommitVM(
  539. IN PVOID pVirtual,
  540. IN SIZE_T nSize
  541. );
  542. //
  543. // Target dlls logic
  544. //
  545. // RtlpDphTargetDllsLoadCallBack is called in ntdll\ldrapi.c
  546. // (LdrpLoadDll) whenever a new dll is loaded in the process
  547. // space.
  548. //
  549. NTSTATUS
  550. RtlpDphTargetDllsLogicInitialize (
  551. VOID
  552. );
  553. VOID
  554. RtlpDphTargetDllsLoadCallBack (
  555. PUNICODE_STRING Name,
  556. PVOID Address,
  557. ULONG Size
  558. );
  559. const WCHAR *
  560. RtlpDphIsDllTargeted (
  561. const WCHAR * Name
  562. );
  563. //
  564. // Fault injection logic
  565. //
  566. BOOLEAN
  567. RtlpDphShouldFaultInject (
  568. VOID
  569. );
  570. //
  571. // Internal validation functions.
  572. //
  573. VOID
  574. RtlpDphInternalValidatePageHeap (
  575. PDPH_HEAP_ROOT Heap,
  576. PUCHAR ExemptAddress,
  577. SIZE_T ExemptSize
  578. );
  579. VOID
  580. RtlpDphValidateInternalLists (
  581. PDPH_HEAP_ROOT Heap
  582. );
  583. VOID
  584. RtlpDphCheckFreeDelayedCache (
  585. PVOID CheckBlock,
  586. SIZE_T CheckSize
  587. );
  588. VOID
  589. RtlpDphVerifyIntegrity(
  590. IN PDPH_HEAP_ROOT pHeap
  591. );
  592. VOID
  593. RtlpDphCheckFillPattern (
  594. PUCHAR Address,
  595. SIZE_T Size,
  596. UCHAR Fill
  597. );
  598. //
  599. // Defined in ntdll\verifier.c.
  600. //
  601. VOID
  602. AVrfInternalHeapFreeNotification (
  603. PVOID AllocationBase,
  604. SIZE_T AllocationSize
  605. );
  606. /////////////////////////////////////////////////////////////////////
  607. ///////////////////////////////// Page heap general support functions
  608. /////////////////////////////////////////////////////////////////////
  609. VOID
  610. RtlpDphEnterCriticalSection(
  611. IN PDPH_HEAP_ROOT HeapRoot,
  612. IN ULONG Flags
  613. )
  614. {
  615. if (HeapRoot->FirstThread == NULL) {
  616. HeapRoot->FirstThread = NtCurrentTeb()->ClientId.UniqueThread;
  617. }
  618. if (Flags & HEAP_NO_SERIALIZE) {
  619. //
  620. // If current thread has a different ID than the first thread
  621. // that got into this heap then we break. Avoid this check if
  622. // this allocation comes from Global/Local Heap APIs because
  623. // they lock the heap in a separate call and then they call
  624. // NT heap APIs with no_serialize flag set.
  625. //
  626. // Note. We avoid this check if we do not have the specific flag
  627. // on. This is so because MPheap-like heaps can give false
  628. // positives.
  629. //
  630. if ((HeapRoot->ExtraFlags & PAGE_HEAP_CHECK_NO_SERIALIZE_ACCESS)) {
  631. if (RtlpDphPointerFromHandle(RtlProcessHeap()) != HeapRoot) {
  632. if (HeapRoot->FirstThread != NtCurrentTeb()->ClientId.UniqueThread) {
  633. VERIFIER_STOP (APPLICATION_VERIFIER_UNSYNCHRONIZED_ACCESS,
  634. "multithreaded access in HEAP_NO_SERIALIZE heap",
  635. HeapRoot, "Heap handle",
  636. HeapRoot->FirstThread, "First thread that used the heap",
  637. NtCurrentTeb()->ClientId.UniqueThread, "Current thread using the heap",
  638. 1, "/no_sync option used");
  639. }
  640. }
  641. }
  642. if (! RtlTryEnterCriticalSection( HeapRoot->HeapCritSect )) {
  643. if (HeapRoot->nRemoteLockAcquired == 0) {
  644. //
  645. // Another thread owns the CritSect. This is an application
  646. // bug since multithreaded access to heap was attempted with
  647. // the HEAP_NO_SERIALIZE flag specified.
  648. //
  649. VERIFIER_STOP (APPLICATION_VERIFIER_UNSYNCHRONIZED_ACCESS,
  650. "multithreaded access in HEAP_NO_SERIALIZE heap",
  651. HeapRoot, "Heap handle",
  652. HeapRoot->HeapCritSect->OwningThread, "Thread owning heap lock",
  653. NtCurrentTeb()->ClientId.UniqueThread, "Current thread trying to acquire the heap lock",
  654. 0, "");
  655. //
  656. // In the interest of allowing the errant app to continue,
  657. // we'll force serialization and continue.
  658. //
  659. HeapRoot->HeapFlags &= ~HEAP_NO_SERIALIZE;
  660. }
  661. RtlEnterCriticalSection( HeapRoot->HeapCritSect );
  662. }
  663. }
  664. else {
  665. RtlEnterCriticalSection( HeapRoot->HeapCritSect );
  666. }
  667. }
  668. INLINE
  669. VOID
  670. RtlpDphLeaveCriticalSection(
  671. IN PDPH_HEAP_ROOT HeapRoot
  672. )
  673. {
  674. RtlLeaveCriticalSection( HeapRoot->HeapCritSect );
  675. }
  676. VOID
  677. RtlpDphRaiseException(
  678. IN ULONG ExceptionCode
  679. )
  680. {
  681. EXCEPTION_RECORD ER;
  682. ER.ExceptionCode = ExceptionCode;
  683. ER.ExceptionFlags = 0;
  684. ER.ExceptionRecord = NULL;
  685. ER.ExceptionAddress = RtlpDphRaiseException;
  686. ER.NumberParameters = 0;
  687. RtlRaiseException( &ER );
  688. }
  689. PVOID
  690. RtlpDphPointerFromHandle(
  691. IN PVOID HeapHandle
  692. )
  693. {
  694. try {
  695. if (((PHEAP)(HeapHandle))->ForceFlags & HEAP_FLAG_PAGE_ALLOCS) {
  696. PDPH_HEAP_ROOT HeapRoot = (PVOID)(((PCHAR)(HeapHandle)) + PAGE_SIZE );
  697. if (HeapRoot->Signature == DPH_HEAP_SIGNATURE) {
  698. return HeapRoot;
  699. }
  700. }
  701. }
  702. except( EXCEPTION_EXECUTE_HANDLER ) {
  703. }
  704. VERIFIER_STOP (APPLICATION_VERIFIER_BAD_HEAP_HANDLE,
  705. "heap handle with incorrect signature",
  706. HeapHandle, "Heap handle",
  707. 0, "", 0, "", 0, "");
  708. return NULL;
  709. }
  710. /////////////////////////////////////////////////////////////////////
  711. /////////////////////////////// Virtual memory manipulation functions
  712. /////////////////////////////////////////////////////////////////////
  713. INLINE
  714. NTSTATUS
  715. RtlpDphAllocateVm(
  716. IN PVOID * Address,
  717. IN SIZE_T Size,
  718. IN ULONG Type,
  719. IN ULONG Protection
  720. )
  721. {
  722. NTSTATUS Status;
  723. Status = ZwAllocateVirtualMemory (NtCurrentProcess(),
  724. Address,
  725. 0,
  726. &Size,
  727. Type,
  728. Protection);
  729. if (! NT_SUCCESS(Status)) {
  730. if (Type == MEM_RESERVE) {
  731. BUMP_COUNTER (CNT_RESERVE_VM_FAILURES);
  732. if (SHOULD_BREAK(BRK_ON_RESERVE_VM_FAILURE)) {
  733. DbgPrintEx (DPFLTR_VERIFIER_ID,
  734. DPFLTR_ERROR_LEVEL,
  735. "Page heap: AllocVm (%p, %p, %x) failed with %x \n",
  736. *Address, Size, Type, Status);
  737. DbgBreakPoint ();
  738. }
  739. }
  740. else {
  741. BUMP_COUNTER (CNT_COMMIT_VM_FAILURES);
  742. if (SHOULD_BREAK(BRK_ON_COMMIT_VM_FAILURE)) {
  743. DbgPrintEx (DPFLTR_VERIFIER_ID,
  744. DPFLTR_ERROR_LEVEL,
  745. "Page heap: AllocVm (%p, %p, %x) failed with %x \n",
  746. *Address, Size, Type, Status);
  747. DbgBreakPoint ();
  748. }
  749. }
  750. }
  751. return Status;
  752. }
  753. INLINE
  754. NTSTATUS
  755. RtlpDphFreeVm(
  756. IN PVOID Address,
  757. IN SIZE_T Size,
  758. IN ULONG Type
  759. )
  760. {
  761. NTSTATUS Status;
  762. Status = RtlpHeapFreeVirtualMemory (NtCurrentProcess(),
  763. &Address,
  764. &Size,
  765. Type);
  766. if (! NT_SUCCESS(Status)) {
  767. if (Type == MEM_RELEASE) {
  768. BUMP_COUNTER (CNT_RELEASE_VM_FAILURES);
  769. if (SHOULD_BREAK(BRK_ON_RELEASE_VM_FAILURE)) {
  770. DbgPrintEx (DPFLTR_VERIFIER_ID,
  771. DPFLTR_ERROR_LEVEL,
  772. "Page heap: FreeVm (%p, %p, %x) failed with %x \n",
  773. Address, Size, Type, Status);
  774. DbgBreakPoint();
  775. }
  776. }
  777. else {
  778. BUMP_COUNTER (CNT_DECOMMIT_VM_FAILURES);
  779. if (SHOULD_BREAK(BRK_ON_DECOMMIT_VM_FAILURE)) {
  780. DbgPrintEx (DPFLTR_VERIFIER_ID,
  781. DPFLTR_ERROR_LEVEL,
  782. "Page heap: FreeVm (%p, %p, %x) failed with %x \n",
  783. Address, Size, Type, Status);
  784. DbgBreakPoint();
  785. }
  786. }
  787. }
  788. return Status;
  789. }
  790. INLINE
  791. NTSTATUS
  792. RtlpDphProtectVm (
  793. IN PVOID Address,
  794. IN SIZE_T Size,
  795. IN ULONG NewAccess
  796. )
  797. {
  798. ULONG OldAccess;
  799. NTSTATUS Status;
  800. Status = ZwProtectVirtualMemory (NtCurrentProcess(),
  801. &Address,
  802. &Size,
  803. NewAccess,
  804. &OldAccess);
  805. if (! NT_SUCCESS(Status)) {
  806. BUMP_COUNTER (CNT_PROTECT_VM_FAILURES);
  807. if (SHOULD_BREAK(BRK_ON_PROTECT_VM_FAILURE)) {
  808. DbgPrintEx (DPFLTR_VERIFIER_ID,
  809. DPFLTR_ERROR_LEVEL,
  810. "Page heap: ProtectVm (%p, %p, %x) failed with %x \n",
  811. Address, Size, NewAccess, Status);
  812. DbgBreakPoint();
  813. }
  814. }
  815. return Status;
  816. }
  817. INLINE
  818. NTSTATUS
  819. RtlpDphSetProtectionsBeforeUse (
  820. PDPH_HEAP_ROOT Heap,
  821. PVOID pVirtual,
  822. SIZE_T nBytesAccess
  823. )
  824. {
  825. NTSTATUS Status;
  826. LOGICAL MemoryCommitted;
  827. ULONG Protection;
  828. //
  829. // Set NOACCESS or READONLY protection on the page used to catch
  830. // buffer overruns or underruns.
  831. //
  832. if ((Heap->ExtraFlags & PAGE_HEAP_USE_READONLY)) {
  833. Protection = PAGE_READONLY;
  834. }
  835. else {
  836. Protection = PAGE_NOACCESS;
  837. }
  838. if ((Heap->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
  839. Status = RtlpDphProtectVm (pVirtual,
  840. PAGE_SIZE,
  841. Protection);
  842. }
  843. else {
  844. Status = RtlpDphProtectVm ((PUCHAR)pVirtual + nBytesAccess,
  845. PAGE_SIZE,
  846. Protection);
  847. }
  848. return Status;
  849. }
  850. INLINE
  851. NTSTATUS
  852. RtlpDphSetProtectionsAfterUse (
  853. PDPH_HEAP_ROOT Heap,
  854. PDPH_HEAP_BLOCK Node
  855. )
  856. {
  857. NTSTATUS Status;
  858. Status = RtlpDphFreeVm (Node->pVirtualBlock,
  859. Node->nVirtualAccessSize + PAGE_SIZE,
  860. MEM_DECOMMIT);
  861. return Status;
  862. }
  863. /////////////////////////////////////////////////////////////////////
  864. //////////////////////////////////////// Internal page heap functions
  865. /////////////////////////////////////////////////////////////////////
  866. PDPH_HEAP_BLOCK
  867. RtlpDphTakeNodeFromUnusedList(
  868. IN PDPH_HEAP_ROOT pHeap
  869. )
  870. {
  871. PDPH_HEAP_BLOCK pNode = pHeap->pUnusedNodeListHead;
  872. PDPH_HEAP_BLOCK pPrev = NULL;
  873. //
  874. // UnusedNodeList is LIFO with most recent entry at head of list.
  875. //
  876. if (pNode) {
  877. DEQUEUE_NODE( pNode, pPrev, pHeap->pUnusedNodeListHead, pHeap->pUnusedNodeListTail );
  878. pHeap->nUnusedNodes -= 1;
  879. }
  880. return pNode;
  881. }
  882. VOID
  883. RtlpDphReturnNodeToUnusedList(
  884. IN PDPH_HEAP_ROOT pHeap,
  885. IN PDPH_HEAP_BLOCK pNode
  886. )
  887. {
  888. //
  889. // UnusedNodeList is LIFO with most recent entry at head of list.
  890. //
  891. ENQUEUE_HEAD( pNode, pHeap->pUnusedNodeListHead, pHeap->pUnusedNodeListTail );
  892. pHeap->nUnusedNodes += 1;
  893. }
  894. PDPH_HEAP_BLOCK
  895. RtlpDphFindBusyMemory(
  896. IN PDPH_HEAP_ROOT pHeap,
  897. IN PVOID pUserMem,
  898. OUT PDPH_HEAP_BLOCK *pPrevAlloc
  899. )
  900. {
  901. PDPH_HEAP_BLOCK pNode = pHeap->pBusyAllocationListHead;
  902. PDPH_HEAP_BLOCK pPrev = NULL;
  903. while (pNode != NULL) {
  904. if (pNode->pUserAllocation == pUserMem) {
  905. if (pPrevAlloc)
  906. *pPrevAlloc = pPrev;
  907. return pNode;
  908. }
  909. pPrev = pNode;
  910. pNode = pNode->pNextAlloc;
  911. }
  912. return NULL;
  913. }
  914. VOID
  915. RtlpDphRemoveFromAvailableList(
  916. IN PDPH_HEAP_ROOT pHeap,
  917. IN PDPH_HEAP_BLOCK pNode,
  918. IN PDPH_HEAP_BLOCK pPrev
  919. )
  920. {
  921. DEQUEUE_NODE( pNode, pPrev, pHeap->pAvailableAllocationListHead, pHeap->pAvailableAllocationListTail );
  922. pHeap->nAvailableAllocations -= 1;
  923. pHeap->nAvailableAllocationBytesCommitted -= pNode->nVirtualBlockSize;
  924. }
  925. VOID
  926. RtlpDphPlaceOnFreeList(
  927. IN PDPH_HEAP_ROOT pHeap,
  928. IN PDPH_HEAP_BLOCK pAlloc
  929. )
  930. {
  931. //
  932. // FreeAllocationList is stored FIFO to enhance finding
  933. // reference-after-freed bugs by keeping previously freed
  934. // allocations on the free list as long as possible.
  935. //
  936. pAlloc->pNextAlloc = NULL;
  937. ENQUEUE_TAIL( pAlloc, pHeap->pFreeAllocationListHead, pHeap->pFreeAllocationListTail );
  938. pHeap->nFreeAllocations += 1;
  939. pHeap->nFreeAllocationBytesCommitted += pAlloc->nVirtualBlockSize;
  940. }
  941. VOID
  942. RtlpDphRemoveFromFreeList(
  943. IN PDPH_HEAP_ROOT pHeap,
  944. IN PDPH_HEAP_BLOCK pNode,
  945. IN PDPH_HEAP_BLOCK pPrev
  946. )
  947. {
  948. DEQUEUE_NODE( pNode, pPrev, pHeap->pFreeAllocationListHead, pHeap->pFreeAllocationListTail );
  949. pHeap->nFreeAllocations -= 1;
  950. pHeap->nFreeAllocationBytesCommitted -= pNode->nVirtualBlockSize;
  951. pNode->StackTrace = NULL;
  952. }
  953. VOID
  954. RtlpDphPlaceOnVirtualList(
  955. IN PDPH_HEAP_ROOT pHeap,
  956. IN PDPH_HEAP_BLOCK pNode
  957. )
  958. {
  959. //
  960. // VirtualStorageList is LIFO so that releasing VM blocks will
  961. // occur in exact reverse order.
  962. //
  963. ENQUEUE_HEAD( pNode, pHeap->pVirtualStorageListHead, pHeap->pVirtualStorageListTail );
  964. pHeap->nVirtualStorageRanges += 1;
  965. pHeap->nVirtualStorageBytes += pNode->nVirtualBlockSize;
  966. }
  967. VOID
  968. RtlpDphPlaceOnBusyList(
  969. IN PDPH_HEAP_ROOT pHeap,
  970. IN PDPH_HEAP_BLOCK pNode
  971. )
  972. {
  973. //
  974. // BusyAllocationList is LIFO to achieve better temporal locality
  975. // of reference (older allocations are farther down the list).
  976. //
  977. ENQUEUE_HEAD( pNode, pHeap->pBusyAllocationListHead, pHeap->pBusyAllocationListTail );
  978. pHeap->nBusyAllocations += 1;
  979. pHeap->nBusyAllocationBytesCommitted += pNode->nVirtualBlockSize;
  980. pHeap->nBusyAllocationBytesAccessible += pNode->nVirtualAccessSize;
  981. }
  982. VOID
  983. RtlpDphRemoveFromBusyList(
  984. IN PDPH_HEAP_ROOT pHeap,
  985. IN PDPH_HEAP_BLOCK pNode,
  986. IN PDPH_HEAP_BLOCK pPrev
  987. )
  988. {
  989. DEQUEUE_NODE( pNode, pPrev, pHeap->pBusyAllocationListHead, pHeap->pBusyAllocationListTail );
  990. pHeap->nBusyAllocations -= 1;
  991. pHeap->nBusyAllocationBytesCommitted -= pNode->nVirtualBlockSize;
  992. pHeap->nBusyAllocationBytesAccessible -= pNode->nVirtualAccessSize;
  993. }
  994. PDPH_HEAP_BLOCK
  995. RtlpDphSearchAvailableMemoryListForBestFit(
  996. IN PDPH_HEAP_ROOT pHeap,
  997. IN SIZE_T nSize,
  998. OUT PDPH_HEAP_BLOCK *pPrevAvailNode
  999. )
  1000. {
  1001. PDPH_HEAP_BLOCK pAvail;
  1002. PDPH_HEAP_BLOCK pFound;
  1003. PDPH_HEAP_BLOCK pAvailPrev;
  1004. PDPH_HEAP_BLOCK pFoundPrev;
  1005. SIZE_T nAvail;
  1006. SIZE_T nFound;
  1007. LOGICAL FoundSomething;
  1008. FoundSomething = FALSE;
  1009. pFound = NULL;
  1010. pFoundPrev = NULL;
  1011. pAvailPrev = NULL;
  1012. pAvail = pHeap->pAvailableAllocationListHead;
  1013. while (pAvail != NULL) {
  1014. nAvail = pAvail->nVirtualBlockSize;
  1015. if (nAvail >= nSize) {
  1016. //
  1017. // Current block has a size bigger than the request.
  1018. //
  1019. if (nAvail == nSize) {
  1020. //
  1021. // If block matches exactly the size of the request the search
  1022. // will stop. We cannot do better than that.
  1023. //
  1024. nFound = nAvail;
  1025. pFound = pAvail;
  1026. pFoundPrev = pAvailPrev;
  1027. break;
  1028. }
  1029. else if (FoundSomething == FALSE) {
  1030. //
  1031. // We found a first potential block for the request. We make it
  1032. // our first candidate.
  1033. //
  1034. nFound = nAvail;
  1035. pFound = pAvail;
  1036. pFoundPrev = pAvailPrev;
  1037. FoundSomething = TRUE;
  1038. }
  1039. else if (nAvail < nFound){
  1040. //
  1041. // We found a potential block and it is smaller than our best
  1042. // candidate so far. Therefore we make it our new candidate.
  1043. //
  1044. nFound = nAvail;
  1045. pFound = pAvail;
  1046. pFoundPrev = pAvailPrev;
  1047. }
  1048. else {
  1049. //
  1050. // This potential block has a bigger size than our best candidate
  1051. // so we will dismiss it. We are looking for best fit therefore
  1052. // there is nothing to be done on this branch. We will move on
  1053. // to the next block in the list.
  1054. //
  1055. }
  1056. }
  1057. //
  1058. // Move to the next block in the list.
  1059. //
  1060. pAvailPrev = pAvail;
  1061. pAvail = pAvail->pNextAlloc;
  1062. }
  1063. *pPrevAvailNode = pFoundPrev;
  1064. return pFound;
  1065. }
//
// Counters for # times coalesce operations got rejected
// to avoid cross-VAD issues.
//
// Index usage (see RtlpDphSameVirtualRegion):
//   [2] - blocks found in the same 64K unit (fast path hit)
//   [3] - ZwQueryVirtualMemory failed for the left block
// NOTE(review): indices [0] and [1] are not referenced in the
// visible code — confirm against the rest of the file.
//
LONG RtlpDphCoalesceStatistics [4];
// NOTE(review): despite the name, this rounds P DOWN to the nearest
// multiple of Sz; Sz must be a power of two for the mask to be valid.
#define ALIGN_TO_SIZE(P, Sz) (((ULONG_PTR)(P)) & ~((ULONG_PTR)(Sz) - 1))
  1072. BOOLEAN
  1073. RtlpDphSameVirtualRegion (
  1074. IN PDPH_HEAP_BLOCK Left,
  1075. IN PDPH_HEAP_BLOCK Right
  1076. )
  1077. /*++
  1078. Routine description:
  1079. This function tries to figure out if two nodes are part of the
  1080. same VAD. The function is used during coalescing in order to avoid
  1081. merging together blocks from different VADs. If we do not do this
  1082. we will break applications that do GDI calls.
  1083. SilviuC: this can be done differently if we keep the VAD address in
  1084. every node and make sure to propagate the value when nodes get split.
  1085. Then this function will just be a comparison of the two values.
  1086. --*/
  1087. {
  1088. PVOID LeftRegion;
  1089. MEMORY_BASIC_INFORMATION MemoryInfo;
  1090. NTSTATUS Status;
  1091. SIZE_T ReturnLength;
  1092. //
  1093. // If blocks are in the same 64K chunk we are okay.
  1094. //
  1095. if (ALIGN_TO_SIZE(Left->pVirtualBlock, VM_UNIT_SIZE)
  1096. == ALIGN_TO_SIZE(Right->pVirtualBlock, VM_UNIT_SIZE)) {
  1097. InterlockedIncrement (&(RtlpDphCoalesceStatistics[2]));
  1098. return TRUE;
  1099. }
  1100. //
  1101. // Call query() to find out what is the start address of the
  1102. // VAD for each node.
  1103. //
  1104. Status = ZwQueryVirtualMemory (NtCurrentProcess(),
  1105. Left->pVirtualBlock,
  1106. MemoryBasicInformation,
  1107. &MemoryInfo,
  1108. sizeof MemoryInfo,
  1109. &ReturnLength);
  1110. if (! NT_SUCCESS(Status)) {
  1111. InterlockedIncrement (&(RtlpDphCoalesceStatistics[3]));
  1112. return FALSE;
  1113. }
  1114. LeftRegion = MemoryInfo.AllocationBase;
  1115. Status = ZwQueryVirtualMemory (NtCurrentProcess(),
  1116. Right->pVirtualBlock,
  1117. MemoryBasicInformation,
  1118. &MemoryInfo,
  1119. sizeof MemoryInfo,
  1120. &ReturnLength);
  1121. if (! NT_SUCCESS(Status)) {
  1122. if (SHOULD_BREAK (BRK_ON_QUERY_VM_FAILURE)) {
  1123. DbgPrintEx (DPFLTR_VERIFIER_ID,
  1124. DPFLTR_ERROR_LEVEL,
  1125. "Page heap: QueryVm (%p) failed with %x \n",
  1126. Right->pVirtualBlock, Status);
  1127. DbgBreakPoint ();
  1128. }
  1129. BUMP_COUNTER (CNT_COALESCE_QUERYVM_FAILURES);
  1130. return FALSE;
  1131. }
  1132. if (LeftRegion == MemoryInfo.AllocationBase) {
  1133. BUMP_COUNTER (CNT_COALESCE_SUCCESSES);
  1134. return TRUE;
  1135. }
  1136. else {
  1137. BUMP_COUNTER (CNT_COALESCE_FAILURES);
  1138. return FALSE;
  1139. }
  1140. }
VOID
RtlpDphCoalesceNodeIntoAvailable(
    IN PDPH_HEAP_ROOT pHeap,
    IN PDPH_HEAP_BLOCK pNode
    )
/*++

Routine description:

    Inserts `pNode' into the available list, which is kept sorted by
    virtual address, and merges it with an address-adjacent neighbor
    on either side when the neighbor belongs to the same virtual
    region (RtlpDphSameVirtualRegion). A descriptor made redundant by
    a merge is returned to the unused node list. The list tail pointer
    is kept accurate in all cases.

--*/
{
    PDPH_HEAP_BLOCK pPrev;
    PDPH_HEAP_BLOCK pNext;
    PUCHAR pVirtual;
    SIZE_T nVirtual;

    pPrev = NULL;
    pNext = pHeap->pAvailableAllocationListHead;

    pVirtual = pNode->pVirtualBlock;
    nVirtual = pNode->nVirtualBlockSize;

    //
    // Statistics are updated up front; the merge branches below
    // compensate by decrementing nAvailableAllocations whenever a
    // descriptor gets absorbed.
    //
    pHeap->nAvailableAllocationBytesCommitted += nVirtual;
    pHeap->nAvailableAllocations += 1;

    //
    // Walk list to insertion point.
    //
    while (( pNext ) && ( pNext->pVirtualBlock < pVirtual )) {
        pPrev = pNext;
        pNext = pNext->pNextAlloc;
    }

    if (pPrev) {

        if (((pPrev->pVirtualBlock + pPrev->nVirtualBlockSize) == pVirtual) &&
            RtlpDphSameVirtualRegion (pPrev, pNode)) {

            //
            // pPrev and pNode are adjacent, so simply add size of
            // pNode entry to pPrev entry.
            //
            pPrev->nVirtualBlockSize += nVirtual;
            RtlpDphReturnNodeToUnusedList( pHeap, pNode );
            pHeap->nAvailableAllocations--;

            //
            // Continue below with pPrev playing the role of the
            // inserted node so that a forward merge with pNext is
            // still possible.
            //
            pNode = pPrev;
            pVirtual = pPrev->pVirtualBlock;
            nVirtual = pPrev->nVirtualBlockSize;
        }
        else {

            //
            // pPrev and pNode are not adjacent, so insert the pNode
            // block into the list after pPrev.
            //
            pNode->pNextAlloc = pPrev->pNextAlloc;
            pPrev->pNextAlloc = pNode;
        }
    }
    else {

        //
        // pNode should be inserted at head of list.
        //
        pNode->pNextAlloc = pHeap->pAvailableAllocationListHead;
        pHeap->pAvailableAllocationListHead = pNode;
    }

    if (pNext) {

        if (((pVirtual + nVirtual) == pNext->pVirtualBlock) &&
            RtlpDphSameVirtualRegion (pNode, pNext)) {

            //
            // pNode and pNext are adjacent, so simply add size of
            // pNext entry to pNode entry and remove pNext entry
            // from the list.
            //
            pNode->nVirtualBlockSize += pNext->nVirtualBlockSize;
            pNode->pNextAlloc = pNext->pNextAlloc;

            //
            // If the absorbed node was the list tail, the tail must
            // move back to the surviving node.
            //
            if (pHeap->pAvailableAllocationListTail == pNext) {
                pHeap->pAvailableAllocationListTail = pNode;
            }

            RtlpDphReturnNodeToUnusedList( pHeap, pNext );
            pHeap->nAvailableAllocations--;
        }
    }
    else {

        //
        // pNode is tail of list.
        //
        pHeap->pAvailableAllocationListTail = pNode;
    }
}
  1218. VOID
  1219. RtlpDphCoalesceFreeIntoAvailable(
  1220. IN PDPH_HEAP_ROOT pHeap,
  1221. IN ULONG nLeaveOnFreeList
  1222. )
  1223. {
  1224. PDPH_HEAP_BLOCK pNode = pHeap->pFreeAllocationListHead;
  1225. SIZE_T nFree = pHeap->nFreeAllocations;
  1226. PDPH_HEAP_BLOCK pNext;
  1227. ASSERT( nFree >= nLeaveOnFreeList );
  1228. while (( pNode ) && ( nFree-- > nLeaveOnFreeList )) {
  1229. pNext = pNode->pNextAlloc; // preserve next pointer across shuffling
  1230. RtlpDphRemoveFromFreeList( pHeap, pNode, NULL );
  1231. RtlpDphCoalesceNodeIntoAvailable( pHeap, pNode );
  1232. pNode = pNext;
  1233. }
  1234. ASSERT ((nFree = (SIZE_T)( pHeap->nFreeAllocations )) >= nLeaveOnFreeList );
  1235. ASSERT ((pNode != NULL ) || ( nFree == 0 ));
  1236. }
  1237. // forward
  1238. BOOLEAN
  1239. RtlpDphGrowVirtual(
  1240. IN PDPH_HEAP_ROOT pHeap,
  1241. IN SIZE_T nSize
  1242. );
PDPH_HEAP_BLOCK
RtlpDphFindAvailableMemory(
    IN PDPH_HEAP_ROOT pHeap,
    IN SIZE_T nSize,
    OUT PDPH_HEAP_BLOCK *pPrevAvailNode,
    IN BOOLEAN bGrowVirtual
    )
/*++

Routine description:

    Finds (and commits) an available block of at least `nSize' bytes.
    The search escalates in stages: best-fit on the available list,
    then progressive coalescing of the free list into the available
    list, then (optionally) growing the heap's virtual space, and as
    a last resort coalescing the entire free list. On success the
    block's memory is committed with HEAP_PROTECTION.

    `pPrevAvailNode' receives the predecessor of the returned node on
    the available list (needed by callers to unlink it). Returns NULL
    if no memory can be found/committed.

--*/
{
    PDPH_HEAP_BLOCK pAvail;
    ULONG nLeaveOnFreeList;
    NTSTATUS Status;

    //
    // First search existing AvailableList for a "best-fit" block
    // (the smallest block that will satisfy the request).
    //
    pAvail = RtlpDphSearchAvailableMemoryListForBestFit(
        pHeap,
        nSize,
        pPrevAvailNode
        );

    while (( pAvail == NULL ) && ( pHeap->nFreeAllocations > MIN_FREE_LIST_LENGTH )) {

        //
        // Failed to find sufficient memory on AvailableList. Coalesce
        // 3/4 of the FreeList memory to the AvailableList and try again.
        // Continue this until we have sufficient memory in AvailableList,
        // or the FreeList length is reduced to MIN_FREE_LIST_LENGTH entries.
        // We don't shrink the FreeList length below MIN_FREE_LIST_LENGTH
        // entries to preserve the most recent MIN_FREE_LIST_LENGTH entries
        // for reference-after-freed purposes.
        //
        nLeaveOnFreeList = 3 * pHeap->nFreeAllocations / 4;

        if (nLeaveOnFreeList < MIN_FREE_LIST_LENGTH) {
            nLeaveOnFreeList = MIN_FREE_LIST_LENGTH;
        }

        RtlpDphCoalesceFreeIntoAvailable( pHeap, nLeaveOnFreeList );

        pAvail = RtlpDphSearchAvailableMemoryListForBestFit(
            pHeap,
            nSize,
            pPrevAvailNode
            );
    }

    if (( pAvail == NULL ) && ( bGrowVirtual )) {

        //
        // After coalescing FreeList into AvailableList, still don't have
        // enough memory (large enough block) to satisfy request, so we
        // need to allocate more VM.
        //
        if (RtlpDphGrowVirtual( pHeap, nSize )) {

            pAvail = RtlpDphSearchAvailableMemoryListForBestFit(
                pHeap,
                nSize,
                pPrevAvailNode
                );

            if (pAvail == NULL) {

                //
                // Failed to satisfy request with more VM. If remainder
                // of free list combined with available list is larger
                // than the request, we might still be able to satisfy
                // the request by merging all of the free list onto the
                // available list. Note we lose our MIN_FREE_LIST_LENGTH
                // reference-after-freed insurance in this case, but it
                // is a rare case, and we'd prefer to satisfy the allocation.
                //
                if (( pHeap->nFreeAllocationBytesCommitted +
                      pHeap->nAvailableAllocationBytesCommitted ) >= nSize) {

                    RtlpDphCoalesceFreeIntoAvailable( pHeap, 0 );

                    pAvail = RtlpDphSearchAvailableMemoryListForBestFit(
                        pHeap,
                        nSize,
                        pPrevAvailNode
                        );
                }
            }
        }
    }

    //
    // We need to commit the memory range now for the node descriptor
    // we just found. The memory will be committed and
    // the protection on it will be RW.
    //
    if (pAvail) {

        // ISSUE
        // (SilviuC): The memory here might be already committed if we use
        // it for the first time. Whenever we allocate virtual memory to grow
        // the heap we commit it. This is the reason the consumption does not
        // decrease as spectacular as we expected. We will need to fix it in
        // the future. It affects 0x43 flags.
        //
        Status = RtlpDphAllocateVm (&(pAvail->pVirtualBlock),
                                    nSize,
                                    MEM_COMMIT,
                                    HEAP_PROTECTION);

        if (! NT_SUCCESS(Status)) {

            //
            // We did not manage to commit memory for this block. This
            // can happen in low memory conditions. We will return null.
            // There is no need to do anything with the node we obtained.
            // It is already in the Available list where it should be anyway.

            return NULL;
        }
    }

    return pAvail;
}
  1346. VOID
  1347. RtlpDphPlaceOnPoolList(
  1348. IN PDPH_HEAP_ROOT pHeap,
  1349. IN PDPH_HEAP_BLOCK pNode
  1350. )
  1351. {
  1352. //
  1353. // NodePoolList is FIFO.
  1354. //
  1355. pNode->pNextAlloc = NULL;
  1356. ENQUEUE_TAIL( pNode, pHeap->pNodePoolListHead, pHeap->pNodePoolListTail );
  1357. pHeap->nNodePoolBytes += pNode->nVirtualBlockSize;
  1358. pHeap->nNodePools += 1;
  1359. }
  1360. VOID
  1361. RtlpDphAddNewPool(
  1362. IN PDPH_HEAP_ROOT pHeap,
  1363. IN PVOID pVirtual,
  1364. IN SIZE_T nSize,
  1365. IN BOOLEAN bAddToPoolList
  1366. )
  1367. {
  1368. PDPH_HEAP_BLOCK pNode, pFirst;
  1369. ULONG n, nCount;
  1370. //
  1371. // Assume pVirtual points to committed block of nSize bytes.
  1372. //
  1373. pFirst = pVirtual;
  1374. nCount = (ULONG)(nSize / sizeof( DPH_HEAP_BLOCK ));
  1375. for (n = nCount - 1, pNode = pFirst; n > 0; pNode++, n--) {
  1376. pNode->pNextAlloc = pNode + 1;
  1377. }
  1378. pNode->pNextAlloc = NULL;
  1379. //
  1380. // Now link this list into the tail of the UnusedNodeList
  1381. //
  1382. ENQUEUE_TAIL( pFirst, pHeap->pUnusedNodeListHead, pHeap->pUnusedNodeListTail );
  1383. pHeap->pUnusedNodeListTail = pNode;
  1384. pHeap->nUnusedNodes += nCount;
  1385. if (bAddToPoolList) {
  1386. //
  1387. // Now add an entry on the PoolList by taking a node from the
  1388. // UnusedNodeList, which should be guaranteed to be non-empty
  1389. // since we just added new nodes to it.
  1390. //
  1391. pNode = RtlpDphTakeNodeFromUnusedList( pHeap );
  1392. ASSERT( pNode != NULL );
  1393. pNode->pVirtualBlock = pVirtual;
  1394. pNode->nVirtualBlockSize = nSize;
  1395. RtlpDphPlaceOnPoolList( pHeap, pNode );
  1396. }
  1397. }
PDPH_HEAP_BLOCK
RtlpDphAllocateNode(
    IN PDPH_HEAP_ROOT pHeap
    )
/*++

Routine description:

    Returns a block descriptor taken from the heap's unused node
    list, replenishing the list first if it is empty. Replenishment
    tries, in order: a POOL_SIZE chunk from the available list, a
    PAGE_SIZE chunk from the available list, and finally a fresh
    RESERVE_SIZE virtual reservation. Returns NULL only when all of
    these fail (out of memory / virtual space).

--*/
{
    PDPH_HEAP_BLOCK pNode, pPrev, pReturn;
    PUCHAR pVirtual;
    SIZE_T nVirtual;
    SIZE_T nRequest;
    NTSTATUS Status;

    pReturn = NULL;

    if (pHeap->pUnusedNodeListHead == NULL) {

        //
        // We're out of nodes -- allocate new node pool
        // from AvailableList. Set bGrowVirtual to FALSE
        // since growing virtual will require new nodes, causing
        // recursion. Note that simply calling FindAvailableMem
        // might return some nodes to the pUnusedNodeList, even if
        // the call fails, so we'll check that the UnusedNodeList
        // is still empty before we try to use or allocate more
        // memory.
        //
        nRequest = POOL_SIZE;

        pNode = RtlpDphFindAvailableMemory(
            pHeap,
            nRequest,
            &pPrev,
            FALSE
            );

        if (( pHeap->pUnusedNodeListHead == NULL ) && ( pNode == NULL )) {

            //
            // Reduce request size to PAGE_SIZE and see if
            // we can find at least a page on the available
            // list.
            //
            nRequest = PAGE_SIZE;

            pNode = RtlpDphFindAvailableMemory(
                pHeap,
                nRequest,
                &pPrev,
                FALSE
                );
        }

        if (pHeap->pUnusedNodeListHead == NULL) {

            if (pNode == NULL) {

                //
                // Insufficient memory on Available list. Try allocating a
                // new virtual block.
                //
                nRequest = POOL_SIZE;
                nVirtual = RESERVE_SIZE;
                pVirtual = NULL;

                Status = RtlpDphAllocateVm (&pVirtual,
                                            nVirtual,
                                            MEM_RESERVE,
                                            PAGE_NOACCESS);

                if (! NT_SUCCESS(Status)) {

                    //
                    // We are out of virtual space.
                    //
                    goto EXIT;
                }
            }
            else {

                //
                // Use the block found on the available list as the
                // backing store for the new node pool.
                //
                RtlpDphRemoveFromAvailableList( pHeap, pNode, pPrev );

                pVirtual = pNode->pVirtualBlock;
                nVirtual = pNode->nVirtualBlockSize;
            }

            //
            // We now have allocated VM referenced by pVirtual,nVirtual.
            // Make nRequest portion of VM accessible for new node pool.
            //
            Status = RtlpDphAllocateVm (&pVirtual,
                                        nRequest,
                                        MEM_COMMIT,
                                        HEAP_PROTECTION);

            if (! NT_SUCCESS(Status)) {

                //
                // Commit failed: undo whichever acquisition path was
                // taken above (release the fresh reservation, or put
                // the available-list block back).
                //
                if (pNode == NULL) {

                    RtlpDphFreeVm (pVirtual,
                                   0,
                                   MEM_RELEASE);
                }
                else {

                    RtlpDphCoalesceNodeIntoAvailable( pHeap, pNode );
                }

                goto EXIT;
            }

            //
            // Now we have accessible memory for new pool. Add the
            // new memory to the pool. If the new memory came from
            // AvailableList versus fresh VM, zero the memory first.
            //
            if (pNode != NULL) {
                RtlZeroMemory( pVirtual, nRequest );
            }

            RtlpDphAddNewPool( pHeap, pVirtual, nRequest, TRUE );

            //
            // If any memory remaining, put it on available list.
            //
            if (pNode == NULL) {

                //
                // Memory came from new VM -- add appropriate list entries
                // for new VM and add remainder of VM to free list.
                //
                pNode = RtlpDphTakeNodeFromUnusedList( pHeap );
                ASSERT( pNode != NULL );
                pNode->pVirtualBlock = pVirtual;
                pNode->nVirtualBlockSize = nVirtual;
                RtlpDphPlaceOnVirtualList( pHeap, pNode );

                pNode = RtlpDphTakeNodeFromUnusedList( pHeap );
                ASSERT( pNode != NULL );
                pNode->pVirtualBlock = pVirtual + nRequest;
                pNode->nVirtualBlockSize = nVirtual - nRequest;
                RtlpDphCoalesceNodeIntoAvailable( pHeap, pNode );
            }
            else {

                if (pNode->nVirtualBlockSize > nRequest) {

                    //
                    // Shrink the descriptor past the consumed prefix
                    // and return the remainder to the available list.
                    //
                    pNode->pVirtualBlock += nRequest;
                    pNode->nVirtualBlockSize -= nRequest;
                    RtlpDphCoalesceNodeIntoAvailable( pHeap, pNode );
                }
                else {

                    //
                    // Used up entire available block -- return node to
                    // unused list.
                    //
                    RtlpDphReturnNodeToUnusedList( pHeap, pNode );
                }
            }
        }
    }

    pReturn = RtlpDphTakeNodeFromUnusedList( pHeap );
    ASSERT( pReturn != NULL );

    EXIT:

    return pReturn;
}
  1534. BOOLEAN
  1535. RtlpDphGrowVirtual(
  1536. IN PDPH_HEAP_ROOT pHeap,
  1537. IN SIZE_T nSize
  1538. )
  1539. {
  1540. PDPH_HEAP_BLOCK pVirtualNode;
  1541. PDPH_HEAP_BLOCK pAvailNode;
  1542. PVOID pVirtual;
  1543. SIZE_T nVirtual;
  1544. NTSTATUS Status;
  1545. pVirtualNode = RtlpDphAllocateNode( pHeap );
  1546. if (pVirtualNode == NULL) {
  1547. return FALSE;
  1548. }
  1549. pAvailNode = RtlpDphAllocateNode( pHeap );
  1550. if (pAvailNode == NULL) {
  1551. RtlpDphReturnNodeToUnusedList( pHeap, pVirtualNode );
  1552. return FALSE;
  1553. }
  1554. nSize = ROUNDUP2( nSize, VM_UNIT_SIZE );
  1555. nVirtual = ( nSize > RESERVE_SIZE ) ? nSize : RESERVE_SIZE;
  1556. pVirtual = NULL;
  1557. Status = RtlpDphAllocateVm (&pVirtual,
  1558. nVirtual,
  1559. MEM_RESERVE,
  1560. PAGE_NOACCESS);
  1561. if (! NT_SUCCESS(Status)) {
  1562. RtlpDphReturnNodeToUnusedList( pHeap, pVirtualNode );
  1563. RtlpDphReturnNodeToUnusedList( pHeap, pAvailNode );
  1564. return FALSE;
  1565. }
  1566. pVirtualNode->pVirtualBlock = pVirtual;
  1567. pVirtualNode->nVirtualBlockSize = nVirtual;
  1568. RtlpDphPlaceOnVirtualList( pHeap, pVirtualNode );
  1569. pAvailNode->pVirtualBlock = pVirtual;
  1570. pAvailNode->nVirtualBlockSize = nVirtual;
  1571. RtlpDphCoalesceNodeIntoAvailable( pHeap, pAvailNode );
  1572. return TRUE;
  1573. }
VOID
RtlpDphProtectHeapStructures(
    IN PDPH_HEAP_ROOT pHeap
    )
{
    //
    // NOTE(review): the entire body is compiled out with #if 0, so
    // this function is currently a no-op. The disabled code would
    // re-apply PAGE_READONLY protection to the node pools and to the
    // NT heap structure once the unprotection reference count drops
    // to zero.
    //
#if 0
    PDPH_HEAP_BLOCK pNode;
    //
    // Assume CritSect is owned so we're the only thread twiddling
    // the protection.
    //
    ASSERT( pHeap->HeapFlags & HEAP_PROTECTION_ENABLED );
    if (--pHeap->nUnProtectionReferenceCount == 0) {
        pNode = pHeap->pNodePoolListHead;
        while (pNode != NULL) {
            RtlpDebugPageHeapProtectVM( pNode->pVirtualBlock,
                                        pNode->nVirtualBlockSize,
                                        PAGE_READONLY );
            pNode = pNode->pNextAlloc;
        }
    }
    //
    // Protect the main NT heap structure associated with page heap.
    // Nobody should touch this outside of page heap code paths.
    //
    RtlpDebugPageHeapProtectVM (pHeap->NormalHeap,
                                PAGE_SIZE,
                                PAGE_READONLY);
#endif
}
VOID
RtlpDphUnprotectHeapStructures(
    IN PDPH_HEAP_ROOT pHeap
    )
{
    //
    // NOTE(review): the entire body is compiled out with #if 0, so
    // this function is currently a no-op. The disabled code would
    // make the node pools and the NT heap structure writable again
    // (HEAP_PROTECTION) and bump the unprotection reference count.
    //
#if 0
    PDPH_HEAP_BLOCK pNode;
    ASSERT( pHeap->HeapFlags & HEAP_PROTECTION_ENABLED );
    if (pHeap->nUnProtectionReferenceCount == 0) {
        pNode = pHeap->pNodePoolListHead;
        while (pNode != NULL) {
            RtlpDebugPageHeapProtectVM( pNode->pVirtualBlock,
                                        pNode->nVirtualBlockSize,
                                        HEAP_PROTECTION );
            pNode = pNode->pNextAlloc;
        }
    }
    //
    // Unprotect the main NT heap structure associatied with page heap.
    //
    RtlpDebugPageHeapProtectVM (pHeap->NormalHeap,
                                PAGE_SIZE,
                                HEAP_PROTECTION);
    pHeap->nUnProtectionReferenceCount += 1;
#endif
}
VOID
RtlpDphPreProcessing (
    PDPH_HEAP_ROOT Heap,
    ULONG Flags
    )
{
    //
    // Common prologue for page heap operations: acquire the heap
    // lock, then make the page heap metadata writable. The statement
    // order matters -- the lock must be held before protections are
    // flipped, and the integrity check runs before the unprotect
    // while the list validation runs after it.
    //
    RtlpDphEnterCriticalSection (Heap, Flags);

    if (DEBUG_OPTION (DBG_INTERNAL_VALIDATION)) {
        RtlpDphVerifyIntegrity (Heap);
    }

    UNPROTECT_HEAP_STRUCTURES (Heap);

    if (DEBUG_OPTION (DBG_INTERNAL_VALIDATION)) {
        RtlpDphValidateInternalLists (Heap);
    }
}
VOID
RtlpDphPostProcessing (
    PDPH_HEAP_ROOT Heap
    )
{
    //
    // Common epilogue for page heap operations: mirror image of
    // RtlpDphPreProcessing (re-protect metadata, then release the
    // heap lock).
    //
    // If an exception is raised during HeapDestroy this function
    // gets called with a null heap pointer. For this case the
    // function is a no op.
    //
    if (Heap == NULL) {
        return;
    }

    if (DEBUG_OPTION (DBG_INTERNAL_VALIDATION)) {
        RtlpDphValidateInternalLists (Heap);
    }

    PROTECT_HEAP_STRUCTURES (Heap);

    if (DEBUG_OPTION (DBG_INTERNAL_VALIDATION)) {
        RtlpDphVerifyIntegrity (Heap);
    }

    RtlpDphLeaveCriticalSection (Heap);
}
  1667. /////////////////////////////////////////////////////////////////////
  1668. //////////////////////////////////////////////// Exception management
  1669. /////////////////////////////////////////////////////////////////////
//
// Buckets used by RtlpDphUnexpectedExceptionFilter to count the
// kinds of exceptions caught inside page heap code paths. Each
// EXN_* value is an index into RtlpDphException[].
//
#define EXN_STACK_OVERFLOW 0
#define EXN_NO_MEMORY 1
#define EXN_ACCESS_VIOLATION 2
#define EXN_IGNORE_AV 3
#define EXN_OTHER 4
//
// Per-bucket exception counters. Only indices 0-4 are used by the
// filter; the array is sized 8 (presumably headroom for future
// buckets -- confirm).
//
ULONG RtlpDphException[8];
ULONG
RtlpDphUnexpectedExceptionFilter (
    ULONG ExceptionCode,
    PVOID ExceptionRecord,
    PDPH_HEAP_ROOT Heap,
    BOOLEAN IgnoreAccessViolations
    )
/*++

Routine Description:

    This routine is the exception filter used by page heap operations. The role
    of the function is to bring the page heap in a consistent state (unlock
    heap lock, protect page heap metadata, etc.) if an exception has been raised.
    The exception can be raised for legitimate reasons (e.g. STATUS_NO_MEMORY
    from HeapAlloc()) or because there is some sort of corruption.

    Legitimate exceptions do not cause breaks but an unrecognized exception will
    cause a break. The break is continuable at least with respect to page heap.

Arguments:

    ExceptionCode - exception code

    ExceptionRecord - structure with pointers to .exr and .cxr

    Heap - heap in which code was executing at the time of exception

    IgnoreAccessViolations - sometimes we want to ignore this (e.g. HeapSize).

Return Value:

    Always EXCEPTION_CONTINUE_SEARCH. The philosophy of this exception filter
    function is that if we get an exception we bring back page heap in a consistent
    state and then let the exception go to the next exception handler.

Environment:

    Called within page heap APIs if an exception is raised.

--*/
{
    if (ExceptionCode == STATUS_NO_MEMORY) {

        //
        // Underlying NT heap functions can legitimately raise this
        // exception.
        //
        InterlockedIncrement (&(RtlpDphException[EXN_NO_MEMORY]));
    }
    else if (Heap != NULL && ExceptionCode == STATUS_STACK_OVERFLOW) {

        //
        // We go to the next exception handler for stack overflows.
        //
        // NOTE(review): when Heap == NULL a stack overflow falls
        // through to the generic EXN_OTHER bucket below -- confirm
        // this asymmetry is intended.
        //
        InterlockedIncrement (&(RtlpDphException[EXN_STACK_OVERFLOW]));
    }
    else if (ExceptionCode == STATUS_ACCESS_VIOLATION) {

        if (IgnoreAccessViolations == FALSE) {

            //
            // An unexpected AV in heap code is treated as a verifier
            // stop (likely heap corruption or misuse).
            //
            VERIFIER_STOP (APPLICATION_VERIFIER_UNEXPECTED_EXCEPTION,
                           "unexpected exception raised in heap code path",
                           Heap, "Heap handle involved",
                           ExceptionCode, "Exception code",
                           ExceptionRecord, "Exception record (.exr on 1st word, .cxr on 2nd word)",
                           0, "");

            InterlockedIncrement (&(RtlpDphException[EXN_ACCESS_VIOLATION]));
        }
        else {

            InterlockedIncrement (&(RtlpDphException[EXN_IGNORE_AV]));
        }
    }
    else {

        //
        // Any other exceptions will go to the next exception handler.
        //
        InterlockedIncrement (&(RtlpDphException[EXN_OTHER]));
    }

    //
    // Restore metadata protection and release the heap lock
    // (no-op if Heap is NULL).
    //
    RtlpDphPostProcessing (Heap);

    return EXCEPTION_CONTINUE_SEARCH;
}
//
// ASSERT_UNEXPECTED_CODE_PATH: marks branches that should never be
// reached. Expands to a failing ASSERT on checked (DBG) builds and
// to nothing on free builds.
//
#if DBG
#define ASSERT_UNEXPECTED_CODE_PATH() ASSERT(0 && "unexpected code path")
#else
#define ASSERT_UNEXPECTED_CODE_PATH()
#endif
  1746. /////////////////////////////////////////////////////////////////////
  1747. ///////////////////////////// Exported page heap management functions
  1748. /////////////////////////////////////////////////////////////////////
NTSTATUS
RtlpDphProcessStartupInitialization (
    VOID
    )
/*++

Routine description:

    One-time, per-process initialization for page heap: sets up the
    global heap list and its lock, the delayed free queue, the target
    dll string/logic, and announces activation via debug output.
    Called lazily from the first RtlpDebugPageHeapCreate in the
    process; a failure here is expected to fail process startup.

--*/
{
    NTSTATUS Status;

    InitializeListHead (&RtlpDphPageHeapList);

    Status = RtlInitializeCriticalSection (&RtlpDphPageHeapListLock);

    if (! NT_SUCCESS(Status)) {
        BUMP_COUNTER (CNT_INITIALIZE_CS_FAILURES);
        return Status;
    }

    Status = RtlpDphInitializeDelayedFreeQueue ();

    //
    // NOTE(review): on this failure path the critical section
    // initialized above is not deleted -- presumably acceptable
    // because the process fails startup anyway; confirm.
    //
    if (! NT_SUCCESS(Status)) {
        return Status;
    }

    //
    // Create the Unicode string containing the target dlls.
    // If no target dlls have been specified the string will
    // be initialized with the empty string.
    //
    RtlInitUnicodeString (&RtlpDphTargetDllsUnicode,
                          RtlpDphTargetDlls);

    //
    // Initialize the target dlls logic
    //
    Status = RtlpDphTargetDllsLogicInitialize ();

    //
    // NOTE(review): the initialized flag is set even if the target
    // dll initialization above failed; the failure Status is still
    // propagated to the caller.
    //
    RtlpDphPageHeapListInitialized = TRUE;

    //
    // The following is not an error message but we want it to be
    // on for almost all situations and the only flag that behaves
    // like this is DPFLTR_ERROR_LEVEL. Since it happens only once per
    // process it is really no big deal in terms of performance.
    //
    DbgPrintEx (DPFLTR_VERIFIER_ID,
                DPFLTR_ERROR_LEVEL,
                "Page heap: pid 0x%X: page heap enabled with flags 0x%X.\n",
                PROCESS_ID(),
                RtlpDphGlobalFlags);

    return Status;
}
  1790. //
  1791. // Here's where the exported interface functions are defined.
  1792. //
  1793. #pragma optimize("y", off) // disable FPO
  1794. PVOID
  1795. RtlpDebugPageHeapCreate(
  1796. IN ULONG Flags,
  1797. IN PVOID HeapBase OPTIONAL,
  1798. IN SIZE_T ReserveSize OPTIONAL,
  1799. IN SIZE_T CommitSize OPTIONAL,
  1800. IN PVOID Lock OPTIONAL,
  1801. IN PRTL_HEAP_PARAMETERS Parameters OPTIONAL
  1802. )
  1803. {
  1804. SYSTEM_BASIC_INFORMATION SystemInfo;
  1805. PDPH_HEAP_BLOCK Node;
  1806. PDPH_HEAP_ROOT HeapRoot;
  1807. PVOID HeapHandle;
  1808. PUCHAR pVirtual;
  1809. SIZE_T nVirtual;
  1810. SIZE_T Size;
  1811. NTSTATUS Status;
  1812. LARGE_INTEGER PerformanceCounter;
  1813. LOGICAL CreateReadOnlyHeap = FALSE;
  1814. //
  1815. // If `Parameters' is -1 then this is a recursive call to
  1816. // RtlpDebugPageHeapCreate and we will return NULL so that
  1817. // the normal heap manager will create a normal heap.
  1818. // I agree this is a hack but we need this so that we maintain
  1819. // a very loose dependency between the normal and page heap
  1820. // manager.
  1821. //
  1822. if ((SIZE_T)Parameters == (SIZE_T)-1) {
  1823. return NULL;
  1824. }
  1825. //
  1826. // If `Parameters' is -2 we need to create a read only page heap.
  1827. // This happens only inside RPC verifier.
  1828. //
  1829. if ((SIZE_T)Parameters == (SIZE_T)-2) {
  1830. CreateReadOnlyHeap = TRUE;
  1831. }
  1832. //
  1833. // If this is the first heap creation in this process, then we
  1834. // need to initialize the process heap list critical section,
  1835. // the global delayed free queue for normal blocks and the
  1836. // trace database. If this fail we will fail the creation of the
  1837. // initial process heap and therefore the process will fail
  1838. // the startup.
  1839. //
  1840. if (! RtlpDphPageHeapListInitialized) {
  1841. Status = RtlpDphProcessStartupInitialization ();
  1842. if (! NT_SUCCESS(Status)) {
  1843. BUMP_COUNTER (CNT_PAGE_HEAP_CREATE_FAILURES);
  1844. return NULL;
  1845. }
  1846. }
  1847. //
  1848. // We don't handle heaps where HeapBase is already allocated
  1849. // from user or where Lock is provided by user. Code in the
  1850. // NT heap manager prevents this.
  1851. //
  1852. ASSERT (HeapBase == NULL && Lock == NULL);
  1853. if (HeapBase != NULL || Lock != NULL) {
  1854. BUMP_COUNTER (CNT_PAGE_HEAP_CREATE_FAILURES);
  1855. return NULL;
  1856. }
  1857. //
  1858. // Note that we simply ignore ReserveSize, CommitSize, and
  1859. // Parameters as we always have a growable heap with our
  1860. // own thresholds, etc.
  1861. //
  1862. Status = ZwQuerySystemInformation (SystemBasicInformation,
  1863. &SystemInfo,
  1864. sizeof( SystemInfo ),
  1865. NULL);
  1866. if (! NT_SUCCESS(Status)) {
  1867. BUMP_COUNTER (CNT_PAGE_HEAP_CREATE_FAILURES);
  1868. return NULL;
  1869. }
  1870. ASSERT (SystemInfo.PageSize == PAGE_SIZE);
  1871. ASSERT (SystemInfo.AllocationGranularity == VM_UNIT_SIZE);
  1872. ASSERT ((PAGE_SIZE + POOL_SIZE + PAGE_SIZE ) < VM_UNIT_SIZE);
  1873. //
  1874. // Reserve space for the initial chunk of virtual space
  1875. // for this heap.
  1876. //
  1877. nVirtual = RESERVE_SIZE;
  1878. pVirtual = NULL;
  1879. Status = RtlpDphAllocateVm (&pVirtual,
  1880. nVirtual,
  1881. MEM_RESERVE,
  1882. PAGE_NOACCESS);
  1883. if (! NT_SUCCESS(Status)) {
  1884. BUMP_COUNTER (CNT_PAGE_HEAP_CREATE_FAILURES);
  1885. OUT_OF_VM_BREAK (Flags, "Page heap: Insufficient virtual space to create heap\n");
  1886. IF_GENERATE_EXCEPTION (Flags, STATUS_NO_MEMORY);
  1887. return NULL;
  1888. }
  1889. //
  1890. // Commit the portion needed for heap data structures (header, some small
  1891. // initial pool and the page for the heap critical section).
  1892. //
  1893. Status = RtlpDphAllocateVm (&pVirtual,
  1894. PAGE_SIZE + POOL_SIZE + PAGE_SIZE,
  1895. MEM_COMMIT,
  1896. HEAP_PROTECTION);
  1897. if (! NT_SUCCESS(Status)) {
  1898. RtlpDphFreeVm (pVirtual,
  1899. 0,
  1900. MEM_RELEASE);
  1901. BUMP_COUNTER (CNT_PAGE_HEAP_CREATE_FAILURES);
  1902. OUT_OF_VM_BREAK (Flags, "Page heap: Insufficient memory to create heap\n");
  1903. IF_GENERATE_EXCEPTION (Flags, STATUS_NO_MEMORY);
  1904. return NULL;
  1905. }
  1906. //
  1907. // Out of our initial allocation, the initial page is the fake
  1908. // retail HEAP structure. The second page begins our DPH_HEAP
  1909. // structure followed by (POOL_SIZE-sizeof(DPH_HEAP)) bytes for
  1910. // the initial pool. The next page contains out CRIT_SECT
  1911. // variable, which must always be READWRITE. Beyond that, the
  1912. // remainder of the virtual allocation is placed on the available
  1913. // list.
  1914. //
  1915. // |_____|___________________|_____|__ _ _ _ _ _ _ _ _ _ _ _ _ __|
  1916. //
  1917. // ^pVirtual
  1918. //
  1919. // ^FakeRetailHEAP
  1920. //
  1921. // ^HeapRoot
  1922. //
  1923. // ^InitialNodePool
  1924. //
  1925. // ^CRITICAL_SECTION
  1926. //
  1927. // ^AvailableSpace
  1928. //
  1929. //
  1930. //
  1931. // Our DPH_HEAP structure starts at the page following the
  1932. // fake retail HEAP structure pointed to by the "heap handle".
  1933. // For the fake HEAP structure, we'll fill it with 0xEEEEEEEE
  1934. // except for the Heap->Flags and Heap->ForceFlags fields,
  1935. // which we must set to include our HEAP_FLAG_PAGE_ALLOCS flag,
  1936. // and then we'll make the whole page read-only.
  1937. //
  1938. RtlFillMemory (pVirtual, PAGE_SIZE, FILL_BYTE);
  1939. ((PHEAP)pVirtual)->Flags = Flags | HEAP_FLAG_PAGE_ALLOCS;
  1940. ((PHEAP)pVirtual)->ForceFlags = Flags | HEAP_FLAG_PAGE_ALLOCS;
  1941. Status = RtlpDphProtectVm (pVirtual,
  1942. PAGE_SIZE,
  1943. PAGE_READONLY);
  1944. if (! NT_SUCCESS(Status)) {
  1945. RtlpDphFreeVm (pVirtual,
  1946. 0,
  1947. MEM_RELEASE);
  1948. BUMP_COUNTER (CNT_PAGE_HEAP_CREATE_FAILURES);
  1949. IF_GENERATE_EXCEPTION (Flags, STATUS_NO_MEMORY);
  1950. return NULL;
  1951. }
  1952. //
  1953. // Fill up the heap root structure.
  1954. //
  1955. HeapRoot = (PDPH_HEAP_ROOT)(pVirtual + PAGE_SIZE);
  1956. HeapRoot->Signature = DPH_HEAP_SIGNATURE;
  1957. HeapRoot->HeapFlags = Flags;
  1958. HeapRoot->HeapCritSect = (PVOID)((PCHAR)HeapRoot + POOL_SIZE );
  1959. //
  1960. // Copy the page heap global flags into per heap flags.
  1961. //
  1962. HeapRoot->ExtraFlags = RtlpDphGlobalFlags;
  1963. //
  1964. // If we need to create a read-only page heap OR the proper flag.
  1965. //
  1966. if (CreateReadOnlyHeap) {
  1967. HeapRoot->ExtraFlags |= PAGE_HEAP_USE_READONLY;
  1968. }
  1969. //
  1970. // If page heap meta data protection was requested we transfer
  1971. // the bit into the HeapFlags field.
  1972. //
  1973. if ((HeapRoot->ExtraFlags & PAGE_HEAP_PROTECT_META_DATA)) {
  1974. HeapRoot->HeapFlags |= HEAP_PROTECTION_ENABLED;
  1975. }
  1976. //
  1977. // If the PAGE_HEAP_UNALIGNED_ALLOCATIONS bit is set
  1978. // in ExtraFlags we will set the HEAP_NO_ALIGNMENT flag
  1979. // in the HeapFlags. This last bit controls if allocations
  1980. // will be aligned or not. The reason we do this transfer is
  1981. // that ExtraFlags can be set from the registry whereas the
  1982. // normal HeapFlags cannot.
  1983. //
  1984. if ((HeapRoot->ExtraFlags & PAGE_HEAP_UNALIGNED_ALLOCATIONS)) {
  1985. HeapRoot->HeapFlags |= HEAP_NO_ALIGNMENT;
  1986. }
  1987. //
  1988. // Initialize the seed for the random generator used to decide
  1989. // from where should we make allocations if random decision
  1990. // flag is on.
  1991. //
  1992. ZwQueryPerformanceCounter (&PerformanceCounter, NULL);
  1993. HeapRoot->Seed = PerformanceCounter.LowPart;
  1994. //
  1995. // Initialize heap lock.
  1996. //
  1997. Status = RtlInitializeCriticalSection (HeapRoot->HeapCritSect);
  1998. if (! NT_SUCCESS(Status)) {
  1999. RtlpDphFreeVm (pVirtual,
  2000. 0,
  2001. MEM_RELEASE);
  2002. BUMP_COUNTER (CNT_INITIALIZE_CS_FAILURES);
  2003. BUMP_COUNTER (CNT_PAGE_HEAP_CREATE_FAILURES);
  2004. IF_GENERATE_EXCEPTION (Flags, STATUS_NO_MEMORY);
  2005. return NULL;
  2006. }
  2007. //
  2008. // Create the normal heap associated with the page heap.
  2009. // The last parameter value (-1) is very important because
  2010. // it stops the recursive call into page heap create.
  2011. //
  2012. // Note that it is very important to reset the NO_SERIALIZE
  2013. // bit because normal heap operations can happen in random
  2014. // threads when the free delayed cache gets trimmed.
  2015. //
  2016. HeapRoot->NormalHeap = RtlCreateHeap (Flags & (~HEAP_NO_SERIALIZE),
  2017. HeapBase,
  2018. ReserveSize,
  2019. CommitSize,
  2020. Lock,
  2021. (PRTL_HEAP_PARAMETERS)-1);
  2022. if (HeapRoot->NormalHeap == NULL) {
  2023. RtlDeleteCriticalSection (HeapRoot->HeapCritSect);
  2024. RtlpDphFreeVm (pVirtual,
  2025. 0,
  2026. MEM_RELEASE);
  2027. BUMP_COUNTER (CNT_NT_HEAP_CREATE_FAILURES);
  2028. BUMP_COUNTER (CNT_PAGE_HEAP_CREATE_FAILURES);
  2029. IF_GENERATE_EXCEPTION (Flags, STATUS_NO_MEMORY);
  2030. return NULL;
  2031. }
  2032. //
  2033. // On the page that contains our DPH_HEAP structure, use
  2034. // the remaining memory beyond the DPH_HEAP structure as
  2035. // pool for allocating heap nodes.
  2036. //
  2037. RtlpDphAddNewPool (HeapRoot,
  2038. HeapRoot + 1,
  2039. POOL_SIZE - sizeof(DPH_HEAP_ROOT),
  2040. FALSE);
  2041. //
  2042. // Make initial PoolList entry by taking a node from the
  2043. // UnusedNodeList, which should be guaranteed to be non-empty
  2044. // since we just added new nodes to it.
  2045. //
  2046. Node = RtlpDphAllocateNode (HeapRoot);
  2047. ASSERT (Node != NULL);
  2048. Node->pVirtualBlock = (PVOID)HeapRoot;
  2049. Node->nVirtualBlockSize = POOL_SIZE;
  2050. RtlpDphPlaceOnPoolList (HeapRoot, Node);
  2051. //
  2052. // Make VirtualStorageList entry for initial VM allocation
  2053. //
  2054. Node = RtlpDphAllocateNode( HeapRoot );
  2055. ASSERT (Node != NULL);
  2056. Node->pVirtualBlock = pVirtual;
  2057. Node->nVirtualBlockSize = nVirtual;
  2058. RtlpDphPlaceOnVirtualList (HeapRoot, Node);
  2059. //
  2060. // Make AvailableList entry containing remainder of initial VM
  2061. // and add to (create) the AvailableList.
  2062. //
  2063. Node = RtlpDphAllocateNode( HeapRoot );
  2064. ASSERT (Node != NULL);
  2065. Node->pVirtualBlock = pVirtual + (PAGE_SIZE + POOL_SIZE + PAGE_SIZE);
  2066. Node->nVirtualBlockSize = nVirtual - (PAGE_SIZE + POOL_SIZE + PAGE_SIZE);
  2067. RtlpDphCoalesceNodeIntoAvailable (HeapRoot, Node);
  2068. //
  2069. // Get heap creation stack trace.
  2070. //
  2071. HeapRoot->CreateStackTrace = RtlpDphLogStackTrace (1);
  2072. //
  2073. // Add this heap entry to the process heap linked list.
  2074. //
  2075. RtlEnterCriticalSection (&RtlpDphPageHeapListLock);
  2076. InsertTailList (&RtlpDphPageHeapList, &(HeapRoot->NextHeap));
  2077. RtlpDphPageHeapListLength += 1;
  2078. RtlLeaveCriticalSection( &RtlpDphPageHeapListLock );
  2079. if (DEBUG_OPTION (DBG_SHOW_PAGE_CREATE_DESTROY)) {
  2080. DbgPrintEx (DPFLTR_VERIFIER_ID,
  2081. DPFLTR_INFO_LEVEL,
  2082. "Page heap: process 0x%X created heap @ %p (%p, flags 0x%X)\n",
  2083. NtCurrentTeb()->ClientId.UniqueProcess,
  2084. HEAP_HANDLE_FROM_ROOT( HeapRoot ),
  2085. HeapRoot->NormalHeap,
  2086. HeapRoot->ExtraFlags);
  2087. }
  2088. if (DEBUG_OPTION (DBG_INTERNAL_VALIDATION)) {
  2089. RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0);
  2090. }
  2091. return HEAP_HANDLE_FROM_ROOT (HeapRoot); // same as pVirtual
  2092. }
#pragma optimize("y", off) // disable FPO so stack traces through here stay walkable

//
// RtlpDebugPageHeapDestroy
//
//     Destroys a full page heap created by RtlpDebugPageHeapCreate and the
//     normal (light page heap) NT heap associated with it.
//
//     HeapHandle - handle returned at create time (points at the fake
//         read-only HEAP page; the DPH_HEAP_ROOT lives one page beyond it).
//
//     Returns NULL always, mirroring RtlDestroyHeap's convention.
//
PVOID
RtlpDebugPageHeapDestroy(
    IN PVOID HeapHandle
    )
{
    PDPH_HEAP_ROOT HeapRoot;
    PDPH_HEAP_ROOT PrevHeapRoot;
    PDPH_HEAP_ROOT NextHeapRoot;
    PDPH_HEAP_BLOCK Node;
    PDPH_HEAP_BLOCK Next;
    ULONG Flags;
    PUCHAR p;
    ULONG Reason;
    PVOID NormalHeap;

    //
    // Destroying the process heap is always an application bug; report a
    // verifier stop and refuse.
    //
    if (HeapHandle == RtlProcessHeap()) {

        VERIFIER_STOP (APPLICATION_VERIFIER_DESTROY_PROCESS_HEAP,
                       "attempt to destroy process heap",
                       HeapHandle, "Process heap handle",
                       0, "", 0, "", 0, "");

        return NULL;
    }

    HeapRoot = RtlpDphPointerFromHandle( HeapHandle );

    if (HeapRoot == NULL) {
        return NULL;
    }

    Flags = HeapRoot->HeapFlags;

    //
    // Get the heap lock, unprotect heap structures, etc.
    //
    RtlpDphPreProcessing (HeapRoot, Flags);

    try {

        //
        // Save normal heap pointer for later (needed after the heap root's
        // own storage has been released below).
        //
        NormalHeap = HeapRoot->NormalHeap;

        //
        // Free all blocks in the delayed free queue that belong to the
        // normal heap just about to be destroyed. Note that this is
        // not a bug. The application freed the blocks correctly but
        // we delayed the free operation.
        //
        RtlpDphFreeDelayedBlocksFromHeap (HeapRoot, NormalHeap);

        //
        // Walk all busy allocations and check for tail fill corruption
        //
        Node = HeapRoot->pBusyAllocationListHead;

        while (Node) {

            // Backward-overrun mode places the no-access page before the
            // block, so there is no tail fill pattern to validate.
            if (! (HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {

                if (! (RtlpDphIsPageHeapBlock (HeapRoot, Node->pUserAllocation, &Reason, TRUE))) {

                    RtlpDphReportCorruptedBlock (HeapRoot,
                                                 DPH_CONTEXT_FULL_PAGE_HEAP_DESTROY,
                                                 Node->pUserAllocation,
                                                 Reason);
                }
            }

            //
            // Notify the app verifier that this block is about to be freed.
            // This is a good chance to verify if there are any active critical
            // sections about to be leaked in this heap allocation. Unfortunately
            // we cannot do the same check for light page heap blocks due to the
            // loose interaction between page heap and NT heap (we want to keep it
            // this way to avoid compatibility issues).
            //
            AVrfInternalHeapFreeNotification (Node->pUserAllocation,
                                              Node->nUserRequestedSize);

            //
            // Move to next node.
            //
            Node = Node->pNextAlloc;
        }

        //
        // Remove this heap entry from the process heap linked list.
        //
        RtlEnterCriticalSection( &RtlpDphPageHeapListLock );
        RemoveEntryList (&(HeapRoot->NextHeap));
        RtlpDphPageHeapListLength -= 1;
        RtlLeaveCriticalSection( &RtlpDphPageHeapListLock );

        //
        // Must release critical section before deleting it; otherwise,
        // checked build Teb->CountOfOwnedCriticalSections gets out of sync.
        //
        RtlLeaveCriticalSection( HeapRoot->HeapCritSect );
        RtlDeleteCriticalSection( HeapRoot->HeapCritSect );

        //
        // This is weird. A virtual block might contain storage for
        // one of the nodes necessary to walk this list. In fact,
        // we're guaranteed that the root node contains at least one
        // virtual alloc node.
        //
        // Each time we alloc new VM, we make that the head of the
        // of the VM list, like a LIFO structure. I think we're ok
        // because no VM list node should be on a subsequently alloc'd
        // VM -- only a VM list entry might be on its own memory (as
        // is the case for the root node). We read pNode->pNextAlloc
        // before releasing the VM in case pNode existed on that VM.
        // I think this is safe -- as long as the VM list is LIFO and
        // we don't do any list reorganization.
        //
        Node = HeapRoot->pVirtualStorageListHead;

        while (Node) {
            Next = Node->pNextAlloc;

            //
            // Even if the free will fail we will march forward.
            //
            RtlpDphFreeVm (Node->pVirtualBlock,
                           0,
                           MEM_RELEASE);

            Node = Next;
        }

        //
        // Destroy normal heap. Note that this will not make a recursive
        // call into this function because this is not a page heap and
        // code in NT heap manager will detect this.
        //
        RtlDestroyHeap (NormalHeap);
    }
    except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
                                              _exception_info(),
                                              NULL,
                                              FALSE)) {

        //
        // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
        //
        ASSERT_UNEXPECTED_CODE_PATH ();
    }

    //
    // That's it. All the VM, including the root node, should now
    // be released. RtlDestroyHeap always returns NULL.
    //
    if (DEBUG_OPTION (DBG_SHOW_PAGE_CREATE_DESTROY)) {

        DbgPrintEx (DPFLTR_VERIFIER_ID,
                    DPFLTR_INFO_LEVEL,
                    "Page heap: process 0x%X destroyed heap @ %p (%p)\n",
                    PROCESS_ID(),
                    HeapRoot,
                    NormalHeap);
    }

    return NULL;
}
  2233. PVOID
  2234. RtlpDebugPageHeapAllocate(
  2235. IN PVOID HeapHandle,
  2236. IN ULONG Flags,
  2237. IN SIZE_T Size
  2238. )
  2239. {
  2240. PDPH_HEAP_ROOT HeapRoot;
  2241. PDPH_HEAP_BLOCK pAvailNode;
  2242. PDPH_HEAP_BLOCK pPrevAvailNode;
  2243. PDPH_HEAP_BLOCK pBusyNode;
  2244. PDPH_HEAP_BLOCK PreAllocatedNode = NULL;
  2245. SIZE_T nBytesAllocate;
  2246. SIZE_T nBytesAccess;
  2247. SIZE_T nActual;
  2248. PVOID pVirtual;
  2249. PVOID pReturn = NULL;
  2250. PUCHAR pBlockHeader;
  2251. ULONG Reason;
  2252. BOOLEAN ForcePageHeap = FALSE;
  2253. NTSTATUS Status;
  2254. PVOID NtHeap = NULL;
  2255. PDPH_HEAP_ROOT ExitHeap;
  2256. ULONG ExitFlags;
  2257. ULONG ExitExtraFlags;
  2258. PUCHAR ExitBlock;
  2259. SIZE_T ExitRequestedSize;
  2260. SIZE_T ExitActualSize;
  2261. //
  2262. // Reject extreme size requests.
  2263. //
  2264. if (Size > EXTREME_SIZE_REQUEST) {
  2265. if (SHOULD_BREAK(BRK_ON_EXTREME_SIZE_REQUEST)) {
  2266. VERIFIER_STOP (APPLICATION_VERIFIER_EXTREME_SIZE_REQUEST,
  2267. "extreme size request",
  2268. HeapHandle, "Heap handle",
  2269. Size, "Size requested",
  2270. 0, "",
  2271. 0, "");
  2272. }
  2273. IF_GENERATE_EXCEPTION (Flags, STATUS_NO_MEMORY);
  2274. return NULL;
  2275. }
  2276. //
  2277. // Check if it is time to do fault injection.
  2278. //
  2279. if (RtlpDphShouldFaultInject ()) {
  2280. IF_GENERATE_EXCEPTION (Flags, STATUS_NO_MEMORY);
  2281. return NULL;
  2282. }
  2283. //
  2284. // Check if we have a biased heap pointer which signals
  2285. // a forced page heap allocation (no normal heap).
  2286. //
  2287. if (IS_BIASED_POINTER(HeapHandle)) {
  2288. HeapHandle = UNBIAS_POINTER(HeapHandle);
  2289. ForcePageHeap = TRUE;
  2290. }
  2291. HeapRoot = RtlpDphPointerFromHandle (HeapHandle);
  2292. if (HeapRoot == NULL) {
  2293. return FALSE;
  2294. }
  2295. //
  2296. // If fast fill heap is enabled we avoid page heap altogether.
  2297. // Reading the `NormalHeap' field is safe as long as nobody
  2298. // destroys the heap in a different thread. But this would be
  2299. // an application bug anyway. If fast fill heap is enabled
  2300. // we should never get a biased heap pointer since we disable
  2301. // per dll during startup.
  2302. //
  2303. if ((AVrfpVerifierFlags & RTL_VRF_FLG_FAST_FILL_HEAP)) {
  2304. ASSERT (ForcePageHeap == FALSE);
  2305. NtHeap = HeapRoot->NormalHeap;
  2306. goto FAST_FILL_HEAP;
  2307. }
  2308. //
  2309. // Get the heap lock, unprotect heap structures, etc.
  2310. //
  2311. RtlpDphPreProcessing (HeapRoot, Flags);
  2312. try {
  2313. //
  2314. // We cannot validate the heap when a forced allocation into page heap
  2315. // is requested due to accounting problems. Allocate is called in this way
  2316. // from ReAllocate while the old node (just about to be freed) is in limbo
  2317. // and is not accounted in any internal structure.
  2318. //
  2319. if (DEBUG_OPTION (DBG_INTERNAL_VALIDATION) && !ForcePageHeap) {
  2320. RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0);
  2321. }
  2322. Flags |= HeapRoot->HeapFlags;
  2323. //
  2324. // Figure out if we need to minimize memory impact. This
  2325. // might trigger an allocation in the normal heap.
  2326. //
  2327. if (! ForcePageHeap) {
  2328. if (! (RtlpDphShouldAllocateInPageHeap (HeapRoot, Size))) {
  2329. NtHeap = HeapRoot->NormalHeap;
  2330. goto EXIT;
  2331. }
  2332. }
  2333. //
  2334. // Check the heap if internal validation is on.
  2335. //
  2336. if (DEBUG_OPTION (DBG_INTERNAL_VALIDATION)) {
  2337. RtlpDphVerifyIntegrity( HeapRoot );
  2338. }
  2339. //
  2340. // Determine number of pages needed for READWRITE portion
  2341. // of allocation and add an extra page for the NO_ACCESS
  2342. // memory beyond the READWRITE page(s).
  2343. //
  2344. nBytesAccess = ROUNDUP2( Size + sizeof(DPH_BLOCK_INFORMATION), PAGE_SIZE );
  2345. nBytesAllocate = nBytesAccess + PAGE_SIZE;
  2346. //
  2347. // Preallocate node that will be used as the busy node in case
  2348. // the available list node must be split. See coments below.
  2349. // We need to do this here because the operation can fail and later
  2350. // it is more difficult to recover from the error.
  2351. //
  2352. PreAllocatedNode = RtlpDphAllocateNode (HeapRoot);
  2353. if (PreAllocatedNode == NULL) {
  2354. goto EXIT;
  2355. }
  2356. //
  2357. // RtlpDphFindAvailableMemory will first attempt to satisfy
  2358. // the request from memory on the Available list. If that fails,
  2359. // it will coalesce some of the Free list memory into the Available
  2360. // list and try again. If that still fails, new VM is allocated and
  2361. // added to the Available list. If that fails, the function will
  2362. // finally give up and return NULL.
  2363. //
  2364. pAvailNode = RtlpDphFindAvailableMemory (HeapRoot,
  2365. nBytesAllocate,
  2366. &pPrevAvailNode,
  2367. TRUE);
  2368. if (pAvailNode == NULL) {
  2369. OUT_OF_VM_BREAK( Flags, "Page heap: Unable to allocate virtual memory\n" );
  2370. goto EXIT;
  2371. }
  2372. //
  2373. // Now can't call AllocateNode until pAvailNode is
  2374. // adjusted and/or removed from Avail list since AllocateNode
  2375. // might adjust the Avail list.
  2376. //
  2377. pVirtual = pAvailNode->pVirtualBlock;
  2378. Status = RtlpDphSetProtectionsBeforeUse (HeapRoot,
  2379. pVirtual,
  2380. nBytesAccess);
  2381. if (! NT_SUCCESS(Status)) {
  2382. goto EXIT;
  2383. }
  2384. //
  2385. // pAvailNode (still on avail list) points to block large enough
  2386. // to satisfy request, but it might be large enough to split
  2387. // into two blocks -- one for request, remainder leave on
  2388. // avail list.
  2389. //
  2390. if (pAvailNode->nVirtualBlockSize > nBytesAllocate) {
  2391. //
  2392. // pAvailNode is bigger than the request. We need to
  2393. // split into two blocks. One will remain in available list
  2394. // and the other will become a busy node.
  2395. //
  2396. // We adjust pVirtualBlock and nVirtualBlock size of existing
  2397. // node in avail list. The node will still be in correct
  2398. // address space order on the avail list. This saves having
  2399. // to remove and then re-add node to avail list. Note since
  2400. // we're changing sizes directly, we need to adjust the
  2401. // avail and busy list counters manually.
  2402. //
  2403. // Note: since we're leaving at least one page on the
  2404. // available list, we are guaranteed that AllocateNode
  2405. // will not fail.
  2406. //
  2407. pAvailNode->pVirtualBlock += nBytesAllocate;
  2408. pAvailNode->nVirtualBlockSize -= nBytesAllocate;
  2409. HeapRoot->nAvailableAllocationBytesCommitted -= nBytesAllocate;
  2410. ASSERT (PreAllocatedNode != NULL);
  2411. pBusyNode = PreAllocatedNode;
  2412. PreAllocatedNode = NULL;
  2413. pBusyNode->pVirtualBlock = pVirtual;
  2414. pBusyNode->nVirtualBlockSize = nBytesAllocate;
  2415. }
  2416. else {
  2417. //
  2418. // Entire avail block is needed, so simply remove it from avail list.
  2419. //
  2420. RtlpDphRemoveFromAvailableList( HeapRoot, pAvailNode, pPrevAvailNode );
  2421. pBusyNode = pAvailNode;
  2422. }
  2423. //
  2424. // Now pBusyNode points to our committed virtual block.
  2425. //
  2426. if (HeapRoot->HeapFlags & HEAP_NO_ALIGNMENT)
  2427. nActual = Size;
  2428. else
  2429. nActual = ROUNDUP2( Size, USER_ALIGNMENT );
  2430. pBusyNode->nVirtualAccessSize = nBytesAccess;
  2431. pBusyNode->nUserRequestedSize = Size;
  2432. pBusyNode->nUserActualSize = nActual;
  2433. if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
  2434. pBusyNode->pUserAllocation = pBusyNode->pVirtualBlock
  2435. + PAGE_SIZE;
  2436. }
  2437. else {
  2438. pBusyNode->pUserAllocation = pBusyNode->pVirtualBlock
  2439. + pBusyNode->nVirtualAccessSize
  2440. - nActual;
  2441. }
  2442. pBusyNode->UserValue = NULL;
  2443. pBusyNode->UserFlags = Flags & HEAP_SETTABLE_USER_FLAGS;
  2444. //
  2445. // RtlpDebugPageHeapAllocate gets called from RtlDebugAllocateHeap,
  2446. // which gets called from RtlAllocateHeapSlowly, which gets called
  2447. // from RtlAllocateHeap. To keep from wasting lots of stack trace
  2448. // storage, we'll skip the bottom 3 entries, leaving RtlAllocateHeap
  2449. // as the first recorded entry.
  2450. //
  2451. // SilviuC: should collect traces out of page heap lock.
  2452. //
  2453. if ((HeapRoot->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) {
  2454. pBusyNode->StackTrace = RtlpDphLogStackTrace(3);
  2455. }
  2456. else {
  2457. pBusyNode->StackTrace = NULL;
  2458. }
  2459. RtlpDphPlaceOnBusyList( HeapRoot, pBusyNode );
  2460. pReturn = pBusyNode->pUserAllocation;
  2461. //
  2462. // Prepare data that will be needed to fill out the blocks
  2463. // after we release the heap lock.
  2464. //
  2465. ExitHeap = HeapRoot;
  2466. ExitFlags = Flags;
  2467. ExitExtraFlags = HeapRoot->ExtraFlags;
  2468. ExitBlock = pBusyNode->pUserAllocation;
  2469. ExitRequestedSize = Size;
  2470. ExitActualSize = Size;
  2471. }
  2472. except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
  2473. _exception_info(),
  2474. HeapRoot,
  2475. FALSE)) {
  2476. //
  2477. // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
  2478. //
  2479. ASSERT_UNEXPECTED_CODE_PATH ();
  2480. }
  2481. EXIT:
  2482. //
  2483. // If preallocated node did not get used we return it to unused
  2484. // nodes list.
  2485. //
  2486. if (PreAllocatedNode) {
  2487. RtlpDphReturnNodeToUnusedList(HeapRoot, PreAllocatedNode);
  2488. }
  2489. //
  2490. // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
  2491. //
  2492. RtlpDphPostProcessing (HeapRoot);
  2493. FAST_FILL_HEAP:
  2494. if (NtHeap) {
  2495. //
  2496. // We need to allocate from light page heap.
  2497. //
  2498. pReturn = RtlpDphNormalHeapAllocate (HeapRoot,
  2499. NtHeap,
  2500. Flags,
  2501. Size);
  2502. }
  2503. else {
  2504. //
  2505. // If allocation was successfully done from full page heap
  2506. // then out of locks fill in the block with the required patterns.
  2507. // Since we always commit memory fresh user area is already zeroed.
  2508. // No need to re-zero it. If there wasn't a request for zeroed
  2509. // memory then we fill it with stuff that looks like kernel
  2510. // pointers.
  2511. //
  2512. if (pReturn != NULL) {
  2513. if (! (ExitFlags & HEAP_ZERO_MEMORY)) {
  2514. BUMP_COUNTER (CNT_ALLOCS_FILLED);
  2515. RtlFillMemory (ExitBlock,
  2516. ExitRequestedSize,
  2517. DPH_PAGE_BLOCK_INFIX);
  2518. }
  2519. else {
  2520. BUMP_COUNTER (CNT_ALLOCS_ZEROED);
  2521. //
  2522. // The user buffer is guaranteed to be zeroed since
  2523. // we freshly committed the memory.
  2524. //
  2525. if (DEBUG_OPTION (DBG_INTERNAL_VALIDATION)) {
  2526. RtlpDphCheckFillPattern (ExitBlock,
  2527. ExitRequestedSize,
  2528. 0);
  2529. }
  2530. }
  2531. if (! (ExitExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
  2532. RtlpDphWritePageHeapBlockInformation (ExitHeap,
  2533. ExitExtraFlags,
  2534. ExitBlock,
  2535. ExitRequestedSize,
  2536. ExitActualSize);
  2537. }
  2538. }
  2539. }
  2540. //
  2541. // Finally return.
  2542. //
  2543. if (pReturn == NULL) {
  2544. IF_GENERATE_EXCEPTION (Flags, STATUS_NO_MEMORY);
  2545. }
  2546. return pReturn;
  2547. }
//
// RtlpDebugPageHeapFree
//
//     Frees a block previously returned by RtlpDebugPageHeapAllocate.
//     If the block is not found among the page heap's busy allocations
//     it is forwarded to the associated normal heap (light page heap).
//
//     HeapHandle - page heap handle.
//     Flags - HEAP_* flags, OR-ed with the heap's creation flags.
//     Address - user block to free; NULL is tolerated (valid in C++).
//
//     Returns TRUE on success, FALSE otherwise (and raises
//     STATUS_ACCESS_VIOLATION if HEAP_GENERATE_EXCEPTIONS is in effect).
//
BOOLEAN
RtlpDebugPageHeapFree(
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID Address
    )
{
    PDPH_HEAP_ROOT HeapRoot;
    PDPH_HEAP_BLOCK Node, Prev;
    BOOLEAN Success = FALSE;
    PCH p;
    ULONG Reason;
    PVOID NtHeap = NULL;

    //
    // Skip over null frees. These are valid in C++.
    //
    if (Address == NULL) {

        if (SHOULD_BREAK (BRK_ON_NULL_FREE)) {

            DbgPrintEx (DPFLTR_VERIFIER_ID,
                        DPFLTR_ERROR_LEVEL,
                        "Page heap: freeing a null pointer \n");
            DbgBreakPoint ();
        }

        return TRUE;
    }

    HeapRoot = RtlpDphPointerFromHandle (HeapHandle);

    if (HeapRoot == NULL) {
        return FALSE;
    }

    //
    // If fast fill heap is enabled we avoid page heap altogether.
    // Reading the `NormalHeap' field is safe as long as nobody
    // destroys the heap in a different thread. But this would be
    // an application bug anyway. If fast fill heap is enabled
    // we should never have per dll enabled since we disable
    // per dll during startup.
    //
    if ((AVrfpVerifierFlags & RTL_VRF_FLG_FAST_FILL_HEAP)) {
        ASSERT ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_DLL_NAMES) == 0);
        ASSERT (HeapRoot->NormalHeap);
        NtHeap = HeapRoot->NormalHeap;
        goto FAST_FILL_HEAP;
    }

    //
    // Get the heap lock, unprotect heap structures, etc.
    //
    RtlpDphPreProcessing (HeapRoot, Flags);

    try {

        if (DEBUG_OPTION (DBG_INTERNAL_VALIDATION)) {
            RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0);
        }

        Flags |= HeapRoot->HeapFlags;

        Node = RtlpDphFindBusyMemory( HeapRoot, Address, &Prev );

        if (Node == NULL) {

            //
            // No wonder we did not find the block in the page heap
            // structures because the block was probably allocated
            // from the normal heap. Or there is a real bug.
            // If there is a bug NormalHeapFree will break into debugger.
            //
            NtHeap = HeapRoot->NormalHeap;
            goto EXIT;
        }

        //
        // If tail was allocated, make sure filler not overwritten
        // (no tail filler exists in backward-overrun mode).
        //
        if (! (HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {

            if (! (RtlpDphIsPageHeapBlock (HeapRoot, Address, &Reason, TRUE))) {

                RtlpDphReportCorruptedBlock (HeapRoot,
                                             DPH_CONTEXT_FULL_PAGE_HEAP_FREE,
                                             Address,
                                             Reason);
            }
        }

        //
        // Decommit the memory for this block. We will continue the free
        // even if the decommit will fail (cannot imagine why but in
        // principle it can happen).
        //
        RtlpDphSetProtectionsAfterUse (HeapRoot, Node);

        //
        // Move node descriptor from busy to free.
        //
        RtlpDphRemoveFromBusyList( HeapRoot, Node, Prev );
        RtlpDphPlaceOnFreeList( HeapRoot, Node );

        //
        // RtlpDebugPageHeapFree gets called from RtlDebugFreeHeap, which
        // gets called from RtlFreeHeapSlowly, which gets called from
        // RtlFreeHeap. To keep from wasting lots of stack trace storage,
        // we'll skip the bottom 3 entries, leaving RtlFreeHeap as the
        // first recorded entry.
        //
        if ((HeapRoot->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) {

            //
            // Record the free stack trace in the node so use-after-free
            // reports can show where the block was released.
            //
            Node->StackTrace = RtlpDphLogStackTrace(3);
        }
        else {
            Node->StackTrace = NULL;
        }

        Success = TRUE;
    }
    except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
                                              _exception_info(),
                                              HeapRoot,
                                              FALSE)) {

        //
        // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
        //
        ASSERT_UNEXPECTED_CODE_PATH ();
    }

    EXIT:

    //
    // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
    //
    RtlpDphPostProcessing (HeapRoot);

    FAST_FILL_HEAP:

    if (NtHeap) {
        Success = RtlpDphNormalHeapFree (HeapRoot,
                                         NtHeap,
                                         Flags,
                                         Address);
    }

    if (! Success) {
        IF_GENERATE_EXCEPTION( Flags, STATUS_ACCESS_VIOLATION );
    }

    return Success;
}
  2678. PVOID
  2679. RtlpDebugPageHeapReAllocate(
  2680. IN PVOID HeapHandle,
  2681. IN ULONG Flags,
  2682. IN PVOID Address,
  2683. IN SIZE_T Size
  2684. )
  2685. {
  2686. PDPH_HEAP_ROOT HeapRoot;
  2687. PDPH_HEAP_BLOCK OldNode, OldPrev, NewNode;
  2688. PVOID NewAddress;
  2689. PUCHAR p;
  2690. SIZE_T CopyDataSize;
  2691. ULONG SaveFlags;
  2692. BOOLEAN ReallocInNormalHeap = FALSE;
  2693. ULONG Reason;
  2694. BOOLEAN ForcePageHeap = FALSE;
  2695. BOOLEAN OriginalAllocationInPageHeap = FALSE;
  2696. PVOID NtHeap = NULL;
  2697. //
  2698. // Reject extreme size requests.
  2699. //
  2700. if (Size > EXTREME_SIZE_REQUEST) {
  2701. if (SHOULD_BREAK(BRK_ON_EXTREME_SIZE_REQUEST)) {
  2702. VERIFIER_STOP (APPLICATION_VERIFIER_EXTREME_SIZE_REQUEST,
  2703. "extreme size request",
  2704. HeapHandle, "Heap handle",
  2705. Size, "Size requested",
  2706. 0, "",
  2707. 0, "");
  2708. }
  2709. IF_GENERATE_EXCEPTION (Flags, STATUS_NO_MEMORY);
  2710. return NULL;
  2711. }
  2712. //
  2713. // Check if it is time to do fault injection.
  2714. //
  2715. if (RtlpDphShouldFaultInject ()) {
  2716. IF_GENERATE_EXCEPTION (Flags, STATUS_NO_MEMORY);
  2717. return NULL;
  2718. }
  2719. //
  2720. // Check if we have a biased heap pointer which signals
  2721. // a forced page heap allocation (no normal heap).
  2722. //
  2723. if (IS_BIASED_POINTER(HeapHandle)) {
  2724. HeapHandle = UNBIAS_POINTER(HeapHandle);
  2725. ForcePageHeap = TRUE;
  2726. }
  2727. HeapRoot = RtlpDphPointerFromHandle (HeapHandle);
  2728. if (HeapRoot == NULL) {
  2729. return FALSE;
  2730. }
  2731. //
  2732. // If fast fill heap is enabled we avoid page heap altogether.
  2733. // Reading the `NormalHeap' field is safe as long as nobody
  2734. // destroys the heap in a different thread. But this would be
  2735. // an application bug anyway. If fast fill heap is enabled
  2736. // we should never get a biased heap pointer since we disable
  2737. // per dll during startup.
  2738. //
  2739. if ((AVrfpVerifierFlags & RTL_VRF_FLG_FAST_FILL_HEAP)) {
  2740. ASSERT (ForcePageHeap == FALSE);
  2741. NtHeap = HeapRoot->NormalHeap;
  2742. goto FAST_FILL_HEAP;
  2743. }
  2744. //
  2745. // Get the heap lock, unprotect heap structures, etc.
  2746. //
  2747. RtlpDphPreProcessing (HeapRoot, Flags);
  2748. try {
  2749. if (DEBUG_OPTION (DBG_INTERNAL_VALIDATION)) {
  2750. RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0);
  2751. }
  2752. Flags |= HeapRoot->HeapFlags;
  2753. NewAddress = NULL;
  2754. //
  2755. // Find descriptor for the block to be reallocated.
  2756. //
  2757. OldNode = RtlpDphFindBusyMemory( HeapRoot, Address, &OldPrev );
  2758. if (OldNode) {
  2759. OriginalAllocationInPageHeap = TRUE;
  2760. //
  2761. // Deal separately with the case where request is made with
  2762. // HEAP_REALLOC_IN_PLACE_ONLY flag and the new size is smaller than
  2763. // the old size. For these cases we will just resize the block.
  2764. // If the flag is used and the size is bigger we will fail always
  2765. // the call.
  2766. //
  2767. if ((Flags & HEAP_REALLOC_IN_PLACE_ONLY)) {
  2768. if (OldNode->nUserRequestedSize < Size) {
  2769. BUMP_COUNTER (CNT_REALLOC_IN_PLACE_BIGGER);
  2770. goto EXIT;
  2771. } else {
  2772. PUCHAR FillStart;
  2773. PUCHAR FillEnd;
  2774. PDPH_BLOCK_INFORMATION Info;
  2775. Info = (PDPH_BLOCK_INFORMATION)Address - 1;
  2776. Info->RequestedSize = Size;
  2777. OldNode->nUserRequestedSize = Size;
  2778. FillStart = (PUCHAR)Address + Info->RequestedSize;
  2779. FillEnd = (PUCHAR)ROUNDUP2((ULONG_PTR)FillStart, PAGE_SIZE);
  2780. RtlFillMemory (FillStart, FillEnd - FillStart, DPH_PAGE_BLOCK_SUFFIX);
  2781. NewAddress = Address;
  2782. BUMP_COUNTER (CNT_REALLOC_IN_PLACE_SMALLER);
  2783. goto EXIT;
  2784. }
  2785. }
  2786. }
  2787. if (OldNode == NULL) {
  2788. //
  2789. // No wonder we did not find the block in the page heap
  2790. // structures because the block was probably allocated
  2791. // from the normal heap. Or there is a real bug. If there
  2792. // is a bug NormalHeapReAllocate will break into debugger.
  2793. //
  2794. NtHeap = HeapRoot->NormalHeap;
  2795. goto EXIT;
  2796. }
  2797. //
  2798. // If tail was allocated, make sure filler not overwritten
  2799. //
  2800. if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {
  2801. // nothing
  2802. }
  2803. else {
  2804. if (! (RtlpDphIsPageHeapBlock (HeapRoot, Address, &Reason, TRUE))) {
  2805. RtlpDphReportCorruptedBlock (HeapRoot,
  2806. DPH_CONTEXT_FULL_PAGE_HEAP_REALLOC,
  2807. Address,
  2808. Reason);
  2809. }
  2810. }
  2811. //
  2812. // Before allocating a new block, remove the old block from
  2813. // the busy list. When we allocate the new block, the busy
  2814. // list pointers will change, possibly leaving our acquired
  2815. // Prev pointer invalid.
  2816. //
  2817. RtlpDphRemoveFromBusyList( HeapRoot, OldNode, OldPrev );
  2818. //
  2819. // Allocate new memory for new requested size. Use try/except
  2820. // to trap exception if Flags caused out-of-memory exception.
  2821. //
  2822. try {
  2823. if (!ForcePageHeap && !(RtlpDphShouldAllocateInPageHeap (HeapRoot, Size))) {
  2824. //
  2825. // SilviuC: think how can we make this allocation
  2826. // without holding the page heap lock. It is tough because
  2827. // we are making a transfer from a page heap block to an
  2828. // NT heap block and we need to keep them around to copy
  2829. // user data etc.
  2830. //
  2831. NewAddress = RtlpDphNormalHeapAllocate (HeapRoot,
  2832. HeapRoot->NormalHeap,
  2833. Flags,
  2834. Size);
  2835. ReallocInNormalHeap = TRUE;
  2836. }
  2837. else {
  2838. //
  2839. // Force the allocation in page heap by biasing
  2840. // the heap handle. Validate the heap here since when we use
  2841. // biased pointers validation inside Allocate is disabled.
  2842. //
  2843. if (DEBUG_OPTION (DBG_INTERNAL_VALIDATION)) {
  2844. RtlpDphInternalValidatePageHeap (HeapRoot, OldNode->pVirtualBlock, OldNode->nVirtualBlockSize);
  2845. }
  2846. NewAddress = RtlpDebugPageHeapAllocate(
  2847. BIAS_POINTER(HeapHandle),
  2848. Flags,
  2849. Size);
  2850. //
  2851. // When we get back from the page heap call we will get
  2852. // back read only meta data that we need to make read write.
  2853. //
  2854. UNPROTECT_HEAP_STRUCTURES( HeapRoot );
  2855. if (DEBUG_OPTION (DBG_INTERNAL_VALIDATION)) {
  2856. RtlpDphInternalValidatePageHeap (HeapRoot, OldNode->pVirtualBlock, OldNode->nVirtualBlockSize);
  2857. }
  2858. ReallocInNormalHeap = FALSE;
  2859. }
  2860. }
  2861. except( EXCEPTION_EXECUTE_HANDLER ) {
  2862. //
  2863. // ISSUE: SilviuC: We should break for status different from STATUS_NO_MEMORY
  2864. //
  2865. }
  2866. //
  2867. // We managed to make a new allocation (normal or page heap).
  2868. // Now we need to copy from old to new all sorts of stuff
  2869. // (contents, user flags/values).
  2870. //
  2871. if (NewAddress) {
  2872. //
  2873. // Copy old block contents into the new node.
  2874. //
  2875. CopyDataSize = OldNode->nUserRequestedSize;
  2876. if (CopyDataSize > Size) {
  2877. CopyDataSize = Size;
  2878. }
  2879. if (CopyDataSize > 0) {
  2880. RtlCopyMemory(
  2881. NewAddress,
  2882. Address,
  2883. CopyDataSize
  2884. );
  2885. }
  2886. //
  2887. // If new allocation was done in page heap we need to detect the new node
  2888. // and copy over user flags/values.
  2889. //
  2890. if (! ReallocInNormalHeap) {
  2891. NewNode = RtlpDphFindBusyMemory( HeapRoot, NewAddress, NULL );
  2892. //
  2893. // This block could not be in normal heap therefore from this
  2894. // respect the call above should always succeed.
  2895. //
  2896. ASSERT( NewNode != NULL );
  2897. NewNode->UserValue = OldNode->UserValue;
  2898. NewNode->UserFlags = ( Flags & HEAP_SETTABLE_USER_FLAGS ) ?
  2899. ( Flags & HEAP_SETTABLE_USER_FLAGS ) :
  2900. OldNode->UserFlags;
  2901. }
  2902. //
  2903. // We need to cover the case where old allocation was in page heap.
  2904. // In this case we still need to cleanup the old node and
  2905. // insert it back in free list. Actually the way the code is written
  2906. // we take this code path only if original allocation was in page heap.
  2907. // This is the reason for the assert.
  2908. //
  2909. ASSERT (OriginalAllocationInPageHeap);
  2910. if (OriginalAllocationInPageHeap) {
  2911. //
  2912. // Decommit the memory for this block. We will continue the realloc
  2913. // even if the decommit will fail (cannot imagine why but in
  2914. // principle it can happen).
  2915. //
  2916. RtlpDphSetProtectionsAfterUse (HeapRoot, OldNode);
  2917. //
  2918. // Place node descriptor in the free list.
  2919. //
  2920. RtlpDphPlaceOnFreeList( HeapRoot, OldNode );
  2921. //
  2922. // RtlpDebugPageHeapReAllocate gets called from RtlDebugReAllocateHeap,
  2923. // which gets called from RtlReAllocateHeap. To keep from wasting
  2924. // lots of stack trace storage, we'll skip the bottom 2 entries,
  2925. // leaving RtlReAllocateHeap as the first recorded entry in the
  2926. // freed stack trace.
  2927. //
  2928. // Note. For realloc we need to do the accounting for free in the
  2929. // trace block. The accounting for alloc is done in the real
  2930. // alloc operation which always happens for page heap reallocs.
  2931. //
  2932. if ((HeapRoot->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) {
  2933. OldNode->StackTrace = RtlpDphLogStackTrace(2);
  2934. }
  2935. else {
  2936. OldNode->StackTrace = NULL;
  2937. }
  2938. }
  2939. }
  2940. else {
  2941. //
  2942. // Failed to allocate a new block. Return old block to busy list.
  2943. //
  2944. if (OriginalAllocationInPageHeap) {
  2945. RtlpDphPlaceOnBusyList( HeapRoot, OldNode );
  2946. }
  2947. }
  2948. }
  2949. except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
  2950. _exception_info(),
  2951. HeapRoot,
  2952. FALSE)) {
  2953. //
  2954. // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
  2955. //
  2956. ASSERT_UNEXPECTED_CODE_PATH ();
  2957. }
  2958. EXIT:
  2959. //
  2960. // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
  2961. //
  2962. RtlpDphPostProcessing (HeapRoot);
  2963. FAST_FILL_HEAP:
  2964. if (NtHeap) {
  2965. NewAddress = RtlpDphNormalHeapReAllocate (HeapRoot,
  2966. NtHeap,
  2967. Flags,
  2968. Address,
  2969. Size);
  2970. }
  2971. if (NewAddress == NULL) {
  2972. IF_GENERATE_EXCEPTION( Flags, STATUS_NO_MEMORY );
  2973. }
  2974. return NewAddress;
  2975. }
SIZE_T
RtlpDebugPageHeapSize(
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID Address
    )
/*++

Routine Description:

    Page heap implementation of the heap block size query. The routine
    looks up the busy block descriptor for `Address' and returns the
    size originally requested by the user. If the block is not found
    among the page heap busy blocks the query is forwarded to the
    normal (NT) heap associated with this page heap.

Parameters:

    HeapHandle - page heap handle.

    Flags - caller flags; OR-ed with the flags stored in the heap root.

    Address - user pointer of the block whose size is requested.

Return Value:

    The user-requested size of the block, or -1 (as SIZE_T) if the block
    could not be found anywhere. In the failure case an exception may be
    raised depending on `Flags' (IF_GENERATE_EXCEPTION).

--*/
{
    PDPH_HEAP_ROOT HeapRoot;
    PDPH_HEAP_BLOCK Node;
    SIZE_T Size;
    PVOID NtHeap = NULL;

    //
    // -1 is the conventional failure value for heap size queries.
    //
    Size = -1;

    BUMP_COUNTER (CNT_HEAP_SIZE_CALLS);

    HeapRoot = RtlpDphPointerFromHandle( HeapHandle );
    if (HeapRoot == NULL) {
        return Size;
    }

    Flags |= HeapRoot->HeapFlags;

    //
    // If fast fill heap is enabled we avoid page heap altogether.
    // Reading the `NormalHeap' field is safe as long as nobody
    // destroys the heap in a different thread. But this would be
    // an application bug anyway. If fast fill heap is enabled
    // we should never have per dll enabled since we disable
    // per dll during startup.
    //
    if ((AVrfpVerifierFlags & RTL_VRF_FLG_FAST_FILL_HEAP)) {

        ASSERT ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_DLL_NAMES) == 0);

        NtHeap = HeapRoot->NormalHeap;
        goto FAST_FILL_HEAP;
    }

    //
    // Get the heap lock, unprotect heap structures, etc.
    //
    RtlpDphPreProcessing (HeapRoot, Flags);

    try {

        Node = RtlpDphFindBusyMemory( HeapRoot, Address, NULL );

        if (Node == NULL) {

            //
            // No wonder we did not find the block in the page heap
            // structures because the block was probably allocated
            // from the normal heap. Or there is a real bug. If there
            // is a bug NormalHeapSize will break into debugger.
            //
            NtHeap = HeapRoot->NormalHeap;
            goto EXIT;
        }
        else {

            Size = Node->nUserRequestedSize;
        }
    }
    except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
                                              _exception_info(),
                                              HeapRoot,
                                              TRUE)) {

        //
        // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
        //
        ASSERT_UNEXPECTED_CODE_PATH ();
    }

    EXIT:

    //
    // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
    //
    RtlpDphPostProcessing (HeapRoot);

    FAST_FILL_HEAP:

    if (NtHeap) {

        Size = RtlpDphNormalHeapSize (HeapRoot,
                                      NtHeap,
                                      Flags,
                                      Address);
    }

    if (Size == -1) {
        IF_GENERATE_EXCEPTION( Flags, STATUS_ACCESS_VIOLATION );
    }

    return Size;
}
  3053. ULONG
  3054. RtlpDebugPageHeapGetProcessHeaps(
  3055. ULONG NumberOfHeaps,
  3056. PVOID *ProcessHeaps
  3057. )
  3058. {
  3059. PDPH_HEAP_ROOT HeapRoot;
  3060. PLIST_ENTRY Current;
  3061. ULONG Count;
  3062. BUMP_COUNTER (CNT_HEAP_GETPROCESSHEAPS_CALLS);
  3063. //
  3064. // GetProcessHeaps is never called before at least the very
  3065. // first heap is created.
  3066. //
  3067. ASSERT (RtlpDphPageHeapListInitialized);
  3068. if (! RtlpDphPageHeapListInitialized) {
  3069. return 0;
  3070. }
  3071. RtlEnterCriticalSection( &RtlpDphPageHeapListLock );
  3072. if (RtlpDphPageHeapListLength <= NumberOfHeaps) {
  3073. Current = RtlpDphPageHeapList.Flink;
  3074. Count = 0;
  3075. while (Current != &RtlpDphPageHeapList) {
  3076. HeapRoot = CONTAINING_RECORD (Current,
  3077. DPH_HEAP_ROOT,
  3078. NextHeap);
  3079. Current = Current->Flink;
  3080. *ProcessHeaps = HEAP_HANDLE_FROM_ROOT(HeapRoot);
  3081. ProcessHeaps += 1;
  3082. Count += 1;
  3083. }
  3084. if (Count != RtlpDphPageHeapListLength) {
  3085. VERIFIER_STOP (APPLICATION_VERIFIER_UNKNOWN_ERROR,
  3086. "process heap list count is wrong",
  3087. Count, "Actual count",
  3088. RtlpDphPageHeapListLength, "Page heap count",
  3089. 0, "",
  3090. 0, "");
  3091. }
  3092. }
  3093. else {
  3094. //
  3095. // User's buffer is too small. Return number of entries
  3096. // necessary for subsequent call to succeed. Buffer
  3097. // remains untouched.
  3098. //
  3099. Count = RtlpDphPageHeapListLength;
  3100. }
  3101. RtlLeaveCriticalSection( &RtlpDphPageHeapListLock );
  3102. return Count;
  3103. }
  3104. ULONG
  3105. RtlpDebugPageHeapCompact(
  3106. IN PVOID HeapHandle,
  3107. IN ULONG Flags
  3108. )
  3109. {
  3110. PDPH_HEAP_ROOT HeapRoot;
  3111. HeapRoot = RtlpDphPointerFromHandle( HeapHandle );
  3112. if (HeapRoot == NULL)
  3113. return 0;
  3114. Flags |= HeapRoot->HeapFlags;
  3115. RtlpDphEnterCriticalSection( HeapRoot, Flags );
  3116. //
  3117. // Don't do anything, but we did want to acquire the critsect
  3118. // in case this was called with HEAP_NO_SERIALIZE while another
  3119. // thread is in the heap code.
  3120. //
  3121. RtlpDphLeaveCriticalSection( HeapRoot );
  3122. return 0;
  3123. }
BOOLEAN
RtlpDebugPageHeapValidate(
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID Address
    )
/*++

Routine Description:

    Page heap implementation of heap validation. With a non-null
    `Address' the routine checks whether the address is a live (busy)
    page heap block; if not found there, validation is delegated to the
    associated normal heap. With a null `Address' (whole heap
    validation) the page heap side reports success without doing work.

Parameters:

    HeapHandle - page heap handle.

    Flags - caller flags; OR-ed with the flags stored in the heap root.

    Address - block to validate, or NULL to validate the whole heap.

Return Value:

    TRUE if validation succeeded, FALSE otherwise.

--*/
{
    PDPH_HEAP_ROOT HeapRoot;
    PDPH_HEAP_BLOCK Node = NULL;
    BOOLEAN Result = FALSE;
    PVOID NtHeap = NULL;

    BUMP_COUNTER (CNT_HEAP_VALIDATE_CALLS);

    HeapRoot = RtlpDphPointerFromHandle( HeapHandle );
    if (HeapRoot == NULL)
        return FALSE;

    Flags |= HeapRoot->HeapFlags;

    //
    // If fast fill heap is enabled we avoid page heap altogether.
    // Reading the `NormalHeap' field is safe as long as nobody
    // destroys the heap in a different thread. But this would be
    // an application bug anyway. If fast fill heap is enabled
    // we should never have per dll enabled since we disable
    // per dll during startup.
    //
    if ((AVrfpVerifierFlags & RTL_VRF_FLG_FAST_FILL_HEAP)) {

        ASSERT ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_DLL_NAMES) == 0);
        ASSERT (HeapRoot->NormalHeap);

        NtHeap = HeapRoot->NormalHeap;
        goto FAST_FILL_HEAP;
    }

    //
    // Get the heap lock, unprotect heap structures, etc.
    //
    RtlpDphPreProcessing (HeapRoot, Flags);

    try {

        //
        // Only look up the block if a specific address was given.
        //
        Node = Address ? RtlpDphFindBusyMemory( HeapRoot, Address, NULL ) : NULL;

        if (Node == NULL) {
            NtHeap = HeapRoot->NormalHeap;
        }
    }
    except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
                                              _exception_info(),
                                              HeapRoot,
                                              TRUE)) {

        //
        // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
        //
        ASSERT_UNEXPECTED_CODE_PATH ();
    }

    //
    // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
    //
    RtlpDphPostProcessing (HeapRoot);

    FAST_FILL_HEAP:

    if (NtHeap) {

        //
        // Block not found in page heap (or fast fill heap is active):
        // let the normal heap validate.
        //
        Result = RtlpDphNormalHeapValidate (HeapRoot,
                                            NtHeap,
                                            Flags,
                                            Address);

        return Result;
    }
    else {

        if (Address) {
            if (Node) {
                //
                // Address was found as a busy page heap block.
                //
                return TRUE;
            }
            else {
                //
                // Not found; Result is still FALSE here.
                //
                return Result;
            }
        }
        else {
            //
            // Whole-heap validation trivially succeeds for page heap.
            //
            return TRUE;
        }
    }
}
NTSTATUS
RtlpDebugPageHeapWalk(
    IN PVOID HeapHandle,
    IN OUT PRTL_HEAP_WALK_ENTRY Entry
    )
/*++

Routine Description:

    Heap walking is not supported by the page heap implementation.
    The call is only counted for statistics.

Return Value:

    STATUS_NOT_IMPLEMENTED, always.

--*/
{
    BUMP_COUNTER (CNT_HEAP_WALK_CALLS);

    return STATUS_NOT_IMPLEMENTED;
}
  3208. BOOLEAN
  3209. RtlpDebugPageHeapLock(
  3210. IN PVOID HeapHandle
  3211. )
  3212. {
  3213. PDPH_HEAP_ROOT HeapRoot;
  3214. HeapRoot = RtlpDphPointerFromHandle( HeapHandle );
  3215. if (HeapRoot == NULL) {
  3216. return FALSE;
  3217. }
  3218. RtlpDphEnterCriticalSection( HeapRoot, HeapRoot->HeapFlags );
  3219. return TRUE;
  3220. }
  3221. BOOLEAN
  3222. RtlpDebugPageHeapUnlock(
  3223. IN PVOID HeapHandle
  3224. )
  3225. {
  3226. PDPH_HEAP_ROOT HeapRoot;
  3227. HeapRoot = RtlpDphPointerFromHandle( HeapHandle );
  3228. if (HeapRoot == NULL) {
  3229. return FALSE;
  3230. }
  3231. RtlpDphLeaveCriticalSection( HeapRoot );
  3232. return TRUE;
  3233. }
BOOLEAN
RtlpDebugPageHeapSetUserValue(
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID Address,
    IN PVOID UserValue
    )
/*++

Routine Description:

    Stores an opaque per-block user value in the block's descriptor.
    If the block is not found among the page heap busy blocks the
    operation is forwarded to the associated normal heap.

Parameters:

    HeapHandle - page heap handle.

    Flags - caller flags; OR-ed with the flags stored in the heap root.

    Address - user pointer of the target block.

    UserValue - opaque value to associate with the block.

Return Value:

    TRUE if the value was stored, FALSE otherwise.

--*/
{
    PDPH_HEAP_ROOT HeapRoot;
    PDPH_HEAP_BLOCK Node;
    BOOLEAN Success;
    PVOID NtHeap = NULL;

    Success = FALSE;

    BUMP_COUNTER (CNT_HEAP_SETUSERVALUE_CALLS);

    HeapRoot = RtlpDphPointerFromHandle( HeapHandle );
    if ( HeapRoot == NULL )
        return Success;

    Flags |= HeapRoot->HeapFlags;

    //
    // If fast fill heap is enabled we avoid page heap altogether.
    // Reading the `NormalHeap' field is safe as long as nobody
    // destroys the heap in a different thread. But this would be
    // an application bug anyway. If fast fill heap is enabled
    // we should never have per dll enabled since we disable
    // per dll during startup.
    //
    if ((AVrfpVerifierFlags & RTL_VRF_FLG_FAST_FILL_HEAP)) {

        ASSERT ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_DLL_NAMES) == 0);

        NtHeap = HeapRoot->NormalHeap;
        goto FAST_FILL_HEAP;
    }

    //
    // Get the heap lock, unprotect heap structures, etc.
    //
    RtlpDphPreProcessing (HeapRoot, Flags);

    try {

        Node = RtlpDphFindBusyMemory( HeapRoot, Address, NULL );

        if ( Node == NULL ) {

            //
            // If we cannot find the node in page heap structures it might be
            // because it has been allocated from normal heap.
            //
            NtHeap = HeapRoot->NormalHeap;
            goto EXIT;
        }
        else {

            Node->UserValue = UserValue;
            Success = TRUE;
        }
    }
    except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
                                              _exception_info(),
                                              HeapRoot,
                                              FALSE)) {

        //
        // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
        //
        ASSERT_UNEXPECTED_CODE_PATH ();
    }

    EXIT:

    //
    // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
    //
    RtlpDphPostProcessing (HeapRoot);

    FAST_FILL_HEAP:

    if (NtHeap) {

        Success = RtlpDphNormalHeapSetUserValue (HeapRoot,
                                                 NtHeap,
                                                 Flags,
                                                 Address,
                                                 UserValue);
    }

    return Success;
}
BOOLEAN
RtlpDebugPageHeapGetUserInfo(
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID Address,
    OUT PVOID* UserValue,
    OUT PULONG UserFlags
    )
/*++

Routine Description:

    Reads back the per-block user value and user flags stored in the
    block's descriptor. If the block is not found among the page heap
    busy blocks the query is forwarded to the associated normal heap.

Parameters:

    HeapHandle - page heap handle.

    Flags - caller flags; OR-ed with the flags stored in the heap root.

    Address - user pointer of the target block.

    UserValue - optional; receives the stored user value.

    UserFlags - optional; receives the stored user flags.

Return Value:

    TRUE if the block was found and the requested data returned.

--*/
{
    PDPH_HEAP_ROOT HeapRoot;
    PDPH_HEAP_BLOCK Node;
    BOOLEAN Success;
    PVOID NtHeap = NULL;

    Success = FALSE;

    BUMP_COUNTER (CNT_HEAP_GETUSERINFO_CALLS);

    HeapRoot = RtlpDphPointerFromHandle( HeapHandle );
    if ( HeapRoot == NULL )
        return Success;

    Flags |= HeapRoot->HeapFlags;

    //
    // If fast fill heap is enabled we avoid page heap altogether.
    // Reading the `NormalHeap' field is safe as long as nobody
    // destroys the heap in a different thread. But this would be
    // an application bug anyway. If fast fill heap is enabled
    // we should never have per dll enabled since we disable
    // per dll during startup.
    //
    if ((AVrfpVerifierFlags & RTL_VRF_FLG_FAST_FILL_HEAP)) {

        ASSERT ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_DLL_NAMES) == 0);

        NtHeap = HeapRoot->NormalHeap;
        goto FAST_FILL_HEAP;
    }

    //
    // Get the heap lock, unprotect heap structures, etc.
    //
    RtlpDphPreProcessing (HeapRoot, Flags);

    try {

        Node = RtlpDphFindBusyMemory( HeapRoot, Address, NULL );

        if ( Node == NULL ) {

            //
            // If we cannot find the node in page heap structures it might be
            // because it has been allocated from normal heap.
            //
            NtHeap = HeapRoot->NormalHeap;
            goto EXIT;
        }
        else {

            //
            // Both out parameters are optional.
            //
            if ( UserValue != NULL )
                *UserValue = Node->UserValue;

            if ( UserFlags != NULL )
                *UserFlags = Node->UserFlags;

            Success = TRUE;
        }
    }
    except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
                                              _exception_info(),
                                              HeapRoot,
                                              FALSE)) {

        //
        // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
        //
        ASSERT_UNEXPECTED_CODE_PATH ();
    }

    EXIT:

    //
    // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
    //
    RtlpDphPostProcessing (HeapRoot);

    FAST_FILL_HEAP:

    if (NtHeap) {

        Success = RtlpDphNormalHeapGetUserInfo (HeapRoot,
                                                NtHeap,
                                                Flags,
                                                Address,
                                                UserValue,
                                                UserFlags);
    }

    return Success;
}
BOOLEAN
RtlpDebugPageHeapSetUserFlags(
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID Address,
    IN ULONG UserFlagsReset,
    IN ULONG UserFlagsSet
    )
/*++

Routine Description:

    Updates the per-block user flags: bits in `UserFlagsReset' are
    cleared first, then bits in `UserFlagsSet' are set. If the block is
    not found among the page heap busy blocks the operation is
    forwarded to the associated normal heap.

Parameters:

    HeapHandle - page heap handle.

    Flags - caller flags; OR-ed with the flags stored in the heap root.

    Address - user pointer of the target block.

    UserFlagsReset - mask of user flag bits to clear.

    UserFlagsSet - mask of user flag bits to set (applied after reset).

Return Value:

    TRUE if the flags were updated, FALSE otherwise.

--*/
{
    PDPH_HEAP_ROOT HeapRoot;
    PDPH_HEAP_BLOCK Node;
    BOOLEAN Success;
    PVOID NtHeap = NULL;

    Success = FALSE;

    BUMP_COUNTER (CNT_HEAP_SETUSERFLAGS_CALLS);

    HeapRoot = RtlpDphPointerFromHandle( HeapHandle );
    if ( HeapRoot == NULL )
        return Success;

    Flags |= HeapRoot->HeapFlags;

    //
    // If fast fill heap is enabled we avoid page heap altogether.
    // Reading the `NormalHeap' field is safe as long as nobody
    // destroys the heap in a different thread. But this would be
    // an application bug anyway. If fast fill heap is enabled
    // we should never have per dll enabled since we disable
    // per dll during startup.
    //
    if ((AVrfpVerifierFlags & RTL_VRF_FLG_FAST_FILL_HEAP)) {

        ASSERT ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_DLL_NAMES) == 0);

        NtHeap = HeapRoot->NormalHeap;
        goto FAST_FILL_HEAP;
    }

    //
    // Get the heap lock, unprotect heap structures, etc.
    //
    RtlpDphPreProcessing (HeapRoot, Flags);

    try {

        Node = RtlpDphFindBusyMemory( HeapRoot, Address, NULL );

        if ( Node == NULL ) {

            //
            // If we cannot find the node in page heap structures it might be
            // because it has been allocated from normal heap.
            //
            NtHeap = HeapRoot->NormalHeap;
            goto EXIT;
        }
        else {

            //
            // Clear the reset bits first, then apply the set bits.
            //
            Node->UserFlags &= ~( UserFlagsReset );
            Node->UserFlags |= UserFlagsSet;

            Success = TRUE;
        }
    }
    except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
                                              _exception_info(),
                                              HeapRoot,
                                              FALSE)) {

        //
        // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
        //
        ASSERT_UNEXPECTED_CODE_PATH ();
    }

    EXIT:

    //
    // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
    //
    RtlpDphPostProcessing (HeapRoot);

    FAST_FILL_HEAP:

    if (NtHeap) {

        Success = RtlpDphNormalHeapSetUserFlags (HeapRoot,
                                                 NtHeap,
                                                 Flags,
                                                 Address,
                                                 UserFlagsReset,
                                                 UserFlagsSet);
    }

    return Success;
}
  3464. BOOLEAN
  3465. RtlpDebugPageHeapSerialize(
  3466. IN PVOID HeapHandle
  3467. )
  3468. {
  3469. PDPH_HEAP_ROOT HeapRoot;
  3470. HeapRoot = RtlpDphPointerFromHandle( HeapHandle );
  3471. if ( HeapRoot == NULL )
  3472. return FALSE;
  3473. //
  3474. // Get the heap lock, unprotect heap structures, etc.
  3475. //
  3476. RtlpDphPreProcessing (HeapRoot, 0);
  3477. HeapRoot->HeapFlags &= ~HEAP_NO_SERIALIZE;
  3478. //
  3479. // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
  3480. //
  3481. RtlpDphPostProcessing (HeapRoot);
  3482. return TRUE;
  3483. }
NTSTATUS
RtlpDebugPageHeapExtend(
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID Base,
    IN SIZE_T Size
    )
/*++

Routine Description:

    Extending a heap region is meaningless for page heap; the request
    succeeds as a no-op.

Return Value:

    STATUS_SUCCESS, always.

--*/
{
    return STATUS_SUCCESS;
}
NTSTATUS
RtlpDebugPageHeapZero(
    IN PVOID HeapHandle,
    IN ULONG Flags
    )
/*++

Routine Description:

    Zeroing the heap is not applicable to page heap; the request
    succeeds as a no-op.

Return Value:

    STATUS_SUCCESS, always.

--*/
{
    return STATUS_SUCCESS;
}
NTSTATUS
RtlpDebugPageHeapReset(
    IN PVOID HeapHandle,
    IN ULONG Flags
    )
/*++

Routine Description:

    Resetting the heap is not applicable to page heap; the request
    succeeds as a no-op.

Return Value:

    STATUS_SUCCESS, always.

--*/
{
    return STATUS_SUCCESS;
}
NTSTATUS
RtlpDebugPageHeapUsage(
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN OUT PRTL_HEAP_USAGE Usage
    )
/*++

Routine Description:

    Returns coarse usage figures for a page heap. Only the byte
    counters are filled in (from the heap root's accounting fields);
    the per-entry lists of RTL_HEAP_USAGE remain zeroed.

Parameters:

    HeapHandle - page heap handle.

    Flags - caller flags (passed to lock pre-processing).

    Usage - caller-supplied structure; Length must be
        sizeof(RTL_HEAP_USAGE) on input.

Return Value:

    STATUS_SUCCESS, STATUS_INVALID_PARAMETER for a bad handle, or
    STATUS_INFO_LENGTH_MISMATCH for a wrong structure size.

--*/
{
    PDPH_HEAP_ROOT HeapRoot;

    //
    // Partial implementation since this information is kind of meaningless.
    //
    HeapRoot = RtlpDphPointerFromHandle( HeapHandle );
    if ( HeapRoot == NULL )
        return STATUS_INVALID_PARAMETER;

    if ( Usage->Length != sizeof( RTL_HEAP_USAGE ))
        return STATUS_INFO_LENGTH_MISMATCH;

    memset( Usage, 0, sizeof( RTL_HEAP_USAGE ));
    Usage->Length = sizeof( RTL_HEAP_USAGE );

    //
    // Get the heap lock, unprotect heap structures, etc.
    //
    RtlpDphPreProcessing (HeapRoot, Flags);

    try {

        Usage->BytesAllocated = HeapRoot->nBusyAllocationBytesAccessible;
        Usage->BytesCommitted = HeapRoot->nVirtualStorageBytes;
        Usage->BytesReserved = HeapRoot->nVirtualStorageBytes;
        Usage->BytesReservedMaximum = HeapRoot->nVirtualStorageBytes;
    }
    except (RtlpDphUnexpectedExceptionFilter (_exception_code(),
                                              _exception_info(),
                                              HeapRoot,
                                              FALSE)) {

        //
        // The exception filter always returns EXCEPTION_CONTINUE_SEARCH.
        //
        ASSERT_UNEXPECTED_CODE_PATH ();
    }

    //
    // Prepare page heap for exit (unlock heap lock, protect structures, etc.).
    //
    RtlpDphPostProcessing (HeapRoot);

    return STATUS_SUCCESS;
}
  3553. BOOLEAN
  3554. RtlpDebugPageHeapIsLocked(
  3555. IN PVOID HeapHandle
  3556. )
  3557. {
  3558. PDPH_HEAP_ROOT HeapRoot;
  3559. HeapRoot = RtlpDphPointerFromHandle( HeapHandle );
  3560. if ( HeapRoot == NULL )
  3561. return FALSE;
  3562. if ( RtlTryEnterCriticalSection( HeapRoot->HeapCritSect )) {
  3563. RtlLeaveCriticalSection( HeapRoot->HeapCritSect );
  3564. return FALSE;
  3565. }
  3566. else {
  3567. return TRUE;
  3568. }
  3569. }
/////////////////////////////////////////////////////////////////////
/////////////////////////// Page heap vs. normal heap decision making
/////////////////////////////////////////////////////////////////////

//
// Running counters for allocation placement decisions:
//
// [0] - allocations routed to full page heap
// [1] - allocations routed to light page heap (normal heap)
//
LONG RtlpDphBlockDistribution[2];
  3578. BOOLEAN
  3579. RtlpDphShouldAllocateInPageHeap (
  3580. PDPH_HEAP_ROOT HeapRoot,
  3581. SIZE_T Size
  3582. )
  3583. /*++
  3584. Routine Description:
  3585. This routine decides if the current allocation should be made in full
  3586. page heap or light page heap.
  3587. Parameters:
  3588. HeapRoot - heap descriptor for the current allocation request.
  3589. Size - size of the current allocation request.
  3590. Return Value:
  3591. True if this should be a full page heap allocation and false otherwise.
  3592. --*/
  3593. {
  3594. SYSTEM_PERFORMANCE_INFORMATION PerfInfo;
  3595. NTSTATUS Status;
  3596. ULONG Random;
  3597. ULONG Percentage;
  3598. //
  3599. // If this is a read-only page heap we go into full page heap.
  3600. //
  3601. if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_READONLY)) {
  3602. InterlockedIncrement (&(RtlpDphBlockDistribution[0]));
  3603. return TRUE;
  3604. }
  3605. //
  3606. // If page heap is not enabled => normal heap.
  3607. //
  3608. if (! (HeapRoot->ExtraFlags & PAGE_HEAP_ENABLE_PAGE_HEAP)) {
  3609. InterlockedIncrement (&(RtlpDphBlockDistribution[1]));
  3610. return FALSE;
  3611. }
  3612. //
  3613. // If call not generated from one of the target dlls => normal heap
  3614. // We do this check up front to avoid the slow path where we check
  3615. // if VM limits have been hit.
  3616. //
  3617. else if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_DLL_NAMES)) {
  3618. //
  3619. // We return false. The calls generated from target
  3620. // dlls will never get into this function and therefore
  3621. // we just return false signalling that we do not want
  3622. // page heap verification for the rest of the world.
  3623. //
  3624. InterlockedIncrement (&(RtlpDphBlockDistribution[1]));
  3625. return FALSE;
  3626. }
  3627. //
  3628. // Check memory availability. If we tend to exhaust virtual space
  3629. // or page file then we will go to the normal heap.
  3630. //
  3631. else if (RtlpDphVmLimitCanUsePageHeap() == FALSE) {
  3632. InterlockedIncrement (&(RtlpDphBlockDistribution[1]));
  3633. return FALSE;
  3634. }
  3635. //
  3636. // If in size range => page heap
  3637. //
  3638. else if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_SIZE_RANGE)) {
  3639. if (Size >= RtlpDphSizeRangeStart && Size <= RtlpDphSizeRangeEnd) {
  3640. InterlockedIncrement (&(RtlpDphBlockDistribution[0]));
  3641. return TRUE;
  3642. }
  3643. else {
  3644. InterlockedIncrement (&(RtlpDphBlockDistribution[1]));
  3645. return FALSE;
  3646. }
  3647. }
  3648. //
  3649. // If in dll range => page heap
  3650. //
  3651. else if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_DLL_RANGE)) {
  3652. PVOID StackTrace[32];
  3653. ULONG Count;
  3654. ULONG Index;
  3655. ULONG Hash;
  3656. Count = RtlCaptureStackBackTrace (
  3657. 1,
  3658. 32,
  3659. StackTrace,
  3660. &Hash);
  3661. //
  3662. // (SilviuC): should read DllRange as PVOIDs
  3663. //
  3664. for (Index = 0; Index < Count; Index += 1) {
  3665. if (PtrToUlong(StackTrace[Index]) >= RtlpDphDllRangeStart
  3666. && PtrToUlong(StackTrace[Index]) <= RtlpDphDllRangeEnd) {
  3667. InterlockedIncrement (&(RtlpDphBlockDistribution[0]));
  3668. return TRUE;
  3669. }
  3670. }
  3671. InterlockedIncrement (&(RtlpDphBlockDistribution[1]));
  3672. return FALSE;
  3673. }
  3674. //
  3675. // If randomly decided => page heap
  3676. //
  3677. else if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_RANDOM_DECISION)) {
  3678. Random = RtlRandom (& (HeapRoot->Seed));
  3679. if ((Random % 100) < RtlpDphRandomProbability) {
  3680. InterlockedIncrement (&(RtlpDphBlockDistribution[0]));
  3681. return TRUE;
  3682. }
  3683. else {
  3684. InterlockedIncrement (&(RtlpDphBlockDistribution[1]));
  3685. return FALSE;
  3686. }
  3687. }
  3688. //
  3689. // For all other cases we will allocate in the page heap.
  3690. //
  3691. else {
  3692. InterlockedIncrement (&(RtlpDphBlockDistribution[0]));
  3693. return TRUE;
  3694. }
  3695. }
//
// Vm limit related globals.
//
// RtlpDphVmLimitNoPageHeap is non-zero while full page heap is
// temporarily disabled because VM resources are low; it is read and
// written only with interlocked operations. RtlpDphVmLimitHits counts
// how many times each limit triggered ([0] - virtual address space,
// [1] - pagefile commit).
//
LONG RtlpDphVmLimitNoPageHeap;
LONG RtlpDphVmLimitHits[2];

#define SIZE_1_MB 0x100000
BOOLEAN
RtlpDphVmLimitCanUsePageHeap (
    )
/*++

Routine Description:

    This routine decides if we have good conditions for a full page heap
    allocation to be successful. It checks two things: the pagefile commit
    available on the system and the virtual space available in the current
    process. Since full page heap uses at least 2 pages for each allocation
    it can potentially exhaust both these resources. The current criteria are:

    (1) if less than 32Mb of pagefile commit are left we switch to light
        page heap

    (2) if less than 128Mb of empty virtual space is left we switch to light
        page heap

    The RtlpDphVmLimitNoPageHeap flag is flipped with interlocked
    operations so the enable/disable transition is logged only once
    per state change.

Parameters:

    None.

Return Value:

    True if full page heap allocations are allowed and false otherwise.
    Note that failure of any of the information queries also returns
    false (full page heap disallowed for this request).

--*/
{
    //
    // The union keeps stack usage down: the three query buffers are
    // never needed at the same time.
    //
    union {
        SYSTEM_PERFORMANCE_INFORMATION PerfInfo;
        SYSTEM_BASIC_INFORMATION MemInfo;
        VM_COUNTERS VmCounters;
    } u;

    NTSTATUS Status;
    LONG Value;
    ULONGLONG Total;
    SYSINF_PAGE_COUNT CommitLimit;
    SYSINF_PAGE_COUNT CommittedPages;
    ULONG_PTR MinimumUserModeAddress;
    ULONG_PTR MaximumUserModeAddress;
    ULONG PageSize;
    SIZE_T VirtualSize;
    SIZE_T PagefileUsage;

    //
    // Find if full page heap is currently allowed (0 = allowed,
    // 1 = disabled by a previous limit hit). The interlocked compare
    // exchange with equal operands is just an atomic read.
    //
    Value = InterlockedCompareExchange (&RtlpDphVmLimitNoPageHeap,
                                        0,
                                        0);

    //
    // Query system for page file availability etc.
    //
    Status = NtQuerySystemInformation (SystemPerformanceInformation,
                                       &(u.PerfInfo),
                                       sizeof(u.PerfInfo),
                                       NULL);

    if (!NT_SUCCESS(Status)) {
        return FALSE;
    }

    CommitLimit = u.PerfInfo.CommitLimit;
    CommittedPages = u.PerfInfo.CommittedPages;

    //
    // General memory information.
    //
    // SilviuC: This is read-only stuff that should be done only once
    // during process startup.
    //
    Status = NtQuerySystemInformation (SystemBasicInformation,
                                       &(u.MemInfo),
                                       sizeof(u.MemInfo),
                                       NULL);

    if (!NT_SUCCESS(Status)) {
        return FALSE;
    }

    MinimumUserModeAddress = u.MemInfo.MinimumUserModeAddress;
    MaximumUserModeAddress = u.MemInfo.MaximumUserModeAddress;
    PageSize = u.MemInfo.PageSize;

    //
    // Process memory counters.
    //
    Status = NtQueryInformationProcess (NtCurrentProcess(),
                                        ProcessVmCounters,
                                        &(u.VmCounters),
                                        sizeof(u.VmCounters),
                                        NULL);

    if (!NT_SUCCESS(Status)) {
        return FALSE;
    }

    VirtualSize = u.VmCounters.VirtualSize;
    PagefileUsage = u.VmCounters.PagefileUsage;

    //
    // First check that we have enough virtual space left in the process.
    // If less than 128Mb are left we will disable full page heap allocs.
    //
    // NOTE(review): `Total - VirtualSize' is unsigned arithmetic; it
    // assumes VirtualSize never exceeds the user-mode address span —
    // verify, since a wraparound would make the check pass spuriously.
    //
    Total = (MaximumUserModeAddress - MinimumUserModeAddress);

    if (Total - VirtualSize < 128 * SIZE_1_MB) {

        if (Value == 0) {

            //
            // Log only on the transition from enabled to disabled.
            //
            if (DEBUG_OPTION (DBG_SHOW_VM_LIMITS)) {

                DbgPrintEx (DPFLTR_VERIFIER_ID,
                            DPFLTR_INFO_LEVEL,
                            "Page heap: pid 0x%X: vm limit: vspace: disabling full page heap \n",
                            PROCESS_ID());
            }
        }

        InterlockedIncrement (&(RtlpDphVmLimitHits[0]));
        InterlockedExchange (&RtlpDphVmLimitNoPageHeap, 1);
        return FALSE;
    }

    //
    // Next check for page file availability. If less than 32Mb are
    // available for commit we disable full page heap. Note that
    // CommitLimit does not reflect future pagefile extension potential.
    // Therefore pageheap will scale down even if the pagefile has not
    // been extended to its maximum.
    //
    Total = CommitLimit - CommittedPages;
    Total *= PageSize;

    if (Total - PagefileUsage < 32 * SIZE_1_MB) {

        if (Value == 0) {

            if (DEBUG_OPTION (DBG_SHOW_VM_LIMITS)) {

                DbgPrintEx (DPFLTR_VERIFIER_ID,
                            DPFLTR_INFO_LEVEL,
                            "Page heap: pid 0x%X: vm limit: pfile: disabling full page heap \n",
                            PROCESS_ID());
            }
        }

        InterlockedIncrement (&(RtlpDphVmLimitHits[1]));
        InterlockedExchange (&RtlpDphVmLimitNoPageHeap, 1);
        return FALSE;
    }

    //
    // Resources are healthy again: re-enable full page heap if it was
    // previously disabled, logging the transition.
    //
    if (Value == 1) {

        if (DEBUG_OPTION (DBG_SHOW_VM_LIMITS)) {

            DbgPrintEx (DPFLTR_VERIFIER_ID,
                        DPFLTR_INFO_LEVEL,
                        "Page heap: pid 0x%X: vm limit: reenabling full page heap \n",
                        PROCESS_ID());
        }

        InterlockedExchange (&RtlpDphVmLimitNoPageHeap, 0);
    }

    return TRUE;
}
  3835. /////////////////////////////////////////////////////////////////////
  3836. //////////////////////////////////// DPH_BLOCK_INFORMATION management
  3837. /////////////////////////////////////////////////////////////////////
VOID
RtlpDphReportCorruptedBlock (
    PVOID Heap,
    ULONG Context,
    PVOID Block,
    ULONG Reason
    )
/*++

Routine Description:

    Reports a corrupted heap block by raising a VERIFIER_STOP for every
    DPH_ERROR_* bit set in `Reason', finishing with an unconditional
    catch-all stop. The block header is copied locally under SEH
    protection first so the individual reports can include the owning
    heap and stamp values when they are readable.

    NOTE(review): control appears to fall through every matching report
    into the final catch-all VERIFIER_STOP; whether execution actually
    continues past each stop depends on the VERIFIER_STOP macro defined
    elsewhere — confirm before relying on this.

Arguments:

    Heap - heap handle used in the failing operation.

    Context - DPH_CONTEXT_* identifier of the calling operation.
        NOTE(review): not referenced in this body; presumably kept for
        debugging/logging symmetry with callers — confirm.

    Block - user pointer of the suspect heap block.

    Reason - bitmask of DPH_ERROR_* flags describing the corruption.

Return Value:

    None.

--*/
{
    SIZE_T Size;
    DPH_BLOCK_INFORMATION Info;
    BOOLEAN InfoRead = FALSE;
    BOOLEAN SizeRead = FALSE;

    //
    // Copy the block header locally. The header lives immediately in
    // front of the user pointer and may itself be unmapped or garbage,
    // hence the exception guard.
    //

    try {
        RtlCopyMemory (&Info, (PDPH_BLOCK_INFORMATION)Block - 1, sizeof Info);
        InfoRead = TRUE;
    }
    except (EXCEPTION_EXECUTE_HANDLER) {
        // Header unreadable; InfoRead stays FALSE and the reports below
        // substitute zero for header-derived fields.
    }

    if (RtlpDphGetBlockSizeFromCorruptedBlock (Block, &Size)) {
        SizeRead = TRUE;
    }

    //
    // If we did not even manage to read the entire block header
    // report exception. If we managed to read the header we will let it
    // run through the other messages and only in the end report exception.
    //

    if (!InfoRead && (Reason & DPH_ERROR_RAISED_EXCEPTION)) {
        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "exception raised while verifying block header",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       0, "");
    }

    if ((Reason & DPH_ERROR_DOUBLE_FREE)) {
        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "block already freed",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       0, "");
    }

    if ((Reason & DPH_ERROR_CORRUPTED_INFIX_PATTERN)) {
        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "corrupted infix pattern for freed block",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       0, "");
    }

    if ((Reason & DPH_ERROR_CORRUPTED_HEAP_POINTER)) {
        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "corrupted heap pointer or using wrong heap",
                       Heap, "Heap used in the call",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       (InfoRead ? (UNSCRAMBLE_POINTER(Info.Heap)) : 0), "Heap owning the block");
    }

    if ((Reason & DPH_ERROR_CORRUPTED_SUFFIX_PATTERN)) {
        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "corrupted suffix pattern",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       0, "");
    }

    if ((Reason & DPH_ERROR_CORRUPTED_PREFIX_PATTERN)) {
        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "corrupted prefix pattern",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       0, "");
    }

    if ((Reason & DPH_ERROR_CORRUPTED_START_STAMP)) {
        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "corrupted start stamp",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       (InfoRead ? Info.StartStamp : 0), "Corrupted stamp");
    }

    if ((Reason & DPH_ERROR_CORRUPTED_END_STAMP)) {
        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "corrupted end stamp",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       (InfoRead ? Info.EndStamp : 0), "Corrupted stamp");
    }

    if ((Reason & DPH_ERROR_RAISED_EXCEPTION)) {
        VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                       "exception raised while verifying block",
                       Heap, "Heap handle",
                       Block, "Heap block",
                       (SizeRead ? Size : 0), "Block size",
                       0, "");
    }

    //
    // Catch all case.
    //

    VERIFIER_STOP (APPLICATION_VERIFIER_CORRUPTED_HEAP_BLOCK,
                   "corrupted heap block",
                   Heap, "Heap handle",
                   Block, "Heap block",
                   (SizeRead ? Size : 0), "Block size",
                   0, "");
}
  3946. BOOLEAN
  3947. RtlpDphIsPageHeapBlock (
  3948. PDPH_HEAP_ROOT Heap,
  3949. PVOID Block,
  3950. PULONG Reason,
  3951. BOOLEAN CheckPattern
  3952. )
  3953. {
  3954. PDPH_BLOCK_INFORMATION Info;
  3955. BOOLEAN Corrupted = FALSE;
  3956. PUCHAR Current;
  3957. PUCHAR FillStart;
  3958. PUCHAR FillEnd;
  3959. ASSERT (Reason != NULL);
  3960. *Reason = 0;
  3961. try {
  3962. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  3963. //
  3964. // Start checking ...
  3965. //
  3966. if (Info->StartStamp != DPH_PAGE_BLOCK_START_STAMP_ALLOCATED) {
  3967. *Reason |= DPH_ERROR_CORRUPTED_START_STAMP;
  3968. Corrupted = TRUE;
  3969. if (Info->StartStamp == DPH_PAGE_BLOCK_START_STAMP_FREE) {
  3970. *Reason |= DPH_ERROR_DOUBLE_FREE;
  3971. }
  3972. }
  3973. if (Info->EndStamp != DPH_PAGE_BLOCK_END_STAMP_ALLOCATED) {
  3974. *Reason |= DPH_ERROR_CORRUPTED_END_STAMP;
  3975. Corrupted = TRUE;
  3976. }
  3977. if (Info->Heap != Heap) {
  3978. *Reason |= DPH_ERROR_CORRUPTED_HEAP_POINTER;
  3979. Corrupted = TRUE;
  3980. }
  3981. //
  3982. // Check the block suffix byte pattern.
  3983. //
  3984. if (CheckPattern) {
  3985. FillStart = (PUCHAR)Block + Info->RequestedSize;
  3986. FillEnd = (PUCHAR)ROUNDUP2((ULONG_PTR)FillStart, PAGE_SIZE);
  3987. for (Current = FillStart; Current < FillEnd; Current++) {
  3988. if (*Current != DPH_PAGE_BLOCK_SUFFIX) {
  3989. *Reason |= DPH_ERROR_CORRUPTED_SUFFIX_PATTERN;
  3990. Corrupted = TRUE;
  3991. break;
  3992. }
  3993. }
  3994. }
  3995. }
  3996. except (EXCEPTION_EXECUTE_HANDLER) {
  3997. *Reason |= DPH_ERROR_RAISED_EXCEPTION;
  3998. Corrupted = TRUE;
  3999. }
  4000. if (Corrupted) {
  4001. return FALSE;
  4002. }
  4003. else {
  4004. return TRUE;
  4005. }
  4006. }
  4007. BOOLEAN
  4008. RtlpDphIsNormalHeapBlock (
  4009. PDPH_HEAP_ROOT Heap,
  4010. PVOID Block,
  4011. PULONG Reason,
  4012. BOOLEAN CheckPattern
  4013. )
  4014. {
  4015. PDPH_BLOCK_INFORMATION Info;
  4016. BOOLEAN Corrupted = FALSE;
  4017. PUCHAR Current;
  4018. PUCHAR FillStart;
  4019. PUCHAR FillEnd;
  4020. ASSERT (Reason != NULL);
  4021. *Reason = 0;
  4022. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  4023. try {
  4024. if (UNSCRAMBLE_POINTER(Info->Heap) != Heap) {
  4025. *Reason |= DPH_ERROR_CORRUPTED_HEAP_POINTER;
  4026. Corrupted = TRUE;
  4027. }
  4028. if (Info->StartStamp != DPH_NORMAL_BLOCK_START_STAMP_ALLOCATED) {
  4029. *Reason |= DPH_ERROR_CORRUPTED_START_STAMP;
  4030. Corrupted = TRUE;
  4031. if (Info->StartStamp == DPH_NORMAL_BLOCK_START_STAMP_FREE) {
  4032. *Reason |= DPH_ERROR_DOUBLE_FREE;
  4033. }
  4034. }
  4035. if (Info->EndStamp != DPH_NORMAL_BLOCK_END_STAMP_ALLOCATED) {
  4036. *Reason |= DPH_ERROR_CORRUPTED_END_STAMP;
  4037. Corrupted = TRUE;
  4038. }
  4039. //
  4040. // Check the block suffix byte pattern.
  4041. //
  4042. if (CheckPattern) {
  4043. FillStart = (PUCHAR)Block + Info->RequestedSize;
  4044. FillEnd = FillStart + USER_ALIGNMENT;
  4045. for (Current = FillStart; Current < FillEnd; Current++) {
  4046. if (*Current != DPH_NORMAL_BLOCK_SUFFIX) {
  4047. *Reason |= DPH_ERROR_CORRUPTED_SUFFIX_PATTERN;
  4048. Corrupted = TRUE;
  4049. break;
  4050. }
  4051. }
  4052. }
  4053. }
  4054. except (EXCEPTION_EXECUTE_HANDLER) {
  4055. *Reason |= DPH_ERROR_RAISED_EXCEPTION;
  4056. Corrupted = TRUE;
  4057. }
  4058. if (Corrupted) {
  4059. return FALSE;
  4060. }
  4061. else {
  4062. return TRUE;
  4063. }
  4064. }
  4065. BOOLEAN
  4066. RtlpDphIsNormalFreeHeapBlock (
  4067. PVOID Block,
  4068. PULONG Reason,
  4069. BOOLEAN CheckPattern
  4070. )
  4071. {
  4072. PDPH_BLOCK_INFORMATION Info;
  4073. BOOLEAN Corrupted = FALSE;
  4074. PUCHAR Current;
  4075. PUCHAR FillStart;
  4076. PUCHAR FillEnd;
  4077. ASSERT (Reason != NULL);
  4078. *Reason = 0;
  4079. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  4080. try {
  4081. //
  4082. // If heap pointer is null we will just ignore this field.
  4083. // This can happen during heap destroy operations where
  4084. // the page heap got destroyed but the normal heap is still
  4085. // alive.
  4086. //
  4087. if (Info->StartStamp != DPH_NORMAL_BLOCK_START_STAMP_FREE) {
  4088. *Reason |= DPH_ERROR_CORRUPTED_START_STAMP;
  4089. Corrupted = TRUE;
  4090. }
  4091. if (Info->EndStamp != DPH_NORMAL_BLOCK_END_STAMP_FREE) {
  4092. *Reason |= DPH_ERROR_CORRUPTED_END_STAMP;
  4093. Corrupted = TRUE;
  4094. }
  4095. //
  4096. // Check the block suffix byte pattern.
  4097. //
  4098. if (CheckPattern) {
  4099. FillStart = (PUCHAR)Block + Info->RequestedSize;
  4100. FillEnd = FillStart + USER_ALIGNMENT;
  4101. for (Current = FillStart; Current < FillEnd; Current++) {
  4102. if (*Current != DPH_NORMAL_BLOCK_SUFFIX) {
  4103. *Reason |= DPH_ERROR_CORRUPTED_SUFFIX_PATTERN;
  4104. Corrupted = TRUE;
  4105. break;
  4106. }
  4107. }
  4108. }
  4109. //
  4110. // Check the block infix byte pattern.
  4111. //
  4112. if (CheckPattern) {
  4113. FillStart = (PUCHAR)Block;
  4114. FillEnd = FillStart
  4115. + ((Info->RequestedSize > USER_ALIGNMENT) ? USER_ALIGNMENT : Info->RequestedSize);
  4116. for (Current = FillStart; Current < FillEnd; Current++) {
  4117. if (*Current != DPH_FREE_BLOCK_INFIX) {
  4118. *Reason |= DPH_ERROR_CORRUPTED_INFIX_PATTERN;
  4119. Corrupted = TRUE;
  4120. break;
  4121. }
  4122. }
  4123. }
  4124. }
  4125. except (EXCEPTION_EXECUTE_HANDLER) {
  4126. *Reason |= DPH_ERROR_RAISED_EXCEPTION;
  4127. Corrupted = TRUE;
  4128. }
  4129. if (Corrupted) {
  4130. return FALSE;
  4131. }
  4132. else {
  4133. return TRUE;
  4134. }
  4135. }
  4136. BOOLEAN
  4137. RtlpDphWritePageHeapBlockInformation (
  4138. PDPH_HEAP_ROOT Heap,
  4139. ULONG HeapFlags,
  4140. PVOID Block,
  4141. SIZE_T RequestedSize,
  4142. SIZE_T ActualSize
  4143. )
  4144. {
  4145. PDPH_BLOCK_INFORMATION Info;
  4146. PUCHAR FillStart;
  4147. PUCHAR FillEnd;
  4148. ULONG Hash;
  4149. //
  4150. // Size and stamp information
  4151. //
  4152. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  4153. Info->Heap = Heap;
  4154. Info->RequestedSize = RequestedSize;
  4155. Info->ActualSize = ActualSize;
  4156. Info->StartStamp = DPH_PAGE_BLOCK_START_STAMP_ALLOCATED;
  4157. Info->EndStamp = DPH_PAGE_BLOCK_END_STAMP_ALLOCATED;
  4158. //
  4159. // Fill the block suffix pattern.
  4160. // We fill up to USER_ALIGNMENT bytes.
  4161. //
  4162. FillStart = (PUCHAR)Block + RequestedSize;
  4163. FillEnd = (PUCHAR)ROUNDUP2((ULONG_PTR)FillStart, PAGE_SIZE);
  4164. RtlFillMemory (FillStart, FillEnd - FillStart, DPH_PAGE_BLOCK_SUFFIX);
  4165. //
  4166. // Call the old logging function (SteveWo's trace database).
  4167. // We do this so that tools that are used for leak detection
  4168. // (e.g. umdh) will work even if page heap is enabled.
  4169. // If the trace database was not created this function will
  4170. // return immediately.
  4171. //
  4172. if ((HeapFlags & PAGE_HEAP_NO_UMDH_SUPPORT)) {
  4173. Info->TraceIndex = 0;
  4174. }
  4175. else {
  4176. Info->TraceIndex = RtlLogStackBackTrace ();
  4177. }
  4178. //
  4179. // Capture stack trace
  4180. //
  4181. if ((HeapFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) {
  4182. Info->StackTrace = RtlpGetStackTraceAddress (Info->TraceIndex);
  4183. }
  4184. else {
  4185. Info->StackTrace = NULL;
  4186. }
  4187. return TRUE;
  4188. }
  4189. BOOLEAN
  4190. RtlpDphWriteNormalHeapBlockInformation (
  4191. PDPH_HEAP_ROOT Heap,
  4192. PVOID Block,
  4193. SIZE_T RequestedSize,
  4194. SIZE_T ActualSize
  4195. )
  4196. {
  4197. PDPH_BLOCK_INFORMATION Info;
  4198. PUCHAR FillStart;
  4199. PUCHAR FillEnd;
  4200. ULONG Hash;
  4201. ULONG Reason;
  4202. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  4203. //
  4204. // Size and stamp information
  4205. //
  4206. Info->Heap = SCRAMBLE_POINTER(Heap);
  4207. Info->RequestedSize = RequestedSize;
  4208. Info->ActualSize = ActualSize;
  4209. Info->StartStamp = DPH_NORMAL_BLOCK_START_STAMP_ALLOCATED;
  4210. Info->EndStamp = DPH_NORMAL_BLOCK_END_STAMP_ALLOCATED;
  4211. Info->FreeQueue.Blink = NULL;
  4212. Info->FreeQueue.Flink = NULL;
  4213. //
  4214. // Fill the block suffix pattern.
  4215. // We fill only USER_ALIGNMENT bytes.
  4216. //
  4217. FillStart = (PUCHAR)Block + RequestedSize;
  4218. FillEnd = FillStart + USER_ALIGNMENT;
  4219. RtlFillMemory (FillStart, FillEnd - FillStart, DPH_NORMAL_BLOCK_SUFFIX);
  4220. //
  4221. // Call the old logging function (SteveWo's trace database).
  4222. // We do this so that tools that are used for leak detection
  4223. // (e.g. umdh) will work even if page heap is enabled.
  4224. // If the trace database was not created this function will
  4225. // return immediately.
  4226. //
  4227. if ((Heap->ExtraFlags & PAGE_HEAP_NO_UMDH_SUPPORT)) {
  4228. Info->TraceIndex = 0;
  4229. }
  4230. else {
  4231. Info->TraceIndex = RtlLogStackBackTrace ();
  4232. }
  4233. //
  4234. // Capture stack trace
  4235. //
  4236. Info->StackTrace = RtlpGetStackTraceAddress (Info->TraceIndex);
  4237. return TRUE;
  4238. }
  4239. BOOLEAN
  4240. RtlpDphGetBlockSizeFromCorruptedBlock (
  4241. PVOID Block,
  4242. PSIZE_T Size
  4243. )
  4244. //
  4245. // This function gets called from RtlpDphReportCorruptedBlock only.
  4246. // It tries to extract a size for the block when an error is reported.
  4247. // If it cannot get the size it will return false.
  4248. //
  4249. {
  4250. PDPH_BLOCK_INFORMATION Info;
  4251. BOOLEAN Success = FALSE;
  4252. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  4253. try {
  4254. if (Info->StartStamp == DPH_NORMAL_BLOCK_START_STAMP_FREE
  4255. || Info->StartStamp == DPH_NORMAL_BLOCK_START_STAMP_ALLOCATED
  4256. || Info->StartStamp == DPH_PAGE_BLOCK_START_STAMP_FREE
  4257. || Info->StartStamp == DPH_NORMAL_BLOCK_START_STAMP_ALLOCATED) {
  4258. *Size = Info->RequestedSize;
  4259. Success = TRUE;
  4260. }
  4261. else {
  4262. Success = FALSE;
  4263. }
  4264. }
  4265. except (EXCEPTION_EXECUTE_HANDLER) {
  4266. Success = FALSE;
  4267. }
  4268. return Success;
  4269. }
  4270. /////////////////////////////////////////////////////////////////////
  4271. /////////////////////////////// Normal heap allocation/free functions
  4272. /////////////////////////////////////////////////////////////////////
  4273. PVOID
  4274. RtlpDphNormalHeapAllocate (
  4275. PDPH_HEAP_ROOT Heap,
  4276. PVOID NtHeap,
  4277. ULONG Flags,
  4278. SIZE_T Size
  4279. )
  4280. {
  4281. PVOID Block;
  4282. PDPH_BLOCK_INFORMATION Info;
  4283. ULONG Hash;
  4284. SIZE_T ActualSize;
  4285. SIZE_T RequestedSize;
  4286. ULONG Reason;
  4287. RequestedSize = Size;
  4288. ActualSize = Size + sizeof(DPH_BLOCK_INFORMATION) + USER_ALIGNMENT;
  4289. //
  4290. // We need to reset the NO_SERIALIZE flag because a free operation can be
  4291. // active in another thread due to free delayed cache trimming. If the
  4292. // allocation operation will raise an exception (e.g. OUT_OF_MEMORY) we are
  4293. // safe to let it go here. It will be caught by the exception handler
  4294. // established in the main page heap entry (RtlpDebugPageHeapAlloc).
  4295. //
  4296. Block = RtlAllocateHeap (NtHeap,
  4297. Flags & (~HEAP_NO_SERIALIZE),
  4298. ActualSize);
  4299. if (Block == NULL) {
  4300. //
  4301. // If we have memory pressure we might want
  4302. // to trim the delayed free queues. We do not do this
  4303. // right now because the threshold is kind of small and there
  4304. // are many benefits in keeping this cache around.
  4305. //
  4306. return NULL;
  4307. }
  4308. RtlpDphWriteNormalHeapBlockInformation (Heap,
  4309. (PDPH_BLOCK_INFORMATION)Block + 1,
  4310. RequestedSize,
  4311. ActualSize);
  4312. if (! (Flags & HEAP_ZERO_MEMORY)) {
  4313. RtlFillMemory ((PDPH_BLOCK_INFORMATION)Block + 1,
  4314. RequestedSize,
  4315. DPH_NORMAL_BLOCK_INFIX);
  4316. }
  4317. return (PVOID)((PDPH_BLOCK_INFORMATION)Block + 1);
  4318. }
  4319. BOOLEAN
  4320. RtlpDphNormalHeapFree (
  4321. PDPH_HEAP_ROOT Heap,
  4322. PVOID NtHeap,
  4323. ULONG Flags,
  4324. PVOID Block
  4325. )
  4326. {
  4327. PDPH_BLOCK_INFORMATION Info;
  4328. ULONG Reason;
  4329. ULONG Hash;
  4330. SIZE_T TrimSize;
  4331. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  4332. if (! RtlpDphIsNormalHeapBlock(Heap, Block, &Reason, TRUE)) {
  4333. RtlpDphReportCorruptedBlock (Heap,
  4334. DPH_CONTEXT_NORMAL_PAGE_HEAP_FREE,
  4335. Block,
  4336. Reason);
  4337. return FALSE;
  4338. }
  4339. //
  4340. // Save the free stack trace.
  4341. //
  4342. Info->StackTrace = RtlpDphLogStackTrace (3);
  4343. //
  4344. // Mark the block as freed.
  4345. //
  4346. Info->StartStamp -= 1;
  4347. Info->EndStamp -= 1;
  4348. //
  4349. // Wipe out all the information in the block so that it cannot
  4350. // be used while free. The pattern looks like a kernel pointer
  4351. // and if we are lucky enough the buggy code might use a value
  4352. // from the block as a pointer and instantly access violate.
  4353. //
  4354. RtlFillMemory (Info + 1,
  4355. Info->RequestedSize,
  4356. DPH_FREE_BLOCK_INFIX);
  4357. //
  4358. // Add block to the delayed free queue.
  4359. //
  4360. RtlpDphAddToDelayedFreeQueue (Info);
  4361. return TRUE;
  4362. }
  4363. PVOID
  4364. RtlpDphNormalHeapReAllocate (
  4365. PDPH_HEAP_ROOT Heap,
  4366. PVOID NtHeap,
  4367. ULONG Flags,
  4368. PVOID OldBlock,
  4369. SIZE_T Size
  4370. )
  4371. {
  4372. PVOID Block;
  4373. PDPH_BLOCK_INFORMATION Info;
  4374. ULONG Hash;
  4375. SIZE_T CopySize;
  4376. ULONG Reason;
  4377. Info = (PDPH_BLOCK_INFORMATION)OldBlock - 1;
  4378. if (! RtlpDphIsNormalHeapBlock(Heap, OldBlock, &Reason, TRUE)) {
  4379. RtlpDphReportCorruptedBlock (Heap,
  4380. DPH_CONTEXT_NORMAL_PAGE_HEAP_REALLOC,
  4381. OldBlock,
  4382. Reason);
  4383. return NULL;
  4384. }
  4385. //
  4386. // Deal separately with the case where request is made with
  4387. // HEAP_REALLOC_IN_PLACE_ONLY flag and the new size is smaller than
  4388. // the old size. For these cases we will just resize the block.
  4389. // If the flag is used and the size is bigger we will fail always
  4390. // the call.
  4391. //
  4392. if ((Flags & HEAP_REALLOC_IN_PLACE_ONLY)) {
  4393. if (Info->RequestedSize < Size) {
  4394. BUMP_COUNTER (CNT_REALLOC_IN_PLACE_BIGGER);
  4395. return NULL;
  4396. }
  4397. else {
  4398. PUCHAR FillStart;
  4399. PUCHAR FillEnd;
  4400. Info->RequestedSize = Size;
  4401. FillStart = (PUCHAR)OldBlock + Info->RequestedSize;
  4402. FillEnd = FillStart + USER_ALIGNMENT;
  4403. RtlFillMemory (FillStart, FillEnd - FillStart, DPH_NORMAL_BLOCK_SUFFIX);
  4404. BUMP_COUNTER (CNT_REALLOC_IN_PLACE_SMALLER);
  4405. return OldBlock;
  4406. }
  4407. }
  4408. Block = RtlpDphNormalHeapAllocate (Heap,
  4409. NtHeap,
  4410. Flags,
  4411. Size);
  4412. if (Block == NULL) {
  4413. return NULL;
  4414. }
  4415. //
  4416. // Copy old block stuff into the new block and then
  4417. // free old block.
  4418. //
  4419. if (Size < Info->RequestedSize) {
  4420. CopySize = Size;
  4421. }
  4422. else {
  4423. CopySize = Info->RequestedSize;
  4424. }
  4425. RtlCopyMemory (Block, OldBlock, CopySize);
  4426. //
  4427. // Free the old guy.
  4428. //
  4429. RtlpDphNormalHeapFree (Heap,
  4430. NtHeap,
  4431. Flags,
  4432. OldBlock);
  4433. return Block;
  4434. }
  4435. SIZE_T
  4436. RtlpDphNormalHeapSize (
  4437. PDPH_HEAP_ROOT Heap,
  4438. PVOID NtHeap,
  4439. ULONG Flags,
  4440. PVOID Block
  4441. )
  4442. {
  4443. PDPH_BLOCK_INFORMATION Info;
  4444. SIZE_T Result;
  4445. ULONG Reason;
  4446. Info = (PDPH_BLOCK_INFORMATION)Block - 1;
  4447. if (! RtlpDphIsNormalHeapBlock(Heap, Block, &Reason, FALSE)) {
  4448. //
  4449. // We cannot stop here for a wrong block.
  4450. // The users might use this function to validate
  4451. // if a block belongs to the heap or not. However
  4452. // they should use HeapValidate for that.
  4453. //
  4454. #if DBG
  4455. DbgPrintEx (DPFLTR_VERIFIER_ID,
  4456. DPFLTR_WARNING_LEVEL,
  4457. "Page heap: warning: HeapSize called with "
  4458. "invalid block @ %p (reason %0X) \n",
  4459. Block,
  4460. Reason);
  4461. #endif
  4462. return (SIZE_T)-1;
  4463. }
  4464. Result = RtlSizeHeap (NtHeap,
  4465. Flags,
  4466. Info);
  4467. if (Result == (SIZE_T)-1) {
  4468. return Result;
  4469. }
  4470. else {
  4471. return Result - sizeof(*Info) - USER_ALIGNMENT;
  4472. }
  4473. }
  4474. BOOLEAN
  4475. RtlpDphNormalHeapSetUserFlags(
  4476. IN PDPH_HEAP_ROOT Heap,
  4477. PVOID NtHeap,
  4478. IN ULONG Flags,
  4479. IN PVOID Address,
  4480. IN ULONG UserFlagsReset,
  4481. IN ULONG UserFlagsSet
  4482. )
  4483. {
  4484. BOOLEAN Success;
  4485. ULONG Reason;
  4486. if (! RtlpDphIsNormalHeapBlock(Heap, Address, &Reason, FALSE)) {
  4487. RtlpDphReportCorruptedBlock (Heap,
  4488. DPH_CONTEXT_NORMAL_PAGE_HEAP_SETFLAGS,
  4489. Address,
  4490. Reason);
  4491. return FALSE;
  4492. }
  4493. Success = RtlSetUserFlagsHeap (NtHeap,
  4494. Flags,
  4495. (PDPH_BLOCK_INFORMATION)Address - 1,
  4496. UserFlagsReset,
  4497. UserFlagsSet);
  4498. return Success;
  4499. }
  4500. BOOLEAN
  4501. RtlpDphNormalHeapSetUserValue(
  4502. IN PDPH_HEAP_ROOT Heap,
  4503. PVOID NtHeap,
  4504. IN ULONG Flags,
  4505. IN PVOID Address,
  4506. IN PVOID UserValue
  4507. )
  4508. {
  4509. BOOLEAN Success;
  4510. ULONG Reason;
  4511. if (! RtlpDphIsNormalHeapBlock(Heap, Address, &Reason, FALSE)) {
  4512. RtlpDphReportCorruptedBlock (Heap,
  4513. DPH_CONTEXT_NORMAL_PAGE_HEAP_SETVALUE,
  4514. Address,
  4515. Reason);
  4516. return FALSE;
  4517. }
  4518. Success = RtlSetUserValueHeap (NtHeap,
  4519. Flags,
  4520. (PDPH_BLOCK_INFORMATION)Address - 1,
  4521. UserValue);
  4522. return Success;
  4523. }
  4524. BOOLEAN
  4525. RtlpDphNormalHeapGetUserInfo(
  4526. IN PDPH_HEAP_ROOT Heap,
  4527. PVOID NtHeap,
  4528. IN ULONG Flags,
  4529. IN PVOID Address,
  4530. OUT PVOID* UserValue,
  4531. OUT PULONG UserFlags
  4532. )
  4533. {
  4534. BOOLEAN Success;
  4535. ULONG Reason;
  4536. if (! RtlpDphIsNormalHeapBlock(Heap, Address, &Reason, FALSE)) {
  4537. //
  4538. // We do not complain about the block because this API gets called by GlobalFlags and
  4539. // it is documented as accepting bogus pointers.
  4540. //
  4541. #if 0
  4542. RtlpDphReportCorruptedBlock (Heap,
  4543. DPH_CONTEXT_NORMAL_PAGE_HEAP_GETINFO,
  4544. Address,
  4545. Reason);
  4546. #endif
  4547. return FALSE;
  4548. }
  4549. Success = RtlGetUserInfoHeap (NtHeap,
  4550. Flags,
  4551. (PDPH_BLOCK_INFORMATION)Address - 1,
  4552. UserValue,
  4553. UserFlags);
  4554. return Success;
  4555. }
  4556. BOOLEAN
  4557. RtlpDphNormalHeapValidate(
  4558. IN PDPH_HEAP_ROOT Heap,
  4559. PVOID NtHeap,
  4560. IN ULONG Flags,
  4561. IN PVOID Address
  4562. )
  4563. {
  4564. BOOLEAN Success;
  4565. ULONG Reason;
  4566. if (Address == NULL) {
  4567. //
  4568. // Validation for the whole heap.
  4569. //
  4570. Success = RtlValidateHeap (NtHeap,
  4571. Flags,
  4572. Address);
  4573. }
  4574. else {
  4575. //
  4576. // Validation for a heap block.
  4577. //
  4578. if (! RtlpDphIsNormalHeapBlock(Heap, Address, &Reason, TRUE)) {
  4579. //
  4580. // We cannot break in this case because the function might indeed
  4581. // be called with invalid block. On checked builds we print a
  4582. // warning just in case the invalid block was not intended.
  4583. //
  4584. #if DBG
  4585. DbgPrintEx (DPFLTR_VERIFIER_ID,
  4586. DPFLTR_WARNING_LEVEL,
  4587. "Page heap: warning: validate called with "
  4588. "invalid block @ %p (reason %0X) \n",
  4589. Address, Reason);
  4590. #endif
  4591. return FALSE;
  4592. }
  4593. Success = RtlValidateHeap (NtHeap,
  4594. Flags,
  4595. (PDPH_BLOCK_INFORMATION)Address - 1);
  4596. }
  4597. return Success;
  4598. }
  4599. /////////////////////////////////////////////////////////////////////
  4600. ////////////////////////////////// Delayed free queue for normal heap
  4601. /////////////////////////////////////////////////////////////////////
//
// State for the delayed free queue used by "normal heap" page heap
// blocks: freed blocks are kept around (filled with a recognizable
// pattern) to catch use-after-free, and are only really released to
// the NT heap when the cache exceeds its size limit.
//
RTL_CRITICAL_SECTION RtlpDphDelayedFreeQueueLock;   // guards all the queue state below
SIZE_T RtlpDphMemoryUsedByDelayedFreeBlocks;        // sum of ActualSize over queued blocks
SIZE_T RtlpDphNumberOfDelayedFreeBlocks;            // count of queued blocks
LIST_ENTRY RtlpDphDelayedFreeQueue;                 // FIFO of delayed-free block headers
SLIST_HEADER RtlpDphDelayedTemporaryPushList;       // lock-free side list (code using it is currently #if 0'd out)
LONG RtlpDphDelayedTemporaryPushCount;              // pushes onto the side list (same disabled path)
LONG RtlpDphDelayedQueueTrims;                      // number of trim passes performed
  4609. NTSTATUS
  4610. RtlpDphInitializeDelayedFreeQueue (
  4611. VOID
  4612. )
  4613. {
  4614. NTSTATUS Status;
  4615. Status = RtlInitializeCriticalSection (&RtlpDphDelayedFreeQueueLock);
  4616. if (! NT_SUCCESS(Status)) {
  4617. BUMP_COUNTER (CNT_INITIALIZE_CS_FAILURES);
  4618. return Status;
  4619. }
  4620. else {
  4621. InitializeListHead (&RtlpDphDelayedFreeQueue);
  4622. RtlInitializeSListHead (&RtlpDphDelayedTemporaryPushList);
  4623. RtlpDphMemoryUsedByDelayedFreeBlocks = 0;
  4624. RtlpDphNumberOfDelayedFreeBlocks = 0;
  4625. return Status;
  4626. }
  4627. }
VOID
RtlpDphAddToDelayedFreeQueue (
    PDPH_BLOCK_INFORMATION Info
    )
/*++

Routine Description:

    This routine adds a block to the delayed free queue and then, if
    the queue exceeded a high watermark, trims it: the blocks removed
    are verified and really freed into the owning NT heap.

Arguments:

    Info: pointer to a block to be "freed".

Return Value:

    None.

Environment:

    Called from RtlpDphNormalFree (normal heap management) routines.

--*/
{
    BOOLEAN LockAcquired;
    volatile PSLIST_ENTRY Current;
    PSLIST_ENTRY Next;
    PDPH_BLOCK_INFORMATION Block;
    SIZE_T TrimSize;
    SIZE_T Trimmed;
    PLIST_ENTRY ListEntry;
    ULONG Reason;

    RtlEnterCriticalSection (&RtlpDphDelayedFreeQueueLock);

    //
    // The region below is a disabled (lock-free fast path) variant kept
    // for reference; when enabled it replaces the unconditional
    // RtlEnterCriticalSection above. LockAcquired/Current/Next are only
    // used by this disabled code.
    //

#if 0
    LockAcquired = RtlTryEnterCriticalSection (&RtlpDphDelayedFreeQueueLock);

    //
    // If we do not manage to get the delayed queue lock we avoid waiting
    // by quickly pushing the block into a lock-free push list. The first
    // thread that manages to get the lock will flush the list.
    //

    if (LockAcquired == FALSE) {

        InterlockedIncrement (&RtlpDphDelayedTemporaryPushCount);

        RtlInterlockedPushEntrySList (&RtlpDphDelayedTemporaryPushList,
                                      &Info->FreePushList);
        return;
    }

    //
    // We managed to get the lock. First we empty the lock-free push list
    // into the delayed free queue.
    //
    // Note. `Current' variable is declared volatile because this is the
    // only reference to the blocks in temporary push list and if it is
    // kept in a register `!heap -l' (garbage collection leak detection)
    // will report false positives.
    //

    Current = RtlInterlockedFlushSList (&RtlpDphDelayedTemporaryPushList);

    while (Current != NULL) {

        Next = Current->Next;

        Block = CONTAINING_RECORD (Current,
                                   DPH_BLOCK_INFORMATION,
                                   FreePushList);

        InsertTailList (&RtlpDphDelayedFreeQueue,
                        &Block->FreeQueue);

        RtlpDphMemoryUsedByDelayedFreeBlocks += Block->ActualSize;
        RtlpDphNumberOfDelayedFreeBlocks += 1;

        Current = Next;
    }
#endif // #if 0

    //
    // Add the current block to the queue too.
    //

    InsertTailList (&(RtlpDphDelayedFreeQueue),
                    &(Info->FreeQueue));

    RtlpDphMemoryUsedByDelayedFreeBlocks += Info->ActualSize;
    RtlpDphNumberOfDelayedFreeBlocks += 1;

    //
    // Check if we need to trim the queue. If we have to do it we will
    // remove the blocks from the queue and free them one by one.
    //
    // NOTE. We cannot remove the blocks and push them into a local list and
    // then free them after releasing the queue lock because the heap to which
    // a block belongs may get destroyed. The synchronization between these frees
    // and a heap destroy operation is assured by the fact that heap destroy tries
    // to acquire the queue lock first and therefore there cannot be blocks to be
    // freed to a destroyed heap.
    //

    if (RtlpDphMemoryUsedByDelayedFreeBlocks > RtlpDphDelayedFreeCacheSize) {

        //
        // We add 64Kb to the amount to trim in order to avoid a
        // chainsaw effect where we end up trimming each time this function is called.
        // A trim will shave at least 64Kb of stuff so that next few calls will not need
        // to go through the trimming process.
        //

        TrimSize = RtlpDphMemoryUsedByDelayedFreeBlocks - RtlpDphDelayedFreeCacheSize + 0x10000;

        InterlockedIncrement (&RtlpDphDelayedQueueTrims);
    }
    else {
        TrimSize = 0;
    }

    //
    // Pop blocks from the front (oldest first) until we have trimmed
    // at least TrimSize bytes or the queue is empty.
    //

    for (Trimmed = 0; Trimmed < TrimSize; /* nothing */) {

        if (IsListEmpty(&RtlpDphDelayedFreeQueue)) {
            break;
        }

        ListEntry = RemoveHeadList (&RtlpDphDelayedFreeQueue);

        Block = CONTAINING_RECORD (ListEntry,
                                   DPH_BLOCK_INFORMATION,
                                   FreeQueue);

        //
        // Check out the block.
        //

        if (! RtlpDphIsNormalFreeHeapBlock(Block + 1, &Reason, TRUE)) {

            RtlpDphReportCorruptedBlock (NULL,
                                         DPH_CONTEXT_DELAYED_FREE,
                                         Block + 1,
                                         Reason);
        }

        // Decrement the stamps again so the block no longer matches the
        // "delayed free" stamps once it leaves our control.
        Block->StartStamp -= 1;
        Block->EndStamp -= 1;

        RtlpDphMemoryUsedByDelayedFreeBlocks -= Block->ActualSize;
        RtlpDphNumberOfDelayedFreeBlocks -= 1;
        Trimmed += Block->ActualSize;

        //
        // We call into NT heap to really free the block. Note that we
        // cannot use the original flags used for free because this free operation
        // may happen in another thread. Plus we do not want unsynchronized access
        // anyway.
        //

        RtlFreeHeap (((PDPH_HEAP_ROOT)(UNSCRAMBLE_POINTER(Block->Heap)))->NormalHeap,
                     0,
                     Block);
    }

    //
    // Release the delayed queue lock.
    //

    RtlLeaveCriticalSection (&RtlpDphDelayedFreeQueueLock);
}
  4757. // SilviuC: temporary debugging variable
  4758. PVOID RtlpDphPreviousBlock;
VOID
RtlpDphFreeDelayedBlocksFromHeap (
    PVOID PageHeap,
    PVOID NormalHeap
    )
/*++

Routine Description:

    This routine removes all blocks belonging to this heap (heap that is
    just about to be destroyed), checks them for fill patterns and then
    frees them into the normal heap. Along the way it also flushes the
    lock-free push list into the delayed free queue and trims the queue
    if it has grown past RtlpDphDelayedFreeCacheSize.

Arguments:

    PageHeap: page heap that will be destroyed and whose blocks need to
        be removed from the delayed free queue.

    NormalHeap: normal heap associated with PageHeap.

Return Value:

    None.

Environment:

    Called from RtlpDebugPageHeapDestroy routine.

--*/
{
    ULONG Reason;
    PDPH_BLOCK_INFORMATION Block;
    PLIST_ENTRY Current;
    PLIST_ENTRY Next;
    volatile PSLIST_ENTRY SingleCurrent;
    PSLIST_ENTRY SingleNext;
    LIST_ENTRY BlocksToFree;
    SIZE_T TrimSize;
    SIZE_T Trimmed;
    PLIST_ENTRY ListEntry;

    //
    // It is critical here to acquire the queue lock because this will synchronize
    // work with other threads that might have delayed blocks belonging to this heap
    // just about to be freed. Whoever gets the lock first will flush all these blocks
    // and we will never free into a destroyed heap.
    //

    RtlEnterCriticalSection (&RtlpDphDelayedFreeQueueLock);

    //
    // We managed to get the lock. First we empty the lock-free push list
    // into the delayed free queue.
    //
    // Note. `SingleCurrent' is declared volatile because it is the only
    // reference to the blocks in the temporary push list and if it is
    // kept in a register `!heap -l' (garbage collection leak detection)
    // will report false positives.
    //

    SingleCurrent = RtlInterlockedFlushSList (&RtlpDphDelayedTemporaryPushList);

    while (SingleCurrent != NULL) {

        SingleNext = SingleCurrent->Next;

        Block = CONTAINING_RECORD (SingleCurrent,
                                   DPH_BLOCK_INFORMATION,
                                   FreePushList);

        InsertTailList (&RtlpDphDelayedFreeQueue,
                        &Block->FreeQueue);

        //
        // Keep the global queue accounting in sync (protected by the
        // queue lock held above).
        //

        RtlpDphMemoryUsedByDelayedFreeBlocks += Block->ActualSize;
        RtlpDphNumberOfDelayedFreeBlocks += 1;

        SingleCurrent = SingleNext;
    }

    //
    // Trim the queue if there is accumulation of blocks. This step is very important
    // for processes in which HeapDestroy() is a very frequent operation because
    // trimming of the queue is normally done during HeapFree() but this happens
    // only if the lock protecting the queue is available (uses tryenter). So for such
    // cases if we do not do the trimming here the queue will grow without boundaries.
    //

    if (RtlpDphMemoryUsedByDelayedFreeBlocks > RtlpDphDelayedFreeCacheSize) {

        //
        // We add 64Kb to the amount to trim in order to avoid a chainsaw effect
        // where we end up trimming each time this function is called. A trim will
        // shave at least 64Kb of stuff so that the next few calls will not need
        // to go through the trimming process.
        //

        TrimSize = RtlpDphMemoryUsedByDelayedFreeBlocks - RtlpDphDelayedFreeCacheSize + 0x10000;

        InterlockedIncrement (&RtlpDphDelayedQueueTrims);
    }
    else {
        TrimSize = 0;
    }

    for (Trimmed = 0; Trimmed < TrimSize; /* nothing */) {

        if (IsListEmpty(&RtlpDphDelayedFreeQueue)) {
            break;
        }

        ListEntry = RemoveHeadList (&RtlpDphDelayedFreeQueue);

        Block = CONTAINING_RECORD (ListEntry,
                                   DPH_BLOCK_INFORMATION,
                                   FreeQueue);

        //
        // Check out the block (fill pattern and header stamps).
        //

        if (! RtlpDphIsNormalFreeHeapBlock(Block + 1, &Reason, TRUE)) {

            RtlpDphReportCorruptedBlock (NULL,
                                         DPH_CONTEXT_DELAYED_FREE,
                                         Block + 1,
                                         Reason);
        }

        //
        // Adjust the stamps so the block no longer carries the
        // page heap free-block signature once it leaves the queue.
        //

        Block->StartStamp -= 1;
        Block->EndStamp -= 1;

        RtlpDphMemoryUsedByDelayedFreeBlocks -= Block->ActualSize;
        RtlpDphNumberOfDelayedFreeBlocks -= 1;

        Trimmed += Block->ActualSize;

        //
        // We call into NT heap to really free the block. Note that we
        // cannot use the original flags used for free because this free operation
        // may happen in another thread. Plus we do not want unsynchronized access
        // anyway.
        //

        RtlFreeHeap (((PDPH_HEAP_ROOT)(UNSCRAMBLE_POINTER(Block->Heap)))->NormalHeap,
                     0,
                     Block);
    }

    //
    // Traverse the entire queue and move all blocks that belong to this heap
    // onto a temporary local list; they are freed later, out of the lock.
    //

    InitializeListHead (&BlocksToFree);

    RtlpDphPreviousBlock = NULL;

    for (Current = RtlpDphDelayedFreeQueue.Flink;
         Current != &RtlpDphDelayedFreeQueue;
         RtlpDphPreviousBlock = Current, Current = Next) {

        Next = Current->Flink;

        Block = CONTAINING_RECORD (Current,
                                   DPH_BLOCK_INFORMATION,
                                   FreeQueue);

        if (UNSCRAMBLE_POINTER(Block->Heap) != PageHeap) {
            continue;
        }

        //
        // We need to delete this block. We will remove it from the queue and
        // add it to a temporary local list that will be used to free the blocks
        // later out of locks.
        //

        RemoveEntryList (Current);

        RtlpDphMemoryUsedByDelayedFreeBlocks -= Block->ActualSize;
        RtlpDphNumberOfDelayedFreeBlocks -= 1;

        InsertHeadList (&BlocksToFree,
                        &Block->FreeQueue);
    }

    //
    // We can release the global queue lock now.
    //

    RtlLeaveCriticalSection (&RtlpDphDelayedFreeQueueLock);

    //
    // Free all blocks left in the delayed queue belonging to the current
    // heap being destroyed.
    //

    for (Current = BlocksToFree.Flink;
         Current != &BlocksToFree;
         RtlpDphPreviousBlock = Current, Current = Next) {

        Next = Current->Flink;

        Block = CONTAINING_RECORD (Current,
                                   DPH_BLOCK_INFORMATION,
                                   FreeQueue);

        //
        // Remove the block from the temporary list.
        //

        RemoveEntryList (Current);

        //
        // Prevent probing of this field during RtlpDphIsNormalFreeHeapBlock.
        //

        Block->Heap = 0;

        //
        // Check if the block about to be freed was touched.
        //

        if (! RtlpDphIsNormalFreeHeapBlock(Block + 1, &Reason, TRUE)) {

            RtlpDphReportCorruptedBlock (PageHeap,
                                         DPH_CONTEXT_DELAYED_DESTROY,
                                         Block + 1,
                                         Reason);
        }

        Block->StartStamp -= 1;
        Block->EndStamp -= 1;

        //
        // We call into NT heap to really free the block. Note that we
        // cannot use the original flags used for free because this free operation
        // may happen in another thread. Plus we do not want unsynchronized access
        // anyway.
        //

        RtlFreeHeap (NormalHeap,
                     0,
                     Block);
    }
}
  4936. /////////////////////////////////////////////////////////////////////
  4937. /////////////////////////////////////////////// Stack trace detection
  4938. /////////////////////////////////////////////////////////////////////
  4939. #pragma optimize("y", off) // disable FPO
  4940. PVOID
  4941. RtlpDphLogStackTrace (
  4942. ULONG FramesToSkip
  4943. )
  4944. {
  4945. USHORT TraceIndex;
  4946. TraceIndex = RtlpLogStackBackTraceEx (FramesToSkip + 1);
  4947. return RtlpGetStackTraceAddress (TraceIndex);
  4948. }
  4949. /////////////////////////////////////////////////////////////////////
  4950. /////////////////////////////////////////////////// Target dlls logic
  4951. /////////////////////////////////////////////////////////////////////
  4952. RTL_CRITICAL_SECTION RtlpDphTargetDllsLock;
  4953. LIST_ENTRY RtlpDphTargetDllsList;
  4954. BOOLEAN RtlpDphTargetDllsInitialized;
//
// Descriptor for a loaded dll that matched the page heap target dll
// list. One is allocated per matching dll in RtlpDphTargetDllsLoadCallBack.
//
typedef struct _DPH_TARGET_DLL {

    LIST_ENTRY List;            // links the descriptor into RtlpDphTargetDllsList
    UNICODE_STRING Name;        // heap-allocated copy of the dll name
    PVOID StartAddress;         // load address of the dll image
    PVOID EndAddress;           // load address + image size

} DPH_TARGET_DLL, * PDPH_TARGET_DLL;
  4961. NTSTATUS
  4962. RtlpDphTargetDllsLogicInitialize (
  4963. VOID
  4964. )
  4965. {
  4966. NTSTATUS Status;
  4967. Status = RtlInitializeCriticalSection (&RtlpDphTargetDllsLock);
  4968. if (! NT_SUCCESS(Status)) {
  4969. BUMP_COUNTER (CNT_INITIALIZE_CS_FAILURES);
  4970. return Status;
  4971. }
  4972. else {
  4973. InitializeListHead (&RtlpDphTargetDllsList);
  4974. RtlpDphTargetDllsInitialized = TRUE;
  4975. return Status;
  4976. }
  4977. }
  4978. VOID
  4979. RtlpDphTargetDllsLoadCallBack (
  4980. PUNICODE_STRING Name,
  4981. PVOID Address,
  4982. ULONG Size
  4983. )
  4984. //
  4985. // This function is not called right now but it will get called
  4986. // from \base\ntdll\ldrapi.c whenever a dll gets loaded. This
  4987. // gives page heap the opportunity to update per dll data structures
  4988. // that are not used right now for anything.
  4989. //
  4990. {
  4991. PDPH_TARGET_DLL Descriptor;
  4992. //
  4993. // Get out if we are in some weird condition.
  4994. //
  4995. if (! RtlpDphTargetDllsInitialized) {
  4996. return;
  4997. }
  4998. if (! RtlpDphIsDllTargeted (Name->Buffer)) {
  4999. return;
  5000. }
  5001. Descriptor = RtlAllocateHeap (RtlProcessHeap(), 0, sizeof *Descriptor);
  5002. if (Descriptor == NULL) {
  5003. return;
  5004. }
  5005. if (! RtlCreateUnicodeString (&(Descriptor->Name), Name->Buffer)) {
  5006. RtlFreeHeap (RtlProcessHeap(), 0, Descriptor);
  5007. return;
  5008. }
  5009. Descriptor->StartAddress = Address;
  5010. Descriptor->EndAddress = (PUCHAR)Address + Size;
  5011. RtlEnterCriticalSection (&RtlpDphTargetDllsLock);
  5012. InsertTailList (&(RtlpDphTargetDllsList), &(Descriptor->List));
  5013. RtlLeaveCriticalSection (&RtlpDphTargetDllsLock);
  5014. //
  5015. // Print a message if a target dll has been identified.
  5016. //
  5017. DbgPrintEx (DPFLTR_VERIFIER_ID,
  5018. DPFLTR_INFO_LEVEL,
  5019. "Page heap: loaded target dll %ws [%p - %p]\n",
  5020. Descriptor->Name.Buffer,
  5021. Descriptor->StartAddress,
  5022. Descriptor->EndAddress);
  5023. }
  5024. const WCHAR *
  5025. RtlpDphIsDllTargeted (
  5026. const WCHAR * Name
  5027. )
  5028. {
  5029. const WCHAR * All;
  5030. ULONG I, J;
  5031. All = RtlpDphTargetDllsUnicode.Buffer;
  5032. for (I = 0; All[I]; I += 1) {
  5033. for (J = 0; All[I+J] && Name[J]; J += 1) {
  5034. if (RtlUpcaseUnicodeChar(All[I+J]) != RtlUpcaseUnicodeChar(Name[J])) {
  5035. break;
  5036. }
  5037. }
  5038. if (Name[J]) {
  5039. continue;
  5040. }
  5041. else {
  5042. // we got to the end of string
  5043. return &(All[I]);
  5044. }
  5045. }
  5046. return NULL;
  5047. }
  5048. /////////////////////////////////////////////////////////////////////
  5049. /////////////////////////////////////////////// Fault injection logic
  5050. /////////////////////////////////////////////////////////////////////
  5051. BOOLEAN RtlpDphFaultSeedInitialized;
  5052. BOOLEAN RtlpDphFaultProcessEnoughStarted;
  5053. ULONG RtlpDphFaultInjectionDisabled;
  5054. ULONG RtlpDphFaultSeed;
  5055. ULONG RtlpDphFaultSuccessRate;
  5056. ULONG RtlpDphFaultFailureRate;
  5057. #define NO_OF_FAULT_STACKS 128
  5058. PVOID RtlpDphFaultStacks [NO_OF_FAULT_STACKS];
  5059. ULONG RtlpDphFaultStacksIndex;
  5060. #define ENOUGH_TIME ((DWORDLONG)(5 * 1000 * 1000 * 10)) // 5 secs
  5061. LARGE_INTEGER RtlpDphFaultStartTime;
  5062. LARGE_INTEGER RtlpDphFaultCurrentTime;
BOOLEAN
RtlpDphShouldFaultInject (
    VOID
    )
/*++

Routine Description:

    Decides whether the current operation should be failed artificially.
    A fault is injected with probability RtlpDphFaultProbability/10000,
    but never while fault injection is globally or temporarily disabled
    and never during process startup (the first ENOUGH_TIME interval and
    the configured RtlpDphFaultTimeOut must both elapse first). The stack
    trace of every injected fault is recorded in RtlpDphFaultStacks.

Return Value:

    TRUE if the caller should simulate a failure, FALSE otherwise.

--*/
{
    ULONG Index;
    DWORDLONG Delta;

    //
    // A zero probability or an explicit disable flag means fault
    // injection is completely off.
    //

    if (RtlpDphFaultProbability == 0) {
        return FALSE;
    }

    if (RtlpDphDisableFaults != 0) {
        return FALSE;
    }

    //
    // Make sure we do not fault inject if at least one guy
    // requested our mercy by calling RtlpDphDisableFaultInjection.
    // The add/decrement pair leaves the counter unchanged; a positive
    // value read here means some thread holds a disable request.
    //

    if (InterlockedExchangeAdd (&RtlpDphFaultInjectionDisabled, 1) > 0) {
        InterlockedDecrement (&RtlpDphFaultInjectionDisabled);
        return FALSE;
    }
    else {
        InterlockedDecrement (&RtlpDphFaultInjectionDisabled);
    }

    //
    // Make sure we do not fault while the process is getting
    // initialized. In principle we should deal with these bugs
    // also but it is not really a priority right now.
    //

    if (RtlpDphFaultProcessEnoughStarted == FALSE) {

        if ((DWORDLONG)(RtlpDphFaultStartTime.QuadPart) == 0) {

            //
            // First call: remember the start time and do not fault yet.
            //

            NtQuerySystemTime (&RtlpDphFaultStartTime);
            return FALSE;
        }
        else {

            NtQuerySystemTime (&RtlpDphFaultCurrentTime);

            Delta = (DWORDLONG)(RtlpDphFaultCurrentTime.QuadPart)
                - (DWORDLONG)(RtlpDphFaultStartTime.QuadPart);

            //
            // Wait out both the fixed 5 second grace period and the
            // configurable timeout (RtlpDphFaultTimeOut is in seconds;
            // system time is in 100ns units).
            //

            if (Delta < ENOUGH_TIME) {
                return FALSE;
            }

            if (Delta <= ((DWORDLONG)RtlpDphFaultTimeOut * 1000 * 1000 * 10)) {
                return FALSE;
            }

            //
            // The following is not an error message but we want it to be
            // printed for almost all situations. It happens only once per
            // process.
            //

            DbgPrintEx (DPFLTR_VERIFIER_ID,
                        DPFLTR_ERROR_LEVEL,
                        "Page heap: enabling fault injection for process 0x%X \n",
                        PROCESS_ID());

            RtlpDphFaultProcessEnoughStarted = TRUE;
        }
    }

    //
    // Initialize the random seed (from the performance counter) if we
    // need to.
    //

    if (RtlpDphFaultSeedInitialized == FALSE) {

        LARGE_INTEGER PerformanceCounter;

        PerformanceCounter.LowPart = 0xABCDDCBA;

        NtQueryPerformanceCounter (
            &PerformanceCounter,
            NULL);

        RtlpDphFaultSeed = PerformanceCounter.LowPart;
        RtlpDphFaultSeedInitialized = TRUE;
    }

    //
    // Roll the dice; on a hit record the faulting stack in the circular
    // RtlpDphFaultStacks buffer and update the hit/miss counters.
    //

    if ((RtlRandom(&RtlpDphFaultSeed) % 10000) < RtlpDphFaultProbability) {

        Index = InterlockedExchangeAdd (&RtlpDphFaultStacksIndex, 1);
        Index &= (NO_OF_FAULT_STACKS - 1);

        RtlpDphFaultStacks[Index] = RtlpDphLogStackTrace (2);

        RtlpDphFaultFailureRate += 1;
        return TRUE;
    }
    else {

        RtlpDphFaultSuccessRate += 1;
        return FALSE;
    }
}
  5143. ULONG RtlpDphFaultInjectionDisabled;
  5144. VOID
  5145. RtlpDphDisableFaultInjection (
  5146. )
  5147. {
  5148. InterlockedIncrement (&RtlpDphFaultInjectionDisabled);
  5149. }
  5150. VOID
  5151. RtlpDphEnableFaultInjection (
  5152. )
  5153. {
  5154. InterlockedDecrement (&RtlpDphFaultInjectionDisabled);
  5155. }
  5156. /////////////////////////////////////////////////////////////////////
  5157. /////////////////////////////////////// Internal validation functions
  5158. /////////////////////////////////////////////////////////////////////
  5159. PDPH_HEAP_BLOCK
  5160. RtlpDphSearchBlockInList (
  5161. PDPH_HEAP_BLOCK List,
  5162. PUCHAR Address
  5163. )
  5164. {
  5165. PDPH_HEAP_BLOCK Current;
  5166. for (Current = List; Current; Current = Current->pNextAlloc) {
  5167. if (Current->pVirtualBlock == Address) {
  5168. return Current;
  5169. }
  5170. }
  5171. return NULL;
  5172. }
  5173. PVOID RtlpDphLastValidationStack;
  5174. PVOID RtlpDphCurrentValidationStack;
VOID
RtlpDphInternalValidatePageHeap (
    PDPH_HEAP_ROOT Heap,
    PUCHAR ExemptAddress,
    SIZE_T ExemptSize
    )
/*++

Routine Description:

    Internal consistency check. Walks every page of every virtual range
    owned by the page heap and verifies that the page is covered by a
    node in one of the internal lists (busy, free, available or node
    pool). Any page accounted for by no list is reported as leaked and
    the routine breaks into the debugger.

Arguments:

    Heap - page heap to validate.

    ExemptAddress - start of a region that the caller has temporarily
        taken out of all lists; pages in it are skipped.

    ExemptSize - size in bytes of the exempt region.

--*/
{
    PDPH_HEAP_BLOCK Range;
    PDPH_HEAP_BLOCK Node;
    PUCHAR Address;
    BOOLEAN FoundLeak;

    //
    // Record the previous and current validation stacks; if a leak is
    // found these identify the operations between which it appeared.
    //

    RtlpDphLastValidationStack = RtlpDphCurrentValidationStack;
    RtlpDphCurrentValidationStack = RtlpDphLogStackTrace (0);

    FoundLeak = FALSE;

    for (Range = Heap->pVirtualStorageListHead;
         Range != NULL;
         Range = Range->pNextAlloc) {

        Address = Range->pVirtualBlock;

        while (Address < Range->pVirtualBlock + Range->nVirtualBlockSize) {

            //
            // Ignore DPH_HEAP_ROOT structures.
            //

            if ((Address >= (PUCHAR)Heap - PAGE_SIZE) && (Address < (PUCHAR)Heap + 5 * PAGE_SIZE)) {
                Address += PAGE_SIZE;
                continue;
            }

            //
            // Ignore exempt region (temporarily out of all structures).
            //

            if ((Address >= ExemptAddress) && (Address < ExemptAddress + ExemptSize)) {
                Address += PAGE_SIZE;
                continue;
            }

            //
            // If a node in any list starts at this address, skip the
            // whole extent it covers and continue the scan after it.
            //

            Node = RtlpDphSearchBlockInList (Heap->pBusyAllocationListHead, Address);

            if (Node) {
                Address += Node->nVirtualBlockSize;
                continue;
            }

            Node = RtlpDphSearchBlockInList (Heap->pFreeAllocationListHead, Address);

            if (Node) {
                Address += Node->nVirtualBlockSize;
                continue;
            }

            Node = RtlpDphSearchBlockInList (Heap->pAvailableAllocationListHead, Address);

            if (Node) {
                Address += Node->nVirtualBlockSize;
                continue;
            }

            Node = RtlpDphSearchBlockInList (Heap->pNodePoolListHead, Address);

            if (Node) {
                Address += Node->nVirtualBlockSize;
                continue;
            }

            //
            // No list accounts for this page: report it and keep
            // scanning one page at a time.
            //

            DbgPrintEx (DPFLTR_VERIFIER_ID,
                        DPFLTR_ERROR_LEVEL,
                        "Block @ %p has been leaked \n",
                        Address);

            FoundLeak = TRUE;

            Address += PAGE_SIZE;
        }
    }

    if (FoundLeak) {

        DbgPrintEx (DPFLTR_VERIFIER_ID,
                    DPFLTR_ERROR_LEVEL,
                    "Page heap: Last stack @ %p, Current stack @ %p \n",
                    RtlpDphLastValidationStack,
                    RtlpDphCurrentValidationStack);

        DbgBreakPoint ();
    }
}
VOID
RtlpDphValidateInternalLists (
    PDPH_HEAP_ROOT Heap
    )
/*++

Routine Description:

    This routine is called to validate the busy and free lists of a page heap
    if /protect bit is enabled. In the wbemstress lab we have seen a corruption
    of the busy list with the start of the busy list pointing towards the end of
    the free list. This is the reason we touch very carefully the nodes that are
    in the busy list: the whole walk is wrapped in SEH and any exception is
    itself reported as corruption.

Arguments:

    Heap - page heap whose busy/free lists are checked.

--*/
{
    PDPH_HEAP_BLOCK StartNode;
    PDPH_HEAP_BLOCK EndNode;
    PDPH_HEAP_BLOCK Node;
    ULONG NumberOfBlocks;
    PDPH_BLOCK_INFORMATION Block;

    //
    // Nothing to do if /protect is not enabled.
    //

    if (! (Heap->ExtraFlags & PAGE_HEAP_PROTECT_META_DATA)) {
        return;
    }

    //
    // Record the previous/current validation stacks for debugging a
    // failure (which operations bracketed the corruption).
    //

    RtlpDphLastValidationStack = RtlpDphCurrentValidationStack;
    RtlpDphCurrentValidationStack = RtlpDphLogStackTrace (0);

    StartNode = Heap->pBusyAllocationListHead;
    EndNode = Heap->pBusyAllocationListTail;

    try {

        //
        // Sanity checks.
        //

        if (Heap->nBusyAllocations == 0) {
            return;
        }

        if (StartNode == NULL || StartNode->pVirtualBlock == NULL) {

            DbgPrintEx (DPFLTR_VERIFIER_ID,
                        DPFLTR_ERROR_LEVEL,
                        "Page heap: corruption detected: %u: \n", __LINE__);

            DbgBreakPoint ();
        }

        if (EndNode == NULL || EndNode->pVirtualBlock == NULL) {

            DbgPrintEx (DPFLTR_VERIFIER_ID,
                        DPFLTR_ERROR_LEVEL,
                        "Page heap: corruption detected: %u: \n", __LINE__);

            DbgBreakPoint ();
        }

        //
        // First check if StartNode is also in the free list. This was the typical
        // corruption pattern that I have seen in the past.
        //

        if (RtlpDphSearchBlockInList (Heap->pFreeAllocationListHead, StartNode->pVirtualBlock)) {

            DbgPrintEx (DPFLTR_VERIFIER_ID,
                        DPFLTR_ERROR_LEVEL,
                        "Page heap: corruption detected: %u: \n", __LINE__);

            DbgBreakPoint ();
        }

        //
        // Make sure that we have in the busy list exactly the number of blocks we think
        // we should have.
        //

        NumberOfBlocks = 0;

        for (Node = StartNode; Node != NULL; Node = Node->pNextAlloc) {
            NumberOfBlocks += 1;
        }

        if (NumberOfBlocks != Heap->nBusyAllocations) {

            DbgPrintEx (DPFLTR_VERIFIER_ID,
                        DPFLTR_ERROR_LEVEL,
                        "Page heap: corruption detected: %u: \n", __LINE__);

            DbgBreakPoint ();
        }

        //
        // Take all nodes in the busy list and make sure they seem to be allocated, that is
        // they have the required pattern. This is skipped if we have the /backwards option
        // enabled since in this case we do not put magic patterns.
        //

        if (! (Heap->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) {

            for (Node = StartNode; Node != NULL; Node = Node->pNextAlloc) {

                //
                // The DPH_BLOCK_INFORMATION header immediately precedes
                // the user allocation.
                //

                Block = (PDPH_BLOCK_INFORMATION)(Node->pUserAllocation) - 1;

                if (Block->StartStamp != DPH_PAGE_BLOCK_START_STAMP_ALLOCATED) {

                    DbgPrintEx (DPFLTR_VERIFIER_ID,
                                DPFLTR_ERROR_LEVEL,
                                "Page heap: corruption detected: wrong stamp for node %p \n", Node);

                    DbgBreakPoint ();
                }
            }
        }
    }
    except (EXCEPTION_EXECUTE_HANDLER) {

        //
        // Touching a corrupted node faulted; that is corruption too.
        //

        DbgPrintEx (DPFLTR_VERIFIER_ID,
                    DPFLTR_ERROR_LEVEL,
                    "Page heap: corruption detected: exception raised \n");

        DbgBreakPoint ();
    }
}
  5340. VOID
  5341. RtlpDphCheckFillPattern (
  5342. PUCHAR Address,
  5343. SIZE_T Size,
  5344. UCHAR Fill
  5345. )
  5346. {
  5347. PUCHAR Current;
  5348. for (Current = Address; Current < Address + Size; Current += 1) {
  5349. if (*Current != Fill) {
  5350. DbgPrintEx (DPFLTR_VERIFIER_ID,
  5351. DPFLTR_ERROR_LEVEL,
  5352. "Page heap: fill check failed @ %p for (%p, %p, %x) \n",
  5353. Current,
  5354. Address,
  5355. Size,
  5356. (ULONG)Fill);
  5357. }
  5358. }
  5359. }
VOID
RtlpDphVerifyList(
    IN PDPH_HEAP_BLOCK pListHead,
    IN PDPH_HEAP_BLOCK pListTail,
    IN SIZE_T nExpectedLength,
    IN SIZE_T nExpectedVirtual,
    IN PCCH pListName
    )
/*++

Routine Description:

    Verifies one of the page heap's singly linked block lists: that it
    is not circular, that pListTail really is its last node, and that
    the node count and the sum of nVirtualBlockSize match the expected
    values. Each failure prints a message and breaks into the debugger.

Arguments:

    pListHead - first node (NULL for an empty list).

    pListTail - expected last node.

    nExpectedLength - expected node count, or 0xFFFFFFFF to skip this check.

    nExpectedVirtual - expected total virtual size, or 0xFFFFFFFF to skip.

    pListName - list name used in the diagnostic messages.

--*/
{
    PDPH_HEAP_BLOCK pPrev = NULL;
    PDPH_HEAP_BLOCK pNode = pListHead;

    //
    // pTest is the fast pointer of a two-speed (tortoise/hare) cycle
    // check: it advances two links per iteration while pNode advances
    // one, so on a circular list the two must eventually meet.
    //
    PDPH_HEAP_BLOCK pTest = pListHead ? pListHead->pNextAlloc : NULL;

    ULONG nNode = 0;
    SIZE_T nSize = 0;

    while (pNode) {

        if (pNode == pTest) {

            DbgPrintEx (DPFLTR_VERIFIER_ID,
                        DPFLTR_ERROR_LEVEL,
                        "Page heap: Internal %s list is circular\n",
                        pListName );

            DbgBreakPoint ();
            return;
        }

        //
        // Accumulate node count and total virtual size.
        //

        nNode += 1;
        nSize += pNode->nVirtualBlockSize;

        if (pTest) {

            pTest = pTest->pNextAlloc;

            if (pTest) {
                pTest = pTest->pNextAlloc;
            }
        }

        pPrev = pNode;
        pNode = pNode->pNextAlloc;
    }

    //
    // pPrev is the last node actually reached; it must be the tail.
    //

    if (pPrev != pListTail) {

        DbgPrintEx (DPFLTR_VERIFIER_ID,
                    DPFLTR_ERROR_LEVEL,
                    "Page heap: Internal %s list has incorrect tail pointer\n",
                    pListName );

        DbgBreakPoint ();
    }

    if (( nExpectedLength != 0xFFFFFFFF ) && ( nExpectedLength != nNode )) {

        DbgPrintEx (DPFLTR_VERIFIER_ID,
                    DPFLTR_ERROR_LEVEL,
                    "Page heap: Internal %s list has incorrect length\n",
                    pListName );

        DbgBreakPoint ();
    }

    if (( nExpectedVirtual != 0xFFFFFFFF ) && ( nExpectedVirtual != nSize )) {

        DbgPrintEx (DPFLTR_VERIFIER_ID,
                    DPFLTR_ERROR_LEVEL,
                    "Page heap: Internal %s list has incorrect virtual size\n",
                    pListName );

        DbgBreakPoint ();
    }
}
  5416. VOID
  5417. RtlpDphVerifyIntegrity(
  5418. IN PDPH_HEAP_ROOT pHeap
  5419. )
  5420. {
  5421. RtlpDphVerifyList(
  5422. pHeap->pVirtualStorageListHead,
  5423. pHeap->pVirtualStorageListTail,
  5424. pHeap->nVirtualStorageRanges,
  5425. pHeap->nVirtualStorageBytes,
  5426. "VIRTUAL"
  5427. );
  5428. RtlpDphVerifyList(
  5429. pHeap->pBusyAllocationListHead,
  5430. pHeap->pBusyAllocationListTail,
  5431. pHeap->nBusyAllocations,
  5432. pHeap->nBusyAllocationBytesCommitted,
  5433. "BUSY"
  5434. );
  5435. RtlpDphVerifyList(
  5436. pHeap->pFreeAllocationListHead,
  5437. pHeap->pFreeAllocationListTail,
  5438. pHeap->nFreeAllocations,
  5439. pHeap->nFreeAllocationBytesCommitted,
  5440. "FREE"
  5441. );
  5442. RtlpDphVerifyList(
  5443. pHeap->pAvailableAllocationListHead,
  5444. pHeap->pAvailableAllocationListTail,
  5445. pHeap->nAvailableAllocations,
  5446. pHeap->nAvailableAllocationBytesCommitted,
  5447. "AVAILABLE"
  5448. );
  5449. RtlpDphVerifyList(
  5450. pHeap->pUnusedNodeListHead,
  5451. pHeap->pUnusedNodeListTail,
  5452. pHeap->nUnusedNodes,
  5453. 0xFFFFFFFF,
  5454. "FREENODE"
  5455. );
  5456. RtlpDphVerifyList(
  5457. pHeap->pNodePoolListHead,
  5458. pHeap->pNodePoolListTail,
  5459. pHeap->nNodePools,
  5460. pHeap->nNodePoolBytes,
  5461. "NODEPOOL"
  5462. );
  5463. }
  5464. PVOID RtlpDphLastCheckTrace [16];
VOID
RtlpDphCheckFreeDelayedCache (
    PVOID CheckBlock,
    SIZE_T CheckSize
    )
/*++

Routine Description:

    Validates every block sitting in the delayed free queue: checks that
    none of them lies inside the region [CheckBlock, CheckBlock+CheckSize),
    that each still carries the free-block pattern, and that the
    underlying NT heap entry still has its busy bit set. Failures print
    a message and break into the debugger.

Arguments:

    CheckBlock - start of a region that should not overlap any queued
        free block.

    CheckSize - size in bytes of that region.

--*/
{
    ULONG Reason;
    PDPH_BLOCK_INFORMATION Block;
    PLIST_ENTRY Current;
    PLIST_ENTRY Next;
    ULONG Hash;

    //
    // A NULL Flink means the queue was never initialized; nothing to do.
    //

    if (RtlpDphDelayedFreeQueue.Flink == NULL) {
        return;
    }

    RtlEnterCriticalSection (&RtlpDphDelayedFreeQueueLock);

    for (Current = RtlpDphDelayedFreeQueue.Flink;
         Current != &RtlpDphDelayedFreeQueue;
         Current = Next) {

        Next = Current->Flink;

        //
        // A queued free block must not live inside the region being
        // checked.
        //

        if (Current >= (PLIST_ENTRY)CheckBlock &&
            Current < (PLIST_ENTRY)((SIZE_T)CheckBlock + CheckSize)) {

            DbgPrintEx (DPFLTR_VERIFIER_ID,
                        DPFLTR_ERROR_LEVEL,
                        "Page heap: block %p contains freed block %p \n", CheckBlock, Current);

            DbgBreakPoint ();
        }

        Block = CONTAINING_RECORD (Current, DPH_BLOCK_INFORMATION, FreeQueue);

        //
        // Temporarily unscramble the heap pointer so the block header
        // can be probed; it is re-scrambled below.
        //

        Block->Heap = UNSCRAMBLE_POINTER(Block->Heap);

        //
        // Check if the block about to be freed was touched.
        //

        if (! RtlpDphIsNormalFreeHeapBlock(Block + 1, &Reason, FALSE)) {

            RtlpDphReportCorruptedBlock (NULL,
                                         DPH_CONTEXT_DELAYED_FREE,
                                         Block + 1,
                                         Reason);
        }

        //
        // Check the busy bit of the NT heap entry that precedes the
        // DPH block header.
        //

        if ((((PHEAP_ENTRY)Block - 1)->Flags & HEAP_ENTRY_BUSY) == 0) {

            DbgPrintEx (DPFLTR_VERIFIER_ID,
                        DPFLTR_ERROR_LEVEL,
                        "Page heap: block %p has busy bit reset \n", Block);

            DbgBreakPoint ();
        }

        Block->Heap = SCRAMBLE_POINTER(Block->Heap);
    }

    //
    // Record the stack of the most recent check for debugging.
    //

    RtlZeroMemory (RtlpDphLastCheckTrace,
                   sizeof RtlpDphLastCheckTrace);

    RtlCaptureStackBackTrace (0,
                              16,
                              RtlpDphLastCheckTrace,
                              &Hash);

    RtlLeaveCriticalSection (&RtlpDphDelayedFreeQueueLock);
}
  5521. #endif // DEBUG_PAGE_HEAP