Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

2195 lines
61 KiB

  1. /*** HEAP.C
  2. *
  3. * (C) Copyright Microsoft Corp., 1988-1994
  4. *
  5. * Heap management
  6. *
  7. * If you are having trouble getting errors from this code, you might
  8. * want to try setting one of the following variables to non-zero:
  9. *
  10. * mmfErrorStop - enables stopping whenever there is an
  11. * error returned from a memory manager function
  12. *
  13. * hpfWalk - enables some verification of the entire heap when
  14. * coming into heap functions. Enabling heap walking
  15. * can dramatically slow down heap functions
  16. * but system-wide performance doesn't change too much.
  17. *
  18. * hpfParanoid - enables even more checking during heap walking
  19. * (hpfWalk must also be set) and enables heap walking
  20. * coming in and out of every heap call.
  21. *
  22. * hpfTrashStop - enables stopping in the debugger whenever
  23. * we detect a trashed heap block during hpfWalk
  24. * and it attempts to print the trashed address
  25. *
  26. * Origin: Chicago
  27. *
  28. * Change history:
  29. *
  30. * Date Who Description
  31. * --------- --------- -------------------------------------------------
  32. * ?/91 BrianSm Created
  33. * 3/94 BrianSm Added heaps that can grow beyond initial max size
  34. * 6/94 BrianSm Decommit pages within free heap blocks
  35. */
  36. #ifdef WIN32
  37. #include <EmulateHeap_kernel32.h>
  38. #endif
  39. #pragma hdrstop("kernel32.pch")
  40. #ifndef WIN32
  41. #include <basedef.h>
  42. #include <winerror.h>
  43. #include <vmmsys.h>
  44. #include <mmlocal.h>
  45. #include <sched.h>
  46. #include <thrdsys.h>
  47. #include <schedsys.h>
  48. #include <schedc.h>
  49. #include <heap.h>
  50. #define pthCur pthcbCur
  51. #define hpGetTID() (pthCur->thcb_ThreadId)
  52. char INTERNAL hpfWalk = 1; /* enable some heap walking */
  53. #ifdef HPDEBUG
  54. #define dprintf(x) dprintf##x
  55. #define DebugStop() mmFatalError(0)
  56. #else
  57. #define dprintf(x)
  58. #endif
  59. #define HeapFree(hheap, flags, lpMem) HPFree(hheap, lpMem)
  60. #define HeapSize(hheap, flags, lpMem) HPSize(hheap, lpMem)
  61. #define hpTakeSem(hheap, pblock, flags) hpTakeSem2(hheap, pblock)
  62. #define hpClearSem(hheap, flags) hpClearSem2(hheap)
  63. #else /* WIN32 */
  64. #define pthCur (*pptdbCur)
  65. #define hpGetTID() (pthCur ? (((struct tcb_s *)(pthCur->R0ThreadHandle))->TCB_ThreadId) : 0);
  66. char mmfErrorStop = 1; /* enable stopping for all errors */
  67. char INTERNAL hpfWalk = 0; /* disable heap walking */
  68. #ifdef HPMEASURE
  69. BOOL PRIVATE hpMeasureItem(HHEAP hheap, unsigned uItem);
  70. #endif
  71. #endif /* WIN32 */
  72. #ifdef HPDEBUG
  73. #define hpTrash(s) dprintf((s));dprintf((("\nheap handle=%x\n", hheap)));if (hpfTrashStop) DebugStop()
  74. char INTERNAL hpfParanoid = 0; /* disable very strict walking */
  75. char INTERNAL hpfTrashStop = 1; /* enable stopping for trashed heap */
  76. char INTERNAL hpWalkCount = 0; /* keep count of times hpWalk called*/
  77. #endif
  78. /***LD hpFreeSizes - the block sizes for the different free list heads
  79. */
  80. unsigned long hpFreeSizes[hpFREELISTHEADS] = {32, 128, 512, (ULONG)-1};
  81. #ifndef WIN32
  82. #pragma VMM_PAGEABLE_DATA_SEG
  83. #pragma VxD_VMCREATE_CODE_SEG
  84. #endif
  85. #ifdef DEBUG
  86. /***EP HeapSetFlags - set heap error flags
  87. *
  88. * ENTRY: dwFlags - flags to change
  89. * dwFlagValues - new flag values
  90. *
  91. * EXIT: old values of the flags
  92. * (on RETAIL, this is a stub which returns -1)
  93. */
  94. #define HSF_MMFERRORSTOP 0x00000001
  95. #define HSF_HPFPARANOID 0x00000002
  96. #define HSF_VALIDMASK 0x00000003
  97. DWORD APIENTRY
  98. HeapSetFlags( DWORD dwFlags, DWORD dwFlagValues)
  99. {
  100. DWORD dwOldFlagValues;
  101. dwOldFlagValues = (mmfErrorStop ? HSF_MMFERRORSTOP : 0) |
  102. (hpfParanoid ? HSF_HPFPARANOID : 0);
  103. if( dwFlags & ~HSF_VALIDMASK) {
  104. OutputDebugString( "HeapSetFlags: invalid flags, ignored\n");
  105. return (DWORD)-1; // error
  106. }
  107. if( dwFlags & HSF_MMFERRORSTOP) {
  108. if( dwFlagValues & HSF_MMFERRORSTOP)
  109. mmfErrorStop = 1;
  110. else
  111. mmfErrorStop = 0;
  112. }
  113. if( dwFlags & HSF_HPFPARANOID) {
  114. if( dwFlagValues & HSF_HPFPARANOID) {
  115. hpfTrashStop = 1;
  116. hpfWalk = 1;
  117. hpfParanoid = 1;
  118. } else {
  119. hpfParanoid = 0;
  120. }
  121. }
  122. return dwOldFlagValues;
  123. }
  124. #endif
  125. /***EP HPInit - initialize a memory block as a heap
  126. *
  127. * ENTRY: hheap - heap handle for heap (same as pmem unless HP_INITSEGMENT)
  128. * pmem - pointer to chunk of memory (must be page aligned)
  129. * cbreserve - number of bytes reserved in block (must be PAGESIZE
  130. * multiple)
  131. * flags - HP_NOSERIALIZE: don't serialize heap operations
  132. * (if not, caller MUST serialize)
  133. * HP_EXCEPT: generate exceptions instead of errors
  134. * HP_GROWABLE: heap can grow infinitely beyond cbreserve
  135. * HP_LOCKED: commit pages as fixed to heap
  136. * HP_INITSEGMENT: initialize the block as an growable
  137. * heap segment
  138. * HP_GROWUP: waste last page in heap so heap allocs
  139. * will grow monotonically upwards from base
  140. * EXIT: handle to new heap or 0 if error.
  141. */
HHEAP INTERNAL
HPInit(struct heapinfo_s *hheap,
       struct heapinfo_s *pmem,
       unsigned long cbreserve,
       unsigned long flags)
{
    struct freelist_s *pfreelist;       /* cursor over free list heads */
    struct freelist_s *pfreelistend;    /* one past last free list head */
    unsigned *psizes;                   /* cursor over hpFreeSizes table */
    struct busyheap_s *pfakebusy;       /* end sentinel / anti-coalesce block */
    unsigned cbheader, cbmainfree;

    /* pmem must be page aligned and cbreserve a non-zero page multiple */
    mmAssert(((unsigned)pmem & PAGEMASK) == 0 && cbreserve != 0 &&
    (cbreserve & PAGEMASK) == 0, "HPInit: invalid parameter\n");
    /*
     * Commit enough space at the beginning of the heap to hold a
     * heapinfo_s structure and a minimal free list.
     */
    if (hpCommit((unsigned)pmem / PAGESIZE,
                 (sizeof(struct heapinfo_s)+sizeof(struct freeheap_s)+PAGEMASK)
                 / PAGESIZE,
                 flags) == 0) {
        goto error;
    }
    /*
     * This next block of initialization stuff we only have to do if
     * we are creating a brand new heap, not just a heap segment.
     */
    if ((flags & HP_INITSEGMENT) == 0) {
        cbheader = sizeof(struct heapinfo_s);
        /*
         * Fill in the heapinfo_s structure (per-heap information).
         */
#ifdef WIN32
        pmem->hi_procnext = 0;
#endif
        pmem->hi_psegnext = 0;
        pmem->hi_signature = HI_SIGNATURE;
        pmem->hi_flags = (unsigned char)flags;
#ifdef HPDEBUG
        pmem->hi_cbreserve = cbreserve; /* this is also done below, here for sum */
        pmem->hi_sum = hpSum(pmem, HI_CDWSUM);
        pmem->hi_eip = hpGetAllocator();
        pmem->hi_tid = hpGetTID();
        pmem->hi_thread = 0;
#endif
        /*
         * If the caller requested that we serialize access to the heap,
         * create a critical section to do that.
         */
        if ((flags & HP_NOSERIALIZE) == 0) {
            hpInitializeCriticalSection(pmem);
        }
        /*
         * Initialize the free list heads.
         * In the future we might want to have the user pass in the
         * size of the free lists he would like, but for now just copy
         * them from the static list hpFreeSizes.
         */
        pfreelist = pmem->hi_freelist;
        pfreelistend = pfreelist + hpFREELISTHEADS;
        psizes = hpFreeSizes;
        for (; pfreelist < pfreelistend; ++pfreelist, ++psizes) {
            pfreelist->fl_cbmax = *psizes;
            /* list heads have size 0 so they are never allocated */
            hpSetFreeSize(&pfreelist->fl_header, 0);
            pfreelist->fl_header.fh_flink = &(pfreelist+1)->fl_header;
            pfreelist->fl_header.fh_blink = &(pfreelist-1)->fl_header;
#ifdef HPDEBUG
            pfreelist->fl_header.fh_signature = FH_SIGNATURE;
            pfreelist->fl_header.fh_sum = hpSum(&pfreelist->fl_header, FH_CDWSUM);
#endif
        }
        /*
         * Make the list circular by fusing the start and beginning
         */
        pmem->hi_freelist[0].fl_header.fh_blink =
            &(pmem->hi_freelist[hpFREELISTHEADS - 1].fl_header);
        pmem->hi_freelist[hpFREELISTHEADS - 1].fl_header.fh_flink =
            &(pmem->hi_freelist[0].fl_header);
#ifdef HPDEBUG
        /* re-checksum the two heads whose links were just patched */
        pmem->hi_freelist[0].fl_header.fh_sum =
            hpSum(&(pmem->hi_freelist[0].fl_header), FH_CDWSUM);
        pmem->hi_freelist[hpFREELISTHEADS - 1].fl_header.fh_sum =
            hpSum(&(pmem->hi_freelist[hpFREELISTHEADS - 1].fl_header), FH_CDWSUM);
#endif
    } else {
        /* heap segments get only the smaller heapseg_s header */
        cbheader = sizeof(struct heapseg_s);
    }
    pmem->hi_cbreserve = cbreserve;
    /*
     * Put a tiny busy heap header at the very end of the heap
     * so we can free the true last block and mark the following
     * block as HP_PREVFREE without worrying about falling off the
     * end of the heap. Give him a size of 0 so we can also use
     * him to terminate heap-walking functions.
     * We also might need to commit a page to hold the thing.
     */
    pfakebusy = (struct busyheap_s *)((unsigned long)pmem + cbreserve) - 1;
    if (cbreserve > PAGESIZE) {
        /* sentinel lands past the first (already committed) page */
        if (hpCommit((unsigned)pfakebusy / PAGESIZE,
                     (sizeof(struct busyheap_s) + PAGEMASK) / PAGESIZE, flags) == 0) {
            goto errordecommit;
        }
    }
    hpSetBusySize(pfakebusy, 0);
#ifdef HPDEBUG
    pfakebusy->bh_signature = BH_SIGNATURE;
    pfakebusy->bh_sum = hpSum(pfakebusy, BH_CDWSUM);
#endif
    /*
     * Link the interior of the heap into the free list.
     * If we create one big free block, the page at the end of the heap will
     * be wasted because it will be committed (to hold the end sentinel) but
     * it won't be touched for allocations until every other page in the
     * heap has been used. To avoid this, we create two free blocks, one for
     * the main body of the heap and another block which has most of the last
     * page in it. We need to insert the last page first because hpFreeSub
     * looks at the following block to see if we need to coalesce.
     * The caller can force us to waste the last page by passing in HP_GROWUP.
     * It is used by some ring 3 components who would waste tiled selectors
     * if we had blocks being allocated from an outlying end page.
     */
    if ((flags & HP_GROWUP) == 0 && cbreserve > PAGESIZE) {
        cbmainfree = cbreserve - cbheader - PAGESIZE + /* size of main block */
            sizeof(struct freeheap_s *);
        /*
         * Put a little busy heap block at the front of the final page
         * to keep the final page from getting coalesced into the main free
         * block.
         */
        pfakebusy = (struct busyheap_s *)((char *)pmem + cbmainfree + cbheader);
        hpSetBusySize(pfakebusy, sizeof(struct busyheap_s));
#ifdef HPDEBUG
        pfakebusy->bh_signature = BH_SIGNATURE;
        pfakebusy->bh_sum = hpSum(pfakebusy, BH_CDWSUM);
#endif
        /*
         * Free the rest of the last page (minus the various little bits
         * we have taken out)
         */
        hpFreeSub(hheap, pfakebusy + 1,
                  PAGESIZE -                     /* entire page, less... */
                  sizeof(struct freeheap_s *) -  /* back-pointer to prev free*/
                  sizeof(struct busyheap_s) -    /* anti-coalescing busy block*/
                  sizeof(struct busyheap_s),     /* end sentinel */
                  0);
    /*
     * Otherwise, make the entirety of our heap between the end of the header
     * and the end sentinel into a free block.
     */
    } else {
        cbmainfree = cbreserve - sizeof(struct busyheap_s) - cbheader;
    }
    /*
     * Now put the main body of the heap onto the free list
     */
    hpFreeSub(hheap, (char *)pmem + cbheader, cbmainfree, 0);
#ifdef HPDEBUG
    /*
     * Verify the heap is ok. Note, a new heap segment will fail the test
     * until we hook it up properly in HPAlloc, so skip the check for them.
     */
    if (hpfParanoid && hheap == pmem) {
        hpWalk(hheap);
    }
#endif
    /*
     * Return a pointer to the start of the heap as the heap handle
     */
exit:
    return(pmem);
errordecommit:
    /* undo the header commit done at function entry */
    PageDecommit((unsigned)pmem / PAGESIZE,
                 (sizeof(struct heapinfo_s)+sizeof(struct freeheap_s)+PAGEMASK)
                 / PAGESIZE,
                 PC_STATIC);
error:
    pmem = 0;
    goto exit;
}
  321. #ifndef WIN32
  322. /***EP HPClone - make a duplicate of an existing heap
  323. *
  324. * This routine is used to create a new heap that has heap blocks
  325. * allocated and free in the same places as another heap. However,
  326. * the contents of the blocks will be zero-initialized, rather than
  327. * the same as the other heap.
  328. *
  329. * If this routine fails, it is the responsibility of the caller
  330. * to free up any memory that might have been committed (as well as
  331. * the original reserved object).
  332. *
  333. * ENTRY: hheap - handle to existing heap to duplicate
  334. * pmem - pointer to new memory block to turn into duplicate heap
  335. * (the address must be reserved and not committed)
  336. * EXIT: handle to new heap if success, else 0 if failure
  337. */
HHEAP INTERNAL
HPClone(struct heapinfo_s *hheap, struct heapinfo_s *pmem)
{
    struct freeheap_s *ph;      /* walking cursor over the source heap */
    struct freeheap_s *phend;   /* stop point: big free block (3) or block (4) */
#ifdef HPDEBUG
    struct freeheap_s *phnew;   /* parallel cursor over the clone for verify */
#endif
    /*
     * We need to take the heap semaphore for the old heap so no one
     * changes its contents while we clone it (that could confuse the
     * walking code).
     */
    if (hpTakeSem(hheap, 0, 0) == 0) {
        pmem = 0;
        goto exit;
    }
    /*
     * First call HPInit on the new block to get it a header
     */
    if (HPInit(pmem, pmem, hheap->hi_cbreserve, (unsigned)hheap->hi_flags) == 0) {
        goto error;
    }
    /*
     * Ring 0 heaps are layed out in the following general areas:
     *
     *   1  heap header
     *   2  mix of allocated and free heap blocks
     *   3  giant free heap block (remains of initial free block)
     *   4  a single minimum size busy heap block
     *   5  mix of allocated and free heap blocks
     *   6  end sentinel
     *
     * The general method for cloning a heap is to walk the entire source
     * heap and allocate blocks on the new heap corresponding to all
     * the blocks on the source heap, busy or free. Then go back through
     * the source free list and free the corresponding blocks on the
     * new heap. You will then have two heaps with the same lay-out of
     * free and busy blocks. However, doing this will cause a huge overcommit
     * spike when block (3) gets allocated and then freed. To avoid this,
     * when allocating the blocks we first allocate the blocks from area (5)
     * then the blocks in (2) which will naturally leave us with a big
     * free block at (3) like there should be without causing a spike.
     * This scheme will only work if (3) is the last block on the free
     * list, otherwise the free list will not be in the correct order when
     * we are done. "phend" will be pointed to block (3) if it is
     * in the correct place for us to do our trick, otherwise we set it to (4).
     * "ph" will start just past (4).
     */
    ph = (struct freeheap_s *)((char *)hheap + hheap->hi_cbreserve - PAGESIZE +
                               sizeof(struct freeheap_s *) +
                               sizeof(struct busyheap_s));
    phend = hheap->hi_freelist[0].fl_header.fh_blink;
    /*
     * If the last block on the free list isn't just before (4) then
     * reset our variables as per comment above.
     */
    if ((char *)phend + hpSize(phend)+sizeof(struct busyheap_s) != (char *)ph) {
        phend = (struct freeheap_s *)((char *)ph - sizeof(struct busyheap_s));
        mmAssert(hpIsBusySignatureValid((struct busyheap_s *)ph) &&
                 hpSize(ph) == sizeof(struct busyheap_s),
                 "HPClone: bad small busy block");
    }
    /*
     * Now walk through the old heap and allocate corresponding blocks
     * on the new heap. First we allocate the blocks on the last page.
     * (Size 0 means we hit the end sentinel.)
     */
    for (; hpSize(ph) != 0; (char *)ph += hpSize(ph)) {
        if (HPAlloc(pmem,hpSize(ph)-sizeof(struct busyheap_s),HP_ZEROINIT)==0){
            mmAssert(0, "HPClone: alloc off last page failed"); /* already committed */
        }
    }
    /*
     * Then allocate the blocks in the first part of heap, except maybe
     * the big free block (3) if we are set up that way from above.
     */
    ph = (struct freeheap_s *)(hheap + 1);
    for (; ph != phend; (char *)ph += hpSize(ph)) {
        if (HPAlloc(pmem, hpSize(ph) - sizeof(struct busyheap_s),
                    HP_ZEROINIT) == 0) {
            goto error;
        }
    }
    /*
     * Now go back through the heap and free up all the blocks that are
     * free on the old heap. We have to do this by walking the old
     * heap's free list backwards, so the free blocks are in the same
     * order on both heaps.
     */
    ph = hheap->hi_freelist[0].fl_header.fh_blink;
    for (; ph != &(hheap->hi_freelist[0].fl_header); ph = ph->fh_blink) {
        mmAssert(hpIsFreeSignatureValid(ph), "HPClone: bad block on free list");
        /*
         * Skip freeing any list heads and the "pfhbigfree" if we are
         * special casing him
         */
        if (hpSize(ph) != 0 && ph != phend) {
            /* translate source-heap offset into the clone's address space */
            if (HPFree(pmem, (char *)pmem + sizeof(struct busyheap_s) +
                       (unsigned long)ph - (unsigned long)hheap) == 0) {
                mmAssert(0, "HPClone: HPFree failed");
            }
        }
    }
#ifdef HPDEBUG
    /*
     * Now let's verify that they really came out the same
     */
    for (ph = (struct freeheap_s *)(hheap+1),
         phnew = (struct freeheap_s *)(pmem + 1);
         hpSize(ph) != 0;
         (char *)phnew += hpSize(ph), (char *)ph += hpSize(ph)) {
        mmAssert(ph->fh_size == phnew->fh_size, "HPClone: mis-compare");
    }
#endif
clearsem:
    hpClearSem(hheap, 0);
exit:
    return(pmem);
error:
    pmem = 0;
    goto clearsem;
}
  460. #ifndef WIN32
  461. #pragma VMM_PAGEABLE_DATA_SEG
  462. #pragma VxD_W16_CODE_SEG
  463. #endif
  464. /***LP hpWhichHeap - figure out which Dos386 heap a pointer came from
  465. *
  466. * ENTRY: p - pointer to heap block
  467. * EXIT: handle to appropriate heap or 0 if invalid address
  468. */
  469. HHEAP INTERNAL
  470. hpWhichHeap(ULONG p)
  471. {
  472. struct heapseg_s *pseg;
  473. /*
  474. * Check the fixed heap first, because it is sadly the most commonly used
  475. */
  476. pseg = (struct heapseg_s *)hFixedHeap;
  477. do {
  478. if (p > (ULONG)pseg && p < (ULONG)pseg + pseg->hs_cbreserve) {
  479. return(hFixedHeap);
  480. }
  481. pseg = pseg->hs_psegnext;
  482. } while (pseg != 0);
  483. /*
  484. * Then check the swappable heap
  485. */
  486. pseg = (struct heapseg_s *)hSwapHeap;
  487. do {
  488. if (p > (ULONG)pseg && p < (ULONG)pseg + pseg->hs_cbreserve) {
  489. return(hSwapHeap);
  490. }
  491. pseg = pseg->hs_psegnext;
  492. } while (pseg != 0);
  493. /*
  494. * Finally the init heap. Note that the init heap isn't growable, so we
  495. * can just do a simple range check rather than the segment looping we
  496. * do for the other heaps.
  497. */
  498. if (p > (ULONG)hInitHeap && p < InitHeapEnd) {
  499. return(hInitHeap);
  500. }
  501. /*
  502. * If we fall down to here, the address wasn't on any of the heaps
  503. */
  504. mmError(ERROR_INVALID_ADDRESS, "hpWhichHeap: block not on heap");
  505. return(0);
  506. }
  507. #endif
  508. /***EP HeapFree or HPFree - free a heap block
  509. *
  510. * Mark the passed in block as free and insert it on the appropriate
  511. * free list.
  512. *
  513. * ENTRY: hheap - pointer to base of heap
  514. * flags (ring 3 only) - HP_NOSERIALIZE
  515. * pblock - pointer to data of block to free (i.e., just past
  516. * busyheap_s structure)
  517. * EXIT: 0 if error (bad hheap or pblock) or 1 if success
  518. */
#ifdef WIN32
BOOL APIENTRY
HeapFreeInternal(HHEAP hheap, DWORD flags, LPSTR lpMem)
#else
unsigned INTERNAL
HPFree(HHEAP hheap, void *lpMem)
#endif
{
    unsigned long cb;               /* size of the block being freed */
    struct freeheap_s *pblock;      /* block header (starts as busy header) */

    /* back up from the caller's data pointer to the block header */
    pblock = (struct freeheap_s *)((struct busyheap_s *)lpMem - 1);
    /* validate the heap/block and take the heap semaphore */
    if (hpTakeSem(hheap, pblock, flags) == 0) {
        return(0);
    }
    cb = hpSize(pblock);
    /*
     * Set all the high flag bits in the size field; hpFreeSub builds a
     * proper free header later. NOTE(review): presumably this poisons the
     * header against stale use while we coalesce — confirm against heap.h.
     */
    pblock->fh_size |= 0xf0000000;
#ifdef HPMEASURE
    if (hheap->hi_flags & HP_MEASURE) {
        hpMeasureItem(hheap, cb | HPMEASURE_FREE);
    }
#endif
    /*
     * If the previous block is free, coalesce with it.
     */
    if (pblock->fh_size & HP_PREVFREE) {
        /* word just before a block with HP_PREVFREE points at the free block */
        (unsigned)pblock = *((unsigned *)pblock - 1); /* point to prev block */
        cb += hpSize(pblock);
        /*
         * Remove the previous block from the free list so we can re-insert
         * the combined block in the right place later
         */
        hpRemove(pblock);
    }
    /*
     * Build a free header for the block and insert him on the appropriate
     * free list. This routine also marks the following block as HP_PREVFREE
     * and performs coalescing with the following block.
     */
    hpFreeSub(hheap, pblock, cb, HP_DECOMMIT);
    hpClearSem(hheap, flags);
    return(1);
}
  562. /***EP HPAlloc - allocate a heap block
  563. *
  564. * ENTRY: hheap - pointer to base of heap
  565. * cb - size of block requested
  566. * flags - HP_ZEROINIT - zero initialize new block
  567. * EXIT: none
  568. */
void * INTERNAL
HPAlloc(HHEAP hheap, unsigned long cb, unsigned long flags)
{
    struct freelist_s *pfreelist;   /* free list head to search from */
    struct freeheap_s *pfh;         /* cursor over free blocks */
    struct freeheap_s *pfhend;      /* first list head == end of circular list */
    struct heapseg_s *pseg;         /* newly reserved heap segment (grow path) */
    unsigned cbreserve;             /* bytes to reserve for a new segment */

    /*
     * Detect really big sizes here so that we don't have to worry about
     * rounding up big numbers to 0
     */
    if (cb > hpMAXALLOC) {
        mmError(ERROR_NOT_ENOUGH_MEMORY, "HPAlloc: request too big\n\r");
        goto error;
    }
    if (hpTakeSem(hheap, 0, flags) == 0) {
        goto error;
    }
    /* round the request up to a legal heap block size (includes header) */
    cb = hpRoundUp(cb);
#ifdef HPMEASURE
    if (hheap->hi_flags & HP_MEASURE) {
        hpMeasureItem(hheap, cb);
    }
#endif
restart:
    /*
     * Find the first free list header that will contain a block big
     * enough to satisfy this allocation.
     *
     * NOTE: at the cost of heap fragmentation, we could change this
     * to allocate from the first free list that is guaranteed to
     * have a block big enough as its first entry. That would
     * cut down paging on swappable heaps.
     */
    for (pfreelist=hheap->hi_freelist; cb > pfreelist->fl_cbmax; ++pfreelist) {
    }
    /*
     * Look for a block big enough for us on the list head returned.
     * Even if we follow the advice of the NOTE above and pick a list
     * that will definitely contain a block big enough for us we still
     * have to do this scan to pass by any free list heads in the
     * way (they have a size of 0, so we will never try to allocate them).
     *
     * We know we have reached the end of the free list when we get to
     * the first free list head (since the list is circular).
     */
    pfh = pfreelist->fl_header.fh_flink;
    pfhend = &(hheap->hi_freelist[0].fl_header);
    for (; pfh != pfhend; pfh = pfh->fh_flink) {
        /*
         * Did we find a block big enough to hold our request?
         */
        if (hpSize(pfh) >= cb) {
            /*
             * At this point we have a block of free memory big enough to
             * use in pfh.
             */
            {
                struct busyheap_s *pbh = (struct busyheap_s *)pfh;
                /* carve our piece off the free block (commits pages, may
                 * hand back slightly more than asked for) */
                if ((cb = hpCarve(hheap, pfh, cb, flags)) == 0) {
                    goto errorclearsem;
                }
                hpSetBusySize(pbh, cb);
#ifdef HPDEBUG
                pbh->bh_signature = BH_SIGNATURE;
                pbh->bh_eip = hpGetAllocator();
                pbh->bh_tid = hpGetTID();
                pbh->bh_sum = hpSum(pbh, BH_CDWSUM);
#endif
                hpClearSem(hheap, flags);
                /* return pointer just past the busy header */
                return(pbh + 1);
            }
        }
    }
    /*
     * If we fall out of the above loop, there are no blocks available
     * of the correct size.
     */
    /*
     * If the heap isn't growable there is nothing we can do but return error.
     */
    if ((hheap->hi_flags & HP_GROWABLE) == 0) {
        mmError(ERROR_NOT_ENOUGH_MEMORY,"HPAlloc: not enough room on heap\n");
        goto errorclearsem;
    }
    /*
     * The heap is growable but all the existing heap segments are full.
     * So reserve a new segment here. The "PAGESIZE*2" below will take care
     * of the header on the new segment and the special final page, leaving
     * a big enough free block for the actual request.
     */
    cbreserve = max(((cb + PAGESIZE*2) & ~PAGEMASK), hpCBRESERVE);
    if (((unsigned)pseg =
#ifdef WIN32
        /* shared heaps live above MINSHAREDLADDR and need shared reserves */
        PageReserve(((unsigned)hheap >= MINSHAREDLADDR) ? PR_SHARED : PR_PRIVATE,
                    cbreserve / PAGESIZE, PR_STATIC)) == -1) {
        mmError(ERROR_NOT_ENOUGH_MEMORY, "HPAlloc: reserve failed\n");
#else
        PageReserve(PR_SYSTEM, cbreserve / PAGESIZE, PR_STATIC |
                    ((hheap->hi_flags & HP_LOCKED) ? PR_FIXED :0))) == -1) {
#endif
        goto errorclearsem;
    }
    /*
     * Initialize the new segment as a heap (including linking its initial
     * free block into the heap).
     */
    if (HPInit(hheap, (HHEAP)pseg, cbreserve, hheap->hi_flags | HP_INITSEGMENT) == 0) {
        goto errorfree;
    }
    /*
     * Link the new heap segment onto the list of segments.
     */
    pseg->hs_psegnext = hheap->hi_psegnext;
    hheap->hi_psegnext = pseg;
    /*
     * Now go back up to restart our search, we should find the new segment
     * to satisfy the request.
     */
    goto restart;
    /*
     * Code below this comment is used only in the error path.
     */
errorfree:
    PageFree((unsigned)pseg, PR_STATIC);
errorclearsem:
    hpClearSem(hheap, flags);
#ifdef WIN32
    /* caller asked for exceptions instead of null returns */
    if ((flags | hheap->hi_flags) & HP_EXCEPT) {
        RaiseException(STATUS_NO_MEMORY, 0, 1, &cb);
    }
#endif
error:
    return(0);
}
  705. /***EP HPReAlloc - reallocate a heap block
  706. *
  707. * ENTRY: hheap - pointer to base of heap
  708. * pblock - pointer to data of block to reallocate
  709. * (just past the busyheap_s structure)
  710. * cb - new size requested (in bytes)
  711. * flags - HP_ZEROINIT - on grows, fill new area with 0s
  712. * HP_MOVEABLE - on grows, moving of block is allowed
  713. * HP_NOCOPY - don't preserve old block's contents
  714. * EXIT: pointer to reallocated block or 0 if failure
  715. */
void * INTERNAL
HPReAlloc(HHEAP hheap, void *pblock, unsigned long cb, unsigned long flags)
{
    void *pnew;                     /* replacement block when we must move */
    unsigned oldsize;               /* current block size incl. header */
    struct freeheap_s *pnext;       /* block immediately following ours */
    struct busyheap_s *pbh;         /* header of the block being resized */

    /*
     * Detect really big sizes here so that we don't have to worry about
     * rounding up big numbers to 0
     */
    if (cb > hpMAXALLOC) {
        mmError(ERROR_NOT_ENOUGH_MEMORY, "HPReAlloc: request too big\n\r");
        goto error;
    }
    pbh = (struct busyheap_s *)pblock - 1; /* point to heap block header */
    if (hpTakeSem(hheap, pbh, flags) == 0) {
        goto error;
    }
    cb = hpRoundUp(cb); /* convert to heap block size */
    oldsize = hpSize(pbh);
    /*
     * Is this a big enough shrink to cause us to carve off the end of
     * the block?
     */
    if (cb + hpMINSIZE <= oldsize) {
        /* give the tail back to the free list and shrink in place */
        hpFreeSub(hheap, (char *)pbh + cb, oldsize - cb, HP_DECOMMIT);
        hpSetSize(pbh, cb);
#ifdef HPDEBUG
        pbh->bh_sum = hpSum(pbh, BH_CDWSUM);
#endif
    /*
     * Is this a grow?
     */
    } else if (cb > oldsize) {
        /*
         * See if there is a next door free block big enough for us to
         * grow into so we can realloc in place.
         */
        pnext = (struct freeheap_s *)((char *)pbh + oldsize);
        if ((pnext->fh_size & HP_FREE) == 0 || hpSize(pnext) < cb - oldsize) {
            /*
             * We have to move the object in order to grow it.
             * Make sure that is ok with the caller first.
             */
            if (flags & HP_MOVEABLE) {
#ifdef HPDEBUG
                /*
                 * On a debug system, remember who allocated this memory
                 * so we don't lose the info when we allocate the new block
                 */
                ULONG eip;
                USHORT tid;
                eip = pbh->bh_eip;
                tid = pbh->bh_tid;
#endif
                /*
                 * The size we have computed in cb includes a heap header.
                 * Remove that since our call to HPAlloc below will
                 * also add on a header.
                 */
                cb -= sizeof(struct busyheap_s);
                /*
                 * If the caller doesn't care about the contents of the
                 * memory block, just allocate a new chunk and free old one
                 */
                if (flags & HP_NOCOPY) {
                    HeapFree(hheap, HP_NOSERIALIZE, pblock);
                    if ((pblock = HPAlloc(hheap, cb,
                                          flags | HP_NOSERIALIZE)) == 0) {
                        dprintf(("HPReAlloc: HPAlloc failed 1\n"));
                        goto errorclearsem;
                    }
                /*
                 * If the caller cares about his data, allocate a new
                 * block and copy the old stuff into it
                 */
                } else {
                    if ((pnew = HPAlloc(hheap, cb, flags | HP_NOSERIALIZE))==0){
                        dprintf(("HPReAlloc: HPAlloc failed 2\n"));
                        goto errorclearsem;
                    }
                    memcpy(pnew, pblock, oldsize - sizeof(struct busyheap_s));
                    HeapFree(hheap, HP_NOSERIALIZE, pblock);
                    pblock = pnew;
                }
#ifdef HPDEBUG
                /*
                 * Put back in the original owner
                 */
                pbh = (((struct busyheap_s *)pblock) - 1);
                pbh->bh_eip = eip;
                pbh->bh_tid = tid;
                pbh->bh_sum = hpSum(pbh, BH_CDWSUM);
#endif
            /*
             * Moving of the block is not allowed. Return error.
             */
            } else {
                mmError(ERROR_LOCKED,"HPReAlloc: fixed block\n");
                goto errorclearsem;
            }
        /*
         * We can grow in place into the following block
         */
        } else {
            /* carve just the extra bytes off the head of the next free block */
            if ((cb = hpCarve(hheap, pnext, cb - oldsize, flags)) == 0) {
                goto errorclearsem;
            }
            hpSetSize(pbh, oldsize + cb);
#ifdef HPDEBUG
            pbh->bh_sum = hpSum(pbh, BH_CDWSUM);
#endif
        }
    /*
     * This is place to put code for nop realloc if we ever have any.
     */
    } else {
    }
    hpClearSem(hheap, flags);
exit:
    return(pblock);
errorclearsem:
    hpClearSem(hheap, flags);
error:
    pblock = 0;
    goto exit;
}
  844. #ifndef WIN32
  845. #pragma VMM_PAGEABLE_DATA_SEG
  846. #pragma VxD_RARE_CODE_SEG
  847. #endif
  848. /***EP HeapSize or HPSize - return size of a busy heap block (less any header)
  849. *
  850. * ENTRY: hheap - pointer to base of heap
  851. * flags (ring 3 only) - HP_NOSERIALIZE
  852. * pdata - pointer to heap block (just past busyheap_s struct)
  853. * EXIT: size of block in bytes, or 0 if error
  854. */
  855. #ifdef WIN32
  856. DWORD APIENTRY
  857. HeapSize(HHEAP hheap, DWORD flags, LPSTR lpMem)
  858. #else
  859. unsigned INTERNAL
  860. HPSize(HHEAP hheap, void *lpMem)
  861. #endif
  862. {
  863. struct busyheap_s *pblock;
  864. unsigned long cb;
  865. pblock = ((struct busyheap_s *)lpMem) - 1; /* point to heap block header*/
  866. if (hpTakeSem(hheap, pblock, flags) == 0) {
  867. return(0);
  868. }
  869. cb = hpSize(pblock) - sizeof(struct busyheap_s);
  870. hpClearSem(hheap, flags);
  871. return(cb);
  872. }
  873. #ifndef WIN32
  874. #pragma VMM_PAGEABLE_DATA_SEG
  875. #pragma VxD_W16_CODE_SEG
  876. #endif
/***LP hpCarve - carve off a chunk from the top of a free block
 *
 *      This is a low level worker routine and several very specific
 *      entry conditions must be true:
 *
 *          The free block is valid.
 *          The free block is at least as big as the chunk you want to carve.
 *          The heap semaphore is taken.
 *
 *      No header is created for the carved-off piece.
 *
 *      ENTRY:  hheap - pointer to base of heap
 *              pfh - pointer to header of free block to carve from
 *              cb - size of block to carve out
 *              flags - HP_ZEROINIT
 *      EXIT:   count of bytes in carved off block (may differ from cb if
 *              free block wasn't big enough to make a new free block from
 *              its end) or 0 if error (out of memory on commit)
 */
unsigned INTERNAL
hpCarve(HHEAP hheap, struct freeheap_s *pfh, unsigned cb, unsigned flags)
{
    unsigned cbblock = hpSize(pfh);             /* full size of the free block */
    unsigned pgcommit, pgnextcommit, pglastcommit;  /* page numbers, not addresses */
    unsigned fcommitzero;                       /* HP_ZEROINIT if we commit zero pages */
    /*
     * For multi-page HP_ZEROINIT blocks, it would be nice to commit
     * zero-filled pages rather than use memset because then we wouldn't have
     * to make the new pages in the block present and dirty unless and until
     * the app really wanted to use them (saving on working set and page outs).
     * This could be a huge win if someone is allocating big objects.
     * However, we have the problem of what to do about a new partial page at
     * the end of a heap block.  If we commit it as zero-filled, then we are
     * zeroing more than we have to (the part of the page not used for this
     * block).  If we commit it un-initialized, then we have to make two
     * separate commit calls, one for the zero-filled pages and one for the
     * last page.  Rather than spend the time of two commit calls and the logic
     * to figure out when to make them, we always commit zero-filled pages for
     * everything.  Better to zero too much than too little by mistake.  We
     * reduce the percentage cost of the mistake case by only doing this
     * optimization for large blocks.
     * Here we decide if the block is big enough to commit zero-filled pages.
     */
    if ((flags & HP_ZEROINIT) && cb > 4*PAGESIZE) {
        fcommitzero = HP_ZEROINIT;
    } else {
        fcommitzero = 0;
    }
    mmAssert(cbblock >= cb, "hpCarve: carving out too big a block\n");
    mmAssert((pfh->fh_size & HP_FREE), "hpCarve: target not free\n");
    /*
     * Since pfh points to a valid free block header, we know we have
     * committed memory up through the end of the fh structure.  However,
     * the page following the one containing the last byte of the fh
     * structure might not be committed.  We set "pgcommit" to that
     * possibly uncommitted page.
     */
                /*last byte in fh*/ /*next pg*/   /*its pg #*/
    pgcommit = ((((unsigned)(pfh+1)) - 1 + PAGESIZE) / PAGESIZE);
    /*
     * pgnextcommit is the page number of the page just past this free block
     * that we know is already committed.  Since free blocks have a
     * pointer back to the header in the last dword of the free block,
     * we know that the first byte of this dword is where we are guaranteed
     * to have committed memory.
     */
    pgnextcommit = ((unsigned)pfh + cbblock -
                    sizeof(struct freeheap_s *)) / PAGESIZE;
    /*
     * If the block we found is too big, carve off the end into
     * a new free block.
     */
    if (cbblock >= cb + hpMINSIZE) {
        /*
         * We need to commit the memory for the new block we are allocating
         * plus enough space on the end for the following free block header
         * that hpFreeSub will make.  The page number for that last page
         * we need to commit is pglastcommit.  If we know that pglastcommit
         * is already committed because it sits on the same page as
         * the start of the next block (pgnextcommit), back it up one.
         */
        pglastcommit = ((unsigned)pfh + cb + sizeof(struct freeheap_s) - 1) / PAGESIZE;
        if (pglastcommit == pgnextcommit) {
            pglastcommit--;
        }
        if (hpCommit(pgcommit, pglastcommit - pgcommit + 1,
                     fcommitzero | hheap->hi_flags) == 0) {
            goto error;
        }
        /*
         * Remove the original free block from the free list.  We need to do
         * this before the hpFreeSub below because it might trash our current
         * free links.
         */
        hpRemove(pfh);
        /*
         * Link the portion we are not using onto the free list
         */
        hpFreeSub(hheap, (struct freeheap_s *)((char *)pfh + cb), cbblock-cb,0);
    /*
     * We are using the whole free block for our purposes.
     */
    } else {
        if (hpCommit(pgcommit, pgnextcommit - pgcommit,
                     fcommitzero | hheap->hi_flags) == 0) {
            goto error;
        }
        /*
         * Remove the original free block from the free list.
         */
        hpRemove(pfh);
        /*
         * Clear the PREVFREE bit from the next block since we are no longer
         * free.  Note cb is bumped to the whole block size, which is what
         * we return to the caller.
         */
        cb = cbblock;
        ((struct busyheap_s *)((char *)pfh + cb))->bh_size &= ~HP_PREVFREE;
#ifdef HPDEBUG
        /* re-checksum the following block since we changed its size bits */
        ((struct busyheap_s *)((char *)pfh + cb))->bh_sum =
            hpSum((struct busyheap_s *)((char *)pfh + cb), BH_CDWSUM);
#endif
    }
    /*
     * Zero-fill the block if requested and return
     */
    if (flags & HP_ZEROINIT) {
        /*
         * If fcommitzero is set, we have multi-page heap object with the
         * newly committed pages already set up to be zero-filled.
         * So we only have to memset the partial page at the start of the
         * block (up to the first page we committed) and maybe the partial
         * page at the end.
         */
        if (fcommitzero) {
            memset(pfh, 0, (pgcommit * PAGESIZE) - (unsigned)pfh);
            /*
             * We have to zero the partial end page of this block if we didn't
             * commit the page freshly this time.
             */
            if ((unsigned)pfh + cb > pgnextcommit * PAGESIZE) {
                memset((PVOID)(pgnextcommit * PAGESIZE), 0,
                       (unsigned)pfh + cb - (pgnextcommit * PAGESIZE));
            }
        /*
         * If the block fits on one page, just fill the whole thing
         */
        } else {
            memset(pfh, 0, cb);
        }
#ifdef HPDEBUG
    /* debug builds poison non-zeroinit allocations with 0xcc */
    } else {
        memset(pfh, 0xcc, cb);
#endif
    }
exit:
    return(cb);
error:
    cb = 0;
    goto exit;
}
/***LP hpCommit - commit new pages of the right type into the heap
 *
 *      The new pages aren't initialized in any way.
 *      The pages getting committed must currently be uncommitted.
 *      Negative values are allowed for the "npages" parameter, they
 *      are treated the same as 0 (a nop).
 *
 *      ENTRY:  page - starting page number to commit
 *              npages - number of pages to commit (may be negative or zero)
 *              flags - HP_LOCKED: commit the new pages as fixed (otherwise
 *                                 they will be swappable)
 *                      HP_ZEROINIT: commit the new pages as zero-initialized
 *      EXIT:   non-zero if success, else 0 if error
 */
unsigned INTERNAL
hpCommit(unsigned page, int npages, unsigned flags)
{
    unsigned rc = 1;            /* assume success (npages <= 0 is a nop) */
    if (npages > 0) {
#ifdef HPDEBUG
        MEMORY_BASIC_INFORMATION mbi;
        /*
         * All the pages should be currently reserved but not committed
         * or else our math in hpCarve is off.
         */
        PageQuery(page * PAGESIZE, &mbi, sizeof(mbi));
#ifdef WIN32
        mmAssert(mbi.State == MEM_RESERVE &&
                 mbi.RegionSize >= (unsigned)npages * PAGESIZE,
                 "hpCommit: range not all reserved\n");
#else
        mmAssert(mbi.mbi_State == MEM_RESERVE &&
                 mbi.mbi_RegionSize >= (unsigned)npages * PAGESIZE,
                 "hpCommit: range not all reserved");
#endif
#endif
        /*
         * The page-type argument starts as PD_NOINIT (or PD_FIXED on ring 0
         * when HP_LOCKED) and is adjusted down to PD_ZEROINIT when the
         * caller asked for zero-filled pages.  Ring 0 additionally passes
         * PC_FIXED/PC_PRESENT commit flags for locked heaps.
         */
        rc = PageCommit(page, npages,
                        (
#ifndef WIN32
                         (flags & HP_LOCKED) ? PD_FIXED :
#endif
                         PD_NOINIT) -
                        ((flags & HP_ZEROINIT) ? (PD_NOINIT - PD_ZEROINIT) : 0),
                        0,
#ifndef WIN32
                        ((flags & HP_LOCKED) ? PC_FIXED : 0) |
                        PC_PRESENT |
#endif
                        PC_STATIC | PC_USER | PC_WRITEABLE);
#ifdef WIN32
        if (rc == 0) {
            mmError(ERROR_NOT_ENOUGH_MEMORY, "hpCommit: commit failed\n");
        }
#endif
    }
    return(rc);
}
/***LP hpFreeSub - low level block free routine
 *
 *      This routine inserts a block of memory on the free list with no
 *      checking for block validity.  It handles coalescing with the
 *      following block but not the previous one.  The block must also
 *      be big enough to hold a free header.  The heap semaphore must
 *      be taken already.  Any existing header information is ignored and
 *      overwritten.
 *
 *      This routine also marks the following block as HP_PREVFREE.
 *
 *      Enough memory must be committed at "pblock" to hold a free header and
 *      a dword must committed at the very end of "pblock".  Any whole pages
 *      in between those areas will be decommitted by this routine.
 *
 *      ENTRY:  hheap - pointer to base of heap
 *              pblock - pointer to memory block
 *              cb - count of bytes in block
 *              flags - HP_DECOMMIT: decommit pages entirely within heap block
 *                                   (must be specified unless pages are known
 *                                   to be already decommitted)
 *      EXIT:   none
 */
void INTERNAL
hpFreeSub(HHEAP hheap, struct freeheap_s *pblock, unsigned cb, unsigned flags)
{
    struct freelist_s *pfreelist;   /* size-bucketed free list head */
    struct freeheap_s *pnext;       /* block following pblock */
    struct freeheap_s *pfhprev;     /* insertion point on free list */
    struct freeheap_s **ppnext;     /* last dword of the free block */
    unsigned pgdecommit, pgdecommitmax;
    unsigned cbfree;
    int cpgdecommit;
    mmAssert(cb >= hpMINSIZE, "hpFreeSub: bad param\n");
    /*
     * If the following block is free, coalesce with it.
     */
    pnext = (struct freeheap_s *)((char *)pblock + cb);
    if (pnext->fh_size & HP_FREE) {
        cb += hpSize(pnext);
        /*
         * Remove the following block from the free list.  We will insert
         * the combined block in the right place later.
         * Here we also set "pgdecommitmax" which is the page just past the
         * header of the following free block we are coalescing with.
         */
        hpRemove(pnext);
        pgdecommitmax = ((unsigned)(pnext+1) + PAGEMASK) / PAGESIZE;
        pnext = (struct freeheap_s *)((char *)pblock + cb); /* recompute */
    } else {
        /* effectively "no limit"; min() below will pick the real bound */
        pgdecommitmax = 0x100000;
    }
#ifdef HPDEBUG
    /*
     * In debug we fill the free block with here with the byte
     * 0xfe which happens to be nice invalid value either if excecuted
     * or referenced as a pointer.  We only fill up through the first
     * page boundary because I don't want to deal with figuring out
     * which pages are committed and which not.
     */
    memset(pblock, 0xfe, min(cb, (PAGESIZE - ((unsigned)pblock & PAGEMASK))));
#endif
    /*
     * Decommit any whole pages within this free block.  We need to be
     * careful not to decommit either part of our heap header for this block
     * or the back-pointer to the header we store at the end of the block.
     * It would be nice if we could double check our math by making sure
     * that all of the pages we are decommitting are currently committed
     * but we can't because we might be either carving off part of a currently
     * free block or we might be coalescing with other already free blocks.
     */
    ppnext = (struct freeheap_s **)pnext - 1;
    if (flags & HP_DECOMMIT) {
                     /*last byte in fh*/  /*next pg*/   /*its pg #*/
        pgdecommit = ((unsigned)(pblock+1) - 1 + PAGESIZE) / PAGESIZE;
        /*
         * This max statement will keep us from re-decommitting the pages
         * of any block we may have coalesced with above.
         */
        pgdecommitmax = min(pgdecommitmax, ((unsigned)ppnext / PAGESIZE));
        cpgdecommit = pgdecommitmax - pgdecommit;
        if (cpgdecommit > 0) {
#ifdef HPDEBUG
            unsigned tmp =
#endif
            PageDecommit(pgdecommit, cpgdecommit, PC_STATIC);
#ifdef HPDEBUG
            mmAssert(tmp != 0, "hpFreeSub: PageDecommit failed\n");
#endif
        }
#ifdef HPDEBUG
    /*
     * If the caller didn't specify HP_DECOMMIT verify that all the pages
     * are already decommitted.  (Debug-only else branch; retail builds
     * simply trust the caller.)
     */
    } else {
        pgdecommit = ((unsigned)(pblock+1) - 1 + PAGESIZE) / PAGESIZE;
        cpgdecommit = ((unsigned)ppnext / PAGESIZE) - pgdecommit;
        if (cpgdecommit > 0) {
            MEMORY_BASIC_INFORMATION mbi;
            PageQuery(pgdecommit * PAGESIZE, &mbi, sizeof(mbi));
#ifdef WIN32
            mmAssert(mbi.State == MEM_RESERVE &&
                     mbi.RegionSize >= (unsigned)cpgdecommit * PAGESIZE,
                     "hpFreeSub: range not all reserved\n");
#else
            mmAssert(mbi.mbi_State == MEM_RESERVE &&
                     mbi.mbi_RegionSize >= (unsigned)cpgdecommit * PAGESIZE,
                     "hpFreeSub: range not all reserved");
#endif /*WIN32*/
        }
#endif /*HPDEBUG*/
    }
    /*
     * Point the last dword of the new free block to its header and
     * mark the following block as HP_PREVFREE;
     */
    *ppnext = pblock;
    pnext->fh_size |= HP_PREVFREE;
#ifdef HPDEBUG
    ((struct busyheap_s *)pnext)->bh_sum = hpSum(pnext, BH_CDWSUM);
#endif
    /*
     * Find the appropriate free list to insert the block on.
     * The last free list node should have a size of -1 so don't
     * have to count to make sure we don't fall off the end of the list
     * heads.
     */
    for (pfreelist=hheap->hi_freelist; cb > pfreelist->fl_cbmax; ++pfreelist) {
    }
    /*
     * Now walk starting from that list head and insert it into the list in
     * sorted order.  A list head has size 0, so cbfree == 0 terminates the
     * scan when we wrap to the next bucket's head.
     */
    pnext = &(pfreelist->fl_header);
    do {
        pfhprev = pnext;
        pnext = pfhprev->fh_flink;
        cbfree = hpSize(pnext);
    } while (cb > cbfree && cbfree != 0);
    /*
     * Insert the block on the free list just after the list head and
     * mark the header as free
     */
    hpInsert(pblock, pfhprev);
    hpSetFreeSize(pblock, cb);
#ifdef HPDEBUG
    pblock->fh_signature = FH_SIGNATURE;
    pblock->fh_sum = hpSum(pblock, FH_CDWSUM);
#endif
    return;
}
/***LP hpTakeSem - get exclusive access to a heap
 *
 *      This routine verifies that the passed in heap header is valid
 *      and takes the semaphore for that heap (if HP_NOSERIALIZE wasn't
 *      specified when the heap was created).  Optionally, it will
 *      also verify the validity of a busy heap block header.
 *
 *      ENTRY:  hheap - pointer to base of heap
 *              pbh - pointer to busy heap block header (for validation)
 *                    or 0 if there is no block to verify
 *              flags (ring 3 only) - HP_NOSERIALIZE
 *      EXIT:   0 if error (bad heap or block header), else 1
 */
#ifdef WIN32
unsigned INTERNAL
hpTakeSem(HHEAP hheap, struct busyheap_s *pbh, unsigned htsflags)
#else
unsigned INTERNAL
hpTakeSem2(HHEAP hheap, struct busyheap_s *pbh)
#endif
{
    struct heapseg_s *pseg;
#ifdef HPDEBUG
    unsigned cb;
#endif
#ifndef WIN32
/* ring 0 has no per-call flags; alias the name so shared code compiles */
#define htsflags 0
    mmAssert(!mmIsSwapping(),
             "hpTakeSem: heap operation attempted while swapping\n");
#endif
#ifdef HPNOTTRUSTED
    /*
     * Verify the heap header.
     */
    if (hheap->hi_signature != HI_SIGNATURE) {
        mmError(ERROR_INVALID_PARAMETER,"hpTakeSem: bad header\n");
        goto error;
    }
#else
    pbh;                        /* dummy reference to keep compiler happy */
    cb;                         /* dummy reference to keep compiler happy */
#endif
    /*
     * Do the actual semaphore taking
     */
    if (((htsflags | hheap->hi_flags) & HP_NOSERIALIZE) == 0) {
#ifdef WIN32
        EnterMustComplete();
#endif
        hpEnterCriticalSection(hheap);
    }
#ifndef WIN32
    /*
     * This is make sure that if we block while committing or decommitting
     * pages we will not get reentered.
     */
    mmEnterPaging("hpTakeSem: bogus thcb_Paging");
#endif
#ifdef HPNOTTRUSTED
    /*
     * If the caller wanted us to verify a heap block header, do so here.
     */
    if (pbh) {
        /*
         * First check that the pointer is within the specified heap
         */
        pseg = (struct heapseg_s *)hheap;
        do {
            if ((char *)pbh > (char *)pseg &&
                (char *)pbh < (char *)pseg + pseg->hs_cbreserve) {
                /*
                 * We found the segment containing the block.  Validate that
                 * it actually points to a heap block.  (Debug builds also
                 * check alignment, size sanity and the full signature.)
                 */
                if (!hpIsBusySignatureValid(pbh)
#ifdef HPDEBUG
                    || ((unsigned)pbh & hpGRANMASK) ||
                    (pbh->bh_size & HP_FREE) ||
                    (char *)pbh+(cb = hpSize(pbh)) > (char *)pseg+pseg->hs_cbreserve||
                    (int)cb < hpMINSIZE
                    || pbh->bh_signature != BH_SIGNATURE
#endif
                    ) {
                    goto badaddress;
                } else {
                    goto pointerok;
                }
            }
            pseg = pseg->hs_psegnext;   /* on to next heap segment */
        } while (pseg);
        /*
         * If we fell out of loop, we couldn't find the heap block on this
         * heap.
         */
        goto badaddress;
    }
#endif
pointerok:
#ifdef HPDEBUG
    /*
     * Make sure that only one thread gets in the heap at a time
     */
    if (hheap->hi_thread && hheap->hi_thread != (unsigned)pthCur) {
        dprintf(("WARNING: two threads are using heap %x at the same time.\n",
                 hheap));
        mmError(ERROR_BUSY, "hpTakeSem: re-entered\n\r");
        goto clearsem;
    }
    hheap->hi_thread = (unsigned)pthCur;
    /*
     * Verify the heap is ok.  If hpfParanoid isn't set, only walk the heap
     * every 4th time.
     */
    if (hpfParanoid || (hpWalkCount++ & 0x03) == 0) {
        if (hpWalk(hheap) == 0) {
            mmError(ERROR_INVALID_PARAMETER,"Heap trashed outside of heap code -- someone wrote outside of their block!\n");
            goto clearsem;
        }
    }
#endif
    return(1);
badaddress:
    mmError(ERROR_INVALID_PARAMETER,"hpTakeSem: invalid address passed to heap API\n");
    goto clearsem;
clearsem:
    /* error exit after the semaphore was taken: release it first */
    hpClearSem(hheap, htsflags);
error:
    return(0);
}
  1375. /***LP hpClearSem - give up exclusive access to a heap
  1376. *
  1377. * ENTRY: hheap - pointer to base of heap
  1378. * flags (ring 3 only) - HP_NOSERIALIZE
  1379. * EXIT: none
  1380. */
  1381. #ifdef WIN32
  1382. void INTERNAL
  1383. hpClearSem(HHEAP hheap, unsigned flags)
  1384. #else
  1385. void INTERNAL
  1386. hpClearSem2(HHEAP hheap)
  1387. #endif
  1388. {
  1389. /*
  1390. * Verify the heap is ok
  1391. */
  1392. #ifdef HPDEBUG
  1393. if (hpfParanoid) {
  1394. hpWalk(hheap);
  1395. }
  1396. hheap->hi_thread = 0;
  1397. #endif
  1398. #ifndef WIN32
  1399. mmExitPaging("hpClearSem: bogus thcb_Paging");
  1400. #endif
  1401. /*
  1402. * Free the semaphore
  1403. */
  1404. if (((
  1405. #ifdef WIN32
  1406. flags |
  1407. #endif
  1408. hheap->hi_flags) & HP_NOSERIALIZE) == 0) {
  1409. hpLeaveCriticalSection(hheap);
  1410. #ifdef WIN32
  1411. LeaveMustComplete();
  1412. #endif
  1413. }
  1414. }
  1415. #ifdef HPDEBUG
  1416. #ifndef WIN32
  1417. #pragma VMM_LOCKED_DATA_SEG
  1418. #pragma VMM_LOCKED_CODE_SEG
  1419. #endif
/***LP hpWalk - walk a heap to verify everthing is OK
 *
 *      This routine is "turned-on" if the hpfWalk flag is non-zero.
 *
 *      If hpWalk is detecting an error, you might want to set
 *      hpfTrashStop which enables stopping in the debugger whenever
 *      we detect a trashed heap block and it attempts to print the
 *      trashed address.
 *
 *      ENTRY:  hheap - pointer to base of heap
 *      EXIT:   1 if the heap is OK, 0 if it is trashed
 */
unsigned INTERNAL
hpWalk(HHEAP hheap)
{
    struct heapseg_s *pseg;     /* current heap segment */
    struct freeheap_s *pfh;     /* current block being examined */
    struct freeheap_s *pend;    /* end of reserved range of segment */
    struct freeheap_s *pstart;  /* first block of segment */
    struct freeheap_s *pfhend;  /* free list terminator */
    struct busyheap_s *pnext;   /* block after pfh */
    struct freeheap_s *pprev;   /* block before pfh (for diagnostics) */
    unsigned cbmax;             /* max block size for current free bucket */
    unsigned cheads;            /* free list heads encountered */
    if (hpfWalk) {
        /*
         * First make a sanity check of the header
         */
        if (hheap->hi_signature != HI_SIGNATURE) {
            dprintf(("hpWalk: bad header signature\n"));
            hpTrash(("trashed at %x\n", &hheap->hi_signature));
            goto error;
        }
        if (hheap->hi_sum != hpSum(hheap, HI_CDWSUM)) {
            dprintf(("hpWalk: bad header checksum\n"));
            hpTrash(("trashed between %x and %x\n", hheap, &hheap->hi_sum));
            goto error;
        }
        /*
         * Walk through all the blocks and make sure we get to the end.
         * The last block in the heap should be a busy guy of size 0.
         * (The (unsigned)lvalue casts are an old MSVC extension used
         * throughout this file for pointer arithmetic in byte units.)
         */
        (unsigned)pfh = (unsigned)hheap + sizeof(struct heapinfo_s);
        pseg = (struct heapseg_s *)hheap;
        for (;;) {
            pprev = pstart = pfh;
            (unsigned)pend = (unsigned)pseg + pseg->hs_cbreserve;
            for (;; (unsigned)pfh += hpSize(pfh)) {
                if (pfh < pstart || pfh >= pend) {
                    dprintf(("hpWalk: bad block address\n"));
                    hpTrash(("trashed addr %x\n", pprev));
                    goto error;
                }
                /*
                 * If the block is free...
                 */
                if (pfh->fh_signature == FH_SIGNATURE) {
                    if (pfh->fh_sum != hpSum(pfh, FH_CDWSUM)) {
                        dprintf(("hpWalk: bad free block checksum\n"));
                        hpTrash(("trashed addr between %x and %x\n",
                                 pfh, &pfh->fh_sum));
                        goto error;
                    }
                    mmAssert(hpIsFreeSignatureValid(pfh),
                             "hpWalk: bad tiny free sig\n");
                    if (hpfParanoid) {
                        /*
                         * Free blocks should be marked as HP_FREE and the following
                         * block should be marked HP_PREVFREE and be busy.
                         * But skip this check if the following block is 4 bytes
                         * into a page boundary so we don't accidentally catch
                         * the moment in HPInit where we have two adjacent
                         * free blocks for a minute.  Any real errors that this
                         * skips will probably be caught later on.
                         */
                        pnext = (struct busyheap_s *)((char *)pfh + hpSize(pfh));
                        if (((unsigned)pnext & PAGEMASK) != sizeof(struct freeheap_s *) &&
                            ((pfh->fh_size & HP_FREE) == 0 ||
                             (pnext->bh_size & HP_PREVFREE) == 0 ||
                             pnext->bh_signature != BH_SIGNATURE)) {
                            dprintf(("hpWalk: bad free block\n"));
                            hpTrash(("trashed addr near %x or %x or %x\n",pprev, pfh, pnext));
                            goto error;
                        }
                        /*
                         * Also verify that a free block is linked on the free list
                         */
                        if ((pfh->fh_flink->fh_size & HP_FREE) == 0 ||
                            pfh->fh_flink->fh_blink != pfh ||
                            (pfh->fh_blink->fh_size & HP_FREE) == 0 ||
                            pfh->fh_blink->fh_flink != pfh) {
                            dprintf(("hpWalk: free block not in free list properly\n"));
                            hpTrash(("trashed addr probably near %x or %x or %x\n", pfh, pfh->fh_blink, pfh->fh_flink));
                            goto error;
                        }
                    }
                /*
                 * Busy blocks should not be marked HP_FREE and if they are
                 * marked HP_PREVFREE the previous block better be free.
                 */
                } else if (pfh->fh_signature == BH_SIGNATURE) {
                    if (((struct busyheap_s *)pfh)->bh_sum != hpSum(pfh, BH_CDWSUM)) {
                        dprintf(("hpWalk: bad busy block checksum\n"));
                        hpTrash(("trashed addr between %x and %x\n",
                                 pfh, &((struct busyheap_s *)pfh)->bh_sum));
                        goto error;
                    }
                    mmAssert(hpIsBusySignatureValid((struct busyheap_s *)pfh),
                             "hpWalk: bad tiny busy sig\n");
                    if (hpfParanoid) {
                        if (pfh->fh_size & HP_FREE) {
                            dprintf(("hpWalk: busy block marked free\n"));
                            hpTrash(("trashed addr %x\n", pfh));
                            goto error;
                        }
                        /*
                         * Verify that the HP_PREVFREE bit is set only when
                         * the previous block is free, and vice versa
                         * But skip this check if the following block is 4 bytes
                         * into a page boundary so we don't accidentally catch
                         * the moment in HPInit where we have two adjacent
                         * free blocks for a minute.  Any real errors that this
                         * skips will probably be caught later on.
                         */
                        if (pfh->fh_size & HP_PREVFREE) {
                            if (pprev->fh_signature == FH_SIGNATURE) {
                                if (*((struct freeheap_s **)pfh - 1) != pprev) {
                                    dprintf(("hpWalk: free block tail doesn't point to head\n"));
                                    hpTrash(("trashed at %x\n", (unsigned)pfh - 4));
                                    goto error;
                                }
                            } else {
                                dprintf(("HP_PREVFREE erroneously set\n"));
                                hpTrash(("trashed at %x\n", pfh));
                                goto error;
                            }
                        } else if (pprev->fh_signature == FH_SIGNATURE &&
                                   ((unsigned)pfh & PAGEMASK) != sizeof(struct freeheap_s *)) {
                            dprintf(("hpWalk: HP_PREVFREE not set\n"));
                            hpTrash(("trashed addr %x\n", pfh));
                            goto error;
                        }
                    }
                /*
                 * The block should have had one of these signatures!
                 */
                } else {
                    dprintf(("hpWalk: bad block signature\n"));
                    hpTrash(("trashed addr %x\n",pfh));
                    goto error;
                }
                /*
                 * We are at the end of the heap blocks when we hit one with
                 * a size of 0 (the end sentinel).
                 */
                if (hpSize(pfh) == 0) {
                    break;
                }
                pprev = pfh;
            }
            if ((unsigned)pfh != (unsigned)pend - sizeof(struct busyheap_s) ||
                pfh->fh_signature != BH_SIGNATURE) {
                dprintf(("hpWalk: bad end sentinel\n"));
                hpTrash(("trashed addr between %x and %x\n", pfh, pend));
                goto error;
            }
            /*
             * We are done walking this segment.  If there is another one, go
             * on to it, otherwise, terminate the walk
             */
            pseg = pseg->hs_psegnext;
            if (pseg == 0) {
                break;
            }
            pfh = (struct freeheap_s *)(pseg + 1);
        }
        if (hpfParanoid) {
            /*
             * Walk through the free list.
             * cbmax is the maximum size of block we should find considering
             * the last free list header we ran into.
             * cheads is the number of list heads we found.
             */
            pprev = pfh = hheap->hi_freelist[0].fl_header.fh_flink;
            cbmax = hheap->hi_freelist[0].fl_cbmax;
            cheads = 1;
            pfhend = &(hheap->hi_freelist[0].fl_header);
            for (; pfh != pfhend; pfh = pfh->fh_flink) {
                if (pfh->fh_sum != hpSum(pfh, FH_CDWSUM)) {
                    dprintf(("hpWalk: bad free block checksum 2\n"));
                    hpTrash(("trashed addr between %x and %x\n",
                             pfh, &pfh->fh_sum));
                    goto error;
                }
                mmAssert(hpIsFreeSignatureValid(pfh),
                         "hpWalk: bad tiny free sig 2\n");
                /*
                 * Keep track of the list heads we find (so we know all of them
                 * are on the list) and make sure they are in acsending order.
                 * A pointer inside the heapinfo_s itself must be a list head.
                 */
                if ((HHEAP)pfh >= hheap && (HHEAP)pfh < hheap + 1) {
                    if (hpSize(pfh) != 0) {
                        dprintf(("hpWalk: bad size of free list head\n"));
                        hpTrash(("trashed addr near %x or %x\n", pfh, pprev));
                        /* NOTE(review): no goto error here -- diagnostic
                         * only; confirm this fall-through is intentional */
                    }
                    if (&(hheap->hi_freelist[cheads].fl_header) != pfh) {
                        dprintf(("hpWalk: free list head out of order\n"));
                        hpTrash(("trashed addr probably near %x or %x\n", pfh, &(hheap->hi_freelist[cheads].fl_header)));
                        goto error;
                    }
                    cbmax = hheap->hi_freelist[cheads].fl_cbmax;
                    cheads++;
                /*
                 * Normal free heap block
                 */
                } else {
                    /*
                     * Look through each segment for the block
                     */
                    for (pseg = (struct heapseg_s *)hheap;
                         pseg != 0; pseg = pseg->hs_psegnext) {
                        if ((unsigned)pfh > (unsigned)pseg &&
                            (unsigned)pfh < (unsigned)pseg + pseg->hs_cbreserve) {
                            goto addrok;        /* found the address */
                        }
                    }
                    /* If we fall out pfh isn't within any of our segments */
                    dprintf(("hpWalk: free list pointer points outside heap bounds\n"));
                    hpTrash(("trashed addr probably %x\n", pprev));
                    goto error;
                addrok:
                    if (pfh->fh_signature != FH_SIGNATURE ||
                        hpSize(pfh) > cbmax) {
                        dprintf(("hpWalk: bad free block on free list\n"));
                        hpTrash(("trashed addr probably %x or %x\n", pfh, pprev));
                        goto error;
                    }
                    /*
                     * Since the free list is in sorted order, this block
                     * should be bigger than the previous one.  This check
                     * will also pass ok for list heads since they have
                     * size 0 and everything is bigger than that.
                     * NOTE(review): warning only, no goto error -- confirm
                     * intentional.
                     */
                    if (hpSize(pprev) > hpSize(pfh)) {
                        dprintf(("hpWalk: free list not sorted right\n"));
                        hpTrash(("trashed addr probably %x or %x\n", pfh, pprev));
                    }
                }
                pprev = pfh;
            }
            if (cheads != hpFREELISTHEADS) {
                dprintf(("hpWalk: bad free list head count\n"));
                hpTrash(("trashed somewhere between %x and %x\n", hheap, pend));
                goto error;
            }
        }
    }
    return(1);
error:
    return(0);
}
  1681. /***LP hpSum - compute checksum for a block of memory
  1682. *
  1683. * This routine XORs all of the DWORDs in a block together and
  1684. * then XORs that value with a constant.
  1685. *
  1686. * ENTRY: p - pointer to block to checksum
  1687. * cdw - number of dwords to sum
  1688. * EXIT: computed sum
  1689. */
  1690. unsigned long INTERNAL
  1691. hpSum(unsigned long *p, unsigned long cdw)
  1692. {
  1693. unsigned long sum;
  1694. for (sum = 0; cdw > 0; cdw--, p++) {
  1695. sum ^= *p;
  1696. }
  1697. return(sum ^ 0x17761965);
  1698. }
  1699. #ifdef WIN32
/***LP hpGetAllocator - walk the stack to find who allocated a block
 *
 * This routine is used by HPAlloc to figure out who owns a block of
 * memory that is being allocated. We determine the owner by walking
 * up the stack and finding the first eip that is not inside the
 * memory manager or inside any other module that obfuscates who
 * the real allocator is (such as HMGR, which all GDI allocations
 * go through).
 *
 * The walk relies on every frame having a standard EBP chain and on
 * each known wrapper routine being smaller than the hard-coded byte
 * ranges (0x50/0x200/0x300) checked below; a return address falling
 * inside a wrapper's range causes one more frame to be popped.
 *
 * ENTRY: none
 * EXIT: eip of interesting caller
 */
extern HANDLE APIENTRY LocalAllocNG(UINT dwFlags, UINT dwBytes);
ULONG INTERNAL
hpGetAllocator(void)
{
    ULONG Caller = 0;
    _asm {
        mov edx,[ebp] ; (edx) = HPAlloc ebp
        mov eax,[edx+4] ; (eax) = HPAlloc caller
        ; See if HPAlloc was called directly or from LocalAlloc or HeapAlloc
        ; or PvKernelAlloc
        cmp eax,offset LocalAllocNG
        jb hga4 ; jump to exit if called directly
        cmp eax,offset LocalAllocNG + 0x300
        jb hga20
    hga4:
        cmp eax,offset HeapAlloc
        jb hga6 ; jump to exit if called directly
        cmp eax,offset HeapAlloc + 0x50
        jb hga20
    hga6:
        cmp eax,offset PvKernelAlloc
        jb hga8
        cmp eax,offset PvKernelAlloc + 0x50
        jb hga20
    hga8:
        cmp eax,offset PvKernelAlloc0
        jb hgax
        ; NOTE(review): the upper bound below checks PvKernelAlloc + 0x50,
        ; but this range test starts at PvKernelAlloc0 -- it looks like
        ; "offset PvKernelAlloc0 + 0x50" was intended; confirm against the
        ; link order of PvKernelAlloc0 before changing.
        cmp eax,offset PvKernelAlloc + 0x50
        ja hgax
        ; When we get here, we know HPAlloc was called by LocalAlloc or HeapAlloc
        ; or PvKernelAlloc. See if PvKernelAlloc was called by NewObject or
        ; PlstNew.
    hga20:
        mov edx,[edx] ; (edx) = Local/HeapAlloc ebp
        mov eax,[edx+4] ; (eax) = Local/HeapAlloc caller
        cmp eax,offset NewObject
        jb hga34
        cmp eax,offset NewObject + 0x50
        jb hga40
    hga34:
        cmp eax,offset LocalAlloc
        jb hga36
        cmp eax,offset LocalAlloc+ 0x200
        jb hga40
    hga36:
        cmp eax,offset PlstNew
        jb hgax
        cmp eax,offset PlstNew + 0x50
        ja hgax
    hga40:
        mov edx,[edx] ; (edx) = PlstNew/NewObject ebp
        mov eax,[edx+4] ; (eax) = PlstNew/NewObject caller
        ; One more level: skip over NewNsObject, NewPDB or NewPevt frames.
        cmp eax,offset NewNsObject
        jb hga50
        cmp eax,offset NewNsObject + 0x50
        jb hga60
    hga50:
        cmp eax,offset NewPDB
        jb hga55
        cmp eax,offset NewPDB + 0x50
        jb hga60
    hga55:
        cmp eax,offset NewPevt
        jb hgax
        cmp eax,offset NewPevt + 0x50
        ja hgax
    hga60:
        mov edx,[edx] ; (edx) = NewNsObject/NewPDB/NewPevt ebp
        mov eax,[edx+4] ; (eax) = NewNsObject/NewPDB/NewPevt caller
    hgax:
        mov Caller, eax
    }
    return Caller;
}
  1786. #ifdef HPMEASURE
/* Address of the first allocatable block in the heap: heap header plus
 * one busy-block header.  HPMeasure relies on its buffer living here so
 * HPFlush/hpMeasureItem can recompute its address without storing it. */
#define FIRSTBLOCK(hheap) ((unsigned)(hheap + 1) + sizeof(struct busyheap_s))
/***EP HPMeasure - enable measurement of heap activity.
 *
 * Allocates the sample buffer as the very first block in the heap,
 * verifies the target file can be created, records the filename in
 * the buffer and sets HP_MEASURE in the heap flags.  Must be called
 * before any other allocation is made from the heap.
 *
 * ENTRY: hheap - pointer to base of heap
 * pszFile - name of file to place measurement data in.
 * EXIT: FALSE if error (couldn't allocate buffer)
 */
BOOL APIENTRY
HPMeasure(HHEAP hheap, LPSTR pszFile)
{
    struct measure_s *pMeasure;
    HANDLE hFile;
    BOOL bSuccess = FALSE;

    /* serialize against all other heap operations */
    if (!hpTakeSem(hheap, NULL, 0)) return FALSE;

    /* Allocate the structure & ensure it is the first block in the heap! */
    pMeasure = (struct measure_s *)HPAlloc(hheap, sizeof(struct measure_s), 0);
    if ((unsigned)pMeasure != (unsigned)FIRSTBLOCK(hheap)) {
        /* NOTE(review): on this path (and the CreateFile failure below)
         * the block just allocated is never freed; presumably acceptable
         * for this debug-only code -- confirm. */
        mmError(0, "HPMeasure: Must be called before first heap allocation.\n");
        goto cleanup;
    }
    /* verify the filename is valid and transfer the filename to the buffer */
    hFile = CreateFile(pszFile, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS,
                       FILE_ATTRIBUTE_NORMAL, NULL);
    if ((long)hFile == -1) {   /* -1 == INVALID_HANDLE_VALUE */
        mmError(0, "HPMeasure: The specified file is invalid.\n");
        goto cleanup;
    }
    CloseHandle(hFile);
    /* NOTE(review): unbounded copy -- assumes pszFile fits in
     * pMeasure->szFile; verify callers or bound the copy. */
    lstrcpy(pMeasure->szFile, pszFile);
    /* initialize the buffer variables */
    pMeasure->iCur = 0;
    /* set the measure flag in the heap header */
    hheap->hi_flags |= HP_MEASURE;
    /* Success. */
    bSuccess = TRUE;
cleanup:
    hpClearSem(hheap, 0);
    return bSuccess;
}
/***EP HPFlush - write out contents of sample buffer.
 *
 * Appends the iCur samples accumulated in the measurement buffer
 * (the first block of the heap, set up by HPMeasure) to the file
 * named at HPMeasure time, then empties the buffer.
 *
 * ENTRY: hheap - pointer to base of heap
 * EXIT: FALSE if error (couldn't write data)
 */
BOOL APIENTRY
HPFlush(HHEAP hheap)
{
    BOOL bResult, bSuccess = FALSE;
    HANDLE hFile;
    unsigned uBytesWritten;
    /* measurement buffer always lives in the heap's first block */
    struct measure_s *pMeasure = (struct measure_s *)FIRSTBLOCK(hheap);

    if (!hpTakeSem(hheap, NULL, 0)) return FALSE;

    /* open the file & seek to the end */
    hFile = CreateFile(pMeasure->szFile, GENERIC_WRITE, 0, NULL, OPEN_EXISTING,
                       FILE_ATTRIBUTE_NORMAL, NULL);
    if ((long)hFile == -1) {   /* -1 == INVALID_HANDLE_VALUE */
        mmError(0, "HPFlush: could not open file.\n");
        goto cleanup;
    }
    SetFilePointer(hFile, 0, 0, FILE_END);
    /* write the data out. */
    bResult = WriteFile(hFile, pMeasure->uSamples,
                        pMeasure->iCur * sizeof(unsigned),
                        &uBytesWritten, NULL);
    CloseHandle(hFile);
    if (!bResult) {
        mmError(0, "HPFlush: could not write to file.\n");
        goto cleanup;
    }
    /* Success. */
    bSuccess = TRUE;
cleanup:
    /* clear the buffer -- NOTE(review): this also runs on the failure
     * paths above, so unwritten samples are discarded; confirm intended. */
    pMeasure->iCur = 0;
    hpClearSem(hheap, 0);
    return bSuccess;
}
  1864. /***LP hpMeasureItem - add item to measurement data
  1865. *
  1866. * ENTRY: hheap - pointer to base of heap
  1867. * uItem - piece of data to record
  1868. * EXIT: FALSE if error (couldn't write buffer)
  1869. */
  1870. BOOL PRIVATE
  1871. hpMeasureItem(HHEAP hheap, unsigned uItem)
  1872. {
  1873. struct measure_s *pMeasure = (struct measure_s *)FIRSTBLOCK(hheap);
  1874. /* empty buffer if it's full */
  1875. if (pMeasure->iCur == SAMPLE_CACHE_SIZE) {
  1876. if (!HPFlush(hheap))
  1877. return FALSE;
  1878. }
  1879. /* Add data to the list */
  1880. pMeasure->uSamples[pMeasure->iCur++] = uItem;
  1881. return TRUE;
  1882. }
  1883. #endif
  1884. /* routine by DonC to help debug heap leaks */
  1885. void KERNENTRY
  1886. hpDump(HHEAP hheap, char *where) {
  1887. struct freeheap_s *pfh;
  1888. unsigned avail = 0, acnt = 0;
  1889. unsigned used = 0, ucnt = 0;
  1890. /*
  1891. * Walk through all the blocks and make sure we get to the end.
  1892. * The last block in the heap should be a busy guy of size 0.
  1893. */
  1894. (unsigned)pfh = (unsigned)hheap + sizeof(struct heapinfo_s);
  1895. for (;; (unsigned)pfh += hpSize(pfh)) {
  1896. /*
  1897. * If the block is free...
  1898. */
  1899. if (pfh->fh_signature == FH_SIGNATURE) {
  1900. avail += hpSize(pfh);
  1901. acnt++;
  1902. /*
  1903. * Busy blocks should not be marked HP_FREE and if they are
  1904. * marked HP_PREVFREE the previous block better be free.
  1905. */
  1906. } else if (pfh->fh_signature == BH_SIGNATURE) {
  1907. used += hpSize(pfh);
  1908. ucnt++;
  1909. /*
  1910. * The block should have had one of these signatures!
  1911. */
  1912. } else {
  1913. dprintf(("hpWalk: bad block signature\n"));
  1914. hpTrash(("trashed addr %x\n",pfh));
  1915. }
  1916. /*
  1917. * We are at the end of the heap blocks when we hit one with
  1918. * a size of 0 (the end sentinel).
  1919. */
  1920. if (hpSize(pfh) == 0) {
  1921. break;
  1922. }
  1923. }
  1924. DebugOut((DEB_WARN, "%ld/%ld used, %ld/%ld avail (%s)", used, ucnt, avail, acnt, where));
  1925. }
  1926. #endif
  1927. #endif /* HPDEBUG */