Leaked source code of windows server 2003
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2965 lines
107 KiB

  1. /***
  2. *sbheap.c - Small-block heap code
  3. *
  4. * Copyright (c) 1996-2001, Microsoft Corporation. All rights reserved.
  5. *
  6. *Purpose:
  7. * Core code for small-block heap.
  8. *
  9. *Revision History:
  10. * 03-06-96 GJF Module created.
  11. * 04-03-96 GJF A couple of bug fixes courtesy of Steve Wood.
  12. * 04-05-96 GJF Optimizations from Steve Wood (and John Vert)
  13. * 1. all alloc_map[] entries are marked with
  14. * _FREE_PARA except the first one (John Vert and
  15. * Steve Wood).
  16. * 2. depend on sentinel value to terminate loops in
  17. * __sbh_alloc_block_in_page (me)
  18. * 3. replace starting_para_index field with
  19. * pstarting_alloc_map and added keep track of
  20. * contiguous free paragraphs there (added
  21. * free_paras_at_start field) (Steve Wood).
  22. * 4. changed return type of __sbh_find_block, and
  23. * type of the third args to __sbh_free_block and
  24. * __sbh_resize_block to __map_t * (me).
  25. * 05-22-96 GJF Deadly typo in __sbh_resize_block (had an = instead of
  26. * an ==).
  27. * 06-04-96 GJF Made several changes to the small-block heap types for
  28. * better performance. Main idea was to reduce index
  29. * expressions.
  30. * 04-18-97 JWM Explicit cast added in __sbh_resize_block() to avoid
  31. * new C4242 warnings.
  32. * 05-22-97 RDK New small-block heap scheme implemented.
  33. * 09-22-97 GJF #if 0 -ed out DumpEntry, a routine leftover from the
  34. * debugging of the new small-block heap scheme.
  35. * 12-05-97 GJF Release the address space for the heap data when a
  36. * region is removed.
  37. * 02-18-98 GJF Changes for Win64: replaced casts of pointers to
  38. * (unsigned) int with casts to (u)intptr_t.
  39. * 09-30-98 GJF Allow for initialization of small-block heap when
  40. * _set_sbh_threshold is called.
  41. * 10-13-98 GJF In __sbh_free_block, added check for already free
  42. * blocks (simply return, with no action).
  43. * 11-12-98 GJF Spliced in old small-block heap from VC++ 5.0.
  44. * 12-18-98 GJF Changes for 64-bit size_t.
  45. * 05-01-99 PML Disable small-block heap for Win64.
  46. * 06-17-99 GJF Removed old small-block heap from static libs.
  47. * 10-11-99 PML Supply stubs for _{get,set}_sbh_threshold on Win64.
  48. * 11-30-99 PML Compile /Wp64 clean.
  49. *
  50. *******************************************************************************/
  51. #include <stddef.h>
  52. #include <stdlib.h>
  53. #include <string.h>
  54. #include <winheap.h>
  55. #include <windows.h>
  56. #ifndef _WIN64
  57. /* Current (VC++ 6.0) small-block heap code and data */
  58. size_t __sbh_threshold;
  59. int __sbh_initialized;
  60. PHEADER __sbh_pHeaderList; // pointer to list start
  61. PHEADER __sbh_pHeaderScan; // pointer to list rover
  62. int __sbh_sizeHeaderList; // allocated size of list
  63. int __sbh_cntHeaderList; // count of entries defined
  64. PHEADER __sbh_pHeaderDefer;
  65. int __sbh_indGroupDefer;
  66. /* Prototypes for user functions */
  67. size_t __cdecl _get_sbh_threshold(void);
  68. int __cdecl _set_sbh_threshold(size_t);
  69. void DumpEntry(char *, int *);
  70. #endif /* ndef _WIN64 */
  71. /***
  72. *size_t _get_sbh_threshold() - return small-block threshold
  73. *
  74. *Purpose:
  75. * Return the current value of __sbh_threshold
  76. *
  77. *Entry:
  78. * None.
  79. *
  80. *Exit:
  81. * See above.
  82. *
  83. *Exceptions:
  84. *
  85. *******************************************************************************/
  86. size_t __cdecl _get_sbh_threshold (void)
  87. {
  88. #ifndef _WIN64
  89. if ( __active_heap == __V6_HEAP )
  90. return __sbh_threshold;
  91. #ifdef CRTDLL
  92. else if ( __active_heap == __V5_HEAP )
  93. return __old_sbh_threshold;
  94. #endif /* CRTDLL */
  95. else
  96. #endif /* ndef _WIN64 */
  97. return 0;
  98. }
  99. /***
  100. *int _set_sbh_threshold(threshold) - set small-block heap threshold
  101. *
  102. *Purpose:
  103. * Set the upper limit for the size of an allocation which will be
  104. * supported from the small-block heap.
  105. *
  106. *Entry:
  107. * size_t threshold - proposed new value for __sbh_theshold
  108. *
  109. *Exit:
  110. * Returns 1 if successful. Returns 0 if threshold was too big.
  111. *
  112. *Exceptions:
  113. *
  114. *******************************************************************************/
  115. int __cdecl _set_sbh_threshold (size_t threshold)
  116. {
  117. #ifndef _WIN64
  118. if ( __active_heap == __V6_HEAP )
  119. {
  120. // test against maximum value - if too large, return error
  121. if ( threshold <= MAX_ALLOC_DATA_SIZE )
  122. {
  123. __sbh_threshold = threshold;
  124. return 1;
  125. }
  126. else
  127. return 0;
  128. }
  129. #ifdef CRTDLL
  130. if ( __active_heap == __V5_HEAP )
  131. {
  132. // Round up the proposed new value to the nearest paragraph
  133. threshold = (threshold + _OLD_PARASIZE - 1) & ~(_OLD_PARASIZE - 1);
  134. // Require that at least two allocations be can be made within a
  135. // page.
  136. if ( threshold <= (_OLD_PARASIZE * (_OLD_PARAS_PER_PAGE / 2)) ) {
  137. __old_sbh_threshold = threshold;
  138. return 1;
  139. }
  140. else
  141. return 0;
  142. }
  143. // if necessary, initialize a small-block heap
  144. if ( (__active_heap == __SYSTEM_HEAP) && (threshold > 0) )
  145. {
  146. LinkerVersion lv;
  147. _GetLinkerVersion(&lv);
  148. if (lv.bverMajor >= 6)
  149. {
  150. // Initialize the VC++ 6.0 small-block heap
  151. if ( (threshold <= MAX_ALLOC_DATA_SIZE) &&
  152. __sbh_heap_init(threshold) )
  153. {
  154. __sbh_threshold = threshold;
  155. __active_heap = __V6_HEAP;
  156. return 1;
  157. }
  158. }
  159. else
  160. {
  161. // Initialize the old (VC++ 5.0) small-block heap
  162. threshold = (threshold + _OLD_PARASIZE - 1) &
  163. ~(_OLD_PARASIZE - 1);
  164. if ( (threshold <= (_OLD_PARASIZE * (_OLD_PARAS_PER_PAGE / 2)))
  165. && (__old_sbh_new_region() != NULL) )
  166. {
  167. __old_sbh_threshold = threshold;
  168. __active_heap = __V5_HEAP;
  169. return 1;
  170. }
  171. }
  172. }
  173. #else /* ndef CRTDLL */
  174. // if necessary, initialize a small-block heap
  175. if ( (__active_heap == __SYSTEM_HEAP) && (threshold > 0) )
  176. {
  177. // Initialize the VC++ 6.0 small-block heap
  178. if ( (threshold <= MAX_ALLOC_DATA_SIZE) &&
  179. __sbh_heap_init(threshold) )
  180. {
  181. __sbh_threshold = threshold;
  182. __active_heap = __V6_HEAP;
  183. return 1;
  184. }
  185. }
  186. #endif /* CRTDLL */
  187. #endif /* ndef _WIN64 */
  188. return 0;
  189. }
  190. #ifndef _WIN64
  191. /***
  192. *int __sbh_heap_init() - set small-block heap threshold
  193. *
  194. *Purpose:
  195. * Allocate space for initial header list and init variables.
  196. *
  197. *Entry:
  198. * None.
  199. *
  200. *Exit:
  201. * Returns 1 if successful. Returns 0 if initialization failed.
  202. *
  203. *Exceptions:
  204. *
  205. *******************************************************************************/
  206. int __cdecl __sbh_heap_init (size_t threshold)
  207. {
  208. if (!(__sbh_pHeaderList = HeapAlloc(_crtheap, 0, 16 * sizeof(HEADER))))
  209. return FALSE;
  210. __sbh_threshold = threshold;
  211. __sbh_pHeaderScan = __sbh_pHeaderList;
  212. __sbh_pHeaderDefer = NULL;
  213. __sbh_cntHeaderList = 0;
  214. __sbh_sizeHeaderList = 16;
  215. return TRUE;
  216. }
  217. /***
  218. *PHEADER *__sbh_find_block(pvAlloc) - find block in small-block heap
  219. *
  220. *Purpose:
  221. * Determine if the specified allocation block lies in the small-block
  222. * heap and, if so, return the header to be used for the block.
  223. *
  224. *Entry:
  225. * void * pvBlock - pointer to block to be freed
  226. *
  227. *Exit:
  228. * If successful, a pointer to the header to use is returned.
  229. * If unsuccessful, NULL is returned.
  230. *
  231. *Exceptions:
  232. *
  233. *******************************************************************************/
  234. PHEADER __cdecl __sbh_find_block (void * pvAlloc)
  235. {
  236. PHEADER pHeaderLast = __sbh_pHeaderList + __sbh_cntHeaderList;
  237. PHEADER pHeader;
  238. unsigned int offRegion;
  239. // scan through the header list to determine if entry
  240. // is in the region heap data reserved address space
  241. pHeader = __sbh_pHeaderList;
  242. while (pHeader < pHeaderLast)
  243. {
  244. offRegion = (unsigned int)((uintptr_t)pvAlloc - (uintptr_t)pHeader->pHeapData);
  245. if (offRegion < BYTES_PER_REGION)
  246. return pHeader;
  247. pHeader++;
  248. }
  249. return NULL;
  250. }
  251. #ifdef _DEBUG
  252. /***
  253. *int __sbh_verify_block(pHeader, pvAlloc) - verify pointer in sbh
  254. *
  255. *Purpose:
  256. * Test if pointer is valid within the heap header given.
  257. *
  258. *Entry:
  259. * pHeader - pointer to HEADER where entry should be
  260. * pvAlloc - pointer to test validity of
  261. *
  262. *Exit:
  263. * Returns 1 if pointer is valid, else 0.
  264. *
  265. *Exceptions:
  266. *
  267. *******************************************************************************/
  268. int __cdecl __sbh_verify_block (PHEADER pHeader, void * pvAlloc)
  269. {
  270. unsigned int indGroup;
  271. unsigned int offRegion;
  272. // calculate region offset to determine the group index
  273. offRegion = (unsigned int)((uintptr_t)pvAlloc - (uintptr_t)pHeader->pHeapData);
  274. indGroup = offRegion / BYTES_PER_GROUP;
  275. // return TRUE if:
  276. // group is committed (bit in vector cleared) AND
  277. // pointer is at paragraph boundary AND
  278. // pointer is not at start of page
  279. return (!(pHeader->bitvCommit & (0x80000000UL >> indGroup))) &&
  280. (!(offRegion & 0xf)) &&
  281. (offRegion & (BYTES_PER_PAGE - 1));
  282. }
  283. #endif
  284. /***
  285. *void __sbh_free_block(preg, ppage, pmap) - free block
  286. *
  287. *Purpose:
  288. * Free the specified block from the small-block heap.
  289. *
  290. *Entry:
  291. * pHeader - pointer to HEADER of region to free memory
  292. * pvAlloc - pointer to memory to free
  293. *
  294. *Exit:
  295. * No return value.
  296. *
  297. *Exceptions:
  298. *
  299. *******************************************************************************/
  300. void __cdecl __sbh_free_block (PHEADER pHeader, void * pvAlloc)
  301. {
  302. PREGION pRegion;
  303. PGROUP pGroup;
  304. PENTRY pHead;
  305. PENTRY pEntry;
  306. PENTRY pNext;
  307. PENTRY pPrev;
  308. void * pHeapDecommit;
  309. int sizeEntry;
  310. int sizeNext;
  311. int sizePrev;
  312. unsigned int indGroup;
  313. unsigned int indEntry;
  314. unsigned int indNext;
  315. unsigned int indPrev;
  316. unsigned int offRegion;
  317. // region is determined by the header
  318. pRegion = pHeader->pRegion;
  319. // use the region offset to determine the group index
  320. offRegion = (unsigned int)(((uintptr_t)pvAlloc - (uintptr_t)pHeader->pHeapData));
  321. indGroup = offRegion / BYTES_PER_GROUP;
  322. pGroup = &pRegion->grpHeadList[indGroup];
  323. // get size of entry - decrement value since entry is allocated
  324. pEntry = (PENTRY)((char *)pvAlloc - sizeof(int));
  325. sizeEntry = pEntry->sizeFront - 1;
  326. // check if the entry is already free. note the size has already been
  327. // decremented
  328. if ( (sizeEntry & 1 ) != 0 )
  329. return;
  330. // point to next entry to get its size
  331. pNext = (PENTRY)((char *)pEntry + sizeEntry);
  332. sizeNext = pNext->sizeFront;
  333. // get size from end of previous entry
  334. sizePrev = ((PENTRYEND)((char *)pEntry - sizeof(int)))->sizeBack;
  335. // test if next entry is free by an even size value
  336. if ((sizeNext & 1) == 0)
  337. {
  338. // free next entry - disconnect and add its size to sizeEntry
  339. // determine index of next entry
  340. indNext = (sizeNext >> 4) - 1;
  341. if (indNext > 63)
  342. indNext = 63;
  343. // test entry is sole member of bucket (next == prev),
  344. if (pNext->pEntryNext == pNext->pEntryPrev)
  345. {
  346. // clear bit in group vector, decrement region count
  347. // if region count is now zero, clear bit in header
  348. // entry vector
  349. if (indNext < 32)
  350. {
  351. pRegion->bitvGroupHi[indGroup] &= ~(0x80000000L >> indNext);
  352. if (--pRegion->cntRegionSize[indNext] == 0)
  353. pHeader->bitvEntryHi &= ~(0x80000000L >> indNext);
  354. }
  355. else
  356. {
  357. pRegion->bitvGroupLo[indGroup] &=
  358. ~(0x80000000L >> (indNext - 32));
  359. if (--pRegion->cntRegionSize[indNext] == 0)
  360. pHeader->bitvEntryLo &= ~(0x80000000L >> (indNext - 32));
  361. }
  362. }
  363. // unlink entry from list
  364. pNext->pEntryPrev->pEntryNext = pNext->pEntryNext;
  365. pNext->pEntryNext->pEntryPrev = pNext->pEntryPrev;
  366. // add next entry size to freed entry size
  367. sizeEntry += sizeNext;
  368. }
  369. // compute index of free entry (plus next entry if it was free)
  370. indEntry = (sizeEntry >> 4) - 1;
  371. if (indEntry > 63)
  372. indEntry = 63;
  373. // test if previous entry is free by an even size value
  374. if ((sizePrev & 1) == 0)
  375. {
  376. // free previous entry - add size to sizeEntry and
  377. // disconnect if index changes
  378. // get pointer to previous entry
  379. pPrev = (PENTRY)((char *)pEntry - sizePrev);
  380. // determine index of previous entry
  381. indPrev = (sizePrev >> 4) - 1;
  382. if (indPrev > 63)
  383. indPrev = 63;
  384. // add previous entry size to sizeEntry and determine
  385. // its new index
  386. sizeEntry += sizePrev;
  387. indEntry = (sizeEntry >> 4) - 1;
  388. if (indEntry > 63)
  389. indEntry = 63;
  390. // if index changed due to coalesing, reconnect to new size
  391. if (indPrev != indEntry)
  392. {
  393. // disconnect entry from indPrev
  394. // test entry is sole member of bucket (next == prev),
  395. if (pPrev->pEntryNext == pPrev->pEntryPrev)
  396. {
  397. // clear bit in group vector, decrement region count
  398. // if region count is now zero, clear bit in header
  399. // entry vector
  400. if (indPrev < 32)
  401. {
  402. pRegion->bitvGroupHi[indGroup] &=
  403. ~(0x80000000L >> indPrev);
  404. if (--pRegion->cntRegionSize[indPrev] == 0)
  405. pHeader->bitvEntryHi &= ~(0x80000000L >> indPrev);
  406. }
  407. else
  408. {
  409. pRegion->bitvGroupLo[indGroup] &=
  410. ~(0x80000000L >> (indPrev - 32));
  411. if (--pRegion->cntRegionSize[indPrev] == 0)
  412. pHeader->bitvEntryLo &=
  413. ~(0x80000000L >> (indPrev - 32));
  414. }
  415. }
  416. // unlink entry from list
  417. pPrev->pEntryPrev->pEntryNext = pPrev->pEntryNext;
  418. pPrev->pEntryNext->pEntryPrev = pPrev->pEntryPrev;
  419. }
  420. // set pointer to connect it instead of the free entry
  421. pEntry = pPrev;
  422. }
  423. // test if previous entry was free with an index change or allocated
  424. if (!((sizePrev & 1) == 0 && indPrev == indEntry))
  425. {
  426. // connect pEntry entry to indEntry
  427. // add entry to the start of the bucket list
  428. pHead = (PENTRY)((char *)&pGroup->listHead[indEntry] - sizeof(int));
  429. pEntry->pEntryNext = pHead->pEntryNext;
  430. pEntry->pEntryPrev = pHead;
  431. pHead->pEntryNext = pEntry;
  432. pEntry->pEntryNext->pEntryPrev = pEntry;
  433. // test entry is sole member of bucket (next == prev),
  434. if (pEntry->pEntryNext == pEntry->pEntryPrev)
  435. {
  436. // if region count was zero, set bit in region vector
  437. // set bit in header entry vector, increment region count
  438. if (indEntry < 32)
  439. {
  440. if (pRegion->cntRegionSize[indEntry]++ == 0)
  441. pHeader->bitvEntryHi |= 0x80000000L >> indEntry;
  442. pRegion->bitvGroupHi[indGroup] |= 0x80000000L >> indEntry;
  443. }
  444. else
  445. {
  446. if (pRegion->cntRegionSize[indEntry]++ == 0)
  447. pHeader->bitvEntryLo |= 0x80000000L >> (indEntry - 32);
  448. pRegion->bitvGroupLo[indGroup] |= 0x80000000L >>
  449. (indEntry - 32);
  450. }
  451. }
  452. }
  453. // adjust the entry size front and back
  454. pEntry->sizeFront = sizeEntry;
  455. ((PENTRYEND)((char *)pEntry + sizeEntry -
  456. sizeof(ENTRYEND)))->sizeBack = sizeEntry;
  457. // one less allocation in group - test if empty
  458. if (--pGroup->cntEntries == 0)
  459. {
  460. // if a group has been deferred, free that group
  461. if (__sbh_pHeaderDefer)
  462. {
  463. // if now zero, decommit the group data heap
  464. pHeapDecommit = (void *)((char *)__sbh_pHeaderDefer->pHeapData +
  465. __sbh_indGroupDefer * BYTES_PER_GROUP);
  466. VirtualFree(pHeapDecommit, BYTES_PER_GROUP, MEM_DECOMMIT);
  467. // set bit in commit vector
  468. __sbh_pHeaderDefer->bitvCommit |=
  469. 0x80000000 >> __sbh_indGroupDefer;
  470. // clear entry vector for the group and header vector bit
  471. // if needed
  472. __sbh_pHeaderDefer->pRegion->bitvGroupLo[__sbh_indGroupDefer] = 0;
  473. if (--__sbh_pHeaderDefer->pRegion->cntRegionSize[63] == 0)
  474. __sbh_pHeaderDefer->bitvEntryLo &= ~0x00000001L;
  475. // if commit vector is the initial value,
  476. // remove the region if it is not the last
  477. if (__sbh_pHeaderDefer->bitvCommit == BITV_COMMIT_INIT)
  478. {
  479. // release the address space for heap data
  480. VirtualFree(__sbh_pHeaderDefer->pHeapData, 0, MEM_RELEASE);
  481. // free the region memory area
  482. HeapFree(_crtheap, 0, __sbh_pHeaderDefer->pRegion);
  483. // remove entry from header list by copying over
  484. memmove((void *)__sbh_pHeaderDefer,
  485. (void *)(__sbh_pHeaderDefer + 1),
  486. (int)((intptr_t)(__sbh_pHeaderList + __sbh_cntHeaderList) -
  487. (intptr_t)(__sbh_pHeaderDefer + 1)));
  488. __sbh_cntHeaderList--;
  489. // if pHeader was after the one just removed, adjust it
  490. if (pHeader > __sbh_pHeaderDefer)
  491. pHeader--;
  492. // initialize scan pointer to start of list
  493. __sbh_pHeaderScan = __sbh_pHeaderList;
  494. }
  495. }
  496. // defer the group just freed
  497. __sbh_pHeaderDefer = pHeader;
  498. __sbh_indGroupDefer = indGroup;
  499. }
  500. }
  501. /***
  502. *void * __sbh_alloc_block(intSize) - allocate a block
  503. *
  504. *Purpose:
  505. * Allocate a block from the small-block heap, the specified number of
  506. * bytes in size.
  507. *
  508. *Entry:
  509. * intSize - size of the allocation request in bytes
  510. *
  511. *Exit:
  512. * Returns a pointer to the newly allocated block, if successful.
  513. * Returns NULL, if failure.
  514. *
  515. *Exceptions:
  516. *
  517. *******************************************************************************/
  518. void * __cdecl __sbh_alloc_block (int intSize)
  519. {
  520. PHEADER pHeaderLast = __sbh_pHeaderList + __sbh_cntHeaderList;
  521. PHEADER pHeader;
  522. PREGION pRegion;
  523. PGROUP pGroup;
  524. PENTRY pEntry;
  525. PENTRY pHead;
  526. BITVEC bitvEntryLo;
  527. BITVEC bitvEntryHi;
  528. BITVEC bitvTest;
  529. int sizeEntry;
  530. int indEntry;
  531. int indGroupUse;
  532. int sizeNewFree;
  533. int indNewFree;
  534. // add 8 bytes entry overhead and round up to next para size
  535. sizeEntry = (intSize + 2 * (int)sizeof(int) + (BYTES_PER_PARA - 1))
  536. & ~(BYTES_PER_PARA - 1);
  537. #ifdef _WIN64
  538. if (sizeEntry < 32)
  539. sizeEntry = 32;
  540. #endif
  541. // determine index and mask from entry size
  542. // Hi MSB: bit 0 size: 1 paragraph
  543. // bit 1 2 paragraphs
  544. // ... ...
  545. // bit 30 31 paragraphs
  546. // bit 31 32 paragraphs
  547. // Lo MSB: bit 0 size: 33 paragraph
  548. // bit 1 34 paragraphs
  549. // ... ...
  550. // bit 30 63 paragraphs
  551. // bit 31 64+ paragraphs
  552. indEntry = (sizeEntry >> 4) - 1;
  553. if (indEntry < 32)
  554. {
  555. bitvEntryHi = 0xffffffffUL >> indEntry;
  556. bitvEntryLo = 0xffffffffUL;
  557. }
  558. else
  559. {
  560. bitvEntryHi = 0;
  561. bitvEntryLo = 0xffffffffUL >> (indEntry - 32);
  562. }
  563. // scan header list from rover to end for region with a free
  564. // entry with an adequate size
  565. pHeader = __sbh_pHeaderScan;
  566. while (pHeader < pHeaderLast)
  567. {
  568. if ((bitvEntryHi & pHeader->bitvEntryHi) |
  569. (bitvEntryLo & pHeader->bitvEntryLo))
  570. break;
  571. pHeader++;
  572. }
  573. // if no entry, scan from list start up to the rover
  574. if (pHeader == pHeaderLast)
  575. {
  576. pHeader = __sbh_pHeaderList;
  577. while (pHeader < __sbh_pHeaderScan)
  578. {
  579. if ((bitvEntryHi & pHeader->bitvEntryHi) |
  580. (bitvEntryLo & pHeader->bitvEntryLo))
  581. break;
  582. pHeader++;
  583. }
  584. // no free entry exists, scan list from rover to end
  585. // for available groups to commit
  586. if (pHeader == __sbh_pHeaderScan)
  587. {
  588. while (pHeader < pHeaderLast)
  589. {
  590. if (pHeader->bitvCommit)
  591. break;
  592. pHeader++;
  593. }
  594. // if no available groups, scan from start to rover
  595. if (pHeader == pHeaderLast)
  596. {
  597. pHeader = __sbh_pHeaderList;
  598. while (pHeader < __sbh_pHeaderScan)
  599. {
  600. if (pHeader->bitvCommit)
  601. break;
  602. pHeader++;
  603. }
  604. // if no available groups, create a new region
  605. if (pHeader == __sbh_pHeaderScan)
  606. if (!(pHeader = __sbh_alloc_new_region()))
  607. return NULL;
  608. }
  609. // commit a new group in region associated with pHeader
  610. if ((pHeader->pRegion->indGroupUse =
  611. __sbh_alloc_new_group(pHeader)) == -1)
  612. return NULL;
  613. }
  614. }
  615. __sbh_pHeaderScan = pHeader;
  616. pRegion = pHeader->pRegion;
  617. indGroupUse = pRegion->indGroupUse;
  618. // determine the group to allocate from
  619. if (indGroupUse == -1 ||
  620. !((bitvEntryHi & pRegion->bitvGroupHi[indGroupUse]) |
  621. (bitvEntryLo & pRegion->bitvGroupLo[indGroupUse])))
  622. {
  623. // preferred group could not allocate entry, so
  624. // scan through all defined vectors
  625. indGroupUse = 0;
  626. while (!((bitvEntryHi & pRegion->bitvGroupHi[indGroupUse]) |
  627. (bitvEntryLo & pRegion->bitvGroupLo[indGroupUse])))
  628. indGroupUse++;
  629. }
  630. pGroup = &pRegion->grpHeadList[indGroupUse];
  631. // determine bucket index
  632. indEntry = 0;
  633. // get high entry intersection - if zero, use the lower one
  634. if (!(bitvTest = bitvEntryHi & pRegion->bitvGroupHi[indGroupUse]))
  635. {
  636. indEntry = 32;
  637. bitvTest = bitvEntryLo & pRegion->bitvGroupLo[indGroupUse];
  638. }
  639. while ((int)bitvTest >= 0)
  640. {
  641. bitvTest <<= 1;
  642. indEntry++;
  643. }
  644. pEntry = pGroup->listHead[indEntry].pEntryNext;
  645. // compute size and bucket index of new free entry
  646. // for zero-sized entry, the index is -1
  647. sizeNewFree = pEntry->sizeFront - sizeEntry;
  648. indNewFree = (sizeNewFree >> 4) - 1;
  649. if (indNewFree > 63)
  650. indNewFree = 63;
  651. // only modify entry pointers if bucket index changed
  652. if (indNewFree != indEntry)
  653. {
  654. // test entry is sole member of bucket (next == prev),
  655. if (pEntry->pEntryNext == pEntry->pEntryPrev)
  656. {
  657. // clear bit in group vector, decrement region count
  658. // if region count is now zero, clear bit in region vector
  659. if (indEntry < 32)
  660. {
  661. pRegion->bitvGroupHi[indGroupUse] &=
  662. ~(0x80000000L >> indEntry);
  663. if (--pRegion->cntRegionSize[indEntry] == 0)
  664. pHeader->bitvEntryHi &= ~(0x80000000L >> indEntry);
  665. }
  666. else
  667. {
  668. pRegion->bitvGroupLo[indGroupUse] &=
  669. ~(0x80000000L >> (indEntry - 32));
  670. if (--pRegion->cntRegionSize[indEntry] == 0)
  671. pHeader->bitvEntryLo &= ~(0x80000000L >> (indEntry - 32));
  672. }
  673. }
  674. // unlink entry from list
  675. pEntry->pEntryPrev->pEntryNext = pEntry->pEntryNext;
  676. pEntry->pEntryNext->pEntryPrev = pEntry->pEntryPrev;
  677. // if free entry size is still nonzero, reconnect it
  678. if (sizeNewFree != 0)
  679. {
  680. // add entry to the start of the bucket list
  681. pHead = (PENTRY)((char *)&pGroup->listHead[indNewFree] -
  682. sizeof(int));
  683. pEntry->pEntryNext = pHead->pEntryNext;
  684. pEntry->pEntryPrev = pHead;
  685. pHead->pEntryNext = pEntry;
  686. pEntry->pEntryNext->pEntryPrev = pEntry;
  687. // test entry is sole member of bucket (next == prev),
  688. if (pEntry->pEntryNext == pEntry->pEntryPrev)
  689. {
  690. // if region count was zero, set bit in region vector
  691. // set bit in group vector, increment region count
  692. if (indNewFree < 32)
  693. {
  694. if (pRegion->cntRegionSize[indNewFree]++ == 0)
  695. pHeader->bitvEntryHi |= 0x80000000L >> indNewFree;
  696. pRegion->bitvGroupHi[indGroupUse] |=
  697. 0x80000000L >> indNewFree;
  698. }
  699. else
  700. {
  701. if (pRegion->cntRegionSize[indNewFree]++ == 0)
  702. pHeader->bitvEntryLo |=
  703. 0x80000000L >> (indNewFree - 32);
  704. pRegion->bitvGroupLo[indGroupUse] |=
  705. 0x80000000L >> (indNewFree - 32);
  706. }
  707. }
  708. }
  709. }
  710. // change size of free entry (front and back)
  711. if (sizeNewFree != 0)
  712. {
  713. pEntry->sizeFront = sizeNewFree;
  714. ((PENTRYEND)((char *)pEntry + sizeNewFree -
  715. sizeof(ENTRYEND)))->sizeBack = sizeNewFree;
  716. }
  717. // mark the allocated entry
  718. pEntry = (PENTRY)((char *)pEntry + sizeNewFree);
  719. pEntry->sizeFront = sizeEntry + 1;
  720. ((PENTRYEND)((char *)pEntry + sizeEntry -
  721. sizeof(ENTRYEND)))->sizeBack = sizeEntry + 1;
  722. // one more allocation in group - test if group was empty
  723. if (pGroup->cntEntries++ == 0)
  724. {
  725. // if allocating into deferred group, cancel deferral
  726. if (pHeader == __sbh_pHeaderDefer &&
  727. indGroupUse == __sbh_indGroupDefer)
  728. __sbh_pHeaderDefer = NULL;
  729. }
  730. pRegion->indGroupUse = indGroupUse;
  731. return (void *)((char *)pEntry + sizeof(int));
  732. }
  733. /***
  734. *PHEADER __sbh_alloc_new_region()
  735. *
  736. *Purpose:
  737. * Add a new HEADER structure in the header list. Allocate a new
  738. * REGION structure and initialize. Reserve memory for future
  739. * group commitments.
  740. *
  741. *Entry:
  742. * None.
  743. *
  744. *Exit:
  745. * Returns a pointer to newly created HEADER entry, if successful.
  746. * Returns NULL, if failure.
  747. *
  748. *Exceptions:
  749. *
  750. *******************************************************************************/
  751. PHEADER __cdecl __sbh_alloc_new_region (void)
  752. {
  753. PHEADER pHeader;
  754. // create a new entry in the header list
  755. // if list if full, realloc to extend its size
  756. if (__sbh_cntHeaderList == __sbh_sizeHeaderList)
  757. {
  758. if (!(pHeader = (PHEADER)HeapReAlloc(_crtheap, 0, __sbh_pHeaderList,
  759. (__sbh_sizeHeaderList + 16) * sizeof(HEADER))))
  760. return NULL;
  761. // update pointer and counter values
  762. __sbh_pHeaderList = pHeader;
  763. __sbh_sizeHeaderList += 16;
  764. }
  765. // point to new header in list
  766. pHeader = __sbh_pHeaderList + __sbh_cntHeaderList;
  767. // allocate a new region associated with the new header
  768. if (!(pHeader->pRegion = (PREGION)HeapAlloc(_crtheap, HEAP_ZERO_MEMORY,
  769. sizeof(REGION))))
  770. return NULL;
  771. // reserve address space for heap data in the region
  772. if ((pHeader->pHeapData = VirtualAlloc(0, BYTES_PER_REGION,
  773. MEM_RESERVE, PAGE_READWRITE)) == NULL)
  774. {
  775. HeapFree(_crtheap, 0, pHeader->pRegion);
  776. return NULL;
  777. }
  778. // initialize alloc and commit group vectors
  779. pHeader->bitvEntryHi = 0;
  780. pHeader->bitvEntryLo = 0;
  781. pHeader->bitvCommit = BITV_COMMIT_INIT;
  782. // complete entry by incrementing list count
  783. __sbh_cntHeaderList++;
  784. // initialize index of group to try first (none defined yet)
  785. pHeader->pRegion->indGroupUse = -1;
  786. return pHeader;
  787. }
  788. /***
  789. *int __sbh_alloc_new_group(pHeader)
  790. *
  791. *Purpose:
  792. * Initializes a GROUP structure within HEADER pointed by pHeader.
  793. * Commits and initializes the memory in the memory reserved by the
  794. * REGION.
  795. *
  796. *Entry:
  797. * pHeader - pointer to HEADER from which the GROUP is defined.
  798. *
  799. *Exit:
  800. * Returns an index to newly created GROUP, if successful.
  801. * Returns -1, if failure.
  802. *
  803. *Exceptions:
  804. *
  805. *******************************************************************************/
  806. int __cdecl __sbh_alloc_new_group (PHEADER pHeader)
  807. {
  808. PREGION pRegion = pHeader->pRegion;
  809. PGROUP pGroup;
  810. PENTRY pEntry;
  811. PENTRY pHead;
  812. PENTRYEND pEntryEnd;
  813. BITVEC bitvCommit;
  814. int indCommit;
  815. int index;
  816. void * pHeapPage;
  817. void * pHeapStartPage;
  818. void * pHeapEndPage;
  819. // determine next group to use by first bit set in commit vector
  820. bitvCommit = pHeader->bitvCommit;
  821. indCommit = 0;
  822. while ((int)bitvCommit >= 0)
  823. {
  824. bitvCommit <<= 1;
  825. indCommit++;
  826. }
  827. // allocate and initialize a new group
  828. pGroup = &pRegion->grpHeadList[indCommit];
  829. for (index = 0; index < 63; index++)
  830. {
  831. pEntry = (PENTRY)((char *)&pGroup->listHead[index] - sizeof(int));
  832. pEntry->pEntryNext = pEntry->pEntryPrev = pEntry;
  833. }
  834. // commit heap memory for new group
  835. pHeapStartPage = (void *)((char *)pHeader->pHeapData +
  836. indCommit * BYTES_PER_GROUP);
  837. if ((VirtualAlloc(pHeapStartPage, BYTES_PER_GROUP, MEM_COMMIT,
  838. PAGE_READWRITE)) == NULL)
  839. return -1;
  840. // initialize heap data with empty page entries
  841. pHeapEndPage = (void *)((char *)pHeapStartPage +
  842. (PAGES_PER_GROUP - 1) * BYTES_PER_PAGE);
  843. for (pHeapPage = pHeapStartPage; pHeapPage <= pHeapEndPage;
  844. pHeapPage = (void *)((char *)pHeapPage + BYTES_PER_PAGE))
  845. {
  846. // set sentinel values at start and end of the page
  847. *(int *)((char *)pHeapPage + 8) = -1;
  848. *(int *)((char *)pHeapPage + BYTES_PER_PAGE - 4) = -1;
  849. // set size and pointer info for one empty entry
  850. pEntry = (PENTRY)((char *)pHeapPage + ENTRY_OFFSET);
  851. pEntry->sizeFront = MAX_FREE_ENTRY_SIZE;
  852. pEntry->pEntryNext = (PENTRY)((char *)pEntry +
  853. BYTES_PER_PAGE);
  854. pEntry->pEntryPrev = (PENTRY)((char *)pEntry -
  855. BYTES_PER_PAGE);
  856. pEntryEnd = (PENTRYEND)((char *)pEntry + MAX_FREE_ENTRY_SIZE -
  857. sizeof(ENTRYEND));
  858. pEntryEnd->sizeBack = MAX_FREE_ENTRY_SIZE;
  859. }
  860. // initialize group entry pointer for maximum size
  861. // and set terminate list entries
  862. pHead = (PENTRY)((char *)&pGroup->listHead[63] - sizeof(int));
  863. pEntry = pHead->pEntryNext =
  864. (PENTRY)((char *)pHeapStartPage + ENTRY_OFFSET);
  865. pEntry->pEntryPrev = pHead;
  866. pEntry = pHead->pEntryPrev =
  867. (PENTRY)((char *)pHeapEndPage + ENTRY_OFFSET);
  868. pEntry->pEntryNext = pHead;
  869. pRegion->bitvGroupHi[indCommit] = 0x00000000L;
  870. pRegion->bitvGroupLo[indCommit] = 0x00000001L;
  871. if (pRegion->cntRegionSize[63]++ == 0)
  872. pHeader->bitvEntryLo |= 0x00000001L;
  873. // clear bit in commit vector
  874. pHeader->bitvCommit &= ~(0x80000000L >> indCommit);
  875. return indCommit;
  876. }
  877. /***
  878. *int __sbh_resize_block(pHeader, pvAlloc, intNew) - resize block
  879. *
  880. *Purpose:
  881. * Resize the specified block from the small-block heap.
  882. * The allocation block is not moved.
  883. *
  884. *Entry:
  885. * pHeader - pointer to HEADER containing block
  886. * pvAlloc - pointer to block to resize
  887. * intNew - new size of block in bytes
  888. *
  889. *Exit:
  890. * Returns 1, if successful. Otherwise, 0 is returned.
  891. *
  892. *Exceptions:
  893. *
  894. *******************************************************************************/
int __cdecl __sbh_resize_block (PHEADER pHeader, void * pvAlloc, int intNew)
{
    PREGION         pRegion;
    PGROUP          pGroup;
    PENTRY          pHead;          // dummy list head of a free-list bucket
    PENTRY          pEntry;         // entry being resized
    PENTRY          pNext;          // entry physically following pEntry
    int             sizeEntry;      // true (even) size of the current entry
    int             sizeNext;       // raw size field of the following entry
    int             sizeNew;        // requested size rounded up to paragraphs
    unsigned int    indGroup;
    unsigned int    indEntry;
    unsigned int    indNext;
    unsigned int    offRegion;

    // add 8 bytes entry overhead and round up to next para size
    sizeNew = (intNew + 2 * (int)sizeof(int) + (BYTES_PER_PARA - 1))
              & ~(BYTES_PER_PARA - 1);

    // region is determined by the header
    pRegion = pHeader->pRegion;

    // use the region offset to determine the group index
    offRegion = (unsigned int)((uintptr_t)pvAlloc - (uintptr_t)pHeader->pHeapData);
    indGroup = offRegion / BYTES_PER_GROUP;
    pGroup = &pRegion->grpHeadList[indGroup];

    // get size of entry - decrement value since entry is allocated
    // (allocated entries store their size with the low bit set)
    pEntry = (PENTRY)((char *)pvAlloc - sizeof(int));
    sizeEntry = pEntry->sizeFront - 1;

    // point to next entry to get its size
    pNext = (PENTRY)((char *)pEntry + sizeEntry);
    sizeNext = pNext->sizeFront;

    // test if new size is larger than the current one
    if (sizeNew > sizeEntry)
    {
        // growing in place is only possible by absorbing the physically
        // following entry: if next entry not free, or not large enough, fail
        if ((sizeNext & 1) || (sizeNew > sizeEntry + sizeNext))
            return FALSE;

        // disconnect next entry
        // determine index of next entry (bucket index is size/16 - 1,
        // capped at 63 for the largest bucket)
        indNext = (sizeNext >> 4) - 1;
        if (indNext > 63)
            indNext = 63;

        // test entry is sole member of bucket (next == prev),
        if (pNext->pEntryNext == pNext->pEntryPrev)
        {
            // clear bit in group vector, decrement region count
            // if region count is now zero, clear bit in header
            // entry vector
            if (indNext < 32)
            {
                pRegion->bitvGroupHi[indGroup] &= ~(0x80000000L >> indNext);
                if (--pRegion->cntRegionSize[indNext] == 0)
                    pHeader->bitvEntryHi &= ~(0x80000000L >> indNext);
            }
            else
            {
                pRegion->bitvGroupLo[indGroup] &=
                                        ~(0x80000000L >> (indNext - 32));
                if (--pRegion->cntRegionSize[indNext] == 0)
                    pHeader->bitvEntryLo &= ~(0x80000000L >> (indNext - 32));
            }
        }

        // unlink entry from list
        pNext->pEntryPrev->pEntryNext = pNext->pEntryNext;
        pNext->pEntryNext->pEntryPrev = pNext->pEntryPrev;

        // compute new size of the next entry, test if nonzero
        // (zero means the absorbed entry was consumed exactly)
        if ((sizeNext = sizeEntry + sizeNext - sizeNew) > 0)
        {
            // compute start of next entry and connect it
            pNext = (PENTRY)((char *)pEntry + sizeNew);

            // determine index of next entry
            indNext = (sizeNext >> 4) - 1;
            if (indNext > 63)
                indNext = 63;

            // add next entry to the start of the bucket list
            // (pHead overlays the list head so it can be treated as a PENTRY)
            pHead = (PENTRY)((char *)&pGroup->listHead[indNext] -
                             sizeof(int));
            pNext->pEntryNext = pHead->pEntryNext;
            pNext->pEntryPrev = pHead;
            pHead->pEntryNext = pNext;
            pNext->pEntryNext->pEntryPrev = pNext;

            // test entry is sole member of bucket (next == prev),
            if (pNext->pEntryNext == pNext->pEntryPrev)
            {
                // if region count was zero, set bit in region vector
                // set bit in header entry vector, increment region count
                if (indNext < 32)
                {
                    if (pRegion->cntRegionSize[indNext]++ == 0)
                        pHeader->bitvEntryHi |= 0x80000000L >> indNext;
                    pRegion->bitvGroupHi[indGroup] |= 0x80000000L >> indNext;
                }
                else
                {
                    if (pRegion->cntRegionSize[indNext]++ == 0)
                        pHeader->bitvEntryLo |= 0x80000000L >> (indNext - 32);
                    pRegion->bitvGroupLo[indGroup] |=
                                            0x80000000L >> (indNext - 32);
                }
            }

            // adjust size fields of next entry (front and back tags)
            pNext->sizeFront = sizeNext;
            ((PENTRYEND)((char *)pNext + sizeNext -
                         sizeof(ENTRYEND)))->sizeBack = sizeNext;
        }

        // adjust pEntry to its new size (plus one since allocated)
        pEntry->sizeFront = sizeNew + 1;
        ((PENTRYEND)((char *)pEntry + sizeNew -
                     sizeof(ENTRYEND)))->sizeBack = sizeNew + 1;
    }
    // not larger, test if smaller
    else if (sizeNew < sizeEntry)
    {
        // adjust pEntry to new smaller size
        pEntry->sizeFront = sizeNew + 1;
        ((PENTRYEND)((char *)pEntry + sizeNew -
                     sizeof(ENTRYEND)))->sizeBack = sizeNew + 1;

        // set pEntry and sizeEntry to leftover space
        pEntry = (PENTRY)((char *)pEntry + sizeNew);
        sizeEntry -= sizeNew;

        // determine index of entry
        indEntry = (sizeEntry >> 4) - 1;
        if (indEntry > 63)
            indEntry = 63;

        // test if next entry is free
        if ((sizeNext & 1) == 0)
        {
            // if so, disconnect it so the leftover space can coalesce with it
            // determine index of next entry
            indNext = (sizeNext >> 4) - 1;
            if (indNext > 63)
                indNext = 63;

            // test entry is sole member of bucket (next == prev),
            if (pNext->pEntryNext == pNext->pEntryPrev)
            {
                // clear bit in group vector, decrement region count
                // if region count is now zero, clear bit in header
                // entry vector
                if (indNext < 32)
                {
                    pRegion->bitvGroupHi[indGroup] &=
                                            ~(0x80000000L >> indNext);
                    if (--pRegion->cntRegionSize[indNext] == 0)
                        pHeader->bitvEntryHi &= ~(0x80000000L >> indNext);
                }
                else
                {
                    pRegion->bitvGroupLo[indGroup] &=
                                            ~(0x80000000L >> (indNext - 32));
                    if (--pRegion->cntRegionSize[indNext] == 0)
                        pHeader->bitvEntryLo &=
                                            ~(0x80000000L >> (indNext - 32));
                }
            }

            // unlink entry from list
            pNext->pEntryPrev->pEntryNext = pNext->pEntryNext;
            pNext->pEntryNext->pEntryPrev = pNext->pEntryPrev;

            // add next entry size to present and recompute bucket index
            sizeEntry += sizeNext;
            indEntry = (sizeEntry >> 4) - 1;
            if (indEntry > 63)
                indEntry = 63;
        }

        // connect leftover space with any free next entry
        // add next entry to the start of the bucket list
        pHead = (PENTRY)((char *)&pGroup->listHead[indEntry] - sizeof(int));
        pEntry->pEntryNext = pHead->pEntryNext;
        pEntry->pEntryPrev = pHead;
        pHead->pEntryNext = pEntry;
        pEntry->pEntryNext->pEntryPrev = pEntry;

        // test entry is sole member of bucket (next == prev),
        if (pEntry->pEntryNext == pEntry->pEntryPrev)
        {
            // if region count was zero, set bit in region vector
            // set bit in header entry vector, increment region count
            if (indEntry < 32)
            {
                if (pRegion->cntRegionSize[indEntry]++ == 0)
                    pHeader->bitvEntryHi |= 0x80000000L >> indEntry;
                pRegion->bitvGroupHi[indGroup] |= 0x80000000L >> indEntry;
            }
            else
            {
                if (pRegion->cntRegionSize[indEntry]++ == 0)
                    pHeader->bitvEntryLo |= 0x80000000L >> (indEntry - 32);
                pRegion->bitvGroupLo[indGroup] |= 0x80000000L >>
                                                  (indEntry - 32);
            }
        }

        // adjust size fields of entry (free entries keep an even size)
        pEntry->sizeFront = sizeEntry;
        ((PENTRYEND)((char *)pEntry + sizeEntry -
                     sizeof(ENTRYEND)))->sizeBack = sizeEntry;
    }
    // equal size: nothing to do, the block already fits
    return TRUE;
}
  1089. /***
  1090. *int __sbh_heapmin() - minimize heap
  1091. *
  1092. *Purpose:
  1093. * Minimize the heap by freeing any deferred group.
  1094. *
  1095. *Entry:
  1096. * __sbh_pHeaderDefer - pointer to HEADER of deferred group
  1097. * __sbh_indGroupDefer - index of GROUP to defer
  1098. *
  1099. *Exit:
  1100. * None.
  1101. *
  1102. *Exceptions:
  1103. *
  1104. *******************************************************************************/
void __cdecl __sbh_heapmin (void)
{
    void *      pHeapDecommit;

    // if a group has been deferred, free that group
    if (__sbh_pHeaderDefer)
    {
        // decommit the deferred group's data pages
        pHeapDecommit = (void *)((char *)__sbh_pHeaderDefer->pHeapData +
                                 __sbh_indGroupDefer * BYTES_PER_GROUP);
        VirtualFree(pHeapDecommit, BYTES_PER_GROUP, MEM_DECOMMIT);

        // set bit in commit vector (a set bit marks an uncommitted group)
        __sbh_pHeaderDefer->bitvCommit |= 0x80000000 >> __sbh_indGroupDefer;

        // clear entry vector for the group and header vector bit
        // if needed - a fully free group occupies the size-63 bucket,
        // whose flag is the low bit of bitvEntryLo
        __sbh_pHeaderDefer->pRegion->bitvGroupLo[__sbh_indGroupDefer] = 0;
        if (--__sbh_pHeaderDefer->pRegion->cntRegionSize[63] == 0)
            __sbh_pHeaderDefer->bitvEntryLo &= ~0x00000001L;

        // if commit vector is the initial value,
        // remove the region if it is not the last
        if (__sbh_pHeaderDefer->bitvCommit == BITV_COMMIT_INIT &&
            __sbh_cntHeaderList > 1)
        {
            // free the region memory area
            HeapFree(_crtheap, 0, __sbh_pHeaderDefer->pRegion);

            // remove entry from header list by copying over
            // (headers are stored contiguously in __sbh_pHeaderList)
            memmove((void *)__sbh_pHeaderDefer, (void *)(__sbh_pHeaderDefer + 1),
                    (int)((intptr_t)(__sbh_pHeaderList + __sbh_cntHeaderList) -
                    (intptr_t)(__sbh_pHeaderDefer + 1)));
            __sbh_cntHeaderList--;
        }

        // clear deferred condition
        __sbh_pHeaderDefer = NULL;
    }
}
  1139. /***
  1140. *int __sbh_heap_check() - check small-block heap
  1141. *
  1142. *Purpose:
  1143. * Perform validity checks on the small-block heap.
  1144. *
  1145. *Entry:
  1146. * There are no arguments.
  1147. *
  1148. *Exit:
* Returns 0 if the small-block heap is okay.
  1150. * Returns < 0 if the small-block heap has an error. The exact value
  1151. * identifies where, in the source code below, the error was detected.
  1152. *
  1153. *Exceptions:
  1154. *
  1155. *******************************************************************************/
int __cdecl __sbh_heap_check (void)
{
    PHEADER     pHeader;
    PREGION     pRegion;
    PGROUP      pGroup;
    PENTRY      pEntry;
    PENTRY      pNext;
    PENTRY      pEntryLast;
    PENTRY      pEntryHead;
    PENTRY      pEntryPage;
    PENTRY      pEntryPageLast;
    int         indHeader;
    int         indGroup;
    int         indPage;
    int         indEntry;
    int         indHead;
    int         sizeEntry;      // raw size field (low bit set if allocated)
    int         sizeTrue;       // size with the allocated bit stripped
    int         cntAllocated;   // allocated entries counted in a group
    int         cntFree[64];    // free entries counted per bucket
    int         cntEntries;
    void *      pHeapGroup;
    void *      pHeapPage;
    void *      pPageStart;
    BITVEC      bitvCommit;     // local copy, shifted left one group at a time
    BITVEC      bitvGroupHi;    // vectors rebuilt from the actual lists and
    BITVEC      bitvGroupLo;    //  compared against the stored ones
    BITVEC      bitvEntryHi;
    BITVEC      bitvEntryLo;

    // check validity of header list
    if (IsBadWritePtr(__sbh_pHeaderList,
                      __sbh_cntHeaderList * (unsigned int)sizeof(HEADER)))
        return -1;

    // scan for all headers in list
    pHeader = __sbh_pHeaderList;
    for (indHeader = 0; indHeader < __sbh_cntHeaderList; indHeader++)
    {
        // define region and test if valid
        pRegion = pHeader->pRegion;
        if (IsBadWritePtr(pRegion, sizeof(REGION)))
            return -2;

        // scan for all groups in region
        pHeapGroup = pHeader->pHeapData;
        pGroup = &pRegion->grpHeadList[0];
        bitvCommit = pHeader->bitvCommit;
        bitvEntryHi = 0;
        bitvEntryLo = 0;
        for (indGroup = 0; indGroup < GROUPS_PER_REGION; indGroup++)
        {
            // initialize entry vector and entry counts for group
            bitvGroupHi = 0;
            bitvGroupLo = 0;
            cntAllocated = 0;
            for (indEntry = 0; indEntry < 64; indEntry++)
                cntFree[indEntry] = 0;

            // test if group is committed - the group's commit flag has been
            // shifted into the sign bit, and a clear bit means committed
            if ((int)bitvCommit >= 0)
            {
                // committed, ensure addresses are accessible
                if (IsBadWritePtr(pHeapGroup, BYTES_PER_GROUP))
                    return -4;

                // for each page in group, check validity of entries
                pHeapPage = pHeapGroup;
                for (indPage = 0; indPage < PAGES_PER_GROUP; indPage++)
                {
                    // define pointers to first and past last entry
                    pEntry = (PENTRY)((char *)pHeapPage + ENTRY_OFFSET);
                    pEntryLast = (PENTRY)((char *)pEntry
                                          + MAX_FREE_ENTRY_SIZE);

                    // check front and back page sentinel values (-1 words
                    // written when the group was committed)
                    if (*(int *)((char *)pEntry - sizeof(int)) != -1 ||
                        *(int *)pEntryLast != -1)
                        return -5;

                    // loop through each entry in page
                    do
                    {
                        // get entry size and test if allocated
                        sizeEntry = sizeTrue = pEntry->sizeFront;
                        if (sizeEntry & 1)
                        {
                            // allocated entry - set true size
                            sizeTrue--;

                            // test against maximum allocated entry size
                            if (sizeTrue > MAX_ALLOC_ENTRY_SIZE)
                                return -6;

                            // increment allocated count for group
                            cntAllocated++;
                        }
                        else
                        {
                            // free entry - determine index and increment
                            // count for list head checking
                            indEntry = (sizeTrue >> 4) - 1;
                            if (indEntry > 63)
                                indEntry = 63;
                            cntFree[indEntry]++;
                        }

                        // check size validity - must be a positive multiple
                        // of 16 no larger than a whole page's free space
                        if (sizeTrue < 0x10 || sizeTrue & 0xf
                            || sizeTrue > MAX_FREE_ENTRY_SIZE)
                            return -7;

                        // check if back entry size same as front
                        if (((PENTRYEND)((char *)pEntry + sizeTrue
                             - sizeof(int)))->sizeBack != sizeEntry)
                            return -8;

                        // move to next entry in page
                        pEntry = (PENTRY)((char *)pEntry + sizeTrue);
                    }
                    while (pEntry < pEntryLast);

                    // test if last entry did not overrun page end
                    if (pEntry != pEntryLast)
                        return -8;

                    // point to next page in data heap
                    pHeapPage = (void *)((char *)pHeapPage + BYTES_PER_PAGE);
                }

                // check if allocated entry count is correct
                if (pGroup->cntEntries != cntAllocated)
                    return -9;

                // check validity of linked-lists of free entries
                // (pEntryHead overlays each list head as a PENTRY)
                pEntryHead = (PENTRY)((char *)&pGroup->listHead[0] -
                                      sizeof(int));
                for (indHead = 0; indHead < 64; indHead++)
                {
                    // scan through list until head is reached or expected
                    // number of entries traversed
                    cntEntries = 0;
                    pEntry = pEntryHead;
                    while ((pNext = pEntry->pEntryNext) != pEntryHead &&
                           cntEntries != cntFree[indHead])
                    {
                        // test if next pointer is in group data area
                        if ((void *)pNext < pHeapGroup || (void *)pNext >=
                            (void *)((char *)pHeapGroup + BYTES_PER_GROUP))
                            return -10;

                        // determine page address of next entry
                        pPageStart = (void *)((uintptr_t)pNext &
                                     ~(uintptr_t)(BYTES_PER_PAGE - 1));

                        // point to first entry and past last in the page
                        pEntryPage = (PENTRY)((char *)pPageStart +
                                              ENTRY_OFFSET);
                        pEntryPageLast = (PENTRY)((char *)pEntryPage +
                                                  MAX_FREE_ENTRY_SIZE);

                        // do scan from start of page
                        // no error checking since it was already scanned
                        while (pEntryPage != pEntryPageLast)
                        {
                            // if entry matches, exit loop
                            if (pEntryPage == pNext)
                                break;

                            // point to next entry
                            pEntryPage = (PENTRY)((char *)pEntryPage +
                                         (pEntryPage->sizeFront & ~1));
                        }

                        // if page end reached, pNext was not valid
                        if (pEntryPage == pEntryPageLast)
                            return -11;

                        // entry valid, but check if entry index matches
                        // the header
                        indEntry = (pNext->sizeFront >> 4) - 1;
                        if (indEntry > 63)
                            indEntry = 63;
                        if (indEntry != indHead)
                            return -12;

                        // check if previous pointer in pNext points
                        // back to pEntry
                        if (pNext->pEntryPrev != pEntry)
                            return -13;

                        // update scan pointer and counter
                        pEntry = pNext;
                        cntEntries++;
                    }

                    // if nonzero number of entries, set bit in group
                    // and region vectors
                    if (cntEntries)
                    {
                        if (indHead < 32)
                        {
                            bitvGroupHi |= 0x80000000L >> indHead;
                            bitvEntryHi |= 0x80000000L >> indHead;
                        }
                        else
                        {
                            bitvGroupLo |= 0x80000000L >> (indHead - 32);
                            bitvEntryLo |= 0x80000000L >> (indHead - 32);
                        }
                    }

                    // check if list is exactly the expected size
                    if (pEntry->pEntryNext != pEntryHead ||
                        cntEntries != cntFree[indHead])
                        return -14;

                    // check if previous pointer in header points to
                    // last entry processed
                    if (pEntryHead->pEntryPrev != pEntry)
                        return -15;

                    // point to next linked-list header - note size
                    pEntryHead = (PENTRY)((char *)pEntryHead +
                                          sizeof(LISTHEAD));
                }
            }

            // test if group vector is valid
            if (bitvGroupHi != pRegion->bitvGroupHi[indGroup] ||
                bitvGroupLo != pRegion->bitvGroupLo[indGroup])
                return -16;

            // adjust for next group in region
            pHeapGroup = (void *)((char *)pHeapGroup + BYTES_PER_GROUP);
            pGroup++;
            bitvCommit <<= 1;
        }

        // test if header entry vector is valid
        if (bitvEntryHi != pHeader->bitvEntryHi ||
            bitvEntryLo != pHeader->bitvEntryLo)
            return -17;

        // adjust for next header in list
        pHeader++;
    }
    return 0;
}
#if 0
/*
 * DumpEntry - debug-only logging helper, compiled out (#if 0).
 *
 * Appends pLine to d:\heap.log; if piValue is non-NULL, *piValue is also
 * written, rendered as "0x" followed by 8 lowercase hex digits.
 * NOTE(review): buffer is 80 bytes and the strcpy/strcat calls are
 * unbounded, and the CreateFile result is not checked - acceptable only
 * because this is dead debugging code.
 */
void DumpEntry (char * pLine, int * piValue)
{
    HANDLE  hdlFile;
    char    buffer[80];
    int     index;
    int     iTemp;
    char    chTemp[9];
    DWORD   dwWritten;

    // open (or create) the log file and seek to its end to append
    hdlFile = CreateFile("d:\\heap.log", GENERIC_READ | GENERIC_WRITE,
                         0, NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
    SetFilePointer(hdlFile, 0, NULL, FILE_END);
    strcpy(buffer, pLine);
    if (piValue)
    {
        // format *piValue as 8 hex digits, least significant nibble last
        strcat(buffer, "0x");
        iTemp = *piValue;
        for (index = 7; index >= 0; index--)
        {
            // convert nibble to '0'-'9' or 'a'-'f'
            if ((chTemp[index] = (iTemp & 0xf) + '0') > '9')
                chTemp[index] += 'a' - ('9' + 1);
            iTemp >>= 4;
        }
        chTemp[8] = '\0';
        strcat(buffer, chTemp);
    }
    strcat(buffer, "\r\n");
    WriteFile(hdlFile, buffer, strlen(buffer), &dwWritten, NULL);
    CloseHandle(hdlFile);
}
#endif
  1404. #ifdef CRTDLL
  1405. /* Old (VC++ 5.0) small-block heap data and code */
/*
 * Statically allocated descriptor for the first region of the old
 * (VC++ 5.0) small-block heap.  It doubles as the anchor of the circular
 * doubly-linked list of region descriptors; p_pages_begin == _OLD_NO_PAGES
 * marks it as not yet (or no longer) backing any reserved pages.
 */
__old_sbh_region_t __old_small_block_heap = {
    &__old_small_block_heap,                        /* p_next_region */
    &__old_small_block_heap,                        /* p_prev_region */
    &__old_small_block_heap.region_map[0],          /* p_starting_region_map */
    &__old_small_block_heap.region_map[0],          /* p_first_uncommitted */
    (__old_sbh_page_t *)_OLD_NO_PAGES,              /* p_pages_begin */
    (__old_sbh_page_t *)_OLD_NO_PAGES,              /* p_pages_end */
    { _OLD_PARAS_PER_PAGE, _OLD_NO_FAILED_ALLOC }   /* region_map[] */
};

// region where allocation searches resume (rotating start point)
static __old_sbh_region_t *__old_sbh_p_starting_region = &__old_small_block_heap;

// count of committed pages that are completely free and could be decommitted
static int __old_sbh_decommitable_pages = 0;

// requests at or below this size are served from the small-block heap
size_t __old_sbh_threshold = _OLD_PARASIZE * (_OLD_PARAS_PER_PAGE / 8);

/* Prototypes for user functions */
size_t __cdecl _get_old_sbh_threshold(void);
int __cdecl _set_old_sbh_threshold(size_t);
  1421. /***
  1422. *size_t _get_old_sbh_threshold() - return small-block threshold
  1423. *
  1424. *Purpose:
  1425. * Return the current value of __old_sbh_threshold
  1426. *
  1427. *Entry:
  1428. * None.
  1429. *
  1430. *Exit:
  1431. * See above.
  1432. *
  1433. *Exceptions:
  1434. *
  1435. *******************************************************************************/
  1436. size_t __cdecl _get_old_sbh_threshold (
  1437. void
  1438. )
  1439. {
  1440. return __old_sbh_threshold;
  1441. }
  1442. /***
  1443. *int _set_old_sbh_threshold(size_t threshold) - set small-block heap threshold
  1444. *
  1445. *Purpose:
  1446. * Set the upper limit for the size of an allocation which will be
  1447. * supported from the small-block heap. It is required that at least two
  1448. * allocations can come from a page. This imposes an upper limit on how
  1449. * big the new threshold can be.
  1450. *
  1451. *Entry:
* size_t threshold - proposed new value for __old_sbh_threshold
  1453. *
  1454. *Exit:
  1455. * Returns 1 if successful. Returns 0 if threshold was too big.
  1456. *
  1457. *Exceptions:
  1458. *
  1459. *******************************************************************************/
  1460. int __cdecl _set_old_sbh_threshold (
  1461. size_t threshold
  1462. )
  1463. {
  1464. /*
  1465. * Round up the proposed new value to the nearest paragraph
  1466. */
  1467. threshold = (threshold + _OLD_PARASIZE - 1) & ~(_OLD_PARASIZE - 1);
  1468. /*
  1469. * Require that at least two allocations be can be made within a
  1470. * page.
  1471. */
  1472. if ( threshold <= (_OLD_PARASIZE * (_OLD_PARAS_PER_PAGE / 2)) ) {
  1473. __old_sbh_threshold = threshold;
  1474. return 1;
  1475. }
  1476. else
  1477. return 0;
  1478. }
  1479. /***
  1480. *__old_sbh_region_t * __old_sbh_new_region() - get a region for the small-block heap
  1481. *
  1482. *Purpose:
  1483. * Creates and adds a new region for the small-block heap. First, a
  1484. * descriptor (__old_sbh_region_t) is obtained for the new region. Next,
  1485. * VirtualAlloc() is used to reserved an address space of size
  1486. * _OLD_PAGES_PER_REGION * _OLD_PAGESIZE, and the first _PAGES_PER_COMMITTMENT
  1487. * pages are committed.
  1488. *
  1489. * Note that if __old_small_block_heap is available (i.e., the p_pages_begin
* field is _OLD_NO_PAGES), it becomes the descriptor for the new region. This is
  1491. * basically the small-block heap initialization.
  1492. *
  1493. *Entry:
  1494. * No arguments.
  1495. *
  1496. *Exit:
  1497. * If successful, a pointer to the descriptor for the new region is
  1498. * returned. Otherwise, NULL is returned.
  1499. *
  1500. *******************************************************************************/
__old_sbh_region_t * __cdecl __old_sbh_new_region(
    void
    )
{
    __old_sbh_region_t *    pregnew;
    __old_sbh_page_t *      ppage;
    int                     i;

    /*
     * Get a region descriptor (__old_sbh_region_t). If __old_small_block_heap
     * is available (its p_pages_begin is _OLD_NO_PAGES), always use it.
     */
    if ( __old_small_block_heap.p_pages_begin == _OLD_NO_PAGES ) {
        pregnew = &__old_small_block_heap;
    }
    else {
        /*
         * Allocate space for the new __old_sbh_region_t structure. Note that
         * this allocation comes out of the 'big-block' heap (_crtheap).
         */
        if ( (pregnew = HeapAlloc( _crtheap, 0, sizeof(__old_sbh_region_t) ))
             == NULL )
            return NULL;
    }

    /*
     * Reserve a new contiguous address range (i.e., a region).
     */
    if ( (ppage = VirtualAlloc( NULL,
                                _OLD_PAGESIZE * _OLD_PAGES_PER_REGION,
                                MEM_RESERVE,
                                PAGE_READWRITE )) != NULL )
    {
        /*
         * Commit the first _OLD_PAGES_PER_COMMITMENT pages of the new region.
         */
        if ( VirtualAlloc( ppage,
                           _OLD_PAGESIZE * _OLD_PAGES_PER_COMMITMENT,
                           MEM_COMMIT,
                           PAGE_READWRITE ) != NULL )
        {
            /*
             * Insert *pregnew into the circular linked list of regions (just
             * before __old_small_block_heap). The static descriptor links to
             * itself; NULL links here would mean it was never initialized.
             */
            if ( pregnew == &__old_small_block_heap ) {
                if ( __old_small_block_heap.p_next_region == NULL )
                    __old_small_block_heap.p_next_region =
                        &__old_small_block_heap;
                if ( __old_small_block_heap.p_prev_region == NULL )
                    __old_small_block_heap.p_prev_region =
                        &__old_small_block_heap;
            }
            else {
                pregnew->p_next_region = &__old_small_block_heap;
                pregnew->p_prev_region = __old_small_block_heap.p_prev_region;
                __old_small_block_heap.p_prev_region = pregnew;
                pregnew->p_prev_region->p_next_region = pregnew;
            }

            /*
             * Fill in the rest of *pregnew
             */
            pregnew->p_pages_begin = ppage;
            pregnew->p_pages_end = ppage + _OLD_PAGES_PER_REGION;
            pregnew->p_starting_region_map = &(pregnew->region_map[0]);
            pregnew->p_first_uncommitted =
                &(pregnew->region_map[_OLD_PAGES_PER_COMMITMENT]);

            /*
             * Initialize pregnew->region_map[]: committed pages start fully
             * free, the rest are marked uncommitted.
             */
            for ( i = 0 ; i < _OLD_PAGES_PER_REGION ; i++ ) {
                if ( i < _OLD_PAGES_PER_COMMITMENT )
                    pregnew->region_map[i].free_paras_in_page =
                        _OLD_PARAS_PER_PAGE;
                else
                    pregnew->region_map[i].free_paras_in_page =
                        _OLD_UNCOMMITTED_PAGE;
                pregnew->region_map[i].last_failed_alloc =
                    _OLD_NO_FAILED_ALLOC;
            }

            /*
             * Initialize the committed pages themselves (zero them, then set
             * up each page's scan pointer, free count and sentinel).
             */
            memset( ppage, 0, _OLD_PAGESIZE * _OLD_PAGES_PER_COMMITMENT );
            while ( ppage < pregnew->p_pages_begin +
                            _OLD_PAGES_PER_COMMITMENT )
            {
                ppage->p_starting_alloc_map = &(ppage->alloc_map[0]);
                ppage->free_paras_at_start = _OLD_PARAS_PER_PAGE;
                (ppage++)->alloc_map[_OLD_PARAS_PER_PAGE] = (__old_page_map_t)-1;
            }

            /*
             * Return success
             */
            return pregnew;
        }
        else {
            /*
             * Couldn't commit the pages. Release the reserved address space.
             */
            VirtualFree( ppage, 0, MEM_RELEASE );
        }
    }

    /*
     * Unable to create the new region. Free the region descriptor, if
     * it was dynamically allocated.
     */
    if ( pregnew != &__old_small_block_heap )
        HeapFree(_crtheap, 0, pregnew);

    /*
     * Return failure.
     */
    return NULL;
}
  1612. /***
  1613. *void __old_sbh_release_region(preg) - release region
  1614. *
  1615. *Purpose:
  1616. * Release the address space associated with the specified region
  1617. * descriptor. Also, free the specified region descriptor and update
  1618. * the linked list of region descriptors if appropriate.
  1619. *
  1620. *Entry:
  1621. * __old_sbh_region_t * preg - pointer to descriptor for the region to
  1622. * be released.
  1623. *
  1624. *Exit:
  1625. * No return value.
  1626. *
  1627. *Exceptions:
  1628. *
  1629. *******************************************************************************/
  1630. void __cdecl __old_sbh_release_region(
  1631. __old_sbh_region_t * preg
  1632. )
  1633. {
  1634. /*
  1635. * Release the passed region
  1636. */
  1637. VirtualFree( preg->p_pages_begin, 0, MEM_RELEASE);
  1638. /*
  1639. * Update __old_sbh_p_starting_region, if necessary
  1640. */
  1641. if ( __old_sbh_p_starting_region == preg )
  1642. __old_sbh_p_starting_region = preg->p_prev_region;
  1643. if ( preg != &__old_small_block_heap ) {
  1644. /*
  1645. * Update linked list of region descriptors.
  1646. */
  1647. preg->p_prev_region->p_next_region = preg->p_next_region;
  1648. preg->p_next_region->p_prev_region = preg->p_prev_region;
  1649. /*
  1650. * Free the region desciptor
  1651. */
  1652. HeapFree(_crtheap, 0, preg);
  1653. }
  1654. else {
  1655. /*
  1656. * Mark p_pages_begin as _OLD_NO_PAGES to indicate __old_small_block_heap
  1657. * is not associated with any region (and can be reused). This the
  1658. * only region descriptor for which this is supported.
  1659. */
  1660. __old_small_block_heap.p_pages_begin = _OLD_NO_PAGES;
  1661. }
  1662. }
  1663. /***
  1664. *void __old_sbh_decommit_pages(count) - decommit specified number of pages
  1665. *
  1666. *Purpose:
  1667. * Decommit count pages, if possible, in reverse (i.e., last to
  1668. * first) order. If this results in all the pages in any region being
  1669. * uncommitted, the region is released.
  1670. *
  1671. *Entry:
  1672. * int count - number of pages to decommit
  1673. *
  1674. *Exit:
  1675. * No return value.
  1676. *
  1677. *Exceptions:
  1678. *
  1679. *******************************************************************************/
void __cdecl __old_sbh_decommit_pages(
    int count
    )
{
    __old_sbh_region_t *    preg1;
    __old_sbh_region_t *    preg2;
    __old_region_map_t *    pregmap;
    int                     page_decommitted_flag;
    int                     i;

    /*
     * Scan the regions of the small-block heap, in reverse order, looking
     * for pages which can be decommitted.
     */
    preg1 = __old_small_block_heap.p_prev_region;
    do {
        if ( preg1->p_pages_begin != _OLD_NO_PAGES ) {
            /*
             * Scan the pages in *preg1, in reverse order, looking for
             * pages which can be decommitted.
             */
            for ( i = _OLD_PAGES_PER_REGION - 1, page_decommitted_flag = 0,
                  pregmap = &(preg1->region_map[i]) ;
                  i >= 0 ; i--, pregmap-- )
            {
                /*
                 * Check if the pool page is entirely unused (every
                 * paragraph free) and, if so, decommit it.
                 */
                if ( pregmap->free_paras_in_page == _OLD_PARAS_PER_PAGE ) {
                    if ( VirtualFree((preg1->p_pages_begin) + i, _OLD_PAGESIZE,
                                     MEM_DECOMMIT) )
                    {
                        /*
                         * Mark the page as uncommitted, update the count
                         * of (global) decommitable pages, update the
                         * p_first_uncommitted field of the region
                         * descriptor, set the flag indicating at least
                         * one page has been decommitted in the region,
                         * and decrement count.
                         */
                        pregmap->free_paras_in_page = _OLD_UNCOMMITTED_PAGE;
                        __old_sbh_decommitable_pages--;
                        if ( (preg1->p_first_uncommitted == NULL)
                             || (preg1->p_first_uncommitted > pregmap) )
                            preg1->p_first_uncommitted = pregmap;
                        page_decommitted_flag++;
                        if ( --count == 0 )
                            break;
                    }
                }
            }

            /*
             * 'Decrement' the preg1 pointer, but save a copy in preg2 in
             * case the region needs to be released.
             */
            preg2 = preg1;
            preg1 = preg1->p_prev_region;

            /*
             * If at least one page was decommitted here, determine if all
             * the pages in the region are now uncommitted so that the
             * whole region can be released.
             */
            if ( page_decommitted_flag &&
                 (preg2->region_map[0].free_paras_in_page ==
                  _OLD_UNCOMMITTED_PAGE) )
            {
                /* empty-body scan: stops at the first committed page */
                for ( i = 1, pregmap = &(preg2->region_map[1]) ;
                      (i < _OLD_PAGES_PER_REGION) &&
                      (pregmap->free_paras_in_page ==
                       _OLD_UNCOMMITTED_PAGE) ;
                      i++, pregmap++ );

                if ( i == _OLD_PAGES_PER_REGION )
                    __old_sbh_release_region(preg2);
            }
        }
    }
    while ( (preg1 != __old_small_block_heap.p_prev_region) && (count > 0) );
}
  1756. /***
  1757. *__old_page_map_t *__old_sbh_find_block(pblck, ppreg, pppage) - find block in
  1758. * small-block heap
  1759. *
  1760. *Purpose:
  1761. * Determine if the specified allocation block lies in the small-block
  1762. * heap and, if so, return the region, page and starting paragraph index
  1763. * of the block.
  1764. *
  1765. *Entry:
  1766. * void * pblck - pointer to block to be freed
  1767. * __old_sbh_region_t ** ppreg - pointer to a pointer to the region
  1768. * holding *pblck, if found
  1769. * __old_sbh_page_t ** pppage - pointer to a pointer to the page holding
  1770. * *pblck, if found
  1771. *
  1772. *Exit:
  1773. * If successful, a pointer to the starting alloc_map[] entry for the
  1774. * allocation block is returned.
  1775. * If unsuccessful, NULL is returned.
  1776. *
  1777. *Exceptions:
  1778. *
  1779. *******************************************************************************/
__old_page_map_t * __cdecl __old_sbh_find_block (
    void * pblck,
    __old_sbh_region_t ** ppreg,
    __old_sbh_page_t ** pppage
    )
{
    __old_sbh_region_t *    preg;
    __old_sbh_page_t *      ppage;

    /* walk the circular list of regions, starting at the static anchor */
    preg = &__old_small_block_heap;
    do
    {
        /*
         * Does the block lie within this small heap region?
         */
        if ( (pblck > (void *)preg->p_pages_begin) &&
             (pblck < (void *)preg->p_pages_end) )
        {
            /*
             * pblck lies within the region! Carry out a couple of
             * important validity checks: the block must be paragraph
             * aligned, and must lie at or past the start of the page's
             * alloc_blocks[] array (i.e., not inside the page header).
             */
            if ( (((uintptr_t)pblck & (_OLD_PARASIZE - 1)) == 0) &&
                 (((uintptr_t)pblck & (_OLD_PAGESIZE - 1)) >=
                  offsetof(struct __old_sbh_page_struct, alloc_blocks[0])) )
            {
                /*
                 * Copy region and page pointers back through the passed
                 * pointers. The page is found by masking the block address
                 * down to a page boundary.
                 */
                *ppreg = preg;
                *pppage = ppage = (__old_sbh_page_t *)((uintptr_t)pblck &
                                  ~(_OLD_PAGESIZE - 1));

                /*
                 * Return pointer to the alloc_map[] entry of the block
                 * (alloc_map[] parallels alloc_blocks[], one entry per
                 * paragraph).
                 */
                return ( &(ppage->alloc_map[0]) + ((__old_para_t *)pblck -
                         &(ppage->alloc_blocks[0])) );
            }

            /* inside the region but misaligned or in the header: invalid */
            return NULL;
        }
    }
    while ( (preg = preg->p_next_region) != &__old_small_block_heap );

    /* pblck does not belong to any small-block heap region */
    return NULL;
}
  1824. /***
  1825. *void __old_sbh_free_block(preg, ppage, pmap) - free block
  1826. *
  1827. *Purpose:
  1828. * Free the specified block from the small-block heap.
  1829. *
  1830. *Entry:
  1831. * __old_sbh_region_t *preg - pointer to the descriptor for the
  1832. * region containing the block
  1833. * __old_sbh_page_t * ppage - pointer to the page containing the
  1834. * block
  1835. * __old_page_map_t * pmap - pointer to the initial alloc_map[]
  1836. * entry for the allocation block
  1837. *
  1838. *Exit:
  1839. * No return value.
  1840. *
  1841. *Exceptions:
  1842. *
  1843. *******************************************************************************/
  1844. void __cdecl __old_sbh_free_block (
  1845. __old_sbh_region_t * preg,
  1846. __old_sbh_page_t * ppage,
  1847. __old_page_map_t * pmap
  1848. )
  1849. {
  1850. __old_region_map_t * pregmap;
  1851. pregmap = &(preg->region_map[0]) + (ppage - preg->p_pages_begin);
  1852. /*
  1853. * Update the region_map[] entry.
  1854. */
  1855. pregmap->free_paras_in_page += (int)*pmap;
  1856. /*
  1857. * Mark the alloc_map[] entry as free
  1858. */
  1859. *pmap = _OLD_FREE_PARA;
  1860. /*
  1861. * Clear the last_failed_alloc[] entry for the page.
  1862. */
  1863. pregmap->last_failed_alloc = _OLD_NO_FAILED_ALLOC;
  1864. /*
  1865. * Check if the count of decommitable pages needs to be updated, and
  1866. * if some pages need to be decommited.
  1867. */
  1868. if ( pregmap->free_paras_in_page == _OLD_PARAS_PER_PAGE )
  1869. if ( ++__old_sbh_decommitable_pages == (2 * _OLD_PAGES_PER_COMMITMENT) )
  1870. __old_sbh_decommit_pages(_OLD_PAGES_PER_COMMITMENT);
  1871. }
  1872. /***
  1873. *void * __old_sbh_alloc_block(para_req) - allocate a block
  1874. *
  1875. *Purpose:
  1876. * Allocate a block from the small-block heap, the specified number of
  1877. * paragraphs in size.
  1878. *
  1879. *Entry:
  1880. * size_t para_req - size of the allocation request in paragraphs.
  1881. *
  1882. *Exit:
  1883. * Returns a pointer to the newly allocated block, if successful.
  1884. * Returns NULL, if failure.
  1885. *
  1886. *Exceptions:
  1887. *
  1888. *******************************************************************************/
  1889. void * __cdecl __old_sbh_alloc_block (
  1890. size_t para_req
  1891. )
  1892. {
  1893. __old_sbh_region_t * preg;
  1894. __old_sbh_page_t * ppage;
  1895. __old_sbh_page_t * ppage2;
  1896. __old_region_map_t * pregmap;
  1897. __old_region_map_t * pregmap2;
  1898. void * retp;
  1899. int i, j;
  1900. /*
  1901. * First pass through the small-block heap. Try to satisfy the current
  1902. * request from already committed pages.
  1903. */
  1904. preg = __old_sbh_p_starting_region;
  1905. do {
  1906. if ( preg->p_pages_begin != _OLD_NO_PAGES ) {
  1907. /*
  1908. * Search from *p_starting_region_map to the end of the
  1909. * region_map[] array.
  1910. */
  1911. for ( pregmap = preg->p_starting_region_map,
  1912. pregmap2 = &(preg->region_map[_OLD_PAGES_PER_REGION]),
  1913. ppage = preg->p_pages_begin +
  1914. (int)(pregmap - &(preg->region_map[0])) ;
  1915. pregmap < pregmap2 ;
  1916. pregmap++, ppage++ )
  1917. {
  1918. /*
  1919. * If the page has at least para_req free paragraphs, try
  1920. * to satisfy the request in this page.
  1921. */
  1922. if ( (pregmap->free_paras_in_page >= (int)para_req) &&
  1923. (pregmap->last_failed_alloc > para_req) )
  1924. {
  1925. if ( (retp = __old_sbh_alloc_block_from_page(
  1926. ppage,
  1927. pregmap->free_paras_in_page,
  1928. para_req)) != NULL )
  1929. {
  1930. /*
  1931. * Success.
  1932. * Update __old_sbh_p_starting_region.
  1933. * Update free_paras_in_page field for the page.
  1934. * Update the p_starting_region_map field in the
  1935. * region.
  1936. * Return a pointer to the allocated block.
  1937. */
  1938. __old_sbh_p_starting_region = preg;
  1939. pregmap->free_paras_in_page -= (int)para_req;
  1940. preg->p_starting_region_map = pregmap;
  1941. return retp;
  1942. }
  1943. else {
  1944. /*
  1945. * Update last_failed_alloc field.
  1946. */
  1947. pregmap->last_failed_alloc = para_req;
  1948. }
  1949. }
  1950. }
  1951. /*
  1952. * If necessary, search from 0 page to search_start_index.
  1953. */
  1954. for ( pregmap = &(preg->region_map[0]),
  1955. pregmap2 = preg->p_starting_region_map,
  1956. ppage = preg->p_pages_begin ;
  1957. pregmap < pregmap2 ;
  1958. pregmap++, ppage++ )
  1959. {
  1960. /*
  1961. * If the page has at least para_req free paragraphs, try
  1962. * to satisfy the request in this page.
  1963. */
  1964. if ( (pregmap->free_paras_in_page >= (int)para_req) &&
  1965. (pregmap->last_failed_alloc > para_req) )
  1966. {
  1967. if ( (retp = __old_sbh_alloc_block_from_page(
  1968. ppage,
  1969. pregmap->free_paras_in_page,
  1970. para_req)) != NULL )
  1971. {
  1972. /*
  1973. * Success.
  1974. * Update __old_sbh_p_starting_region.
  1975. * Update free_paras_in_page field for the page.
  1976. * Update the p_starting_region_map field in the
  1977. * region.
  1978. * Return a pointer to the allocated block.
  1979. */
  1980. __old_sbh_p_starting_region = preg;
  1981. pregmap->free_paras_in_page -= (int)para_req;
  1982. preg->p_starting_region_map = pregmap;
  1983. return retp;
  1984. }
  1985. else {
  1986. /*
  1987. * Update last_failed_alloc field.
  1988. */
  1989. pregmap->last_failed_alloc = para_req;
  1990. }
  1991. }
  1992. }
  1993. }
  1994. }
  1995. while ( (preg = preg->p_next_region) != __old_sbh_p_starting_region );
  1996. /*
  1997. * Second pass through the small-block heap. This time, look for an
  1998. * uncommitted page. Also, start at __old_small_block_heap rather than at
  1999. * *__old_sbh_p_starting_region.
  2000. */
  2001. preg = &__old_small_block_heap;
  2002. do
  2003. {
  2004. if ( (preg->p_pages_begin != _OLD_NO_PAGES) &&
  2005. (preg->p_first_uncommitted != NULL) )
  2006. {
  2007. pregmap = preg->p_first_uncommitted;
  2008. ppage = preg->p_pages_begin +
  2009. (pregmap - &(preg->region_map[0]));
  2010. /*
  2011. * Determine how many adjacent pages, up to
  2012. * _OLD_PAGES_PER_COMMITMENT, are uncommitted (and can now be
  2013. * committed)
  2014. */
  2015. for ( i = 0, pregmap2 = pregmap ;
  2016. (pregmap2->free_paras_in_page == _OLD_UNCOMMITTED_PAGE) &&
  2017. (i < _OLD_PAGES_PER_COMMITMENT) ;
  2018. pregmap2++, i++ ) ;
  2019. /*
  2020. * Commit the pages.
  2021. */
  2022. if ( VirtualAlloc( (void *)ppage,
  2023. i * _OLD_PAGESIZE,
  2024. MEM_COMMIT,
  2025. PAGE_READWRITE ) == ppage )
  2026. {
  2027. /*
  2028. * Initialize the committed pages.
  2029. */
  2030. memset(ppage, 0, i * _OLD_PAGESIZE);
  2031. for ( j = 0, ppage2 = ppage, pregmap2 = pregmap ;
  2032. j < i ;
  2033. j++, ppage2++, pregmap2++ )
  2034. {
  2035. /*
  2036. * Initialize fields in the page header
  2037. */
  2038. ppage2->p_starting_alloc_map = &(ppage2->alloc_map[0]);
  2039. ppage2->free_paras_at_start = _OLD_PARAS_PER_PAGE;
  2040. ppage2->alloc_map[_OLD_PARAS_PER_PAGE] = (__old_page_map_t)(-1);
  2041. /*
  2042. * Initialize region_map[] entry for the page.
  2043. */
  2044. pregmap2->free_paras_in_page = _OLD_PARAS_PER_PAGE;
  2045. pregmap2->last_failed_alloc = _OLD_NO_FAILED_ALLOC;
  2046. }
  2047. /*
  2048. * Update __old_sbh_p_starting_region
  2049. */
  2050. __old_sbh_p_starting_region = preg;
  2051. /*
  2052. * Update the p_first_uncommitted for the region.
  2053. */
  2054. while ( (pregmap2 < &(preg->region_map[_OLD_PAGES_PER_REGION]))
  2055. && (pregmap2->free_paras_in_page
  2056. != _OLD_UNCOMMITTED_PAGE) )
  2057. pregmap2++;
  2058. preg->p_first_uncommitted = (pregmap2 <
  2059. &(preg->region_map[_OLD_PAGES_PER_REGION])) ? pregmap2 :
  2060. NULL;
  2061. /*
  2062. * Fulfill the allocation request using the first of the
  2063. * newly committed pages.
  2064. */
  2065. ppage->alloc_map[0] = (__old_page_map_t)para_req;
  2066. /*
  2067. * Update the p_starting_region_map field in the region
  2068. * descriptor and region_map[] entry for the page.
  2069. */
  2070. preg->p_starting_region_map = pregmap;
  2071. pregmap->free_paras_in_page -= (int)para_req;
  2072. /*
  2073. * Update the p_starting_alloc_map and free_paras_at_start
  2074. * fields of the page.
  2075. */
  2076. ppage->p_starting_alloc_map = &(ppage->alloc_map[para_req]);
  2077. ppage->free_paras_at_start -= para_req;
  2078. /*
  2079. * Return pointer to allocated paragraphs.
  2080. */
  2081. return (void *)&(ppage->alloc_blocks[0]);
  2082. }
  2083. else {
  2084. /*
  2085. * Attempt to commit the pages failed. Return failure, the
  2086. * allocation will be attempted in the Win32 heap manager.
  2087. */
  2088. return NULL;
  2089. }
  2090. }
  2091. }
  2092. while ( (preg = preg->p_next_region) != &__old_small_block_heap );
  2093. /*
  2094. * Failure so far. None of the pages have a big enough free area to
  2095. * fulfill the pending request. All of the pages in all of the current
  2096. * regions are committed. Therefore, try to create a new region.
  2097. */
  2098. if ( (preg = __old_sbh_new_region()) != NULL ) {
  2099. /*
  2100. * Success! A new region has been created and the first few pages
  2101. * (_OLD_PAGES_PER_COMMITMENT to be exact) have been committed.
  2102. * satisfy the request out of the first page of the new region.
  2103. */
  2104. ppage = preg->p_pages_begin;
  2105. ppage->alloc_map[0] = (__old_page_map_t)para_req;
  2106. __old_sbh_p_starting_region = preg;
  2107. ppage->p_starting_alloc_map = &(ppage->alloc_map[para_req]);
  2108. ppage->free_paras_at_start = _OLD_PARAS_PER_PAGE - para_req;
  2109. (preg->region_map[0]).free_paras_in_page -= (__old_page_map_t)para_req;
  2110. return (void *)&(ppage->alloc_blocks[0]);
  2111. }
  2112. /*
  2113. * Everything has failed, return NULL
  2114. */
  2115. return NULL;
  2116. }
  2117. /***
  2118. *void * __old_sbh_alloc_block_from_page(ppage, free_para_count, para_req) -
  2119. * allocate a block from the given page.
  2120. *
  2121. *Purpose:
  2122. * Allocate a block from the specified page of the small-block heap, of
  2123. * the specified number of paragraphs in size.
  2124. *
  2125. *Entry:
  2126. * __old_sbh_page_t * ppage - pointer to a page in the small-block
  2127. * heap
  2128. * int free_para_count - number of free paragraphs in *ppage
  2129. * size_t para_req - size of the allocation request in
  2130. * paragraphs.
  2131. *
  2132. *Exit:
  2133. * Returns a pointer to the newly allocated block, if successful.
  2134. * Returns NULL, otherwise.
  2135. *
  2136. *Exceptions:
  2137. * It is assumed that free_para_count >= para_req on entry. This must be
  2138. * guaranteed by the caller. The behavior is undefined if this condition
  2139. * is violated.
  2140. *
  2141. *******************************************************************************/
void * __cdecl __old_sbh_alloc_block_from_page (
        __old_sbh_page_t *      ppage,
        size_t                  free_para_count,
        size_t                  para_req
        )
{
        __old_page_map_t *      pmap1;          /* scan cursor */
        __old_page_map_t *      pmap2;          /* end of a free run */
        __old_page_map_t *      pstartmap;      /* where the scan began */
        __old_page_map_t *      pendmap;        /* one past last map entry */
        size_t                  contiguous_free;

        pmap1 = pstartmap = ppage->p_starting_alloc_map;
        pendmap = &(ppage->alloc_map[_OLD_PARAS_PER_PAGE]);

        /*
         * Start at *p_starting_alloc_map and walk towards the end of
         * alloc_map[], looking for a string of free paragraphs big enough
         * to satisfy the current request.
         *
         * Check if there are enough free paragraphs at p_starting_alloc_map
         * to satisfy the pending allocation request.
         */
        if ( ppage->free_paras_at_start >= para_req ) {
            /*
             * Success right off!
             * Mark the alloc_map entry with the size of the allocation
             * request.
             */
            *pmap1 = (__old_page_map_t)para_req;

            /*
             * Update the p_starting_alloc_map and free_paras_at_start fields
             * in the page. If the allocation consumed through the end of
             * alloc_map[], wrap the starting hint back to entry 0.
             */
            if ( (pmap1 + para_req) < pendmap ) {
                ppage->p_starting_alloc_map += para_req;
                ppage->free_paras_at_start -= para_req;
            }
            else {
                ppage->p_starting_alloc_map = &(ppage->alloc_map[0]);
                ppage->free_paras_at_start = 0;
            }

            /*
             * Derive and return a pointer to the newly allocated
             * paragraphs.
             */
            return (void *)&(ppage->alloc_blocks[pmap1 -
                             &(ppage->alloc_map[0])]);
        }

        /*
         * See if the search loop can be started just beyond the paragraphs
         * examined above. Note, this test assumes alloc_map[_OLD_PARAS_PER_PAGE]
         * != _OLD_FREE_PARA!
         */
        if ( *(pmap1 + ppage->free_paras_at_start) != _OLD_FREE_PARA )
            pmap1 += ppage->free_paras_at_start;

        while ( pmap1 + para_req < pendmap ) {
            if ( *pmap1 == _OLD_FREE_PARA ) {
                /*
                 * pmap1 refers to a free paragraph. Determine if there are
                 * enough free paragraphs contiguous with it to satisfy the
                 * allocation request. Note that the loop below requires that
                 * alloc_map[_OLD_PARAS_PER_PAGE] != _OLD_FREE_PARA to guarantee
                 * termination.
                 */
                for ( pmap2 = pmap1 + 1, contiguous_free = 1 ;
                      *pmap2 == _OLD_FREE_PARA ;
                      pmap2++, contiguous_free++ );

                if ( contiguous_free < para_req ) {
                    /*
                     * There were not enough contiguous free paragraphs. Do
                     * a little bookkeeping before going on to the next
                     * iteration.
                     */
                    /* If pmap1 != pstartmap then these free paragraphs
                     * cannot be revisited.
                     */
                    if ( pmap1 == pstartmap ) {
                        /*
                         * Make sure free_paras_at_start is up-to-date.
                         */
                        ppage->free_paras_at_start = contiguous_free;
                    }
                    else {
                        /*
                         * These free paragraphs will not be revisited!
                         */
                        if ( (free_para_count -= contiguous_free) < para_req )
                            /*
                             * There are not enough unvisited free paragraphs
                             * to satisfy the current request. Return failure
                             * to the caller.
                             */
                            return NULL;
                    }

                    /*
                     * Update pmap1 for the next iteration of the loop.
                     */
                    pmap1 = pmap2;
                }
                else {
                    /*
                     * Success!
                     *
                     * Update the p_starting_alloc_map and free_paras_at_start
                     * fields in the page.
                     */
                    if ( (pmap1 + para_req) < pendmap ) {
                        ppage->p_starting_alloc_map = pmap1 + para_req;
                        ppage->free_paras_at_start = contiguous_free -
                                                     para_req;
                    }
                    else {
                        ppage->p_starting_alloc_map = &(ppage->alloc_map[0]);
                        ppage->free_paras_at_start = 0;
                    }

                    /*
                     * Mark the alloc_map entry with the size of the
                     * allocation request.
                     */
                    *pmap1 = (__old_page_map_t)para_req;

                    /*
                     * Derive and return a pointer to the newly allocated
                     * paragraphs.
                     */
                    return (void *)&(ppage->alloc_blocks[pmap1 -
                                     &(ppage->alloc_map[0])]);
                }
            }
            else {
                /*
                 * pmap1 points to start of an allocated block in alloc_map[].
                 * Skip over it (the first entry of a block holds its size
                 * in paragraphs).
                 */
                pmap1 = pmap1 + *pmap1;
            }
        }

        /*
         * Now start at index 0 in alloc_map[] and walk towards, but not past,
         * *pstartmap (where the first scan began), looking for a string of
         * free paragraphs big enough to satisfy the allocation request.
         */
        pmap1 = &(ppage->alloc_map[0]);
        while ( (pmap1 < pstartmap) &&
                (pmap1 + para_req < pendmap) )
        {
            if ( *pmap1 == _OLD_FREE_PARA ) {
                /*
                 * pmap1 refers to a free paragraph. Determine if there are
                 * enough free paragraphs contiguous with it to satisfy the
                 * allocation request.
                 */
                for ( pmap2 = pmap1 + 1, contiguous_free = 1 ;
                      *pmap2 == _OLD_FREE_PARA ;
                      pmap2++, contiguous_free++ );

                if ( contiguous_free < para_req ) {
                    /*
                     * There were not enough contiguous free paragraphs.
                     *
                     * Update the count of unvisited free paragraphs.
                     */
                    if ( (free_para_count -= contiguous_free) < para_req )
                        /*
                         * There are not enough unvisited free paragraphs
                         * to satisfy the current request. Return failure
                         * to the caller.
                         */
                        return NULL;

                    /*
                     * Update pmap1 for the next iteration of the loop.
                     */
                    pmap1 = pmap2;
                }
                else {
                    /*
                     * Success!
                     *
                     * Update the p_starting_alloc_map and free_paras_at_start
                     * fields in the page.
                     */
                    if ( (pmap1 + para_req) < pendmap ) {
                        ppage->p_starting_alloc_map = pmap1 + para_req;
                        ppage->free_paras_at_start = contiguous_free -
                                                     para_req;
                    }
                    else {
                        ppage->p_starting_alloc_map = &(ppage->alloc_map[0]);
                        ppage->free_paras_at_start = 0;
                    }

                    /*
                     * Mark the alloc_map entry with the size of the
                     * allocation request.
                     */
                    *pmap1 = (__old_page_map_t)para_req;

                    /*
                     * Derive and return a pointer to the newly allocated
                     * paragraphs.
                     */
                    return (void *)&(ppage->alloc_blocks[pmap1 -
                                     &(ppage->alloc_map[0])]);
                }
            }
            else {
                /*
                 * pmap1 points to start of an allocated block in alloc_map[].
                 * Skip over it.
                 */
                pmap1 = pmap1 + *pmap1;
            }
        }

        /*
         * Return failure.
         */
        return NULL;
}
  2355. /***
  2356. *size_t __old_sbh_resize_block(preg, ppage, pmap, new_para_sz) -
  2357. * resize block
  2358. *
  2359. *Purpose:
  2360. * Resize the specified block from the small-block heap. The allocation
  2361. * block is not moved.
  2362. *
  2363. *Entry:
  2364. * __old_sbh_region_t *preg - pointer to the descriptor for the
  2365. * region containing the block
  2366. * __old_sbh_page_t * ppage - pointer to the page containing the
  2367. * block
  2368. * __old_page_map_t * pmap - pointer to the initial alloc_map[]
  2369. * entry for the allocation block
  2370. * size_t new_para_sz - requested new size for the allocation
  2371. * block, in paragraphs.
  2372. *
  2373. *Exit:
  2374. * Returns 1, if successful. Otherwise, 0 is returned.
  2375. *
  2376. *Exceptions:
  2377. *
  2378. *******************************************************************************/
int __cdecl __old_sbh_resize_block (
        __old_sbh_region_t *    preg,
        __old_sbh_page_t *      ppage,
        __old_page_map_t *      pmap,
        size_t                  new_para_sz
        )
{
        __old_page_map_t *      pmap2;
        __old_page_map_t *      pmap3;
        __old_region_map_t *    pregmap;
        size_t                  old_para_sz;
        size_t                  free_para_count;
        int                     retval = 0;

        pregmap = &(preg->region_map[ppage - preg->p_pages_begin]);

        if ( (old_para_sz = *pmap) > new_para_sz ) {
            /*
             * The allocation block is to be shrunk. Only the first map
             * entry records the block size; the trailing paragraphs are
             * already marked _OLD_FREE_PARA, so rewriting the size is
             * sufficient to release them.
             */
            *pmap = (__old_page_map_t)new_para_sz;
            pregmap->free_paras_in_page += (int)(old_para_sz - new_para_sz);
            pregmap->last_failed_alloc = _OLD_NO_FAILED_ALLOC;
            retval++;
        }
        else if ( old_para_sz < new_para_sz ) {
            /*
             * The allocation block is to be grown to new_para_sz paragraphs
             * (if possible). It is never moved.
             */
            if ( (pmap + new_para_sz) <= &(ppage->alloc_map[_OLD_PARAS_PER_PAGE]) )
            {
                /*
                 * Determine if there are sufficient free paragraphs
                 * immediately following the block to expand it to the
                 * desired new size.
                 */
                for ( pmap2 = pmap + old_para_sz,
                      pmap3 = pmap + new_para_sz ;
                      (pmap2 < pmap3) && (*pmap2 == _OLD_FREE_PARA) ;
                      pmap2++ ) ;

                if ( pmap2 == pmap3 ) {
                    /*
                     * Success, mark the resized allocation.
                     */
                    *pmap = (__old_page_map_t)new_para_sz;

                    /*
                     * Check whether the p_starting_alloc_map and the
                     * free_paras_at_start fields need to be updated: the
                     * grown block may have swallowed the free run that
                     * the starting hint pointed into.
                     */
                    if ( (pmap <= ppage->p_starting_alloc_map) &&
                         (pmap3 > ppage->p_starting_alloc_map) )
                    {
                        if ( pmap3 < &(ppage->alloc_map[_OLD_PARAS_PER_PAGE]) ) {
                            ppage->p_starting_alloc_map = pmap3;
                            /*
                             * Determine how many contiguous free paragraphs
                             * there are starting at *pmap3. Note, this assumes
                             * that alloc_map[_OLD_PARAS_PER_PAGE] != _OLD_FREE_PARA.
                             */
                            for ( free_para_count = 0 ; *pmap3 == _OLD_FREE_PARA ;
                                  free_para_count++, pmap3++ ) ;
                            ppage->free_paras_at_start = free_para_count;
                        }
                        else {
                            ppage->p_starting_alloc_map = &(ppage->alloc_map[0]);
                            ppage->free_paras_at_start = 0;
                        }
                    }

                    /*
                     * Update the region_map[] entry. Here old_para_sz <
                     * new_para_sz, so the size_t subtraction wraps and the
                     * (int) cast intentionally yields the negative
                     * difference, debiting the free count.
                     */
                    pregmap->free_paras_in_page += (int)(old_para_sz - new_para_sz);
                    retval++;
                }
            }
        }
        /* retval is 1 on success, 0 if the block could not be resized */
        return retval;
}
  2455. /***
  2456. *void * __old_sbh_heap_check() - check small-block heap
  2457. *
  2458. *Purpose:
  2459. * Perform validity checks on the small-block heap.
  2460. *
  2461. *Entry:
  2462. * There are no arguments.
  2463. *
  2464. *Exit:
  2465. * Returns 0 if the small-block is okay.
  2466. * Returns < 0 if the small-block heap has an error. The exact value
  2467. * identifies where, in the source code below, the error was detected.
  2468. *
  2469. *Exceptions:
  2470. * There is no protection against memory access error (exceptions).
  2471. *
  2472. *******************************************************************************/
int __cdecl __old_sbh_heap_check (
        void
        )
{
        __old_sbh_region_t *    preg;
        __old_sbh_page_t *      ppage;
        int                     uncommitted_pages;
        int                     free_paras_in_page;
        int                     contiguous_free_paras;
        int                     starting_region_found;
        int                     p_starting_alloc_map_found;
        int                     i, j, k;

        starting_region_found = 0;
        preg = &__old_small_block_heap;
        do {
            /*
             * Verify __old_sbh_p_starting_region points at some region in
             * the circular list.
             */
            if ( __old_sbh_p_starting_region == preg )
                starting_region_found++;

            if ( (ppage = preg->p_pages_begin) != _OLD_NO_PAGES ) {
                /*
                 * Scan the pages of the region looking for
                 * inconsistencies.
                 */
                for ( i = 0, uncommitted_pages = 0,
                      ppage = preg->p_pages_begin ;
                      i < _OLD_PAGES_PER_REGION ;
                      i++, ppage++ )
                {
                    if ( preg->region_map[i].free_paras_in_page ==
                         _OLD_UNCOMMITTED_PAGE )
                    {
                        /*
                         * Verify p_first_uncommitted references the first
                         * uncommitted page of the region.
                         */
                        if ( (uncommitted_pages == 0) &&
                             (preg->p_first_uncommitted !=
                              &(preg->region_map[i])) )
                            /*
                             * Bad p_first_uncommitted field!
                             */
                            return -1;
                        uncommitted_pages++;
                    }
                    else {
                        /* The page is committed: validate its header. */
                        if ( ppage->p_starting_alloc_map >=
                             &(ppage->alloc_map[_OLD_PARAS_PER_PAGE]) )
                            /*
                             * Bad p_starting_alloc_map field
                             */
                            return -2;

                        if ( ppage->alloc_map[_OLD_PARAS_PER_PAGE] !=
                             (__old_page_map_t)-1 )
                            /*
                             * Bad alloc_map[_OLD_PARAS_PER_PAGE] sentinel
                             */
                            return -3;

                        /*
                         * Scan alloc_map[], recomputing the free-paragraph
                         * count and checking it against the cached
                         * bookkeeping fields.
                         */
                        j = 0;
                        p_starting_alloc_map_found = 0;
                        free_paras_in_page = 0;
                        contiguous_free_paras = 0;
                        while ( j < _OLD_PARAS_PER_PAGE ) {
                            /*
                             * Look for the *p_starting_alloc_map entry.
                             */
                            if ( &(ppage->alloc_map[j]) ==
                                 ppage->p_starting_alloc_map )
                                p_starting_alloc_map_found++;

                            if ( ppage->alloc_map[j] == _OLD_FREE_PARA ) {
                                /*
                                 * Free paragraph, increment the counts.
                                 */
                                free_paras_in_page++;
                                contiguous_free_paras++;
                                j++;
                            }
                            else {
                                /*
                                 * First paragraph of an allocated block
                                 * (the entry holds the block size).
                                 */
                                /*
                                 * Make sure the preceding free run, if any,
                                 * was smaller than the last_failed_alloc
                                 * recorded for the page - otherwise the
                                 * failure cache would wrongly reject
                                 * satisfiable requests.
                                 */
                                if ( contiguous_free_paras >=
                                     (int)preg->region_map[i].last_failed_alloc )
                                    /*
                                     * last_failed_alloc was mismarked!
                                     */
                                    return -4;

                                /*
                                 * If this is the end of the string of free
                                 * paragraphs starting at *p_starting_alloc_map,
                                 * verify that free_paras_at_start is
                                 * reasonable.
                                 */
                                if ( p_starting_alloc_map_found == 1 ) {
                                    if ( contiguous_free_paras <
                                         (int)ppage->free_paras_at_start )
                                        return -5;
                                    else
                                        /*
                                         * Set flag to 2 so the check is not
                                         * repeated.
                                         */
                                        p_starting_alloc_map_found++;
                                }
                                contiguous_free_paras = 0;

                                /*
                                 * Scan the block's remaining paragraphs and
                                 * make sure they are marked properly (they
                                 * should look like free paragraphs).
                                 */
                                for ( k = j + 1 ;
                                      k < j + ppage->alloc_map[j] ; k++ )
                                {
                                    if ( ppage->alloc_map[k] != _OLD_FREE_PARA )
                                        /*
                                         * alloc_map[k] is mismarked!
                                         */
                                        return -6;
                                }
                                /* jump past the allocated block */
                                j = k;
                            }
                        }

                        if ( free_paras_in_page !=
                             preg->region_map[i].free_paras_in_page )
                            /*
                             * region_map[i] does not match the number of
                             * free paragraphs in the page!
                             */
                            return -7;

                        if ( p_starting_alloc_map_found == 0 )
                            /*
                             * Bad p_starting_alloc_map field!
                             */
                            return -8;
                    }
                }
            }
        }
        while ( (preg = preg->p_next_region) != &__old_small_block_heap );

        if ( starting_region_found == 0 )
            /*
             * Bad __old_sbh_p_starting_region!
             */
            return -9;

        /* heap is consistent */
        return 0;
}
  2624. #endif /* CRTDLL */
  2625. #endif /* ndef _WIN64 */