Leaked source code of Windows Server 2003
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1110 lines
30 KiB

  1. // Ruler
  2. // 1 2 3 4 5 6 7 8
  3. //345678901234567890123456789012345678901234567890123456789012345678901234567890
  4. /********************************************************************/
  5. /* */
  6. /* The standard layout. */
  7. /* */
  8. /* The standard layout for 'cpp' files in this code is as */
  9. /* follows: */
  10. /* */
  11. /* 1. Include files. */
  12. /* 2. Constants local to the class. */
  13. /* 3. Data structures local to the class. */
  14. /* 4. Data initializations. */
  15. /* 5. Static functions. */
  16. /* 6. Class functions. */
  17. /* */
  18. /* The constructor is typically the first function, class */
  19. /* member functions appear in alphabetical order with the */
  20. /* destructor appearing at the end of the file. Any section */
  21. /* or function that is not required is simply omitted. */
  22. /* */
  23. /********************************************************************/
  24. #include "HeapPCH.hpp"
  25. #include "Cache.hpp"
  26. #include "Heap.hpp"
  27. /********************************************************************/
  28. /* */
  29. /* Constants local to the class. */
  30. /* */
  31. /* The constants supplied here control the maximum size of */
  32. /* the cache. */
  33. /* */
  34. /********************************************************************/
//
// The largest permitted cache size: (2 << 16) - 1 = 131071.
// NOTE(review): the constructor narrows the accepted size with an
// (SBIT16) cast, so values above 32767 — although allowed by this
// bound — would wrap; confirm the intended upper limit.
//
CONST SBIT32 MaxCacheSize = ((2 << 16)-1);
  36. /********************************************************************/
  37. /* */
  38. /* Class constructor. */
  39. /* */
  40. /* Create a new allocation cache and prepare it for use. A */
  41. /* is inactive until the first request is received at which */
  42. /* time it springs into life. */
  43. /* */
  44. /********************************************************************/
//
// Construct a cache for a single allocation size.  The cache is
// created inactive; it only springs into life on the first
// allocation or deallocation request (see 'CreateCacheStacks').
//
CACHE::CACHE
    (
    SBIT32 NewAllocationSize,
    SBIT32 NewCacheSize,
    SBIT32 NewChunkSize,
    SBIT32 NewPageSize,
    BOOLEAN NewStealing,
    BOOLEAN NewThreadSafe
    ) :
    //
    // Call the constructors for the contained classes.
    //
    BUCKET( NewAllocationSize,NewChunkSize,NewPageSize )
{
    //
    // We need to be very careful with the configuration
    // information as it has come indirectly from the
    // user and may be bogus.
    //
    if ( (NewCacheSize >= 0) && (NewCacheSize < MaxCacheSize) )
    {
        //
        // Setup the cache and mark it as inactive.
        //
        Active = False;
        Stealing = NewStealing;
        ThreadSafe = NewThreadSafe;

#ifdef ENABLE_HEAP_STATISTICS
        //
        // All statistics start at zero.
        //
        CacheFills = 0;
        CacheFlushes = 0;
        HighTide = 0;
        HighWater = 0;
        InUse = 0;
#endif
        // NOTE(review): narrowed to SBIT16 although 'MaxCacheSize'
        // permits values up to (2^17)-1 — sizes above 32767 would
        // wrap; confirm the intended range.
        CacheSize = ((SBIT16) NewCacheSize);
        FillSize = 1;
        NumberOfChildren = 0;

        //
        // The stacks that may later contain allocations
        // are set to zero just to be neat.
        //
        DeleteStack = NULL;
        NewStack = NULL;
        TopOfDeleteStack = 0;
        TopOfNewStack = 0;
    }
    else
    { Failure( "Cache size in constructor for CACHE" ); }
}
  94. /********************************************************************/
  95. /* */
  96. /* Create the cache stacks. */
  97. /* */
  98. /* A cache is created on demand. We do this when we get the */
  99. /* first allocation or deallocation request. */
  100. /* */
  101. /********************************************************************/
  102. VOID CACHE::CreateCacheStacks( VOID )
  103. {
  104. //
  105. // We allocate the cache stacks from the internal
  106. // new page allocator if we have not done it already.
  107. //
  108. if ( DeleteStack == NULL )
  109. {
  110. REGISTER SBIT32 Size = (CacheSize * sizeof(ADDRESS_AND_PAGE));
  111. DeleteStack =
  112. ((ADDRESS_AND_PAGE*) (NewPage -> NewCacheStack( Size )));
  113. }
  114. if ( NewStack == NULL )
  115. {
  116. REGISTER SBIT32 Size = (CacheSize * sizeof(VOID*));
  117. NewStack =
  118. ((VOID**) (NewPage -> NewCacheStack( Size )));
  119. }
  120. //
  121. // We can now activate the cache as long as we
  122. // were able to allocate both stacks.
  123. //
  124. if ( (NewStack != NULL ) && (DeleteStack != NULL ) )
  125. {
  126. //
  127. // We have completed creating the cache so set
  128. // various flags and zero various counters.
  129. //
  130. Active = True;
  131. //
  132. // Setup the fill size.
  133. //
  134. FillSize = 1;
  135. //
  136. // Zero the stack tops.
  137. //
  138. TopOfDeleteStack = 0;
  139. TopOfNewStack = 0;
  140. }
  141. }
  142. /********************************************************************/
  143. /* */
  144. /* Create a new data page. */
  145. /* */
  146. /* When we create a new page we also need to allocate some */
  147. /* memory to hold the associated data. */
  148. /* */
  149. /********************************************************************/
  150. VOID *CACHE::CreateDataPage( VOID )
  151. {
  152. REGISTER VOID *NewMemory;
  153. //
  154. // When there is a potential for multiple threads we
  155. // claim the cache lock.
  156. //
  157. ClaimCacheLock();
  158. //
  159. // Create a data page.
  160. //
  161. NewMemory = ((BUCKET*) this) -> New( True );
  162. //
  163. // Release any lock we may have claimed earlier.
  164. //
  165. ReleaseCacheLock();
  166. return NewMemory;
  167. }
  168. #ifdef ENABLE_HEAP_STATISTICS
  169. /********************************************************************/
  170. /* */
  171. /* Compute high water. */
  172. /* */
  173. /* Compute the high water mark for the current cache. */
  174. /* */
  175. /********************************************************************/
  176. VOID CACHE::ComputeHighWater( SBIT32 Size )
  177. {
  178. //
  179. // Update the usage statistics.
  180. //
  181. if ( (InUse += Size) > HighTide )
  182. {
  183. HighTide = InUse;
  184. if ( HighTide > HighWater )
  185. { HighWater = HighTide; }
  186. }
  187. }
  188. #endif
  189. /********************************************************************/
  190. /* */
  191. /* A memory deallocation cache. */
  192. /* */
  193. /* We cache memory deallocation requests to improve performance. */
  194. /* We do this by stacking requests until we have a batch. */
  195. /* */
  196. /********************************************************************/
//
// Cache a deallocation request.  Returns True when the request was
// absorbed (stacked or successfully flushed), False when the bucket
// rejected an address during a flush or direct delete.
//
BOOLEAN CACHE::Delete( VOID *Address,PAGE *Page,SBIT32 Version )
{
    REGISTER BOOLEAN Result;

    //
    // When there is a potential for multiple threads we
    // claim the cache lock.
    //
    ClaimCacheLock();

    //
    // At various times the cache may be either disabled
    // or inactive.  Here we ensure that we are able to use
    // the cache.  If not we bypass it and call the bucket
    // directly.
    //
    if ( Active )
    {
        //
        // If recycling is allowed and the address is
        // on the current page or a previous page and
        // there is space on the new stack then put the
        // element in the new stack for immediate reuse.
        //
        if
        (
            (Stealing)
                &&
            (Address < GetCurrentPage())
                &&
            (TopOfNewStack < CacheSize)
        )
        {
            //
            // The address is suitable for immediate
            // reuse.  So put it on the stack of new
            // elements.
            //
            NewStack[ (TopOfNewStack ++) ] = Address;

            Result = True;
        }
        else
        {
            //
            // Reserve the next free slot on the delete stack.
            //
            REGISTER ADDRESS_AND_PAGE *Current =
                (& DeleteStack[ TopOfDeleteStack ++ ]);

            //
            // The address would best be deleted before
            // being reused.  Record everything needed to
            // delete it later.
            //
            Current -> Address = Address;
            Current -> Page = Page;
            Current -> Version = Version;

            //
            // When the delete stack is full we flush it.
            //
            if ( TopOfDeleteStack >= CacheSize )
            {
                AUTO SBIT32 Deleted;

                //
                // Flush the delete stack in one batch call
                // to the bucket.
                //
                Result =
                    (
                    ((BUCKET*) this) -> MultipleDelete
                        (
                        DeleteStack,
                        & Deleted,
                        TopOfDeleteStack
                        )
                    );
#ifdef ENABLE_HEAP_STATISTICS
                //
                // Update the usage statistics.  There
                // is a nasty case here where we cache
                // a delete only to find out later that
                // it was bogus.  When this occurs we
                // have to increase the 'InUse' count
                // to allow for this situation.
                //
                CacheFlushes ++;
                InUse += (TopOfDeleteStack - Deleted);
#endif
                //
                // Zero the top of the stack.
                //
                TopOfDeleteStack = 0;
            }
            else
            { Result = True; }
        }
    }
    else
    {
        //
        // The cache is inactive so delete the element
        // directly via the bucket.
        //
        Result =
            (((BUCKET*) this) -> Delete( Address,Page,Version ));
    }
#ifdef ENABLE_HEAP_STATISTICS
    //
    // Update the usage statistics (an absorbed request
    // counts as one fewer allocation in use).
    //
    if ( Result )
    { InUse --; }
#endif
    //
    // Release any lock we may have claimed earlier.
    //
    ReleaseCacheLock();

    return Result;
}
  307. /********************************************************************/
  308. /* */
  309. /* Delete all allocations. */
  310. /* */
  311. /* The entire heap is about to be deleted under our feet. We */
  312. /* need to prepare for this by disabling the cache as its */
  313. /* contents will disappear as well. */
  314. /* */
  315. /********************************************************************/
  316. VOID CACHE::DeleteAll( VOID )
  317. {
  318. //
  319. // Disable the cache if needed.
  320. //
  321. Active = False;
  322. #ifdef ENABLE_HEAP_STATISTICS
  323. //
  324. // Zero the statistics.
  325. //
  326. HighTide = 0;
  327. InUse = 0;
  328. #endif
  329. //
  330. // Setup the fill size.
  331. //
  332. FillSize = 1;
  333. //
  334. // Zero the top of stacks.
  335. //
  336. TopOfDeleteStack = 0;
  337. TopOfNewStack = 0;
  338. }
  339. /********************************************************************/
  340. /* */
  341. /* Delete a data page. */
  342. /* */
  343. /* Delete a data page that was associated with a smaller cache */
  344. /* so its space can be reused. */
  345. /* */
  346. /********************************************************************/
  347. BOOLEAN CACHE::DeleteDataPage( VOID *Address )
  348. {
  349. AUTO SEARCH_PAGE Details;
  350. REGISTER BOOLEAN Result;
  351. REGISTER PAGE *Page;
  352. //
  353. // When there is a potential for multiple threads we
  354. // claim the cache lock.
  355. //
  356. ClaimCacheLock();
  357. //
  358. // Find the description of the data page we need to
  359. // delete and make sure it is valid.
  360. //
  361. Find -> ClaimFindShareLock();
  362. Page = FindParentPage( Address );
  363. if ( Page != NULL )
  364. { Page = (Page -> FindPage( Address,& Details,False )); }
  365. Find -> ReleaseFindShareLock();
  366. //
  367. // Delete the data page.
  368. //
  369. if ( Page != NULL )
  370. { Result = (Page -> Delete( & Details )); }
  371. else
  372. { Failure( "No data page in DeleteDataPage" ); }
  373. //
  374. // Release any lock we may have claimed earlier.
  375. //
  376. ReleaseCacheLock();
  377. return Result;
  378. }
  379. /********************************************************************/
  380. /* */
  381. /* Multiple memory allocations. */
  382. /* */
  383. /* The allocation cache contains preallocated memory from the */
  384. /* associated allocation bucket. The cache will supply these */
  385. /* preallocated elements with the minimum fuss to any caller. */
  386. /* */
  387. /********************************************************************/
//
// Allocate up to 'Requested' elements into 'Array', preferring the
// cache of preallocated elements.  '*Actual' receives the number
// actually supplied; the return value reports overall success.
//
BOOLEAN CACHE::MultipleNew( SBIT32 *Actual,VOID *Array[],SBIT32 Requested )
{
    REGISTER BOOLEAN Result;

    //
    // When there is a potential for multiple threads we
    // claim the cache lock.
    //
    ClaimCacheLock();

    //
    // At various times the cache may be either disabled
    // or inactive.  Here we ensure that we are able to use
    // the cache.  If not we bypass it and call the bucket
    // directly.
    //
    if ( Active )
    {
        //
        // We have been asked to allocate multiple
        // new elements.  If it appears that we don't
        // have enough elements available but stealing
        // is allowed we can try raiding the deleted
        // stack.
        //
        if ( (Requested > TopOfNewStack) && (Stealing) )
        {
            while ( (TopOfDeleteStack > 0) && (TopOfNewStack < CacheSize) )
            {
                NewStack[ (TopOfNewStack ++) ] =
                    (DeleteStack[ (-- TopOfDeleteStack) ].Address);
            }
        }

        //
        // We will allocate from the cache if requested
        // size is smaller than the number of available
        // elements.
        //
        if ( Requested <= TopOfNewStack )
        {
            REGISTER SBIT32 Count;

            //
            // We need to copy the elements out of the
            // cache into the callers array.
            //
            for ( Count=0;Count < Requested;Count ++ )
            { Array[ Count ] = NewStack[ (-- TopOfNewStack) ]; }

            (*Actual) = Requested;
            Result = True;
        }
        else
        {
            REGISTER BUCKET *Bucket = ((BUCKET*) this);

            //
            // We don't have enough elements in the cache
            // so we allocate directly from the bucket.
            //
            Result =
                (
                Bucket -> MultipleNew
                    (
                    Actual,
                    Array,
                    Requested
                    )
                );

            //
            // We fill up the cache so we have a good
            // chance of dealing with any following
            // requests if it is less than half full.
            //
            if ( TopOfNewStack <= (CacheSize / 2) )
            {
                AUTO SBIT32 NewSize;
                REGISTER SBIT32 MaxSize = (CacheSize - TopOfNewStack);

                //
                // We slowly increase the fill size
                // of the cache (doubling each time) to
                // make sure we don't waste too much space.
                //
                if ( FillSize < CacheSize )
                {
                    if ( (FillSize *= 2) > CacheSize )
                    { FillSize = CacheSize; }
                }

                //
                // Bulk load the cache with new elements
                // (but never beyond the stack capacity).
                //
                Bucket -> MultipleNew
                    (
                    & NewSize,
                    & NewStack[ TopOfNewStack ],
                    ((FillSize < MaxSize) ? FillSize : MaxSize)
                    );
#ifdef ENABLE_HEAP_STATISTICS
                CacheFills ++;
#endif
                TopOfNewStack += NewSize;
            }
        }
    }
    else
    {
        //
        // We may want to enable the cache for next
        // time so see if this needs to be done.
        //
        if ( CacheSize > 1 )
        { CreateCacheStacks(); }

        //
        // The cache is disabled so go directly to the
        // bucket.
        //
        Result = ((BUCKET*) this) -> MultipleNew
            (
            Actual,
            Array,
            Requested
            );
    }
#ifdef ENABLE_HEAP_STATISTICS
    //
    // Update the usage statistics.
    //
    ComputeHighWater( (*Actual) );
#endif
    //
    // Release any lock we may have claimed earlier.
    //
    ReleaseCacheLock();

    return Result;
}
  519. /********************************************************************/
  520. /* */
  521. /* Memory allocation. */
  522. /* */
  523. /* The allocation cache contains preallocated memory from the */
  524. /* associated allocation bucket. The cache will supply these */
  525. /* preallocated elements with the minimum fuss to any caller. */
  526. /* */
  527. /********************************************************************/
//
// Allocate a single element, preferring (in order): the stack of
// preallocated elements, a stolen entry from the delete stack, and
// finally a bulk refill from the bucket.  Returns the allocation
// or 'AllocationFailure'.
//
VOID *CACHE::New( VOID )
{
    REGISTER VOID *NewMemory;

    //
    // When there is a potential for multiple threads we
    // claim the cache lock.
    //
    ClaimCacheLock();

    //
    // At various times the cache may be either disabled
    // or inactive.  Here we ensure that we are able to use
    // the cache.  If not we bypass it and call the bucket
    // directly.
    //
    if ( Active )
    {
        //
        // We first try the stack for new allocations
        // to see if there are any available elements.
        //
        if ( TopOfNewStack > 0 )
        { NewMemory = (NewStack[ (-- TopOfNewStack) ]); }
        else
        {
            //
            // When stealing is allowed we will recycle
            // elements from the top of the deleted stack.
            //
            if ( (TopOfDeleteStack > 0) && (Stealing) )
            { NewMemory = (DeleteStack[ (-- TopOfDeleteStack) ].Address); }
            else
            {
                //
                // We slowly increase the fill size
                // of the cache (doubling each time) to
                // make sure we don't waste too much space.
                //
                if ( FillSize < CacheSize )
                {
                    if ( (FillSize *= 2) > CacheSize )
                    { FillSize = CacheSize; }
                }

                //
                // We need to bulk load some new
                // memory from the heap.
                //
                if
                (
                    ((BUCKET*) this) -> MultipleNew
                    (
                        & TopOfNewStack,
                        NewStack,
                        FillSize
                    )
                )
                {
                    //
                    // Update the statistics and return
                    // the top element on the stack.
                    //
#ifdef ENABLE_HEAP_STATISTICS
                    CacheFills ++;
#endif
                    NewMemory = NewStack[ (-- TopOfNewStack) ];
                }
                else
                {
                    //
                    // The bulk load failed so fail
                    // the request for memory.
                    //
                    NewMemory = ((VOID*) AllocationFailure);
                }
            }
        }
    }
    else
    {
        //
        // We may want to enable the cache for next
        // time so see if this needs to be done.
        //
        if ( CacheSize > 1 )
        { CreateCacheStacks(); }

        //
        // The cache is disabled so go directly to the
        // bucket.
        //
        NewMemory = ((BUCKET*) this) -> New( False );
    }
#ifdef ENABLE_HEAP_STATISTICS
    //
    // Update the usage statistics (counts 1 on success,
    // 0 on failure).
    //
    ComputeHighWater( (NewMemory != ((VOID*) AllocationFailure)) );
#endif
    //
    // Release any lock we may have claimed earlier.
    //
    ReleaseCacheLock();

    //
    // Prefetch the first cache line if we are running
    // a Pentium III or better.
    //
    Prefetch.L1( ((CHAR*) NewMemory),1 );

    return NewMemory;
}
  635. /********************************************************************/
  636. /* */
  637. /* Memory allocation for non-standard sizes. */
  638. /* */
  639. /* A non standard sized allocation simply by-passes the cache */
  640. /* but it still needs to hold the lock to prevent failure on */
  641. /* SMP systems. */
  642. /* */
  643. /********************************************************************/
  644. VOID *CACHE::New( BOOLEAN SubDivided,SBIT32 NewSize )
  645. {
  646. REGISTER VOID *NewMemory;
  647. //
  648. // When there is a potential for multiple threads we
  649. // claim the cache lock.
  650. //
  651. ClaimCacheLock();
  652. //
  653. // Allocate a non-standard sized block.
  654. //
  655. NewMemory = ((BUCKET*) this) -> New( SubDivided,NewSize );
  656. #ifdef ENABLE_HEAP_STATISTICS
  657. //
  658. // Update the usage statistics.
  659. //
  660. ComputeHighWater( (NewMemory != ((VOID*) AllocationFailure)) );
  661. #endif
  662. //
  663. // Release any lock we may have claimed earlier.
  664. //
  665. ReleaseCacheLock();
  666. return NewMemory;
  667. }
  668. /********************************************************************/
  669. /* */
  670. /* Release free space. */
  671. /* */
  672. /* We sometimes do not release free space from a bucket as */
  673. /* returning it to the operating system and getting it again */
  674. /* later is very expensive. Here we flush any free space we */
  675. /* have aquired over the user supplied limit. */
  676. /* */
  677. /********************************************************************/
  678. VOID CACHE::ReleaseSpace( SBIT32 MaxActivePages )
  679. {
  680. //
  681. // When there is a potential for multiple threads
  682. // we claim the cache lock.
  683. //
  684. ClaimCacheLock();
  685. //
  686. // Release the free space from the backet.
  687. //
  688. ((BUCKET*) this) -> ReleaseSpace( MaxActivePages );
  689. //
  690. // Release any lock we may have claimed earlier.
  691. //
  692. ReleaseCacheLock();
  693. }
  694. /********************************************************************/
  695. /* */
  696. /* Search the cacahe for an allocation. */
  697. /* */
  698. /* We sometimes need to search the cache to see if an */
  699. /* allocation is currently in the cacahe awaiting allocation */
  700. /* or release. */
  701. /* */
  702. /********************************************************************/
  703. BOOLEAN CACHE::SearchCache( VOID *Address )
  704. {
  705. REGISTER BOOLEAN Result = False;
  706. //
  707. // We check to see if the cache is active.
  708. //
  709. if ( Active )
  710. {
  711. //
  712. // When there is a potential for multiple
  713. // threads we claim the cache lock.
  714. //
  715. ClaimCacheLock();
  716. //
  717. // We check to see if the cache is still
  718. // active.
  719. //
  720. if ( Active )
  721. {
  722. REGISTER SBIT32 Count;
  723. //
  724. // Search the allocated cache.
  725. //
  726. for ( Count=(TopOfNewStack-1);Count >= 0;Count -- )
  727. {
  728. if ( Address == NewStack[ Count ] )
  729. {
  730. Result = True;
  731. break;
  732. }
  733. }
  734. //
  735. // If it has not been found yet then try
  736. // the deleted cache.
  737. //
  738. if ( ! Result )
  739. {
  740. //
  741. // Search the deleted cache.
  742. //
  743. for ( Count=(TopOfDeleteStack-1);Count >= 0;Count -- )
  744. {
  745. if ( Address == DeleteStack[ Count ].Address )
  746. {
  747. Result = True;
  748. break;
  749. }
  750. }
  751. }
  752. }
  753. //
  754. // Release any lock we may have claimed earlier.
  755. //
  756. ReleaseCacheLock();
  757. }
  758. return Result;
  759. }
  760. /********************************************************************/
  761. /* */
  762. /* Truncate the heap. */
  763. /* */
  764. /* Flush the cache to release the maximum amount of space back */
  765. /* to the operating system. This is slow but may be very */
  766. /* valuable in some situations. */
  767. /* */
  768. /********************************************************************/
//
// Flush the cache completely so the maximum amount of space can be
// returned to the operating system.  Deactivates the cache; returns
// False if any cached address turned out to be bogus.
//
BOOLEAN CACHE::Truncate( VOID )
{
    REGISTER BOOLEAN Result = True;

    //
    // When there is a potential for multiple threads we
    // claim the cache lock.
    //
    ClaimCacheLock();

    //
    // Disable the cache if needed.
    //
    Active = False;

    //
    // Setup the fill size.
    //
    FillSize = 1;

    //
    // Flush any elements in the delete cache.
    // We do this now because we need to use
    // the delete cache below.
    //
    if ( TopOfDeleteStack > 0 )
    {
        AUTO SBIT32 Deleted;

        //
        // Flush the delete stack (preserving any earlier
        // failure in 'Result').
        //
        Result =
            (
            ((BUCKET*) this) -> MultipleDelete
                (
                DeleteStack,
                & Deleted,
                TopOfDeleteStack
                )
                &&
            (Result)
            );
#ifdef ENABLE_HEAP_STATISTICS
        //
        // Update the usage statistics.  There
        // is a nasty case here where we cache
        // a delete only to find out later that
        // it was bogus.  When this occurs we
        // have to increase the 'InUse' count
        // to allow for this situation.
        //
        CacheFlushes ++;
        InUse += (TopOfDeleteStack - Deleted);
#endif
        //
        // Zero the top of the stack.
        //
        TopOfDeleteStack = 0;
    }

    //
    // Flush any elements in the new cache by
    // copying them over to the delete cache
    // and adding the additional information
    // required.
    //
    if ( TopOfNewStack > 0 )
    {
        //
        // We need to find the data page for each
        // allocation we have in the new cache.
        // Claim the lock here to make things a
        // little more efficient.
        //
        Find -> ClaimFindShareLock();

        //
        // We copy each allocation across and
        // add the associated page information.
        // NOTE(review): this loop leaves 'TopOfNewStack'
        // at -1 rather than 0; the cache is inactive at
        // this point and 'CreateCacheStacks' rezeroes it
        // on reactivation — confirm no path reads it first.
        //
        for ( TopOfNewStack --;TopOfNewStack >= 0;TopOfNewStack -- )
        {
            REGISTER VOID *Address = (NewStack[ TopOfNewStack ]);
            REGISTER PAGE *Page = (ParentCache -> FindChildPage( Address ));

            //
            // You would think that any memory in the
            // new cache had to be valid.  Well it
            // does except in the case when we have
            // 'Recycle' set and somebody does a double
            // delete on a valid heap address.
            //
            if ( Page != NULL )
            {
                REGISTER ADDRESS_AND_PAGE *Current =
                    (& DeleteStack[ TopOfDeleteStack ++ ]);

                //
                // We need to find the allocation page
                // where the memory was allocated from
                // so we can delete it.
                //
                Current -> Address = Address;
                Current -> Page = Page;
                Current -> Version = Page -> GetVersion();
            }
            else
            {
#ifdef ENABLE_HEAP_STATISTICS
                //
                // Update the usage statistics.  There
                // is a nasty case here where we cache
                // a delete only to find out later that
                // it was bogus.  When this occurs we
                // have to increase the 'InUse' count
                // to allow for this situation.
                //
                InUse ++;
#endif
                Result = False;
            }
        }

        //
        // Release the lock.
        //
        Find -> ReleaseFindShareLock();
    }

    //
    // Flush the delete cache again to delete
    // any new elements that we added to it
    // above.
    //
    if ( TopOfDeleteStack > 0 )
    {
        AUTO SBIT32 Deleted;

        //
        // Flush the delete stack.
        //
        Result =
            (
            ((BUCKET*) this) -> MultipleDelete
                (
                DeleteStack,
                & Deleted,
                TopOfDeleteStack
                )
                &&
            (Result)
            );
#ifdef ENABLE_HEAP_STATISTICS
        //
        // Update the usage statistics (see the note on
        // bogus cached deletes above).
        //
        CacheFlushes ++;
        InUse += (TopOfDeleteStack - Deleted);
#endif
        //
        // Zero the top of the stack.
        //
        TopOfDeleteStack = 0;
    }

    //
    // Release any lock we may have claimed earlier.
    //
    ReleaseCacheLock();

    return Result;
}
  933. /********************************************************************/
  934. /* */
  935. /* Update the bucket information. */
  936. /* */
  937. /* When we create the bucket there is some information that */
  938. /* is not available. Here we update the bucket to make sure */
  939. /* it has all the data it needs. */
  940. /* */
  941. /********************************************************************/
  942. VOID CACHE::UpdateCache
  943. (
  944. FIND *NewFind,
  945. HEAP *NewHeap,
  946. NEW_PAGE *NewPages,
  947. CACHE *NewParentCache
  948. )
  949. {
  950. //
  951. // Notify the parent cache that it has a new
  952. // child.
  953. //
  954. if ( NewParentCache != ((CACHE*) GlobalRoot) )
  955. { NewParentCache -> NumberOfChildren ++; }
  956. //
  957. // Update the allocation bucket.
  958. //
  959. UpdateBucket
  960. (
  961. NewFind,
  962. NewHeap,
  963. NewPages,
  964. NewParentCache
  965. );
  966. }
  967. /********************************************************************/
  968. /* */
  969. /* Class destructor. */
  970. /* */
  971. /* Destory the cache and ensure it is disabled. */
  972. /* */
  973. /********************************************************************/
  974. CACHE::~CACHE( VOID )
  975. {
  976. if ( Active )
  977. { Failure( "Cache active in destructor for CACHE" ); }
  978. }