Leaked source code of windows server 2003
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1291 lines
39 KiB

  1. // Ruler
  2. // 1 2 3 4 5 6 7 8
  3. //345678901234567890123456789012345678901234567890123456789012345678901234567890
  4. /********************************************************************/
  5. /* */
  6. /* The standard layout. */
  7. /* */
  8. /* The standard layout for 'cpp' files in this code is as */
  9. /* follows: */
  10. /* */
  11. /* 1. Include files. */
  12. /* 2. Constants local to the class. */
  13. /* 3. Data structures local to the class. */
  14. /* 4. Data initializations. */
  15. /* 5. Static functions. */
  16. /* 6. Class functions. */
  17. /* */
  18. /* The constructor is typically the first function, class */
  19. /* member functions appear in alphabetical order with the */
  20. /* destructor appearing at the end of the file. Any section */
  21. /* or function this is not required is simply omitted. */
  22. /* */
  23. /********************************************************************/
  24. #include "HeapPCH.hpp"
  25. #include "Cache.hpp"
  26. #include "Find.hpp"
  27. #include "Heap.hpp"
  28. #include "New.hpp"
  29. /********************************************************************/
  30. /* */
  31. /* Constants local to the class. */
  32. /* */
  33. /* The constants supplied here control the size of the hash */
  34. /* table and other related features. */
  35. /* */
  36. /********************************************************************/
  37. CONST SBIT32 MinHash = 1024;
  38. CONST SBIT32 MinHashSpace = (100/25);
  39. CONST SBIT32 MinLookAside = 128;
  40. CONST BIT32 NoAddressMask = ((BIT32) -1);
  41. CONST SBIT32 NoCacheEntry = -1;
  42. #ifndef ENABLE_RECURSIVE_LOCKS
  43. /********************************************************************/
  44. /* */
  45. /* Static member initialization. */
  46. /* */
  47. /* Static member initialization sets the initial value for all */
  48. /* static members. */
  49. /* */
  50. /********************************************************************/
  51. THREAD_LOCAL_STORE FIND::LockCount;
  52. #endif
  53. /********************************************************************/
  54. /* */
  55. /* Class constructor. */
  56. /* */
  57. /* Create the hash table and initialize it ready for use. The */
  58. /* configuration information supplied from the parameters needs */
  59. /* to be carefully checked as it has come indirectly from the */
  60. /* user and may be bogus. */
  61. /* */
  62. /********************************************************************/
  63. FIND::FIND
  64. (
  65. SBIT32 NewMaxHash,
  66. SBIT32 NewMaxLookAside,
  67. SBIT32 NewFindThreshold,
  68. ROCKALL *NewRockall,
  69. BOOLEAN NewResize,
  70. BOOLEAN NewThreadSafe
  71. )
  72. {
  73. REGISTER SBIT32 AlignMask = (NewRockall -> NaturalSize()-1);
  74. //
  75. // We need to make sure that the size of the hash table
  76. // makes sense. The hash table size needs to be a reasonable
  77. // size (say 1k or larger) and a power of 2 (so we don't need
  78. // to do any divides).
  79. //
  80. if
  81. (
  82. PowerOfTwo( (AlignMask+1) )
  83. &&
  84. (NewFindThreshold >= 0 )
  85. &&
  86. (NewMaxHash >= MinHash)
  87. &&
  88. (ConvertDivideToShift( NewMaxHash,& HashMask ))
  89. &&
  90. (NewMaxLookAside >= MinLookAside)
  91. &&
  92. (ConvertDivideToShift( NewMaxLookAside,& LookAsideMask ))
  93. )
  94. {
  95. REGISTER SBIT32 HashSize = (NewMaxHash * sizeof(LIST));
  96. REGISTER SBIT32 LookAsideSize = (NewMaxLookAside * sizeof(LOOK_ASIDE));
  97. REGISTER SBIT32 TotalSize = (HashSize + LookAsideSize);
  98. //
  99. // Set up the hash table.
  100. //
  101. MaxHash = NewMaxHash;
  102. HashShift = (32-HashMask);
  103. HashMask = ((1 << HashMask)-1);
  104. Resize = NewResize;
  105. //
  106. // Set up the lookaside table.
  107. //
  108. MaxLookAside = NewMaxLookAside;
  109. MaxAddressMask = NoAddressMask;
  110. MinAddressMask = NoAddressMask;
  111. LookAsideActions = 0;
  112. LookAsideShift = (32-LookAsideMask);
  113. LookAsideMask = ((1 << LookAsideMask)-1);
  114. LookAsideThreshold = NewFindThreshold;
  115. ThreadSafe = NewThreadSafe;
  116. //
  117. // Create some space for the find table and the
  118. // look aside table.
  119. //
  120. Hash = ((LIST*) NewRockall -> NewArea( AlignMask,TotalSize,False ));
  121. LookAside = ((LOOK_ASIDE*) & Hash[ MaxHash ]);
  122. Rockall = NewRockall;
  123. //
  124. // If the memory allocation request for the hash
  125. // table fails we are doomed. If it works we need
  126. // to call the constructor for each linked list
  127. // head node.
  128. //
  129. if ( Hash != ((LIST*) AllocationFailure) )
  130. {
  131. REGISTER SBIT32 Count;
  132. //
  133. // Call the constructor for each hash table
  134. // linked list header.
  135. //
  136. for ( Count=0;Count < NewMaxHash;Count ++ )
  137. { PLACEMENT_NEW( & Hash[ Count ],LIST ); }
  138. //
  139. // Zero the look aside structures. We need
  140. // to do this to ensure they do not match a
  141. // valid allocation address later.
  142. //
  143. for ( Count=0;Count < MaxLookAside;Count ++ )
  144. {
  145. REGISTER LOOK_ASIDE *Current = & LookAside[ Count ];
  146. Current -> Address = ((VOID*) NoCacheEntry);
  147. Current -> Page = ((PAGE*) NoCacheEntry);
  148. #ifdef DEBUGGING
  149. Current -> Version = ((SBIT32) NoCacheEntry);
  150. #endif
  151. }
  152. #ifdef ENABLE_HEAP_STATISTICS
  153. //
  154. // Zero the statistics information.
  155. //
  156. Fills = 0;
  157. Hits = 0;
  158. MaxPages = 0;
  159. MaxTests = 0;
  160. Misses = 0;
  161. Scans = 0;
  162. Tests = 0;
  163. #endif
  164. Used = 0;
  165. }
  166. else
  167. { Failure( "Create hash fails in constructor for FIND" ); }
  168. }
  169. else
  170. { Failure( "Hash table size in constructor for FIND" ); }
  171. }
  172. /********************************************************************/
  173. /* */
  174. /* Delete a memory allocation. */
  175. /* */
  176. /* We need to delete a particular memory allocation. All */
  177. /* we have is an address. We use this to find the largest */
  178. /* allocation page this address is contained in and then */
  179. /* navigate through the sub-divisions of this page until we */
  180. /* find the allocation we need to delete. */
  181. /* */
  182. /********************************************************************/
  183. BOOLEAN FIND::Delete( VOID *Address,CACHE *ParentCache )
  184. {
  185. REGISTER PAGE *Page;
  186. REGISTER BOOLEAN Update;
  187. //
  188. // If we need to be thread safe then claim a sharable lock
  189. // on the hash table to stop it being changed under our feet.
  190. //
  191. ClaimFindShareLock();
  192. //
  193. // Lets try the lookaside table. There is a pretty
  194. // good chance that we will have the details we need
  195. // already in the cache. If not we need to find it
  196. // the hard way. During the process we add the mapping
  197. // into the lookaside for next time.
  198. //
  199. if
  200. (
  201. Update =
  202. (
  203. ! FindLookAside
  204. (
  205. ((VOID*) (((LONG) Address) & ~MinAddressMask)),
  206. & Page
  207. )
  208. )
  209. )
  210. {
  211. //
  212. // Find the allocation page and get the details of entry.
  213. // We do this by finding the parent of the top cache.
  214. // We know that this is the global root and will find
  215. // the correct page even if it is on another heap (as
  216. // long as the find table is globally shared).
  217. //
  218. Page = (ParentCache -> FindParentPage( Address ));
  219. if ( Page != ((PAGE*) NULL) )
  220. { Page = (Page -> FindPage( Address,NULL,True )); }
  221. }
  222. //
  223. // We may have failed to find the address. If so
  224. // we simply fail the call. If not we put the deleted
  225. // element back in the associated cache.
  226. //
  227. if ( Page != ((PAGE*) NULL) )
  228. {
  229. REGISTER CACHE *Cache = (Page -> GetCache());
  230. REGISTER SBIT32 Original = (Page -> GetVersion());
  231. //
  232. // Prefetch the class data if we are running a
  233. // Pentium III or better with locks. We do this
  234. // because prefetching hot SMP data structures
  235. // really helps. However, if the structures are
  236. // not shared (i.e. no locks) then it is worthless
  237. // overhead.
  238. //
  239. if ( ThreadSafe )
  240. { Prefetch.Nta( ((CHAR*) Cache),sizeof(CACHE) ); }
  241. //
  242. // Release the lock if we claimed it earlier and
  243. // update the lookaside if needed.
  244. //
  245. if ( Update )
  246. { ReleaseFindShareLockAndUpdate( Address,Page,Original ); }
  247. else
  248. { ReleaseFindShareLock(); }
  249. //
  250. // We have found the associated page description
  251. // so pass the delete request along to the cache
  252. // and get out of here.
  253. //
  254. return (Cache -> Delete( Address,Page,Original ));
  255. }
  256. else
  257. {
  258. //
  259. // Release the lock if we claimed it earlier.
  260. //
  261. ReleaseFindShareLock();
  262. return False;
  263. }
  264. }
  265. /********************************************************************/
  266. /* */
  267. /* Delete an item from the find table. */
  268. /* */
  269. /* We need to delete page from the find list. We expect */
  270. /* this to take quite a while as multiple threads can be */
  271. /* using this class at the same time. */
  272. /* */
  273. /********************************************************************/
  274. VOID FIND::DeleteFromFindList( PAGE *Page )
  275. {
  276. REGISTER VOID *Address = (Page -> GetAddress());
  277. //
  278. // Claim an exclusive lock so we can update the
  279. // hash and lookaside as needed.
  280. //
  281. ClaimFindExclusiveLock();
  282. //
  283. // Delete the page from the hash table.
  284. //
  285. Page -> DeleteFromFindList( FindHashHead( Address ) );
  286. //
  287. // When we create very small heaps (i.e. a heap
  288. // where only 20-30 allocations are requested)
  289. // the various caches become a problem as they
  290. // tend to front load work. So we allow a limit
  291. // to be set before which we run with caches
  292. // disabled.
  293. //
  294. if ( LookAsideActions >= LookAsideThreshold )
  295. {
  296. REGISTER SBIT32 Count;
  297. REGISTER CACHE *Cache = (Page -> GetCache());
  298. REGISTER SBIT32 Stride = (Cache -> GetAllocationSize());
  299. //
  300. // We are about look up various look aside entries
  301. // and delete any that are stale. We need to do
  302. // this for every lookaside slot that relates to
  303. // the page. If the allocation size is smaller
  304. // than the lookaside slot size we can save some
  305. // iterations by increasing the stride size.
  306. //
  307. if ( Stride <= ((SBIT32) MinAddressMask) )
  308. { Stride = ((SBIT32) (MinAddressMask+1)); }
  309. //
  310. // Whenever we delete an entry from the hash table
  311. // the lookaside is potentially corrupt. So we
  312. // need to delete any look aside entries relating
  313. // to this page.
  314. //
  315. for ( Count=0;Count < Cache -> GetPageSize();Count += Stride )
  316. {
  317. REGISTER VOID *Segment =
  318. ((VOID*) ((((LONG) Address) + Count) & ~MinAddressMask));
  319. REGISTER LOOK_ASIDE *Current =
  320. (FindLookAsideHead( Segment ));
  321. //
  322. // Delete the look aside entry if it is stale.
  323. //
  324. if ( Segment == Current -> Address )
  325. {
  326. Current -> Address = ((VOID*) NoCacheEntry);
  327. Current -> Page = ((PAGE*) NoCacheEntry);
  328. #ifdef DEBUGGING
  329. Current -> Version = ((SBIT32) NoCacheEntry);
  330. #endif
  331. }
  332. }
  333. }
  334. //
  335. // Update the statistics.
  336. //
  337. Used --;
  338. //
  339. // Release the lock if we claimed it earlier.
  340. //
  341. ReleaseFindExclusiveLock();
  342. }
  343. /********************************************************************/
  344. /* */
  345. /* Details of a memory allocation. */
  346. /* */
  347. /* We need to the details of a particular memory allocation. */
  348. /* All we have is an address. We use this to find the largest */
  349. /* allocation page this address is contained in and then */
  350. /* navigate through the sub-divisions of this page until we */
  351. /* find the allocation. */
  352. /* */
  353. /********************************************************************/
  354. BOOLEAN FIND::Details
  355. (
  356. VOID *Address,
  357. SEARCH_PAGE *Details,
  358. CACHE *ParentCache,
  359. SBIT32 *Size
  360. )
  361. {
  362. REGISTER PAGE *Page;
  363. REGISTER BOOLEAN Result;
  364. REGISTER BOOLEAN Update;
  365. //
  366. // If we need to be thread safe then claim a sharable lock
  367. // on the hash table to stop it being changed under our feet.
  368. //
  369. ClaimFindShareLock();
  370. //
  371. // Lets try the lookaside table. There is a pretty
  372. // good chance that we will have the deatils we need
  373. // already in the cache. If not we need to find it
  374. // the hard way. During the process we add the mapping
  375. // into the lookaside for next time.
  376. //
  377. if
  378. (
  379. Update =
  380. (
  381. ! FindLookAside
  382. (
  383. ((VOID*) (((LONG) Address) & ~MinAddressMask)),
  384. & Page
  385. )
  386. )
  387. )
  388. {
  389. //
  390. // Find the allocation page and get the details of entry.
  391. // We do this by finding the parent of the top cache.
  392. // We know that this is the global root and will find
  393. // the correct page even if it is on another heap (as
  394. // long as the find table is globally shared).
  395. //
  396. Page = (ParentCache -> FindParentPage( Address ));
  397. if ( Page != ((PAGE*) NULL) )
  398. { Page = (Page -> FindPage( Address,Details,True )); }
  399. }
  400. else
  401. {
  402. //
  403. // We may need to provide the all the details of the
  404. // allocation for some reason.
  405. //
  406. if ( Details != NULL )
  407. { Page = (Page -> FindPage( Address,Details,True )); }
  408. }
  409. //
  410. // We may have failed to find the address. If so
  411. // we simply fail the call. If not we extract the
  412. // information we want.
  413. //
  414. if ( Result = (Page != ((PAGE*) NULL)) )
  415. {
  416. //
  417. // Compute the size. We would normally expect
  418. // this to be the cache size. However, there
  419. // are some weird pages that sometimes have
  420. // other sizes.
  421. //
  422. (*Size) = (Page -> ActualSize());
  423. }
  424. //
  425. // Release the lock if we claimed it earlier and
  426. // update the lookaside if needed.
  427. //
  428. if ( (Update) && (Result) )
  429. { ReleaseFindShareLockAndUpdate( Address,Page,Page -> GetVersion() ); }
  430. else
  431. { ReleaseFindShareLock(); }
  432. return Result;
  433. }
  434. /********************************************************************/
  435. /* */
  436. /* Find in the look aside. */
  437. /* */
  438. /* We need to find a particular page in the look aside. So we */
  439. /* try a simple look up (no lists or chains). */
  440. /* */
  441. /********************************************************************/
  442. BOOLEAN FIND::FindLookAside( VOID *Address,PAGE **Page )
  443. {
  444. //
  445. // When we create very small heaps (i.e. a heap
  446. // where only 20-30 allocations are requested)
  447. // the various caches become a problem as they
  448. // tend to front load work. So we allow a limit
  449. // to be set before which we run with caches
  450. // disabled.
  451. //
  452. if ( LookAsideActions >= LookAsideThreshold )
  453. {
  454. REGISTER LOOK_ASIDE *Current = FindLookAsideHead( Address );
  455. //
  456. // We have hashed to a lookaside slot. Maybe
  457. // it contains what we want or maybe not.
  458. //
  459. if ( Address == Current -> Address )
  460. {
  461. #ifdef DEBUGGING
  462. if ( Current -> Version == (Current -> Page -> GetVersion()) )
  463. {
  464. #endif
  465. //
  466. // We hit the lookaside and the
  467. // contents are valid.
  468. //
  469. (*Page) = (Current -> Page);
  470. #ifdef ENABLE_HEAP_STATISTICS
  471. //
  472. // Update the statistics.
  473. //
  474. Hits ++;
  475. #endif
  476. return True;
  477. #ifdef DEBUGGING
  478. }
  479. else
  480. { Failure( "Deleted page in FindLookAside" ); }
  481. #endif
  482. }
  483. }
  484. else
  485. {
  486. //
  487. // We update number of times we tried to
  488. // use the lookaside and it was disabled.
  489. // After a while this will lead to the
  490. // lookaside being enabled.
  491. //
  492. LookAsideActions ++;
  493. }
  494. #ifdef ENABLE_HEAP_STATISTICS
  495. //
  496. // We missed the lookaside so update the
  497. // statistics to reflect our misfortune.
  498. //
  499. Misses ++;
  500. #endif
  501. return False;
  502. }
  503. /********************************************************************/
  504. /* */
  505. /* Find a page. */
  506. /* */
  507. /* We need to find a particular page in the hash table. So we */
  508. /* scan along the associated linked list looking for a match. */
  509. /* */
  510. /********************************************************************/
  511. PAGE *FIND::FindPage( VOID *Address,CACHE *ParentCache )
  512. {
  513. #ifdef ENABLE_HEAP_STATISTICS
  514. REGISTER SBIT32 Cycles = 0;
  515. REGISTER PAGE *Result = NULL;
  516. #endif
  517. REGISTER PAGE *Page;
  518. //
  519. // Find the associated hash bucket and then walk
  520. // along the linked list for this looking for
  521. // the correct page description.
  522. //
  523. for
  524. (
  525. Page = PAGE::FirstInFindList( FindHashHead( Address ) );
  526. ! Page -> EndOfFindList();
  527. Page = Page -> NextInFindList()
  528. )
  529. {
  530. #ifdef ENABLE_HEAP_STATISTICS
  531. //
  532. // Count the number of iterations in when we
  533. // are recording statistics so we can calculate
  534. // the average chain length.
  535. //
  536. Cycles ++;
  537. #endif
  538. //
  539. // We can identify the the target page by two key
  540. // characteristics. These are the start address and
  541. // the parent page. Although we may have sub-divided
  542. // a page into various chunks each chunk will have
  543. // a different parent (although its start address
  544. // may sometimes be the same).
  545. //
  546. if
  547. (
  548. (Address == (Page -> GetAddress()))
  549. &&
  550. (ParentCache == (Page -> GetParentPage()))
  551. )
  552. {
  553. #ifdef ENABLE_HEAP_STATISTICS
  554. //
  555. // We have found the target page. So return it
  556. // to the caller.
  557. //
  558. if ( Page -> ValidPage() )
  559. {
  560. Result = Page;
  561. break;
  562. }
  563. else
  564. { Failure( "Deleted page in FindPage" ); }
  565. #else
  566. return Page;
  567. #endif
  568. }
  569. }
  570. #ifdef ENABLE_HEAP_STATISTICS
  571. //
  572. // When we are in statistics mode we need to update the
  573. // information so we can output it at the end of the
  574. // run.
  575. //
  576. if ( MaxTests < Cycles )
  577. { MaxTests = Cycles; }
  578. Tests += Cycles;
  579. Scans ++;
  580. return Result;
  581. #else
  582. return NULL;
  583. #endif
  584. }
  585. /********************************************************************/
  586. /* */
  587. /* Insert an item into the find table. */
  588. /* */
  589. /* We need to insert a new page into the find table. We expect */
  590. /* this to take quite a while as multiple threads can be using */
  591. /* this class at the same time. */
  592. /* */
  593. /********************************************************************/
  594. VOID FIND::InsertInFindList( PAGE *Page )
  595. {
  596. REGISTER VOID *Address = (Page -> GetAddress());
  597. //
  598. // Claim an exclusive lock so we can update the
  599. // find table and lookaside as needed.
  600. //
  601. ClaimFindExclusiveLock();
  602. //
  603. // Insert a new page into the find table.
  604. //
  605. Page -> InsertInFindList( FindHashHead( Address ) );
  606. //
  607. // When we create very small heaps (i.e. a heap
  608. // where only 20-30 allocations are requested)
  609. // the various caches become a problem as they
  610. // tend to front load work. So we allow a limit
  611. // to be set before which we run with caches
  612. // disabled.
  613. //
  614. if ( LookAsideActions >= LookAsideThreshold )
  615. {
  616. REGISTER SBIT32 Count;
  617. REGISTER CACHE *Cache = (Page -> GetCache());
  618. REGISTER SBIT32 Stride = (Cache -> GetAllocationSize());
  619. //
  620. // We are about look up various lookaside entries
  621. // and update any that are stale. We need to do
  622. // this for every lookaside slot that relates to
  623. // the page. If the allocation size is smaller
  624. // than the lookaside slot size we can save some
  625. // iterations by increasing the stride size.
  626. //
  627. if ( Stride <= ((SBIT32) MinAddressMask) )
  628. { Stride = ((SBIT32) (MinAddressMask+1)); }
  629. //
  630. // Whenever we add an entry from the find table
  631. // the lookaside is potentially corrupt. So we
  632. // need to update any lookaside entries relating
  633. // to the page.
  634. //
  635. for ( Count=0;Count < Cache -> GetPageSize();Count += Stride )
  636. {
  637. REGISTER VOID *Segment =
  638. ((VOID*) ((((LONG) Address) + Count) & ~MinAddressMask));
  639. REGISTER LOOK_ASIDE *Current =
  640. (FindLookAsideHead( Segment ));
  641. //
  642. // Add the new page to the lookaside as we
  643. // expect it to get hit pretty soon one way
  644. // or another.
  645. //
  646. Current -> Address = Segment;
  647. Current -> Page = Page;
  648. #ifdef DEBUGGING
  649. Current -> Version = Page -> GetVersion();
  650. #endif
  651. }
  652. }
  653. //
  654. // Update the statistics and resize the find
  655. // table if it is over 75% full.
  656. //
  657. if ( ((++ Used) + (MaxHash / MinHashSpace)) > MaxHash )
  658. { ResizeHashTable(); }
  659. #ifdef ENABLE_HEAP_STATISTICS
  660. if ( Used > MaxPages )
  661. { MaxPages = Used; }
  662. #endif
  663. //
  664. // Release the lock if we claimed it earlier.
  665. //
  666. ReleaseFindExclusiveLock();
  667. }
  668. /********************************************************************/
  669. /* */
  670. /* A known area. */
  671. /* */
  672. /* We have an address and don't have a clue which heap */
  673. /* owns the space. Here we take a look at the address */
  674. /* and figure out if it is known to the current heap. */
  675. /* */
  676. /********************************************************************/
  677. BOOLEAN FIND::KnownArea( VOID *Address,CACHE *ParentCache )
  678. {
  679. REGISTER PAGE *Page;
  680. //
  681. // If we need to be thread safe then claim a sharable lock
  682. // on the hash table to stop it being changed under our feet.
  683. //
  684. ClaimFindShareLock();
  685. //
  686. // Find out if the address belongs to this heap
  687. // or any other heap of which we are aware (i.e.
  688. // when single image is active).
  689. //
  690. Page = (ParentCache -> FindParentPage( Address ));
  691. //
  692. // Release the lock if we claimed it earlier.
  693. //
  694. ReleaseFindShareLock();
  695. return (Page != ((PAGE*) NULL));
  696. }
  697. /********************************************************************/
  698. /* */
  699. /* Release a shared lock and update. */
  700. /* */
  701. /* We have been asked to insert a page into the lookaside. */
  702. /* We assume the caller already has a share lock which we */
  703. /* release when we are finished. */
  704. /* */
  705. /********************************************************************/
  706. VOID FIND::ReleaseFindShareLockAndUpdate
  707. (
  708. VOID *Address,
  709. PAGE *Page,
  710. SBIT32 Version
  711. )
  712. {
  713. //
  714. // When we create very small heaps (i.e. a heap
  715. // where only 20-30 allocations are requested)
  716. // the various caches become a problem as they
  717. // tend to front load work. So we allow a limit
  718. // to be set before which we run with caches
  719. // disabled.
  720. //
  721. if ( LookAsideActions >= LookAsideThreshold )
  722. {
  723. //
  724. // Claim an exclusive lock so we can update the
  725. // lookaside as needed.
  726. //
  727. ChangeToExclusiveLock();
  728. #ifdef DEBUGGING
  729. if ( Page -> ValidPage() )
  730. {
  731. #endif
  732. if ( Version == (Page -> GetVersion()) )
  733. {
  734. REGISTER LONG Base = (((LONG) Address) & ~MinAddressMask);
  735. REGISTER VOID *Segment = ((VOID*) Base);
  736. REGISTER LOOK_ASIDE *Current = FindLookAsideHead( Segment );
  737. //
  738. // Overwrite any existing information.
  739. //
  740. Current -> Address = Segment;
  741. Current -> Page = Page;
  742. #ifdef DEBUGGING
  743. Current -> Version = Page -> GetVersion();
  744. #endif
  745. #ifdef ENABLE_HEAP_STATISTICS
  746. //
  747. // Update the statistics.
  748. //
  749. Fills ++;
  750. #endif
  751. }
  752. #ifdef DEBUGGING
  753. }
  754. else
  755. { Failure( "Deleted page in ReleaseFindShareLockAndUpdate" ); }
  756. #endif
  757. //
  758. // Release the lock if we claimed it earlier.
  759. //
  760. ReleaseFindExclusiveLock();
  761. }
  762. else
  763. {
  764. //
  765. // Release the lock if we claimed it earlier.
  766. //
  767. ReleaseFindShareLock();
  768. }
  769. }
  770. /********************************************************************/
  771. /* */
  772. /* Resize the find table. */
  773. /* */
  774. /* We need to grow the hash table as it appears to be a little */
  775. /* small given the number of pages that have been created. */
  776. /* */
  777. /********************************************************************/
  778. VOID FIND::ResizeHashTable( VOID )
  779. {
  780. AUTO SBIT32 NewHashMask;
  781. AUTO SBIT32 NewLookAsideMask;
  782. //
  783. // When we need to resize the hash table it is a
  784. // straight race. The first thread to claim the
  785. // lock gets to do the work. Everyone else just
  786. // exits.
  787. //
  788. if ( (Resize) && (Spinlock.ClaimLock(0)) )
  789. {
  790. REGISTER SBIT32 AlignMask = (Rockall -> NaturalSize()-1);
  791. REGISTER SBIT32 NewMaxHash = (MaxHash * ExpandStore);
  792. REGISTER SBIT32 NewMaxLookAside = (MaxLookAside * ExpandStore);
  793. REGISTER SBIT32 NewHashSize = (NewMaxHash * sizeof(LIST));
  794. REGISTER SBIT32 NewLookAsideSize = (NewMaxLookAside * sizeof(LOOK_ASIDE));
  795. REGISTER SBIT32 NewTotalSize = (NewHashSize + NewLookAsideSize);
  796. REGISTER SBIT32 HashSize = (MaxHash * sizeof(LIST));
  797. REGISTER SBIT32 LookAsideSize = (MaxLookAside * sizeof(LOOK_ASIDE));
  798. REGISTER SBIT32 TotalSize = (HashSize + LookAsideSize);
  799. //
  800. // It is actually possible for a thread to get
  801. // delayed for so long that it thinks the hash
  802. // table still needs to be resized long after the
  803. // work has been completed. Additionally, we want
  804. // to make sure that all the new values are sane.
  805. //
  806. if
  807. (
  808. PowerOfTwo( (AlignMask+1) )
  809. &&
  810. (NewMaxHash > 0)
  811. &&
  812. (ConvertDivideToShift( NewMaxHash,& NewHashMask ))
  813. &&
  814. (NewMaxLookAside > 0)
  815. &&
  816. (ConvertDivideToShift( NewMaxLookAside,& NewLookAsideMask ))
  817. &&
  818. ((Used + (MaxHash / MinHashSpace)) > MaxHash)
  819. )
  820. {
  821. REGISTER LIST *NewHash;
  822. REGISTER LOOK_ASIDE *NewLookAside;
  823. //
  824. // We have been picked as the victim who
  825. // needs to resize the hash table. We are
  826. // going to call the external allocator
  827. // to get more memory. As we know this is
  828. // likely to to nail us we drop the lock to
  829. // allow other threads to continue.
  830. //
  831. ReleaseFindExclusiveLock();
  832. //
  833. // We know that allocating a new table and
  834. // initializing it is going to take ages.
  835. // Well at least everyone else gets to carry
  836. // on in the mean time.
  837. //
  838. NewHash =
  839. ((LIST*) Rockall -> NewArea( AlignMask,NewTotalSize,False ));
  840. NewLookAside =
  841. ((LOOK_ASIDE*) & NewHash[ NewMaxHash ]);
  842. //
  843. // If the memory allocation request for the hash
  844. // table fails we exit and try again later.
  845. //
  846. if ( NewHash != ((LIST*) AllocationFailure) )
  847. {
  848. REGISTER SBIT32 Count;
  849. //
  850. // Call the constructor for each hash table
  851. // linked list header.
  852. //
  853. for ( Count=0;Count < NewMaxHash;Count ++ )
  854. { PLACEMENT_NEW( & NewHash[ Count ],LIST ); }
  855. //
  856. // Zero the look aside structure.
  857. //
  858. for ( Count=0;Count < NewMaxLookAside;Count ++ )
  859. {
  860. REGISTER LOOK_ASIDE *Current = & NewLookAside[ Count ];
  861. Current -> Address = ((VOID*) NoCacheEntry);
  862. Current -> Page = ((PAGE*) NoCacheEntry);
  863. #ifdef DEBUGGING
  864. Current -> Version = ((SBIT32) NoCacheEntry);
  865. #endif
  866. }
  867. }
  868. //
  869. // Claim an exclusive lock so we can resize
  870. // the hash table.
  871. //
  872. ClaimFindExclusiveLock();
  873. //
  874. // If we have allocated the new find table
  875. // we can now rehash the existing entries.
  876. // If not we are out of here.
  877. //
  878. if ( NewHash != ((LIST*) AllocationFailure) )
  879. {
  880. REGISTER SBIT32 Count;
  881. REGISTER SBIT32 MaxOldHash = MaxHash;
  882. REGISTER LIST *OldHash = Hash;
  883. //
  884. // Update the control information
  885. // for the new hash table.
  886. //
  887. MaxHash = NewMaxHash;
  888. HashShift = (32-NewHashMask);
  889. HashMask = ((1 << NewHashMask)-1);
  890. MaxLookAside = NewMaxLookAside;
  891. LookAsideShift = (32-NewLookAsideMask);
  892. LookAsideMask = ((1 << NewLookAsideMask)-1);
  893. Hash = NewHash;
  894. LookAside = NewLookAside;
  895. //
  896. // Delete all the existing records
  897. // from the old hash table and insert
  898. // them into the new hash table.
  899. //
  900. for ( Count=0;Count < MaxOldHash;Count ++ )
  901. {
  902. REGISTER LIST *Current = & OldHash[ Count ];
  903. //
  904. // Walk along each hash bucket
  905. // deleting the records and inserting
  906. // them into the new hash table.
  907. //
  908. while ( ! Current -> EndOfList() )
  909. {
  910. REGISTER PAGE *Page = PAGE::FirstInFindList( Current );
  911. REGISTER VOID *Address = (Page -> GetAddress());
  912. Page -> DeleteFromFindList( Current );
  913. Page -> InsertInFindList( FindHashHead( Address ) );
  914. }
  915. }
  916. //
  917. // Time to do more operating system work
  918. // so lets drop the lock again.
  919. //
  920. ReleaseFindExclusiveLock();
  921. //
  922. // Delete all the list heads and return the
  923. // original allocation to the operating system.
  924. //
  925. for ( Count=0;Count < MaxOldHash;Count ++ )
  926. { PLACEMENT_DELETE( & OldHash[ Count ],LIST ); }
  927. //
  928. // Deallocate the old extent.
  929. //
  930. Rockall -> DeleteArea( ((VOID*) OldHash),TotalSize,False );
  931. //
  932. // We are finished so reclaim the lock
  933. // so we can exit.
  934. //
  935. ClaimFindExclusiveLock();
  936. }
  937. else
  938. { Resize = False; }
  939. }
  940. Spinlock.ReleaseLock();
  941. }
  942. }
  943. /********************************************************************/
  944. /* */
  945. /* Update the find table. */
  946. /* */
  947. /* We need to update the find table with certain information */
  948. /* to ensure it is used correctly and consistently. */
  949. /* */
  950. /********************************************************************/
  951. VOID FIND::UpdateFind( BIT32 NewMaxAddressMask,BIT32 NewMinAddressMask )
  952. {
  953. //
  954. // When we have a single heap image all the 'TopCache' sizes
  955. // must be the same.
  956. //
  957. if
  958. (
  959. (MaxAddressMask == NoAddressMask)
  960. ||
  961. (MaxAddressMask == NewMaxAddressMask)
  962. )
  963. {
  964. //
  965. // If we need to be thread safe then claim a sharable lock
  966. // on the hash table to stop it being changed under our feet.
  967. //
  968. ClaimFindExclusiveLock();
  969. //
  970. // Update the max address mask if it is not the current
  971. // value but yet consistent.
  972. //
  973. MaxAddressMask = NewMaxAddressMask;
  974. //
  975. // Update the address mask is the new heap has a smaller
  976. // parent than all of the other heaps.
  977. //
  978. if ( MinAddressMask > NewMinAddressMask )
  979. { MinAddressMask = NewMinAddressMask; }
  980. //
  981. // Release the lock if we claimed it earlier.
  982. //
  983. ReleaseFindExclusiveLock();
  984. }
  985. else
  986. { Failure( "Different 'TopCache' sizes with 'SingleImage'" ); }
  987. }
  988. /********************************************************************/
  989. /* */
  990. /* Walk the heap. */
  991. /* */
  992. /* We have been asked to walk the heap. It is hard to know */
/* why anybody might want to do this given the rest of the  */
  994. /* functionality available. Nonetheless, we just do what is */
  995. /* required to keep everyone happy. */
  996. /* */
  997. /********************************************************************/
//
// Walk the heap one allocation at a time.  '*Address' is both the
// cursor in (NULL starts a new walk) and the next allocation out.
// '*Active' reports whether that allocation is currently live and
// '*Size' its actual size.  Returns False when the walk cannot be
// started or has reached the end of the heap.
//
BOOLEAN FIND::Walk
    (
    BOOLEAN *Active,
    VOID **Address,
    CACHE *ParentCache,
    SBIT32 *Size
    )
    {
    REGISTER VOID *Memory = (*Address);
    REGISTER BOOLEAN Result;
    REGISTER BOOLEAN Update;
    REGISTER PAGE *Page;

    //
    // If we need to be thread safe then claim a sharable lock
    // on the hash table to stop it being changed under our feet.
    //
    ClaimFindShareLock();

    //
    // When the address is null we need to set up the heap
    // walk.  In all other cases we just extract the next
    // allocation in the list.
    //
    if ( Memory != NULL )
        {
        AUTO SEARCH_PAGE Details;

        //
        // Lets try the lookaside table.  There is a pretty
        // good chance that we will have the details we need
        // already in the cache.  If not we need to find it
        // the hard way.  During the process we add the mapping
        // into the lookaside for next time.  ('Update' records
        // a lookaside miss so the entry can be added on exit.)
        //
        if
                (
                Update =
                    (
                    ! FindLookAside
                        (
                        ((VOID*) (((LONG) Memory) & ~MinAddressMask)),
                        & Page
                        )
                    )
                )
            {
            //
            // Find the allocation page and get the details of entry.
            // We do this by finding the parent of the top cache.
            // We know that this is the global root and will find
            // the correct page even if it is on another heap (as
            // long as the find table is globally shared).
            //
            Page = (ParentCache -> FindParentPage( Memory ));
            }

        //
        // We now compute all the details relating to the address
        // so we can find any subsequent allocation.
        //
        if ( Page != ((PAGE*) NULL) )
            { Page = (Page -> FindPage( Memory,& Details,True )); }

        //
        // We may have failed to find the address.  If so
        // we simply fail the call.  If not we find the next
        // allocation in the heap.
        //
        if ( Result = ((Page != ((PAGE*) NULL)) && (Details.Found)) )
            {
            //
            // We need to walk the heap to get the details
            // of the next allocation.
            //
            if ( Result = (Page -> Walk( & Details )) )
                {
                REGISTER BIT32 AllocationBit =
                    ((*Details.VectorWord) & Details.AllocationMask);

                (*Active) = (AllocationBit != 0);
                (*Address) = Details.Address;
                (*Size) = (Details.Page -> ActualSize());

                //
                // If we are considering putting something
                // in the lookaside lets make sure that
                // we will get to hit the cache entry at
                // least once.  If not lets forget putting
                // it in the cache.
                //
                if ( Update )
                    {
                    Update =
                        (
                        (((LONG) Memory) & ~MinAddressMask)
                            ==
                        (((LONG) Details.Address) & ~MinAddressMask)
                        );
                    }
                }
            }
        }
    else
        {
        AUTO SEARCH_PAGE Details;

        //
        // We start a heap walk by setting the initial
        // address to the value null.
        //
        Details.Address = NULL;
        Details.Cache = ParentCache;
        Details.Page = NULL;
        Page = NULL;
        Update = False;

        //
        // We walk the heap to get the details of the
        // first heap allocation.
        //
        // NOTE(review): 'Page' is NULL here, so this call relies
        // on 'PAGE::Walk' never touching 'this' when starting a
        // fresh walk (i.e. when 'Details.Page' is NULL).  That is
        // formally undefined behaviour — confirm against the
        // implementation of 'PAGE::Walk'.
        //
        if ( Result = (Page -> Walk( & Details )) )
            {
            REGISTER BIT32 AllocationBit =
                ((*Details.VectorWord) & Details.AllocationMask);

            (*Active) = (AllocationBit != 0);
            (*Address) = Details.Address;
            (*Size) = (Details.Page -> ActualSize());
            }
        }

    //
    // Release the lock if we claimed it earlier and
    // update the lookaside if needed.
    //
    if ( (Update) && (Result) )
        { ReleaseFindShareLockAndUpdate( Memory,Page,Page -> GetVersion() ); }
    else
        { ReleaseFindShareLock(); }

    return Result;
    }
  1129. /********************************************************************/
  1130. /* */
  1131. /* Class destructor. */
  1132. /* */
  1133. /* Delete the hash table and release all the associated memory. */
  1134. /* */
  1135. /********************************************************************/
  1136. FIND::~FIND( VOID )
  1137. {
  1138. REGISTER SBIT32 Count;
  1139. REGISTER SBIT32 HashSize = (MaxHash * sizeof(LIST));
  1140. REGISTER SBIT32 LookAsideSize = (MaxLookAside * sizeof(LOOK_ASIDE));
  1141. REGISTER SBIT32 TotalSize = (HashSize + LookAsideSize);
  1142. //
  1143. // Call the destructor for each hash table
  1144. // linked list header.
  1145. //
  1146. for ( Count=0;Count < MaxHash;Count ++ )
  1147. { PLACEMENT_DELETE( & Hash[ Count ],LIST ); }
  1148. //
  1149. // Deallocate the area.
  1150. //
  1151. Rockall -> DeleteArea( ((VOID*) Hash),TotalSize,False );
  1152. }