Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

3154 lines
82 KiB

  1. #ifndef _COLLECTION_HXX_INCLUDED
  2. #define _COLLECTION_HXX_INCLUDED
  3. // asserts
  4. //
  5. // #define COLLAssert to point to your favorite assert function per #include
  6. #ifdef COLLAssert
  7. #else // !COLLAssert
  8. #define COLLAssert Assert
  9. #endif // COLLAssert
  10. #ifdef DHTAssert
  11. #else // !DHTAssert
  12. #define DHTAssert COLLAssert
  13. #endif // DHTAssert
  14. #include "dht.hxx"
  15. #include <memory.h>
  16. #include <minmax.h>
  17. #pragma warning ( disable : 4786 ) // we allow huge symbol names
  18. namespace COLL {
  19. //////////////////////////////////////////////////////////////////////////////////////////
  20. // CInvasiveList
  21. //
  22. // Implements an "invasive" doubly linked list of objects. The list is "invasive"
  23. // because part of its state is embedded directly in the objects it contains. An
  24. // additional property of this list class is that the head of the list can be relocated
  25. // without updating the state of any of the contained objects.
  26. //
  27. // CObject = class representing objects in the list. each class must contain
  28. // storage for a CElement for embedded list state
  29. // OffsetOfILE = inline function returning the offset of the CElement contained
  30. // in the CObject
// pointer-to-function type returning the byte offset of an embedded
// list-element / context structure within its containing object
typedef SIZE_T (*PfnOffsetOf)();
// Invasive doubly linked list of CObject. Each CObject embeds a CElement at
// the offset returned by OffsetOfILE(), and the list keeps its links there,
// so no per-node allocation is ever performed. Because the "nil" link is
// encoded as the pile produced by a NULL object (see _PileFromPobj), the
// list head can be relocated without updating any contained object.
template< class CObject, PfnOffsetOf OffsetOfILE >
class CInvasiveList
{
    public:
        // invasive list element state (embedded in linked objects)
        class CElement
        {
            public:
                // ctor / dtor
                //
                // (CElement*)-1 in both links marks "not a member of any list"
                CElement() : m_pilePrev( (CElement*)-1 ), m_pileNext( (CElement*)-1 ) {}
                ~CElement() {}
            private:
                CElement& operator=( CElement& ); // disallowed
                friend class CInvasiveList< CObject, OffsetOfILE >;
                CElement* m_pilePrev;   // link to previous element (or nil sentinel)
                CElement* m_pileNext;   // link to next element (or nil sentinel)
        };
    public:
        // ctor / dtor
        CInvasiveList();
        ~CInvasiveList();
        // operators
        //
        // NOTE(review): shallow copy -- after assignment both list objects
        // refer to the same chain of elements
        CInvasiveList& operator=( const CInvasiveList& il );
        // API
        BOOL FEmpty() const;
        BOOL FMember( CObject* const pobj ) const;
        CObject* Prev( CObject* const pobj ) const;
        CObject* Next( CObject* const pobj ) const;
        CObject* PrevMost() const;
        CObject* NextMost() const;
        void InsertAsPrevMost( CObject* const pobj );
        void InsertAsNextMost( CObject* const pobj );
        void Remove( CObject* const pobj );
        void Empty();
    private:
        // internal functions: translate between a pointer to an object and a
        // pointer to its embedded list element ( NULL maps to the sentinel )
        CObject* _PobjFromPile( CElement* const pile ) const;
        CElement* _PileFromPobj( CObject* const pobj ) const;
    private:
        CElement* m_pilePrevMost;   // head (prev-most element), or sentinel if empty
        CElement* m_pileNextMost;   // tail (next-most element), or sentinel if empty
};
  74. // ctor
  75. template< class CObject, PfnOffsetOf OffsetOfILE >
  76. inline CInvasiveList< CObject, OffsetOfILE >::
  77. CInvasiveList()
  78. {
  79. // start with an empty list
  80. Empty();
  81. }
  82. // dtor
  83. template< class CObject, PfnOffsetOf OffsetOfILE >
  84. inline CInvasiveList< CObject, OffsetOfILE >::
  85. ~CInvasiveList()
  86. {
  87. }
  88. // assignment operator
  89. template< class CObject, PfnOffsetOf OffsetOfILE >
  90. inline CInvasiveList< CObject, OffsetOfILE >& CInvasiveList< CObject, OffsetOfILE >::
  91. operator=( const CInvasiveList& il )
  92. {
  93. m_pilePrevMost = il.m_pilePrevMost;
  94. m_pileNextMost = il.m_pileNextMost;
  95. return *this;
  96. }
  97. // returns fTrue if the list is empty
  98. template< class CObject, PfnOffsetOf OffsetOfILE >
  99. inline BOOL CInvasiveList< CObject, OffsetOfILE >::
  100. FEmpty() const
  101. {
  102. return m_pilePrevMost == _PileFromPobj( NULL );
  103. }
  104. // returns fTrue if the specified object is a member of this list
  105. //
  106. // NOTE: this function currently returns fTrue if the specified object is a
  107. // member of any list!
  108. template< class CObject, PfnOffsetOf OffsetOfILE >
  109. inline BOOL CInvasiveList< CObject, OffsetOfILE >::
  110. FMember( CObject* const pobj ) const
  111. {
  112. #ifdef EXPENSIVE_DEBUG
  113. for ( CObject* pobjT = PrevMost(); pobjT && pobjT != pobj; pobjT = Next( pobjT ) )
  114. {
  115. }
  116. return pobjT == pobj;
  117. #else // !DEBUG
  118. CElement* const pile = _PileFromPobj( pobj );
  119. COLLAssert( ( ( DWORD_PTR( pile->m_pilePrev ) + DWORD_PTR( pile->m_pileNext ) ) == -2 ) ==
  120. ( pile->m_pilePrev == (CElement*)-1 && pile->m_pileNext == (CElement*)-1 ) );
  121. return ( DWORD_PTR( pile->m_pilePrev ) + DWORD_PTR( pile->m_pileNext ) ) != -2;
  122. #endif // DEBUG
  123. }
  124. // returns the prev object to the given object in the list
  125. template< class CObject, PfnOffsetOf OffsetOfILE >
  126. inline CObject* CInvasiveList< CObject, OffsetOfILE >::
  127. Prev( CObject* const pobj ) const
  128. {
  129. return _PobjFromPile( _PileFromPobj( pobj )->m_pilePrev );
  130. }
  131. // returns the next object to the given object in the list
  132. template< class CObject, PfnOffsetOf OffsetOfILE >
  133. inline CObject* CInvasiveList< CObject, OffsetOfILE >::
  134. Next( CObject* const pobj ) const
  135. {
  136. return _PobjFromPile( _PileFromPobj( pobj )->m_pileNext );
  137. }
  138. // returns the prev-most object to the given object in the list
  139. template< class CObject, PfnOffsetOf OffsetOfILE >
  140. inline CObject* CInvasiveList< CObject, OffsetOfILE >::
  141. PrevMost() const
  142. {
  143. return _PobjFromPile( m_pilePrevMost );
  144. }
  145. // returns the next-most object to the given object in the list
  146. template< class CObject, PfnOffsetOf OffsetOfILE >
  147. inline CObject* CInvasiveList< CObject, OffsetOfILE >::
  148. NextMost() const
  149. {
  150. return _PobjFromPile( m_pileNextMost );
  151. }
// inserts the given object as the prev-most (first) object in the list
template< class CObject, PfnOffsetOf OffsetOfILE >
inline void CInvasiveList< CObject, OffsetOfILE >::
InsertAsPrevMost( CObject* const pobj )
{
    CElement* const pile = _PileFromPobj( pobj );
    // this object had better not already be in the list
    COLLAssert( !FMember( pobj ) );
    // this object had better not already be in any list
    // ( (CElement*)-1 in both links is the "unlinked" mark set by the
    //   CElement ctor and by Remove() )
    COLLAssert( pile->m_pilePrev == (CElement*)-1 );
    COLLAssert( pile->m_pileNext == (CElement*)-1 );
    // the list is empty
    if ( m_pilePrevMost == _PileFromPobj( NULL ) )
    {
        // insert this element as the only element in the list
        // ( _PileFromPobj( NULL ) is the end-of-list sentinel )
        pile->m_pilePrev = _PileFromPobj( NULL );
        pile->m_pileNext = _PileFromPobj( NULL );
        m_pilePrevMost = pile;
        m_pileNextMost = pile;
    }
    // the list is not empty
    else
    {
        // insert this element at the prev-most position in the list
        pile->m_pilePrev = _PileFromPobj( NULL );
        pile->m_pileNext = m_pilePrevMost;
        m_pilePrevMost->m_pilePrev = pile;
        m_pilePrevMost = pile;
    }
}
// inserts the given object as the next-most (last) object in the list
template< class CObject, PfnOffsetOf OffsetOfILE >
inline void CInvasiveList< CObject, OffsetOfILE >::
InsertAsNextMost( CObject* const pobj )
{
    CElement* const pile = _PileFromPobj( pobj );
    // this object had better not already be in the list
    COLLAssert( !FMember( pobj ) );
    // this object had better not already be in any list
    // ( both links are (CElement*)-1 only while unlinked )
    COLLAssert( pile->m_pilePrev == (CElement*)-1 );
    COLLAssert( pile->m_pileNext == (CElement*)-1 );
    // the list is empty
    if ( m_pileNextMost == _PileFromPobj( NULL ) )
    {
        // insert this element as the only element in the list
        // ( _PileFromPobj( NULL ) is the end-of-list sentinel )
        pile->m_pilePrev = _PileFromPobj( NULL );
        pile->m_pileNext = _PileFromPobj( NULL );
        m_pilePrevMost = pile;
        m_pileNextMost = pile;
    }
    // the list is not empty
    else
    {
        // insert this element at the next-most position in the list
        pile->m_pilePrev = m_pileNextMost;
        pile->m_pileNext = _PileFromPobj( NULL );
        m_pileNextMost->m_pileNext = pile;
        m_pileNextMost = pile;
    }
}
// removes the given object from the list
template< class CObject, PfnOffsetOf OffsetOfILE >
inline void CInvasiveList< CObject, OffsetOfILE >::
Remove( CObject* const pobj )
{
    CElement* const pile = _PileFromPobj( pobj );
    // this object had better already be in the list
    COLLAssert( FMember( pobj ) );
    // there is an element after us in the list
    if ( pile->m_pileNext != _PileFromPobj( NULL ) )
    {
        // fix up its prev element to be our prev element (if any)
        pile->m_pileNext->m_pilePrev = pile->m_pilePrev;
    }
    else
    {
        // we were the tail:
        // set the next-most element to be our prev element (if any)
        m_pileNextMost = pile->m_pilePrev;
    }
    // there is an element before us in the list
    if ( pile->m_pilePrev != _PileFromPobj( NULL ) )
    {
        // fix up its next element to be our next element (if any)
        pile->m_pilePrev->m_pileNext = pile->m_pileNext;
    }
    else
    {
        // we were the head:
        // set the prev-most element to be our next element (if any)
        m_pilePrevMost = pile->m_pileNext;
    }
    // mark ourself as not in any list ( the state FMember() tests for )
    pile->m_pilePrev = (CElement*)-1;
    pile->m_pileNext = (CElement*)-1;
}
  246. // resets the list to the empty state
  247. template< class CObject, PfnOffsetOf OffsetOfILE >
  248. inline void CInvasiveList< CObject, OffsetOfILE >::
  249. Empty()
  250. {
  251. m_pilePrevMost = _PileFromPobj( NULL );
  252. m_pileNextMost = _PileFromPobj( NULL );
  253. }
  254. // converts a pointer to an ILE to a pointer to the object
  255. template< class CObject, PfnOffsetOf OffsetOfILE >
  256. inline CObject* CInvasiveList< CObject, OffsetOfILE >::
  257. _PobjFromPile( CElement* const pile ) const
  258. {
  259. return (CObject*)( (BYTE*)pile - OffsetOfILE() );
  260. }
  261. // converts a pointer to an object to a pointer to the ILE
  262. template< class CObject, PfnOffsetOf OffsetOfILE >
  263. inline CInvasiveList< CObject, OffsetOfILE >::CElement* CInvasiveList< CObject, OffsetOfILE >::
  264. _PileFromPobj( CObject* const pobj ) const
  265. {
  266. return (CElement*)( (BYTE*)pobj + OffsetOfILE() );
  267. }
  268. //////////////////////////////////////////////////////////////////////////////////////////
  269. // CApproximateIndex
  270. //
  271. // Implements a dynamically resizable table of entries indexed approximately by key
  272. // ranges of a specified uncertainty. Accuracy and exact ordering are sacrificied for
  273. // improved performance and concurrency. This index is optimized for a set of records
  274. // whose keys occupy a fairly dense range of values. The index is designed to handle
  275. // key ranges that can wrap around zero. As such, the indexed key range can not span
  276. // more than half the numerical precision of the key.
  277. //
  278. // CKey = class representing keys used to order entries in the mesh table.
  279. // this class must support all the standard math operators. wrap-
  280. // around in the key values is supported
  281. // CEntry = class indexed by the mesh table. this class must contain storage
  282. // for a CInvasiveContext class
  283. // OffsetOfIC = inline function returning the offset of the CInvasiveContext
  284. // contained in the CEntry
  285. //
  286. // You must use the DECLARE_APPROXIMATE_INDEX macro to declare this class.
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
class CApproximateIndex
{
    public:
        // class containing context needed per CEntry
        class CInvasiveContext
        {
            public:
                CInvasiveContext() {}
                ~CInvasiveContext() {}
                // offset of the embedded list element relative to the CEntry
                static SIZE_T OffsetOfILE() { return OffsetOfIC() + OffsetOf( CInvasiveContext, m_ile ); }
            private:
                // list element linking the entry into its bucket's invasive list
                // NOTE(review): conforming compilers require 'typename' on this
                // dependent qualified name; kept as-is for the original toolchain
                CInvasiveList< CEntry, OffsetOfILE >::CElement m_ile;
        };
        // API Error Codes
        enum ERR
        {
            errSuccess,             // operation completed
            errInvalidParameter,    // bad argument passed to ErrInit
            errOutOfMemory,         // allocation failure
            errEntryNotFound,       // no entry for the locked key / pointer
            errNoCurrentEntry,      // currency is not positioned on an entry
            errKeyRangeExceeded,    // operation would grow the key space too far
        };
        // API Lock Context
        class CLock;
    public:
        // ctor / dtor
        CApproximateIndex( const int Rank );
        ~CApproximateIndex();
        // API
        ERR ErrInit( const CKey dkeyPrecision,
                     const CKey dkeyUncertainty,
                     const double dblSpeedSizeTradeoff );
        void Term();
        void LockKeyPtr( const CKey& key, CEntry* const pentry, CLock* const plock );
        void UnlockKeyPtr( CLock* const plock );
        long CmpKey( const CKey& key1, const CKey& key2 ) const;
        CKey KeyRangeFirst() const;
        CKey KeyRangeLast() const;
        CKey KeyInsertLeast() const;
        CKey KeyInsertMost() const;
        ERR ErrRetrieveEntry( CLock* const plock, CEntry** const ppentry ) const;
        ERR ErrInsertEntry( CLock* const plock, CEntry* const pentry, const BOOL fNextMost = fTrue );
        ERR ErrDeleteEntry( CLock* const plock );
        ERR ErrReserveEntry( CLock* const plock );
        void UnreserveEntry( CLock* const plock );
        void MoveBeforeFirst( CLock* const plock );
        ERR ErrMoveNext( CLock* const plock );
        ERR ErrMovePrev( CLock* const plock );
        void MoveAfterLast( CLock* const plock );
        void MoveBeforeKeyPtr( const CKey& key, CEntry* const pentry, CLock* const plock );
        void MoveAfterKeyPtr( const CKey& key, CEntry* const pentry, CLock* const plock );
    public:
        // bucket used for containing index entries that have approximately
        // the same key
        class CBucket
        {
            public:
                // bucket ID
                typedef unsigned long ID;
            public:
                CBucket() {}
                ~CBucket() {}
                CBucket& operator=( const CBucket& bucket )
                {
                    m_id = bucket.m_id;
                    m_cPin = bucket.m_cPin;
                    m_il = bucket.m_il;     // shallow list copy (see CInvasiveList::operator=)
                    return *this;
                }
            public:
                ID m_id;                    // this bucket's ID
                unsigned long m_cPin;       // count of pins holding this bucket in place
                CInvasiveList< CEntry, CInvasiveContext::OffsetOfILE > m_il;    // entries in this bucket
        };
        // table that contains our buckets
        typedef CDynamicHashTable< CBucket::ID, CBucket > CBucketTable;
    public:
        // API Lock Context
        class CLock
        {
            public:
                CLock() {}
                ~CLock() {}
            private:
                friend class CApproximateIndex< CKey, CEntry, OffsetOfIC >;
                CBucketTable::CLock m_lock; // write lock on the bucket in the hash table
                CBucket m_bucket;           // local copy of the locked bucket
                CEntry* m_pentryPrev;       // currency: saved previous entry
                CEntry* m_pentry;           // currency: current entry ( NULL if none )
                CEntry* m_pentryNext;       // currency: saved next entry
        };
    private:
        CBucket::ID _IdFromKeyPtr( const CKey& key, CEntry* const pentry ) const;
        CBucket::ID _DeltaId( const CBucket::ID id, const long did ) const;
        long _SubId( const CBucket::ID id1, const CBucket::ID id2 ) const;
        long _CmpId( const CBucket::ID id1, const CBucket::ID id2 ) const;
        CInvasiveContext* _PicFromPentry( CEntry* const pentry ) const;
        BOOL _FExpandIdRange( const CBucket::ID idNew );
        ERR _ErrInsertBucket( CLock* const plock );
        ERR _ErrInsertEntry( CLock* const plock, CEntry* const pentry );
        ERR _ErrMoveNext( CLock* const plock );
        ERR _ErrMovePrev( CLock* const plock );
    private:
        // never updated ( computed once in ErrInit )
        long m_shfKeyPrecision;             // ceil( log2 ) of the key precision
        long m_shfKeyUncertainty;           // ceil( log2 ) of the key uncertainty
        long m_shfBucketHash;               // ceil( log2 ) of the bucket hash fan-out
        long m_shfFillMSB;                  // count of unused high bits in a bucket ID
        CBucket::ID m_maskBucketKey;        // mask selecting the key-derived ID bits
        CBucket::ID m_maskBucketPtr;        // mask selecting the pointer-hash ID bits
        CBucket::ID m_maskBucketID;         // mask selecting all valid ID bits
        long m_didRangeMost;                // widest permitted bucket ID range
        //BYTE m_rgbReserved1[ 0 ];
        // seldom updated
        CCriticalSection m_critUpdateIdRange;   // serializes updates of the ID range below
        long m_cidRange;                    // count of bucket IDs in the current range
        CBucket::ID m_idRangeFirst;         // first bucket ID believed to be in use
        CBucket::ID m_idRangeLast;          // last bucket ID believed to be in use
        BYTE m_rgbReserved2[ 16 ];          // padding; presumably separates the hot
                                            // table below onto its own cache line -- confirm
        // commonly updated
        CBucketTable m_bt;                  // hash table of buckets
        //BYTE m_rgbReserved3[ 0 ];
};
// ctor
//
// Rank seeds the deadlock-rank information of the internal locks
// NOTE(review): the ID-range critical section is built with Rank - 1,
// presumably so it ranks below the bucket table's locks -- confirm against
// the sync library's rank convention
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::
CApproximateIndex( const int Rank )
    :   m_critUpdateIdRange( CLockBasicInfo( CSyncBasicInfo( "CApproximateIndex::m_critUpdateIdRange" ), Rank - 1, 0 ) ),
        m_bt( Rank )
{
}
  420. // dtor
  421. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  422. inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  423. ~CApproximateIndex()
  424. {
  425. }
  426. // initializes the approximate index using the given parameters. if the index
  427. // cannot be initialized, errOutOfMemory is returned
  428. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  429. inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  430. ErrInit( const CKey dkeyPrecision,
  431. const CKey dkeyUncertainty,
  432. const double dblSpeedSizeTradeoff )
  433. {
  434. // validate all parameters
  435. if ( dkeyPrecision <= dkeyUncertainty ||
  436. dkeyUncertainty < CKey( 0 ) ||
  437. dblSpeedSizeTradeoff < 0.0 || dblSpeedSizeTradeoff > 1.0 )
  438. {
  439. return errInvalidParameter;
  440. }
  441. // init our parameters
  442. const CBucket::ID cbucketHashMin = CBucket::ID( ( 1.0 - dblSpeedSizeTradeoff ) * OSSyncGetProcessorCount() );
  443. CKey maskKey;
  444. for ( m_shfKeyPrecision = 0, maskKey = 0;
  445. dkeyPrecision > CKey( 1 ) << m_shfKeyPrecision && m_shfKeyPrecision < sizeof( CKey ) * 8;
  446. maskKey |= CKey( 1 ) << m_shfKeyPrecision++ )
  447. {
  448. }
  449. for ( m_shfKeyUncertainty = 0;
  450. dkeyUncertainty > CKey( 1 ) << m_shfKeyUncertainty && m_shfKeyUncertainty < sizeof( CKey ) * 8;
  451. m_shfKeyUncertainty++ )
  452. {
  453. }
  454. for ( m_shfBucketHash = 0, m_maskBucketPtr = 0;
  455. cbucketHashMin > CBucket::ID( 1 ) << m_shfBucketHash && m_shfBucketHash < sizeof( CBucket::ID ) * 8;
  456. m_maskBucketPtr |= CBucket::ID( 1 ) << m_shfBucketHash++ )
  457. {
  458. }
  459. m_maskBucketKey = CBucket::ID( maskKey >> m_shfKeyUncertainty );
  460. m_shfFillMSB = sizeof( CBucket::ID ) * 8 - m_shfKeyPrecision + m_shfKeyUncertainty - m_shfBucketHash;
  461. m_shfFillMSB = max( m_shfFillMSB, 0 );
  462. m_maskBucketID = ( ~CBucket::ID( 0 ) ) >> m_shfFillMSB;
  463. // if our parameters leave us with too much or too little precision for
  464. // our bucket IDs, fail. "too much" precision would allow our bucket IDs
  465. // to span more than half the precision of our bucket ID and cause our
  466. // wrap-around-aware comparisons to fail. "too little" precision would
  467. // give us too few bucket IDs to allow us to hash efficiently
  468. //
  469. // NOTE: we check for hash efficiency in the worst case so that we don't
  470. // suddenly return errInvalidParameter on some new monster machine
  471. const CBucket::ID cbucketHashMax = CBucket::ID( 1.0 * OSSyncGetProcessorCountMax() );
  472. for ( long shfBucketHashMax = 0;
  473. cbucketHashMax > CBucket::ID( 1 ) << shfBucketHashMax && shfBucketHashMax < sizeof( CBucket::ID ) * 8;
  474. shfBucketHashMax++ )
  475. {
  476. }
  477. long shfFillMSBMin;
  478. shfFillMSBMin = sizeof( CBucket::ID ) * 8 - m_shfKeyPrecision + m_shfKeyUncertainty - shfBucketHashMax;
  479. shfFillMSBMin = max( shfFillMSBMin, 0 );
  480. if ( shfFillMSBMin < 0 ||
  481. shfFillMSBMin > sizeof( CBucket::ID ) * 8 - shfBucketHashMax )
  482. {
  483. return errInvalidParameter;
  484. }
  485. // limit the ID range to within half the precision of the bucket ID
  486. m_didRangeMost = m_maskBucketID >> 1;
  487. // init our bucket ID range to be empty
  488. m_cidRange = 0;
  489. m_idRangeFirst = 0;
  490. m_idRangeLast = 0;
  491. // initialize the bucket table
  492. if ( m_bt.ErrInit( 5.0, 1.0 ) != errSuccess )
  493. {
  494. Term();
  495. return errOutOfMemory;
  496. }
  497. return errSuccess;
  498. }
  499. // terminates the approximate index. this function can be called even if the
  500. // index has never been initialized or is only partially initialized
  501. //
  502. // NOTE: any data stored in the index at this time will be lost!
  503. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  504. inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  505. Term()
  506. {
  507. // terminate the bucket table
  508. m_bt.Term();
  509. }
// acquires a lock on the specified key and entry pointer and returns the lock
// in the provided lock context
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
LockKeyPtr( const CKey& key, CEntry* const pentry, CLock* const plock )
{
    // compute the bucket ID for this key and entry pointer
    plock->m_bucket.m_id = _IdFromKeyPtr( key, pentry );
    // write lock this bucket ID in the bucket table
    m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );
    // fetch this bucket from the bucket table if it exists. if it doesn't
    // exist, the bucket will start out empty and have the above bucket ID
    plock->m_bucket.m_cPin = 0;
    plock->m_bucket.m_il.Empty();
    (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );
    // the entry is in this bucket
    if ( plock->m_bucket.m_il.FMember( pentry ) )
    {
        // set our currency to be on this entry in the bucket
        plock->m_pentryPrev = NULL;
        plock->m_pentry = pentry;
        plock->m_pentryNext = NULL;
    }
    // the entry is not in this bucket
    else
    {
        // set our currency to be before the first entry in this bucket
        plock->m_pentryPrev = NULL;
        plock->m_pentry = NULL;
        plock->m_pentryNext = plock->m_bucket.m_il.PrevMost();
    }
    // if this bucket is pinned, it had better be represented by the valid
    // bucket ID range of the index
    //
    // NOTE(review): the original comment said "isn't pinned", but the
    // predicate below only constrains the pinned ( m_cPin != 0 ) case
    COLLAssert( !plock->m_bucket.m_cPin ||
                ( _CmpId( plock->m_bucket.m_id, m_idRangeFirst ) >= 0 &&
                  _CmpId( plock->m_bucket.m_id, m_idRangeLast ) <= 0 ) );
}
  547. // releases the lock in the specified lock context
  548. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  549. inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  550. UnlockKeyPtr( CLock* const plock )
  551. {
  552. // if this bucket isn't pinned, it had better be represented by the valid
  553. // bucket ID range of the index
  554. COLLAssert( !plock->m_bucket.m_cPin ||
  555. ( _CmpId( plock->m_bucket.m_id, m_idRangeFirst ) >= 0 &&
  556. _CmpId( plock->m_bucket.m_id, m_idRangeLast ) <= 0 ) );
  557. // write unlock this bucket ID in the bucket table
  558. m_bt.WriteUnlockKey( &plock->m_lock );
  559. }
  560. // compares two keys as they would be seen relative to each other by the
  561. // approximate index
  562. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  563. inline long CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  564. CmpKey( const CKey& key1, const CKey& key2 ) const
  565. {
  566. return _CmpId( _IdFromKeyPtr( key1, NULL ), _IdFromKeyPtr( key2, NULL ) );
  567. }
  568. // returns the first key in the current key range. this key is guaranteed to
  569. // be at least as small as the key of any record currently in the index given
  570. // the precision and uncertainty of the index
  571. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  572. inline CKey CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  573. KeyRangeFirst() const
  574. {
  575. return CKey( m_idRangeFirst >> m_shfBucketHash ) << m_shfKeyUncertainty;
  576. }
  577. // returns the last key in the current key range. this key is guaranteed to
  578. // be at least as large as the key of any record currently in the index given
  579. // the precision and uncertainty of the index
  580. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  581. inline CKey CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  582. KeyRangeLast() const
  583. {
  584. return CKey( m_idRangeLast >> m_shfBucketHash ) << m_shfKeyUncertainty;
  585. }
// returns the smallest key that could be successfully inserted into the index
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CKey CApproximateIndex< CKey, CEntry, OffsetOfIC >::
KeyInsertLeast() const
{
    const CBucket::ID cBucketHash = 1 << m_shfBucketHash;
    // the least insertable ID is the widest permitted distance
    // ( m_didRangeMost ) before the current last ID...
    CBucket::ID idFirstLeast = m_idRangeLast - m_didRangeMost;
    // ...rounded up to the next multiple of the hash fan-out
    // ( presumably so every pointer-hash variation of the ID stays within
    //   the permitted range -- confirm )
    idFirstLeast = idFirstLeast + ( cBucketHash - idFirstLeast % cBucketHash ) % cBucketHash;
    // convert the bucket ID back into key units
    return CKey( idFirstLeast >> m_shfBucketHash ) << m_shfKeyUncertainty;
}
  596. // returns the largest key that could be successfully inserted into the index
  597. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  598. inline CKey CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  599. KeyInsertMost() const
  600. {
  601. const CBucket::ID cBucketHash = 1 << m_shfBucketHash;
  602. CBucket::ID idLastMost = m_idRangeFirst + m_didRangeMost;
  603. idLastMost = idLastMost - ( idLastMost + 1 ) % cBucketHash;
  604. return CKey( idLastMost >> m_shfBucketHash ) << m_shfKeyUncertainty;
  605. }
  606. // retrieves the entry corresponding to the key and entry pointer locked by the
  607. // specified lock context. if there is no entry for this key, errEntryNotFound
  608. // will be returned
  609. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  610. inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  611. ErrRetrieveEntry( CLock* const plock, CEntry** const ppentry ) const
  612. {
  613. // return the current entry. if the current entry is NULL, then there is
  614. // no current entry
  615. *ppentry = plock->m_pentry;
  616. return *ppentry ? errSuccess : errEntryNotFound;
  617. }
// inserts a new entry corresponding to the key and entry pointer locked by the
// specified lock context. fNextMost biases the position the entry will take
// when inserted in the index. if the new entry cannot be inserted,
// errOutOfMemory will be returned. if inserting the new entry will cause the
// key space to become too large, errKeyRangeExceeded will be returned
//
// NOTE: it is illegal to attempt to insert an entry into the index that is
// already in the index
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
ErrInsertEntry( CLock* const plock, CEntry* const pentry, const BOOL fNextMost )
{
    CBucketTable::ERR err;
    // this entry had better not already be in the index
    COLLAssert( !plock->m_bucket.m_il.FMember( pentry ) );
    // pin the bucket on behalf of the entry to insert
    plock->m_bucket.m_cPin++;
    // insert this entry at the selected end of the current bucket
    if ( fNextMost )
    {
        plock->m_bucket.m_il.InsertAsNextMost( pentry );
    }
    else
    {
        plock->m_bucket.m_il.InsertAsPrevMost( pentry );
    }
    // try to update this bucket in the bucket table
    if ( ( err = m_bt.ErrReplaceEntry( &plock->m_lock, plock->m_bucket ) ) != CBucketTable::errSuccess )
    {
        // the only expected failure is that the bucket is not in the table yet
        COLLAssert( err == CBucketTable::errNoCurrentEntry );
        // the bucket does not yet exist, so try to insert it in the bucket table
        return _ErrInsertEntry( plock, pentry );
    }
    // we succeeded in updating the bucket
    else
    {
        // set the current entry to the newly inserted entry
        plock->m_pentryPrev = NULL;
        plock->m_pentry = pentry;
        plock->m_pentryNext = NULL;
        return errSuccess;
    }
}
// deletes the entry corresponding to the key and entry pointer locked by the
// specified lock context. if there is no entry for this key, errNoCurrentEntry
// will be returned
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
ErrDeleteEntry( CLock* const plock )
{
    // there is a current entry
    if ( plock->m_pentry )
    {
        // save the current entry's prev and next pointers so that we can
        // recover our currency when it is deleted
        plock->m_pentryPrev = plock->m_bucket.m_il.Prev( plock->m_pentry );
        plock->m_pentryNext = plock->m_bucket.m_il.Next( plock->m_pentry );
        // delete the current entry from this bucket
        plock->m_bucket.m_il.Remove( plock->m_pentry );
        // unpin the bucket on behalf of this entry
        plock->m_bucket.m_cPin--;
        // update the bucket in the bucket table. it is OK if the bucket is
        // empty because empty buckets are deleted in _ErrMoveNext/_ErrMovePrev
        const CBucketTable::ERR err = m_bt.ErrReplaceEntry( &plock->m_lock, plock->m_bucket );
        COLLAssert( err == CBucketTable::errSuccess );
        // set our currency to no current entry
        plock->m_pentry = NULL;
        return errSuccess;
    }
    // there is no current entry
    else
    {
        // return no current entry
        return errNoCurrentEntry;
    }
}
// reserves room to insert a new entry corresponding to the key and entry
// pointer locked by the specified lock context. if room for the new entry
// cannot be reserved, errOutOfMemory will be returned. if reserving the new
// entry will cause the key space to become too large, errKeyRangeExceeded
// will be returned
//
// NOTE: once room is reserved, it must be unreserved via UnreserveEntry()
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
ErrReserveEntry( CLock* const plock )
{
    // pin the locked bucket ( the reservation is this pin )
    plock->m_bucket.m_cPin++;
    // we failed to update the pin count on the bucket in the index because the
    // bucket doesn't exist
    CBucketTable::ERR errBT;
    if ( ( errBT = m_bt.ErrReplaceEntry( &plock->m_lock, plock->m_bucket ) ) != CBucketTable::errSuccess )
    {
        COLLAssert( errBT == CBucketTable::errNoCurrentEntry );
        // insert this bucket in the bucket table
        ERR err;
        if ( ( err = _ErrInsertBucket( plock ) ) != errSuccess )
        {
            COLLAssert( err == errOutOfMemory || err == errKeyRangeExceeded );
            // we cannot insert the bucket so unpin the locked bucket and fail
            // the reservation
            plock->m_bucket.m_cPin--;
            return err;
        }
    }
    return errSuccess;
}
  726. // removes a reservation made with ErrReserveEntry()
  727. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  728. inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  729. UnreserveEntry( CLock* const plock )
  730. {
  731. // unpin the locked bucket
  732. plock->m_bucket.m_cPin--;
  733. // update the pin count on the bucket in the index. this cannot fail
  734. // because we know the bucket exists because it is pinned
  735. CBucketTable::ERR errBT = m_bt.ErrReplaceEntry( &plock->m_lock, plock->m_bucket );
  736. COLLAssert( errBT == CBucketTable::errSuccess );
  737. }
// sets up the specified lock context in preparation for scanning all entries
// in the index by ascending key value, give or take the key uncertainty
//
// NOTE: this function will acquire a lock that must eventually be released
// via UnlockKeyPtr()
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
MoveBeforeFirst( CLock* const plock )
{
    // we will start scanning at the first bucket ID believed to be present in
    // the index (it could have been emptied by now)
    plock->m_bucket.m_id = m_idRangeFirst;
    // write lock this bucket ID in the bucket table
    m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );
    // fetch this bucket from the bucket table if it exists. if it doesn't
    // exist, the bucket will start out empty and have the above bucket ID
    plock->m_bucket.m_cPin = 0;
    plock->m_bucket.m_il.Empty();
    (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );
    // set our currency to be before the first entry in this bucket
    plock->m_pentryPrev = NULL;
    plock->m_pentry = NULL;
    plock->m_pentryNext = plock->m_bucket.m_il.PrevMost();
}
  762. // moves the specified lock context to the next key and entry pointer in the
  763. // index by ascending key value, give or take the key uncertainty. if the end
  764. // of the index is reached, errNoCurrentEntry is returned
  765. //
  766. // NOTE: this function will acquire a lock that must eventually be released
  767. // via UnlockKeyPtr()
  768. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  769. inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  770. ErrMoveNext( CLock* const plock )
  771. {
  772. // move to the next entry in this bucket
  773. plock->m_pentryPrev = NULL;
  774. plock->m_pentry = plock->m_pentry ?
  775. plock->m_bucket.m_il.Next( plock->m_pentry ) :
  776. plock->m_pentryNext;
  777. plock->m_pentryNext = NULL;
  778. // we still have no current entry
  779. if ( !plock->m_pentry )
  780. {
  781. // possibly advance to the next bucket
  782. return _ErrMoveNext( plock );
  783. }
  784. // we now have a current entry
  785. else
  786. {
  787. // we're done
  788. return errSuccess;
  789. }
  790. }
  791. // moves the specified lock context to the next key and entry pointer in the
  792. // index by descending key value, give or take the key uncertainty. if the
  793. // start of the index is reached, errNoCurrentEntry is returned
  794. //
  795. // NOTE: this function will acquire a lock that must eventually be released
  796. // via UnlockKeyPtr()
  797. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  798. inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  799. ErrMovePrev( CLock* const plock )
  800. {
  801. // move to the prev entry in this bucket
  802. plock->m_pentryNext = NULL;
  803. plock->m_pentry = plock->m_pentry ?
  804. plock->m_bucket.m_il.Prev( plock->m_pentry ) :
  805. plock->m_pentryPrev;
  806. plock->m_pentryPrev = NULL;
  807. // we still have no current entry
  808. if ( !plock->m_pentry )
  809. {
  810. // possibly advance to the prev bucket
  811. return _ErrMovePrev( plock );
  812. }
  813. // we now have a current entry
  814. else
  815. {
  816. // we're done
  817. return errSuccess;
  818. }
  819. }
// sets up the specified lock context in preparation for scanning all entries
// in the index by descending key value, give or take the key uncertainty
//
// NOTE:  this function will acquire a lock that must eventually be released
// via UnlockKeyPtr()
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
MoveAfterLast( CLock* const plock )
    {
    //  we will start scanning at the last bucket ID believed to be present in
    //  the index (it could have been emptied by now)

    plock->m_bucket.m_id = m_idRangeLast;

    //  write lock this bucket ID in the bucket table

    m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );

    //  fetch this bucket from the bucket table if it exists.  if it doesn't
    //  exist, the bucket will start out empty and have the above bucket ID
    //  (the retrieve error is deliberately ignored for that reason)

    plock->m_bucket.m_cPin = 0;
    plock->m_bucket.m_il.Empty();
    (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );

    //  set our currency to be after the last entry in this bucket

    plock->m_pentryPrev = plock->m_bucket.m_il.NextMost();
    plock->m_pentry = NULL;
    plock->m_pentryNext = NULL;
    }
// sets up the specified lock context in preparation for scanning all entries
// greater than or approximately equal to the specified key and entry pointer
// in the index by ascending key value, give or take the key uncertainty
//
// NOTE:  this function will acquire a lock that must eventually be released
// via UnlockKeyPtr()
//
// NOTE:  even though this function may land between two valid entries in
// the index, the currency will not be on one of those entries until
// ErrMoveNext() or ErrMovePrev() has been called
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
MoveBeforeKeyPtr( const CKey& key, CEntry* const pentry, CLock* const plock )
    {
    //  we will start scanning at the bucket ID formed from the given key and
    //  entry pointer

    plock->m_bucket.m_id = _IdFromKeyPtr( key, pentry );

    //  write lock this bucket ID in the bucket table

    m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );

    //  fetch this bucket from the bucket table if it exists.  if it doesn't
    //  exist, the bucket will start out empty and have the above bucket ID
    //  (the retrieve error is deliberately ignored for that reason)

    plock->m_bucket.m_cPin = 0;
    plock->m_bucket.m_il.Empty();
    (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );

    //  set our currency to be before the first entry in this bucket

    plock->m_pentryPrev = NULL;
    plock->m_pentry = NULL;
    plock->m_pentryNext = plock->m_bucket.m_il.PrevMost();
    }
// sets up the specified lock context in preparation for scanning all entries
// less than or approximately equal to the specified key and entry pointer
// in the index by descending key value, give or take the key uncertainty
//
// NOTE:  this function will acquire a lock that must eventually be released
// via UnlockKeyPtr()
//
// NOTE:  even though this function may land between two valid entries in
// the index, the currency will not be on one of those entries until
// ErrMoveNext() or ErrMovePrev() has been called
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
MoveAfterKeyPtr( const CKey& key, CEntry* const pentry, CLock* const plock )
    {
    //  we will start scanning at the bucket ID formed from the given key and
    //  entry pointer

    plock->m_bucket.m_id = _IdFromKeyPtr( key, pentry );

    //  write lock this bucket ID in the bucket table

    m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );

    //  fetch this bucket from the bucket table if it exists.  if it doesn't
    //  exist, the bucket will start out empty and have the above bucket ID
    //  (the retrieve error is deliberately ignored for that reason)

    plock->m_bucket.m_cPin = 0;
    plock->m_bucket.m_il.Empty();
    (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );

    //  set our currency to be after the last entry in this bucket

    plock->m_pentryPrev = plock->m_bucket.m_il.NextMost();
    plock->m_pentry = NULL;
    plock->m_pentryNext = NULL;
    }
  902. // transforms the given key and entry pointer into a bucket ID
  903. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  904. inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::CBucket::ID CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  905. _IdFromKeyPtr( const CKey& key, CEntry* const pentry ) const
  906. {
  907. // we compute the bucket ID such that each uncertainty range is split into
  908. // several buckets, each of which are indexed by the pointer. we do this
  909. // to provide maximum concurrency while accessing any particular range of
  910. // keys. the reason we use the pointer in the calculation is that we want
  911. // to minimize the number of times the user has to update the position of
  912. // an entry due to a key change yet we need some property of the entry
  913. // over which we can reproducibly hash
  914. const CBucket::ID iBucketKey = CBucket::ID( key >> m_shfKeyUncertainty );
  915. const CBucket::ID iBucketPtr = CBucket::ID( DWORD_PTR( pentry ) / sizeof( CEntry ) );
  916. return ( ( iBucketKey & m_maskBucketKey ) << m_shfBucketHash ) + ( iBucketPtr & m_maskBucketPtr );
  917. }
  918. // performs a wrap-around insensitive delta of a bucket ID by an offset
  919. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  920. inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::CBucket::ID CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  921. _DeltaId( const CBucket::ID id, const long did ) const
  922. {
  923. return ( id + CBucket::ID( did ) ) & m_maskBucketID;
  924. }
  925. // performs a wrap-around insensitive subtraction of two bucket IDs
  926. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  927. inline long CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  928. _SubId( const CBucket::ID id1, const CBucket::ID id2 ) const
  929. {
  930. // munge bucket IDs to fill the Most Significant Bit of a long so that we
  931. // can make a wrap-around aware subtraction
  932. const long lid1 = id1 << m_shfFillMSB;
  933. const long lid2 = id2 << m_shfFillMSB;
  934. // munge the result back into the same scale as the bucket IDs
  935. return CBucket::ID( ( lid1 - lid2 ) >> m_shfFillMSB );
  936. }
  937. // performs a wrap-around insensitive comparison of two bucket IDs
  938. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  939. inline long CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  940. _CmpId( const CBucket::ID id1, const CBucket::ID id2 ) const
  941. {
  942. // munge bucket IDs to fill the Most Significant Bit of a long so that we
  943. // can make a wrap-around aware comparison
  944. const long lid1 = id1 << m_shfFillMSB;
  945. const long lid2 = id2 << m_shfFillMSB;
  946. return lid1 - lid2;
  947. }
  948. // converts a pointer to an entry to a pointer to the invasive context
  949. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  950. inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::CInvasiveContext* CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  951. _PicFromPentry( CEntry* const pentry ) const
  952. {
  953. return (CInvasiveContext*)( (BYTE*)pentry + OffsetOfIC() );
  954. }
// tries to expand the bucket ID range by adding the new bucket ID.  if this
// cannot be done without violating the range constraints, fFalse will be
// returned
//
// NOTE:  the caller holds m_critUpdateIdRange (see _ErrInsertBucket), so the
// range members can be read and written without additional synchronization
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline BOOL CApproximateIndex< CKey, CEntry, OffsetOfIC >::
_FExpandIdRange( const CBucket::ID idNew )
    {
    //  fetch the current ID range

    const long          cidRange    = m_cidRange;
    const CBucket::ID   idFirst     = m_idRangeFirst;
    const CBucket::ID   idLast      = m_idRangeLast;
    const long          didRange    = _SubId( idLast, idFirst );

    COLLAssert( didRange >= 0 );
    COLLAssert( didRange <= m_didRangeMost );
    COLLAssert( cidRange >= 0 );
    COLLAssert( cidRange <= m_didRangeMost + 1 );

    //  if there are no entries in the ID range then simply set the ID range to
    //  exactly contain this new bucket ID

    if ( !cidRange )
        {
        m_cidRange      = 1;
        m_idRangeFirst  = idNew;
        m_idRangeLast   = idNew;

        return fTrue;
        }

    //  compute the valid range for the new first ID and new last ID.  these
    //  points and the above points form four ranges in a circular number
    //  line containing all possible bucket IDs:
    //
    //    ( idFirstMic, idFirst )       Possible extension of the ID range
    //    [ idFirst, idLast ]           The current ID range
    //    ( idLast, idLastMax )         Possible extension of the ID range
    //    [ idLastMax, idFirstMic ]     Cannot be part of the ID range
    //
    //  these ranges will never overlap due to the restriction that the
    //  ID range cannot meet or exceed half the number of bucket IDs
    //
    //  NOTE:  due to a quirk in 2's complement arithmetic where the 2's
    //  complement negative of the smallest negative number is itself, the
    //  inclusive range tests fail when idFirst == idLast and idNew ==
    //  idFirstMic == idLastMax or when idFirstMic == idLastMax and idnew ==
    //  idFirst == idLast.  we have added special logic to handle these
    //  cases correctly (the extra != 0 terms in the tests below)

    const CBucket::ID idFirstMic = _DeltaId( idFirst, -( m_didRangeMost - didRange + 1 ) );
    const CBucket::ID idLastMax  = _DeltaId( idLast, m_didRangeMost - didRange + 1 );

    //  if the new bucket ID is already part of this ID range, no change
    //  is needed beyond bumping the population count

    if (    _CmpId( idFirstMic, idNew ) != 0 && _CmpId( idLastMax, idNew ) != 0 &&
            _CmpId( idFirst, idNew ) <= 0 && _CmpId( idNew, idLast ) <= 0 )
        {
        m_cidRange = cidRange + 1;

        return fTrue;
        }

    //  if the new bucket ID cannot be a part of this ID range, fail the
    //  expansion

    if (    _CmpId( idFirst, idNew ) != 0 && _CmpId( idLast, idNew ) != 0 &&
            _CmpId( idLastMax, idNew ) <= 0 && _CmpId( idNew, idFirstMic ) <= 0 )
        {
        return fFalse;
        }

    //  compute the new ID range including this new bucket ID.  exactly one
    //  end of the range moves, depending on which extension zone idNew is in

    CBucket::ID idFirstNew  = idFirst;
    CBucket::ID idLastNew   = idLast;

    if ( _CmpId( idFirstMic, idNew ) < 0 && _CmpId( idNew, idFirst ) < 0 )
        {
        idFirstNew = idNew;
        }
    else
        {
        COLLAssert( _CmpId( idLast, idNew ) < 0 && _CmpId( idNew, idLastMax ) < 0 );

        idLastNew = idNew;
        }

    //  the new ID range should be larger than the old ID range and should
    //  include the new bucket ID

    COLLAssert( _CmpId( idFirstNew, idFirst ) <= 0 );
    COLLAssert( _CmpId( idLast, idLastNew ) <= 0 );
    COLLAssert( _SubId( idLastNew, idFirstNew ) > 0 );
    COLLAssert( _SubId( idLastNew, idFirstNew ) <= m_didRangeMost );
    COLLAssert( _CmpId( idFirstNew, idNew ) <= 0 );
    COLLAssert( _CmpId( idNew, idLastNew ) <= 0 );

    //  update the key range to include the new bucket ID

    m_cidRange      = cidRange + 1;
    m_idRangeFirst  = idFirstNew;
    m_idRangeLast   = idLastNew;

    return fTrue;
    }
// inserts a new bucket in the bucket table.  returns errKeyRangeExceeded if
// the bucket ID cannot be covered by the index's ID range, or errOutOfMemory
// if the bucket table insertion fails
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
_ErrInsertBucket( CLock* const plock )
    {
    //  try to update the bucket ID range and subrange of the index to include
    //  this new bucket ID (the critical section serializes all range updates)

    m_critUpdateIdRange.Enter();
    const BOOL fRangeUpdated = _FExpandIdRange( plock->m_bucket.m_id );
    m_critUpdateIdRange.Leave();

    //  if the update failed, fail the bucket insertion

    if ( !fRangeUpdated )
        {
        return errKeyRangeExceeded;
        }

    //  the bucket does not yet exist, so try to insert it in the bucket table

    CBucketTable::ERR err;
    if ( ( err = m_bt.ErrInsertEntry( &plock->m_lock, plock->m_bucket ) ) != CBucketTable::errSuccess )
        {
        COLLAssert( err == CBucketTable::errOutOfMemory );

        //  we cannot do the insert, so back out the population count added by
        //  _FExpandIdRange() above and fail

        m_critUpdateIdRange.Enter();
        m_cidRange--;
        m_critUpdateIdRange.Leave();

        return errOutOfMemory;
        }
    return errSuccess;
    }
// performs an entry insertion that must insert a new bucket in the bucket
// table.  on entry the caller has already linked the entry into the locked
// bucket's list and pinned the bucket; on failure both of those actions are
// undone here before returning the error to the caller
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
_ErrInsertEntry( CLock* const plock, CEntry* const pentry )
    {
    ERR err;

    //  insert this bucket in the bucket table

    if ( ( err = _ErrInsertBucket( plock ) ) != errSuccess )
        {
        COLLAssert( err == errOutOfMemory || err == errKeyRangeExceeded );

        //  we cannot insert the bucket so undo the list insertion and the
        //  pin taken by the caller, then fail

        plock->m_bucket.m_il.Remove( pentry );
        plock->m_bucket.m_cPin--;

        return err;
        }

    //  set the current entry to the newly inserted entry

    plock->m_pentryPrev = NULL;
    plock->m_pentry     = pentry;
    plock->m_pentryNext = NULL;

    return errSuccess;
    }
// performs a move next that possibly goes to the next bucket.  we won't go to
// the next bucket if we are already at the last bucket ID.  along the way,
// empty unpinned buckets are deleted and the first-ID watermark is advanced
// so later scans skip them
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
_ErrMoveNext( CLock* const plock )
    {
    //  set our currency to be after the last entry in this bucket

    plock->m_pentryPrev = plock->m_bucket.m_il.NextMost();
    plock->m_pentry     = NULL;
    plock->m_pentryNext = NULL;

    //  scan forward until we have a current entry or we are at or beyond the
    //  last bucket ID

    while ( !plock->m_pentry && _CmpId( plock->m_bucket.m_id, m_idRangeLast ) < 0 )
        {
        //  the current (exhausted) bucket isn't pinned, so it can be cleaned up

        if ( !plock->m_bucket.m_cPin )
            {
            //  delete this empty bucket (if it exists in the bucket table)

            const CBucketTable::ERR err = m_bt.ErrDeleteEntry( &plock->m_lock );
            COLLAssert(     err == CBucketTable::errSuccess ||
                            err == CBucketTable::errNoCurrentEntry );

            //  advance the first bucket ID by one so that subsequent searches
            //  do not scan through this empty bucket unnecessarily

            m_critUpdateIdRange.Enter();

            if ( m_idRangeFirst == plock->m_bucket.m_id )
                {
                m_idRangeFirst = _DeltaId( m_idRangeFirst, 1 );
                }
            if ( err == CBucketTable::errSuccess )
                {
                m_cidRange--;
                }

            m_critUpdateIdRange.Leave();
            }

        //  unlock the current bucket ID in the bucket table

        m_bt.WriteUnlockKey( &plock->m_lock );

        //  this bucket ID may not be in the valid bucket ID range
        //  (the unsynchronized reads are a cheap first test; they are
        //  re-checked under the critical section below)

        if (    _CmpId( m_idRangeFirst, plock->m_bucket.m_id ) > 0 ||
                _CmpId( plock->m_bucket.m_id, m_idRangeLast ) > 0 )
            {
            //  we can get the critical section protecting the bucket ID range
            //  (FTryEnter avoids blocking the scan on a contended range update)

            if ( m_critUpdateIdRange.FTryEnter() )
                {
                //  this bucket ID is not in the valid bucket ID range

                if (    _CmpId( m_idRangeFirst, plock->m_bucket.m_id ) > 0 ||
                        _CmpId( plock->m_bucket.m_id, m_idRangeLast ) > 0 )
                    {
                    //  go to the first valid bucket ID

                    plock->m_bucket.m_id = m_idRangeFirst;
                    }

                //  this bucket ID is in the valid bucket ID range

                else
                    {
                    //  advance to the next bucket ID

                    plock->m_bucket.m_id = _DeltaId( plock->m_bucket.m_id, 1 );
                    }

                m_critUpdateIdRange.Leave();
                }

            //  we cannot get the critical section protecting the bucket ID range

            else
                {
                //  advance to the next bucket ID

                plock->m_bucket.m_id = _DeltaId( plock->m_bucket.m_id, 1 );
                }
            }

        //  this bucket may be in the valid bucket ID range

        else
            {
            //  advance to the next bucket ID

            plock->m_bucket.m_id = _DeltaId( plock->m_bucket.m_id, 1 );
            }

        //  write lock this bucket ID in the bucket table

        m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );

        //  fetch this bucket from the bucket table if it exists.  if it doesn't
        //  exist, the bucket will start out empty and have the above bucket ID

        plock->m_bucket.m_cPin = 0;
        plock->m_bucket.m_il.Empty();
        (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );

        //  set our currency to be the first entry in this bucket

        plock->m_pentryPrev = NULL;
        plock->m_pentry     = plock->m_bucket.m_il.PrevMost();
        plock->m_pentryNext = NULL;
        }

    //  return the status of our currency

    return plock->m_pentry ? errSuccess : errNoCurrentEntry;
    }
// performs a move prev that goes possibly to the prev bucket.  we won't go to
// the prev bucket if we are already at the first bucket ID.  along the way,
// empty unpinned buckets are deleted and the last-ID watermark is retreated
// so later scans skip them
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
_ErrMovePrev( CLock* const plock )
    {
    //  set our currency to be before the first entry in this bucket

    plock->m_pentryPrev = NULL;
    plock->m_pentry     = NULL;
    plock->m_pentryNext = plock->m_bucket.m_il.PrevMost();

    //  scan backward until we have a current entry or we are at or before the
    //  first bucket ID

    while ( !plock->m_pentry && _CmpId( m_idRangeFirst, plock->m_bucket.m_id ) < 0 )
        {
        //  the current (exhausted) bucket isn't pinned, so it can be cleaned up

        if ( !plock->m_bucket.m_cPin )
            {
            //  delete this empty bucket (if it exists in the bucket table)

            const CBucketTable::ERR err = m_bt.ErrDeleteEntry( &plock->m_lock );
            COLLAssert(     err == CBucketTable::errSuccess ||
                            err == CBucketTable::errNoCurrentEntry );

            //  retreat the last bucket ID by one so that subsequent searches
            //  do not scan through this empty bucket unnecessarily

            m_critUpdateIdRange.Enter();

            if ( m_idRangeLast == plock->m_bucket.m_id )
                {
                m_idRangeLast = _DeltaId( m_idRangeLast, -1 );
                }
            if ( err == CBucketTable::errSuccess )
                {
                m_cidRange--;
                }

            m_critUpdateIdRange.Leave();
            }

        //  unlock the current bucket ID in the bucket table

        m_bt.WriteUnlockKey( &plock->m_lock );

        //  this bucket ID may not be in the valid bucket ID range
        //  (the unsynchronized reads are a cheap first test; they are
        //  re-checked under the critical section below)

        if (    _CmpId( m_idRangeFirst, plock->m_bucket.m_id ) > 0 ||
                _CmpId( plock->m_bucket.m_id, m_idRangeLast ) > 0 )
            {
            //  we can get the critical section protecting the bucket ID range
            //  (FTryEnter avoids blocking the scan on a contended range update)

            if ( m_critUpdateIdRange.FTryEnter() )
                {
                //  this bucket ID is not in the valid bucket ID range

                if (    _CmpId( m_idRangeFirst, plock->m_bucket.m_id ) > 0 ||
                        _CmpId( plock->m_bucket.m_id, m_idRangeLast ) > 0 )
                    {
                    //  go to the last valid bucket ID

                    plock->m_bucket.m_id = m_idRangeLast;
                    }

                //  this bucket ID is in the valid bucket ID range

                else
                    {
                    //  retreat to the previous bucket ID

                    plock->m_bucket.m_id = _DeltaId( plock->m_bucket.m_id, -1 );
                    }

                m_critUpdateIdRange.Leave();
                }

            //  we cannot get the critical section protecting the bucket ID range

            else
                {
                //  retreat to the previous bucket ID

                plock->m_bucket.m_id = _DeltaId( plock->m_bucket.m_id, -1 );
                }
            }

        //  this bucket may be in the valid bucket ID range

        else
            {
            //  retreat to the previous bucket ID

            plock->m_bucket.m_id = _DeltaId( plock->m_bucket.m_id, -1 );
            }

        //  write lock this bucket ID in the bucket table

        m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );

        //  fetch this bucket from the bucket table if it exists.  if it doesn't
        //  exist, the bucket will start out empty and have the above bucket ID

        plock->m_bucket.m_cPin = 0;
        plock->m_bucket.m_il.Empty();
        (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );

        //  set our currency to be the last entry in this bucket

        plock->m_pentryPrev = NULL;
        plock->m_pentry     = plock->m_bucket.m_il.NextMost();
        plock->m_pentryNext = NULL;
        }

    //  return the status of our currency

    return plock->m_pentry ? errSuccess : errNoCurrentEntry;
    }
// declares a typedef named "Typedef" for CApproximateIndex< CKey, CEntry,
// OffsetOfIC > and emits the out-of-line CBucketTable::CKeyEntry member
// definitions that the index requires:  Hash() over a bucket ID, Hash() over
// a stored entry, key matching, and entry get/set.  instantiate once at
// namespace scope for each concrete index type
//
// NOTE:  no comments can appear inside the macro body because the line
// continuations would be broken
#define DECLARE_APPROXIMATE_INDEX( CKey, CEntry, OffsetOfIC, Typedef )  \
                                                                        \
typedef CApproximateIndex< CKey, CEntry, OffsetOfIC > Typedef;          \
                                                                        \
inline ULONG_PTR Typedef::CBucketTable::CKeyEntry::                     \
Hash( const CBucket::ID& id )                                           \
    {                                                                   \
    return id;                                                          \
    }                                                                   \
                                                                        \
inline ULONG_PTR Typedef::CBucketTable::CKeyEntry::                     \
Hash() const                                                            \
    {                                                                   \
    return m_entry.m_id;                                                \
    }                                                                   \
                                                                        \
inline BOOL Typedef::CBucketTable::CKeyEntry::                          \
FEntryMatchesKey( const CBucket::ID& id ) const                         \
    {                                                                   \
    return m_entry.m_id == id;                                          \
    }                                                                   \
                                                                        \
inline void Typedef::CBucketTable::CKeyEntry::                          \
SetEntry( const CBucket& bucket )                                       \
    {                                                                   \
    m_entry = bucket;                                                   \
    }                                                                   \
                                                                        \
inline void Typedef::CBucketTable::CKeyEntry::                          \
GetEntry( CBucket* const pbucket ) const                                \
    {                                                                   \
    *pbucket = m_entry;                                                 \
    }
//////////////////////////////////////////////////////////////////////////////////////////
//  CPool
//
//  Implements a pool of objects that can be inserted and deleted quickly in arbitrary
//  order.
//
//  CObject     = class representing objects in the pool.  each class must contain
//                storage for a CInvasiveContext for embedded pool state
//  OffsetOfIC  = inline function returning the offset of the CInvasiveContext
//                contained in the CObject
template< class CObject, PfnOffsetOf OffsetOfIC >
class CPool
    {
    public:

        //  class containing context needed per CObject

        class CInvasiveContext
            {
            public:

                CInvasiveContext() {}
                ~CInvasiveContext() {}

                //  offset of the embedded list element relative to the CObject

                static SIZE_T OffsetOfILE() { return OffsetOfIC() + OffsetOf( CInvasiveContext, m_ile ); }

            private:

                //  list element linking this object into a bucket's invasive list

                CInvasiveList< CObject, OffsetOfILE >::CElement m_ile;
            };

        //  API Error Codes

        enum ERR
            {
            errSuccess,
            errInvalidParameter,
            errOutOfMemory,
            errObjectNotFound,
            errOutOfObjects,
            errNoCurrentObject,
            };

        //  API Lock Context (defined below)

        class CLock;

    public:

        //  ctor / dtor

        CPool();
        ~CPool();

        //  API

        ERR ErrInit( const double dblSpeedSizeTradeoff );
        void Term();

        void Insert( CObject* const pobj );
        ERR ErrRemove( CObject** const ppobj, const BOOL fWait = fTrue );

        void BeginPoolScan( CLock* const plock );
        ERR ErrGetNextObject( CLock* const plock, CObject** const ppobj );
        ERR ErrRemoveCurrentObject( CLock* const plock );
        void EndPoolScan( CLock* const plock );

        //  statistics

        DWORD Cobject();
        DWORD CWaiter();
        DWORD CRemove();
        DWORD CRemoveWait();

    private:

        //  bucket used for containing objects in the pool (one per CPU, see
        //  ErrInit(), to reduce lock contention and cache sloshing)

        class CBucket
            {
            public:

                CBucket() : m_crit( CLockBasicInfo( CSyncBasicInfo( "CPool::CBucket::m_crit" ), 0, 0 ) ) {}
                ~CBucket() {}

            public:

                //  protects m_il

                CCriticalSection    m_crit;

                //  invasive list of the objects currently stored in this bucket

                CInvasiveList< CObject, CInvasiveContext::OffsetOfILE > m_il;

                //  padding -- presumably sizes the bucket to a cache-line
                //  multiple; TODO confirm against cbCacheLine

                BYTE                m_rgbReserved[20];
            };

    public:

        //  API Lock Context

        class CLock
            {
            public:

                CLock() {}
                ~CLock() {}

            private:

                friend class CPool< CObject, OffsetOfIC >;

                //  bucket / object currency used while scanning the pool

                CBucket*    m_pbucket;
                CObject*    m_pobj;
                CObject*    m_pobjNext;
            };

    private:

        void _GetNextObject( CLock* const plock );
        static void* _PvMEMIAlign( void* const pv, const size_t cbAlign );
        static void* _PvMEMIUnalign( void* const pv );
        static void* _PvMEMAlloc( const size_t cbSize, const size_t cbAlign = 1 );
        static void _MEMFree( void* const pv );

    private:

        //  never updated (set once by ErrInit())

        DWORD           m_cbucket;      //  count of buckets in m_rgbucket
        CBucket*        m_rgbucket;     //  bucket array, one bucket per CPU
        BYTE            m_rgbReserved1[24];

        //  commonly updated

        CSemaphore      m_semObjectCount;   //  one count per object in the pool
        DWORD           m_cRemove;          //  statistics:  removals
        DWORD           m_cRemoveWait;      //  statistics:  removals that waited
        BYTE            m_rgbReserved2[20];
    };
// ctor
//
// NOTE:  only the object count semaphore is constructed here; the bucket
// array and statistics are set up by ErrInit()
template< class CObject, PfnOffsetOf OffsetOfIC >
inline CPool< CObject, OffsetOfIC >::
CPool()
    :   m_semObjectCount( CSyncBasicInfo( "CPool::m_semObjectCount" ) )
    {
    }
// dtor
//
// NOTE:  the dtor does not release any resources; Term() must be called
// explicitly to free the bucket array
template< class CObject, PfnOffsetOf OffsetOfIC >
inline CPool< CObject, OffsetOfIC >::
~CPool()
    {
    //  nop
    }
  1404. // initializes the pool using the given parameters. if the pool cannot be
  1405. // initialized, errOutOfMemory is returned
  1406. template< class CObject, PfnOffsetOf OffsetOfIC >
  1407. inline CPool< CObject, OffsetOfIC >::ERR CPool< CObject, OffsetOfIC >::
  1408. ErrInit( const double dblSpeedSizeTradeoff )
  1409. {
  1410. // validate all parameters
  1411. if ( dblSpeedSizeTradeoff < 0.0 || dblSpeedSizeTradeoff > 1.0 )
  1412. {
  1413. return errInvalidParameter;
  1414. }
  1415. // allocate our bucket array, one per CPU, on a cache-line boundary
  1416. m_cbucket = OSSyncGetProcessorCount();
  1417. const SIZE_T cbrgbucket = sizeof( CBucket ) * m_cbucket;
  1418. if ( !( m_rgbucket = (CBucket*)_PvMEMAlloc( cbrgbucket, cbCacheLine ) ) )
  1419. {
  1420. return errOutOfMemory;
  1421. }
  1422. // setup our bucket array
  1423. for ( DWORD ibucket = 0; ibucket < m_cbucket; ibucket++ )
  1424. {
  1425. new( m_rgbucket + ibucket ) CBucket;
  1426. }
  1427. // init out stats
  1428. m_cRemove = 0;
  1429. m_cRemoveWait = 0;
  1430. return errSuccess;
  1431. }
  1432. // terminates the pool. this function can be called even if the pool has never
  1433. // been initialized or is only partially initialized
  1434. //
  1435. // NOTE: any data stored in the pool at this time will be lost!
  1436. template< class CObject, PfnOffsetOf OffsetOfIC >
  1437. inline void CPool< CObject, OffsetOfIC >::
  1438. Term()
  1439. {
  1440. // free our bucket array
  1441. if ( m_rgbucket )
  1442. {
  1443. for ( DWORD ibucket = 0; ibucket < m_cbucket; ibucket++ )
  1444. {
  1445. m_rgbucket[ ibucket ].~CBucket();
  1446. }
  1447. _MEMFree( m_rgbucket );
  1448. m_rgbucket = NULL;
  1449. }
  1450. // remove any free counts on our semaphore
  1451. while ( m_semObjectCount.FTryAcquire() )
  1452. {
  1453. }
  1454. }
// inserts the given object into the pool
template< class CObject, PfnOffsetOf OffsetOfIC >
inline void CPool< CObject, OffsetOfIC >::
Insert( CObject* const pobj )
    {
    //  add the given object to the bucket for this CPU.  we use one bucket per
    //  CPU to reduce cache sloshing.  if we cannot lock the bucket for this CPU,
    //  we will try another bucket instead of blocking.  only on the last
    //  candidate bucket do we block on the lock, which guarantees termination

    DWORD ibucketBase;
    DWORD ibucket;

    ibucketBase = OSSyncGetCurrentProcessor();
    ibucket     = 0;

    do  {
        //  candidate buckets are visited starting at this CPU's bucket and
        //  wrapping around the array

        CBucket* const pbucket = m_rgbucket + ( ibucketBase + ibucket++ ) % m_cbucket;
        if ( ibucket < m_cbucket )
            {
            //  not the last candidate:  skip it if the lock is contended

            if ( !pbucket->m_crit.FTryEnter() )
                {
                continue;
                }
            }
        else
            {
            //  last candidate:  block until the lock is available

            pbucket->m_crit.Enter();
            }
        pbucket->m_il.InsertAsNextMost( pobj );
        pbucket->m_crit.Leave();
        break;
        }
    while ( fTrue );

    //  increment the object count (this is what ErrRemove() acquires)

    m_semObjectCount.Release();
    }
// removes an object from the pool, optionally waiting until an object can be
// removed. if an object can be removed, errSuccess is returned. if an
// object cannot be immediately removed and waiting is not desired,
// errOutOfObjects will be returned
template< class CObject, PfnOffsetOf OffsetOfIC >
inline CPool< CObject, OffsetOfIC >::ERR CPool< CObject, OffsetOfIC >::
ErrRemove( CObject** const ppobj, const BOOL fWait )
{
    // reserve an object for removal from the pool by acquiring a count on the
    // object count semaphore. if we get a count, we are allowed to remove an
    // object from the pool. acquire a count in the requested mode, i.e. wait
    // or do not wait for a count
    if ( !m_semObjectCount.FTryAcquire() )
    {
        if ( !fWait )
        {
            return errOutOfObjects;
        }
        else
        {
            // wait indefinitely for a count; the return value is ignored
            // because an infinite wait can only come back with a count
            m_cRemoveWait++;
            m_semObjectCount.FAcquire( cmsecInfinite );
        }
    }
    // we are now entitled to an object from the pool, so scan all buckets for
    // an object to remove until we find one. start with the bucket for the
    // current CPU to reduce cache sloshing
    DWORD ibucketBase;
    DWORD ibucket;
    ibucketBase = OSSyncGetCurrentProcessor();
    ibucket = 0;
    *ppobj = NULL;
    do {
        CBucket* const pbucket = m_rgbucket + ( ibucketBase + ibucket++ ) % m_cbucket;
        // unlocked peek: skip buckets that look empty (rechecked under lock)
        if ( pbucket->m_il.FEmpty() )
        {
            continue;
        }
        if ( ibucket < m_cbucket )
        {
            // not the last candidate this pass: try-lock only
            if ( !pbucket->m_crit.FTryEnter() )
            {
                continue;
            }
        }
        else
        {
            // last candidate this pass: block for the lock
            pbucket->m_crit.Enter();
        }
        // recheck emptiness under the lock; another thread may have raced us
        if ( !pbucket->m_il.FEmpty() )
        {
            *ppobj = pbucket->m_il.PrevMost();
            pbucket->m_il.Remove( *ppobj );
        }
        pbucket->m_crit.Leave();
    }
    // keep scanning (wrapping around) until we get an object; the semaphore
    // count guarantees one exists or will be inserted
    while ( *ppobj == NULL );
    // return the object
    m_cRemove++;
    return errSuccess;
}
// sets up the specified lock context in preparation for scanning all objects
// in the pool
//
// NOTE: this function will acquire a lock that must eventually be released
// via EndPoolScan()
template< class CObject, PfnOffsetOf OffsetOfIC >
inline void CPool< CObject, OffsetOfIC >::
BeginPoolScan( CLock* const plock )
{
    // we will start in the first bucket
    plock->m_pbucket = m_rgbucket;
    // lock this bucket
    plock->m_pbucket->m_crit.Enter();
    // set our currency to be before the first object in this bucket
    plock->m_pobj = NULL;
    plock->m_pobjNext = plock->m_pbucket->m_il.PrevMost();
}
// retrieves the next object in the pool locked by the specified lock context.
// if there are no more objects to be scanned, errNoCurrentObject is returned
template< class CObject, PfnOffsetOf OffsetOfIC >
inline CPool< CObject, OffsetOfIC >::ERR CPool< CObject, OffsetOfIC >::
ErrGetNextObject( CLock* const plock, CObject** const ppobj )
{
    // move to the next object in this bucket. if m_pobj is NULL we instead
    // resume at m_pobjNext, which was saved when the previous current object
    // was removed (see ErrRemoveCurrentObject) or when the scan began
    plock->m_pobj = plock->m_pobj ?
        plock->m_pbucket->m_il.Next( plock->m_pobj ) :
        plock->m_pobjNext;
    plock->m_pobjNext = NULL;
    // we still have no current object
    if ( !plock->m_pobj )
    {
        // possibly advance to the next bucket
        _GetNextObject( plock );
    }
    // return the current object, if any
    *ppobj = plock->m_pobj;
    return plock->m_pobj ? errSuccess : errNoCurrentObject;
}
// removes the current object in the pool locked by the specified lock context
// from the pool. if there is no current object, errNoCurrentObject will be
// returned
template< class CObject, PfnOffsetOf OffsetOfIC >
inline CPool< CObject, OffsetOfIC >::ERR CPool< CObject, OffsetOfIC >::
ErrRemoveCurrentObject( CLock* const plock )
{
    // there is a current object and we can remove that object from the pool
    //
    // NOTE: we must get a count from the semaphore to remove an object from
    // the pool
    if ( plock->m_pobj && m_semObjectCount.FTryAcquire() )
    {
        // save the current object's next pointer so that we can recover our
        // currency when it is deleted
        plock->m_pobjNext = plock->m_pbucket->m_il.Next( plock->m_pobj );
        // delete the current object from this bucket
        plock->m_pbucket->m_il.Remove( plock->m_pobj );
        // set our currency to no current object
        plock->m_pobj = NULL;
        return errSuccess;
    }
    // there is no current object (or no semaphore count was available)
    else
    {
        // return no current object
        return errNoCurrentObject;
    }
}
// ends the scan of all objects in the pool associated with the specified lock
// context and releases all locks held
template< class CObject, PfnOffsetOf OffsetOfIC >
inline void CPool< CObject, OffsetOfIC >::
EndPoolScan( CLock* const plock )
{
    // unlock the current bucket (the only lock a scan holds at any one time)
    plock->m_pbucket->m_crit.Leave();
}
// returns the current count of objects in the pool
template< class CObject, PfnOffsetOf OffsetOfIC >
inline DWORD CPool< CObject, OffsetOfIC >::
Cobject()
{
    // the number of objects in the pool is equal to the available count on the
    // object count semaphore
    return m_semObjectCount.CAvail();
}
// returns the number of waiters for objects in the pool
template< class CObject, PfnOffsetOf OffsetOfIC >
inline DWORD CPool< CObject, OffsetOfIC >::
CWaiter()
{
    // the number of waiters on the pool is equal to the waiter count on the
    // object count semaphore
    return m_semObjectCount.CWait();
}
// returns the number of times an object has been successfully removed from the
// pool (statistic maintained by ErrRemove)
template< class CObject, PfnOffsetOf OffsetOfIC >
inline DWORD CPool< CObject, OffsetOfIC >::
CRemove()
{
    return m_cRemove;
}
// returns the number of waits that occurred while removing objects from the
// pool (statistic maintained by ErrRemove)
template< class CObject, PfnOffsetOf OffsetOfIC >
inline DWORD CPool< CObject, OffsetOfIC >::
CRemoveWait()
{
    return m_cRemoveWait;
}
// performs a move next that possibly goes to the next bucket. we won't go to
// the next bucket if we are already at the last bucket
template< class CObject, PfnOffsetOf OffsetOfIC >
inline void CPool< CObject, OffsetOfIC >::
_GetNextObject( CLock* const plock )
{
    // set our currency to be after the last object in this bucket
    plock->m_pobj = NULL;
    plock->m_pobjNext = NULL;
    // scan forward until we have a current object or we are at or beyond the
    // last bucket. at most one bucket lock is held at any time: the current
    // bucket is unlocked before the next one is locked
    while ( !plock->m_pobj && plock->m_pbucket < m_rgbucket + m_cbucket - 1 )
    {
        // unlock the current bucket
        plock->m_pbucket->m_crit.Leave();
        // advance to the next bucket
        plock->m_pbucket++;
        // lock this bucket
        plock->m_pbucket->m_crit.Enter();
        // set our currency to be the first object in this bucket (may be NULL
        // if the bucket is empty, in which case we keep scanning)
        plock->m_pobj = plock->m_pbucket->m_il.PrevMost();
        plock->m_pobjNext = NULL;
    }
}
  1683. // calculate the address of the aligned block and store its offset (for free)
  1684. template< class CObject, PfnOffsetOf OffsetOfIC >
  1685. inline void* CPool< CObject, OffsetOfIC >::
  1686. _PvMEMIAlign( void* const pv, const size_t cbAlign )
  1687. {
  1688. // round up to the nearest cache line
  1689. // NOTE: this formula always forces an offset of at least 1 byte
  1690. const ULONG_PTR ulp = ULONG_PTR( pv );
  1691. const ULONG_PTR ulpAligned = ( ( ulp + cbAlign ) / cbAlign ) * cbAlign;
  1692. const ULONG_PTR ulpOffset = ulpAligned - ulp;
  1693. COLLAssert( ulpOffset > 0 );
  1694. COLLAssert( ulpOffset <= cbAlign );
  1695. COLLAssert( ulpOffset == BYTE( ulpOffset ) ); // must fit into a single BYTE
  1696. // store the offset
  1697. BYTE *const pbAligned = (BYTE*)ulpAligned;
  1698. pbAligned[ -1 ] = BYTE( ulpOffset );
  1699. // return the aligned block
  1700. return (void*)pbAligned;
  1701. }
  1702. // retrieve the offset of the real block being freed
  1703. template< class CObject, PfnOffsetOf OffsetOfIC >
  1704. inline void* CPool< CObject, OffsetOfIC >::
  1705. _PvMEMIUnalign( void* const pv )
  1706. {
  1707. // read the offset of the real block
  1708. BYTE *const pbAligned = (BYTE*)pv;
  1709. const BYTE bOffset = pbAligned[ -1 ];
  1710. COLLAssert( bOffset > 0 );
  1711. // return the real unaligned block
  1712. return (void*)( pbAligned - bOffset );
  1713. }
  1714. template< class CObject, PfnOffsetOf OffsetOfIC >
  1715. inline void* CPool< CObject, OffsetOfIC >::
  1716. _PvMEMAlloc( const size_t cbSize, const size_t cbAlign )
  1717. {
  1718. void* const pv = new BYTE[ cbSize + cbAlign ];
  1719. if ( pv )
  1720. {
  1721. return _PvMEMIAlign( pv, cbAlign );
  1722. }
  1723. return NULL;
  1724. }
// frees a block allocated by _PvMEMAlloc. NULL is tolerated and ignored
template< class CObject, PfnOffsetOf OffsetOfIC >
inline void CPool< CObject, OffsetOfIC >::
_MEMFree( void* const pv )
{
    if ( pv )
    {
        // translate the aligned pointer back to the real allocation before
        // handing it to operator delete[]
        delete [] _PvMEMIUnalign( pv );
    }
}
////////////////////////////////////////////////////////////////////////////////
// CArray
//
// Implements a dynamically resized array of entries stored for efficient
// iteration.
//
// CEntry = class representing entries stored in the array
//
// NOTE: the user must provide CEntry::CEntry() and CEntry::operator=()
template< class CEntry >
class CArray
{
    public:
        // API Error Codes
        enum ERR
        {
            errSuccess,
            errInvalidParameter,
            errOutOfMemory,
        };
    public:
        CArray();
        CArray( const size_t centry, CEntry* const rgentry );
        ~CArray();
        ERR ErrClone( const CArray& array );
        ERR ErrSetSize( const size_t centry );
        ERR ErrSetEntry( const size_t ientry, const CEntry& entry );
        void SetEntry( const CEntry* const pentry, const CEntry& entry );
        size_t Size() const;
        const CEntry* Entry( const size_t ientry ) const;
    private:
        size_t m_centry;    // current number of entries
        CEntry* m_rgentry;  // entry storage (owned only when !m_fInPlace)
        BOOL m_fInPlace;    // fTrue = storage supplied by caller, never freed
};
// constructs an empty array that owns no storage
template< class CEntry >
inline CArray< CEntry >::
CArray()
    : m_centry( 0 ),
      m_rgentry( NULL ),
      m_fInPlace( fTrue )
{
}
// constructs an array over caller-supplied storage ("in place"); the array
// will not free rgentry
template< class CEntry >
inline CArray< CEntry >::
CArray( const size_t centry, CEntry* const rgentry )
    : m_centry( centry ),
      m_rgentry( rgentry ),
      m_fInPlace( fTrue )
{
}
// destroys the array, releasing any heap storage it owns
template< class CEntry >
inline CArray< CEntry >::
~CArray()
{
    ErrSetSize( 0 );
}
  1791. // clones an existing array
  1792. template< class CEntry >
  1793. inline CArray< CEntry >::ERR CArray< CEntry >::
  1794. ErrClone( const CArray& array )
  1795. {
  1796. CEntry* rgentryNew = NULL;
  1797. size_t ientryCopy = 0;
  1798. if ( array.m_centry )
  1799. {
  1800. if ( !( rgentryNew = new CEntry[ array.m_centry ] ) )
  1801. {
  1802. return errOutOfMemory;
  1803. }
  1804. }
  1805. for ( ientryCopy = 0; ientryCopy < array.m_centry; ientryCopy++ )
  1806. {
  1807. rgentryNew[ ientryCopy ] = array.m_rgentry[ ientryCopy ];
  1808. }
  1809. if ( !m_fInPlace )
  1810. {
  1811. delete [] m_rgentry;
  1812. }
  1813. m_centry = array.m_centry;
  1814. m_rgentry = rgentryNew;
  1815. m_fInPlace = fFalse;
  1816. rgentryNew = NULL;
  1817. delete [] rgentryNew;
  1818. return errSuccess;
  1819. }
  1820. // sets the size of the array
  1821. template< class CEntry >
  1822. inline CArray< CEntry >::ERR CArray< CEntry >::
  1823. ErrSetSize( const size_t centry )
  1824. {
  1825. CEntry* rgentryNew = NULL;
  1826. size_t ientryCopy = 0;
  1827. if ( Size() != centry )
  1828. {
  1829. if ( centry )
  1830. {
  1831. if ( !( rgentryNew = new CEntry[ centry ] ) )
  1832. {
  1833. return errOutOfMemory;
  1834. }
  1835. for ( ientryCopy = 0; ientryCopy < Size(); ientryCopy++ )
  1836. {
  1837. rgentryNew[ ientryCopy ] = *Entry( ientryCopy );
  1838. }
  1839. if ( !m_fInPlace )
  1840. {
  1841. delete [] m_rgentry;
  1842. }
  1843. m_centry = centry;
  1844. m_rgentry = rgentryNew;
  1845. m_fInPlace = fFalse;
  1846. rgentryNew = NULL;
  1847. }
  1848. else
  1849. {
  1850. if ( !m_fInPlace )
  1851. {
  1852. delete [] m_rgentry;
  1853. }
  1854. m_centry = 0;
  1855. m_rgentry = NULL;
  1856. m_fInPlace = fTrue;
  1857. }
  1858. }
  1859. delete [] rgentryNew;
  1860. return errSuccess;
  1861. }
  1862. // sets the Nth entry of the array, growing the array if necessary
  1863. template< class CEntry >
  1864. inline CArray< CEntry >::ERR CArray< CEntry >::
  1865. ErrSetEntry( const size_t ientry, const CEntry& entry )
  1866. {
  1867. ERR err = errSuccess;
  1868. size_t centryReq = ientry + 1;
  1869. if ( Size() < centryReq )
  1870. {
  1871. if ( ( err = ErrSetSize( centryReq ) ) != errSuccess )
  1872. {
  1873. return err;
  1874. }
  1875. }
  1876. SetEntry( Entry( ientry ), entry );
  1877. return errSuccess;
  1878. }
// sets an existing entry of the array
//
// pentry must point into this array (as returned by Entry()); the const_cast
// is safe because the array itself is non-const here
template< class CEntry >
inline void CArray< CEntry >::
SetEntry( const CEntry* const pentry, const CEntry& entry )
{
    *const_cast< CEntry* >( pentry ) = entry;
}
// returns the current size of the array
template< class CEntry >
inline size_t CArray< CEntry >::
Size() const
{
    return m_centry;
}
  1893. // returns a pointer to the Nth entry of the array or NULL if it is empty
  1894. template< class CEntry >
  1895. inline const CEntry* CArray< CEntry >::
  1896. Entry( const size_t ientry ) const
  1897. {
  1898. return ientry < m_centry ? m_rgentry + ientry : NULL;
  1899. }
////////////////////////////////////////////////////////////////////////////////
// CTable
//
// Implements a table of entries identified by a key and stored for efficient
// lookup and iteration. The keys need not be unique.
//
// CKey = class representing keys used to identify entries
// CEntry = class representing entries stored in the table
//
// NOTE: the user must implement the CKeyEntry::Cmp() functions and provide
// CEntry::CEntry() and CEntry::operator=()
template< class CKey, class CEntry >
class CTable
{
    public:
        class CKeyEntry
            : public CEntry
        {
            public:
                // Cmp() return values:
                //
                // < 0 this entry < specified entry / key
                // = 0 this entry = specified entry / key
                // > 0 this entry > specified entry / key
                int Cmp( const CKeyEntry& keyentry ) const;
                int Cmp( const CKey& key ) const;
        };
        // API Error Codes
        enum ERR
        {
            errSuccess,
            errInvalidParameter,
            errOutOfMemory,
            errKeyChange,
        };
    public:
        CTable();
        CTable( const size_t centry, CEntry* const rgentry, const BOOL fInOrder = fFalse );
        ERR ErrLoad( const size_t centry, const CEntry* const rgentry );
        ERR ErrClone( const CTable& table );
        ERR ErrUpdateEntry( const CEntry* const pentry, const CEntry& entry );
        size_t Size() const;
        const CEntry* Entry( const size_t ientry ) const;
        const CEntry* SeekLT( const CKey& key ) const;
        const CEntry* SeekLE( const CKey& key ) const;
        const CEntry* SeekEQ( const CKey& key ) const;
        const CEntry* SeekHI( const CKey& key ) const;
        const CEntry* SeekGE( const CKey& key ) const;
        const CEntry* SeekGT( const CKey& key ) const;
    private:
        // member function pointer type for the search strategy (linear for
        // small tables, binary for large ones)
        typedef size_t (CTable< CKey, CEntry >::*PfnSearch)( const CKey& key, const BOOL fHigh ) const;
    private:
        const CKeyEntry& _Entry( const size_t ikeyentry ) const;
        void _SetEntry( const size_t ikeyentry, const CKeyEntry& keyentry );
        void _SwapEntry( const size_t ikeyentry1, const size_t ikeyentry2 );
        size_t _LinearSearch( const CKey& key, const BOOL fHigh ) const;
        size_t _BinarySearch( const CKey& key, const BOOL fHigh ) const;
        void _InsertionSort( const size_t ikeyentryMinIn, const size_t ikeyentryMaxIn );
        void _QuickSort( const size_t ikeyentryMinIn, const size_t ikeyentryMaxIn );
    private:
        CArray< CKeyEntry > m_arrayKeyEntry;    // sorted entry storage
        PfnSearch m_pfnSearch;                  // active search strategy
};
// constructs an empty table; linear search is appropriate for a small table
template< class CKey, class CEntry >
inline CTable< CKey, CEntry >::
CTable()
    : m_pfnSearch( _LinearSearch )
{
}
// loads the table over an existing array of entries. if the entries are not
// in order then they will be sorted in place
template< class CKey, class CEntry >
inline CTable< CKey, CEntry >::
CTable( const size_t centry, CEntry* const rgentry, const BOOL fInOrder )
    : m_arrayKeyEntry( centry, reinterpret_cast< CKeyEntry* >( rgentry ) )
{
    // compute log2( Size() ) + 1 by counting halvings
    size_t n;
    size_t log2n;
    for ( n = Size(), log2n = 0; n; n = n / 2, log2n++ );
    // heuristic: binary search (and quicksort) pay off only when the table is
    // large relative to log2 of its size; otherwise linear search and
    // insertion sort are cheaper
    if ( 2 * log2n < Size() )
    {
        if ( !fInOrder )
        {
            _QuickSort( 0, Size() );
        }
        m_pfnSearch = _BinarySearch;
    }
    else
    {
        if ( !fInOrder )
        {
            _InsertionSort( 0, Size() );
        }
        m_pfnSearch = _LinearSearch;
    }
}
// loads an array of entries into the table. additional entries may also be
// loaded into the table via this function
template< class CKey, class CEntry >
inline CTable< CKey, CEntry >::ERR CTable< CKey, CEntry >::
ErrLoad( const size_t centry, const CEntry* const rgentry )
{
    CArray< CKeyEntry >::ERR err = CArray< CKeyEntry >::errSuccess;
    size_t ientry = 0;
    size_t ientryMin = Size();
    size_t ientryMax = Size() + centry;
    const CKeyEntry* rgkeyentry = reinterpret_cast< const CKeyEntry* >( rgentry );
    // grow the underlying array to hold the new entries
    if ( ( err = m_arrayKeyEntry.ErrSetSize( Size() + centry ) ) != CArray< CKeyEntry >::errSuccess )
    {
        COLLAssert( err == CArray< CKeyEntry >::errOutOfMemory );
        return errOutOfMemory;
    }
    // append the new entries after the existing ones; these sets cannot fail
    // because the array was just grown to the required size
    for ( ientry = ientryMin; ientry < ientryMax; ientry++ )
    {
        err = m_arrayKeyEntry.ErrSetEntry( ientry, rgkeyentry[ ientry - ientryMin ] );
        COLLAssert( err == CArray< CKeyEntry >::errSuccess );
    }
    // compute log2( Size() ) + 1 by counting halvings
    size_t n;
    size_t log2n;
    for ( n = Size(), log2n = 0; n; n = n / 2, log2n++ );
    // re-sort the whole table (existing plus appended entries), choosing the
    // sort by comparing the cost heuristic against the number of new entries
    if ( 2 * log2n < centry )
    {
        _QuickSort( 0, Size() );
    }
    else
    {
        _InsertionSort( 0, Size() );
    }
    // choose the search strategy based on the final table size
    if ( 2 * log2n < Size() )
    {
        m_pfnSearch = _BinarySearch;
    }
    else
    {
        m_pfnSearch = _LinearSearch;
    }
    return errSuccess;
}
  2038. // clones an existing table
  2039. template< class CKey, class CEntry >
  2040. inline CTable< CKey, CEntry >::ERR CTable< CKey, CEntry >::
  2041. ErrClone( const CTable& table )
  2042. {
  2043. CArray< CKeyEntry >::ERR err = CArray< CKeyEntry >::errSuccess;
  2044. if ( ( err = m_arrayKeyEntry.ErrClone( table.m_arrayKeyEntry ) ) != CArray< CKeyEntry >::errSuccess )
  2045. {
  2046. COLLAssert( err == CArray< CKeyEntry >::errOutOfMemory );
  2047. return errOutOfMemory;
  2048. }
  2049. m_pfnSearch = table.m_pfnSearch;
  2050. return errSuccess;
  2051. }
  2052. // updates an existing entry in the table as long as it doesn't change
  2053. // that entry's position in the table
  2054. template< class CKey, class CEntry >
  2055. inline CTable< CKey, CEntry >::ERR CTable< CKey, CEntry >::
  2056. ErrUpdateEntry( const CEntry* const pentry, const CEntry& entry )
  2057. {
  2058. ERR err = errSuccess;
  2059. const CKeyEntry* pkeyentry = reinterpret_cast< const CKeyEntry* >( pentry );
  2060. const CKeyEntry& keyentry = reinterpret_cast< const CKeyEntry& >( entry );
  2061. if ( !pkeyentry->Cmp( keyentry ) )
  2062. {
  2063. m_arrayKeyEntry.SetEntry( pkeyentry, keyentry );
  2064. err = errSuccess;
  2065. }
  2066. else
  2067. {
  2068. err = errKeyChange;
  2069. }
  2070. return err;
  2071. }
// returns the current size of the table
template< class CKey, class CEntry >
inline size_t CTable< CKey, CEntry >::
Size() const
{
    return m_arrayKeyEntry.Size();
}
// returns a pointer to the Nth entry of the table or NULL if it is empty
// (delegates range checking to CArray::Entry)
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
Entry( const size_t ientry ) const
{
    return static_cast< const CEntry* >( m_arrayKeyEntry.Entry( ientry ) );
}
// the following group of functions return a pointer to an entry whose key
// matches the specified key according to the given criteria:
//
// Suffix Description Positional bias
//
// LT less than high
// LE less than or equal to low
// EQ equal to low
// HI equal to high
// GE greater than or equal to high
// GT greater than low
//
// if no matching entry was found then NULL will be returned
//
// "positional bias" means that the function will land on a matching entry
// whose position is closest to the low / high end of the table
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
SeekLT( const CKey& key ) const
{
    // low search lands on the first entry >= key
    const size_t ikeyentry = (this->*m_pfnSearch)( key, fFalse );
    if ( ikeyentry < Size() &&
         _Entry( ikeyentry ).Cmp( key ) < 0 )
    {
        return Entry( ikeyentry );
    }
    else
    {
        // step back one entry. when ikeyentry is 0 the index wraps to the
        // maximum size_t value and Entry() returns NULL (out of range)
        return Entry( ikeyentry - 1 );
    }
}
// seeks the lowest-positioned entry <= key (see criteria table at SeekLT)
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
SeekLE( const CKey& key ) const
{
    // low search lands on the first entry >= key
    const size_t ikeyentry = (this->*m_pfnSearch)( key, fFalse );
    if ( ikeyentry < Size() &&
         _Entry( ikeyentry ).Cmp( key ) <= 0 )
    {
        return Entry( ikeyentry );
    }
    else
    {
        // step back one entry; index 0 wraps and Entry() yields NULL
        return Entry( ikeyentry - 1 );
    }
}
// seeks the lowest-positioned entry == key (see criteria table at SeekLT)
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
SeekEQ( const CKey& key ) const
{
    // low search lands on the first entry >= key; it matches only if equal
    const size_t ikeyentry = (this->*m_pfnSearch)( key, fFalse );
    if ( ikeyentry < Size() &&
         _Entry( ikeyentry ).Cmp( key ) == 0 )
    {
        return Entry( ikeyentry );
    }
    else
    {
        return NULL;
    }
}
// seeks the highest-positioned entry == key (see criteria table at SeekLT)
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
SeekHI( const CKey& key ) const
{
    // high search lands just past the last entry <= key, so the candidate
    // match is the entry immediately before that position
    const size_t ikeyentry = (this->*m_pfnSearch)( key, fTrue );
    if ( ikeyentry > 0 &&
         _Entry( ikeyentry - 1 ).Cmp( key ) == 0 )
    {
        return Entry( ikeyentry - 1 );
    }
    else
    {
        return NULL;
    }
}
// seeks the highest-positioned entry == key, else the first entry > key
// (see criteria table at SeekLT)
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
SeekGE( const CKey& key ) const
{
    // high search lands just past the last entry <= key
    const size_t ikeyentry = (this->*m_pfnSearch)( key, fTrue );
    if ( ikeyentry > 0 &&
         _Entry( ikeyentry - 1 ).Cmp( key ) == 0 )
    {
        // preceding entry equals the key: high positional bias on a match
        return Entry( ikeyentry - 1 );
    }
    else
    {
        // otherwise the landing position itself is the first entry > key
        // (Entry() returns NULL if it is past the end)
        return Entry( ikeyentry );
    }
}
// seeks the lowest-positioned entry > key (see criteria table at SeekLT).
// the high search lands exactly on the first entry > key; Entry() returns
// NULL if that position is past the end of the table
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
SeekGT( const CKey& key ) const
{
    return Entry( (this->*m_pfnSearch)( key, fTrue ) );
}
// returns a reference to the Nth key/entry. the index must be in range: an
// out-of-range index makes CArray::Entry return NULL and this dereference
// undefined
template< class CKey, class CEntry >
inline const CTable< CKey, CEntry >::CKeyEntry& CTable< CKey, CEntry >::
_Entry( const size_t ikeyentry ) const
{
    return *( m_arrayKeyEntry.Entry( ikeyentry ) );
}
// overwrites the Nth key/entry in place (index must be in range)
template< class CKey, class CEntry >
inline void CTable< CKey, CEntry >::
_SetEntry( const size_t ikeyentry, const CKeyEntry& keyentry )
{
    m_arrayKeyEntry.SetEntry( m_arrayKeyEntry.Entry( ikeyentry ), keyentry );
}
// swaps two key/entries via a temporary copy (both indexes must be in range)
template< class CKey, class CEntry >
inline void CTable< CKey, CEntry >::
_SwapEntry( const size_t ikeyentry1, const size_t ikeyentry2 )
{
    CKeyEntry keyentryT;
    keyentryT = _Entry( ikeyentry1 );
    _SetEntry( ikeyentry1, _Entry( ikeyentry2 ) );
    _SetEntry( ikeyentry2, keyentryT );
}
  2204. template< class CKey, class CEntry >
  2205. inline size_t CTable< CKey, CEntry >::
  2206. _LinearSearch( const CKey& key, const BOOL fHigh ) const
  2207. {
  2208. for ( size_t ikeyentry = 0; ikeyentry < Size(); ikeyentry++ )
  2209. {
  2210. const int cmp = _Entry( ikeyentry ).Cmp( key );
  2211. if ( !( cmp < 0 || cmp == 0 && fHigh ) )
  2212. {
  2213. break;
  2214. }
  2215. }
  2216. return ikeyentry;
  2217. }
// binary search over the sorted table
//
// returns the index of the first entry >= key (fHigh == fFalse) or the first
// entry > key (fHigh == fTrue); returns Size() if every entry precedes the key
template< class CKey, class CEntry >
inline size_t CTable< CKey, CEntry >::
_BinarySearch( const CKey& key, const BOOL fHigh ) const
{
    size_t ikeyentryMin = 0;
    size_t ikeyentryMax = Size();
    while ( ikeyentryMin < ikeyentryMax )
    {
        // midpoint computed without overflow
        const size_t ikeyentryMid = ikeyentryMin + ( ikeyentryMax - ikeyentryMin ) / 2;
        const int cmp = _Entry( ikeyentryMid ).Cmp( key );
        if ( cmp < 0 || cmp == 0 && fHigh )
        {
            // mid entry precedes the target position: search the upper half
            ikeyentryMin = ikeyentryMid + 1;
        }
        else
        {
            // mid entry is at or past the target position: search the lower half
            ikeyentryMax = ikeyentryMid;
        }
    }
    return ikeyentryMax;
}
// insertion sort of the key/entries in [ikeyentryMinIn, ikeyentryMaxIn)
//
// NOTE: the inner while condition deliberately exploits unsigned wraparound:
// when ikeyentryFirst is decremented past ikeyentryMinIn == 0 it wraps to
// SIZE_MAX, the condition fails, and the subsequent ikeyentryFirst + 1 wraps
// back to 0 — the correct insertion slot. treat with care when modifying
template< class CKey, class CEntry >
inline void CTable< CKey, CEntry >::
_InsertionSort( const size_t ikeyentryMinIn, const size_t ikeyentryMaxIn )
{
    size_t ikeyentryLast;
    size_t ikeyentryFirst;
    CKeyEntry keyentryKey;
    // walk adjacent pairs; only out-of-order entries trigger an insertion
    for ( ikeyentryFirst = ikeyentryMinIn, ikeyentryLast = ikeyentryMinIn + 1;
          ikeyentryLast < ikeyentryMaxIn;
          ikeyentryFirst = ikeyentryLast++ )
    {
        if ( _Entry( ikeyentryFirst ).Cmp( _Entry( ikeyentryLast ) ) > 0 )
        {
            // save the out-of-place entry, then shift greater entries up
            // until its insertion slot is found
            keyentryKey = _Entry( ikeyentryLast );
            _SetEntry( ikeyentryLast, _Entry( ikeyentryFirst ) );
            while ( ikeyentryFirst-- >= ikeyentryMinIn + 1 &&
                    _Entry( ikeyentryFirst ).Cmp( keyentryKey ) > 0 )
            {
                _SetEntry( ikeyentryFirst + 1, _Entry( ikeyentryFirst ) );
            }
            _SetEntry( ikeyentryFirst + 1, keyentryKey );
        }
    }
}
// quicksort of the key/entries in [ikeyentryMinIn, ikeyentryMaxIn)
//
// uses median-of-three pivot selection, an explicit partition stack to bound
// recursion depth, and falls back to insertion sort for small partitions
template< class CKey, class CEntry >
inline void CTable< CKey, CEntry >::
_QuickSort( const size_t ikeyentryMinIn, const size_t ikeyentryMaxIn )
{
    // quicksort cutoff: partitions smaller than this are insertion sorted
    const size_t ckeyentryMin = 32;
    // partition stack (used to reduce levels of recursion)
    const size_t cpartMax = 16;
    size_t cpart = 0;
    struct
    {
        size_t ikeyentryMin;
        size_t ikeyentryMax;
    } rgpart[ cpartMax ];
    // current partition = partition passed in arguments
    size_t ikeyentryMin = ikeyentryMinIn;
    size_t ikeyentryMax = ikeyentryMaxIn;
    // _QuickSort current partition
    for ( ; ; )
    {
        // if this partition is small enough, insertion sort it
        if ( ikeyentryMax - ikeyentryMin < ckeyentryMin )
        {
            _InsertionSort( ikeyentryMin, ikeyentryMax );
            // if there are no more partitions to sort, we're done
            if ( !cpart )
            {
                break;
            }
            // pop a partition off the stack and make it the current partition
            ikeyentryMin = rgpart[ --cpart ].ikeyentryMin;
            ikeyentryMax = rgpart[ cpart ].ikeyentryMax;
            continue;
        }
        // determine divisor by sorting the first, middle, and last entries and
        // taking the resulting middle entry as the divisor
        size_t ikeyentryFirst = ikeyentryMin;
        size_t ikeyentryMid = ikeyentryMin + ( ikeyentryMax - ikeyentryMin ) / 2;
        size_t ikeyentryLast = ikeyentryMax - 1;
        if ( _Entry( ikeyentryFirst ).Cmp( _Entry( ikeyentryMid ) ) > 0 )
        {
            _SwapEntry( ikeyentryFirst, ikeyentryMid );
        }
        if ( _Entry( ikeyentryFirst ).Cmp( _Entry( ikeyentryLast ) ) > 0 )
        {
            _SwapEntry( ikeyentryFirst, ikeyentryLast );
        }
        if ( _Entry( ikeyentryMid ).Cmp( _Entry( ikeyentryLast ) ) > 0 )
        {
            _SwapEntry( ikeyentryMid, ikeyentryLast );
        }
        // sort large partition into two smaller partitions (<=, >)
        // NOTE: the entry at ikeyentryMin serves as the divisor throughout
        do {
            // advance past all entries <= the divisor
            while ( ikeyentryFirst <= ikeyentryLast &&
                    _Entry( ikeyentryFirst ).Cmp( _Entry( ikeyentryMin ) ) <= 0 )
            {
                ikeyentryFirst++;
            }
            // advance past all entries > the divisor
            while ( ikeyentryFirst <= ikeyentryLast &&
                    _Entry( ikeyentryLast ).Cmp( _Entry( ikeyentryMin ) ) > 0 )
            {
                ikeyentryLast--;
            }
            // if we have found a pair to swap, swap them and continue
            if ( ikeyentryFirst < ikeyentryLast )
            {
                _SwapEntry( ikeyentryFirst++, ikeyentryLast-- );
            }
        }
        while ( ikeyentryFirst <= ikeyentryLast );
        // move the divisor to the end of the <= partition
        _SwapEntry( ikeyentryMin, ikeyentryLast );
        // determine the limits of the smaller and larger sub-partitions
        size_t ikeyentrySmallMin;
        size_t ikeyentrySmallMax;
        size_t ikeyentryLargeMin;
        size_t ikeyentryLargeMax;
        if ( ikeyentryMax - ikeyentryFirst == 0 )
        {
            ikeyentryLargeMin = ikeyentryMin;
            ikeyentryLargeMax = ikeyentryLast;
            ikeyentrySmallMin = ikeyentryLast;
            ikeyentrySmallMax = ikeyentryMax;
        }
        else if ( ikeyentryMax - ikeyentryFirst > ikeyentryFirst - ikeyentryMin )
        {
            ikeyentrySmallMin = ikeyentryMin;
            ikeyentrySmallMax = ikeyentryFirst;
            ikeyentryLargeMin = ikeyentryFirst;
            ikeyentryLargeMax = ikeyentryMax;
        }
        else
        {
            ikeyentryLargeMin = ikeyentryMin;
            ikeyentryLargeMax = ikeyentryFirst;
            ikeyentrySmallMin = ikeyentryFirst;
            ikeyentrySmallMax = ikeyentryMax;
        }
        // push the larger sub-partition or recurse if the stack is full.
        // sorting the smaller partition iteratively bounds the stack depth
        if ( cpart < cpartMax )
        {
            rgpart[ cpart ].ikeyentryMin = ikeyentryLargeMin;
            rgpart[ cpart++ ].ikeyentryMax = ikeyentryLargeMax;
        }
        else
        {
            _QuickSort( ikeyentryLargeMin, ikeyentryLargeMax );
        }
        // set our current partition to be the smaller sub-partition
        ikeyentryMin = ikeyentrySmallMin;
        ikeyentryMax = ikeyentrySmallMax;
    }
}
  2378. }; // namespace COLL
  2379. using namespace COLL;
  2380. #endif // _COLLECTION_HXX_INCLUDED