Leaked source code of windows server 2003
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

3173 lines
86 KiB

  1. #ifndef _COLLECTION_HXX_INCLUDED
  2. #define _COLLECTION_HXX_INCLUDED
  3. // asserts
  4. //
  5. // #define COLLAssert to point to your favorite assert function per #include
  6. #ifdef COLLAssert
  7. #else // !COLLAssert
  8. #define COLLAssert Assert
  9. #endif // COLLAssert
  10. #ifdef DHTAssert
  11. #else // !DHTAssert
  12. #define DHTAssert COLLAssert
  13. #endif // DHTAssert
  14. #include "dht.hxx"
  15. #include <memory.h>
  16. #include <minmax.h>
  17. #pragma warning ( disable : 4786 ) // we allow huge symbol names
  18. namespace COLL {
  19. //////////////////////////////////////////////////////////////////////////////////////////
  20. // CInvasiveList
  21. //
  22. // Implements an "invasive" doubly linked list of objects. The list is "invasive"
  23. // because part of its state is embedded directly in the objects it contains. An
  24. // additional property of this list class is that the head of the list can be relocated
  25. // without updating the state of any of the contained objects.
  26. //
  27. // CObject = class representing objects in the list. each class must contain
  28. // storage for a CElement for embedded list state
  29. // OffsetOfILE = inline function returning the offset of the CElement contained
  30. // in the CObject
// Returns the byte offset of the embedded list-element state within the
// containing object; supplied per-instantiation by the template user.
typedef SIZE_T (*PfnOffsetOf)();

template< class CObject, PfnOffsetOf OffsetOfILE >
class CInvasiveList
{
    public:

        // invasive list element state (embedded in linked objects)
        //
        // NOTE: m_pilePrev / m_pileNext are set to the sentinel (CElement*)-1
        // while the element is not a member of any list; FMember() and the
        // insert/remove assertions rely on that sentinel value
        class CElement
        {
            public:

                // ctor / dtor
                CElement() : m_pilePrev( (CElement*)-1 ), m_pileNext( (CElement*)-1 ) {}
                ~CElement() {}

            private:

                CElement& operator=( CElement& );  // disallowed

                friend class CInvasiveList< CObject, OffsetOfILE >;

                CElement*   m_pilePrev;
                CElement*   m_pileNext;
        };

    public:

        // ctor / dtor
        CInvasiveList();
        ~CInvasiveList();

        // operators
        CInvasiveList& operator=( const CInvasiveList& il );

        // API
        BOOL FEmpty() const;

        BOOL FMember( CObject* const pobj ) const;

        CObject* Prev( CObject* const pobj ) const;
        CObject* Next( CObject* const pobj ) const;

        CObject* PrevMost() const;
        CObject* NextMost() const;

        void InsertAsPrevMost( CObject* const pobj );
        void InsertAsNextMost( CObject* const pobj );

        void Remove( CObject* const pobj );

        void Empty();

    private:

        // internal functions
        //
        // conversion between an object pointer and a pointer to its embedded
        // list element, using the OffsetOfILE template parameter
        CObject* _PobjFromPile( CElement* const pile ) const;
        CElement* _PileFromPobj( CObject* const pobj ) const;

    private:

        // head (prev-most) and tail (next-most) of the list; both point at
        // the "nil" element ( _PileFromPobj( NULL ) ) when the list is empty
        CElement*   m_pilePrevMost;
        CElement*   m_pileNextMost;
};
// ctor
template< class CObject, PfnOffsetOf OffsetOfILE >
inline CInvasiveList< CObject, OffsetOfILE >::
CInvasiveList()
{
    // start with an empty list
    Empty();
}
// dtor
//
// NOTE: does not unlink any contained elements; their embedded state is
// left as-is
template< class CObject, PfnOffsetOf OffsetOfILE >
inline CInvasiveList< CObject, OffsetOfILE >::
~CInvasiveList()
{
}
  88. // assignment operator
  89. template< class CObject, PfnOffsetOf OffsetOfILE >
  90. inline CInvasiveList< CObject, OffsetOfILE >& CInvasiveList< CObject, OffsetOfILE >::
  91. operator=( const CInvasiveList& il )
  92. {
  93. m_pilePrevMost = il.m_pilePrevMost;
  94. m_pileNextMost = il.m_pileNextMost;
  95. return *this;
  96. }
  97. // returns fTrue if the list is empty
  98. template< class CObject, PfnOffsetOf OffsetOfILE >
  99. inline BOOL CInvasiveList< CObject, OffsetOfILE >::
  100. FEmpty() const
  101. {
  102. return m_pilePrevMost == _PileFromPobj( NULL );
  103. }
// returns fTrue if the specified object is a member of this list
//
// NOTE: this function currently returns fTrue if the specified object is a
// member of any list!
template< class CObject, PfnOffsetOf OffsetOfILE >
inline BOOL CInvasiveList< CObject, OffsetOfILE >::
FMember( CObject* const pobj ) const
{
#ifdef EXPENSIVE_DEBUG

    // exhaustive check: walk this list looking for the object
    for ( CObject* pobjT = PrevMost(); pobjT && pobjT != pobj; pobjT = Next( pobjT ) )
    {
    }
    return pobjT == pobj;

#else // !DEBUG

    // fast check: a non-member element has both links equal to the sentinel
    // (CElement*)-1, so the sum of the two links (as integers) is exactly -2.
    // the assert verifies the sum test agrees with the direct comparison
    // (i.e. that no legitimate pair of links happens to sum to -2)
    CElement* const pile = _PileFromPobj( pobj );

    COLLAssert( ( ( DWORD_PTR( pile->m_pilePrev ) + DWORD_PTR( pile->m_pileNext ) ) == -2 ) ==
                ( pile->m_pilePrev == (CElement*)-1 && pile->m_pileNext == (CElement*)-1 ) );

    return ( DWORD_PTR( pile->m_pilePrev ) + DWORD_PTR( pile->m_pileNext ) ) != -2;

#endif // DEBUG
}
  124. // returns the prev object to the given object in the list
  125. template< class CObject, PfnOffsetOf OffsetOfILE >
  126. inline CObject* CInvasiveList< CObject, OffsetOfILE >::
  127. Prev( CObject* const pobj ) const
  128. {
  129. return _PobjFromPile( _PileFromPobj( pobj )->m_pilePrev );
  130. }
  131. // returns the next object to the given object in the list
  132. template< class CObject, PfnOffsetOf OffsetOfILE >
  133. inline CObject* CInvasiveList< CObject, OffsetOfILE >::
  134. Next( CObject* const pobj ) const
  135. {
  136. return _PobjFromPile( _PileFromPobj( pobj )->m_pileNext );
  137. }
// returns the prev-most (first) object in the list, or NULL if the list
// is empty
template< class CObject, PfnOffsetOf OffsetOfILE >
inline CObject* CInvasiveList< CObject, OffsetOfILE >::
PrevMost() const
{
    return _PobjFromPile( m_pilePrevMost );
}
// returns the next-most (last) object in the list, or NULL if the list
// is empty
template< class CObject, PfnOffsetOf OffsetOfILE >
inline CObject* CInvasiveList< CObject, OffsetOfILE >::
NextMost() const
{
    return _PobjFromPile( m_pileNextMost );
}
  152. // inserts the given object as the prev-most object in the list
  153. template< class CObject, PfnOffsetOf OffsetOfILE >
  154. inline void CInvasiveList< CObject, OffsetOfILE >::
  155. InsertAsPrevMost( CObject* const pobj )
  156. {
  157. CElement* const pile = _PileFromPobj( pobj );
  158. // this object had better not already be in the list
  159. COLLAssert( !FMember( pobj ) );
  160. // this object had better not already be in any list
  161. COLLAssert( pile->m_pilePrev == (CElement*)-1 );
  162. COLLAssert( pile->m_pileNext == (CElement*)-1 );
  163. // the list is empty
  164. if ( m_pilePrevMost == _PileFromPobj( NULL ) )
  165. {
  166. // insert this element as the only element in the list
  167. pile->m_pilePrev = _PileFromPobj( NULL );
  168. pile->m_pileNext = _PileFromPobj( NULL );
  169. m_pilePrevMost = pile;
  170. m_pileNextMost = pile;
  171. }
  172. // the list is not empty
  173. else
  174. {
  175. // insert this element at the prev-most position in the list
  176. pile->m_pilePrev = _PileFromPobj( NULL );
  177. pile->m_pileNext = m_pilePrevMost;
  178. m_pilePrevMost->m_pilePrev = pile;
  179. m_pilePrevMost = pile;
  180. }
  181. }
  182. // inserts the given object as the next-most object in the list
  183. template< class CObject, PfnOffsetOf OffsetOfILE >
  184. inline void CInvasiveList< CObject, OffsetOfILE >::
  185. InsertAsNextMost( CObject* const pobj )
  186. {
  187. CElement* const pile = _PileFromPobj( pobj );
  188. // this object had better not already be in the list
  189. COLLAssert( !FMember( pobj ) );
  190. // this object had better not already be in any list
  191. COLLAssert( pile->m_pilePrev == (CElement*)-1 );
  192. COLLAssert( pile->m_pileNext == (CElement*)-1 );
  193. // the list is empty
  194. if ( m_pileNextMost == _PileFromPobj( NULL ) )
  195. {
  196. // insert this element as the only element in the list
  197. pile->m_pilePrev = _PileFromPobj( NULL );
  198. pile->m_pileNext = _PileFromPobj( NULL );
  199. m_pilePrevMost = pile;
  200. m_pileNextMost = pile;
  201. }
  202. // the list is not empty
  203. else
  204. {
  205. // insert this element at the next-most position in the list
  206. pile->m_pilePrev = m_pileNextMost;
  207. pile->m_pileNext = _PileFromPobj( NULL );
  208. m_pileNextMost->m_pileNext = pile;
  209. m_pileNextMost = pile;
  210. }
  211. }
// removes the given object from the list and restores its embedded element
// state to the "not in any list" sentinel
template< class CObject, PfnOffsetOf OffsetOfILE >
inline void CInvasiveList< CObject, OffsetOfILE >::
Remove( CObject* const pobj )
{
    CElement* const pile = _PileFromPobj( pobj );

    // this object had better already be in the list
    COLLAssert( FMember( pobj ) );

    // there is an element after us in the list
    if ( pile->m_pileNext != _PileFromPobj( NULL ) )
    {
        // fix up its prev element to be our prev element (if any)
        pile->m_pileNext->m_pilePrev = pile->m_pilePrev;
    }
    else
    {
        // we were the tail:  set the next-most element to be our prev
        // element (if any)
        m_pileNextMost = pile->m_pilePrev;
    }

    // there is an element before us in the list
    if ( pile->m_pilePrev != _PileFromPobj( NULL ) )
    {
        // fix up its next element to be our next element (if any)
        pile->m_pilePrev->m_pileNext = pile->m_pileNext;
    }
    else
    {
        // we were the head:  set the prev-most element to be our next
        // element (if any)
        m_pilePrevMost = pile->m_pileNext;
    }

    // mark ourself as not in any list (sentinel expected by FMember / insert)
    pile->m_pilePrev = (CElement*)-1;
    pile->m_pileNext = (CElement*)-1;
}
  246. // resets the list to the empty state
  247. template< class CObject, PfnOffsetOf OffsetOfILE >
  248. inline void CInvasiveList< CObject, OffsetOfILE >::
  249. Empty()
  250. {
  251. m_pilePrevMost = _PileFromPobj( NULL );
  252. m_pileNextMost = _PileFromPobj( NULL );
  253. }
// converts a pointer to an ILE to a pointer to the containing object by
// subtracting the embedded element's offset
//
// NOTE: applied to the "nil" element ( _PileFromPobj( NULL ) ) this yields
// NULL, which is how the list reports "no such object"
template< class CObject, PfnOffsetOf OffsetOfILE >
inline CObject* CInvasiveList< CObject, OffsetOfILE >::
_PobjFromPile( CElement* const pile ) const
{
    return (CObject*)( (BYTE*)pile - OffsetOfILE() );
}
// converts a pointer to an object to a pointer to its embedded ILE by
// adding the element's offset
//
// NOTE: NULL maps to the non-NULL "nil" element used as the list terminator.
// __TYPENAME is presumably a compiler-portability macro for "typename" on
// the dependent return type -- TODO confirm where it is defined
template< class CObject, PfnOffsetOf OffsetOfILE >
inline __TYPENAME CInvasiveList< CObject, OffsetOfILE >::CElement* CInvasiveList< CObject, OffsetOfILE >::
_PileFromPobj( CObject* const pobj ) const
{
    return (CElement*)( (BYTE*)pobj + OffsetOfILE() );
}
  268. //////////////////////////////////////////////////////////////////////////////////////////
  269. // CApproximateIndex
  270. //
  271. // Implements a dynamically resizable table of entries indexed approximately by key
  272. // ranges of a specified uncertainty. Accuracy and exact ordering are sacrificied for
  273. // improved performance and concurrency. This index is optimized for a set of records
  274. // whose keys occupy a fairly dense range of values. The index is designed to handle
  275. // key ranges that can wrap around zero. As such, the indexed key range can not span
  276. // more than half the numerical precision of the key.
  277. //
  278. // CKey = class representing keys used to order entries in the mesh table.
  279. // this class must support all the standard math operators. wrap-
  280. // around in the key values is supported
  281. // CEntry = class indexed by the mesh table. this class must contain storage
  282. // for a CInvasiveContext class
  283. // OffsetOfIC = inline function returning the offset of the CInvasiveContext
  284. // contained in the CEntry
  285. //
  286. // You must use the DECLARE_APPROXIMATE_INDEX macro to declare this class.
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
class CApproximateIndex
{
    public:

        // class containing context needed per CEntry
        //
        // each indexed entry embeds one of these; it in turn embeds the
        // invasive-list element used to chain entries within a bucket
        class CInvasiveContext
        {
            public:

                CInvasiveContext() {}
                ~CInvasiveContext() {}

                // offset of the embedded list element within the CEntry
                static SIZE_T OffsetOfILE() { return OffsetOfIC() + OffsetOf( CInvasiveContext, m_ile ); }

            private:

                typename CInvasiveList< CEntry, OffsetOfILE >::CElement m_ile;
        };

        // API Error Codes
        enum ERR
        {
            errSuccess,
            errInvalidParameter,
            errOutOfMemory,
            errEntryNotFound,
            errNoCurrentEntry,
            errKeyRangeExceeded,
        };

        // API Lock Context
        class CLock;

    public:

        // ctor / dtor
        CApproximateIndex( const int Rank );
        ~CApproximateIndex();

        // API
        ERR ErrInit(    const CKey      dkeyPrecision,
                        const CKey      dkeyUncertainty,
                        const double    dblSpeedSizeTradeoff );
        void Term();

        void LockKeyPtr( const CKey& key, CEntry* const pentry, CLock* const plock );
        void UnlockKeyPtr( CLock* const plock );

        long CmpKey( const CKey& key1, const CKey& key2 ) const;

        CKey KeyRangeFirst() const;
        CKey KeyRangeLast() const;

        CKey KeyInsertLeast() const;
        CKey KeyInsertMost() const;

        ERR ErrRetrieveEntry( CLock* const plock, CEntry** const ppentry ) const;
        ERR ErrInsertEntry( CLock* const plock, CEntry* const pentry, const BOOL fNextMost = fTrue );
        ERR ErrDeleteEntry( CLock* const plock );

        ERR ErrReserveEntry( CLock* const plock );
        void UnreserveEntry( CLock* const plock );

        void MoveBeforeFirst( CLock* const plock );
        ERR ErrMoveNext( CLock* const plock );
        ERR ErrMovePrev( CLock* const plock );
        void MoveAfterLast( CLock* const plock );

        void MoveBeforeKeyPtr( const CKey& key, CEntry* const pentry, CLock* const plock );
        void MoveAfterKeyPtr( const CKey& key, CEntry* const pentry, CLock* const plock );

#ifdef DEBUGGER_EXTENSION
        VOID Dump( CPRINTF * pcprintf, const DWORD_PTR dwOffset = 0 ) const;
        VOID Scan( CPRINTF * pcprintf, VOID * pv ) const { m_bt.Scan( pcprintf, pv ); }
#endif

    public:

        // bucket used for containing index entries that have approximately
        // the same key
        class CBucket
        {
            public:

                // bucket ID
                typedef unsigned long ID;

            public:

                CBucket() {}
                ~CBucket() {}

                CBucket& operator=( const CBucket& bucket )
                {
                    m_id    = bucket.m_id;
                    m_cPin  = bucket.m_cPin;
                    m_il    = bucket.m_il;
                    return *this;
                }

            public:

                ID              m_id;       // bucket ID (hashed key + pointer bits)
                unsigned long   m_cPin;     // pin count keeping the bucket alive
                CInvasiveList< CEntry, CInvasiveContext::OffsetOfILE >  m_il;   // entries in this bucket
        };

        // table that contains our buckets
        typedef CDynamicHashTable< CBucket::ID, CBucket > CBucketTable;

    public:

        // API Lock Context
        //
        // holds a write lock on one bucket of the bucket table plus the
        // currency (prev / current / next entry) within that bucket
        class CLock
        {
            public:

                CLock() {}
                ~CLock() {}

            private:

                friend class CApproximateIndex< CKey, CEntry, OffsetOfIC >;

                typename CBucketTable::CLock    m_lock;
                CBucket                         m_bucket;
                CEntry*                         m_pentryPrev;
                CEntry*                         m_pentry;
                CEntry*                         m_pentryNext;
        };

    private:

        CBucket::ID _IdFromKeyPtr( const CKey& key, CEntry* const pentry ) const;
        CBucket::ID _DeltaId( const CBucket::ID id, const long did ) const;
        long _SubId( const CBucket::ID id1, const CBucket::ID id2 ) const;
        long _CmpId( const CBucket::ID id1, const CBucket::ID id2 ) const;
        CInvasiveContext* _PicFromPentry( CEntry* const pentry ) const;
        BOOL _FExpandIdRange( const CBucket::ID idNew );

        ERR _ErrInsertBucket( CLock* const plock );
        ERR _ErrInsertEntry( CLock* const plock, CEntry* const pentry );
        ERR _ErrMoveNext( CLock* const plock );
        ERR _ErrMovePrev( CLock* const plock );

    private:

        // never updated (computed once by ErrInit)
        long            m_shfKeyPrecision;
        long            m_shfKeyUncertainty;
        long            m_shfBucketHash;
        long            m_shfFillMSB;
        CBucket::ID     m_maskBucketKey;
        CBucket::ID     m_maskBucketPtr;
        CBucket::ID     m_maskBucketID;
        long            m_didRangeMost;
        //BYTE          m_rgbReserved1[ 0 ];

        // seldom updated (protected by m_critUpdateIdRange)
        CCriticalSection    m_critUpdateIdRange;
        long                m_cidRange;
        CBucket::ID         m_idRangeFirst;
        CBucket::ID         m_idRangeLast;
        BYTE                m_rgbReserved2[ 16 ];

        // commonly updated
        CBucketTable    m_bt;
        //BYTE          m_rgbReserved3[ 0 ];
};
// ctor
//
// Rank is the deadlock-avoidance rank for the index's locks; the internal
// ID-range critical section takes Rank - 1 so it ranks below the bucket
// table's locks
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::
CApproximateIndex( const int Rank )
    :   m_critUpdateIdRange( CLockBasicInfo( CSyncBasicInfo( "CApproximateIndex::m_critUpdateIdRange" ), Rank - 1, 0 ) ),
        m_bt( Rank )
{
}
// dtor
//
// NOTE: does not call Term(); the owner is expected to do that
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CApproximateIndex< CKey, CEntry, OffsetOfIC >::
~CApproximateIndex()
{
}
// initializes the approximate index using the given parameters. if the index
// cannot be initialized, errOutOfMemory is returned
//
// dkeyPrecision        total span of key values the index must represent
// dkeyUncertainty      granularity at which keys are considered equal
// dblSpeedSizeTradeoff 0.0 = favor speed (more hash buckets), 1.0 = favor size
//
// returns errInvalidParameter when the parameters are inconsistent or would
// leave the bucket ID with too much / too little precision
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
ErrInit(    const CKey      dkeyPrecision,
            const CKey      dkeyUncertainty,
            const double    dblSpeedSizeTradeoff )
{
    // validate all parameters
    if (    dkeyPrecision <= dkeyUncertainty ||
            dkeyUncertainty < CKey( 0 ) ||
            dblSpeedSizeTradeoff < 0.0 || dblSpeedSizeTradeoff > 1.0 )
    {
        return errInvalidParameter;
    }

    // init our parameters

    // minimum bucket-hash width scales with processor count (concurrency)
    const CBucket::ID cbucketHashMin = CBucket::ID( ( 1.0 - dblSpeedSizeTradeoff ) * OSSyncGetProcessorCount() );

    // compute ceil( log2( dkeyPrecision ) ) and a mask of that many low bits
    CKey maskKey;
    for (   m_shfKeyPrecision = 0, maskKey = 0;
            dkeyPrecision > CKey( 1 ) << m_shfKeyPrecision && m_shfKeyPrecision < sizeof( CKey ) * 8;
            maskKey |= CKey( 1 ) << m_shfKeyPrecision++ )
    {
    }

    // compute ceil( log2( dkeyUncertainty ) )
    for (   m_shfKeyUncertainty = 0;
            dkeyUncertainty > CKey( 1 ) << m_shfKeyUncertainty && m_shfKeyUncertainty < sizeof( CKey ) * 8;
            m_shfKeyUncertainty++ )
    {
    }

    // compute ceil( log2( cbucketHashMin ) ) and the pointer-hash mask
    for (   m_shfBucketHash = 0, m_maskBucketPtr = 0;
            cbucketHashMin > CBucket::ID( 1 ) << m_shfBucketHash && m_shfBucketHash < sizeof( CBucket::ID ) * 8;
            m_maskBucketPtr |= CBucket::ID( 1 ) << m_shfBucketHash++ )
    {
    }

    // key bits above the uncertainty form the key part of the bucket ID
    m_maskBucketKey = CBucket::ID( maskKey >> m_shfKeyUncertainty );

    // number of unused MSBs in the bucket ID
    m_shfFillMSB = sizeof( CBucket::ID ) * 8 - m_shfKeyPrecision + m_shfKeyUncertainty - m_shfBucketHash;
    m_shfFillMSB = max( m_shfFillMSB, 0 );
    m_maskBucketID = ( ~CBucket::ID( 0 ) ) >> m_shfFillMSB;

    // if our parameters leave us with too much or too little precision for
    // our bucket IDs, fail. "too much" precision would allow our bucket IDs
    // to span more than half the precision of our bucket ID and cause our
    // wrap-around-aware comparisons to fail. "too little" precision would
    // give us too few bucket IDs to allow us to hash efficiently
    //
    // NOTE: we check for hash efficiency in the worst case so that we don't
    // suddenly return errInvalidParameter on some new monster machine
    const CBucket::ID cbucketHashMax = CBucket::ID( 1.0 * OSSyncGetProcessorCountMax() );
    for (   long shfBucketHashMax = 0;
            cbucketHashMax > CBucket::ID( 1 ) << shfBucketHashMax && shfBucketHashMax < sizeof( CBucket::ID ) * 8;
            shfBucketHashMax++ )
    {
    }
    long shfFillMSBMin;
    shfFillMSBMin = sizeof( CBucket::ID ) * 8 - m_shfKeyPrecision + m_shfKeyUncertainty - shfBucketHashMax;
    shfFillMSBMin = max( shfFillMSBMin, 0 );
    // NOTE(review): the "< 0" arm is unreachable after the max(...,0) above;
    // also the comparisons mix long with sizeof (unsigned) -- confirm intent
    if (    shfFillMSBMin < 0 ||
            shfFillMSBMin > sizeof( CBucket::ID ) * 8 - shfBucketHashMax )
    {
        return errInvalidParameter;
    }

    // limit the ID range to within half the precision of the bucket ID
    m_didRangeMost = m_maskBucketID >> 1;

    // init our bucket ID range to be empty
    m_cidRange = 0;
    m_idRangeFirst = 0;
    m_idRangeLast = 0;

    // initialize the bucket table
    if ( m_bt.ErrInit( 5.0, 1.0 ) != errSuccess )
    {
        Term();
        return errOutOfMemory;
    }

    return errSuccess;
}
// terminates the approximate index. this function can be called even if the
// index has never been initialized or is only partially initialized
//
// NOTE: any data stored in the index at this time will be lost!
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
Term()
{
    // terminate the bucket table
    m_bt.Term();
}
// acquires a lock on the specified key and entry pointer and returns the lock
// in the provided lock context
//
// on return the lock's currency is on the entry if it is present in the
// bucket, otherwise before the first entry of the (possibly empty) bucket.
// the lock must eventually be released via UnlockKeyPtr()
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
LockKeyPtr( const CKey& key, CEntry* const pentry, CLock* const plock )
{
    // compute the bucket ID for this key and entry pointer
    plock->m_bucket.m_id = _IdFromKeyPtr( key, pentry );

    // write lock this bucket ID in the bucket table
    m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );

    // fetch this bucket from the bucket table if it exists. if it doesn't
    // exist, the bucket will start out empty and have the above bucket ID
    plock->m_bucket.m_cPin = 0;
    plock->m_bucket.m_il.Empty();
    (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );

    // the entry is in this bucket
    if ( plock->m_bucket.m_il.FMember( pentry ) )
    {
        // set our currency to be on this entry in the bucket
        plock->m_pentryPrev = NULL;
        plock->m_pentry = pentry;
        plock->m_pentryNext = NULL;
    }

    // the entry is not in this bucket
    else
    {
        // set our currency to be before the first entry in this bucket
        plock->m_pentryPrev = NULL;
        plock->m_pentry = NULL;
        plock->m_pentryNext = plock->m_bucket.m_il.PrevMost();
    }

    // if this bucket isn't pinned, it had better be represented by the valid
    // bucket ID range of the index
    COLLAssert(    !plock->m_bucket.m_cPin ||
                (    _CmpId( plock->m_bucket.m_id, m_idRangeFirst ) >= 0 &&
                    _CmpId( plock->m_bucket.m_id, m_idRangeLast ) <= 0 ) );
}
// releases the lock acquired by LockKeyPtr() (or a Move* function) in the
// specified lock context
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
UnlockKeyPtr( CLock* const plock )
{
    // if this bucket isn't pinned, it had better be represented by the valid
    // bucket ID range of the index
    COLLAssert(    !plock->m_bucket.m_cPin ||
                (    _CmpId( plock->m_bucket.m_id, m_idRangeFirst ) >= 0 &&
                    _CmpId( plock->m_bucket.m_id, m_idRangeLast ) <= 0 ) );

    // write unlock this bucket ID in the bucket table
    m_bt.WriteUnlockKey( &plock->m_lock );
}
  564. // compares two keys as they would be seen relative to each other by the
  565. // approximate index
  566. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  567. inline long CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  568. CmpKey( const CKey& key1, const CKey& key2 ) const
  569. {
  570. return _CmpId( _IdFromKeyPtr( key1, NULL ), _IdFromKeyPtr( key2, NULL ) );
  571. }
// returns the first key in the current key range. this key is guaranteed to
// be at least as small as the key of any record currently in the index given
// the precision and uncertainty of the index
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CKey CApproximateIndex< CKey, CEntry, OffsetOfIC >::
KeyRangeFirst() const
{
    // strip the hash bits from the first bucket ID, then scale back up by
    // the uncertainty to recover a key value
    return CKey( m_idRangeFirst >> m_shfBucketHash ) << m_shfKeyUncertainty;
}
// returns the last key in the current key range. this key is guaranteed to
// be at least as large as the key of any record currently in the index given
// the precision and uncertainty of the index
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CKey CApproximateIndex< CKey, CEntry, OffsetOfIC >::
KeyRangeLast() const
{
    // strip the hash bits from the last bucket ID, then scale back up by
    // the uncertainty to recover a key value
    return CKey( m_idRangeLast >> m_shfBucketHash ) << m_shfKeyUncertainty;
}
// returns the smallest key that could be successfully inserted into the index
// (the current last ID minus the maximum allowed ID span, rounded up to a
// whole bucket-hash group)
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CKey CApproximateIndex< CKey, CEntry, OffsetOfIC >::
KeyInsertLeast() const
{
    const CBucket::ID cBucketHash = 1 << m_shfBucketHash;

    CBucket::ID idFirstLeast = m_idRangeLast - m_didRangeMost;
    // round up to the next multiple of cBucketHash
    idFirstLeast = idFirstLeast + ( cBucketHash - idFirstLeast % cBucketHash ) % cBucketHash;

    return CKey( idFirstLeast >> m_shfBucketHash ) << m_shfKeyUncertainty;
}
// returns the largest key that could be successfully inserted into the index
// (the current first ID plus the maximum allowed ID span, rounded down to the
// end of a bucket-hash group)
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline CKey CApproximateIndex< CKey, CEntry, OffsetOfIC >::
KeyInsertMost() const
{
    const CBucket::ID cBucketHash = 1 << m_shfBucketHash;

    CBucket::ID idLastMost = m_idRangeFirst + m_didRangeMost;
    // round down so the ID ends exactly at a bucket-hash group boundary
    idLastMost = idLastMost - ( idLastMost + 1 ) % cBucketHash;

    return CKey( idLastMost >> m_shfBucketHash ) << m_shfKeyUncertainty;
}
  610. // retrieves the entry corresponding to the key and entry pointer locked by the
  611. // specified lock context. if there is no entry for this key, errEntryNotFound
  612. // will be returned
  613. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  614. inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  615. ErrRetrieveEntry( CLock* const plock, CEntry** const ppentry ) const
  616. {
  617. // return the current entry. if the current entry is NULL, then there is
  618. // no current entry
  619. *ppentry = plock->m_pentry;
  620. return *ppentry ? errSuccess : errEntryNotFound;
  621. }
// inserts a new entry corresponding to the key and entry pointer locked by the
// specified lock context. fNextMost biases the position the entry will take
// when inserted in the index. if the new entry cannot be inserted,
// errOutOfMemory will be returned. if inserting the new entry will cause the
// key space to become too large, errKeyRangeExceeded will be returned
//
// NOTE: it is illegal to attempt to insert an entry into the index that is
// already in the index
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
ErrInsertEntry( CLock* const plock, CEntry* const pentry, const BOOL fNextMost )
{
    CBucketTable::ERR err;

    // this entry had better not already be in the index
    COLLAssert( !plock->m_bucket.m_il.FMember( pentry ) );

    // pin the bucket on behalf of the entry to insert
    plock->m_bucket.m_cPin++;

    // insert this entry at the selected end of the current bucket
    if ( fNextMost )
    {
        plock->m_bucket.m_il.InsertAsNextMost( pentry );
    }
    else
    {
        plock->m_bucket.m_il.InsertAsPrevMost( pentry );
    }

    // try to update this bucket in the bucket table
    if ( ( err = m_bt.ErrReplaceEntry( &plock->m_lock, plock->m_bucket ) ) != CBucketTable::errSuccess )
    {
        // the only expected failure is that the bucket is not yet in the table
        COLLAssert( err == CBucketTable::errNoCurrentEntry );

        // the bucket does not yet exist, so try to insert it in the bucket table
        // (this helper also handles unwinding the pin/list insert on failure)
        return _ErrInsertEntry( plock, pentry );
    }

    // we succeeded in updating the bucket
    else
    {
        // set the current entry to the newly inserted entry
        plock->m_pentryPrev = NULL;
        plock->m_pentry = pentry;
        plock->m_pentryNext = NULL;

        return errSuccess;
    }
}
// deletes the entry corresponding to the key and entry pointer locked by the
// specified lock context. if there is no entry for this key, errNoCurrentEntry
// will be returned
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
ErrDeleteEntry( CLock* const plock )
{
    // there is a current entry
    if ( plock->m_pentry )
    {
        // save the current entry's prev and next pointers so that we can
        // recover our currency when it is deleted
        plock->m_pentryPrev = plock->m_bucket.m_il.Prev( plock->m_pentry );
        plock->m_pentryNext = plock->m_bucket.m_il.Next( plock->m_pentry );

        // delete the current entry from this bucket
        plock->m_bucket.m_il.Remove( plock->m_pentry );

        // unpin the bucket on behalf of this entry
        plock->m_bucket.m_cPin--;

        // update the bucket in the bucket table. it is OK if the bucket is
        // empty because empty buckets are deleted in _ErrMoveNext/_ErrMovePrev
        const CBucketTable::ERR err = m_bt.ErrReplaceEntry( &plock->m_lock, plock->m_bucket );
        COLLAssert( err == CBucketTable::errSuccess );

        // set our currency to no current entry
        plock->m_pentry = NULL;
        return errSuccess;
    }

    // there is no current entry
    else
    {
        // return no current entry
        return errNoCurrentEntry;
    }
}
// reserves room to insert a new entry corresponding to the key and entry
// pointer locked by the specified lock context. if room for the new entry
// cannot be reserved, errOutOfMemory will be returned. if reserving the new
// entry will cause the key space to become too large, errKeyRangeExceeded
// will be returned
//
// NOTE: once room is reserved, it must be unreserved via UnreserveEntry()
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
ErrReserveEntry( CLock* const plock )
{
    // pin the locked bucket (the pin is what holds the reservation)
    plock->m_bucket.m_cPin++;

    // we failed to update the pin count on the bucket in the index because the
    // bucket doesn't exist
    CBucketTable::ERR errBT;
    if ( ( errBT = m_bt.ErrReplaceEntry( &plock->m_lock, plock->m_bucket ) ) != CBucketTable::errSuccess )
    {
        COLLAssert( errBT == CBucketTable::errNoCurrentEntry );

        // insert this bucket in the bucket table
        ERR err;
        if ( ( err = _ErrInsertBucket( plock ) ) != errSuccess )
        {
            COLLAssert( err == errOutOfMemory || err == errKeyRangeExceeded );

            // we cannot insert the bucket so unpin the locked bucket and fail
            // the reservation
            plock->m_bucket.m_cPin--;
            return err;
        }
    }

    return errSuccess;
}
// removes a reservation made with ErrReserveEntry()
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
UnreserveEntry( CLock* const plock )
{
    // unpin the locked bucket
    plock->m_bucket.m_cPin--;

    // update the pin count on the bucket in the index. this cannot fail
    // because we know the bucket exists because it is pinned
    CBucketTable::ERR errBT = m_bt.ErrReplaceEntry( &plock->m_lock, plock->m_bucket );
    COLLAssert( errBT == CBucketTable::errSuccess );
}
// sets up the specified lock context in preparation for scanning all entries
// in the index by ascending key value, give or take the key uncertainty
//
// NOTE: this function will acquire a lock that must eventually be released
// via UnlockKeyPtr()
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
MoveBeforeFirst( CLock* const plock )
{
    // we will start scanning at the first bucket ID believed to be present in
    // the index (it could have been emptied by now)
    plock->m_bucket.m_id = m_idRangeFirst;

    // write lock this bucket ID in the bucket table
    m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );

    // fetch this bucket from the bucket table if it exists. if it doesn't
    // exist, the bucket will start out empty and have the above bucket ID
    plock->m_bucket.m_cPin = 0;
    plock->m_bucket.m_il.Empty();
    (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );

    // set our currency to be before the first entry in this bucket
    plock->m_pentryPrev = NULL;
    plock->m_pentry = NULL;
    plock->m_pentryNext = plock->m_bucket.m_il.PrevMost();
}
  766. // moves the specified lock context to the next key and entry pointer in the
  767. // index by ascending key value, give or take the key uncertainty. if the end
  768. // of the index is reached, errNoCurrentEntry is returned
  769. //
  770. // NOTE: this function will acquire a lock that must eventually be released
  771. // via UnlockKeyPtr()
  772. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  773. inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  774. ErrMoveNext( CLock* const plock )
  775. {
  776. // move to the next entry in this bucket
  777. plock->m_pentryPrev = NULL;
  778. plock->m_pentry = plock->m_pentry ?
  779. plock->m_bucket.m_il.Next( plock->m_pentry ) :
  780. plock->m_pentryNext;
  781. plock->m_pentryNext = NULL;
  782. // we still have no current entry
  783. if ( !plock->m_pentry )
  784. {
  785. // possibly advance to the next bucket
  786. return _ErrMoveNext( plock );
  787. }
  788. // we now have a current entry
  789. else
  790. {
  791. // we're done
  792. return errSuccess;
  793. }
  794. }
  795. // moves the specified lock context to the next key and entry pointer in the
  796. // index by descending key value, give or take the key uncertainty. if the
  797. // start of the index is reached, errNoCurrentEntry is returned
  798. //
  799. // NOTE: this function will acquire a lock that must eventually be released
  800. // via UnlockKeyPtr()
  801. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  802. inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  803. ErrMovePrev( CLock* const plock )
  804. {
  805. // move to the prev entry in this bucket
  806. plock->m_pentryNext = NULL;
  807. plock->m_pentry = plock->m_pentry ?
  808. plock->m_bucket.m_il.Prev( plock->m_pentry ) :
  809. plock->m_pentryPrev;
  810. plock->m_pentryPrev = NULL;
  811. // we still have no current entry
  812. if ( !plock->m_pentry )
  813. {
  814. // possibly advance to the prev bucket
  815. return _ErrMovePrev( plock );
  816. }
  817. // we now have a current entry
  818. else
  819. {
  820. // we're done
  821. return errSuccess;
  822. }
  823. }
  824. // sets up the specified lock context in preparation for scanning all entries
  825. // in the index by descending key value, give or take the key uncertainty
  826. //
  827. // NOTE: this function will acquire a lock that must eventually be released
  828. // via UnlockKeyPtr()
  829. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  830. inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  831. MoveAfterLast( CLock* const plock )
  832. {
  833. // we will start scanning at the last bucket ID believed to be present in
  834. // the index (it could have been emptied by now)
  835. plock->m_bucket.m_id = m_idRangeLast;
  836. // write lock this bucket ID in the bucket table
  837. m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );
  838. // fetch this bucket from the bucket table if it exists. if it doesn't
  839. // exist, the bucket will start out empty and have the above bucket ID
  840. plock->m_bucket.m_cPin = 0;
  841. plock->m_bucket.m_il.Empty();
  842. (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );
  843. // set our currency to be after the last entry in this bucket
  844. plock->m_pentryPrev = plock->m_bucket.m_il.NextMost();
  845. plock->m_pentry = NULL;
  846. plock->m_pentryNext = NULL;
  847. }
  848. // sets up the specified lock context in preparation for scanning all entries
  849. // greater than or approximately equal to the specified key and entry pointer
  850. // in the index by ascending key value, give or take the key uncertainty
  851. //
  852. // NOTE: this function will acquire a lock that must eventually be released
  853. // via UnlockKeyPtr()
  854. //
  855. // NOTE: even though this function may land between two valid entries in
  856. // the index, the currency will not be on one of those entries until
  857. // ErrMoveNext() or ErrMovePrev() has been called
  858. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  859. inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  860. MoveBeforeKeyPtr( const CKey& key, CEntry* const pentry, CLock* const plock )
  861. {
  862. // we will start scanning at the bucket ID formed from the given key and
  863. // entry pointer
  864. plock->m_bucket.m_id = _IdFromKeyPtr( key, pentry );
  865. // write lock this bucket ID in the bucket table
  866. m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );
  867. // fetch this bucket from the bucket table if it exists. if it doesn't
  868. // exist, the bucket will start out empty and have the above bucket ID
  869. plock->m_bucket.m_cPin = 0;
  870. plock->m_bucket.m_il.Empty();
  871. (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );
  872. // set our currency to be before the first entry in this bucket
  873. plock->m_pentryPrev = NULL;
  874. plock->m_pentry = NULL;
  875. plock->m_pentryNext = plock->m_bucket.m_il.PrevMost();
  876. }
  877. // sets up the specified lock context in preparation for scanning all entries
  878. // less than or approximately equal to the specified key and entry pointer
  879. // in the index by descending key value, give or take the key uncertainty
  880. //
  881. // NOTE: this function will acquire a lock that must eventually be released
  882. // via UnlockKeyPtr()
  883. //
  884. // NOTE: even though this function may land between two valid entries in
  885. // the index, the currency will not be on one of those entries until
  886. // ErrMoveNext() or ErrMovePrev() has been called
  887. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  888. inline void CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  889. MoveAfterKeyPtr( const CKey& key, CEntry* const pentry, CLock* const plock )
  890. {
  891. // we will start scanning at the bucket ID formed from the given key and
  892. // entry pointer
  893. plock->m_bucket.m_id = _IdFromKeyPtr( key, pentry );
  894. // write lock this bucket ID in the bucket table
  895. m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );
  896. // fetch this bucket from the bucket table if it exists. if it doesn't
  897. // exist, the bucket will start out empty and have the above bucket ID
  898. plock->m_bucket.m_cPin = 0;
  899. plock->m_bucket.m_il.Empty();
  900. (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );
  901. // set our currency to be after the last entry in this bucket
  902. plock->m_pentryPrev = plock->m_bucket.m_il.NextMost();
  903. plock->m_pentry = NULL;
  904. plock->m_pentryNext = NULL;
  905. }
  906. // transforms the given key and entry pointer into a bucket ID
  907. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  908. inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::CBucket::ID CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  909. _IdFromKeyPtr( const CKey& key, CEntry* const pentry ) const
  910. {
  911. // we compute the bucket ID such that each uncertainty range is split into
  912. // several buckets, each of which are indexed by the pointer. we do this
  913. // to provide maximum concurrency while accessing any particular range of
  914. // keys. the reason we use the pointer in the calculation is that we want
  915. // to minimize the number of times the user has to update the position of
  916. // an entry due to a key change yet we need some property of the entry
  917. // over which we can reproducibly hash
  918. const CBucket::ID iBucketKey = CBucket::ID( key >> m_shfKeyUncertainty );
  919. const CBucket::ID iBucketPtr = CBucket::ID( DWORD_PTR( pentry ) / sizeof( CEntry ) );
  920. return ( ( iBucketKey & m_maskBucketKey ) << m_shfBucketHash ) + ( iBucketPtr & m_maskBucketPtr );
  921. }
  922. // performs a wrap-around insensitive delta of a bucket ID by an offset
  923. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  924. inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::CBucket::ID CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  925. _DeltaId( const CBucket::ID id, const long did ) const
  926. {
  927. return ( id + CBucket::ID( did ) ) & m_maskBucketID;
  928. }
  929. // performs a wrap-around insensitive subtraction of two bucket IDs
  930. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  931. inline long CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  932. _SubId( const CBucket::ID id1, const CBucket::ID id2 ) const
  933. {
  934. // munge bucket IDs to fill the Most Significant Bit of a long so that we
  935. // can make a wrap-around aware subtraction
  936. const long lid1 = id1 << m_shfFillMSB;
  937. const long lid2 = id2 << m_shfFillMSB;
  938. // munge the result back into the same scale as the bucket IDs
  939. return CBucket::ID( ( lid1 - lid2 ) >> m_shfFillMSB );
  940. }
  941. // performs a wrap-around insensitive comparison of two bucket IDs
  942. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  943. inline long CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  944. _CmpId( const CBucket::ID id1, const CBucket::ID id2 ) const
  945. {
  946. // munge bucket IDs to fill the Most Significant Bit of a long so that we
  947. // can make a wrap-around aware comparison
  948. const long lid1 = id1 << m_shfFillMSB;
  949. const long lid2 = id2 << m_shfFillMSB;
  950. return lid1 - lid2;
  951. }
  952. // converts a pointer to an entry to a pointer to the invasive context
  953. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  954. inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::CInvasiveContext* CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  955. _PicFromPentry( CEntry* const pentry ) const
  956. {
  957. return (CInvasiveContext*)( (BYTE*)pentry + OffsetOfIC() );
  958. }
// tries to expand the bucket ID range by adding the new bucket ID. if this
// cannot be done without violating the range constraints, fFalse will be
// returned
//
// NOTE: caller must hold m_critUpdateIdRange (this routine reads and writes
// m_cidRange / m_idRangeFirst / m_idRangeLast non-atomically) -- TODO confirm
// against all call sites
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline BOOL CApproximateIndex< CKey, CEntry, OffsetOfIC >::
_FExpandIdRange( const CBucket::ID idNew )
{
    //  fetch the current ID range

    const long cidRange = m_cidRange;
    const CBucket::ID idFirst = m_idRangeFirst;
    const CBucket::ID idLast = m_idRangeLast;
    const long didRange = _SubId( idLast, idFirst );

    COLLAssert( didRange >= 0 );
    COLLAssert( didRange <= m_didRangeMost );
    COLLAssert( cidRange >= 0 );
    COLLAssert( cidRange <= m_didRangeMost + 1 );

    //  if there are no entries in the ID range then simply set the ID range to
    //  exactly contain this new bucket ID

    if ( !cidRange )
    {
        m_cidRange = 1;
        m_idRangeFirst = idNew;
        m_idRangeLast = idNew;

        return fTrue;
    }

    //  compute the valid range for the new first ID and new last ID. these
    //  points and the above points form four ranges in a circular number
    //  line containing all possible bucket IDs:
    //
    //  ( idFirstMic, idFirst )  Possible extension of the ID range
    //  [ idFirst, idLast ]      The current ID range
    //  ( idLast, idLastMax )    Possible extension of the ID range
    //  [ idLastMax, idFirstMic ]    Cannot be part of the ID range
    //
    //  these ranges will never overlap due to the restriction that the
    //  ID range cannot meet or exceed half the number of bucket IDs
    //
    //  NOTE: due to a quirk in 2's complement arithmetic where the 2's
    //  complement negative of the smallest negative number is itself, the
    //  inclusive range tests fail when idFirst == idLast and idNew ==
    //  idFirstMic == idLastMax or when idFirstMic == idLastMax and idNew ==
    //  idFirst == idLast. we have added special logic (the extra != 0
    //  comparisons below) to handle these cases correctly

    const CBucket::ID idFirstMic = _DeltaId( idFirst, -( m_didRangeMost - didRange + 1 ) );
    const CBucket::ID idLastMax = _DeltaId( idLast, m_didRangeMost - didRange + 1 );

    //  if the new bucket ID is already part of this ID range, no change
    //  is needed: just bump the count of IDs in the range

    if ( _CmpId( idFirstMic, idNew ) != 0 && _CmpId( idLastMax, idNew ) != 0 &&
        _CmpId( idFirst, idNew ) <= 0 && _CmpId( idNew, idLast ) <= 0 )
    {
        m_cidRange = cidRange + 1;

        return fTrue;
    }

    //  if the new bucket ID cannot be a part of this ID range (it falls in
    //  the forbidden [ idLastMax, idFirstMic ] arc), fail the expansion

    if ( _CmpId( idFirst, idNew ) != 0 && _CmpId( idLast, idNew ) != 0 &&
        _CmpId( idLastMax, idNew ) <= 0 && _CmpId( idNew, idFirstMic ) <= 0 )
    {
        return fFalse;
    }

    //  compute the new ID range including this new bucket ID: extend either
    //  the front or the back of the range, whichever side idNew falls on

    CBucket::ID idFirstNew = idFirst;
    CBucket::ID idLastNew = idLast;

    if ( _CmpId( idFirstMic, idNew ) < 0 && _CmpId( idNew, idFirst ) < 0 )
    {
        idFirstNew = idNew;
    }
    else
    {
        COLLAssert( _CmpId( idLast, idNew ) < 0 && _CmpId( idNew, idLastMax ) < 0 );

        idLastNew = idNew;
    }

    //  the new ID range should be larger than the old ID range and should
    //  include the new bucket ID

    COLLAssert( _CmpId( idFirstNew, idFirst ) <= 0 );
    COLLAssert( _CmpId( idLast, idLastNew ) <= 0 );
    COLLAssert( _SubId( idLastNew, idFirstNew ) > 0 );
    COLLAssert( _SubId( idLastNew, idFirstNew ) <= m_didRangeMost );
    COLLAssert( _CmpId( idFirstNew, idNew ) <= 0 );
    COLLAssert( _CmpId( idNew, idLastNew ) <= 0 );

    //  update the key range to include the new bucket ID

    m_cidRange = cidRange + 1;
    m_idRangeFirst = idFirstNew;
    m_idRangeLast = idLastNew;

    return fTrue;
}
// inserts a new bucket in the bucket table. on failure the ID-range
// accounting performed here is rolled back before returning
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
_ErrInsertBucket( CLock* const plock )
{
    //  try to update the bucket ID range and subrange of the index to include
    //  this new bucket ID (under the critical section guarding the range)

    m_critUpdateIdRange.Enter();
    const BOOL fRangeUpdated = _FExpandIdRange( plock->m_bucket.m_id );
    m_critUpdateIdRange.Leave();

    //  if the update failed, fail the bucket insertion

    if ( !fRangeUpdated )
    {
        return errKeyRangeExceeded;
    }

    //  the bucket does not yet exist, so try to insert it in the bucket table

    CBucketTable::ERR err;
    if ( ( err = m_bt.ErrInsertEntry( &plock->m_lock, plock->m_bucket ) ) != CBucketTable::errSuccess )
    {
        COLLAssert( err == CBucketTable::errOutOfMemory );

        //  we cannot do the insert so undo the range count bump made by
        //  _FExpandIdRange above and fail

        m_critUpdateIdRange.Enter();
        m_cidRange--;
        m_critUpdateIdRange.Leave();

        return errOutOfMemory;
    }

    return errSuccess;
}
  1073. // performs an entry insertion that must insert a new bucket in the bucket table
  1074. template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
  1075. inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
  1076. _ErrInsertEntry( CLock* const plock, CEntry* const pentry )
  1077. {
  1078. ERR err;
  1079. // insert this bucket in the bucket table
  1080. if ( ( err = _ErrInsertBucket( plock ) ) != errSuccess )
  1081. {
  1082. COLLAssert( err == errOutOfMemory || err == errKeyRangeExceeded );
  1083. // we cannot insert the bucket so undo the list insertion and fail
  1084. plock->m_bucket.m_il.Remove( pentry );
  1085. plock->m_bucket.m_cPin--;
  1086. return err;
  1087. }
  1088. // set the current entry to the newly inserted entry
  1089. plock->m_pentryPrev = NULL;
  1090. plock->m_pentry = pentry;
  1091. plock->m_pentryNext = NULL;
  1092. return errSuccess;
  1093. }
// performs a move next that possibly goes to the next bucket. we won't go to
// the next bucket if we are already at the last bucket ID
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
_ErrMoveNext( CLock* const plock )
{
    //  set our currency to be after the last entry in this bucket

    plock->m_pentryPrev = plock->m_bucket.m_il.NextMost();
    plock->m_pentry = NULL;
    plock->m_pentryNext = NULL;

    //  scan forward until we have a current entry or we are at or beyond the
    //  last bucket ID

    while ( !plock->m_pentry && _CmpId( plock->m_bucket.m_id, m_idRangeLast ) < 0 )
    {
        //  the current bucket is empty and isn't pinned, so garbage-collect it

        if ( !plock->m_bucket.m_cPin )
        {
            //  delete this empty bucket (if it exists)

            const CBucketTable::ERR err = m_bt.ErrDeleteEntry( &plock->m_lock );
            COLLAssert( err == CBucketTable::errSuccess ||
                err == CBucketTable::errNoCurrentEntry );

            //  advance the first bucket ID by one so that subsequent searches
            //  do not scan through this empty bucket unnecessarily

            m_critUpdateIdRange.Enter();
            if ( m_idRangeFirst == plock->m_bucket.m_id )
            {
                m_idRangeFirst = _DeltaId( m_idRangeFirst, 1 );
            }
            if ( err == CBucketTable::errSuccess )
            {
                m_cidRange--;
            }
            m_critUpdateIdRange.Leave();
        }

        //  unlock the current bucket ID in the bucket table

        m_bt.WriteUnlockKey( &plock->m_lock );

        //  this bucket ID may not be in the valid bucket ID range
        //  (the range is read here without the critical section; a stale
        //  read only costs an extra scan step, never correctness)

        if ( _CmpId( m_idRangeFirst, plock->m_bucket.m_id ) > 0 ||
            _CmpId( plock->m_bucket.m_id, m_idRangeLast ) > 0 )
        {
            //  we can get the critical section protecting the bucket ID range
            //  (only try -- never block here)

            if ( m_critUpdateIdRange.FTryEnter() )
            {
                //  this bucket ID is not in the valid bucket ID range

                if ( _CmpId( m_idRangeFirst, plock->m_bucket.m_id ) > 0 ||
                    _CmpId( plock->m_bucket.m_id, m_idRangeLast ) > 0 )
                {
                    //  go to the first valid bucket ID

                    plock->m_bucket.m_id = m_idRangeFirst;
                }

                //  this bucket ID is in the valid bucket ID range

                else
                {
                    //  advance to the next bucket ID

                    plock->m_bucket.m_id = _DeltaId( plock->m_bucket.m_id, 1 );
                }

                m_critUpdateIdRange.Leave();
            }

            //  we cannot get the critical section protecting the bucket ID range

            else
            {
                //  advance to the next bucket ID

                plock->m_bucket.m_id = _DeltaId( plock->m_bucket.m_id, 1 );
            }
        }

        //  this bucket may be in the valid bucket ID range

        else
        {
            //  advance to the next bucket ID

            plock->m_bucket.m_id = _DeltaId( plock->m_bucket.m_id, 1 );
        }

        //  write lock this bucket ID in the bucket table

        m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );

        //  fetch this bucket from the bucket table if it exists. if it doesn't
        //  exist, the bucket will start out empty and have the above bucket ID

        plock->m_bucket.m_cPin = 0;
        plock->m_bucket.m_il.Empty();
        (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );

        //  set our currency to be the first entry in this bucket

        plock->m_pentryPrev = NULL;
        plock->m_pentry = plock->m_bucket.m_il.PrevMost();
        plock->m_pentryNext = NULL;
    }

    //  return the status of our currency

    return plock->m_pentry ? errSuccess : errNoCurrentEntry;
}
// performs a move prev that goes possibly to the prev bucket. we won't go to
// the prev bucket if we are already at the first bucket ID
template< class CKey, class CEntry, PfnOffsetOf OffsetOfIC >
inline __TYPENAME CApproximateIndex< CKey, CEntry, OffsetOfIC >::ERR CApproximateIndex< CKey, CEntry, OffsetOfIC >::
_ErrMovePrev( CLock* const plock )
{
    //  set our currency to be before the first entry in this bucket

    plock->m_pentryPrev = NULL;
    plock->m_pentry = NULL;
    plock->m_pentryNext = plock->m_bucket.m_il.PrevMost();

    //  scan backward until we have a current entry or we are at or before the
    //  first bucket ID

    while ( !plock->m_pentry && _CmpId( m_idRangeFirst, plock->m_bucket.m_id ) < 0 )
    {
        //  the current bucket is empty and isn't pinned, so garbage-collect it

        if ( !plock->m_bucket.m_cPin )
        {
            //  delete this empty bucket (if it exists)

            const CBucketTable::ERR err = m_bt.ErrDeleteEntry( &plock->m_lock );
            COLLAssert( err == CBucketTable::errSuccess ||
                err == CBucketTable::errNoCurrentEntry );

            //  retreat the last bucket ID by one so that subsequent searches
            //  do not scan through this empty bucket unnecessarily

            m_critUpdateIdRange.Enter();
            if ( m_idRangeLast == plock->m_bucket.m_id )
            {
                m_idRangeLast = _DeltaId( m_idRangeLast, -1 );
            }
            if ( err == CBucketTable::errSuccess )
            {
                m_cidRange--;
            }
            m_critUpdateIdRange.Leave();
        }

        //  unlock the current bucket ID in the bucket table

        m_bt.WriteUnlockKey( &plock->m_lock );

        //  this bucket ID may not be in the valid bucket ID range
        //  (the range is read here without the critical section; a stale
        //  read only costs an extra scan step, never correctness)

        if ( _CmpId( m_idRangeFirst, plock->m_bucket.m_id ) > 0 ||
            _CmpId( plock->m_bucket.m_id, m_idRangeLast ) > 0 )
        {
            //  we can get the critical section protecting the bucket ID range
            //  (only try -- never block here)

            if ( m_critUpdateIdRange.FTryEnter() )
            {
                //  this bucket ID is not in the valid bucket ID range

                if ( _CmpId( m_idRangeFirst, plock->m_bucket.m_id ) > 0 ||
                    _CmpId( plock->m_bucket.m_id, m_idRangeLast ) > 0 )
                {
                    //  go to the last valid bucket ID

                    plock->m_bucket.m_id = m_idRangeLast;
                }

                //  this bucket ID is in the valid bucket ID range

                else
                {
                    //  retreat to the previous bucket ID

                    plock->m_bucket.m_id = _DeltaId( plock->m_bucket.m_id, -1 );
                }

                m_critUpdateIdRange.Leave();
            }

            //  we cannot get the critical section protecting the bucket ID range

            else
            {
                //  retreat to the previous bucket ID

                plock->m_bucket.m_id = _DeltaId( plock->m_bucket.m_id, -1 );
            }
        }

        //  this bucket may be in the valid bucket ID range

        else
        {
            //  retreat to the previous bucket ID

            plock->m_bucket.m_id = _DeltaId( plock->m_bucket.m_id, -1 );
        }

        //  write lock this bucket ID in the bucket table

        m_bt.WriteLockKey( plock->m_bucket.m_id, &plock->m_lock );

        //  fetch this bucket from the bucket table if it exists. if it doesn't
        //  exist, the bucket will start out empty and have the above bucket ID

        plock->m_bucket.m_cPin = 0;
        plock->m_bucket.m_il.Empty();
        (void)m_bt.ErrRetrieveEntry( &plock->m_lock, &plock->m_bucket );

        //  set our currency to be the last entry in this bucket

        plock->m_pentryPrev = NULL;
        plock->m_pentry = plock->m_bucket.m_il.NextMost();
        plock->m_pentryNext = NULL;
    }

    //  return the status of our currency

    return plock->m_pentry ? errSuccess : errNoCurrentEntry;
}
//  declares a concrete CApproximateIndex instantiation under the name
//  Typedef and supplies the out-of-line CBucketTable::CKeyEntry members
//  (hash / match / get / set) that the hash table requires per instantiation.
//
//  NOTE: no // comments may appear inside the macro itself -- backslash-
//  newline splicing happens before comment recognition, so a // would
//  comment out the rest of the macro

#define DECLARE_APPROXIMATE_INDEX( CKey, CEntry, OffsetOfIC, Typedef ) \
\
typedef CApproximateIndex< CKey, CEntry, OffsetOfIC > Typedef; \
\
inline ULONG_PTR Typedef::CBucketTable::CKeyEntry:: \
Hash( const CBucket::ID& id ) \
{ \
return id; \
} \
\
inline ULONG_PTR Typedef::CBucketTable::CKeyEntry:: \
Hash() const \
{ \
return m_entry.m_id; \
} \
\
inline BOOL Typedef::CBucketTable::CKeyEntry:: \
FEntryMatchesKey( const CBucket::ID& id ) const \
{ \
return m_entry.m_id == id; \
} \
\
inline void Typedef::CBucketTable::CKeyEntry:: \
SetEntry( const CBucket& bucket ) \
{ \
m_entry = bucket; \
} \
\
inline void Typedef::CBucketTable::CKeyEntry:: \
GetEntry( CBucket* const pbucket ) const \
{ \
*pbucket = m_entry; \
}
//////////////////////////////////////////////////////////////////////////////////////////
//  CPool
//
//  Implements a pool of objects that can be inserted and deleted quickly in arbitrary
//  order.
//
//  CObject         = class representing objects in the pool.  each class must contain
//                    storage for a CInvasiveContext for embedded pool state
//  OffsetOfIC      = inline function returning the offset of the CInvasiveContext
//                    contained in the CObject
template< class CObject, PfnOffsetOf OffsetOfIC >
class CPool
{
    public:
        //  class containing context needed per CObject: just the invasive
        //  list element used to link the object into a bucket's list

        class CInvasiveContext
        {
            public:
                CInvasiveContext() {}
                ~CInvasiveContext() {}

                static SIZE_T OffsetOfILE() { return OffsetOfIC() + OffsetOf( CInvasiveContext, m_ile ); }

            private:
                typename CInvasiveList< CObject, OffsetOfILE >::CElement m_ile;
        };

        //  API Error Codes

        enum ERR
        {
            errSuccess,
            errInvalidParameter,
            errOutOfMemory,
            errObjectNotFound,
            errOutOfObjects,
            errNoCurrentObject,
        };

        //  API Lock Context

        class CLock;

    public:
        //  ctor / dtor

        CPool();
        ~CPool();

        //  API

        ERR ErrInit( const double dblSpeedSizeTradeoff );
        void Term();

        void Insert( CObject* const pobj, const BOOL fMRU = fTrue );
        ERR ErrRemove( CObject** const ppobj, const BOOL fWait = fTrue, const BOOL fMRU = fTrue );

        void BeginPoolScan( CLock* const plock );
        ERR ErrGetNextObject( CLock* const plock, CObject** const ppobj );
        ERR ErrRemoveCurrentObject( CLock* const plock );
        void EndPoolScan( CLock* const plock );

        DWORD Cobject();
        DWORD CWaiter();
        DWORD CRemove();
        DWORD CRemoveWait();

    private:
        //  bucket used for containing objects in the pool.  one bucket is
        //  allocated per CPU (see ErrInit); each bucket has its own critical
        //  section so inserts/removes on different CPUs rarely contend.
        //  NOTE(review): m_rgbReserved presumably pads the bucket toward a
        //  cache-line size to avoid false sharing -- confirm against
        //  cbCacheLine before changing the layout

        class CBucket
        {
            public:
                CBucket() : m_crit( CLockBasicInfo( CSyncBasicInfo( "CPool::CBucket::m_crit" ), 0, 0 ) ) {}
                ~CBucket() {}

            public:
                CCriticalSection m_crit;
                CInvasiveList< CObject, CInvasiveContext::OffsetOfILE > m_il;
                BYTE m_rgbReserved[20];
        };

    public:
        //  API Lock Context

        class CLock
        {
            public:
                CLock() {}
                ~CLock() {}

            private:
                friend class CPool< CObject, OffsetOfIC >;

                CBucket* m_pbucket;
                CObject* m_pobj;
                CObject* m_pobjNext;
        };

    private:
        void _GetNextObject( CLock* const plock );
        static void* _PvMEMIAlign( void* const pv, const size_t cbAlign );
        static void* _PvMEMIUnalign( void* const pv );
        static void* _PvMEMAlloc( const size_t cbSize, const size_t cbAlign = 1 );
        static void _MEMFree( void* const pv );

    private:
        //  never updated after ErrInit

        DWORD m_cbucket;
        CBucket* m_rgbucket;
        BYTE m_rgbReserved1[24];

        //  commonly updated (semaphore count == number of objects in the pool)

        CSemaphore m_semObjectCount;
        DWORD m_cRemove;
        DWORD m_cRemoveWait;
        BYTE m_rgbReserved2[20];
};
//  ctor.  only constructs the object-count semaphore (starting at zero
//  available objects); all other members are initialized by ErrInit()
template< class CObject, PfnOffsetOf OffsetOfIC >
inline CPool< CObject, OffsetOfIC >::
CPool()
    : m_semObjectCount( CSyncBasicInfo( "CPool::m_semObjectCount" ) )
{
}
//  dtor.  intentionally does nothing -- all teardown happens in Term(),
//  which must be called explicitly (and is safe on a never-initialized pool)
template< class CObject, PfnOffsetOf OffsetOfIC >
inline CPool< CObject, OffsetOfIC >::
~CPool()
{
    //  nop
}
  1408. // initializes the pool using the given parameters. if the pool cannot be
  1409. // initialized, errOutOfMemory is returned
  1410. template< class CObject, PfnOffsetOf OffsetOfIC >
  1411. inline __TYPENAME CPool< CObject, OffsetOfIC >::ERR CPool< CObject, OffsetOfIC >::
  1412. ErrInit( const double dblSpeedSizeTradeoff )
  1413. {
  1414. // validate all parameters
  1415. if ( dblSpeedSizeTradeoff < 0.0 || dblSpeedSizeTradeoff > 1.0 )
  1416. {
  1417. return errInvalidParameter;
  1418. }
  1419. // allocate our bucket array, one per CPU, on a cache-line boundary
  1420. m_cbucket = OSSyncGetProcessorCount();
  1421. const SIZE_T cbrgbucket = sizeof( CBucket ) * m_cbucket;
  1422. if ( !( m_rgbucket = (CBucket*)_PvMEMAlloc( cbrgbucket, cbCacheLine ) ) )
  1423. {
  1424. return errOutOfMemory;
  1425. }
  1426. // setup our bucket array
  1427. for ( DWORD ibucket = 0; ibucket < m_cbucket; ibucket++ )
  1428. {
  1429. new( m_rgbucket + ibucket ) CBucket;
  1430. }
  1431. // init out stats
  1432. m_cRemove = 0;
  1433. m_cRemoveWait = 0;
  1434. return errSuccess;
  1435. }
  1436. // terminates the pool. this function can be called even if the pool has never
  1437. // been initialized or is only partially initialized
  1438. //
  1439. // NOTE: any data stored in the pool at this time will be lost!
  1440. template< class CObject, PfnOffsetOf OffsetOfIC >
  1441. inline void CPool< CObject, OffsetOfIC >::
  1442. Term()
  1443. {
  1444. // free our bucket array
  1445. if ( m_rgbucket )
  1446. {
  1447. for ( DWORD ibucket = 0; ibucket < m_cbucket; ibucket++ )
  1448. {
  1449. m_rgbucket[ ibucket ].~CBucket();
  1450. }
  1451. _MEMFree( m_rgbucket );
  1452. m_rgbucket = NULL;
  1453. }
  1454. // remove any free counts on our semaphore
  1455. while ( m_semObjectCount.FTryAcquire() )
  1456. {
  1457. }
  1458. }
  1459. // inserts the given object into the pool
  1460. template< class CObject, PfnOffsetOf OffsetOfIC >
  1461. inline void CPool< CObject, OffsetOfIC >::
  1462. Insert( CObject* const pobj, const BOOL fMRU )
  1463. {
  1464. // add the given object to the bucket for this CPU. we use one bucket per
  1465. // CPU to reduce cache sloshing. if we cannot lock the bucket for this CPU,
  1466. // we will try another bucket instead of blocking
  1467. DWORD ibucketBase;
  1468. DWORD ibucket;
  1469. ibucketBase = OSSyncGetCurrentProcessor();
  1470. ibucket = 0;
  1471. do {
  1472. CBucket* const pbucket = m_rgbucket + ( ibucketBase + ibucket++ ) % m_cbucket;
  1473. if ( ibucket < m_cbucket )
  1474. {
  1475. if ( !pbucket->m_crit.FTryEnter() )
  1476. {
  1477. continue;
  1478. }
  1479. }
  1480. else
  1481. {
  1482. pbucket->m_crit.Enter();
  1483. }
  1484. if ( !ibucket && fMRU )
  1485. {
  1486. pbucket->m_il.InsertAsPrevMost( pobj );
  1487. }
  1488. else
  1489. {
  1490. pbucket->m_il.InsertAsNextMost( pobj );
  1491. }
  1492. pbucket->m_crit.Leave();
  1493. break;
  1494. }
  1495. while ( fTrue );
  1496. // increment the object count
  1497. m_semObjectCount.Release();
  1498. }
  1499. // removes an object from the pool, optionally waiting until an object can be
  1500. // removed. if an object can be removed, errSuccess is returned. if an
  1501. // object cannot be immediately removed and waiting is not desired,
  1502. // errOutOfObjects will be returned
  1503. template< class CObject, PfnOffsetOf OffsetOfIC >
  1504. inline __TYPENAME CPool< CObject, OffsetOfIC >::ERR CPool< CObject, OffsetOfIC >::
  1505. ErrRemove( CObject** const ppobj, const BOOL fWait, const BOOL fMRU )
  1506. {
  1507. // reserve an object for removal from the pool by acquiring a count on the
  1508. // object count semaphore. if we get a count, we are allowed to remove an
  1509. // object from the pool. acquire a count in the requested mode, i.e. wait
  1510. // or do not wait for a count
  1511. if ( !m_semObjectCount.FTryAcquire() )
  1512. {
  1513. if ( !fWait )
  1514. {
  1515. return errOutOfObjects;
  1516. }
  1517. else
  1518. {
  1519. m_cRemoveWait++;
  1520. m_semObjectCount.FAcquire( cmsecInfinite );
  1521. }
  1522. }
  1523. // we are now entitled to an object from the pool, so scan all buckets for
  1524. // an object to remove until we find one. start with the bucket for the
  1525. // current CPU to reduce cache sloshing
  1526. DWORD ibucketBase;
  1527. DWORD ibucket;
  1528. ibucketBase = OSSyncGetCurrentProcessor();
  1529. ibucket = 0;
  1530. *ppobj = NULL;
  1531. do {
  1532. CBucket* const pbucket = m_rgbucket + ( ibucketBase + ibucket++ ) % m_cbucket;
  1533. if ( pbucket->m_il.FEmpty() )
  1534. {
  1535. continue;
  1536. }
  1537. if ( ibucket < m_cbucket )
  1538. {
  1539. if ( !pbucket->m_crit.FTryEnter() )
  1540. {
  1541. continue;
  1542. }
  1543. }
  1544. else
  1545. {
  1546. pbucket->m_crit.Enter();
  1547. }
  1548. if ( !pbucket->m_il.FEmpty() )
  1549. {
  1550. if ( !ibucket && fMRU )
  1551. {
  1552. *ppobj = pbucket->m_il.PrevMost();
  1553. }
  1554. else
  1555. {
  1556. *ppobj = pbucket->m_il.NextMost();
  1557. }
  1558. pbucket->m_il.Remove( *ppobj );
  1559. }
  1560. pbucket->m_crit.Leave();
  1561. }
  1562. while ( *ppobj == NULL );
  1563. // return the object
  1564. m_cRemove++;
  1565. return errSuccess;
  1566. }
  1567. // sets up the specified lock context in preparation for scanning all objects
  1568. // in the pool
  1569. //
  1570. // NOTE: this function will acquire a lock that must eventually be released
  1571. // via EndPoolScan()
  1572. template< class CObject, PfnOffsetOf OffsetOfIC >
  1573. inline void CPool< CObject, OffsetOfIC >::
  1574. BeginPoolScan( CLock* const plock )
  1575. {
  1576. // we will start in the first bucket
  1577. plock->m_pbucket = m_rgbucket;
  1578. // lock this bucket
  1579. plock->m_pbucket->m_crit.Enter();
  1580. // set out currency to be before the first object in this bucket
  1581. plock->m_pobj = NULL;
  1582. plock->m_pobjNext = plock->m_pbucket->m_il.PrevMost();
  1583. }
  1584. // retrieves the next object in the pool locked by the specified lock context.
  1585. // if there are no more objects to be scanned, errNoCurrentObject is returned
  1586. template< class CObject, PfnOffsetOf OffsetOfIC >
  1587. inline __TYPENAME CPool< CObject, OffsetOfIC >::ERR CPool< CObject, OffsetOfIC >::
  1588. ErrGetNextObject( CLock* const plock, CObject** const ppobj )
  1589. {
  1590. // move to the next object in this bucket
  1591. plock->m_pobj = plock->m_pobj ?
  1592. plock->m_pbucket->m_il.Next( plock->m_pobj ) :
  1593. plock->m_pobjNext;
  1594. plock->m_pobjNext = NULL;
  1595. // we still have no current object
  1596. if ( !plock->m_pobj )
  1597. {
  1598. // possibly advance to the next bucket
  1599. _GetNextObject( plock );
  1600. }
  1601. // return the current object, if any
  1602. *ppobj = plock->m_pobj;
  1603. return plock->m_pobj ? errSuccess : errNoCurrentObject;
  1604. }
  1605. // removes the current object in the pool locaked by the specified lock context
  1606. // from the pool. if there is no current object, errNoCurrentObject will be
  1607. // returned
  1608. template< class CObject, PfnOffsetOf OffsetOfIC >
  1609. inline __TYPENAME CPool< CObject, OffsetOfIC >::ERR CPool< CObject, OffsetOfIC >::
  1610. ErrRemoveCurrentObject( CLock* const plock )
  1611. {
  1612. // there is a current object and we can remove that object from the pool
  1613. //
  1614. // NOTE: we must get a count from the semaphore to remove an object from
  1615. // the pool
  1616. if ( plock->m_pobj && m_semObjectCount.FTryAcquire() )
  1617. {
  1618. // save the current object's next pointer so that we can recover our
  1619. // currency when it is deleted
  1620. plock->m_pobjNext = plock->m_pbucket->m_il.Next( plock->m_pobj );
  1621. // delete the current object from this bucket
  1622. plock->m_pbucket->m_il.Remove( plock->m_pobj );
  1623. // set our currency to no current object
  1624. plock->m_pobj = NULL;
  1625. return errSuccess;
  1626. }
  1627. // there is no current object
  1628. else
  1629. {
  1630. // return no current object
  1631. return errNoCurrentObject;
  1632. }
  1633. }
// ends the scan of all objects in the pool associated with the specified lock
// context and releases all locks held
template< class CObject, PfnOffsetOf OffsetOfIC >
inline void CPool< CObject, OffsetOfIC >::
EndPoolScan( CLock* const plock )
{
    // unlock the current bucket (BeginPoolScan() / _GetNextObject() always
    // leave exactly one bucket locked)
    plock->m_pbucket->m_crit.Leave();
}
// returns the current count of objects in the pool
template< class CObject, PfnOffsetOf OffsetOfIC >
inline DWORD CPool< CObject, OffsetOfIC >::
Cobject()
{
    // the number of objects in the pool is equal to the available count on the
    // object count semaphore (Insert() releases a count, ErrRemove() and
    // ErrRemoveCurrentObject() acquire one)
    return m_semObjectCount.CAvail();
}
// returns the number of waiters for objects in the pool
template< class CObject, PfnOffsetOf OffsetOfIC >
inline DWORD CPool< CObject, OffsetOfIC >::
CWaiter()
{
    // the number of waiters on the pool is equal to the waiter count on the
    // object count semaphore
    return m_semObjectCount.CWait();
}
// returns the number of times an object has been successfully removed from the
// pool
template< class CObject, PfnOffsetOf OffsetOfIC >
inline DWORD CPool< CObject, OffsetOfIC >::
CRemove()
{
    // NOTE: m_cRemove is incremented without interlocked operations, so this
    // statistic is approximate under concurrent access
    return m_cRemove;
}
// returns the number of waits that occurred while removing objects from the
// pool
template< class CObject, PfnOffsetOf OffsetOfIC >
inline DWORD CPool< CObject, OffsetOfIC >::
CRemoveWait()
{
    // NOTE: m_cRemoveWait is incremented without interlocked operations, so
    // this statistic is approximate under concurrent access
    return m_cRemoveWait;
}
// performs a move next that possibly goes to the next bucket. we won't go to
// the next bucket if we are already at the last bucket
template< class CObject, PfnOffsetOf OffsetOfIC >
inline void CPool< CObject, OffsetOfIC >::
_GetNextObject( CLock* const plock )
{
    // set our currency to be after the last object in this bucket
    plock->m_pobj = NULL;
    plock->m_pobjNext = NULL;
    // scan forward until we have a current object or we are at or beyond the
    // last bucket
    while ( !plock->m_pobj && plock->m_pbucket < m_rgbucket + m_cbucket - 1 )
    {
        // unlock the current bucket
        plock->m_pbucket->m_crit.Leave();
        // advance to the next bucket
        plock->m_pbucket++;
        // lock this bucket
        //
        // NOTE: exactly one bucket is locked at any time during a scan, so
        // EndPoolScan() can always release plock->m_pbucket's critical section
        plock->m_pbucket->m_crit.Enter();
        // set our currency to be the first object in this bucket
        plock->m_pobj = plock->m_pbucket->m_il.PrevMost();
        plock->m_pobjNext = NULL;
    }
}
  1701. // calculate the address of the aligned block and store its offset (for free)
  1702. template< class CObject, PfnOffsetOf OffsetOfIC >
  1703. inline void* CPool< CObject, OffsetOfIC >::
  1704. _PvMEMIAlign( void* const pv, const size_t cbAlign )
  1705. {
  1706. // round up to the nearest cache line
  1707. // NOTE: this formula always forces an offset of at least 1 byte
  1708. const ULONG_PTR ulp = ULONG_PTR( pv );
  1709. const ULONG_PTR ulpAligned = ( ( ulp + cbAlign ) / cbAlign ) * cbAlign;
  1710. const ULONG_PTR ulpOffset = ulpAligned - ulp;
  1711. COLLAssert( ulpOffset > 0 );
  1712. COLLAssert( ulpOffset <= cbAlign );
  1713. COLLAssert( ulpOffset == BYTE( ulpOffset ) ); // must fit into a single BYTE
  1714. // store the offset
  1715. BYTE *const pbAligned = (BYTE*)ulpAligned;
  1716. pbAligned[ -1 ] = BYTE( ulpOffset );
  1717. // return the aligned block
  1718. return (void*)pbAligned;
  1719. }
  1720. // retrieve the offset of the real block being freed
  1721. template< class CObject, PfnOffsetOf OffsetOfIC >
  1722. inline void* CPool< CObject, OffsetOfIC >::
  1723. _PvMEMIUnalign( void* const pv )
  1724. {
  1725. // read the offset of the real block
  1726. BYTE *const pbAligned = (BYTE*)pv;
  1727. const BYTE bOffset = pbAligned[ -1 ];
  1728. COLLAssert( bOffset > 0 );
  1729. // return the real unaligned block
  1730. return (void*)( pbAligned - bOffset );
  1731. }
  1732. template< class CObject, PfnOffsetOf OffsetOfIC >
  1733. inline void* CPool< CObject, OffsetOfIC >::
  1734. _PvMEMAlloc( const size_t cbSize, const size_t cbAlign )
  1735. {
  1736. void* const pv = new BYTE[ cbSize + cbAlign ];
  1737. if ( pv )
  1738. {
  1739. return _PvMEMIAlign( pv, cbAlign );
  1740. }
  1741. return NULL;
  1742. }
  1743. template< class CObject, PfnOffsetOf OffsetOfIC >
  1744. inline void CPool< CObject, OffsetOfIC >::
  1745. _MEMFree( void* const pv )
  1746. {
  1747. if ( pv )
  1748. {
  1749. delete [] _PvMEMIUnalign( pv );
  1750. }
  1751. }
////////////////////////////////////////////////////////////////////////////////
// CArray
//
// Implements a dynamically resized array of entries stored for efficient
// iteration.
//
// CEntry = class representing entries stored in the array
//
// NOTE: the user must provide CEntry::CEntry() and CEntry::operator=()
template< class CEntry >
class CArray
{
    public:
        // API Error Codes
        enum ERR
        {
            errSuccess,
            errInvalidParameter,
            errOutOfMemory,
        };
    public:
        CArray();
        CArray( const size_t centry, CEntry* const rgentry );
        ~CArray();
        ERR ErrClone( const CArray& array );
        ERR ErrSetSize( const size_t centry );
        ERR ErrSetEntry( const size_t ientry, const CEntry& entry );
        void SetEntry( const CEntry* const pentry, const CEntry& entry );
        size_t Size() const;
        const CEntry* Entry( const size_t ientry ) const;
    private:
        size_t m_centry;    // current number of entries in the array
        CEntry* m_rgentry;  // entry storage (caller-owned when m_fInPlace is set)
        BOOL m_fInPlace;    // set when m_rgentry must not be deleted by us
};
// constructs an empty array
template< class CEntry >
inline CArray< CEntry >::
CArray()
    : m_centry( 0 ),
      m_rgentry( NULL ),
      m_fInPlace( fTrue )
{
}
// constructs an array over an existing caller-owned entry buffer. the buffer
// is used in place and will not be freed by the array
template< class CEntry >
inline CArray< CEntry >::
CArray( const size_t centry, CEntry* const rgentry )
    : m_centry( centry ),
      m_rgentry( rgentry ),
      m_fInPlace( fTrue )
{
}
// destroys the array, freeing any entry storage the array owns
template< class CEntry >
inline CArray< CEntry >::
~CArray()
{
    // shrinking to zero releases our entry array (if not in place)
    ErrSetSize( 0 );
}
  1809. // clones an existing array
  1810. template< class CEntry >
  1811. inline __TYPENAME CArray< CEntry >::ERR CArray< CEntry >::
  1812. ErrClone( const CArray& array )
  1813. {
  1814. CEntry* rgentryNew = NULL;
  1815. size_t ientryCopy = 0;
  1816. if ( array.m_centry )
  1817. {
  1818. if ( !( rgentryNew = new CEntry[ array.m_centry ] ) )
  1819. {
  1820. return errOutOfMemory;
  1821. }
  1822. }
  1823. for ( ientryCopy = 0; ientryCopy < array.m_centry; ientryCopy++ )
  1824. {
  1825. rgentryNew[ ientryCopy ] = array.m_rgentry[ ientryCopy ];
  1826. }
  1827. if ( !m_fInPlace )
  1828. {
  1829. delete [] m_rgentry;
  1830. }
  1831. m_centry = array.m_centry;
  1832. m_rgentry = rgentryNew;
  1833. m_fInPlace = fFalse;
  1834. rgentryNew = NULL;
  1835. delete [] rgentryNew;
  1836. return errSuccess;
  1837. }
  1838. // sets the size of the array
  1839. template< class CEntry >
  1840. inline __TYPENAME CArray< CEntry >::ERR CArray< CEntry >::
  1841. ErrSetSize( const size_t centry )
  1842. {
  1843. CEntry* rgentryNew = NULL;
  1844. size_t ientryCopy = 0;
  1845. if ( Size() != centry )
  1846. {
  1847. if ( centry )
  1848. {
  1849. if ( !( rgentryNew = new CEntry[ centry ] ) )
  1850. {
  1851. return errOutOfMemory;
  1852. }
  1853. for ( ientryCopy = 0; ientryCopy < Size(); ientryCopy++ )
  1854. {
  1855. rgentryNew[ ientryCopy ] = *Entry( ientryCopy );
  1856. }
  1857. if ( !m_fInPlace )
  1858. {
  1859. delete [] m_rgentry;
  1860. }
  1861. m_centry = centry;
  1862. m_rgentry = rgentryNew;
  1863. m_fInPlace = fFalse;
  1864. rgentryNew = NULL;
  1865. }
  1866. else
  1867. {
  1868. if ( !m_fInPlace )
  1869. {
  1870. delete [] m_rgentry;
  1871. }
  1872. m_centry = 0;
  1873. m_rgentry = NULL;
  1874. m_fInPlace = fTrue;
  1875. }
  1876. }
  1877. delete [] rgentryNew;
  1878. return errSuccess;
  1879. }
  1880. // sets the Nth entry of the array, growing the array if necessary
  1881. template< class CEntry >
  1882. inline __TYPENAME CArray< CEntry >::ERR CArray< CEntry >::
  1883. ErrSetEntry( const size_t ientry, const CEntry& entry )
  1884. {
  1885. ERR err = errSuccess;
  1886. size_t centryReq = ientry + 1;
  1887. if ( Size() < centryReq )
  1888. {
  1889. if ( ( err = ErrSetSize( centryReq ) ) != errSuccess )
  1890. {
  1891. return err;
  1892. }
  1893. }
  1894. SetEntry( Entry( ientry ), entry );
  1895. return errSuccess;
  1896. }
// sets an existing entry of the array
//
// NOTE: pentry must point into this array's own storage (as returned by
// Entry()); the const_cast is what lets callers write through the read-only
// accessor
template< class CEntry >
inline void CArray< CEntry >::
SetEntry( const CEntry* const pentry, const CEntry& entry )
{
    *const_cast< CEntry* >( pentry ) = entry;
}
// returns the current size of the array
template< class CEntry >
inline size_t CArray< CEntry >::
Size() const
{
    return m_centry;
}
// returns a pointer to the Nth entry of the array or NULL if it is out of
// range (including when the array is empty)
template< class CEntry >
inline const CEntry* CArray< CEntry >::
Entry( const size_t ientry ) const
{
    return ientry < m_centry ? m_rgentry + ientry : NULL;
}
////////////////////////////////////////////////////////////////////////////////
// CTable
//
// Implements a table of entries identified by a key and stored for efficient
// lookup and iteration. The keys need not be unique.
//
// CKey = class representing keys used to identify entries
// CEntry = class representing entries stored in the table
//
// NOTE: the user must implement the CKeyEntry::Cmp() functions and provide
// CEntry::CEntry() and CEntry::operator=()
template< class CKey, class CEntry >
class CTable
{
    public:
        // an entry as the table sees it: the user's entry plus the key
        // comparison functions the table needs for sorting and searching
        class CKeyEntry
            : public CEntry
        {
            public:
                // Cmp() return values:
                //
                // < 0 this entry < specified entry / key
                // = 0 this entry = specified entry / key
                // > 0 this entry > specified entry / key
                int Cmp( const CKeyEntry& keyentry ) const;
                int Cmp( const CKey& key ) const;
        };
        // API Error Codes
        enum ERR
        {
            errSuccess,
            errInvalidParameter,
            errOutOfMemory,
            errKeyChange,   // the update would move the entry within the table
        };
    public:
        CTable();
        CTable( const size_t centry, CEntry* const rgentry, const BOOL fInOrder = fFalse );
        ERR ErrLoad( const size_t centry, const CEntry* const rgentry );
        ERR ErrClone( const CTable& table );
        ERR ErrUpdateEntry( const CEntry* const pentry, const CEntry& entry );
        size_t Size() const;
        const CEntry* Entry( const size_t ientry ) const;
        const CEntry* SeekLT( const CKey& key ) const;
        const CEntry* SeekLE( const CKey& key ) const;
        const CEntry* SeekEQ( const CKey& key ) const;
        const CEntry* SeekHI( const CKey& key ) const;
        const CEntry* SeekGE( const CKey& key ) const;
        const CEntry* SeekGT( const CKey& key ) const;
    private:
        // member function pointer used to select the search strategy at load
        // time (linear for small tables, binary for large ones)
        typedef size_t (CTable< CKey, CEntry >::*PfnSearch)( const CKey& key, const BOOL fHigh ) const;
    private:
        const CKeyEntry& _Entry( const size_t ikeyentry ) const;
        void _SetEntry( const size_t ikeyentry, const CKeyEntry& keyentry );
        void _SwapEntry( const size_t ikeyentry1, const size_t ikeyentry2 );
        size_t _LinearSearch( const CKey& key, const BOOL fHigh ) const;
        size_t _BinarySearch( const CKey& key, const BOOL fHigh ) const;
        void _InsertionSort( const size_t ikeyentryMinIn, const size_t ikeyentryMaxIn );
        void _QuickSort( const size_t ikeyentryMinIn, const size_t ikeyentryMaxIn );
    private:
        CArray< CKeyEntry > m_arrayKeyEntry;    // sorted key/entry storage
        PfnSearch m_pfnSearch;                  // selected search strategy
};
// constructs an empty table. a linear search is the cheapest strategy for an
// empty table
template< class CKey, class CEntry >
inline CTable< CKey, CEntry >::
CTable()
    : m_pfnSearch( _LinearSearch )
{
}
// loads the table over an existing array of entries. if the entries are not
// in order then they will be sorted in place
template< class CKey, class CEntry >
inline CTable< CKey, CEntry >::
CTable( const size_t centry, CEntry* const rgentry, const BOOL fInOrder )
    : m_arrayKeyEntry( centry, reinterpret_cast< CKeyEntry* >( rgentry ) )
{
    // compute log2( Size() ) + 1 as an estimate of binary search cost
    size_t n;
    size_t log2n;
    for ( n = Size(), log2n = 0; n; n = n / 2, log2n++ );
    // pick the cheaper search strategy: binary search costs on the order of
    // log2n comparisons per lookup, linear search on the order of Size()
    if ( 2 * log2n < Size() )
    {
        if ( !fInOrder )
        {
            _QuickSort( 0, Size() );
        }
        m_pfnSearch = _BinarySearch;
    }
    else
    {
        if ( !fInOrder )
        {
            _InsertionSort( 0, Size() );
        }
        m_pfnSearch = _LinearSearch;
    }
}
// loads an array of entries into the table. additional entries may also be
// loaded into the table via this function
template< class CKey, class CEntry >
inline __TYPENAME CTable< CKey, CEntry >::ERR CTable< CKey, CEntry >::
ErrLoad( const size_t centry, const CEntry* const rgentry )
{
    CArray< CKeyEntry >::ERR err = CArray< CKeyEntry >::errSuccess;
    size_t ientry = 0;
    size_t ientryMin = Size();
    size_t ientryMax = Size() + centry;
    const CKeyEntry* rgkeyentry = reinterpret_cast< const CKeyEntry* >( rgentry );
    // grow the underlying array to make room for the new entries
    if ( ( err = m_arrayKeyEntry.ErrSetSize( Size() + centry ) ) != CArray< CKeyEntry >::errSuccess )
    {
        COLLAssert( err == CArray< CKeyEntry >::errOutOfMemory );
        return errOutOfMemory;
    }
    // append the new entries after the existing ones
    for ( ientry = ientryMin; ientry < ientryMax; ientry++ )
    {
        err = m_arrayKeyEntry.ErrSetEntry( ientry, rgkeyentry[ ientry - ientryMin ] );
        COLLAssert( err == CArray< CKeyEntry >::errSuccess );
    }
    // compute log2( Size() ) + 1 (same heuristic as the in-place constructor)
    size_t n;
    size_t log2n;
    for ( n = Size(), log2n = 0; n; n = n / 2, log2n++ );
    // re-sort the whole table: quicksort for large loads, insertion sort for
    // small ones (cheap when most of the table is already in order)
    if ( 2 * log2n < centry )
    {
        _QuickSort( 0, Size() );
    }
    else
    {
        _InsertionSort( 0, Size() );
    }
    // select the search strategy appropriate for the new table size
    if ( 2 * log2n < Size() )
    {
        m_pfnSearch = _BinarySearch;
    }
    else
    {
        m_pfnSearch = _LinearSearch;
    }
    return errSuccess;
}
  2056. // clones an existing table
  2057. template< class CKey, class CEntry >
  2058. inline __TYPENAME CTable< CKey, CEntry >::ERR CTable< CKey, CEntry >::
  2059. ErrClone( const CTable& table )
  2060. {
  2061. CArray< CKeyEntry >::ERR err = CArray< CKeyEntry >::errSuccess;
  2062. if ( ( err = m_arrayKeyEntry.ErrClone( table.m_arrayKeyEntry ) ) != CArray< CKeyEntry >::errSuccess )
  2063. {
  2064. COLLAssert( err == CArray< CKeyEntry >::errOutOfMemory );
  2065. return errOutOfMemory;
  2066. }
  2067. m_pfnSearch = table.m_pfnSearch;
  2068. return errSuccess;
  2069. }
  2070. // updates an existing entry in the table as long as it doesn't change
  2071. // that entry's position in the table
  2072. template< class CKey, class CEntry >
  2073. inline __TYPENAME CTable< CKey, CEntry >::ERR CTable< CKey, CEntry >::
  2074. ErrUpdateEntry( const CEntry* const pentry, const CEntry& entry )
  2075. {
  2076. ERR err = errSuccess;
  2077. const CKeyEntry* pkeyentry = reinterpret_cast< const CKeyEntry* >( pentry );
  2078. const CKeyEntry& keyentry = reinterpret_cast< const CKeyEntry& >( entry );
  2079. if ( !pkeyentry->Cmp( keyentry ) )
  2080. {
  2081. m_arrayKeyEntry.SetEntry( pkeyentry, keyentry );
  2082. err = errSuccess;
  2083. }
  2084. else
  2085. {
  2086. err = errKeyChange;
  2087. }
  2088. return err;
  2089. }
// returns the current size of the table
template< class CKey, class CEntry >
inline size_t CTable< CKey, CEntry >::
Size() const
{
    return m_arrayKeyEntry.Size();
}
// returns a pointer to the Nth entry of the table or NULL if it is out of
// range (including when the table is empty)
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
Entry( const size_t ientry ) const
{
    return static_cast< const CEntry* >( m_arrayKeyEntry.Entry( ientry ) );
}
// the following group of functions return a pointer to an entry whose key
// matches the specified key according to the given criteria:
//
// Suffix Description Positional bias
//
// LT less than high
// LE less than or equal to low
// EQ equal to low
// HI equal to high
// GE greater than or equal to high
// GT greater than low
//
// if no matching entry was found then NULL will be returned
//
// "positional bias" means that the function will land on a matching entry
// whose position is closest to the low / high end of the table
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
SeekLT( const CKey& key ) const
{
    // low-bias search: ikeyentry is the first entry >= key (or Size())
    const size_t ikeyentry = (this->*m_pfnSearch)( key, fFalse );
    if ( ikeyentry < Size() &&
        _Entry( ikeyentry ).Cmp( key ) < 0 )
    {
        return Entry( ikeyentry );
    }
    else
    {
        // the entry just before the search position is the last entry < key.
        // when ikeyentry is 0 this computes Entry( (size_t)-1 ), which is out
        // of range and therefore safely returns NULL
        return Entry( ikeyentry - 1 );
    }
}
// seeks the lowest-positioned entry <= key (see the Seek table above)
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
SeekLE( const CKey& key ) const
{
    // low-bias search: ikeyentry is the first entry >= key (or Size())
    const size_t ikeyentry = (this->*m_pfnSearch)( key, fFalse );
    if ( ikeyentry < Size() &&
        _Entry( ikeyentry ).Cmp( key ) <= 0 )
    {
        // the entry found equals the key
        return Entry( ikeyentry );
    }
    else
    {
        // otherwise the entry just before it is the last entry < key. when
        // ikeyentry is 0 this computes Entry( (size_t)-1 ), which safely
        // returns NULL
        return Entry( ikeyentry - 1 );
    }
}
// seeks the lowest-positioned entry equal to key (see the Seek table above)
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
SeekEQ( const CKey& key ) const
{
    // low-bias search: ikeyentry is the first entry >= key; it is a match
    // only if it compares equal to the key
    const size_t ikeyentry = (this->*m_pfnSearch)( key, fFalse );
    if ( ikeyentry < Size() &&
        _Entry( ikeyentry ).Cmp( key ) == 0 )
    {
        return Entry( ikeyentry );
    }
    else
    {
        return NULL;
    }
}
// seeks the highest-positioned entry equal to key (see the Seek table above)
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
SeekHI( const CKey& key ) const
{
    // high-bias search: ikeyentry is the first entry > key; the entry just
    // before it, if equal to the key, is the highest-positioned match
    const size_t ikeyentry = (this->*m_pfnSearch)( key, fTrue );
    if ( ikeyentry > 0 &&
        _Entry( ikeyentry - 1 ).Cmp( key ) == 0 )
    {
        return Entry( ikeyentry - 1 );
    }
    else
    {
        return NULL;
    }
}
// seeks the highest-positioned entry equal to key, else the first entry > key
// (see the Seek table above)
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
SeekGE( const CKey& key ) const
{
    // high-bias search: ikeyentry is the first entry > key
    const size_t ikeyentry = (this->*m_pfnSearch)( key, fTrue );
    if ( ikeyentry > 0 &&
        _Entry( ikeyentry - 1 ).Cmp( key ) == 0 )
    {
        // step back onto the highest-positioned entry equal to the key
        return Entry( ikeyentry - 1 );
    }
    else
    {
        // otherwise the first entry > key is the answer (NULL past the end)
        return Entry( ikeyentry );
    }
}
// seeks the lowest-positioned entry > key (see the Seek table above)
template< class CKey, class CEntry >
inline const CEntry* CTable< CKey, CEntry >::
SeekGT( const CKey& key ) const
{
    // high-bias search lands on the first entry > key; NULL past the end
    return Entry( (this->*m_pfnSearch)( key, fTrue ) );
}
// returns the key/entry view of the Nth entry
//
// NOTE: CArray::Entry() returns NULL for an out-of-range index, so callers
// must only pass valid indexes to avoid a NULL dereference here
template< class CKey, class CEntry >
inline const __TYPENAME CTable< CKey, CEntry >::CKeyEntry& CTable< CKey, CEntry >::
_Entry( const size_t ikeyentry ) const
{
    return *( m_arrayKeyEntry.Entry( ikeyentry ) );
}
// overwrites the Nth key/entry of the table
template< class CKey, class CEntry >
inline void CTable< CKey, CEntry >::
_SetEntry( const size_t ikeyentry, const CKeyEntry& keyentry )
{
    m_arrayKeyEntry.SetEntry( m_arrayKeyEntry.Entry( ikeyentry ), keyentry );
}
// exchanges two entries of the table (used by the sort routines)
template< class CKey, class CEntry >
inline void CTable< CKey, CEntry >::
_SwapEntry( const size_t ikeyentry1, const size_t ikeyentry2 )
{
    CKeyEntry keyentryT;
    keyentryT = _Entry( ikeyentry1 );
    _SetEntry( ikeyentry1, _Entry( ikeyentry2 ) );
    _SetEntry( ikeyentry2, keyentryT );
}
  2222. template< class CKey, class CEntry >
  2223. inline size_t CTable< CKey, CEntry >::
  2224. _LinearSearch( const CKey& key, const BOOL fHigh ) const
  2225. {
  2226. for ( size_t ikeyentry = 0; ikeyentry < Size(); ikeyentry++ )
  2227. {
  2228. const int cmp = _Entry( ikeyentry ).Cmp( key );
  2229. if ( !( cmp < 0 || cmp == 0 && fHigh ) )
  2230. {
  2231. break;
  2232. }
  2233. }
  2234. return ikeyentry;
  2235. }
// binary search of the sorted entry array. returns the index of the first
// entry greater than the key (fHigh) or the first entry greater than or
// equal to the key (!fHigh), or Size() if no such entry exists
template< class CKey, class CEntry >
inline size_t CTable< CKey, CEntry >::
_BinarySearch( const CKey& key, const BOOL fHigh ) const
{
    size_t ikeyentryMin = 0;
    size_t ikeyentryMax = Size();
    while ( ikeyentryMin < ikeyentryMax )
    {
        // overflow-safe midpoint
        const size_t ikeyentryMid = ikeyentryMin + ( ikeyentryMax - ikeyentryMin ) / 2;
        const int cmp = _Entry( ikeyentryMid ).Cmp( key );
        if ( cmp < 0 || cmp == 0 && fHigh )
        {
            // the answer lies strictly above the midpoint
            ikeyentryMin = ikeyentryMid + 1;
        }
        else
        {
            // the midpoint may itself be the answer
            ikeyentryMax = ikeyentryMid;
        }
    }
    return ikeyentryMax;
}
// sorts the half-open range [ikeyentryMinIn, ikeyentryMaxIn) of the table
// with an insertion sort. efficient for small or mostly-sorted ranges
template< class CKey, class CEntry >
inline void CTable< CKey, CEntry >::
_InsertionSort( const size_t ikeyentryMinIn, const size_t ikeyentryMaxIn )
{
    size_t ikeyentryLast;
    size_t ikeyentryFirst;
    CKeyEntry keyentryKey;
    // walk adjacent pairs; whenever a pair is out of order, sink the second
    // entry back into the sorted prefix
    for ( ikeyentryFirst = ikeyentryMinIn, ikeyentryLast = ikeyentryMinIn + 1;
        ikeyentryLast < ikeyentryMaxIn;
        ikeyentryFirst = ikeyentryLast++ )
    {
        if ( _Entry( ikeyentryFirst ).Cmp( _Entry( ikeyentryLast ) ) > 0 )
        {
            keyentryKey = _Entry( ikeyentryLast );
            _SetEntry( ikeyentryLast, _Entry( ikeyentryFirst ) );
            // shift larger entries up one slot until the insertion point is
            // found
            //
            // NOTE: when ikeyentryMinIn is 0 and the key sinks to the front,
            // ikeyentryFirst post-decrements past 0 and wraps to (size_t)-1;
            // the "+ 1" below then yields slot 0, so the wrap is deliberate
            // and benign
            while ( ikeyentryFirst-- >= ikeyentryMinIn + 1 &&
                _Entry( ikeyentryFirst ).Cmp( keyentryKey ) > 0 )
            {
                _SetEntry( ikeyentryFirst + 1, _Entry( ikeyentryFirst ) );
            }
            _SetEntry( ikeyentryFirst + 1, keyentryKey );
        }
    }
}
// sorts the half-open range [ikeyentryMinIn, ikeyentryMaxIn) of the table
// with a quicksort: median-of-three pivot selection, insertion sort for
// small partitions, and an explicit partition stack to bound recursion
template< class CKey, class CEntry >
inline void CTable< CKey, CEntry >::
_QuickSort( const size_t ikeyentryMinIn, const size_t ikeyentryMaxIn )
{
    // quicksort cutoff
    const size_t ckeyentryMin = 32;
    // partition stack (used to reduce levels of recursion)
    const size_t cpartMax = 16;
    size_t cpart = 0;
    struct
    {
        size_t ikeyentryMin;
        size_t ikeyentryMax;
    } rgpart[ cpartMax ];
    // current partition = partition passed in arguments
    size_t ikeyentryMin = ikeyentryMinIn;
    size_t ikeyentryMax = ikeyentryMaxIn;
    // _QuickSort current partition
    for ( ; ; )
    {
        // if this partition is small enough, insertion sort it
        if ( ikeyentryMax - ikeyentryMin < ckeyentryMin )
        {
            _InsertionSort( ikeyentryMin, ikeyentryMax );
            // if there are no more partitions to sort, we're done
            if ( !cpart )
            {
                break;
            }
            // pop a partition off the stack and make it the current partition
            ikeyentryMin = rgpart[ --cpart ].ikeyentryMin;
            ikeyentryMax = rgpart[ cpart ].ikeyentryMax;
            continue;
        }
        // determine divisor by sorting the first, middle, and last entries and
        // taking the resulting middle entry as the divisor
        size_t ikeyentryFirst = ikeyentryMin;
        size_t ikeyentryMid = ikeyentryMin + ( ikeyentryMax - ikeyentryMin ) / 2;
        size_t ikeyentryLast = ikeyentryMax - 1;
        if ( _Entry( ikeyentryFirst ).Cmp( _Entry( ikeyentryMid ) ) > 0 )
        {
            _SwapEntry( ikeyentryFirst, ikeyentryMid );
        }
        if ( _Entry( ikeyentryFirst ).Cmp( _Entry( ikeyentryLast ) ) > 0 )
        {
            _SwapEntry( ikeyentryFirst, ikeyentryLast );
        }
        if ( _Entry( ikeyentryMid ).Cmp( _Entry( ikeyentryLast ) ) > 0 )
        {
            _SwapEntry( ikeyentryMid, ikeyentryLast );
        }
        // sort large partition into two smaller partitions (<=, >)
        //
        // NOTE: the divisor is the entry at ikeyentryMin (placed there by the
        // median-of-three swaps above)
        do {
            // advance past all entries <= the divisor
            while ( ikeyentryFirst <= ikeyentryLast &&
                _Entry( ikeyentryFirst ).Cmp( _Entry( ikeyentryMin ) ) <= 0 )
            {
                ikeyentryFirst++;
            }
            // advance past all entries > the divisor
            while ( ikeyentryFirst <= ikeyentryLast &&
                _Entry( ikeyentryLast ).Cmp( _Entry( ikeyentryMin ) ) > 0 )
            {
                ikeyentryLast--;
            }
            // if we have found a pair to swap, swap them and continue
            if ( ikeyentryFirst < ikeyentryLast )
            {
                _SwapEntry( ikeyentryFirst++, ikeyentryLast-- );
            }
        }
        while ( ikeyentryFirst <= ikeyentryLast );
        // move the divisor to the end of the <= partition
        _SwapEntry( ikeyentryMin, ikeyentryLast );
        // determine the limits of the smaller and larger sub-partitions
        size_t ikeyentrySmallMin;
        size_t ikeyentrySmallMax;
        size_t ikeyentryLargeMin;
        size_t ikeyentryLargeMax;
        if ( ikeyentryMax - ikeyentryFirst == 0 )
        {
            ikeyentryLargeMin = ikeyentryMin;
            ikeyentryLargeMax = ikeyentryLast;
            ikeyentrySmallMin = ikeyentryLast;
            ikeyentrySmallMax = ikeyentryMax;
        }
        else if ( ikeyentryMax - ikeyentryFirst > ikeyentryFirst - ikeyentryMin )
        {
            ikeyentrySmallMin = ikeyentryMin;
            ikeyentrySmallMax = ikeyentryFirst;
            ikeyentryLargeMin = ikeyentryFirst;
            ikeyentryLargeMax = ikeyentryMax;
        }
        else
        {
            ikeyentryLargeMin = ikeyentryMin;
            ikeyentryLargeMax = ikeyentryFirst;
            ikeyentrySmallMin = ikeyentryFirst;
            ikeyentrySmallMax = ikeyentryMax;
        }
        // push the larger sub-partition or recurse if the stack is full
        if ( cpart < cpartMax )
        {
            rgpart[ cpart ].ikeyentryMin = ikeyentryLargeMin;
            rgpart[ cpart++ ].ikeyentryMax = ikeyentryLargeMax;
        }
        else
        {
            _QuickSort( ikeyentryLargeMin, ikeyentryLargeMax );
        }
        // set our current partition to be the smaller sub-partition
        ikeyentryMin = ikeyentrySmallMin;
        ikeyentryMax = ikeyentrySmallMax;
    }
}
  2396. }; // namespace COLL
  2397. using namespace COLL;
  2398. #endif // _COLLECTION_HXX_INCLUDED