/*++

Copyright (c) 1998-2000 Microsoft Corporation

Module Name:

    LKRhash.h

Abstract:

    Declares LKRhash: a fast, scalable, cache- and MP-friendly hash table

Author:

    Paul (Per-Ake) Larson, palarson@microsoft.com, July 1997
    Murali R. Krishnan (MuraliK)
    George V. Reilly (GeorgeRe) 06-Jan-1998

Environment:

    Win32 - User Mode

Project:

    Internet Information Server RunTime Library

Revision History:

    10/01/1998 - Change name from LKhash to LKRhash

--*/

#undef  LKR_OLD_SEGMENT
#define LKR_SIGS_NODES
#define LKR_TEST_SIGNATURE
#undef  LKR_COUNTDOWN
#define LKR_EQUALKEYS
#define LKR_DEPRECATED_ITERATORS
#define LKR_BITSCRAMBLE_SUBTABLE_INDEX
#define LKR_SUBTABLE_BITMASK
#define LKR_SUBTABLE_MASK

#ifndef __LKRHASH_H__
#define __LKRHASH_H__

//=====================================================================
// The class CLKRLinearHashTable defined in this file provides dynamic hash
// tables, i.e. tables that grow and shrink dynamically with
// the number of records in the table.
// The basic method used is linear hashing, as explained in:
//
//   P.-A. Larson, Dynamic Hash Tables, Comm. of the ACM, 31, 4 (1988)
//
// This version has the following characteristics:
// - It is thread-safe and uses spin locks for synchronization.
// - It was designed to support very high rates of concurrent
//   operations (inserts/deletes/lookups).  It achieves this by
//   (a) partitioning a CLKRHashTable into a collection of
//       CLKRLinearHashTables to reduce contention on the global table lock.
//   (b) minimizing the hold time on a table lock, preferring to lock
//       down a bucket chain instead.
// - The design is L1 cache-conscious.  See CNodeClump.
// - It is designed for sets varying in size from a dozen
//   elements to several million.
//
// Main classes:
//   CLKRLinearHashTable: thread-safe linear hash table
//   CLKRHashTable:       collection of CLKRLinearHashTables
//   CTypedHashTable:     typesafe wrapper for CLKRHashTable
//
//
// Paul Larson, palarson@microsoft.com, July 1997
//   Original implementation with input from Murali R. Krishnan,
//   muralik@microsoft.com.
//
// George V. Reilly, georgere@microsoft.com, Dec 1997-Jan 1998
//   Massive cleanup and rewrite.  Added templates.
//=====================================================================

// 1) Linear Hashing
// ------------------
//
// Linear hash tables grow and shrink dynamically with the number of
// records in the table.  The growth or shrinkage is smooth: logically,
// one bucket at a time but physically in larger increments
// (64 buckets).  An insertion (deletion) may cause an expansion
// (contraction) of the table.  This causes relocation of a small number
// of records (at most one bucket worth).  All operations (insert,
// delete, lookup) take constant expected time, regardless of the
// current size or the growth of the table.
//
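// [Illustration only -- added for exposition; not part of the original
// header.]  A minimal sketch of the classic linear-hashing address
// calculation from the Larson paper, with invented names.  A key first
// hashes into the h0 range; buckets below the expansion pointer have
// already been split this round, so their keys rehash with one more bit:
//
//     inline DWORD LinearHashAddress(
//         DWORD dwHash,      // hash of the key
//         DWORD cBucketsH0,  // 2^level: bucket count at start of round
//         DWORD iExpansion)  // next bucket due to be split
//     {
//         DWORD iBucket = dwHash % cBucketsH0;          // h0
//         if (iBucket < iExpansion)
//             iBucket = dwHash % (2 * cBucketsH0);      // h1: one more bit
//         return iBucket;
//     }
//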
// 2) LKR extensions to Linear hash table
// --------------------------------------
//
// Larson-Krishnan-Reilly extensions to Linear hash tables for multiprocessor
// scalability and improved cache performance.
//
// Traditional implementations of linear hash tables use one global lock
// to prevent interference between concurrent operations
// (insert/delete/lookup) on the table.  The single lock easily becomes
// the bottleneck in SMP scenarios when multiple threads are used.
//
// Traditionally, a (hash) bucket is implemented as a chain of
// single-item nodes.  Every operation results in chasing down a chain
// looking for an item.  However, pointer chasing is very slow on modern
// systems because almost every jump results in a cache miss.  L2 (or L3)
// cache misses are very expensive in missed CPU cycles and the cost is
// increasing (going to 100s of cycles in the future).
//
// LKR extensions offer
//    1) Partitioning (by hashing) of records among multiple subtables.
//       Each subtable has locks but there is no global lock.  Each
//       subtable receives a much lower rate of operations, resulting in
//       fewer conflicts.
//
//    2) Improved cache locality by grouping keys and their hash values
//       into contiguous chunks that fit exactly into one (or a few)
//       cache lines.
//
// Specifically the implementation that exists here achieves this using
// the following techniques.
//
// Class CLKRHashTable is the top-level data structure that dynamically
// creates m_cSubTables linear hash tables.  The CLKRLinearHashTables act as
// the subtables to which items and accesses are fanned out.  A good
// hash function multiplexes requests uniformly to various subtables,
// thus minimizing traffic to any single subtable.  The implementation
// uses a home-grown version of bounded spinlocks, that is, a thread
// does not spin on a lock indefinitely, instead yielding after a
// predetermined number of loops.
//
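// [Illustration only -- added for exposition; not part of the original
// header.]  A hedged sketch of the fan-out step: the parent table picks
// a subtable from the key's hash signature, then delegates the whole
// operation to that subtable.  The names below are invented, and the
// actual selection scheme may differ:
//
//     inline DWORD SubtableIndex(DWORD dwSignature, DWORD cSubTables)
//     {
//         // A plain modulus works if signatures are well distributed;
//         // a bit-scrambling step can be layered on top (cf. the
//         // LKR_BITSCRAMBLE_SUBTABLE_INDEX switch above).
//         return dwSignature % cSubTables;
//     }
//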
// Each CLKRLinearHashTable consists of a CDirEntry pointing to segments
// each holding m_dwSegSize CBuckets.  Each CBucket in turn consists of a
// chain of CNodeClumps.  Each CNodeClump contains a group of
// NODES_PER_CLUMP hash values (aka hash keys or signatures) and
// pointers to the associated data items.  Keeping the signatures
// together increases the cache locality in scans for lookup.
//
// Traditionally, people store a link-list element right inside the
// object that is hashed and use this link-list for the chaining of data
// blocks.  However, keeping just the pointers to the data object and
// not chaining through them limits the need for bringing in the data
// object to the cache.  We need to access the data object only if the
// hash values match.  This limits the cache-thrashing behaviour
// exhibited by conventional implementations.  It has the additional
// benefit that the objects themselves do not need to be modified
// in order to be collected in the hash table (i.e., it's non-invasive).
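//
// [Illustration only -- added for exposition; not part of the original
// header.]  A minimal sketch of the signature-first scan described
// above, with invented names: compare the densely packed signatures
// first, and dereference the (probably cold) record pointer only on a
// signature match:
//
//     for (const CNodeClump* pnc = &bucket.m_ncFirst;
//          pnc != NULL;
//          pnc = pnc->m_pncNext)
//     {
//         for (int i = 0;  i < NODES_PER_CLUMP;  ++i)
//         {
//             if (pnc->m_dwKeySigs[i] == dwSig              // cheap test
//                 &&  KeysMatch(pnc->m_pvNode[i], pnKey))   // cold fetch
//                 return pnc->m_pvNode[i];                  // found it
//         }
//     }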
//--------------------------------------------------------------------
// TODO
//  * Debugging support for iisprobe?
//  * Use auto_ptrs.
//  * Provide ReplaceRecord and DeleteRecord methods on iterators.
//  * Sloppy iterators
//  * Provide implementations of the STL collection classes, map, set,
//    multimap, and multiset.
//  * Make exception safe.
//--------------------------------------------------------------------

#include <irtldbg.h>
#include <lstentry.h>
#include <hashfn.h>
#include <limits.h>

#ifdef __LKRHASH_NAMESPACE__
namespace LKRHash {
#endif // __LKRHASH_NAMESPACE__

enum LK_TABLESIZE {
    LK_SMALL_TABLESIZE =  1,    // < 200 elements
    LK_MEDIUM_TABLESIZE = 2,    // 200...10,000 elements
    LK_LARGE_TABLESIZE =  3,    // 10,000+ elements
};

// Default values for the hashtable constructors
enum {
#ifndef _WIN64
    LK_DFLT_MAXLOAD = 6,        // Default upperbound on average chain length.
#else  // _WIN64
    LK_DFLT_MAXLOAD = 4,        // 64-byte nodes => NODES_PER_CLUMP = 4
#endif // _WIN64
    LK_DFLT_INITSIZE = LK_MEDIUM_TABLESIZE, // Default initial size of hash table
    LK_DFLT_NUM_SUBTBLS = 0,    // Use a heuristic to choose #subtables
};

// build fix hack
// enum {
//     DFLT_LK_MAXLOAD =     LK_DFLT_MAXLOAD,
//     DFLT_LK_INITSIZE =    LK_DFLT_INITSIZE,
//     DFLT_LK_NUM_SUBTBLS = LK_DFLT_NUM_SUBTBLS,
// };

//--------------------------------------------------------------------
// forward declarations

class IRTL_DLLEXP CLKRLinearHashTable;

class IRTL_DLLEXP CLKRHashTable;

template <class _Der, class _Rcd, class _Ky, class _HT
#ifdef LKR_DEPRECATED_ITERATORS
          , class _Iter
#endif // LKR_DEPRECATED_ITERATORS
         >
class CTypedHashTable;

//--------------------------------------------------------------------
// Possible return codes from public member functions of
// CLKRLinearHashTable, CLKRHashTable, and CTypedHashTable

enum LK_RETCODE {
    // severe errors < 0
    LK_UNUSABLE = -99,  // Table corrupted: all bets are off
    LK_ALLOC_FAIL,      // ran out of memory
    LK_BAD_ITERATOR,    // invalid iterator; e.g., points to another table
    LK_BAD_RECORD,      // invalid record; e.g., NULL for InsertRecord
    LK_BAD_PARAMETERS,  // invalid parameters; e.g., NULL fnptrs to ctor
    LK_NOT_INITIALIZED, // LKRHashTableInit was not called

    LK_SUCCESS = 0,     // everything's okay
    LK_KEY_EXISTS,      // key already present for InsertRecord(no-overwrite)
    LK_NO_SUCH_KEY,     // key not found
    LK_NO_MORE_ELEMENTS,// iterator exhausted
};

#define LK_SUCCEEDED(lkrc) ((lkrc) >= LK_SUCCESS)
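
// [Illustration only -- added for exposition; not part of the original
// header.]  Typical use of LK_SUCCEEDED: informational codes such as
// LK_KEY_EXISTS compare greater than LK_SUCCESS, so they still count
// as success; only the negative codes are severe errors:
//
//     LK_RETCODE lkrc = ht.InsertRecord(pRec);
//     if (!LK_SUCCEEDED(lkrc))
//         HandleFailure(lkrc);    // hypothetical error handler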

#ifdef LKR_DEPRECATED_ITERATORS

//--------------------------------------------------------------------
// Return codes from PFnRecordPred.
enum LK_PREDICATE {
    LKP_ABORT = 1,          // Stop walking the table immediately
    LKP_NO_ACTION = 2,      // No action, just keep walking
    LKP_PERFORM = 3,        // Perform action and continue walking
    LKP_PERFORM_STOP = 4,   // Perform action, then stop
    LKP_DELETE = 5,         // Delete record and keep walking
    LKP_DELETE_STOP = 6,    // Delete record, then stop
};

//--------------------------------------------------------------------
// Return codes from PFnRecordAction.
enum LK_ACTION {
    LKA_ABORT = 1,      // Stop walking the table immediately
    LKA_FAILED = 2,     // Action failed; continue walking the table
    LKA_SUCCEEDED = 3,  // Action succeeded; continue walking the table
};

//--------------------------------------------------------------------
// Parameter to Apply and ApplyIf.
enum LK_LOCKTYPE {
    LKL_READLOCK = 1,   // Lock the table for reading (for constness)
    LKL_WRITELOCK = 2,  // Lock the table for writing
};

#endif // LKR_DEPRECATED_ITERATORS

//--------------------------------------------------------------------
// Global table lock code.  This is only used to measure how much of a
// slowdown having a global lock on the CLKRHashTable causes.  It is
// *never* used in production code.
// #define LKRHASH_GLOBAL_LOCK CCritSec

#ifdef LKRHASH_GLOBAL_LOCK

# define LKRHASH_GLOBAL_LOCK_DECLARATIONS()     \
    typedef LKRHASH_GLOBAL_LOCK GlobalLock;     \
    mutable GlobalLock m_lkGlobal;

# define LKRHASH_GLOBAL_READ_LOCK()    m_lkGlobal.ReadLock()
# define LKRHASH_GLOBAL_WRITE_LOCK()   m_lkGlobal.WriteLock()
# define LKRHASH_GLOBAL_READ_UNLOCK()  m_lkGlobal.ReadUnlock()
# define LKRHASH_GLOBAL_WRITE_UNLOCK() m_lkGlobal.WriteUnlock()

#else // !LKRHASH_GLOBAL_LOCK

# define LKRHASH_GLOBAL_LOCK_DECLARATIONS()

// These ones will be optimized away by the compiler
# define LKRHASH_GLOBAL_READ_LOCK()    ((void)0)
# define LKRHASH_GLOBAL_WRITE_LOCK()   ((void)0)
# define LKRHASH_GLOBAL_READ_UNLOCK()  ((void)0)
# define LKRHASH_GLOBAL_WRITE_UNLOCK() ((void)0)

#endif // !LKRHASH_GLOBAL_LOCK

//--------------------------------------------------------------------
// Statistical information returned by GetStatistics
//--------------------------------------------------------------------

#ifdef LOCK_INSTRUMENTATION

class IRTL_DLLEXP CAveragedLockStats : public CLockStatistics
{
public:
    int m_nItems;

    CAveragedLockStats()
        : m_nItems(1)
    {}
};

#endif // LOCK_INSTRUMENTATION

class IRTL_DLLEXP CLKRHashTableStats
{
public:
    int     RecordCount;        // number of records in the table
    int     TableSize;          // table size in number of slots
    int     DirectorySize;      // number of entries in directory
    int     LongestChain;       // longest hash chain in the table
    int     EmptySlots;         // number of unused hash slots
    double  SplitFactor;        // fraction of buckets split
    double  AvgSearchLength;    // average length of a successful search
    double  ExpSearchLength;    // theoretically expected length
    double  AvgUSearchLength;   // average length of an unsuccessful search
    double  ExpUSearchLength;   // theoretically expected length
    int     NodeClumpSize;      // number of slots in a node clump
    int     CBucketSize;        // sizeof(CBucket)

#ifdef LOCK_INSTRUMENTATION
    CAveragedLockStats      m_alsTable;      // stats for table lock
    CAveragedLockStats      m_alsBucketsAvg; // avg of stats for bucket locks
    CGlobalLockStatistics   m_gls;           // global statistics for all locks
#endif // LOCK_INSTRUMENTATION

    enum {
        MAX_BUCKETS = 40,
    };

    // histogram of bucket lengths
    LONG    m_aBucketLenHistogram[MAX_BUCKETS];

    CLKRHashTableStats()
        : RecordCount(0),
          TableSize(0),
          DirectorySize(0),
          LongestChain(0),
          EmptySlots(0),
          SplitFactor(0.0),
          AvgSearchLength(0.0),
          ExpSearchLength(0.0),
          AvgUSearchLength(0.0),
          ExpUSearchLength(0.0),
          NodeClumpSize(1),
          CBucketSize(0)
    {
        for (int i = MAX_BUCKETS;  --i >= 0; )
            m_aBucketLenHistogram[i] = 0;
    }

    static const LONG*
    BucketSizes()
    {
        static const LONG s_aBucketSizes[MAX_BUCKETS] = {
            1,   2,   3,   4,   5,   6,   7,   8,   9,
            10,  11,  12,  13,  14,  15,  16,  17,  18,  19,
            20,  21,  22,  23,  24,  25,  30,  40,  50,  60,
            70,  80,  90, 100, 200, 500, 1000, 10000, 100000, LONG_MAX,
        };
        return s_aBucketSizes;
    }

    static LONG
    BucketSize(
        LONG nBucketIndex)
    {
        IRTLASSERT(0 <= nBucketIndex  &&  nBucketIndex < MAX_BUCKETS);
        return BucketSizes()[nBucketIndex];
    }

    static LONG
    BucketIndex(
        LONG nBucketLength)
    {
        const LONG* palBucketSizes = BucketSizes();
        LONG i = 0;
        while (palBucketSizes[i] < nBucketLength)
            ++i;
        if (i == MAX_BUCKETS  ||  palBucketSizes[i] > nBucketLength)
            --i;
        IRTLASSERT(0 <= i  &&  i < MAX_BUCKETS);
        return i;
    }
};
// Use types defined in recent versions of the Platform SDK.
#ifndef _W64
typedef DWORD DWORD_PTR;    // integral type big enough to hold a pointer
#endif

//--------------------------------------------------------------------
// CLKRLinearHashTable deals with void* records.  These typedefs
// provide prototypes for functions that manipulate instances of
// those records.  CTypedHashTable and CStringTestHashTable (below) show a
// way to encapsulate these in typesafe wrappers.
//--------------------------------------------------------------------

// Given a record, return its key.  Assumes that the key is embedded in
// the record, or at least somehow derivable from the record.  For
// completely unrelated keys & values, a wrapper class should use
// something like STL's pair<key,value> template to aggregate them
// into a record.
typedef const DWORD_PTR (WINAPI *PFnExtractKey)  (const void* pvRecord);

// Given a key, return its hash signature.  The hashing functions in
// hashfn.h (or something that builds upon them) are suggested.
typedef DWORD           (WINAPI *PFnCalcKeyHash) (const DWORD_PTR pnKey);

// Compare two keys for equality; e.g., _stricmp, memcmp, operator==
typedef bool            (WINAPI *PFnEqualKeys)   (const DWORD_PTR pnKey1,
                                                  const DWORD_PTR pnKey2);

// Increment the reference count of a record before returning it from
// FindKey.  It's necessary to do it in FindKey itself while the bucket
// is still locked, rather than one of the wrappers, to avoid race
// conditions.  Similarly, the reference count is incremented in
// InsertRecord and decremented in DeleteKey.  Finally, if an old record
// is overwritten in InsertRecord, its reference count is decremented.
//
// It's up to you to decrement the reference count when you're finished
// with it after retrieving it via FindKey and to determine the
// semantics of what this means.  The hashtable itself has no notion of
// reference counts; this is merely to help with the lifetime management
// of the record objects.
typedef void (WINAPI *PFnAddRefRecord)(const void* pvRecord, int nIncr);
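
// [Illustration only -- added for exposition; not part of the original
// header.]  A hedged sketch of the four callbacks for a hypothetical
// string-keyed record type.  HashString is assumed to come from
// hashfn.h; every other name below is invented for this example:
//
//     struct CExampleRecord
//     {
//         const char* m_pszKey;   // the embedded key
//         LONG        m_cRefs;    // reference count
//     };
//
//     static const DWORD_PTR WINAPI
//     ExampleExtractKey(const void* pvRecord)
//     { return (DWORD_PTR) ((const CExampleRecord*) pvRecord)->m_pszKey; }
//
//     static DWORD WINAPI
//     ExampleCalcKeyHash(const DWORD_PTR pnKey)
//     { return HashString((const char*) pnKey); }
//
//     static bool WINAPI
//     ExampleEqualKeys(const DWORD_PTR pnKey1, const DWORD_PTR pnKey2)
//     { return strcmp((const char*) pnKey1, (const char*) pnKey2) == 0; }
//
//     static void WINAPI
//     ExampleAddRefRecord(const void* pvRecord, int nIncr)
//     { InterlockedExchangeAdd(&((CExampleRecord*) pvRecord)->m_cRefs, nIncr); }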

#ifdef LKR_DEPRECATED_ITERATORS
// ApplyIf() and DeleteIf(): Does the record match the predicate?
typedef LK_PREDICATE (WINAPI *PFnRecordPred) (const void* pvRecord,
                                              void* pvState);

// Apply() et al: Perform action on record.
typedef LK_ACTION    (WINAPI *PFnRecordAction)(const void* pvRecord,
                                               void* pvState);
#endif // LKR_DEPRECATED_ITERATORS

//--------------------------------------------------------------------
// Custom memory allocators
//--------------------------------------------------------------------

#ifndef LKR_NO_ALLOCATORS
// # define LKRHASH_ACACHE 1
// # define LKRHASH_MANODEL 1
// # define LKRHASH_MADEL 1
// # define LKRHASH_ROCKALL_FAST 1
// # define LKRHASH_MEM_DEFAULT_ALIGN 32
#endif // !LKR_NO_ALLOCATORS

#ifndef LKRHASH_MEM_DEFAULT_ALIGN
# define LKRHASH_MEM_DEFAULT_ALIGN 8
#endif // !LKRHASH_MEM_DEFAULT_ALIGN

#if defined(LKRHASH_ACACHE)

# include <acache.hxx>
typedef ALLOC_CACHE_HANDLER CLKRhashAllocator;
# define LKRHASH_ALLOCATOR_NEW(C, N)                                    \
    const ALLOC_CACHE_CONFIGURATION acc = { 1, N, sizeof(C) };          \
    C::sm_palloc = new ALLOC_CACHE_HANDLER("IISRTL:" #C, &acc);

#elif defined(LKRHASH_MANODEL)

# include <manodel.hxx>
typedef MEMORY_ALLOC_NO_DELETE CLKRhashAllocator;
# define LKRHASH_ALLOCATOR_NEW(C, N)                                    \
    C::sm_palloc = new MEMORY_ALLOC_NO_DELETE(sizeof(C),                \
                                              LKRHASH_MEM_DEFAULT_ALIGN);

#elif defined(LKRHASH_MADEL)

# include <madel.hxx>
typedef MEMORY_ALLOC_DELETE CLKRhashAllocator;
# define LKRHASH_ALLOCATOR_NEW(C, N)                                    \
    C::sm_palloc = new MEMORY_ALLOC_DELETE(sizeof(C),                   \
                                           LKRHASH_MEM_DEFAULT_ALIGN, N);

#elif defined(LKRHASH_ROCKALL_FAST)

# include <FastHeap.hpp>

class FastHeap : public FAST_HEAP
{
public:
    FastHeap(
        SIZE_T cb)
        : m_cb(cb)
    {}

    LPVOID Alloc()
    { return New(m_cb, NULL, false); }

    BOOL   Free(LPVOID pvMem)
    { return Delete(pvMem); }

    SIZE_T m_cb;
};

typedef FastHeap CLKRhashAllocator;
# define LKRHASH_ALLOCATOR_NEW(C, N)                                    \
    C::sm_palloc = new FastHeap(sizeof(C));

#else // no custom allocator

# undef LKRHASH_ALLOCATOR_NEW

#endif // no custom allocator

// Used to initialize and destroy custom allocators
bool LKRHashTableInit();
void LKRHashTableUninit();

#ifdef LKRHASH_ALLOCATOR_NEW

// placed inline in the declaration of class C
# define LKRHASH_ALLOCATOR_DEFINITIONS(C)                               \
    protected:                                                          \
        static CLKRhashAllocator* sm_palloc;                            \
        friend bool LKRHashTableInit();                                 \
        friend void LKRHashTableUninit();                               \
    public:                                                             \
        static void* operator new(size_t s)                             \
        {                                                               \
            IRTLASSERT(s == sizeof(C));                                 \
            IRTLASSERT(sm_palloc != NULL);                              \
            return sm_palloc->Alloc();                                  \
        }                                                               \
        static void  operator delete(void* pv)                          \
        {                                                               \
            IRTLASSERT(pv != NULL);                                     \
            IRTLASSERT(sm_palloc != NULL);                              \
            sm_palloc->Free(pv);                                        \
        }

// used in LKRHashTableInit()
# define LKRHASH_ALLOCATOR_INIT(C, N, f)                                \
    {                                                                   \
        if (f)                                                          \
        {                                                               \
            IRTLASSERT(C::sm_palloc == NULL);                           \
            LKRHASH_ALLOCATOR_NEW(C, N);                                \
            f = (C::sm_palloc != NULL);                                 \
        }                                                               \
    }

// used in LKRHashTableUninit()
# define LKRHASH_ALLOCATOR_UNINIT(C)                                    \
    {                                                                   \
        if (C::sm_palloc != NULL)                                       \
        {                                                               \
            delete C::sm_palloc;                                        \
            C::sm_palloc = NULL;                                        \
        }                                                               \
    }

#else // !LKRHASH_ALLOCATOR_NEW

# define LKRHASH_ALLOCATOR_DEFINITIONS(C)
# define LKRHASH_ALLOCATOR_INIT(C, N, f)
# define LKRHASH_ALLOCATOR_UNINIT(C)

#endif // !LKRHASH_ALLOCATOR_NEW
//--------------------------------------------------------------------
// CLKRLinearHashTable
//
// A thread-safe linear hash table.
//--------------------------------------------------------------------

class IRTL_DLLEXP CLKRLinearHashTable
{
public:
    // typedef CSmallSpinLock     TableLock;
    // typedef CFakeLock          TableLock;
    // typedef CSpinLock          TableLock;
    // typedef CSpinLock2         TableLock;
    // typedef CSpinLock3         TableLock;
    // typedef CShareLock         TableLock;
    // typedef CReaderWriterLock  TableLock;
    // typedef CReaderWriterLock2 TableLock;
    typedef CReaderWriterLock3    TableLock;

    // typedef CSmallSpinLock     BucketLock;
    // typedef CFakeLock          BucketLock;
    // typedef CSpinLock          BucketLock;
    // typedef CSpinLock2         BucketLock;
    // typedef CSpinLock3         BucketLock;
    // typedef CShareLock         BucketLock;
    // typedef CReaderWriterLock  BucketLock;
    // typedef CReaderWriterLock2 BucketLock;
    typedef CReaderWriterLock3    BucketLock;

#ifdef LKR_DEPRECATED_ITERATORS
    class CIterator;
    friend class CLKRLinearHashTable::CIterator;
#endif // LKR_DEPRECATED_ITERATORS

private:
    class CNodeClump;
    friend class CLKRLinearHashTable::CNodeClump;
    friend class CLKRHashTable;

#ifdef LKRHASH_ALLOCATOR_NEW
    friend bool LKRHashTableInit();
    friend void LKRHashTableUninit();
#endif // LKRHASH_ALLOCATOR_NEW

#ifdef LKRHASH_INSTRUMENTATION
    // TODO
#endif // LKRHASH_INSTRUMENTATION

#define HASH_RANDOMIZE_BITS

#ifdef HASH_RANDOMIZE_BITS
    enum {
        // No number in 0..2^31-1 maps to this number after it has been
        // scrambled by RandomizeBits
        HASH_INVALID_SIGNATURE = 31678523,
    };
#else  // !HASH_RANDOMIZE_BITS
    enum {
        // Given M = A % B, A and B unsigned 32-bit integers greater than zero,
        // there are no values of A or B which yield M = 2^32-1.  Why?  Because
        // M must be less than B.
        HASH_INVALID_SIGNATURE = ULONG_MAX,
    };
#endif // !HASH_RANDOMIZE_BITS

    // Class for nodes on a bucket chain.  Instead of a node containing
    // one (signature, record-pointer, next-tuple-pointer) tuple, it
    // contains _N_ such tuples.  (N-1 next-tuple-pointers are omitted.)
    // This improves locality of reference greatly; i.e., it's L1
    // cache-friendly.  It also reduces memory fragmentation and memory
    // allocator overhead.  It does complicate the chain traversal code
    // slightly, admittedly.
    //
    // This theory is beautiful.  In practice, however, CNodeClumps
    // are *not* perfectly aligned on 32-byte boundaries by the memory
    // allocators.  Experimental results indicate that we get a 2-3%
    // speed improvement by using 32-byte-aligned blocks, but this must
    // be considered against the average of 16 bytes wasted per block.

    class CNodeClump
    {
    public:
        // Record slots per chunk - set so a chunk matches (one or
        // two) cache lines.  3 ==> 32 bytes, 7 ==> 64 bytes
        // Note: the default max load factor is 4.0, which implies that
        // there will seldom be more than one node clump in a chain.
        enum {
            BUCKET_BYTE_SIZE = 64,
            BUCKET_OVERHEAD  = sizeof(BucketLock) + sizeof(CNodeClump*),
            NODE_SIZE        = sizeof(const void*) + sizeof(DWORD),
            NODES_PER_CLUMP  = (BUCKET_BYTE_SIZE - BUCKET_OVERHEAD) / NODE_SIZE
        };
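
        // [Illustration only -- added for exposition; not part of the
        // original header.]  Worked example of the arithmetic above on
        // a 32-bit build, assuming a 4-byte BucketLock:
        //     BUCKET_OVERHEAD = 4 + 4 = 8   (lock + next-clump pointer)
        //     NODE_SIZE       = 4 + 4 = 8   (record pointer + signature)
        //     NODES_PER_CLUMP = (64 - 8) / 8 = 7 slots per clump
        // With 8-byte pointers (and, say, an 8-byte lock) on Win64:
        //     NODES_PER_CLUMP = (64 - 16) / 12 = 4, matching the
        //     "64-byte nodes => NODES_PER_CLUMP = 4" note on
        //     LK_DFLT_MAXLOAD above.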

#ifdef LKR_SIGS_NODES
        DWORD       m_dwKeySigs[NODES_PER_CLUMP]; // hash values computed from keys
        CNodeClump* m_pncNext;                    // next node clump on the chain
        const void* m_pvNode[NODES_PER_CLUMP];    // pointers to records
#else  // !LKR_SIGS_NODES
        const void* m_pvNode[NODES_PER_CLUMP];    // pointers to records
        CNodeClump* m_pncNext;                    // next node clump on the chain
        DWORD       m_dwKeySigs[NODES_PER_CLUMP]; // hash values computed from keys
#endif // !LKR_SIGS_NODES

        CNodeClump()
        {
            Clear();
        }

        void
        Clear()
        {
            m_pncNext = NULL;   // no dangling pointers
            for (int i = NODES_PER_CLUMP;  --i >= 0; )
            {
                m_dwKeySigs[i] = HASH_INVALID_SIGNATURE;
                m_pvNode[i]    = NULL;
            }
        }

        bool
        InvalidSignature(
            DWORD i) const
        {
            IRTLASSERT(0 <= i  &&  i < NODES_PER_CLUMP);
            return (m_dwKeySigs[i] == HASH_INVALID_SIGNATURE);
        }

        bool
        IsEmptyNode(
            DWORD i) const
        {
            IRTLASSERT(0 <= i  &&  i < NODES_PER_CLUMP);
            return (m_pvNode[i] == NULL);
        }

        bool
        IsEmptyAndInvalid(
            DWORD i) const
        {
            return IsEmptyNode(i) && InvalidSignature(i);
        }

        bool
        IsEmptySlot(
            DWORD i) const
        {
#ifdef LKR_TEST_SIGNATURE
            return InvalidSignature(i);
#else  // !LKR_TEST_SIGNATURE
            return IsEmptyNode(i);
#endif // !LKR_TEST_SIGNATURE
        }

        bool
        IsLastClump() const
        {
            return (m_pncNext == NULL);
        }

#ifdef _DEBUG
        // Don't want overhead of calls to dtor in retail build
        ~CNodeClump()
        {
            IRTLASSERT(IsLastClump()); // no dangling pointers
            for (int i = NODES_PER_CLUMP;  --i >= 0; )
                IRTLASSERT(InvalidSignature(i)  &&  IsEmptyNode(i));
        }
#endif // _DEBUG

        LKRHASH_ALLOCATOR_DEFINITIONS(CNodeClump);
    };

    // Class for bucket chains of the hash table.  Note that the first
    // nodeclump is actually included in the bucket and not dynamically
    // allocated, which increases space requirements slightly but does
    // improve performance.

    class CBucket
    {
    private:
        mutable BucketLock m_Lock;  // lock protecting this bucket

#ifdef LOCK_INSTRUMENTATION
        static LONG sm_cBuckets;

        static const char*
        _LockName()
        {
            LONG l = ++sm_cBuckets;
            // possible race condition but we don't care, as this is never
            // used in production code
            static char s_szName[CLockStatistics::L_NAMELEN];
            wsprintf(s_szName, "B%06x", 0xFFFFFF & l);
            return s_szName;
        }
#endif // LOCK_INSTRUMENTATION

    public:
        CNodeClump m_ncFirst;   // first CNodeClump of this bucket

#if defined(LOCK_INSTRUMENTATION) || defined(_DEBUG)
        CBucket()
#ifdef LOCK_INSTRUMENTATION
            : m_Lock(_LockName())
#endif // LOCK_INSTRUMENTATION
        {
#ifdef _DEBUG
            LOCK_LOCKTYPE lt = BucketLock::LockType();
            if (lt == LOCK_SPINLOCK  ||  lt == LOCK_FAKELOCK)
                IRTLASSERT(sizeof(*this) <= 64);
#endif // _DEBUG
        }
#endif // LOCK_INSTRUMENTATION || _DEBUG

        void  WriteLock()               { m_Lock.WriteLock(); }
        void  ReadLock() const          { m_Lock.ReadLock(); }
        void  WriteUnlock() const       { m_Lock.WriteUnlock(); }
        void  ReadUnlock() const        { m_Lock.ReadUnlock(); }
        bool  IsWriteLocked() const     { return m_Lock.IsWriteLocked(); }
        bool  IsReadLocked() const      { return m_Lock.IsReadLocked(); }
        bool  IsWriteUnlocked() const   { return m_Lock.IsWriteUnlocked(); }
        bool  IsReadUnlocked() const    { return m_Lock.IsReadUnlocked(); }
        void  SetSpinCount(WORD wSpins) { m_Lock.SetSpinCount(wSpins); }
        WORD  GetSpinCount() const      { return m_Lock.GetSpinCount(); }

#ifdef LOCK_INSTRUMENTATION
        CLockStatistics LockStats() const { return m_Lock.Statistics(); }
#endif // LOCK_INSTRUMENTATION
    };

    // The hash table space is divided into fixed-size segments (arrays of
    // CBuckets) and physically grows/shrinks one segment at a time.
    // We provide small, medium, and large segments to better tune the
    // overall memory requirements of the hash table according to the
    // expected usage of an instance.

    class CSegment
    {
    public:
#ifdef LKR_OLD_SEGMENT
        virtual ~CSegment() {}; // link fails if this is pure virtual
        virtual DWORD Bits() const     = 0;
        virtual DWORD Size() const     = 0;
        virtual DWORD Mask() const     = 0;
        virtual DWORD InitSize() const = 0;
        virtual CBucket& Slot(DWORD i) = 0;
#else  // !LKR_OLD_SEGMENT
        CBucket m_bktSlots;

        CBucket& Slot(DWORD i)
        { return static_cast<CBucket*>(&m_bktSlots)[i]; }
#endif // !LKR_OLD_SEGMENT
    };

    // Small-sized segments contain 2^3 = 8 buckets => ~0.5Kb
    class CSmallSegment : public CSegment
    {
    public:
        // Maximum table size equals MAX_DIRSIZE * SEGSIZE buckets.
        enum {
            SEGBITS  = 3,           // number of bits extracted from a hash
                                    // address for offset within a segment
            SEGSIZE  = (1<<SEGBITS),// segment size
            SEGMASK  = (SEGSIZE-1), // mask used for extracting offset bit
            INITSIZE = 1 * SEGSIZE, // #segments to allocate initially
        };

#ifdef LKR_OLD_SEGMENT
    private:
        CBucket m_bktSlots[SEGSIZE];
    public:
        virtual ~CSmallSegment() {}
        virtual DWORD Bits() const     { return SEGBITS; }
        virtual DWORD Size() const     { return SEGSIZE; }
        virtual DWORD Mask() const     { return SEGMASK; }
        virtual DWORD InitSize() const { return INITSIZE; }
        virtual CBucket& Slot(DWORD i)
        { IRTLASSERT(i < SEGSIZE);  return m_bktSlots[i]; }
#else  // !LKR_OLD_SEGMENT
    private:
        CBucket m_bktSlots2[SEGSIZE-1];
    public:
        DWORD Bits() const     { return SEGBITS; }
        DWORD Size() const     { return SEGSIZE; }
        DWORD Mask() const     { return SEGMASK; }
        DWORD InitSize() const { return INITSIZE; }
#endif // !LKR_OLD_SEGMENT

#ifdef _DEBUG
        CSmallSegment()
        {
            IRTLASSERT(((DWORD_PTR)this & (LKRHASH_MEM_DEFAULT_ALIGN-1)) == 0);
#ifdef LKR_OLD_SEGMENT
            IRTLASSERT(sizeof(*this) == SEGSIZE * sizeof(CBucket)
                                            + sizeof(void*));
#else  // !LKR_OLD_SEGMENT
            IRTLASSERT(sizeof(*this) == SEGSIZE * sizeof(CBucket));
#endif // !LKR_OLD_SEGMENT
        }
#endif // _DEBUG

        LKRHASH_ALLOCATOR_DEFINITIONS(CSmallSegment);
    };

    // Medium-sized segments contain 2^6 = 64 buckets => ~4Kb
    class CMediumSegment : public CSegment
    {
    public:
        enum {
            SEGBITS  = 6,
            SEGSIZE  = (1<<SEGBITS),
            SEGMASK  = (SEGSIZE-1),
            INITSIZE = 2 * SEGSIZE,
        };

#ifdef LKR_OLD_SEGMENT
    private:
        CBucket m_bktSlots[SEGSIZE];
    public:
        virtual ~CMediumSegment() {}
        virtual DWORD Bits() const     { return SEGBITS; }
        virtual DWORD Size() const     { return SEGSIZE; }
        virtual DWORD Mask() const     { return SEGMASK; }
        virtual DWORD InitSize() const { return INITSIZE; }
        virtual CBucket& Slot(DWORD i)
        { IRTLASSERT(i < SEGSIZE);  return m_bktSlots[i]; }
#else  // !LKR_OLD_SEGMENT
    private:
        CBucket m_bktSlots2[SEGSIZE-1];
    public:
        DWORD Bits() const     { return SEGBITS; }
        DWORD Size() const     { return SEGSIZE; }
        DWORD Mask() const     { return SEGMASK; }
        DWORD InitSize() const { return INITSIZE; }
#endif // !LKR_OLD_SEGMENT

#ifdef _DEBUG
        CMediumSegment()
        {
            IRTLASSERT(((DWORD_PTR)this & (LKRHASH_MEM_DEFAULT_ALIGN-1)) == 0);
#ifdef LKR_OLD_SEGMENT
            IRTLASSERT(sizeof(*this) == SEGSIZE * sizeof(CBucket)
                                            + sizeof(void*));
#else  // !LKR_OLD_SEGMENT
            IRTLASSERT(sizeof(*this) == SEGSIZE * sizeof(CBucket));
#endif // !LKR_OLD_SEGMENT
        }
#endif // _DEBUG

        LKRHASH_ALLOCATOR_DEFINITIONS(CMediumSegment);
    };

    // Large-sized segments contain 2^9 = 512 buckets => ~32Kb
    class CLargeSegment : public CSegment
    {
    public:
        enum {
            SEGBITS  = 9,
            SEGSIZE  = (1<<SEGBITS),
            SEGMASK  = (SEGSIZE-1),
            INITSIZE = 4 * SEGSIZE,
        };

#ifdef LKR_OLD_SEGMENT
    private:
        CBucket m_bktSlots[SEGSIZE];
    public:
        virtual ~CLargeSegment() {}
        virtual DWORD Bits() const     { return SEGBITS; }
        virtual DWORD Size() const     { return SEGSIZE; }
        virtual DWORD Mask() const     { return SEGMASK; }
        virtual DWORD InitSize() const { return INITSIZE; }
        virtual CBucket& Slot(DWORD i)
        { IRTLASSERT(i < SEGSIZE);  return m_bktSlots[i]; }
#else  // !LKR_OLD_SEGMENT
    private:
        CBucket m_bktSlots2[SEGSIZE-1];
    public:
        DWORD Bits() const     { return SEGBITS; }
        DWORD Size() const     { return SEGSIZE; }
        DWORD Mask() const     { return SEGMASK; }
        DWORD InitSize() const { return INITSIZE; }
#endif // !LKR_OLD_SEGMENT

#ifdef _DEBUG
        CLargeSegment()
        {
            IRTLASSERT(((DWORD_PTR)this & (LKRHASH_MEM_DEFAULT_ALIGN-1)) == 0);
#ifdef LKR_OLD_SEGMENT
            IRTLASSERT(sizeof(*this) == SEGSIZE * sizeof(CBucket)
                                            + sizeof(void*));
#else  // !LKR_OLD_SEGMENT
            IRTLASSERT(sizeof(*this) == SEGSIZE * sizeof(CBucket));
#endif // !LKR_OLD_SEGMENT
        }
#endif // _DEBUG

        LKRHASH_ALLOCATOR_DEFINITIONS(CLargeSegment);
    };

    // A directory keeps track of the segments comprising the hash table.
    // The directory is just a variable-sized array of pointers to
    // segments (CDirEntrys).
    class CDirEntry
    {
    public:
        // MIN_DIRSIZE and MAX_DIRSIZE can be changed independently
        // of anything else.  Should be powers of two.
        enum {
            MIN_DIRSIZE = (1<<3),   // minimum directory size
            MAX_DIRSIZE = (1<<20),  // maximum directory size
        };

        CSegment* m_pseg;

        CDirEntry()
            : m_pseg(NULL)
        {}

        ~CDirEntry()
        { delete m_pseg; }
    };

public:
    // aliases for convenience
    enum {
        NODES_PER_CLUMP = CNodeClump::NODES_PER_CLUMP,
        MIN_DIRSIZE     = CDirEntry::MIN_DIRSIZE,
        MAX_DIRSIZE     = CDirEntry::MAX_DIRSIZE,
        NAME_SIZE       = 16,
    };

private:
    // Miscellaneous helper functions

    // Convert a hash signature to a bucket address
    inline DWORD _BucketAddress(DWORD dwSignature) const
    {
        DWORD dwBktAddr = _H0(dwSignature);
        // Has this bucket been split already?
        if (dwBktAddr < m_iExpansionIdx)
            dwBktAddr = _H1(dwSignature);
        IRTLASSERT(dwBktAddr < m_cActiveBuckets);
        IRTLASSERT(dwBktAddr < (m_cDirSegs << m_dwSegBits));
        return dwBktAddr;
    }

    // See the Linear Hashing paper
    static DWORD _H0(DWORD dwSignature, DWORD dwBktAddrMask)
    { return dwSignature & dwBktAddrMask; }

    DWORD        _H0(DWORD dwSignature) const
    { return _H0(dwSignature, m_dwBktAddrMask0); }

    // See the Linear Hashing paper.  Preserves one bit more than _H0.
    static DWORD _H1(DWORD dwSignature, DWORD dwBktAddrMask)
    { return dwSignature & ((dwBktAddrMask << 1) | 1); }

    DWORD        _H1(DWORD dwSignature) const
    { return _H0(dwSignature, m_dwBktAddrMask1); }
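
    // [Illustration only -- added for exposition; not part of the
    // original header.]  Worked example: with m_dwBktAddrMask0 = 0xFF
    // (a 256-bucket round) and m_iExpansionIdx = 0x20:
    //     signature 0x1A7:  _H0 = 0x1A7 & 0xFF = 0xA7 >= 0x20,
    //         so bucket 0xA7 has not been split yet and is used as-is;
    //     signature 0x11A:  _H0 = 0x1A < 0x20, already split, so rehash
    //         with one more bit: _H1 = 0x11A & 0x1FF = 0x11A.
    // (m_dwBktAddrMask1 == (m_dwBktAddrMask0 << 1) | 1 == 0x1FF, which
    // is why _H1 can be computed as _H0 against the wider mask.)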

    // In which segment within the directory does the bucketaddress lie?
    // (Return type must be lvalue so that it can be assigned to.)
    CSegment*& _Segment(DWORD dwBucketAddr) const
    {
        const DWORD iSeg = dwBucketAddr >> m_dwSegBits;
        IRTLASSERT(m_paDirSegs != NULL  &&  iSeg < m_cDirSegs);
        return m_paDirSegs[iSeg].m_pseg;
    }

    // Offset within the segment of the bucketaddress
    DWORD _SegIndex(DWORD dwBucketAddr) const
    { return (dwBucketAddr & m_dwSegMask); }

    // Convert a bucketaddress to a CBucket*
    inline CBucket* _Bucket(DWORD dwBucketAddr) const
    {
        IRTLASSERT(dwBucketAddr < m_cActiveBuckets);
        CSegment* const pseg = _Segment(dwBucketAddr);
        IRTLASSERT(pseg != NULL);
        return &(pseg->Slot(_SegIndex(dwBucketAddr)));
    }

    // Extract the key from a record
    const DWORD_PTR _ExtractKey(const void* pvRecord) const
    {
        IRTLASSERT(pvRecord != NULL);
        IRTLASSERT(m_pfnExtractKey != NULL);
        return (*m_pfnExtractKey)(pvRecord);
    }

    // Hash the key
    DWORD _CalcKeyHash(const DWORD_PTR pnKey) const
    {
        // Note pnKey==0 is acceptable, as the real key type could be an int
        IRTLASSERT(m_pfnCalcKeyHash != NULL);
        DWORD dwHash = (*m_pfnCalcKeyHash)(pnKey);
        // We forcibly scramble the result to help ensure a better distribution
#ifdef HASH_RANDOMIZE_BITS
        dwHash = RandomizeBits(dwHash);
#else  // !HASH_RANDOMIZE_BITS
        dwHash = HashScramble(dwHash);
#endif // !HASH_RANDOMIZE_BITS
        IRTLASSERT(dwHash != HASH_INVALID_SIGNATURE);
        return dwHash;
    }

    // Compare two keys for equality
    bool _EqualKeys(const DWORD_PTR pnKey1, const DWORD_PTR pnKey2) const
    {
        IRTLASSERT(m_pfnEqualKeys != NULL);
        return (*m_pfnEqualKeys)(pnKey1, pnKey2);
    }

    // AddRef or Release a record.
    void _AddRefRecord(const void* pvRecord, int nIncr) const
    {
        IRTLASSERT(pvRecord != NULL  &&  (nIncr == -1  ||  nIncr == +1));
        IRTLASSERT(m_pfnAddRefRecord != NULL);
        (*m_pfnAddRefRecord)(pvRecord, nIncr);
    }

    // Find a bucket, given its signature.
    CBucket* _FindBucket(DWORD dwSignature, bool fLockForWrite) const;

    // Used by _FindKey so that the thread won't deadlock if the user has
    // already explicitly called table->WriteLock().
    bool _ReadOrWriteLock() const
    { return m_Lock.ReadOrWriteLock(); }

    void _ReadOrWriteUnlock(bool fReadLocked) const
    { m_Lock.ReadOrWriteUnlock(fReadLocked); }

    // Memory allocation wrappers to allow us to simulate allocation
    // failures during testing

    static CDirEntry* const
    _AllocateSegmentDirectory(
        size_t n);

    static bool
    _FreeSegmentDirectory(
        CDirEntry* paDirSegs);

    static CNodeClump* const
    _AllocateNodeClump();

    static bool
    _FreeNodeClump(
        CNodeClump* pnc);

    CSegment* const
    _AllocateSegment() const;

    bool
    _FreeSegment(
        CSegment* pseg) const;

#ifdef LOCK_INSTRUMENTATION
    static LONG sm_cTables;

    static const char*
    _LockName()
    {
        LONG l = ++sm_cTables;
        // possible race condition but we don't care, as this is never
        // used in production code
        static char s_szName[CLockStatistics::L_NAMELEN];
        wsprintf(s_szName, "LH%05x", 0xFFFFF & l);
        return s_szName;
    }

    // Statistics for the table lock
    CLockStatistics _LockStats() const
    { return m_Lock.Statistics(); }
#endif // LOCK_INSTRUMENTATION

private:
    // Fields are ordered so as to minimize number of cache lines touched

    DWORD           m_dwSignature;      // debugging: id & corruption check
    CHAR            m_szName[NAME_SIZE];// an identifier for debugging
    mutable LK_RETCODE m_lkrcState;     // Internal state of table
    mutable TableLock  m_Lock;          // Lock on entire linear hash table

    // type-specific function pointers
    PFnExtractKey   m_pfnExtractKey;    // Extract key from record
    PFnCalcKeyHash  m_pfnCalcKeyHash;   // Calculate hash signature of key
    PFnEqualKeys    m_pfnEqualKeys;     // Compare two keys
    PFnAddRefRecord m_pfnAddRefRecord;  // AddRef a record

    LK_TABLESIZE    m_lkts;             // "size" of table: small, medium, or large
    DWORD           m_dwSegBits;        // C{Small,Medium,Large}Segment::SEGBITS
    DWORD           m_dwSegSize;        // C{Small,Medium,Large}Segment::SEGSIZE
    DWORD           m_dwSegMask;        // C{Small,Medium,Large}Segment::SEGMASK
    double          m_MaxLoad;          // max load factor (average chain length)
    DWORD           m_dwBktAddrMask0;   // mask used for address calculation
    DWORD           m_dwBktAddrMask1;   // used in _H1 calculation
    DWORD           m_iExpansionIdx;    // address of next bucket to be expanded
    CDirEntry*      m_paDirSegs;        // directory of table segments
    DWORD           m_nLevel;           // number of table doublings performed
    DWORD           m_cDirSegs;         // segment directory size: varies between
                                        // MIN_DIRSIZE and MAX_DIRSIZE
    DWORD           m_cRecords;         // number of records in the table
    DWORD           m_cActiveBuckets;   // number of buckets in use (table size)
    WORD            m_wBucketLockSpins; // default spin count for bucket locks
    const BYTE      m_nTableLockType;   // for debugging: LOCK_SPINLOCK, etc
    const BYTE      m_nBucketLockType;  // for debugging: LOCK_SPINLOCK, etc
    const CLKRHashTable* const m_phtParent; // Owning table. NULL => standalone

#ifndef LKR_NO_GLOBAL_LIST
    static CLockedDoubleList sm_llGlobalList; // All active CLKRLinearHashTables
    CListEntry               m_leGlobalList;
#endif // !LKR_NO_GLOBAL_LIST

    void _InsertThisIntoGlobalList()
    {
#ifndef LKR_NO_GLOBAL_LIST
        // Only add standalone CLKRLinearHashTables to global list.
        // CLKRHashTables have their own global list.
        if (m_phtParent == NULL)
            sm_llGlobalList.InsertHead(&m_leGlobalList);
#endif // !LKR_NO_GLOBAL_LIST
    }

    void _RemoveThisFromGlobalList()
    {
#ifndef LKR_NO_GLOBAL_LIST
        if (m_phtParent == NULL)
            sm_llGlobalList.RemoveEntry(&m_leGlobalList);
#endif // !LKR_NO_GLOBAL_LIST
    }

    // Non-trivial implementation functions
    LK_RETCODE _InsertRecord(const void* pvRecord, DWORD dwSignature,
                             bool fOverwrite);
    LK_RETCODE _DeleteKey(const DWORD_PTR pnKey, DWORD dwSignature);
    LK_RETCODE _DeleteRecord(const void* pvRecord, DWORD dwSignature);
    bool       _DeleteNode(CBucket* pbkt, CNodeClump*& rpnc,
                           CNodeClump*& rpncPrev, int& riNode);
    LK_RETCODE _FindKey(const DWORD_PTR pnKey, DWORD dwSignature,
                        const void** ppvRecord) const;
    LK_RETCODE _FindRecord(const void* pvRecord, DWORD dwSignature) const;

    // returns count of errors in compacted state => 0 is good
    int        _IsNodeCompact(CBucket* const pbkt) const;

#ifdef LKR_DEPRECATED_ITERATORS
    // Predicate functions
    static LK_PREDICATE WINAPI
    _PredTrue(const void* /*pvRecord*/, void* /*pvState*/)
    { return LKP_PERFORM; }

    DWORD _Apply(PFnRecordAction pfnAction, void* pvState,
                 LK_LOCKTYPE lkl, LK_PREDICATE& rlkp);
    DWORD _ApplyIf(PFnRecordPred pfnPredicate,
                   PFnRecordAction pfnAction, void* pvState,
                   LK_LOCKTYPE lkl, LK_PREDICATE& rlkp);
    DWORD _DeleteIf(PFnRecordPred pfnPredicate, void* pvState,
                    LK_PREDICATE& rlkp);
#endif // LKR_DEPRECATED_ITERATORS

    void        _Clear(bool fShrinkDirectory);
    void        _SetSegVars(LK_TABLESIZE lkts);
    LK_RETCODE  _Expand();
    LK_RETCODE  _Contract();
    LK_RETCODE  _SplitRecordSet(CNodeClump* pncOldTarget,
                                CNodeClump* pncNewTarget,
                                DWORD       iExpansionIdx,
                                DWORD       dwBktAddrMask,
                                DWORD       dwNewBkt,
                                CNodeClump* pncFreeList);
    LK_RETCODE  _MergeRecordSets(CBucket*    pbktNewTarget,
                                 CNodeClump* pncOldList,
                                 CNodeClump* pncFreeList);

    // Private copy ctor and op= to prevent compiler synthesizing them.
    // Must provide a (bad) implementation because we export instantiations.
    // TODO: implement these properly; they could be useful.
    CLKRLinearHashTable(const CLKRLinearHashTable&)
        : m_dwSignature(SIGNATURE_FREE)
#ifdef LOCK_INSTRUMENTATION
        , m_Lock(NULL)
#endif // LOCK_INSTRUMENTATION
        , m_nTableLockType(0),
          m_nBucketLockType(0),
          m_phtParent(NULL)
    {*(BYTE*)NULL;}

    CLKRLinearHashTable& operator=(const CLKRLinearHashTable&)
    {return *(CLKRLinearHashTable*)NULL;}

private:
    // This ctor is used by CLKRHashTable
    CLKRLinearHashTable(
        LPCSTR          pszName,        // An identifier for debugging
        PFnExtractKey   pfnExtractKey,  // Extract key from record
        PFnCalcKeyHash  pfnCalcKeyHash, // Calculate hash signature of key
        PFnEqualKeys    pfnEqualKeys,   // Compare two keys
        PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
        double          maxload,        // Upperbound on average chain length
        DWORD           initsize,       // Initial size of hash table.
        CLKRHashTable*  phtParent       // Owning table.
        );

    LK_RETCODE
    _Initialize(
        PFnExtractKey   pfnExtractKey,
        PFnCalcKeyHash  pfnCalcKeyHash,
        PFnEqualKeys    pfnEqualKeys,
        PFnAddRefRecord pfnAddRefRecord,
        LPCSTR          pszName,
        double          maxload,
        DWORD           initsize);

public:
    CLKRLinearHashTable(
        LPCSTR          pszName,        // An identifier for debugging
        PFnExtractKey   pfnExtractKey,  // Extract key from record
        PFnCalcKeyHash  pfnCalcKeyHash, // Calculate hash signature of key
        PFnEqualKeys    pfnEqualKeys,   // Compare two keys
        PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
        double maxload=LK_DFLT_MAXLOAD,     // Upperbound on average chain length
        DWORD  initsize=LK_DFLT_INITSIZE,   // Initial size of hash table.
        DWORD  num_subtbls=LK_DFLT_NUM_SUBTBLS  // for signature compatibility
                                                // with CLKRHashTable
        );

    ~CLKRLinearHashTable();

    static const char* ClassName()  { return "CLKRLinearHashTable"; }

    int NumSubTables() const        { return 1; }

    static LK_TABLESIZE NumSubTables(DWORD& rinitsize, DWORD& rnum_subtbls);

    // Insert a new record into hash table.
    // Returns LK_SUCCESS if all OK, LK_KEY_EXISTS if same key already
    // exists (unless fOverwrite), LK_ALLOC_FAIL if out of space,
    // or LK_BAD_RECORD for a bad record.
    LK_RETCODE InsertRecord(const void* pvRecord, bool fOverwrite=false)
    {
        if (!IsUsable())
            return m_lkrcState;
        if (pvRecord == NULL)
            return LK_BAD_RECORD;
        return _InsertRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)),
                             fOverwrite);
    }

    // Delete record with the given key.
    // Returns LK_SUCCESS if all OK, or LK_NO_SUCH_KEY if not found
    LK_RETCODE DeleteKey(const DWORD_PTR pnKey)
    {
        if (!IsUsable())
            return m_lkrcState;
        return _DeleteKey(pnKey, _CalcKeyHash(pnKey));
    }

    // Delete a record from the table, if present.
    // Returns LK_SUCCESS if all OK, or LK_NO_SUCH_KEY if not found
    LK_RETCODE DeleteRecord(const void* pvRecord)
    {
        if (!IsUsable())
            return m_lkrcState;
        if (pvRecord == NULL)
            return LK_BAD_RECORD;
        return _DeleteRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)));
    }

    // Find record with given key.
    // Returns:  LK_SUCCESS, if record found (record is returned in *ppvRecord)
    //           LK_BAD_RECORD, if ppvRecord is invalid
    //           LK_NO_SUCH_KEY, if no record with given key value was found
    //           LK_UNUSABLE, if hash table not in usable state
    // Note: the record is AddRef'd.  You must decrement the reference
    // count when you are finished with the record (if you're implementing
    // refcounting semantics).
    LK_RETCODE FindKey(const DWORD_PTR pnKey,
                       const void** ppvRecord) const
    {
        if (!IsUsable())
            return m_lkrcState;
        if (ppvRecord == NULL)
            return LK_BAD_RECORD;
        return _FindKey(pnKey, _CalcKeyHash(pnKey), ppvRecord);
    }

    // Sees if the record is contained in the table
    // Returns:  LK_SUCCESS, if record found
    //           LK_BAD_RECORD, if pvRecord is invalid
    //           LK_NO_SUCH_KEY, if record is not in the table
    //           LK_UNUSABLE, if hash table not in usable state
    // Note: the record is *not* AddRef'd.
    LK_RETCODE FindRecord(const void* pvRecord) const
    {
        if (!IsUsable())
            return m_lkrcState;
        if (pvRecord == NULL)
            return LK_BAD_RECORD;
        return _FindRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)));
    }
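
    // [Illustration only -- added for exposition; not part of the
    // original header.]  Typical refcounted lookup, reusing the
    // hypothetical CExampleRecord callbacks sketched earlier:
    //
    //     const void* pv = NULL;
    //     if (ht.FindKey((DWORD_PTR) "some-key", &pv) == LK_SUCCESS)
    //     {
    //         const CExampleRecord* pRec = (const CExampleRecord*) pv;
    //         UseRecord(pRec);                 // hypothetical consumer
    //         ExampleAddRefRecord(pRec, -1);   // release FindKey's reference
    //     }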

#ifdef LKR_DEPRECATED_ITERATORS

    // Walk the hash table, applying pfnAction to all records.
    // Locks the whole table for the duration with either a (possibly
    // shared) readlock or a writelock, according to lkl.
    // Loop is aborted if pfnAction returns LKA_ABORT.
    // Returns the number of successful applications.
    DWORD Apply(PFnRecordAction pfnAction,
                void*           pvState=NULL,
                LK_LOCKTYPE     lkl=LKL_READLOCK);

    // Walk the hash table, applying pfnAction to any records that match
    // pfnPredicate.  Locks the whole table for the duration with either
    // a (possibly shared) readlock or a writelock, according to lkl.
    // Loop is aborted if pfnAction returns LKA_ABORT.
    // Returns the number of successful applications.
    DWORD ApplyIf(PFnRecordPred   pfnPredicate,
                  PFnRecordAction pfnAction,
                  void*           pvState=NULL,
                  LK_LOCKTYPE     lkl=LKL_READLOCK);

    // Delete any records that match pfnPredicate.
    // Locks the table for the duration with a writelock.
    // Returns the number of deletions.
    //
    // Do *not* walk the hash table by hand with an iterator and call
    // DeleteKey.  The iterator will end up pointing to garbage.
    DWORD DeleteIf(PFnRecordPred pfnPredicate,
                   void*         pvState=NULL);

#endif // LKR_DEPRECATED_ITERATORS
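
    // [Illustration only -- added for exposition; not part of the
    // original header.]  A hedged sketch of a DeleteIf predicate that
    // expires stale records; the m_tExpires field is invented for this
    // example:
    //
    //     static LK_PREDICATE WINAPI
    //     ExampleIsExpired(const void* pvRecord, void* pvState)
    //     {
    //         const CExampleRecord* pRec = (const CExampleRecord*) pvRecord;
    //         const time_t tNow = *(time_t*) pvState;
    //         return (pRec->m_tExpires < tNow) ? LKP_DELETE : LKP_NO_ACTION;
    //     }
    //
    //     time_t tNow = time(NULL);
    //     DWORD  cDeleted = ht.DeleteIf(ExampleIsExpired, &tNow);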

    // Check table for consistency.  Returns 0 if okay, or the number of
    // errors otherwise.
    int CheckTable() const;

    // Remove all data from the table
    void Clear()
    {
        WriteLock();
        _Clear(true);
        WriteUnlock();
    }

    // Number of elements in the table
    DWORD Size() const
    { return m_cRecords; }

    // Maximum possible number of elements in the table
    DWORD MaxSize() const
    { return static_cast<DWORD>(m_MaxLoad * MAX_DIRSIZE * m_dwSegSize); }

    // Get hash table statistics
    CLKRHashTableStats GetStatistics() const;

    // Is the hash table usable?
    bool IsUsable() const
    { return (m_lkrcState == LK_SUCCESS); }

    // Is the hash table consistent and correct?
    bool IsValid() const
    {
        bool f = (m_lkrcState == LK_SUCCESS // serious internal failure?
                  &&  m_paDirSegs != NULL
                  &&  (MIN_DIRSIZE & (MIN_DIRSIZE-1)) == 0  // == (1 << N)
                  &&  (MAX_DIRSIZE & (MAX_DIRSIZE-1)) == 0
                  &&  MAX_DIRSIZE > MIN_DIRSIZE
                  &&  MIN_DIRSIZE <= m_cDirSegs  &&  m_cDirSegs <= MAX_DIRSIZE
                  &&  (m_cDirSegs & (m_cDirSegs-1)) == 0
                  &&  m_pfnExtractKey != NULL
                  &&  m_pfnCalcKeyHash != NULL
                  &&  m_pfnEqualKeys != NULL
                  &&  m_cActiveBuckets > 0
                  &&  ValidSignature()
                  );
        if (!f)
            m_lkrcState = LK_UNUSABLE;
        return f;
    }

    // Set the spin count on the table lock
    void SetTableLockSpinCount(WORD wSpins)
    { m_Lock.SetSpinCount(wSpins); }

    // Get the spin count on the table lock
    WORD GetTableLockSpinCount()
    { return m_Lock.GetSpinCount(); }

    // Set/Get the spin count on the bucket locks
    void SetBucketLockSpinCount(WORD wSpins);
    WORD GetBucketLockSpinCount();

    enum {
        SIGNATURE      = (('L') | ('K' << 8) | ('L' << 16) | ('H' << 24)),
        SIGNATURE_FREE = (('L') | ('K' << 8) | ('L' << 16) | ('x' << 24)),
    };

    bool
    ValidSignature() const
    { return m_dwSignature == SIGNATURE; }

    // Lock manipulators

    // Lock the table (exclusively) for writing
    void WriteLock()
    { m_Lock.WriteLock(); }

    // Lock the table (possibly shared) for reading
    void ReadLock() const
    { m_Lock.ReadLock(); }

    // Unlock the table for writing
    void WriteUnlock() const
    { m_Lock.WriteUnlock(); }

    // Unlock the table for reading
    void ReadUnlock() const
    { m_Lock.ReadUnlock(); }

    // Is the table already locked for writing?
    bool IsWriteLocked() const
    { return m_Lock.IsWriteLocked(); }

    // Is the table already locked for reading?
    bool IsReadLocked() const
    { return m_Lock.IsReadLocked(); }

    // Is the table unlocked for writing?
    bool IsWriteUnlocked() const
    { return m_Lock.IsWriteUnlocked(); }

    // Is the table unlocked for reading?
    bool IsReadUnlocked() const
    { return m_Lock.IsReadUnlocked(); }

    // Convert the read lock to a write lock
    void ConvertSharedToExclusive() const
    { m_Lock.ConvertSharedToExclusive(); }

    // Convert the write lock to a read lock
    void ConvertExclusiveToShared() const
    { m_Lock.ConvertExclusiveToShared(); }

    // LKRHASH_ALLOCATOR_DEFINITIONS(CLKRLinearHashTable);

public:
#ifdef LKR_DEPRECATED_ITERATORS

    // Iterators can be used to walk the table.  To ensure a consistent
    // view of the data, the iterator locks the whole table.  This can
    // have a negative effect upon performance, because no other thread
    // can do anything with the table.  Use with care.
    //
    // You should not use an iterator to walk the table, calling DeleteKey,
    // as the iterator will end up pointing to garbage.
    //
    // Use Apply, ApplyIf, or DeleteIf instead of iterators to safely
    // walk the table.
    //
    // Note that iterators acquire a reference to the record pointed to
    // and release that reference as soon as the iterator is incremented.
    // In other words, this code is safe:
    //     lkrc = ht.IncrementIterator(&iter);
    //     // assume lkrc == LK_SUCCESS for the sake of this example
    //     CMyHashTable::Record* pRec = iter.Record();
    //     Foo(pRec);  // uses pRec but doesn't hang on to it
    //     lkrc = ht.IncrementIterator(&iter);
    //
    // But this code is not safe because pRec is used out of the scope of the
    // iterator that provided it:
    //     lkrc = ht.IncrementIterator(&iter);
    //     CMyHashTable::Record* pRec = iter.Record();
    //     // Broken code: Should have called ht.AddRefRecord(pRec, +1) here
    //     lkrc = ht.IncrementIterator(&iter);
    //     Foo(pRec);  // Unsafe: because no longer have a valid reference
    //
    // If record has no reference-counting semantics, then you can ignore
    // the above remarks about scope.

    class CIterator
    {
    protected:
        friend class CLKRLinearHashTable;

        CLKRLinearHashTable* m_plht;         // which linear hash table?
        DWORD                m_dwBucketAddr; // bucket index
        CNodeClump*          m_pnc;          // a CNodeClump in bucket
        int                  m_iNode;        // offset within m_pnc
        LK_LOCKTYPE          m_lkl;          // readlock or writelock?

    private:
        // Private copy ctor and op= to prevent compiler synthesizing them.
        // Must provide (bad) implementation because we export instantiations.
        CIterator(const CIterator&) {*(BYTE*)NULL;}
        CIterator& operator=(const CIterator&) {return *(CIterator*)NULL;}

    public:
        CIterator(
            LK_LOCKTYPE lkl=LKL_WRITELOCK)
            : m_plht(NULL),
              m_dwBucketAddr(0),
              m_pnc(NULL),
              m_iNode(-1),
              m_lkl(lkl)
        {}

        // Return the record associated with this iterator
        const void* Record() const
        {
            IRTLASSERT(IsValid());
            return ((m_pnc != NULL
                     &&  m_iNode >= 0
                     &&  m_iNode < CLKRLinearHashTable::NODES_PER_CLUMP)
                    ? m_pnc->m_pvNode[m_iNode]
                    : NULL);
        }

        // Return the key associated with this iterator
        const DWORD_PTR Key() const
        {
            IRTLASSERT(m_plht != NULL);
            const void* pRec = Record();
            return ((pRec != NULL  &&  m_plht != NULL)
                    ? m_plht->_ExtractKey(pRec)
                    : NULL);
        }

        bool IsValid() const
        {
            return ((m_plht != NULL)
                    &&  (m_pnc != NULL)
                    &&  (0 <= m_iNode
                         &&  m_iNode < CLKRLinearHashTable::NODES_PER_CLUMP)
                    &&  (!m_pnc->IsEmptyNode(m_iNode)));
        }

        // Delete the record that the iterator points to.  Does an implicit
        // IncrementIterator after deletion.
        LK_RETCODE DeleteRecord();

        // Change the record that the iterator points to.  The new record
        // must have the same key as the old one.
        LK_RETCODE ChangeRecord(const void* pNewRec);
    };

    // Const iterators for readonly access.  You must use these with
    // const CLKRLinearHashTables.
    class CConstIterator : public CIterator
    {
    private:
        // Private, unimplemented copy ctor and op= to prevent
        // compiler synthesizing them.
        CConstIterator(const CConstIterator&);
        CConstIterator& operator=(const CConstIterator&);

    public:
        CConstIterator()
            : CIterator(LKL_READLOCK)
        {}
    };

private:
    // The public APIs lock the table.  The private ones, which are used
    // directly by CLKRHashTable, don't.
    LK_RETCODE _InitializeIterator(CIterator* piter);
    LK_RETCODE _CloseIterator(CIterator* piter);

public:
    // Initialize the iterator to point to the first item in the hash table
    // Returns LK_SUCCESS, LK_NO_MORE_ELEMENTS, or LK_BAD_ITERATOR.
    LK_RETCODE InitializeIterator(CIterator* piter)
    {
        IRTLASSERT(piter != NULL  &&  piter->m_plht == NULL);
        if (piter == NULL  ||  piter->m_plht != NULL)
            return LK_BAD_ITERATOR;

        if (piter->m_lkl == LKL_WRITELOCK)
            WriteLock();
        else
            ReadLock();
        return _InitializeIterator(piter);
    }

    // The const iterator version
    LK_RETCODE InitializeIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL  &&  piter->m_plht == NULL);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL  ||  piter->m_plht != NULL
                ||  piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;

        ReadLock();
        return const_cast<CLKRLinearHashTable*>(this)
                   ->_InitializeIterator(static_cast<CIterator*>(piter));
    }

    // Move the iterator on to the next item in the table.
    // Returns LK_SUCCESS, LK_NO_MORE_ELEMENTS, or LK_BAD_ITERATOR.
    LK_RETCODE IncrementIterator(CIterator* piter);

    LK_RETCODE IncrementIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL  &&  piter->m_plht == this);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL  ||  piter->m_plht != this
                ||  piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;

        return const_cast<CLKRLinearHashTable*>(this)
                   ->IncrementIterator(static_cast<CIterator*>(piter));
    }

    // Close the iterator.
    LK_RETCODE CloseIterator(CIterator* piter)
  1437. {
  1438. IRTLASSERT(piter != NULL && piter->m_plht == this);
  1439. if (piter == NULL || piter->m_plht != this)
  1440. return LK_BAD_ITERATOR;
  1441. _CloseIterator(piter);
  1442. if (piter->m_lkl == LKL_WRITELOCK)
  1443. WriteUnlock();
  1444. else
  1445. ReadUnlock();
  1446. return LK_SUCCESS;
  1447. }
  1448. // Close the CConstIterator
  1449. LK_RETCODE CloseIterator(CConstIterator* piter) const
  1450. {
  1451. IRTLASSERT(piter != NULL && piter->m_plht == this);
  1452. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  1453. if (piter == NULL || piter->m_plht != this
  1454. || piter->m_lkl == LKL_WRITELOCK)
  1455. return LK_BAD_ITERATOR;
  1456. const_cast<CLKRLinearHashTable*>(this)
  1457. ->_CloseIterator(static_cast<CIterator*>(piter));
  1458. ReadUnlock();
  1459. return LK_SUCCESS;
  1460. }
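// A typical iteration sketch (the table `lht` is illustrative):
//     CLKRLinearHashTable::CIterator iter(LKL_READLOCK);
//     for (LK_RETCODE lkrc = lht.InitializeIterator(&iter);
//          lkrc == LK_SUCCESS;
//          lkrc = lht.IncrementIterator(&iter))
//     {
//         const void* pvRec = iter.Record();
//         // ... use pvRec within this scope ...
//     }
//     lht.CloseIterator(&iter);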
  1461. #endif // LKR_DEPRECATED_ITERATORS
  1462. };
  1463. //--------------------------------------------------------------------
  1464. // CLKRHashTable
  1465. //
  1466. // To improve concurrency, a hash table is divided into a number of
  1467. // (independent) subtables. Each subtable is a linear hash table. The
  1468. // number of subtables is defined when the table is created and remains
  1469. // fixed thereafter. Records are assigned to subtables based on their
  1470. // hashed key.
  1471. //
  1472. // For small or low-contention hashtables, you can bypass this
  1473. // thin wrapper and use CLKRLinearHashTable directly. The methods are
  1474. // documented in the declarations for CLKRLinearHashTable (above).
  1475. //--------------------------------------------------------------------
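// Construction sketch (the My* callbacks are illustrative, supplied by
// the caller; see the PFn* typedefs earlier in this header):
//     CLKRHashTable ht("url-cache",
//                      MyExtractKey, MyCalcKeyHash,
//                      MyEqualKeys, MyAddRefRecord,
//                      LK_DFLT_MAXLOAD, LK_DFLT_INITSIZE,
//                      8);    // 8 subtables to reduce contention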
  1476. class IRTL_DLLEXP CLKRHashTable
  1477. {
  1478. private:
  1479. typedef CLKRLinearHashTable SubTable;
  1480. public:
  1481. typedef SubTable::TableLock TableLock;
  1482. typedef SubTable::BucketLock BucketLock;
  1483. #ifdef LKR_DEPRECATED_ITERATORS
  1484. class CIterator;
  1485. friend class CLKRHashTable::CIterator;
  1486. #endif // LKR_DEPRECATED_ITERATORS
  1487. // aliases for convenience
  1488. enum {
  1489. NAME_SIZE = SubTable::NAME_SIZE,
  1490. HASH_INVALID_SIGNATURE = SubTable::HASH_INVALID_SIGNATURE,
  1491. NODES_PER_CLUMP = SubTable::NODES_PER_CLUMP,
  1492. };
  1493. private:
  1494. // Hash table parameters
  1495. DWORD m_dwSignature; // debugging: id & corruption check
  1496. CHAR m_szName[NAME_SIZE]; // an identifier for debugging
  1497. DWORD m_cSubTables; // number of subtables
  1498. SubTable** m_palhtDir; // array of subtables
  1499. // type-specific function pointers
  1500. PFnExtractKey m_pfnExtractKey;
  1501. PFnCalcKeyHash m_pfnCalcKeyHash;
  1502. mutable LK_RETCODE m_lkrcState; // Internal state of table
  1503. #ifdef LKR_SUBTABLE_MASK
  1504. int m_nSubTableMask;
  1505. #endif // LKR_SUBTABLE_MASK
  1506. #ifndef LKR_NO_GLOBAL_LIST
  1507. static CLockedDoubleList sm_llGlobalList; // All active CLKRHashTables
  1508. CListEntry m_leGlobalList;
  1509. #endif // !LKR_NO_GLOBAL_LIST
  1510. void
  1511. _InsertThisIntoGlobalList()
  1512. {
  1513. #ifndef LKR_NO_GLOBAL_LIST
  1514. sm_llGlobalList.InsertHead(&m_leGlobalList);
  1515. #endif // !LKR_NO_GLOBAL_LIST
  1516. }
  1517. void
  1518. _RemoveThisFromGlobalList()
  1519. {
  1520. #ifndef LKR_NO_GLOBAL_LIST
  1521. sm_llGlobalList.RemoveEntry(&m_leGlobalList);
  1522. #endif // !LKR_NO_GLOBAL_LIST
  1523. }
  1524. LKRHASH_GLOBAL_LOCK_DECLARATIONS();
  1525. // Private copy ctor and op= to prevent compiler synthesizing them.
  1526. // Must provide a (bad) implementation because we export instantiations.
  1527. // TODO: implement these properly; they could be useful.
  1528. CLKRHashTable(const CLKRHashTable&) {*(BYTE*)NULL;}
  1529. CLKRHashTable& operator=(const CLKRHashTable&) {return *(CLKRHashTable*)NULL;}
  1530. // Extract the key from the record
  1531. const DWORD_PTR _ExtractKey(const void* pvRecord) const
  1532. {
  1533. IRTLASSERT(pvRecord != NULL);
  1534. IRTLASSERT(m_pfnExtractKey != NULL);
  1535. return (*m_pfnExtractKey)(pvRecord);
  1536. }
  1537. // Hash the key
  1538. DWORD _CalcKeyHash(const DWORD_PTR pnKey) const
  1539. {
  1540. // Note pnKey==0 is acceptable, as the real key type could be an int
  1541. IRTLASSERT(m_pfnCalcKeyHash != NULL);
  1542. DWORD dwHash = (*m_pfnCalcKeyHash)(pnKey);
  1543. // We forcibly scramble the result to help ensure a better distribution
  1544. #ifdef HASH_RANDOMIZE_BITS
  1545. dwHash = RandomizeBits(dwHash);
  1546. #else // !HASH_RANDOMIZE_BITS
  1547. dwHash = HashScramble(dwHash);
  1548. #endif // !HASH_RANDOMIZE_BITS
  1549. IRTLASSERT(dwHash != HASH_INVALID_SIGNATURE);
  1550. return dwHash;
  1551. }
  1552. // Use the key's hash signature to multiplex into a subtable
  1553. SubTable* _SubTable(DWORD dwSignature) const;
  1554. // Memory allocation wrappers to allow us to simulate allocation
  1555. // failures during testing
  1556. static SubTable** const
  1557. _AllocateSubTableArray(
  1558. size_t n);
  1559. static bool
  1560. _FreeSubTableArray(
  1561. SubTable** palht);
  1562. static SubTable* const
  1563. _AllocateSubTable(
  1564. LPCSTR pszName, // An identifier for debugging
  1565. PFnExtractKey pfnExtractKey, // Extract key from record
  1566. PFnCalcKeyHash pfnCalcKeyHash, // Calculate hash signature of key
  1567. PFnEqualKeys pfnEqualKeys, // Compare two keys
  1568. PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
  1569. double maxload, // Upper bound on average chain length
  1570. DWORD initsize, // Initial size of hash table.
  1571. CLKRHashTable* phtParent // Owning table.
  1572. );
  1573. static bool
  1574. _FreeSubTable(
  1575. SubTable* plht);
  1576. public:
  1577. CLKRHashTable(
  1578. LPCSTR pszName, // An identifier for debugging
  1579. PFnExtractKey pfnExtractKey, // Extract key from record
  1580. PFnCalcKeyHash pfnCalcKeyHash, // Calculate hash signature of key
  1581. PFnEqualKeys pfnEqualKeys, // Compare two keys
  1582. PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
  1583. double maxload=LK_DFLT_MAXLOAD, // bound on avg chain length
  1584. DWORD initsize=LK_DFLT_INITSIZE, // Initial size of hash table.
  1585. DWORD num_subtbls=LK_DFLT_NUM_SUBTBLS // #subordinate hash tables.
  1586. );
  1587. ~CLKRHashTable();
  1588. static const char* ClassName() {return "CLKRHashTable";}
  1589. int NumSubTables() const {return m_cSubTables;}
  1590. static LK_TABLESIZE NumSubTables(DWORD& rinitsize, DWORD& rnum_subtbls);
  1591. // Thin wrappers for the corresponding methods in CLKRLinearHashTable
  1592. LK_RETCODE InsertRecord(const void* pvRecord, bool fOverwrite=false);
  1593. LK_RETCODE DeleteKey(const DWORD_PTR pnKey);
  1594. LK_RETCODE DeleteRecord(const void* pvRecord);
  1595. LK_RETCODE FindKey(const DWORD_PTR pnKey,
  1596. const void** ppvRecord) const;
  1597. LK_RETCODE FindRecord(const void* pvRecord) const;
  1598. #ifdef LKR_DEPRECATED_ITERATORS
  1599. DWORD Apply(PFnRecordAction pfnAction,
  1600. void* pvState=NULL,
  1601. LK_LOCKTYPE lkl=LKL_READLOCK);
  1602. DWORD ApplyIf(PFnRecordPred pfnPredicate,
  1603. PFnRecordAction pfnAction,
  1604. void* pvState=NULL,
  1605. LK_LOCKTYPE lkl=LKL_READLOCK);
  1606. DWORD DeleteIf(PFnRecordPred pfnPredicate,
  1607. void* pvState=NULL);
  1608. #endif // LKR_DEPRECATED_ITERATORS
  1609. void Clear();
  1610. int CheckTable() const;
  1611. DWORD Size() const;
  1612. DWORD MaxSize() const;
  1613. CLKRHashTableStats GetStatistics() const;
  1614. bool IsValid() const;
  1615. void SetTableLockSpinCount(WORD wSpins);
  1616. WORD GetTableLockSpinCount();
  1617. void SetBucketLockSpinCount(WORD wSpins);
  1618. WORD GetBucketLockSpinCount();
  1619. enum {
  1620. SIGNATURE = (('L') | ('K' << 8) | ('H' << 16) | ('T' << 24)),
  1621. SIGNATURE_FREE = (('L') | ('K' << 8) | ('H' << 16) | ('x' << 24)),
  1622. };
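// On a little-endian machine, these signatures read as "LKHT" and
// "LKHx" when the DWORD is viewed as bytes in a debugger.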
  1623. bool
  1624. ValidSignature() const
  1625. { return m_dwSignature == SIGNATURE;}
  1626. // Is the hash table usable?
  1627. bool IsUsable() const
  1628. { return (m_lkrcState == LK_SUCCESS); }
  1629. void WriteLock();
  1630. void ReadLock() const;
  1631. void WriteUnlock() const;
  1632. void ReadUnlock() const;
  1633. bool IsWriteLocked() const;
  1634. bool IsReadLocked() const;
  1635. bool IsWriteUnlocked() const;
  1636. bool IsReadUnlocked() const;
  1637. void ConvertSharedToExclusive() const;
  1638. void ConvertExclusiveToShared() const;
  1639. // LKRHASH_ALLOCATOR_DEFINITIONS(CLKRHashTable);
  1640. public:
  1641. #ifdef LKR_DEPRECATED_ITERATORS
  1642. typedef SubTable::CIterator CLHTIterator;
  1643. class CIterator : public CLHTIterator
  1644. {
  1645. protected:
  1646. friend class CLKRHashTable;
  1647. CLKRHashTable* m_pht; // which hash table?
  1648. int m_ist; // which subtable
  1649. private:
  1650. // Private copy ctor and op= to prevent compiler synthesizing them.
  1651. // Must provide (bad) implementation because we export instantiations.
  1652. CIterator(const CIterator&) {*(BYTE*)NULL;}
  1653. CIterator& operator=(const CIterator&) {return *(CIterator*)NULL;}
  1654. public:
  1655. CIterator(
  1656. LK_LOCKTYPE lkl=LKL_WRITELOCK)
  1657. : CLHTIterator(lkl),
  1658. m_pht(NULL),
  1659. m_ist(-1)
  1660. {}
  1661. const void* Record() const
  1662. {
  1663. IRTLASSERT(IsValid());
  1664. // This explicit downcast works around a compiler bug: without it,
  1665. // calling CLHTIterator::Record would recurse into this function
  1666. // until the stack overflowed.
  1667. const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
  1668. return pBase->Record();
  1669. }
  1670. const DWORD_PTR Key() const
  1671. {
  1672. IRTLASSERT(IsValid());
  1673. const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
  1674. return pBase->Key();
  1675. }
  1676. bool IsValid() const
  1677. {
  1678. const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
  1679. return (m_pht != NULL && m_ist >= 0 && pBase->IsValid());
  1680. }
  1681. };
  1682. // Const iterators for readonly access
  1683. class CConstIterator : public CIterator
  1684. {
  1685. private:
  1686. // Private, unimplemented copy ctor and op= to prevent
  1687. // compiler synthesizing them.
  1688. CConstIterator(const CConstIterator&);
  1689. CConstIterator& operator=(const CConstIterator&);
  1690. public:
  1691. CConstIterator()
  1692. : CIterator(LKL_READLOCK)
  1693. {}
  1694. };
  1695. public:
  1696. LK_RETCODE InitializeIterator(CIterator* piter);
  1697. LK_RETCODE IncrementIterator(CIterator* piter);
  1698. LK_RETCODE CloseIterator(CIterator* piter);
  1699. LK_RETCODE InitializeIterator(CConstIterator* piter) const
  1700. {
  1701. IRTLASSERT(piter != NULL && piter->m_pht == NULL);
  1702. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  1703. if (piter == NULL || piter->m_pht != NULL
  1704. || piter->m_lkl == LKL_WRITELOCK)
  1705. return LK_BAD_ITERATOR;
  1706. return const_cast<CLKRHashTable*>(this)
  1707. ->InitializeIterator(static_cast<CIterator*>(piter));
  1708. }
  1709. LK_RETCODE IncrementIterator(CConstIterator* piter) const
  1710. {
  1711. IRTLASSERT(piter != NULL && piter->m_pht == this);
  1712. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  1713. if (piter == NULL || piter->m_pht != this
  1714. || piter->m_lkl == LKL_WRITELOCK)
  1715. return LK_BAD_ITERATOR;
  1716. return const_cast<CLKRHashTable*>(this)
  1717. ->IncrementIterator(static_cast<CIterator*>(piter));
  1718. }
  1719. LK_RETCODE CloseIterator(CConstIterator* piter) const
  1720. {
  1721. IRTLASSERT(piter != NULL && piter->m_pht == this);
  1722. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  1723. if (piter == NULL || piter->m_pht != this
  1724. || piter->m_lkl == LKL_WRITELOCK)
  1725. return LK_BAD_ITERATOR;
  1726. return const_cast<CLKRHashTable*>(this)
  1727. ->CloseIterator(static_cast<CIterator*>(piter));
  1728. }
  1729. #endif // LKR_DEPRECATED_ITERATORS
  1730. };
  1731. //--------------------------------------------------------------------
  1732. // A typesafe wrapper for CLKRHashTable (or CLKRLinearHashTable).
  1733. //
  1734. // * _Derived must derive from CTypedHashTable and provide certain member
  1735. // functions. It's needed for various downcasting operations. See
  1736. // CStringTestHashTable and CNumberTestHashTable below.
  1737. // * _Record is the type of the record. C{Linear}HashTable will store
  1738. // pointers to _Record.
  1739. // * _Key is the type of the key. _Key is used directly; i.e., it is
  1740. // not assumed to be a pointer type. C{Linear}HashTable assumes that
  1741. // the key is stored in the associated record. See the comments
  1742. // at the declaration of PFnExtractKey for more details.
  1743. //
  1744. // (optional parameters):
  1745. // * _BaseHashTable is the base hash table: CLKRHashTable or
  1746. //   CLKRLinearHashTable
  1747. // * _BaseIterator is the iterator type, _BaseHashTable::CIterator
  1748. //
  1749. // CTypedHashTable could derive directly from CLKRLinearHashTable, if you
  1750. // don't need the extra overhead of CLKRHashTable (which is quite low).
  1751. //
  1752. // You may need to add the following line to your code to disable
  1753. // warning messages about truncating extremely long identifiers.
  1754. // #pragma warning (disable : 4786)
  1755. //--------------------------------------------------------------------
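// A minimal sketch of a derived table (all names are illustrative; the
// real test classes are mentioned above). The four static methods are
// the ones that the private _ExtractKey/_CalcKeyHash/_EqualKeys/
// _AddRefRecord wrappers below expect _Derived to provide:
//     class CMyTable
//         : public CTypedHashTable<CMyTable, CMyRecord, const char*>
//     {
//     public:
//         CMyTable()
//             : CTypedHashTable<CMyTable, CMyRecord, const char*>(
//                   "mytable")
//         {}
//         static const char* ExtractKey(const CMyRecord* pRec);
//         static DWORD       CalcKeyHash(const char* pszKey);
//         static bool        EqualKeys(const char* psz1,
//                                      const char* psz2);
//         static void        AddRefRecord(CMyRecord* pRec, int nIncr);
//     };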
  1756. #define LKRHASH_HACKY_CAST(T, pv) ((T) (UINT_PTR) (pv))
  1757. template < class _Derived, class _Record, class _Key,
  1758. class _BaseHashTable=CLKRHashTable
  1759. #ifdef LKR_DEPRECATED_ITERATORS
  1760. , class _BaseIterator=_BaseHashTable::CIterator
  1761. #endif // LKR_DEPRECATED_ITERATORS
  1762. >
  1763. class CTypedHashTable : public _BaseHashTable
  1764. {
  1765. public:
  1766. // convenient aliases
  1767. typedef _Derived Derived;
  1768. typedef _Record Record;
  1769. typedef _Key Key;
  1770. typedef _BaseHashTable BaseHashTable;
  1771. typedef CTypedHashTable<_Derived, _Record, _Key, _BaseHashTable
  1772. #ifdef LKR_DEPRECATED_ITERATORS
  1773. , _BaseIterator
  1774. #endif // LKR_DEPRECATED_ITERATORS
  1775. > HashTable;
  1776. #ifdef LKR_DEPRECATED_ITERATORS
  1777. typedef _BaseIterator BaseIterator;
  1778. #endif // LKR_DEPRECATED_ITERATORS
  1779. #ifdef LKR_DEPRECATED_ITERATORS
  1780. // ApplyIf() and DeleteIf(): Does the record match the predicate?
  1781. // Note: takes a Record*, not a const Record*. You can modify the
  1782. // record in Pred() or Action(), if you like, but if you do, you
  1783. // should use LKL_WRITELOCK to lock the table.
  1784. typedef LK_PREDICATE (WINAPI *PFnRecordPred) (Record* pRec, void* pvState);
  1785. // Apply() et al: Perform action on record.
  1786. typedef LK_ACTION (WINAPI *PFnRecordAction)(Record* pRec, void* pvState);
  1787. #endif // LKR_DEPRECATED_ITERATORS
  1788. private:
  1789. // Wrappers for the typesafe methods exposed by the derived class
  1790. static const DWORD_PTR WINAPI
  1791. _ExtractKey(const void* pvRecord)
  1792. {
  1793. const _Record* pRec = static_cast<const _Record*>(pvRecord);
  1794. _Key key = static_cast<_Key>(_Derived::ExtractKey(pRec));
  1795. return (DWORD_PTR) key;
  1796. }
  1797. static DWORD WINAPI
  1798. _CalcKeyHash(const DWORD_PTR pnKey)
  1799. {
  1800. _Key key = LKRHASH_HACKY_CAST(_Key, pnKey);
  1801. return _Derived::CalcKeyHash(key);
  1802. }
  1803. static bool WINAPI
  1804. _EqualKeys(const DWORD_PTR pnKey1, const DWORD_PTR pnKey2)
  1805. {
  1806. _Key key1 = LKRHASH_HACKY_CAST(_Key, pnKey1);
  1807. _Key key2 = LKRHASH_HACKY_CAST(_Key, pnKey2);
  1808. return _Derived::EqualKeys(key1, key2);
  1809. }
  1810. static void WINAPI
  1811. _AddRefRecord(const void* pvRecord, int nIncr)
  1812. {
  1813. _Record* pRec = static_cast<_Record*>(const_cast<void*>(pvRecord));
  1814. _Derived::AddRefRecord(pRec, nIncr);
  1815. }
  1816. #ifdef LKR_DEPRECATED_ITERATORS
  1817. // Typesafe wrappers for Apply, ApplyIf, and DeleteIf.
  1818. class CState
  1819. {
  1820. public:
  1821. PFnRecordPred m_pfnPred;
  1822. PFnRecordAction m_pfnAction;
  1823. void* m_pvState;
  1824. CState(
  1825. PFnRecordPred pfnPred,
  1826. PFnRecordAction pfnAction,
  1827. void* pvState)
  1828. : m_pfnPred(pfnPred), m_pfnAction(pfnAction), m_pvState(pvState)
  1829. {}
  1830. };
  1831. static LK_PREDICATE WINAPI
  1832. _Pred(const void* pvRecord, void* pvState)
  1833. {
  1834. _Record* pRec = static_cast<_Record*>(const_cast<void*>(pvRecord));
  1835. CState* pState = static_cast<CState*>(pvState);
  1836. return (*pState->m_pfnPred)(pRec, pState->m_pvState);
  1837. }
  1838. static LK_ACTION WINAPI
  1839. _Action(const void* pvRecord, void* pvState)
  1840. {
  1841. _Record* pRec = static_cast<_Record*>(const_cast<void*>(pvRecord));
  1842. CState* pState = static_cast<CState*>(pvState);
  1843. return (*pState->m_pfnAction)(pRec, pState->m_pvState);
  1844. }
  1845. #endif // LKR_DEPRECATED_ITERATORS
  1846. public:
  1847. CTypedHashTable(
  1848. LPCSTR pszName, // An identifier for debugging
  1849. double maxload=LK_DFLT_MAXLOAD, // Upper bound on avg chain length
  1850. DWORD initsize=LK_DFLT_INITSIZE, // Initial size of hash table.
  1851. DWORD num_subtbls=LK_DFLT_NUM_SUBTBLS// #subordinate hash tables.
  1852. )
  1853. : _BaseHashTable(pszName, _ExtractKey, _CalcKeyHash, _EqualKeys,
  1854. _AddRefRecord, maxload, initsize, num_subtbls)
  1855. {}
  1856. LK_RETCODE InsertRecord(const _Record* pRec, bool fOverwrite=false)
  1857. { return _BaseHashTable::InsertRecord(pRec, fOverwrite); }
  1858. LK_RETCODE DeleteKey(const _Key key)
  1859. {
  1860. const void* pvKey = UlongToPtr(key);
  1861. DWORD_PTR pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
  1862. return _BaseHashTable::DeleteKey(pnKey);
  1863. }
  1864. //{ return _BaseHashTable::DeleteKey(reinterpret_cast<const DWORD_PTR>(key));}
  1865. LK_RETCODE DeleteRecord(const _Record* pRec)
  1866. { return _BaseHashTable::DeleteRecord(pRec);}
  1867. // Note: returns a _Record**, not a const _Record**. You can use a
  1868. // const type for the template parameter to ensure constness.
  1869. LK_RETCODE FindKey(const _Key key, _Record** ppRec) const
  1870. {
  1871. if (ppRec == NULL)
  1872. return LK_BAD_RECORD;
  1873. *ppRec = NULL;
  1874. const void* pvRec = NULL;
  1875. const void* pvKey = UlongToPtr(key);
  1876. DWORD_PTR pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
  1877. LK_RETCODE lkrc = _BaseHashTable::FindKey(pnKey, &pvRec);
  1878. *ppRec = static_cast<_Record*>(const_cast<void*>(pvRec));
  1879. return lkrc;
  1880. }
  1881. // Note: returns a _Record**, not a const _Record**. You can use a
  1882. // const type for the template parameter to ensure constness.
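// Unlike FindKey, this variant assumes that _Key is already a pointer
// type (such as a BSTR), so the key is not passed through UlongToPtr.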
  1883. LK_RETCODE FindKeyBstr(const _Key key, _Record** ppRec) const
  1884. {
  1885. if (ppRec == NULL)
  1886. return LK_BAD_RECORD;
  1887. *ppRec = NULL;
  1888. const void* pvRec = NULL;
  1889. const void* pvKey = key;
  1890. DWORD_PTR pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
  1891. LK_RETCODE lkrc = _BaseHashTable::FindKey(pnKey, &pvRec);
  1892. *ppRec = static_cast<_Record*>(const_cast<void*>(pvRec));
  1893. return lkrc;
  1894. }
  1895. LK_RETCODE FindRecord(const _Record* pRec) const
  1896. { return _BaseHashTable::FindRecord(pRec);}
  1897. // Other C{Linear}HashTable methods can be exposed without change
  1898. #ifdef LKR_DEPRECATED_ITERATORS
  1899. // Typesafe wrappers for Apply et al
  1900. DWORD Apply(PFnRecordAction pfnAction,
  1901. void* pvState=NULL,
  1902. LK_LOCKTYPE lkl=LKL_READLOCK)
  1903. {
  1904. IRTLASSERT(pfnAction != NULL);
  1905. if (pfnAction == NULL)
  1906. return 0;
  1907. CState state(NULL, pfnAction, pvState);
  1908. return _BaseHashTable::Apply(_Action, &state, lkl);
  1909. }
  1910. DWORD ApplyIf(PFnRecordPred pfnPredicate,
  1911. PFnRecordAction pfnAction,
  1912. void* pvState=NULL,
  1913. LK_LOCKTYPE lkl=LKL_READLOCK)
  1914. {
  1915. IRTLASSERT(pfnPredicate != NULL && pfnAction != NULL);
  1916. if (pfnPredicate == NULL || pfnAction == NULL)
  1917. return 0;
  1918. CState state(pfnPredicate, pfnAction, pvState);
  1919. return _BaseHashTable::ApplyIf(_Pred, _Action, &state, lkl);
  1920. }
  1921. DWORD DeleteIf(PFnRecordPred pfnPredicate, void* pvState=NULL)
  1922. {
  1923. IRTLASSERT(pfnPredicate != NULL);
  1924. if (pfnPredicate == NULL)
  1925. return 0;
  1926. CState state(pfnPredicate, NULL, pvState);
  1927. return _BaseHashTable::DeleteIf(_Pred, &state);
  1928. }
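// Usage sketch for DeleteIf (CMyRecord and IsStale are illustrative;
// the LK_PREDICATE values are declared earlier in this header):
//     static LK_PREDICATE WINAPI
//     IsStale(CMyRecord* pRec, void* /*pvState*/)
//     {
//         return pRec->IsExpired() ? LKP_PERFORM : LKP_NO_ACTION;
//     }
//     // ...
//     DWORD cDeleted = htMyTable.DeleteIf(IsStale);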
  1929. // Typesafe wrappers for iterators
  1930. class CIterator : public _BaseIterator
  1931. {
  1932. private:
  1933. // Private, unimplemented copy ctor and op= to prevent
  1934. // compiler synthesizing them.
  1935. CIterator(const CIterator&);
  1936. CIterator& operator=(const CIterator&);
  1937. public:
  1938. CIterator(
  1939. LK_LOCKTYPE lkl=LKL_WRITELOCK)
  1940. : _BaseIterator(lkl)
  1941. {}
  1942. _Record* Record() const
  1943. {
  1944. const _BaseIterator* pBase = static_cast<const _BaseIterator*>(this);
  1945. return reinterpret_cast<_Record*>(const_cast<void*>(
  1946. pBase->Record()));
  1947. }
  1948. _Key Key() const
  1949. {
  1950. const _BaseIterator* pBase = static_cast<const _BaseIterator*>(this);
  1951. return reinterpret_cast<_Key>(reinterpret_cast<void*>(pBase->Key()));
  1952. }
  1953. };
  1954. // readonly iterator
  1955. class CConstIterator : public CIterator
  1956. {
  1957. private:
  1958. // Private, unimplemented copy ctor and op= to prevent
  1959. // compiler synthesizing them.
  1960. CConstIterator(const CConstIterator&);
  1961. CConstIterator& operator=(const CConstIterator&);
  1962. public:
  1963. CConstIterator()
  1964. : CIterator(LKL_READLOCK)
  1965. {}
  1966. const _Record* Record() const
  1967. {
  1968. return CIterator::Record();
  1969. }
  1970. const _Key Key() const
  1971. {
  1972. return CIterator::Key();
  1973. }
  1974. };
  1975. public:
  1976. LK_RETCODE InitializeIterator(CIterator* piter)
  1977. {
  1978. return _BaseHashTable::InitializeIterator(piter);
  1979. }
  1980. LK_RETCODE IncrementIterator(CIterator* piter)
  1981. {
  1982. return _BaseHashTable::IncrementIterator(piter);
  1983. }
  1984. LK_RETCODE CloseIterator(CIterator* piter)
  1985. {
  1986. return _BaseHashTable::CloseIterator(piter);
  1987. }
  1988. LK_RETCODE InitializeIterator(CConstIterator* piter) const
  1989. {
  1990. return const_cast<HashTable*>(this)
  1991. ->InitializeIterator(static_cast<CIterator*>(piter));
  1992. }
  1993. LK_RETCODE IncrementIterator(CConstIterator* piter) const
  1994. {
  1995. return const_cast<HashTable*>(this)
  1996. ->IncrementIterator(static_cast<CIterator*>(piter));
  1997. }
  1998. LK_RETCODE CloseIterator(CConstIterator* piter) const
  1999. {
  2000. return const_cast<HashTable*>(this)
  2001. ->CloseIterator(static_cast<CIterator*>(piter));
  2002. }
  2003. #endif // LKR_DEPRECATED_ITERATORS
  2004. };
  2005. #ifdef __LKRHASH_NAMESPACE__
  2006. }
  2007. #endif // __LKRHASH_NAMESPACE__
  2008. #endif // __LKRHASH_H__