Leaked source code of Windows Server 2003
/*++

Copyright (c) 1998-2002 Microsoft Corporation

Module Name:

    LKRhash.h

Abstract:

    Declares LKRhash: a fast, scalable, cache- and MP-friendly hash table

Author:

    Paul (Per-Ake) Larson, palarson@microsoft.com, July 1997
    Murali R. Krishnan (MuraliK)
    George V. Reilly (GeorgeRe) 06-Jan-1998

Environment:

    Win32 - User Mode

Project:

    Internet Information Server RunTime Library

Revision History:

    10/01/1998 - Change name from LKhash to LKRhash

--*/
#define LKR_STL_ITERATORS 1
// #define LKR_DEPRECATED_ITERATORS
#define LKR_APPLY_IF
#undef  LKR_COUNTDOWN

#ifndef LKR_TABLE_LOCK
# define LKR_TABLE_LOCK  CReaderWriterLock3
#endif // !LKR_TABLE_LOCK

#ifndef LKR_BUCKET_LOCK
# ifdef LKR_DEPRECATED_ITERATORS
#  define LKR_BUCKET_LOCK CReaderWriterLock3
# else  // !LKR_DEPRECATED_ITERATORS
#  define LKR_BUCKET_LOCK CSmallSpinLock
# endif // !LKR_DEPRECATED_ITERATORS
#endif // !LKR_BUCKET_LOCK

#ifndef __LKRHASH_H__
#define __LKRHASH_H__
//=====================================================================
// The class CLKRLinearHashTable defined in this file provides dynamic hash
// tables, i.e. tables that grow and shrink dynamically with
// the number of records in the table.
// The basic method used is linear hashing, as explained in:
//
//   P.-A. Larson, Dynamic Hash Tables, Comm. of the ACM, 31, 4 (1988)
//
// This version has the following characteristics:
// - It is thread-safe and uses spin locks for synchronization.
// - It was designed to support very high rates of concurrent
//   operations (inserts/deletes/lookups). It achieves this by
//   (a) partitioning a CLKRHashTable into a collection of
//       CLKRLinearHashTables to reduce contention on the global table lock.
//   (b) minimizing the hold time on a table lock, preferring to lock
//       down a bucket chain instead.
// - The design is L1 cache-conscious. See CNodeClump.
// - It is designed for sets varying in size from a dozen
//   elements to several million.
//
// Main classes:
//   CLKRLinearHashTable: thread-safe linear hash table
//   CLKRHashTable:       collection of CLKRLinearHashTables
//   CTypedHashTable:     typesafe wrapper for CLKRHashTable
//
//
// Paul Larson, palarson@microsoft.com, July 1997
// Original implementation with input from Murali R. Krishnan,
// muralik@microsoft.com.
//
// George V. Reilly, georgere@microsoft.com, Dec 1997-Jan 1998
// Massive cleanup and rewrite. Added templates.
//=====================================================================
// 1) Linear Hashing
// ------------------
//
// Linear hash tables grow and shrink dynamically with the number of
// records in the table. The growth or shrinkage is smooth: logically,
// one bucket at a time but physically in larger increments
// (64 buckets). An insertion (deletion) may cause an expansion
// (contraction) of the table. This causes relocation of a small number
// of records (at most one bucket worth). All operations (insert,
// delete, lookup) take constant expected time, regardless of the
// current size or the growth of the table.
//
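// Illustrative sketch (not part of the original header; the mask and
// index values are made up): how the _H0/_H1 helpers declared further
// down resolve a signature to a bucket while bucket 0 of a 4-bucket
// table is being split into buckets 0 and 4:
//
//   DWORD a0 = dwSignature & 0x3;        // _H0: low 2 bits, buckets 0..3
//   DWORD a  = (a0 < m_iExpansionIdx)    // already split this round?
//              ? (dwSignature & 0x7)     // _H1: low 3 bits, buckets 0..7
//              : a0;
//
// Splitting a bucket relocates only those records whose next signature
// bit is 1, which is why each expansion step touches at most one
// bucket's worth of records.
//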
// 2) LKR extensions to Linear hash table
// --------------------------------------
//
// Larson-Krishnan-Reilly extensions to Linear hash tables for multiprocessor
// scalability and improved cache performance.
//
// Traditional implementations of linear hash tables use one global lock
// to prevent interference between concurrent operations
// (insert/delete/lookup) on the table. The single lock easily becomes
// the bottleneck in SMP scenarios when multiple threads are used.
//
// Traditionally, a (hash) bucket is implemented as a chain of
// single-item nodes. Every operation results in chasing down a chain
// looking for an item. However, pointer chasing is very slow on modern
// systems because almost every jump results in a cache miss. L2 (or L3)
// cache misses are very expensive in missed CPU cycles and the cost is
// increasing (going to 100s of cycles in the future).
//
// LKR extensions offer
// 1) Partitioning (by hashing) of records among multiple subtables.
//    Each subtable has locks but there is no global lock. Each
//    subtable receives a much lower rate of operations, resulting in
//    fewer conflicts.
//
// 2) Improved cache locality by grouping keys and their hash values
//    into contiguous chunks that fit exactly into one (or a few)
//    cache lines.
//
// Specifically, the implementation that exists here achieves this using
// the following techniques.
//
// Class CLKRHashTable is the top-level data structure that dynamically
// creates m_cSubTables linear hash tables. The CLKRLinearHashTables act as
// the subtables to which items and accesses are fanned out. A good
// hash function multiplexes requests uniformly to various subtables,
// thus minimizing traffic to any single subtable. The implementation
// uses a home-grown version of bounded spinlocks, that is, a thread
// does not spin on a lock indefinitely, instead yielding after a
// predetermined number of loops.
//
// Each CLKRLinearHashTable consists of a CDirEntry pointing to segments
// each holding m_dwSegSize CBuckets. Each CBucket in turn consists of a
// chain of CNodeClumps. Each CNodeClump contains a group of
// NODES_PER_CLUMP hash values (aka hash keys or signatures) and
// pointers to the associated data items. Keeping the signatures
// together increases the cache locality in scans for lookup.
//
// Traditionally, people store a linked-list element right inside the
// object that is hashed and use this linked list for the chaining of data
// blocks. However, keeping just the pointers to the data object and
// not chaining through them limits the need for bringing in the data
// object to the cache. We need to access the data object only if the
// hash values match. This limits the cache-thrashing behaviour
// exhibited by conventional implementations. It has the additional
// benefit that the objects themselves do not need to be modified
// in order to be collected in the hash table (i.e., it's non-invasive).
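//
// Illustrative sketch (not part of the original header; pbkt, pnKey and
// dwSignature are hypothetical locals) of the signature-first scan this
// buys us; the names mirror the CNodeClump members declared below:
//
//   for (CNodeClump* pnc = &pbkt->m_ncFirst; pnc != NULL; pnc = pnc->m_pncNext)
//       for (int i = 0; i < CNodeClump::NODES_PER_CLUMP; ++i)
//           if (pnc->m_dwKeySigs[i] == dwSignature      // cheap: in-clump
//               && _EqualKeys(pnKey, _ExtractKey(pnc->m_pvNode[i])))
//               return pnc->m_pvNode[i];                // deref only on match
//
// The record itself is touched only after its cached signature matches.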
//--------------------------------------------------------------------
// TODO
// * Provide support for multiple, identical keys. Needed for EqualRange,
//   hash_multiset, and hash_multimap.
// * Provide implementations of the STL collection classes: hash_map,
//   hash_set, hash_multimap, and hash_multiset.
// * Make exception-safe.
// * Use auto_ptrs.
// * Add some kind of auto object for readlocking or writelocking a table,
//   so that the table automatically gets unlocked by auto-obj's destructor.
// * Provide a C API wrapper
// * Port to kernel mode (will require different locks, at the very least)
// * Port to managed code (Chris Tracy has started on this)
// * Typedef hash signatures (currently DWORDs)
// * Make available as a static library as well as a DLL
//--------------------------------------------------------------------
#ifndef __IRTLDBG_H__
# include <irtldbg.h>
#endif
#ifndef __LSTENTRY_H__
# include <lstentry.h>
#endif
#ifndef __HASHFN_H__
# include <hashfn.h>
#endif

#include <limits.h>

#ifdef LKR_STL_ITERATORS
// needed for std::forward_iterator_tag, etc
# include <iterator>
// The iterators have very verbose tracing. Don't want it on all the time
// in debug builds.
# if defined(IRTLDEBUG) && (LKR_STL_ITERATORS >= 2)
#  define LKR_ITER_TRACE IrtlTrace
# else // !defined(IRTLDEBUG) || LKR_STL_ITERATORS < 2
#  define LKR_ITER_TRACE 1 ? (void) 0 : IrtlTrace
# endif // !defined(IRTLDEBUG) || LKR_STL_ITERATORS < 2
#endif // LKR_STL_ITERATORS

// Used to initialize and destroy custom allocators
extern "C" bool LKRHashTableInit();
extern "C" void LKRHashTableUninit();

#define __LKRHASH_NO_NAMESPACE__
#define LKR_DEPRECATED_ITERATORS

#ifndef __LKRHASH_NO_NAMESPACE__
namespace LKRhash {
#endif // !__LKRHASH_NO_NAMESPACE__
enum LK_TABLESIZE {
    LK_SMALL_TABLESIZE  = 1,    // < 200 elements
    LK_MEDIUM_TABLESIZE = 2,    // 200...10,000 elements
    LK_LARGE_TABLESIZE  = 3,    // 10,000+ elements
};

// Default values for the hashtable constructors
enum {
#ifndef _WIN64
    LK_DFLT_MAXLOAD     = 6,    // Default upper bound on average chain length.
#else  // _WIN64
    LK_DFLT_MAXLOAD     = 4,    // 64-byte nodes => NODES_PER_CLUMP = 4
#endif // _WIN64
    LK_DFLT_INITSIZE    = LK_MEDIUM_TABLESIZE, // Default initial size of hash table
    LK_DFLT_NUM_SUBTBLS = 0,    // Use a heuristic to choose #subtables
};
// obsolete build fix hack
// enum {
//     DFLT_LK_MAXLOAD     = LK_DFLT_MAXLOAD,
//     DFLT_LK_INITSIZE    = LK_DFLT_INITSIZE,
//     DFLT_LK_NUM_SUBTBLS = LK_DFLT_NUM_SUBTBLS,
// };

//--------------------------------------------------------------------
// forward declarations

class IRTL_DLLEXP CLKRLinearHashTable;
class IRTL_DLLEXP CLKRHashTable;

template <class _Der, class _Rcd, class _Ky, class _HT
#ifdef LKR_DEPRECATED_ITERATORS
          , class _Iter
#endif // LKR_DEPRECATED_ITERATORS
         >
class CTypedHashTable;

//--------------------------------------------------------------------
// Possible return codes from public member functions of
// CLKRLinearHashTable, CLKRHashTable, and CTypedHashTable

enum LK_RETCODE {
    // severe errors < 0
    LK_UNUSABLE = -99,  // Table corrupted: all bets are off
    LK_ALLOC_FAIL,      // ran out of memory
    LK_BAD_ITERATOR,    // invalid iterator; e.g., points to another table
    LK_BAD_RECORD,      // invalid record; e.g., NULL for InsertRecord
    LK_BAD_PARAMETERS,  // invalid parameters; e.g., NULL fnptrs to ctor
    LK_NOT_INITIALIZED, // LKRHashTableInit was not called

    LK_SUCCESS = 0,     // everything's okay
    LK_KEY_EXISTS,      // key already present for InsertRecord(no-overwrite)
    LK_NO_SUCH_KEY,     // key not found
    LK_NO_MORE_ELEMENTS,// iterator exhausted
};

#define LK_SUCCEEDED(lkrc) ((lkrc) >= LK_SUCCESS)
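// Illustrative usage sketch (not part of the original header; pht and
// HandleFailure are hypothetical):
//
//   LK_RETCODE lkrc = pht->InsertRecord(pvRecord);
//   if (!LK_SUCCEEDED(lkrc))
//       HandleFailure(lkrc);          // LK_ALLOC_FAIL, LK_BAD_RECORD, ...
//   else if (lkrc == LK_KEY_EXISTS)
//       /* key was already present; record not overwritten */ ;
//
// Note that LK_KEY_EXISTS and LK_NO_SUCH_KEY compare >= LK_SUCCESS, so
// LK_SUCCEEDED() treats them as non-fatal conditions, not as errors.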
#ifdef LKR_APPLY_IF
//--------------------------------------------------------------------
// Return codes from PFnRecordPred.

enum LK_PREDICATE {
    LKP_ABORT = 1,          // Stop walking the table immediately
    LKP_NO_ACTION = 2,      // No action, just keep walking
    LKP_PERFORM = 3,        // Perform action and continue walking
    LKP_PERFORM_STOP = 4,   // Perform action, then stop
    LKP_DELETE = 5,         // Delete record and keep walking
    LKP_DELETE_STOP = 6,    // Delete record, then stop
};

//--------------------------------------------------------------------
// Return codes from PFnRecordAction.

enum LK_ACTION {
    LKA_ABORT = 1,          // Stop walking the table immediately
    LKA_FAILED = 2,         // Action failed; continue walking the table
    LKA_SUCCEEDED = 3,      // Action succeeded; continue walking the table
};
#endif // LKR_APPLY_IF
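// Illustrative sketch (not part of the original header) of the callback
// shapes that ApplyIf()/DeleteIf() expect; the typedefs PFnRecordPred
// and PFnRecordAction appear further down. CMyRecord and its fields are
// hypothetical:
//
//   LK_PREDICATE WINAPI IsExpired(const void* pvRecord, void* pvState)
//   {
//       const CMyRecord* pRec  = static_cast<const CMyRecord*>(pvRecord);
//       const DWORD*     pdwNow = static_cast<const DWORD*>(pvState);
//       return (pRec->m_dwExpires < *pdwNow) ? LKP_PERFORM : LKP_NO_ACTION;
//   }
//
//   LK_ACTION WINAPI CountRecord(const void* /*pvRecord*/, void* pvState)
//   {
//       ++*static_cast<DWORD*>(pvState);
//       return LKA_SUCCEEDED;           // keep walking the table
//   }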
#if defined(LKR_DEPRECATED_ITERATORS) || defined(LKR_APPLY_IF)
//--------------------------------------------------------------------
// Parameter to Apply and ApplyIf.

enum LK_LOCKTYPE {
    LKL_READLOCK = 1,       // Lock the table for reading (for constness)
    LKL_WRITELOCK = 2,      // Lock the table for writing
};
#endif // LKR_DEPRECATED_ITERATORS || LKR_APPLY_IF

//--------------------------------------------------------------------
// Global table lock code. This is only used to measure how much of a
// slowdown having a global lock on the CLKRHashTable causes. It is
// *never* used in production code.

// #define LKRHASH_GLOBAL_LOCK CCritSec

#ifdef LKRHASH_GLOBAL_LOCK

# define LKRHASH_GLOBAL_LOCK_DECLARATIONS()     \
    typedef LKRHASH_GLOBAL_LOCK GlobalLock;     \
    mutable GlobalLock m_lkGlobal;

# define LKRHASH_GLOBAL_READ_LOCK()     m_lkGlobal.ReadLock()
# define LKRHASH_GLOBAL_WRITE_LOCK()    m_lkGlobal.WriteLock()
# define LKRHASH_GLOBAL_READ_UNLOCK()   m_lkGlobal.ReadUnlock()
# define LKRHASH_GLOBAL_WRITE_UNLOCK()  m_lkGlobal.WriteUnlock()

#else // !LKRHASH_GLOBAL_LOCK

# define LKRHASH_GLOBAL_LOCK_DECLARATIONS()

// These ones will be optimized away by the compiler
# define LKRHASH_GLOBAL_READ_LOCK()     ((void) 0)
# define LKRHASH_GLOBAL_WRITE_LOCK()    ((void) 0)
# define LKRHASH_GLOBAL_READ_UNLOCK()   ((void) 0)
# define LKRHASH_GLOBAL_WRITE_UNLOCK()  ((void) 0)

#endif // !LKRHASH_GLOBAL_LOCK
//--------------------------------------------------------------------
// Statistical information returned by GetStatistics
//--------------------------------------------------------------------

#ifdef LOCK_INSTRUMENTATION
class IRTL_DLLEXP CAveragedLockStats : public CLockStatistics
{
public:
    int m_nItems;

    CAveragedLockStats()
        : m_nItems(1)
    {}
};
#endif // LOCK_INSTRUMENTATION
class IRTL_DLLEXP CLKRHashTableStats
{
public:
    int     RecordCount;        // number of records in the table
    int     TableSize;          // table size in number of slots
    int     DirectorySize;      // number of entries in directory
    int     LongestChain;       // longest hash chain in the table
    int     EmptySlots;         // number of unused hash slots
    double  SplitFactor;        // fraction of buckets split
    double  AvgSearchLength;    // average length of a successful search
    double  ExpSearchLength;    // theoretically expected length
    double  AvgUSearchLength;   // average length of an unsuccessful search
    double  ExpUSearchLength;   // theoretically expected length
    int     NodeClumpSize;      // number of slots in a node clump
    int     CBucketSize;        // sizeof(CBucket)

#ifdef LOCK_INSTRUMENTATION
    CAveragedLockStats    m_alsTable;      // stats for table lock
    CAveragedLockStats    m_alsBucketsAvg; // avg of stats for bucket locks
    CGlobalLockStatistics m_gls;           // global statistics for all locks
#endif // LOCK_INSTRUMENTATION

    enum {
        MAX_BUCKETS = 40,
    };

    // histogram of bucket lengths
    LONG    m_aBucketLenHistogram[MAX_BUCKETS];

    CLKRHashTableStats()
        : RecordCount(0),
          TableSize(0),
          DirectorySize(0),
          LongestChain(0),
          EmptySlots(0),
          SplitFactor(0.0),
          AvgSearchLength(0.0),
          ExpSearchLength(0.0),
          AvgUSearchLength(0.0),
          ExpUSearchLength(0.0),
          NodeClumpSize(1),
          CBucketSize(0)
    {
        for (int i = MAX_BUCKETS; --i >= 0; )
            m_aBucketLenHistogram[i] = 0;
    }

    static const LONG*
    BucketSizes()
    {
        static const LONG s_aBucketSizes[MAX_BUCKETS] = {
            1,   2,  3,  4,  5,  6,  7,  8,  9,
            10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
            20, 21, 22, 23, 24, 25, 30, 40, 50, 60,
            70, 80, 90, 100, 200, 500, 1000, 10000, 100000, LONG_MAX,
        };
        return s_aBucketSizes;
    }

    static LONG
    BucketSize(
        LONG nBucketIndex)
    {
        IRTLASSERT(0 <= nBucketIndex  &&  nBucketIndex < MAX_BUCKETS);
        return BucketSizes()[nBucketIndex];
    }

    static LONG
    BucketIndex(
        LONG nBucketLength)
    {
        const LONG* palBucketSizes = BucketSizes();
        LONG i = 0;
        while (palBucketSizes[i] < nBucketLength)
            ++i;
        if (i == MAX_BUCKETS  ||  palBucketSizes[i] > nBucketLength)
            --i;
        IRTLASSERT(0 <= i  &&  i < MAX_BUCKETS);
        return i;
    }
}; // class CLKRHashTableStats
// Use types defined in recent versions of the Platform SDK in basetsd.h.
#ifndef _W64
typedef DWORD DWORD_PTR;    // integral type big enough to hold a pointer
#endif
//--------------------------------------------------------------------
// CLKRLinearHashTable deals with void* records. These typedefs
// provide prototypes for functions that manipulate instances of
// those records. CTypedHashTable and CStringTestHashTable (below) show a
// way to encapsulate these in typesafe wrappers.
//--------------------------------------------------------------------

// Given a record, return its key. Assumes that the key is embedded in
// the record, or at least somehow derivable from the record. For
// completely unrelated keys & values, a wrapper class should use
// something like STL's pair<key,value> template to aggregate them
// into a record.
typedef const DWORD_PTR (WINAPI *PFnExtractKey)  (const void* pvRecord);

// Given a key, return its hash signature. The hashing functions in
// hashfn.h (or something that builds upon them) are suggested.
typedef DWORD           (WINAPI *PFnCalcKeyHash) (const DWORD_PTR pnKey);

// Compare two keys for equality; e.g., _stricmp, memcmp, operator==
typedef bool            (WINAPI *PFnEqualKeys)   (const DWORD_PTR pnKey1,
                                                  const DWORD_PTR pnKey2);

// Increment the reference count of a record before returning it from
// FindKey. It's necessary to do it in FindKey itself while the bucket
// is still locked, rather than one of the wrappers, to avoid race
// conditions. Similarly, the reference count is incremented in
// InsertRecord and decremented in DeleteKey. Finally, if an old record
// is overwritten in InsertRecord, its reference count is decremented.
//
// It's up to you to decrement the reference count when you're finished
// with it after retrieving it via FindKey and to determine the
// semantics of what this means. The hashtable itself has no notion of
// reference counts; this is merely to help with the lifetime management
// of the record objects.
typedef void (WINAPI *PFnAddRefRecord)(const void* pvRecord, int nIncr);
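// Illustrative sketch (not part of the original header): the four
// per-type callbacks for a hypothetical record type CWidget keyed by a
// case-insensitive string name. HashStringNoCase is assumed to be one
// of the string hashers provided by hashfn.h:
//
//   struct CWidget {
//       const char* m_pszName;  // the embedded key
//       LONG        m_cRefs;    // reference count
//   };
//
//   const DWORD_PTR WINAPI Widget_ExtractKey(const void* pvRecord)
//   { return (DWORD_PTR) static_cast<const CWidget*>(pvRecord)->m_pszName; }
//
//   DWORD WINAPI Widget_CalcKeyHash(const DWORD_PTR pnKey)
//   { return HashStringNoCase((const char*) pnKey); }
//
//   bool WINAPI Widget_EqualKeys(const DWORD_PTR pnKey1, const DWORD_PTR pnKey2)
//   { return _stricmp((const char*) pnKey1, (const char*) pnKey2) == 0; }
//
//   void WINAPI Widget_AddRefRecord(const void* pvRecord, int nIncr)
//   {
//       CWidget* pw = (CWidget*) pvRecord;          // cast away constness
//       InterlockedExchangeAdd(&pw->m_cRefs, nIncr); // nIncr is +1 or -1
//   }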
#ifdef LKR_APPLY_IF
// ApplyIf() and DeleteIf(): Does the record match the predicate?
typedef LK_PREDICATE (WINAPI *PFnRecordPred) (const void* pvRecord,
                                              void* pvState);

// Apply() et al: Perform action on record.
typedef LK_ACTION    (WINAPI *PFnRecordAction)(const void* pvRecord,
                                               void* pvState);
#endif // LKR_APPLY_IF

#ifndef __LKRHASH_NO_NAMESPACE__
}
#endif // !__LKRHASH_NO_NAMESPACE__

//--------------------------------------------------------------------
// Custom memory allocators
//--------------------------------------------------------------------

#ifndef LKR_NO_ALLOCATORS
// # define LKRHASH_ACACHE 1
// # define LKRHASH_MANODEL 1
// # define LKRHASH_MADEL 1
// # define LKRHASH_ROCKALL_FAST 1
// # define LKRHASH_MEM_DEFAULT_ALIGN 32
#endif // !LKR_NO_ALLOCATORS

#ifndef LKRHASH_MEM_DEFAULT_ALIGN
# define LKRHASH_MEM_DEFAULT_ALIGN 8
#endif // !LKRHASH_MEM_DEFAULT_ALIGN
#if defined(LKRHASH_ACACHE)

# include <acache.hxx>
typedef ALLOC_CACHE_HANDLER CLKRhashAllocator;
# define LKRHASH_ALLOCATOR_NEW(C, N)                                    \
    const ALLOC_CACHE_CONFIGURATION acc = { 1, N, sizeof(C) };          \
    C::sm_palloc = new ALLOC_CACHE_HANDLER("LKRhash:" #C, &acc);

#elif defined(LKRHASH_ROCKALL_FAST)

# include <FastHeap.hpp>
class FastHeap : public FAST_HEAP
{
public:
    FastHeap(
        SIZE_T cb)
        : m_cb(cb)
    {}

    LPVOID Alloc()
    { return New(m_cb, NULL, false); }

    BOOL   Free(LPVOID pvMem)
    { return Delete(pvMem); }

    SIZE_T m_cb;
};
typedef FastHeap CLKRhashAllocator;
# define LKRHASH_ALLOCATOR_NEW(C, N)                                    \
    C::sm_palloc = new FastHeap(sizeof(C));

#else // no custom allocator

# undef LKRHASH_ALLOCATOR_NEW

#endif // no custom allocator
#ifdef LKRHASH_ALLOCATOR_NEW

// placed inline in the declaration of class C
# define LKRHASH_ALLOCATOR_DEFINITIONS(C)                               \
    protected:                                                          \
        static CLKRhashAllocator* sm_palloc;                            \
        friend bool LKRHashTableInit();                                 \
        friend void LKRHashTableUninit();                               \
        friend class CLKRLinearHashTable;                               \
    public:                                                             \
        static void* operator new(size_t s)                             \
        {                                                               \
            UNREFERENCED_PARAMETER(s);                                  \
            IRTLASSERT(s == sizeof(C));                                 \
            IRTLASSERT(sm_palloc != NULL);                              \
            return sm_palloc->Alloc();                                  \
        }                                                               \
        static void  operator delete(void* pv)                          \
        {                                                               \
            IRTLASSERT(pv != NULL);                                     \
            IRTLASSERT(sm_palloc != NULL);                              \
            sm_palloc->Free(pv);                                        \
        }                                                               \
        static bool init(void)                                          \
        {                                                               \
            IRTLASSERT(sm_palloc == NULL);                              \
            LKRHASH_ALLOCATOR_NEW(C, 1);                                \
            return (sm_palloc != NULL);                                 \
        }                                                               \
        static void uninit(void)                                        \
        {                                                               \
            if (sm_palloc != NULL)                                      \
            {                                                           \
                delete sm_palloc;                                       \
                sm_palloc = NULL;                                       \
            }                                                           \
        }

// used in LKRHashTableInit()
# define LKRHASH_ALLOCATOR_INIT(C, N, f)                                \
    {                                                                   \
        if (f)                                                          \
        {                                                               \
            f = C::init();                                              \
        }                                                               \
    }

// used in LKRHashTableUninit()
# define LKRHASH_ALLOCATOR_UNINIT(C)                                    \
    {                                                                   \
        C::uninit();                                                    \
    }

#else  // !LKRHASH_ALLOCATOR_NEW

# define LKRHASH_ALLOCATOR_DEFINITIONS(C)
# define LKRHASH_ALLOCATOR_INIT(C, N, f)
# define LKRHASH_ALLOCATOR_UNINIT(C)

#endif // !LKRHASH_ALLOCATOR_NEW
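// Illustrative sketch (not part of the original header) of how these
// macros are typically wired up in an implementation file; the exact
// set of classes initialized is an assumption:
//
//   /* in the .cpp file */
//   CLKRhashAllocator* CNodeClump::sm_palloc = NULL;
//
//   extern "C" bool LKRHashTableInit()
//   {
//       bool f = true;
//       LKRHASH_ALLOCATOR_INIT(CNodeClump, 1, f);   // f &= CNodeClump::init()
//       /* ... likewise for the segment classes ... */
//       return f;
//   }
//
//   extern "C" void LKRHashTableUninit()
//   {
//       LKRHASH_ALLOCATOR_UNINIT(CNodeClump);
//       /* ... likewise for the segment classes ... */
//   }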
#ifndef __LKRHASH_NO_NAMESPACE__
namespace LKRhash {
#endif // !__LKRHASH_NO_NAMESPACE__

// Class for nodes on a bucket chain. Instead of a node containing
// one (signature, record-pointer, next-tuple-pointer) tuple, it
// contains _N_ such tuples. (N-1 next-tuple-pointers are omitted.)
// This improves locality of reference greatly; i.e., it's L1
// cache-friendly. It also reduces memory fragmentation and memory
// allocator overhead. It does complicate the chain traversal code
// slightly, admittedly.
//
// This theory is beautiful. In practice, however, CNodeClumps
// are *not* perfectly aligned on 32-byte boundaries by the memory
// allocators. Experimental results indicate that we get a 2-3%
// speed improvement by using 32-byte-aligned blocks, but this must
// be considered against the average of 16 bytes wasted per block.
class CNodeClump
{
public:
    // Record slots per chunk - set so a chunk matches (one or
    // two) cache lines. 3 ==> 32 bytes, 7 ==> 64 bytes
    // Note: the default max load factor is 6.0, which implies that
    // there will seldom be more than one node clump in a chain.
    enum {
        BUCKET_BYTE_SIZE = 64,
        BUCKET_OVERHEAD  = sizeof(LKR_BUCKET_LOCK) + sizeof(CNodeClump*),
        NODE_SIZE        = sizeof(const void*) + sizeof(DWORD),
        NODES_PER_CLUMP  = (BUCKET_BYTE_SIZE - BUCKET_OVERHEAD) / NODE_SIZE
    };

    enum {
        // See if countdown loops are faster than countup loops for
        // traversing a CNodeClump. In practice, countup loops are faster.
#ifndef LKR_COUNTDOWN
        NODE_BEGIN =  0,
        NODE_END   =  NODES_PER_CLUMP,
        NODE_STEP  = +1,
        // for (int x = 0;  x < NODES_PER_CLUMP;  ++x) ...
#else  // LKR_COUNTDOWN
        NODE_BEGIN =  NODES_PER_CLUMP-1,
        NODE_END   = -1,
        NODE_STEP  = -1,
        // for (int x = NODES_PER_CLUMP;  --x >= 0; ) ...
#endif // LKR_COUNTDOWN
    };

    enum {
        // No number in 0..2^31-1 maps to this number after it has been
        // scrambled by HashFn::HashRandomizeBits
        HASH_INVALID_SIGNATURE = 31678523,
    };

    DWORD       m_dwKeySigs[NODES_PER_CLUMP]; // hash values computed from keys
    CNodeClump* m_pncNext;                    // next node clump on the chain
    const void* m_pvNode[NODES_PER_CLUMP];    // pointers to records

    CNodeClump()
    {
        Clear();
    }

    void
    Clear()
    {
        m_pncNext = NULL;   // no dangling pointers
        for (int i = NODES_PER_CLUMP;  --i >= 0; )
        {
            m_dwKeySigs[i] = HASH_INVALID_SIGNATURE;
            m_pvNode[i]    = NULL;
        }
    }

    bool
    InvalidSignature(
        int i) const
    {
        IRTLASSERT(0 <= i  &&  i < NODES_PER_CLUMP);
        return (m_dwKeySigs[i] == HASH_INVALID_SIGNATURE);
    }

    bool
    IsEmptyNode(
        int i) const
    {
        IRTLASSERT(0 <= i  &&  i < NODES_PER_CLUMP);
        return (m_pvNode[i] == NULL);
    }

    bool
    IsEmptyAndInvalid(
        int i) const
    {
        return IsEmptyNode(i) && InvalidSignature(i);
    }

    bool
    IsEmptySlot(
        int i) const
    {
        return InvalidSignature(i);
    }

    bool
    IsLastClump() const
    {
        return (m_pncNext == NULL);
    }

#ifdef IRTLDEBUG
    // Don't want overhead of calls to dtor in retail build
    ~CNodeClump()
    {
        IRTLASSERT(IsLastClump());  // no dangling pointers
        for (int i = NODES_PER_CLUMP;  --i >= 0; )
            IRTLASSERT(InvalidSignature(i)  &&  IsEmptyNode(i));
    }
#endif // IRTLDEBUG

    LKRHASH_ALLOCATOR_DEFINITIONS(CNodeClump);
}; // class CNodeClump
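// Illustrative arithmetic (not part of the original header; assumes
// sizeof(CSmallSpinLock) == 4): on a 32-bit build, BUCKET_OVERHEAD is
// 4 (lock) + 4 (next pointer) = 8 bytes and NODE_SIZE is 4 + 4 = 8
// bytes, so NODES_PER_CLUMP = (64 - 8) / 8 = 7 and a CBucket occupies
// exactly one 64-byte cache line. On a 64-bit build the pointers double
// to 8 bytes, giving NODES_PER_CLUMP = (64 - 12) / 12 = 4, which is why
// LK_DFLT_MAXLOAD drops to 4 under _WIN64 (see the defaults enum above).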
// Class for bucket chains of the hash table. Note that the first
// nodeclump is actually included in the bucket and not dynamically
// allocated, which increases space requirements slightly but does
// improve performance.
class CBucket
{
private:
    typedef LKR_BUCKET_LOCK BucketLock;

    mutable BucketLock m_Lock;  // lock protecting this bucket

#ifdef LOCK_INSTRUMENTATION
    static LONG sm_cBuckets;

    static const TCHAR*
    _LockName()
    {
        LONG l = ++sm_cBuckets;
        // possible race condition but we don't care, as this is never
        // used in production code
        static TCHAR s_tszName[CLockStatistics::L_NAMELEN];
        wsprintf(s_tszName, _TEXT("B%06x"), 0xFFFFFF & l);
        return s_tszName;
    }
#endif // LOCK_INSTRUMENTATION

public:
    CNodeClump m_ncFirst;       // first CNodeClump of this bucket

#if defined(LOCK_INSTRUMENTATION) || defined(IRTLDEBUG)
    CBucket()
#ifdef LOCK_INSTRUMENTATION
        : m_Lock(_LockName())
#endif // LOCK_INSTRUMENTATION
    {
#ifdef IRTLDEBUG
        LOCK_LOCKTYPE lt = BucketLock::LockType();
        if (lt == LOCK_SPINLOCK  ||  lt == LOCK_FAKELOCK)
            IRTLASSERT(sizeof(*this) <= 64);
#endif // IRTLDEBUG
    }
#endif // LOCK_INSTRUMENTATION || IRTLDEBUG

    void  WriteLock()               { m_Lock.WriteLock(); }
    void  ReadLock() const          { m_Lock.ReadLock(); }
    void  WriteUnlock() const       { m_Lock.WriteUnlock(); }
    void  ReadUnlock() const        { m_Lock.ReadUnlock(); }
    bool  IsWriteLocked() const     { return m_Lock.IsWriteLocked(); }
    bool  IsReadLocked() const      { return m_Lock.IsReadLocked(); }
    bool  IsWriteUnlocked() const   { return m_Lock.IsWriteUnlocked(); }
    bool  IsReadUnlocked() const    { return m_Lock.IsReadUnlocked(); }
    void  SetSpinCount(WORD wSpins) { m_Lock.SetSpinCount(wSpins); }
    WORD  GetSpinCount() const      { return m_Lock.GetSpinCount(); }

#ifdef LOCK_INSTRUMENTATION
    CLockStatistics LockStats() const { return m_Lock.Statistics(); }
#endif // LOCK_INSTRUMENTATION
}; // class CBucket
// The hash table space is divided into fixed-size segments (arrays of
// CBuckets) and physically grows/shrinks one segment at a time.
//
// We provide small, medium, and large segments to better tune the
// overall memory requirements of the hash table according to the
// expected usage of an instance.

class CSegment
{
public:
    CBucket m_bktSlots[1];

    // See note at m_bktSlots2 in CSmallSegment below
    CBucket& Slot(DWORD i)
    { return m_bktSlots[i]; }
}; // class CSegment

// Small-sized segments contain 2^3 = 8 buckets => ~0.5Kb
class CSmallSegment : public CSegment
{
public:
    // Maximum table size equals MAX_DIRSIZE * SEGSIZE buckets.
    enum {
        SEGBITS  = 3,           // number of bits extracted from a hash
                                // address for offset within a segment
        SEGSIZE  = (1<<SEGBITS),// segment size
        SEGMASK  = (SEGSIZE-1), // mask used for extracting offset bit
        INITSIZE = 1 * SEGSIZE, // #segments to allocate initially
    };

    // Hack: assumes immediately after CSegment::m_bktSlots, with no
    // padding. The STATIC_ASSERT in _AllocateSegment should cause a
    // compile-time error if this assumption is false.
    CBucket m_bktSlots2[SEGSIZE-1];

public:
    DWORD Bits() const      { return SEGBITS; }
    DWORD Size() const      { return SEGSIZE; }
    DWORD Mask() const      { return SEGMASK; }
    DWORD InitSize() const  { return INITSIZE; }

#ifdef IRTLDEBUG
    CSmallSegment()
    {
        IRTLASSERT(&Slot(1) == m_bktSlots2);
        IRTLASSERT(((DWORD_PTR)this & (LKRHASH_MEM_DEFAULT_ALIGN-1)) == 0);
        IRTLASSERT(sizeof(*this) == SEGSIZE * sizeof(CBucket));
    }
#endif // IRTLDEBUG

    LKRHASH_ALLOCATOR_DEFINITIONS(CSmallSegment);
}; // class CSmallSegment
// Medium-sized segments contain 2^6 = 64 buckets => ~4Kb
class CMediumSegment : public CSegment
{
public:
    enum {
        SEGBITS  = 6,
        SEGSIZE  = (1<<SEGBITS),
        SEGMASK  = (SEGSIZE-1),
        INITSIZE = 2 * SEGSIZE,
    };

    CBucket m_bktSlots2[SEGSIZE-1];

public:
    DWORD Bits() const      { return SEGBITS; }
    DWORD Size() const      { return SEGSIZE; }
    DWORD Mask() const      { return SEGMASK; }
    DWORD InitSize() const  { return INITSIZE; }

#ifdef IRTLDEBUG
    CMediumSegment()
    {
        IRTLASSERT(&Slot(1) == m_bktSlots2);
        IRTLASSERT(((DWORD_PTR)this & (LKRHASH_MEM_DEFAULT_ALIGN-1)) == 0);
        IRTLASSERT(sizeof(*this) == SEGSIZE * sizeof(CBucket));
    }
#endif // IRTLDEBUG

    LKRHASH_ALLOCATOR_DEFINITIONS(CMediumSegment);
}; // class CMediumSegment

// Large-sized segments contain 2^9 = 512 buckets => ~32Kb
class CLargeSegment : public CSegment
{
public:
    enum {
        SEGBITS  = 9,
        SEGSIZE  = (1<<SEGBITS),
        SEGMASK  = (SEGSIZE-1),
        INITSIZE = 4 * SEGSIZE,
    };

    CBucket m_bktSlots2[SEGSIZE-1];

public:
    DWORD Bits() const      { return SEGBITS; }
    DWORD Size() const      { return SEGSIZE; }
    DWORD Mask() const      { return SEGMASK; }
    DWORD InitSize() const  { return INITSIZE; }

#ifdef IRTLDEBUG
    CLargeSegment()
    {
        IRTLASSERT(&Slot(1) == m_bktSlots2);
        IRTLASSERT(((DWORD_PTR)this & (LKRHASH_MEM_DEFAULT_ALIGN-1)) == 0);
        IRTLASSERT(sizeof(*this) == SEGSIZE * sizeof(CBucket));
    }
#endif // IRTLDEBUG

    LKRHASH_ALLOCATOR_DEFINITIONS(CLargeSegment);
}; // class CLargeSegment
// A directory keeps track of the segments comprising the hash table.
// The directory is just a variable-sized array of pointers to
// segments (CDirEntrys).
class CDirEntry
{
public:
    // MIN_DIRSIZE and MAX_DIRSIZE can be changed independently
    // of anything else. Should be powers of two.
    enum {
        MIN_DIRSIZE = (1<<3),   // minimum directory size
        MAX_DIRSIZE = (1<<20),  // maximum directory size
    };

    CSegment* m_pseg;

    CDirEntry()
        : m_pseg(NULL)
    {}

    ~CDirEntry()
    { delete m_pseg; }
}; // class CDirEntry
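// Illustrative sketch (not part of the original header): a bucket
// address splits into a directory index and a within-segment offset,
// exactly as the _Segment() and _SegIndex() helpers further down
// compute them. For a medium table (SEGBITS == 6, SEGSIZE == 64):
//
//   DWORD iSeg  = dwBucketAddr >> 6;        // which CDirEntry
//   DWORD iSlot = dwBucketAddr & (64 - 1);  // which CBucket in the segment
//   CBucket* pbkt = &m_paDirSegs[iSeg].m_pseg->Slot(iSlot);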
#ifdef LKR_STL_ITERATORS

class IRTL_DLLEXP CLKRLinearHashTable_Iterator;
class IRTL_DLLEXP CLKRHashTable_Iterator;

class IRTL_DLLEXP CLKRLinearHashTable_Iterator
{
    friend class CLKRLinearHashTable;
    friend class CLKRHashTable;
    friend class CLKRHashTable_Iterator;

protected:
    CLKRLinearHashTable* m_plht;         // which linear hash table?
    CNodeClump*          m_pnc;          // a CNodeClump in bucket
    DWORD                m_dwBucketAddr; // bucket index
    short                m_iNode;        // offset within m_pnc

    enum {
        NODES_PER_CLUMP = CNodeClump::NODES_PER_CLUMP,
        NODE_BEGIN      = CNodeClump::NODE_BEGIN,
        NODE_END        = CNodeClump::NODE_END,
        NODE_STEP       = CNodeClump::NODE_STEP,
    };

    CLKRLinearHashTable_Iterator(
        CLKRLinearHashTable* plht,
        CNodeClump*          pnc,
        DWORD                dwBucketAddr,
        short                iNode)
        : m_plht(plht),
          m_pnc(pnc),
          m_dwBucketAddr(dwBucketAddr),
          m_iNode(iNode)
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::prot ctor, this=%p, plht=%p, ")
                       _TEXT("pnc=%p, ba=%d, in=%d\n"),
                       this, plht, pnc, dwBucketAddr, iNode);
    }

    inline void _AddRef(
        int nIncr) const;

    bool _Increment(
        bool fDecrementOldValue=true);

public:
    CLKRLinearHashTable_Iterator()
        : m_plht(NULL),
          m_pnc(NULL),
          m_dwBucketAddr(0),
          m_iNode(0)
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::default ctor, this=%p\n"), this);
    }

    CLKRLinearHashTable_Iterator(
        const CLKRLinearHashTable_Iterator& rhs)
        : m_plht(rhs.m_plht),
          m_pnc(rhs.m_pnc),
          m_dwBucketAddr(rhs.m_dwBucketAddr),
          m_iNode(rhs.m_iNode)
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::copy ctor, this=%p, rhs=%p\n"),
                       this, &rhs);
        _AddRef(+1);
    }

    CLKRLinearHashTable_Iterator& operator=(
        const CLKRLinearHashTable_Iterator& rhs)
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::operator=, this=%p, rhs=%p\n"),
                       this, &rhs);
        rhs._AddRef(+1);
        this->_AddRef(-1);

        m_plht         = rhs.m_plht;
        m_pnc          = rhs.m_pnc;
        m_dwBucketAddr = rhs.m_dwBucketAddr;
        m_iNode        = rhs.m_iNode;

        return *this;
    }

    ~CLKRLinearHashTable_Iterator()
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::dtor, this=%p, plht=%p\n"),
                       this, m_plht);
        _AddRef(-1);
    }

    bool Increment()
    {
        return IsValid() ? _Increment() : false;
    }

    bool IsValid() const
    {
        bool fValid = (m_plht != NULL  &&  m_pnc != NULL
                       &&  0 <= m_iNode  &&  m_iNode < NODES_PER_CLUMP);
        if (fValid)
            fValid = (m_pnc->m_pvNode[m_iNode] != NULL);
        IRTLASSERT(fValid);
        return fValid;
    }

    const void* Record() const
    {
        IRTLASSERT(IsValid());
        return m_pnc->m_pvNode[m_iNode];
    }

    inline const DWORD_PTR Key() const;

    bool operator==(
        const CLKRLinearHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::operator==, this=%p, rhs=%p\n"),
                       this, &rhs);
        // m_pnc and m_iNode uniquely identify an iterator
        bool fEQ = ((m_pnc == rhs.m_pnc)    // most unique field
                    &&  (m_iNode == rhs.m_iNode));
        IRTLASSERT(!fEQ  ||  ((m_plht == rhs.m_plht)
                              &&  (m_dwBucketAddr == rhs.m_dwBucketAddr)));
        return fEQ;
    }

    bool operator!=(
        const CLKRLinearHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::operator!=, this=%p, rhs=%p\n"),
                       this, &rhs);
        bool fNE = ((m_pnc != rhs.m_pnc)
                    ||  (m_iNode != rhs.m_iNode));
        //// IRTLASSERT(fNE == !this->operator==(rhs));
        return fNE;
    }
}; // class CLKRLinearHashTable_Iterator
class IRTL_DLLEXP CLKRHashTable_Iterator
{
    friend class CLKRHashTable;

protected:
    // order important to minimize size
    CLKRHashTable*               m_pht;     // which hash table?
    CLKRLinearHashTable_Iterator m_subiter; // iterator into subtable
    short                        m_ist;     // index of subtable

    CLKRHashTable_Iterator(
        CLKRHashTable* pht,
        short          ist)
        : m_pht(pht),
          m_subiter(CLKRLinearHashTable_Iterator()), // zero
          m_ist(ist)
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::prot ctor, this=%p, pht=%p, ist=%d\n"),
                       this, pht, ist);
    }

    bool _Increment(
        bool fDecrementOldValue=true);

public:
    CLKRHashTable_Iterator()
        : m_pht(NULL),
          m_subiter(CLKRLinearHashTable_Iterator()), // zero
          m_ist(0)
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::default ctor, this=%p\n"), this);
    }

#ifdef IRTLDEBUG
    // Compiler does a perfectly adequate job of synthesizing these methods.
    CLKRHashTable_Iterator(
        const CLKRHashTable_Iterator& rhs)
        : m_pht(rhs.m_pht),
          m_subiter(rhs.m_subiter),
          m_ist(rhs.m_ist)
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::copy ctor, this=%p, rhs=%p\n"),
                       this, &rhs);
    }

    CLKRHashTable_Iterator& operator=(
        const CLKRHashTable_Iterator& rhs)
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::operator=, this=%p, rhs=%p\n"),
                       this, &rhs);
        m_ist     = rhs.m_ist;
        m_subiter = rhs.m_subiter;
        m_pht     = rhs.m_pht;
        return *this;
    }

    ~CLKRHashTable_Iterator()
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::dtor, this=%p, pht=%p\n"), this, m_pht);
    }
#endif // IRTLDEBUG

    bool Increment()
    {
        return IsValid() ? _Increment() : false;
    }

    bool IsValid() const
    {
        bool fValid = (m_pht != NULL  &&  m_ist >= 0);
        IRTLASSERT(fValid);
        fValid = fValid  &&  (m_subiter.m_plht != NULL);
        IRTLASSERT(fValid);
        fValid = fValid  &&  (m_subiter.m_pnc != NULL);
        IRTLASSERT(fValid);
        fValid = fValid  &&  (0 <= m_subiter.m_iNode);
        IRTLASSERT(fValid);
        fValid = fValid  &&  (m_subiter.m_iNode < CNodeClump::NODES_PER_CLUMP);
        IRTLASSERT(fValid);
        if (fValid)
            fValid = (m_subiter.m_pnc->m_pvNode[m_subiter.m_iNode] != NULL);
        IRTLASSERT(fValid);
        return fValid;
    }

    const void* Record() const
    {
        IRTLASSERT(IsValid());
        return m_subiter.Record();
    }

    const DWORD_PTR Key() const
    {
        IRTLASSERT(IsValid());
        return m_subiter.Key();
    }

    bool operator==(
        const CLKRHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::operator==, this=%p, rhs=%p\n"),
                       this, &rhs);
        // m_pnc and m_iNode uniquely identify an iterator
        bool fEQ = ((m_subiter.m_pnc
                     == rhs.m_subiter.m_pnc)    // most unique field
                    &&  (m_subiter.m_iNode == rhs.m_subiter.m_iNode));
        IRTLASSERT(!fEQ
                   ||  ((m_ist == rhs.m_ist)
                        &&  (m_pht == rhs.m_pht)
                        &&  (m_subiter.m_plht == rhs.m_subiter.m_plht)
                        &&  (m_subiter.m_dwBucketAddr
                             == rhs.m_subiter.m_dwBucketAddr)));
        return fEQ;
    }

    bool operator!=(
        const CLKRHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::operator!=, this=%p, rhs=%p\n"),
                       this, &rhs);
        bool fNE = ((m_subiter.m_pnc != rhs.m_subiter.m_pnc)
                    ||  (m_subiter.m_iNode != rhs.m_subiter.m_iNode));
        //// IRTLASSERT(fNE == !this->operator==(rhs));
        return fNE;
    }
}; // class CLKRHashTable_Iterator
#endif // LKR_STL_ITERATORS

//--------------------------------------------------------------------
// CLKRLinearHashTable
//
// A thread-safe linear hash table.
//--------------------------------------------------------------------

class IRTL_DLLEXP CLKRLinearHashTable
{
public:
    typedef LKR_TABLE_LOCK  TableLock;
    typedef LKR_BUCKET_LOCK BucketLock;

#ifdef LKR_DEPRECATED_ITERATORS
    class CIterator;
    friend class CLKRLinearHashTable::CIterator;
#endif // LKR_DEPRECATED_ITERATORS

#ifdef LKR_STL_ITERATORS
    friend class CLKRLinearHashTable_Iterator;
    typedef CLKRLinearHashTable_Iterator Iterator;
#endif // LKR_STL_ITERATORS

private:
    friend class CNodeClump;
    friend class CLKRHashTable;

#ifdef LKRHASH_ALLOCATOR_NEW
    friend bool LKRHashTableInit();
    friend void LKRHashTableUninit();
#endif // LKRHASH_ALLOCATOR_NEW

#ifdef LKRHASH_INSTRUMENTATION
    // TODO
#endif // LKRHASH_INSTRUMENTATION

public:
    // aliases for convenience
    enum {
        NODES_PER_CLUMP = CNodeClump::NODES_PER_CLUMP,
        MIN_DIRSIZE     = CDirEntry::MIN_DIRSIZE,
        MAX_DIRSIZE     = CDirEntry::MAX_DIRSIZE,
        NAME_SIZE       = 16,
        NODE_BEGIN      = CNodeClump::NODE_BEGIN,
        NODE_END        = CNodeClump::NODE_END,
        NODE_STEP       = CNodeClump::NODE_STEP,
        HASH_INVALID_SIGNATURE = CNodeClump::HASH_INVALID_SIGNATURE,
    };
private:
    //
    // Miscellaneous helper functions
    //

    // Convert a hash signature to a bucket address
    inline DWORD _BucketAddress(DWORD dwSignature) const
    {
        DWORD dwBktAddr = _H0(dwSignature);
        // Has this bucket been split already?
        if (dwBktAddr < m_iExpansionIdx)
            dwBktAddr = _H1(dwSignature);
        IRTLASSERT(dwBktAddr < m_cActiveBuckets);
        IRTLASSERT(dwBktAddr < (m_cDirSegs << m_dwSegBits));
        return dwBktAddr;
    }

    // See the Linear Hashing paper
    static DWORD _H0(DWORD dwSignature, DWORD dwBktAddrMask)
    { return dwSignature & dwBktAddrMask; }

    DWORD _H0(DWORD dwSignature) const
    { return _H0(dwSignature, m_dwBktAddrMask0); }

    // See the Linear Hashing paper. Preserves one bit more than _H0.
    static DWORD _H1(DWORD dwSignature, DWORD dwBktAddrMask)
    { return dwSignature & ((dwBktAddrMask << 1) | 1); }

    DWORD _H1(DWORD dwSignature) const
    { return _H0(dwSignature, m_dwBktAddrMask1); }

    // In which segment within the directory does the bucketaddress lie?
    // (Return type must be lvalue so that it can be assigned to.)
    CSegment*& _Segment(DWORD dwBucketAddr) const
    {
        const DWORD iSeg = dwBucketAddr >> m_dwSegBits;
        IRTLASSERT(m_paDirSegs != NULL  &&  iSeg < m_cDirSegs);
        return m_paDirSegs[iSeg].m_pseg;
    }

    // Offset within the segment of the bucketaddress
    DWORD _SegIndex(DWORD dwBucketAddr) const
    { return (dwBucketAddr & m_dwSegMask); }

    // Convert a bucketaddress to a CBucket*
    inline CBucket* _Bucket(DWORD dwBucketAddr) const
    {
        IRTLASSERT(dwBucketAddr < m_cActiveBuckets);
        CSegment* const pseg = _Segment(dwBucketAddr);
        IRTLASSERT(pseg != NULL);
        return &(pseg->Slot(_SegIndex(dwBucketAddr)));
    }
    // Extract the key from a record
    const DWORD_PTR _ExtractKey(const void* pvRecord) const
    {
        IRTLASSERT(pvRecord != NULL);
        IRTLASSERT(m_pfnExtractKey != NULL);
        return (*m_pfnExtractKey)(pvRecord);
    }

    // Hash the key
    DWORD _CalcKeyHash(const DWORD_PTR pnKey) const
    {
        // Note pnKey==0 is acceptable, as the real key type could be an int
        IRTLASSERT(m_pfnCalcKeyHash != NULL);
        DWORD dwHash = (*m_pfnCalcKeyHash)(pnKey);
        // We forcibly scramble the result to help ensure a better distribution
#ifndef __HASHFN_NO_NAMESPACE__
        dwHash = HashFn::HashRandomizeBits(dwHash);
#else  // __HASHFN_NO_NAMESPACE__
        dwHash = ::HashRandomizeBits(dwHash);
#endif // __HASHFN_NO_NAMESPACE__
        IRTLASSERT(dwHash != HASH_INVALID_SIGNATURE);
        return dwHash;
    }

    // Compare two keys for equality
    bool _EqualKeys(const DWORD_PTR pnKey1, const DWORD_PTR pnKey2) const
    {
        IRTLASSERT(m_pfnEqualKeys != NULL);
        return (*m_pfnEqualKeys)(pnKey1, pnKey2);
    }

    // AddRef or Release a record.
    void _AddRefRecord(const void* pvRecord, int nIncr) const
    {
        IRTLASSERT(pvRecord != NULL  &&  (nIncr == -1  ||  nIncr == +1));
        IRTLASSERT(m_pfnAddRefRecord != NULL);
        (*m_pfnAddRefRecord)(pvRecord, nIncr);
    }

    // Find a bucket, given its signature.
    CBucket* _FindBucket(DWORD dwSignature, bool fLockForWrite) const;

    // Used by _FindKey so that the thread won't deadlock if the user has
    // already explicitly called table->WriteLock().
    bool _ReadOrWriteLock() const
    { return m_Lock.ReadOrWriteLock(); }

    void _ReadOrWriteUnlock(bool fReadLocked) const
    { m_Lock.ReadOrWriteUnlock(fReadLocked); }

    // Memory allocation wrappers to allow us to simulate allocation
    // failures during testing
    static CDirEntry* const
    _AllocateSegmentDirectory(
        size_t n);

    bool
    _FreeSegmentDirectory();

    static CNodeClump* const
    _AllocateNodeClump();

    static bool
    _FreeNodeClump(
        CNodeClump* pnc);

    CSegment* const
    _AllocateSegment() const;

    bool
    _FreeSegment(
        CSegment* pseg) const;

#ifdef LOCK_INSTRUMENTATION
    static LONG sm_cTables;

    static const TCHAR*
    _LockName()
    {
        LONG l = ++sm_cTables;
        // possible race condition but we don't care, as this is never
        // used in production code
        static TCHAR s_tszName[CLockStatistics::L_NAMELEN];
        wsprintf(s_tszName, _TEXT("LH%05x"), 0xFFFFF & l);
        return s_tszName;
    }

    // Statistics for the table lock
    CLockStatistics _LockStats() const
    { return m_Lock.Statistics(); }
#endif // LOCK_INSTRUMENTATION
private:
    // Fields are ordered so as to minimize number of cache lines touched

    DWORD           m_dwSignature;      // debugging: id & corruption check
    CHAR            m_szName[NAME_SIZE];// an identifier for debugging
    mutable LK_RETCODE m_lkrcState;     // Internal state of table
    mutable TableLock  m_Lock;          // Lock on entire linear hash table

    // type-specific function pointers
    PFnExtractKey   m_pfnExtractKey;    // Extract key from record
    PFnCalcKeyHash  m_pfnCalcKeyHash;   // Calculate hash signature of key
    PFnEqualKeys    m_pfnEqualKeys;     // Compare two keys
    PFnAddRefRecord m_pfnAddRefRecord;  // AddRef a record

    LK_TABLESIZE    m_lkts;             // "size" of table: small, medium, or large
    DWORD           m_dwSegBits;        // C{Small,Medium,Large}Segment::SEGBITS
    DWORD           m_dwSegSize;        // C{Small,Medium,Large}Segment::SEGSIZE
    DWORD           m_dwSegMask;        // C{Small,Medium,Large}Segment::SEGMASK
    double          m_MaxLoad;          // max load factor (average chain length)
    DWORD           m_dwBktAddrMask0;   // mask used for address calculation
    DWORD           m_dwBktAddrMask1;   // used in _H1 calculation
    DWORD           m_iExpansionIdx;    // address of next bucket to be expanded
    CDirEntry*      m_paDirSegs;        // directory of table segments
    DWORD           m_nLevel;           // number of table doublings performed
    DWORD           m_cDirSegs;         // segment directory size: varies between
                                        // MIN_DIRSIZE and MAX_DIRSIZE
    DWORD           m_cRecords;         // number of records in the table
    DWORD           m_cActiveBuckets;   // number of buckets in use (table size)
    WORD            m_wBucketLockSpins; // default spin count for bucket locks
    const BYTE      m_nTableLockType;   // for debugging: LOCK_SPINLOCK, etc
    const BYTE      m_nBucketLockType;  // for debugging: LOCK_SPINLOCK, etc
    const CLKRHashTable* const m_phtParent; // Owning table. NULL => standalone
    const bool      m_fMultiKeys;       // Allow multiple identical keys?

#ifndef LKR_NO_GLOBAL_LIST
    static CLockedDoubleList sm_llGlobalList; // All active CLKRLinearHashTables
    CListEntry               m_leGlobalList;
#endif // !LKR_NO_GLOBAL_LIST

    void _InsertThisIntoGlobalList()
    {
#ifndef LKR_NO_GLOBAL_LIST
        // Only add standalone CLKRLinearHashTables to global list.
        // CLKRHashTables have their own global list.
        if (m_phtParent == NULL)
            sm_llGlobalList.InsertHead(&m_leGlobalList);
#endif // !LKR_NO_GLOBAL_LIST
    }

    void _RemoveThisFromGlobalList()
    {
#ifndef LKR_NO_GLOBAL_LIST
        if (m_phtParent == NULL)
            sm_llGlobalList.RemoveEntry(&m_leGlobalList);
#endif // !LKR_NO_GLOBAL_LIST
    }
    // Non-trivial implementation functions
    LK_RETCODE _InsertRecord(const void* pvRecord, DWORD dwSignature,
                             bool fOverwrite
#ifdef LKR_STL_ITERATORS
                             , Iterator* piterResult=NULL
#endif // LKR_STL_ITERATORS
                            );
    LK_RETCODE _DeleteKey(const DWORD_PTR pnKey, DWORD dwSignature);
    LK_RETCODE _DeleteRecord(const void* pvRecord, DWORD dwSignature);
    bool       _DeleteNode(CBucket* pbkt, CNodeClump*& rpnc,
                           CNodeClump*& rpncPrev, int& riNode);
    LK_RETCODE _FindKey(const DWORD_PTR pnKey, DWORD dwSignature,
                        const void** ppvRecord
#ifdef LKR_STL_ITERATORS
                        , Iterator* piterResult=NULL
#endif // LKR_STL_ITERATORS
                       ) const;
    LK_RETCODE _FindRecord(const void* pvRecord, DWORD dwSignature) const;

    // returns count of errors in compacted state => 0 is good
    int _IsNodeCompact(CBucket* const pbkt) const;

#ifdef LKR_APPLY_IF
    // Predicate functions
    static LK_PREDICATE WINAPI
    _PredTrue(const void* /*pvRecord*/, void* /*pvState*/)
    { return LKP_PERFORM; }

    DWORD _Apply(PFnRecordAction pfnAction, void* pvState,
                 LK_LOCKTYPE lkl, LK_PREDICATE& rlkp);
    DWORD _ApplyIf(PFnRecordPred pfnPredicate,
                   PFnRecordAction pfnAction, void* pvState,
                   LK_LOCKTYPE lkl, LK_PREDICATE& rlkp);
    DWORD _DeleteIf(PFnRecordPred pfnPredicate, void* pvState,
                    LK_PREDICATE& rlkp);
#endif // LKR_APPLY_IF

    void       _Clear(bool fShrinkDirectory);
    LK_RETCODE _SetSegVars(LK_TABLESIZE lkts, DWORD cInitialBuckets);
    LK_RETCODE _Expand();
    LK_RETCODE _Contract();
    LK_RETCODE _SplitRecordSet(CNodeClump* pncOldTarget,
                               CNodeClump* pncNewTarget,
                               DWORD       iExpansionIdx,
                               DWORD       dwBktAddrMask,
                               DWORD       dwNewBkt,
                               CNodeClump* pncFreeList);
    LK_RETCODE _MergeRecordSets(CBucket*    pbktNewTarget,
                                CNodeClump* pncOldList,
                                CNodeClump* pncFreeList);

    // Private copy ctor and op= to prevent compiler synthesizing them.
    // TODO: implement these properly; they could be useful.
    CLKRLinearHashTable(const CLKRLinearHashTable&);
    CLKRLinearHashTable& operator=(const CLKRLinearHashTable&);
private:
    // This ctor is used by CLKRHashTable
    CLKRLinearHashTable(
        LPCSTR          pszName,        // An identifier for debugging
        PFnExtractKey   pfnExtractKey,  // Extract key from record
        PFnCalcKeyHash  pfnCalcKeyHash, // Calculate hash signature of key
        PFnEqualKeys    pfnEqualKeys,   // Compare two keys
        PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
        double          maxload,        // Upper bound on average chain length
        DWORD           initsize,       // Initial size of hash table.
        CLKRHashTable*  phtParent,      // Owning table.
        bool            fMultiKeys      // Allow multiple identical keys?
        );

    LK_RETCODE
    _Initialize(
        PFnExtractKey   pfnExtractKey,
        PFnCalcKeyHash  pfnCalcKeyHash,
        PFnEqualKeys    pfnEqualKeys,
        PFnAddRefRecord pfnAddRefRecord,
        LPCSTR          pszName,
        double          maxload,
        DWORD           initsize);

public:
    CLKRLinearHashTable(
        LPCSTR          pszName,        // An identifier for debugging
        PFnExtractKey   pfnExtractKey,  // Extract key from record
        PFnCalcKeyHash  pfnCalcKeyHash, // Calculate hash signature of key
        PFnEqualKeys    pfnEqualKeys,   // Compare two keys
        PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
        double          maxload=LK_DFLT_MAXLOAD,   // Upper bound on average chain length
        DWORD           initsize=LK_DFLT_INITSIZE, // Initial size of hash table.
        DWORD           num_subtbls=LK_DFLT_NUM_SUBTBLS, // for signature compatibility
                                                         // with CLKRHashTable
        bool            fMultiKeys=false // Allow multiple identical keys?
        );

    ~CLKRLinearHashTable();

    static const TCHAR* ClassName()
    { return _TEXT("CLKRLinearHashTable"); }

    int NumSubTables() const
    { return 1; }

    bool MultiKeys() const
    {
        return false;
        // return m_fMultiKeys; // TODO: implement
    }

    static LK_TABLESIZE NumSubTables(DWORD& rinitsize, DWORD& rnum_subtbls);
    // Insert a new record into hash table.
    // Returns LK_SUCCESS if all OK, LK_KEY_EXISTS if same key already
    // exists (unless fOverwrite), LK_ALLOC_FAIL if out of space,
    // or LK_BAD_RECORD for a bad record.
    LK_RETCODE InsertRecord(const void* pvRecord, bool fOverwrite=false)
    {
        if (!IsUsable())
            return m_lkrcState;
        if (pvRecord == NULL)
            return LK_BAD_RECORD;
        return _InsertRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)),
                             fOverwrite);
    }

    // Delete record with the given key.
    // Returns LK_SUCCESS if all OK, or LK_NO_SUCH_KEY if not found
    LK_RETCODE DeleteKey(const DWORD_PTR pnKey)
    {
        if (!IsUsable())
            return m_lkrcState;
        return _DeleteKey(pnKey, _CalcKeyHash(pnKey));
    }

    // Delete a record from the table, if present.
    // Returns LK_SUCCESS if all OK, or LK_NO_SUCH_KEY if not found
    LK_RETCODE DeleteRecord(const void* pvRecord)
    {
        if (!IsUsable())
            return m_lkrcState;
        if (pvRecord == NULL)
            return LK_BAD_RECORD;
        return _DeleteRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)));
    }

    // Find record with given key.
    // Returns: LK_SUCCESS, if record found (record is returned in *ppvRecord)
    //          LK_BAD_RECORD, if ppvRecord is invalid
    //          LK_NO_SUCH_KEY, if no record with given key value was found
    //          LK_UNUSABLE, if hash table not in usable state
    // Note: the record is AddRef'd. You must decrement the reference
    // count when you are finished with the record (if you're implementing
    // refcounting semantics).
    LK_RETCODE FindKey(const DWORD_PTR pnKey,
                       const void** ppvRecord) const
    {
        if (!IsUsable())
            return m_lkrcState;
        if (ppvRecord == NULL)
            return LK_BAD_RECORD;
        return _FindKey(pnKey, _CalcKeyHash(pnKey), ppvRecord);
    }
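
    // Illustrative usage sketch (not part of the original header;
    // CWidget and the Widget_* callbacks are the hypothetical examples
    // shown after PFnAddRefRecord above):
    //
    //   CLKRLinearHashTable lht("widgets", Widget_ExtractKey,
    //                           Widget_CalcKeyHash, Widget_EqualKeys,
    //                           Widget_AddRefRecord);
    //   lht.InsertRecord(pWidget);       // table takes a reference
    //
    //   const void* pv = NULL;
    //   if (lht.FindKey((DWORD_PTR) "frobnitz", &pv) == LK_SUCCESS)
    //   {
    //       const CWidget* pw = static_cast<const CWidget*>(pv);
    //       /* ... use the record ... */
    //       Widget_AddRefRecord(pw, -1); // release FindKey's reference
    //   }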
  1363. // Sees if the record is contained in the table
  1364. // Returns: LK_SUCCESS, if record found
  1365. // LK_BAD_RECORD, if pvRecord is invalid
  1366. // LK_NO_SUCH_KEY, if record is not in the table
  1367. // LK_UNUSABLE, if hash table not in usable state
  1368. // Note: the record is *not* AddRef'd.
  1369. LK_RETCODE FindRecord(const void* pvRecord) const
  1370. {
  1371. if (!IsUsable())
  1372. return m_lkrcState;
  1373. if (pvRecord == NULL)
  1374. return LK_BAD_RECORD;
  1375. return _FindRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)));
  1376. }
  1377. #ifdef LKR_APPLY_IF
  1378. // Walk the hash table, applying pfnAction to all records.
  1379. // Locks the whole table for the duration with either a (possibly
  1380. // shared) readlock or a writelock, according to lkl.
  1381. // Loop is aborted if pfnAction returns LKA_ABORT.
  1382. // Returns the number of successful applications.
  1383. DWORD Apply(PFnRecordAction pfnAction,
  1384. void* pvState=NULL,
  1385. LK_LOCKTYPE lkl=LKL_READLOCK);
  1386. // Walk the hash table, applying pfnAction to any records that match
  1387. // pfnPredicate. Locks the whole table for the duration with either
  1388. // a (possibly shared) readlock or a writelock, according to lkl.
  1389. // Loop is aborted if pfnAction returns LKA_ABORT.
  1390. // Returns the number of successful applications.
  1391. DWORD ApplyIf(PFnRecordPred pfnPredicate,
  1392. PFnRecordAction pfnAction,
  1393. void* pvState=NULL,
  1394. LK_LOCKTYPE lkl=LKL_READLOCK);
  1395. // Delete any records that match pfnPredicate.
  1396. // Locks the table for the duration with a writelock.
  1397. // Returns the number of deletions.
  1398. //
  1399. // Do *not* walk the hash table by hand with an iterator and call
  1400. // DeleteKey. The iterator will end up pointing to garbage.
  1401. DWORD DeleteIf(PFnRecordPred pfnPredicate,
  1402. void* pvState=NULL);
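// Example (a sketch, assuming the LK_PREDICATE enumerators
// LKP_PERFORM and LKP_NO_ACTION declared earlier in this header):
// delete all expired records in a single pass, under the writelock
// that DeleteIf takes for you.
//
//     static LK_PREDICATE WINAPI
//     IsExpired(const void* pvRecord, void* pvState)
//     {
//         const CFoo* pFoo = static_cast<const CFoo*>(pvRecord);
//         const DWORD* pdwNow = static_cast<const DWORD*>(pvState);
//         return (pFoo->m_dwExpires <= *pdwNow)
//                    ? LKP_PERFORM : LKP_NO_ACTION;
//     }
//
//     DWORD dwNow = GetTickCount();
//     DWORD cDeleted = ht.DeleteIf(IsExpired, &dwNow);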
  1403. #endif // LKR_APPLY_IF
  1404. // Check table for consistency. Returns 0 if okay, or the number of
  1405. // errors otherwise.
  1406. int CheckTable() const;
  1407. // Remove all data from the table
  1408. void Clear()
  1409. {
  1410. WriteLock();
  1411. _Clear(true);
  1412. WriteUnlock();
  1413. }
  1414. // Number of elements in the table
  1415. DWORD Size() const
  1416. { return m_cRecords; }
  1417. // Maximum possible number of elements in the table
  1418. DWORD MaxSize() const
  1419. { return static_cast<DWORD>(m_MaxLoad * MAX_DIRSIZE * m_dwSegSize); }
  1420. // Get hash table statistics
  1421. CLKRHashTableStats GetStatistics() const;
  1422. // Is the hash table usable?
  1423. bool IsUsable() const
  1424. { return (m_lkrcState == LK_SUCCESS); }
  1425. // Is the hash table consistent and correct?
  1426. bool IsValid() const
  1427. {
  1428. STATIC_ASSERT(((MIN_DIRSIZE & (MIN_DIRSIZE-1)) == 0) // == (1 << N)
  1429. && ((1 << 3) <= MIN_DIRSIZE)
  1430. && (MIN_DIRSIZE < MAX_DIRSIZE)
  1431. && ((MAX_DIRSIZE & (MAX_DIRSIZE-1)) == 0)
  1432. && (MAX_DIRSIZE <= (1 << 30)));
  1433. bool f = (m_lkrcState == LK_SUCCESS // serious internal failure?
  1434. && m_paDirSegs != NULL
  1435. && MIN_DIRSIZE <= m_cDirSegs && m_cDirSegs <= MAX_DIRSIZE
  1436. && (m_cDirSegs & (m_cDirSegs-1)) == 0
  1437. && m_pfnExtractKey != NULL
  1438. && m_pfnCalcKeyHash != NULL
  1439. && m_pfnEqualKeys != NULL
  1440. && m_pfnAddRefRecord != NULL
  1441. && m_cActiveBuckets > 0
  1442. && ValidSignature()
  1443. );
  1444. if (!f)
  1445. m_lkrcState = LK_UNUSABLE;
  1446. return f;
  1447. }
  1448. // Set the spin count on the table lock
  1449. void SetTableLockSpinCount(WORD wSpins)
  1450. { m_Lock.SetSpinCount(wSpins); }
  1451. // Get the spin count on the table lock
  1452. WORD GetTableLockSpinCount() const
  1453. { return m_Lock.GetSpinCount(); }
  1454. // Set/Get the spin count on the bucket locks
  1455. void SetBucketLockSpinCount(WORD wSpins);
  1456. WORD GetBucketLockSpinCount() const;
  1457. enum {
  1458. SIGNATURE = (('L') | ('K' << 8) | ('L' << 16) | ('H' << 24)),
  1459. SIGNATURE_FREE = (('L') | ('K' << 8) | ('L' << 16) | ('x' << 24)),
  1460. };
  1461. bool
  1462. ValidSignature() const
  1463. { return m_dwSignature == SIGNATURE;}
  1464. //
  1465. // Lock manipulators
  1466. //
  1467. // Lock the table (exclusively) for writing
  1468. void WriteLock()
  1469. { m_Lock.WriteLock(); }
  1470. // Lock the table (possibly shared) for reading
  1471. void ReadLock() const
  1472. { m_Lock.ReadLock(); }
  1473. // Unlock the table for writing
  1474. void WriteUnlock() const
  1475. { m_Lock.WriteUnlock(); }
  1476. // Unlock the table for reading
  1477. void ReadUnlock() const
  1478. { m_Lock.ReadUnlock(); }
  1479. // Is the table already locked for writing?
  1480. bool IsWriteLocked() const
  1481. { return m_Lock.IsWriteLocked(); }
  1482. // Is the table already locked for reading?
  1483. bool IsReadLocked() const
  1484. { return m_Lock.IsReadLocked(); }
  1485. // Is the table unlocked for writing?
  1486. bool IsWriteUnlocked() const
  1487. { return m_Lock.IsWriteUnlocked(); }
  1488. // Is the table unlocked for reading?
  1489. bool IsReadUnlocked() const
  1490. { return m_Lock.IsReadUnlocked(); }
  1491. // Convert the read lock to a write lock
  1492. void ConvertSharedToExclusive() const
  1493. { m_Lock.ConvertSharedToExclusive(); }
  1494. // Convert the write lock to a read lock
  1495. void ConvertExclusiveToShared() const
  1496. { m_Lock.ConvertExclusiveToShared(); }
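// Example: the single-call APIs (FindKey, InsertRecord, etc.) are
// internally synchronized; take the table lock yourself only when a
// multi-step sequence must appear atomic to other threads. A sketch
// of a read-then-promote pattern:
//
//     ht.ReadLock();
//     // ... examine the table ...
//     ht.ConvertSharedToExclusive();
//     // ... modify the table ...
//     ht.WriteUnlock();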
  1497. // LKRHASH_ALLOCATOR_DEFINITIONS(CLKRLinearHashTable);
  1498. #ifdef LKR_DEPRECATED_ITERATORS
  1499. public:
  1500. // Iterators can be used to walk the table. To ensure a consistent
  1501. // view of the data, the iterator locks the whole table. This can
  1502. // have a negative effect upon performance, because no other thread
  1503. // can do anything with the table. Use with care.
  1504. //
  1505. // You should not use an iterator to walk the table, calling DeleteKey,
  1506. // as the iterator will end up pointing to garbage.
  1507. //
1508. // Use Apply, ApplyIf, or DeleteIf instead of iterators to safely
1509. // walk the table. Or use the STL-style iterators.
  1510. //
  1511. // Note that iterators acquire a reference to the record pointed to
  1512. // and release that reference as soon as the iterator is incremented.
  1513. // In other words, this code is safe:
  1514. // lkrc = ht.IncrementIterator(&iter);
  1515. // // assume lkrc == LK_SUCCESS for the sake of this example
  1516. // CMyHashTable::Record* pRec = iter.Record();
  1517. // Foo(pRec); // uses pRec but doesn't hang on to it
  1518. // lkrc = ht.IncrementIterator(&iter);
  1519. //
  1520. // But this code is not safe because pRec is used out of the scope of
  1521. // the iterator that provided it:
  1522. // lkrc = ht.IncrementIterator(&iter);
  1523. // CMyHashTable::Record* pRec = iter.Record();
  1524. // // Broken code: Should have called ht.AddRefRecord(pRec, +1) here
  1525. // lkrc = ht.IncrementIterator(&iter);
  1526. // Foo(pRec); // Unsafe: because no longer have a valid reference
  1527. //
  1528. // If the record has no reference-counting semantics, then you can
  1529. // ignore the above remarks about scope.
  1530. class CIterator
  1531. {
  1532. protected:
  1533. friend class CLKRLinearHashTable;
  1534. CLKRLinearHashTable* m_plht; // which linear hash table?
  1535. DWORD m_dwBucketAddr; // bucket index
  1536. CNodeClump* m_pnc; // a CNodeClump in bucket
  1537. int m_iNode; // offset within m_pnc
  1538. LK_LOCKTYPE m_lkl; // readlock or writelock?
  1539. private:
  1540. // Private copy ctor and op= to prevent compiler synthesizing them.
  1541. // Must provide (bad) implementation because we export instantiations.
  1542. CIterator(const CIterator&);
  1543. CIterator& operator=(const CIterator&);
  1544. public:
  1545. CIterator(
  1546. LK_LOCKTYPE lkl=LKL_WRITELOCK)
  1547. : m_plht(NULL),
  1548. m_dwBucketAddr(0),
  1549. m_pnc(NULL),
  1550. m_iNode(-1),
  1551. m_lkl(lkl)
  1552. {}
  1553. // Return the record associated with this iterator
  1554. const void* Record() const
  1555. {
  1556. IRTLASSERT(IsValid());
  1557. return ((m_pnc != NULL
  1558. && m_iNode >= 0
  1559. && m_iNode < CLKRLinearHashTable::NODES_PER_CLUMP)
  1560. ? m_pnc->m_pvNode[m_iNode]
  1561. : NULL);
  1562. }
  1563. // Return the key associated with this iterator
  1564. const DWORD_PTR Key() const
  1565. {
  1566. IRTLASSERT(m_plht != NULL);
  1567. const void* pRec = Record();
  1568. return ((pRec != NULL && m_plht != NULL)
  1569. ? m_plht->_ExtractKey(pRec)
  1570. : NULL);
  1571. }
  1572. bool IsValid() const
  1573. {
  1574. return ((m_plht != NULL)
  1575. && (m_pnc != NULL)
  1576. && (0 <= m_iNode
  1577. && m_iNode < CLKRLinearHashTable::NODES_PER_CLUMP)
  1578. && (!m_pnc->IsEmptyNode(m_iNode)));
  1579. }
  1580. // Delete the record that the iterator points to. Does an implicit
  1581. // IncrementIterator after deletion.
  1582. LK_RETCODE DeleteRecord();
  1583. // Change the record that the iterator points to. The new record
  1584. // must have the same key as the old one.
  1585. LK_RETCODE ChangeRecord(const void* pNewRec);
  1586. }; // class CIterator
  1587. // Const iterators for readonly access. You must use these with
  1588. // const CLKRLinearHashTables.
  1589. class CConstIterator : public CIterator
  1590. {
  1591. private:
  1592. // Private, unimplemented copy ctor and op= to prevent
  1593. // compiler synthesizing them.
  1594. CConstIterator(const CConstIterator&);
  1595. CConstIterator& operator=(const CConstIterator&);
  1596. public:
  1597. CConstIterator()
  1598. : CIterator(LKL_READLOCK)
  1599. {}
  1600. }; // class CConstIterator
  1601. private:
  1602. // The public APIs lock the table. The private ones, which are used
  1603. // directly by CLKRHashTable, don't.
  1604. LK_RETCODE _InitializeIterator(CIterator* piter);
  1605. LK_RETCODE _CloseIterator(CIterator* piter);
  1606. public:
  1607. // Initialize the iterator to point to the first item in the hash table
  1608. // Returns LK_SUCCESS, LK_NO_MORE_ELEMENTS, or LK_BAD_ITERATOR.
  1609. LK_RETCODE InitializeIterator(CIterator* piter)
  1610. {
  1611. IRTLASSERT(piter != NULL && piter->m_plht == NULL);
  1612. if (piter == NULL || piter->m_plht != NULL)
  1613. return LK_BAD_ITERATOR;
  1614. if (piter->m_lkl == LKL_WRITELOCK)
  1615. WriteLock();
  1616. else
  1617. ReadLock();
  1618. return _InitializeIterator(piter);
  1619. }
  1620. // The const iterator version
  1621. LK_RETCODE InitializeIterator(CConstIterator* piter) const
  1622. {
  1623. IRTLASSERT(piter != NULL && piter->m_plht == NULL);
  1624. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  1625. if (piter == NULL || piter->m_plht != NULL
  1626. || piter->m_lkl == LKL_WRITELOCK)
  1627. return LK_BAD_ITERATOR;
  1628. ReadLock();
  1629. return const_cast<CLKRLinearHashTable*>(this)
  1630. ->_InitializeIterator(static_cast<CIterator*>(piter));
  1631. }
  1632. // Move the iterator on to the next item in the table.
  1633. // Returns LK_SUCCESS, LK_NO_MORE_ELEMENTS, or LK_BAD_ITERATOR.
  1634. LK_RETCODE IncrementIterator(CIterator* piter);
  1635. LK_RETCODE IncrementIterator(CConstIterator* piter) const
  1636. {
  1637. IRTLASSERT(piter != NULL && piter->m_plht == this);
  1638. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  1639. if (piter == NULL || piter->m_plht != this
  1640. || piter->m_lkl == LKL_WRITELOCK)
  1641. return LK_BAD_ITERATOR;
  1642. return const_cast<CLKRLinearHashTable*>(this)
  1643. ->IncrementIterator(static_cast<CIterator*>(piter));
  1644. }
  1645. // Close the iterator.
  1646. LK_RETCODE CloseIterator(CIterator* piter)
  1647. {
  1648. IRTLASSERT(piter != NULL && piter->m_plht == this);
  1649. if (piter == NULL || piter->m_plht != this)
  1650. return LK_BAD_ITERATOR;
  1651. _CloseIterator(piter);
  1652. if (piter->m_lkl == LKL_WRITELOCK)
  1653. WriteUnlock();
  1654. else
  1655. ReadUnlock();
  1656. return LK_SUCCESS;
1657. }
  1658. // Close the CConstIterator
  1659. LK_RETCODE CloseIterator(CConstIterator* piter) const
  1660. {
  1661. IRTLASSERT(piter != NULL && piter->m_plht == this);
  1662. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  1663. if (piter == NULL || piter->m_plht != this
  1664. || piter->m_lkl == LKL_WRITELOCK)
  1665. return LK_BAD_ITERATOR;
  1666. const_cast<CLKRLinearHashTable*>(this)
  1667. ->_CloseIterator(static_cast<CIterator*>(piter));
  1668. ReadUnlock();
  1669. return LK_SUCCESS;
1670. }
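// Example: a complete, correctly bracketed walk with these iterators
// (a sketch; Foo() is a hypothetical consumer that does not hold on
// to the record):
//
//     CMyHashTable::CIterator iter;
//     for (LK_RETCODE lkrc = ht.InitializeIterator(&iter);
//          lkrc == LK_SUCCESS;
//          lkrc = ht.IncrementIterator(&iter))
//     {
//         Foo(iter.Record());
//     }
//     ht.CloseIterator(&iter);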
  1671. #endif // LKR_DEPRECATED_ITERATORS
  1672. #ifdef LKR_STL_ITERATORS
  1673. private:
  1674. bool _Erase(Iterator& riter, DWORD dwSignature);
  1675. bool _Find(DWORD_PTR pnKey, DWORD dwSignature, Iterator& riterResult);
  1676. bool _IsValidIterator(const Iterator& riter) const
  1677. {
  1678. LKR_ITER_TRACE(_TEXT(" LKLH:_IsValidIterator(%p)\n"), &riter);
  1679. bool fValid = ((riter.m_plht == this)
  1680. && (riter.m_dwBucketAddr < m_cActiveBuckets)
  1681. && riter.IsValid());
  1682. IRTLASSERT(fValid);
  1683. return fValid;
  1684. }
  1685. public:
  1686. // Return iterator pointing to first item in table
  1687. Iterator
  1688. Begin();
  1689. // Return a one-past-the-end iterator. Always empty.
  1690. Iterator
  1691. End()
  1692. {
  1693. LKR_ITER_TRACE(_TEXT(" LKLH::End\n"));
  1694. return Iterator();
  1695. }
  1696. // Insert a record
  1697. // Returns `true' if successful; iterResult points to that record
  1698. // Returns `false' otherwise; iterResult == End()
  1699. bool
  1700. Insert(
  1701. /* in */ const void* pvRecord,
  1702. /* out */ Iterator& riterResult,
  1703. /* in */ bool fOverwrite=false);
  1704. // Erase the record pointed to by the iterator; adjust the iterator
  1705. // to point to the next record. Returns `true' if successful.
  1706. bool
  1707. Erase(
  1708. /* in,out */ Iterator& riter);
  1709. // Erase the records in the range [riterFirst, riterLast).
  1710. // Returns `true' if successful.
  1711. bool
  1712. Erase(
  1713. /*in*/ Iterator& riterFirst,
  1714. /*in*/ Iterator& riterLast);
  1715. // Find the (first) record that has its key == pnKey.
  1716. // If successful, returns `true' and iterator points to (first) record.
  1717. // If fails, returns `false' and iterator == End()
  1718. bool
  1719. Find(
  1720. /* in */ DWORD_PTR pnKey,
  1721. /* out */ Iterator& riterResult);
  1722. // Find the range of records that have their keys == pnKey.
  1723. // If successful, returns `true', iterFirst points to first record,
  1724. // and iterLast points to one-beyond-the last such record.
  1725. // If fails, returns `false' and both iterators == End().
1726. // Primarily useful when m_fMultiKeys == true
  1727. bool
  1728. EqualRange(
  1729. /* in */ DWORD_PTR pnKey,
  1730. /* out */ Iterator& riterFirst, // inclusive
  1731. /* out */ Iterator& riterLast); // exclusive
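// Example: STL-style lookup and equal-range scan (a sketch; Use() is
// a hypothetical consumer):
//
//     CLKRLinearHashTable::Iterator it;
//     if (ht.Find(pnKey, it))
//         Use(it.Record());
//
//     CLKRLinearHashTable::Iterator first, last;
//     if (ht.EqualRange(pnKey, first, last))
//         for ( ; first != last; first.Increment())
//             Use(first.Record());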
  1732. #endif // LKR_STL_ITERATORS
  1733. }; // class CLKRLinearHashTable
  1734. #ifdef LKR_STL_ITERATORS
  1735. // These functions have to be defined after CLKRLinearHashTable
  1736. inline void
  1737. CLKRLinearHashTable_Iterator::_AddRef(
  1738. int nIncr) const
  1739. {
1740. // TODO: should the iterator call _AddRefRecord at all?
  1741. if (m_plht != NULL && m_iNode != NODE_BEGIN - NODE_STEP)
  1742. {
1743. IRTLASSERT((0 <= m_iNode)
1744. && ((unsigned) m_iNode < NODES_PER_CLUMP)
1745. && m_pnc != NULL
1746. && (nIncr == -1 || nIncr == +1));
  1747. const void* pvRecord = m_pnc->m_pvNode[m_iNode];
  1748. IRTLASSERT(pvRecord != NULL);
  1749. LKR_ITER_TRACE(_TEXT(" LKLH::AddRef, this=%p, Rec=%p\n"),
  1750. this, pvRecord);
  1751. m_plht->_AddRefRecord(pvRecord, nIncr);
  1752. }
  1753. } // CLKRLinearHashTable_Iterator::_AddRef
  1754. inline const DWORD_PTR
  1755. CLKRLinearHashTable_Iterator::Key() const
  1756. {
  1757. IRTLASSERT(IsValid());
  1758. return m_plht->_ExtractKey(m_pnc->m_pvNode[m_iNode]);
  1759. } // CLKRLinearHashTable_Iterator::Key
  1760. #endif // LKR_STL_ITERATORS
  1761. //--------------------------------------------------------------------
  1762. // CLKRHashTable
  1763. //
  1764. // To improve concurrency, a hash table is divided into a number of
  1765. // (independent) subtables. Each subtable is a linear hash table. The
  1766. // number of subtables is defined when the table is created and remains
  1767. // fixed thereafter. Records are assigned to subtables based on their
  1768. // hashed key.
  1769. //
  1770. // For small or low-contention hashtables, you can bypass this
  1771. // thin wrapper and use CLKRLinearHashTable directly. The methods are
1772. // documented in the declaration of CLKRLinearHashTable (above).
  1773. //--------------------------------------------------------------------
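// Example: constructing the multiplexed table (a sketch, assuming
// application-supplied callbacks matching the PFn* typedefs declared
// earlier in this header):
//
//     CLKRHashTable ht("MyTable", MyExtractKey, MyCalcKeyHash,
//                      MyEqualKeys, MyAddRefRecord);
//
// The defaulted maxload, initsize, and num_subtbls parameters are
// usually best left alone; num_subtbls controls how many independent
// CLKRLinearHashTables the keys are spread across.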
  1774. class IRTL_DLLEXP CLKRHashTable
  1775. {
  1776. private:
  1777. typedef CLKRLinearHashTable SubTable;
  1778. public:
  1779. typedef SubTable::TableLock TableLock;
  1780. typedef SubTable::BucketLock BucketLock;
  1781. #ifdef LKR_DEPRECATED_ITERATORS
  1782. class CIterator;
  1783. friend class CLKRHashTable::CIterator;
  1784. #endif // LKR_DEPRECATED_ITERATORS
  1785. #ifdef LKR_STL_ITERATORS
  1786. friend class CLKRHashTable_Iterator;
  1787. typedef CLKRHashTable_Iterator Iterator;
  1788. #endif // LKR_STL_ITERATORS
  1789. friend class CLKRLinearHashTable;
  1790. // aliases for convenience
  1791. enum {
  1792. NAME_SIZE = SubTable::NAME_SIZE,
  1793. HASH_INVALID_SIGNATURE = SubTable::HASH_INVALID_SIGNATURE,
  1794. NODES_PER_CLUMP = SubTable::NODES_PER_CLUMP,
  1795. };
  1796. enum {
  1797. MAX_SUBTABLES = 64,
  1798. };
  1799. private:
  1800. // Hash table parameters
  1801. DWORD m_dwSignature; // debugging: id & corruption check
  1802. CHAR m_szName[NAME_SIZE]; // an identifier for debugging
  1803. DWORD m_cSubTables; // number of subtables
  1804. SubTable** m_palhtDir; // array of subtables
  1805. // type-specific function pointers
  1806. PFnExtractKey m_pfnExtractKey;
  1807. PFnCalcKeyHash m_pfnCalcKeyHash;
  1808. mutable LK_RETCODE m_lkrcState; // Internal state of table
  1809. int m_nSubTableMask;
  1810. #ifndef LKR_NO_GLOBAL_LIST
  1811. static CLockedDoubleList sm_llGlobalList; // All active CLKRHashTables
  1812. CListEntry m_leGlobalList;
  1813. #endif // !LKR_NO_GLOBAL_LIST
  1814. void
  1815. _InsertThisIntoGlobalList()
  1816. {
  1817. #ifndef LKR_NO_GLOBAL_LIST
  1818. sm_llGlobalList.InsertHead(&m_leGlobalList);
  1819. #endif // !LKR_NO_GLOBAL_LIST
  1820. }
  1821. void
  1822. _RemoveThisFromGlobalList()
  1823. {
  1824. #ifndef LKR_NO_GLOBAL_LIST
  1825. sm_llGlobalList.RemoveEntry(&m_leGlobalList);
  1826. #endif // !LKR_NO_GLOBAL_LIST
  1827. }
  1828. LKRHASH_GLOBAL_LOCK_DECLARATIONS();
  1829. // Private copy ctor and op= to prevent compiler synthesizing them.
  1830. // TODO: implement these properly; they could be useful.
  1831. CLKRHashTable(const CLKRHashTable&);
  1832. CLKRHashTable& operator=(const CLKRHashTable&);
  1833. // Extract the key from the record
  1834. const DWORD_PTR _ExtractKey(const void* pvRecord) const
  1835. {
  1836. IRTLASSERT(pvRecord != NULL);
  1837. IRTLASSERT(m_pfnExtractKey != NULL);
  1838. return (*m_pfnExtractKey)(pvRecord);
  1839. }
  1840. // Hash the key
  1841. DWORD _CalcKeyHash(const DWORD_PTR pnKey) const
  1842. {
  1843. // Note pnKey==0 is acceptable, as the real key type could be an int
  1844. IRTLASSERT(m_pfnCalcKeyHash != NULL);
  1845. DWORD dwHash = (*m_pfnCalcKeyHash)(pnKey);
  1846. // We forcibly scramble the result to help ensure a better distribution
  1847. #ifndef __HASHFN_NO_NAMESPACE__
  1848. dwHash = HashFn::HashRandomizeBits(dwHash);
  1849. #else // !__HASHFN_NO_NAMESPACE__
  1850. dwHash = ::HashRandomizeBits(dwHash);
  1851. #endif // !__HASHFN_NO_NAMESPACE__
  1852. IRTLASSERT(dwHash != HASH_INVALID_SIGNATURE);
  1853. return dwHash;
  1854. }
  1855. // Use the key's hash signature to multiplex into a subtable
  1856. SubTable* _SubTable(DWORD dwSignature) const;
  1857. // Find the index of pst within the subtable array
  1858. int _SubTableIndex(SubTable* pst) const;
  1859. // Memory allocation wrappers to allow us to simulate allocation
  1860. // failures during testing
  1861. static SubTable** const
  1862. _AllocateSubTableArray(
  1863. size_t n);
  1864. static bool
  1865. _FreeSubTableArray(
  1866. SubTable** palht);
  1867. static SubTable* const
  1868. _AllocateSubTable(
  1869. LPCSTR pszName, // An identifier for debugging
  1870. PFnExtractKey pfnExtractKey, // Extract key from record
  1871. PFnCalcKeyHash pfnCalcKeyHash, // Calculate hash signature of key
  1872. PFnEqualKeys pfnEqualKeys, // Compare two keys
  1873. PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
  1874. double maxload, // Upperbound on average chain length
  1875. DWORD initsize, // Initial size of hash table.
  1876. CLKRHashTable* phtParent, // Owning table.
  1877. bool fMultiKeys // Allow multiple identical keys?
  1878. );
  1879. static bool
  1880. _FreeSubTable(
  1881. SubTable* plht);
  1882. public:
  1883. CLKRHashTable(
  1884. LPCSTR pszName, // An identifier for debugging
  1885. PFnExtractKey pfnExtractKey, // Extract key from record
  1886. PFnCalcKeyHash pfnCalcKeyHash, // Calculate hash signature of key
  1887. PFnEqualKeys pfnEqualKeys, // Compare two keys
  1888. PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
  1889. double maxload=LK_DFLT_MAXLOAD, // bound on avg chain length
  1890. DWORD initsize=LK_DFLT_INITSIZE, // Initial size of hash table.
  1891. DWORD num_subtbls=LK_DFLT_NUM_SUBTBLS, // #subordinate hash tables.
  1892. bool fMultiKeys=false // Allow multiple identical keys?
  1893. );
  1894. ~CLKRHashTable();
  1895. static const TCHAR* ClassName()
  1896. {return _TEXT("CLKRHashTable");}
  1897. int NumSubTables() const {return m_cSubTables;}
  1898. bool MultiKeys() const;
  1899. static LK_TABLESIZE NumSubTables(DWORD& rinitsize, DWORD& rnum_subtbls);
  1900. // Thin wrappers for the corresponding methods in CLKRLinearHashTable
  1901. LK_RETCODE InsertRecord(const void* pvRecord, bool fOverwrite=false);
  1902. LK_RETCODE DeleteKey(const DWORD_PTR pnKey);
  1903. LK_RETCODE DeleteRecord(const void* pvRecord);
  1904. LK_RETCODE FindKey(const DWORD_PTR pnKey,
  1905. const void** ppvRecord) const;
  1906. LK_RETCODE FindRecord(const void* pvRecord) const;
  1907. #ifdef LKR_APPLY_IF
  1908. DWORD Apply(PFnRecordAction pfnAction,
  1909. void* pvState=NULL,
  1910. LK_LOCKTYPE lkl=LKL_READLOCK);
  1911. DWORD ApplyIf(PFnRecordPred pfnPredicate,
  1912. PFnRecordAction pfnAction,
  1913. void* pvState=NULL,
  1914. LK_LOCKTYPE lkl=LKL_READLOCK);
  1915. DWORD DeleteIf(PFnRecordPred pfnPredicate,
  1916. void* pvState=NULL);
  1917. #endif // LKR_APPLY_IF
  1918. void Clear();
  1919. int CheckTable() const;
  1920. DWORD Size() const;
  1921. DWORD MaxSize() const;
  1922. CLKRHashTableStats GetStatistics() const;
  1923. bool IsValid() const;
  1924. void SetTableLockSpinCount(WORD wSpins);
  1925. WORD GetTableLockSpinCount() const;
  1926. void SetBucketLockSpinCount(WORD wSpins);
  1927. WORD GetBucketLockSpinCount() const;
  1928. enum {
  1929. SIGNATURE = (('L') | ('K' << 8) | ('H' << 16) | ('T' << 24)),
  1930. SIGNATURE_FREE = (('L') | ('K' << 8) | ('H' << 16) | ('x' << 24)),
  1931. };
  1932. bool
  1933. ValidSignature() const
  1934. { return m_dwSignature == SIGNATURE;}
  1935. // Is the hash table usable?
  1936. bool IsUsable() const
  1937. { return (m_lkrcState == LK_SUCCESS); }
  1938. void WriteLock();
  1939. void ReadLock() const;
  1940. void WriteUnlock() const;
  1941. void ReadUnlock() const;
  1942. bool IsWriteLocked() const;
  1943. bool IsReadLocked() const;
  1944. bool IsWriteUnlocked() const;
  1945. bool IsReadUnlocked() const;
  1946. void ConvertSharedToExclusive() const;
  1947. void ConvertExclusiveToShared() const;
  1948. // LKRHASH_ALLOCATOR_DEFINITIONS(CLKRHashTable);
  1949. #ifdef LKR_DEPRECATED_ITERATORS
  1950. public:
  1951. typedef SubTable::CIterator CLHTIterator;
  1952. class CIterator : public CLHTIterator
  1953. {
  1954. protected:
  1955. friend class CLKRHashTable;
  1956. CLKRHashTable* m_pht; // which hash table?
  1957. int m_ist; // which subtable
  1958. private:
  1959. // Private copy ctor and op= to prevent compiler synthesizing them.
  1960. // TODO: implement these properly; they could be useful.
  1961. CIterator(const CIterator&);
  1962. CIterator& operator=(const CIterator&);
  1963. public:
  1964. CIterator(
  1965. LK_LOCKTYPE lkl=LKL_WRITELOCK)
  1966. : CLHTIterator(lkl),
  1967. m_pht(NULL),
  1968. m_ist(-1)
  1969. {}
  1970. const void* Record() const
  1971. {
  1972. IRTLASSERT(IsValid());
  1973. // This is a hack to work around a compiler bug. Calling
  1974. // CLHTIterator::Record calls this function recursively until
  1975. // the stack overflows.
  1976. const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
  1977. return pBase->Record();
  1978. }
  1979. const DWORD_PTR Key() const
  1980. {
  1981. IRTLASSERT(IsValid());
  1982. const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
  1983. return pBase->Key();
  1984. }
  1985. bool IsValid() const
  1986. {
  1987. const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
  1988. return (m_pht != NULL && m_ist >= 0 && pBase->IsValid());
  1989. }
  1990. };
  1991. // Const iterators for readonly access
  1992. class CConstIterator : public CIterator
  1993. {
  1994. private:
  1995. // Private, unimplemented copy ctor and op= to prevent
  1996. // compiler synthesizing them.
  1997. CConstIterator(const CConstIterator&);
  1998. CConstIterator& operator=(const CConstIterator&);
  1999. public:
  2000. CConstIterator()
  2001. : CIterator(LKL_READLOCK)
  2002. {}
  2003. };
  2004. public:
  2005. LK_RETCODE InitializeIterator(CIterator* piter);
  2006. LK_RETCODE IncrementIterator(CIterator* piter);
  2007. LK_RETCODE CloseIterator(CIterator* piter);
  2008. LK_RETCODE InitializeIterator(CConstIterator* piter) const
  2009. {
  2010. IRTLASSERT(piter != NULL && piter->m_pht == NULL);
  2011. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  2012. if (piter == NULL || piter->m_pht != NULL
  2013. || piter->m_lkl == LKL_WRITELOCK)
  2014. return LK_BAD_ITERATOR;
  2015. return const_cast<CLKRHashTable*>(this)
  2016. ->InitializeIterator(static_cast<CIterator*>(piter));
  2017. }
  2018. LK_RETCODE IncrementIterator(CConstIterator* piter) const
  2019. {
  2020. IRTLASSERT(piter != NULL && piter->m_pht == this);
  2021. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  2022. if (piter == NULL || piter->m_pht != this
  2023. || piter->m_lkl == LKL_WRITELOCK)
  2024. return LK_BAD_ITERATOR;
  2025. return const_cast<CLKRHashTable*>(this)
  2026. ->IncrementIterator(static_cast<CIterator*>(piter));
  2027. }
  2028. LK_RETCODE CloseIterator(CConstIterator* piter) const
  2029. {
  2030. IRTLASSERT(piter != NULL && piter->m_pht == this);
  2031. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  2032. if (piter == NULL || piter->m_pht != this
  2033. || piter->m_lkl == LKL_WRITELOCK)
  2034. return LK_BAD_ITERATOR;
  2035. return const_cast<CLKRHashTable*>(this)
  2036. ->CloseIterator(static_cast<CIterator*>(piter));
2037. }
  2038. #endif // LKR_DEPRECATED_ITERATORS
  2039. #ifdef LKR_STL_ITERATORS
  2040. private:
  2041. bool _IsValidIterator(const Iterator& riter) const
  2042. {
  2043. LKR_ITER_TRACE(_TEXT(" LKHT:_IsValidIterator(%p)\n"), &riter);
  2044. bool fValid = (riter.m_pht == this);
  2045. IRTLASSERT(fValid);
  2046. fValid = fValid && (0 <= riter.m_ist
  2047. && riter.m_ist < (int) m_cSubTables);
  2048. IRTLASSERT(fValid);
  2049. IRTLASSERT(_SubTableIndex(riter.m_subiter.m_plht) == riter.m_ist);
  2050. fValid = fValid && riter.IsValid();
  2051. IRTLASSERT(fValid);
  2052. return fValid;
  2053. }
  2054. public:
  2055. Iterator
  2056. Begin();
  2057. Iterator
  2058. End()
  2059. {
  2060. LKR_ITER_TRACE(_TEXT(" LKHT::End\n"));
  2061. return Iterator();
  2062. }
  2063. bool
  2064. Insert(
  2065. /* in */ const void* pvRecord,
  2066. /* out */ Iterator& riterResult,
  2067. /* in */ bool fOverwrite=false);
  2068. bool
  2069. Erase(
  2070. /* in,out */ Iterator& riter);
  2071. bool
  2072. Erase(
  2073. /*in*/ Iterator& riterFirst,
  2074. /*in*/ Iterator& riterLast);
  2075. bool
  2076. Find(
  2077. /* in */ DWORD_PTR pnKey,
  2078. /* out */ Iterator& riterResult);
  2079. bool
  2080. EqualRange(
  2081. /* in */ DWORD_PTR pnKey,
  2082. /* out */ Iterator& riterFirst, // inclusive
  2083. /* out */ Iterator& riterLast); // exclusive
  2084. #endif // LKR_STL_ITERATORS
  2085. }; // class CLKRHashTable
  2086. //--------------------------------------------------------------------
  2087. // A typesafe wrapper for CLKRHashTable (or CLKRLinearHashTable).
  2088. //
  2089. // * _Derived must derive from CTypedHashTable and provide certain member
  2090. // functions. It's needed for various downcasting operations. See
  2091. // CStringTestHashTable and CNumberTestHashTable below.
  2092. // * _Record is the type of the record. C{Linear}HashTable will store
  2093. // pointers to _Record.
  2094. // * _Key is the type of the key. _Key is used directly; i.e., it is
  2095. // not assumed to be a pointer type. C{Linear}HashTable assumes that
  2096. // the key is stored in the associated record. See the comments
  2097. // at the declaration of PFnExtractKey for more details.
  2098. //
  2099. // (optional parameters):
  2100. // * _BaseHashTable is the base hash table: CLKRHashTable or
2101. // CLKRLinearHashTable
  2102. // * _BaseIterator is the iterator type, _BaseHashTable::CIterator
  2103. //
  2104. // CTypedHashTable could derive directly from CLKRLinearHashTable, if you
  2105. // don't need the extra overhead of CLKRHashTable (which is quite low).
  2106. //
  2107. // You may need to add the following line to your code to disable
2108. // warning messages about truncating extremely long identifiers.
  2109. // #pragma warning (disable : 4786)
  2110. //--------------------------------------------------------------------
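// Example (a sketch, not part of this header): a typed table of
// hypothetical CFoo records keyed by a C string held in the record,
// assuming a string hash such as HashFn::HashString is available
// from the hash-function header.
//
//     class CFooTable
//         : public CTypedHashTable<CFooTable, CFoo, const char*>
//     {
//     public:
//         CFooTable()
//             : CTypedHashTable<CFooTable, CFoo, const char*>("foos")
//         {}
//         static const char* ExtractKey(const CFoo* pFoo)
//         { return pFoo->m_pszName; }
//         static DWORD CalcKeyHash(const char* pszKey)
//         { return HashFn::HashString(pszKey); }
//         static bool EqualKeys(const char* psz1, const char* psz2)
//         { return strcmp(psz1, psz2) == 0; }
//         static void AddRefRecord(CFoo* pFoo, int nIncr)
//         { /* no-op: this sketch does not refcount its records */ }
//     };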
  2111. template < class _Derived, class _Record, class _Key,
  2112. class _BaseHashTable=CLKRHashTable
  2113. #ifdef LKR_DEPRECATED_ITERATORS
  2114. , class _BaseIterator=_BaseHashTable::CIterator
  2115. #endif // LKR_DEPRECATED_ITERATORS
  2116. >
  2117. class CTypedHashTable : public _BaseHashTable
  2118. {
  2119. public:
  2120. // convenient aliases
  2121. typedef _Derived Derived;
  2122. typedef _Record Record;
  2123. typedef _Key Key;
  2124. typedef _BaseHashTable BaseHashTable;
  2125. typedef CTypedHashTable<_Derived, _Record, _Key, _BaseHashTable
  2126. #ifdef LKR_DEPRECATED_ITERATORS
  2127. , _BaseIterator
  2128. #endif // LKR_DEPRECATED_ITERATORS
  2129. > HashTable;
  2130. #ifdef LKR_DEPRECATED_ITERATORS
  2131. typedef _BaseIterator BaseIterator;
  2132. #endif // LKR_DEPRECATED_ITERATORS
  2133. #ifdef LKR_APPLY_IF
  2134. // ApplyIf() and DeleteIf(): Does the record match the predicate?
  2135. // Note: takes a Record*, not a const Record*. You can modify the
  2136. // record in Pred() or Action(), if you like, but if you do, you
  2137. // should use LKL_WRITELOCK to lock the table. Do NOT modify the key,
  2138. // unless you're going to remove the record from the table.
  2139. typedef LK_PREDICATE (WINAPI *PFnRecordPred) (Record* pRec, void* pvState);
  2140. // Apply() et al: Perform action on record.
  2141. typedef LK_ACTION (WINAPI *PFnRecordAction)(Record* pRec, void* pvState);
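// Example: with the typed signatures above, predicates and actions
// take Record* directly (a sketch; CFoo and m_cRefs are
// hypothetical, and LKP_PERFORM/LKP_NO_ACTION are the enumerators
// declared earlier in this header):
//
//     static LK_PREDICATE WINAPI
//     IsOrphaned(CFoo* pFoo, void* /* pvState */)
//     { return (pFoo->m_cRefs == 0) ? LKP_PERFORM : LKP_NO_ACTION; }
//
//     DWORD cDeleted = ht.DeleteIf(IsOrphaned);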
  2142. #endif // LKR_APPLY_IF
  2143. private:
  2144. // Wrappers for the typesafe methods exposed by the derived class
  2145. static const DWORD_PTR WINAPI
  2146. _ExtractKey(const void* pvRecord)
  2147. {
  2148. const _Record* pRec = static_cast<const _Record*>(pvRecord);
  2149. const _Key key = static_cast<const _Key>(_Derived::ExtractKey(pRec));
  2150. // I would prefer to use reinterpret_cast here and in _CalcKeyHash
  2151. // and _CompareKeys, but the stupid Win64 compiler thinks it knows
  2152. // better than I do.
  2153. return (const DWORD_PTR) key;
  2154. }
  2155. static DWORD WINAPI
  2156. _CalcKeyHash(const DWORD_PTR pnKey)
  2157. {
  2158. const _Key key = (const _Key) (DWORD_PTR) pnKey;
  2159. return _Derived::CalcKeyHash(key);
  2160. }
  2161. static bool WINAPI
  2162. _EqualKeys(const DWORD_PTR pnKey1, const DWORD_PTR pnKey2)
  2163. {
  2164. const _Key key1 = (const _Key) (DWORD_PTR) pnKey1;
  2165. const _Key key2 = (const _Key) (DWORD_PTR) pnKey2;
  2166. return _Derived::EqualKeys(key1, key2);
  2167. }
  2168. static void WINAPI
  2169. _AddRefRecord(const void* pvRecord, int nIncr)
  2170. {
  2171. _Record* pRec = static_cast<_Record*>(const_cast<void*>(pvRecord));
  2172. _Derived::AddRefRecord(pRec, nIncr);
  2173. }
  2174. #ifdef LKR_APPLY_IF
  2175. // Typesafe wrappers for Apply, ApplyIf, and DeleteIf.
  2176. class CState
  2177. {
  2178. public:
  2179. PFnRecordPred m_pfnPred;
  2180. PFnRecordAction m_pfnAction;
  2181. void* m_pvState;
  2182. CState(
  2183. PFnRecordPred pfnPred,
  2184. PFnRecordAction pfnAction,
  2185. void* pvState)
  2186. : m_pfnPred(pfnPred), m_pfnAction(pfnAction), m_pvState(pvState)
  2187. {}
  2188. };
  2189. static LK_PREDICATE WINAPI
  2190. _Pred(const void* pvRecord, void* pvState)
  2191. {
  2192. _Record* pRec = static_cast<_Record*>(const_cast<void*>(pvRecord));
  2193. CState* pState = static_cast<CState*>(pvState);
  2194. return (*pState->m_pfnPred)(pRec, pState->m_pvState);
  2195. }
  2196. static LK_ACTION WINAPI
  2197. _Action(const void* pvRecord, void* pvState)
  2198. {
  2199. _Record* pRec = static_cast<_Record*>(const_cast<void*>(pvRecord));
  2200. CState* pState = static_cast<CState*>(pvState);
  2201. return (*pState->m_pfnAction)(pRec, pState->m_pvState);
  2202. }
  2203. #endif // LKR_APPLY_IF
  2204. public:
  2205. CTypedHashTable(
  2206. LPCSTR pszName, // An identifier for debugging
  2207. double maxload=LK_DFLT_MAXLOAD, // Upperbound on avg chain len
  2208. DWORD initsize=LK_DFLT_INITSIZE, // Initial size of table: S/M/L
  2209. DWORD num_subtbls=LK_DFLT_NUM_SUBTBLS,// #subordinate hash tables.
  2210. bool fMultiKeys=false // Allow multiple identical keys?
  2211. )
  2212. : _BaseHashTable(pszName, _ExtractKey, _CalcKeyHash, _EqualKeys,
  2213. _AddRefRecord, maxload, initsize, num_subtbls,
  2214. fMultiKeys)
  2215. {
  2216. // Ensure that _Key is no bigger than a pointer. Because we
  2217. // support both numeric and pointer keys, the various casts
  2218. // in the member functions unfortunately silently truncate if
  2219. // _Key is an unacceptable numeric type, such as __int64 on x86.
  2220. STATIC_ASSERT(sizeof(_Key) <= sizeof(DWORD_PTR));
  2221. }
  2222. LK_RETCODE InsertRecord(const _Record* pRec, bool fOverwrite=false)
  2223. { return _BaseHashTable::InsertRecord(pRec, fOverwrite); }
  2224. LK_RETCODE DeleteKey(const _Key key)
  2225. {
  2226. DWORD_PTR pnKey = (DWORD_PTR) key;
  2227. return _BaseHashTable::DeleteKey(pnKey);
  2228. }
  2229. LK_RETCODE DeleteRecord(const _Record* pRec)
  2230. { return _BaseHashTable::DeleteRecord(pRec);}
2231. // Note: FindKey returns a _Record**, not a const _Record**. You
2232. // can use a const type for the template parameter to ensure constness.
  2233. LK_RETCODE FindKey(const _Key key, _Record** ppRec) const
  2234. {
  2235. if (ppRec == NULL)
  2236. return LK_BAD_RECORD;
  2237. *ppRec = NULL;
  2238. const void* pvRec = NULL;
  2239. DWORD_PTR pnKey = (DWORD_PTR) key;
  2240. LK_RETCODE lkrc = _BaseHashTable::FindKey(pnKey, &pvRec);
  2241. *ppRec = static_cast<_Record*>(const_cast<void*>(pvRec));
  2242. return lkrc;
  2243. }
  2244. LK_RETCODE FindRecord(const _Record* pRec) const
  2245. { return _BaseHashTable::FindRecord(pRec);}
  2246. // Other C{Linear}HashTable methods can be exposed without change
  2247. #ifdef LKR_APPLY_IF
  2248. public:
  2249. // Typesafe wrappers for Apply et al
  2250. DWORD Apply(PFnRecordAction pfnAction,
  2251. void* pvState=NULL,
  2252. LK_LOCKTYPE lkl=LKL_READLOCK)
  2253. {
  2254. IRTLASSERT(pfnAction != NULL);
  2255. if (pfnAction == NULL)
  2256. return 0;
  2257. CState state(NULL, pfnAction, pvState);
  2258. return _BaseHashTable::Apply(_Action, &state, lkl);
  2259. }
  2260. DWORD ApplyIf(PFnRecordPred pfnPredicate,
  2261. PFnRecordAction pfnAction,
  2262. void* pvState=NULL,
  2263. LK_LOCKTYPE lkl=LKL_READLOCK)
  2264. {
  2265. IRTLASSERT(pfnPredicate != NULL && pfnAction != NULL);
  2266. if (pfnPredicate == NULL || pfnAction == NULL)
  2267. return 0;
  2268. CState state(pfnPredicate, pfnAction, pvState);
  2269. return _BaseHashTable::ApplyIf(_Pred, _Action, &state, lkl);
  2270. }
  2271. DWORD DeleteIf(PFnRecordPred pfnPredicate, void* pvState=NULL)
  2272. {
  2273. IRTLASSERT(pfnPredicate != NULL);
  2274. if (pfnPredicate == NULL)
  2275. return 0;
  2276. CState state(pfnPredicate, NULL, pvState);
  2277. return _BaseHashTable::DeleteIf(_Pred, &state);
  2278. }
  2279. #endif // LKR_APPLY_IF
  2280. #ifdef LKR_DEPRECATED_ITERATORS
  2281. // Typesafe wrappers for iterators
  2282. class CIterator : public _BaseIterator
  2283. {
  2284. private:
  2285. // Private, unimplemented copy ctor and op= to prevent
  2286. // compiler synthesizing them.
  2287. CIterator(const CIterator&);
  2288. CIterator& operator=(const CIterator&);
  2289. public:
  2290. CIterator(
  2291. LK_LOCKTYPE lkl=LKL_WRITELOCK)
  2292. : _BaseIterator(lkl)
  2293. {}
  2294. _Record* Record() const
  2295. {
  2296. const _BaseIterator* pBase = static_cast<const _BaseIterator*>(this);
  2297. return reinterpret_cast<_Record*>(const_cast<void*>(
  2298. pBase->Record()));
  2299. }
  2300. _Key Key() const
  2301. {
  2302. const _BaseIterator* pBase = static_cast<const _BaseIterator*>(this);
  2303. return reinterpret_cast<_Key>(reinterpret_cast<void*>(pBase->Key()));
  2304. }
  2305. };
  2306. // readonly iterator
  2307. class CConstIterator : public CIterator
  2308. {
  2309. private:
  2310. // Private, unimplemented copy ctor and op= to prevent
  2311. // compiler synthesizing them.
  2312. CConstIterator(const CConstIterator&);
  2313. CConstIterator& operator=(const CConstIterator&);
  2314. public:
  2315. CConstIterator()
  2316. : CIterator(LKL_READLOCK)
  2317. {}
  2318. const _Record* Record() const
  2319. {
  2320. return CIterator::Record();
  2321. }
  2322. const _Key Key() const
  2323. {
  2324. return CIterator::Key();
  2325. }
  2326. };
  2327. public:
  2328. LK_RETCODE InitializeIterator(CIterator* piter)
  2329. {
  2330. return _BaseHashTable::InitializeIterator(piter);
  2331. }
  2332. LK_RETCODE IncrementIterator(CIterator* piter)
  2333. {
  2334. return _BaseHashTable::IncrementIterator(piter);
  2335. }
  2336. LK_RETCODE CloseIterator(CIterator* piter)
  2337. {
  2338. return _BaseHashTable::CloseIterator(piter);
  2339. }
  2340. LK_RETCODE InitializeIterator(CConstIterator* piter) const
  2341. {
  2342. return const_cast<HashTable*>(this)
  2343. ->InitializeIterator(static_cast<CIterator*>(piter));
  2344. }
  2345. LK_RETCODE IncrementIterator(CConstIterator* piter) const
  2346. {
  2347. return const_cast<HashTable*>(this)
  2348. ->IncrementIterator(static_cast<CIterator*>(piter));
  2349. }
  2350. LK_RETCODE CloseIterator(CConstIterator* piter) const
  2351. {
  2352. return const_cast<HashTable*>(this)
  2353. ->CloseIterator(static_cast<CIterator*>(piter));
  2354. }
  2355. #endif // LKR_DEPRECATED_ITERATORS
  2356. #ifdef LKR_STL_ITERATORS
  2357. // TODO: const_iterator
  2358. public:
  2359. class iterator
  2360. {
  2361. friend class CTypedHashTable<_Derived, _Record, _Key,
  2362. _BaseHashTable
  2363. #ifdef LKR_DEPRECATED_ITERATORS
  2364. , _BaseIterator
  2365. #endif // LKR_DEPRECATED_ITERATORS
  2366. >;
  2367. protected:
  2368. typename _BaseHashTable::Iterator m_iter;
  2369. iterator(
  2370. const typename _BaseHashTable::Iterator& rhs)
  2371. : m_iter(rhs)
  2372. {
  2373. LKR_ITER_TRACE(_TEXT("Typed::prot ctor, this=%p, rhs=%p\n"),
  2374. this, &rhs);
  2375. }
  2376. public:
  2377. typedef std::forward_iterator_tag iterator_category;
  2378. typedef _Record value_type;
  2379. typedef ptrdiff_t difference_type;
  2380. typedef size_t size_type;
  2381. typedef value_type& reference;
  2382. typedef value_type* pointer;
  2383. iterator()
  2384. : m_iter()
  2385. {
  2386. LKR_ITER_TRACE(_TEXT("Typed::default ctor, this=%p\n"), this);
  2387. }
  2388. iterator(
  2389. const iterator& rhs)
  2390. : m_iter(rhs.m_iter)
  2391. {
  2392. LKR_ITER_TRACE(_TEXT("Typed::copy ctor, this=%p, rhs=%p\n"),
  2393. this, &rhs);
  2394. }
  2395. iterator& operator=(
  2396. const iterator& rhs)
  2397. {
  2398. LKR_ITER_TRACE(_TEXT("Typed::operator=, this=%p, rhs=%p\n"),
  2399. this, &rhs);
  2400. m_iter = rhs.m_iter;
  2401. return *this;
  2402. }
  2403. ~iterator()
  2404. {
  2405. LKR_ITER_TRACE(_TEXT("Typed::dtor, this=%p\n"), this);
  2406. }
  2407. pointer operator->() const
  2408. {
  2409. return (reinterpret_cast<_Record*>(
  2410. const_cast<void*>(m_iter.Record())));
  2411. }
  2412. reference operator*() const
  2413. {
  2414. return * (operator->());
  2415. }
  2416. // pre-increment
  2417. iterator& operator++()
  2418. {
  2419. LKR_ITER_TRACE(_TEXT("Typed::pre-increment, this=%p\n"), this);
  2420. m_iter.Increment();
  2421. return *this;
  2422. }
  2423. // post-increment
  2424. iterator operator++(int)
  2425. {
  2426. LKR_ITER_TRACE(_TEXT("Typed::post-increment, this=%p\n"), this);
  2427. iterator iterPrev = *this;
  2428. m_iter.Increment();
  2429. return iterPrev;
  2430. }
  2431. bool operator==(
  2432. const iterator& rhs) const
  2433. {
  2434. LKR_ITER_TRACE(_TEXT("Typed::operator==, this=%p, rhs=%p\n"),
  2435. this, &rhs);
  2436. return m_iter == rhs.m_iter;
  2437. }
  2438. bool operator!=(
  2439. const iterator& rhs) const
  2440. {
  2441. LKR_ITER_TRACE(_TEXT("Typed::operator!=, this=%p, rhs=%p\n"),
  2442. this, &rhs);
  2443. return m_iter != rhs.m_iter;
  2444. }
  2445. _Record* Record() const
  2446. {
  2447. LKR_ITER_TRACE(_TEXT("Typed::Record, this=%p\n"), this);
  2448. return reinterpret_cast<_Record*>(
  2449. const_cast<void*>(m_iter.Record()));
  2450. }
  2451. _Key Key() const
  2452. {
  2453. LKR_ITER_TRACE(_TEXT("Typed::Key, this=%p\n"), this);
  2454. return reinterpret_cast<_Key>(
  2455. reinterpret_cast<void*>(m_iter.Key()));
  2456. }
  2457. }; // class iterator
  2458. // Return iterator pointing to first item in table
  2459. iterator begin()
  2460. {
  2461. LKR_ITER_TRACE(_TEXT("Typed::begin()\n"));
  2462. return iterator(_BaseHashTable::Begin());
  2463. }
  2464. // Return a one-past-the-end iterator. Always empty.
  2465. iterator end()
  2466. {
  2467. LKR_ITER_TRACE(_TEXT("Typed::end()\n"));
  2468. return iterator(_BaseHashTable::End());
  2469. }
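// Example: idiomatic STL-style traversal of a typed table (a sketch
// using the hypothetical CFooTable above); operator-> yields a
// _Record*, so the record's members are directly accessible.
//
//     for (CFooTable::iterator it = ht.begin(); it != ht.end(); ++it)
//         Use(it->m_pszName);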
  2470. template <class _InputIterator>
  2471. CTypedHashTable(
  2472. LPCSTR pszName, // An identifier for debugging
  2473. _InputIterator f, // first element in range
  2474. _InputIterator l, // one-beyond-last element
  2475. double maxload=LK_DFLT_MAXLOAD, // Upperbound on avg chain len
  2476. DWORD initsize=LK_DFLT_INITSIZE, // Initial size of table: S/M/L
  2477. DWORD num_subtbls=LK_DFLT_NUM_SUBTBLS,// #subordinate hash tables.
  2478. bool fMultiKeys=false // Allow multiple identical keys?
  2479. )
  2480. : _BaseHashTable(pszName, _ExtractKey, _CalcKeyHash, _EqualKeys,
  2481. _AddRefRecord, maxload, initsize, num_subtbls,
  2482. fMultiKeys)
  2483. {
  2484. insert(f, l);
  2485. }
  2486. template <class _InputIterator>
  2487. void insert(_InputIterator f, _InputIterator l)
  2488. {
  2489. for ( ; f != l; ++f)
  2490. InsertRecord(&(*f));
  2491. }
  2492. bool
  2493. Insert(
  2494. const _Record* pRecord,
  2495. iterator& riterResult,
  2496. bool fOverwrite=false)
  2497. {
  2498. LKR_ITER_TRACE(_TEXT("Typed::Insert\n"));
  2499. return _BaseHashTable::Insert(pRecord, riterResult.m_iter, fOverwrite);
  2500. }
  2501. bool
  2502. Erase(
  2503. iterator& riter)
  2504. {
  2505. LKR_ITER_TRACE(_TEXT("Typed::Erase\n"));
  2506. return _BaseHashTable::Erase(riter.m_iter);
  2507. }
  2508. bool
  2509. Erase(
  2510. iterator& riterFirst,
  2511. iterator& riterLast)
  2512. {
  2513. LKR_ITER_TRACE(_TEXT("Typed::Erase2\n"));
  2514. return _BaseHashTable::Erase(riterFirst.m_iter, riterLast.m_iter);
  2515. }
  2516. bool
  2517. Find(
  2518. const _Key key,
  2519. iterator& riterResult)
  2520. {
  2521. LKR_ITER_TRACE(_TEXT("Typed::Find\n"));
  2522. const void* pvKey = reinterpret_cast<const void*>(key);
  2523. DWORD_PTR pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
  2524. return _BaseHashTable::Find(pnKey, riterResult.m_iter);
  2525. }
  2526. bool
  2527. EqualRange(
  2528. const _Key key,
  2529. iterator& riterFirst,
  2530. iterator& riterLast)
  2531. {
  2532. LKR_ITER_TRACE(_TEXT("Typed::EqualRange\n"));
  2533. const void* pvKey = reinterpret_cast<const void*>(key);
  2534. DWORD_PTR pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
  2535. return _BaseHashTable::EqualRange(pnKey, riterFirst.m_iter,
  2536. riterLast.m_iter);
  2537. }
  2538. // The iterator functions for an STL hash_(|multi)_(set|map)
  2539. //
  2540. // Value type of a Pair-Associative Container is
  2541. // pair<const key_type, mapped_type>
  2542. //
  2543. // pair<iterator,bool> insert(const value_type& x);
  2544. //
  2545. // void erase(iterator pos);
  2546. // void erase(iterator f, iterator l);
  2547. //
  2548. // iterator find(const key_type& k) [const];
  2549. // const_iterator find(const key_type& k) const;
  2550. //
  2551. // pair<iterator,iterator> equal_range(const key_type& k) [const];
  2552. // pair<const_iterator,const_iterator> equal_range(const key_type& k) const
  2553. #endif // LKR_STL_ITERATORS
2554. }; // class CTypedHashTable
  2555. #ifndef __LKRHASH_NO_NAMESPACE__
  2556. }
  2557. #endif // !__LKRHASH_NO_NAMESPACE__
  2558. #endif // __LKRHASH_H__