Leaked source code of Windows Server 2003
/*++

Copyright (c) 1998-2001 Microsoft Corporation

Module Name:
    LKRhash.h

Abstract:
    Declares LKRhash: a fast, scalable, cache- and MP-friendly hash table

Author:
    Paul (Per-Ake) Larson, palarson@microsoft.com, July 1997
    Murali R. Krishnan (MuraliK)
    George V. Reilly (GeorgeRe) 06-Jan-1998

Environment:
    Win32 - User Mode

Project:
    Internet Information Services Rearchitecture Core Library

Revision History:
    10/01/1998 - Change name from LKhash to LKRhash

--*/

#define LKR_STL_ITERATORS 1
// #define LKR_DEPRECATED_ITERATORS
#define LKR_APPLY_IF
#undef  LKR_COUNTDOWN

#define __HASHFN_NO_NAMESPACE__
#define __LKRHASH_NO_NAMESPACE__

#ifndef LKR_TABLE_LOCK
# define LKR_TABLE_LOCK  CReaderWriterLock3
#endif // !LKR_TABLE_LOCK

#ifndef LKR_BUCKET_LOCK
# ifdef LKR_DEPRECATED_ITERATORS
#  define LKR_BUCKET_LOCK CReaderWriterLock3
# else  // !LKR_DEPRECATED_ITERATORS
#  define LKR_BUCKET_LOCK CReaderWriterLock2
# endif // !LKR_DEPRECATED_ITERATORS
#endif // !LKR_BUCKET_LOCK

#ifndef __LKRHASH_H__
#define __LKRHASH_H__

//=====================================================================
// The class CLKRLinearHashTable defined in this file provides dynamic
// hash tables, i.e. tables that grow and shrink dynamically with
// the number of records in the table.
//
// The basic method used is linear hashing, as explained in:
//
//     P.-A. Larson, Dynamic Hash Tables, Comm. of the ACM, 31, 4 (1988)
//
// This version has the following characteristics:
// - It is thread-safe and uses spin locks for synchronization.
// - It was designed to support very high rates of concurrent
//   operations (inserts/deletes/lookups). It achieves this by
//   (a) partitioning a CLKRHashTable into a collection of
//       CLKRLinearHashTables to reduce contention on the global table lock;
//   (b) minimizing the hold time on a table lock, preferring to lock
//       down a bucket chain instead.
// - The design is L1 cache-conscious. See CNodeClump.
// - It is designed for sets varying in size from a dozen
//   elements to several million.
//
// Main classes:
//   CLKRLinearHashTable: thread-safe linear hash table
//   CLKRHashTable:       collection of CLKRLinearHashTables
//   CTypedHashTable:     typesafe wrapper for CLKRHashTable
//
//
// Paul Larson, palarson@microsoft.com, July 1997
//   Original implementation with input from Murali R. Krishnan,
//   muralik@microsoft.com.
//
// George V. Reilly, georgere@microsoft.com, Dec 1997-Jan 1998
//   Massive cleanup and rewrite. Added templates.
//=====================================================================
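// A hypothetical usage sketch (not itself part of this header): a table
// of CFoo records keyed by a case-insensitive string. The class and
// member names are illustrative and the CTypedHashTable template
// arguments are abbreviated; the static callbacks follow the PFn*
// contracts declared later in this file, and HashStringNoCase is
// assumed to be one of the helpers in hashfn.h.
//
//     class CFoo
//     {
//     public:
//         const char*  m_pszName; // the key, embedded in the record
//         mutable LONG m_cRefs;   // refcount driven by AddRefRecord
//     };
//
//     class CFooTable
//         : public CTypedHashTable<CFooTable, const CFoo, const char*>
//     {
//     public:
//         static const char* ExtractKey(const CFoo* pFoo)
//         { return pFoo->m_pszName; }
//         static DWORD CalcKeyHash(const char* pszKey)
//         { return HashStringNoCase(pszKey); }
//         static bool EqualKeys(const char* psz1, const char* psz2)
//         { return _stricmp(psz1, psz2) == 0; }
//         static void AddRefRecord(const CFoo* pFoo, int nIncr)
//         { InterlockedExchangeAdd(&pFoo->m_cRefs, nIncr); }
//     };
//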
// 1) Linear Hashing
// ------------------
//
// Linear hash tables grow and shrink dynamically with the number of
// records in the table. The growth or shrinkage is smooth: logically,
// one bucket at a time, but physically in larger increments
// (64 buckets). An insertion (deletion) may cause an expansion
// (contraction) of the table. This causes relocation of a small number
// of records (at most one bucket's worth). All operations (insert,
// delete, lookup) take constant expected time, regardless of the
// current size or the growth of the table.
//
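// Worked example of one expansion step (illustrative numbers): suppose
// the unexpanded address space is 256 buckets (so the level mask is
// 0xFF) and buckets 0..35 have already been split. Splitting bucket 36
// rehashes only that one chain: records whose signature has bit 8 clear
// stay in bucket 36; the others move to the new bucket 36 + 256 = 292.
// No other bucket is touched, which is how every operation keeps its
// constant expected cost. The address calculation looks like this
// (_H0/_H1 below are the real versions):
//
//     DWORD dwMask0 = 0xFF;                    // 256-bucket level
//     DWORD dwAddr  = dwSignature & dwMask0;   // _H0: 0..255
//     if (dwAddr < 36)                         // bucket already split?
//         dwAddr = dwSignature & 0x1FF;        // _H1: 0..511
//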
// 2) LKR extensions to Linear hash table
// --------------------------------------
//
// Larson-Krishnan-Reilly extensions to Linear hash tables for multiprocessor
// scalability and improved cache performance.
//
// Traditional implementations of linear hash tables use one global lock
// to prevent interference between concurrent operations
// (insert/delete/lookup) on the table. The single lock easily becomes
// the bottleneck in SMP scenarios when multiple threads are used.
//
// Traditionally, a (hash) bucket is implemented as a chain of
// single-item nodes. Every operation results in chasing down a chain
// looking for an item. However, pointer chasing is very slow on modern
// systems because almost every jump results in a cache miss. L2 (or L3)
// cache misses are very expensive in missed CPU cycles and the cost is
// increasing (going to 100s of cycles in the future).
//
// The LKR extensions offer:
// 1) Partitioning (by hashing) of records among multiple subtables.
//    Each subtable has locks, but there is no global lock. Each
//    subtable receives a much lower rate of operations, resulting in
//    fewer conflicts.
//
// 2) Improved cache locality by grouping keys and their hash values
//    into contiguous chunks that fit exactly into one (or a few)
//    cache lines.
//
// Specifically, the implementation here achieves this using the
// following techniques.
//
// Class CLKRHashTable is the top-level data structure that dynamically
// creates m_cSubTables linear hash tables. The CLKRLinearHashTables act as
// the subtables to which items and accesses are fanned out. A good
// hash function multiplexes requests uniformly to the various subtables,
// thus minimizing traffic to any single subtable. The implementation
// uses a home-grown version of bounded spinlocks; that is, a thread
// does not spin on a lock indefinitely, instead yielding after a
// predetermined number of loops.
//
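// A sketch of the fan-out (simplified; the field name m_palhtDir is
// illustrative): the key is hashed once, and the signature both selects
// a subtable and, within that subtable, selects a bucket. Only the
// chosen subtable's locks are ever touched.
//
//     DWORD dwSignature = _CalcKeyHash(pnKey);
//     CLKRLinearHashTable* plht = m_palhtDir[dwSignature % m_cSubTables];
//     return plht->_FindKey(pnKey, dwSignature, ppvRecord);
//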
// Each CLKRLinearHashTable consists of a directory of CDirEntrys, each
// pointing to a segment holding m_dwSegSize CBuckets. Each CBucket in
// turn consists of a chain of CNodeClumps. Each CNodeClump contains a
// group of NODES_PER_CLUMP hash values (aka hash keys or signatures)
// and pointers to the associated data items. Keeping the signatures
// together increases the cache locality in scans for lookup.
//
// Traditionally, people store a linked-list element right inside the
// object that is hashed and use this linked list for the chaining of
// data blocks. However, keeping just the pointers to the data objects,
// and not chaining through them, limits the need to bring the data
// object into the cache. We need to access the data object only if the
// hash values match. This limits the cache-thrashing behaviour
// exhibited by conventional implementations. It has the additional
// benefit that the objects themselves do not need to be modified
// in order to be collected in the hash table (i.e., it's non-invasive).
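//
// To contrast the two layouts in miniature (both structs illustrative):
//
//     // Invasive: the chain pointer lives in the record itself, so
//     // every hop of a lookup drags an entire record into the cache.
//     struct CInvasiveNode { CInvasiveNode* pNext; /* key, payload */ };
//
//     // LKRhash: the chain is built from clumps of (signature, pointer)
//     // pairs; a record is touched only when its signature matches.
//     // CNodeClump below is the real version of this idea.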
//--------------------------------------------------------------------
// TODO
// * Provide support for multiple, identical keys. Needed for EqualRange,
//   hash_multiset, and hash_multimap.
// * Provide implementations of the STL collection classes: hash_map,
//   hash_set, hash_multimap, and hash_multiset.
// * Make exception-safe.
// * Use auto_ptrs.
// * Add some kind of auto object for readlocking or writelocking a table,
//   so that the table automatically gets unlocked by the auto-obj's
//   destructor.
// * Provide a C API wrapper
// * Port to kernel mode (will require different locks, at the very least)
// * Port to managed code (Chris Tracy has started on this)
// * Typedef hash signatures (currently DWORDs)
// * Make available as a static library as well as a DLL
//--------------------------------------------------------------------

#ifndef __IRTLDBG_H__
# include <irtldbg.h>
#endif

#ifndef __LSTENTRY_H__
# include <lstentry.h>
#endif

#ifndef __HASHFN_H__
# include <hashfn.h>
#endif

#include <limits.h>

#ifdef LKR_STL_ITERATORS
// needed for std::forward_iterator_tag, etc.
# include <iterator>

// The iterators have very verbose tracing. Don't want it on all the time
// in debug builds.
# if defined(IRTLDEBUG) && (LKR_STL_ITERATORS >= 2)
#  define LKR_ITER_TRACE IrtlTrace
# else  // !defined(IRTLDEBUG) || LKR_STL_ITERATORS < 2
#  define LKR_ITER_TRACE 1 ? (void)0 : IrtlTrace
# endif // !defined(IRTLDEBUG) || LKR_STL_ITERATORS < 2
#endif // LKR_STL_ITERATORS

// Used to initialize and destroy custom allocators
extern "C" bool LKRHashTableInit();
extern "C" void LKRHashTableUninit();

enum LK_TABLESIZE {
    LK_SMALL_TABLESIZE  = 1,    // < 200 elements
    LK_MEDIUM_TABLESIZE = 2,    // 200...10,000 elements
    LK_LARGE_TABLESIZE  = 3,    // 10,000+ elements
};

// Default values for the hashtable constructors
enum {
#ifndef _WIN64
    LK_DFLT_MAXLOAD = 6,    // Default upperbound on average chain length.
#else // _WIN64
    LK_DFLT_MAXLOAD = 4,    // 64-byte nodes => NODES_PER_CLUMP = 4
#endif // _WIN64
    LK_DFLT_INITSIZE = LK_MEDIUM_TABLESIZE, // Default initial table size
    LK_DFLT_NUM_SUBTBLS = 0,    // Use a heuristic to choose #subtables
};

// build fix hack
enum {
    DFLT_LK_MAXLOAD     = LK_DFLT_MAXLOAD,
    DFLT_LK_INITSIZE    = LK_DFLT_INITSIZE,
    DFLT_LK_NUM_SUBTBLS = LK_DFLT_NUM_SUBTBLS,
};

//--------------------------------------------------------------------
// Possible return codes from public member functions of
// CLKRLinearHashTable, CLKRHashTable, and CTypedHashTable

enum LK_RETCODE {
    // severe errors < 0
    LK_UNUSABLE = -99,  // Table corrupted: all bets are off
    LK_ALLOC_FAIL,      // ran out of memory
    LK_BAD_ITERATOR,    // invalid iterator; e.g., points to another table
    LK_BAD_RECORD,      // invalid record; e.g., NULL for InsertRecord
    LK_BAD_PARAMETERS,  // invalid parameters; e.g., NULL fnptrs to ctor
    LK_NOT_INITIALIZED, // LKRHashTableInit was not called

    LK_SUCCESS = 0,     // everything's okay
    LK_KEY_EXISTS,      // key already present for InsertRecord(no-overwrite)
    LK_NO_SUCH_KEY,     // key not found
    LK_NO_MORE_ELEMENTS,// iterator exhausted
};

#define LK_SUCCEEDED(lkrc) ((lkrc) >= LK_SUCCESS)
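
// Note that LK_SUCCEEDED treats the informational codes (LK_KEY_EXISTS,
// LK_NO_SUCH_KEY, LK_NO_MORE_ELEMENTS) as success, since they compare
// >= LK_SUCCESS; only the severe, negative errors fail the test.
// A minimal sketch, given some table lkht:
//
//     LK_RETCODE lkrc = lkht.InsertRecord(pvRecord);
//     if (!LK_SUCCEEDED(lkrc))
//         return E_FAIL;          // LK_ALLOC_FAIL, LK_UNUSABLE, ...
//     if (lkrc == LK_KEY_EXISTS)
//         /* "succeeded", but the key was already present */ ;
//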
#ifdef LKR_APPLY_IF
//--------------------------------------------------------------------
// Return codes from PFnRecordPred.
enum LK_PREDICATE {
    LKP_ABORT        = 1,   // Stop walking the table immediately
    LKP_NO_ACTION    = 2,   // No action, just keep walking
    LKP_PERFORM      = 3,   // Perform action and continue walking
    LKP_PERFORM_STOP = 4,   // Perform action, then stop
    LKP_DELETE       = 5,   // Delete record and keep walking
    LKP_DELETE_STOP  = 6,   // Delete record, then stop
};

//--------------------------------------------------------------------
// Return codes from PFnRecordAction.
enum LK_ACTION {
    LKA_ABORT     = 1,  // Stop walking the table immediately
    LKA_FAILED    = 2,  // Action failed; continue walking the table
    LKA_SUCCEEDED = 3,  // Action succeeded; continue walking the table
};
#endif // LKR_APPLY_IF

#if defined(LKR_DEPRECATED_ITERATORS) || defined(LKR_APPLY_IF)
//--------------------------------------------------------------------
// Parameter to Apply and ApplyIf.
enum LK_LOCKTYPE {
    LKL_READLOCK  = 1,  // Lock the table for reading (for constness)
    LKL_WRITELOCK = 2,  // Lock the table for writing
};
#endif // LKR_DEPRECATED_ITERATORS || LKR_APPLY_IF

//--------------------------------------------------------------------
// Global table lock code. This is only used to measure how much of a
// slowdown having a global lock on the CLKRHashTable causes. It is
// *never* used in production code.

// #define LKRHASH_GLOBAL_LOCK CCritSec

#ifdef LKRHASH_GLOBAL_LOCK
# define LKRHASH_GLOBAL_LOCK_DECLARATIONS()         \
    typedef LKRHASH_GLOBAL_LOCK GlobalLock;         \
    mutable GlobalLock m_lkGlobal;

# define LKRHASH_GLOBAL_READ_LOCK()     m_lkGlobal.ReadLock()
# define LKRHASH_GLOBAL_WRITE_LOCK()    m_lkGlobal.WriteLock()
# define LKRHASH_GLOBAL_READ_UNLOCK()   m_lkGlobal.ReadUnlock()
# define LKRHASH_GLOBAL_WRITE_UNLOCK()  m_lkGlobal.WriteUnlock()

#else // !LKRHASH_GLOBAL_LOCK
# define LKRHASH_GLOBAL_LOCK_DECLARATIONS()

// These ones will be optimized away by the compiler
# define LKRHASH_GLOBAL_READ_LOCK()     ((void)0)
# define LKRHASH_GLOBAL_WRITE_LOCK()    ((void)0)
# define LKRHASH_GLOBAL_READ_UNLOCK()   ((void)0)
# define LKRHASH_GLOBAL_WRITE_UNLOCK()  ((void)0)
#endif // !LKRHASH_GLOBAL_LOCK

//--------------------------------------------------------------------
// Statistical information returned by GetStatistics
//--------------------------------------------------------------------

#ifdef LOCK_INSTRUMENTATION
class IRTL_DLLEXP CAveragedLockStats : public CLockStatistics
{
public:
    int m_nItems;

    CAveragedLockStats()
        : m_nItems(1)
    {}
};
#endif // LOCK_INSTRUMENTATION

class IRTL_DLLEXP CLKRHashTableStats
{
public:
    int    RecordCount;         // number of records in the table
    int    TableSize;           // table size in number of slots
    int    DirectorySize;       // number of entries in directory
    int    LongestChain;        // longest hash chain in the table
    int    EmptySlots;          // number of unused hash slots
    double SplitFactor;         // fraction of buckets split
    double AvgSearchLength;     // average length of a successful search
    double ExpSearchLength;     // theoretically expected length
    double AvgUSearchLength;    // average length of an unsuccessful search
    double ExpUSearchLength;    // theoretically expected length
    int    NodeClumpSize;       // number of slots in a node clump
    int    CBucketSize;         // sizeof(CBucket)

#ifdef LOCK_INSTRUMENTATION
    CAveragedLockStats    m_alsTable;      // stats for table lock
    CAveragedLockStats    m_alsBucketsAvg; // avg of stats for bucket locks
    CGlobalLockStatistics m_gls;           // global statistics for all locks
#endif // LOCK_INSTRUMENTATION

    enum {
        MAX_BUCKETS = 40,
    };

    // histogram of bucket lengths
    LONG m_aBucketLenHistogram[MAX_BUCKETS];

    CLKRHashTableStats()
        : RecordCount(0),
          TableSize(0),
          DirectorySize(0),
          LongestChain(0),
          EmptySlots(0),
          SplitFactor(0.0),
          AvgSearchLength(0.0),
          ExpSearchLength(0.0),
          AvgUSearchLength(0.0),
          ExpUSearchLength(0.0),
          NodeClumpSize(1),
          CBucketSize(0)
    {
        for (int i = MAX_BUCKETS; --i >= 0; )
            m_aBucketLenHistogram[i] = 0;
    }

    static const LONG*
    BucketSizes()
    {
        static const LONG s_aBucketSizes[MAX_BUCKETS] = {
             1,  2,  3,  4,  5,  6,  7,  8,  9,
            10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
            20, 21, 22, 23, 24, 25, 30, 40, 50, 60,
            70, 80, 90, 100, 200, 500, 1000, 10000, 100000, LONG_MAX,
        };
        return s_aBucketSizes;
    }

    static LONG
    BucketSize(
        LONG nBucketIndex)
    {
        IRTLASSERT(0 <= nBucketIndex && nBucketIndex < MAX_BUCKETS);
        return BucketSizes()[nBucketIndex];
    }

    static LONG
    BucketIndex(
        LONG nBucketLength)
    {
        const LONG* palBucketSizes = BucketSizes();
        LONG i = 0;
        while (palBucketSizes[i] < nBucketLength)
            ++i;
        if (i == MAX_BUCKETS || palBucketSizes[i] > nBucketLength)
            --i;
        IRTLASSERT(0 <= i && i < MAX_BUCKETS);
        return i;
    }
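
    // Worked example: BucketIndex(27) advances until it reaches the
    // first size >= 27, which is 30; since 30 > 27, it backs up one
    // slot and returns the index of size 25. Chains of length 25..29
    // therefore share the "25" histogram bucket.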
}; // class CLKRHashTableStats

// Use types defined in recent versions of the Platform SDK in basetsd.h.
#ifndef _W64
typedef DWORD DWORD_PTR;    // integral type big enough to hold a pointer
#endif

//--------------------------------------------------------------------
// CLKRLinearHashTable deals with void* records. These typedefs
// provide prototypes for functions that manipulate instances of
// those records. CTypedHashTable and CStringTestHashTable (below) show a
// way to encapsulate these in typesafe wrappers.
//--------------------------------------------------------------------

// Given a record, return its key. Assumes that the key is embedded in
// the record, or at least somehow derivable from the record. For
// completely unrelated keys & values, a wrapper class should use
// something like STL's pair<key,value> template to aggregate them
// into a record.
typedef const DWORD_PTR (WINAPI *PFnExtractKey)  (const void* pvRecord);

// Given a key, return its hash signature. The hashing functions in
// hashfn.h (or something that builds upon them) are suggested.
typedef DWORD           (WINAPI *PFnCalcKeyHash) (const DWORD_PTR pnKey);

// Compare two keys for equality; e.g., _stricmp, memcmp, operator==
typedef bool            (WINAPI *PFnEqualKeys)   (const DWORD_PTR pnKey1,
                                                  const DWORD_PTR pnKey2);

// Increment the reference count of a record before returning it from
// FindKey. It's necessary to do it in FindKey itself, while the bucket
// is still locked, rather than in one of the wrappers, to avoid race
// conditions. Similarly, the reference count is incremented in
// InsertRecord and decremented in DeleteKey. Finally, if an old record
// is overwritten in InsertRecord, its reference count is decremented.
//
// It's up to you to decrement the reference count when you're finished
// with a record retrieved via FindKey, and to determine the semantics
// of what this means. The hashtable itself has no notion of reference
// counts; this is merely to help with the lifetime management of the
// record objects.
typedef void            (WINAPI *PFnAddRefRecord)(const void* pvRecord,
                                                  int nIncr);
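
// A minimal sketch of callbacks satisfying the four typedefs above, for
// records keyed by an embedded case-sensitive string (CRecord and the
// function names are illustrative; HashString is assumed to be one of
// the hashfn.h helpers):
//
//     struct CRecord { const char* pszKey; mutable LONG cRefs; };
//
//     static const DWORD_PTR WINAPI ExtractKey(const void* pvRecord)
//     { return (DWORD_PTR) ((const CRecord*) pvRecord)->pszKey; }
//
//     static DWORD WINAPI CalcKeyHash(const DWORD_PTR pnKey)
//     { return HashString((const char*) pnKey); }
//
//     static bool WINAPI EqualKeys(const DWORD_PTR pnKey1,
//                                  const DWORD_PTR pnKey2)
//     { return strcmp((const char*) pnKey1, (const char*) pnKey2) == 0; }
//
//     static void WINAPI AddRefRecord(const void* pvRecord, int nIncr)
//     { InterlockedExchangeAdd(&((const CRecord*) pvRecord)->cRefs, nIncr); }
//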
#ifdef LKR_APPLY_IF
// ApplyIf() and DeleteIf(): Does the record match the predicate?
typedef LK_PREDICATE (WINAPI *PFnRecordPred)  (const void* pvRecord,
                                               void* pvState);

// Apply() et al: Perform action on record.
typedef LK_ACTION    (WINAPI *PFnRecordAction)(const void* pvRecord,
                                               void* pvState);
#endif // LKR_APPLY_IF

//--------------------------------------------------------------------
// Custom memory allocators
//--------------------------------------------------------------------

#ifndef LKR_NO_ALLOCATORS
# define LKRHASH_ACACHE 1
// # define LKRHASH_MANODEL 1
// # define LKRHASH_MADEL 1
// # define LKRHASH_ROCKALL_FAST 1
// # define LKRHASH_MEM_DEFAULT_ALIGN 32
#endif // !LKR_NO_ALLOCATORS

#ifndef LKRHASH_MEM_DEFAULT_ALIGN
# define LKRHASH_MEM_DEFAULT_ALIGN 8
#endif // !LKRHASH_MEM_DEFAULT_ALIGN

#if defined(LKRHASH_ACACHE)
# include <acache.hxx>
typedef ALLOC_CACHE_HANDLER CLKRhashAllocator;
# define LKRHASH_ALLOCATOR_NEW(C, N)                                \
    const ALLOC_CACHE_CONFIGURATION acc = { 1, N, sizeof(C) };      \
    C::sm_palloc = new ALLOC_CACHE_HANDLER("LKRhash:" #C, &acc);

#elif defined(LKRHASH_ROCKALL_FAST)
# include <FastHeap.hpp>
class FastHeap : public FAST_HEAP
{
public:
    FastHeap(
        SIZE_T cb)
        : m_cb(cb)
    {}

    LPVOID Alloc()
    { return New(m_cb, NULL, false); }

    BOOL Free(LPVOID pvMem)
    { return Delete(pvMem); }

    SIZE_T m_cb;
};
typedef FastHeap CLKRhashAllocator;
# define LKRHASH_ALLOCATOR_NEW(C, N)                                \
    C::sm_palloc = new FastHeap(sizeof(C));

#else // no custom allocator
# undef LKRHASH_ALLOCATOR_NEW
#endif // no custom allocator

#ifdef LKRHASH_ALLOCATOR_NEW
// placed inline in the declaration of class C
# define LKRHASH_ALLOCATOR_DEFINITIONS(C)                           \
protected:                                                          \
    static CLKRhashAllocator* sm_palloc;                            \
    friend class CLKRLinearHashTable;                               \
    friend bool LKRHashTableInit();                                 \
    friend void LKRHashTableUninit();                               \
public:                                                             \
    static void* operator new(size_t s)                             \
    {                                                               \
        UNREFERENCED_PARAMETER(s);                                  \
        IRTLASSERT(s == sizeof(C));                                 \
        IRTLASSERT(sm_palloc != NULL);                              \
        return sm_palloc->Alloc();                                  \
    }                                                               \
    static void operator delete(void* pv)                           \
    {                                                               \
        IRTLASSERT(pv != NULL);                                     \
        IRTLASSERT(sm_palloc != NULL);                              \
        sm_palloc->Free(pv);                                        \
    }

// used in LKRHashTableInit()
# define LKRHASH_ALLOCATOR_INIT(C, N, f)                            \
    {                                                               \
        if (f)                                                      \
        {                                                           \
            IRTLASSERT(C::sm_palloc == NULL);                       \
            LKRHASH_ALLOCATOR_NEW(C, N);                            \
            f = (C::sm_palloc != NULL);                             \
        }                                                           \
    }

// used in LKRHashTableUninit()
# define LKRHASH_ALLOCATOR_UNINIT(C)                                \
    {                                                               \
        if (C::sm_palloc != NULL)                                   \
        {                                                           \
            delete C::sm_palloc;                                    \
            C::sm_palloc = NULL;                                    \
        }                                                           \
    }

#else // !LKRHASH_ALLOCATOR_NEW
# define LKRHASH_ALLOCATOR_DEFINITIONS(C)
# define LKRHASH_ALLOCATOR_INIT(C, N, f)
# define LKRHASH_ALLOCATOR_UNINIT(C)
#endif // !LKRHASH_ALLOCATOR_NEW

#ifndef __LKRHASH_NO_NAMESPACE__
namespace LKRhash {
#endif // !__LKRHASH_NO_NAMESPACE__

//--------------------------------------------------------------------
// forward declarations
class IRTL_DLLEXP CLKRLinearHashTable;
class IRTL_DLLEXP CLKRHashTable;

template <class _Der, class _Rcd, class _Ky, class _HT
#ifdef LKR_DEPRECATED_ITERATORS
          , class _Iter
#endif // LKR_DEPRECATED_ITERATORS
         >
class CTypedHashTable;

// Class for nodes on a bucket chain. Instead of a node containing
// one (signature, record-pointer, next-tuple-pointer) tuple, it
// contains _N_ such tuples. (N-1 next-tuple-pointers are omitted.)
// This improves locality of reference greatly; i.e., it's L1
// cache-friendly. It also reduces memory fragmentation and memory
// allocator overhead. It does complicate the chain traversal code
// slightly, admittedly.
//
// This theory is beautiful. In practice, however, CNodeClumps
// are *not* perfectly aligned on 32-byte boundaries by the memory
// allocators. Experimental results indicate that we get a 2-3%
// speed improvement by using 32-byte-aligned blocks, but this must
// be weighed against the average of 16 bytes wasted per block.

class CNodeClump
{
public:
    // Record slots per chunk - set so a chunk matches (one or
    // two) cache lines: 3 ==> 32 bytes, 7 ==> 64 bytes.
    // Note: the default max load factor is 6.0, which implies that
    // there will seldom be more than one node clump in a chain.
    enum {
        BUCKET_BYTE_SIZE = 64,
        BUCKET_OVERHEAD  = sizeof(LKR_BUCKET_LOCK) + sizeof(CNodeClump*),
        NODE_SIZE        = sizeof(const void*) + sizeof(DWORD),
        NODES_PER_CLUMP  = (BUCKET_BYTE_SIZE - BUCKET_OVERHEAD) / NODE_SIZE
    };
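
    // Worked example, assuming Win32 and a 4-byte LKR_BUCKET_LOCK:
    // NODE_SIZE is 4 + 4 = 8 and BUCKET_OVERHEAD is 4 + 4 = 8, so
    // NODES_PER_CLUMP = (64 - 8) / 8 = 7, matching "7 ==> 64 bytes"
    // above. On Win64, 8-byte pointers give NODE_SIZE = 12 and a larger
    // overhead, so only 4 nodes fit in a 64-byte clump; that is why
    // LK_DFLT_MAXLOAD drops to 4 there.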
    enum {
        // See if countdown loops are faster than countup loops for
        // traversing a CNodeClump. In practice, countup loops are faster.
#ifndef LKR_COUNTDOWN
        NODE_BEGIN = 0,
        NODE_END   = NODES_PER_CLUMP,
        NODE_STEP  = +1,
        // for (int x = 0; x < NODES_PER_CLUMP; ++x) ...
#else // LKR_COUNTDOWN
        NODE_BEGIN = NODES_PER_CLUMP-1,
        NODE_END   = -1,
        NODE_STEP  = -1,
        // for (int x = NODES_PER_CLUMP; --x >= 0; ) ...
#endif // LKR_COUNTDOWN
    };

    enum {
        // No number in 0..2^31-1 maps to this number after it has been
        // scrambled by HashFn::HashRandomizeBits
        HASH_INVALID_SIGNATURE = 31678523,
    };

    DWORD       m_dwKeySigs[NODES_PER_CLUMP]; // hash values computed from keys
    CNodeClump* m_pncNext;                    // next node clump on the chain
    const void* m_pvNode[NODES_PER_CLUMP];    // pointers to records

    CNodeClump()
    {
        Clear();
    }

    void
    Clear()
    {
        m_pncNext = NULL;   // no dangling pointers
        for (int i = NODES_PER_CLUMP; --i >= 0; )
        {
            m_dwKeySigs[i] = HASH_INVALID_SIGNATURE;
            m_pvNode[i]    = NULL;
        }
    }

    bool
    InvalidSignature(
        int i) const
    {
        IRTLASSERT(0 <= i && i < NODES_PER_CLUMP);
        return (m_dwKeySigs[i] == HASH_INVALID_SIGNATURE);
    }

    bool
    IsEmptyNode(
        int i) const
    {
        IRTLASSERT(0 <= i && i < NODES_PER_CLUMP);
        return (m_pvNode[i] == NULL);
    }

    bool
    IsEmptyAndInvalid(
        int i) const
    {
        return IsEmptyNode(i) && InvalidSignature(i);
    }

    bool
    IsEmptySlot(
        int i) const
    {
        return InvalidSignature(i);
    }

    bool
    IsLastClump() const
    {
        return (m_pncNext == NULL);
    }

#ifdef IRTLDEBUG
    // Don't want overhead of calls to dtor in retail build
    ~CNodeClump()
    {
        IRTLASSERT(IsLastClump()); // no dangling pointers
        for (int i = NODES_PER_CLUMP; --i >= 0; )
            IRTLASSERT(InvalidSignature(i) && IsEmptyNode(i));
    }
#endif // IRTLDEBUG

    LKRHASH_ALLOCATOR_DEFINITIONS(CNodeClump);
}; // class CNodeClump

// Class for bucket chains of the hash table. Note that the first
// nodeclump is actually included in the bucket and not dynamically
// allocated, which increases space requirements slightly but does
// improve performance.

class CBucket
{
private:
    typedef LKR_BUCKET_LOCK BucketLock;

    mutable BucketLock m_Lock;  // lock protecting this bucket

#ifdef LOCK_INSTRUMENTATION
    static LONG sm_cBuckets;

    static const char*
    _LockName()
    {
        LONG l = ++sm_cBuckets;
        // possible race condition but we don't care, as this is never
        // used in production code
        static char s_szName[CLockStatistics::L_NAMELEN];
        wsprintf(s_szName, "B%06x", 0xFFFFFF & l);
        return s_szName;
    }
#endif // LOCK_INSTRUMENTATION

public:
    CNodeClump m_ncFirst;   // first CNodeClump of this bucket

#if defined(LOCK_INSTRUMENTATION) || defined(IRTLDEBUG)
    CBucket()
#ifdef LOCK_INSTRUMENTATION
        : m_Lock(_LockName())
#endif // LOCK_INSTRUMENTATION
    {
#ifdef IRTLDEBUG
        LOCK_LOCKTYPE lt = BucketLock::LockType();
        if (lt == LOCK_SPINLOCK || lt == LOCK_FAKELOCK)
            IRTLASSERT(sizeof(*this) <= 64);
#endif // IRTLDEBUG
    }
#endif // LOCK_INSTRUMENTATION || IRTLDEBUG

    void WriteLock()               { m_Lock.WriteLock(); }
    void ReadLock() const          { m_Lock.ReadLock(); }
    void WriteUnlock() const       { m_Lock.WriteUnlock(); }
    void ReadUnlock() const        { m_Lock.ReadUnlock(); }
    bool IsWriteLocked() const     { return m_Lock.IsWriteLocked(); }
    bool IsReadLocked() const      { return m_Lock.IsReadLocked(); }
    bool IsWriteUnlocked() const   { return m_Lock.IsWriteUnlocked(); }
    bool IsReadUnlocked() const    { return m_Lock.IsReadUnlocked(); }
    void SetSpinCount(WORD wSpins) { m_Lock.SetSpinCount(wSpins); }
    WORD GetSpinCount() const      { return m_Lock.GetSpinCount(); }

#ifdef LOCK_INSTRUMENTATION
    CLockStatistics LockStats() const { return m_Lock.Statistics(); }
#endif // LOCK_INSTRUMENTATION
}; // class CBucket

// The hash table space is divided into fixed-size segments (arrays of
// CBuckets) and physically grows/shrinks one segment at a time.
//
// We provide small, medium, and large segments to better tune the
// overall memory requirements of the hash table according to the
// expected usage of an instance.

class CSegment
{
public:
    CBucket m_bktSlots[1];

    // See note at m_bktSlots2 in CSmallSegment below
    CBucket& Slot(DWORD i)
    { return m_bktSlots[i]; }
}; // class CSegment

// Small-sized segments contain 2^3 = 8 buckets => ~0.5Kb
class CSmallSegment : public CSegment
{
public:
    // Maximum table size equals MAX_DIRSIZE * SEGSIZE buckets.
    enum {
        SEGBITS  = 3,           // number of bits extracted from a hash
                                // address for offset within a segment
        SEGSIZE  = (1<<SEGBITS),// segment size
        SEGMASK  = (SEGSIZE-1), // mask used for extracting offset bit
        INITSIZE = 1 * SEGSIZE, // #segments to allocate initially
    };

    // Hack: assumes this lies immediately after CSegment::m_bktSlots,
    // with no padding. The STATIC_ASSERT in _AllocateSegment should
    // cause a compile-time error if this assumption is false.
    CBucket m_bktSlots2[SEGSIZE-1];

public:
    DWORD Bits() const     { return SEGBITS; }
    DWORD Size() const     { return SEGSIZE; }
    DWORD Mask() const     { return SEGMASK; }
    DWORD InitSize() const { return INITSIZE; }

#ifdef IRTLDEBUG
    CSmallSegment()
    {
        IRTLASSERT(&Slot(1) == m_bktSlots2);
        IRTLASSERT(((DWORD_PTR)this & (LKRHASH_MEM_DEFAULT_ALIGN-1)) == 0);
        IRTLASSERT(sizeof(*this) == SEGSIZE * sizeof(CBucket));
    }
#endif // IRTLDEBUG

    LKRHASH_ALLOCATOR_DEFINITIONS(CSmallSegment);
}; // class CSmallSegment

// Medium-sized segments contain 2^6 = 64 buckets => ~4Kb
class CMediumSegment : public CSegment
{
public:
    enum {
        SEGBITS  = 6,
        SEGSIZE  = (1<<SEGBITS),
        SEGMASK  = (SEGSIZE-1),
        INITSIZE = 2 * SEGSIZE,
    };
    CBucket m_bktSlots2[SEGSIZE-1];

public:
    DWORD Bits() const     { return SEGBITS; }
    DWORD Size() const     { return SEGSIZE; }
    DWORD Mask() const     { return SEGMASK; }
    DWORD InitSize() const { return INITSIZE; }

#ifdef IRTLDEBUG
    CMediumSegment()
    {
        IRTLASSERT(&Slot(1) == m_bktSlots2);
        IRTLASSERT(((DWORD_PTR)this & (LKRHASH_MEM_DEFAULT_ALIGN-1)) == 0);
        IRTLASSERT(sizeof(*this) == SEGSIZE * sizeof(CBucket));
    }
#endif // IRTLDEBUG

    LKRHASH_ALLOCATOR_DEFINITIONS(CMediumSegment);
}; // class CMediumSegment

// Large-sized segments contain 2^9 = 512 buckets => ~32Kb
class CLargeSegment : public CSegment
{
public:
    enum {
        SEGBITS  = 9,
        SEGSIZE  = (1<<SEGBITS),
        SEGMASK  = (SEGSIZE-1),
        INITSIZE = 4 * SEGSIZE,
    };
    CBucket m_bktSlots2[SEGSIZE-1];

public:
    DWORD Bits() const     { return SEGBITS; }
    DWORD Size() const     { return SEGSIZE; }
    DWORD Mask() const     { return SEGMASK; }
    DWORD InitSize() const { return INITSIZE; }

#ifdef IRTLDEBUG
    CLargeSegment()
    {
        IRTLASSERT(&Slot(1) == m_bktSlots2);
        IRTLASSERT(((DWORD_PTR)this & (LKRHASH_MEM_DEFAULT_ALIGN-1)) == 0);
        IRTLASSERT(sizeof(*this) == SEGSIZE * sizeof(CBucket));
    }
#endif // IRTLDEBUG

    LKRHASH_ALLOCATOR_DEFINITIONS(CLargeSegment);
}; // class CLargeSegment

// A directory keeps track of the segments comprising the hash table.
// The directory is just a variable-sized array of pointers to
// segments (CDirEntrys).

class CDirEntry
{
public:
    // MIN_DIRSIZE and MAX_DIRSIZE can be changed independently
    // of anything else. Should be powers of two.
    enum {
        MIN_DIRSIZE = (1<<3),   // minimum directory size
        MAX_DIRSIZE = (1<<20),  // maximum directory size
    };

    CSegment* m_pseg;

    CDirEntry()
        : m_pseg(NULL)
    {}

    ~CDirEntry()
    { delete m_pseg; }
}; // class CDirEntry

#ifdef LKR_STL_ITERATORS

class IRTL_DLLEXP CLKRLinearHashTable_Iterator;
class IRTL_DLLEXP CLKRHashTable_Iterator;

class IRTL_DLLEXP CLKRLinearHashTable_Iterator
{
    friend class CLKRLinearHashTable;
    friend class CLKRHashTable;
    friend class CLKRHashTable_Iterator;

protected:
    CLKRLinearHashTable* m_plht;         // which linear hash table?
    CNodeClump*          m_pnc;          // a CNodeClump in bucket
    DWORD                m_dwBucketAddr; // bucket index
    short                m_iNode;        // offset within m_pnc

    enum {
        NODES_PER_CLUMP = CNodeClump::NODES_PER_CLUMP,
        NODE_BEGIN      = CNodeClump::NODE_BEGIN,
        NODE_END        = CNodeClump::NODE_END,
        NODE_STEP       = CNodeClump::NODE_STEP,
    };

    CLKRLinearHashTable_Iterator(
        CLKRLinearHashTable* plht,
        CNodeClump*          pnc,
        DWORD                dwBucketAddr,
        short                iNode)
        : m_plht(plht),
          m_pnc(pnc),
          m_dwBucketAddr(dwBucketAddr),
          m_iNode(iNode)
    {
        LKR_ITER_TRACE(_TEXT(" LKLH::prot ctor, this=%p, plht=%p, ")
                       _TEXT("pnc=%p, ba=%d, in=%d\n"),
                       this, plht, pnc, dwBucketAddr, iNode);
    }

    inline void _AddRef(
        int nIncr) const;

    bool _Increment(
        bool fDecrementOldValue=true);

public:
    CLKRLinearHashTable_Iterator()
        : m_plht(NULL),
          m_pnc(NULL),
          m_dwBucketAddr(0),
          m_iNode(0)
    {
        LKR_ITER_TRACE(_TEXT(" LKLH::default ctor, this=%p\n"), this);
    }

    CLKRLinearHashTable_Iterator(
        const CLKRLinearHashTable_Iterator& rhs)
        : m_plht(rhs.m_plht),
          m_pnc(rhs.m_pnc),
          m_dwBucketAddr(rhs.m_dwBucketAddr),
          m_iNode(rhs.m_iNode)
    {
        LKR_ITER_TRACE(_TEXT(" LKLH::copy ctor, this=%p, rhs=%p\n"),
                       this, &rhs);
        _AddRef(+1);
    }

    CLKRLinearHashTable_Iterator& operator=(
        const CLKRLinearHashTable_Iterator& rhs)
    {
        LKR_ITER_TRACE(_TEXT(" LKLH::operator=, this=%p, rhs=%p\n"),
                       this, &rhs);
        rhs._AddRef(+1);
        this->_AddRef(-1);

        m_plht         = rhs.m_plht;
        m_pnc          = rhs.m_pnc;
        m_dwBucketAddr = rhs.m_dwBucketAddr;
        m_iNode        = rhs.m_iNode;

        return *this;
    }

    ~CLKRLinearHashTable_Iterator()
    {
        LKR_ITER_TRACE(_TEXT(" LKLH::dtor, this=%p, plht=%p\n"),
                       this, m_plht);
        _AddRef(-1);
    }

    bool Increment()
    {
        return IsValid() ? _Increment() : false;
    }

    bool IsValid() const
    {
        bool fValid = (m_plht != NULL && m_pnc != NULL
                       && 0 <= m_iNode && m_iNode < NODES_PER_CLUMP);
        if (fValid)
            fValid = (m_pnc->m_pvNode[m_iNode] != NULL);
        IRTLASSERT(fValid);
        return fValid;
    }

    const void* Record() const
    {
        IRTLASSERT(IsValid());
        return m_pnc->m_pvNode[m_iNode];
    }

    inline const DWORD_PTR Key() const;

    bool operator==(
        const CLKRLinearHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT(" LKLH::operator==, this=%p, rhs=%p\n"),
                       this, &rhs);
        // m_pnc and m_iNode uniquely identify an iterator
        bool fEQ = ((m_pnc == rhs.m_pnc)    // most unique field
                    && (m_iNode == rhs.m_iNode));
        IRTLASSERT(!fEQ || ((m_plht == rhs.m_plht)
                            && (m_dwBucketAddr == rhs.m_dwBucketAddr)));
        return fEQ;
    }

    bool operator!=(
        const CLKRLinearHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT(" LKLH::operator!=, this=%p, rhs=%p\n"),
                       this, &rhs);
        bool fNE = ((m_pnc != rhs.m_pnc)
                    || (m_iNode != rhs.m_iNode));
        //// IRTLASSERT(fNE == !this->operator==(rhs));
        return fNE;
    }
}; // class CLKRLinearHashTable_Iterator

class IRTL_DLLEXP CLKRHashTable_Iterator
{
    friend class CLKRHashTable;

protected:
    // order important to minimize size
    CLKRHashTable*               m_pht;     // which hash table?
    CLKRLinearHashTable_Iterator m_subiter; // iterator into subtable
    short                        m_ist;     // index of subtable

    CLKRHashTable_Iterator(
        CLKRHashTable* pht,
        short          ist)
        : m_pht(pht),
          m_subiter(CLKRLinearHashTable_Iterator()), // zero
          m_ist(ist)
    {
        LKR_ITER_TRACE(_TEXT(" LKHT::prot ctor, this=%p, pht=%p, ist=%d\n"),
                       this, pht, ist);
    }

    bool _Increment(
        bool fDecrementOldValue=true);

public:
    CLKRHashTable_Iterator()
        : m_pht(NULL),
          m_subiter(CLKRLinearHashTable_Iterator()), // zero
          m_ist(0)
    {
        LKR_ITER_TRACE(_TEXT(" LKHT::default ctor, this=%p\n"), this);
    }

#ifdef IRTLDEBUG
    // Compiler does a perfectly adequate job of synthesizing these
    // methods.
    CLKRHashTable_Iterator(
        const CLKRHashTable_Iterator& rhs)
        : m_pht(rhs.m_pht),
          m_subiter(rhs.m_subiter),
          m_ist(rhs.m_ist)
    {
        LKR_ITER_TRACE(_TEXT(" LKHT::copy ctor, this=%p, rhs=%p\n"),
                       this, &rhs);
    }

    CLKRHashTable_Iterator& operator=(
        const CLKRHashTable_Iterator& rhs)
    {
        LKR_ITER_TRACE(_TEXT(" LKHT::operator=, this=%p, rhs=%p\n"),
                       this, &rhs);
        m_ist     = rhs.m_ist;
        m_subiter = rhs.m_subiter;
        m_pht     = rhs.m_pht;
        return *this;
    }

    ~CLKRHashTable_Iterator()
    {
        LKR_ITER_TRACE(_TEXT(" LKHT::dtor, this=%p, pht=%p\n"), this, m_pht);
    }
#endif // IRTLDEBUG

    bool Increment()
    {
        return IsValid() ? _Increment() : false;
    }

    bool IsValid() const
    {
        bool fValid = (m_pht != NULL && m_ist >= 0);
        IRTLASSERT(fValid);
        fValid = fValid && (m_subiter.m_plht != NULL);
        IRTLASSERT(fValid);
        fValid = fValid && (m_subiter.m_pnc != NULL);
        IRTLASSERT(fValid);
        fValid = fValid && (0 <= m_subiter.m_iNode);
        IRTLASSERT(fValid);
        fValid = fValid && (m_subiter.m_iNode < CNodeClump::NODES_PER_CLUMP);
        IRTLASSERT(fValid);
        if (fValid)
            fValid = (m_subiter.m_pnc->m_pvNode[m_subiter.m_iNode] != NULL);
        IRTLASSERT(fValid);
        return fValid;
    }

    const void* Record() const
    {
        IRTLASSERT(IsValid());
        return m_subiter.Record();
    }

    const DWORD_PTR Key() const
    {
        IRTLASSERT(IsValid());
        return m_subiter.Key();
    }

    bool operator==(
        const CLKRHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT(" LKHT::operator==, this=%p, rhs=%p\n"),
                       this, &rhs);
        // m_pnc and m_iNode uniquely identify an iterator
        bool fEQ = ((m_subiter.m_pnc
                     == rhs.m_subiter.m_pnc)    // most unique field
                    && (m_subiter.m_iNode == rhs.m_subiter.m_iNode));
        IRTLASSERT(!fEQ
                   || ((m_ist == rhs.m_ist)
                       && (m_pht == rhs.m_pht)
                       && (m_subiter.m_plht == rhs.m_subiter.m_plht)
                       && (m_subiter.m_dwBucketAddr
                           == rhs.m_subiter.m_dwBucketAddr)));
        return fEQ;
    }

    bool operator!=(
        const CLKRHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT(" LKHT::operator!=, this=%p, rhs=%p\n"),
                       this, &rhs);
        bool fNE = ((m_subiter.m_pnc != rhs.m_subiter.m_pnc)
                    || (m_subiter.m_iNode != rhs.m_subiter.m_iNode));
        //// IRTLASSERT(fNE == !this->operator==(rhs));
        return fNE;
    }
}; // class CLKRHashTable_Iterator

#endif // LKR_STL_ITERATORS

//--------------------------------------------------------------------
// CLKRLinearHashTable
//
// A thread-safe linear hash table.
//--------------------------------------------------------------------

class IRTL_DLLEXP CLKRLinearHashTable
{
public:
    typedef LKR_TABLE_LOCK  TableLock;
    typedef LKR_BUCKET_LOCK BucketLock;

#ifdef LKR_DEPRECATED_ITERATORS
    class CIterator;
    friend class CLKRLinearHashTable::CIterator;
#endif // LKR_DEPRECATED_ITERATORS

#ifdef LKR_STL_ITERATORS
    friend class CLKRLinearHashTable_Iterator;
    typedef CLKRLinearHashTable_Iterator Iterator;
#endif // LKR_STL_ITERATORS

private:
    friend class CNodeClump;
    friend class CLKRHashTable;

#ifdef LKRHASH_ALLOCATOR_NEW
    friend bool LKRHashTableInit();
    friend void LKRHashTableUninit();
#endif // LKRHASH_ALLOCATOR_NEW

#ifdef LKRHASH_INSTRUMENTATION
    // TODO
#endif // LKRHASH_INSTRUMENTATION

public:
    // aliases for convenience
    enum {
        NODES_PER_CLUMP = CNodeClump::NODES_PER_CLUMP,
        MIN_DIRSIZE     = CDirEntry::MIN_DIRSIZE,
        MAX_DIRSIZE     = CDirEntry::MAX_DIRSIZE,
        NAME_SIZE       = 16,
        NODE_BEGIN      = CNodeClump::NODE_BEGIN,
        NODE_END        = CNodeClump::NODE_END,
        NODE_STEP       = CNodeClump::NODE_STEP,
        HASH_INVALID_SIGNATURE = CNodeClump::HASH_INVALID_SIGNATURE,
    };

private:
    //
    // Miscellaneous helper functions
    //

    // Convert a hash signature to a bucket address
    inline DWORD _BucketAddress(DWORD dwSignature) const
    {
        DWORD dwBktAddr = _H0(dwSignature);
        // Has this bucket been split already?
        if (dwBktAddr < m_iExpansionIdx)
            dwBktAddr = _H1(dwSignature);
        IRTLASSERT(dwBktAddr < m_cActiveBuckets);
        IRTLASSERT(dwBktAddr < (m_cDirSegs << m_dwSegBits));
        return dwBktAddr;
    }

    // See the Linear Hashing paper
    static DWORD _H0(DWORD dwSignature, DWORD dwBktAddrMask)
    { return dwSignature & dwBktAddrMask; }

    DWORD _H0(DWORD dwSignature) const
    { return _H0(dwSignature, m_dwBktAddrMask0); }

    // See the Linear Hashing paper. Preserves one bit more than _H0.
    static DWORD _H1(DWORD dwSignature, DWORD dwBktAddrMask)
    { return dwSignature & ((dwBktAddrMask << 1) | 1); }

    DWORD _H1(DWORD dwSignature) const
    { return _H0(dwSignature, m_dwBktAddrMask1); }
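
    // (The member _H1 can delegate to _H0 because m_dwBktAddrMask1 is
    // evidently maintained as ((m_dwBktAddrMask0 << 1) | 1), i.e. the
    // mask the static _H1 would compute.)
    //
    // Worked example with m_dwBktAddrMask0 == 0xFF and
    // m_iExpansionIdx == 36: a signature of 0x1234 gives
    // _H0 == 0x34 (52); 52 >= 36 means bucket 52 has not yet been
    // split, so 52 is the final address. A signature of 0x0112 gives
    // _H0 == 0x12 (18) < 36, so the extra bit is consulted:
    // _H1 == 0x0112 & 0x1FF == 0x112 (274), the new image of bucket 18.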
    // In which segment within the directory does the bucketaddress lie?
    // (Return type must be lvalue so that it can be assigned to.)
    CSegment*& _Segment(DWORD dwBucketAddr) const
    {
        const DWORD iSeg = dwBucketAddr >> m_dwSegBits;
        IRTLASSERT(m_paDirSegs != NULL && iSeg < m_cDirSegs);
        return m_paDirSegs[iSeg].m_pseg;
    }

    // Offset within the segment of the bucketaddress
    DWORD _SegIndex(DWORD dwBucketAddr) const
    { return (dwBucketAddr & m_dwSegMask); }

    // Convert a bucketaddress to a CBucket*
    inline CBucket* _Bucket(DWORD dwBucketAddr) const
    {
        IRTLASSERT(dwBucketAddr < m_cActiveBuckets);
        CSegment* const pseg = _Segment(dwBucketAddr);
        IRTLASSERT(pseg != NULL);
        return &(pseg->Slot(_SegIndex(dwBucketAddr)));
    }
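
    // Example with medium segments (SEGBITS == 6, SEGSIZE == 64):
    // bucket address 274 lives in segment 274 >> 6 == 4, at offset
    // 274 & 63 == 18, so _Bucket(274) yields
    // &m_paDirSegs[4].m_pseg->Slot(18).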
    // Extract the key from a record
    const DWORD_PTR _ExtractKey(const void* pvRecord) const
    {
        IRTLASSERT(pvRecord != NULL);
        IRTLASSERT(m_pfnExtractKey != NULL);
        return (*m_pfnExtractKey)(pvRecord);
    }

    // Hash the key
    DWORD _CalcKeyHash(const DWORD_PTR pnKey) const
    {
        // Note pnKey==0 is acceptable, as the real key type could be an int
        IRTLASSERT(m_pfnCalcKeyHash != NULL);
        DWORD dwHash = (*m_pfnCalcKeyHash)(pnKey);
        // We forcibly scramble the result to help ensure a better distribution
#ifndef __HASHFN_NO_NAMESPACE__
        dwHash = HashFn::HashRandomizeBits(dwHash);
#else  // __HASHFN_NO_NAMESPACE__
        dwHash = ::HashRandomizeBits(dwHash);
#endif // __HASHFN_NO_NAMESPACE__
        IRTLASSERT(dwHash != HASH_INVALID_SIGNATURE);
        return dwHash;
    }

    // Compare two keys for equality
    bool _EqualKeys(const DWORD_PTR pnKey1, const DWORD_PTR pnKey2) const
    {
        IRTLASSERT(m_pfnEqualKeys != NULL);
        return (*m_pfnEqualKeys)(pnKey1, pnKey2);
    }

    // AddRef or Release a record.
    void _AddRefRecord(const void* pvRecord, int nIncr) const
    {
        IRTLASSERT(pvRecord != NULL && (nIncr == -1 || nIncr == +1));
        IRTLASSERT(m_pfnAddRefRecord != NULL);
        (*m_pfnAddRefRecord)(pvRecord, nIncr);
    }

    // Find a bucket, given its signature.
    CBucket* _FindBucket(DWORD dwSignature, bool fLockForWrite) const;

    // Used by _FindKey so that the thread won't deadlock if the user has
    // already explicitly called table->WriteLock().
    bool _ReadOrWriteLock() const
    { return m_Lock.ReadOrWriteLock(); }

    void _ReadOrWriteUnlock(bool fReadLocked) const
    { m_Lock.ReadOrWriteUnlock(fReadLocked); }

    // Memory allocation wrappers to allow us to simulate allocation
    // failures during testing
    static CDirEntry* const
    _AllocateSegmentDirectory(
        size_t n);

    bool
    _FreeSegmentDirectory();

    static CNodeClump* const
    _AllocateNodeClump();

    static bool
    _FreeNodeClump(
        CNodeClump* pnc);

    CSegment* const
    _AllocateSegment() const;

    bool
    _FreeSegment(
        CSegment* pseg) const;

#ifdef LOCK_INSTRUMENTATION
    static LONG sm_cTables;

    static const char*
    _LockName()
    {
        LONG l = ++sm_cTables;
        // possible race condition but we don't care, as this is never
        // used in production code
        static char s_szName[CLockStatistics::L_NAMELEN];
        wsprintf(s_szName, "LH%05x", 0xFFFFF & l);
        return s_szName;
    }

    // Statistics for the table lock
    CLockStatistics _LockStats() const
    { return m_Lock.Statistics(); }
#endif // LOCK_INSTRUMENTATION

private:
    // Fields are ordered so as to minimize number of cache lines touched

    DWORD           m_dwSignature;      // debugging: id & corruption check
    CHAR            m_szName[NAME_SIZE];// an identifier for debugging
    mutable LK_RETCODE m_lkrcState;     // Internal state of table
    mutable TableLock  m_Lock;          // Lock on entire linear hash table

    // type-specific function pointers
    PFnExtractKey   m_pfnExtractKey;    // Extract key from record
    PFnCalcKeyHash  m_pfnCalcKeyHash;   // Calculate hash signature of key
    PFnEqualKeys    m_pfnEqualKeys;     // Compare two keys
    PFnAddRefRecord m_pfnAddRefRecord;  // AddRef a record

    LK_TABLESIZE    m_lkts;             // "size" of table: small, medium, large
    DWORD           m_dwSegBits;        // C{Small,Medium,Large}Segment::SEGBITS
    DWORD           m_dwSegSize;        // C{Small,Medium,Large}Segment::SEGSIZE
    DWORD           m_dwSegMask;        // C{Small,Medium,Large}Segment::SEGMASK
    double          m_MaxLoad;          // max load factor (average chain length)
    DWORD           m_dwBktAddrMask0;   // mask used for address calculation
    DWORD           m_dwBktAddrMask1;   // used in _H1 calculation
    DWORD           m_iExpansionIdx;    // address of next bucket to be expanded
    CDirEntry*      m_paDirSegs;        // directory of table segments
    DWORD           m_nLevel;           // number of table doublings performed
    DWORD           m_cDirSegs;         // segment directory size: varies between
                                        // MIN_DIRSIZE and MAX_DIRSIZE
    DWORD           m_cRecords;         // number of records in the table
    DWORD           m_cActiveBuckets;   // number of buckets in use (table size)
    WORD            m_wBucketLockSpins; // default spin count for bucket locks
    const BYTE      m_nTableLockType;   // for debugging: LOCK_SPINLOCK, etc
    const BYTE      m_nBucketLockType;  // for debugging: LOCK_SPINLOCK, etc
    const CLKRHashTable* const m_phtParent; // Owning table. NULL => standalone
    const bool      m_fMultiKeys;       // Allow multiple identical keys?

#ifndef LKR_NO_GLOBAL_LIST
    static CLockedDoubleList sm_llGlobalList; // All active CLKRLinearHashTables
    CListEntry               m_leGlobalList;
#endif // !LKR_NO_GLOBAL_LIST

    void _InsertThisIntoGlobalList()
    {
#ifndef LKR_NO_GLOBAL_LIST
        // Only add standalone CLKRLinearHashTables to the global list.
        // CLKRHashTables have their own global list.
        if (m_phtParent == NULL)
            sm_llGlobalList.InsertHead(&m_leGlobalList);
#endif // !LKR_NO_GLOBAL_LIST
    }

    void _RemoveThisFromGlobalList()
    {
#ifndef LKR_NO_GLOBAL_LIST
        if (m_phtParent == NULL)
            sm_llGlobalList.RemoveEntry(&m_leGlobalList);
#endif // !LKR_NO_GLOBAL_LIST
    }

    // Non-trivial implementation functions
    LK_RETCODE _InsertRecord(const void* pvRecord, DWORD dwSignature,
                             bool fOverwrite
#ifdef LKR_STL_ITERATORS
                             , Iterator* piterResult=NULL
#endif // LKR_STL_ITERATORS
                             );
    LK_RETCODE _DeleteKey(const DWORD_PTR pnKey, DWORD dwSignature);
    LK_RETCODE _DeleteRecord(const void* pvRecord, DWORD dwSignature);
    bool       _DeleteNode(CBucket* pbkt, CNodeClump*& rpnc,
                           CNodeClump*& rpncPrev, int& riNode);
    LK_RETCODE _FindKey(const DWORD_PTR pnKey, DWORD dwSignature,
                        const void** ppvRecord
#ifdef LKR_STL_ITERATORS
                        , Iterator* piterResult=NULL
#endif // LKR_STL_ITERATORS
                        ) const;
    LK_RETCODE _FindRecord(const void* pvRecord, DWORD dwSignature) const;

    // returns count of errors in compacted state => 0 is good
    int _IsNodeCompact(CBucket* const pbkt) const;

#ifdef LKR_APPLY_IF
    // Predicate functions
    static LK_PREDICATE WINAPI
    _PredTrue(const void* /*pvRecord*/, void* /*pvState*/)
    { return LKP_PERFORM; }

    DWORD _Apply(PFnRecordAction pfnAction, void* pvState,
                 LK_LOCKTYPE lkl, LK_PREDICATE& rlkp);
    DWORD _ApplyIf(PFnRecordPred pfnPredicate,
                   PFnRecordAction pfnAction, void* pvState,
                   LK_LOCKTYPE lkl, LK_PREDICATE& rlkp);
    DWORD _DeleteIf(PFnRecordPred pfnPredicate, void* pvState,
                    LK_PREDICATE& rlkp);
#endif // LKR_APPLY_IF

    void _Clear(bool fShrinkDirectory);

    LK_RETCODE _SetSegVars(LK_TABLESIZE lkts, DWORD cInitialBuckets);
    LK_RETCODE _Expand();
    LK_RETCODE _Contract();
    LK_RETCODE _SplitRecordSet(CNodeClump* pncOldTarget,
                               CNodeClump* pncNewTarget,
                               DWORD       iExpansionIdx,
                               DWORD       dwBktAddrMask,
                               DWORD       dwNewBkt,
                               CNodeClump* pncFreeList);
    LK_RETCODE _MergeRecordSets(CBucket*    pbktNewTarget,
                                CNodeClump* pncOldList,
                                CNodeClump* pncFreeList);

    // Private copy ctor and op= to prevent compiler synthesizing them.
    // TODO: implement these properly; they could be useful.
    CLKRLinearHashTable(const CLKRLinearHashTable&);
    CLKRLinearHashTable& operator=(const CLKRLinearHashTable&);

private:
    // This ctor is used by CLKRHashTable
    CLKRLinearHashTable(
        LPCSTR          pszName,        // An identifier for debugging
        PFnExtractKey   pfnExtractKey,  // Extract key from record
        PFnCalcKeyHash  pfnCalcKeyHash, // Calculate hash signature of key
        PFnEqualKeys    pfnEqualKeys,   // Compare two keys
        PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
        double          maxload,        // Upperbound on average chain length
        DWORD           initsize,       // Initial size of hash table.
        CLKRHashTable*  phtParent,      // Owning table.
        bool            fMultiKeys      // Allow multiple identical keys?
        );

    LK_RETCODE
    _Initialize(
        PFnExtractKey   pfnExtractKey,
        PFnCalcKeyHash  pfnCalcKeyHash,
        PFnEqualKeys    pfnEqualKeys,
        PFnAddRefRecord pfnAddRefRecord,
        LPCSTR          pszName,
        double          maxload,
        DWORD           initsize);

public:
    CLKRLinearHashTable(
        LPCSTR          pszName,        // An identifier for debugging
        PFnExtractKey   pfnExtractKey,  // Extract key from record
        PFnCalcKeyHash  pfnCalcKeyHash, // Calculate hash signature of key
        PFnEqualKeys    pfnEqualKeys,   // Compare two keys
        PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
        double          maxload=LK_DFLT_MAXLOAD,   // Upperbound on average
                                                   // chain length
        DWORD           initsize=LK_DFLT_INITSIZE, // Initial size of table
        DWORD           num_subtbls=LK_DFLT_NUM_SUBTBLS, // for signature
                                        // compatibility with CLKRHashTable
        bool            fMultiKeys=false // Allow multiple identical keys?
        );

    ~CLKRLinearHashTable();

    static const TCHAR* ClassName()
    { return _TEXT("CLKRLinearHashTable"); }

    int NumSubTables() const
    { return 1; }

    bool MultiKeys() const
    {
        return false;
        // return m_fMultiKeys; // TODO: implement
    }

    static LK_TABLESIZE NumSubTables(DWORD& rinitsize, DWORD& rnum_subtbls);

    // Insert a new record into the hash table.
    // Returns LK_SUCCESS if all OK, LK_KEY_EXISTS if the same key is
    // already present (unless fOverwrite), LK_ALLOC_FAIL if out of space,
    // or LK_BAD_RECORD for a bad record.
    LK_RETCODE InsertRecord(const void* pvRecord, bool fOverwrite=false)
    {
        if (!IsUsable())
            return m_lkrcState;
        if (pvRecord == NULL)
            return LK_BAD_RECORD;
        return _InsertRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)),
                             fOverwrite);
    }

    // Delete the record with the given key.
    // Returns LK_SUCCESS if all OK, or LK_NO_SUCH_KEY if not found
    LK_RETCODE DeleteKey(const DWORD_PTR pnKey)
    {
        if (!IsUsable())
            return m_lkrcState;
        return _DeleteKey(pnKey, _CalcKeyHash(pnKey));
    }

    // Delete a record from the table, if present.
    // Returns LK_SUCCESS if all OK, or LK_NO_SUCH_KEY if not found
    LK_RETCODE DeleteRecord(const void* pvRecord)
    {
        if (!IsUsable())
            return m_lkrcState;
        if (pvRecord == NULL)
            return LK_BAD_RECORD;
        return _DeleteRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)));
    }

    // Find the record with the given key.
    // Returns: LK_SUCCESS, if record found (record is returned in *ppvRecord)
    //          LK_BAD_RECORD, if ppvRecord is invalid
    //          LK_NO_SUCH_KEY, if no record with the given key was found
    //          LK_UNUSABLE, if hash table not in usable state
    // Note: the record is AddRef'd. You must decrement the reference
    // count when you are finished with the record (if you're implementing
    // refcounting semantics).
    LK_RETCODE FindKey(const DWORD_PTR pnKey,
                       const void** ppvRecord) const
    {
        if (!IsUsable())
            return m_lkrcState;
        if (ppvRecord == NULL)
            return LK_BAD_RECORD;
        return _FindKey(pnKey, _CalcKeyHash(pnKey), ppvRecord);
    }
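
    // A minimal usage sketch (CMyRecord and its Release method are
    // illustrative, standing in for whatever refcounting scheme the
    // table's PFnAddRefRecord implements):
    //
    //     const void* pv = NULL;
    //     if (lkht.FindKey(reinterpret_cast<DWORD_PTR>(pszKey), &pv)
    //             == LK_SUCCESS)
    //     {
    //         const CMyRecord* pRec = static_cast<const CMyRecord*>(pv);
    //         // ... use pRec ...
    //         pRec->Release();  // undo the AddRef done inside FindKey
    //     }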
  1350. // Sees if the record is contained in the table
  1351. // Returns: LK_SUCCESS, if record found
  1352. // LK_BAD_RECORD, if pvRecord is invalid
  1353. // LK_NO_SUCH_KEY, if record is not in the table
  1354. // LK_UNUSABLE, if hash table not in usable state
  1355. // Note: the record is *not* AddRef'd.
  1356. LK_RETCODE FindRecord(const void* pvRecord) const
  1357. {
  1358. if (!IsUsable())
  1359. return m_lkrcState;
  1360. if (pvRecord == NULL)
  1361. return LK_BAD_RECORD;
  1362. return _FindRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)));
  1363. }
#ifdef LKR_APPLY_IF
    // Walk the hash table, applying pfnAction to all records.
    // Locks the whole table for the duration with either a (possibly
    // shared) readlock or a writelock, according to lkl.
    // The loop is aborted if pfnAction returns LKA_ABORT.
    // Returns the number of successful applications.
    DWORD Apply(PFnRecordAction pfnAction,
                void*           pvState=NULL,
                LK_LOCKTYPE     lkl=LKL_READLOCK);

    // Walk the hash table, applying pfnAction to any records that match
    // pfnPredicate. Locks the whole table for the duration with either
    // a (possibly shared) readlock or a writelock, according to lkl.
    // The loop is aborted if pfnAction returns LKA_ABORT.
    // Returns the number of successful applications.
    DWORD ApplyIf(PFnRecordPred   pfnPredicate,
                  PFnRecordAction pfnAction,
                  void*           pvState=NULL,
                  LK_LOCKTYPE     lkl=LKL_READLOCK);

    // Delete any records that match pfnPredicate.
    // Locks the table for the duration with a writelock.
    // Returns the number of deletions.
    //
    // Do *not* walk the hash table by hand with an iterator and call
    // DeleteKey: the iterator will end up pointing to garbage.
    DWORD DeleteIf(PFnRecordPred pfnPredicate,
                   void*         pvState=NULL);
#endif // LKR_APPLY_IF
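
    // Example (a sketch): a predicate for DeleteIf. At this level the
    // callbacks take untyped records, so the cast is the caller's
    // responsibility; CFoo, m_dwExpires, and IsStale are hypothetical.
    //
    //     LK_PREDICATE WINAPI IsStale(const void* pvRec, void* pvState)
    //     {
    //         const CFoo* pFoo  = static_cast<const CFoo*>(pvRec);
    //         DWORD       dwNow = *static_cast<DWORD*>(pvState);
    //         return (pFoo->m_dwExpires < dwNow) ? LKP_PERFORM
    //                                            : LKP_NO_ACTION;
    //     }
    //
    //     DWORD dwNow    = GetTickCount();
    //     DWORD cDeleted = ht.DeleteIf(IsStale, &dwNow);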
    // Check the table for consistency. Returns 0 if okay, or the number of
    // errors otherwise.
    int CheckTable() const;

    // Remove all data from the table
    void Clear()
    {
        WriteLock();
        _Clear(true);
        WriteUnlock();
    }

    // Number of elements in the table
    DWORD Size() const
    { return m_cRecords; }

    // Maximum possible number of elements in the table
    DWORD MaxSize() const
    { return static_cast<DWORD>(m_MaxLoad * MAX_DIRSIZE * m_dwSegSize); }

    // Get hash table statistics
    CLKRHashTableStats GetStatistics() const;

    // Is the hash table usable?
    bool IsUsable() const
    { return (m_lkrcState == LK_SUCCESS); }

    // Is the hash table consistent and correct?
    bool IsValid() const
    {
        STATIC_ASSERT(((MIN_DIRSIZE & (MIN_DIRSIZE-1)) == 0)  // == (1 << N)
                      && ((1 << 3) <= MIN_DIRSIZE)
                      && (MIN_DIRSIZE < MAX_DIRSIZE)
                      && ((MAX_DIRSIZE & (MAX_DIRSIZE-1)) == 0)
                      && (MAX_DIRSIZE <= (1 << 30)));

        bool f = (m_lkrcState == LK_SUCCESS    // serious internal failure?
                  && m_paDirSegs != NULL
                  && MIN_DIRSIZE <= m_cDirSegs && m_cDirSegs <= MAX_DIRSIZE
                  && (m_cDirSegs & (m_cDirSegs-1)) == 0
                  && m_pfnExtractKey != NULL
                  && m_pfnCalcKeyHash != NULL
                  && m_pfnEqualKeys != NULL
                  && m_pfnAddRefRecord != NULL
                  && m_cActiveBuckets > 0
                  && ValidSignature()
                  );
        if (!f)
            m_lkrcState = LK_UNUSABLE;
        return f;
    }

    // Set the spin count on the table lock
    void SetTableLockSpinCount(WORD wSpins)
    { m_Lock.SetSpinCount(wSpins); }

    // Get the spin count on the table lock
    WORD GetTableLockSpinCount() const
    { return m_Lock.GetSpinCount(); }

    // Set/Get the spin count on the bucket locks
    void SetBucketLockSpinCount(WORD wSpins);
    WORD GetBucketLockSpinCount() const;

    enum {
        SIGNATURE      = (('L') | ('K' << 8) | ('L' << 16) | ('H' << 24)),
        SIGNATURE_FREE = (('L') | ('K' << 8) | ('L' << 16) | ('x' << 24)),
    };

    bool
    ValidSignature() const
    { return m_dwSignature == SIGNATURE; }
    //
    // Lock manipulators
    //

    // Lock the table (exclusively) for writing
    void WriteLock()
    { m_Lock.WriteLock(); }

    // Lock the table (possibly shared) for reading
    void ReadLock() const
    { m_Lock.ReadLock(); }

    // Unlock the table for writing
    void WriteUnlock() const
    { m_Lock.WriteUnlock(); }

    // Unlock the table for reading
    void ReadUnlock() const
    { m_Lock.ReadUnlock(); }

    // Is the table already locked for writing?
    bool IsWriteLocked() const
    { return m_Lock.IsWriteLocked(); }

    // Is the table already locked for reading?
    bool IsReadLocked() const
    { return m_Lock.IsReadLocked(); }

    // Is the table unlocked for writing?
    bool IsWriteUnlocked() const
    { return m_Lock.IsWriteUnlocked(); }

    // Is the table unlocked for reading?
    bool IsReadUnlocked() const
    { return m_Lock.IsReadUnlocked(); }

    // Convert the read lock to a write lock
    void ConvertSharedToExclusive() const
    { m_Lock.ConvertSharedToExclusive(); }

    // Convert the write lock to a read lock
    void ConvertExclusiveToShared() const
    { m_Lock.ConvertExclusiveToShared(); }
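
    // Example (a sketch): using the lock manipulators directly to take a
    // consistent snapshot across several read-only calls. Most callers
    // never need these; the point APIs above do their own locking.
    //
    //     ht.ReadLock();
    //     DWORD cRecords = ht.Size();   // stable while the lock is held
    //     // ... other reads against the same snapshot ...
    //     ht.ReadUnlock();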
    LKRHASH_ALLOCATOR_DEFINITIONS(CLKRLinearHashTable);

#ifdef LKR_DEPRECATED_ITERATORS
public:
    // Iterators can be used to walk the table. To ensure a consistent
    // view of the data, the iterator locks the whole table. This can
    // have a negative effect upon performance, because no other thread
    // can do anything with the table. Use with care.
    //
    // You should not use an iterator to walk the table, calling DeleteKey,
    // as the iterator will end up pointing to garbage.
    //
    // Use Apply, ApplyIf, or DeleteIf instead of iterators to safely
    // walk the table. Or use the STL-style iterators.
    //
    // Note that iterators acquire a reference to the record pointed to
    // and release that reference as soon as the iterator is incremented.
    // In other words, this code is safe:
    //     lkrc = ht.IncrementIterator(&iter);
    //     // assume lkrc == LK_SUCCESS for the sake of this example
    //     CMyHashTable::Record* pRec = iter.Record();
    //     Foo(pRec);  // uses pRec but doesn't hang on to it
    //     lkrc = ht.IncrementIterator(&iter);
    //
    // But this code is not safe because pRec is used out of the scope of
    // the iterator that provided it:
    //     lkrc = ht.IncrementIterator(&iter);
    //     CMyHashTable::Record* pRec = iter.Record();
    //     // Broken code: should have called ht.AddRefRecord(pRec, +1) here
    //     lkrc = ht.IncrementIterator(&iter);
    //     Foo(pRec);  // Unsafe: we no longer hold a valid reference
    //
    // If the record has no reference-counting semantics, then you can
    // ignore the above remarks about scope.

    class CIterator
    {
    protected:
        friend class CLKRLinearHashTable;

        CLKRLinearHashTable* m_plht;         // which linear hash table?
        DWORD                m_dwBucketAddr; // bucket index
        CNodeClump*          m_pnc;          // a CNodeClump in bucket
        int                  m_iNode;        // offset within m_pnc
        LK_LOCKTYPE          m_lkl;          // readlock or writelock?

    private:
        // Private copy ctor and op= to prevent compiler synthesizing them.
        // Must provide (bad) implementation because we export instantiations.
        CIterator(const CIterator&);
        CIterator& operator=(const CIterator&);

    public:
        CIterator(
            LK_LOCKTYPE lkl=LKL_WRITELOCK)
            : m_plht(NULL),
              m_dwBucketAddr(0),
              m_pnc(NULL),
              m_iNode(-1),
              m_lkl(lkl)
        {}

        // Return the record associated with this iterator
        const void* Record() const
        {
            IRTLASSERT(IsValid());
            return ((m_pnc != NULL
                        && m_iNode >= 0
                        && m_iNode < CLKRLinearHashTable::NODES_PER_CLUMP)
                    ? m_pnc->m_pvNode[m_iNode]
                    : NULL);
        }

        // Return the key associated with this iterator
        const DWORD_PTR Key() const
        {
            IRTLASSERT(m_plht != NULL);
            const void* pRec = Record();
            return ((pRec != NULL && m_plht != NULL)
                    ? m_plht->_ExtractKey(pRec)
                    : NULL);
        }

        bool IsValid() const
        {
            return ((m_plht != NULL)
                    && (m_pnc != NULL)
                    && (0 <= m_iNode
                        && m_iNode < CLKRLinearHashTable::NODES_PER_CLUMP)
                    && (!m_pnc->IsEmptyNode(m_iNode)));
        }

        // Delete the record that the iterator points to. Does an implicit
        // IncrementIterator after deletion.
        LK_RETCODE DeleteRecord();

        // Change the record that the iterator points to. The new record
        // must have the same key as the old one.
        LK_RETCODE ChangeRecord(const void* pNewRec);
    }; // class CIterator

    // Const iterators for readonly access. You must use these with
    // const CLKRLinearHashTables.
    class CConstIterator : public CIterator
    {
    private:
        // Private, unimplemented copy ctor and op= to prevent
        // compiler synthesizing them.
        CConstIterator(const CConstIterator&);
        CConstIterator& operator=(const CConstIterator&);
    public:
        CConstIterator()
            : CIterator(LKL_READLOCK)
        {}
    }; // class CConstIterator

private:
    // The public APIs lock the table. The private ones, which are used
    // directly by CLKRHashTable, don't.
    LK_RETCODE _InitializeIterator(CIterator* piter);
    LK_RETCODE _CloseIterator(CIterator* piter);

public:
    // Initialize the iterator to point to the first item in the hash table.
    // Returns LK_SUCCESS, LK_NO_MORE_ELEMENTS, or LK_BAD_ITERATOR.
    LK_RETCODE InitializeIterator(CIterator* piter)
    {
        IRTLASSERT(piter != NULL && piter->m_plht == NULL);
        if (piter == NULL || piter->m_plht != NULL)
            return LK_BAD_ITERATOR;
        if (piter->m_lkl == LKL_WRITELOCK)
            WriteLock();
        else
            ReadLock();
        return _InitializeIterator(piter);
    }

    // The const iterator version
    LK_RETCODE InitializeIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL && piter->m_plht == NULL);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL || piter->m_plht != NULL
                || piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;
        ReadLock();
        return const_cast<CLKRLinearHashTable*>(this)
                   ->_InitializeIterator(static_cast<CIterator*>(piter));
    }

    // Move the iterator on to the next item in the table.
    // Returns LK_SUCCESS, LK_NO_MORE_ELEMENTS, or LK_BAD_ITERATOR.
    LK_RETCODE IncrementIterator(CIterator* piter);

    LK_RETCODE IncrementIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL && piter->m_plht == this);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL || piter->m_plht != this
                || piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;
        return const_cast<CLKRLinearHashTable*>(this)
                   ->IncrementIterator(static_cast<CIterator*>(piter));
    }

    // Close the iterator.
    LK_RETCODE CloseIterator(CIterator* piter)
    {
        IRTLASSERT(piter != NULL && piter->m_plht == this);
        if (piter == NULL || piter->m_plht != this)
            return LK_BAD_ITERATOR;
        _CloseIterator(piter);
        if (piter->m_lkl == LKL_WRITELOCK)
            WriteUnlock();
        else
            ReadUnlock();
        return LK_SUCCESS;
    }

    // Close the CConstIterator
    LK_RETCODE CloseIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL && piter->m_plht == this);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL || piter->m_plht != this
                || piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;
        const_cast<CLKRLinearHashTable*>(this)
            ->_CloseIterator(static_cast<CIterator*>(piter));
        ReadUnlock();
        return LK_SUCCESS;
    }
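
    // Example (a sketch): one complete pass with the deprecated
    // iterators; ht and Foo are hypothetical. Note that the table stays
    // read-locked from InitializeIterator until CloseIterator.
    //
    //     CLKRLinearHashTable::CConstIterator iter;
    //     LK_RETCODE lkrc = ht.InitializeIterator(&iter);
    //     while (lkrc == LK_SUCCESS)
    //     {
    //         Foo(iter.Record());    // use, but don't retain, the record
    //         lkrc = ht.IncrementIterator(&iter);
    //     }
    //     ht.CloseIterator(&iter);   // always close, even on failure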
#endif // LKR_DEPRECATED_ITERATORS
#ifdef LKR_STL_ITERATORS
private:
    bool _Erase(Iterator& riter, DWORD dwSignature);
    bool _Find(DWORD_PTR pnKey, DWORD dwSignature, Iterator& riterResult);

    bool _IsValidIterator(const Iterator& riter) const
    {
        LKR_ITER_TRACE(_TEXT(" LKLH:_IsValidIterator(%p)\n"), &riter);
        bool fValid = ((riter.m_plht == this)
                       && (riter.m_dwBucketAddr < m_cActiveBuckets)
                       && riter.IsValid());
        IRTLASSERT(fValid);
        return fValid;
    }

public:
    // Return an iterator pointing to the first item in the table
    Iterator
    Begin();

    // Return a one-past-the-end iterator. Always empty.
    Iterator
    End()
    {
        LKR_ITER_TRACE(_TEXT(" LKLH::End\n"));
        return Iterator();
    }

    // Insert a record.
    // Returns `true' if successful; iterResult points to that record.
    // Returns `false' otherwise; iterResult == End().
    bool
    Insert(
        /* in */  const void* pvRecord,
        /* out */ Iterator&   riterResult,
        /* in */  bool        fOverwrite=false);

    // Erase the record pointed to by the iterator; adjust the iterator
    // to point to the next record. Returns `true' if successful.
    bool
    Erase(
        /* in,out */ Iterator& riter);

    // Erase the records in the range [riterFirst, riterLast).
    // Returns `true' if successful.
    bool
    Erase(
        /* in */ Iterator& riterFirst,
        /* in */ Iterator& riterLast);

    // Find the (first) record that has its key == pnKey.
    // If successful, returns `true' and the iterator points to the
    // (first) record. If it fails, returns `false' and iterator == End().
    bool
    Find(
        /* in */  DWORD_PTR pnKey,
        /* out */ Iterator&  riterResult);

    // Find the range of records that have their keys == pnKey.
    // If successful, returns `true', iterFirst points to the first record,
    // and iterLast points to one-beyond-the-last such record.
    // If it fails, returns `false' and both iterators == End().
    // Primarily useful when m_fMultiKey == true.
    bool
    EqualRange(
        /* in */  DWORD_PTR pnKey,
        /* out */ Iterator&  riterFirst,  // inclusive
        /* out */ Iterator&  riterLast);  // exclusive
#endif // LKR_STL_ITERATORS
}; // class CLKRLinearHashTable


#ifdef LKR_STL_ITERATORS

// These functions have to be defined after CLKRLinearHashTable

inline void
CLKRLinearHashTable_Iterator::_AddRef(
    int nIncr) const
{
    // TODO: should the iterator call _AddRefRecord at all?
    if (m_plht != NULL && m_iNode != NODE_BEGIN - NODE_STEP)
    {
        IRTLASSERT((0 <= m_iNode && m_iNode < NODES_PER_CLUMP)
                   && (unsigned) m_iNode < NODES_PER_CLUMP
                   && m_pnc != NULL
                   && (nIncr == -1 || nIncr == +1));
        const void* pvRecord = m_pnc->m_pvNode[m_iNode];
        IRTLASSERT(pvRecord != NULL);
        LKR_ITER_TRACE(_TEXT(" LKLH::AddRef, this=%p, Rec=%p\n"),
                       this, pvRecord);
        m_plht->_AddRefRecord(pvRecord, nIncr);
    }
} // CLKRLinearHashTable_Iterator::_AddRef


inline const DWORD_PTR
CLKRLinearHashTable_Iterator::Key() const
{
    IRTLASSERT(IsValid());
    return m_plht->_ExtractKey(m_pnc->m_pvNode[m_iNode]);
} // CLKRLinearHashTable_Iterator::Key

#endif // LKR_STL_ITERATORS
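
// Example (a sketch): the STL-style interface on a CLKRLinearHashTable
// lht (hypothetical). Unlike the deprecated iterators, these lock only
// one bucket at a time, and each live iterator holds a reference to its
// current record.
//
//     CLKRLinearHashTable::Iterator it;
//     if (lht.Find(pnKey, it))
//     {
//         const void* pvRec = it.Record();
//         // ... use pvRec while `it' is still in scope ...
//     }
//
//     // Erase every record (Erase takes non-const references,
//     // so the endpoints must be named):
//     CLKRLinearHashTable::Iterator itFirst = lht.Begin();
//     CLKRLinearHashTable::Iterator itLast  = lht.End();
//     lht.Erase(itFirst, itLast);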
//--------------------------------------------------------------------
// CLKRHashTable
//
// To improve concurrency, a hash table is divided into a number of
// (independent) subtables. Each subtable is a linear hash table. The
// number of subtables is defined when the table is created and remains
// fixed thereafter. Records are assigned to subtables based on their
// hashed key.
//
// For small or low-contention hashtables, you can bypass this
// thin wrapper and use CLKRLinearHashTable directly. The methods are
// documented in the declarations for CLKRLinearHashTable (above).
//--------------------------------------------------------------------
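
// Example (a sketch): constructing a CLKRHashTable. The four callbacks
// have the PFn* signatures declared earlier in this header; the Foo*
// names are hypothetical. The trailing parameters (maxload, initsize,
// num_subtbls, fMultiKeys) all have defaults; see the constructor below.
//
//     CLKRHashTable ht("Foo cache",
//                      FooExtractKey,     // record -> key
//                      FooCalcKeyHash,    // key -> hash signature
//                      FooEqualKeys,      // do two keys match?
//                      FooAddRefRecord);  // adjust record's refcount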
class IRTL_DLLEXP CLKRHashTable
{
private:
    typedef CLKRLinearHashTable SubTable;

public:
    typedef SubTable::TableLock  TableLock;
    typedef SubTable::BucketLock BucketLock;

    friend class CLKRLinearHashTable;
#ifdef LKR_DEPRECATED_ITERATORS
    class CIterator;
    friend class CLKRHashTable::CIterator;
#endif // LKR_DEPRECATED_ITERATORS

#ifdef LKR_STL_ITERATORS
    friend class CLKRHashTable_Iterator;
    typedef CLKRHashTable_Iterator Iterator;
#endif // LKR_STL_ITERATORS

#ifdef LKRHASH_ALLOCATOR_NEW
    friend bool LKRHashTableInit();
    friend void LKRHashTableUninit();
#endif // LKRHASH_ALLOCATOR_NEW

    // aliases for convenience
    enum {
        NAME_SIZE              = SubTable::NAME_SIZE,
        HASH_INVALID_SIGNATURE = SubTable::HASH_INVALID_SIGNATURE,
        NODES_PER_CLUMP        = SubTable::NODES_PER_CLUMP,
    };

    enum {
        MAX_SUBTABLES = 64,
    };

private:
    // Hash table parameters
    DWORD           m_dwSignature;       // debugging: id & corruption check
    CHAR            m_szName[NAME_SIZE]; // an identifier for debugging
    DWORD           m_cSubTables;        // number of subtables
    SubTable**      m_palhtDir;          // array of subtables

    // type-specific function pointers
    PFnExtractKey   m_pfnExtractKey;
    PFnCalcKeyHash  m_pfnCalcKeyHash;
    mutable LK_RETCODE m_lkrcState;      // Internal state of table
    int             m_nSubTableMask;

#ifndef LKR_NO_GLOBAL_LIST
    static CLockedDoubleList sm_llGlobalList; // All active CLKRHashTables
    CListEntry               m_leGlobalList;
#endif // !LKR_NO_GLOBAL_LIST

    void
    _InsertThisIntoGlobalList()
    {
#ifndef LKR_NO_GLOBAL_LIST
        sm_llGlobalList.InsertHead(&m_leGlobalList);
#endif // !LKR_NO_GLOBAL_LIST
    }

    void
    _RemoveThisFromGlobalList()
    {
#ifndef LKR_NO_GLOBAL_LIST
        sm_llGlobalList.RemoveEntry(&m_leGlobalList);
#endif // !LKR_NO_GLOBAL_LIST
    }

    LKRHASH_GLOBAL_LOCK_DECLARATIONS();

    // Private copy ctor and op= to prevent compiler synthesizing them.
    // TODO: implement these properly; they could be useful.
    CLKRHashTable(const CLKRHashTable&);
    CLKRHashTable& operator=(const CLKRHashTable&);

    // Extract the key from the record
    const DWORD_PTR _ExtractKey(const void* pvRecord) const
    {
        IRTLASSERT(pvRecord != NULL);
        IRTLASSERT(m_pfnExtractKey != NULL);
        return (*m_pfnExtractKey)(pvRecord);
    }

    // Hash the key
    DWORD _CalcKeyHash(const DWORD_PTR pnKey) const
    {
        // Note: pnKey==0 is acceptable, as the real key type could be an int
        IRTLASSERT(m_pfnCalcKeyHash != NULL);
        DWORD dwHash = (*m_pfnCalcKeyHash)(pnKey);
        // We forcibly scramble the result to help ensure a better distribution
#ifndef __HASHFN_NO_NAMESPACE__
        dwHash = HashFn::HashRandomizeBits(dwHash);
#else  // !__HASHFN_NO_NAMESPACE__
        dwHash = ::HashRandomizeBits(dwHash);
#endif // !__HASHFN_NO_NAMESPACE__
        IRTLASSERT(dwHash != HASH_INVALID_SIGNATURE);
        return dwHash;
    }

    // Use the key's hash signature to multiplex into a subtable
    SubTable* _SubTable(DWORD dwSignature) const;

    // Find the index of pst within the subtable array
    int _SubTableIndex(SubTable* pst) const;

    // Memory allocation wrappers to allow us to simulate allocation
    // failures during testing
    static SubTable** const
    _AllocateSubTableArray(
        size_t n);

    static bool
    _FreeSubTableArray(
        SubTable** palht);

    static SubTable* const
    _AllocateSubTable(
        LPCSTR          pszName,        // An identifier for debugging
        PFnExtractKey   pfnExtractKey,  // Extract key from record
        PFnCalcKeyHash  pfnCalcKeyHash, // Calculate hash signature of key
        PFnEqualKeys    pfnEqualKeys,   // Compare two keys
        PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
        double          maxload,        // Upperbound on average chain length
        DWORD           initsize,       // Initial size of hash table
        CLKRHashTable*  phtParent,      // Owning table
        bool            fMultiKeys      // Allow multiple identical keys?
        );

    static bool
    _FreeSubTable(
        SubTable* plht);

public:
    CLKRHashTable(
        LPCSTR          pszName,        // An identifier for debugging
        PFnExtractKey   pfnExtractKey,  // Extract key from record
        PFnCalcKeyHash  pfnCalcKeyHash, // Calculate hash signature of key
        PFnEqualKeys    pfnEqualKeys,   // Compare two keys
        PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
        double maxload=LK_DFLT_MAXLOAD,     // bound on avg chain length
        DWORD  initsize=LK_DFLT_INITSIZE,   // Initial size of hash table
        DWORD  num_subtbls=LK_DFLT_NUM_SUBTBLS, // #subordinate hash tables
        bool   fMultiKeys=false         // Allow multiple identical keys?
        );

    ~CLKRHashTable();

    static const TCHAR* ClassName()
    { return _TEXT("CLKRHashTable"); }

    int NumSubTables() const  { return m_cSubTables; }

    bool MultiKeys() const;

    static LK_TABLESIZE NumSubTables(DWORD& rinitsize, DWORD& rnum_subtbls);

    // Thin wrappers for the corresponding methods in CLKRLinearHashTable
    LK_RETCODE InsertRecord(const void* pvRecord, bool fOverwrite=false);
    LK_RETCODE DeleteKey(const DWORD_PTR pnKey);
    LK_RETCODE DeleteRecord(const void* pvRecord);
    LK_RETCODE FindKey(const DWORD_PTR pnKey,
                       const void** ppvRecord) const;
    LK_RETCODE FindRecord(const void* pvRecord) const;

#ifdef LKR_APPLY_IF
    DWORD Apply(PFnRecordAction pfnAction,
                void*           pvState=NULL,
                LK_LOCKTYPE     lkl=LKL_READLOCK);
    DWORD ApplyIf(PFnRecordPred   pfnPredicate,
                  PFnRecordAction pfnAction,
                  void*           pvState=NULL,
                  LK_LOCKTYPE     lkl=LKL_READLOCK);
    DWORD DeleteIf(PFnRecordPred pfnPredicate,
                   void*         pvState=NULL);
#endif // LKR_APPLY_IF

    void  Clear();
    int   CheckTable() const;
    DWORD Size() const;
    DWORD MaxSize() const;
    CLKRHashTableStats GetStatistics() const;
    bool  IsValid() const;

    void SetTableLockSpinCount(WORD wSpins);
    WORD GetTableLockSpinCount() const;
    void SetBucketLockSpinCount(WORD wSpins);
    WORD GetBucketLockSpinCount() const;

    enum {
        SIGNATURE      = (('L') | ('K' << 8) | ('H' << 16) | ('T' << 24)),
        SIGNATURE_FREE = (('L') | ('K' << 8) | ('H' << 16) | ('x' << 24)),
    };

    bool
    ValidSignature() const
    { return m_dwSignature == SIGNATURE; }

    // Is the hash table usable?
    bool IsUsable() const
    { return (m_lkrcState == LK_SUCCESS); }

    void WriteLock();
    void ReadLock() const;
    void WriteUnlock() const;
    void ReadUnlock() const;
    bool IsWriteLocked() const;
    bool IsReadLocked() const;
    bool IsWriteUnlocked() const;
    bool IsReadUnlocked() const;
    void ConvertSharedToExclusive() const;
    void ConvertExclusiveToShared() const;

    // LKRHASH_ALLOCATOR_DEFINITIONS(CLKRHashTable);

#ifdef LKR_DEPRECATED_ITERATORS
public:
    typedef SubTable::CIterator CLHTIterator;

    class CIterator : public CLHTIterator
    {
    protected:
        friend class CLKRHashTable;

        CLKRHashTable* m_pht; // which hash table?
        int            m_ist; // which subtable

    private:
        // Private copy ctor and op= to prevent compiler synthesizing them.
        // Must provide (bad) implementation because we export instantiations.
        CIterator(const CIterator&);
        CIterator& operator=(const CIterator&);

    public:
        CIterator(
            LK_LOCKTYPE lkl=LKL_WRITELOCK)
            : CLHTIterator(lkl),
              m_pht(NULL),
              m_ist(-1)
        {}

        const void* Record() const
        {
            IRTLASSERT(IsValid());
            // This is a hack to work around a compiler bug. Calling
            // CLHTIterator::Record calls this function recursively until
            // the stack overflows.
            const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
            return pBase->Record();
        }

        const DWORD_PTR Key() const
        {
            IRTLASSERT(IsValid());
            const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
            return pBase->Key();
        }

        bool IsValid() const
        {
            const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
            return (m_pht != NULL && m_ist >= 0 && pBase->IsValid());
        }
    };

    // Const iterators for readonly access
    class CConstIterator : public CIterator
    {
    private:
        // Private, unimplemented copy ctor and op= to prevent
        // compiler synthesizing them.
        CConstIterator(const CConstIterator&);
        CConstIterator& operator=(const CConstIterator&);
    public:
        CConstIterator()
            : CIterator(LKL_READLOCK)
        {}
    };

public:
    LK_RETCODE InitializeIterator(CIterator* piter);
    LK_RETCODE IncrementIterator(CIterator* piter);
    LK_RETCODE CloseIterator(CIterator* piter);

    LK_RETCODE InitializeIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL && piter->m_pht == NULL);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL || piter->m_pht != NULL
                || piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;
        return const_cast<CLKRHashTable*>(this)
                   ->InitializeIterator(static_cast<CIterator*>(piter));
    }

    LK_RETCODE IncrementIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL && piter->m_pht == this);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL || piter->m_pht != this
                || piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;
        return const_cast<CLKRHashTable*>(this)
                   ->IncrementIterator(static_cast<CIterator*>(piter));
    }

    LK_RETCODE CloseIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL && piter->m_pht == this);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL || piter->m_pht != this
                || piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;
        return const_cast<CLKRHashTable*>(this)
                   ->CloseIterator(static_cast<CIterator*>(piter));
    }
#endif // LKR_DEPRECATED_ITERATORS

#ifdef LKR_STL_ITERATORS
private:
    bool _IsValidIterator(const Iterator& riter) const
    {
        LKR_ITER_TRACE(_TEXT(" LKHT:_IsValidIterator(%p)\n"), &riter);
        bool fValid = (riter.m_pht == this);
        IRTLASSERT(fValid);
        fValid = fValid && (0 <= riter.m_ist
                            && riter.m_ist < (int) m_cSubTables);
        IRTLASSERT(fValid);
        IRTLASSERT(_SubTableIndex(riter.m_subiter.m_plht) == riter.m_ist);
        fValid = fValid && riter.IsValid();
        IRTLASSERT(fValid);
        return fValid;
    }

public:
    Iterator
    Begin();

    Iterator
    End()
    {
        LKR_ITER_TRACE(_TEXT(" LKHT::End\n"));
        return Iterator();
    }

    bool
    Insert(
        /* in */  const void* pvRecord,
        /* out */ Iterator&   riterResult,
        /* in */  bool        fOverwrite=false);

    bool
    Erase(
        /* in,out */ Iterator& riter);

    bool
    Erase(
        /* in */ Iterator& riterFirst,
        /* in */ Iterator& riterLast);

    bool
    Find(
        /* in */  DWORD_PTR pnKey,
        /* out */ Iterator&  riterResult);

    bool
    EqualRange(
        /* in */  DWORD_PTR pnKey,
        /* out */ Iterator&  riterFirst,  // inclusive
        /* out */ Iterator&  riterLast);  // exclusive
#endif // LKR_STL_ITERATORS
}; // class CLKRHashTable
//--------------------------------------------------------------------
// CTypedHashTable
//
// A typesafe wrapper for CLKRHashTable (or CLKRLinearHashTable).
//
// * _Derived must derive from CTypedHashTable and provide certain member
//   functions. It's needed for various downcasting operations. See
//   CStringTestHashTable and CNumberTestHashTable below.
// * _Record is the type of the record. C{Linear}HashTable will store
//   pointers to _Record.
// * _Key is the type of the key. _Key is used directly; i.e., it is
//   not assumed to be a pointer type. C{Linear}HashTable assumes that
//   the key is stored in the associated record. See the comments
//   at the declaration of PFnExtractKey for more details.
//
// (optional parameters):
// * _BaseHashTable is the base hash table: CLKRHashTable or
//   CLKRLinearHashTable.
// * _BaseIterator is the iterator type, _BaseHashTable::CIterator.
//
// CTypedHashTable could derive directly from CLKRLinearHashTable, if you
// don't need the extra overhead of CLKRHashTable (which is quite low).
//
// You may need to add the following line to your code to disable
// warning messages about truncating extremely long identifiers.
//     #pragma warning (disable : 4786)
//--------------------------------------------------------------------
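
// Example (a sketch): a minimal _Derived class with string keys. The
// static member functions below are the ones the private wrappers in
// CTypedHashTable call; CFoo and CFooTable are hypothetical, and
// HashString is assumed to be one of this library's hash helpers.
//
//     struct CFoo
//     {
//         const char* m_pszKey;   // the key lives in the record
//         LONG        m_cRefs;    // refcount
//     };
//
//     class CFooTable
//         : public CTypedHashTable<CFooTable, CFoo, const char*>
//     {
//     public:
//         CFooTable()
//             : CTypedHashTable<CFooTable, CFoo, const char*>("foos")
//         {}
//         static const char* ExtractKey(const CFoo* pFoo)
//         { return pFoo->m_pszKey; }
//         static DWORD CalcKeyHash(const char* psz)
//         { return HashString(psz); }
//         static bool EqualKeys(const char* psz1, const char* psz2)
//         { return strcmp(psz1, psz2) == 0; }
//         static void AddRefRecord(CFoo* pFoo, int nIncr)
//         { InterlockedExchangeAdd(&pFoo->m_cRefs, nIncr); }
//     };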
#define LKRHASH_HACKY_CAST(T, pv) ((T) (UINT_PTR) (pv))

template < class _Derived, class _Record, class _Key,
           class _BaseHashTable=CLKRHashTable
#ifdef LKR_DEPRECATED_ITERATORS
         , class _BaseIterator=_BaseHashTable::CIterator
#endif // LKR_DEPRECATED_ITERATORS
         >
class CTypedHashTable : public _BaseHashTable
{
public:
    // convenient aliases
    typedef _Derived       Derived;
    typedef _Record        Record;
    typedef _Key           Key;
    typedef _BaseHashTable BaseHashTable;
    typedef CTypedHashTable<_Derived, _Record, _Key, _BaseHashTable
#ifdef LKR_DEPRECATED_ITERATORS
                            , _BaseIterator
#endif // LKR_DEPRECATED_ITERATORS
                            > HashTable;
#ifdef LKR_DEPRECATED_ITERATORS
    typedef _BaseIterator BaseIterator;
#endif // LKR_DEPRECATED_ITERATORS

#ifdef LKR_APPLY_IF
    // ApplyIf() and DeleteIf(): Does the record match the predicate?
    // Note: takes a Record*, not a const Record*. You can modify the
    // record in Pred() or Action(), if you like, but if you do, you
    // should use LKL_WRITELOCK to lock the table.
    typedef LK_PREDICATE (WINAPI *PFnRecordPred) (Record* pRec, void* pvState);

    // Apply() et al: Perform action on record.
    typedef LK_ACTION    (WINAPI *PFnRecordAction)(Record* pRec, void* pvState);
#endif // LKR_APPLY_IF

private:
    // Wrappers for the typesafe methods exposed by the derived class
    static const DWORD_PTR WINAPI
    _ExtractKey(const void* pvRecord)
    {
        const _Record* pRec = static_cast<const _Record*>(pvRecord);
        _Key key = static_cast<_Key>(_Derived::ExtractKey(pRec));
        // I would prefer to use reinterpret_cast here, but the stupid
        // Win64 compiler thinks it knows better than I do.
        return (const DWORD_PTR) (key);
    }

    static DWORD WINAPI
    _CalcKeyHash(const DWORD_PTR pnKey)
    {
        _Key key = LKRHASH_HACKY_CAST(_Key, pnKey);
        return _Derived::CalcKeyHash(key);
    }

    static bool WINAPI
    _EqualKeys(const DWORD_PTR pnKey1, const DWORD_PTR pnKey2)
    {
        _Key key1 = LKRHASH_HACKY_CAST(_Key, pnKey1);
        _Key key2 = LKRHASH_HACKY_CAST(_Key, pnKey2);
        return _Derived::EqualKeys(key1, key2);
    }

    static void WINAPI
    _AddRefRecord(const void* pvRecord, int nIncr)
    {
        _Record* pRec = static_cast<_Record*>(const_cast<void*>(pvRecord));
        _Derived::AddRefRecord(pRec, nIncr);
    }

#ifdef LKR_APPLY_IF
    // Typesafe wrappers for Apply, ApplyIf, and DeleteIf.
    class CState
    {
    public:
        PFnRecordPred   m_pfnPred;
        PFnRecordAction m_pfnAction;
        void*           m_pvState;

        CState(
            PFnRecordPred   pfnPred,
            PFnRecordAction pfnAction,
            void*           pvState)
            : m_pfnPred(pfnPred), m_pfnAction(pfnAction), m_pvState(pvState)
        {}
    };

    static LK_PREDICATE WINAPI
    _Pred(const void* pvRecord, void* pvState)
    {
        _Record* pRec   = static_cast<_Record*>(const_cast<void*>(pvRecord));
        CState*  pState = static_cast<CState*>(pvState);
        return (*pState->m_pfnPred)(pRec, pState->m_pvState);
    }

    static LK_ACTION WINAPI
    _Action(const void* pvRecord, void* pvState)
    {
        _Record* pRec   = static_cast<_Record*>(const_cast<void*>(pvRecord));
        CState*  pState = static_cast<CState*>(pvState);
        return (*pState->m_pfnAction)(pRec, pState->m_pvState);
    }
#endif // LKR_APPLY_IF

public:
    CTypedHashTable(
        LPCSTR pszName,                         // An identifier for debugging
        double maxload=LK_DFLT_MAXLOAD,         // Upperbound on avg chain len
        DWORD  initsize=LK_DFLT_INITSIZE,       // Initial size of table: S/M/L
        DWORD  num_subtbls=LK_DFLT_NUM_SUBTBLS, // #subordinate hash tables
        bool   fMultiKeys=false                 // Allow multiple identical keys?
        )
        : _BaseHashTable(pszName, _ExtractKey, _CalcKeyHash, _EqualKeys,
                         _AddRefRecord, maxload, initsize, num_subtbls,
                         fMultiKeys)
    {
        // Ensure that _Key is no bigger than a pointer. Because we
        // support both numeric and pointer keys, the various casts
        // in the member functions unfortunately silently truncate if
        // _Key is an unacceptable numeric type, such as __int64 on x86.
        STATIC_ASSERT(sizeof(_Key) <= sizeof(DWORD_PTR));
    }

    LK_RETCODE InsertRecord(const _Record* pRec, bool fOverwrite=false)
    { return _BaseHashTable::InsertRecord(pRec, fOverwrite); }

    LK_RETCODE DeleteKey(const _Key key)
    {
        const void* pvKey = reinterpret_cast<const void*>((DWORD_PTR)(key));
        DWORD_PTR   pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
        return _BaseHashTable::DeleteKey(pnKey);
    }

    LK_RETCODE DeleteRecord(const _Record* pRec)
    { return _BaseHashTable::DeleteRecord(pRec); }

    // Note: returns a _Record**, not a const _Record**. Note that you
    // can use a const type for the template parameter to ensure constness.
    LK_RETCODE FindKey(const _Key key, _Record** ppRec) const
    {
        if (ppRec == NULL)
            return LK_BAD_RECORD;
        *ppRec = NULL;
        const void* pvRec = NULL;
        const void* pvKey = reinterpret_cast<const void*>((DWORD_PTR)(key));
        DWORD_PTR   pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
        LK_RETCODE lkrc = _BaseHashTable::FindKey(pnKey, &pvRec);
        *ppRec = static_cast<_Record*>(const_cast<void*>(pvRec));
        return lkrc;
    }

    LK_RETCODE FindRecord(const _Record* pRec) const
    { return _BaseHashTable::FindRecord(pRec); }

    // Other C{Linear}HashTable methods can be exposed without change

#ifdef LKR_APPLY_IF
public:
    // Typesafe wrappers for Apply et al
    DWORD Apply(PFnRecordAction pfnAction,
                void*           pvState=NULL,
                LK_LOCKTYPE     lkl=LKL_READLOCK)
    {
        IRTLASSERT(pfnAction != NULL);
        if (pfnAction == NULL)
            return 0;
        CState state(NULL, pfnAction, pvState);
        return _BaseHashTable::Apply(_Action, &state, lkl);
    }

    DWORD ApplyIf(PFnRecordPred   pfnPredicate,
                  PFnRecordAction pfnAction,
                  void*           pvState=NULL,
                  LK_LOCKTYPE     lkl=LKL_READLOCK)
    {
        IRTLASSERT(pfnPredicate != NULL && pfnAction != NULL);
        if (pfnPredicate == NULL || pfnAction == NULL)
            return 0;
        CState state(pfnPredicate, pfnAction, pvState);
        return _BaseHashTable::ApplyIf(_Pred, _Action, &state, lkl);
    }

    DWORD DeleteIf(PFnRecordPred pfnPredicate, void* pvState=NULL)
    {
        IRTLASSERT(pfnPredicate != NULL);
        if (pfnPredicate == NULL)
            return 0;
        CState state(pfnPredicate, NULL, pvState);
        return _BaseHashTable::DeleteIf(_Pred, &state);
    }
#endif // LKR_APPLY_IF
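
    // Example (a sketch): a typed predicate for the DeleteIf wrapper
    // above. Unlike the raw callbacks, it receives a Record* directly,
    // so no casting is needed; m_dwExpires and IsStale are hypothetical.
    //
    //     static LK_PREDICATE WINAPI IsStale(CFoo* pFoo, void* pvState)
    //     {
    //         DWORD dwNow = *static_cast<DWORD*>(pvState);
    //         return (pFoo->m_dwExpires < dwNow) ? LKP_PERFORM
    //                                            : LKP_NO_ACTION;
    //     }
    //
    //     DWORD dwNow    = GetTickCount();
    //     DWORD cDeleted = fooTable.DeleteIf(IsStale, &dwNow);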
#ifdef LKR_DEPRECATED_ITERATORS
    // Typesafe wrappers for iterators
    class CIterator : public _BaseIterator
    {
    private:
        // Private, unimplemented copy ctor and op= to prevent
        // compiler synthesizing them.
        CIterator(const CIterator&);
        CIterator& operator=(const CIterator&);

    public:
        CIterator(
            LK_LOCKTYPE lkl=LKL_WRITELOCK)
            : _BaseIterator(lkl)
        {}

        _Record* Record() const
        {
            const _BaseIterator* pBase = static_cast<const _BaseIterator*>(this);
            return reinterpret_cast<_Record*>(const_cast<void*>(
                       pBase->Record()));
        }

        _Key Key() const
        {
            const _BaseIterator* pBase = static_cast<const _BaseIterator*>(this);
            return reinterpret_cast<_Key>(reinterpret_cast<void*>(pBase->Key()));
        }
    };

    // readonly iterator
    class CConstIterator : public CIterator
    {
    private:
        // Private, unimplemented copy ctor and op= to prevent
        // compiler synthesizing them.
        CConstIterator(const CConstIterator&);
        CConstIterator& operator=(const CConstIterator&);

    public:
        CConstIterator()
            : CIterator(LKL_READLOCK)
        {}

        const _Record* Record() const
        {
            return CIterator::Record();
        }

        const _Key Key() const
        {
            return CIterator::Key();
        }
    };

public:
    LK_RETCODE InitializeIterator(CIterator* piter)
    {
        return _BaseHashTable::InitializeIterator(piter);
    }

    LK_RETCODE IncrementIterator(CIterator* piter)
    {
        return _BaseHashTable::IncrementIterator(piter);
    }

    LK_RETCODE CloseIterator(CIterator* piter)
    {
        return _BaseHashTable::CloseIterator(piter);
    }

    LK_RETCODE InitializeIterator(CConstIterator* piter) const
    {
        return const_cast<HashTable*>(this)
                   ->InitializeIterator(static_cast<CIterator*>(piter));
    }

    LK_RETCODE IncrementIterator(CConstIterator* piter) const
    {
        return const_cast<HashTable*>(this)
                   ->IncrementIterator(static_cast<CIterator*>(piter));
    }

    LK_RETCODE CloseIterator(CConstIterator* piter) const
    {
        return const_cast<HashTable*>(this)
                   ->CloseIterator(static_cast<CIterator*>(piter));
    }
#endif // LKR_DEPRECATED_ITERATORS

#ifdef LKR_STL_ITERATORS
    // TODO: const_iterator

public:
    class iterator
    {
        friend class CTypedHashTable<_Derived, _Record, _Key,
                                     _BaseHashTable
#ifdef LKR_DEPRECATED_ITERATORS
                                     , _BaseIterator
#endif // LKR_DEPRECATED_ITERATORS
                                     >;

    protected:
        typename _BaseHashTable::Iterator m_iter;

        iterator(
            typename _BaseHashTable::Iterator& rhs)
            : m_iter(rhs)
        {
            LKR_ITER_TRACE(_TEXT("Typed::prot ctor, this=%p, rhs=%p\n"),
                           this, &rhs);
        }

    public:
        typedef std::forward_iterator_tag iterator_category;
        typedef _Record                   value_type;
        typedef ptrdiff_t                 difference_type;
        typedef size_t                    size_type;
        typedef value_type&               reference;
        typedef value_type*               pointer;

        iterator()
            : m_iter()
        {
            LKR_ITER_TRACE(_TEXT("Typed::default ctor, this=%p\n"), this);
        }

        iterator(
            const iterator& rhs)
            : m_iter(rhs.m_iter)
        {
            LKR_ITER_TRACE(_TEXT("Typed::copy ctor, this=%p, rhs=%p\n"),
                           this, &rhs);
        }

        iterator& operator=(
            const iterator& rhs)
        {
            LKR_ITER_TRACE(_TEXT("Typed::operator=, this=%p, rhs=%p\n"),
                           this, &rhs);
            m_iter = rhs.m_iter;
            return *this;
        }

        ~iterator()
        {
            LKR_ITER_TRACE(_TEXT("Typed::dtor, this=%p\n"), this);
        }

        reference operator*() const
        {
            void* pvRecord = const_cast<void*>(m_iter.Record());
            return reinterpret_cast<reference>(pvRecord);
        }

        pointer operator->() const  { return &(operator*()); }

        // pre-increment
        iterator& operator++()
        {
            LKR_ITER_TRACE(_TEXT("Typed::pre-increment, this=%p\n"), this);
            m_iter.Increment();
            return *this;
        }

        // post-increment
        iterator operator++(int)
        {
            LKR_ITER_TRACE(_TEXT("Typed::post-increment, this=%p\n"), this);
            iterator iterPrev = *this;
            m_iter.Increment();
            return iterPrev;
        }

        bool operator==(
            const iterator& rhs) const
        {
            LKR_ITER_TRACE(_TEXT("Typed::operator==, this=%p, rhs=%p\n"),
                           this, &rhs);
            return m_iter == rhs.m_iter;
        }

        bool operator!=(
            const iterator& rhs) const
        {
            LKR_ITER_TRACE(_TEXT("Typed::operator!=, this=%p, rhs=%p\n"),
                           this, &rhs);
            return m_iter != rhs.m_iter;
        }

        _Record* Record() const
        {
            LKR_ITER_TRACE(_TEXT("Typed::Record, this=%p\n"), this);
            return reinterpret_cast<_Record*>(
                       const_cast<void*>(m_iter.Record()));
        }

        _Key Key() const
        {
            LKR_ITER_TRACE(_TEXT("Typed::Key, this=%p\n"), this);
            return reinterpret_cast<_Key>(
                       reinterpret_cast<void*>(m_iter.Key()));
        }
    }; // class iterator

    // Return an iterator pointing to the first item in the table
    iterator begin()
    {
        LKR_ITER_TRACE(_TEXT("Typed::begin()\n"));
        return iterator(_BaseHashTable::Begin());
    }

    // Return a one-past-the-end iterator. Always empty.
    iterator end()
    {
        LKR_ITER_TRACE(_TEXT("Typed::end()\n"));
        return iterator(_BaseHashTable::End());
    }

    template <class _InputIterator>
    CTypedHashTable(
        LPCSTR pszName,                         // An identifier for debugging
        _InputIterator f,                       // first element in range
        _InputIterator l,                       // one-beyond-last element
        double maxload=LK_DFLT_MAXLOAD,         // Upperbound on avg chain len
        DWORD  initsize=LK_DFLT_INITSIZE,       // Initial size of table: S/M/L
        DWORD  num_subtbls=LK_DFLT_NUM_SUBTBLS, // #subordinate hash tables
        bool   fMultiKeys=false                 // Allow multiple identical keys?
        )
        : _BaseHashTable(pszName, _ExtractKey, _CalcKeyHash, _EqualKeys,
                         _AddRefRecord, maxload, initsize, num_subtbls,
                         fMultiKeys)
    {
        insert(f, l);
    }

    template <class _InputIterator>
    void insert(_InputIterator f, _InputIterator l)
    {
        for ( ; f != l; ++f)
            InsertRecord(&(*f));
    }

    bool
    Insert(
        const _Record* pRecord,
        iterator&      riterResult,
        bool           fOverwrite=false)
    {
        LKR_ITER_TRACE(_TEXT("Typed::Insert\n"));
        return _BaseHashTable::Insert(pRecord, riterResult.m_iter, fOverwrite);
    }

    bool
    Erase(
        iterator& riter)
    {
        LKR_ITER_TRACE(_TEXT("Typed::Erase\n"));
        return _BaseHashTable::Erase(riter.m_iter);
    }

    bool
    Erase(
        iterator& riterFirst,
        iterator& riterLast)
    {
        LKR_ITER_TRACE(_TEXT("Typed::Erase2\n"));
        return _BaseHashTable::Erase(riterFirst.m_iter, riterLast.m_iter);
    }

    bool
    Find(
        const _Key key,
        iterator&  riterResult)
    {
        LKR_ITER_TRACE(_TEXT("Typed::Find\n"));
        const void* pvKey = reinterpret_cast<const void*>((DWORD_PTR)(key));
        DWORD_PTR   pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
        return _BaseHashTable::Find(pnKey, riterResult.m_iter);
    }

    bool
    EqualRange(
        const _Key key,
        iterator&  riterFirst,
        iterator&  riterLast)
    {
        LKR_ITER_TRACE(_TEXT("Typed::EqualRange\n"));
        const void* pvKey = reinterpret_cast<const void*>((DWORD_PTR)(key));
        DWORD_PTR   pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
        return _BaseHashTable::EqualRange(pnKey, riterFirst.m_iter,
                                          riterLast.m_iter);
    }

    // The iterator functions for an STL hash_(|multi)_(set|map)
    //
    // Value type of a Pair-Associative Container is
    //     pair<const key_type, mapped_type>
    //
    // pair<iterator, bool> insert(const value_type& x);
    //
    // void erase(iterator pos);
    // void erase(iterator f, iterator l);
    //
    // iterator       find(const key_type& k) [const];
    // const_iterator find(const key_type& k) const;
    //
    // pair<iterator, iterator> equal_range(const key_type& k) [const];
    // pair<const_iterator, const_iterator>
    //     equal_range(const key_type& k) const;
#endif // LKR_STL_ITERATORS
}; // class CTypedHashTable
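
// Example (a sketch): STL-style iteration over a typed table, using the
// hypothetical CFooTable sketched before CTypedHashTable. The iterator
// holds a reference to the current record for as long as it is in scope.
//
//     CFooTable fooTable;
//     // ... InsertRecord some CFoo*s ...
//     for (CFooTable::iterator it = fooTable.begin();
//          it != fooTable.end();
//          ++it)
//     {
//         const char* pszKey = it.Key();
//         CFoo*       pFoo   = it.Record();
//         // ... use pFoo ...
//     }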
#ifndef __LKRHASH_NO_NAMESPACE__
}
#endif // !__LKRHASH_NO_NAMESPACE__

#endif // __LKRHASH_H__