Leaked source code of Windows Server 2003

/*++
Copyright (c) 1997-2002 Microsoft Corporation
Module Name:
    LKRhash.h
Abstract:
    Declares LKRhash: a fast, scalable, cache- and MP-friendly hash table
Author:
    Paul (Per-Ake) Larson, PALarson@microsoft.com, July 1997
    Murali R. Krishnan (MuraliK)
    George V. Reilly (GeorgeRe) 06-Jan-1998
Environment:
    Win32 - User Mode
Project:
    LKRhash
Revision History:
    10/01/1998 - Change name from LKhash to LKRhash
    10/2000    - Port to kernel mode
--*/

#ifndef __LKRHASH_H__
#define __LKRHASH_H__

#ifndef __LKR_HASH_H__
// external definitions
# include <LKR-hash.h>
#endif // !__LKR_HASH_H__

#ifndef __IRTLDBG_H__
# include <IrtlDbg.h>
#endif // !__IRTLDBG_H__

#ifndef LKR_NO_GLOBAL_LIST
# ifndef __LSTENTRY_H__
#  include <LstEntry.h>
# endif // !__LSTENTRY_H__
#else  // LKR_NO_GLOBAL_LIST
# ifndef __LOCKS_H__
#  include <Locks.h>
# endif // !__LOCKS_H__
#endif // LKR_NO_GLOBAL_LIST

#ifndef __HASHFN_H__
# include <HashFn.h>
#endif // !__HASHFN_H__

// Disable old-style deprecated iterators, by default
#ifndef LKR_DEPRECATED_ITERATORS
# define LKR_NO_DEPRECATED_ITERATORS
#endif // !LKR_DEPRECATED_ITERATORS

#ifndef LKR_NO_DEPRECATED_ITERATORS
# undef  LKR_DEPRECATED_ITERATORS
# define LKR_DEPRECATED_ITERATORS 1
#endif // !LKR_NO_DEPRECATED_ITERATORS

#undef LKR_COUNTDOWN

// Is bucket locking enabled? If not, then the table lock must be held
// for longer, but that may be cheaper.
#define LKR_USE_BUCKET_LOCKS
#define LKR_ALLOW_NULL_RECORDS

// #define __LKRHASH_NO_NAMESPACE__
// #define __HASHFN_NO_NAMESPACE__

// #define LKR_TABLE_LOCK  CReaderWriterLock3
// #define LKR_BUCKET_LOCK CSmallSpinLock

#ifndef LKR_TABLE_LOCK
# if defined(LKR_EXPOSED_TABLE_LOCK) || defined(LKR_DEPRECATED_ITERATORS)
   // need recursive writelocks
#  define LKR_TABLE_LOCK CReaderWriterLock4
# else
   // use non-recursive writelocks
#  define LKR_TABLE_LOCK CReaderWriterLock2
# endif
#endif // !LKR_TABLE_LOCK

#ifndef LKR_BUCKET_LOCK
# ifndef LKR_USE_BUCKET_LOCKS
#  define LKR_BUCKET_LOCK CFakeLock
# elif defined(LKR_DEPRECATED_ITERATORS)
#  define LKR_BUCKET_LOCK CReaderWriterLock3
# else  // !LKR_DEPRECATED_ITERATORS
#  define LKR_BUCKET_LOCK CSmallSpinLock
# endif // !LKR_DEPRECATED_ITERATORS
#endif // !LKR_BUCKET_LOCK

#ifdef IRTLDEBUG
# define LKR_ALLOC_STATS
# define LKR_OPS_STATS
#endif // IRTLDEBUG

//=====================================================================
// The class CLKRLinearHashTable defined in this file provides dynamic
// hash tables, i.e. tables that grow and shrink dynamically with
// the number of records in the table.
// The basic method used is linear hashing, as explained in:
//
//   P.-A. Larson, Dynamic Hash Tables, Comm. of the ACM, 31, 4 (1988)
//
// This version has the following characteristics:
// - It is thread-safe and uses spin locks for synchronization.
// - It was designed to support very high rates of concurrent
//   operations (inserts/deletes/lookups). It achieves this by
//   (a) partitioning a CLKRHashTable into a collection of
//       CLKRLinearHashTables to reduce contention on the global
//       table lock.
//   (b) minimizing the hold time on a table lock, preferring to lock
//       down a bucket chain instead.
// - The design is L1 cache-conscious. See CNodeClump.
// - It is designed for sets varying in size from a dozen
//   elements to several million.
//
// Main classes:
//   CLKRLinearHashTable: thread-safe linear hash table
//   CLKRHashTable:       collection of CLKRLinearHashTables
//   CTypedHashTable:     typesafe wrapper for CLKRHashTable
//
//
// Paul Larson, PALarson@microsoft.com, July 1997
//   Original implementation with input from Murali R. Krishnan,
//   [email protected].
//
// George V. Reilly, [email protected], Dec 1997-Jan 1998
//   Massive cleanup and rewrite. Added templates.
//=====================================================================

// 1) Linear Hashing
// -----------------
//
// Linear hash tables grow and shrink dynamically with the number of
// records in the table. The growth or shrinkage is smooth: logically,
// one bucket at a time, but physically in larger increments
// (64 buckets). An insertion (deletion) may cause an expansion
// (contraction) of the table. This causes relocation of a small number
// of records (at most one bucket's worth). All operations (insert,
// delete, lookup) take constant expected time, regardless of the
// current size or the growth of the table.
//
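// A minimal sketch (not from this header; the real definitions live in
// the implementation file) of how linear hashing maps a hash signature
// to a bucket address, in terms of the _H0/_H1 helpers and the
// m_dwBktAddrMask0/m_dwBktAddrMask1/m_iExpansionIdx members declared
// later in this file:
//
//   DWORD _H0(DWORD dwSignature, DWORD dwBktAddrMask)
//   { return dwSignature & dwBktAddrMask; }  // low-order address bits
//
//   DWORD _BucketAddress(DWORD dwSignature) const
//   {
//       DWORD dwBktAddr = _H0(dwSignature, m_dwBktAddrMask0);
//       // If this bucket has already been split in the current round
//       // of expansion, use one more bit of the signature (_H1).
//       if (dwBktAddr < m_iExpansionIdx)
//           dwBktAddr = _H1(dwSignature, m_dwBktAddrMask1);
//       return dwBktAddr;
//   }
//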
// 2) LKR extensions to Linear hash table
// --------------------------------------
//
// Larson-Krishnan-Reilly extensions to Linear hash tables for
// multiprocessor scalability and improved cache performance.
//
// Traditional implementations of linear hash tables use one global lock
// to prevent interference between concurrent operations
// (insert/delete/lookup) on the table. The single lock easily becomes
// the bottleneck in SMP scenarios when multiple threads are used.
//
// Traditionally, a (hash) bucket is implemented as a chain of
// single-item nodes. Every operation results in chasing down a chain
// looking for an item. However, pointer chasing is very slow on modern
// systems because almost every jump results in a cache miss. L2 (or L3)
// cache misses are very expensive in missed CPU cycles, and the cost is
// increasing (going to 100s of cycles in the future).
//
// The LKR extensions offer:
// 1) Partitioning (by hashing) of records among multiple subtables.
//    Each subtable has locks, but there is no global lock. Each
//    subtable receives a much lower rate of operations, resulting in
//    fewer conflicts.
//
// 2) Improved cache locality by grouping keys and their hash values
//    into contiguous chunks that fit exactly into one (or a few)
//    cache lines.
//
// Specifically, the implementation here achieves this using the
// following techniques.
//
// Class CLKRHashTable is the top-level data structure that dynamically
// creates m_cSubTables linear hash subtables. The CLKRLinearHashTables
// act as the subtables to which items and accesses are fanned out. A
// good hash function multiplexes requests uniformly to the various
// subtables, thus minimizing traffic to any single subtable. The
// implementation uses a home-grown version of bounded spinlocks; that
// is, a thread does not spin on a lock indefinitely, instead yielding
// after a predetermined number of loops.
//
// Each CLKRLinearHashTable consists of a directory (growable array) of
// segments, each holding m_nSegSize CBuckets. Each CBucket in turn
// consists of a chain of CNodeClumps. Each CNodeClump contains a group
// of NODES_PER_CLUMP hash values (aka hash keys or signatures) and
// pointers to the associated data items. Keeping the signatures
// together increases the cache locality in scans for lookup.
//
// Traditionally, people store a linked-list element right inside the
// object that is hashed and use this linked list for the chaining of
// data blocks. However, keeping just the pointers to the data objects,
// and not chaining through them, limits the need to bring the data
// object into the cache. We need to access the data object only if the
// hash values match. This limits the cache-thrashing behaviour
// exhibited by conventional implementations. It has the additional
// benefit that the objects themselves do not need to be modified
// in order to be collected in the hash subtable (i.e., it's non-invasive).
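//
// A hedged sketch of the fan-out step described above (the member names
// m_palhtDir and the selection formula are illustrative; CLKRHashTable's
// real directory member is not part of this excerpt):
//
//   CLKRLinearHashTable*
//   CLKRHashTable::_SubTable(DWORD dwSignature) const
//   {
//       // The signature picks the subtable; within the subtable the
//       // same signature then picks the bucket.
//       return m_palhtDir[dwSignature % m_cSubTables];
//   }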
#ifdef LKR_STL_ITERATORS

// needed for std::forward_iterator_tag, etc
# include <iterator>

// The iterators have very verbose tracing. Don't want it on all the time
// in debug builds.
# if defined(IRTLDEBUG) && (LKR_STL_ITERATORS >= 2)
#  define LKR_ITER_TRACE IrtlTrace
# else  // !defined(IRTLDEBUG) || LKR_STL_ITERATORS < 2
#  define LKR_ITER_TRACE 1 ? (void)0 : IrtlTrace
# endif // !defined(IRTLDEBUG) || LKR_STL_ITERATORS < 2

#endif // LKR_STL_ITERATORS

//--------------------------------------------------------------------
// Default values for the hashtable constructors
enum {
#ifdef _WIN64
    LK_DFLT_MAXLOAD =     4,  // 64-byte nodes => NODES_PER_CLUMP = 4
#else
    LK_DFLT_MAXLOAD =     7,  // Default upperbound on average chain length.
#endif
    LK_DFLT_INITSIZE =    LK_MEDIUM_TABLESIZE, // Default initial table size
    LK_DFLT_NUM_SUBTBLS = 0,  // Use a heuristic to choose #subtables
};

/*--------------------------------------------------------------------
 * Undocumented additional creation flag parameters to LKR_CreateTable
 */
enum {
    LK_CREATE_NON_PAGED_ALLOCS = 0x1000, // Use paged or NP pool in kernel
};

//--------------------------------------------------------------------
// Custom memory allocators (optional)
//--------------------------------------------------------------------

#if !defined(LKR_NO_ALLOCATORS) && !defined(LKRHASH_KERNEL_MODE)
// # define LKRHASH_ACACHE 1
// # define LKRHASH_ROCKALL_FAST 1
#endif // !LKR_NO_ALLOCATORS && !LKRHASH_KERNEL_MODE

#if defined(LKRHASH_ACACHE)

# include <acache.hxx>

class ACache : public ALLOC_CACHE_HANDLER
{
private:
    SIZE_T m_cb;

public:
    ACache(IN LPCSTR pszName, IN const ALLOC_CACHE_CONFIGURATION* pacConfig)
        : ALLOC_CACHE_HANDLER(pszName, pacConfig),
          m_cb(m_acConfig.cbSize)
    {}

    SIZE_T ByteSize() const
    {
        return m_cb;
    }

    static const TCHAR* ClassName()  {return _TEXT("ACache");}
}; // class ACache

typedef ACache CLKRhashAllocator;

# define LKRHASH_ALLOCATOR_NEW(_C, N, Tag)                              \
    const ALLOC_CACHE_CONFIGURATION acc = { 1, N, sizeof(_C) };         \
    _C::sm_palloc = new ACache("LKRhash:" #_C, &acc);

#elif defined(LKRHASH_ROCKALL_FAST)

# include <FastHeap.hpp>

class FastHeap : public FAST_HEAP
{
private:
    SIZE_T m_cb;

public:
    FastHeap(SIZE_T cb)
        : m_cb(cb)
    {}

    LPVOID Alloc()
    { return New(m_cb, NULL, false); }

    BOOL Free(LPVOID pvMem)
    { return Delete(pvMem); }

    SIZE_T ByteSize() const
    {
        return m_cb;
    }

    static const TCHAR* ClassName()  {return _TEXT("FastHeap");}
}; // class FastHeap

typedef FastHeap CLKRhashAllocator;

# define LKRHASH_ALLOCATOR_NEW(_C, N, Tag)                              \
    _C::sm_palloc = new FastHeap(sizeof(_C))

#endif // LKRHASH_ROCKALL_FAST

#ifdef LKRHASH_ALLOCATOR_NEW

// placed inline in the declaration of class _C
# define LKRHASH_ALLOCATOR_DEFINITIONS(_C)                              \
    protected:                                                          \
        static CLKRhashAllocator* sm_palloc;                            \
    public:                                                             \
        friend class CLKRLinearHashTable;                               \
                                                                        \
        static void* operator new(size_t s)                             \
        {                                                               \
            UNREFERENCED_PARAMETER(s);                                  \
            IRTLASSERT(s == sizeof(_C));                                \
            IRTLASSERT(sm_palloc != NULL);                              \
            return sm_palloc->Alloc();                                  \
        }                                                               \
        static void operator delete(void* pv)                           \
        {                                                               \
            IRTLASSERT(pv != NULL);                                     \
            IRTLASSERT(sm_palloc != NULL);                              \
            sm_palloc->Free(pv);                                        \
        }

// used in LKR_Initialize()
# define LKRHASH_ALLOCATOR_INIT(_C, N, Tag, f)                          \
    {                                                                   \
        if (f)                                                          \
        {                                                               \
            IRTLASSERT(_C::sm_palloc == NULL);                          \
            LKRHASH_ALLOCATOR_NEW(_C, N, Tag);                          \
            f = (_C::sm_palloc != NULL);                                \
        }                                                               \
    }

// used in LKR_Terminate()
# define LKRHASH_ALLOCATOR_UNINIT(_C)                                   \
    {                                                                   \
        if (_C::sm_palloc != NULL)                                      \
        {                                                               \
            delete _C::sm_palloc;                                       \
            _C::sm_palloc = NULL;                                       \
        }                                                               \
    }
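// An illustrative sketch (class CFoo and the argument values are
// hypothetical) of how these macros fit together: the class embeds the
// allocator declarations, and LKR_Initialize()/LKR_Terminate() create
// and destroy the shared allocator. Tag is unused by the user-mode
// allocators above.
//
//   class CFoo
//   {
//       // ... data and methods ...
//       LKRHASH_ALLOCATOR_DEFINITIONS(CFoo);
//   };
//
//   // in LKR_Initialize(), where bool f tracks cumulative success:
//   LKRHASH_ALLOCATOR_INIT(CFoo, 64, 0, f);
//
//   // in LKR_Terminate():
//   LKRHASH_ALLOCATOR_UNINIT(CFoo);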
#else // !LKRHASH_ALLOCATOR_NEW

# define LKRHASH_ALLOCATOR_DEFINITIONS(_C)
# define LKRHASH_ALLOCATOR_INIT(_C, N, Tag, f)
# define LKRHASH_ALLOCATOR_UNINIT(_C)

class CLKRhashAllocator
{
public:
    static const TCHAR* ClassName()  {return _TEXT("global new");}
};

#endif // !LKRHASH_ALLOCATOR_NEW

#define LKRHASH_CLASS_INIT_DECLS(_C)                                    \
    private:                                                            \
        /* class-wide initialization and termination */                 \
        static int  _Initialize(DWORD dwFlags);                         \
        static void _Terminate();                                       \
                                                                        \
        friend int  ::LKR_Initialize(DWORD dwInitFlags);                \
        friend void ::LKR_Terminate()

#ifndef __LKRHASH_NO_NAMESPACE__
namespace LKRhash {
#endif // !__LKRHASH_NO_NAMESPACE__

//--------------------------------------------------------------------
// forward declarations

class IRTL_DLLEXP CLKRLinearHashTable;
class IRTL_DLLEXP CLKRHashTable;

template <class _Der, class _Rcd, class _Ky, bool _fDRC, class _HT
#ifdef LKR_DEPRECATED_ITERATORS
          , class _Iter
#endif // LKR_DEPRECATED_ITERATORS
         >
class CTypedHashTable;

class CNodeClump;
typedef CNodeClump* PNodeClump;

class CBucket;
typedef CBucket* PBucket;

class CSegment;
typedef CSegment* PSegment;

class IRTL_DLLEXP CLKRHashTableStats;

//--------------------------------------------------------------------
// Statistical information returned by GetStatistics
//--------------------------------------------------------------------

#ifdef LOCK_INSTRUMENTATION

class IRTL_DLLEXP CAveragedLockStats : public CLockStatistics
{
public:
    int m_nItems;

    CAveragedLockStats();
}; // class CAveragedLockStats

#endif // LOCK_INSTRUMENTATION

#ifndef LKRHASH_KERNEL_MODE

class IRTL_DLLEXP CLKRHashTableStats
{
public:
    int    RecordCount;      // number of records in the table
    int    TableSize;        // table size in number of slots
    int    DirectorySize;    // number of entries in directory
    int    LongestChain;     // longest hash chain in the table
    int    EmptySlots;       // number of unused hash slots
    double SplitFactor;      // fraction of buckets split
    double AvgSearchLength;  // average length of a successful search
    double ExpSearchLength;  // theoretically expected length
    double AvgUSearchLength; // average length of an unsuccessful search
    double ExpUSearchLength; // theoretically expected length
    int    NodeClumpSize;    // number of slots in a node clump
    int    CBucketSize;      // sizeof(CBucket)

#ifdef LOCK_INSTRUMENTATION
    CAveragedLockStats    m_alsTable;      // stats for table lock
    CAveragedLockStats    m_alsBucketsAvg; // avg of stats for bucket locks
    CGlobalLockStatistics m_gls;           // global statistics for all locks
#endif // LOCK_INSTRUMENTATION

    enum {
        MAX_BUCKETS = 40,
    };

    // histogram of bucket lengths
    LONG m_aBucketLenHistogram[MAX_BUCKETS];

    CLKRHashTableStats();

    static const LONG*
    BucketSizes();

    static LONG
    BucketSize(
        LONG nBucketIndex);

    static LONG
    BucketIndex(
        LONG nBucketLength);
}; // class CLKRHashTableStats

#endif // !LKRHASH_KERNEL_MODE

//--------------------------------------------------------------------
// Keep some statistics about allocations/frees and about various operations

#ifdef LKR_ALLOC_STATS

# define DECLARE_ALLOC_STAT(Type)                                       \
    mutable LONG m_c##Type##Allocs;                                     \
    mutable LONG m_c##Type##Frees;                                      \
    static  LONG sm_c##Type##Allocs;                                    \
    static  LONG sm_c##Type##Frees

# define DECLARE_CLASS_ALLOC_STAT_STORAGE(Class, Type)                  \
    LONG LKRHASH_NS::Class::sm_c##Type##Allocs;                         \
    LONG LKRHASH_NS::Class::sm_c##Type##Frees

# define INIT_ALLOC_STAT(Type)                                          \
    m_c##Type##Allocs = m_c##Type##Frees = 0

# define INIT_CLASS_ALLOC_STAT(Class, Type)                             \
    LKRHASH_NS::Class::sm_c##Type##Allocs = 0;                          \
    LKRHASH_NS::Class::sm_c##Type##Frees = 0

# define INCREMENT_ALLOC_STAT(Type)                                     \
    InterlockedIncrement(&m_c##Type##Allocs);                           \
    InterlockedIncrement(&sm_c##Type##Allocs)

# define INCREMENT_FREE_STAT(Type)                                      \
    InterlockedIncrement(&m_c##Type##Frees);                            \
    InterlockedIncrement(&sm_c##Type##Frees)

# define VALIDATE_DUMP_ALLOC_STAT(Type)                                 \
    IRTLASSERT(m_c##Type##Allocs == m_c##Type##Frees);                  \
    IRTLTRACE(_TEXT(#Type) _TEXT(": Allocs=%ld, Frees=%ld\n"),          \
              m_c##Type##Allocs, m_c##Type##Frees)

# define VALIDATE_DUMP_CLASS_ALLOC_STAT(Class, Type)                    \
    IRTLASSERT(LKRHASH_NS::Class::sm_c##Type##Allocs                    \
               == LKRHASH_NS::Class::sm_c##Type##Frees);                \
    IRTLTRACE(_TEXT("Global ") _TEXT(#Type)                             \
              _TEXT(": Allocs=%ld, Frees=%ld\n"),                       \
              LKRHASH_NS::Class::sm_c##Type##Allocs,                    \
              LKRHASH_NS::Class::sm_c##Type##Frees)

#else // !LKR_ALLOC_STATS

# define DECLARE_ALLOC_STAT(Type)
# define DECLARE_CLASS_ALLOC_STAT_STORAGE(Class, Type)
# define INIT_ALLOC_STAT(Type)                          ((void) 0)
# define INIT_CLASS_ALLOC_STAT(Class, Type)             ((void) 0)
# define INCREMENT_ALLOC_STAT(Type)                     ((void) 0)
# define INCREMENT_FREE_STAT(Type)                      ((void) 0)
# define VALIDATE_DUMP_ALLOC_STAT(Type)                 ((void) 0)
# define VALIDATE_DUMP_CLASS_ALLOC_STAT(Class, Type)    ((void) 0)

#endif // !LKR_ALLOC_STATS

// Statistics on different kinds of operations

#ifdef LKR_OPS_STATS

# define DECLARE_OP_STAT(Type)                                          \
    mutable LONG m_c##Type##Ops;                                        \
    static  LONG sm_c##Type##Ops

# define DECLARE_CLASS_OP_STAT_STORAGE(Class, Type)                     \
    LONG LKRHASH_NS::Class::sm_c##Type##Ops

# define INIT_OP_STAT(Type)                                             \
    m_c##Type##Ops = 0

# define INIT_CLASS_OP_STAT(Class, Type)                                \
    LKRHASH_NS::Class::sm_c##Type##Ops = 0

# define INCREMENT_OP_STAT(Type)                                        \
    InterlockedIncrement(&m_c##Type##Ops);                              \
    InterlockedIncrement(&sm_c##Type##Ops)

# define DUMP_OP_STAT(Type)                                             \
    IRTLTRACE(_TEXT(#Type) _TEXT(": Ops=%ld\n"), m_c##Type##Ops)

# define DUMP_CLASS_OP_STAT(Class, Type)                                \
    IRTLTRACE(_TEXT("Global ") _TEXT(#Type) _TEXT(": Ops=%ld\n"),       \
              LKRHASH_NS::Class::sm_c##Type##Ops)

#else // !LKR_OPS_STATS

# define DECLARE_OP_STAT(Type)
# define DECLARE_CLASS_OP_STAT_STORAGE(Class, Type)
# define INIT_OP_STAT(Type)                             ((void) 0)
# define INIT_CLASS_OP_STAT(Class, Type)                ((void) 0)
# define INCREMENT_OP_STAT(Type)                        ((void) 0)
# define DUMP_OP_STAT(Type)                             ((void) 0)
# define DUMP_CLASS_OP_STAT(Class, Type)                ((void) 0)

#endif // !LKR_OPS_STATS

//--------------------------------------------------------------------
// Global table lock code. This is only used to measure how much of a
// slowdown having a global lock on the CLKRHashTable causes. It is
// *never* used in production code.

// #define LKRHASH_GLOBAL_LOCK CCritSec

#ifdef LKRHASH_GLOBAL_LOCK

# define LKRHASH_GLOBAL_LOCK_DECLARATIONS()                             \
    typedef LKRHASH_GLOBAL_LOCK GlobalLock;                             \
    mutable GlobalLock m_lkGlobal;

# define LKRHASH_GLOBAL_READ_LOCK()     m_lkGlobal.ReadLock()
# define LKRHASH_GLOBAL_WRITE_LOCK()    m_lkGlobal.WriteLock()
# define LKRHASH_GLOBAL_READ_UNLOCK()   m_lkGlobal.ReadUnlock()
# define LKRHASH_GLOBAL_WRITE_UNLOCK()  m_lkGlobal.WriteUnlock()

#else // !LKRHASH_GLOBAL_LOCK

# define LKRHASH_GLOBAL_LOCK_DECLARATIONS()

// These ones will be optimized away by the compiler
# define LKRHASH_GLOBAL_READ_LOCK()     ((void)0)
# define LKRHASH_GLOBAL_WRITE_LOCK()    ((void)0)
# define LKRHASH_GLOBAL_READ_UNLOCK()   ((void)0)
# define LKRHASH_GLOBAL_WRITE_UNLOCK()  ((void)0)

#endif // !LKRHASH_GLOBAL_LOCK

// Class for nodes on a bucket chain. Instead of a node containing
// one (signature, record-pointer, next-tuple-pointer) tuple, it
// contains _N_ such tuples. (N-1 next-tuple-pointers are omitted.)
// This improves locality of reference greatly; i.e., it's L1
// cache-friendly. It also reduces memory fragmentation and memory
// allocator overhead. It does complicate the chain traversal code
// slightly, admittedly.
//
// This theory is beautiful. In practice, however, CNodeClumps
// are *not* perfectly aligned on 32-byte boundaries by the memory
// allocators. Experimental results indicate that we get a 2-3%
// speed improvement by using 32-byte-aligned blocks, but this must
// be considered against the average of 16 bytes wasted per block.

class CNodeClump
{
public:
    // Record slots per chunk - set so a chunk matches (one or two)
    // cache lines. 3 ==> 32 bytes, 7 ==> 64 bytes, on a 32-bit system.
    // Note: the default max load factor is 7, which implies that
    // there will seldom be more than one node clump in a chain.
    enum {
#if defined(LOCK_INSTRUMENTATION)
        BUCKET_BYTE_SIZE = 96,
#else
        BUCKET_BYTE_SIZE = 64,
#endif
        // overhead = m_Lock + m_pncNext
        BUCKET_OVERHEAD  = sizeof(LKR_BUCKET_LOCK) + sizeof(PNodeClump),
        // node size = dwKeySignature + pvRecord
        NODE_SIZE        = sizeof(const void*) + sizeof(DWORD),
        NODES_PER_CLUMP  = (BUCKET_BYTE_SIZE - BUCKET_OVERHEAD) / NODE_SIZE,
#ifdef _WIN64
        NODE_CLUMP_BITS  = 2,
#else
        NODE_CLUMP_BITS  = 3,
#endif
        _NODES_PER_CLUMP = 15, // <<---
        _NODE_CLUMP_BITS = 4,
    };
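    // Worked example (assuming a 4-byte LKR_BUCKET_LOCK such as
    // CSmallSpinLock): on a 32-bit system, NODE_SIZE = 4+4 = 8 and
    // BUCKET_OVERHEAD = 4+4 = 8, so NODES_PER_CLUMP = (64-8)/8 = 7;
    // on Win64, NODE_SIZE = 8+4 = 12 and BUCKET_OVERHEAD = 4+8 = 12,
    // so NODES_PER_CLUMP = (64-12)/12 = 4. Both values satisfy the
    // STATIC_ASSERTs on NODE_CLUMP_BITS in the constructor below.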
    typedef int NodeIndex; // for iterating through a CNodeClump

    enum {
        // See if countdown loops are faster than countup loops for
        // traversing a CNodeClump. In practice, countup loops are faster.
        // These constants allow us to write direction-agnostic loops,
        // such as
        //     for (NodeIndex x = NODE_BEGIN; x != NODE_END; x += NODE_STEP)
#ifndef LKR_COUNTDOWN
        // for (NodeIndex x = 0; x < NODES_PER_CLUMP; ++x) ...
        NODE_BEGIN = 0,
        NODE_END   = NODES_PER_CLUMP,
        NODE_STEP  = +1,
#else  // LKR_COUNTDOWN
        // for (NodeIndex x = NODES_PER_CLUMP; --x >= 0; ) ...
        NODE_BEGIN = NODES_PER_CLUMP - 1,
        NODE_END   = -1,
        NODE_STEP  = -1,
#endif // LKR_COUNTDOWN
    };

    // If m_dwKeySigs[iNode] == HASH_INVALID_SIGNATURE, then the node is
    // empty, as are all nodes in the range [iNode+NODE_STEP, NODE_END).
    enum {
#ifndef __HASHFN_NO_NAMESPACE__
        HASH_INVALID_SIGNATURE = HashFn::HASH_INVALID_SIGNATURE,
#else  // __HASHFN_NO_NAMESPACE__
        HASH_INVALID_SIGNATURE = ::HASH_INVALID_SIGNATURE,
#endif // __HASHFN_NO_NAMESPACE__
    };

    DWORD       m_dwKeySigs[NODES_PER_CLUMP]; // hash values computed from keys
    PNodeClump  m_pncNext;                    // next node clump on the chain
    const void* m_pvNode[NODES_PER_CLUMP];    // pointers to records

    CNodeClump()
    {
        STATIC_ASSERT((1 << (NODE_CLUMP_BITS - 1)) < NODES_PER_CLUMP);
        STATIC_ASSERT(NODES_PER_CLUMP <= (1 << NODE_CLUMP_BITS));
        STATIC_ASSERT(NODES_PER_CLUMP * NODE_SIZE + BUCKET_OVERHEAD
                          <= BUCKET_BYTE_SIZE);
        Clear();
    }

    void
    Clear()
    {
        m_pncNext = NULL; // no dangling pointers

        IRTLASSERT(IsLastClump());

        for (NodeIndex iNode = NODES_PER_CLUMP; --iNode >= 0; )
        {
            m_dwKeySigs[iNode] = HASH_INVALID_SIGNATURE;
            m_pvNode[iNode]    = NULL;
            IRTLASSERT(IsEmptyAndInvalid(iNode));
        }
    }

    DWORD
    Signature(
        NodeIndex iNode) const
    {
        IRTLASSERT(0 <= iNode  &&  iNode < NODES_PER_CLUMP);
        return m_dwKeySigs[iNode];
    }

    const void*
    Node(
        NodeIndex iNode) const
    {
        IRTLASSERT(0 <= iNode  &&  iNode < NODES_PER_CLUMP);
        return m_pvNode[iNode];
    }

    PNodeClump const
    NextClump() const
    {
        return m_pncNext;
    }

    bool
    InvalidSignature(
        NodeIndex iNode) const
    {
        IRTLASSERT(0 <= iNode  &&  iNode < NODES_PER_CLUMP);
        return (m_dwKeySigs[iNode] == HASH_INVALID_SIGNATURE);
    }

    bool
    IsEmptyNode(
        NodeIndex iNode) const
    {
        IRTLASSERT(0 <= iNode  &&  iNode < NODES_PER_CLUMP);
#ifndef LKR_ALLOW_NULL_RECORDS
        return (m_pvNode[iNode] == NULL);
#else
        return InvalidSignature(iNode);
#endif
    }

    bool
    IsEmptyAndInvalid(
        NodeIndex iNode) const
    {
        return
#ifndef LKR_ALLOW_NULL_RECORDS
            IsEmptyNode(iNode)  &&
#endif
            InvalidSignature(iNode);
    }

    bool
    IsEmptySlot(
        NodeIndex iNode) const
    {
        return InvalidSignature(iNode);
    }

    bool
    IsLastClump() const
    {
        return (m_pncNext == NULL);
    }

#ifdef IRTLDEBUG
    bool
    NoMoreValidSlots(
        NodeIndex iNode) const
    {
        IRTLASSERT(0 <= iNode  &&  iNode < NODES_PER_CLUMP);
        bool f = IsLastClump(); // last nodeclump in chain

        for ( ; iNode != NODE_END; iNode += NODE_STEP)
            f = f && IsEmptyAndInvalid(iNode);

        return f;
    }

    bool
    NoValidSlots() const
    {
        return NoMoreValidSlots(NODE_BEGIN);
    }

    // Don't want the overhead of calls to the dtor in retail builds,
    // since it doesn't do anything useful
    ~CNodeClump()
    {
        IRTLASSERT(IsLastClump()); // no dangling pointers

        for (NodeIndex iNode = NODES_PER_CLUMP; --iNode >= 0; )
            IRTLASSERT(InvalidSignature(iNode) && IsEmptyNode(iNode));
    }
#endif // IRTLDEBUG

private:
    // We rely on the compiler to generate an efficient copy constructor
    // and operator= that make shallow (bitwise) copies of CNodeClumps.

    LKRHASH_ALLOCATOR_DEFINITIONS(CNodeClump);
    LKRHASH_CLASS_INIT_DECLS(CNodeClump);
}; // class CNodeClump
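// A hedged sketch (CBucket's layout is not shown in this excerpt; the
// m_ncFirst member name is illustrative) of the cache-friendly lookup
// scan this layout enables: signatures are compared first, and a record
// is dereferenced only when its signature matches.
//
//   for (PNodeClump pnc = &pbkt->m_ncFirst;
//        pnc != NULL;
//        pnc = pnc->m_pncNext)
//   {
//       for (NodeIndex i = CNodeClump::NODE_BEGIN;
//            i != CNodeClump::NODE_END;
//            i += CNodeClump::NODE_STEP)
//       {
//           if (pnc->m_dwKeySigs[i] == dwSignature
//               &&  _CompareKeys(pnKey,
//                                _ExtractKey(pnc->m_pvNode[i])) == 0)
//               return pnc->m_pvNode[i]; // found
//       }
//   }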
#ifdef LKR_STL_ITERATORS

class IRTL_DLLEXP CLKRLinearHashTable_Iterator;
class IRTL_DLLEXP CLKRHashTable_Iterator;

class IRTL_DLLEXP CLKRLinearHashTable_Iterator
{
    friend class CLKRLinearHashTable;
    friend class CLKRHashTable;
    friend class CLKRHashTable_Iterator;

protected:
    typedef short NodeIndex;

    CLKRLinearHashTable* m_plht;         // which linear hash subtable?
    PNodeClump           m_pnc;          // a CNodeClump in bucket
    DWORD                m_dwBucketAddr; // bucket index
    NodeIndex            m_iNode;        // offset within m_pnc

    enum {
        NODES_PER_CLUMP        = CNodeClump::NODES_PER_CLUMP,
        NODE_BEGIN             = CNodeClump::NODE_BEGIN,
        NODE_END               = CNodeClump::NODE_END,
        NODE_STEP              = CNodeClump::NODE_STEP,
        HASH_INVALID_SIGNATURE = CNodeClump::HASH_INVALID_SIGNATURE,
    };

    CLKRLinearHashTable_Iterator(
        CLKRLinearHashTable* plht,
        PNodeClump           pnc,
        DWORD                dwBucketAddr,
        NodeIndex            iNode)
        : m_plht(plht),
          m_pnc(pnc),
          m_dwBucketAddr(dwBucketAddr),
          m_iNode(iNode)
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::prot ctor, this=%p, plht=%p, ")
                       _TEXT("pnc=%p, ba=%d, in=%d\n"),
                       this, plht, pnc, dwBucketAddr, iNode);
    }

    inline void _AddRef(
        LK_ADDREF_REASON lkar) const;

    bool _Increment(
        bool fDecrementOldValue=true);

    NodeIndex _NodesPerClump() const  { return NODES_PER_CLUMP; }
    NodeIndex _NodeBegin() const      { return NODE_BEGIN; }
    NodeIndex _NodeEnd() const        { return NODE_END; }
    NodeIndex _NodeStep() const       { return NODE_STEP; }

public:
    CLKRLinearHashTable_Iterator()
        : m_plht(NULL),
          m_pnc(NULL),
          m_dwBucketAddr(0),
          m_iNode(0)
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::default ctor, this=%p\n"), this);
    }

    CLKRLinearHashTable_Iterator(
        const CLKRLinearHashTable_Iterator& rhs)
        : m_plht(rhs.m_plht),
          m_pnc(rhs.m_pnc),
          m_dwBucketAddr(rhs.m_dwBucketAddr),
          m_iNode(rhs.m_iNode)
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::copy ctor, this=%p, rhs=%p\n"),
                       this, &rhs);
        _AddRef(LKAR_ITER_COPY_CTOR);
    }

    CLKRLinearHashTable_Iterator& operator=(
        const CLKRLinearHashTable_Iterator& rhs)
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::operator=, this=%p, rhs=%p\n"),
                       this, &rhs);
        rhs._AddRef(LKAR_ITER_ASSIGN_ACQUIRE);
        this->_AddRef(LKAR_ITER_ASSIGN_RELEASE);

        m_plht         = rhs.m_plht;
        m_pnc          = rhs.m_pnc;
        m_dwBucketAddr = rhs.m_dwBucketAddr;
        m_iNode        = rhs.m_iNode;

        return *this;
    }

    ~CLKRLinearHashTable_Iterator()
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::dtor, this=%p, plht=%p\n"),
                       this, m_plht);
        _AddRef(LKAR_ITER_DTOR);
    }

    bool Increment()
    {
        return IsValid() ? _Increment() : false;
    }

    bool IsValid() const
    {
        bool fValid = (m_plht != NULL  &&  m_pnc != NULL
                       &&  0 <= m_iNode  &&  m_iNode < NODES_PER_CLUMP);

        if (fValid)
            fValid = (m_pnc->m_dwKeySigs[m_iNode] != HASH_INVALID_SIGNATURE);

        IRTLASSERT(fValid);
        return fValid;
    }

    const void* Record() const
    {
        IRTLASSERT(IsValid());
        return m_pnc->m_pvNode[m_iNode];
    }

    inline const DWORD_PTR Key() const;

    bool operator==(
        const CLKRLinearHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::operator==, this=%p, rhs=%p\n"),
                       this, &rhs);

        // m_pnc and m_iNode uniquely identify an iterator
        bool fEQ = ((m_pnc == rhs.m_pnc) // most unique field
                    &&  (m_iNode == rhs.m_iNode));

        IRTLASSERT(!fEQ  ||  ((m_plht == rhs.m_plht)
                              &&  (m_dwBucketAddr == rhs.m_dwBucketAddr)));
        return fEQ;
    }

    bool operator!=(
        const CLKRLinearHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::operator!=, this=%p, rhs=%p\n"),
                       this, &rhs);

        bool fNE = ((m_pnc != rhs.m_pnc)
                    ||  (m_iNode != rhs.m_iNode));
        //// IRTLASSERT(fNE == !this->operator==(rhs));
        return fNE;
    }
}; // class CLKRLinearHashTable_Iterator

class IRTL_DLLEXP CLKRHashTable_Iterator
{
    friend class CLKRHashTable;

protected:
    typedef short SubTableIndex;

    // order important to minimize size
    CLKRHashTable*               m_pht;     // which hash table?
    CLKRLinearHashTable_Iterator m_subiter; // iterator into subtable
    SubTableIndex                m_ist;     // index of subtable

    CLKRHashTable_Iterator(
        CLKRHashTable* pht,
        SubTableIndex  ist)
        : m_pht(pht),
          m_subiter(CLKRLinearHashTable_Iterator()), // zero
          m_ist(ist)
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::prot ctor, this=%p, pht=%p, ist=%d\n"),
                       this, pht, ist);
    }

    bool _Increment(
        bool fDecrementOldValue=true);

public:
    CLKRHashTable_Iterator()
        : m_pht(NULL),
          m_subiter(CLKRLinearHashTable_Iterator()), // zero
          m_ist(0)
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::default ctor, this=%p\n"), this);
    }

#ifdef IRTLDEBUG
    // Compiler does a perfectly adequate job of synthesizing these methods.
    CLKRHashTable_Iterator(
        const CLKRHashTable_Iterator& rhs)
        : m_pht(rhs.m_pht),
          m_subiter(rhs.m_subiter),
          m_ist(rhs.m_ist)
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::copy ctor, this=%p, rhs=%p\n"),
                       this, &rhs);
    }

    CLKRHashTable_Iterator& operator=(
        const CLKRHashTable_Iterator& rhs)
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::operator=, this=%p, rhs=%p\n"),
                       this, &rhs);
        m_ist     = rhs.m_ist;
        m_subiter = rhs.m_subiter;
        m_pht     = rhs.m_pht;
        return *this;
    }

    ~CLKRHashTable_Iterator()
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::dtor, this=%p, pht=%p\n"), this, m_pht);
    }
#endif // IRTLDEBUG

    bool Increment()
    {
        return IsValid() ? _Increment() : false;
    }

    bool IsValid() const
    {
        bool fValid = (m_pht != NULL  &&  m_ist >= 0);
        IRTLASSERT(fValid);

        fValid = fValid && (m_subiter.m_plht != NULL);
        IRTLASSERT(fValid);

        fValid = fValid && (m_subiter.m_pnc != NULL);
        IRTLASSERT(fValid);

        fValid = fValid && (0 <= m_subiter.m_iNode);
        IRTLASSERT(fValid);

        fValid = fValid && (m_subiter.m_iNode < CNodeClump::NODES_PER_CLUMP);
        IRTLASSERT(fValid);

        if (fValid)
        {
            fValid = (m_subiter.m_pnc->m_dwKeySigs[m_subiter.m_iNode]
                          != CNodeClump::HASH_INVALID_SIGNATURE);
        }

        IRTLASSERT(fValid);
        return fValid;
    }

    const void* Record() const
    {
        IRTLASSERT(IsValid());
        return m_subiter.Record();
    }

    const DWORD_PTR Key() const
    {
        IRTLASSERT(IsValid());
        return m_subiter.Key();
    }

    bool operator==(
        const CLKRHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::operator==, this=%p, rhs=%p\n"),
                       this, &rhs);

        // m_pnc and m_iNode uniquely identify an iterator
        bool fEQ = ((m_subiter.m_pnc
                         == rhs.m_subiter.m_pnc) // most unique field
                    &&  (m_subiter.m_iNode == rhs.m_subiter.m_iNode));

        IRTLASSERT(!fEQ
                   ||  ((m_ist == rhs.m_ist)
                        &&  (m_pht == rhs.m_pht)
                        &&  (m_subiter.m_plht == rhs.m_subiter.m_plht)
                        &&  (m_subiter.m_dwBucketAddr
                                 == rhs.m_subiter.m_dwBucketAddr)));
        return fEQ;
    }

    bool operator!=(
        const CLKRHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::operator!=, this=%p, rhs=%p\n"),
                       this, &rhs);

        bool fNE = ((m_subiter.m_pnc != rhs.m_subiter.m_pnc)
                    ||  (m_subiter.m_iNode != rhs.m_subiter.m_iNode));
        //// IRTLASSERT(fNE == !this->operator==(rhs));
        return fNE;
    }
}; // class CLKRHashTable_Iterator

#endif // LKR_STL_ITERATORS
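// A hedged usage sketch of the STL-style iterator surface declared
// above. How an iterator is first obtained from a table is not part of
// this excerpt, so `iter` is assumed to have been handed out by one of
// the table's own methods:
//
//   CLKRHashTable_Iterator iter = /* obtained from the table */;
//   while (iter.IsValid())
//   {
//       const void* pvRecord = iter.Record(); // held via iterator's ref
//       // ... use pvRecord here, but don't hang on to it ...
//       iter.Increment(); // releases old reference, acquires the next
//   }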
//--------------------------------------------------------------------
// CLKRLinearHashTable
//
// A thread-safe linear hash (sub)table.
//--------------------------------------------------------------------

class IRTL_DLLEXP CLKRLinearHashTable
{
public:
    typedef LKR_TABLE_LOCK  TableLock;
    typedef LKR_BUCKET_LOCK BucketLock;

#ifdef LKR_DEPRECATED_ITERATORS
    class CIterator;
    friend class CLKRLinearHashTable::CIterator;
#endif // LKR_DEPRECATED_ITERATORS

#ifdef LKR_STL_ITERATORS
    friend class CLKRLinearHashTable_Iterator;
    typedef CLKRLinearHashTable_Iterator Iterator;
#endif // LKR_STL_ITERATORS

private:
    friend class CNodeClump;
    friend class CLKRHashTable;

#ifdef LKRHASH_INSTRUMENTATION
    // TODO
#endif // LKRHASH_INSTRUMENTATION

public:
    // aliases for convenience
    enum {
        MIN_DIRSIZE_BITS       = 2,  // min size for directory of segments
        MIN_DIRSIZE            = 1u << MIN_DIRSIZE_BITS,
        MAX_DIRSIZE_BITS       = 20, // max size for directory of segments
        MAX_DIRSIZE            = 1u << MAX_DIRSIZE_BITS,
        NODES_PER_CLUMP        = CNodeClump::NODES_PER_CLUMP,
        NODE_BEGIN             = CNodeClump::NODE_BEGIN,
        NODE_END               = CNodeClump::NODE_END,
        NODE_STEP              = CNodeClump::NODE_STEP,
        HASH_INVALID_SIGNATURE = CNodeClump::HASH_INVALID_SIGNATURE,
        NAME_SIZE              = 24, // CHARs, includes trailing '\0'
        MAX_LKR_SUBTABLES      = MAXIMUM_PROCESSORS, // 64 on Win64, 32 on W32
        INVALID_PARENT_INDEX   = 128,
    };

    typedef CNodeClump::NodeIndex NodeIndex;

private:
    //
    // Miscellaneous helper functions
    //

    LKRHASH_CLASS_INIT_DECLS(CLKRLinearHashTable);

    // Convert a hash signature to a bucket address
    inline DWORD        _BucketAddress(DWORD dwSignature) const;

    // See the Linear Hashing paper
    inline static DWORD _H0(DWORD dwSignature, DWORD dwBktAddrMask);
    inline DWORD        _H0(DWORD dwSignature) const;

    // See the Linear Hashing paper. Preserves one bit more than _H0.
    inline static DWORD _H1(DWORD dwSignature, DWORD dwBktAddrMask);
    inline DWORD        _H1(DWORD dwSignature) const;

    // In which segment within the directory does the bucketaddress lie?
    // (Return type must be lvalue so that it can be assigned to.)
    inline PSegment&    _Segment(DWORD dwBucketAddr) const;

    // Offset within the segment of the bucketaddress
    inline DWORD        _SegIndex(DWORD dwBucketAddr) const;

    // Convert a bucketaddress to a PBucket
    inline PBucket      _BucketFromAddress(DWORD dwBucketAddr) const;

    // Number of nodes in a CNodeClump
    inline NodeIndex    _NodesPerClump() const;

    // Index of first node in a CNodeClump
    inline NodeIndex    _NodeBegin() const;

    // Index of last node in a CNodeClump
    inline NodeIndex    _NodeEnd() const;

    // Advance from _NodeBegin() to _NodeEnd() by this increment
    inline NodeIndex    _NodeStep() const;

    // Use bucket locks or not?
    inline bool         _UseBucketLocking() const;

    // Move the expansion index forward by one.
    inline void         _IncrementExpansionIndex();

    // Move the expansion index back by one.
    inline void         _DecrementExpansionIndex();

    // Extract the key from a record
    inline const DWORD_PTR _ExtractKey(const void* pvRecord) const;

    // Hash the key
    inline DWORD        _CalcKeyHash(const DWORD_PTR pnKey) const;

    // Compare two keys for equality
    inline int          _CompareKeys(const DWORD_PTR pnKey1,
                                     const DWORD_PTR pnKey2) const;

    // AddRef or Release a record.
    inline LONG         _AddRefRecord(const void* pvRecord,
                                      LK_ADDREF_REASON lkar) const;

    // Used by _FindKey so that the thread won't deadlock if the user has
    // already explicitly called subtable->WriteLock().
    inline bool         _ReadOrWriteLock() const;
    inline void         _ReadOrWriteUnlock(bool fReadLocked) const;

    // Memory allocation wrappers to allow us to simulate allocation
    // failures during testing
    PSegment* const
    _AllocateSegmentDirectory(
        size_t n);

    bool
    _FreeSegmentDirectory();

    PNodeClump const
    _AllocateNodeClump() const;

    bool
    _FreeNodeClump(
        PNodeClump pnc) const;

    CSegment* const
    _AllocateSegment() const;

    bool
    _FreeSegment(
        CSegment* pseg) const;

#ifdef LOCK_INSTRUMENTATION
    static LONG sm_cTables;

    static const TCHAR*
    _LockName()
    {
        LONG l = ++sm_cTables;
        // possible race condition, but we don't care, as this is never
        // used in production code
        static TCHAR s_tszName[CLockStatistics::L_NAMELEN];
        wsprintf(s_tszName, _TEXT("LH%05x"), 0xFFFFF & l);
        return s_tszName;
    }

    // Statistics for the subtable lock
    CLockStatistics _LockStats() const
    { return m_Lock.Statistics(); }
#endif // LOCK_INSTRUMENTATION

private:
    // Fields are ordered so as to minimize the number of cache lines touched

    DWORD       m_dwSignature;    // debugging: id & corruption check

    // Put the table lock in the first cache line, far away from the other
    // volatile fields.
    mutable TableLock m_Lock;     // Lock on entire linear hash subtable

    const bool  m_fUseLocks;      // Must use locks to protect data
    bool        m_fSealed;        // no further updates allowed
    mutable LK_RETCODE m_lkrcState; // Internal state of subtable
    CHAR        m_szName[NAME_SIZE]; // an identifier for debugging

    // type-specific function pointers
    LKR_PFnExtractKey   m_pfnExtractKey;   // Extract key from record
    LKR_PFnCalcKeyHash  m_pfnCalcKeyHash;  // Calculate hash signature of key
    LKR_PFnCompareKeys  m_pfnCompareKeys;  // Compare two keys
    LKR_PFnAddRefRecord m_pfnAddRefRecord; // AddRef a record

    BYTE        m_MaxLoad;        // max load factor (average chain length)
    BYTE        m_nLevel;         // number of subtable doublings performed
    BYTE        m_lkts;           // "size" of subtable: small/medium/large
    BYTE        m_nSegBits;       // C{Small,Medium,Large}Segment::SEGBITS
    WORD        m_nSegSize;       // C{Small,Medium,Large}Segment::SEGSIZE
    WORD        m_nSegMask;       // C{Small,Medium,Large}Segment::SEGMASK
    DWORD       m_dwBktAddrMask0; // mask used for address calculation
    DWORD       m_dwBktAddrMask1; // used in _H1 calculation
    DWORD       m_cDirSegs;       // segment directory size: varies between
                                  // MIN_DIRSIZE and MAX_DIRSIZE
    PSegment*   m_paDirSegs;      // directory of subtable segments
    PSegment    m_aDirSegs[MIN_DIRSIZE]; // inlined directory, adequate
                                  // for many subtables

    // These three fields are fairly volatile, but tend to be adjusted
    // at the same time. Keep them well away from the TableLock.
    DWORD       m_iExpansionIdx;  // address of next bucket to be expanded
    DWORD       m_cRecords;       // number of records in the subtable
    DWORD       m_cActiveBuckets; // number of buckets in use (subtable size)

    const BYTE  m_nTableLockType;  // for debugging: LOCK_READERWRITERLOCK4
    const BYTE  m_nBucketLockType; // for debugging: e.g., LOCK_SPINLOCK
    WORD        m_wBucketLockSpins;// default spin count for bucket locks

    const CLKRHashTable* const m_phtParent; // Owning table. NULL => standalone
    const BYTE  m_iParentIndex;    // index within parent table
    const bool  m_fMultiKeys;      // Allow multiple identical keys?
    const bool  m_fNonPagedAllocs; // Use paged or NP pool in kernel
    BYTE        m_Dummy1;

    // Reserve some space for future debugging needs
    DWORD_PTR   m_pvReserved1;
    DWORD_PTR   m_pvReserved2;
    DWORD_PTR   m_pvReserved3;
    DWORD_PTR   m_pvReserved4;

#ifndef LKR_NO_GLOBAL_LIST
    CListEntry  m_leGlobalList;
    static CLockedDoubleList sm_llGlobalList; // All active CLKRLinearHashTables
#endif // !LKR_NO_GLOBAL_LIST

    DECLARE_ALLOC_STAT(SegDir);
    DECLARE_ALLOC_STAT(Segment);
    DECLARE_ALLOC_STAT(NodeClump);

    DECLARE_OP_STAT(InsertRecord);
    DECLARE_OP_STAT(FindKey);
    DECLARE_OP_STAT(FindRecord);
    DECLARE_OP_STAT(DeleteKey);
    DECLARE_OP_STAT(DeleteRecord);
    DECLARE_OP_STAT(FindKeyMultiRec);
    DECLARE_OP_STAT(DeleteKeyMultiRec);
    DECLARE_OP_STAT(Expand);
    DECLARE_OP_STAT(Contract);
    DECLARE_OP_STAT(LevelExpansion);
    DECLARE_OP_STAT(LevelContraction);
    DECLARE_OP_STAT(ApplyIf);
    DECLARE_OP_STAT(DeleteIf);

    // Non-trivial implementation functions
    void _InsertThisIntoGlobalList();
    void _RemoveThisFromGlobalList();

    LK_RETCODE _InsertRecord(
        const void*     pvRecord,
        const DWORD_PTR pnKey,
        const DWORD     dwSignature,
        bool            fOverwrite
#ifdef LKR_STL_ITERATORS
        , Iterator*     piterResult=NULL
#endif // LKR_STL_ITERATORS
        );

    LK_RETCODE _DeleteKey(
        const DWORD_PTR pnKey,
        const DWORD     dwSignature,
        const void**    ppvRecord,
        bool            fDeleteAllSame);

    LK_RETCODE _DeleteRecord(
        const void*     pvRecord,
        const DWORD     dwSignature);

    void _DeleteNode(
        PBucket const    pbkt,
        PNodeClump&      rpnc,
        PNodeClump&      rpncPrev,
        NodeIndex&       riNode,
        LK_ADDREF_REASON lkar);

    LK_RETCODE _FindKey(
        const DWORD_PTR pnKey,
        const DWORD     dwSignature,
        const void**    ppvRecord
#ifdef LKR_STL_ITERATORS
        , Iterator*     piterResult=NULL
#endif // LKR_STL_ITERATORS
        ) const;

    LK_RETCODE _FindRecord(
        const void*     pvRecord,
        const DWORD     dwSignature) const;

    LK_RETCODE _FindKeyMultipleRecords(
        const DWORD_PTR pnKey,
        const DWORD     dwSignature,
        size_t*         pcRecords,
        LKR_MULTIPLE_RECORDS** pplmr) const;

    LK_RETCODE _DeleteKeyMultipleRecords(
        const DWORD_PTR pnKey,
        const DWORD     dwSignature,
        size_t*         pcRecords,
        LKR_MULTIPLE_RECORDS** pplmr);

#ifdef LKR_APPLY_IF
    // Predicate functions
    static LK_PREDICATE WINAPI
    _PredTrue(const void* /*pvRecord*/, void* /*pvState*/)
    { return LKP_PERFORM; }

    DWORD _ApplyIf(
        LKR_PFnRecordPred   pfnPredicate,
        LKR_PFnRecordAction pfnAction,
        void*               pvState,
        LK_LOCKTYPE         lkl,
        LK_PREDICATE&       rlkp);

    DWORD _DeleteIf(
        LKR_PFnRecordPred   pfnPredicate,
        void*               pvState,
        LK_PREDICATE&       rlkp);
#endif // LKR_APPLY_IF

    // returns count of errors in compacted state => 0 is good
    int _IsBucketChainCompact(PBucket const pbkt) const;
    int _IsBucketChainMultiKeySorted(PBucket const pbkt) const;

    // helper functions
    void _Clear(bool fShrinkDirectory);

    LK_RETCODE _SetSegVars(LK_TABLESIZE lkts, DWORD cInitialBuckets);

    LK_RETCODE _Expand();
    LK_RETCODE _Contract();

    LK_RETCODE _SplitBucketChain(
        PNodeClump  pncOldTarget,
        PNodeClump  pncNewTarget,
        const DWORD dwBktAddrMask,
        const DWORD dwNewBkt,
        PNodeClump  pncFreeList);

    LK_RETCODE _AppendBucketChain(
        PBucket const pbktNewTarget,
        CNodeClump&   rncOldFirst,
        PNodeClump    pncFreeList);

    LK_RETCODE _MergeSortBucketChains(
        PBucket const pbktNewTarget,
        CNodeClump&   rncOldFirst,
        PNodeClump    pncFreeList);

    // Private copy ctor and op= to prevent the compiler synthesizing them.
    // TODO: implement these properly; they could be useful.
    CLKRLinearHashTable(const CLKRLinearHashTable&);
    CLKRLinearHashTable& operator=(const CLKRLinearHashTable&);

private:
    // This ctor is used by CLKRHashTable, the parent table
    CLKRLinearHashTable(
        LPCSTR              pszClassName,   // Identifies subtable for debugging
        LKR_PFnExtractKey   pfnExtractKey,  // Extract key from record
        LKR_PFnCalcKeyHash  pfnCalcKeyHash, // Calculate hash signature of key
        LKR_PFnCompareKeys  pfnCompareKeys, // Compare two keys
        LKR_PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
        unsigned            maxload,        // Upperbound on avg chain length
        DWORD               initsize,       // Initial size of hash subtable
        CLKRHashTable*      phtParent,      // Owning table.
        int                 iParentIndex,   // index within parent table
        bool                fMultiKeys,     // Allow multiple identical keys?
        bool                fUseLocks,      // Must use locks
        bool                fNonPagedAllocs // use paged or NP pool in kernel
        );

    // Does all the common work of the constructors
    LK_RETCODE
    _Initialize(
        LKR_PFnExtractKey   pfnExtractKey,
        LKR_PFnCalcKeyHash  pfnCalcKeyHash,
        LKR_PFnCompareKeys  pfnCompareKeys,
        LKR_PFnAddRefRecord pfnAddRefRecord,
        LPCSTR              pszClassName,
        unsigned            maxload,
        DWORD               initsize);

public:
    CLKRLinearHashTable(
        LPCSTR              pszClassName,   // Identifies subtable for debugging
        LKR_PFnExtractKey   pfnExtractKey,  // Extract key from record
        LKR_PFnCalcKeyHash  pfnCalcKeyHash, // Calculate hash signature of key
        LKR_PFnCompareKeys  pfnCompareKeys, // Compare two keys
        LKR_PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
        unsigned maxload=LK_DFLT_MAXLOAD,   // Upperbound on average chain length
        DWORD    initsize=LK_DFLT_INITSIZE, // Initial size of hash subtable.
        DWORD    num_subtbls=LK_DFLT_NUM_SUBTBLS, // for signature compatibility
                                                  // with CLKRHashTable
        bool     fMultiKeys=false,          // Allow multiple identical keys?
        bool     fUseLocks=true             // Must use locks
#ifdef LKRHASH_KERNEL_MODE
        , bool   fNonPagedAllocs=true       // use paged or NP pool
#endif
        );

    ~CLKRLinearHashTable();

    static const TCHAR* ClassName()
    {return _TEXT("CLKRLinearHashTable");}

    unsigned NumSubTables() const  {return 1;}

    bool MultiKeys() const
    {
        return m_fMultiKeys;
    }

    bool UsingLocks() const
    {
        return m_fUseLocks;
    }

#ifdef LKRHASH_KERNEL_MODE
    bool NonPagedAllocs() const
    {
        return m_fNonPagedAllocs;
    }
#endif

    static LK_TABLESIZE NumSubTables(DWORD& rinitsize, DWORD& rnum_subtbls);

    // Insert a new record into the hash subtable.
    // Returns LK_SUCCESS if all OK, LK_KEY_EXISTS if the same key already
    // exists (unless fOverwrite), LK_ALLOC_FAIL if out of space,
    // or LK_BAD_RECORD for a bad record.
    LK_RETCODE InsertRecord(
        const void* pvRecord,
        bool        fOverwrite=false)
    {
        if (!IsUsable())
            return m_lkrcState;

#ifndef LKR_ALLOW_NULL_RECORDS
        if (pvRecord == NULL)
            return LK_BAD_RECORD;
#endif

        const DWORD_PTR pnKey = _ExtractKey(pvRecord);
        return _InsertRecord(pvRecord, pnKey, _CalcKeyHash(pnKey), fOverwrite);
    }

    // Delete the record with the given key.
    // Returns LK_SUCCESS if all OK, or LK_NO_SUCH_KEY if not found.
    LK_RETCODE DeleteKey(
        const DWORD_PTR pnKey,
        const void**    ppvRecord=NULL,
        bool            fDeleteAllSame=false)
    {
        if (!IsUsable())
            return m_lkrcState;

        if (ppvRecord != NULL)
            *ppvRecord = NULL;

        return _DeleteKey(pnKey, _CalcKeyHash(pnKey),
                          ppvRecord, fDeleteAllSame);
    }

    // Delete a record from the subtable, if present.
    // Returns LK_SUCCESS if all OK, or LK_NO_SUCH_KEY if not found.
    LK_RETCODE DeleteRecord(
        const void* pvRecord)
    {
        if (!IsUsable())
            return m_lkrcState;

#ifndef LKR_ALLOW_NULL_RECORDS
        if (pvRecord == NULL)
            return LK_BAD_RECORD;
#endif

        return _DeleteRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)));
    }

    // Find the record with the given key.
    // Returns: LK_SUCCESS, if record found (record is returned in *ppvRecord)
    //          LK_BAD_RECORD, if ppvRecord is invalid
    //          LK_NO_SUCH_KEY, if no record with the given key was found
    //          LK_UNUSABLE, if hash subtable not in usable state
    // Note: the record is AddRef'd. You must decrement the reference
    // count when you are finished with the record (if you're implementing
    // refcounting semantics).
    LK_RETCODE FindKey(
        const DWORD_PTR pnKey,
        const void**    ppvRecord) const
    {
        if (!IsUsable())
            return m_lkrcState;

        if (ppvRecord == NULL)
            return LK_BAD_RECORD;

        return _FindKey(pnKey, _CalcKeyHash(pnKey), ppvRecord);
    }

    // Sees if the record is contained in the subtable.
    // Returns: LK_SUCCESS, if record found
    //          LK_BAD_RECORD, if pvRecord is invalid
    //          LK_NO_SUCH_KEY, if record is not in the subtable
    //          LK_UNUSABLE, if hash subtable not in usable state
    // Note: the record is *not* AddRef'd.
    LK_RETCODE FindRecord(
        const void* pvRecord) const
    {
        if (!IsUsable())
            return m_lkrcState;

#ifndef LKR_ALLOW_NULL_RECORDS
        if (pvRecord == NULL)
            return LK_BAD_RECORD;
#endif

        return _FindRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)));
    }

    LK_RETCODE FindKeyMultipleRecords(
        const DWORD_PTR pnKey,
        size_t*         pcRecords,
        LKR_MULTIPLE_RECORDS** pplmr=NULL) const
    {
        if (!IsUsable())
            return m_lkrcState;

        if (pcRecords == NULL)
            return LK_BAD_PARAMETERS;

        return _FindKeyMultipleRecords(pnKey, _CalcKeyHash(pnKey),
                                       pcRecords, pplmr);
    }

    LK_RETCODE DeleteKeyMultipleRecords(
        const DWORD_PTR pnKey,
        size_t*         pcRecords,
        LKR_MULTIPLE_RECORDS** pplmr=NULL)
    {
        if (!IsUsable())
            return m_lkrcState;

        if (pcRecords == NULL)
            return LK_BAD_PARAMETERS;

        return _DeleteKeyMultipleRecords(pnKey, _CalcKeyHash(pnKey),
                                         pcRecords, pplmr);
    }

    static LK_RETCODE FreeMultipleRecords(LKR_MULTIPLE_RECORDS* plmr);

#ifdef LKR_APPLY_IF
    // Walk the hash subtable, applying pfnAction to all records.
    // Locks the whole subtable for the duration with either a (possibly
    // shared) readlock or a writelock, according to lkl.
    // Loop is aborted if pfnAction returns LKA_ABORT.
    // Returns the number of successful applications.
    DWORD Apply(
        LKR_PFnRecordAction pfnAction,
        void*               pvState=NULL,
        LK_LOCKTYPE         lkl=LKL_READLOCK);

    // Walk the hash subtable, applying pfnAction to any records that match
    // pfnPredicate. Locks the whole subtable for the duration with either
    // a (possibly shared) readlock or a writelock, according to lkl.
    // Loop is aborted if pfnAction returns LKA_ABORT.
    // Returns the number of successful applications.
    DWORD ApplyIf(
        LKR_PFnRecordPred   pfnPredicate,
        LKR_PFnRecordAction pfnAction,
        void*               pvState=NULL,
        LK_LOCKTYPE         lkl=LKL_READLOCK);

    // Delete any records that match pfnPredicate.
    // Locks the subtable for the duration with a writelock.
    // Returns the number of deletions.
    //
    // Do *not* walk the hash table by hand with an iterator and call
    // DeleteKey. The iterator will end up pointing to garbage.
    DWORD DeleteIf(
        LKR_PFnRecordPred pfnPredicate,
        void*             pvState=NULL);
#endif // LKR_APPLY_IF
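    // A hedged usage sketch of DeleteIf. The predicate callback shape is
    // declared in LKR-hash.h; CMyRecord, Expired(), and the exact
    // spelling of the "skip" predicate value are assumptions made for
    // illustration:
    //
    //   static LK_PREDICATE WINAPI
    //   IsExpired(const void* pvRecord, void* /*pvState*/)
    //   {
    //       const CMyRecord* pRec = static_cast<const CMyRecord*>(pvRecord);
    //       return pRec->Expired() ? LKP_PERFORM : LKP_NO_ACTION;
    //   }
    //
    //   // Deletes every expired record under a single writelock:
    //   DWORD cDeleted = lht.DeleteIf(IsExpired, NULL);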
    // Check subtable for consistency. Returns 0 if okay, or the number of
    // errors otherwise.
    int CheckTable() const;

    // Remove all data from the subtable
    void Clear()
    {
        WriteLock();
        _Clear(true);
        WriteUnlock();
    }

    // Number of elements in the subtable
    DWORD Size() const
    { return m_cRecords; }

    // Maximum possible number of elements in the subtable
    DWORD MaxSize() const
    { return static_cast<DWORD>(m_MaxLoad * MAX_DIRSIZE * m_nSegSize); }

    // Get hash subtable statistics
    CLKRHashTableStats GetStatistics() const;

    // Is the hash subtable usable?
    bool IsUsable() const
    { return (m_lkrcState == LK_SUCCESS); }

    // Is the hash subtable consistent and correct?
    bool IsValid() const
    {
        STATIC_ASSERT(((MIN_DIRSIZE & (MIN_DIRSIZE-1)) == 0) // == (1 << N)
                      &&  ((1 << 2) <= MIN_DIRSIZE)
                      &&  (MIN_DIRSIZE < MAX_DIRSIZE)
                      &&  ((MAX_DIRSIZE & (MAX_DIRSIZE-1)) == 0)
                      &&  (MAX_DIRSIZE <= (1 << 30)));

        bool f = (m_lkrcState == LK_SUCCESS // serious internal failure?
                  &&  m_paDirSegs != NULL
                  &&  MIN_DIRSIZE <= m_cDirSegs  &&  m_cDirSegs <= MAX_DIRSIZE
                  &&  (m_cDirSegs & (m_cDirSegs-1)) == 0
                  &&  m_pfnExtractKey != NULL
                  &&  m_pfnCalcKeyHash != NULL
                  &&  m_pfnCompareKeys != NULL
                  &&  m_pfnAddRefRecord != NULL
                  &&  m_cActiveBuckets > 0
                  &&  ValidSignature()
                  );

        if (!f)
            m_lkrcState = LK_UNUSABLE;

        return f;
    }

    // Set the spin count on the subtable lock
    void SetTableLockSpinCount(WORD wSpins);

    // Get the spin count on the subtable lock
    WORD GetTableLockSpinCount() const;

    // Set/Get the spin count on the bucket locks
    void SetBucketLockSpinCount(WORD wSpins);
    WORD GetBucketLockSpinCount() const;

    enum {
        SIGNATURE      = (('L') | ('K' << 8) | ('L' << 16) | ('H' << 24)),
        SIGNATURE_FREE = (('L') | ('K' << 8) | ('L' << 16) | ('x' << 24)),
    };
  1427. bool
  1428. ValidSignature() const
  1429. { return m_dwSignature == SIGNATURE;}
  1430. #ifdef LKR_EXPOSED_TABLE_LOCK
  1431. public:
  1432. #else // !LKR_EXPOSED_TABLE_LOCK
  1433. protected:
  1434. #endif // !LKR_EXPOSED_TABLE_LOCK
  1435. //
  1436. // Lock manipulators
  1437. //
  1438. // Lock the subtable (exclusively) for writing
  1439. void WriteLock()
  1440. { m_Lock.WriteLock(); }
  1441. // Lock the subtable (possibly shared) for reading
  1442. void ReadLock() const
  1443. { m_Lock.ReadLock(); }
  1444. // Unlock the subtable for writing
  1445. void WriteUnlock()
  1446. { m_Lock.WriteUnlock(); }
  1447. // Unlock the subtable for reading
  1448. void ReadUnlock() const
  1449. { m_Lock.ReadUnlock(); }
  1450. // Is the subtable already locked for writing?
  1451. bool IsWriteLocked() const
  1452. { return m_Lock.IsWriteLocked(); }
  1453. // Is the subtable already locked for reading?
  1454. bool IsReadLocked() const
  1455. { return m_Lock.IsReadLocked(); }
  1456. // Is the subtable unlocked for writing?
  1457. bool IsWriteUnlocked() const
  1458. { return m_Lock.IsWriteUnlocked(); }
  1459. // Is the subtable unlocked for reading?
  1460. bool IsReadUnlocked() const
  1461. { return m_Lock.IsReadUnlocked(); }
  1462. // Convert the read lock to a write lock
  1463. void ConvertSharedToExclusive()
  1464. { m_Lock.ConvertSharedToExclusive(); }
  1465. // Convert the write lock to a read lock
  1466. void ConvertExclusiveToShared() const
  1467. { m_Lock.ConvertExclusiveToShared(); }
  1468. #ifdef LKRHASH_KERNEL_MODE
  1469. LKRHASH_ALLOCATOR_DEFINITIONS(CLKRLinearHashTable);
  1470. #endif // LKRHASH_KERNEL_MODE
  1471. #ifdef LKR_DEPRECATED_ITERATORS
  1472. // These iterators are deprecated. Use the STL-style iterators instead.
  1473. public:
  1474. // Iterators can be used to walk the subtable. To ensure a consistent
  1475. // view of the data, the iterator locks the whole subtable. This can
  1476. // have a negative effect upon performance, because no other thread
  1477. // can do anything with the subtable. Use with care.
  1478. //
1479. // Do not walk the subtable with an iterator while calling DeleteKey,
  1480. // as the iterator will end up pointing to garbage.
  1481. //
  1482. // Use Apply, ApplyIf, or DeleteIf instead of iterators to safely
1483. // walk the table. Or use the STL-style iterators.
  1484. //
  1485. // Note that iterators acquire a reference to the record pointed to
  1486. // and release that reference as soon as the iterator is incremented.
  1487. // In other words, this code is safe:
  1488. // lkrc = ht.IncrementIterator(&iter);
  1489. // // assume lkrc == LK_SUCCESS for the sake of this example
  1490. // CMyHashTable::Record* pRec = iter.Record();
  1491. // Foo(pRec); // uses pRec but doesn't hang on to it
  1492. // lkrc = ht.IncrementIterator(&iter);
  1493. //
  1494. // But this code is not safe because pRec is used out of the scope of
  1495. // the iterator that provided it:
  1496. // lkrc = ht.IncrementIterator(&iter);
  1497. // CMyHashTable::Record* pRec = iter.Record();
  1498. // // Broken code: Should have called
  1499. // // ht.AddRefRecord(pRec, LKAR_EXPLICIT_ACQUIRE) here
  1500. // lkrc = ht.IncrementIterator(&iter);
  1501. // Foo(pRec); // Unsafe: because no longer have a valid reference
  1502. //
  1503. // If the record has no reference-counting semantics, then you can
  1504. // ignore the above remarks about scope.
  1505. class CIterator
  1506. {
  1507. protected:
  1508. friend class CLKRLinearHashTable;
  1509. CLKRLinearHashTable* m_plht; // which linear hash subtable?
  1510. DWORD m_dwBucketAddr; // bucket index
  1511. PNodeClump m_pnc; // a CNodeClump in bucket
  1512. NodeIndex m_iNode; // offset within m_pnc
  1513. LK_LOCKTYPE m_lkl; // readlock or writelock?
  1514. private:
  1515. // Private copy ctor and op= to prevent compiler synthesizing them.
  1516. // TODO: implement these properly; they could be useful.
  1517. CIterator(const CIterator&);
  1518. CIterator& operator=(const CIterator&);
  1519. public:
  1520. CIterator(
  1521. LK_LOCKTYPE lkl=LKL_WRITELOCK)
  1522. : m_plht(NULL),
  1523. m_dwBucketAddr(0),
  1524. m_pnc(NULL),
  1525. m_iNode(-1),
  1526. m_lkl(lkl)
  1527. {}
  1528. // Return the record associated with this iterator
  1529. const void* Record() const
  1530. {
  1531. IRTLASSERT(IsValid());
  1532. return ((m_pnc != NULL
  1533. && m_iNode >= 0
  1534. && m_iNode < CLKRLinearHashTable::NODES_PER_CLUMP)
  1535. ? m_pnc->m_pvNode[m_iNode]
  1536. : NULL);
  1537. }
  1538. // Return the key associated with this iterator
  1539. const DWORD_PTR Key() const
  1540. {
  1541. IRTLASSERT(m_plht != NULL);
  1542. const void* pRec = Record();
  1543. return ((m_plht != NULL)
  1544. ? m_plht->_ExtractKey(pRec)
  1545. : NULL);
  1546. }
  1547. bool IsValid() const
  1548. {
  1549. return ((m_plht != NULL)
  1550. && (m_pnc != NULL)
  1551. && (0 <= m_iNode
  1552. && m_iNode < CLKRLinearHashTable::NODES_PER_CLUMP)
  1553. && (!m_pnc->IsEmptyNode(m_iNode)));
  1554. }
  1555. // Delete the record that the iterator points to. Does an implicit
  1556. // IncrementIterator after deletion.
  1557. LK_RETCODE DeleteRecord();
  1558. // Change the record that the iterator points to. The new record
  1559. // must have the same key as the old one.
  1560. LK_RETCODE ChangeRecord(const void* pNewRec);
  1561. }; // class CIterator
  1562. // Const iterators for readonly access. You must use these with
  1563. // const CLKRLinearHashTables.
  1564. class CConstIterator : public CIterator
  1565. {
  1566. private:
  1567. // Private, unimplemented copy ctor and op= to prevent
  1568. // compiler synthesizing them.
  1569. CConstIterator(const CConstIterator&);
  1570. CConstIterator& operator=(const CConstIterator&);
  1571. public:
  1572. CConstIterator()
  1573. : CIterator(LKL_READLOCK)
  1574. {}
  1575. }; // class CConstIterator
  1576. private:
  1577. // The public APIs lock the subtable. The private ones, which are used
  1578. // directly by CLKRHashTable, don't.
  1579. LK_RETCODE _InitializeIterator(CIterator* piter);
  1580. LK_RETCODE _CloseIterator(CIterator* piter);
  1581. public:
  1582. // Initialize the iterator to point to the first item in the hash subtable
  1583. // Returns LK_SUCCESS, LK_NO_MORE_ELEMENTS, or LK_BAD_ITERATOR.
  1584. LK_RETCODE InitializeIterator(CIterator* piter)
  1585. {
  1586. IRTLASSERT(piter != NULL && piter->m_plht == NULL);
  1587. if (piter == NULL || piter->m_plht != NULL)
  1588. return LK_BAD_ITERATOR;
  1589. if (piter->m_lkl == LKL_WRITELOCK)
  1590. WriteLock();
  1591. else
  1592. ReadLock();
  1593. return _InitializeIterator(piter);
  1594. }
  1595. // The const iterator version
  1596. LK_RETCODE InitializeIterator(CConstIterator* piter) const
  1597. {
  1598. IRTLASSERT(piter != NULL && piter->m_plht == NULL);
  1599. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  1600. if (piter == NULL || piter->m_plht != NULL
  1601. || piter->m_lkl == LKL_WRITELOCK)
  1602. return LK_BAD_ITERATOR;
  1603. ReadLock();
  1604. return const_cast<CLKRLinearHashTable*>(this)
  1605. ->_InitializeIterator(static_cast<CIterator*>(piter));
  1606. }
  1607. // Move the iterator on to the next item in the subtable.
  1608. // Returns LK_SUCCESS, LK_NO_MORE_ELEMENTS, or LK_BAD_ITERATOR.
  1609. LK_RETCODE IncrementIterator(CIterator* piter);
  1610. LK_RETCODE IncrementIterator(CConstIterator* piter) const
  1611. {
  1612. IRTLASSERT(piter != NULL && piter->m_plht == this);
  1613. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  1614. if (piter == NULL || piter->m_plht != this
  1615. || piter->m_lkl == LKL_WRITELOCK)
  1616. return LK_BAD_ITERATOR;
  1617. return const_cast<CLKRLinearHashTable*>(this)
  1618. ->IncrementIterator(static_cast<CIterator*>(piter));
  1619. }
  1620. // Close the iterator.
  1621. LK_RETCODE CloseIterator(CIterator* piter)
  1622. {
  1623. IRTLASSERT(piter != NULL && piter->m_plht == this);
  1624. if (piter == NULL || piter->m_plht != this)
  1625. return LK_BAD_ITERATOR;
  1626. _CloseIterator(piter);
  1627. if (piter->m_lkl == LKL_WRITELOCK)
  1628. WriteUnlock();
  1629. else
  1630. ReadUnlock();
  1631. return LK_SUCCESS;
1632. }
  1633. // Close the CConstIterator
  1634. LK_RETCODE CloseIterator(CConstIterator* piter) const
  1635. {
  1636. IRTLASSERT(piter != NULL && piter->m_plht == this);
  1637. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  1638. if (piter == NULL || piter->m_plht != this
  1639. || piter->m_lkl == LKL_WRITELOCK)
  1640. return LK_BAD_ITERATOR;
  1641. const_cast<CLKRLinearHashTable*>(this)
  1642. ->_CloseIterator(static_cast<CIterator*>(piter));
  1643. ReadUnlock();
  1644. return LK_SUCCESS;
1645. }
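// For illustration only: a hedged sketch of the intended protocol for
// these deprecated iterators. Initialize/Increment/Close must be
// strictly paired, because CloseIterator releases the subtable lock
// (`Foo' is a hypothetical consumer, as in the examples above).
//
// CLKRLinearHashTable::CIterator iter;
// LK_RETCODE lkrc;
// for (lkrc = lht.InitializeIterator(&iter);
//      lkrc == LK_SUCCESS;
//      lkrc = lht.IncrementIterator(&iter))
// {
//     Foo(iter.Record());   // use, but do not retain, the record
// }
// lht.CloseIterator(&iter); // required even after LK_NO_MORE_ELEMENTS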
  1646. #endif // LKR_DEPRECATED_ITERATORS
  1647. #ifdef LKR_STL_ITERATORS
  1648. private:
  1649. bool _Erase(Iterator& riter, DWORD dwSignature);
  1650. bool _Find(DWORD_PTR pnKey, DWORD dwSignature,
  1651. Iterator& riterResult);
  1652. bool _IsValidIterator(const Iterator& riter) const
  1653. {
  1654. LKR_ITER_TRACE(_TEXT(" LKLH:_IsValidIterator(%p)\n"), &riter);
  1655. bool fValid = ((riter.m_plht == this)
  1656. && (riter.m_dwBucketAddr < m_cActiveBuckets)
  1657. && riter.IsValid());
  1658. IRTLASSERT(fValid);
  1659. return fValid;
  1660. }
  1661. public:
  1662. // Return iterator pointing to first item in subtable
  1663. Iterator
  1664. Begin();
  1665. // Return a one-past-the-end iterator. Always empty.
  1666. Iterator
  1667. End() const
  1668. {
  1669. LKR_ITER_TRACE(_TEXT(" LKLH::End\n"));
  1670. return Iterator();
  1671. }
  1672. // Insert a record
  1673. // Returns `true' if successful; iterResult points to that record
  1674. // Returns `false' otherwise; iterResult == End()
  1675. bool
  1676. Insert(
  1677. /* in */ const void* pvRecord,
  1678. /* out */ Iterator& riterResult,
  1679. /* in */ bool fOverwrite=false);
  1680. // Erase the record pointed to by the iterator; adjust the iterator
  1681. // to point to the next record. Returns `true' if successful.
  1682. bool
  1683. Erase(
  1684. /* in,out */ Iterator& riter);
  1685. // Erase the records in the range [riterFirst, riterLast).
  1686. // Returns `true' if successful. riterFirst points to riterLast on return.
  1687. bool
  1688. Erase(
  1689. /*in*/ Iterator& riterFirst,
  1690. /*in*/ Iterator& riterLast);
  1691. // Find the (first) record that has its key == pnKey.
  1692. // If successful, returns `true' and iterator points to (first) record.
  1693. // If fails, returns `false' and iterator == End()
  1694. bool
  1695. Find(
  1696. /* in */ DWORD_PTR pnKey,
  1697. /* out */ Iterator& riterResult);
  1698. // Find the range of records that have their keys == pnKey.
  1699. // If successful, returns `true', iterFirst points to first record,
  1700. // and iterLast points to one-beyond-the last such record.
  1701. // If fails, returns `false' and both iterators == End().
  1702. // Primarily useful when m_fMultiKey == true
  1703. bool
  1704. EqualRange(
  1705. /* in */ DWORD_PTR pnKey,
  1706. /* out */ Iterator& riterFirst, // inclusive
  1707. /* out */ Iterator& riterLast); // exclusive
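// For illustration only: a sketch of consuming an EqualRange result,
// relying on the half-open [riterFirst, riterLast) semantics documented
// above; assumes Iterator's Increment() is callable here, and `Foo' is
// a hypothetical consumer.
//
// Iterator itFirst, itLast;
// if (lht.EqualRange(pnKey, itFirst, itLast))
// {
//     for ( ; itFirst != itLast; itFirst.Increment())
//         Foo(itFirst.Record());
// }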
  1708. #endif // LKR_STL_ITERATORS
  1709. }; // class CLKRLinearHashTable
  1710. #ifdef LKR_STL_ITERATORS
  1711. // These functions have to be defined after CLKRLinearHashTable
  1712. inline void
  1713. CLKRLinearHashTable_Iterator::_AddRef(
  1714. LK_ADDREF_REASON lkar) const
  1715. {
1716. // TODO: should the iterator call _AddRefRecord at all?
  1717. if (m_plht != NULL && m_iNode != NODE_BEGIN - NODE_STEP)
  1718. {
  1719. IRTLASSERT((0 <= m_iNode && m_iNode < NODES_PER_CLUMP)
  1720. && (unsigned) m_iNode < NODES_PER_CLUMP
  1721. && m_pnc != NULL
  1722. && (lkar < 0 || lkar > 0)
  1723. );
  1724. const void* pvRecord = m_pnc->m_pvNode[m_iNode];
  1725. #ifndef LKR_ALLOW_NULL_RECORDS
  1726. IRTLASSERT(pvRecord != NULL);
  1727. #endif
  1728. LKR_ITER_TRACE(_TEXT(" LKLH::AddRef, this=%p, Rec=%p\n"),
  1729. this, pvRecord);
  1730. LONG cRefs = m_plht->_AddRefRecord(pvRecord, lkar);
  1731. UNREFERENCED_PARAMETER(cRefs);
  1732. IRTLASSERT(cRefs > 0);
  1733. }
  1734. } // CLKRLinearHashTable_Iterator::_AddRef
  1735. inline const DWORD_PTR
  1736. CLKRLinearHashTable_Iterator::Key() const
  1737. {
  1738. IRTLASSERT(IsValid());
  1739. return m_plht->_ExtractKey(m_pnc->m_pvNode[m_iNode]);
  1740. } // CLKRLinearHashTable_Iterator::Key
  1741. #endif // LKR_STL_ITERATORS
  1742. //--------------------------------------------------------------------
  1743. // CLKRHashTable
  1744. //
  1745. // To improve concurrency, a hash table is divided into a number of
  1746. // (independent) subtables. Each subtable is a linear hash table. The
  1747. // number of subtables is defined when the outer table is created and remains
  1748. // fixed thereafter. Records are assigned to subtables based on their
  1749. // hashed key.
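// As an illustrative sketch only (the real dispatch lives in _SubTable's
// definition, not reproduced here): the key is hashed once, and the
// resulting signature selects a subtable, e.g. something like
//     SubTable* pst = m_palhtDir[dwSignature % m_cSubTables];
// so a given key always maps to the same subtable.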
  1750. //
  1751. // For small or low-contention hashtables, you can bypass this
  1752. // thin wrapper and use CLKRLinearHashTable directly. The methods are
1753. // documented in the declarations for CLKRLinearHashTable (above).
  1754. //--------------------------------------------------------------------
  1755. class IRTL_DLLEXP CLKRHashTable
  1756. {
  1757. private:
  1758. typedef CLKRLinearHashTable SubTable;
  1759. public:
  1760. typedef SubTable::TableLock TableLock;
  1761. typedef SubTable::BucketLock BucketLock;
  1762. #ifdef LKR_DEPRECATED_ITERATORS
  1763. class CIterator;
  1764. friend class CLKRHashTable::CIterator;
  1765. #endif // LKR_DEPRECATED_ITERATORS
  1766. #ifdef LKR_STL_ITERATORS
  1767. friend class CLKRHashTable_Iterator;
  1768. typedef CLKRHashTable_Iterator Iterator;
  1769. #endif // LKR_STL_ITERATORS
  1770. friend class CLKRLinearHashTable;
  1771. friend int ::LKR_Initialize(DWORD dwInitFlags);
  1772. friend void ::LKR_Terminate();
  1773. // aliases for convenience
  1774. enum {
  1775. NAME_SIZE = SubTable::NAME_SIZE,
  1776. HASH_INVALID_SIGNATURE = SubTable::HASH_INVALID_SIGNATURE,
  1777. NODES_PER_CLUMP = SubTable::NODES_PER_CLUMP,
  1778. MAX_LKR_SUBTABLES = SubTable::MAX_LKR_SUBTABLES,
  1779. INVALID_PARENT_INDEX = SubTable::INVALID_PARENT_INDEX,
  1780. };
  1781. private:
  1782. // Hash table parameters
  1783. DWORD m_dwSignature; // debugging: id & corruption check
  1784. CHAR m_szName[NAME_SIZE]; // an identifier for debugging
  1785. mutable LK_RETCODE m_lkrcState; // Internal state of table
  1786. LKR_PFnExtractKey m_pfnExtractKey;
  1787. LKR_PFnCalcKeyHash m_pfnCalcKeyHash;
  1788. DWORD m_cSubTables; // number of subtables
  1789. int m_nSubTableMask;
  1790. SubTable* m_palhtDir[MAX_LKR_SUBTABLES]; // array of subtables
  1791. #ifndef LKR_NO_GLOBAL_LIST
  1792. CListEntry m_leGlobalList;
  1793. static CLockedDoubleList sm_llGlobalList; // All active CLKRHashTables
  1794. #endif // !LKR_NO_GLOBAL_LIST
  1795. DECLARE_ALLOC_STAT(SubTable);
  1796. LKRHASH_GLOBAL_LOCK_DECLARATIONS();
  1797. LKRHASH_CLASS_INIT_DECLS(CLKRHashTable);
  1798. private:
  1799. inline void _InsertThisIntoGlobalList();
  1800. inline void _RemoveThisFromGlobalList();
  1801. // Private copy ctor and op= to prevent compiler synthesizing them.
  1802. // TODO: implement these properly; they could be useful.
  1803. CLKRHashTable(const CLKRHashTable&);
  1804. CLKRHashTable& operator=(const CLKRHashTable&);
  1805. // Extract the key from the record
  1806. inline const DWORD_PTR _ExtractKey(const void* pvRecord) const;
  1807. // Hash the key
  1808. inline DWORD _CalcKeyHash(const DWORD_PTR pnKey) const;
  1809. // Use the key's hash signature to multiplex into a subtable
  1810. inline SubTable* const _SubTable(DWORD dwSignature) const;
  1811. // Find the index of pst within the subtable array
  1812. inline int _SubTableIndex(SubTable* pst) const;
  1813. // Memory allocation wrappers to allow us to simulate allocation
  1814. // failures during testing
  1815. SubTable* const
  1816. _AllocateSubTable(
  1817. LPCSTR pszClassName, // Identifies table for debugging
  1818. LKR_PFnExtractKey pfnExtractKey, // Extract key from record
  1819. LKR_PFnCalcKeyHash pfnCalcKeyHash, // Calculate hash signature of key
  1820. LKR_PFnCompareKeys pfnCompareKeys, // Compare two keys
  1821. LKR_PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
  1822. unsigned maxload, // Upperbound on avg chain length
  1823. DWORD initsize, // Initial size of hash table.
  1824. CLKRHashTable* phtParent, // Owning table.
  1825. int iParentIndex, // index within parent table
  1826. bool fMultiKeys, // Allow multiple identical keys?
  1827. bool fUseLocks, // Must use locks
  1828. bool fNonPagedAllocs // use paged or NP pool in kernel
  1829. ) const;
  1830. bool
  1831. _FreeSubTable(
  1832. SubTable* plht) const;
  1833. public:
  1834. CLKRHashTable(
  1835. LPCSTR pszClassName, // Identifies table for debugging
  1836. LKR_PFnExtractKey pfnExtractKey, // Extract key from record
  1837. LKR_PFnCalcKeyHash pfnCalcKeyHash, // Calculate hash signature of key
  1838. LKR_PFnCompareKeys pfnCompareKeys, // Compare two keys
  1839. LKR_PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
  1840. unsigned maxload=LK_DFLT_MAXLOAD, // bound on avg chain length
  1841. DWORD initsize=LK_DFLT_INITSIZE,// Initial size of hash table.
  1842. DWORD num_subtbls=LK_DFLT_NUM_SUBTBLS, // #subordinate hash tables.
  1843. bool fMultiKeys=false,// Allow multiple identical keys?
  1844. bool fUseLocks=true // Must use locks
  1845. #ifdef LKRHASH_KERNEL_MODE
  1846. , bool fNonPagedAllocs=true // use paged or NP pool
  1847. #endif
  1848. );
  1849. ~CLKRHashTable();
  1850. static const TCHAR* ClassName()
  1851. {return _TEXT("CLKRHashTable");}
  1852. unsigned NumSubTables() const {return m_cSubTables;}
  1853. bool MultiKeys() const;
  1854. #ifdef LKRHASH_KERNEL_MODE
  1855. bool NonPagedAllocs() const;
  1856. #endif
  1857. static LK_TABLESIZE NumSubTables(DWORD& rinitsize, DWORD& rnum_subtbls);
  1858. // Thin wrappers for the corresponding methods in CLKRLinearHashTable
  1859. LK_RETCODE InsertRecord(
  1860. const void* pvRecord,
  1861. bool fOverwrite=false);
  1862. LK_RETCODE DeleteKey(
  1863. const DWORD_PTR pnKey,
  1864. const void** ppvRecord=NULL,
  1865. bool fDeleteAllSame=false);
  1866. LK_RETCODE DeleteRecord(
  1867. const void* pvRecord);
  1868. LK_RETCODE FindKey(
  1869. const DWORD_PTR pnKey,
  1870. const void** ppvRecord) const;
  1871. LK_RETCODE FindRecord(
  1872. const void* pvRecord) const;
  1873. LK_RETCODE FindKeyMultipleRecords(
  1874. const DWORD_PTR pnKey,
  1875. size_t* pcRecords,
  1876. LKR_MULTIPLE_RECORDS** pplmr=NULL) const;
  1877. LK_RETCODE DeleteKeyMultipleRecords(
  1878. const DWORD_PTR pnKey,
  1879. size_t* pcRecords,
  1880. LKR_MULTIPLE_RECORDS** pplmr=NULL);
  1881. static LK_RETCODE FreeMultipleRecords(
  1882. LKR_MULTIPLE_RECORDS* plmr);
  1883. #ifdef LKR_APPLY_IF
  1884. DWORD Apply(
  1885. LKR_PFnRecordAction pfnAction,
  1886. void* pvState=NULL,
  1887. LK_LOCKTYPE lkl=LKL_READLOCK);
  1888. DWORD ApplyIf(
  1889. LKR_PFnRecordPred pfnPredicate,
  1890. LKR_PFnRecordAction pfnAction,
  1891. void* pvState=NULL,
  1892. LK_LOCKTYPE lkl=LKL_READLOCK);
  1893. DWORD DeleteIf(
  1894. LKR_PFnRecordPred pfnPredicate,
  1895. void* pvState=NULL);
  1896. #endif // LKR_APPLY_IF
  1897. void Clear();
  1898. int CheckTable() const;
  1899. DWORD Size() const;
  1900. DWORD MaxSize() const;
  1901. CLKRHashTableStats GetStatistics() const;
  1902. bool IsValid() const;
  1903. void SetTableLockSpinCount(WORD wSpins);
  1904. WORD GetTableLockSpinCount() const;
  1905. void SetBucketLockSpinCount(WORD wSpins);
  1906. WORD GetBucketLockSpinCount() const;
  1907. enum {
  1908. SIGNATURE = (('L') | ('K' << 8) | ('H' << 16) | ('T' << 24)),
  1909. SIGNATURE_FREE = (('L') | ('K' << 8) | ('H' << 16) | ('x' << 24)),
  1910. };
  1911. bool
  1912. ValidSignature() const
  1913. { return m_dwSignature == SIGNATURE;}
  1914. // Is the hash table usable?
  1915. bool IsUsable() const
  1916. { return (m_lkrcState == LK_SUCCESS); }
  1917. #ifdef LKR_EXPOSED_TABLE_LOCK
  1918. public:
  1919. #else // !LKR_EXPOSED_TABLE_LOCK
  1920. protected:
  1921. #endif // !LKR_EXPOSED_TABLE_LOCK
  1922. void WriteLock();
  1923. void ReadLock() const;
  1924. void WriteUnlock();
  1925. void ReadUnlock() const;
  1926. bool IsWriteLocked() const;
  1927. bool IsReadLocked() const;
  1928. bool IsWriteUnlocked() const;
  1929. bool IsReadUnlocked() const;
  1930. void ConvertSharedToExclusive();
  1931. void ConvertExclusiveToShared() const;
  1932. #ifdef LKRHASH_KERNEL_MODE
  1933. LKRHASH_ALLOCATOR_DEFINITIONS(CLKRHashTable);
  1934. #endif // LKRHASH_KERNEL_MODE
  1935. #ifdef LKR_DEPRECATED_ITERATORS
  1936. public:
  1937. typedef SubTable::CIterator CLHTIterator;
  1938. class CIterator : public CLHTIterator
  1939. {
  1940. protected:
  1941. friend class CLKRHashTable;
  1942. CLKRHashTable* m_pht; // which hash table?
  1943. int m_ist; // which subtable
  1944. private:
  1945. // Private copy ctor and op= to prevent compiler synthesizing them.
  1946. CIterator(const CIterator&);
  1947. CIterator& operator=(const CIterator&);
  1948. public:
  1949. CIterator(
  1950. LK_LOCKTYPE lkl=LKL_WRITELOCK)
  1951. : CLHTIterator(lkl),
  1952. m_pht(NULL),
  1953. m_ist(-1)
  1954. {}
  1955. const void* Record() const
  1956. {
  1957. IRTLASSERT(IsValid());
  1958. // This is a hack to work around a compiler bug. Calling
  1959. // CLHTIterator::Record calls this function recursively until
  1960. // the stack overflows.
  1961. const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
  1962. return pBase->Record();
  1963. }
  1964. const DWORD_PTR Key() const
  1965. {
  1966. IRTLASSERT(IsValid());
  1967. const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
  1968. return pBase->Key();
  1969. }
  1970. bool IsValid() const
  1971. {
  1972. const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
  1973. return (m_pht != NULL && m_ist >= 0 && pBase->IsValid());
  1974. }
  1975. };
  1976. // Const iterators for readonly access
  1977. class CConstIterator : public CIterator
  1978. {
  1979. private:
  1980. // Private, unimplemented copy ctor and op= to prevent
  1981. // compiler synthesizing them.
  1982. CConstIterator(const CConstIterator&);
  1983. CConstIterator& operator=(const CConstIterator&);
  1984. public:
  1985. CConstIterator()
  1986. : CIterator(LKL_READLOCK)
  1987. {}
  1988. };
  1989. public:
  1990. LK_RETCODE InitializeIterator(CIterator* piter);
  1991. LK_RETCODE IncrementIterator(CIterator* piter);
  1992. LK_RETCODE CloseIterator(CIterator* piter);
  1993. LK_RETCODE InitializeIterator(CConstIterator* piter) const
  1994. {
  1995. IRTLASSERT(piter != NULL && piter->m_pht == NULL);
  1996. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  1997. if (piter == NULL || piter->m_pht != NULL
  1998. || piter->m_lkl == LKL_WRITELOCK)
  1999. return LK_BAD_ITERATOR;
  2000. return const_cast<CLKRHashTable*>(this)
  2001. ->InitializeIterator(static_cast<CIterator*>(piter));
  2002. }
  2003. LK_RETCODE IncrementIterator(CConstIterator* piter) const
  2004. {
  2005. IRTLASSERT(piter != NULL && piter->m_pht == this);
  2006. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  2007. if (piter == NULL || piter->m_pht != this
  2008. || piter->m_lkl == LKL_WRITELOCK)
  2009. return LK_BAD_ITERATOR;
  2010. return const_cast<CLKRHashTable*>(this)
  2011. ->IncrementIterator(static_cast<CIterator*>(piter));
  2012. }
  2013. LK_RETCODE CloseIterator(CConstIterator* piter) const
  2014. {
  2015. IRTLASSERT(piter != NULL && piter->m_pht == this);
  2016. IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
  2017. if (piter == NULL || piter->m_pht != this
  2018. || piter->m_lkl == LKL_WRITELOCK)
  2019. return LK_BAD_ITERATOR;
  2020. return const_cast<CLKRHashTable*>(this)
  2021. ->CloseIterator(static_cast<CIterator*>(piter));
2022. }
  2023. #endif // LKR_DEPRECATED_ITERATORS
  2024. #ifdef LKR_STL_ITERATORS
  2025. private:
  2026. bool _IsValidIterator(const Iterator& riter) const
  2027. {
  2028. LKR_ITER_TRACE(_TEXT(" LKHT:_IsValidIterator(%p)\n"), &riter);
  2029. bool fValid = (riter.m_pht == this);
  2030. IRTLASSERT(fValid);
  2031. fValid = fValid && (0 <= riter.m_ist
  2032. && riter.m_ist < (int) m_cSubTables);
  2033. IRTLASSERT(fValid);
  2034. IRTLASSERT(_SubTableIndex(riter.m_subiter.m_plht) == riter.m_ist);
  2035. fValid = fValid && riter.IsValid();
  2036. IRTLASSERT(fValid);
  2037. return fValid;
  2038. }
  2039. public:
  2040. Iterator
  2041. Begin();
  2042. Iterator
  2043. End() const
  2044. {
  2045. LKR_ITER_TRACE(_TEXT(" LKHT::End\n"));
  2046. return Iterator();
  2047. }
  2048. bool
  2049. Insert(
  2050. /* in */ const void* pvRecord,
  2051. /* out */ Iterator& riterResult,
  2052. /* in */ bool fOverwrite=false);
  2053. bool
  2054. Erase(
  2055. /* in,out */ Iterator& riter);
  2056. bool
  2057. Erase(
  2058. /*in*/ Iterator& riterFirst,
  2059. /*in*/ Iterator& riterLast);
  2060. bool
  2061. Find(
  2062. /* in */ DWORD_PTR pnKey,
  2063. /* out */ Iterator& riterResult);
  2064. bool
  2065. EqualRange(
  2066. /* in */ DWORD_PTR pnKey,
  2067. /* out */ Iterator& riterFirst, // inclusive
  2068. /* out */ Iterator& riterLast); // exclusive
  2069. #endif // LKR_STL_ITERATORS
  2070. }; // class CLKRHashTable
  2071. //--------------------------------------------------------------------
  2072. // A typesafe wrapper for CLKRHashTable (or CLKRLinearHashTable).
  2073. //
  2074. // * _Derived must derive from CTypedHashTable and provide certain member
  2075. // functions (ExtractKey, CalcKeyHash, CompareKeys, AddRefRecord). It's
  2076. // needed so that the method wrappers can downcast to the typesafe
  2077. // implementations that you provide.
  2078. // * _Record is the type of the record. C{Linear}HashTable will store
  2079. // >pointers< to _Record; i.e., stores _Records by reference, not by value.
  2080. // * _Key is the type of the key. _Key is used directly---it is not
  2081. // assumed to be a pointer type. _Key can be an integer or a pointer.
  2082. // C{Linear}HashTable assumes that the key is stored in the associated
  2083. // record. See the comments at the declaration of LKR_PFnExtractKey
  2084. // for more details.
  2085. // (optional parameters):
  2086. // * _BaseHashTable is the base hash table: CLKRHashTable or
2087. // CLKRLinearHashTable
  2088. // * _BaseIterator is the iterator type, _BaseHashTable::CIterator
  2089. //
  2090. // Some associative containers allow you to store key-value (aka
  2091. // name-value) pairs. LKRhash doesn't allow you to do this directly, but
  2092. // it's straightforward to build a simple wrapper class (or to use
  2093. // std::pair<key,value>).
  2094. //
  2095. // CTypedHashTable could derive directly from CLKRLinearHashTable, if you
  2096. // don't need the extra overhead of CLKRHashTable (which is quite low).
  2097. // If you expect to be using the table a lot on multiprocessor machines,
  2098. // you should use the default of CLKRHashTable, as it will scale better.
  2099. //
  2100. // You may need to add the following line to your code to disable
2101. // warning messages about truncating extremely long identifiers.
  2102. // #pragma warning (disable : 4786)
  2103. //
  2104. // The _Derived class should look something like this:
  2105. // class CDerived : public CTypedHashTable<CDerived, RecordType, KeyType>
  2106. // {
  2107. // public:
  2108. // CDerived()
  2109. // : CTypedHashTable<CDerived, RecordType, KeyType>("DerivedTable")
  2110. // { /* other ctor actions, if needed */ }
  2111. // static KeyType ExtractKey(const RecordType* pTest);
  2112. // static DWORD CalcKeyHash(const KeyType Key);
  2113. // static int CompareKeys(const KeyType Key1, const KeyType Key2);
  2114. // static LONG AddRefRecord(RecordType* pRecord,LK_ADDREF_REASON lkar);
  2115. // // You probably want to declare the copy ctor and operator=
  2116. // // as private, so that the compiler won't synthesize them.
  2117. // // You don't need to provide a dtor, unless you have custom
  2118. // // member data to clean up.
  2119. //
  2120. // // Optional: other functions
  2121. // };
  2122. //
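// For illustration only, a hedged sketch of the name-value wrapper idea
// mentioned above. CNameValue and CNameValueTable are hypothetical, and
// HashString is assumed to come from HashFn.h; a production table would
// also do real lifetime management in AddRefRecord.
//
// struct CNameValue
// {
//     const char* m_pszName;    // the key lives inside the record
//     const char* m_pszValue;
// };
//
// class CNameValueTable
//     : public CTypedHashTable<CNameValueTable, CNameValue, const char*>
// {
// public:
//     CNameValueTable()
//         : CTypedHashTable<CNameValueTable, CNameValue,
//                           const char*>("NameValueTable")
//     {}
//     static const char* ExtractKey(const CNameValue* pNV)
//     { return pNV->m_pszName; }
//     static DWORD CalcKeyHash(const char* pszKey)
//     { return HashString(pszKey); }
//     static int CompareKeys(const char* psz1, const char* psz2)
//     { return strcmp(psz1, psz2); }
//     static LONG AddRefRecord(CNameValue*, LK_ADDREF_REASON)
//     { return 1; }   // no refcounting in this sketch
// };
//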
  2123. //--------------------------------------------------------------------
  2124. template < class _Derived, class _Record, class _Key,
  2125. bool _fDoRefCounting=false,
  2126. class _BaseHashTable=CLKRHashTable
  2127. #ifdef LKR_DEPRECATED_ITERATORS
  2128. , class _BaseIterator=_BaseHashTable::CIterator
  2129. #endif // LKR_DEPRECATED_ITERATORS
  2130. >
  2131. class CTypedHashTable : public _BaseHashTable
  2132. {
  2133. public:
  2134. // convenient aliases
  2135. typedef _Derived Derived;
  2136. typedef _Record Record;
  2137. typedef _Key Key;
  2138. enum { REF_COUNTED = _fDoRefCounting };
  2139. typedef _BaseHashTable BaseHashTable;
  2140. typedef CTypedHashTable<_Derived, _Record, _Key,
  2141. _fDoRefCounting, _BaseHashTable
  2142. #ifdef LKR_DEPRECATED_ITERATORS
  2143. , _BaseIterator
  2144. #endif // LKR_DEPRECATED_ITERATORS
  2145. > HashTable;
  2146. #ifdef LKR_DEPRECATED_ITERATORS
  2147. typedef _BaseIterator BaseIterator;
  2148. #endif // LKR_DEPRECATED_ITERATORS
  2149. private:
  2150. // Wrappers for the typesafe methods exposed by the derived class
  2151. static const DWORD_PTR WINAPI
  2152. _ExtractKey(const void* pvRecord)
  2153. {
  2154. const _Record* pRec = static_cast<const _Record*>(pvRecord);
  2155. // I would prefer to use reinterpret_cast here and in _CalcKeyHash
  2156. // and _CompareKeys, but the stupid Win64 compiler thinks it knows
  2157. // better than I do.
  2158. const _Key key = _Derived::ExtractKey(pRec);
  2159. // const void* pvKey = reinterpret_cast<const void*>(key);
  2160. // const DWORD_PTR pnKey = reinterpret_cast<const DWORD_PTR>(pvKey);
  2161. const DWORD_PTR pnKey = (DWORD_PTR) key;
  2162. return pnKey;
  2163. }
  2164. static DWORD WINAPI
  2165. _CalcKeyHash(const DWORD_PTR pnKey1)
  2166. {
  2167. const void* pvKey1 = reinterpret_cast<const void*>(pnKey1);
  2168. const DWORD_PTR pnKey1a = reinterpret_cast<const DWORD_PTR>(pvKey1);
  2169. // const _Key key1 = reinterpret_cast<const _Key>(pnKey1a);
  2170. const _Key key1 = (const _Key) pnKey1a;
  2171. return _Derived::CalcKeyHash(key1);
  2172. }
  2173. static int WINAPI
  2174. _CompareKeys(const DWORD_PTR pnKey1, const DWORD_PTR pnKey2)
  2175. {
  2176. const void* pvKey1 = reinterpret_cast<const void*>(pnKey1);
  2177. const DWORD_PTR pnKey1a = reinterpret_cast<const DWORD_PTR>(pvKey1);
  2178. // const _Key key1 = reinterpret_cast<const _Key>(pnKey1a);
  2179. const _Key key1 = (const _Key) pnKey1a;
  2180. const void* pvKey2 = reinterpret_cast<const void*>(pnKey2);
  2181. const DWORD_PTR pnKey2a = reinterpret_cast<const DWORD_PTR>(pvKey2);
  2182. // const _Key key2 = reinterpret_cast<const _Key>(pnKey2a);
  2183. const _Key key2 = (const _Key) pnKey2a;
  2184. return _Derived::CompareKeys(key1, key2);
  2185. }
  2186. static LONG WINAPI
  2187. _AddRefRecord(void* pvRecord, LK_ADDREF_REASON lkar)
  2188. {
  2189. _Record* pRec = static_cast<_Record*>(pvRecord);
  2190. return _Derived::AddRefRecord(pRec, lkar);
  2191. }
  2192. #ifdef LKR_APPLY_IF
  2193. // Typesafe wrappers for Apply, ApplyIf, and DeleteIf.
  2194. public:
  2195. // ApplyIf() and DeleteIf(): Does the record match the predicate?
  2196. // Note: takes a Record*, not a const Record*. You can modify the
  2197. // record in Pred() or Action(), if you like, but if you do, you
  2198. // should use LKL_WRITELOCK to lock the table. Also, you need to
  2199. // exercise care that you don't modify the key of the record.
  2200. typedef LK_PREDICATE (WINAPI *PFnRecordPred) (Record* pRec, void* pvState);
  2201. // Apply() et al: Perform action on record.
  2202. typedef LK_ACTION (WINAPI *PFnRecordAction)(Record* pRec, void* pvState);
  2203. protected:
  2204. class CState
  2205. {
  2206. protected:
  2207. friend class CTypedHashTable<_Derived, _Record, _Key,
  2208. _fDoRefCounting, _BaseHashTable
  2209. #ifdef LKR_DEPRECATED_ITERATORS
  2210. , _BaseIterator
  2211. #endif // LKR_DEPRECATED_ITERATORS
  2212. >;
  2213. PFnRecordPred m_pfnPred;
  2214. PFnRecordAction m_pfnAction;
  2215. void* m_pvState;
  2216. public:
  2217. CState(
  2218. PFnRecordPred pfnPred,
  2219. PFnRecordAction pfnAction,
  2220. void* pvState)
  2221. : m_pfnPred(pfnPred), m_pfnAction(pfnAction), m_pvState(pvState)
  2222. {}
  2223. };
  2224. static LK_PREDICATE WINAPI
  2225. _Pred(const void* pvRecord, void* pvState)
  2226. {
  2227. _Record* pRec = static_cast<_Record*>(const_cast<void*>(pvRecord));
  2228. CState* pState = static_cast<CState*>(pvState);
  2229. return (*pState->m_pfnPred)(pRec, pState->m_pvState);
  2230. }
  2231. static LK_ACTION WINAPI
  2232. _Action(const void* pvRecord, void* pvState)
  2233. {
  2234. _Record* pRec = static_cast<_Record*>(const_cast<void*>(pvRecord));
  2235. CState* pState = static_cast<CState*>(pvState);
  2236. return (*pState->m_pfnAction)(pRec, pState->m_pvState);
  2237. }
  2238. #endif // LKR_APPLY_IF
  2239. protected:
  2240. ~CTypedHashTable()
  2241. {
  2242. IRTLTRACE1("~CTypedHashTable(%p)\n", this);
  2243. }
  2244. CTypedHashTable(const HashTable&);
  2245. HashTable& operator=(const HashTable&);
  2246. private:
  2247. template <bool> class CRefCount;
  2248. // Dummy, no-op specialization
  2249. template <> class CRefCount<false>
  2250. {
  2251. public:
  2252. LONG Increment() { return 1; }
  2253. LONG Decrement() { return 0; }
  2254. };
  2255. // Real, threadsafe specialization
  2256. template <> class CRefCount<true>
  2257. {
  2258. public:
  2259. CRefCount<true>() : m_cRefs(1) {}
  2260. ~CRefCount<true>() { IRTLASSERT(0 == m_cRefs); }
  2261. LONG Increment() { return ::InterlockedIncrement(&m_cRefs); }
  2262. LONG Decrement() { return ::InterlockedDecrement(&m_cRefs); }
  2263. private:
  2264. LONG m_cRefs;
  2265. };
  2266. mutable CRefCount<_fDoRefCounting> m_rc;
  2267. LONG
  2268. _AddRef() const
  2269. {
  2270. return m_rc.Increment();
  2271. }
  2272. LONG
  2273. _Release() const
  2274. {
  2275. const LONG cRefs = m_rc.Decrement();
  2276. if (0 == cRefs)
  2277. {
  2278. _Derived* pThis = static_cast<_Derived*>(
  2279. const_cast<HashTable*>(this));
  2280. delete pThis;
  2281. }
  2282. return cRefs;
  2283. }
  2284. template <bool> class CAutoRefCountImpl;
  2285. typedef CAutoRefCountImpl<_fDoRefCounting> CAutoRefCount;
  2286. friend typename CAutoRefCountImpl<_fDoRefCounting>;
  2287. // friend typename CAutoRefCount;
  2288. // no-op specialization
  2289. template <> class CAutoRefCountImpl<false>
  2290. {
  2291. public:
  2292. CAutoRefCountImpl<false>(const HashTable* const) {}
  2293. };
  2294. // threadsafe specialization
  2295. template <> class CAutoRefCountImpl<true>
  2296. {
  2297. private:
  2298. const HashTable* const m_pCont;
  2299. // At /W4, compiler complains that it can't generate operator=
  2300. CAutoRefCountImpl<true>& operator=(const CAutoRefCountImpl<true>&);
  2301. public:
  2302. CAutoRefCountImpl<true>(
  2303. const HashTable* const pCont)
  2304. : m_pCont(pCont)
  2305. {
  2306. m_pCont->_AddRef();
  2307. }
  2308. ~CAutoRefCountImpl<true>()
  2309. {
  2310. m_pCont->_Release();
  2311. }
  2312. };
  2313. // Now at the top of an operation, we place a statement like this:
  2314. // CAutoRefCount arc(this);
  2315. //
  2316. // If we're using CAutoRefCountImpl<false>, the compiler optimizes it away.
  2317. //
  2318. // If we're using CAutoRefCountImpl<true>, we increment m_rc at this
  2319. // point. At the end of the function, the destructor calls _Release(),
  2320. // which will `delete this' if the last reference was released.
  2321. public:
  2322. CTypedHashTable(
  2323. LPCSTR pszClassName, // Identifies table for debugging
  2324. unsigned maxload=LK_DFLT_MAXLOAD, // Upperbound on avg chain len
  2325. DWORD initsize=LK_DFLT_INITSIZE, // Initial size of table: S/M/L
  2326. DWORD num_subtbls=LK_DFLT_NUM_SUBTBLS,// #subordinate hash tables.
  2327. bool fMultiKeys=false, // Allow multiple identical keys?
  2328. bool fUseLocks=true // Must use locks
  2329. #ifdef LKRHASH_KERNEL_MODE
  2330. , bool fNonPagedAllocs=true // use paged or NP pool in kernel
  2331. #endif
  2332. )
  2333. : _BaseHashTable(pszClassName, _ExtractKey, _CalcKeyHash, _CompareKeys,
  2334. _AddRefRecord, maxload, initsize, num_subtbls,
  2335. fMultiKeys, fUseLocks
  2336. #ifdef LKRHASH_KERNEL_MODE
  2337. , fNonPagedAllocs
  2338. #endif
  2339. )
  2340. {
  2341. // Ensure that _Key is no bigger than a pointer. Because we
  2342. // support both numeric and pointer keys, the various casts
  2343. // in the member functions unfortunately silently truncate if
  2344. // _Key is an unacceptable numeric type, such as __int64 on x86.
  2345. STATIC_ASSERT(sizeof(_Key) <= sizeof(DWORD_PTR));
  2346. }
  2347. LK_RETCODE InsertRecord(const _Record* pRec, bool fOverwrite=false)
  2348. {
  2349. CAutoRefCount arc(this);
  2350. return _BaseHashTable::InsertRecord(pRec, fOverwrite);
  2351. }
  2352. LK_RETCODE DeleteKey(const _Key key, _Record** ppRec=NULL,
  2353. bool fDeleteAllSame=false)
  2354. {
  2355. CAutoRefCount arc(this);
  2356. // const void* pvKey = reinterpret_cast<const void*>(key);
  2357. // DWORD_PTR pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
  2358. DWORD_PTR pnKey = (DWORD_PTR) key;
  2359. const void** ppvRec = (const void**) ppRec;
  2360. return _BaseHashTable::DeleteKey(pnKey, ppvRec, fDeleteAllSame);
  2361. }
  2362. LK_RETCODE DeleteRecord(const _Record* pRec)
  2363. {
  2364. CAutoRefCount arc(this);
  2365. return _BaseHashTable::DeleteRecord(pRec);
  2366. }
2367. // Note: returns a _Record**, not a const _Record**. You can use
2368. // a const type for the template parameter to ensure constness.
  2369. LK_RETCODE FindKey(const _Key key, _Record** ppRec) const
  2370. {
  2371. if (ppRec == NULL)
  2372. return LK_BAD_RECORD;
  2373. *ppRec = NULL;
  2374. CAutoRefCount arc(this);
  2375. const void* pvRec = NULL;
  2376. // const void* pvKey = reinterpret_cast<const void*>(key);
  2377. // DWORD_PTR pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
  2378. DWORD_PTR pnKey = (DWORD_PTR) key;
  2379. LK_RETCODE lkrc = _BaseHashTable::FindKey(pnKey, &pvRec);
  2380. *ppRec = static_cast<_Record*>(const_cast<void*>(pvRec));
  2381. return lkrc;
  2382. }
  2383. LK_RETCODE FindRecord(const _Record* pRec) const
  2384. {
  2385. CAutoRefCount arc(this);
  2386. return _BaseHashTable::FindRecord(pRec);
  2387. }
  2388. void Destroy()
  2389. {
  2390. _Release();
  2391. }
  2392. LK_RETCODE FindKeyMultipleRecords(
  2393. const _Key key,
  2394. size_t* pcRecords,
  2395. LKR_MULTIPLE_RECORDS** pplmr=NULL
  2396. ) const
  2397. {
  2398. CAutoRefCount arc(this);
  2399. // const void* pvKey = reinterpret_cast<const void*>(key);
  2400. // DWORD_PTR pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
  2401. DWORD_PTR pnKey = (DWORD_PTR) key;
  2402. return _BaseHashTable::FindKeyMultipleRecords(pnKey, pcRecords, pplmr);
  2403. }
  2404. LK_RETCODE DeleteKeyMultipleRecords(
  2405. const _Key key,
  2406. size_t* pcRecords,
  2407. LKR_MULTIPLE_RECORDS** pplmr=NULL)
  2408. {
  2409. CAutoRefCount arc(this);
  2410. // const void* pvKey = reinterpret_cast<const void*>(key);
  2411. // DWORD_PTR pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
  2412. DWORD_PTR pnKey = (DWORD_PTR) key;
  2413. return _BaseHashTable::DeleteKeyMultipleRecords(pnKey, pcRecords,
  2414. pplmr);
  2415. }
  2416. static LK_RETCODE FreeMultipleRecords(LKR_MULTIPLE_RECORDS* plmr)
  2417. {
2418. return _BaseHashTable::FreeMultipleRecords(plmr);
  2419. }
  2420. // Other C{Linear}HashTable methods can be exposed without change
  2421. // CODEWORK: refcount iterators
  2422. #ifdef LKR_APPLY_IF
  2423. public:
  2424. // Typesafe wrappers for Apply et al
  2425. DWORD Apply(PFnRecordAction pfnAction,
  2426. void* pvState=NULL,
  2427. LK_LOCKTYPE lkl=LKL_READLOCK)
  2428. {
  2429. IRTLASSERT(pfnAction != NULL);
  2430. if (pfnAction == NULL)
  2431. return 0;
  2432. CAutoRefCount arc(this);
  2433. CState state(NULL, pfnAction, pvState);
  2434. return _BaseHashTable::Apply(_Action, &state, lkl);
  2435. }
  2436. DWORD ApplyIf(PFnRecordPred pfnPredicate,
  2437. PFnRecordAction pfnAction,
  2438. void* pvState=NULL,
  2439. LK_LOCKTYPE lkl=LKL_READLOCK)
  2440. {
  2441. IRTLASSERT(pfnPredicate != NULL && pfnAction != NULL);
  2442. if (pfnPredicate == NULL || pfnAction == NULL)
  2443. return 0;
  2444. CAutoRefCount arc(this);
  2445. CState state(pfnPredicate, pfnAction, pvState);
  2446. return _BaseHashTable::ApplyIf(_Pred, _Action, &state, lkl);
  2447. }
  2448. DWORD DeleteIf(PFnRecordPred pfnPredicate, void* pvState=NULL)
  2449. {
  2450. IRTLASSERT(pfnPredicate != NULL);
  2451. if (pfnPredicate == NULL)
  2452. return 0;
  2453. CAutoRefCount arc(this);
  2454. CState state(pfnPredicate, NULL, pvState);
  2455. return _BaseHashTable::DeleteIf(_Pred, &state);
  2456. }
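// For illustration only (hypothetical names): a typesafe DeleteIf
// predicate that purges expired records, assuming the LK_PREDICATE
// values LKP_PERFORM and LKP_NO_ACTION declared in LKR-hash.h and a
// record type that exposes an m_fExpired flag.
//
// static LK_PREDICATE WINAPI
// IsExpired(CMyRecord* pRec, void* /*pvState*/)
// {
//     return pRec->m_fExpired ? LKP_PERFORM : LKP_NO_ACTION;
// }
//
// DWORD cDeleted = ht.DeleteIf(IsExpired);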
  2457. #endif // LKR_APPLY_IF
  2458. #ifdef LKR_DEPRECATED_ITERATORS
  2459. // Typesafe wrappers for iterators
  2460. class CIterator : public _BaseIterator
  2461. {
  2462. private:
  2463. // Private, unimplemented copy ctor and op= to prevent
  2464. // compiler synthesizing them.
  2465. CIterator(const CIterator&);
  2466. CIterator& operator=(const CIterator&);
  2467. public:
  2468. CIterator(
  2469. LK_LOCKTYPE lkl=LKL_WRITELOCK)
  2470. : _BaseIterator(lkl)
  2471. {}
  2472. _Record* Record() const
  2473. {
  2474. const _BaseIterator* pBase = static_cast<const _BaseIterator*>(this);
  2475. return reinterpret_cast<_Record*>(const_cast<void*>(
  2476. pBase->Record()));
  2477. }
  2478. _Key Key() const
  2479. {
  2480. const _BaseIterator* pBase = static_cast<const _BaseIterator*>(this);
  2481. return reinterpret_cast<_Key>(reinterpret_cast<void*>(pBase->Key()));
  2482. }
  2483. };
  2484. // readonly iterator
  2485. class CConstIterator : public CIterator
  2486. {
  2487. private:
  2488. // Private, unimplemented copy ctor and op= to prevent
  2489. // compiler synthesizing them.
  2490. CConstIterator(const CConstIterator&);
  2491. CConstIterator& operator=(const CConstIterator&);
  2492. public:
  2493. CConstIterator()
  2494. : CIterator(LKL_READLOCK)
  2495. {}
  2496. const _Record* Record() const
  2497. {
  2498. return CIterator::Record();
  2499. }
  2500. const _Key Key() const
  2501. {
  2502. return CIterator::Key();
  2503. }
  2504. };
  2505. public:
  2506. LK_RETCODE InitializeIterator(CIterator* piter)
  2507. {
  2508. return _BaseHashTable::InitializeIterator(piter);
  2509. }
  2510. LK_RETCODE IncrementIterator(CIterator* piter)
  2511. {
  2512. return _BaseHashTable::IncrementIterator(piter);
  2513. }
  2514. LK_RETCODE CloseIterator(CIterator* piter)
  2515. {
  2516. return _BaseHashTable::CloseIterator(piter);
  2517. }
  2518. LK_RETCODE InitializeIterator(CConstIterator* piter) const
  2519. {
  2520. return const_cast<HashTable*>(this)
  2521. ->InitializeIterator(static_cast<CIterator*>(piter));
  2522. }
  2523. LK_RETCODE IncrementIterator(CConstIterator* piter) const
  2524. {
  2525. return const_cast<HashTable*>(this)
  2526. ->IncrementIterator(static_cast<CIterator*>(piter));
  2527. }
  2528. LK_RETCODE CloseIterator(CConstIterator* piter) const
  2529. {
  2530. return const_cast<HashTable*>(this)
  2531. ->CloseIterator(static_cast<CIterator*>(piter));
  2532. }
  2533. #endif // LKR_DEPRECATED_ITERATORS
  2534. #ifdef LKR_STL_ITERATORS
  2535. // TODO: const_iterator
  2536. public:
  2537. class iterator
  2538. {
  2539. friend class CTypedHashTable<_Derived, _Record, _Key,
  2540. _fDoRefCounting, _BaseHashTable
  2541. #ifdef LKR_DEPRECATED_ITERATORS
  2542. , _BaseIterator
  2543. #endif // LKR_DEPRECATED_ITERATORS
  2544. >;
  2545. protected:
  2546. typename _BaseHashTable::Iterator m_iter;
  2547. iterator(
  2548. const typename _BaseHashTable::Iterator& rhs)
  2549. : m_iter(rhs)
  2550. {
  2551. LKR_ITER_TRACE(_TEXT("Typed::prot ctor, this=%p, rhs=%p\n"),
  2552. this, &rhs);
  2553. }
  2554. public:
  2555. typedef std::forward_iterator_tag iterator_category;
  2556. typedef _Record value_type;
  2557. typedef ptrdiff_t difference_type;
  2558. typedef size_t size_type;
  2559. typedef value_type& reference;
  2560. typedef value_type* pointer;
  2561. iterator()
  2562. : m_iter()
  2563. {
  2564. LKR_ITER_TRACE(_TEXT("Typed::default ctor, this=%p\n"), this);
  2565. }
  2566. iterator(
  2567. const iterator& rhs)
  2568. : m_iter(rhs.m_iter)
  2569. {
  2570. LKR_ITER_TRACE(_TEXT("Typed::copy ctor, this=%p, rhs=%p\n"),
  2571. this, &rhs);
  2572. }
  2573. iterator& operator=(
  2574. const iterator& rhs)
  2575. {
  2576. LKR_ITER_TRACE(_TEXT("Typed::operator=, this=%p, rhs=%p\n"),
  2577. this, &rhs);
  2578. m_iter = rhs.m_iter;
  2579. return *this;
  2580. }
  2581. ~iterator()
  2582. {
  2583. LKR_ITER_TRACE(_TEXT("Typed::dtor, this=%p\n"), this);
  2584. }
  2585. pointer operator->() const
  2586. {
  2587. return (reinterpret_cast<_Record*>(
  2588. const_cast<void*>(m_iter.Record())));
  2589. }
  2590. reference operator*() const
  2591. {
  2592. return * (operator->());
  2593. }
  2594. // pre-increment
  2595. iterator& operator++()
  2596. {
  2597. LKR_ITER_TRACE(_TEXT("Typed::pre-increment, this=%p\n"), this);
  2598. m_iter.Increment();
  2599. return *this;
  2600. }
  2601. // post-increment
  2602. iterator operator++(int)
  2603. {
  2604. LKR_ITER_TRACE(_TEXT("Typed::post-increment, this=%p\n"), this);
  2605. iterator iterPrev = *this;
  2606. m_iter.Increment();
  2607. return iterPrev;
  2608. }
  2609. bool operator==(
  2610. const iterator& rhs) const
  2611. {
  2612. LKR_ITER_TRACE(_TEXT("Typed::operator==, this=%p, rhs=%p\n"),
  2613. this, &rhs);
  2614. return m_iter == rhs.m_iter;
  2615. }
  2616. bool operator!=(
  2617. const iterator& rhs) const
  2618. {
  2619. LKR_ITER_TRACE(_TEXT("Typed::operator!=, this=%p, rhs=%p\n"),
  2620. this, &rhs);
  2621. return m_iter != rhs.m_iter;
  2622. }
  2623. _Record* Record() const
  2624. {
  2625. LKR_ITER_TRACE(_TEXT("Typed::Record, this=%p\n"), this);
  2626. return reinterpret_cast<_Record*>(
  2627. const_cast<void*>(m_iter.Record()));
  2628. }
  2629. _Key Key() const
  2630. {
  2631. LKR_ITER_TRACE(_TEXT("Typed::Key, this=%p\n"), this);
  2632. return reinterpret_cast<_Key>(
  2633. reinterpret_cast<void*>(m_iter.Key()));
  2634. }
  2635. }; // class iterator
  2636. // Return iterator pointing to first item in table
  2637. iterator begin()
  2638. {
  2639. LKR_ITER_TRACE(_TEXT("Typed::begin()\n"));
  2640. return iterator(_BaseHashTable::Begin());
  2641. }
  2642. // Return a one-past-the-end iterator. Always empty.
  2643. iterator end() const
  2644. {
  2645. LKR_ITER_TRACE(_TEXT("Typed::end()\n"));
  2646. return iterator(_BaseHashTable::End());
  2647. }
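// For illustration only: with begin()/end() above, a walk over a typed
// table looks like ordinary STL forward iteration (CMyTable and Process
// are hypothetical names):
//
// for (CMyTable::iterator it = ht.begin(); it != ht.end(); ++it)
//     Process(it.Record());   // or: Process(&*it)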
  2648. template <class _InputIterator>
  2649. CTypedHashTable(
  2650. _InputIterator f, // first element in range
  2651. _InputIterator l, // one-beyond-last element
  2652. LPCSTR pszClassName, // Identifies table for debugging
  2653. unsigned maxload=LK_DFLT_MAXLOAD, // Upperbound on avg chain len
  2654. DWORD initsize=LK_DFLT_INITSIZE, // Initial size of table: S/M/L
  2655. DWORD num_subtbls=LK_DFLT_NUM_SUBTBLS,// #subordinate hash tables.
  2656. bool fMultiKeys=false, // Allow multiple identical keys?
  2657. bool fUseLocks=true // Must use locks
  2658. #ifdef LKRHASH_KERNEL_MODE
  2659. , bool fNonPagedAllocs=true // use paged or NP pool in kernel
  2660. #endif
  2661. )
  2662. : _BaseHashTable(pszClassName, _ExtractKey, _CalcKeyHash, _CompareKeys,
  2663. _AddRefRecord, maxload, initsize, num_subtbls,
  2664. fMultiKeys, fUseLocks
  2665. #ifdef LKRHASH_KERNEL_MODE
  2666. , fNonPagedAllocs
  2667. #endif
  2668. )
  2669. {
  2670. insert(f, l);
  2671. }
  2672. template <class _InputIterator>
  2673. void insert(_InputIterator f, _InputIterator l)
  2674. {
  2675. for ( ; f != l; ++f)
  2676. InsertRecord(&(*f));
  2677. }
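// For illustration only: bulk-loading from an existing array with the
// range ctor or insert() above. InsertRecord stores a pointer to each
// record, so the source elements must outlive the table entries
// (CMyRecord, g_aRecords, and NUM_RECORDS are hypothetical):
//
// static CMyRecord g_aRecords[NUM_RECORDS];
// ht.insert(&g_aRecords[0], &g_aRecords[NUM_RECORDS]);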
  2678. bool
  2679. Insert(
  2680. const _Record* pRecord,
  2681. iterator& riterResult,
  2682. bool fOverwrite=false)
  2683. {
  2684. LKR_ITER_TRACE(_TEXT("Typed::Insert\n"));
  2685. return _BaseHashTable::Insert(pRecord, riterResult.m_iter, fOverwrite);
  2686. }
  2687. bool
  2688. Erase(
  2689. iterator& riter)
  2690. {
  2691. LKR_ITER_TRACE(_TEXT("Typed::Erase\n"));
  2692. return _BaseHashTable::Erase(riter.m_iter);
  2693. }
  2694. bool
  2695. Erase(
  2696. iterator& riterFirst,
  2697. iterator& riterLast)
  2698. {
  2699. LKR_ITER_TRACE(_TEXT("Typed::Erase2\n"));
  2700. return _BaseHashTable::Erase(riterFirst.m_iter, riterLast.m_iter);
  2701. }
  2702. bool
  2703. Find(
  2704. const _Key key,
  2705. iterator& riterResult)
  2706. {
  2707. LKR_ITER_TRACE(_TEXT("Typed::Find\n"));
  2708. const void* pvKey = reinterpret_cast<const void*>(key);
  2709. DWORD_PTR pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
  2710. return _BaseHashTable::Find(pnKey, riterResult.m_iter);
  2711. }
  2712. bool
  2713. EqualRange(
  2714. const _Key key,
  2715. iterator& riterFirst,
  2716. iterator& riterLast)
  2717. {
  2718. LKR_ITER_TRACE(_TEXT("Typed::EqualRange\n"));
  2719. const void* pvKey = reinterpret_cast<const void*>(key);
  2720. DWORD_PTR pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
  2721. return _BaseHashTable::EqualRange(pnKey, riterFirst.m_iter,
  2722. riterLast.m_iter);
  2723. }
  2724. // The iterator functions for an STL hash_(|multi)_(set|map)
  2725. //
  2726. // Value type of a Pair-Associative Container is
  2727. // pair<const key_type, mapped_type>
  2728. //
  2729. // pair<iterator,bool> insert(const value_type& x);
  2730. //
  2731. // void erase(iterator pos);
  2732. // void erase(iterator f, iterator l);
  2733. //
  2734. // iterator find(const key_type& k) [const];
  2735. // const_iterator find(const key_type& k) const;
  2736. //
  2737. // pair<iterator,iterator> equal_range(const key_type& k) [const];
  2738. // pair<const_iterator,const_iterator> equal_range(const key_type& k) const
  2739. #endif // LKR_STL_ITERATORS
  2740. }; // class CTypedHashTable
  2741. #ifndef __LKRHASH_NO_NAMESPACE__
2742. } // end namespace
  2743. #endif // !__LKRHASH_NO_NAMESPACE__
  2744. #endif // __LKRHASH_H__