Source code of Windows XP (NT5)
/*++

Copyright (c) 1998-2001 Microsoft Corporation

Module Name:

    LKRhash.h

Abstract:

    Declares LKRhash: a fast, scalable, cache- and MP-friendly hash table

Author:

    Paul (Per-Ake) Larson, PALarson@microsoft.com, July 1997
    Murali R. Krishnan (MuraliK)
    George V. Reilly (GeorgeRe)     06-Jan-1998

Environment:

    Win32 - User Mode

Project:

    Internet Information Server RunTime Library

Revision History:

    10/01/1998 - Change name from LKhash to LKRhash
    10/2000    - Port to kernel mode

--*/
#ifndef __LKRHASH_H__
#define __LKRHASH_H__

#ifndef __LKR_HASH_H__
// external definitions
# include <LKR-hash.h>
#endif // !__LKR_HASH_H__

#ifndef __IRTLDBG_H__
# include <IrtlDbg.h>
#endif // !__IRTLDBG_H__

#ifndef LKR_NO_GLOBAL_LIST
# ifndef __LSTENTRY_H__
#  include <LstEntry.h>
# endif // !__LSTENTRY_H__
#else  // LKR_NO_GLOBAL_LIST
# ifndef __LOCKS_H__
#  include <Locks.h>
# endif // !__LOCKS_H__
#endif // LKR_NO_GLOBAL_LIST

#ifndef __HASHFN_H__
# include <HashFn.h>
#endif // !__HASHFN_H__

// Disable old-style deprecated iterators, by default
#ifndef LKR_DEPRECATED_ITERATORS
# define LKR_NO_DEPRECATED_ITERATORS
#endif // !LKR_DEPRECATED_ITERATORS

#ifndef LKR_NO_DEPRECATED_ITERATORS
# undef  LKR_DEPRECATED_ITERATORS
# define LKR_DEPRECATED_ITERATORS 1
#endif // !LKR_NO_DEPRECATED_ITERATORS

#undef LKR_COUNTDOWN

// #define __LKRHASH_NO_NAMESPACE__
// #define __HASHFN_NO_NAMESPACE__

// #define LKR_TABLE_LOCK  CReaderWriterLock3
// #define LKR_BUCKET_LOCK CSmallSpinLock

#ifndef LKR_TABLE_LOCK
# if defined(LKR_EXPOSED_TABLE_LOCK) || defined(LKR_DEPRECATED_ITERATORS)
   // need recursive writelocks
#  define LKR_TABLE_LOCK  CReaderWriterLock3
# else
   // use non-recursive writelocks
#  define LKR_TABLE_LOCK  CReaderWriterLock2
# endif
#endif // !LKR_TABLE_LOCK

#ifndef LKR_BUCKET_LOCK
# ifdef LKR_DEPRECATED_ITERATORS
#  define LKR_BUCKET_LOCK CReaderWriterLock3
# else  // !LKR_DEPRECATED_ITERATORS
#  define LKR_BUCKET_LOCK CSmallSpinLock
# endif // !LKR_DEPRECATED_ITERATORS
#endif // !LKR_BUCKET_LOCK
//=====================================================================
// The class CLKRLinearHashTable defined in this file provides dynamic
// hash tables, i.e., tables that grow and shrink dynamically with
// the number of records in the table.
// The basic method used is linear hashing, as explained in:
//
//   P.-A. Larson, Dynamic Hash Tables, Comm. of the ACM, 31, 4 (1988)
//
// This version has the following characteristics:
// - It is thread-safe and uses spin locks for synchronization.
// - It was designed to support very high rates of concurrent
//   operations (inserts/deletes/lookups). It achieves this by
//   (a) partitioning a CLKRHashTable into a collection of
//       CLKRLinearHashTables to reduce contention on the global
//       table lock.
//   (b) minimizing the hold time on a table lock, preferring to lock
//       down a bucket chain instead.
// - The design is L1 cache-conscious. See CNodeClump.
// - It is designed for sets varying in size from a dozen
//   elements to several million.
//
// Main classes:
//   CLKRLinearHashTable: thread-safe linear hash table
//   CLKRHashTable:       collection of CLKRLinearHashTables
//   CTypedHashTable:     typesafe wrapper for CLKRHashTable
//
//
// Paul Larson, PALarson@microsoft.com, July 1997
//   Original implementation with input from Murali R. Krishnan,
//   MuraliK@microsoft.com.
//
// George V. Reilly, GeorgeRe@microsoft.com, Dec 1997-Jan 1998
//   Massive cleanup and rewrite. Added templates.
//=====================================================================
// 1) Linear Hashing
// -----------------
//
// Linear hash tables grow and shrink dynamically with the number of
// records in the table. The growth or shrinkage is smooth: logically,
// one bucket at a time, but physically in larger increments
// (64 buckets). An insertion (deletion) may cause an expansion
// (contraction) of the table. This causes relocation of a small number
// of records (at most one bucket's worth). All operations (insert,
// delete, lookup) take constant expected time, regardless of the
// current size or the growth of the table.
//
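// As a concrete illustration (a sketch, not part of this header), the
// classical linear-hashing address computation has the shape below.
// The names mirror CLKRLinearHashTable members declared later in this
// file (m_dwBktAddrMask0, m_dwBktAddrMask1, m_iExpansionIdx), whose
// private helpers _H0 and _H1 perform the two maskings:
//
//     DWORD BucketAddress(DWORD dwSignature) const
//     {
//         DWORD dwBktAddr = dwSignature & m_dwBktAddrMask0;  // cf. _H0
//         // If this bucket has already been split in the current
//         // round of expansion, use one more bit of the signature.
//         if (dwBktAddr < m_iExpansionIdx)
//             dwBktAddr = dwSignature & m_dwBktAddrMask1;    // cf. _H1
//         return dwBktAddr;
//     }
//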
// 2) LKR extensions to Linear hash table
// --------------------------------------
//
// Larson-Krishnan-Reilly extensions to Linear hash tables for
// multiprocessor scalability and improved cache performance.
//
// Traditional implementations of linear hash tables use one global lock
// to prevent interference between concurrent operations
// (insert/delete/lookup) on the table. The single lock easily becomes
// the bottleneck in SMP scenarios when multiple threads are used.
//
// Traditionally, a (hash) bucket is implemented as a chain of
// single-item nodes. Every operation results in chasing down a chain
// looking for an item. However, pointer chasing is very slow on modern
// systems because almost every jump results in a cache miss. L2 (or L3)
// cache misses are very expensive in missed CPU cycles and the cost is
// increasing (going to 100s of cycles in the future).
//
// LKR extensions offer:
// 1) Partitioning (by hashing) of records among multiple subtables.
//    Each subtable has locks but there is no global lock. Each
//    subtable receives a much lower rate of operations, resulting in
//    fewer conflicts.
//
// 2) Improved cache locality by grouping keys and their hash values
//    into contiguous chunks that fit exactly into one (or a few)
//    cache lines.
//
// Specifically, the implementation that exists here achieves this using
// the following techniques.
//
// Class CLKRHashTable is the top-level data structure that dynamically
// creates m_cSubTables linear hash tables. The CLKRLinearHashTables act
// as the subtables to which items and accesses are fanned out. A good
// hash function multiplexes requests uniformly to various subtables,
// thus minimizing traffic to any single subtable. The implementation
// uses a home-grown version of bounded spinlocks; that is, a thread
// does not spin on a lock indefinitely, instead yielding after a
// predetermined number of loops.
//
// Each CLKRLinearHashTable consists of a CDirEntry pointing to segments,
// each holding m_dwSegSize CBuckets. Each CBucket in turn consists of a
// chain of CNodeClumps. Each CNodeClump contains a group of
// NODES_PER_CLUMP hash values (aka hash keys or signatures) and
// pointers to the associated data items. Keeping the signatures
// together increases the cache locality in scans for lookup.
//
// Traditionally, people store a linked-list element right inside the
// object that is hashed and use this linked list for the chaining of
// data blocks. However, keeping just the pointers to the data object
// and not chaining through them limits the need for bringing in the
// data object to the cache. We need to access the data object only if
// the hash values match. This limits the cache-thrashing behaviour
// exhibited by conventional implementations. It has the additional
// benefit that the objects themselves do not need to be modified
// in order to be collected in the hash table (i.e., it's non-invasive).
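//
// For illustration, a lookup's inner loop has roughly the shape below
// (a simplified sketch only; the real implementation also handles
// locking, empty slots, and early termination). Only m_dwKeySigs,
// which sits in the same cache line(s) as the rest of the clump, is
// scanned; a record is dereferenced only after its signature matches:
//
//     for (CNodeClump* pnc = pncFirst;  pnc != NULL;  pnc = pnc->m_pncNext)
//     {
//         for (int i = 0;  i < NODES_PER_CLUMP;  ++i)
//         {
//             if (pnc->m_dwKeySigs[i] == dwSignature
//                 &&  _EqualKeys(pnKey, _ExtractKey(pnc->m_pvNode[i])))
//                 return pnc->m_pvNode[i];  // cache miss only on a match
//         }
//     }
//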
#ifdef LKR_STL_ITERATORS

// needed for std::forward_iterator_tag, etc.
# include <utility>

// The iterators have very verbose tracing. Don't want it on all the time
// in debug builds.
# if defined(IRTLDEBUG) && (LKR_STL_ITERATORS >= 2)
#  define LKR_ITER_TRACE IrtlTrace
# else  // !defined(IRTLDEBUG) || LKR_STL_ITERATORS < 2
#  define LKR_ITER_TRACE 1 ? (void)0 : IrtlTrace
# endif // !defined(IRTLDEBUG) || LKR_STL_ITERATORS < 2

#endif // LKR_STL_ITERATORS

//--------------------------------------------------------------------
// Default values for the hashtable constructors
enum {
#ifdef _WIN64
    LK_DFLT_MAXLOAD     = 4,  // 64-byte nodes => NODES_PER_CLUMP = 4
#else
    LK_DFLT_MAXLOAD     = 7,  // Default upperbound on average chain length
#endif
    LK_DFLT_INITSIZE    = LK_MEDIUM_TABLESIZE, // Default initial size of table
    LK_DFLT_NUM_SUBTBLS = 0,  // Use a heuristic to choose #subtables
};
/*--------------------------------------------------------------------
 * Undocumented additional creation flag parameters to LKR_CreateTable
 */
enum {
    LK_CREATE_NON_PAGED_ALLOCS = 0x1000, // Use paged or NP pool in kernel
};

//--------------------------------------------------------------------
// Custom memory allocators (optional)
//--------------------------------------------------------------------

#if !defined(LKR_NO_ALLOCATORS) && !defined(LKRHASH_KERNEL_MODE)
// # define LKRHASH_ACACHE 1
// # define LKRHASH_ROCKALL_FAST 1
#endif // !LKR_NO_ALLOCATORS && !LKRHASH_KERNEL_MODE
#if defined(LKRHASH_ACACHE)

# include <acache.hxx>

class ACache : public ALLOC_CACHE_HANDLER
{
private:
    SIZE_T m_cb;

public:
    ACache(IN LPCSTR pszName, IN const ALLOC_CACHE_CONFIGURATION* pacConfig)
        : ALLOC_CACHE_HANDLER(pszName, pacConfig),
          m_cb(m_acConfig.cbSize)
    {}

    SIZE_T ByteSize() const
    {
        return m_cb;
    }

    static const TCHAR* ClassName()  {return _TEXT("ACache");}
}; // class ACache

typedef ACache CLKRhashAllocator;

# define LKRHASH_ALLOCATOR_NEW(C, N, Tag)                       \
    const ALLOC_CACHE_CONFIGURATION acc = { 1, N, sizeof(C) };  \
    C::sm_palloc = new ACache("LKRhash:" #C, &acc);

#elif defined(LKRHASH_ROCKALL_FAST)

# include <FastHeap.hpp>

class FastHeap : public FAST_HEAP
{
private:
    SIZE_T m_cb;

public:
    FastHeap(SIZE_T cb)
        : m_cb(cb)
    {}

    LPVOID Alloc()
    { return New(m_cb, NULL, false); }

    BOOL Free(LPVOID pvMem)
    { return Delete(pvMem); }

    SIZE_T ByteSize() const
    {
        return m_cb;
    }

    static const TCHAR* ClassName()  {return _TEXT("FastHeap");}
}; // class FastHeap

typedef FastHeap CLKRhashAllocator;

# define LKRHASH_ALLOCATOR_NEW(C, N, Tag)   \
    C::sm_palloc = new FastHeap(sizeof(C))

#endif // LKRHASH_ROCKALL_FAST
#ifdef LKRHASH_ALLOCATOR_NEW

// placed inline in the declaration of class C
# define LKRHASH_ALLOCATOR_DEFINITIONS(C)               \
    protected:                                          \
        friend class CLKRLinearHashTable;               \
        friend BOOL LKR_Initialize();                   \
        friend void LKR_Terminate();                    \
    public:                                             \
        static CLKRhashAllocator* sm_palloc;            \
        static void* operator new(size_t s)             \
        {                                               \
            IRTLASSERT(s == sizeof(C));                 \
            IRTLASSERT(sm_palloc != NULL);              \
            return sm_palloc->Alloc();                  \
        }                                               \
        static void operator delete(void* pv)           \
        {                                               \
            IRTLASSERT(pv != NULL);                     \
            IRTLASSERT(sm_palloc != NULL);              \
            sm_palloc->Free(pv);                        \
        }

// used in LKRHashTableInit()
# define LKRHASH_ALLOCATOR_INIT(C, N, Tag, f)           \
    {                                                   \
        if (f)                                          \
        {                                               \
            IRTLASSERT(C::sm_palloc == NULL);           \
            LKRHASH_ALLOCATOR_NEW(C, N, Tag);           \
            f = (C::sm_palloc != NULL);                 \
        }                                               \
    }

// used in LKRHashTableUninit()
# define LKRHASH_ALLOCATOR_UNINIT(C)                    \
    {                                                   \
        if (C::sm_palloc != NULL)                       \
        {                                               \
            delete C::sm_palloc;                        \
            C::sm_palloc = NULL;                        \
        }                                               \
    }

#else  // !LKRHASH_ALLOCATOR_NEW

# define LKRHASH_ALLOCATOR_DEFINITIONS(C)
# define LKRHASH_ALLOCATOR_INIT(C, N, Tag, f)
# define LKRHASH_ALLOCATOR_UNINIT(C)

class CLKRhashAllocator
{
public:
    static const TCHAR* ClassName()  {return _TEXT("global new");}
};

#endif // !LKRHASH_ALLOCATOR_NEW
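// Illustrative use of the allocator macros above (a hypothetical call
// site; per the comments above, the real calls live in
// LKRHashTableInit() and LKRHashTableUninit(), and the N and Tag
// arguments here are placeholder values):
//
//     BOOL f = TRUE;
//     LKRHASH_ALLOCATOR_INIT(CNodeClump, 200, 0, f);  // sets sm_palloc
//     ...
//     LKRHASH_ALLOCATOR_UNINIT(CNodeClump);           // tears it down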
#ifndef __LKRHASH_NO_NAMESPACE__
namespace LKRhash {
#endif // !__LKRHASH_NO_NAMESPACE__

//--------------------------------------------------------------------
// forward declarations

class IRTL_DLLEXP CLKRLinearHashTable;

class IRTL_DLLEXP CLKRHashTable;

template <class _Der, class _Rcd, class _Ky, class _HT
#ifdef LKR_DEPRECATED_ITERATORS
          , class _Iter
#endif // LKR_DEPRECATED_ITERATORS
         >
class CTypedHashTable;

class CNodeClump;
class CBucket;
class CSegment;
class CDirEntry;

class IRTL_DLLEXP CLKRHashTableStats;

//--------------------------------------------------------------------
// Statistical information returned by GetStatistics
//--------------------------------------------------------------------
#ifdef LOCK_INSTRUMENTATION
class IRTL_DLLEXP CAveragedLockStats : public CLockStatistics
{
public:
    int m_nItems;

    CAveragedLockStats();
}; // class CAveragedLockStats
#endif // LOCK_INSTRUMENTATION

#ifndef LKRHASH_KERNEL_MODE

class IRTL_DLLEXP CLKRHashTableStats
{
public:
    int     RecordCount;      // number of records in the table
    int     TableSize;        // table size in number of slots
    int     DirectorySize;    // number of entries in directory
    int     LongestChain;     // longest hash chain in the table
    int     EmptySlots;       // number of unused hash slots
    double  SplitFactor;      // fraction of buckets split
    double  AvgSearchLength;  // average length of a successful search
    double  ExpSearchLength;  // theoretically expected length
    double  AvgUSearchLength; // average length of an unsuccessful search
    double  ExpUSearchLength; // theoretically expected length
    int     NodeClumpSize;    // number of slots in a node clump
    int     CBucketSize;      // sizeof(CBucket)

#ifdef LOCK_INSTRUMENTATION
    CAveragedLockStats    m_alsTable;      // stats for table lock
    CAveragedLockStats    m_alsBucketsAvg; // avg of stats for bucket locks
    CGlobalLockStatistics m_gls;           // global statistics for all locks
#endif // LOCK_INSTRUMENTATION

    enum {
        MAX_BUCKETS = 40,
    };

    // histogram of bucket lengths
    LONG m_aBucketLenHistogram[MAX_BUCKETS];

    CLKRHashTableStats();

    static const LONG* BucketSizes();

    static LONG BucketSize(LONG nBucketIndex);

    static LONG BucketIndex(LONG nBucketLength);
}; // class CLKRHashTableStats

#endif // !LKRHASH_KERNEL_MODE
//--------------------------------------------------------------------
// Global table lock code. This is only used to measure how much of a
// slowdown having a global lock on the CLKRHashTable causes. It is
// *never* used in production code.

// #define LKRHASH_GLOBAL_LOCK CCritSec

#ifdef LKRHASH_GLOBAL_LOCK

# define LKRHASH_GLOBAL_LOCK_DECLARATIONS()     \
    typedef LKRHASH_GLOBAL_LOCK GlobalLock;     \
    mutable GlobalLock m_lkGlobal;

# define LKRHASH_GLOBAL_READ_LOCK()     m_lkGlobal.ReadLock()
# define LKRHASH_GLOBAL_WRITE_LOCK()    m_lkGlobal.WriteLock()
# define LKRHASH_GLOBAL_READ_UNLOCK()   m_lkGlobal.ReadUnlock()
# define LKRHASH_GLOBAL_WRITE_UNLOCK()  m_lkGlobal.WriteUnlock()

#else // !LKRHASH_GLOBAL_LOCK

# define LKRHASH_GLOBAL_LOCK_DECLARATIONS()

// These ones will be optimized away by the compiler
# define LKRHASH_GLOBAL_READ_LOCK()     ((void)0)
# define LKRHASH_GLOBAL_WRITE_LOCK()    ((void)0)
# define LKRHASH_GLOBAL_READ_UNLOCK()   ((void)0)
# define LKRHASH_GLOBAL_WRITE_UNLOCK()  ((void)0)

#endif // !LKRHASH_GLOBAL_LOCK
// Class for nodes on a bucket chain. Instead of a node containing
// one (signature, record-pointer, next-tuple-pointer) tuple, it
// contains _N_ such tuples. (N-1 next-tuple-pointers are omitted.)
// This improves locality of reference greatly; i.e., it's L1
// cache-friendly. It also reduces memory fragmentation and memory
// allocator overhead. It does complicate the chain traversal code
// slightly, admittedly.
//
// This theory is beautiful. In practice, however, CNodeClumps
// are *not* perfectly aligned on 32-byte boundaries by the memory
// allocators. Experimental results indicate that we get a 2-3%
// speed improvement by using 32-byte-aligned blocks, but this must
// be considered against the average of 16 bytes wasted per block.

class CNodeClump
{
public:
    // Record slots per chunk - set so a chunk matches (one or two)
    // cache lines: 3 => 32 bytes, 7 => 64 bytes, on a 32-bit system.
    // Note: the default max load factor is 7, which implies that
    // there will seldom be more than one node clump in a chain.
    enum {
#if defined(LOCK_INSTRUMENTATION)
        BUCKET_BYTE_SIZE = 96,
#else
        BUCKET_BYTE_SIZE = 64,
#endif
        BUCKET_OVERHEAD  = sizeof(LKR_BUCKET_LOCK) + sizeof(CNodeClump*),
        NODE_SIZE        = sizeof(const void*) + sizeof(DWORD),
        NODES_PER_CLUMP  = (BUCKET_BYTE_SIZE - BUCKET_OVERHEAD) / NODE_SIZE
    };
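
    // Worked example of the enum arithmetic above, for a 32-bit build
    // without LOCK_INSTRUMENTATION, assuming a 4-byte bucket lock (the
    // sizes are illustrative; they depend on the configured lock type):
    //     BUCKET_OVERHEAD = 4 (lock) + 4 (CNodeClump*)    = 8 bytes
    //     NODE_SIZE       = 4 (const void*) + 4 (DWORD)   = 8 bytes
    //     NODES_PER_CLUMP = (64 - 8) / 8                  = 7
    // which matches LK_DFLT_MAXLOAD == 7 on 32-bit platforms.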
    enum {
        // See if countdown loops are faster than countup loops for
        // traversing a CNodeClump. In practice, countup loops are faster.
#ifndef LKR_COUNTDOWN
        NODE_BEGIN = 0,
        NODE_END   = NODES_PER_CLUMP,
        NODE_STEP  = +1,
        // for (int x = 0;  x < NODES_PER_CLUMP;  ++x) ...
#else  // LKR_COUNTDOWN
        NODE_BEGIN = NODES_PER_CLUMP-1,
        NODE_END   = -1,
        NODE_STEP  = -1,
        // for (int x = NODES_PER_CLUMP;  --x >= 0; ) ...
#endif // LKR_COUNTDOWN
    };

    enum {
#ifndef __HASHFN_NO_NAMESPACE__
        HASH_INVALID_SIGNATURE = HashFn::HASH_INVALID_SIGNATURE,
#else  // __HASHFN_NO_NAMESPACE__
        HASH_INVALID_SIGNATURE = ::HASH_INVALID_SIGNATURE,
#endif // !__HASHFN_NO_NAMESPACE__
    };

    DWORD       m_dwKeySigs[NODES_PER_CLUMP]; // hash values computed from keys
    CNodeClump* m_pncNext;                    // next node clump on the chain
    const void* m_pvNode[NODES_PER_CLUMP];    // pointers to records

    CNodeClump()
    {
        Clear();
    }
    void Clear()
    {
        m_pncNext = NULL;   // no dangling pointers
        for (int i = NODES_PER_CLUMP;  --i >= 0; )
        {
            m_dwKeySigs[i] = HASH_INVALID_SIGNATURE;
            m_pvNode[i]    = NULL;
        }
    }

    bool InvalidSignature(int i) const
    {
        IRTLASSERT(0 <= i  &&  i < NODES_PER_CLUMP);
        return (m_dwKeySigs[i] == HASH_INVALID_SIGNATURE);
    }

    bool IsEmptyNode(int i) const
    {
        IRTLASSERT(0 <= i  &&  i < NODES_PER_CLUMP);
        return (m_pvNode[i] == NULL);
    }

    bool IsEmptyAndInvalid(int i) const
    {
        return IsEmptyNode(i) && InvalidSignature(i);
    }

    bool IsEmptySlot(int i) const
    {
        return InvalidSignature(i);
    }

    bool IsLastClump() const
    {
        return (m_pncNext == NULL);
    }

#ifdef IRTLDEBUG
    // Don't want overhead of calls to dtor in retail build
    ~CNodeClump()
    {
        IRTLASSERT(IsLastClump()); // no dangling pointers
        for (int i = NODES_PER_CLUMP;  --i >= 0; )
            IRTLASSERT(InvalidSignature(i) && IsEmptyNode(i));
    }
#endif // IRTLDEBUG

    LKRHASH_ALLOCATOR_DEFINITIONS(CNodeClump);
}; // class CNodeClump
#ifdef LKR_STL_ITERATORS

class IRTL_DLLEXP CLKRLinearHashTable_Iterator;
class IRTL_DLLEXP CLKRHashTable_Iterator;

class IRTL_DLLEXP CLKRLinearHashTable_Iterator
{
    friend class CLKRLinearHashTable;
    friend class CLKRHashTable;
    friend class CLKRHashTable_Iterator;

protected:
    CLKRLinearHashTable* m_plht;         // which linear hash table?
    CNodeClump*          m_pnc;          // a CNodeClump in bucket
    DWORD                m_dwBucketAddr; // bucket index
    short                m_iNode;        // offset within m_pnc

    enum {
        NODES_PER_CLUMP = CNodeClump::NODES_PER_CLUMP,
        NODE_BEGIN      = CNodeClump::NODE_BEGIN,
        NODE_END        = CNodeClump::NODE_END,
        NODE_STEP       = CNodeClump::NODE_STEP,
    };

    CLKRLinearHashTable_Iterator(
        CLKRLinearHashTable* plht,
        CNodeClump*          pnc,
        DWORD                dwBucketAddr,
        short                iNode)
        : m_plht(plht),
          m_pnc(pnc),
          m_dwBucketAddr(dwBucketAddr),
          m_iNode(iNode)
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::prot ctor, this=%p, plht=%p, ")
                       _TEXT("pnc=%p, ba=%d, in=%d\n"),
                       this, plht, pnc, dwBucketAddr, iNode);
    }

    inline void _AddRef(
        LK_ADDREF_REASON lkar) const;

    bool _Increment(
        bool fDecrementOldValue=true);

public:
    CLKRLinearHashTable_Iterator()
        : m_plht(NULL),
          m_pnc(NULL),
          m_dwBucketAddr(0),
          m_iNode(0)
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::default ctor, this=%p\n"), this);
    }

    CLKRLinearHashTable_Iterator(
        const CLKRLinearHashTable_Iterator& rhs)
        : m_plht(rhs.m_plht),
          m_pnc(rhs.m_pnc),
          m_dwBucketAddr(rhs.m_dwBucketAddr),
          m_iNode(rhs.m_iNode)
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::copy ctor, this=%p, rhs=%p\n"),
                       this, &rhs);
        _AddRef(LKAR_ITER_COPY_CTOR);
    }

    CLKRLinearHashTable_Iterator& operator=(
        const CLKRLinearHashTable_Iterator& rhs)
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::operator=, this=%p, rhs=%p\n"),
                       this, &rhs);
        rhs._AddRef(LKAR_ITER_ASSIGN_ACQUIRE);
        this->_AddRef(LKAR_ITER_ASSIGN_RELEASE);

        m_plht         = rhs.m_plht;
        m_pnc          = rhs.m_pnc;
        m_dwBucketAddr = rhs.m_dwBucketAddr;
        m_iNode        = rhs.m_iNode;

        return *this;
    }

    ~CLKRLinearHashTable_Iterator()
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::dtor, this=%p, plht=%p\n"),
                       this, m_plht);
        _AddRef(LKAR_ITER_DTOR);
    }

    bool Increment()
    {
        return IsValid() ? _Increment() : false;
    }

    bool IsValid() const
    {
        bool fValid = (m_plht != NULL  &&  m_pnc != NULL
                       &&  0 <= m_iNode  &&  m_iNode < NODES_PER_CLUMP);
        if (fValid)
            fValid = (m_pnc->m_pvNode[m_iNode] != NULL);
        IRTLASSERT(fValid);
        return fValid;
    }

    const void* Record() const
    {
        IRTLASSERT(IsValid());
        return m_pnc->m_pvNode[m_iNode];
    }

    inline const DWORD_PTR Key() const;

    bool operator==(
        const CLKRLinearHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::operator==, this=%p, rhs=%p\n"),
                       this, &rhs);
        // m_pnc and m_iNode uniquely identify an iterator
        bool fEQ = ((m_pnc == rhs.m_pnc)    // most unique field
                    &&  (m_iNode == rhs.m_iNode));
        IRTLASSERT(!fEQ  ||  ((m_plht == rhs.m_plht)
                              &&  (m_dwBucketAddr == rhs.m_dwBucketAddr)));
        return fEQ;
    }

    bool operator!=(
        const CLKRLinearHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::operator!=, this=%p, rhs=%p\n"),
                       this, &rhs);
        bool fNE = ((m_pnc != rhs.m_pnc)
                    ||  (m_iNode != rhs.m_iNode));
        //// IRTLASSERT(fNE == !this->operator==(rhs));
        return fNE;
    }
}; // class CLKRLinearHashTable_Iterator
class IRTL_DLLEXP CLKRHashTable_Iterator
{
    friend class CLKRHashTable;

protected:
    // order important to minimize size
    CLKRHashTable*               m_pht;     // which hash table?
    CLKRLinearHashTable_Iterator m_subiter; // iterator into subtable
    short                        m_ist;     // index of subtable

    CLKRHashTable_Iterator(
        CLKRHashTable* pht,
        short          ist)
        : m_pht(pht),
          m_subiter(CLKRLinearHashTable_Iterator()), // zero
          m_ist(ist)
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::prot ctor, this=%p, pht=%p, ist=%d\n"),
                       this, pht, ist);
    }

    bool _Increment(
        bool fDecrementOldValue=true);

public:
    CLKRHashTable_Iterator()
        : m_pht(NULL),
          m_subiter(CLKRLinearHashTable_Iterator()), // zero
          m_ist(0)
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::default ctor, this=%p\n"), this);
    }

#ifdef IRTLDEBUG
    // Compiler does a perfectly adequate job of synthesizing these
    // methods.
    CLKRHashTable_Iterator(
        const CLKRHashTable_Iterator& rhs)
        : m_pht(rhs.m_pht),
          m_subiter(rhs.m_subiter),
          m_ist(rhs.m_ist)
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::copy ctor, this=%p, rhs=%p\n"),
                       this, &rhs);
    }

    CLKRHashTable_Iterator& operator=(
        const CLKRHashTable_Iterator& rhs)
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::operator=, this=%p, rhs=%p\n"),
                       this, &rhs);
        m_ist     = rhs.m_ist;
        m_subiter = rhs.m_subiter;
        m_pht     = rhs.m_pht;
        return *this;
    }

    ~CLKRHashTable_Iterator()
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::dtor, this=%p, pht=%p\n"), this, m_pht);
    }
#endif // IRTLDEBUG

    bool Increment()
    {
        return IsValid() ? _Increment() : false;
    }

    bool IsValid() const
    {
        bool fValid = (m_pht != NULL  &&  m_ist >= 0);
        IRTLASSERT(fValid);
        fValid = fValid  &&  (m_subiter.m_plht != NULL);
        IRTLASSERT(fValid);
        fValid = fValid  &&  (m_subiter.m_pnc != NULL);
        IRTLASSERT(fValid);
        fValid = fValid  &&  (0 <= m_subiter.m_iNode);
        IRTLASSERT(fValid);
        fValid = fValid  &&  (m_subiter.m_iNode < CNodeClump::NODES_PER_CLUMP);
        IRTLASSERT(fValid);
        if (fValid)
            fValid = (m_subiter.m_pnc->m_pvNode[m_subiter.m_iNode] != NULL);
        IRTLASSERT(fValid);
        return fValid;
    }

    const void* Record() const
    {
        IRTLASSERT(IsValid());
        return m_subiter.Record();
    }

    const DWORD_PTR Key() const
    {
        IRTLASSERT(IsValid());
        return m_subiter.Key();
    }

    bool operator==(
        const CLKRHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::operator==, this=%p, rhs=%p\n"),
                       this, &rhs);
        // m_pnc and m_iNode uniquely identify an iterator
        bool fEQ = ((m_subiter.m_pnc
                     == rhs.m_subiter.m_pnc)    // most unique field
                    &&  (m_subiter.m_iNode == rhs.m_subiter.m_iNode));
        IRTLASSERT(!fEQ
                   ||  ((m_ist == rhs.m_ist)
                        &&  (m_pht == rhs.m_pht)
                        &&  (m_subiter.m_plht == rhs.m_subiter.m_plht)
                        &&  (m_subiter.m_dwBucketAddr
                             == rhs.m_subiter.m_dwBucketAddr)));
        return fEQ;
    }

    bool operator!=(
        const CLKRHashTable_Iterator& rhs) const
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::operator!=, this=%p, rhs=%p\n"),
                       this, &rhs);
        bool fNE = ((m_subiter.m_pnc != rhs.m_subiter.m_pnc)
                    ||  (m_subiter.m_iNode != rhs.m_subiter.m_iNode));
        //// IRTLASSERT(fNE == !this->operator==(rhs));
        return fNE;
    }
}; // class CLKRHashTable_Iterator

#endif // LKR_STL_ITERATORS
//--------------------------------------------------------------------
// CLKRLinearHashTable
//
// A thread-safe linear hash table.
//--------------------------------------------------------------------

class IRTL_DLLEXP CLKRLinearHashTable
{
public:
    typedef LKR_TABLE_LOCK  TableLock;
    typedef LKR_BUCKET_LOCK BucketLock;

#ifdef LKR_DEPRECATED_ITERATORS
    class CIterator;
    friend class CLKRLinearHashTable::CIterator;
#endif // LKR_DEPRECATED_ITERATORS

#ifdef LKR_STL_ITERATORS
    friend class CLKRLinearHashTable_Iterator;
    typedef CLKRLinearHashTable_Iterator Iterator;
#endif // LKR_STL_ITERATORS

private:
    friend class CNodeClump;
    friend class CLKRHashTable;
    friend BOOL LKR_Initialize();
    friend void LKR_Terminate();

#ifdef LKRHASH_INSTRUMENTATION
    // TODO
#endif // LKRHASH_INSTRUMENTATION

public:
    // aliases for convenience
    enum {
        NODES_PER_CLUMP = CNodeClump::NODES_PER_CLUMP,
        MIN_DIRSIZE     = 1<<3,  // CDirEntry::MIN_DIRSIZE
        MAX_DIRSIZE     = 1<<20, // CDirEntry::MAX_DIRSIZE
        NAME_SIZE       = 16,    // includes trailing '\0'
        NODE_BEGIN      = CNodeClump::NODE_BEGIN,
        NODE_END        = CNodeClump::NODE_END,
        NODE_STEP       = CNodeClump::NODE_STEP,
        HASH_INVALID_SIGNATURE = CNodeClump::HASH_INVALID_SIGNATURE,
    };
private:
    //
    // Miscellaneous helper functions
    //

    // Convert a hash signature to a bucket address
    inline DWORD    _BucketAddress(DWORD dwSignature) const;

    // See the Linear Hashing paper
    static DWORD    _H0(DWORD dwSignature, DWORD dwBktAddrMask);
    DWORD           _H0(DWORD dwSignature) const;

    // See the Linear Hashing paper. Preserves one bit more than _H0.
    static DWORD    _H1(DWORD dwSignature, DWORD dwBktAddrMask);
    DWORD           _H1(DWORD dwSignature) const;

    // In which segment within the directory does the bucketaddress lie?
    // (Return type must be lvalue so that it can be assigned to.)
    CSegment*&      _Segment(DWORD dwBucketAddr) const;

    // Offset within the segment of the bucketaddress
    DWORD           _SegIndex(DWORD dwBucketAddr) const;

    // Convert a bucketaddress to a CBucket*
    inline CBucket* _Bucket(DWORD dwBucketAddr) const;

    // Extract the key from a record
    const DWORD_PTR _ExtractKey(const void* pvRecord) const;

    // Hash the key
    DWORD           _CalcKeyHash(const DWORD_PTR pnKey) const;

    // Compare two keys for equality
    BOOL            _EqualKeys(const DWORD_PTR pnKey1,
                               const DWORD_PTR pnKey2) const;

    // AddRef or Release a record.
    void            _AddRefRecord(const void* pvRecord,
                                  LK_ADDREF_REASON lkar) const;

    // Find a bucket, given its signature.
    CBucket*        _FindBucket(DWORD dwSignature,
                                bool fLockForWrite) const;

    // Used by _FindKey so that the thread won't deadlock if the user has
    // already explicitly called table->WriteLock().
    bool            _ReadOrWriteLock() const;
    void            _ReadOrWriteUnlock(bool fReadLocked) const;

    // Memory allocation wrappers to allow us to simulate allocation
    // failures during testing
    static CDirEntry* const  _AllocateSegmentDirectory(size_t n);
    bool                     _FreeSegmentDirectory();
    static CNodeClump* const _AllocateNodeClump();
    static bool              _FreeNodeClump(CNodeClump* pnc);
    CSegment* const          _AllocateSegment() const;
    bool                     _FreeSegment(CSegment* pseg) const;

    LK_RETCODE      _InitializeSegmentDirectory();

#ifdef LOCK_INSTRUMENTATION
    static LONG sm_cTables;

    static const TCHAR* _LockName()
    {
        LONG l = ++sm_cTables;
        // possible race condition but we don't care, as this is never
        // used in production code
        static TCHAR s_tszName[CLockStatistics::L_NAMELEN];
        wsprintf(s_tszName, _TEXT("LH%05x"), 0xFFFFF & l);
        return s_tszName;
    }

    // Statistics for the table lock
    CLockStatistics _LockStats() const
    { return m_Lock.Statistics(); }
#endif // LOCK_INSTRUMENTATION
private:
    // Fields are ordered so as to minimize number of cache lines touched

    DWORD            m_dwSignature;     // debugging: id & corruption check
    CHAR             m_szName[NAME_SIZE]; // an identifier for debugging
    mutable LK_RETCODE m_lkrcState;     // Internal state of table
    mutable TableLock  m_Lock;          // Lock on entire linear hash table

    // type-specific function pointers
    LKR_PFnExtractKey   m_pfnExtractKey;   // Extract key from record
    LKR_PFnCalcKeyHash  m_pfnCalcKeyHash;  // Calculate hash signature of key
    LKR_PFnEqualKeys    m_pfnEqualKeys;    // Compare two keys
    LKR_PFnAddRefRecord m_pfnAddRefRecord; // AddRef a record

    LK_TABLESIZE     m_lkts;            // "size" of table: small, medium, large
    DWORD            m_dwSegBits;       // C{Small,Medium,Large}Segment::SEGBITS
    DWORD            m_dwSegSize;       // C{Small,Medium,Large}Segment::SEGSIZE
    DWORD            m_dwSegMask;       // C{Small,Medium,Large}Segment::SEGMASK
    DWORD            m_dwBktAddrMask0;  // mask used for address calculation
    DWORD            m_dwBktAddrMask1;  // used in _H1 calculation
    DWORD            m_iExpansionIdx;   // address of next bucket to be expanded
    CDirEntry*       m_paDirSegs;       // directory of table segments
    DWORD            m_cDirSegs;        // segment directory size: varies between
                                        // MIN_DIRSIZE and MAX_DIRSIZE
    DWORD            m_nLevel;          // number of table doublings performed
    DWORD            m_cRecords;        // number of records in the table
    DWORD            m_cActiveBuckets;  // number of buckets in use (table size)
    WORD             m_wBucketLockSpins;// default spin count for bucket locks
    const BYTE       m_nTableLockType;  // for debugging: LOCK_SPINLOCK, etc.
    const BYTE       m_nBucketLockType; // for debugging: LOCK_SPINLOCK, etc.
    const CLKRHashTable* const m_phtParent; // Owning table. NULL => standalone
    BYTE             m_MaxLoad;         // max load factor (average chain length)
    const bool       m_fMultiKeys;      // Allow multiple identical keys?
    const bool       m_fNonPagedAllocs; // Use paged or NP pool in kernel
    DWORD_PTR        m_pvReserved1;     // Reserved for future debugging needs
    DWORD_PTR        m_pvReserved2;     // Reserved for future debugging needs
    DWORD_PTR        m_pvReserved3;     // Reserved for future debugging needs
    DWORD_PTR        m_pvReserved4;     // Reserved for future debugging needs

#ifndef LKR_NO_GLOBAL_LIST
    static CLockedDoubleList sm_llGlobalList; // All active CLKRLinearHashTables
    CListEntry               m_leGlobalList;
#endif // !LKR_NO_GLOBAL_LIST

    void _InsertThisIntoGlobalList()
    {
#ifndef LKR_NO_GLOBAL_LIST
        // Only add standalone CLKRLinearHashTables to global list.
        // CLKRHashTables have their own global list.
        if (m_phtParent == NULL)
            sm_llGlobalList.InsertHead(&m_leGlobalList);
#endif // !LKR_NO_GLOBAL_LIST
    }

    void _RemoveThisFromGlobalList()
    {
#ifndef LKR_NO_GLOBAL_LIST
        if (m_phtParent == NULL)
            sm_llGlobalList.RemoveEntry(&m_leGlobalList);
#endif // !LKR_NO_GLOBAL_LIST
    }
    // Non-trivial implementation functions
    LK_RETCODE _InsertRecord(const void* pvRecord, DWORD dwSignature,
                             bool fOverwrite
#ifdef LKR_STL_ITERATORS
                             , Iterator* piterResult=NULL
#endif // LKR_STL_ITERATORS
                             );
    LK_RETCODE _DeleteKey(const DWORD_PTR pnKey, DWORD dwSignature,
                          bool fDeleteAllSame);
    LK_RETCODE _DeleteRecord(const void* pvRecord, DWORD dwSignature);
    bool       _DeleteNode(CBucket* pbkt, CNodeClump*& rpnc,
                           CNodeClump*& rpncPrev, int& riNode,
                           LK_ADDREF_REASON lkar);
    LK_RETCODE _FindKey(const DWORD_PTR pnKey, DWORD dwSignature,
                        const void** ppvRecord
#ifdef LKR_STL_ITERATORS
                        , Iterator* piterResult=NULL
#endif // LKR_STL_ITERATORS
                        ) const;
    LK_RETCODE _FindRecord(const void* pvRecord,
                           DWORD dwSignature) const;

    // returns count of errors in compacted state => 0 is good
    int        _IsNodeCompact(CBucket* const pbkt) const;

#ifdef LKR_APPLY_IF
    // Predicate functions
    static LK_PREDICATE WINAPI
    _PredTrue(const void* /*pvRecord*/, void* /*pvState*/)
    { return LKP_PERFORM; }

    DWORD _ApplyIf(LKR_PFnRecordPred pfnPredicate,
                   LKR_PFnRecordAction pfnAction, void* pvState,
                   LK_LOCKTYPE lkl, LK_PREDICATE& rlkp);
    DWORD _DeleteIf(LKR_PFnRecordPred pfnPredicate, void* pvState,
                    LK_PREDICATE& rlkp);
#endif // LKR_APPLY_IF

    void _Clear(bool fShrinkDirectory);

    LK_RETCODE _SetSegVars(LK_TABLESIZE lkts, DWORD cInitialBuckets);
    LK_RETCODE _Expand();
    LK_RETCODE _Contract();
    LK_RETCODE _SplitRecordSet(CNodeClump* pncOldTarget,
                               CNodeClump* pncNewTarget,
                               DWORD       iExpansionIdx,
                               DWORD       dwBktAddrMask,
                               DWORD       dwNewBkt,
                               CNodeClump* pncFreeList);
    LK_RETCODE _MergeRecordSets(CBucket*    pbktNewTarget,
                                CNodeClump* pncOldList,
                                CNodeClump* pncFreeList);

    // Private copy ctor and op= to prevent compiler synthesizing them.
    // Must provide a (bad) implementation because we export instantiations.
    // TODO: implement these properly; they could be useful.
    CLKRLinearHashTable(const CLKRLinearHashTable&)
        : m_dwSignature(SIGNATURE_FREE)
#ifdef LOCK_INSTRUMENTATION
        , m_Lock(NULL)
#endif // LOCK_INSTRUMENTATION
        , m_nTableLockType(0),
          m_nBucketLockType(0),
          m_fMultiKeys(false),
          m_fNonPagedAllocs(false),
          m_phtParent(NULL)
    {*(BYTE*)NULL;}

    CLKRLinearHashTable& operator=(const CLKRLinearHashTable&)
    {return *(CLKRLinearHashTable*)NULL;}
private:
    // This ctor is used by CLKRHashTable
    CLKRLinearHashTable(
        LPCSTR              pszName,         // Identifies table for debugging
        LKR_PFnExtractKey   pfnExtractKey,   // Extract key from record
        LKR_PFnCalcKeyHash  pfnCalcKeyHash,  // Calculate hash signature of key
        LKR_PFnEqualKeys    pfnEqualKeys,    // Compare two keys
        LKR_PFnAddRefRecord pfnAddRefRecord, // AddRef in FindKey, etc.
        unsigned            maxload,         // Upperbound on avg chain length
        DWORD               initsize,        // Initial size of hash table.
        CLKRHashTable*      phtParent,       // Owning table.
        bool                fMultiKeys,      // Allow multiple identical keys?
        bool                fNonPagedAllocs  // use paged or NP pool in kernel
        );

    LK_RETCODE _Initialize(
        LKR_PFnExtractKey   pfnExtractKey,
        LKR_PFnCalcKeyHash  pfnCalcKeyHash,
        LKR_PFnEqualKeys    pfnEqualKeys,
        LKR_PFnAddRefRecord pfnAddRefRecord,
        LPCSTR              pszName,
        unsigned            maxload,
        DWORD               initsize);

public:
    CLKRLinearHashTable(
        LPCSTR              pszName,         // Identifies table for debugging
        LKR_PFnExtractKey   pfnExtractKey,   // Extract key from record
        LKR_PFnCalcKeyHash  pfnCalcKeyHash,  // Calculate hash signature of key
        LKR_PFnEqualKeys    pfnEqualKeys,    // Compare two keys
        LKR_PFnAddRefRecord pfnAddRefRecord, // AddRef in FindKey, etc.
        unsigned  maxload=LK_DFLT_MAXLOAD,   // Upperbound on avg chain length
        DWORD     initsize=LK_DFLT_INITSIZE, // Initial size of hash table.
        DWORD     num_subtbls=LK_DFLT_NUM_SUBTBLS, // for signature compatibility
                                                   // with CLKRHashTable
        bool      fMultiKeys=false           // Allow multiple identical keys?
#ifdef LKRHASH_KERNEL_MODE
        , bool    fNonPagedAllocs=true       // use paged or NP pool
#endif
        );
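
    // Illustrative construction (a sketch with hypothetical client
    // callbacks; the LKR_PFn* typedefs come from <LKR-hash.h>):
    //     CLKRLinearHashTable ht("MyRecords",
    //                            MyExtractKey,    // record -> key
    //                            MyCalcKeyHash,   // key -> signature
    //                            MyEqualKeys,     // key comparison
    //                            MyAddRefRecord); // refcount callback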
    ~CLKRLinearHashTable();

    static const TCHAR* ClassName()
    {return _TEXT("CLKRLinearHashTable");}

    int NumSubTables() const  {return 1;}

    bool MultiKeys() const
    {
        return false;
        // return m_fMultiKeys; // TODO: implement
    }

#ifdef LKRHASH_KERNEL_MODE
    bool NonPagedAllocs() const
    {
        return m_fNonPagedAllocs;
    }
#endif

    static LK_TABLESIZE NumSubTables(DWORD& rinitsize, DWORD& rnum_subtbls);

    // Insert a new record into hash table.
    // Returns LK_SUCCESS if all OK, LK_KEY_EXISTS if same key already
    // exists (unless fOverwrite), LK_ALLOC_FAIL if out of space,
    // or LK_BAD_RECORD for a bad record.
    LK_RETCODE InsertRecord(const void* pvRecord, bool fOverwrite=false)
    {
        if (!IsUsable())
            return m_lkrcState;
        if (pvRecord == NULL)
            return LK_BAD_RECORD;

        return _InsertRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)),
                             fOverwrite);
    }
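
    // Illustrative usage (a sketch with a hypothetical record type):
    //     CMyRecord* pRec = new CMyRecord(nKey);
    //     LK_RETCODE lkrc = ht.InsertRecord(pRec);
    //     if (lkrc == LK_KEY_EXISTS)
    //         { /* key already present and fOverwrite was false */ }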
    // Delete record with the given key.
    // Returns LK_SUCCESS if all OK, or LK_NO_SUCH_KEY if not found.
    LK_RETCODE DeleteKey(const DWORD_PTR pnKey,
                         bool fDeleteAllSame=false)
    {
        if (!IsUsable())
            return m_lkrcState;

        return _DeleteKey(pnKey, _CalcKeyHash(pnKey), fDeleteAllSame);
    }

    // Delete a record from the table, if present.
    // Returns LK_SUCCESS if all OK, or LK_NO_SUCH_KEY if not found.
    LK_RETCODE DeleteRecord(const void* pvRecord)
    {
        if (!IsUsable())
            return m_lkrcState;
        if (pvRecord == NULL)
            return LK_BAD_RECORD;

        return _DeleteRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)));
    }

    // Find record with given key.
    // Returns: LK_SUCCESS, if record found (record is returned in *ppvRecord)
    //          LK_BAD_RECORD, if ppvRecord is invalid
    //          LK_NO_SUCH_KEY, if no record with given key value was found
    //          LK_UNUSABLE, if hash table not in usable state
    // Note: the record is AddRef'd. You must decrement the reference
    // count when you are finished with the record (if you're implementing
    // refcounting semantics).
    LK_RETCODE FindKey(const DWORD_PTR pnKey,
                       const void** ppvRecord) const
    {
        if (!IsUsable())
            return m_lkrcState;
        if (ppvRecord == NULL)
            return LK_BAD_RECORD;

        return _FindKey(pnKey, _CalcKeyHash(pnKey), ppvRecord);
    }
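
    // Illustrative usage (a sketch with a hypothetical record type;
    // per the note above, a successful FindKey AddRef's the record, so
    // release your reference when done if your records are refcounted):
    //     const void* pv = NULL;
    //     if (ht.FindKey(nKey, &pv) == LK_SUCCESS)
    //     {
    //         const CMyRecord* pRec = static_cast<const CMyRecord*>(pv);
    //         UseRecord(pRec);  // ... then release the reference
    //     }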
    // Sees if the record is contained in the table.
    // Returns: LK_SUCCESS, if record found
    //          LK_BAD_RECORD, if pvRecord is invalid
    //          LK_NO_SUCH_KEY, if record is not in the table
    //          LK_UNUSABLE, if hash table not in usable state
    // Note: the record is *not* AddRef'd.
    LK_RETCODE FindRecord(const void* pvRecord) const
    {
        if (!IsUsable())
            return m_lkrcState;
        if (pvRecord == NULL)
            return LK_BAD_RECORD;

        return _FindRecord(pvRecord, _CalcKeyHash(_ExtractKey(pvRecord)));
    }

#ifdef LKR_APPLY_IF
    // Walk the hash table, applying pfnAction to all records.
    // Locks the whole table for the duration with either a (possibly
    // shared) readlock or a writelock, according to lkl.
    // Loop is aborted if pfnAction returns LKA_ABORT.
    // Returns the number of successful applications.
    DWORD Apply(LKR_PFnRecordAction pfnAction,
                void*               pvState=NULL,
                LK_LOCKTYPE         lkl=LKL_READLOCK);

    // Walk the hash table, applying pfnAction to any records that match
    // pfnPredicate. Locks the whole table for the duration with either
    // a (possibly shared) readlock or a writelock, according to lkl.
    // Loop is aborted if pfnAction returns LKA_ABORT.
    // Returns the number of successful applications.
    DWORD ApplyIf(LKR_PFnRecordPred   pfnPredicate,
                  LKR_PFnRecordAction pfnAction,
                  void*               pvState=NULL,
                  LK_LOCKTYPE         lkl=LKL_READLOCK);

    // Delete any records that match pfnPredicate.
    // Locks the table for the duration with a writelock.
    // Returns the number of deletions.
    //
    // Do *not* walk the hash table by hand with an iterator and call
    // DeleteKey. The iterator will end up pointing to garbage.
    DWORD DeleteIf(LKR_PFnRecordPred pfnPredicate,
                   void*             pvState=NULL);
#endif // LKR_APPLY_IF
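
    // Illustrative DeleteIf usage (a sketch with a hypothetical
    // predicate; LKP_PERFORM and LKP_NO_ACTION are assumed to be
    // members of the LK_PREDICATE enum declared in <LKR-hash.h>):
    //     static LK_PREDICATE WINAPI
    //     IsExpired(const void* pvRecord, void* /*pvState*/)
    //     {
    //         const CMyRecord* pRec =
    //             static_cast<const CMyRecord*>(pvRecord);
    //         return pRec->IsExpired() ? LKP_PERFORM : LKP_NO_ACTION;
    //     }
    //     ...
    //     DWORD cDeleted = ht.DeleteIf(IsExpired, NULL);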
    // Check table for consistency. Returns 0 if okay, or the number of
    // errors otherwise.
    int CheckTable() const;

    // Remove all data from the table
    void Clear()
    {
        WriteLock();
        _Clear(true);
        WriteUnlock();
    }

    // Number of elements in the table
    DWORD Size() const
    { return m_cRecords; }

    // Maximum possible number of elements in the table
    DWORD MaxSize() const
    { return static_cast<DWORD>(m_MaxLoad * MAX_DIRSIZE * m_dwSegSize); }

    // Get hash table statistics
    CLKRHashTableStats GetStatistics() const;

    // Is the hash table usable?
    bool IsUsable() const
    { return (m_lkrcState == LK_SUCCESS); }

    // Is the hash table consistent and correct?
    bool IsValid() const
    {
        STATIC_ASSERT(((MIN_DIRSIZE & (MIN_DIRSIZE-1)) == 0)  // == (1 << N)
                      &&  ((1 << 3) <= MIN_DIRSIZE)
                      &&  (MIN_DIRSIZE < MAX_DIRSIZE)
                      &&  ((MAX_DIRSIZE & (MAX_DIRSIZE-1)) == 0)
                      &&  (MAX_DIRSIZE <= (1 << 30)));

        bool f = (m_lkrcState == LK_SUCCESS   // serious internal failure?
                  &&  m_paDirSegs != NULL
                  &&  MIN_DIRSIZE <= m_cDirSegs  &&  m_cDirSegs <= MAX_DIRSIZE
                  &&  (m_cDirSegs & (m_cDirSegs-1)) == 0
                  &&  m_pfnExtractKey != NULL
                  &&  m_pfnCalcKeyHash != NULL
                  &&  m_pfnEqualKeys != NULL
                  &&  m_pfnAddRefRecord != NULL
                  &&  m_cActiveBuckets > 0
                  &&  ValidSignature()
                  );
        if (!f)
            m_lkrcState = LK_UNUSABLE;
        return f;
    }

    // Set the spin count on the table lock
    void SetTableLockSpinCount(WORD wSpins);

    // Get the spin count on the table lock
    WORD GetTableLockSpinCount() const;

    // Set/Get the spin count on the bucket locks
    void SetBucketLockSpinCount(WORD wSpins);
    WORD GetBucketLockSpinCount() const;

    enum {
        SIGNATURE      = (('L') | ('K' << 8) | ('L' << 16) | ('H' << 24)),
        SIGNATURE_FREE = (('L') | ('K' << 8) | ('L' << 16) | ('x' << 24)),
    };
    bool ValidSignature() const
    { return m_dwSignature == SIGNATURE; }

#ifdef LKR_EXPOSED_TABLE_LOCK
public:
#else // !LKR_EXPOSED_TABLE_LOCK
protected:
#endif // !LKR_EXPOSED_TABLE_LOCK

    //
    // Lock manipulators
    //

    // Lock the table (exclusively) for writing
    void WriteLock()
    { m_Lock.WriteLock(); }

    // Lock the table (possibly shared) for reading
    void ReadLock() const
    { m_Lock.ReadLock(); }

    // Unlock the table for writing
    void WriteUnlock()
    { m_Lock.WriteUnlock(); }

    // Unlock the table for reading
    void ReadUnlock() const
    { m_Lock.ReadUnlock(); }

    // Is the table already locked for writing?
    bool IsWriteLocked() const
    { return m_Lock.IsWriteLocked(); }

    // Is the table already locked for reading?
    bool IsReadLocked() const
    { return m_Lock.IsReadLocked(); }

    // Is the table unlocked for writing?
    bool IsWriteUnlocked() const
    { return m_Lock.IsWriteUnlocked(); }

    // Is the table unlocked for reading?
    bool IsReadUnlocked() const
    { return m_Lock.IsReadUnlocked(); }

    // Convert the read lock to a write lock
    void ConvertSharedToExclusive()
    { m_Lock.ConvertSharedToExclusive(); }

    // Convert the write lock to a read lock
    void ConvertExclusiveToShared() const
    { m_Lock.ConvertExclusiveToShared(); }
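
    // Illustrative explicit-lock usage (a sketch; these manipulators
    // are public only when LKR_EXPOSED_TABLE_LOCK is defined):
    //     ht.WriteLock();    // one exclusive lock around a batch
    //     ... several InsertRecord/DeleteKey calls ...
    //     ht.WriteUnlock();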
#ifdef LKRHASH_KERNEL_MODE
    LKRHASH_ALLOCATOR_DEFINITIONS(CLKRLinearHashTable);
#endif // LKRHASH_KERNEL_MODE

#ifdef LKR_DEPRECATED_ITERATORS
public:
    // Iterators can be used to walk the table. To ensure a consistent
    // view of the data, the iterator locks the whole table. This can
    // have a negative effect upon performance, because no other thread
    // can do anything with the table. Use with care.
    //
    // You should not use an iterator to walk the table, calling DeleteKey,
    // as the iterator will end up pointing to garbage.
    //
    // Use Apply, ApplyIf, or DeleteIf instead of iterators to safely
    // walk the table. Or use the STL-style iterators.
    //
    // Note that iterators acquire a reference to the record pointed to
    // and release that reference as soon as the iterator is incremented.
    // In other words, this code is safe:
    //     lkrc = ht.IncrementIterator(&iter);
    //     // assume lkrc == LK_SUCCESS for the sake of this example
    //     CMyHashTable::Record* pRec = iter.Record();
    //     Foo(pRec); // uses pRec but doesn't hang on to it
    //     lkrc = ht.IncrementIterator(&iter);
    //
    // But this code is not safe because pRec is used out of the scope of
    // the iterator that provided it:
    //     lkrc = ht.IncrementIterator(&iter);
    //     CMyHashTable::Record* pRec = iter.Record();
    //     // Broken code: should have called
    //     // ht.AddRefRecord(pRec, LKAR_EXPLICIT_ACQUIRE) here
    //     lkrc = ht.IncrementIterator(&iter);
    //     Foo(pRec); // Unsafe: no longer have a valid reference
    //
    // If the record has no reference-counting semantics, then you can
    // ignore the above remarks about scope.
    class CIterator
    {
    protected:
        friend class CLKRLinearHashTable;

        CLKRLinearHashTable* m_plht;         // which linear hash table?
        DWORD                m_dwBucketAddr; // bucket index
        CNodeClump*          m_pnc;          // a CNodeClump in bucket
        int                  m_iNode;        // offset within m_pnc
        LK_LOCKTYPE          m_lkl;          // readlock or writelock?

    private:
        // Private copy ctor and op= to prevent compiler synthesizing them.
        // Must provide (bad) implementation because we export instantiations.
        CIterator(const CIterator&) {*(BYTE*)NULL;}
        CIterator& operator=(const CIterator&) {return *(CIterator*)NULL;}

    public:
        CIterator(
            LK_LOCKTYPE lkl=LKL_WRITELOCK)
            : m_plht(NULL),
              m_dwBucketAddr(0),
              m_pnc(NULL),
              m_iNode(-1),
              m_lkl(lkl)
        {}

        // Return the record associated with this iterator
        const void* Record() const
        {
            IRTLASSERT(IsValid());
            return ((m_pnc != NULL
                        &&  m_iNode >= 0
                        &&  m_iNode < CLKRLinearHashTable::NODES_PER_CLUMP)
                    ? m_pnc->m_pvNode[m_iNode]
                    : NULL);
        }

        // Return the key associated with this iterator
        const DWORD_PTR Key() const
        {
            IRTLASSERT(m_plht != NULL);
            const void* pRec = Record();
            return ((pRec != NULL  &&  m_plht != NULL)
                    ? m_plht->_ExtractKey(pRec)
                    : NULL);
        }

        bool IsValid() const
        {
            return ((m_plht != NULL)
                    &&  (m_pnc != NULL)
                    &&  (0 <= m_iNode
                         &&  m_iNode < CLKRLinearHashTable::NODES_PER_CLUMP)
                    &&  (!m_pnc->IsEmptyNode(m_iNode)));
        }

        // Delete the record that the iterator points to. Does an implicit
        // IncrementIterator after deletion.
        LK_RETCODE DeleteRecord();

        // Change the record that the iterator points to. The new record
        // must have the same key as the old one.
        LK_RETCODE ChangeRecord(const void* pNewRec);
    }; // class CIterator

    // Const iterators for readonly access. You must use these with
    // const CLKRLinearHashTables.
    class CConstIterator : public CIterator
    {
    private:
        // Private, unimplemented copy ctor and op= to prevent
        // compiler synthesizing them.
        CConstIterator(const CConstIterator&);
        CConstIterator& operator=(const CConstIterator&);

    public:
        CConstIterator()
            : CIterator(LKL_READLOCK)
        {}
    }; // class CConstIterator

private:
    // The public APIs lock the table. The private ones, which are used
    // directly by CLKRHashTable, don't.
    LK_RETCODE _InitializeIterator(CIterator* piter);
    LK_RETCODE _CloseIterator(CIterator* piter);

public:
    // Initialize the iterator to point to the first item in the hash table.
    // Returns LK_SUCCESS, LK_NO_MORE_ELEMENTS, or LK_BAD_ITERATOR.
    LK_RETCODE InitializeIterator(CIterator* piter)
    {
        IRTLASSERT(piter != NULL  &&  piter->m_plht == NULL);
        if (piter == NULL  ||  piter->m_plht != NULL)
            return LK_BAD_ITERATOR;

        if (piter->m_lkl == LKL_WRITELOCK)
            WriteLock();
        else
            ReadLock();

        return _InitializeIterator(piter);
    }

    // The const iterator version
    LK_RETCODE InitializeIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL  &&  piter->m_plht == NULL);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL  ||  piter->m_plht != NULL
                ||  piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;

        ReadLock();
        return const_cast<CLKRLinearHashTable*>(this)
                   ->_InitializeIterator(static_cast<CIterator*>(piter));
    }

    // Move the iterator on to the next item in the table.
    // Returns LK_SUCCESS, LK_NO_MORE_ELEMENTS, or LK_BAD_ITERATOR.
    LK_RETCODE IncrementIterator(CIterator* piter);

    LK_RETCODE IncrementIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL  &&  piter->m_plht == this);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL  ||  piter->m_plht != this
                ||  piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;

        return const_cast<CLKRLinearHashTable*>(this)
                   ->IncrementIterator(static_cast<CIterator*>(piter));
    }

    // Close the iterator.
    LK_RETCODE CloseIterator(CIterator* piter)
    {
        IRTLASSERT(piter != NULL  &&  piter->m_plht == this);
        if (piter == NULL  ||  piter->m_plht != this)
            return LK_BAD_ITERATOR;

        _CloseIterator(piter);

        if (piter->m_lkl == LKL_WRITELOCK)
            WriteUnlock();
        else
            ReadUnlock();

        return LK_SUCCESS;
    };

    // Close the CConstIterator
    LK_RETCODE CloseIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL  &&  piter->m_plht == this);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL  ||  piter->m_plht != this
                ||  piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;

        const_cast<CLKRLinearHashTable*>(this)
            ->_CloseIterator(static_cast<CIterator*>(piter));
        ReadUnlock();
        return LK_SUCCESS;
    };

#endif // LKR_DEPRECATED_ITERATORS
#ifdef LKR_STL_ITERATORS

private:
    bool _Erase(Iterator& riter, DWORD dwSignature);
    bool _Find(DWORD_PTR pnKey, DWORD dwSignature,
               Iterator& riterResult);

    bool _IsValidIterator(const Iterator& riter) const
    {
        LKR_ITER_TRACE(_TEXT("  LKLH:_IsValidIterator(%p)\n"), &riter);
        bool fValid = ((riter.m_plht == this)
                       &&  (riter.m_dwBucketAddr < m_cActiveBuckets)
                       &&  riter.IsValid());
        IRTLASSERT(fValid);
        return fValid;
    }

public:
    // Return iterator pointing to first item in table
    Iterator Begin();

    // Return a one-past-the-end iterator. Always empty.
    Iterator End() const
    {
        LKR_ITER_TRACE(_TEXT("  LKLH::End\n"));
        return Iterator();
    }

    // Insert a record.
    // Returns `true' if successful; iterResult points to that record.
    // Returns `false' otherwise; iterResult == End().
    bool Insert(
        /* in */  const void* pvRecord,
        /* out */ Iterator&   riterResult,
        /* in */  bool        fOverwrite=false);

    // Erase the record pointed to by the iterator; adjust the iterator
    // to point to the next record. Returns `true' if successful.
    bool Erase(
        /* in,out */ Iterator& riter);

    // Erase the records in the range [riterFirst, riterLast).
    // Returns `true' if successful. riterFirst points to riterLast on return.
    bool Erase(
        /*in*/ Iterator& riterFirst,
        /*in*/ Iterator& riterLast);

    // Find the (first) record that has its key == pnKey.
    // If successful, returns `true' and the iterator points to the
    // (first) record. If it fails, returns `false' and iterator == End().
    bool Find(
        /* in */  DWORD_PTR pnKey,
        /* out */ Iterator& riterResult);

    // Find the range of records that have their keys == pnKey.
    // If successful, returns `true', iterFirst points to the first record,
    // and iterLast points to one-beyond-the-last such record.
    // If it fails, returns `false' and both iterators == End().
    // Primarily useful when m_fMultiKeys == true.
    bool EqualRange(
        /* in */  DWORD_PTR pnKey,
        /* out */ Iterator& riterFirst,  // inclusive
        /* out */ Iterator& riterLast);  // exclusive

#endif // LKR_STL_ITERATORS
  1439. }; // class CLKRLinearHashTable
#ifdef LKR_STL_ITERATORS

// These functions have to be defined after CLKRLinearHashTable

inline void
CLKRLinearHashTable_Iterator::_AddRef(
    LK_ADDREF_REASON lkar) const
{
    // TODO: should the iterator call _AddRefRecord at all?
    if (m_plht != NULL  &&  m_iNode != NODE_BEGIN - NODE_STEP)
    {
        IRTLASSERT(0 <= m_iNode  &&  m_iNode < NODES_PER_CLUMP
                   &&  m_pnc != NULL
                   &&  lkar != 0    // a non-zero reason must be supplied
                  );
        const void* pvRecord = m_pnc->m_pvNode[m_iNode];
        IRTLASSERT(pvRecord != NULL);
        LKR_ITER_TRACE(_TEXT("  LKLH::AddRef, this=%p, Rec=%p\n"),
                       this, pvRecord);
        m_plht->_AddRefRecord(pvRecord, lkar);
    }
} // CLKRLinearHashTable_Iterator::_AddRef

inline const DWORD_PTR
CLKRLinearHashTable_Iterator::Key() const
{
    IRTLASSERT(IsValid());
    return m_plht->_ExtractKey(m_pnc->m_pvNode[m_iNode]);
} // CLKRLinearHashTable_Iterator::Key

#endif // LKR_STL_ITERATORS
//--------------------------------------------------------------------
// CLKRHashTable
//
// To improve concurrency, a hash table is divided into a number of
// (independent) subtables. Each subtable is a linear hash table. The
// number of subtables is defined when the table is created and remains
// fixed thereafter. Records are assigned to subtables based on their
// hashed key.
//
// For small or low-contention hashtables, you can bypass this
// thin wrapper and use CLKRLinearHashTable directly. The methods are
// documented in the declaration of CLKRLinearHashTable (above).
//--------------------------------------------------------------------
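// How a record finds its home, in outline (a sketch only; the real
// _SubTable() arithmetic lives in the implementation file and is an
// internal detail): the key is hashed once, and the resulting signature
// selects both the subtable and, within it, the bucket chain.
//
//   DWORD     dwSignature = _CalcKeyHash(_ExtractKey(pvRecord));
//   SubTable* plht        = _SubTable(dwSignature); // one of m_cSubTables
//   // ... the chosen subtable then reuses dwSignature to pick a bucket.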
class IRTL_DLLEXP CLKRHashTable
{
private:
    typedef CLKRLinearHashTable SubTable;

public:
    typedef SubTable::TableLock  TableLock;
    typedef SubTable::BucketLock BucketLock;

#ifdef LKR_DEPRECATED_ITERATORS
    class CIterator;
    friend class CLKRHashTable::CIterator;
#endif // LKR_DEPRECATED_ITERATORS

#ifdef LKR_STL_ITERATORS
    friend class CLKRHashTable_Iterator;
    typedef CLKRHashTable_Iterator Iterator;
#endif // LKR_STL_ITERATORS

    friend class CLKRLinearHashTable;

    // aliases for convenience
    enum {
        NAME_SIZE              = SubTable::NAME_SIZE,
        HASH_INVALID_SIGNATURE = SubTable::HASH_INVALID_SIGNATURE,
        NODES_PER_CLUMP        = SubTable::NODES_PER_CLUMP,
    };

    enum {
        MAX_LKR_SUBTABLES = 64,
    };

private:
    // Hash table parameters
    DWORD      m_dwSignature;        // debugging: id & corruption check
    CHAR       m_szName[NAME_SIZE];  // an identifier for debugging
    DWORD      m_cSubTables;         // number of subtables
    SubTable** m_palhtDir;           // array of subtables

    // type-specific function pointers
    LKR_PFnExtractKey   m_pfnExtractKey;
    LKR_PFnCalcKeyHash  m_pfnCalcKeyHash;
    mutable LK_RETCODE  m_lkrcState;     // Internal state of table
    int                 m_nSubTableMask;

#ifndef LKR_NO_GLOBAL_LIST
    static CLockedDoubleList sm_llGlobalList; // All active CLKRHashTables
    CListEntry               m_leGlobalList;
#endif // !LKR_NO_GLOBAL_LIST

    void
    _InsertThisIntoGlobalList()
    {
#ifndef LKR_NO_GLOBAL_LIST
        sm_llGlobalList.InsertHead(&m_leGlobalList);
#endif // !LKR_NO_GLOBAL_LIST
    }

    void
    _RemoveThisFromGlobalList()
    {
#ifndef LKR_NO_GLOBAL_LIST
        sm_llGlobalList.RemoveEntry(&m_leGlobalList);
#endif // !LKR_NO_GLOBAL_LIST
    }

    LKRHASH_GLOBAL_LOCK_DECLARATIONS();

    // Private copy ctor and op= to prevent the compiler synthesizing them.
    // Must provide a (bad) implementation because we export instantiations.
    // TODO: implement these properly; they could be useful.
    CLKRHashTable(const CLKRHashTable&) {*(BYTE*)NULL;}
    CLKRHashTable& operator=(const CLKRHashTable&) {return *(CLKRHashTable*)NULL;}
    // Extract the key from the record
    const DWORD_PTR _ExtractKey(const void* pvRecord) const
    {
        IRTLASSERT(pvRecord != NULL);
        IRTLASSERT(m_pfnExtractKey != NULL);
        return (*m_pfnExtractKey)(pvRecord);
    }

    // Hash the key
    DWORD _CalcKeyHash(const DWORD_PTR pnKey) const
    {
        // Note: pnKey == 0 is acceptable, as the real key type could be an int.
        IRTLASSERT(m_pfnCalcKeyHash != NULL);
        DWORD dwHash = (*m_pfnCalcKeyHash)(pnKey);
        // We forcibly scramble the result to help ensure a better distribution.
#ifndef __HASHFN_NO_NAMESPACE__
        dwHash = HashFn::HashRandomizeBits(dwHash);
#else  // __HASHFN_NO_NAMESPACE__
        dwHash = ::HashRandomizeBits(dwHash);
#endif // __HASHFN_NO_NAMESPACE__
        IRTLASSERT(dwHash != HASH_INVALID_SIGNATURE);
        return dwHash;
    }
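    // Why scramble? User-supplied hash functions often concentrate their
    // entropy in the low-order bits, while both the subtable selector and
    // linear hashing's bucket addressing want it spread across the whole
    // DWORD. A typical bit-randomizer multiplies by a large odd constant
    // (purely illustrative -- see HashFn.h for the real HashRandomizeBits):
    //
    //   inline DWORD IllustrativeRandomizeBits(DWORD dw)
    //   {
    //       return dw * 2654435761UL;   // 2^32 / golden ratio (Knuth)
    //   }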
    // Use the key's hash signature to multiplex into a subtable
    SubTable* _SubTable(DWORD dwSignature) const;

    // Find the index of pst within the subtable array
    int _SubTableIndex(SubTable* pst) const;

    // Memory allocation wrappers to allow us to simulate allocation
    // failures during testing
    static SubTable** const
    _AllocateSubTableArray(
        size_t n);

    static bool
    _FreeSubTableArray(
        SubTable** palht);

    static SubTable* const
    _AllocateSubTable(
        LPCSTR              pszName,        // Identifies table for debugging
        LKR_PFnExtractKey   pfnExtractKey,  // Extract key from record
        LKR_PFnCalcKeyHash  pfnCalcKeyHash, // Calculate hash signature of key
        LKR_PFnEqualKeys    pfnEqualKeys,   // Compare two keys
        LKR_PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc.
        unsigned            maxload,        // Upper bound on avg chain length
        DWORD               initsize,       // Initial size of hash table
        CLKRHashTable*      phtParent,      // Owning table
        bool                fMultiKeys,     // Allow multiple identical keys?
        bool                fNonPagedAllocs // use paged or NP pool in kernel
        );

    static bool
    _FreeSubTable(
        SubTable* plht);

public:
    CLKRHashTable(
        LPCSTR              pszName,        // Identifies table for debugging
        LKR_PFnExtractKey   pfnExtractKey,  // Extract key from record
        LKR_PFnCalcKeyHash  pfnCalcKeyHash, // Calculate hash signature of key
        LKR_PFnEqualKeys    pfnEqualKeys,   // Compare two keys
        LKR_PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc.
        unsigned maxload=LK_DFLT_MAXLOAD,       // Bound on avg chain length
        DWORD    initsize=LK_DFLT_INITSIZE,     // Initial size of hash table
        DWORD    num_subtbls=LK_DFLT_NUM_SUBTBLS, // #subordinate hash tables
        bool     fMultiKeys=false               // Allow multiple identical keys?
#ifdef LKRHASH_KERNEL_MODE
        , bool   fNonPagedAllocs=true           // use paged or NP pool
#endif
        );

    ~CLKRHashTable();

    static const TCHAR* ClassName()
    {return _TEXT("CLKRHashTable");}

    int NumSubTables() const {return m_cSubTables;}

    bool MultiKeys() const;

#ifdef LKRHASH_KERNEL_MODE
    bool NonPagedAllocs() const;
#endif

    static LK_TABLESIZE NumSubTables(DWORD& rinitsize, DWORD& rnum_subtbls);

    // Thin wrappers for the corresponding methods in CLKRLinearHashTable
    LK_RETCODE InsertRecord(const void* pvRecord, bool fOverwrite=false);
    LK_RETCODE DeleteKey(const DWORD_PTR pnKey, bool fDeleteAllSame=false);
    LK_RETCODE DeleteRecord(const void* pvRecord);
    LK_RETCODE FindKey(const DWORD_PTR pnKey,
                       const void** ppvRecord) const;
    LK_RETCODE FindRecord(const void* pvRecord) const;

#ifdef LKR_APPLY_IF
    DWORD Apply(LKR_PFnRecordAction pfnAction,
                void*               pvState=NULL,
                LK_LOCKTYPE         lkl=LKL_READLOCK);
    DWORD ApplyIf(LKR_PFnRecordPred   pfnPredicate,
                  LKR_PFnRecordAction pfnAction,
                  void*               pvState=NULL,
                  LK_LOCKTYPE         lkl=LKL_READLOCK);
    DWORD DeleteIf(LKR_PFnRecordPred pfnPredicate,
                   void*             pvState=NULL);
#endif // LKR_APPLY_IF

    void  Clear();
    int   CheckTable() const;
    DWORD Size() const;
    DWORD MaxSize() const;
    CLKRHashTableStats GetStatistics() const;
    bool  IsValid() const;

    void SetTableLockSpinCount(WORD wSpins);
    WORD GetTableLockSpinCount() const;
    void SetBucketLockSpinCount(WORD wSpins);
    WORD GetBucketLockSpinCount() const;

    enum {
        SIGNATURE      = (('L') | ('K' << 8) | ('H' << 16) | ('T' << 24)),
        SIGNATURE_FREE = (('L') | ('K' << 8) | ('H' << 16) | ('x' << 24)),
    };
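    // Read as little-endian bytes in memory, SIGNATURE spells "LKHT" and
    // SIGNATURE_FREE spells "LKHx", so a live table is easy to spot (and
    // a use of a freed one easy to catch) in a debugger's memory window.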
    bool
    ValidSignature() const
    { return m_dwSignature == SIGNATURE; }

    // Is the hash table usable?
    bool IsUsable() const
    { return (m_lkrcState == LK_SUCCESS); }

#ifdef LKR_EXPOSED_TABLE_LOCK
public:
#else // !LKR_EXPOSED_TABLE_LOCK
protected:
#endif // !LKR_EXPOSED_TABLE_LOCK
    void WriteLock();
    void ReadLock() const;
    void WriteUnlock();
    void ReadUnlock() const;
    bool IsWriteLocked() const;
    bool IsReadLocked() const;
    bool IsWriteUnlocked() const;
    bool IsReadUnlocked() const;
    void ConvertSharedToExclusive();
    void ConvertExclusiveToShared() const;
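    // A usage sketch, assuming LKR_EXPOSED_TABLE_LOCK is defined (otherwise
    // these methods are protected and the table does all its own locking):
    //
    //   ht.ReadLock();               // shared: many concurrent readers
    //   // ... several FindKey calls under one lock acquisition ...
    //   ht.ReadUnlock();
    //
    // Locking a CLKRHashTable locks the whole table, not just one bucket
    // chain, so hold the lock briefly.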
#ifdef LKRHASH_KERNEL_MODE
    LKRHASH_ALLOCATOR_DEFINITIONS(CLKRHashTable);
#endif // LKRHASH_KERNEL_MODE

#ifdef LKR_DEPRECATED_ITERATORS
public:
    typedef SubTable::CIterator CLHTIterator;

    class CIterator : public CLHTIterator
    {
    protected:
        friend class CLKRHashTable;
        CLKRHashTable* m_pht; // which hash table?
        int            m_ist; // which subtable?
    private:
        // Private copy ctor and op= to prevent the compiler synthesizing
        // them. Must provide a (bad) implementation because we export
        // instantiations.
        CIterator(const CIterator&) {*(BYTE*)NULL;}
        CIterator& operator=(const CIterator&) {return *(CIterator*)NULL;}
    public:
        CIterator(
            LK_LOCKTYPE lkl=LKL_WRITELOCK)
            : CLHTIterator(lkl),
              m_pht(NULL),
              m_ist(-1)
        {}

        const void* Record() const
        {
            IRTLASSERT(IsValid());
            // This is a hack to work around a compiler bug: invoking
            // CLHTIterator::Record by name calls this function
            // recursively until the stack overflows.
            const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
            return pBase->Record();
        }

        const DWORD_PTR Key() const
        {
            IRTLASSERT(IsValid());
            const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
            return pBase->Key();
        }

        bool IsValid() const
        {
            const CLHTIterator* pBase = static_cast<const CLHTIterator*>(this);
            return (m_pht != NULL  &&  m_ist >= 0  &&  pBase->IsValid());
        }
    };

    // Const iterators for readonly access
    class CConstIterator : public CIterator
    {
    private:
        // Private, unimplemented copy ctor and op= to prevent
        // the compiler synthesizing them.
        CConstIterator(const CConstIterator&);
        CConstIterator& operator=(const CConstIterator&);
    public:
        CConstIterator()
            : CIterator(LKL_READLOCK)
        {}
    };

public:
    LK_RETCODE InitializeIterator(CIterator* piter);
    LK_RETCODE IncrementIterator(CIterator* piter);
    LK_RETCODE CloseIterator(CIterator* piter);

    LK_RETCODE InitializeIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL  &&  piter->m_pht == NULL);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL  ||  piter->m_pht != NULL
                ||  piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;
        return const_cast<CLKRHashTable*>(this)
                    ->InitializeIterator(static_cast<CIterator*>(piter));
    }

    LK_RETCODE IncrementIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL  &&  piter->m_pht == this);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL  ||  piter->m_pht != this
                ||  piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;
        return const_cast<CLKRHashTable*>(this)
                    ->IncrementIterator(static_cast<CIterator*>(piter));
    }

    LK_RETCODE CloseIterator(CConstIterator* piter) const
    {
        IRTLASSERT(piter != NULL  &&  piter->m_pht == this);
        IRTLASSERT(piter->m_lkl != LKL_WRITELOCK);
        if (piter == NULL  ||  piter->m_pht != this
                ||  piter->m_lkl == LKL_WRITELOCK)
            return LK_BAD_ITERATOR;
        return const_cast<CLKRHashTable*>(this)
                    ->CloseIterator(static_cast<CIterator*>(piter));
    }
#endif // LKR_DEPRECATED_ITERATORS

#ifdef LKR_STL_ITERATORS
private:
    bool _IsValidIterator(const Iterator& riter) const
    {
        LKR_ITER_TRACE(_TEXT("  LKHT:_IsValidIterator(%p)\n"), &riter);
        bool fValid = (riter.m_pht == this);
        IRTLASSERT(fValid);
        fValid = fValid  &&  (0 <= riter.m_ist
                              &&  riter.m_ist < (int) m_cSubTables);
        IRTLASSERT(fValid);
        IRTLASSERT(_SubTableIndex(riter.m_subiter.m_plht) == riter.m_ist);
        fValid = fValid  &&  riter.IsValid();
        IRTLASSERT(fValid);
        return fValid;
    }

public:
    Iterator
    Begin();

    Iterator
    End() const
    {
        LKR_ITER_TRACE(_TEXT("  LKHT::End\n"));
        return Iterator();
    }

    bool
    Insert(
        /* in */  const void* pvRecord,
        /* out */ Iterator&   riterResult,
        /* in */  bool        fOverwrite=false);

    bool
    Erase(
        /* in,out */ Iterator& riter);

    bool
    Erase(
        /* in */ Iterator& riterFirst,
        /* in */ Iterator& riterLast);

    bool
    Find(
        /* in */  DWORD_PTR pnKey,
        /* out */ Iterator&  riterResult);

    bool
    EqualRange(
        /* in */  DWORD_PTR pnKey,
        /* out */ Iterator&  riterFirst,   // inclusive
        /* out */ Iterator&  riterLast);   // exclusive
#endif // LKR_STL_ITERATORS
}; // class CLKRHashTable

//--------------------------------------------------------------------
// CTypedHashTable: a typesafe wrapper for CLKRHashTable (or
// CLKRLinearHashTable).
//
// * _Derived must derive from CTypedHashTable and provide certain member
//   functions (ExtractKey, CalcKeyHash, EqualKeys, AddRefRecord). It's
//   needed so that the method wrappers can downcast to the typesafe
//   implementations that you provide.
// * _Record is the type of the record. C{Linear}HashTable will store
//   >pointers< to _Record; i.e., it stores _Records by reference,
//   not by value.
// * _Key is the type of the key. _Key is used directly; it is not
//   assumed to be a pointer type. _Key can be an integer or a pointer.
//   C{Linear}HashTable assumes that the key is stored in the associated
//   record. See the comments at the declaration of LKR_PFnExtractKey
//   for more details.
// (optional parameters):
// * _BaseHashTable is the base hash table: CLKRHashTable or
//   CLKRLinearHashTable.
// * _BaseIterator is the iterator type, _BaseHashTable::CIterator.
//
// Some associative containers allow you to store key-value (aka
// name-value) pairs. LKRhash doesn't allow you to do this directly, but
// it's straightforward to build a simple wrapper class (or to use
// std::pair<key,value>).
//
// CTypedHashTable could derive directly from CLKRLinearHashTable, if you
// don't need the extra overhead of CLKRHashTable (which is quite low).
// If you expect to be using the table a lot on multiprocessor machines,
// you should use the default of CLKRHashTable, as it will scale better.
//
// You may need to add the following line to your code to disable
// warning messages about truncating extremely long identifiers.
//   #pragma warning (disable : 4786)
//
// The _Derived class should look something like this:
//   class CDerived : public CTypedHashTable<CDerived, RecordType, KeyType>
//   {
//   public:
//       CDerived()
//           : CTypedHashTable<CDerived, RecordType, KeyType>("DerivedTable")
//       { /* other ctor actions, if needed */ }
//       static KeyType ExtractKey(const RecordType* pTest);
//       static DWORD   CalcKeyHash(const KeyType Key);
//       static bool    EqualKeys(const KeyType Key1, const KeyType Key2);
//       static void    AddRefRecord(RecordType* pRecord,
//                                   LK_ADDREF_REASON lkar);
//       // You probably want to declare the copy ctor and operator=
//       // as private, so that the compiler won't synthesize them.
//       // You don't need to provide a dtor, unless you have custom
//       // member data to clean up.
//
//       // Optional: other functions
//   };
//
//--------------------------------------------------------------------
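// For instance, a hypothetical table of reference-counted CServer records,
// keyed by a case-insensitive name, might fill in the skeleton like so.
// (CServer, m_pszName, AddRef, Release, and HashStringNoCase are
// illustrative names, not part of this header; the sign convention for
// LK_ADDREF_REASON -- positive = AddRef, negative = Release -- is
// described with its declaration.)
//
//   class CServerTable
//       : public CTypedHashTable<CServerTable, CServer, const char*>
//   {
//   public:
//       CServerTable()
//           : CTypedHashTable<CServerTable, CServer, const char*>("Servers")
//       {}
//       static const char* ExtractKey(const CServer* pSrv)
//           { return pSrv->m_pszName; }
//       static DWORD CalcKeyHash(const char* pszKey)
//           { return HashStringNoCase(pszKey); }
//       static bool EqualKeys(const char* psz1, const char* psz2)
//           { return _stricmp(psz1, psz2) == 0; }
//       static void AddRefRecord(CServer* pSrv, LK_ADDREF_REASON lkar)
//           { (lkar > 0) ? pSrv->AddRef() : pSrv->Release(); }
//   };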
template < class _Derived, class _Record, class _Key,
           class _BaseHashTable=CLKRHashTable
#ifdef LKR_DEPRECATED_ITERATORS
         , class _BaseIterator=typename _BaseHashTable::CIterator
#endif // LKR_DEPRECATED_ITERATORS
         >
class CTypedHashTable : public _BaseHashTable
{
public:
    // convenient aliases
    typedef _Derived       Derived;
    typedef _Record        Record;
    typedef _Key           Key;
    typedef _BaseHashTable BaseHashTable;
    typedef CTypedHashTable<_Derived, _Record, _Key, _BaseHashTable
#ifdef LKR_DEPRECATED_ITERATORS
                          , _BaseIterator
#endif // LKR_DEPRECATED_ITERATORS
                           > HashTable;
#ifdef LKR_DEPRECATED_ITERATORS
    typedef _BaseIterator  BaseIterator;
#endif // LKR_DEPRECATED_ITERATORS

#ifdef LKR_APPLY_IF
    // ApplyIf() and DeleteIf(): does the record match the predicate?
    // Note: these take a Record*, not a const Record*. You can modify the
    // record in Pred() or Action(), if you like, but if you do, you
    // should use LKL_WRITELOCK to lock the table.
    typedef LK_PREDICATE (WINAPI *PFnRecordPred)  (Record* pRec, void* pvState);

    // Apply() et al: perform an action on a record.
    typedef LK_ACTION    (WINAPI *PFnRecordAction)(Record* pRec, void* pvState);
#endif // LKR_APPLY_IF

private:
    // Wrappers for the typesafe methods exposed by the derived class
    static const DWORD_PTR WINAPI
    _ExtractKey(const void* pvRecord)
    {
        const _Record* pRec = static_cast<const _Record*>(pvRecord);
        const _Key key = static_cast<const _Key>(_Derived::ExtractKey(pRec));
        // reinterpret_cast would be preferable here and in _CalcKeyHash
        // and _EqualKeys, but the Win64 compiler rejects it for some
        // key types, so a C-style cast it is.
        return (const DWORD_PTR) key;
    }

    static DWORD WINAPI
    _CalcKeyHash(const DWORD_PTR pnKey)
    {
        const _Key key = (const _Key) (DWORD_PTR) pnKey;
        return _Derived::CalcKeyHash(key);
    }

    static BOOL WINAPI
    _EqualKeys(const DWORD_PTR pnKey1, const DWORD_PTR pnKey2)
    {
        const _Key key1 = (const _Key) (DWORD_PTR) pnKey1;
        const _Key key2 = (const _Key) (DWORD_PTR) pnKey2;
        return _Derived::EqualKeys(key1, key2);
    }

    static void WINAPI
    _AddRefRecord(const void* pvRecord, LK_ADDREF_REASON lkar)
    {
        _Record* pRec = static_cast<_Record*>(const_cast<void*>(pvRecord));
        _Derived::AddRefRecord(pRec, lkar);
    }

#ifdef LKR_APPLY_IF
    // Typesafe wrappers for Apply, ApplyIf, and DeleteIf.
    class CState
    {
    public:
        PFnRecordPred   m_pfnPred;
        PFnRecordAction m_pfnAction;
        void*           m_pvState;

        CState(
            PFnRecordPred   pfnPred,
            PFnRecordAction pfnAction,
            void*           pvState)
            : m_pfnPred(pfnPred), m_pfnAction(pfnAction), m_pvState(pvState)
        {}
    };

    static LK_PREDICATE WINAPI
    _Pred(const void* pvRecord, void* pvState)
    {
        _Record* pRec   = static_cast<_Record*>(const_cast<void*>(pvRecord));
        CState*  pState = static_cast<CState*>(pvState);
        return (*pState->m_pfnPred)(pRec, pState->m_pvState);
    }

    static LK_ACTION WINAPI
    _Action(const void* pvRecord, void* pvState)
    {
        _Record* pRec   = static_cast<_Record*>(const_cast<void*>(pvRecord));
        CState*  pState = static_cast<CState*>(pvState);
        return (*pState->m_pfnAction)(pRec, pState->m_pvState);
    }
#endif // LKR_APPLY_IF

public:
    CTypedHashTable(
        LPCSTR   pszName,                    // Identifies table for debugging
        unsigned maxload=LK_DFLT_MAXLOAD,    // Upper bound on avg chain length
        DWORD    initsize=LK_DFLT_INITSIZE,  // Initial size of table: S/M/L
        DWORD    num_subtbls=LK_DFLT_NUM_SUBTBLS, // #subordinate hash tables
        bool     fMultiKeys=false            // Allow multiple identical keys?
#ifdef LKRHASH_KERNEL_MODE
        , bool   fNonPagedAllocs=true        // use paged or NP pool in kernel
#endif
        )
        : _BaseHashTable(pszName, _ExtractKey, _CalcKeyHash, _EqualKeys,
                         _AddRefRecord, maxload, initsize, num_subtbls,
                         fMultiKeys
#ifdef LKRHASH_KERNEL_MODE
                         , fNonPagedAllocs
#endif
                        )
    {}

    LK_RETCODE InsertRecord(const _Record* pRec, bool fOverwrite=false)
    { return _BaseHashTable::InsertRecord(pRec, fOverwrite); }

    LK_RETCODE DeleteKey(const _Key key, bool fDeleteAllSame=false)
    {
        const void* pvKey = reinterpret_cast<const void*>((DWORD_PTR)(key));
        DWORD_PTR   pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
        return _BaseHashTable::DeleteKey(pnKey, fDeleteAllSame);
    }

    LK_RETCODE DeleteRecord(const _Record* pRec)
    { return _BaseHashTable::DeleteRecord(pRec); }

    // Note: returns a _Record**, not a const _Record**. If you need
    // constness, use a const type for the _Record template parameter.
    LK_RETCODE FindKey(const _Key key, _Record** ppRec) const
    {
        if (ppRec == NULL)
            return LK_BAD_RECORD;
        *ppRec = NULL;
        const void* pvRec = NULL;
        const void* pvKey = reinterpret_cast<const void*>((DWORD_PTR)(key));
        DWORD_PTR   pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
        LK_RETCODE  lkrc  = _BaseHashTable::FindKey(pnKey, &pvRec);
        *ppRec = static_cast<_Record*>(const_cast<void*>(pvRec));
        return lkrc;
    }

    LK_RETCODE FindRecord(const _Record* pRec) const
    { return _BaseHashTable::FindRecord(pRec); }
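    // Typical lookup pattern (a sketch, building on the illustrative
    // CServerTable above; it assumes AddRefRecord keeps the record alive
    // while the caller holds it, per the "AddRef in FindKey, etc." note
    // on the callback):
    //
    //   CServer* pSrv = NULL;
    //   if (tbl.FindKey("gopher", &pSrv) == LK_SUCCESS)
    //   {
    //       // ... use pSrv; FindKey already AddRef'd it ...
    //       pSrv->Release();   // the caller's responsibility
    //   }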
    // Other C{Linear}HashTable methods can be exposed without change

#ifdef LKR_APPLY_IF
public:
    // Typesafe wrappers for Apply et al
    DWORD Apply(PFnRecordAction pfnAction,
                void*           pvState=NULL,
                LK_LOCKTYPE     lkl=LKL_READLOCK)
    {
        IRTLASSERT(pfnAction != NULL);
        if (pfnAction == NULL)
            return 0;
        CState state(NULL, pfnAction, pvState);
        return _BaseHashTable::Apply(_Action, &state, lkl);
    }

    DWORD ApplyIf(PFnRecordPred   pfnPredicate,
                  PFnRecordAction pfnAction,
                  void*           pvState=NULL,
                  LK_LOCKTYPE     lkl=LKL_READLOCK)
    {
        IRTLASSERT(pfnPredicate != NULL  &&  pfnAction != NULL);
        if (pfnPredicate == NULL  ||  pfnAction == NULL)
            return 0;
        CState state(pfnPredicate, pfnAction, pvState);
        return _BaseHashTable::ApplyIf(_Pred, _Action, &state, lkl);
    }

    DWORD DeleteIf(PFnRecordPred pfnPredicate, void* pvState=NULL)
    {
        IRTLASSERT(pfnPredicate != NULL);
        if (pfnPredicate == NULL)
            return 0;
        CState state(pfnPredicate, NULL, pvState);
        return _BaseHashTable::DeleteIf(_Pred, &state);
    }
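    // DeleteIf in action (a sketch; CServer::IsExpired is illustrative,
    // while LKP_PERFORM and LKP_NO_ACTION are the LK_PREDICATE values
    // declared with that type):
    //
    //   static LK_PREDICATE WINAPI
    //   ExpiredPred(CServer* pSrv, void* /*pvState*/)
    //   {
    //       return pSrv->IsExpired() ? LKP_PERFORM : LKP_NO_ACTION;
    //   }
    //   // ...
    //   DWORD cDeleted = tbl.DeleteIf(ExpiredPred);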
#endif // LKR_APPLY_IF

#ifdef LKR_DEPRECATED_ITERATORS
    // Typesafe wrappers for iterators
    class CIterator : public _BaseIterator
    {
    private:
        // Private, unimplemented copy ctor and op= to prevent
        // the compiler synthesizing them.
        CIterator(const CIterator&);
        CIterator& operator=(const CIterator&);
    public:
        CIterator(
            LK_LOCKTYPE lkl=LKL_WRITELOCK)
            : _BaseIterator(lkl)
        {}

        _Record* Record() const
        {
            const _BaseIterator* pBase = static_cast<const _BaseIterator*>(this);
            return reinterpret_cast<_Record*>(const_cast<void*>(
                       pBase->Record()));
        }

        _Key Key() const
        {
            const _BaseIterator* pBase = static_cast<const _BaseIterator*>(this);
            return reinterpret_cast<_Key>(reinterpret_cast<void*>(pBase->Key()));
        }
    };

    // readonly iterator
    class CConstIterator : public CIterator
    {
    private:
        // Private, unimplemented copy ctor and op= to prevent
        // the compiler synthesizing them.
        CConstIterator(const CConstIterator&);
        CConstIterator& operator=(const CConstIterator&);
    public:
        CConstIterator()
            : CIterator(LKL_READLOCK)
        {}

        const _Record* Record() const
        {
            return CIterator::Record();
        }

        const _Key Key() const
        {
            return CIterator::Key();
        }
    };

public:
    LK_RETCODE InitializeIterator(CIterator* piter)
    {
        return _BaseHashTable::InitializeIterator(piter);
    }

    LK_RETCODE IncrementIterator(CIterator* piter)
    {
        return _BaseHashTable::IncrementIterator(piter);
    }

    LK_RETCODE CloseIterator(CIterator* piter)
    {
        return _BaseHashTable::CloseIterator(piter);
    }

    LK_RETCODE InitializeIterator(CConstIterator* piter) const
    {
        return const_cast<HashTable*>(this)
                   ->InitializeIterator(static_cast<CIterator*>(piter));
    }

    LK_RETCODE IncrementIterator(CConstIterator* piter) const
    {
        return const_cast<HashTable*>(this)
                   ->IncrementIterator(static_cast<CIterator*>(piter));
    }

    LK_RETCODE CloseIterator(CConstIterator* piter) const
    {
        return const_cast<HashTable*>(this)
                   ->CloseIterator(static_cast<CIterator*>(piter));
    }
#endif // LKR_DEPRECATED_ITERATORS

#ifdef LKR_STL_ITERATORS
    // TODO: const_iterator
public:
    class iterator
    {
        friend class CTypedHashTable<_Derived, _Record, _Key,
                                     _BaseHashTable
#ifdef LKR_DEPRECATED_ITERATORS
                                   , _BaseIterator
#endif // LKR_DEPRECATED_ITERATORS
                                    >;
    protected:
        typename _BaseHashTable::Iterator m_iter;

        iterator(
            typename _BaseHashTable::Iterator& rhs)
            : m_iter(rhs)
        {
            LKR_ITER_TRACE(_TEXT("Typed::prot ctor, this=%p, rhs=%p\n"),
                           this, &rhs);
        }

    public:
        typedef std::forward_iterator_tag iterator_category;
        typedef _Record                   value_type;
        typedef ptrdiff_t                 difference_type;
        typedef size_t                    size_type;
        typedef value_type&               reference;
        typedef value_type*               pointer;

        iterator()
            : m_iter()
        {
            LKR_ITER_TRACE(_TEXT("Typed::default ctor, this=%p\n"), this);
        }

        iterator(
            const iterator& rhs)
            : m_iter(rhs.m_iter)
        {
            LKR_ITER_TRACE(_TEXT("Typed::copy ctor, this=%p, rhs=%p\n"),
                           this, &rhs);
        }

        iterator& operator=(
            const iterator& rhs)
        {
            LKR_ITER_TRACE(_TEXT("Typed::operator=, this=%p, rhs=%p\n"),
                           this, &rhs);
            m_iter = rhs.m_iter;
            return *this;
        }

        ~iterator()
        {
            LKR_ITER_TRACE(_TEXT("Typed::dtor, this=%p\n"), this);
        }

        reference operator*() const
        {
            void* pvRecord = const_cast<void*>(m_iter.Record());
            return reinterpret_cast<reference>(pvRecord);
        }

        pointer operator->() const { return &(operator*()); }

        // pre-increment
        iterator& operator++()
        {
            LKR_ITER_TRACE(_TEXT("Typed::pre-increment, this=%p\n"), this);
            m_iter.Increment();
            return *this;
        }

        // post-increment
        iterator operator++(int)
        {
            LKR_ITER_TRACE(_TEXT("Typed::post-increment, this=%p\n"), this);
            iterator iterPrev = *this;
            m_iter.Increment();
            return iterPrev;
        }

        bool operator==(
            const iterator& rhs) const
        {
            LKR_ITER_TRACE(_TEXT("Typed::operator==, this=%p, rhs=%p\n"),
                           this, &rhs);
            return m_iter == rhs.m_iter;
        }

        bool operator!=(
            const iterator& rhs) const
        {
            LKR_ITER_TRACE(_TEXT("Typed::operator!=, this=%p, rhs=%p\n"),
                           this, &rhs);
            return m_iter != rhs.m_iter;
        }

        _Record* Record() const
        {
            LKR_ITER_TRACE(_TEXT("Typed::Record, this=%p\n"), this);
            return reinterpret_cast<_Record*>(
                       const_cast<void*>(m_iter.Record()));
        }

        _Key Key() const
        {
            LKR_ITER_TRACE(_TEXT("Typed::Key, this=%p\n"), this);
            return reinterpret_cast<_Key>(
                       reinterpret_cast<void*>(m_iter.Key()));
        }
    }; // class iterator

    // Return an iterator pointing to the first item in the table.
    iterator begin()
    {
        LKR_ITER_TRACE(_TEXT("Typed::begin()\n"));
        return iterator(_BaseHashTable::Begin());
    }

    // Return a one-past-the-end iterator. Always empty.
    iterator end() const
    {
        LKR_ITER_TRACE(_TEXT("Typed::end()\n"));
        return iterator(_BaseHashTable::End());
    }

    template <class _InputIterator>
    CTypedHashTable(
        LPCSTR         pszName,   // An identifier for debugging
        _InputIterator f,         // first element in range
        _InputIterator l,         // one beyond the last element
        unsigned maxload=LK_DFLT_MAXLOAD,    // Upper bound on avg chain length
        DWORD    initsize=LK_DFLT_INITSIZE,  // Initial size of table: S/M/L
        DWORD    num_subtbls=LK_DFLT_NUM_SUBTBLS, // #subordinate hash tables
        bool     fMultiKeys=false            // Allow multiple identical keys?
#ifdef LKRHASH_KERNEL_MODE
        , bool   fNonPagedAllocs=true        // use paged or NP pool in kernel
#endif
        )
        : _BaseHashTable(pszName, _ExtractKey, _CalcKeyHash, _EqualKeys,
                         _AddRefRecord, maxload, initsize, num_subtbls,
                         fMultiKeys
#ifdef LKRHASH_KERNEL_MODE
                         , fNonPagedAllocs
#endif
                        )
    {
        insert(f, l);
    }

    template <class _InputIterator>
    void insert(_InputIterator f, _InputIterator l)
    {
        for ( ; f != l; ++f)
            InsertRecord(&(*f));
    }
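    // Usage sketch for the range insert (vecServers is illustrative; note
    // that the table stores pointers to the elements, so the container
    // must outlive the table entries):
    //
    //   std::vector<CServer> vecServers = LoadConfiguredServers();
    //   tbl.insert(vecServers.begin(), vecServers.end());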
    bool
    Insert(
        const _Record* pRecord,
        iterator&      riterResult,
        bool           fOverwrite=false)
    {
        LKR_ITER_TRACE(_TEXT("Typed::Insert\n"));
        return _BaseHashTable::Insert(pRecord, riterResult.m_iter, fOverwrite);
    }

    bool
    Erase(
        iterator& riter)
    {
        LKR_ITER_TRACE(_TEXT("Typed::Erase\n"));
        return _BaseHashTable::Erase(riter.m_iter);
    }

    bool
    Erase(
        iterator& riterFirst,
        iterator& riterLast)
    {
        LKR_ITER_TRACE(_TEXT("Typed::Erase2\n"));
        return _BaseHashTable::Erase(riterFirst.m_iter, riterLast.m_iter);
    }

    bool
    Find(
        const _Key key,
        iterator&  riterResult)
    {
        LKR_ITER_TRACE(_TEXT("Typed::Find\n"));
        const void* pvKey = reinterpret_cast<const void*>((DWORD_PTR)(key));
        DWORD_PTR   pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
        return _BaseHashTable::Find(pnKey, riterResult.m_iter);
    }

    bool
    EqualRange(
        const _Key key,
        iterator&  riterFirst,
        iterator&  riterLast)
    {
        LKR_ITER_TRACE(_TEXT("Typed::EqualRange\n"));
        const void* pvKey = reinterpret_cast<const void*>((DWORD_PTR)(key));
        DWORD_PTR   pnKey = reinterpret_cast<DWORD_PTR>(pvKey);
        return _BaseHashTable::EqualRange(pnKey, riterFirst.m_iter,
                                          riterLast.m_iter);
    }

    // The corresponding iterator functions for an STL
    // hash_{set, map, multiset, multimap}:
    //
    // The value type of a Pair-Associative Container is
    //   pair<const key_type, mapped_type>
    //
    //   pair<iterator, bool> insert(const value_type& x);
    //
    //   void erase(iterator pos);
    //   void erase(iterator f, iterator l);
    //
    //   iterator       find(const key_type& k);
    //   const_iterator find(const key_type& k) const;
    //
    //   pair<iterator, iterator>             equal_range(const key_type& k);
    //   pair<const_iterator, const_iterator> equal_range(const key_type& k)
    //                                                    const;
#endif // LKR_STL_ITERATORS
}; // class CTypedHashTable
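// Iterating with the STL-style interface (a sketch; requires
// LKR_STL_ITERATORS and reuses the illustrative CServerTable above,
// where Ping is a hypothetical CServer method):
//
//   for (CServerTable::iterator it = tbl.begin();  it != tbl.end();  ++it)
//       it->Ping();   // operator-> yields a CServer*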
#ifndef __LKRHASH_NO_NAMESPACE__
} // end namespace
#endif // !__LKRHASH_NO_NAMESPACE__

#endif // __LKRHASH_H__