Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

4007 lines
120 KiB

  1. /*++
  2. Copyright (c) 1998-2001 Microsoft Corporation
  3. Module Name :
  4. LKRhash.cpp
  5. Abstract:
  6. Implements LKRhash: a fast, scalable, cache- and MP-friendly hash table
  7. Author:
  8. Paul (Per-Ake) Larson, palarson@microsoft.com, July 1997
  9. Murali R. Krishnan (MuraliK)
  10. George V. Reilly (GeorgeRe) 06-Jan-1998
  11. Environment:
  12. Win32 - User Mode
  13. Project:
  14. Internet Information Server RunTime Library
  15. Revision History:
  16. Jan 1998 - Massive cleanup and rewrite. Templatized.
  17. 10/01/1998 - Change name from LKhash to LKRhash
  18. --*/
  19. #include "precomp.hxx"
  20. #define DLL_IMPLEMENTATION
  21. #define IMPLEMENTATION_EXPORT
  22. #include <lkrhash.h>
  23. #ifdef __LKRHASH_NAMESPACE__
  24. namespace LKRHash {
  25. #endif // __LKRHASH_NAMESPACE__
  26. #ifdef LKRHASH_ALLOCATOR_NEW
  27. # define DECLARE_ALLOCATOR(CLASS) \
  28. CLKRhashAllocator* CLASS::sm_palloc = NULL; \
  29. # define DECLARE_ALLOCATOR_LHTSUBCLASS(CLASS) \
  30. CLKRhashAllocator* CLKRLinearHashTable::CLASS::sm_palloc = NULL; \
  31. // DECLARE_ALLOCATOR(CLKRLinearHashTable);
  32. // DECLARE_ALLOCATOR(CLKRHashTable);
  33. DECLARE_ALLOCATOR_LHTSUBCLASS(CNodeClump);
  34. DECLARE_ALLOCATOR_LHTSUBCLASS(CSmallSegment);
  35. DECLARE_ALLOCATOR_LHTSUBCLASS(CMediumSegment);
  36. DECLARE_ALLOCATOR_LHTSUBCLASS(CLargeSegment);
  37. #endif // LKRHASH_ALLOCATOR_NEW
  38. static bool s_fInitialized = false;
  39. // -------------------------------------------------------------------------
  40. // Initialize per-class allocators
  41. // -------------------------------------------------------------------------
  42. bool
  43. LKRHashTableInit()
  44. {
  45. bool f = true;
  46. TRACE("LKRHashTableInit\n");
  47. #define INIT_ALLOCATOR(CLASS, N) \
  48. LKRHASH_ALLOCATOR_INIT(CLASS, N, f); \
  49. #define INIT_ALLOCATOR_LHTSUBCLASS(CLASS, N) \
  50. LKRHASH_ALLOCATOR_INIT(CLKRLinearHashTable::CLASS, N, f); \
  51. // INIT_ALLOCATOR(CLKRLinearHashTable, 20);
  52. // INIT_ALLOCATOR(CLKRHashTable, 4);
  53. INIT_ALLOCATOR_LHTSUBCLASS(CNodeClump, 200);
  54. INIT_ALLOCATOR_LHTSUBCLASS(CSmallSegment, 5);
  55. INIT_ALLOCATOR_LHTSUBCLASS(CMediumSegment, 5);
  56. INIT_ALLOCATOR_LHTSUBCLASS(CLargeSegment, 5);
  57. s_fInitialized = f;
  58. return f;
  59. } // LKRHashTableInit
  60. // -------------------------------------------------------------------------
  61. // Destroy per-class allocators
  62. // -------------------------------------------------------------------------
  63. void
  64. LKRHashTableUninit()
  65. {
  66. #define UNINIT_ALLOCATOR(CLASS) \
  67. LKRHASH_ALLOCATOR_UNINIT(CLASS); \
  68. #define UNINIT_ALLOCATOR_LHTSUBCLASS(CLASS) \
  69. LKRHASH_ALLOCATOR_UNINIT(CLKRLinearHashTable::CLASS);\
  70. s_fInitialized = false;
  71. // UNINIT_ALLOCATOR(CLKRLinearHashTable);
  72. // UNINIT_ALLOCATOR(CLKRHashTable);
  73. UNINIT_ALLOCATOR_LHTSUBCLASS(CNodeClump);
  74. UNINIT_ALLOCATOR_LHTSUBCLASS(CSmallSegment);
  75. UNINIT_ALLOCATOR_LHTSUBCLASS(CMediumSegment);
  76. UNINIT_ALLOCATOR_LHTSUBCLASS(CLargeSegment);
  77. TRACE("LKRHashTableUninit done\n");
  78. } // LKRHashTableUninit
// -------------------------------------------------------------------------
// class static member variables
// -------------------------------------------------------------------------

#ifdef LOCK_INSTRUMENTATION
// Instance counters (buckets and tables) available only when lock
// instrumentation is compiled in.  NOTE(review): only the definitions are
// visible here; presumably consumed by _LockName()-style helpers.
LONG CLKRLinearHashTable::CBucket::sm_cBuckets = 0;
LONG CLKRLinearHashTable::sm_cTables = 0;
#endif // LOCK_INSTRUMENTATION

#ifdef LKR_NEWCODE
// Process-wide lists of live tables; every table constructor calls
// _InsertThisIntoGlobalList() and every destructor calls
// _RemoveThisFromGlobalList() (see below).
CLockedDoubleList CLKRLinearHashTable::sm_llGlobalList;
CLockedDoubleList CLKRHashTable::sm_llGlobalList;
#endif // LKR_NEWCODE
// CLKRLinearHashTable --------------------------------------------------------
// Public Constructor for class CLKRLinearHashTable.
// All real construction and parameter validation is delegated to
// _Initialize; the outcome is recorded in m_lkrcState, so callers must
// check IsValid()/IsUsable() afterwards.
// -------------------------------------------------------------------------
CLKRLinearHashTable::CLKRLinearHashTable(
    LPCSTR pszName,                 // An identifier for debugging
    PFnExtractKey pfnExtractKey,    // Extract key from record
    PFnCalcKeyHash pfnCalcKeyHash,  // Calculate hash signature of key
    PFnEqualKeys pfnEqualKeys,      // Compare two keys
    PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
    double maxload,                 // Upperbound on the average chain length
    DWORD initsize,                 // Initial size of hash table.
    DWORD /*num_subtbls*/           // for compatibility with CLKRHashTable
    )
    :
#ifdef LOCK_INSTRUMENTATION
    m_Lock(_LockName()),
#endif // LOCK_INSTRUMENTATION
#ifdef LKR_NEWCODE
    m_nTableLockType(TableLock::LockType()),
    m_nBucketLockType(BucketLock::LockType()),
    m_phtParent(NULL)   // NULL parent: this is a standalone table
#endif // LKR_NEWCODE
{
    // _Initialize does the real work; on failure m_lkrcState holds the
    // error code and the IRTLVERIFY fires in debug builds.
    IRTLVERIFY(LK_SUCCESS
               == _Initialize(pfnExtractKey, pfnCalcKeyHash, pfnEqualKeys,
                              pfnAddRefRecord, pszName, maxload, initsize));
#ifdef LKR_NEWCODE
    // Register this table on the process-wide list of tables
    _InsertThisIntoGlobalList();
#endif // LKR_NEWCODE
} // CLKRLinearHashTable::CLKRLinearHashTable
#ifdef LKR_NEWCODE
// CLKRLinearHashTable --------------------------------------------------------
// Private Constructor for class CLKRLinearHashTable, used by CLKRHashTable.
// Same as the public constructor, except that the owning CLKRHashTable
// is recorded in m_phtParent (must be non-NULL).
// -------------------------------------------------------------------------
CLKRLinearHashTable::CLKRLinearHashTable(
    LPCSTR pszName,                 // An identifier for debugging
    PFnExtractKey pfnExtractKey,    // Extract key from record
    PFnCalcKeyHash pfnCalcKeyHash,  // Calculate hash signature of key
    PFnEqualKeys pfnEqualKeys,      // Compare two keys
    PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
    double maxload,                 // Upperbound on the average chain length
    DWORD initsize,                 // Initial size of hash table.
    CLKRHashTable* phtParent        // Owning table.
    )
    :
#ifdef LOCK_INSTRUMENTATION
    m_Lock(_LockName()),
#endif // LOCK_INSTRUMENTATION
    m_nTableLockType(TableLock::LockType()),
    m_nBucketLockType(BucketLock::LockType()),
    m_phtParent(phtParent)
{
    IRTLASSERT(m_phtParent != NULL);
    // _Initialize does the real work; result lands in m_lkrcState
    IRTLVERIFY(LK_SUCCESS
               == _Initialize(pfnExtractKey, pfnCalcKeyHash, pfnEqualKeys,
                              pfnAddRefRecord, pszName, maxload, initsize));
    // Register this subtable on the process-wide list of tables
    _InsertThisIntoGlobalList();
} // CLKRLinearHashTable::CLKRLinearHashTable
#endif // LKR_NEWCODE
// _Initialize -------------------------------------------------------------
// Do all the real work of constructing a CLKRLinearHashTable
// Returns: LK_NOT_INITIALIZED if LKRHashTableInit was never called,
//          LK_BAD_PARAMETERS if any callback is NULL, otherwise whatever
//          state _SetSegVars leaves in m_lkrcState.  The same code is
//          always stored in m_lkrcState.
// -------------------------------------------------------------------------
LK_RETCODE
CLKRLinearHashTable::_Initialize(
    PFnExtractKey pfnExtractKey,
    PFnCalcKeyHash pfnCalcKeyHash,
    PFnEqualKeys pfnEqualKeys,
    PFnAddRefRecord pfnAddRefRecord,
    LPCSTR pszName,
    double maxload,
    DWORD initsize)
{
    // Put every member into a known default state first
    m_dwSignature = SIGNATURE;
    m_dwBktAddrMask = 0;
#ifdef LKR_MASK
    m_dwBktAddrMask1 = 0;
#endif // LKR_MASK
    m_iExpansionIdx = 0;
    m_paDirSegs = NULL;
    m_lkts = LK_MEDIUM_TABLESIZE;
    m_dwSegBits = 0;
    m_dwSegSize = 0;
    m_dwSegMask = 0;
    m_lkrcState = LK_UNUSABLE;  // not usable until fully set up below
    m_MaxLoad = LK_DFLT_MAXLOAD;
    m_nLevel = 0;
    m_cDirSegs = 0;
    m_cRecords = 0;
    m_cActiveBuckets = 0;
    m_wBucketLockSpins= LOCK_USE_DEFAULT_SPINS;
    m_pfnExtractKey = pfnExtractKey;
    m_pfnCalcKeyHash = pfnCalcKeyHash;
    m_pfnEqualKeys = pfnEqualKeys;
    m_pfnAddRefRecord = pfnAddRefRecord;

    // Copy the debugging name; strncpy does not NUL-terminate when the
    // source fills the buffer, hence the explicit terminator
    strncpy(m_szName, pszName, NAME_SIZE-1);
    m_szName[NAME_SIZE-1] = '\0';

    IRTLASSERT(m_pfnExtractKey != NULL
               && m_pfnCalcKeyHash != NULL
               && m_pfnEqualKeys != NULL
               && m_pfnAddRefRecord != NULL);

    // The per-class allocators must have been set up by LKRHashTableInit
    IRTLASSERT(s_fInitialized);
    if (!s_fInitialized)
        return (m_lkrcState = LK_NOT_INITIALIZED);

    if (m_pfnExtractKey == NULL
        || m_pfnCalcKeyHash == NULL
        || m_pfnEqualKeys == NULL
        || m_pfnAddRefRecord == NULL)
        return (m_lkrcState = LK_BAD_PARAMETERS);

    // TODO: better sanity check for ridiculous values?
    // maxload <= 1.0 is nonsensical for a chained table: use the default
    m_MaxLoad = (maxload <= 1.0) ? LK_DFLT_MAXLOAD : maxload;
    // Clamp: at most 10 full node clumps per bucket on average
    m_MaxLoad = min(m_MaxLoad, 10 * NODES_PER_CLUMP);

    // Choose the size of the segments according to the desired "size" of
    // the table, small, medium, or large.
    LK_TABLESIZE lkts;
    if (initsize == LK_SMALL_TABLESIZE)
    {
        lkts = LK_SMALL_TABLESIZE;
        initsize = CSmallSegment::INITSIZE;
    }
    else if (initsize == LK_MEDIUM_TABLESIZE)
    {
        lkts = LK_MEDIUM_TABLESIZE;
        initsize = CMediumSegment::INITSIZE;
    }
    else if (initsize == LK_LARGE_TABLESIZE)
    {
        lkts = LK_LARGE_TABLESIZE;
        initsize = CLargeSegment::INITSIZE;
    }
    // specified an explicit initial size
    else
    {
        // force Small::INITSIZE <= initsize <= MAX_DIRSIZE * Large::INITSIZE
        initsize = min(max(initsize, CSmallSegment::INITSIZE),
                       (MAX_DIRSIZE >> CLargeSegment::SEGBITS)
                       * CLargeSegment::INITSIZE);
        // Guess a table size
        if (initsize <= 8 * CSmallSegment::INITSIZE)
            lkts = LK_SMALL_TABLESIZE;
        else if (initsize >= CLargeSegment::INITSIZE)
            lkts = LK_LARGE_TABLESIZE;
        else
            lkts = LK_MEDIUM_TABLESIZE;
    }

    m_cActiveBuckets = initsize;
    // _SetSegVars is responsible for updating m_lkrcState (initialized to
    // LK_UNUSABLE above) -- its result is what we return
    _SetSegVars(lkts);
    return m_lkrcState;
} // CLKRLinearHashTable::_Initialize
  238. // CLKRHashTable ----------------------------------------------------------
  239. // Constructor for class CLKRHashTable.
  240. // ---------------------------------------------------------------------
  241. CLKRHashTable::CLKRHashTable(
  242. LPCSTR pszName, // An identifier for debugging
  243. PFnExtractKey pfnExtractKey, // Extract key from record
  244. PFnCalcKeyHash pfnCalcKeyHash, // Calculate hash signature of key
  245. PFnEqualKeys pfnEqualKeys, // Compare two keys
  246. PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
  247. double maxload, // Bound on the average chain length
  248. DWORD initsize, // Initial size of hash table.
  249. DWORD num_subtbls // Number of subordinate hash tables.
  250. )
  251. : m_dwSignature(SIGNATURE),
  252. m_cSubTables(0),
  253. m_palhtDir(NULL),
  254. m_pfnExtractKey(pfnExtractKey),
  255. m_pfnCalcKeyHash(pfnCalcKeyHash),
  256. m_lkrcState(LK_BAD_PARAMETERS)
  257. {
  258. strncpy(m_szName, pszName, NAME_SIZE-1);
  259. m_szName[NAME_SIZE-1] = '\0';
  260. #ifdef LKR_NEWCODE
  261. _InsertThisIntoGlobalList();
  262. #endif // LKR_NEWCODE
  263. IRTLASSERT(pfnExtractKey != NULL
  264. && pfnCalcKeyHash != NULL
  265. && pfnEqualKeys != NULL
  266. && pfnAddRefRecord != NULL);
  267. if (pfnExtractKey == NULL
  268. || pfnCalcKeyHash == NULL
  269. || pfnEqualKeys == NULL
  270. || pfnAddRefRecord == NULL)
  271. return;
  272. LK_TABLESIZE lkts = NumSubTables(initsize, num_subtbls);
  273. #ifdef _DEBUG
  274. int cBuckets = initsize;
  275. if (initsize == LK_SMALL_TABLESIZE)
  276. cBuckets = SubTable::CSmallSegment::INITSIZE;
  277. else if (initsize == LK_MEDIUM_TABLESIZE)
  278. cBuckets = SubTable::CMediumSegment::INITSIZE;
  279. else if (initsize == LK_LARGE_TABLESIZE)
  280. cBuckets = SubTable::CLargeSegment::INITSIZE;
  281. TRACE("CLKRHashTable: %s, %d subtables, initsize = %d, "
  282. "total #buckets = %d\n",
  283. ((lkts == LK_SMALL_TABLESIZE) ? "small" :
  284. (lkts == LK_MEDIUM_TABLESIZE) ? "medium" : "large"),
  285. num_subtbls, initsize, cBuckets * num_subtbls);
  286. #endif
  287. m_lkrcState = LK_ALLOC_FAIL;
  288. m_palhtDir = _AllocateSubTableArray(num_subtbls);
  289. if (m_palhtDir == NULL)
  290. return;
  291. else
  292. {
  293. m_cSubTables = num_subtbls;
  294. for (DWORD i = 0; i < m_cSubTables; i++)
  295. m_palhtDir[i] = NULL;
  296. }
  297. for (DWORD i = 0; i < m_cSubTables; i++)
  298. {
  299. m_palhtDir[i] = _AllocateSubTable(pszName, pfnExtractKey,
  300. pfnCalcKeyHash, pfnEqualKeys,
  301. pfnAddRefRecord, maxload,
  302. initsize, this);
  303. // Failed to allocate a subtable. Destroy everything allocated so far.
  304. if (m_palhtDir[i] == NULL || !m_palhtDir[i]->IsValid())
  305. {
  306. for (DWORD j = i; j-- > 0; )
  307. _FreeSubTable(m_palhtDir[j]);
  308. _FreeSubTableArray(m_palhtDir);
  309. m_cSubTables = 0;
  310. m_palhtDir = NULL;
  311. return;
  312. }
  313. }
  314. m_lkrcState = LK_SUCCESS; // so IsValid/IsUsable won't fail
  315. } // CLKRHashTable::CLKRHashTable
// ~CLKRLinearHashTable ------------------------------------------------------
// Destructor for class CLKRLinearHashTable
//-------------------------------------------------------------------------
CLKRLinearHashTable::~CLKRLinearHashTable()
{
    // must acquire all locks before deleting to make sure
    // that no other threads are using the table
    WriteLock();
    _Clear(false);
    WriteUnlock();
#ifdef LKR_NEWCODE
    // Unhook from the process-wide list of tables
    _RemoveThisFromGlobalList();
#endif // LKR_NEWCODE
    // Poison the signature so stale pointers to this table are detectable
    m_dwSignature = SIGNATURE_FREE;
    m_lkrcState = LK_UNUSABLE; // so IsUsable will fail
} // CLKRLinearHashTable::~CLKRLinearHashTable
  332. // ~CLKRHashTable ------------------------------------------------------------
  333. // Destructor for class CLKRHashTable
  334. //-------------------------------------------------------------------------
  335. CLKRHashTable::~CLKRHashTable()
  336. {
  337. // delete in reverse order, just like delete[].
  338. for (DWORD i = m_cSubTables; i-- > 0; )
  339. _FreeSubTable(m_palhtDir[i]);
  340. _FreeSubTableArray(m_palhtDir);
  341. #ifdef LKR_NEWCODE
  342. _RemoveThisFromGlobalList();
  343. #endif // LKR_NEWCODE
  344. m_dwSignature = SIGNATURE_FREE;
  345. m_lkrcState = LK_UNUSABLE; // so IsUsable will fail
  346. } // CLKRHashTable::~CLKRHashTable
  347. //------------------------------------------------------------------------
  348. // Function: CLKRLinearHashTable::NumSubTables
  349. // Synopsis:
  350. //------------------------------------------------------------------------
  351. LK_TABLESIZE
  352. CLKRLinearHashTable::NumSubTables(
  353. DWORD& rinitsize,
  354. DWORD& rnum_subtbls)
  355. {
  356. LK_TABLESIZE lkts = LK_MEDIUM_TABLESIZE;
  357. return lkts;
  358. } // CLKRLinearHashTable::NumSubTables
//------------------------------------------------------------------------
// Function: CLKRHashTable::NumSubTables
// Synopsis: Chooses the per-subtable size class and the subtable count.
//           Both parameters are in-out: rinitsize may be rescaled to a
//           per-subtable bucket count, and rnum_subtbls is replaced when
//           the caller passed LK_DFLT_NUM_SUBTBLS.
//------------------------------------------------------------------------
LK_TABLESIZE
CLKRHashTable::NumSubTables(
    DWORD& rinitsize,
    DWORD& rnum_subtbls)
{
    LK_TABLESIZE lkts;
    // Establish the table size
    if (rinitsize == LK_SMALL_TABLESIZE
        || rinitsize == LK_MEDIUM_TABLESIZE
        || rinitsize == LK_LARGE_TABLESIZE)
    {
        // Caller passed a symbolic size class, not a bucket count
        lkts = static_cast<LK_TABLESIZE>(rinitsize);
    }
    else
    {
        if (rnum_subtbls != LK_DFLT_NUM_SUBTBLS)
        {
            // Explicit bucket count and subtable count: divide the
            // buckets among the subtables (rounding up), then classify
            // the per-subtable size
            rinitsize = (rinitsize - 1) / rnum_subtbls + 1;
            if (rinitsize <= SubTable::CSmallSegment::SEGSIZE)
                lkts = LK_SMALL_TABLESIZE;
            else if (rinitsize >= SubTable::CLargeSegment::SEGSIZE)
                lkts = LK_LARGE_TABLESIZE;
            else
                lkts = LK_MEDIUM_TABLESIZE;
        }
        else
        {
            lkts = LK_MEDIUM_TABLESIZE;
        }
    }
    // Choose a suitable number of subtables, scaled by the CPU count so
    // that concurrent writers spread across more locks on bigger boxes
    if (rnum_subtbls == LK_DFLT_NUM_SUBTBLS)
    {
        int nCPUs = NumProcessors();
        switch (lkts)
        {
        case LK_SMALL_TABLESIZE:
            rnum_subtbls = min(2, nCPUs);
            break;
        case LK_MEDIUM_TABLESIZE:
            rnum_subtbls = 2 * nCPUs;
            break;
        case LK_LARGE_TABLESIZE:
            rnum_subtbls = 4 * nCPUs;
            break;
        }
    }
    return lkts;
} // CLKRHashTable::NumSubTables
#ifdef LKR_COMPACT_DELETE
//------------------------------------------------------------------------
// Function: CLKRLinearHashTable::_IsNodeCompact
// Synopsis: validates that a node is correctly compacted
// The compact-delete invariant: within a bucket chain, every occupied
// slot precedes every empty slot, and the first empty slot can only
// appear in the last clump of the chain.
// Returns: the number of invariant violations found (0 => compact).
//------------------------------------------------------------------------
int
CLKRLinearHashTable::_IsNodeCompact(
    CBucket* const pbkt) const
{
    CNodeClump* pncCurr;
    CNodeClump* pncPrev;    // maintained by the loop but otherwise unused
    // fEmpty <=> we have walked past the last occupied slot in the chain
    bool fEmpty = pbkt->m_ncFirst.InvalidSignature(0);
    // An entirely empty chain must consist of exactly one clump
    int cErrors = fEmpty ? !pbkt->m_ncFirst.IsLastClump() : 0;
    for (pncCurr = &pbkt->m_ncFirst, pncPrev = NULL;
         pncCurr != NULL;
         pncPrev = pncCurr, pncCurr = pncCurr->m_pncNext)
    {
        for (DWORD i = 0; i < NODES_PER_CLUMP; i++)
        {
            if (fEmpty)
            {
                // Everything after the first empty slot must be empty too
                cErrors += (!pncCurr->InvalidSignature(i));
                cErrors += (!pncCurr->IsEmptyNode(i));
            }
            else if (pncCurr->InvalidSignature(i))
            {
                // First empty slot: must be an empty node, and no
                // further clumps may follow
                fEmpty = true;
                cErrors += (!pncCurr->IsEmptyNode(i));
                cErrors += (!pncCurr->IsLastClump());
            }
            else // still in non-empty portion
            {
                cErrors += (pncCurr->InvalidSignature(i));
                cErrors += (pncCurr->IsEmptyNode(i));
            }
        }
    }
    return cErrors;
}
#endif // LKR_COMPACT_DELETE
//------------------------------------------------------------------------
// Function: CLKRLinearHashTable::_InsertRecord
// Synopsis: Inserts a new record into the hash table. If this causes the
// average chain length to exceed the upper bound, the table is
// expanded by one bucket.
// Output: LK_SUCCESS, if the record was inserted.
// LK_KEY_EXISTS, if the record was not inserted (because a record
// with the same key value already exists in the table, unless
// fOverwrite==true).
// LK_ALLOC_FAIL, if failed to allocate the required space
// LK_UNUSABLE, if hash table not in usable state
// LK_BAD_RECORD, if record is bad.
//------------------------------------------------------------------------
LK_RETCODE
CLKRLinearHashTable::_InsertRecord(
    const void* pvRecord, // Pointer to the record to add to table
    DWORD dwSignature,    // hash signature
    bool fOverwrite       // overwrite record if key already present
    )
{
#ifdef LKR_SUBTABLE
    // Under LKR_SUBTABLE, the caller is expected to have validated these
    IRTLASSERT(IsUsable()
               && pvRecord != NULL
               && dwSignature != HASH_INVALID_SIGNATURE);
#else
    if (!IsUsable())
        return LK_UNUSABLE;
    if (pvRecord == NULL)
        return LK_BAD_RECORD;
#endif
    // find the beginning of the correct bucket chain
    WriteLock();
    // Must call IsValid inside a lock to ensure that none of the state
    // variables change while it's being evaluated
    IRTLASSERT(IsValid());
    // _FindBucket(..., true) returns the bucket with its write lock held
    CBucket* const pbkt = _FindBucket(dwSignature, true);
    IRTLASSERT(pbkt != NULL);
    IRTLASSERT(pbkt->IsWriteLocked());
    // The table lock can be dropped now; the bucket lock protects the chain
    WriteUnlock();
    // check that no record with the same key value exists
    // and save a pointer to the last element on the chain
    LK_RETCODE lkrc = LK_SUCCESS;
    CNodeClump* pncFree = NULL;   // clump containing a usable slot, if any
    int iFreePos = -1;            // index of that slot within pncFree
    CNodeClump* pncPrev;
    CNodeClump* pncCurr;
    bool fUpdate = false;         // true => overwriting an existing record
    const DWORD_PTR pnKey = _ExtractKey(pvRecord);
    // walk down the entire bucket chain, looking for matching hash
    // signatures and keys
    for (pncCurr = &pbkt->m_ncFirst, pncPrev = NULL;
         pncCurr != NULL;
         pncPrev = pncCurr, pncCurr = pncCurr->m_pncNext)
    {
        for (DWORD i = 0; i < NODES_PER_CLUMP; i++)
        {
#ifdef LKR_COMPACT_DELETE
            // Chains are kept compact: the first invalid signature marks
            // the end of the used portion, so the key cannot appear later
            // and we can insert right here.
            if (pncCurr->InvalidSignature(i))
            {
                IRTLASSERT(pncCurr->IsEmptyNode(i));
                IRTLASSERT(0 == _IsNodeCompact(pbkt));
                IRTLASSERT(pncCurr->IsLastClump());
                pncFree = pncCurr;
                iFreePos = i;
                goto insert;
            }
#endif // LKR_COMPACT_DELETE
            // Compare signatures first (cheap) before the full key compare
            if (dwSignature == pncCurr->m_dwKeySigs[i]
                && _EqualKeys(pnKey, _ExtractKey(pncCurr->m_pvNode[i])))
            {
                if (fOverwrite)
                {
                    // If we allow overwrites, this is the slot to do it to
                    fUpdate = true;
                    pncFree = pncCurr;
                    iFreePos = i;
                    goto insert;
                }
                else
                {
                    // overwrites forbidden: return an error
                    lkrc = LK_KEY_EXISTS;
                    goto exit;
                }
            }
#ifndef LKR_COMPACT_DELETE
            // keep track of the first free slot in the bucket chain
            if (pncFree == NULL && pncCurr->IsEmptyNode(i))
            {
                IRTLASSERT(pncCurr->InvalidSignature(i));
                pncFree = pncCurr;
                iFreePos = i;
            }
#endif // !LKR_COMPACT_DELETE
        }
    }
insert:
    if (pncFree != NULL)
    {
        pncCurr = pncFree;
        IRTLASSERT(iFreePos >= 0);
    }
    else
    {
        // No free slots. Attach the new node to the end of the chain
        IRTLASSERT(iFreePos < 0);
        pncCurr = _AllocateNodeClump();
        if (pncCurr == NULL)
        {
            lkrc = LK_ALLOC_FAIL;
            goto exit;
        }
        // The loop above ran to completion, so pncPrev is the old last clump
        IRTLASSERT(pncPrev != NULL && pncPrev->IsLastClump());
        pncPrev->m_pncNext = pncCurr;
        iFreePos = 0;
    }
    // Bump the new record's reference count upwards
    _AddRefRecord(pvRecord, +1);
    if (fUpdate)
    {
        // We're overwriting an existing record. Adjust the old record's
        // refcount downwards. (Doing ++new, --old in this order ensures
        // that the refcount won't briefly go to zero if new and old are
        // the same record.)
        IRTLASSERT(!pncCurr->IsEmptyNode(iFreePos));
        _AddRefRecord(pncCurr->m_pvNode[iFreePos], -1);
    }
    else
    {
        IRTLASSERT(pncCurr->IsEmptyNode(iFreePos));
        // m_cRecords is also read below after the bucket lock is
        // released, hence the interlocked increment
        InterlockedIncrement(reinterpret_cast<LONG*>(&m_cRecords));
    }
    pncCurr->m_dwKeySigs[iFreePos] = dwSignature;
    pncCurr->m_pvNode[iFreePos] = pvRecord;
exit:
    pbkt->WriteUnlock();
    if (lkrc == LK_SUCCESS)
    {
        // If the average load factor has grown too high, we grow the
        // table one bucket at a time.
        while (m_cRecords > m_MaxLoad * m_cActiveBuckets)
        {
            // If _Expand returns an error code (viz. LK_ALLOC_FAIL), it
            // just means that there isn't enough spare memory to expand
            // the table by one bucket. This is likely to cause problems
            // elsewhere soon, but this hashtable has not been corrupted.
            // If the call to _AllocateNodeClump above failed, then we do
            // have a real error that must be propagated back to the caller
            // because we were unable to insert the element at all.
            if (_Expand() != LK_SUCCESS)
                break; // expansion failed
        }
    }
    return lkrc;
} // CLKRLinearHashTable::_InsertRecord
//-------------------------------------------------------------------------
// Function: CLKRLinearHashTable::_DeleteKey
// Synopsis: Deletes the record with the given key value from the hash
// table (if it exists). Holes created by deletions are not filled
// immediately by moving records around. They will eventually be
// filled by insertions or reorganizations during expansions or
// contractions.
// Returns: LK_SUCCESS, if record found and deleted.
// LK_NO_SUCH_KEY, if no record with the given key value was found.
// LK_UNUSABLE, if hash table not in usable state
//-------------------------------------------------------------------------
LK_RETCODE
CLKRLinearHashTable::_DeleteKey(
    const DWORD_PTR pnKey, // Key value of the record, depends on key type
    DWORD dwSignature      // hash signature of pnKey
    )
{
#ifdef LKR_SUBTABLE
    IRTLASSERT(IsUsable());
#else
    if (!IsUsable())
        return LK_UNUSABLE;
#endif
    LK_RETCODE lkrc = LK_NO_SUCH_KEY;
    // locate the beginning of the correct bucket chain
    WriteLock();
    // Must call IsValid inside a lock to ensure that none of the state
    // variables change while it's being evaluated
    IRTLASSERT(IsValid());
    // _FindBucket(..., true) returns the bucket with its write lock held
    CBucket* const pbkt = _FindBucket(dwSignature, true);
    IRTLASSERT(pbkt != NULL);
    IRTLASSERT(pbkt->IsWriteLocked());
    WriteUnlock();
    // scan down the bucket chain, looking for the victim
    for (CNodeClump* pncCurr = &pbkt->m_ncFirst, *pncPrev = NULL;
         pncCurr != NULL;
         pncPrev = pncCurr, pncCurr = pncCurr->m_pncNext)
    {
        for (int i = 0; i < NODES_PER_CLUMP; i++)
        {
#ifdef LKR_COMPACT_DELETE
            // Compact chains: the first invalid signature marks the end
            // of the used portion, so the key is not present
            if (pncCurr->InvalidSignature(i))
            {
                IRTLASSERT(pncCurr->IsEmptyNode(i));
                IRTLASSERT(0 == _IsNodeCompact(pbkt));
                IRTLASSERT(pncCurr->IsLastClump());
                goto exit;
            }
#endif // LKR_COMPACT_DELETE
            // Signature check first (cheap), then the full key compare
            if (dwSignature == pncCurr->m_dwKeySigs[i]
                && _EqualKeys(pnKey, _ExtractKey(pncCurr->m_pvNode[i])))
            {
                IRTLVERIFY(_DeleteNode(pbkt, pncCurr, pncPrev, i));
                lkrc = LK_SUCCESS;
                goto exit;
            }
        }
    }
exit:
    pbkt->WriteUnlock();
    if (lkrc == LK_SUCCESS)
    {
        // contract the table if necessary
        // NOTE(review): maxcontract = 1/m_MaxLoad is typically < 1, so
        // "contractions < maxcontract" allows at most one contraction
        // per deletion -- presumably intentional; confirm before changing.
        double maxcontract = 1.0 / static_cast<double>(m_MaxLoad);
        for (int contractions = 0;
             m_cRecords < m_MaxLoad * m_cActiveBuckets
                 && m_cActiveBuckets > m_dwSegSize * MIN_DIRSIZE
                 && contractions < maxcontract;
             ++contractions)
        {
            // If _Contract returns an error code (viz. LK_ALLOC_FAIL), it
            // just means that there isn't enough spare memory to contract
            // the table by one bucket. This is likely to cause problems
            // elsewhere soon, but this hashtable has not been corrupted.
            if (_Contract() != LK_SUCCESS)
                break;
        }
    }
    return lkrc;
} // CLKRLinearHashTable::_DeleteKey
//-------------------------------------------------------------------------
// Function: CLKRLinearHashTable::_DeleteRecord
// Synopsis: Deletes the specified record from the hash table (if it
// exists). Holes created by deletions are not filled immediately
// by moving records around. They will eventually be filled by
// insertions or reorganizations during expansions or
// contractions. This is not the same thing as calling
// DeleteKey(_ExtractKey(pvRecord)). If that were called for a
// record that doesn't exist in the table, it could delete some
// completely unrelated record that happened to have the key.
// Returns: LK_SUCCESS, if record found and deleted.
// LK_NO_SUCH_KEY, if the record is not found in the table.
// LK_UNUSABLE, if hash table not in usable state.
//-------------------------------------------------------------------------
LK_RETCODE
CLKRLinearHashTable::_DeleteRecord(
    const void* pvRecord, // Pointer to the record to delete from the table
    DWORD dwSignature     // hash signature of the record's key
    )
{
#ifdef LKR_SUBTABLE
    IRTLASSERT(IsUsable() && pvRecord != NULL);
#else
    if (!IsUsable())
        return LK_UNUSABLE;
    if (pvRecord == NULL)
        return LK_NO_SUCH_KEY;
#endif
    LK_RETCODE lkrc = LK_NO_SUCH_KEY;
    // locate the beginning of the correct bucket chain
    WriteLock();
    // Must call IsValid inside a lock to ensure that none of the state
    // variables change while it's being evaluated
    IRTLASSERT(IsValid());
    // _FindBucket(..., true) returns the bucket with its write lock held
    CBucket* const pbkt = _FindBucket(dwSignature, true);
    IRTLASSERT(pbkt != NULL);
    IRTLASSERT(pbkt->IsWriteLocked());
    WriteUnlock();
    const DWORD_PTR pnKey = _ExtractKey(pvRecord);
    IRTLASSERT(dwSignature == _CalcKeyHash(pnKey));
    // scan down the bucket chain, looking for the victim; note the match
    // is by record *pointer identity*, not by key equality
    for (CNodeClump* pncCurr = &pbkt->m_ncFirst, *pncPrev = NULL;
         pncCurr != NULL;
         pncPrev = pncCurr, pncCurr = pncCurr->m_pncNext)
    {
        for (int i = 0; i < NODES_PER_CLUMP; i++)
        {
#ifdef LKR_COMPACT_DELETE
            // Compact chains: the first empty node marks the end of the
            // used portion, so the record is not present
            if (pncCurr->IsEmptyNode(i))
            {
                IRTLASSERT(pncCurr->InvalidSignature(i));
                IRTLASSERT(0 == _IsNodeCompact(pbkt));
                IRTLASSERT(pncCurr->IsLastClump());
                goto exit;
            }
#endif // LKR_COMPACT_DELETE
            if (pncCurr->m_pvNode[i] == pvRecord)
            {
                IRTLASSERT(_EqualKeys(pnKey,
                                      _ExtractKey(pncCurr->m_pvNode[i])));
                IRTLASSERT(dwSignature == pncCurr->m_dwKeySigs[i]);
                IRTLVERIFY(_DeleteNode(pbkt, pncCurr, pncPrev, i));
                lkrc = LK_SUCCESS;
                goto exit;
            }
        }
    }
exit:
    pbkt->WriteUnlock();
    if (lkrc == LK_SUCCESS)
    {
        // contract the table if necessary
        // NOTE(review): maxcontract = 1/m_MaxLoad is typically < 1, so at
        // most one contraction runs per deletion -- same pattern as in
        // _DeleteKey; confirm before changing.
        double maxcontract = 1.0 / static_cast<double>(m_MaxLoad);
        for (int contractions = 0;
             m_cRecords < m_MaxLoad * m_cActiveBuckets
                 && m_cActiveBuckets > m_dwSegSize * MIN_DIRSIZE
                 && contractions < maxcontract;
             ++contractions)
        {
            // If _Contract returns an error code (viz. LK_ALLOC_FAIL), it
            // just means that there isn't enough spare memory to contract
            // the table by one bucket. This is likely to cause problems
            // elsewhere soon, but this hashtable has not been corrupted.
            if (_Contract() != LK_SUCCESS)
                break;
        }
    }
    return lkrc;
} // CLKRLinearHashTable::_DeleteRecord
//------------------------------------------------------------------------
// Function: CLKRLinearHashTable::_DeleteNode
// Synopsis: Deletes a node; removes the node clump if empty
// Returns:  true if successful
//
// The three reference parameters (rpnc, rpncPrev, riNode) are adjusted
// in place so that a caller iterating the bucket chain (e.g., _DeleteIf)
// can continue its loop after the deletion without walking into freed
// memory or skipping the record that gets moved into the vacated slot.
// Precondition: caller holds the bucket's write lock.
//------------------------------------------------------------------------
bool
CLKRLinearHashTable::_DeleteNode(
    CBucket*     pbkt,      // bucket chain containing node
    CNodeClump*& rpnc,      // actual node
    CNodeClump*& rpncPrev,  // predecessor of actual node, or NULL
    int&         riNode)    // index within node
{
    IRTLASSERT(pbkt != NULL && pbkt->IsWriteLocked());
    IRTLASSERT(rpnc != NULL);
    IRTLASSERT(rpncPrev == NULL || rpncPrev->m_pncNext == rpnc);
    IRTLASSERT(0 <= riNode && riNode < NODES_PER_CLUMP);
    IRTLASSERT(!rpnc->IsEmptyNode(riNode));
    IRTLASSERT(!rpnc->InvalidSignature(riNode));

#ifdef _DEBUG
    // Check that the node clump really does belong to the bucket
    CNodeClump* pnc1 = &pbkt->m_ncFirst;
    while (pnc1 != NULL && pnc1 != rpnc)
        pnc1 = pnc1->m_pncNext;
    IRTLASSERT(pnc1 == rpnc);
#endif // _DEBUG

    // Release the reference to the record
    _AddRefRecord(rpnc->m_pvNode[riNode], -1);

#ifdef LKR_COMPACT_DELETE
    IRTLASSERT(0 == _IsNodeCompact(pbkt));

    // Compact the nodeclump by moving the very last node back to the
    // newly freed slot
    CNodeClump* pnc2 = rpnc;
    int iNode2 = riNode;

    // Find the last nodeclump in the chain
    while (!pnc2->IsLastClump())
    {
        pnc2 = pnc2->m_pncNext;
        iNode2 = 0;
    }
    IRTLASSERT(0 <= iNode2 && iNode2 < NODES_PER_CLUMP);
    IRTLASSERT(!pnc2->InvalidSignature(iNode2));

    // Find the first empty slot in the nodeclump
    while (iNode2 < NODES_PER_CLUMP && !pnc2->InvalidSignature(iNode2))
    {
        iNode2++;
    }

    // Back up to last non-empty slot
    --iNode2;
    IRTLASSERT(0 <= iNode2 && iNode2 < NODES_PER_CLUMP
               && !pnc2->InvalidSignature(iNode2));
    IRTLASSERT(iNode2+1 == NODES_PER_CLUMP
               || pnc2->InvalidSignature(iNode2+1));

#ifdef _DEBUG
    // Check that all the remaining nodes are empty
    IRTLASSERT(pnc2->IsLastClump());
    for (int iNode3 = iNode2 + 1; iNode3 < NODES_PER_CLUMP; ++iNode3)
    {
        IRTLASSERT(pnc2->InvalidSignature(iNode3)
                   && pnc2->IsEmptyNode(iNode3));
    }
#endif // _DEBUG

    // Move the last node's data back to the current node
    rpnc->m_pvNode[riNode]    = pnc2->m_pvNode[iNode2];
    rpnc->m_dwKeySigs[riNode] = pnc2->m_dwKeySigs[iNode2];

    // Blank the old last node.
    // Correct even if (rpnc, riNode) == (pnc2, iNode2).
    pnc2->m_pvNode[iNode2]    = NULL;
    pnc2->m_dwKeySigs[iNode2] = HASH_INVALID_SIGNATURE;
    IRTLASSERT(0 == _IsNodeCompact(pbkt));

    // Back up riNode by one, so that the next iteration of the loop
    // calling _DeleteNode will end up pointing to the same spot.
    if (riNode != 0)
    {
        riNode--;
    }
    else
    {
        // rewind rpnc and rpncPrev to previous node
        if (rpnc == &pbkt->m_ncFirst)
        {
            // Already at the head of the chain: -1 makes the caller's
            // subsequent ++i restart at slot 0 of the first clump.
            riNode = -1;
        }
        else
        {
            riNode = NODES_PER_CLUMP;
            rpnc = rpncPrev;
            if (rpnc == &pbkt->m_ncFirst)
            {
                rpncPrev = NULL;
            }
            else
            {
                // Re-derive the new predecessor by walking from the
                // head: the chain is singly linked.
                for (rpncPrev = &pbkt->m_ncFirst;
                     rpncPrev->m_pncNext != rpnc;
                     rpncPrev = rpncPrev->m_pncNext)
                {}
            }
        }
    }

    // Is the last node clump now completely empty? Delete, if possible
    if (iNode2 == 0 && pnc2 != &pbkt->m_ncFirst)
    {
        // Find preceding nodeclump
        CNodeClump* pnc3 = &pbkt->m_ncFirst;
        while (pnc3->m_pncNext != pnc2)
        {
            pnc3 = pnc3->m_pncNext;
            IRTLASSERT(pnc3 != NULL);
        }
        pnc3->m_pncNext = NULL;
#ifdef _DEBUG
        pnc2->m_pncNext = NULL; // or dtor will ASSERT
#endif // _DEBUG
        _FreeNodeClump(pnc2);
    }

#else // !LKR_COMPACT_DELETE
    // Delete the node from the table
    rpnc->m_pvNode[riNode]    = NULL;
    rpnc->m_dwKeySigs[riNode] = HASH_INVALID_SIGNATURE;

    // Is clump empty now? Delete it, if possible.  (The first clump is
    // embedded in the bucket, so it never has a predecessor and is
    // never freed.)
    if (rpncPrev != NULL)
    {
        bool fEmpty = true;
        for (int j = 0; j < NODES_PER_CLUMP; j++)
        {
            if (!rpnc->IsEmptyNode(j))
            {
                fEmpty = false;
                break;
            }
        }

        // if clump is now empty, disconnect and delete it.
        if (fEmpty)
        {
            IRTLASSERT(rpnc != &pbkt->m_ncFirst);
            IRTLASSERT(rpncPrev->m_pncNext == rpnc);
            rpncPrev->m_pncNext = rpnc->m_pncNext;
#ifdef _DEBUG
            rpnc->m_pncNext = NULL; // or dtor will ASSERT
#endif // _DEBUG
            _FreeNodeClump(rpnc);

            // Reset these to point to the end of the preceding clump so
            // that the calling procedure's loop variables aren't pointing
            // into limbo.
            rpnc = rpncPrev;
            riNode = NODES_PER_CLUMP;
            if (rpnc == &pbkt->m_ncFirst)
                rpncPrev = NULL;
            else
            {
                for (rpncPrev = &pbkt->m_ncFirst;
                     rpncPrev->m_pncNext != rpnc;
                     rpncPrev = rpncPrev->m_pncNext)
                {}
            }
        }
    }
#endif // !LKR_COMPACT_DELETE

    IRTLASSERT(rpncPrev == NULL || rpncPrev->m_pncNext == rpnc);
    InterlockedDecrement(reinterpret_cast<LONG*>(&m_cRecords));
    return true;
} // CLKRLinearHashTable::_DeleteNode
//------------------------------------------------------------------------
// Function: CLKRLinearHashTable::_FindKey
// Synopsis: Locate the record associated with the given key value.
// Returns: Pointer to the record, if it is found.
//          NULL, if the record is not found.
// Returns: LK_SUCCESS, if record found (record is returned in *ppvRecord)
//          LK_BAD_RECORD, if ppvRecord is invalid
//          LK_NO_SUCH_KEY, if no record with the given key value was found.
//          LK_UNUSABLE, if hash table not in usable state
// Note: the record is AddRef'd. You must decrement the reference count
//       when you are finished with the record (if you're implementing
//       refcounting semantics).
//------------------------------------------------------------------------
LK_RETCODE
CLKRLinearHashTable::_FindKey(
    const DWORD_PTR pnKey,      // Key value of the record, depends on key type
    DWORD           dwSignature,// hash signature
    const void**    ppvRecord   // resultant record
    ) const
{
#ifdef LKR_SUBTABLE
    IRTLASSERT(IsUsable() && ppvRecord != NULL);
#else
    if (!IsUsable())
        return LK_UNUSABLE;
    IRTLASSERT(ppvRecord != NULL);
    if (ppvRecord == NULL)
        return LK_BAD_RECORD;
#endif

    *ppvRecord = NULL;
    LK_RETCODE lkrc = LK_NO_SUCH_KEY;

    // locate the beginning of the correct bucket chain
    ReadLock();

    // Must call IsValid inside a lock to ensure that none of the state
    // variables change while it's being evaluated
    IRTLASSERT(IsValid());

    // Lock coupling: _FindBucket returns with the bucket read-locked
    // (asserted below), so the table-wide lock can be released before
    // walking the bucket chain.
    CBucket* const pbkt = _FindBucket(dwSignature, false);
    IRTLASSERT(pbkt != NULL);
    IRTLASSERT(pbkt->IsReadLocked());
    ReadUnlock();

    // walk down the bucket chain
    for (CNodeClump* pncCurr = &pbkt->m_ncFirst;
         pncCurr != NULL;
         pncCurr = pncCurr->m_pncNext)
    {
        for (DWORD i = 0; i < NODES_PER_CLUMP; i++)
        {
#ifdef LKR_COMPACT_DELETE
            // Clumps are kept compact, so the first invalid signature
            // marks the end of the occupied slots: stop searching.
            if (pncCurr->InvalidSignature(i))
            {
                IRTLASSERT(pncCurr->IsEmptyNode(i));
                IRTLASSERT(0 == _IsNodeCompact(pbkt));
                IRTLASSERT(pncCurr->IsLastClump());
                goto exit;
            }
#endif // LKR_COMPACT_DELETE

            // Compare the cheap hash signatures first; only call the
            // (potentially expensive) key comparison on a signature match.
            if (dwSignature == pncCurr->m_dwKeySigs[i]
                && _EqualKeys(pnKey, _ExtractKey(pncCurr->m_pvNode[i])))
            {
                *ppvRecord = pncCurr->m_pvNode[i];
                lkrc = LK_SUCCESS;

#ifdef LKR_FIND_FIRST
                // Move the found node to very first slot in the bucket
                // Hopefully, the principle of temporal locality will
                // come into play.
                // NOTE(review): this swap mutates the bucket while only
                // the bucket *read* lock is held -- confirm that this is
                // safe under concurrent readers.
                const DWORD dwTempSig    = pbkt->m_ncFirst.m_dwKeySigs[0];
                const void* pvTempRecord = pbkt->m_ncFirst.m_pvNode[0];
                pbkt->m_ncFirst.m_dwKeySigs[0] = pncCurr->m_dwKeySigs[i];
                pbkt->m_ncFirst.m_pvNode[0]    = pncCurr->m_pvNode[i];
                pncCurr->m_dwKeySigs[i] = dwTempSig;
                pncCurr->m_pvNode[i]    = pvTempRecord;
#endif // LKR_FIND_FIRST

                // bump the reference count before handing the record
                // back to the user. The user should decrement the
                // reference count when finished with this record.
                _AddRefRecord(*ppvRecord, +1);
                goto exit;
            }
        }
    }

  exit:
    pbkt->ReadUnlock();
    return lkrc;
} // CLKRLinearHashTable::_FindKey
//------------------------------------------------------------------------
// Function: CLKRLinearHashTable::_FindRecord
// Synopsis: Sees if the record is contained in the table
// Returns: Pointer to the record, if it is found.
//          NULL, if the record is not found.
// Returns: LK_SUCCESS, if record found
//          LK_BAD_RECORD, if pvRecord is invalid
//          LK_NO_SUCH_KEY, if the record was not found in the table
//          LK_UNUSABLE, if hash table not in usable state
// Note: The record is *not* AddRef'd.  Unlike _FindKey, this compares
//       record *pointers*, so it only succeeds on the exact record
//       instance, not on another record with an equal key.
//------------------------------------------------------------------------
LK_RETCODE
CLKRLinearHashTable::_FindRecord(
    const void* pvRecord,   // Pointer to the record to find in the table
    DWORD       dwSignature // hash signature
    ) const
{
#ifdef LKR_SUBTABLE
    IRTLASSERT(IsUsable() && pvRecord != NULL);
#else
    if (!IsUsable())
        return LK_UNUSABLE;
    IRTLASSERT(pvRecord != NULL);
    if (pvRecord == NULL)
        return LK_BAD_RECORD;
#endif

    LK_RETCODE lkrc = LK_NO_SUCH_KEY;

    // locate the beginning of the correct bucket chain
    ReadLock();

    // Must call IsValid inside a lock to ensure that none of the state
    // variables change while it's being evaluated
    IRTLASSERT(IsValid());

    // Lock coupling: the bucket comes back read-locked, so the
    // table-wide lock can be dropped before scanning the chain.
    CBucket* const pbkt = _FindBucket(dwSignature, false);
    IRTLASSERT(pbkt != NULL);
    IRTLASSERT(pbkt->IsReadLocked());
    ReadUnlock();

    // Sanity-check that the caller passed the right signature
    const DWORD_PTR pnKey = _ExtractKey(pvRecord);
    IRTLASSERT(dwSignature == _CalcKeyHash(pnKey));

    // walk down the bucket chain
    for (CNodeClump* pncCurr = &pbkt->m_ncFirst;
         pncCurr != NULL;
         pncCurr = pncCurr->m_pncNext)
    {
        for (DWORD i = 0; i < NODES_PER_CLUMP; i++)
        {
#ifdef LKR_COMPACT_DELETE
            // Compact clumps: first empty slot ends the occupied region
            if (pncCurr->IsEmptyNode(i))
            {
                IRTLASSERT(pncCurr->InvalidSignature(i));
                IRTLASSERT(0 == _IsNodeCompact(pbkt));
                IRTLASSERT(pncCurr->IsLastClump());
                goto exit;
            }
#endif // LKR_COMPACT_DELETE

            // Identity comparison: looking for this exact record pointer
            if (pncCurr->m_pvNode[i] == pvRecord)
            {
                IRTLASSERT(dwSignature == pncCurr->m_dwKeySigs[i]);
                IRTLASSERT(_EqualKeys(pnKey,
                                      _ExtractKey(pncCurr->m_pvNode[i])));
                lkrc = LK_SUCCESS;

#ifdef LKR_FIND_FIRST
                // Move the found node to very first slot in the bucket
                // Hopefully, the principle of temporal locality will
                // come into play.
                // NOTE(review): mutation under a read lock -- confirm
                // safety under concurrent readers (see _FindKey).
                const DWORD dwTempSig    = pbkt->m_ncFirst.m_dwKeySigs[0];
                const void* pvTempRecord = pbkt->m_ncFirst.m_pvNode[0];
                pbkt->m_ncFirst.m_dwKeySigs[0] = pncCurr->m_dwKeySigs[i];
                pbkt->m_ncFirst.m_pvNode[0]    = pncCurr->m_pvNode[i];
                pncCurr->m_dwKeySigs[i] = dwTempSig;
                pncCurr->m_pvNode[i]    = pvTempRecord;
#endif // LKR_FIND_FIRST

                goto exit;
            }
        }
    }

  exit:
    pbkt->ReadUnlock();
    return lkrc;
} // CLKRLinearHashTable::_FindRecord
  1101. //------------------------------------------------------------------------
  1102. // Function: CLKRLinearHashTable::Apply
  1103. // Synopsis:
  1104. // Returns:
  1105. //------------------------------------------------------------------------
  1106. DWORD
  1107. CLKRLinearHashTable::Apply(
  1108. PFnRecordAction pfnAction,
  1109. void* pvState,
  1110. LK_LOCKTYPE lkl)
  1111. {
  1112. if (!IsUsable())
  1113. return LK_UNUSABLE;
  1114. LK_PREDICATE lkp = LKP_PERFORM;
  1115. if (lkl == LKL_WRITELOCK)
  1116. WriteLock();
  1117. else
  1118. ReadLock();
  1119. // Must call IsValid inside a lock to ensure that none of the state
  1120. // variables change while it's being evaluated
  1121. IRTLASSERT(IsValid());
  1122. DWORD dw = _Apply(pfnAction, pvState, lkl, lkp);
  1123. if (lkl == LKL_WRITELOCK)
  1124. WriteUnlock();
  1125. else
  1126. ReadUnlock();
  1127. return dw;
  1128. } // CLKRLinearHashTable::Apply
  1129. //------------------------------------------------------------------------
  1130. // Function: CLKRHashTable::Apply
  1131. // Synopsis:
  1132. // Returns:
  1133. //------------------------------------------------------------------------
  1134. DWORD
  1135. CLKRHashTable::Apply(
  1136. PFnRecordAction pfnAction,
  1137. void* pvState,
  1138. LK_LOCKTYPE lkl)
  1139. {
  1140. if (!IsUsable())
  1141. return LK_UNUSABLE;
  1142. DWORD dw = 0;
  1143. LK_PREDICATE lkp = LKP_PERFORM;
  1144. if (lkl == LKL_WRITELOCK)
  1145. WriteLock();
  1146. else
  1147. ReadLock();
  1148. // Must call IsValid inside a lock to ensure that none of the state
  1149. // variables change while it's being evaluated
  1150. IRTLASSERT(IsValid());
  1151. if (IsValid())
  1152. {
  1153. for (DWORD i = 0; i < m_cSubTables; i++)
  1154. {
  1155. dw += m_palhtDir[i]->_Apply(pfnAction, pvState, lkl, lkp);
  1156. if (lkp == LKP_ABORT || lkp == LKP_PERFORM_STOP
  1157. || lkp == LKP_DELETE_STOP)
  1158. break;
  1159. }
  1160. }
  1161. if (lkl == LKL_WRITELOCK)
  1162. WriteUnlock();
  1163. else
  1164. ReadUnlock();
  1165. return dw;
  1166. } // CLKRHashTable::Apply
  1167. //------------------------------------------------------------------------
  1168. // Function: CLKRLinearHashTable::ApplyIf
  1169. // Synopsis:
  1170. // Returns:
  1171. //------------------------------------------------------------------------
  1172. DWORD
  1173. CLKRLinearHashTable::ApplyIf(
  1174. PFnRecordPred pfnPredicate,
  1175. PFnRecordAction pfnAction,
  1176. void* pvState,
  1177. LK_LOCKTYPE lkl)
  1178. {
  1179. if (!IsUsable())
  1180. return LK_UNUSABLE;
  1181. DWORD dw = 0;
  1182. LK_PREDICATE lkp = LKP_PERFORM;
  1183. if (lkl == LKL_WRITELOCK)
  1184. WriteLock();
  1185. else
  1186. ReadLock();
  1187. // Must call IsValid inside a lock to ensure that none of the state
  1188. // variables change while it's being evaluated
  1189. IRTLASSERT(IsValid());
  1190. if (IsValid())
  1191. {
  1192. dw = _ApplyIf(pfnPredicate, pfnAction, pvState, lkl, lkp);
  1193. }
  1194. if (lkl == LKL_WRITELOCK)
  1195. WriteUnlock();
  1196. else
  1197. ReadUnlock();
  1198. return dw;
  1199. } // CLKRLinearHashTable::ApplyIf
  1200. //------------------------------------------------------------------------
  1201. // Function: CLKRHashTable::ApplyIf
  1202. // Synopsis:
  1203. // Returns:
  1204. //------------------------------------------------------------------------
  1205. DWORD
  1206. CLKRHashTable::ApplyIf(
  1207. PFnRecordPred pfnPredicate,
  1208. PFnRecordAction pfnAction,
  1209. void* pvState,
  1210. LK_LOCKTYPE lkl)
  1211. {
  1212. if (!IsUsable())
  1213. return LK_UNUSABLE;
  1214. DWORD dw = 0;
  1215. LK_PREDICATE lkp = LKP_PERFORM;
  1216. if (lkl == LKL_WRITELOCK)
  1217. WriteLock();
  1218. else
  1219. ReadLock();
  1220. // Must call IsValid inside a lock to ensure that none of the state
  1221. // variables change while it's being evaluated
  1222. IRTLASSERT(IsValid());
  1223. if (IsValid())
  1224. {
  1225. for (DWORD i = 0; i < m_cSubTables; i++)
  1226. {
  1227. dw += m_palhtDir[i]->_ApplyIf(pfnPredicate, pfnAction,
  1228. pvState, lkl, lkp);
  1229. if (lkp == LKP_ABORT || lkp == LKP_PERFORM_STOP
  1230. || lkp == LKP_DELETE_STOP)
  1231. break;
  1232. }
  1233. }
  1234. if (lkl == LKL_WRITELOCK)
  1235. WriteUnlock();
  1236. else
  1237. ReadUnlock();
  1238. return dw;
  1239. } // CLKRHashTable::ApplyIf
  1240. //------------------------------------------------------------------------
  1241. // Function: CLKRLinearHashTable::DeleteIf
  1242. // Synopsis:
  1243. // Returns:
  1244. //------------------------------------------------------------------------
  1245. DWORD
  1246. CLKRLinearHashTable::DeleteIf(
  1247. PFnRecordPred pfnPredicate,
  1248. void* pvState)
  1249. {
  1250. if (!IsUsable())
  1251. return LK_UNUSABLE;
  1252. DWORD dw = 0;
  1253. LK_PREDICATE lkp = LKP_PERFORM;
  1254. WriteLock();
  1255. // Must call IsValid inside a lock to ensure that none of the state
  1256. // variables change while it's being evaluated
  1257. IRTLASSERT(IsValid());
  1258. if (IsValid())
  1259. dw = _DeleteIf(pfnPredicate, pvState, lkp);
  1260. WriteUnlock();
  1261. return dw;
  1262. } // CLKRLinearHashTable::DeleteIf
  1263. //------------------------------------------------------------------------
  1264. // Function: CLKRHashTable::DeleteIf
  1265. // Synopsis:
  1266. // Returns:
  1267. //------------------------------------------------------------------------
  1268. DWORD
  1269. CLKRHashTable::DeleteIf(
  1270. PFnRecordPred pfnPredicate,
  1271. void* pvState)
  1272. {
  1273. if (!IsUsable())
  1274. return LK_UNUSABLE;
  1275. DWORD dw = 0;
  1276. LK_PREDICATE lkp = LKP_PERFORM;
  1277. WriteLock();
  1278. // Must call IsValid inside a lock to ensure that none of the state
  1279. // variables change while it's being evaluated
  1280. IRTLASSERT(IsValid());
  1281. if (IsValid())
  1282. {
  1283. for (DWORD i = 0; i < m_cSubTables; i++)
  1284. {
  1285. dw += m_palhtDir[i]->_DeleteIf(pfnPredicate, pvState, lkp);
  1286. if (lkp == LKP_ABORT || lkp == LKP_PERFORM_STOP
  1287. || lkp == LKP_DELETE_STOP)
  1288. break;
  1289. }
  1290. }
  1291. WriteUnlock();
  1292. return dw;
  1293. } // CLKRHashTable::DeleteIf
  1294. //------------------------------------------------------------------------
  1295. // Function: CLKRLinearHashTable::_Apply
  1296. // Synopsis:
  1297. // Returns:
  1298. //------------------------------------------------------------------------
  1299. DWORD
  1300. CLKRLinearHashTable::_Apply(
  1301. PFnRecordAction pfnAction,
  1302. void* pvState,
  1303. LK_LOCKTYPE lkl,
  1304. LK_PREDICATE& rlkp)
  1305. {
  1306. if (!IsUsable())
  1307. return LK_UNUSABLE;
  1308. IRTLASSERT(lkl == LKL_WRITELOCK ? IsWriteLocked() : IsReadLocked());
  1309. return _ApplyIf(_PredTrue, pfnAction, pvState, lkl, rlkp);
  1310. } // CLKRLinearHashTable::_Apply
//------------------------------------------------------------------------
// Function: CLKRLinearHashTable::_ApplyIf
// Synopsis: Walks every bucket; for each record, calls pfnPredicate and,
//           depending on the verdict, performs pfnAction, deletes the
//           record, skips it, or stops the traversal.
// Returns: Number of successful actions
// Note:    The caller must already hold the table-wide lock matching
//          lkl; each bucket is additionally locked while it is scanned.
//          Deletion (LKP_DELETE*) is only honored under a write lock.
//------------------------------------------------------------------------
DWORD
CLKRLinearHashTable::_ApplyIf(
    PFnRecordPred   pfnPredicate,
    PFnRecordAction pfnAction,
    void*           pvState,
    LK_LOCKTYPE     lkl,
    LK_PREDICATE&   rlkp)
{
    if (!IsUsable())
        return LK_UNUSABLE;

    IRTLASSERT(lkl == LKL_WRITELOCK ? IsWriteLocked() : IsReadLocked());
    IRTLASSERT(pfnPredicate != NULL && pfnAction != NULL);

    // Defensive retail-build check of the asserted preconditions
    if ((lkl == LKL_WRITELOCK ? !IsWriteLocked() : !IsReadLocked())
        || pfnPredicate == NULL || pfnAction == NULL)
        return 0;

    DWORD cActions = 0;

    for (DWORD iBkt = 0; iBkt < m_cActiveBuckets; ++iBkt)
    {
        CBucket* const pbkt = _Bucket(iBkt);
        IRTLASSERT(pbkt != NULL);

        if (lkl == LKL_WRITELOCK)
            pbkt->WriteLock();
        else
            pbkt->ReadLock();

        for (CNodeClump* pncCurr = &pbkt->m_ncFirst, *pncPrev = NULL;
             pncCurr != NULL;
             pncPrev = pncCurr, pncCurr = pncCurr->m_pncNext)
        {
            for (int i = 0; i < NODES_PER_CLUMP; i++)
            {
#ifdef LKR_COMPACT_DELETE
                // Compact clumps: the first empty slot ends this bucket
                if (pncCurr->IsEmptyNode(i))
                {
                    IRTLASSERT(pncCurr->InvalidSignature(i));
                    IRTLASSERT(0 == _IsNodeCompact(pbkt));
                    IRTLASSERT(pncCurr->IsLastClump());
                    goto unlock;
                }
                else
#else // !LKR_COMPACT_DELETE
                if (!pncCurr->IsEmptyNode(i))
#endif // !LKR_COMPACT_DELETE
                {
                    rlkp = (*pfnPredicate)(pncCurr->m_pvNode[i], pvState);

                    switch (rlkp)
                    {
                    case LKP_ABORT:
                        // Abandon the traversal immediately
                        if (lkl == LKL_WRITELOCK)
                            pbkt->WriteUnlock();
                        else
                            pbkt->ReadUnlock();
                        return cActions;
                        break;

                    case LKP_NO_ACTION:
                        // nothing to do
                        break;

                    case LKP_DELETE:
                    case LKP_DELETE_STOP:
                        // Deleting requires the write lock; if the caller
                        // only asked for a read lock, give up here.
                        if (lkl != LKL_WRITELOCK)
                        {
                            pbkt->ReadUnlock();
                            return cActions;
                        }
                        // fall through

                    case LKP_PERFORM:
                    case LKP_PERFORM_STOP:
                    {
                        LK_ACTION lka;

                        if (rlkp == LKP_DELETE || rlkp == LKP_DELETE_STOP)
                        {
                            // _DeleteNode adjusts pncCurr/pncPrev/i so
                            // that the loop revisits the slot that now
                            // holds the moved-in record.
                            IRTLVERIFY(_DeleteNode(pbkt, pncCurr,
                                                   pncPrev, i));
                            ++cActions;
                            lka = LKA_SUCCEEDED;
                        }
                        else
                        {
                            lka = (*pfnAction)(pncCurr->m_pvNode[i],
                                               pvState);

                            switch (lka)
                            {
                            case LKA_ABORT:
                                if (lkl == LKL_WRITELOCK)
                                    pbkt->WriteUnlock();
                                else
                                    pbkt->ReadUnlock();
                                return cActions;

                            case LKA_FAILED:
                                // nothing to do
                                break;

                            case LKA_SUCCEEDED:
                                ++cActions;
                                break;

                            default:
                                IRTLASSERT(FALSE);
                                break;
                            }
                        }

                        if (rlkp == LKP_PERFORM_STOP
                            || rlkp == LKP_DELETE_STOP)
                        {
                            if (lkl == LKL_WRITELOCK)
                                pbkt->WriteUnlock();
                            else
                                pbkt->ReadUnlock();
                            return cActions;
                        }

                        break;
                    }

                    default:
                        IRTLASSERT(FALSE);
                        break;
                    }
                }
            }
        }

#ifdef LKR_COMPACT_DELETE
      unlock:
#endif // LKR_COMPACT_DELETE
        if (lkl == LKL_WRITELOCK)
            pbkt->WriteUnlock();
        else
            pbkt->ReadUnlock();
    }

    return cActions;
} // CLKRLinearHashTable::_ApplyIf
//------------------------------------------------------------------------
// Function: CLKRLinearHashTable::_DeleteIf
// Synopsis: Deletes all records that match the predicate
// Returns: Count of successful deletions
// Note:    Caller must already hold the table-wide write lock.
//          LKP_PERFORM/LKP_PERFORM_STOP are treated as delete requests
//          here, since deletion *is* the action being performed.
//------------------------------------------------------------------------
DWORD
CLKRLinearHashTable::_DeleteIf(
    PFnRecordPred pfnPredicate,
    void*         pvState,
    LK_PREDICATE& rlkp)
{
    if (!IsUsable())
        return LK_UNUSABLE;

    IRTLASSERT(IsWriteLocked());
    IRTLASSERT(pfnPredicate != NULL);

    // Defensive retail-build check of the asserted preconditions
    if (!IsWriteLocked() || pfnPredicate == NULL)
        return 0;

    DWORD cActions = 0;

    for (DWORD iBkt = 0; iBkt < m_cActiveBuckets; ++iBkt)
    {
        CBucket* const pbkt = _Bucket(iBkt);
        IRTLASSERT(pbkt != NULL);
        pbkt->WriteLock();

        for (CNodeClump* pncCurr = &pbkt->m_ncFirst, *pncPrev = NULL;
             pncCurr != NULL;
             pncPrev = pncCurr, pncCurr = pncCurr->m_pncNext)
        {
            for (int i = 0; i < NODES_PER_CLUMP; i++)
            {
#ifdef LKR_COMPACT_DELETE
                // Compact clumps: the first empty slot ends this bucket
                if (pncCurr->IsEmptyNode(i))
                {
                    IRTLASSERT(pncCurr->InvalidSignature(i));
                    IRTLASSERT(0 == _IsNodeCompact(pbkt));
                    IRTLASSERT(pncCurr->IsLastClump());
                    goto unlock;
                }
                else
#else // !LKR_COMPACT_DELETE
                if (!pncCurr->IsEmptyNode(i))
#endif // !LKR_COMPACT_DELETE
                {
                    rlkp = (*pfnPredicate)(pncCurr->m_pvNode[i], pvState);

                    switch (rlkp)
                    {
                    case LKP_ABORT:
                        // Abandon the traversal immediately
                        pbkt->WriteUnlock();
                        return cActions;
                        break;

                    case LKP_NO_ACTION:
                        // nothing to do
                        break;

                    case LKP_PERFORM:
                    case LKP_PERFORM_STOP:
                    case LKP_DELETE:
                    case LKP_DELETE_STOP:
                    {
                        // _DeleteNode adjusts pncCurr/pncPrev/i so that
                        // the loop revisits the slot that now holds the
                        // moved-in record.
                        IRTLVERIFY(_DeleteNode(pbkt, pncCurr, pncPrev, i));
                        ++cActions;

                        if (rlkp == LKP_PERFORM_STOP
                            || rlkp == LKP_DELETE_STOP)
                        {
                            pbkt->WriteUnlock();
                            return cActions;
                        }

                        break;
                    }

                    default:
                        IRTLASSERT(FALSE);
                        break;
                    }
                }
            }
        }

#ifdef LKR_COMPACT_DELETE
      unlock:
#endif // LKR_COMPACT_DELETE
        pbkt->WriteUnlock();
    }

    return cActions;
} // CLKRLinearHashTable::_DeleteIf
//------------------------------------------------------------------------
// Function: CLKRLinearHashTable::CheckTable
// Synopsis: Verify that all records are in the right place and can be located.
// Returns: 0 => hash table is consistent
//          >0 => that many misplaced records
//          <0 => otherwise invalid
// Note:    Each check is done twice: once as an IRTLASSERT (debug builds)
//          and once as `retcode += !(condition)`, which counts failed
//          checks in retail builds.
//------------------------------------------------------------------------
int
CLKRLinearHashTable::CheckTable() const
{
    if (!IsUsable())
        return LK_UNUSABLE;

    ReadLock();

    // Must call IsValid inside a lock to ensure that none of the state
    // variables change while it's being evaluated
    IRTLASSERT(IsValid());
    if (!IsValid())
    {
        ReadUnlock();
        return LK_UNUSABLE;
    }

    int   cMisplaced = 0;
    DWORD cRecords   = 0;
    int   retcode    = 0;

    // Check every bucket
    for (DWORD i = 0; i < m_cActiveBuckets; i++)
    {
        CBucket* const pbkt = _Bucket(i);
        IRTLASSERT(pbkt != NULL);
        retcode += !(pbkt != NULL);
        pbkt->ReadLock();

#ifdef LKR_COMPACT_DELETE
        IRTLASSERT(0 == _IsNodeCompact(pbkt));
#endif // LKR_COMPACT_DELETE

        // Walk the bucket chain
        for (CNodeClump* pncCurr = &pbkt->m_ncFirst, *pncPrev = NULL;
             pncCurr != NULL;
             pncPrev = pncCurr, pncCurr = pncCurr->m_pncNext)
        {
            for (DWORD j = 0; j < NODES_PER_CLUMP; j++)
            {
#ifdef LKR_COMPACT_DELETE
                // Compact clumps: once an empty slot is seen, this must
                // be the last clump and every following slot must be
                // empty with an invalid signature.
                if (pncCurr->IsEmptyNode(j))
                {
                    IRTLASSERT(pncCurr->IsLastClump());
                    retcode += !(pncCurr->IsLastClump());

                    for (DWORD k = j; k < NODES_PER_CLUMP; k++)
                    {
                        IRTLASSERT(pncCurr->IsEmptyNode(k));
                        retcode += !pncCurr->IsEmptyNode(k);
                        IRTLASSERT(pncCurr->InvalidSignature(k));
                        retcode += !pncCurr->InvalidSignature(k);
                    }

                    break;
                }
#endif // LKR_COMPACT_DELETE

                if (!pncCurr->IsEmptyNode(j))
                {
                    ++cRecords;

                    // Recompute the record's signature and home bucket;
                    // both must match what is stored.
                    const DWORD_PTR pnKey =
                        _ExtractKey(pncCurr->m_pvNode[j]);

                    DWORD dwSignature = _CalcKeyHash(pnKey);
                    IRTLASSERT(dwSignature != HASH_INVALID_SIGNATURE);
                    retcode += !(dwSignature != HASH_INVALID_SIGNATURE);
                    IRTLASSERT(dwSignature == pncCurr->m_dwKeySigs[j]);
                    retcode += !(dwSignature == pncCurr->m_dwKeySigs[j]);

                    DWORD address = _BucketAddress(dwSignature);
                    IRTLASSERT(address == i);
                    retcode += !(address == i);

                    if (address != i || dwSignature != pncCurr->m_dwKeySigs[j])
                        cMisplaced++;
                }
                else // pncCurr->IsEmptyNode(j)
                {
                    // Empty slots must carry the invalid signature
                    IRTLASSERT(pncCurr->InvalidSignature(j));
                    retcode += !pncCurr->InvalidSignature(j);
                }
            }

            // Chain links must be consistent
            if (pncPrev != NULL)
            {
                IRTLASSERT(pncPrev->m_pncNext == pncCurr);
                retcode += !(pncPrev->m_pncNext == pncCurr);
            }
        }

        pbkt->ReadUnlock();
    }

    // Total records seen must agree with the running count
    if (cRecords != m_cRecords)
        ++retcode;
    IRTLASSERT(cRecords == m_cRecords);
    retcode += !(cRecords == m_cRecords);

    // Misplaced records dominate the return value when present
    if (cMisplaced > 0)
        retcode = cMisplaced;
    IRTLASSERT(cMisplaced == 0);
    retcode += !(cMisplaced == 0);

    ReadUnlock();
    return retcode;
} // CheckTable
  1617. //------------------------------------------------------------------------
  1618. // Function: CLKRHashTable::CheckTable
  1619. // Synopsis: Verify that all records are in the right place and can be located.
  1620. // Returns: 0 => hash table is consistent
  1621. // >0 => that many misplaced records
  1622. // <0 => otherwise invalid
  1623. //------------------------------------------------------------------------
  1624. int
  1625. CLKRHashTable::CheckTable() const
  1626. {
  1627. if (!IsUsable())
  1628. return LK_UNUSABLE;
  1629. int retcode = 0;
  1630. for (DWORD i = 0; i < m_cSubTables; i++)
  1631. retcode += m_palhtDir[i]->CheckTable();
  1632. return retcode;
  1633. } // CLKRHashTable::CheckTable
//------------------------------------------------------------------------
// Function: CLKRLinearHashTable::Print
// Synopsis: Prints the table
// Note:    Emits a one-line summary (address and record count) to the
//          debug log.
//------------------------------------------------------------------------
void
CLKRLinearHashTable::Print() const
{
    DBGPRINTF(( DBG_CONTEXT,
                "CLKRLinearHashTable(%08p) # Elements %4d; ",
                this, m_cRecords));

    // TODO: flesh out further
} // CLKRLinearHashTable::Print
  1646. //------------------------------------------------------------------------
  1647. // Function: CLKRHashTable::Print
  1648. // Synopsis: Prints the table
  1649. //------------------------------------------------------------------------
  1650. void
  1651. CLKRHashTable::Print() const
  1652. {
  1653. DBGPRINTF(( DBG_CONTEXT,
  1654. "CLKRHashTable(%08p) # Subtables = %4d.\n",
  1655. this, m_cSubTables));
  1656. for (DWORD i = 0; i < m_cSubTables; i++)
  1657. m_palhtDir[i]->Print();
  1658. // TODO: print footer?
  1659. } // CLKRHashTable::Print
//------------------------------------------------------------------------
// Function: CLKRLinearHashTable::_Clear
// Synopsis: Remove all data from the table
// Note:    Caller must hold the table-wide write lock.  Releases every
//          record reference, frees all node clumps and segments, then
//          optionally rebuilds the segment directory at its minimum
//          size for the table's configured LK_TABLESIZE.
//------------------------------------------------------------------------
void
CLKRLinearHashTable::_Clear(
    bool fShrinkDirectory)  // Shrink to min size but don't destroy entirely?
{
    if (!IsUsable())
        return;

    IRTLASSERT(IsWriteLocked());

#ifdef _DEBUG
    // Track deletions to cross-check against the record count below
    DWORD cDeleted    = 0;
    DWORD cOldRecords = m_cRecords;
#endif // _DEBUG

    for (DWORD iBkt = 0; iBkt < m_cActiveBuckets; ++iBkt)
    {
        CBucket* const pbkt = _Bucket(iBkt);
        IRTLASSERT(pbkt != NULL);
        pbkt->WriteLock();

#ifdef LKR_COMPACT_DELETE
        IRTLASSERT(0 == _IsNodeCompact(pbkt));
#endif // LKR_COMPACT_DELETE

        // Walk the chain; the advance step is at the bottom of the loop
        // because each clump is unhooked (and possibly freed) as we go.
        for (CNodeClump* pncCurr = &pbkt->m_ncFirst, *pncPrev = NULL;
             pncCurr != NULL;
             )
        {
            for (int i = 0; i < NODES_PER_CLUMP; i++)
            {
#ifdef LKR_COMPACT_DELETE
                if (pncCurr->IsEmptyNode(i))
                {
                    IRTLASSERT(pncCurr->InvalidSignature(i));
                    IRTLASSERT(pncCurr->IsLastClump());
                    break;
                }
                else
                {
                    // Drop our reference and blank the slot directly;
                    // _DeleteNode's compaction is unnecessary when the
                    // whole clump is being emptied anyway.
                    _AddRefRecord(pncCurr->m_pvNode[i], -1);
                    pncCurr->m_pvNode[i]    = NULL;
                    pncCurr->m_dwKeySigs[i] = HASH_INVALID_SIGNATURE;
                    m_cRecords--;
#ifdef _DEBUG
                    ++cDeleted;
#endif // _DEBUG
                }
#else // !LKR_COMPACT_DELETE
                if (!pncCurr->IsEmptyNode(i))
                {
                    IRTLVERIFY(_DeleteNode(pbkt, pncCurr, pncPrev, i));
#ifdef _DEBUG
                    ++cDeleted;
#endif // _DEBUG
                }
#endif // !LKR_COMPACT_DELETE
            } // for (i ...

            // Advance, detaching the clump we just emptied from the chain
            pncPrev = pncCurr;
            pncCurr = pncCurr->m_pncNext;
            pncPrev->m_pncNext = NULL;

#ifdef LKR_COMPACT_DELETE
            // Free every clump except the one embedded in the bucket
            if (pncPrev != &pbkt->m_ncFirst)
                _FreeNodeClump(pncPrev);
#endif // LKR_COMPACT_DELETE
        } // for (pncCurr ...

        pbkt->WriteUnlock();
    } // for (iBkt ...

    // cDeleted/cOldRecords exist only in _DEBUG builds; IRTLASSERT
    // presumably compiles away in retail builds, so this is safe.
    IRTLASSERT(m_cRecords == 0 && cDeleted == cOldRecords);

    // delete all (or all but the first MIN_DIRSIZE) segments
    for (DWORD iSeg = 0; iSeg < m_cActiveBuckets; iSeg += m_dwSegSize)
    {
        _FreeSegment(_Segment(iSeg));
        _Segment(iSeg) = NULL;
    }

    _FreeSegmentDirectory(m_paDirSegs);
    m_paDirSegs = NULL;
    m_cDirSegs = m_nLevel = m_cActiveBuckets = m_iExpansionIdx = 0;
    m_dwBktAddrMask = 1;
#ifdef LKR_MASK
    m_dwBktAddrMask1 = (m_dwBktAddrMask << 1) | 1;
#endif // LKR_MASK

    // reduce directory of segments to minimum size
    if (fShrinkDirectory)
    {
        if (LK_SMALL_TABLESIZE == m_lkts)
            m_cActiveBuckets = CSmallSegment::INITSIZE;
        else if (LK_MEDIUM_TABLESIZE == m_lkts)
            m_cActiveBuckets = CMediumSegment::INITSIZE;
        else if (LK_LARGE_TABLESIZE == m_lkts)
            m_cActiveBuckets = CLargeSegment::INITSIZE;
        else
            IRTLASSERT(! "Unknown LK_TABLESIZE");

        _SetSegVars(m_lkts);
    }
} // CLKRLinearHashTable::_Clear
  1754. //------------------------------------------------------------------------
  1755. // Function: CLKRHashTable::Clear
  1756. // Synopsis: Remove all data from the table
  1757. //------------------------------------------------------------------------
  1758. void
  1759. CLKRHashTable::Clear()
  1760. {
  1761. WriteLock();
  1762. for (DWORD i = 0; i < m_cSubTables; i++)
  1763. m_palhtDir[i]->_Clear(true);
  1764. WriteUnlock();
  1765. } // CLKRHashTable::Clear
//------------------------------------------------------------------------
// Function: CLKRLinearHashTable::GetStatistics
// Synopsis: Gather statistics about the table
// Returns:  CLKRHashTableStats by value.  If the table is unusable
//           (construction failed), the default-constructed stats are
//           returned unchanged.
// Note:     Buckets are walked without taking their locks, so the
//           numbers can be slightly stale while the table is in
//           concurrent use.
//------------------------------------------------------------------------
CLKRHashTableStats
CLKRLinearHashTable::GetStatistics() const
{
    CLKRHashTableStats stats;
    if (!IsUsable())
        return stats;
    if (m_paDirSegs != NULL)
    {
        // Basic size/shape of the table
        stats.RecordCount = m_cRecords;
        stats.TableSize = m_cActiveBuckets;
        // Fraction of the current level's buckets that have been split
        stats.SplitFactor = static_cast<double>(m_iExpansionIdx)
            / (1 << m_nLevel);
        stats.DirectorySize = m_cDirSegs;
        stats.NodeClumpSize = NODES_PER_CLUMP;
        stats.CBucketSize = sizeof(CBucket);
#ifdef LOCK_INSTRUMENTATION
        // Zero the per-bucket lock accumulators; they are summed below
        // and averaged at the end.
        stats.m_alsBucketsAvg.m_nContentions = 0;
        stats.m_alsBucketsAvg.m_nSleeps = 0;
        stats.m_alsBucketsAvg.m_nContentionSpins = 0;
        stats.m_alsBucketsAvg.m_nAverageSpins = 0;
        stats.m_alsBucketsAvg.m_nReadLocks = 0;
        stats.m_alsBucketsAvg.m_nWriteLocks = 0;
        stats.m_alsBucketsAvg.m_nItems = 0;
#endif // LOCK_INSTRUMENTATION
        int empty = 0;       // number of buckets with no records
        int totacc = 0;      // sum of search lengths over all records
        int low_count = 0;   // records in buckets below the expansion ptr
        int high_count = 0;  // records in buckets at/above the expansion ptr
        int max_length = 0;  // longest bucket chain seen
        for (DWORD i = 0; i < m_cActiveBuckets; i++)
        {
            int acc = 0;    // position of each record within this bucket
            for (CNodeClump* pncCurr = &_Bucket(i)->m_ncFirst;
                 pncCurr != NULL;
                 pncCurr = pncCurr->m_pncNext)
            {
                for (DWORD j = 0; j < NODES_PER_CLUMP; j++)
                {
                    if (!pncCurr->IsEmptyNode(j))
                    {
                        // totacc accumulates 1+2+...+acc per bucket, i.e.
                        // the total cost of finding every record
                        acc++;
                        totacc += acc;
                        int iBucketIndex = stats.BucketIndex(acc);
                        ++stats.m_aBucketLenHistogram[iBucketIndex];
                    }
                }
            }
#ifdef LOCK_INSTRUMENTATION
            // accumulate this bucket's lock statistics
            CLockStatistics ls = _Bucket(i)->LockStats();
            stats.m_alsBucketsAvg.m_nContentions += ls.m_nContentions;
            stats.m_alsBucketsAvg.m_nSleeps += ls.m_nSleeps;
            stats.m_alsBucketsAvg.m_nContentionSpins += ls.m_nContentionSpins;
            stats.m_alsBucketsAvg.m_nAverageSpins += ls.m_nAverageSpins;
            stats.m_alsBucketsAvg.m_nReadLocks += ls.m_nReadLocks;
            stats.m_alsBucketsAvg.m_nWriteLocks += ls.m_nWriteLocks;
            stats.m_alsBucketsAvg.m_nItems ++;
#endif // LOCK_INSTRUMENTATION
            max_length = max(max_length, acc);
            if (acc == 0)
                empty++;
            // Classify by whether the bucket's H0 address precedes the
            // expansion pointer (i.e. it has already been split this level)
            if (_H0(i) < m_iExpansionIdx)
            {
                low_count += acc;
            }
            else
            {
                high_count += acc;
            }
        }
        stats.LongestChain = max_length;
        stats.EmptySlots = empty;
        if (m_cActiveBuckets > 0)
        {
            if (m_cRecords > 0)
            {
                // x = split fraction, alpha = load factor; the Exp*
                // values are the analytically expected search lengths
                // for a linear hash table with these parameters
                double x=static_cast<double>(m_iExpansionIdx) /(1 << m_nLevel);
                double alpha= static_cast<double>(m_cRecords)/m_cActiveBuckets;
                double low_sl = 0.0;
                double high_sl = 0.0;
                stats.AvgSearchLength= static_cast<double>(totacc) /m_cRecords;
                stats.ExpSearchLength = 1 + alpha * 0.25 * (2 + x - x*x);
                if (m_iExpansionIdx > 0)
                    low_sl = static_cast<double>(low_count)
                        / (2.0 * m_iExpansionIdx);
                if (m_cActiveBuckets - 2 * m_iExpansionIdx > 0)
                    high_sl = static_cast<double>(high_count)
                        / (m_cActiveBuckets - 2.0 * m_iExpansionIdx);
                stats.AvgUSearchLength = low_sl * x + high_sl * (1.0 - x);
                stats.ExpUSearchLength = alpha * 0.5 * (2 + x - x*x);
            }
#ifdef LOCK_INSTRUMENTATION
            // convert bucket-lock sums into per-bucket averages
            stats.m_alsBucketsAvg.m_nContentions /= m_cActiveBuckets;
            stats.m_alsBucketsAvg.m_nSleeps /= m_cActiveBuckets;
            stats.m_alsBucketsAvg.m_nContentionSpins /= m_cActiveBuckets;
            stats.m_alsBucketsAvg.m_nAverageSpins /= m_cActiveBuckets;
            stats.m_alsBucketsAvg.m_nReadLocks /= m_cActiveBuckets;
            stats.m_alsBucketsAvg.m_nWriteLocks /= m_cActiveBuckets;
#endif // LOCK_INSTRUMENTATION
        }
        else
        {
            stats.AvgSearchLength = 0.0;
            stats.ExpSearchLength = 0.0;
            stats.AvgUSearchLength = 0.0;
            stats.ExpUSearchLength = 0.0;
        }
    }
#ifdef LOCK_INSTRUMENTATION
    // table-level lock statistics (a single lock, hence m_nItems = 1)
    stats.m_gls = TableLock::GlobalStatistics();
    CLockStatistics ls = _LockStats();
    stats.m_alsTable.m_nContentions = ls.m_nContentions;
    stats.m_alsTable.m_nSleeps = ls.m_nSleeps;
    stats.m_alsTable.m_nContentionSpins = ls.m_nContentionSpins;
    stats.m_alsTable.m_nAverageSpins = ls.m_nAverageSpins;
    stats.m_alsTable.m_nReadLocks = ls.m_nReadLocks;
    stats.m_alsTable.m_nWriteLocks = ls.m_nWriteLocks;
    stats.m_alsTable.m_nItems = 1;
#endif // LOCK_INSTRUMENTATION
    return stats;
} // CLKRLinearHashTable::GetStatistics
//------------------------------------------------------------------------
// Function: CLKRHashTable::GetStatistics
// Synopsis: Gather statistics about the table
// Returns:  Aggregated CLKRHashTableStats over all subtables: counts are
//           summed, chain length is the maximum, and the per-subtable
//           metrics are averaged at the end.  Returns default stats if
//           the table is unusable.
//------------------------------------------------------------------------
CLKRHashTableStats
CLKRHashTable::GetStatistics() const
{
    CLKRHashTableStats hts;
    if (!IsUsable())
        return hts;
    for (DWORD i = 0; i < m_cSubTables; i++)
    {
        // Ask each subtable for its own statistics and fold them in
        CLKRHashTableStats stats = m_palhtDir[i]->GetStatistics();
        hts.RecordCount += stats.RecordCount;
        hts.TableSize += stats.TableSize;
        hts.DirectorySize += stats.DirectorySize;
        hts.LongestChain = max(hts.LongestChain, stats.LongestChain);
        hts.EmptySlots += stats.EmptySlots;
        hts.SplitFactor += stats.SplitFactor;
        hts.AvgSearchLength += stats.AvgSearchLength;
        hts.ExpSearchLength += stats.ExpSearchLength;
        hts.AvgUSearchLength += stats.AvgUSearchLength;
        hts.ExpUSearchLength += stats.ExpUSearchLength;
        // These are per-bucket properties, identical in every subtable,
        // so the last assignment wins rather than summing
        hts.NodeClumpSize = stats.NodeClumpSize;
        hts.CBucketSize = stats.CBucketSize;
        for (int j = 0; j < CLKRHashTableStats::MAX_BUCKETS; ++j)
            hts.m_aBucketLenHistogram[j] += stats.m_aBucketLenHistogram[j];
#ifdef LOCK_INSTRUMENTATION
        hts.m_alsTable.m_nContentions += stats.m_alsTable.m_nContentions;
        hts.m_alsTable.m_nSleeps += stats.m_alsTable.m_nSleeps;
        hts.m_alsTable.m_nContentionSpins
            += stats.m_alsTable.m_nContentionSpins;
        hts.m_alsTable.m_nAverageSpins += stats.m_alsTable.m_nAverageSpins;
        hts.m_alsTable.m_nReadLocks += stats.m_alsTable.m_nReadLocks;
        hts.m_alsTable.m_nWriteLocks += stats.m_alsTable.m_nWriteLocks;
        hts.m_alsBucketsAvg.m_nContentions
            += stats.m_alsBucketsAvg.m_nContentions;
        hts.m_alsBucketsAvg.m_nSleeps
            += stats.m_alsBucketsAvg.m_nSleeps;
        hts.m_alsBucketsAvg.m_nContentionSpins
            += stats.m_alsBucketsAvg.m_nContentionSpins;
        hts.m_alsBucketsAvg.m_nAverageSpins
            += stats.m_alsBucketsAvg.m_nAverageSpins;
        hts.m_alsBucketsAvg.m_nReadLocks
            += stats.m_alsBucketsAvg.m_nReadLocks;
        hts.m_alsBucketsAvg.m_nWriteLocks
            += stats.m_alsBucketsAvg.m_nWriteLocks;
        hts.m_alsBucketsAvg.m_nItems
            += stats.m_alsBucketsAvg.m_nItems;
        hts.m_gls = stats.m_gls;
#endif // LOCK_INSTRUMENTATION
    }
    // Average out the subtables statistics. (Does this make sense
    // for all of these fields?)
    // NOTE(review): divides unconditionally by m_cSubTables -- assumes
    // m_cSubTables > 0 whenever IsUsable() is true; confirm the
    // constructor guarantees this.
    hts.DirectorySize /= m_cSubTables;
    hts.SplitFactor /= m_cSubTables;
    hts.AvgSearchLength /= m_cSubTables;
    hts.ExpSearchLength /= m_cSubTables;
    hts.AvgUSearchLength /= m_cSubTables;
    hts.ExpUSearchLength /= m_cSubTables;
#ifdef LOCK_INSTRUMENTATION
    hts.m_alsTable.m_nContentions /= m_cSubTables;
    hts.m_alsTable.m_nSleeps /= m_cSubTables;
    hts.m_alsTable.m_nContentionSpins /= m_cSubTables;
    hts.m_alsTable.m_nAverageSpins /= m_cSubTables;
    hts.m_alsTable.m_nReadLocks /= m_cSubTables;
    hts.m_alsTable.m_nWriteLocks /= m_cSubTables;
    hts.m_alsTable.m_nItems = m_cSubTables;
    hts.m_alsBucketsAvg.m_nContentions /= m_cSubTables;
    hts.m_alsBucketsAvg.m_nSleeps /= m_cSubTables;
    hts.m_alsBucketsAvg.m_nContentionSpins /= m_cSubTables;
    hts.m_alsBucketsAvg.m_nAverageSpins /= m_cSubTables;
    hts.m_alsBucketsAvg.m_nReadLocks /= m_cSubTables;
    hts.m_alsBucketsAvg.m_nWriteLocks /= m_cSubTables;
#endif // LOCK_INSTRUMENTATION
    return hts;
} // CLKRHashTable::GetStatistics
//-----------------------------------------------------------------------
// Function: CLKRLinearHashTable::_SetSegVars
// Synopsis: sets the size-specific segment variables
//           (segment bits/size/mask, bucket-address masks, level, and
//           expansion index), then allocates and populates the segment
//           directory for m_cActiveBuckets buckets.  On allocation
//           failure, sets m_lkrcState to LK_ALLOC_FAIL, resets the
//           table's size fields, and propagates the error to the
//           parent CLKRHashTable, if any.
// Requires: m_cActiveBuckets must already be set (> 0).
//-----------------------------------------------------------------------
void
CLKRLinearHashTable::_SetSegVars(
    LK_TABLESIZE lkts)
{
    switch (lkts)
    {
    case LK_SMALL_TABLESIZE:
        m_lkts = LK_SMALL_TABLESIZE;
        m_dwSegBits = CSmallSegment::SEGBITS;
        m_dwSegSize = CSmallSegment::SEGSIZE;
        m_dwSegMask = CSmallSegment::SEGMASK;
        break;
    default:
        // unknown size: assert in debug, then treat as medium
        IRTLASSERT(! "Unknown LK_TABLESIZE");
        // fall-through
    case LK_MEDIUM_TABLESIZE:
        m_lkts = LK_MEDIUM_TABLESIZE;
        m_dwSegBits = CMediumSegment::SEGBITS;
        m_dwSegSize = CMediumSegment::SEGSIZE;
        m_dwSegMask = CMediumSegment::SEGMASK;
        break;
    case LK_LARGE_TABLESIZE:
        m_lkts = LK_LARGE_TABLESIZE;
        m_dwSegBits = CLargeSegment::SEGBITS;
        m_dwSegSize = CLargeSegment::SEGSIZE;
        m_dwSegMask = CLargeSegment::SEGMASK;
        break;
    }
    // Start with one segment's worth of addressable buckets
    m_dwBktAddrMask = m_dwSegMask;
    m_nLevel = m_dwSegBits;
#ifdef LKR_MASK
    m_dwBktAddrMask1 = (m_dwBktAddrMask << 1) | 1;
#endif // LKR_MASK
    IRTLASSERT(m_cActiveBuckets > 0);
    IRTLASSERT(m_nLevel == m_dwSegBits);
    IRTLASSERT(m_dwBktAddrMask == (1U << m_nLevel) - 1);
    // NOTE(review): m_dwBktAddrMask1 is referenced here and assigned
    // below outside the #ifdef LKR_MASK guard -- confirm the member
    // is declared unconditionally in the header.
    IRTLASSERT(m_dwBktAddrMask1 == ((m_dwBktAddrMask << 1) | 1));
    IRTLASSERT(m_dwSegBits > 0);
    IRTLASSERT(m_dwSegSize == (1U << m_dwSegBits));
    IRTLASSERT(m_dwSegMask == (m_dwSegSize - 1));
    IRTLASSERT(m_dwBktAddrMask == m_dwSegMask);
    // adjust m_dwBktAddrMask (== m_dwSegMask) to make it large
    // enough to distribute the buckets across the address space
    for (DWORD tmp = m_cActiveBuckets >> m_dwSegBits; tmp > 1; tmp >>= 1)
    {
        ++m_nLevel;
        m_dwBktAddrMask = (m_dwBktAddrMask << 1) | 1;
    }
    m_dwBktAddrMask1 = (m_dwBktAddrMask << 1) | 1;
    IRTLASSERT(_H1(m_cActiveBuckets) == m_cActiveBuckets);
    m_iExpansionIdx = m_cActiveBuckets & m_dwBktAddrMask;
    // create and clear directory of segments
    // Directory size is the smallest power of two (within
    // [MIN_DIRSIZE, MAX_DIRSIZE]) that can address m_cActiveBuckets
    DWORD cDirSegs = MIN_DIRSIZE;
    while (cDirSegs < (m_cActiveBuckets >> m_dwSegBits))
        cDirSegs <<= 1;
    cDirSegs = min(cDirSegs, MAX_DIRSIZE);
    IRTLASSERT((cDirSegs << m_dwSegBits) >= m_cActiveBuckets);
    // Assume failure until the directory and all segments are allocated
    m_lkrcState = LK_ALLOC_FAIL;
    m_paDirSegs = _AllocateSegmentDirectory(cDirSegs);
    if (m_paDirSegs != NULL)
    {
        m_cDirSegs = cDirSegs;
        IRTLASSERT(m_cDirSegs >= MIN_DIRSIZE
                   && (m_cDirSegs & (m_cDirSegs-1)) == 0); // == (1 << N)
        // create and initialize only the required segments
        DWORD dwMaxSegs = (m_cActiveBuckets + m_dwSegSize - 1) >> m_dwSegBits;
        IRTLASSERT(dwMaxSegs <= m_cDirSegs);
        TRACE(TEXT("InitSegDir: m_lkts = %d, m_cActiveBuckets = %lu, ")
              TEXT("m_dwSegSize = %lu, bits = %lu\n")
              TEXT("m_cDirSegs = %lu, dwMaxSegs = %lu, ")
              TEXT("segment total size = %lu bytes\n"),
              m_lkts, m_cActiveBuckets,
              m_dwSegSize, m_dwSegBits,
              m_cDirSegs, dwMaxSegs,
              m_dwSegSize * sizeof(CBucket));
        m_lkrcState = LK_SUCCESS; // so IsValid/IsUsable won't fail
        for (DWORD i = 0; i < dwMaxSegs; i++)
        {
            CSegment* pSeg = _AllocateSegment();
            if (pSeg != NULL)
                m_paDirSegs[i].m_pseg = pSeg;
            else
            {
                // problem: deallocate everything allocated so far
                m_lkrcState = LK_ALLOC_FAIL;
                for (DWORD j = i; j-- > 0; )
                {
                    _FreeSegment(m_paDirSegs[j].m_pseg);
                    m_paDirSegs[j].m_pseg = NULL;
                }
                _FreeSegmentDirectory(m_paDirSegs);
                break;
            }
        }
    }
    if (m_lkrcState != LK_SUCCESS)
    {
        // leave the table empty but structurally consistent
        m_paDirSegs = NULL;
        m_cDirSegs = 0;
        m_cActiveBuckets = 0;
        m_iExpansionIdx = 0;
        // Propagate error back up to parent (if it exists). This ensures
        // that all of the parent's public methods will start failing.
        if (m_phtParent != NULL)
            m_phtParent->m_lkrcState = m_lkrcState;
    }
} // CLKRLinearHashTable::_SetSegVars
  2078. #include <stdlib.h>
  2079. // #define LKR_RANDOM_MEMORY_FAILURES 1000 // 1..RAND_MAX (32767)
  2080. // Memory allocation wrappers to allow us to simulate allocation
  2081. // failures during testing
  2082. //------------------------------------------------------------------------
  2083. // Function: CLKRLinearHashTable::_AllocateSegmentDirectory
  2084. // Synopsis:
  2085. //------------------------------------------------------------------------
  2086. CLKRLinearHashTable::CDirEntry* const
  2087. CLKRLinearHashTable::_AllocateSegmentDirectory(
  2088. size_t n)
  2089. {
  2090. #ifdef LKR_RANDOM_MEMORY_FAILURES
  2091. if (rand() < LKR_RANDOM_MEMORY_FAILURES)
  2092. return NULL;
  2093. #endif
  2094. CDirEntry* pade = new CDirEntry [n];
  2095. #ifdef _DEBUG
  2096. for (size_t i = 0; i < n; ++i)
  2097. IRTLASSERT(pade[i].m_pseg == NULL);
  2098. #endif
  2099. return pade;
  2100. }
  2101. //------------------------------------------------------------------------
  2102. // Function: CLKRLinearHashTable::_FreeSegmentDirectory
  2103. // Synopsis:
  2104. //------------------------------------------------------------------------
  2105. bool
  2106. CLKRLinearHashTable::_FreeSegmentDirectory(
  2107. CLKRLinearHashTable::CDirEntry* paDirSegs)
  2108. {
  2109. delete [] paDirSegs;
  2110. return true;
  2111. }
  2112. //------------------------------------------------------------------------
  2113. // Function: CLKRLinearHashTable::_AllocateNodeClump
  2114. // Synopsis:
  2115. //------------------------------------------------------------------------
  2116. CLKRLinearHashTable::CNodeClump* const
  2117. CLKRLinearHashTable::_AllocateNodeClump()
  2118. {
  2119. #ifdef LKR_RANDOM_MEMORY_FAILURES
  2120. if (rand() < LKR_RANDOM_MEMORY_FAILURES)
  2121. return NULL;
  2122. #endif
  2123. return new CNodeClump;
  2124. }
  2125. //------------------------------------------------------------------------
  2126. // Function: CLKRLinearHashTable::_FreeSegment
  2127. // Synopsis:
  2128. //------------------------------------------------------------------------
  2129. bool
  2130. CLKRLinearHashTable::_FreeNodeClump(
  2131. CLKRLinearHashTable::CNodeClump* pnc)
  2132. {
  2133. delete pnc;
  2134. return true;
  2135. }
//-----------------------------------------------------------------------
// Function: CLKRLinearHashTable::_AllocateSegment
// Synopsis: creates a new segment of the appropriate size
//           (chosen by m_lkts; an unknown size asserts and falls
//           through to a medium segment)
// Output: pointer to the new segment; NULL => failure
//-----------------------------------------------------------------------
CLKRLinearHashTable::CSegment* const
CLKRLinearHashTable::_AllocateSegment(
    ) const
{
#ifdef LKR_RANDOM_MEMORY_FAILURES
    // test hook: simulate sporadic allocation failures
    if (rand() < LKR_RANDOM_MEMORY_FAILURES)
        return NULL;
#endif
    CSegment* pseg = NULL;
    switch (m_lkts)
    {
    case LK_SMALL_TABLESIZE:
        // each segment class has its own static allocator (sm_palloc),
        // which must have been initialized by LKRHashTableInit
        IRTLASSERT(CLKRLinearHashTable::CSmallSegment::sm_palloc != NULL);
        pseg = new CSmallSegment;
        break;
    default:
        IRTLASSERT(FALSE);
        // fall-through: treat an unknown table size as medium
    case LK_MEDIUM_TABLESIZE:
        IRTLASSERT(CLKRLinearHashTable::CMediumSegment::sm_palloc != NULL);
        pseg = new CMediumSegment;
        break;
    case LK_LARGE_TABLESIZE:
        IRTLASSERT(CLKRLinearHashTable::CLargeSegment::sm_palloc != NULL);
        pseg = new CLargeSegment;
        break;
    }
    IRTLASSERT(pseg != NULL);
    // If bucket locks use individually configured spin counts,
    // apply the table's spin count to every bucket in the segment
    if (pseg != NULL && BucketLock::PerLockSpin() == LOCK_INDIVIDUAL_SPIN)
    {
        for (DWORD i = 0; i < m_dwSegSize; ++i)
            pseg->Slot(i).SetSpinCount(m_wBucketLockSpins);
    }
    return pseg;
} // CLKRLinearHashTable::_AllocateSegment
  2176. //------------------------------------------------------------------------
  2177. // Function: CLKRLinearHashTable::_FreeSegment
  2178. // Synopsis:
  2179. //------------------------------------------------------------------------
  2180. bool
  2181. CLKRLinearHashTable::_FreeSegment(
  2182. CLKRLinearHashTable::CSegment* pseg) const
  2183. {
  2184. delete pseg;
  2185. return true;
  2186. }
  2187. //------------------------------------------------------------------------
  2188. // Function: CLKRHashTable::_AllocateSubTableArray
  2189. // Synopsis:
  2190. //------------------------------------------------------------------------
  2191. CLKRHashTable::SubTable** const
  2192. CLKRHashTable::_AllocateSubTableArray(
  2193. size_t n)
  2194. {
  2195. #ifdef LKR_RANDOM_MEMORY_FAILURES
  2196. if (rand() < LKR_RANDOM_MEMORY_FAILURES)
  2197. return NULL;
  2198. #endif
  2199. return new SubTable* [n];
  2200. }
  2201. //------------------------------------------------------------------------
  2202. // Function: CLKRHashTable::_FreeSubTableArray
  2203. // Synopsis:
  2204. //------------------------------------------------------------------------
  2205. bool
  2206. CLKRHashTable::_FreeSubTableArray(
  2207. CLKRHashTable::SubTable** palht)
  2208. {
  2209. delete [] palht;
  2210. return true;
  2211. }
  2212. //------------------------------------------------------------------------
  2213. // Function: CLKRHashTable::_AllocateSubTable
  2214. // Synopsis:
  2215. //------------------------------------------------------------------------
  2216. CLKRHashTable::SubTable* const
  2217. CLKRHashTable::_AllocateSubTable(
  2218. LPCSTR pszName, // An identifier for debugging
  2219. PFnExtractKey pfnExtractKey, // Extract key from record
  2220. PFnCalcKeyHash pfnCalcKeyHash, // Calculate hash signature of key
  2221. PFnEqualKeys pfnEqualKeys, // Compare two keys
  2222. PFnAddRefRecord pfnAddRefRecord,// AddRef in FindKey, etc
  2223. double maxload, // Upperbound on average chain length
  2224. DWORD initsize, // Initial size of hash table.
  2225. CLKRHashTable* phtParent // Owning table.
  2226. )
  2227. {
  2228. #ifdef LKR_RANDOM_MEMORY_FAILURES
  2229. if (rand() < LKR_RANDOM_MEMORY_FAILURES)
  2230. return NULL;
  2231. #endif
  2232. return new SubTable(pszName, pfnExtractKey, pfnCalcKeyHash,
  2233. pfnEqualKeys, pfnAddRefRecord,
  2234. maxload, initsize, phtParent);
  2235. }
  2236. //------------------------------------------------------------------------
  2237. // Function: CLKRHashTable::_FreeSubTable
  2238. // Synopsis:
  2239. //------------------------------------------------------------------------
  2240. bool
  2241. CLKRHashTable::_FreeSubTable(
  2242. CLKRHashTable::SubTable* plht)
  2243. {
  2244. delete plht;
  2245. return true;
  2246. }
//-----------------------------------------------------------------------
// Function: CLKRLinearHashTable::_Expand
// Synopsis: Expands the table by one bucket. Done by splitting the
//           bucket pointed to by m_iExpansionIdx.
// Output: LK_SUCCESS, if expansion was successful.
// LK_ALLOC_FAIL, if expansion failed due to lack of memory.
// Locking:  Takes the table write lock for the bookkeeping phase; takes
//           write locks on both affected buckets, then releases the
//           table lock before redistributing records, so the rest of
//           the table stays available during the split.
//-----------------------------------------------------------------------
LK_RETCODE
CLKRLinearHashTable::_Expand()
{
    // hard ceiling: the segment directory cannot address more buckets
    if (m_cActiveBuckets >= MAX_DIRSIZE * m_dwSegSize - 1)
        return LK_ALLOC_FAIL; // table is not allowed to grow any more
    WriteLock();
    // double segment directory size if necessary
    if (m_cActiveBuckets >= m_cDirSegs * m_dwSegSize)
    {
        IRTLASSERT(m_cDirSegs < MAX_DIRSIZE);
        DWORD cDirSegsNew = (m_cDirSegs == 0) ? MIN_DIRSIZE : m_cDirSegs << 1;
        CDirEntry* paDirSegsNew = _AllocateSegmentDirectory(cDirSegsNew);
        if (paDirSegsNew != NULL)
        {
            // move segment pointers into the larger directory, clearing
            // the old entries so freeing the old directory won't touch them
            for (DWORD j = 0; j < m_cDirSegs; j++)
            {
                paDirSegsNew[j] = m_paDirSegs[j];
                m_paDirSegs[j].m_pseg = NULL;
            }
            _FreeSegmentDirectory(m_paDirSegs);
            m_paDirSegs = paDirSegsNew;
            m_cDirSegs = cDirSegsNew;
        }
        else
        {
            WriteUnlock();
            return LK_ALLOC_FAIL; // expansion failed
        }
    }
    // locate the new bucket, creating a new segment if necessary
    ++m_cActiveBuckets;
    DWORD dwOldBkt = m_iExpansionIdx;
    // the new bucket is the old bucket's "buddy" one level up
    DWORD dwNewBkt = (1 << m_nLevel) | dwOldBkt;
    IRTLASSERT(dwOldBkt < m_cActiveBuckets);
    IRTLASSERT(dwNewBkt < m_cActiveBuckets);
    CSegment* psegNew = _Segment(dwNewBkt);
    if (psegNew == NULL)
    {
        psegNew = _AllocateSegment();
        if (psegNew == NULL)
        {
            // roll back the bucket count and abort
            --m_cActiveBuckets;
            WriteUnlock();
            return LK_ALLOC_FAIL; // expansion failed
        }
        _Segment(dwNewBkt) = psegNew;
    }
    // prepare to relocate records to the new bucket
    CBucket* pbktOld = _Bucket(dwOldBkt);
    CBucket* pbktNew = _Bucket(dwNewBkt);
    // get locks on the two buckets involved
    pbktOld->WriteLock();
    pbktNew->WriteLock();
    // Now work out if we need to allocate any extra CNodeClumps. We do
    // this up front, before calling _SplitRecordSet, as it's hard to
    // gracefully recover from the depths of that routine should we run
    // out of memory.
    CNodeClump* pncFreeList = NULL;
    LK_RETCODE lkrc = LK_SUCCESS;
    // If the old bucket has more than one CNodeClump, there's a chance that
    // we'll need extra CNodeClumps in the new bucket too. If it doesn't,
    // we definitely won't. One CNodeClump is enough to prime the freelist.
    if (!pbktOld->m_ncFirst.IsLastClump())
    {
        pncFreeList = _AllocateNodeClump();
        if (pncFreeList == NULL)
        {
            lkrc = LK_ALLOC_FAIL;
            --m_cActiveBuckets;
        }
    }
    // adjust expansion pointer, level, and mask
    if (lkrc == LK_SUCCESS)
    {
        // wrapping the expansion pointer completes a doubling: start
        // a new level with a wider bucket-address mask
        if (++m_iExpansionIdx == (1U << m_nLevel))
        {
            ++m_nLevel;
            m_iExpansionIdx = 0;
            m_dwBktAddrMask = (m_dwBktAddrMask << 1) | 1;
            // m_dwBktAddrMask = 00011..111
            IRTLASSERT((m_dwBktAddrMask & (m_dwBktAddrMask+1)) == 0);
#ifdef LKR_MASK
            m_dwBktAddrMask1 = (m_dwBktAddrMask << 1) | 1;
            IRTLASSERT((m_dwBktAddrMask1 & (m_dwBktAddrMask1+1)) == 0);
#endif // LKR_MASK
        }
    }
    DWORD iExpansionIdx = m_iExpansionIdx; // save to avoid race conditions
    DWORD dwBktAddrMask = m_dwBktAddrMask; // ditto
    // Release the table lock before doing the actual relocation
    WriteUnlock();
    if (lkrc == LK_SUCCESS)
    {
        // redistribute records between the two buckets; both bucket
        // write locks are still held
        lkrc = _SplitRecordSet(&pbktOld->m_ncFirst, &pbktNew->m_ncFirst,
                               iExpansionIdx, dwBktAddrMask,
                               dwNewBkt, pncFreeList);
    }
    pbktNew->WriteUnlock();
    pbktOld->WriteUnlock();
    return lkrc;
} // CLKRLinearHashTable::_Expand
//------------------------------------------------------------------------
// Function: CLKRLinearHashTable::_SplitRecordSet
// Synopsis: Split records between the old and new buckets.
//           Rehashes every record in the old chain: records whose
//           address (under iExpansionIdx/dwBktAddrMask) equals dwNewBkt
//           move to the new chain; the rest are repacked densely into
//           the old chain.  Extra clumps come from pncFreeList, which
//           the caller must have sized adequately (this routine cannot
//           fail for lack of memory); surplus clumps are freed.
// Requires: Caller holds write locks on both buckets.
// Returns:  LK_SUCCESS always.
//------------------------------------------------------------------------
LK_RETCODE
CLKRLinearHashTable::_SplitRecordSet(
    CNodeClump* pncOldTarget,
    CNodeClump* pncNewTarget,
    DWORD iExpansionIdx,
    DWORD dwBktAddrMask,
    DWORD dwNewBkt,
    CNodeClump* pncFreeList // list of free nodes available for reuse
    )
{
    CNodeClump ncFirst = *pncOldTarget; // save head of old target chain
    CNodeClump* pncOldList = &ncFirst;
    CNodeClump* pncTmp;
    int iOldSlot = 0;   // next free slot in the old target clump
    int iNewSlot = 0;   // next free slot in the new target clump
    // clear target buckets
    pncOldTarget->Clear();
    pncNewTarget->Clear();
    // scan through the old bucket chain and decide where to move each record
    while (pncOldList != NULL)
    {
        for (DWORD i = 0; i < NODES_PER_CLUMP; i++)
        {
            // node already empty?
            if (pncOldList->IsEmptyNode(i))
            {
                IRTLASSERT(pncOldList->InvalidSignature(i));
                continue;
            }
            // calculate bucket address of this node from its stored
            // hash signature: H0 for unsplit buckets, H1 for split ones
            DWORD dwBkt = _H0(pncOldList->m_dwKeySigs[i], dwBktAddrMask);
            if (dwBkt < iExpansionIdx)
                dwBkt = _H1(pncOldList->m_dwKeySigs[i], dwBktAddrMask);
            // record to be moved to the new address?
            if (dwBkt == dwNewBkt)
            {
                // node in new bucket chain full?
                if (iNewSlot == NODES_PER_CLUMP)
                {
                    // the calling routine has passed in a FreeList adequate
                    // for all needs
                    IRTLASSERT(pncFreeList != NULL);
                    pncTmp = pncFreeList;
                    pncFreeList = pncFreeList->m_pncNext;
                    pncTmp->Clear();
                    pncNewTarget->m_pncNext = pncTmp;
                    pncNewTarget = pncTmp;
                    iNewSlot = 0;
                }
                // copy signature and record pointer into the new chain
                pncNewTarget->m_dwKeySigs[iNewSlot]
                    = pncOldList->m_dwKeySigs[i];
                pncNewTarget->m_pvNode[iNewSlot]
                    = pncOldList->m_pvNode[i];
                ++iNewSlot;
            }
            // no, record stays in its current bucket chain
            else
            {
                // node in old bucket chain full?
                if (iOldSlot == NODES_PER_CLUMP)
                {
                    // the calling routine has passed in a FreeList adequate
                    // for all needs
                    IRTLASSERT(pncFreeList != NULL);
                    pncTmp = pncFreeList;
                    pncFreeList = pncFreeList->m_pncNext;
                    pncTmp->Clear();
                    pncOldTarget->m_pncNext = pncTmp;
                    pncOldTarget = pncTmp;
                    iOldSlot = 0;
                }
                pncOldTarget->m_dwKeySigs[iOldSlot]
                    = pncOldList->m_dwKeySigs[i];
                pncOldTarget->m_pvNode[iOldSlot]
                    = pncOldList->m_pvNode[i];
                ++iOldSlot;
            }
            // clear old slot
            pncOldList->m_dwKeySigs[i] = HASH_INVALID_SIGNATURE;
            pncOldList->m_pvNode[i] = NULL;
        }
        // keep walking down the original bucket chain; drained clumps
        // are recycled onto the freelist
        pncTmp = pncOldList;
        pncOldList = pncOldList->m_pncNext;
        // ncFirst is a stack variable, not allocated on the heap
        if (pncTmp != &ncFirst)
        {
            pncTmp->m_pncNext = pncFreeList;
            pncFreeList = pncTmp;
        }
    }
    // delete any leftover nodes
    while (pncFreeList != NULL)
    {
        pncTmp = pncFreeList;
        pncFreeList = pncFreeList->m_pncNext;
#ifdef _DEBUG
        pncTmp->m_pncNext = NULL; // or ~CNodeClump will ASSERT
#endif // _DEBUG
        _FreeNodeClump(pncTmp);
    }
#ifdef _DEBUG
    ncFirst.m_pncNext = NULL; // or ~CNodeClump will ASSERT
#endif // _DEBUG
    return LK_SUCCESS;
} // CLKRLinearHashTable::_SplitRecordSet
  2465. //------------------------------------------------------------------------
  2466. // Function: CLKRLinearHashTable::_Contract
  2467. // Synopsis: Contract the table by deleting the last bucket in the active
  2468. // address space. Return the records to the "buddy" of the
  2469. // deleted bucket.
  2470. //------------------------------------------------------------------------
  2471. LK_RETCODE
  2472. CLKRLinearHashTable::_Contract()
  2473. {
  2474. WriteLock();
  2475. // update the state variables (expansion ptr, level and mask)
  2476. if (m_iExpansionIdx > 0)
  2477. --m_iExpansionIdx;
  2478. else
  2479. {
  2480. --m_nLevel;
  2481. m_iExpansionIdx = (1 << m_nLevel) - 1;
  2482. IRTLASSERT(m_nLevel > 0 && m_iExpansionIdx > 0);
  2483. m_dwBktAddrMask >>= 1;
  2484. IRTLASSERT((m_dwBktAddrMask & (m_dwBktAddrMask+1)) == 0); // 00011..111
  2485. #ifdef LKR_MASK
  2486. m_dwBktAddrMask1 >>= 1;
  2487. IRTLASSERT(m_dwBktAddrMask1 == ((m_dwBktAddrMask << 1) | 1));
  2488. IRTLASSERT((m_dwBktAddrMask1 & (m_dwBktAddrMask1+1)) == 0);
  2489. #endif // LKR_MASK
  2490. }
  2491. // The last bucket is the one that will be emptied
  2492. CBucket* pbktLast = _Bucket(m_cActiveBuckets - 1);
  2493. pbktLast->WriteLock();
  2494. // Decrement after calculating pbktLast, or _Bucket() will assert.
  2495. --m_cActiveBuckets;
  2496. // Where the nodes from pbktLast will end up
  2497. CBucket* pbktNew = _Bucket(m_iExpansionIdx);
  2498. pbktNew->WriteLock();
  2499. // Now we work out if we need to allocate any extra CNodeClumps. We do
  2500. // this up front, before calling _MergeRecordSets, as it's hard to
  2501. // gracefully recover from the depths of that routine should we run
  2502. // out of memory.
  2503. CNodeClump* pnc;
  2504. int c = 0;
  2505. // First, count the number of items in the old bucket
  2506. for (pnc = &pbktLast->m_ncFirst; pnc != NULL; pnc = pnc->m_pncNext)
  2507. {
  2508. for (DWORD i = 0; i < NODES_PER_CLUMP; i++)
  2509. {
  2510. if (!pnc->IsEmptyNode(i))
  2511. {
  2512. IRTLASSERT(!pnc->InvalidSignature(i));
  2513. c++;
  2514. }
  2515. }
  2516. }
  2517. // Then, subtract off the number of empty slots in the new bucket
  2518. for (pnc = &pbktNew->m_ncFirst; pnc != NULL; pnc = pnc->m_pncNext)
  2519. {
  2520. for (DWORD i = 0; i < NODES_PER_CLUMP; i++)
  2521. {
  2522. if (pnc->IsEmptyNode(i))
  2523. {
  2524. IRTLASSERT(pnc->InvalidSignature(i));
  2525. c--;
  2526. }
  2527. }
  2528. }
  2529. CNodeClump* pncFreeList = NULL; // list of nodes available for reuse
  2530. LK_RETCODE lkrc = LK_SUCCESS;
  2531. // Do we need to allocate CNodeClumps to accommodate the surplus items?
  2532. if (c > 0)
  2533. {
  2534. pncFreeList = _AllocateNodeClump();
  2535. if (pncFreeList == NULL)
  2536. lkrc = LK_ALLOC_FAIL;
  2537. else if (c > NODES_PER_CLUMP)
  2538. {
  2539. // In the worst case, we need a 2-element freelist for
  2540. // _MergeRecordSets. Two CNodeClumps always suffice since the
  2541. // freelist will be augmented by the CNodeClumps from the old
  2542. // bucket as they are processed.
  2543. pnc = _AllocateNodeClump();
  2544. if (pnc == NULL)
  2545. {
  2546. _FreeNodeClump(pncFreeList);
  2547. lkrc = LK_ALLOC_FAIL;
  2548. }
  2549. else
  2550. pncFreeList->m_pncNext = pnc;
  2551. }
  2552. }
  2553. // Abort if we couldn't allocate enough CNodeClumps
  2554. if (lkrc != LK_SUCCESS)
  2555. {
  2556. // undo the changes to the state variables
  2557. if (++m_iExpansionIdx == (1U << m_nLevel))
  2558. {
  2559. ++m_nLevel;
  2560. m_iExpansionIdx = 0;
  2561. m_dwBktAddrMask = (m_dwBktAddrMask << 1) | 1;
  2562. #ifdef LKR_MASK
  2563. m_dwBktAddrMask1 = (m_dwBktAddrMask << 1) | 1;
  2564. #endif // LKR_MASK
  2565. }
  2566. ++m_cActiveBuckets;
  2567. // Unlock the buckets and the table
  2568. pbktLast->WriteUnlock();
  2569. pbktNew->WriteUnlock();
  2570. WriteUnlock();
  2571. return lkrc;
  2572. }
  2573. // Copy the chain of records from pbktLast
  2574. CNodeClump ncOldFirst = pbktLast->m_ncFirst;
  2575. // destroy pbktLast
  2576. pbktLast->m_ncFirst.Clear();
  2577. pbktLast->WriteUnlock();
  2578. // remove segment, if empty
  2579. if (_SegIndex(m_cActiveBuckets) == 0)
  2580. {
  2581. #ifdef _DEBUG
  2582. // double-check that the supposedly empty segment is really empty
  2583. IRTLASSERT(_Segment(m_cActiveBuckets) != NULL);
  2584. for (DWORD i = 0; i < m_dwSegSize; ++i)
  2585. {
  2586. CBucket* pbkt = &_Segment(m_cActiveBuckets)->Slot(i);
  2587. IRTLASSERT(pbkt->IsWriteUnlocked() && pbkt->IsReadUnlocked());
  2588. IRTLASSERT(pbkt->m_ncFirst.IsLastClump());
  2589. for (DWORD j = 0; j < NODES_PER_CLUMP; ++j)
  2590. {
  2591. IRTLASSERT(pbkt->m_ncFirst.InvalidSignature(j)
  2592. && pbkt->m_ncFirst.IsEmptyNode(j));
  2593. }
  2594. }
  2595. #endif
  2596. _FreeSegment(_Segment(m_cActiveBuckets));
  2597. _Segment(m_cActiveBuckets) = NULL;
  2598. }
  2599. // reduce directory of segments if possible
  2600. if (m_cActiveBuckets <= ((m_cDirSegs * m_dwSegSize) >> 1)
  2601. && m_cDirSegs > MIN_DIRSIZE)
  2602. {
  2603. DWORD cDirSegsNew = m_cDirSegs >> 1;
  2604. CDirEntry* paDirSegsNew = _AllocateSegmentDirectory(cDirSegsNew);
  2605. // Memory allocation failure here does not require us to abort; it
  2606. // just means that the directory of segments is larger than we'd like.
  2607. if (paDirSegsNew != NULL)
  2608. {
  2609. for (DWORD j = 0; j < cDirSegsNew; j++)
  2610. paDirSegsNew[j] = m_paDirSegs[j];
  2611. for (j = 0; j < m_cDirSegs; j++)
  2612. m_paDirSegs[j].m_pseg = NULL;
  2613. _FreeSegmentDirectory(m_paDirSegs);
  2614. m_paDirSegs = paDirSegsNew;
  2615. m_cDirSegs = cDirSegsNew;
  2616. }
  2617. }
  2618. // release the table lock before doing the reorg
  2619. WriteUnlock();
  2620. lkrc = _MergeRecordSets(pbktNew, &ncOldFirst, pncFreeList);
  2621. pbktNew->WriteUnlock();
  2622. #ifdef _DEBUG
  2623. ncOldFirst.m_pncNext = NULL; // or ~CNodeClump will ASSERT
  2624. #endif // _DEBUG
  2625. return lkrc;
  2626. } // CLKRLinearHashTable::_Contract
  2627. //------------------------------------------------------------------------
  2628. // Function: CLKRLinearHashTable::_MergeRecordSets
  2629. // Synopsis: Merge two record sets. Copy the contents of pncOldList
  2630. // into pbktNewTarget.
  2631. //------------------------------------------------------------------------
LK_RETCODE
CLKRLinearHashTable::_MergeRecordSets(
    CBucket* pbktNewTarget,
    CNodeClump* pncOldList,
    CNodeClump* pncFreeList
    )
{
    IRTLASSERT(pbktNewTarget != NULL && pncOldList != NULL);

    CNodeClump* pncTmp = NULL;
    // Remember the head of the old list: the caller owns it (it is a
    // stack variable there), so it must never be pushed onto the free
    // list or freed below.
    CNodeClump* const pncOldFirst = pncOldList;
    CNodeClump* pncNewTarget = &pbktNewTarget->m_ncFirst;
    DWORD iNewSlot;

    // find the first nodeclump in the new target bucket with an empty slot
    while (!pncNewTarget->IsLastClump())
    {
        for (iNewSlot = 0; iNewSlot < NODES_PER_CLUMP; iNewSlot++)
            if (pncNewTarget->IsEmptyNode(iNewSlot))
                break;
        if (iNewSlot == NODES_PER_CLUMP)
            pncNewTarget = pncNewTarget->m_pncNext;
        else
            break;
    }
    IRTLASSERT(pncNewTarget != NULL);

    // find the first empty slot in pncNewTarget;
    // if none, iNewSlot == NODES_PER_CLUMP
    for (iNewSlot = 0; iNewSlot < NODES_PER_CLUMP; iNewSlot++)
    {
        if (pncNewTarget->IsEmptyNode(iNewSlot))
        {
            break;
        }
    }

    // Walk the old list, copying each occupied node into an empty slot
    // of the target bucket's chain of nodeclumps.
    while (pncOldList != NULL)
    {
        for (DWORD i = 0; i < NODES_PER_CLUMP; i++)
        {
            if (!pncOldList->IsEmptyNode(i))
            {
                // any empty slots left in pncNewTarget?
                if (iNewSlot == NODES_PER_CLUMP)
                {
                    // no, so walk down pncNewTarget until we find another
                    // empty slot
                    while (!pncNewTarget->IsLastClump())
                    {
                        pncNewTarget = pncNewTarget->m_pncNext;
                        for (iNewSlot = 0;
                             iNewSlot < NODES_PER_CLUMP;
                             iNewSlot++)
                        {
                            if (pncNewTarget->IsEmptyNode(iNewSlot))
                                goto found_slot;
                        }
                    }
                    // Oops, reached the last nodeclump in pncNewTarget
                    // and it's full. Get a new nodeclump off the free
                    // list, which is big enough to handle all needs.
                    IRTLASSERT(pncNewTarget != NULL);
                    IRTLASSERT(pncFreeList != NULL);
                    pncTmp = pncFreeList;
                    pncFreeList = pncFreeList->m_pncNext;
                    pncTmp->Clear();
                    pncNewTarget->m_pncNext = pncTmp;
                    pncNewTarget = pncTmp;
                    iNewSlot = 0;
                }
            found_slot:
                // We have an empty slot in pncNewTarget
                IRTLASSERT(iNewSlot < NODES_PER_CLUMP
                           && pncNewTarget != NULL
                           && pncNewTarget->IsEmptyNode(iNewSlot)
                           && pncNewTarget->InvalidSignature(iNewSlot));
                // Let's copy the node from pncOldList
                pncNewTarget->m_dwKeySigs[iNewSlot]
                    = pncOldList->m_dwKeySigs[i];
                pncNewTarget->m_pvNode[iNewSlot]
                    = pncOldList->m_pvNode[i];
                // Clear old slot
                pncOldList->m_dwKeySigs[i] = HASH_INVALID_SIGNATURE;
                pncOldList->m_pvNode[i] = NULL;
                // find the next free slot in pncNewTarget
                while (++iNewSlot < NODES_PER_CLUMP)
                {
                    if (pncNewTarget->IsEmptyNode(iNewSlot))
                    {
                        break;
                    }
                }
            }
            else // the old slot is empty: nothing to copy
            {
                IRTLASSERT(pncOldList->InvalidSignature(i));
            }
        }
        // Move into the next nodeclump in pncOldList
        pncTmp = pncOldList;
        pncOldList = pncOldList->m_pncNext;
        // Append to the free list. Don't put the first node of
        // pncOldList on the free list, as it's a stack variable.
        if (pncTmp != pncOldFirst)
        {
            pncTmp->m_pncNext = pncFreeList;
            pncFreeList = pncTmp;
        }
    }
    // delete any leftover nodes
    while (pncFreeList != NULL)
    {
        pncTmp = pncFreeList;
        pncFreeList = pncFreeList->m_pncNext;
#ifdef _DEBUG
        pncTmp->m_pncNext = NULL; // or ~CNodeClump will ASSERT
#endif // _DEBUG
        _FreeNodeClump(pncTmp);
    }
    return LK_SUCCESS;
} // CLKRLinearHashTable::_MergeRecordSets
  2750. //------------------------------------------------------------------------
  2751. // Function: CLKRLinearHashTable::_InitializeIterator
  2752. // Synopsis: Make the iterator point to the first record in the hash table.
  2753. //------------------------------------------------------------------------
LK_RETCODE
CLKRLinearHashTable::_InitializeIterator(
    CIterator* piter)
{
    if (!IsUsable())
        return LK_UNUSABLE;

    IRTLASSERT(piter != NULL);
    // The caller must already hold the subtable lock in the mode the
    // iterator was created with (write or read).
    IRTLASSERT(piter->m_lkl == LKL_WRITELOCK
               ? IsWriteLocked()
               : IsReadLocked());
    // An iterator already bound to a subtable must not be reinitialized
    if (piter == NULL || piter->m_plht != NULL)
        return LK_BAD_ITERATOR;

    // Bind the iterator to this subtable, starting at bucket 0
    piter->m_plht = this;
    piter->m_dwBucketAddr = 0;

    CBucket* pbkt = _Bucket(piter->m_dwBucketAddr);
    IRTLASSERT(pbkt != NULL);
    // Lock the first bucket; IncrementIterator/_CloseIterator release it
    // (and hand the lock along bucket-by-bucket) as iteration proceeds.
    if (piter->m_lkl == LKL_WRITELOCK)
        pbkt->WriteLock();
    else
        pbkt->ReadLock();

    // Position just before the first slot of the first nodeclump
    piter->m_pnc = &pbkt->m_ncFirst;
    piter->m_iNode = -1;

    // Let IncrementIterator do the hard work of finding the first
    // slot in use.
    return IncrementIterator(piter);
} // CLKRLinearHashTable::_InitializeIterator
  2780. //------------------------------------------------------------------------
  2781. // Function: CLKRHashTable::InitializeIterator
  2782. // Synopsis: make the iterator point to the first record in the hash table
  2783. //------------------------------------------------------------------------
  2784. LK_RETCODE
  2785. CLKRHashTable::InitializeIterator(
  2786. CIterator* piter)
  2787. {
  2788. if (!IsUsable())
  2789. return LK_UNUSABLE;
  2790. IRTLASSERT(piter != NULL && piter->m_pht == NULL);
  2791. if (piter == NULL || piter->m_pht != NULL)
  2792. return LK_BAD_ITERATOR;
  2793. // First, lock all the subtables
  2794. if (piter->m_lkl == LKL_WRITELOCK)
  2795. WriteLock();
  2796. else
  2797. ReadLock();
  2798. // Must call IsValid inside a lock to ensure that none of the state
  2799. // variables change while it's being evaluated
  2800. IRTLASSERT(IsValid());
  2801. if (!IsValid())
  2802. return LK_UNUSABLE;
  2803. piter->m_pht = this;
  2804. piter->m_ist = -1;
  2805. piter->m_plht = NULL;
  2806. // Let IncrementIterator do the hard work of finding the first
  2807. // valid node in the subtables.
  2808. return IncrementIterator(piter);
  2809. } // CLKRHashTable::InitializeIterator
  2810. //------------------------------------------------------------------------
  2811. // Function: CLKRLinearHashTable::IncrementIterator
  2812. // Synopsis: move the iterator on to the next record in the hash table
  2813. //------------------------------------------------------------------------
LK_RETCODE
CLKRLinearHashTable::IncrementIterator(
    CIterator* piter)
{
    if (!IsUsable())
        return LK_UNUSABLE;

    IRTLASSERT(piter != NULL);
    IRTLASSERT(piter->m_plht == this);
    // Caller must hold the subtable lock matching the iterator's mode
    IRTLASSERT(piter->m_lkl == LKL_WRITELOCK
               ? IsWriteLocked()
               : IsReadLocked());
    IRTLASSERT(piter->m_dwBucketAddr < m_cActiveBuckets);
    IRTLASSERT(piter->m_pnc != NULL);
    IRTLASSERT(-1 <= piter->m_iNode && piter->m_iNode < NODES_PER_CLUMP);

    if (piter == NULL || piter->m_plht != this)
        return LK_BAD_ITERATOR;

    const void* pvRecord = NULL;

    // m_iNode == -1 means "positioned before the first slot", i.e. no
    // record is currently referenced.
    if (piter->m_iNode >= 0)
    {
        // Release the reference acquired in the previous call to
        // IncrementIterator
        pvRecord = piter->m_pnc->m_pvNode[piter->m_iNode];
        _AddRefRecord(pvRecord, -1);
    }

    // Outer loop: buckets.  Inner loop: nodeclumps within the current
    // bucket's chain.  Innermost loop: slots within a nodeclump.
    do
    {
        do
        {
            // find the next slot in the nodeclump that's in use
            while (++piter->m_iNode < NODES_PER_CLUMP)
            {
                pvRecord = piter->m_pnc->m_pvNode[piter->m_iNode];
                if (pvRecord != NULL)
                {
                    // Add a new reference
                    _AddRefRecord(pvRecord, +1);
                    return LK_SUCCESS;
                }
            }
            // try the next nodeclump in the bucket chain
            piter->m_iNode = -1;
            piter->m_pnc = piter->m_pnc->m_pncNext;
        } while (piter->m_pnc != NULL);

        // Exhausted this bucket chain. Unlock it.
        CBucket* pbkt = _Bucket(piter->m_dwBucketAddr);
        IRTLASSERT(pbkt != NULL);
        IRTLASSERT(piter->m_lkl == LKL_WRITELOCK
                   ? pbkt->IsWriteLocked()
                   : pbkt->IsReadLocked());
        if (piter->m_lkl == LKL_WRITELOCK)
            pbkt->WriteUnlock();
        else
            pbkt->ReadUnlock();

        // Try the next bucket, if there is one
        if (++piter->m_dwBucketAddr < m_cActiveBuckets)
        {
            pbkt = _Bucket(piter->m_dwBucketAddr);
            IRTLASSERT(pbkt != NULL);
            // Lock the new bucket before touching its nodeclumps
            if (piter->m_lkl == LKL_WRITELOCK)
                pbkt->WriteLock();
            else
                pbkt->ReadLock();
            piter->m_pnc = &pbkt->m_ncFirst;
        }
    } while (piter->m_dwBucketAddr < m_cActiveBuckets);

    // We have fallen off the end of the hashtable
    piter->m_iNode = -1;
    piter->m_pnc = NULL;
    return LK_NO_MORE_ELEMENTS;
} // CLKRLinearHashTable::IncrementIterator
  2884. //------------------------------------------------------------------------
  2885. // Function: CLKRHashTable::IncrementIterator
  2886. // Synopsis: move the iterator on to the next record in the hash table
  2887. //------------------------------------------------------------------------
LK_RETCODE
CLKRHashTable::IncrementIterator(
    CIterator* piter)
{
    if (!IsUsable())
        return LK_UNUSABLE;

    IRTLASSERT(piter != NULL);
    IRTLASSERT(piter->m_pht == this);
    IRTLASSERT(-1 <= piter->m_ist
               && piter->m_ist < static_cast<int>(m_cSubTables));
    if (piter == NULL || piter->m_pht != this)
        return LK_BAD_ITERATOR;

    // Table is already locked
    if (!IsValid())
        return LK_UNUSABLE;

    LK_RETCODE lkrc;
    // The subtables operate on the iterator through its CLHTIterator base
    CLHTIterator* pBaseIter = static_cast<CLHTIterator*>(piter);

    for (;;)
    {
        // Do we have a valid iterator into a subtable? If not, get one.
        while (piter->m_plht == NULL)
        {
            while (++piter->m_ist < static_cast<int>(m_cSubTables))
            {
                // _InitializeIterator binds piter->m_plht and returns
                // LK_SUCCESS when it lands on a record
                lkrc = m_palhtDir[piter->m_ist]->_InitializeIterator(piter);
                if (lkrc == LK_SUCCESS)
                {
                    IRTLASSERT(m_palhtDir[piter->m_ist] == piter->m_plht);
                    return lkrc;
                }
                else if (lkrc == LK_NO_MORE_ELEMENTS)
                    // Empty subtable: close its iterator (which resets
                    // piter->m_plht to NULL) and try the next subtable
                    lkrc = piter->m_plht->_CloseIterator(pBaseIter);
                if (lkrc != LK_SUCCESS)
                    return lkrc;
            }
            // There are no more subtables left.
            return LK_NO_MORE_ELEMENTS;
        }
        // We already have a valid iterator into a subtable. Increment it.
        lkrc = piter->m_plht->IncrementIterator(pBaseIter);
        if (lkrc == LK_SUCCESS)
            return lkrc;
        // We've exhausted that subtable. Move on.
        if (lkrc == LK_NO_MORE_ELEMENTS)
            lkrc = piter->m_plht->_CloseIterator(pBaseIter);
        if (lkrc != LK_SUCCESS)
            return lkrc;
    }
} // CLKRHashTable::IncrementIterator
  2937. //------------------------------------------------------------------------
  2938. // Function: CLKRLinearHashTable::_CloseIterator
  2939. // Synopsis: release the resources held by the iterator
  2940. //------------------------------------------------------------------------
LK_RETCODE
CLKRLinearHashTable::_CloseIterator(
    CIterator* piter)
{
    if (!IsUsable())
        return LK_UNUSABLE;

    IRTLASSERT(piter != NULL);
    IRTLASSERT(piter->m_plht == this);
    // Caller must hold the subtable lock matching the iterator's mode
    IRTLASSERT(piter->m_lkl == LKL_WRITELOCK
               ? IsWriteLocked()
               : IsReadLocked());
    IRTLASSERT(piter->m_dwBucketAddr <= m_cActiveBuckets);
    IRTLASSERT(-1 <= piter->m_iNode && piter->m_iNode < NODES_PER_CLUMP);
    if (piter == NULL || piter->m_plht != this)
        return LK_BAD_ITERATOR;

    // Are we abandoning the iterator before the end of the table?
    // If so, need to unlock the bucket.
    if (piter->m_dwBucketAddr < m_cActiveBuckets)
    {
        CBucket* pbkt = _Bucket(piter->m_dwBucketAddr);
        IRTLASSERT(pbkt != NULL);
        IRTLASSERT(piter->m_lkl == LKL_WRITELOCK
                   ? pbkt->IsWriteLocked()
                   : pbkt->IsReadLocked());

        // m_iNode in [0, NODES_PER_CLUMP) means the iterator currently
        // references a record whose refcount must be dropped
        if (0 <= piter->m_iNode && piter->m_iNode < NODES_PER_CLUMP)
        {
            IRTLASSERT(piter->m_pnc != NULL);
            const void* pvRecord = piter->m_pnc->m_pvNode[piter->m_iNode];
            _AddRefRecord(pvRecord, -1);
        }

        if (piter->m_lkl == LKL_WRITELOCK)
            pbkt->WriteUnlock();
        else
            pbkt->ReadUnlock();
    }

    // Detach the iterator from this subtable
    piter->m_plht = NULL;
    piter->m_pnc = NULL;

    return LK_SUCCESS;
} // CLKRLinearHashTable::_CloseIterator
  2980. //------------------------------------------------------------------------
  2981. // Function: CLKRHashTable::CloseIterator
  2982. // Synopsis: release the resources held by the iterator
  2983. //------------------------------------------------------------------------
LK_RETCODE
CLKRHashTable::CloseIterator(
    CIterator* piter)
{
    if (!IsUsable())
        return LK_UNUSABLE;

    IRTLASSERT(piter != NULL);
    IRTLASSERT(piter->m_pht == this);
    IRTLASSERT(-1 <= piter->m_ist
               && piter->m_ist <= static_cast<int>(m_cSubTables));
    if (piter == NULL || piter->m_pht != this)
        return LK_BAD_ITERATOR;

    LK_RETCODE lkrc = LK_SUCCESS;

    if (!IsValid())
        lkrc = LK_UNUSABLE;
    else
    {
        // Are we abandoning the iterator before we've reached the end?
        // If so, close the subtable iterator.
        if (piter->m_plht != NULL)
        {
            IRTLASSERT(piter->m_ist < static_cast<int>(m_cSubTables));
            CLHTIterator* pBaseIter = static_cast<CLHTIterator*>(piter);
            piter->m_plht->_CloseIterator(pBaseIter);
        }
    }

    // Unlock all the subtables (locked since InitializeIterator)
    if (piter->m_lkl == LKL_WRITELOCK)
        WriteUnlock();
    else
        ReadUnlock();

    // Reset the iterator to its unbound state
    piter->m_plht = NULL;
    piter->m_pht = NULL;
    piter->m_ist = -1;

    return lkrc;
} // CLKRHashTable::CloseIterator
  3020. //------------------------------------------------------------------------
  3021. // Function: CLKRHashTable::WriteLock
  3022. // Synopsis: Lock all subtables for writing
  3023. //------------------------------------------------------------------------
  3024. void
  3025. CLKRHashTable::WriteLock()
  3026. {
  3027. for (DWORD i = 0; i < m_cSubTables; i++)
  3028. {
  3029. m_palhtDir[i]->WriteLock();
  3030. IRTLASSERT(m_palhtDir[i]->IsWriteLocked());
  3031. }
  3032. } // CLKRHashTable::WriteLock
  3033. //------------------------------------------------------------------------
  3034. // Function: CLKRHashTable::ReadLock
  3035. // Synopsis: Lock all subtables for reading
  3036. //------------------------------------------------------------------------
  3037. void
  3038. CLKRHashTable::ReadLock() const
  3039. {
  3040. for (DWORD i = 0; i < m_cSubTables; i++)
  3041. {
  3042. m_palhtDir[i]->ReadLock();
  3043. IRTLASSERT(m_palhtDir[i]->IsReadLocked());
  3044. }
  3045. } // CLKRHashTable::ReadLock
  3046. //------------------------------------------------------------------------
  3047. // Function: CLKRHashTable::WriteUnlock
  3048. // Synopsis: Unlock all subtables
  3049. //------------------------------------------------------------------------
  3050. void
  3051. CLKRHashTable::WriteUnlock() const
  3052. {
  3053. // unlock in reverse order: LIFO
  3054. for (DWORD i = m_cSubTables; i-- > 0; )
  3055. {
  3056. IRTLASSERT(m_palhtDir[i]->IsWriteLocked());
  3057. m_palhtDir[i]->WriteUnlock();
  3058. IRTLASSERT(m_palhtDir[i]->IsWriteUnlocked());
  3059. }
  3060. } // CLKRHashTable::WriteUnlock
  3061. //------------------------------------------------------------------------
  3062. // Function: CLKRHashTable::ReadUnlock
  3063. // Synopsis: Unlock all subtables
  3064. //------------------------------------------------------------------------
  3065. void
  3066. CLKRHashTable::ReadUnlock() const
  3067. {
  3068. // unlock in reverse order: LIFO
  3069. for (DWORD i = m_cSubTables; i-- > 0; )
  3070. {
  3071. IRTLASSERT(m_palhtDir[i]->IsReadLocked());
  3072. m_palhtDir[i]->ReadUnlock();
  3073. IRTLASSERT(m_palhtDir[i]->IsReadUnlocked());
  3074. }
  3075. } // CLKRHashTable::ReadUnlock
  3076. //------------------------------------------------------------------------
  3077. // Function: CLKRHashTable::IsWriteLocked
  3078. // Synopsis: Are all subtables write-locked?
  3079. //------------------------------------------------------------------------
  3080. bool
  3081. CLKRHashTable::IsWriteLocked() const
  3082. {
  3083. bool fLocked = (m_cSubTables > 0);
  3084. for (DWORD i = 0; i < m_cSubTables; i++)
  3085. {
  3086. fLocked = fLocked && m_palhtDir[i]->IsWriteLocked();
  3087. }
  3088. return fLocked;
  3089. }
  3090. //------------------------------------------------------------------------
  3091. // Function: CLKRHashTable::IsReadLocked
  3092. // Synopsis: Are all subtables read-locked?
  3093. //------------------------------------------------------------------------
  3094. bool
  3095. CLKRHashTable::IsReadLocked() const
  3096. {
  3097. bool fLocked = (m_cSubTables > 0);
  3098. for (DWORD i = 0; i < m_cSubTables; i++)
  3099. {
  3100. fLocked = fLocked && m_palhtDir[i]->IsReadLocked();
  3101. }
  3102. return fLocked;
  3103. }
  3104. //------------------------------------------------------------------------
  3105. // Function: CLKRHashTable::IsWriteUnlocked
  3106. // Synopsis: Are all subtables write-unlocked?
  3107. //------------------------------------------------------------------------
  3108. bool
  3109. CLKRHashTable::IsWriteUnlocked() const
  3110. {
  3111. bool fUnlocked = (m_cSubTables > 0);
  3112. for (DWORD i = 0; i < m_cSubTables; i++)
  3113. {
  3114. fUnlocked = fUnlocked && m_palhtDir[i]->IsWriteUnlocked();
  3115. }
  3116. return fUnlocked;
  3117. }
  3118. //------------------------------------------------------------------------
  3119. // Function: CLKRHashTable::IsReadUnlocked
  3120. // Synopsis: Are all subtables read-unlocked?
  3121. //------------------------------------------------------------------------
  3122. bool
  3123. CLKRHashTable::IsReadUnlocked() const
  3124. {
  3125. bool fUnlocked = (m_cSubTables > 0);
  3126. for (DWORD i = 0; i < m_cSubTables; i++)
  3127. {
  3128. fUnlocked = fUnlocked && m_palhtDir[i]->IsReadUnlocked();
  3129. }
  3130. return fUnlocked;
  3131. }
  3132. //------------------------------------------------------------------------
  3133. // Function: CLKRHashTable::Size
  3134. // Synopsis: Number of elements in the table
  3135. //------------------------------------------------------------------------
  3136. DWORD
  3137. CLKRHashTable::Size() const
  3138. {
  3139. DWORD cSize = 0;
  3140. for (DWORD i = 0; i < m_cSubTables; i++)
  3141. cSize += m_palhtDir[i]->Size();
  3142. return cSize;
  3143. } // CLKRHashTable::Size
  3144. //------------------------------------------------------------------------
  3145. // Function: CLKRHashTable::MaxSize
  3146. // Synopsis: Maximum possible number of elements in the table
  3147. //------------------------------------------------------------------------
  3148. DWORD
  3149. CLKRHashTable::MaxSize() const
  3150. {
  3151. return (m_cSubTables == 0) ? 0 : m_cSubTables * m_palhtDir[0]->MaxSize();
  3152. } // CLKRHashTable::MaxSize
  3153. //------------------------------------------------------------------------
  3154. // Function: CLKRHashTable::IsValid
  3155. // Synopsis: is the table valid?
  3156. //------------------------------------------------------------------------
  3157. bool
  3158. CLKRHashTable::IsValid() const
  3159. {
  3160. bool f = (m_lkrcState == LK_SUCCESS // serious internal failure?
  3161. && (m_palhtDir != NULL && m_cSubTables > 0)
  3162. && ValidSignature());
  3163. for (DWORD i = 0; f && i < m_cSubTables; i++)
  3164. f = f && m_palhtDir[i]->IsValid();
  3165. if (!f)
  3166. m_lkrcState = LK_UNUSABLE;
  3167. return f;
  3168. } // CLKRHashTable::IsValid
  3169. //------------------------------------------------------------------------
// Function: CLKRLinearHashTable::SetBucketLockSpinCount
  3171. // Synopsis:
  3172. //------------------------------------------------------------------------
  3173. void
  3174. CLKRLinearHashTable::SetBucketLockSpinCount(
  3175. WORD wSpins)
  3176. {
  3177. m_wBucketLockSpins = wSpins;
  3178. if (BucketLock::PerLockSpin() != LOCK_INDIVIDUAL_SPIN)
  3179. return;
  3180. for (DWORD i = 0; i < m_cDirSegs; i++)
  3181. {
  3182. CSegment* pseg = m_paDirSegs[i].m_pseg;
  3183. if (pseg != NULL)
  3184. {
  3185. for (DWORD j = 0; j < m_dwSegSize; ++j)
  3186. {
  3187. pseg->Slot(j).SetSpinCount(wSpins);
  3188. }
  3189. }
  3190. }
  3191. } // CLKRLinearHashTable::SetBucketLockSpinCount
  3192. //------------------------------------------------------------------------
// Function: CLKRLinearHashTable::GetBucketLockSpinCount
  3194. // Synopsis:
  3195. //------------------------------------------------------------------------
WORD
CLKRLinearHashTable::GetBucketLockSpinCount()
{
    // The spin count last applied to the per-bucket locks
    return m_wBucketLockSpins;
} // CLKRLinearHashTable::GetBucketLockSpinCount
  3201. //------------------------------------------------------------------------
  3202. // Function: CLKRHashTable::SetTableLockSpinCount
  3203. // Synopsis:
  3204. //------------------------------------------------------------------------
  3205. void
  3206. CLKRHashTable::SetTableLockSpinCount(
  3207. WORD wSpins)
  3208. {
  3209. for (DWORD i = 0; i < m_cSubTables; i++)
  3210. m_palhtDir[i]->SetTableLockSpinCount(wSpins);
  3211. } // CLKRHashTable::SetTableLockSpinCount
  3212. //------------------------------------------------------------------------
  3213. // Function: CLKRHashTable::GetTableLockSpinCount
  3214. // Synopsis:
  3215. //------------------------------------------------------------------------
  3216. WORD
  3217. CLKRHashTable::GetTableLockSpinCount()
  3218. {
  3219. return ((m_cSubTables == 0)
  3220. ? LOCK_DEFAULT_SPINS
  3221. : m_palhtDir[0]->GetTableLockSpinCount());
  3222. } // CLKRHashTable::GetTableLockSpinCount
  3223. //------------------------------------------------------------------------
  3224. // Function: CLKRHashTable::SetBucketLockSpinCount
  3225. // Synopsis:
  3226. //------------------------------------------------------------------------
  3227. void
  3228. CLKRHashTable::SetBucketLockSpinCount(
  3229. WORD wSpins)
  3230. {
  3231. for (DWORD i = 0; i < m_cSubTables; i++)
  3232. m_palhtDir[i]->SetBucketLockSpinCount(wSpins);
  3233. } // CLKRHashTable::SetBucketLockSpinCount
  3234. //------------------------------------------------------------------------
  3235. // Function: CLKRHashTable::GetBucketLockSpinCount
  3236. // Synopsis:
  3237. //------------------------------------------------------------------------
  3238. WORD
  3239. CLKRHashTable::GetBucketLockSpinCount()
  3240. {
  3241. return ((m_cSubTables == 0)
  3242. ? LOCK_DEFAULT_SPINS
  3243. : m_palhtDir[0]->GetBucketLockSpinCount());
  3244. } // CLKRHashTable::GetBucketLockSpinCount
  3245. #ifdef __LKRHASH_NAMESPACE__
  3246. }
  3247. #endif // __LKRHASH_NAMESPACE__