Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1240 lines
29 KiB

  1. /*++
  2. Copyright (c) 1998-2000 Microsoft Corporation
  3. Module Name:
  4. hash.c
  5. Abstract:
  6. This module houses the kernel-mode hash routines.
  7. Author:
  8. Paul McDaniel (paulmcd) 28-Apr-2000
  9. Revision History:
  10. --*/
  11. #include "precomp.h"
  12. //
  13. // Private constants.
  14. //
  15. //
  16. // Private types.
  17. //
  18. //
  19. // Private prototypes.
  20. //
  21. NTSTATUS
  22. HashpFindEntry (
  23. IN PHASH_HEADER pHeader,
  24. IN PHASH_BUCKET pBucket OPTIONAL,
  25. IN ULONG HashValue,
  26. IN PHASH_KEY pKey,
  27. OUT PVOID * ppContext,
  28. OUT PULONG pIndex
  29. );
  30. LONG
  31. HashpCompare (
  32. IN PHASH_HEADER pHeader,
  33. IN ULONG HashValue,
  34. IN PHASH_KEY pKey,
  35. IN PHASH_ENTRY pEntry
  36. );
  37. LONG
  38. HashpCompareStreams (
  39. PHASH_KEY pKey,
  40. PHASH_ENTRY pEntry
  41. );
  42. VOID
  43. HashTrimList (
  44. IN PHASH_HEADER pHeader
  45. );
  46. //
  47. // linker commands
  48. //
  49. #ifdef ALLOC_PRAGMA
  50. #pragma alloc_text( PAGE, HashCreateList )
  51. #pragma alloc_text( PAGE, HashAddEntry )
  52. #pragma alloc_text( PAGE, HashFindEntry )
  53. #pragma alloc_text( PAGE, HashpFindEntry )
  54. #pragma alloc_text( PAGE, HashpCompare )
  55. #pragma alloc_text( PAGE, HashpCompareStreams )
  56. #pragma alloc_text( PAGE, HashClearEntries )
  57. #pragma alloc_text( PAGE, HashClearAllFileEntries )
  58. #pragma alloc_text( PAGE, HashProcessEntries )
  59. #pragma alloc_text( PAGE, HashDestroyList )
  60. #pragma alloc_text( PAGE, HashTrimList )
  61. #endif // ALLOC_PRAGMA
  62. //
  63. // Private globals.
  64. //
  65. #define CONTINUE -1
  66. #define FOUND 0
  67. #define NOT_FOUND 1
  68. //
  69. // We track how much memory we have used for the names we have in the hash.
  70. // We need to track the memory for both the file and stream components of the
  71. // name.
  72. //
  73. #define HASH_KEY_LENGTH( pHashKey ) \
  74. ((pHashKey)->FileName.Length + (pHashKey)->StreamNameLength)
  75. //
  76. // Public globals.
  77. //
  78. //
  79. // Public functions.
  80. //
  81. NTSTATUS
  82. HashCreateList(
  83. IN ULONG BucketCount,
  84. IN ULONG AllowedLength,
  85. IN ULONG PrefixLength OPTIONAL,
  86. IN PHASH_ENTRY_DESTRUCTOR pDestructor OPTIONAL,
  87. OUT PHASH_HEADER * ppHeader
  88. )
  89. {
  90. NTSTATUS Status;
  91. PHASH_HEADER pHeader;
  92. PAGED_CODE();
  93. ASSERT(ppHeader != NULL);
  94. pHeader = SR_ALLOCATE_STRUCT_WITH_SPACE( NonPagedPool,
  95. HASH_HEADER,
  96. sizeof(PHASH_BUCKET) * BucketCount,
  97. HASH_HEADER_TAG );
  98. if (pHeader == NULL)
  99. {
  100. Status = STATUS_INSUFFICIENT_RESOURCES;
  101. goto end;
  102. }
  103. RtlZeroMemory( pHeader,
  104. sizeof(HASH_HEADER)
  105. + (sizeof(PHASH_BUCKET) * BucketCount) );
  106. pHeader->Signature = HASH_HEADER_TAG;
  107. pHeader->BucketCount = BucketCount;
  108. pHeader->AllowedLength = AllowedLength;
  109. pHeader->PrefixLength = PrefixLength;
  110. pHeader->pDestructor = pDestructor;
  111. ExInitializeResourceLite(&pHeader->Lock);
  112. *ppHeader = pHeader;
  113. SrTrace(HASH, ("SR!HashCreateList(%p)\n", pHeader));
  114. Status = STATUS_SUCCESS;
  115. end:
  116. RETURN(Status);
  117. } // HashCreateList
NTSTATUS
HashAddEntry(
    IN PHASH_HEADER pHeader,
    IN PHASH_KEY pKey,
    IN PVOID pContext
    )
/*++

Routine Description:

    Inserts pKey -> pContext into the hash list, keeping each bucket's
    entry array sorted (see HashpCompare for the sort order).  If the
    key is already present its context is overwritten and
    STATUS_DUPLICATE_OBJECTID is returned.  The caller must hold
    pHeader->Lock exclusive.

Arguments:

    pHeader - the hash list.

    pKey - key to copy into the list; the FileName buffer holds the
        file name immediately followed by the optional stream name
        (StreamNameLength bytes).

    pContext - opaque context stored with the key.

Return Value:

    STATUS_SUCCESS, STATUS_DUPLICATE_OBJECTID if the key already
    existed (context updated), or a failure code.

--*/
{
    NTSTATUS Status = STATUS_SUCCESS;
    PHASH_BUCKET pNewBucket = NULL;     // grown replacement bucket
    PHASH_BUCKET pOldBucket = NULL;     // bucket replaced by a grow; freed on success
    PHASH_BUCKET pBucket = NULL;
    PHASH_ENTRY pEntry = NULL;
    ULONG HashValue = 0;
    ULONG HashBucket = 0;
    ULONG Index = 0;
    PVOID pTemp = NULL;
    PWCHAR pKeyBuffer = NULL;
    BOOLEAN lookedUpHashValue = FALSE;  // Use this to track if the
                                        // HashValue is valid.

    PAGED_CODE();

    ASSERT(IS_VALID_HASH_HEADER(pHeader));
    ASSERT(pKey != NULL);

    try {

        //
        // the caller is responsible for synchronizing access to this list
        // paulmcd: 1/01
        //

        ASSERT(ExIsResourceAcquiredExclusive(&pHeader->Lock));

        //
        // do we need to trim space ?
        //

        if (pHeader->UsedLength > pHeader->AllowedLength)
        {
            (VOID)HashTrimList(pHeader);
        }

        //
        // hash on the file-name portion only (case-insensitive)
        //

        Status = RtlHashUnicodeString( &(pKey->FileName),
                                       TRUE,
                                       HASH_STRING_ALGORITHM_DEFAULT,
                                       &HashValue );
        if (!NT_SUCCESS(Status)) {
            leave;
        }

        lookedUpHashValue = TRUE;

        HashBucket = HashValue % pHeader->BucketCount;

        pBucket = pHeader->Buckets[HashBucket];

        ASSERT(pBucket == NULL || IS_VALID_HASH_BUCKET(pBucket));

        //
        // find this entry in the bucket list
        //

        Status = HashpFindEntry( pHeader,
                                 pBucket,
                                 HashValue,
                                 pKey,
                                 &pTemp,
                                 &Index );

        if (Status != STATUS_OBJECT_NAME_NOT_FOUND)
        {
            //
            // did it fail ?
            //

            if (!NT_SUCCESS(Status)) {
                leave;
            }

            //
            // we found it... update the context
            //

            if (pBucket == NULL)
            {
                ASSERTMSG("sr.sys[hash.c] Hash Bucket is NULL!", FALSE);
                Status = STATUS_INVALID_DEVICE_REQUEST;
                leave;
            }

            ASSERT(IS_VALID_HASH_BUCKET(pBucket));
            ASSERT(Index < pBucket->UsedCount);

            pBucket->Entries[Index].pContext = pContext;

            //
            // all done, give an error just to make sure they were expecting
            // duplicates !
            //

            Status = STATUS_DUPLICATE_OBJECTID;
            leave;
        }

        //
        // didn't find it... let's insert it.  Index now holds the sorted
        // slot the new entry belongs in.
        //

        //
        // any existing entries?
        //

        if (pBucket == NULL)
        {
            //
            // allocate a sibling array
            //

            pBucket = SR_ALLOCATE_STRUCT_WITH_SPACE( PagedPool,
                                                     HASH_BUCKET,
                                                     sizeof(HASH_ENTRY)
                                                        * HASH_ENTRY_DEFAULT_WIDTH,
                                                     HASH_BUCKET_TAG );
            if (pBucket == NULL)
            {
                Status = STATUS_INSUFFICIENT_RESOURCES;
                leave;
            }

            RtlZeroMemory( pBucket,
                           sizeof(HASH_BUCKET) +
                               sizeof(HASH_ENTRY) * HASH_ENTRY_DEFAULT_WIDTH );

            pBucket->Signature = HASH_BUCKET_TAG;
            pBucket->AllocCount = HASH_ENTRY_DEFAULT_WIDTH;
        }
        else if ((pBucket->UsedCount + 1) > pBucket->AllocCount)
        {
            //
            // Grow a bigger array (double the capacity).  On success the
            // old bucket is freed in the finally block below.
            //

            pNewBucket = SR_ALLOCATE_STRUCT_WITH_SPACE( PagedPool,
                                                        HASH_BUCKET,
                                                        sizeof(HASH_ENTRY)
                                                            * (pBucket->AllocCount * 2),
                                                        HASH_BUCKET_TAG );
            if (pNewBucket == NULL)
            {
                Status = STATUS_INSUFFICIENT_RESOURCES;
                leave;
            }

            //
            // copy the existing entries, then zero the newly added half
            //

            RtlCopyMemory( pNewBucket,
                           pBucket,
                           sizeof(HASH_BUCKET) +
                               sizeof(HASH_ENTRY) * pBucket->AllocCount );

            RtlZeroMemory( ((PUCHAR)pNewBucket) + sizeof(HASH_BUCKET) +
                               sizeof(HASH_ENTRY) * pBucket->AllocCount,
                           sizeof(HASH_ENTRY) * pBucket->AllocCount );

            pNewBucket->AllocCount *= 2;

            pOldBucket = pBucket;
            pBucket = pNewBucket;
        }

        //
        // Allocate a key buffer: file name + stream name + one NULL WCHAR
        //

        pKeyBuffer = SR_ALLOCATE_ARRAY( PagedPool,
                                        WCHAR,
                                        (HASH_KEY_LENGTH( pKey )/sizeof(WCHAR))+1,
                                        HASH_KEY_TAG );
        if (pKeyBuffer == NULL)
        {
            Status = STATUS_INSUFFICIENT_RESOURCES;
            leave;
        }

        //
        // need to shuffle things around?
        //

        if (Index < pBucket->UsedCount)
        {
            //
            // shift right the chunk at Index to open the sorted slot
            //

            RtlMoveMemory( &(pBucket->Entries[Index+1]),
                           &(pBucket->Entries[Index]),
                           (pBucket->UsedCount - Index) * sizeof(HASH_ENTRY) );
        }

        //
        // now fill in the entry
        //

        pEntry = &pBucket->Entries[Index];

        pEntry->Key.FileName.Buffer = pKeyBuffer;

        //
        // copy over the key string (file + stream names share the buffer)
        //

        RtlCopyMemory( pEntry->Key.FileName.Buffer,
                       pKey->FileName.Buffer,
                       pKey->FileName.Length + pKey->StreamNameLength);

        pEntry->Key.FileName.Length = pKey->FileName.Length;

        //
        // NOTE(review): MaximumLength is copied from the source key, but the
        // buffer allocated above holds only FileName.Length + StreamNameLength
        // + one NULL; a larger source MaximumLength would overstate this
        // allocation -- confirm nothing resizes in place based on it.
        //

        pEntry->Key.FileName.MaximumLength = pKey->FileName.MaximumLength;
        pEntry->Key.StreamNameLength = pKey->StreamNameLength;

        //
        // NULL terminate it
        //

        pEntry->Key.FileName.Buffer[(pEntry->Key.FileName.Length + pEntry->Key.StreamNameLength)/sizeof(WCHAR)] = UNICODE_NULL;

        //
        // and the hash value and context
        //

        pEntry->HashValue = HashValue;
        pEntry->pContext = pContext;

        //
        // update we've used an extra block
        //

        pBucket->UsedCount += 1;

        //
        // update the hash header with this new bucket
        //

        pHeader->Buckets[HashBucket] = pBucket;

        //
        // update our used count
        //

        pHeader->UsedLength += HASH_KEY_LENGTH( &(pEntry->Key) );

        //
        // all done
        //

        Status = STATUS_SUCCESS;

    } finally {

        Status = FinallyUnwind( HashAddEntry, Status );

        if (lookedUpHashValue) {

            if ((Status != STATUS_DUPLICATE_OBJECTID) && !NT_SUCCESS(Status))
            {
                //
                // free any new bucket we allocated but didn't publish into
                // pHeader->Buckets (the old bucket is still in place)
                //

                if (pHeader->Buckets[HashBucket] != pBucket && pBucket != NULL)
                {
                    SR_FREE_POOL_WITH_SIG(pBucket, HASH_BUCKET_TAG);
                }

                //
                // same for the key buffer
                //

                if (pKeyBuffer != NULL)
                {
                    SR_FREE_POOL(pKeyBuffer, HASH_KEY_TAG);
                    pKeyBuffer = NULL;
                }
            }
            else
            {
                SrTraceSafe( HASH, ("sr!HashAddEntry(%p[%d][%d], ['%wZ', %p]) %ws%ws\n",
                             pHeader,
                             HashBucket,
                             Index,
                             &(pKey->FileName),
                             pContext,
                             (Index < (pBucket->UsedCount-1)) ? L"[shifted]" : L"",
                             (pOldBucket) ? L"[realloc]" : L"" ) );

                //
                // supposed to free the old bucket buffer (replaced by a grow)?
                //

                if (pOldBucket != NULL)
                {
                    SR_FREE_POOL_WITH_SIG(pOldBucket, HASH_BUCKET_TAG);
                }
            }
        }
    }

#if DBG
    if (Status == STATUS_DUPLICATE_OBJECTID)
    {
        //
        // don't want to break when we return this
        //

        return Status;
    }
#endif

    RETURN(Status);

} // HashAddEntry
  368. NTSTATUS
  369. HashFindEntry(
  370. IN PHASH_HEADER pHeader,
  371. IN PHASH_KEY pKey,
  372. OUT PVOID * ppContext
  373. )
  374. {
  375. NTSTATUS Status;
  376. ULONG Index;
  377. ULONG HashValue;
  378. ULONG HashBucket;
  379. PAGED_CODE();
  380. ASSERT(IS_VALID_HASH_HEADER(pHeader));
  381. ASSERT(pKey != NULL);
  382. ASSERT(ppContext != NULL);
  383. //
  384. // this has to be held as we are returning a context who's refcount
  385. // is owned by the hash, and can be free'd on the lock is released
  386. //
  387. //
  388. // the caller is responsible for synchronizing access to this list
  389. // paulmcd: 1/01
  390. //
  391. ASSERT(ExIsResourceAcquiredShared(&pHeader->Lock));
  392. Status = RtlHashUnicodeString( &(pKey->FileName),
  393. TRUE,
  394. HASH_STRING_ALGORITHM_DEFAULT,
  395. &HashValue );
  396. if (!NT_SUCCESS(Status))
  397. return Status;
  398. HashBucket = HashValue % pHeader->BucketCount;
  399. Status = HashpFindEntry( pHeader,
  400. pHeader->Buckets[HashBucket],
  401. HashValue,
  402. pKey,
  403. ppContext,
  404. &Index );
  405. return Status;
  406. } // HashFindEntry
  407. NTSTATUS
  408. HashpFindEntry(
  409. IN PHASH_HEADER pHeader,
  410. IN PHASH_BUCKET pBucket OPTIONAL,
  411. IN ULONG HashValue,
  412. IN PHASH_KEY pKey,
  413. OUT PVOID * ppContext,
  414. OUT PULONG pIndex
  415. )
  416. {
  417. NTSTATUS Status;
  418. PHASH_ENTRY pEntry;
  419. ULONG Index = 0;
  420. PAGED_CODE();
  421. ASSERT(IS_VALID_HASH_HEADER(pHeader));
  422. ASSERT(pBucket == NULL || IS_VALID_HASH_BUCKET(pBucket));
  423. ASSERT(HashValue != 0);
  424. ASSERT(pKey != NULL);
  425. ASSERT(ppContext != NULL);
  426. ASSERT(pIndex != NULL);
  427. //
  428. // assume we didn't find it
  429. //
  430. Status = STATUS_OBJECT_NAME_NOT_FOUND;
  431. //
  432. // are there any entries in this bucket?
  433. //
  434. if (pBucket != NULL)
  435. {
  436. //
  437. // Walk the sorted array looking for a match (linear)
  438. //
  439. //
  440. // CODEWORK: make this a binary search!
  441. //
  442. for (Index = 0; Index < pBucket->UsedCount; Index++)
  443. {
  444. LONG result;
  445. pEntry = &pBucket->Entries[Index];
  446. result = HashpCompare( pHeader,
  447. HashValue,
  448. pKey,
  449. pEntry );
  450. if (result == NOT_FOUND)
  451. {
  452. //
  453. // We passed this name in the sorted list, so stop.
  454. //
  455. break;
  456. }
  457. else if (result == FOUND)
  458. {
  459. //
  460. // We got a match, so return the context.
  461. //
  462. Status = STATUS_SUCCESS;
  463. *ppContext = pEntry->pContext;
  464. break;
  465. }
  466. //
  467. // else if (result == CONTINUE)
  468. //
  469. // Otherwise, keep scanning.
  470. //
  471. } // for (Index = 0; Index < pBucket->UsedCount; Index++)
  472. } // if (pBucket != NULL)
  473. *pIndex = Index;
  474. return Status;
  475. } // HashpFindEntry
  476. /***************************************************************************++
  477. Routine Description:
  478. This routine will compare two HASH_KEYs (one explicitly passed in and the
  479. other in the pEntry). This comparison function assumes sorting in the
  480. following way:
  481. * Increasing hash values
  482. * File names (not including stream components) in increasing lengths.
  483. * File names in lexically increasing order.
  484. * Stream components in increasing lengths.
  485. * Stream components in lexically increasing order.
  486. Because this routine assumes the above sort, it will return
  487. one of three values indicating that pKey is not in the list, we've
  488. matched pKey or we need to keep looking.
  489. Examples of lexically sorted order:
  490. "cat" < "cats"
  491. "stream1" < "stream2"
  492. "file1" < "file1:stream1"
  493. Arguments:
  494. pHeader - The header for this hash table.
  495. HashValue - The hash value for pKey
  496. pKey - The hash key we are looking up. Note: the name buffer is NOT
  497. NULL terminated.
  498. pEntry - The entry with the hash key we are comparing against. Note:
  499. the name buffer for this hash key IS NULL terminated.
  500. Return Value:
  501. NOT_FOUND - pKey does not match pEntry->Key and we know that it is not in
  502. the list because pKey is LESS than pEntry->Key (by our sort definition).
  503. FOUND - pKey matches pEntry->Key
  504. CONTINUE - pKey does not match pEntry->Key, but it is GREATER
  505. than pEntry->Key (by our sort definition), so keep looking.
  506. --***************************************************************************/
  507. LONG
  508. HashpCompare (
  509. IN PHASH_HEADER pHeader,
  510. IN ULONG HashValue,
  511. IN PHASH_KEY pKey,
  512. IN PHASH_ENTRY pEntry
  513. )
  514. {
  515. int temp;
  516. PAGED_CODE();
  517. //
  518. // How does the hash compare?
  519. //
  520. if (HashValue > pEntry->HashValue)
  521. {
  522. //
  523. // larger, time to stop
  524. //
  525. return CONTINUE;
  526. }
  527. else if (HashValue == pEntry->HashValue)
  528. {
  529. //
  530. // and the length?
  531. //
  532. if (pKey->FileName.Length < pEntry->Key.FileName.Length)
  533. {
  534. return NOT_FOUND;
  535. }
  536. else if (pKey->FileName.Length == pEntry->Key.FileName.Length)
  537. {
  538. ULONG offsetToFileName;
  539. ASSERT(pHeader->PrefixLength < pKey->FileName.Length);
  540. offsetToFileName = pHeader->PrefixLength/sizeof(WCHAR);
  541. //
  542. // Use pKey's length to control how long to search since it is
  543. // not necessarily NULL terminated and pEntry is.
  544. //
  545. temp = _wcsnicmp( pKey->FileName.Buffer + offsetToFileName,
  546. pEntry->Key.FileName.Buffer + offsetToFileName,
  547. pKey->FileName.Length/sizeof(WCHAR) - offsetToFileName );
  548. if (temp > 0)
  549. {
  550. //
  551. // pKey > pEntry->Key, so we need to keep looking.
  552. //
  553. return CONTINUE;
  554. }
  555. else if (temp == 0)
  556. {
  557. //
  558. // Found a file name match. Now we look at the stream
  559. // components of the name for the match.
  560. //
  561. return HashpCompareStreams( pKey, pEntry );
  562. }
  563. else
  564. {
  565. return NOT_FOUND;
  566. }
  567. }
  568. else
  569. {
  570. //
  571. // pKey->FileName.Length > pEntry->Key.FileName.Length
  572. //
  573. return CONTINUE;
  574. }
  575. }
  576. else
  577. {
  578. return NOT_FOUND;
  579. }
  580. }
/***************************************************************************++
Routine Description:
    Compares the stream-name components of two keys whose file-name
    components have already matched (see HashpCompare).  Sort order is
    stream length first, then lexical, case-insensitive.  The stream
    name sits in the FileName buffer immediately after the file name.
Return Value:
    FOUND / NOT_FOUND / CONTINUE, with the same meaning as HashpCompare.
--***************************************************************************/
LONG
HashpCompareStreams (
    PHASH_KEY pKey,
    PHASH_ENTRY pEntry
    )
{
    int temp;

    PAGED_CODE();

    //
    // Neither key has a stream component: this is the most common case,
    // so make the check quick.
    //

    if (pKey->StreamNameLength == 0 &&
        pEntry->Key.StreamNameLength == 0)
    {
        return FOUND;
    }

    if (pKey->StreamNameLength < pEntry->Key.StreamNameLength)
    {
        return NOT_FOUND;
    }
    else if (pKey->StreamNameLength == pEntry->Key.StreamNameLength)
    {
        ULONG offsetToStream;

        ASSERT( pKey->StreamNameLength > 0 );

        //
        // the stream name starts right after the file name in the buffer
        //

        offsetToStream = pKey->FileName.Length/sizeof(WCHAR);

        //
        // See if stream name matches
        //

        temp = _wcsnicmp( pKey->FileName.Buffer + offsetToStream,
                          pEntry->Key.FileName.Buffer + offsetToStream,
                          pKey->StreamNameLength/sizeof(WCHAR) );

        if (temp > 0)
        {
            //
            // pKey > pEntry->Key, so we need to keep looking.
            //

            return CONTINUE;
        }
        else if (temp == 0)
        {
            //
            // Found the exact match
            //

            return FOUND;
        }
        else
        {
            //
            // pKey < pEntry->Key
            //

            return NOT_FOUND;
        }
    }
    else
    {
        //
        // pKey->StreamNameLength > pEntry->Key.StreamNameLength
        //

        return CONTINUE;
    }
}
VOID
HashClearEntries(
    IN PHASH_HEADER pHeader
    )
/*++

Routine Description:

    Empties every bucket: runs the optional per-entry destructor, frees
    each key's name buffer, zeroes the entry, and resets UsedCount.
    The bucket arrays themselves are kept (HashDestroyList frees them).
    The caller must hold pHeader->Lock exclusive.

--*/
{
    ULONG Index;
    ULONG Index2;
    PHASH_BUCKET pBucket;
    PHASH_ENTRY pEntry;

    PAGED_CODE();

    ASSERT(IS_VALID_HASH_HEADER(pHeader));

    SrTrace(HASH, ("SR!HashClearEntries(%p)\n", pHeader));

    //
    // the caller is responsible for synchronizing access to this list
    // paulmcd: 1/01
    //

    ASSERT(ExIsResourceAcquiredExclusive(&pHeader->Lock));

    //
    // walk all of our entries and delete them
    //

    for (Index = 0; Index < pHeader->BucketCount; ++Index)
    {
        pBucket = pHeader->Buckets[Index];

        if (pBucket != NULL)
        {
            ASSERT(IS_VALID_HASH_BUCKET(pBucket));

            for (Index2 = 0 ; Index2 < pBucket->UsedCount; ++Index2)
            {
                pEntry = &pBucket->Entries[Index2];

                //
                // invoke the callback?
                //

                if (pHeader->pDestructor != NULL)
                {
                    pHeader->pDestructor(&pEntry->Key, pEntry->pContext);
                }

                //
                // update our header usage before freeing the name
                //

                pHeader->UsedLength -= HASH_KEY_LENGTH( &(pEntry->Key) );

                SR_FREE_POOL(pEntry->Key.FileName.Buffer, HASH_KEY_TAG);

                pEntry->Key.FileName.Buffer = NULL;
                pEntry->Key.FileName.Length = 0;
                pEntry->Key.FileName.MaximumLength = 0;
                pEntry->Key.StreamNameLength = 0;
                pEntry->HashValue = 0;
                pEntry->pContext = NULL;
            }

            //
            // reset it (the bucket array stays allocated for reuse)
            //

            pBucket->UsedCount = 0;
        }
    }

    //
    // everything should be gone
    //

    ASSERT(pHeader->UsedLength == 0);

    //
    // reset the trim time counter; HashTrimList treats 0 as "never
    // trimmed"
    //

    pHeader->LastTrimTime.QuadPart = 0;

} // HashClearEntries
  705. VOID
  706. HashProcessEntries(
  707. IN PHASH_HEADER pHeader,
  708. IN PHASH_ENTRY_CALLBACK pfnCallback,
  709. IN PVOID pCallbackContext
  710. )
  711. {
  712. ULONG Index;
  713. ULONG Index2;
  714. PHASH_BUCKET pBucket;
  715. PHASH_ENTRY pEntry;
  716. PAGED_CODE();
  717. ASSERT(pfnCallback != NULL );
  718. ASSERT(IS_VALID_HASH_HEADER(pHeader));
  719. SrTrace(HASH, ("SR!HashProcessEntries(%p)\n", pHeader));
  720. //
  721. // grab the lock exclusive
  722. //
  723. SrAcquireResourceExclusive(&pHeader->Lock, TRUE);
  724. //
  725. // walk all of our entries and "process" them
  726. //
  727. for (Index = 0; Index < pHeader->BucketCount; ++Index)
  728. {
  729. pBucket = pHeader->Buckets[Index];
  730. if (pBucket != NULL)
  731. {
  732. ASSERT(IS_VALID_HASH_BUCKET(pBucket));
  733. for (Index2 = 0 ; Index2 < pBucket->UsedCount; ++Index2)
  734. {
  735. pEntry = &pBucket->Entries[Index2];
  736. //
  737. // invoke the callback
  738. //
  739. pEntry->pContext = pfnCallback( &pEntry->Key,
  740. pEntry->pContext,
  741. pCallbackContext );
  742. }
  743. }
  744. }
  745. SrReleaseResource(&pHeader->Lock);
  746. }
  747. NTSTATUS
  748. HashClearAllFileEntries (
  749. IN PHASH_HEADER pHeader,
  750. IN PUNICODE_STRING pFileName
  751. )
  752. {
  753. NTSTATUS Status;
  754. ULONG HashValue, HashBucket;
  755. ULONG Index;
  756. PHASH_BUCKET pHashBucket;
  757. PHASH_ENTRY pEntry;
  758. PAGED_CODE();
  759. ASSERT( ExIsResourceAcquiredExclusive( &pHeader->Lock ) );
  760. Status = RtlHashUnicodeString( pFileName,
  761. TRUE,
  762. HASH_STRING_ALGORITHM_DEFAULT,
  763. &HashValue );
  764. if (!NT_SUCCESS(Status))
  765. {
  766. goto HashClearAllFileEntries_Exit;
  767. }
  768. HashBucket = HashValue % pHeader->BucketCount;
  769. pHashBucket = pHeader->Buckets[HashBucket];
  770. if (pHashBucket == NULL)
  771. {
  772. Status = STATUS_SUCCESS;
  773. goto HashClearAllFileEntries_Exit;
  774. }
  775. for (Index = 0; Index < pHashBucket->UsedCount; Index++)
  776. {
  777. pEntry = &pHashBucket->Entries[Index];
  778. if (RtlEqualUnicodeString( pFileName, &(pEntry->Key.FileName), TRUE ))
  779. {
  780. pEntry->pContext = (PVOID)SrEventInvalid;
  781. }
  782. }
  783. HashClearAllFileEntries_Exit:
  784. RETURN( Status );
  785. }
  786. VOID
  787. HashDestroyList(
  788. IN PHASH_HEADER pHeader
  789. )
  790. {
  791. ULONG Index;
  792. PHASH_BUCKET pBucket;
  793. PAGED_CODE();
  794. ASSERT(IS_VALID_HASH_HEADER(pHeader));
  795. SrTrace(HASH, ("SR!HashDestroyList(%p)\n", pHeader));
  796. //
  797. // let go of all of the entries
  798. //
  799. SrAcquireResourceExclusive( &pHeader->Lock, TRUE );
  800. HashClearEntries(pHeader);
  801. SrReleaseResource( &pHeader->Lock );
  802. //
  803. // now free the memory blocks
  804. //
  805. for (Index = 0; Index < pHeader->BucketCount; ++Index)
  806. {
  807. pBucket = pHeader->Buckets[Index];
  808. if (pBucket != NULL)
  809. {
  810. ASSERT(IS_VALID_HASH_BUCKET(pBucket));
  811. SR_FREE_POOL_WITH_SIG(pBucket, HASH_BUCKET_TAG);
  812. }
  813. }
  814. ExDeleteResourceLite(&pHeader->Lock);
  815. SR_FREE_POOL_WITH_SIG(pHeader, HASH_HEADER_TAG);
  816. } // HashDestroyList
VOID
HashTrimList(
    IN PHASH_HEADER pHeader
    )
/*++

Routine Description:

    Frees entries to bring the list back under its memory budget.  How
    aggressively (what fraction of UsedLength to release, and a wall-
    clock cap on the pass) scales with how recently the last trim ran.
    Buckets are emptied whole, in index order, until the time or space
    goal is met.  The caller must hold pHeader->Lock exclusive.

--*/
{
    LARGE_INTEGER EndTime;          // absolute deadline for this pass
    LARGE_INTEGER CurrentTime;
    ULONG EndLength;                // target UsedLength to stop at
    ULONG MinutesSinceTrim;
    ULONG MaxPercentDivisor;        // release UsedLength/Divisor bytes
    ULONG MaxTime;                  // seconds allowed for this pass
    ULONG Index;
    ULONG Index2;
    PHASH_BUCKET pBucket;
    PHASH_ENTRY pEntry;

    PAGED_CODE();

    ASSERT(IS_VALID_HASH_HEADER(pHeader));

    //
    // the caller is responsible for synchronizing access to this list
    // paulmcd: 1/01
    //

    ASSERT(ExIsResourceAcquiredExclusive(&pHeader->Lock));

    //
    // decide how much to trim
    //

    //
    // we don't want to trim all of the time, trim based on when we trimmed
    // last
    //

    KeQuerySystemTime( &CurrentTime );

    if (pHeader->LastTrimTime.QuadPart == 0)
    {
        //
        // never trimmed (or cleared since): treat as "very long ago"
        //

        MinutesSinceTrim = 0xffffffff;
    }
    else
    {
        //
        // NOTE(review): dividing the system-time delta by NANO_FULL_SECOND
        // looks like it yields SECONDS, not the minutes the variable name
        // and tier comments below suggest (minutes would need a further
        // /60) -- confirm NANO_FULL_SECOND's definition and the intent.
        //

        MinutesSinceTrim = (ULONG)(CurrentTime.QuadPart -
                                   pHeader->LastTrimTime.QuadPart)
                                / NANO_FULL_SECOND;
    }

    //
    // recent trims are more aggressive:
    //   < 10 since last trim -> release 30%, cap 8s
    //   < 60 since last trim -> release 20%, cap 4s
    //   otherwise            -> release 10%, cap 2s
    //

    if (MinutesSinceTrim < 10)
    {
        MaxPercentDivisor = 3;      // 30%
        MaxTime = 8;
    }
    else if (MinutesSinceTrim < 60)
    {
        MaxPercentDivisor = 5;      // 20%
        MaxTime = 4;
    }
    else
    {
        MaxPercentDivisor = 10;     // 10%
        MaxTime = 2;
    }

    SrTrace(HASH, ("sr!HashTrimList, trimming. MinutesSinceTrim=%d,MaxTime=%d, MaxPercentDivisor=%d\n",
            MinutesSinceTrim,
            MaxTime,
            MaxPercentDivisor ));

    EndTime.QuadPart = CurrentTime.QuadPart + (MaxTime * NANO_FULL_SECOND);

    ASSERT(MaxPercentDivisor != 0);

    EndLength = pHeader->UsedLength - (pHeader->UsedLength / MaxPercentDivisor);

    //
    // update that we've trimmed
    //

    KeQuerySystemTime( &pHeader->LastTrimTime );

    //
    // loop through the hash list
    //

    for (Index = 0; Index < pHeader->BucketCount; ++Index)
    {
        pBucket = pHeader->Buckets[Index];

        if (pBucket != NULL)
        {
            ASSERT(IS_VALID_HASH_BUCKET(pBucket));

            //
            // loop through this bucket
            //

            for (Index2 = 0 ; Index2 < pBucket->UsedCount; ++Index2)
            {
                //
                // throw this away
                //

                pEntry = &pBucket->Entries[Index2];

                //
                // invoke the callback?
                //

                if (pHeader->pDestructor != NULL)
                {
                    pHeader->pDestructor(&pEntry->Key, pEntry->pContext);
                }

                //
                // update the length of the hash
                //

                pHeader->UsedLength -= HASH_KEY_LENGTH( &(pEntry->Key) );

                //
                // and free the memory
                //

                SR_FREE_POOL(pEntry->Key.FileName.Buffer, HASH_KEY_TAG);

                pEntry->Key.FileName.Buffer = NULL;
                pEntry->Key.FileName.Length = 0;
                pEntry->Key.FileName.MaximumLength = 0;
                pEntry->Key.StreamNameLength = 0;
                pEntry->HashValue = 0;
                pEntry->pContext = NULL;
            }

            //
            // reset it (the bucket array itself is kept for reuse)
            //

            pBucket->UsedCount = 0;

        } // if (pBucket != NULL)

        //
        // are we ready to quit?  These checks run per-bucket, so a
        // bucket is always emptied whole before we stop.
        //

        KeQuerySystemTime( &CurrentTime );

        if (EndTime.QuadPart <= CurrentTime.QuadPart)
        {
            SrTrace(HASH, ("sr!HashTrimList, leaving due to time\n"));
            break;
        }

        if (pHeader->UsedLength <= EndLength)
        {
            SrTrace(HASH, ("sr!HashTrimList, leaving due to space\n"));
            break;
        }

    } // for (Index = 0; Index < pHeader->BucketCount; ++Index)

} // HashTrimList