Source code of Windows XP (NT5)

/*++

Copyright (c) 1990  Microsoft Corporation

Module Name:

    vacbsup.c

Abstract:

    This module implements the support routines for the Virtual Address
    Control Block support for the Cache Manager.  These routines are used
    to manage a large number of relatively small address windows to map
    file data for all forms of cache access.

Author:

    Tom Miller      [TomM]      8-Feb-1992

Revision History:

--*/

#include "cc.h"
#include "ex.h"

//
//  Define our debug constant
//

#define me 0x000000040

//
//  Internal Support Routines.
//

VOID
CcUnmapVacb (
    IN PVACB Vacb,
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN BOOLEAN UnmapBehind
    );

PVACB
CcGetVacbMiss (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER FileOffset,
    IN OUT PKIRQL OldIrql
    );

VOID
CcCalculateVacbLevelLockCount (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PVACB *VacbArray,
    IN ULONG Level
    );

PVACB
CcGetVacbLargeOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset
    );

VOID
CcSetVacbLargeOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset,
    IN PVACB Vacb
    );

#ifdef ALLOC_PRAGMA
#pragma alloc_text(INIT, CcInitializeVacbs)
#endif

//
//  Define a few macros for manipulating the Vacb array.
//

#define GetVacb(SCM,OFF) (                                             \
    ((SCM)->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) ?         \
    CcGetVacbLargeOffset((SCM),(OFF).QuadPart) :                       \
    (SCM)->Vacbs[(OFF).LowPart >> VACB_OFFSET_SHIFT]                   \
)

_inline
VOID
SetVacb (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER Offset,
    IN PVACB Vacb
    )
{
    if (SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) {

        CcSetVacbLargeOffset(SharedCacheMap, Offset.QuadPart, Vacb);

#ifdef VACB_DBG
        ASSERT(Vacb >= VACB_SPECIAL_FIRST_VALID || CcGetVacbLargeOffset(SharedCacheMap, Offset.QuadPart) == Vacb);
#endif // VACB_DBG

    } else if (Vacb < VACB_SPECIAL_FIRST_VALID) {
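
        //
        //  Only real Vacb pointers are stored in the flat, single-level
        //  array.  The special reference markers set by
        //  CcReferenceFileOffset/CcDereferenceFileOffset only have meaning
        //  for the multilevel form, so the guard here appears to be
        //  defensive.  (Explanatory note; hedged inference from the code.)
        //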
        SharedCacheMap->Vacbs[Offset.LowPart >> VACB_OFFSET_SHIFT] = Vacb;
    }

#ifdef VACB_DBG
    //
    //  Note, we need a new field if we turn this check on again - ReservedForAlignment
    //  has been stolen for other purposes.
    //

    if (Vacb < VACB_SPECIAL_FIRST_VALID) {
        if (Vacb != NULL) {
            SharedCacheMap->ReservedForAlignment++;
        } else {
            SharedCacheMap->ReservedForAlignment--;
        }
    }

    ASSERT((SharedCacheMap->SectionSize.QuadPart <= VACB_SIZE_OF_FIRST_LEVEL) ||
           (SharedCacheMap->ReservedForAlignment == 0) ||
           IsVacbLevelReferenced( SharedCacheMap, SharedCacheMap->Vacbs, 1 ));
#endif // VACB_DBG
}

//
//  Define the macro for referencing the multilevel Vacb array.
//

_inline
VOID
ReferenceVacbLevel (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PVACB *VacbArray,
    IN ULONG Level,
    IN LONG Amount,
    IN LOGICAL Special
    )
{
    PVACB_LEVEL_REFERENCE VacbReference = VacbLevelReference( SharedCacheMap, VacbArray, Level );

    ASSERT( Amount > 0 ||
            (!Special && VacbReference->Reference >= (0 - Amount)) ||
            ( Special && VacbReference->SpecialReference >= (0 - Amount)));

    if (Special) {
        VacbReference->SpecialReference += Amount;
    } else {
        VacbReference->Reference += Amount;
    }

#ifdef VACB_DBG
    //
    //  For debugging purposes, we can assert that the regular reference count
    //  corresponds to the population of the level.
    //

    {
        LONG Current = VacbReference->Reference;
        CcCalculateVacbLevelLockCount( SharedCacheMap, VacbArray, Level );
        ASSERT( Current == VacbReference->Reference );
    }
#endif // VACB_DBG
}

//
//  Define the macros for moving the VACBs on the LRU list
//

#define CcMoveVacbToReuseFree(V) RemoveEntryList( &(V)->LruList );          \
                                 InsertHeadList( &CcVacbFreeList, &(V)->LruList );

#define CcMoveVacbToReuseTail(V) RemoveEntryList( &(V)->LruList );          \
                                 InsertTailList( &CcVacbLru, &(V)->LruList );

//
//  If the HighPart is nonzero, then we will go to a multi-level structure anyway, which is
//  most easily triggered by returning MAXULONG.
//

#define SizeOfVacbArray(LSZ) (                                              \
    ((LSZ).HighPart != 0) ? MAXULONG :                                      \
    ((LSZ).LowPart > (PREALLOCATED_VACBS * VACB_MAPPING_GRANULARITY) ?      \
     (((LSZ).LowPart >> VACB_OFFSET_SHIFT) * sizeof(PVACB)) :               \
     (PREALLOCATED_VACBS * sizeof(PVACB)))                                  \
)

#define CheckedDec(N) {     \
    ASSERT((N) != 0);       \
    (N) -= 1;               \
}

#ifdef ALLOC_PRAGMA
#pragma alloc_text(INIT,CcInitializeVacbs)
#pragma alloc_text(PAGE,CcCreateVacbArray)
#pragma alloc_text(PAGE,CcUnmapVacb)
#endif

VOID
CcInitializeVacbs(
    )

/*++

Routine Description:

    This routine must be called during Cache Manager initialization to
    initialize the Virtual Address Control Block structures.

Arguments:

    None.

Return Value:

    None.

--*/

{
    SIZE_T VacbBytes;
    PVACB NextVacb;
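
    //
    //  Size the Vacb pool to the number of views that fit in the system
    //  cache (each view maps VACB_MAPPING_GRANULARITY bytes, i.e.
    //  1 << VACB_OFFSET_SHIFT); the "- 2" presumably keeps a small guard
    //  margin.  (Explanatory note; the margin rationale is an assumption.)
    //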
    CcNumberVacbs = (MmSizeOfSystemCacheInPages >> (VACB_OFFSET_SHIFT - PAGE_SHIFT)) - 2;
    VacbBytes = CcNumberVacbs * sizeof(VACB);

    CcVacbs = (PVACB) ExAllocatePoolWithTag( NonPagedPool, VacbBytes, 'aVcC' );

    if (CcVacbs != NULL) {

        CcBeyondVacbs = (PVACB)((PCHAR)CcVacbs + VacbBytes);
        RtlZeroMemory( CcVacbs, VacbBytes );

        InitializeListHead( &CcVacbLru );
        InitializeListHead( &CcVacbFreeList );

        for (NextVacb = CcVacbs; NextVacb < CcBeyondVacbs; NextVacb++) {
            InsertTailList( &CcVacbFreeList, &NextVacb->LruList );
        }
    }
}

PVOID
CcGetVirtualAddressIfMapped (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset,
    OUT PVACB *Vacb,
    OUT PULONG ReceivedLength
    )

/*++

Routine Description:

    This routine returns a virtual address for the specified FileOffset,
    iff it is mapped.  Otherwise, it informs the caller that the specified
    virtual address was not mapped.  In the latter case, it still returns
    a ReceivedLength, which may be used to advance to the next view boundary.

Arguments:

    SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file.

    FileOffset - Supplies the desired FileOffset within the file.

    Vacb - Returns a Vacb pointer which must be supplied later to free
           this virtual address, or NULL if not mapped.

    ReceivedLength - Returns the number of bytes to the next view boundary,
                     whether the desired file offset is mapped or not.

Return Value:

    The virtual address at which the desired data is mapped, or NULL if it
    is not mapped.

--*/

{
    KIRQL OldIrql;
    ULONG VacbOffset = (ULONG)FileOffset & (VACB_MAPPING_GRANULARITY - 1);
    PVOID Value = NULL;

    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    //
    //  Generate ReceivedLength return right away.
    //

    *ReceivedLength = VACB_MAPPING_GRANULARITY - VacbOffset;

    //
    //  Modifiers of VacbArray hold the VacbLock to synchronize access.  The
    //  VacbLock must be released during the call to CcUnmapVacb() because it
    //  contains a call to MmUnmapViewInSystemCache().  It is this MM call that
    //  is responsible for copying the dirty bit from the PTEs back to the PFN.
    //
    //  During this time the worker thread may call CcFlushCache() on the
    //  Vacb being unmapped.  CcGetVirtualAddressIfMapped() is used to determine
    //  if the Vacb's memory is mapped and will correctly report that the address
    //  is not mapped so CcFlushCache() will proceed to call MmFlushSection().
    //
    //  This is where we have synchronization problems.  If MmUnmapViewInSystemCache()
    //  is not finished propagating the dirty PTE information back to the
    //  PFN when MmFlushSection() is run, MM doesn't think there is anything
    //  to flush.
    //
    //  Later this results in noncached I/O returning different page data than
    //  cached I/O.
    //
    //  The solution to this problem is to use a multiple reader/single writer
    //  Ex push lock to delay CcGetVirtualAddressIfMapped() until any existing
    //  calls to MmUnmapViewInSystemCache() via CcUnmapVacb() complete.
    //

    ExAcquirePushLockExclusive( &SharedCacheMap->VacbPushLock );

    //
    //  Acquire the Vacb lock to see if the desired offset is already mapped.
    //

    CcAcquireVacbLock( &OldIrql );

    ASSERT( FileOffset <= SharedCacheMap->SectionSize.QuadPart );

    if ((*Vacb = GetVacb( SharedCacheMap, *(PLARGE_INTEGER)&FileOffset )) != NULL) {

        if ((*Vacb)->Overlay.ActiveCount == 0) {
            SharedCacheMap->VacbActiveCount += 1;
        }

        (*Vacb)->Overlay.ActiveCount += 1;

        //
        //  Move this range away from the front to avoid wasting cycles
        //  looking at it for reuse.
        //

        CcMoveVacbToReuseTail( *Vacb );

        Value = (PVOID)((PCHAR)(*Vacb)->BaseAddress + VacbOffset);
    }

    CcReleaseVacbLock( OldIrql );

    ExReleasePushLockExclusive( &SharedCacheMap->VacbPushLock );

    return Value;
}

PVOID
CcGetVirtualAddress (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER FileOffset,
    OUT PVACB *Vacb,
    IN OUT PULONG ReceivedLength
    )

/*++

Routine Description:

    This is the main routine for Vacb management.  It may be called to acquire
    a virtual address for a given file offset.  If the desired file offset is
    already mapped, this routine does very little work before returning with
    the desired virtual address and Vacb pointer (which must be supplied to
    free the mapping).

    If the desired virtual address is not currently mapped, then this routine
    claims a Vacb from the tail of the Vacb LRU to reuse its mapping.  This Vacb
    is then unmapped if necessary (normally not required), and mapped to the
    desired address.

Arguments:

    SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file.

    FileOffset - Supplies the desired FileOffset within the file.

    Vacb - Returns a Vacb pointer which must be supplied later to free
           this virtual address.

    ReceivedLength - Returns the number of bytes which are contiguously
                     mapped starting at the virtual address returned.

Return Value:

    The virtual address at which the desired data is mapped.

--*/

{
    KIRQL OldIrql;
    PVACB TempVacb;
    ULONG VacbOffset = FileOffset.LowPart & (VACB_MAPPING_GRANULARITY - 1);

    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    //
    //  Acquire the shared lock on the VacbArray because CcGetVacbMiss()
    //  might unmap a Vacb.  See CcGetVirtualAddressIfMapped() for more
    //  details.
    //

    ExAcquirePushLockShared( &SharedCacheMap->VacbPushLock );

    //
    //  Acquire the Vacb lock to see if the desired offset is already mapped.
    //

    CcAcquireVacbLock( &OldIrql );

    ASSERT( FileOffset.QuadPart <= SharedCacheMap->SectionSize.QuadPart );

    if ((TempVacb = GetVacb( SharedCacheMap, FileOffset )) == NULL) {

        TempVacb = CcGetVacbMiss( SharedCacheMap, FileOffset, &OldIrql );

    } else {

        if (TempVacb->Overlay.ActiveCount == 0) {
            SharedCacheMap->VacbActiveCount += 1;
        }

        TempVacb->Overlay.ActiveCount += 1;
    }

    //
    //  Move this range away from the front to avoid wasting cycles
    //  looking at it for reuse.
    //

    CcMoveVacbToReuseTail( TempVacb );

    CcReleaseVacbLock( OldIrql );

    ExReleasePushLockShared( &SharedCacheMap->VacbPushLock );

    //
    //  Now form all outputs.
    //

    *Vacb = TempVacb;
    *ReceivedLength = VACB_MAPPING_GRANULARITY - VacbOffset;

    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    //
    //  PREfix wants to know this cannot be NULL, otherwise it will complain
    //  about users of this function.
    //

    ASSERT( TempVacb->BaseAddress != NULL );

    return (PVOID)((PCHAR)TempVacb->BaseAddress + VacbOffset);
}
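
//
//  Usage sketch (illustrative note, not part of the original file): per the
//  CcFreeVirtualAddress documentation below, every successful call to
//  CcGetVirtualAddress must be paired with exactly one CcFreeVirtualAddress
//  on the returned Vacb, e.g.:
//
//      PVACB Vacb;
//      ULONG ReceivedLength;
//      PVOID Va;
//
//      Va = CcGetVirtualAddress( SharedCacheMap, FileOffset, &Vacb, &ReceivedLength );
//
//      //  ... access up to ReceivedLength bytes at Va ...
//
//      CcFreeVirtualAddress( Vacb );
//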

PVACB
CcGetVacbMiss (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER FileOffset,
    IN OUT PKIRQL OldIrql
    )

/*++

Routine Description:

    This is the main routine for Vacb management.  It may be called to acquire
    a virtual address for a given file offset.  If the desired file offset is
    already mapped, this routine does very little work before returning with
    the desired virtual address and Vacb pointer (which must be supplied to
    free the mapping).

    If the desired virtual address is not currently mapped, then this routine
    claims a Vacb from the tail of the Vacb LRU to reuse its mapping.  This Vacb
    is then unmapped if necessary (normally not required), and mapped to the
    desired address.

Arguments:

    SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file.

    FileOffset - Supplies the desired FileOffset within the file.

    OldIrql - Pointer to the OldIrql variable in the caller

Return Value:

    The Vacb.

--*/

{
    PSHARED_CACHE_MAP OldSharedCacheMap;
    PVACB Vacb, TempVacb;
    LARGE_INTEGER MappedLength;
    LARGE_INTEGER NormalOffset;
    NTSTATUS Status;
    ULONG ActivePage;
    ULONG PageIsDirty;
    PVACB ActiveVacb = NULL;
    ULONG VacbOffset = FileOffset.LowPart & (VACB_MAPPING_GRANULARITY - 1);

    NormalOffset = FileOffset;
    NormalOffset.LowPart -= VacbOffset;
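
    //
    //  NormalOffset is now FileOffset rounded down to a view boundary.
    //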

    //
    //  For files that are not open for random access, we assume sequential
    //  access and periodically unmap unused views behind us as we go, to
    //  keep from hogging memory.
    //
    //  We used to only do this for pure FO_SEQUENTIAL_ONLY access.  The
    //  sequential flag still has an effect (to put the pages at the front
    //  of the standby lists) but we intend for the majority of the file
    //  cache to live on the standby and are willing to take transition
    //  faults to bring it back.  Granted, this exacerbates the problem that
    //  it is hard to figure out how big the filecache really is since even
    //  less of it is going to be mapped at any given time.  It may also
    //  promote the synchronization bottlenecks in view mapping (MmPfnLock)
    //  to the forefront when significant view thrashing occurs.
    //
    //  This isn't as bad as it seems.  When we see access take a view miss,
    //  it is really likely that it is a result of sequential access.  As long
    //  as the pages go onto the back of the standby, they'll live for a while.
    //  The problem we're dealing with here is that the cache can be filled at
    //  high speed, but the working set manager can't possibly trim it as fast
    //  or as intelligently, whereas we have a pretty good guess where the
    //  candidate pages should come from.  We can't let the filecache size make
    //  large excursions, or we'll kick out a lot of valuable pages in the process.
    //

    if (!FlagOn(SharedCacheMap->Flags, RANDOM_ACCESS_SEEN) &&
        ((NormalOffset.LowPart & (SEQUENTIAL_MAP_LIMIT - 1)) == 0) &&
        (NormalOffset.QuadPart >= (SEQUENTIAL_MAP_LIMIT * 2))) {

        //
        //  Use MappedLength as a scratch variable to form the offset
        //  to start unmapping.  We are not synchronized with these past
        //  views, so it is possible that CcUnmapVacbArray will kick out
        //  early when it sees an active view.  That is why we go back
        //  twice the distance, and effectively try to unmap everything
        //  twice.  The second time should normally do it.  If the file
        //  is truly sequential only, then the only collision expected
        //  might be the previous view if we are being called from readahead,
        //  or there is a small chance that we can collide with the
        //  Lazy Writer during the small window where he briefly maps
        //  the file to push out the dirty bits.
        //

        CcReleaseVacbLock( *OldIrql );

        MappedLength.QuadPart = NormalOffset.QuadPart - (SEQUENTIAL_MAP_LIMIT * 2);

        CcUnmapVacbArray( SharedCacheMap, &MappedLength, (SEQUENTIAL_MAP_LIMIT * 2), TRUE );

        CcAcquireVacbLock( OldIrql );
    }

    //
    //  If there is a free view, move it to the LRU and we're done.
    //

    if (!IsListEmpty(&CcVacbFreeList)) {

        Vacb = CONTAINING_RECORD( CcVacbFreeList.Flink, VACB, LruList );
        CcMoveVacbToReuseTail( Vacb );

    } else {

        //
        //  Scan from the front of the lru for the next victim Vacb
        //

        Vacb = CONTAINING_RECORD( CcVacbLru.Flink, VACB, LruList );

        while (TRUE) {

            //
            //  If this guy is not active, break out and use him.  Also, if
            //  it is an Active Vacb, nuke it now, because the reader may be
            //  idle and we want to clean up.
            //

            OldSharedCacheMap = Vacb->SharedCacheMap;

            if ((Vacb->Overlay.ActiveCount == 0) ||
                ((ActiveVacb == NULL) &&
                 (OldSharedCacheMap != NULL) &&
                 (OldSharedCacheMap->ActiveVacb == Vacb))) {

                //
                //  The normal case is that the Vacb is no longer mapped
                //  and we can just get out and use it, however, here we
                //  handle the case where it is mapped.
                //

                if (Vacb->BaseAddress != NULL) {

                    //
                    //  If this Vacb is active, it must be the ActiveVacb.
                    //

                    if (Vacb->Overlay.ActiveCount != 0) {

                        //
                        //  Get the active Vacb.
                        //

                        GetActiveVacbAtDpcLevel( Vacb->SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );

                    //
                    //  Otherwise we will break out and use this Vacb.  If it
                    //  is still mapped we can now safely increment the open
                    //  count.
                    //

                    } else {

                        //
                        //  Note that if the SharedCacheMap is currently
                        //  being deleted, we need to skip over
                        //  it, otherwise we will become the second
                        //  deleter.  CcDeleteSharedCacheMap clears the
                        //  pointer in the SectionObjectPointer.
                        //

                        CcAcquireMasterLockAtDpcLevel();

                        if (Vacb->SharedCacheMap->FileObject->SectionObjectPointer->SharedCacheMap ==
                            Vacb->SharedCacheMap) {

                            CcIncrementOpenCount( Vacb->SharedCacheMap, 'mvGS' );
                            CcReleaseMasterLockFromDpcLevel();
                            break;
                        }

                        CcReleaseMasterLockFromDpcLevel();
                    }

                } else {

                    break;
                }
            }

            //
            //  Advance to the next guy if we haven't scanned
            //  the entire list.
            //

            if (Vacb->LruList.Flink != &CcVacbLru) {

                Vacb = CONTAINING_RECORD( Vacb->LruList.Flink, VACB, LruList );

            } else {

                CcReleaseVacbLock( *OldIrql );

                //
                //  If we found an active vacb, then free it and go back and
                //  try again.  Else it's time to bail.
                //

                if (ActiveVacb != NULL) {

                    CcFreeActiveVacb( ActiveVacb->SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
                    ActiveVacb = NULL;

                    //
                    //  Reacquire spinlocks to loop back and position ourselves at the head
                    //  of the LRU for the next pass.
                    //

                    CcAcquireVacbLock( OldIrql );
                    Vacb = CONTAINING_RECORD( CcVacbLru.Flink, VACB, LruList );

                } else {

                    ExReleasePushLockShared( &SharedCacheMap->VacbPushLock );
                    ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
                }
            }
        }
    }

    //
    //  Unlink it from the other SharedCacheMap, so the other
    //  guy will not try to use it when we free the spin lock.
    //

    if (Vacb->SharedCacheMap != NULL) {

        OldSharedCacheMap = Vacb->SharedCacheMap;
        SetVacb( OldSharedCacheMap, Vacb->Overlay.FileOffset, NULL );
        Vacb->SharedCacheMap = NULL;
    }

    //
    //  Mark it in use so no one else will muck with it after
    //  we release the spin lock.
    //

    Vacb->Overlay.ActiveCount = 1;
    SharedCacheMap->VacbActiveCount += 1;

    CcReleaseVacbLock( *OldIrql );

    //
    //  If the Vacb is already mapped, then unmap it.
    //

    if (Vacb->BaseAddress != NULL) {

        //
        //  Check to see if we need to drain the zone.
        //

        CcDrainVacbLevelZone();

        CcUnmapVacb( Vacb, OldSharedCacheMap, FALSE );

        //
        //  Now we can decrement the open count as we normally
        //  do, possibly deleting the guy.
        //

        CcAcquireMasterLock( OldIrql );

        //
        //  Now release our open count.
        //

        CcDecrementOpenCount( OldSharedCacheMap, 'mvGF' );

        if ((OldSharedCacheMap->OpenCount == 0) &&
            !FlagOn(OldSharedCacheMap->Flags, WRITE_QUEUED) &&
            (OldSharedCacheMap->DirtyPages == 0)) {

            //
            //  Move to the dirty list.
            //

            RemoveEntryList( &OldSharedCacheMap->SharedCacheMapLinks );
            InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                            &OldSharedCacheMap->SharedCacheMapLinks );

            //
            //  Make sure the Lazy Writer will wake up, because we
            //  want him to delete this SharedCacheMap.
            //

            LazyWriter.OtherWork = TRUE;
            if (!LazyWriter.ScanActive) {
                CcScheduleLazyWriteScan( FALSE );
            }
        }

        CcReleaseMasterLock( *OldIrql );
    }

    //
    //  Use try-finally to return this guy to the list if we get an
    //  exception.
    //

    try {

        //
        //  Assume we are mapping to the end of the section, but
        //  reduce to our normal mapping granularity if the section
        //  is too large.
        //

        MappedLength.QuadPart = SharedCacheMap->SectionSize.QuadPart - NormalOffset.QuadPart;

        if ((MappedLength.HighPart != 0) ||
            (MappedLength.LowPart > VACB_MAPPING_GRANULARITY)) {

            MappedLength.LowPart = VACB_MAPPING_GRANULARITY;
        }

        //
        //  Now map this one in the system cache.
        //

        DebugTrace( 0, mm, "MmMapViewInSystemCache:\n", 0 );
        DebugTrace( 0, mm, "    Section = %08lx\n", SharedCacheMap->Section );
        DebugTrace2(0, mm, "    Offset = %08lx, %08lx\n",
                    NormalOffset.LowPart,
                    NormalOffset.HighPart );
        DebugTrace( 0, mm, "    ViewSize = %08lx\n", MappedLength.LowPart );

        Status =
            MmMapViewInSystemCache( SharedCacheMap->Section,
                                    &Vacb->BaseAddress,
                                    &NormalOffset,
                                    &MappedLength.LowPart );

        DebugTrace( 0, mm, "    <BaseAddress = %08lx\n", Vacb->BaseAddress );
        DebugTrace( 0, mm, "    <ViewSize = %08lx\n", MappedLength.LowPart );

        if (!NT_SUCCESS( Status )) {

            DebugTrace( 0, 0, "Error from Map, Status = %08lx\n", Status );

            ExReleasePushLockShared( &SharedCacheMap->VacbPushLock );

            ExRaiseStatus( FsRtlNormalizeNtstatus( Status,
                                                   STATUS_UNEXPECTED_MM_MAP_ERROR ));
        }

    } finally {

        //
        //  Take this opportunity to free the active vacb.
        //

        if (ActiveVacb != NULL) {

            CcFreeActiveVacb( ActiveVacb->SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
        }

        //
        //  On abnormal termination, get this guy back in the list.
        //

        if (AbnormalTermination()) {

            CcAcquireVacbLock( OldIrql );

            //
            //  This is like the unlucky case below.  Just back out the stuff
            //  we did and put the guy at the tail of the list.  Basically
            //  only the Map should fail, and we clear BaseAddress accordingly.
            //

            Vacb->BaseAddress = NULL;

            CheckedDec(Vacb->Overlay.ActiveCount);
            CheckedDec(SharedCacheMap->VacbActiveCount);

            //
            //  If there is someone waiting for this count to go to zero,
            //  wake them here.
            //

            if (SharedCacheMap->WaitOnActiveCount != NULL) {
                KeSetEvent( SharedCacheMap->WaitOnActiveCount, 0, FALSE );
            }

            CcReleaseVacbLock( *OldIrql );
        }
    }

    //
    //  Make sure the zone contains the worst case number of entries.
    //

    if (SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) {

        //
        //  Raise if we cannot preallocate enough buffers.
        //

        if (!CcPrefillVacbLevelZone( CcMaxVacbLevelsSeen - 1,
                                     OldIrql,
                                     FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) )) {

            ExReleasePushLockShared( &SharedCacheMap->VacbPushLock );
            ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
        }

    } else {

        CcAcquireVacbLock( OldIrql );
    }

    //
    //  Finish filling in the Vacb, and store its address in the array in
    //  the Shared Cache Map.  (We have to rewrite the ActiveCount
    //  since it is overlaid.)  To do this we must reacquire the
    //  spin lock one more time.  Note we have to check for the unusual
    //  case that someone beat us to mapping this view, since we had to
    //  drop the spin lock.
    //

    if ((TempVacb = GetVacb( SharedCacheMap, NormalOffset )) == NULL) {

        Vacb->SharedCacheMap = SharedCacheMap;
        Vacb->Overlay.FileOffset = NormalOffset;
        Vacb->Overlay.ActiveCount = 1;

        SetVacb( SharedCacheMap, NormalOffset, Vacb );

    //
    //  This is the unlucky case where we collided with someone else
    //  trying to map the same view.  He can get in because we dropped
    //  the spin lock above.  Rather than allocating events and making
    //  someone wait, considering this case is fairly unlikely, we just
    //  dump this one at the head of the LRU and use the one from the
    //  guy who beat us.
    //

    } else {

        //
        //  Now we have to increment all of the counts for the one that
        //  was already there, then ditch the one we had.
        //

        if (TempVacb->Overlay.ActiveCount == 0) {
            SharedCacheMap->VacbActiveCount += 1;
        }

        TempVacb->Overlay.ActiveCount += 1;

        //
        //  Now unmap the one we mapped and proceed with the other Vacb.
        //  On this path we have to release the spinlock to do the unmap,
        //  and then reacquire the spinlock before cleaning up.
        //

        CcReleaseVacbLock( *OldIrql );

        CcUnmapVacb( Vacb, SharedCacheMap, FALSE );

        CcAcquireVacbLock( OldIrql );
        CheckedDec(Vacb->Overlay.ActiveCount);
        CheckedDec(SharedCacheMap->VacbActiveCount);
        Vacb->SharedCacheMap = NULL;

        CcMoveVacbToReuseFree( Vacb );

        Vacb = TempVacb;
    }

    return Vacb;
}

VOID
FASTCALL
CcFreeVirtualAddress (
    IN PVACB Vacb
    )

/*++

Routine Description:

    This routine must be called once for each call to CcGetVirtualAddress
    to free that virtual address.

Arguments:

    Vacb - Supplies the Vacb which was returned from CcGetVirtualAddress.

Return Value:

    None.

--*/

{
    KIRQL OldIrql;
    PSHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;

    CcAcquireVacbLock( &OldIrql );

    CheckedDec(Vacb->Overlay.ActiveCount);

    //
    //  If the count goes to zero, then we want to decrement the global
    //  Active count.
    //

    if (Vacb->Overlay.ActiveCount == 0) {

        //
        //  If the SharedCacheMap address is not NULL, then this one is
        //  in use by a shared cache map, and we have to decrement his
        //  count and see if anyone is waiting.
        //

        if (SharedCacheMap != NULL) {

            CheckedDec(SharedCacheMap->VacbActiveCount);

            //
            //  If there is someone waiting for this count to go to zero,
            //  wake them here.
            //

            if (SharedCacheMap->WaitOnActiveCount != NULL) {
                KeSetEvent( SharedCacheMap->WaitOnActiveCount, 0, FALSE );
            }

            //
            //  Go to the back of the LRU to save this range for a bit
            //

            CcMoveVacbToReuseTail( Vacb );

        } else {

            //
            //  This range is no longer referenced, so make it available
            //

            ASSERT( Vacb->BaseAddress == NULL );
            CcMoveVacbToReuseFree( Vacb );
        }

    } else {

        //
        //  This range is still in use, so move it away from the front
        //  so that it doesn't consume cycles being checked.
        //

        CcMoveVacbToReuseTail( Vacb );
    }

    CcReleaseVacbLock( OldIrql );
}

VOID
CcReferenceFileOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER FileOffset
    )

/*++

Routine Description:

    This is a special form of reference that ensures that the multi-level
    Vacb structures are expanded to cover a given file offset.

Arguments:

    SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file.

    FileOffset - Supplies the desired FileOffset within the file.

Return Value:

    None

--*/

{
    KIRQL OldIrql;

    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    //
    //  This operation only has meaning if the Vacbs are in the multilevel form.
    //

    if (SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) {

        //
        //  Prefill the level zone so that we can expand the tree if required.
        //

        if (!CcPrefillVacbLevelZone( CcMaxVacbLevelsSeen - 1,
                                     &OldIrql,
                                     FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) )) {

            ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
        }

        ASSERT( FileOffset.QuadPart <= SharedCacheMap->SectionSize.QuadPart );

        SetVacb( SharedCacheMap, FileOffset, VACB_SPECIAL_REFERENCE );

        CcReleaseVacbLock( OldIrql );
    }

    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    return;
}

VOID
CcDereferenceFileOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER FileOffset
    )

/*++

Routine Description:

    This routine must be called once for each call to CcReferenceFileOffset
    to remove the reference.

Arguments:

    SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file.

    FileOffset - Supplies the desired FileOffset within the file.

Return Value:

    None

--*/

{
    KIRQL OldIrql;

    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    //
    //  This operation only has meaning if the Vacbs are in the multilevel form.
    //

    if (SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) {

        //
        //  Acquire the Vacb lock to synchronize the dereference.
        //

        CcAcquireVacbLock( &OldIrql );

        ASSERT( FileOffset.QuadPart <= SharedCacheMap->SectionSize.QuadPart );

        SetVacb( SharedCacheMap, FileOffset, VACB_SPECIAL_DEREFERENCE );

        CcReleaseVacbLock( OldIrql );
    }

    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    return;
}

VOID
CcWaitOnActiveCount (
    IN PSHARED_CACHE_MAP SharedCacheMap
    )

/*++

Routine Description:

    This routine may be called to wait for outstanding mappings for
    a given SharedCacheMap to go inactive.  It is intended to be called
    from CcUninitializeCacheMap, which is called by the file systems
    during cleanup processing.  In that case this routine only has to
    wait if the user closed a handle without waiting for all I/Os on the
    handle to complete.

    This routine returns each time the active count is decremented.  The
    caller must recheck his wait conditions on return, either waiting for
    the ActiveCount to go to 0, or for specific views to go inactive
    (CcPurgeCacheSection case).

Arguments:

    SharedCacheMap - Supplies the Shared Cache Map on whose VacbActiveCount
                     we wish to wait.

Return Value:

    None.

--*/

{
    KIRQL OldIrql;
    PKEVENT Event;

    //
    //  In the unusual case that we get a cleanup while I/O is still going
    //  on, we can wait here.  The caller must test the count for nonzero
    //  before calling this routine.
    //
    //  Since we are being called from cleanup, we cannot afford to
    //  fail here.
    //

    CcAcquireVacbLock( &OldIrql );

    //
    //  It is possible that the count went to zero before we acquired the
    //  spinlock, so we must handle two cases here.
    //

    if (SharedCacheMap->VacbActiveCount != 0) {

        Event = SharedCacheMap->WaitOnActiveCount;

        if (Event == NULL) {

            //
            //  Take the event.  We avoid dispatcher lock overhead for
            //  every single zero transition by only picking up the event
            //  when we actually need it.
            //

            Event = &SharedCacheMap->Event;

            KeInitializeEvent( Event,
                               NotificationEvent,
                               FALSE );

            SharedCacheMap->WaitOnActiveCount = Event;

        } else {

            KeClearEvent( Event );
        }

        CcReleaseVacbLock( OldIrql );

        KeWaitForSingleObject( Event,
                               Executive,
                               KernelMode,
                               FALSE,
                               (PLARGE_INTEGER)NULL);

    } else {

        CcReleaseVacbLock( OldIrql );
    }
}

//
//  Internal Support Routine.
//

VOID
CcUnmapVacb (
    IN PVACB Vacb,
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN BOOLEAN UnmapBehind
    )

/*++

Routine Description:

    This routine may be called to unmap a previously mapped Vacb, and
    clear its BaseAddress field.

Arguments:

    Vacb - Supplies the Vacb which was returned from CcGetVirtualAddress.

    SharedCacheMap - Supplies the Shared Cache Map for the file the Vacb maps.

    UnmapBehind - If this is a result of our unmap behind logic (the
                  only case in which we pay attention to sequential hints)

Return Value:

    None.

--*/

{
    //
    //  Make sure it is mapped.
    //

    ASSERT(SharedCacheMap != NULL);
    ASSERT(Vacb->BaseAddress != NULL);

    //
    //  Call MM to unmap it.
    //

    DebugTrace( 0, mm, "MmUnmapViewInSystemCache:\n", 0 );
    DebugTrace( 0, mm, "    BaseAddress = %08lx\n", Vacb->BaseAddress );

    MmUnmapViewInSystemCache( Vacb->BaseAddress,
                              SharedCacheMap->Section,
                              UnmapBehind &&
                              FlagOn(SharedCacheMap->Flags, ONLY_SEQUENTIAL_ONLY_SEEN) );

    Vacb->BaseAddress = NULL;
}

NTSTATUS
FASTCALL
CcCreateVacbArray (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER NewSectionSize
    )

/*++

Routine Description:

    This routine must be called when a SharedCacheMap is created to create
    and initialize the initial Vacb array.

Arguments:

    SharedCacheMap - Supplies the shared cache map for which the array is
                     to be created.

    NewSectionSize - Supplies the current size of the section which must be
                     covered by the Vacb array.

Return Value:

    NTSTATUS.

--*/

{
    PVACB *NewAddresses;
    ULONG NewSize, SizeToAllocate;
    PLIST_ENTRY BcbListHead;
    LOGICAL CreateBcbListHeads = FALSE, CreateReference = FALSE;

    NewSize = SizeToAllocate = SizeOfVacbArray(NewSectionSize);

    //
    //  The following limit is greater than the MM limit
    //  (i.e., MM actually only supports even smaller sections).
    //  We have to reject the sign bit, and testing the high byte
    //  for nonzero will surely only catch errors.
    //

    if (NewSectionSize.HighPart & ~(PAGE_SIZE - 1)) {
        return STATUS_SECTION_TOO_BIG;
    }

    //
    //  See if we can use the array inside the shared cache map.
    //

    if (NewSize == (PREALLOCATED_VACBS * sizeof(PVACB))) {

        NewAddresses = &SharedCacheMap->InitialVacbs[0];

    //
    //  Else allocate the array.
    //

    } else {

        //
        //  For large metadata streams, double the size to allocate
        //  an array of Bcb listheads.  Each two Vacb pointers also
        //  gets its own Bcb listhead, thus requiring double the size.
        //

        ASSERT(SIZE_PER_BCB_LIST == (VACB_MAPPING_GRANULARITY * 2));

        //
        //  If this stream is larger than the size for multi-level Vacbs,
        //  then fix the size to allocate the root.
        //

        if (NewSize > VACB_LEVEL_BLOCK_SIZE) {

            ULONG Level = 0;
            ULONG Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT;

            NewSize = SizeToAllocate = VACB_LEVEL_BLOCK_SIZE;
            SizeToAllocate += sizeof(VACB_LEVEL_REFERENCE);
            CreateReference = TRUE;

            //
            //  Loop to calculate how many levels we have and how much we have to
            //  shift to index into the first level.
            //

            do {

                Level += 1;
                Shift += VACB_LEVEL_SHIFT;

            } while ((NewSectionSize.QuadPart > ((LONGLONG)1 << Shift)) != 0);

            //
            //  Remember the maximum level ever seen (which is actually Level + 1).
            //

            if (Level >= CcMaxVacbLevelsSeen) {
                ASSERT(Level <= VACB_NUMBER_OF_LEVELS);
                CcMaxVacbLevelsSeen = Level + 1;
            }

        } else {

            //
            //  Does this stream get a Bcb Listhead array?
            //

            if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) &&
                (NewSectionSize.QuadPart > BEGIN_BCB_LIST_ARRAY)) {

                SizeToAllocate *= 2;
                CreateBcbListHeads = TRUE;
            }

            //
            //  Handle the boundary case by giving the proto-level a
            //  reference count.  This will allow us to simply push it
            //  in the expansion case.  In practice, due to pool granularity
            //  this will not change the amount of space allocated
            //

            if (NewSize == VACB_LEVEL_BLOCK_SIZE) {
                SizeToAllocate += sizeof(VACB_LEVEL_REFERENCE);
                CreateReference = TRUE;
            }
        }

        NewAddresses = ExAllocatePoolWithTag( NonPagedPool, SizeToAllocate, 'pVcC' );
        if (NewAddresses == NULL) {
            SharedCacheMap->Status = STATUS_INSUFFICIENT_RESOURCES;
            return STATUS_INSUFFICIENT_RESOURCES;
        }
    }

    //
    //  Zero out the Vacb array and the trailing reference counts.
    //

    RtlZeroMemory( (PCHAR)NewAddresses, NewSize );

    if (CreateReference) {
        SizeToAllocate -= sizeof(VACB_LEVEL_REFERENCE);
        RtlZeroMemory( (PCHAR)NewAddresses + SizeToAllocate, sizeof(VACB_LEVEL_REFERENCE) );
    }

    //
    //  Loop to insert the Bcb listheads (if any) in the *descending* order
    //  Bcb list.
    //

    if (CreateBcbListHeads) {

        for (BcbListHead = (PLIST_ENTRY)((PCHAR)NewAddresses + NewSize);
             BcbListHead < (PLIST_ENTRY)((PCHAR)NewAddresses + SizeToAllocate);
             BcbListHead++) {

            InsertHeadList( &SharedCacheMap->BcbList, BcbListHead );
        }
    }

    SharedCacheMap->Vacbs = NewAddresses;
    SharedCacheMap->SectionSize = NewSectionSize;

    return STATUS_SUCCESS;
}

NTSTATUS
CcExtendVacbArray (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER NewSectionSize
    )

/*++

Routine Description:

    This routine must be called any time the section for a shared cache
    map is extended, in order to extend the Vacb array (if necessary).

Arguments:

    SharedCacheMap - Supplies the shared cache map for which the array is
                     to be created.

    NewSectionSize - Supplies the new size of the section which must be
                     covered by the Vacb array.

Return Value:

    NTSTATUS.

--*/

{
    KLOCK_QUEUE_HANDLE LockHandle;
    PVACB *OldAddresses;
    PVACB *NewAddresses;
    ULONG OldSize;
    ULONG NewSize, SizeToAllocate;
    LARGE_INTEGER NextLevelSize;
    LOGICAL GrowingBcbListHeads = FALSE, CreateReference = FALSE;

    //
    //  The following limit is greater than the MM limit
    //  (i.e., MM actually only supports even smaller sections).
    //  We have to reject the sign bit, and testing the high byte
    //  for nonzero will surely only catch errors.
    //

    if (NewSectionSize.HighPart & ~(PAGE_SIZE - 1)) {
        return STATUS_SECTION_TOO_BIG;
    }

    //
    //  See if we will be growing the Bcb ListHeads, so we can take out the
    //  master lock if so.
    //

    if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) &&
        (NewSectionSize.QuadPart > BEGIN_BCB_LIST_ARRAY)) {

        GrowingBcbListHeads = TRUE;
    }

    //
    //  Is there any work to do?
    //

    if (NewSectionSize.QuadPart > SharedCacheMap->SectionSize.QuadPart) {

        //
        //  Handle the growth of the first level here.
        //

        if (SharedCacheMap->SectionSize.QuadPart < VACB_SIZE_OF_FIRST_LEVEL) {

            NextLevelSize = NewSectionSize;

            //
            //  Limit the growth of this level
            //

            if (NextLevelSize.QuadPart >= VACB_SIZE_OF_FIRST_LEVEL) {
                NextLevelSize.QuadPart = VACB_SIZE_OF_FIRST_LEVEL;
                CreateReference = TRUE;
            }

            //
            //  N.B.: SizeOfVacbArray only calculates the size of the VACB
            //  pointer block.  We must adjust for Bcb listheads and the
            //  multilevel reference count.
            //

            NewSize = SizeToAllocate = SizeOfVacbArray(NextLevelSize);
            OldSize = SizeOfVacbArray(SharedCacheMap->SectionSize);

            //
            //  Only do something if the size is growing.
            //

            if (NewSize > OldSize) {

                //
                //  Does this stream get a Bcb Listhead array?
                //

                if (GrowingBcbListHeads) {
                    SizeToAllocate *= 2;
                }

                //
                //  Do we need space for the reference count?
                //

                if (CreateReference) {
                    SizeToAllocate += sizeof(VACB_LEVEL_REFERENCE);
                }

                NewAddresses = ExAllocatePoolWithTag( NonPagedPool, SizeToAllocate, 'pVcC' );
                if (NewAddresses == NULL) {
                    return STATUS_INSUFFICIENT_RESOURCES;
                }

                //
                //  See if we will be growing the Bcb ListHeads, so we can take out the
                //  master lock if so.
                //

                if (GrowingBcbListHeads) {

                    KeAcquireInStackQueuedSpinLock( &SharedCacheMap->BcbSpinLock, &LockHandle );
                    CcAcquireVacbLockAtDpcLevel();

                } else {

                    //
                    //  Acquire the spin lock to serialize with anyone who might like
                    //  to "steal" one of the mappings we are going to move.
                    //

                    CcAcquireVacbLock( &LockHandle.OldIrql );
                }

                OldAddresses = SharedCacheMap->Vacbs;

                if (OldAddresses != NULL) {
                    RtlCopyMemory( NewAddresses, OldAddresses, OldSize );
                } else {
                    OldSize = 0;
                }

                RtlZeroMemory( (PCHAR)NewAddresses + OldSize, NewSize - OldSize );

                if (CreateReference) {
                    SizeToAllocate -= sizeof(VACB_LEVEL_REFERENCE);
                    RtlZeroMemory( (PCHAR)NewAddresses + SizeToAllocate, sizeof(VACB_LEVEL_REFERENCE) );
                }

                //
                //  See if we have to initialize Bcb Listheads.
                //

                if (GrowingBcbListHeads) {

                    LARGE_INTEGER Offset;
                    PLIST_ENTRY BcbListHeadNew, TempEntry;

                    Offset.QuadPart = 0;
                    BcbListHeadNew = (PLIST_ENTRY)((PCHAR)NewAddresses + NewSize );

                    //
                    //  Handle case where the old array had Bcb Listheads.
                    //

                    if ((SharedCacheMap->SectionSize.QuadPart > BEGIN_BCB_LIST_ARRAY) &&
                        (OldAddresses != NULL)) {

                        PLIST_ENTRY BcbListHeadOld;

                        BcbListHeadOld = (PLIST_ENTRY)((PCHAR)OldAddresses + OldSize);

                        //
                        //  Loop to remove each old listhead and insert the new one
                        //  in its place.
                        //

                        do {

                            TempEntry = BcbListHeadOld->Flink;
                            RemoveEntryList( BcbListHeadOld );
                            InsertTailList( TempEntry, BcbListHeadNew );
                            Offset.QuadPart += SIZE_PER_BCB_LIST;
                            BcbListHeadOld += 1;
                            BcbListHeadNew += 1;

                        } while (Offset.QuadPart < SharedCacheMap->SectionSize.QuadPart);

                    //
                    //  Otherwise, handle the case where we are adding Bcb
                    //  Listheads.
                    //

                    } else {

                        TempEntry = SharedCacheMap->BcbList.Blink;

                        //
                        //  Loop through any/all Bcbs to insert the new listheads.
                        //

                        while (TempEntry != &SharedCacheMap->BcbList) {

                            //
                            //  Sit on this Bcb until we have inserted all listheads
                            //  that go before it.
                            //

                            while (Offset.QuadPart <= ((PBCB)CONTAINING_RECORD(TempEntry, BCB, BcbLinks))->FileOffset.QuadPart) {

                                InsertHeadList(TempEntry, BcbListHeadNew);
                                Offset.QuadPart += SIZE_PER_BCB_LIST;
                                BcbListHeadNew += 1;
                            }

                            TempEntry = TempEntry->Blink;
                        }
                    }

                    //
                    //  Now insert the rest of the new listhead entries that were
                    //  not finished in either loop above.
                    //

                    while (Offset.QuadPart < NextLevelSize.QuadPart) {

                        InsertHeadList(&SharedCacheMap->BcbList, BcbListHeadNew);
                        Offset.QuadPart += SIZE_PER_BCB_LIST;
                        BcbListHeadNew += 1;
                    }
                }

                //
                //  These two fields must be changed while still holding the spinlock.
                //

                SharedCacheMap->Vacbs = NewAddresses;
                SharedCacheMap->SectionSize = NextLevelSize;

                //
                //  Now we can free the spinlocks ahead of freeing pool.
                //

                if (GrowingBcbListHeads) {

                    CcReleaseVacbLockFromDpcLevel();
                    KeReleaseInStackQueuedSpinLock( &LockHandle );

                } else {

                    CcReleaseVacbLock( LockHandle.OldIrql );
                }

                if ((OldAddresses != &SharedCacheMap->InitialVacbs[0]) &&
                    (OldAddresses != NULL)) {

                    ExFreePool( OldAddresses );
                }
            }

            //
            //  Make sure SectionSize gets updated.  It is ok to fall through here
            //  without a spinlock, so long as either Vacbs was not changed, or it
            //  was changed together with SectionSize under the spinlock(s) above.
            //

            SharedCacheMap->SectionSize = NextLevelSize;
        }

        //
        //  Handle extends up to and within multi-level Vacb arrays here.  This is fairly simple.
        //  If no additional Vacb levels are required, then there is no work to do, otherwise
        //  we just have to push the root one or more levels linked through the first pointer
        //  in the new root(s).
        //

        if (NewSectionSize.QuadPart > SharedCacheMap->SectionSize.QuadPart) {

            PVACB *NextVacbArray;
            ULONG NewLevel;
            ULONG Level = 1;
            ULONG Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT;

            //
            //  Loop to calculate how many levels we currently have.
            //

            while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift)) {
                Level += 1;
                Shift += VACB_LEVEL_SHIFT;
            }

            NewLevel = Level;

            //
            //  Loop to calculate how many levels we need.
            //

            while (((NewSectionSize.QuadPart - 1) >> Shift) != 0) {
                NewLevel += 1;
                Shift += VACB_LEVEL_SHIFT;
            }

            //
            //  Now see if we have any work to do.
            //

            if (NewLevel > Level) {

                //
                //  Remember the maximum level ever seen (which is actually NewLevel + 1).
                //

                if (NewLevel >= CcMaxVacbLevelsSeen) {
                    ASSERT(NewLevel <= VACB_NUMBER_OF_LEVELS);
                    CcMaxVacbLevelsSeen = NewLevel + 1;
                }

                //
                //  Raise if we cannot preallocate enough buffers.
                //

                if (!CcPrefillVacbLevelZone( NewLevel - Level, &LockHandle.OldIrql, FALSE )) {
                    return STATUS_INSUFFICIENT_RESOURCES;
                }

                //
                //  Now if the current Level of the file is 1, we have not been maintaining
                //  a reference count, so we have to calculate it before pushing.  In the
                //  boundary case we have made sure that the reference space is available.
                //

                if (Level == 1) {

                    //
                    //  We know this is always a leaf-like level right now.
                    //

                    CcCalculateVacbLevelLockCount( SharedCacheMap, SharedCacheMap->Vacbs, 0 );
                }

                //
                //  Finally, if there are any active pointers in the first level, then we
                //  have to create new levels by adding a new root enough times to create
                //  additional levels.  On the other hand, if the pointer count in the top
                //  level is zero, then we must not do any pushes, because we never allow
                //  empty leaves!
                //

                if (IsVacbLevelReferenced( SharedCacheMap, SharedCacheMap->Vacbs, Level - 1 )) {

                    while (NewLevel > Level++) {

                        ASSERT(CcVacbLevelEntries != 0);

                        NextVacbArray = CcAllocateVacbLevel(FALSE);

                        NextVacbArray[0] = (PVACB)SharedCacheMap->Vacbs;
                        ReferenceVacbLevel( SharedCacheMap, NextVacbArray, Level, 1, FALSE );

                        SharedCacheMap->Vacbs = NextVacbArray;
                    }

                } else {

                    //
                    //  We are now possessed of the additional problem that this level has no
                    //  references but may have Bcb listheads due to the boundary case where
                    //  we have expanded up to the multilevel Vacbs above.  This level can't
                    //  remain at the root and needs to be destroyed.  What we need to do is
                    //  replace it with one of our prefilled (non Bcb) levels and unlink the
                    //  Bcb listheads in the old one.
                    //

                    if (Level == 1 && FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) {

                        PLIST_ENTRY PredecessorListHead, SuccessorListHead;

                        NextVacbArray = SharedCacheMap->Vacbs;
                        SharedCacheMap->Vacbs = CcAllocateVacbLevel(FALSE);

                        PredecessorListHead = ((PLIST_ENTRY)((PCHAR)NextVacbArray + VACB_LEVEL_BLOCK_SIZE))->Flink;
                        SuccessorListHead = ((PLIST_ENTRY)((PCHAR)NextVacbArray + (VACB_LEVEL_BLOCK_SIZE * 2) - sizeof(LIST_ENTRY)))->Blink;
                        PredecessorListHead->Blink = SuccessorListHead;
                        SuccessorListHead->Flink = PredecessorListHead;

                        CcDeallocateVacbLevel( NextVacbArray, TRUE );
                    }
                }

                //
                //  These two fields (Vacbs and SectionSize) must be changed while still
                //  holding the spinlock.
                //

                SharedCacheMap->SectionSize = NewSectionSize;

                CcReleaseVacbLock( LockHandle.OldIrql );
            }

            //
            //  Make sure SectionSize gets updated.  It is ok to fall through here
            //  without a spinlock, so long as either Vacbs was not changed, or it
            //  was changed together with SectionSize under the spinlock(s) above.
            //

            SharedCacheMap->SectionSize = NewSectionSize;
        }
    }

    return STATUS_SUCCESS;
}

BOOLEAN
FASTCALL
CcUnmapVacbArray (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    IN BOOLEAN UnmapBehind
    )

/*++

Routine Description:

    This routine must be called to do any unmapping and associated
    cleanup for a shared cache map, just before it is deleted.

Arguments:

    SharedCacheMap - Supplies a pointer to the shared cache map
                     which is about to be deleted.

    FileOffset - If supplied, only unmap the specified offset and length

    Length - Completes range to unmap if FileOffset specified.  If FileOffset
             is specified, Length of 0 means unmap to the end of the section.

    UnmapBehind - If this is a result of our unmap behind logic

Return Value:

    FALSE -- if the unmap was not done due to an active vacb
    TRUE -- if the unmap was done

--*/

{
    PVACB Vacb;
    KIRQL OldIrql;
    LARGE_INTEGER StartingFileOffset = {0,0};
    LARGE_INTEGER EndingFileOffset = SharedCacheMap->SectionSize;
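
    //
    //  By default (no FileOffset supplied) the entire section, from offset
    //  zero through SectionSize, is unmapped.
    //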
    //
    //  We could be just cleaning up for error recovery.
    //

    if (SharedCacheMap->Vacbs == NULL) {
        return TRUE;
    }

    //
    //  See if a range was specified.  Align it to the VACB boundaries so it
    //  works in the loop below
    //

    if (ARGUMENT_PRESENT(FileOffset)) {

        StartingFileOffset.QuadPart = ((FileOffset->QuadPart) & (~((LONGLONG)VACB_MAPPING_GRANULARITY - 1)));

        if (Length != 0) {
            EndingFileOffset.QuadPart = FileOffset->QuadPart + Length;
        }
    }

    //
    //  Acquire the spin lock to serialize access to the Vacb array.
    //

    CcAcquireVacbLock( &OldIrql );

    while (StartingFileOffset.QuadPart < EndingFileOffset.QuadPart) {

        //
        //  Note that the caller with an explicit range may be off the
        //  end of the section (example CcPurgeCacheSection for cache
        //  coherency).  That is the reason for the first part of the
        //  test below.
        //
        //  Check the next cell once without the spin lock, it probably will
        //  not change, but we will handle it if it does.
        //

        if ((StartingFileOffset.QuadPart < SharedCacheMap->SectionSize.QuadPart) &&
            ((Vacb = GetVacb( SharedCacheMap, StartingFileOffset )) != NULL)) {

            //
            //  Return here if we are unlucky and see an active
            //  Vacb.  It could be Purge calling, and the Lazy Writer
            //  may have done a CcGetVirtualAddressIfMapped!
            //

            if (Vacb->Overlay.ActiveCount != 0) {

                CcReleaseVacbLock( OldIrql );
                return FALSE;
            }

            //
            //  Unlink it from the other SharedCacheMap, so the other
            //  guy will not try to use it when we free the spin lock.
            //

            SetVacb( SharedCacheMap, StartingFileOffset, NULL );
            Vacb->SharedCacheMap = NULL;

            //
            //  Increment the open count so that no one else will
            //  try to unmap or reuse until we are done.
            //

            Vacb->Overlay.ActiveCount += 1;

            //
            //  Release the spin lock.
            //

            CcReleaseVacbLock( OldIrql );

            //
            //  Unmap and free it if we really got it above.
            //

            CcUnmapVacb( Vacb, SharedCacheMap, UnmapBehind );

            //
            //  Reacquire the spin lock so that we can decrement the count.
            //

            CcAcquireVacbLock( &OldIrql );
            Vacb->Overlay.ActiveCount -= 1;

            //
            //  Place this VACB at the head of the free list
            //

            CcMoveVacbToReuseFree( Vacb );
        }

        StartingFileOffset.QuadPart = StartingFileOffset.QuadPart + VACB_MAPPING_GRANULARITY;
    }

    CcReleaseVacbLock( OldIrql );

    CcDrainVacbLevelZone();

    return TRUE;
}

ULONG
CcPrefillVacbLevelZone (
    IN ULONG NumberNeeded,
    OUT PKIRQL OldIrql,
    IN ULONG NeedBcbListHeads
    )

/*++

Routine Description:

    This routine may be called to prefill the VacbLevelZone with the number of
    entries required, and return with CcVacbSpinLock acquired.  This approach is
    taken so that the pool allocations and RtlZeroMemory calls can occur without
    holding any spinlock, yet the caller may proceed to perform a single indivisible
    operation without error handling, since there is a guaranteed minimum number of
    entries in the zone.

Arguments:

    NumberNeeded - Number of VacbLevel entries needed, not counting the possible
                   one with Bcb listheads.

    OldIrql - Supplies a pointer to where OldIrql should be returned upon acquiring
              the spinlock.

    NeedBcbListHeads - Supplies TRUE if a level is also needed which contains listheads.

Return Value:

    FALSE if the buffers could not be preallocated, TRUE otherwise.

Environment:

    No spinlocks should be held upon entry.

--*/

{
    PVACB *NextVacbArray;

    CcAcquireVacbLock( OldIrql );

    //
    // Loop until there are enough entries, else return failure...
    //

    while ((NumberNeeded > CcVacbLevelEntries) ||
           (NeedBcbListHeads && (CcVacbLevelWithBcbsFreeList == NULL))) {

        //
        // Else release the spinlock so we can do the allocate/zero.
        //

        CcReleaseVacbLock( *OldIrql );

        //
        // First handle the case where we need a VacbListHead with Bcb Listheads.
        // The pointer test is unsafe but see below.
        //

        if (NeedBcbListHeads && (CcVacbLevelWithBcbsFreeList == NULL)) {

            //
            // Allocate and initialize the Vacb block for this level, and store its pointer
            // back into our parent.  We do not zero the listhead area.
            //

            NextVacbArray =
                (PVACB *)ExAllocatePoolWithTag( NonPagedPool, (VACB_LEVEL_BLOCK_SIZE * 2) + sizeof(VACB_LEVEL_REFERENCE), 'lVcC' );

            if (NextVacbArray == NULL) {
                return FALSE;
            }

            RtlZeroMemory( (PCHAR)NextVacbArray, VACB_LEVEL_BLOCK_SIZE );
            RtlZeroMemory( (PCHAR)NextVacbArray + (VACB_LEVEL_BLOCK_SIZE * 2), sizeof(VACB_LEVEL_REFERENCE) );

            CcAcquireVacbLock( OldIrql );

            NextVacbArray[0] = (PVACB)CcVacbLevelWithBcbsFreeList;
            CcVacbLevelWithBcbsFreeList = NextVacbArray;
            CcVacbLevelWithBcbsEntries += 1;

        } else {

            //
            // Allocate and initialize the Vacb block for this level, and store its pointer
            // back into our parent.
            //

            NextVacbArray =
                (PVACB *)ExAllocatePoolWithTag( NonPagedPool, VACB_LEVEL_BLOCK_SIZE + sizeof(VACB_LEVEL_REFERENCE), 'lVcC' );

            if (NextVacbArray == NULL) {
                return FALSE;
            }

            RtlZeroMemory( (PCHAR)NextVacbArray, VACB_LEVEL_BLOCK_SIZE + sizeof(VACB_LEVEL_REFERENCE) );

            CcAcquireVacbLock( OldIrql );

            NextVacbArray[0] = (PVACB)CcVacbLevelFreeList;
            CcVacbLevelFreeList = NextVacbArray;
            CcVacbLevelEntries += 1;
        }
    }

    return TRUE;
}
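
//
// The allocate-outside-the-lock / recheck-under-the-lock shape above is a
// general pattern.  A user-mode analogue, illustrative only (hypothetical
// names, a pthread mutex standing in for CcVacbSpinLock, calloc standing in
// for ExAllocatePoolWithTag plus RtlZeroMemory):
//

#if 0

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t ZoneLock = PTHREAD_MUTEX_INITIALIZER;
static void *FreeList = NULL;           /* linked through the first pointer */
static unsigned long FreeCount = 0;

//
// Returns 1 with ZoneLock held, or 0 (lock not held) on allocation failure.
// BlockSize is assumed to be at least sizeof(void *).
//

int
PrefillZone (
    unsigned long NumberNeeded,
    size_t BlockSize
    )
{
    pthread_mutex_lock( &ZoneLock );

    while (NumberNeeded > FreeCount) {

        void **Block;

        //
        // Drop the lock for the slow allocate/zero, then reacquire it to
        // push onto the free list.  The count is retested at the top of
        // the loop, so a racing consumer cannot leave us short.
        //

        pthread_mutex_unlock( &ZoneLock );

        Block = calloc( 1, BlockSize );

        if (Block == NULL) {
            return 0;
        }

        pthread_mutex_lock( &ZoneLock );

        Block[0] = FreeList;
        FreeList = Block;
        FreeCount += 1;
    }

    return 1;
}

#endif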

VOID
CcDrainVacbLevelZone (
    )

/*++

Routine Description:

    This routine should be called any time some entries have been deallocated to
    the VacbLevel zone, and we want to ensure the zone is returned to a normal level.

Arguments:

    None.

Return Value:

    None.

Environment:

    No spinlocks should be held upon entry.

--*/

{
    KIRQL OldIrql;
    PVACB *NextVacbArray;

    //
    // This is an unsafe loop to see if it looks like there is stuff to
    // clean up.
    //

    while ((CcVacbLevelEntries > (CcMaxVacbLevelsSeen * 4)) ||
           (CcVacbLevelWithBcbsEntries > 2)) {

        //
        // Now go in and try to pick up one entry to free under a FastLock.
        //

        NextVacbArray = NULL;

        CcAcquireVacbLock( &OldIrql );

        if (CcVacbLevelEntries > (CcMaxVacbLevelsSeen * 4)) {
            NextVacbArray = CcVacbLevelFreeList;
            CcVacbLevelFreeList = (PVACB *)NextVacbArray[0];
            CcVacbLevelEntries -= 1;
        } else if (CcVacbLevelWithBcbsEntries > 2) {
            NextVacbArray = CcVacbLevelWithBcbsFreeList;
            CcVacbLevelWithBcbsFreeList = (PVACB *)NextVacbArray[0];
            CcVacbLevelWithBcbsEntries -= 1;
        }

        CcReleaseVacbLock( OldIrql );

        //
        // Since the loop is unsafe, we may not have gotten anything.
        //

        if (NextVacbArray != NULL) {
            ExFreePool( NextVacbArray );
        }
    }
}
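
//
// The drain side inverts the prefill pattern: an unsafe peek decides whether
// to bother, the limit is retested under the lock, and the free itself runs
// with the lock dropped.  A user-mode analogue, illustrative only
// (hypothetical names):
//

#if 0

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t ZoneLock = PTHREAD_MUTEX_INITIALIZER;
static void **FreeList = NULL;          /* linked through the first pointer */
static unsigned long FreeCount = 0;
static unsigned long FreeLimit = 16;

void
DrainZone (void)
{
    while (FreeCount > FreeLimit) {     /* unsafe peek, like the loop above */

        void **Block = NULL;

        pthread_mutex_lock( &ZoneLock );

        if (FreeCount > FreeLimit) {    /* retest safely under the lock */
            Block = FreeList;
            FreeList = (void **)Block[0];
            FreeCount -= 1;
        }

        pthread_mutex_unlock( &ZoneLock );

        //
        // Since the peek is unsafe, we may not have gotten anything.
        //

        if (Block != NULL) {
            free( Block );
        }
    }
}

#endif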

PLIST_ENTRY
CcGetBcbListHeadLargeOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset,
    IN BOOLEAN FailToSuccessor
    )

/*++

Routine Description:

    This routine may be called to return the Bcb listhead for the specified FileOffset.
    It should only be called if the SectionSize is greater than VACB_SIZE_OF_FIRST_LEVEL.

Arguments:

    SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the listhead
                     is desired.

    FileOffset - Supplies the fileOffset corresponding to the desired listhead.

    FailToSuccessor - Instructs whether not finding the exact listhead should cause us to
                      return the predecessor or successor Bcb listhead.

Return Value:

    Returns the desired Listhead pointer.  If the desired listhead does not actually
    exist yet, then the predecessor listhead is returned (or the successor listhead,
    if FailToSuccessor is TRUE).

Environment:

    The BcbSpinlock should be held on entry.

--*/

{
    ULONG Level, Shift;
    PVACB *VacbArray, *NextVacbArray;
    ULONG Index;
    ULONG SavedIndexes[VACB_NUMBER_OF_LEVELS];
    PVACB *SavedVacbArrays[VACB_NUMBER_OF_LEVELS];
    ULONG SavedLevels = 0;

    //
    // Initialize variables controlling our descent into the hierarchy.
    //

    Level = 0;
    Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT;
    VacbArray = SharedCacheMap->Vacbs;

    //
    // Caller must have verified that we have a hierarchy, otherwise this routine
    // would fail.
    //

    ASSERT(SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL);

    //
    // Loop to calculate how many levels we have and how much we have to
    // shift to index into the first level.
    //

    do {
        Level += 1;
        Shift += VACB_LEVEL_SHIFT;
    } while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift));

    //
    // Our caller could be asking for an offset off the end of section size, so if he
    // is actually off the size of the level, then return the main listhead.
    //

    if (FileOffset >= ((LONGLONG)1 << Shift)) {
        return &SharedCacheMap->BcbList;
    }

    //
    // Now descend the tree to the bottom level to get the caller's Bcb ListHead.
    //

    Shift -= VACB_LEVEL_SHIFT;

    do {

        //
        // Decrement back to the level that describes the size we are within.
        //

        Level -= 1;

        //
        // Calculate the index into the Vacb block for this level.
        //

        Index = (ULONG)(FileOffset >> Shift);
        ASSERT(Index <= VACB_LAST_INDEX_FOR_LEVEL);

        //
        // Get block address for next level.
        //

        NextVacbArray = (PVACB *)VacbArray[Index];

        //
        // If it is NULL then we have to go find the highest Bcb or listhead which
        // comes before the guy we are looking for, i.e., its predecessor.
        //

        if (NextVacbArray == NULL) {

            //
            // Back up to look for the highest guy earlier in this tree, i.e., the
            // predecessor listhead.
            //

            while (TRUE) {

                //
                // Scan, if we can, in the current array for a non-null index.
                //

                if (FailToSuccessor) {

                    if (Index != VACB_LAST_INDEX_FOR_LEVEL) {

                        while ((Index != VACB_LAST_INDEX_FOR_LEVEL) && (VacbArray[++Index] == NULL)) {
                            continue;
                        }

                        //
                        // If we found a non-null index, get out and try to return the
                        // listhead.
                        //

                        if ((NextVacbArray = (PVACB *)VacbArray[Index]) != NULL) {
                            break;
                        }
                    }

                } else {

                    if (Index != 0) {

                        while ((Index != 0) && (VacbArray[--Index] == NULL)) {
                            continue;
                        }

                        //
                        // If we found a non-null index, get out and try to return the
                        // listhead.
                        //

                        if ((NextVacbArray = (PVACB *)VacbArray[Index]) != NULL) {
                            break;
                        }
                    }
                }

                //
                // If there are no saved levels yet, then there is no predecessor or
                // successor - it is the main listhead.
                //

                if (SavedLevels == 0) {
                    return &SharedCacheMap->BcbList;
                }

                //
                // Otherwise, we can pop up a level in the tree and start scanning
                // from that guy for a path to the right listhead.
                //

                Level += 1;
                Index = SavedIndexes[--SavedLevels];
                VacbArray = SavedVacbArrays[SavedLevels];
            }

            //
            // We have backed up in the hierarchy, so now we are just looking for the
            // highest/lowest guy in the level we want, i.e., the level-linking listhead.
            // So smash FileOffset accordingly (we mask the high bits out anyway).
            //

            if (FailToSuccessor) {
                FileOffset = 0;
            } else {
                FileOffset = MAXLONGLONG;
            }
        }

        //
        // We save Index and VacbArray at each level, for the case that we
        // have to walk back up the tree to find a predecessor.
        //

        SavedIndexes[SavedLevels] = Index;
        SavedVacbArrays[SavedLevels] = VacbArray;
        SavedLevels += 1;

        //
        // Now make this one our current pointer, and mask away the extraneous high-order
        // FileOffset bits for this level.
        //

        VacbArray = NextVacbArray;
        FileOffset &= ((LONGLONG)1 << Shift) - 1;
        Shift -= VACB_LEVEL_SHIFT;

        //
        // Loop until we hit the bottom level.
        //

    } while (Level != 0);

    //
    // Now calculate the index for the bottom level and return the appropriate listhead.
    // (The normal Vacb index indexes to a pointer to a Vacb for a .25MB view, so dropping
    // the low bit gets you to the even-indexed Vacb pointer which is one block size below
    // the two-pointer listhead for the Bcbs for that .5MB range...)
    //

    Index = (ULONG)(FileOffset >> Shift);

    return (PLIST_ENTRY)((PCHAR)&VacbArray[Index & ~1] + VACB_LEVEL_BLOCK_SIZE);
}
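
//
// The address math in the return statement above relies on the layout of a
// bottom-level block that carries Bcb listheads.  As an illustrative sketch
// (assuming VACB_LEVEL_SHIFT == 7, so 128 PVACB pointers per block, followed
// by 64 LIST_ENTRY listheads, followed by the VACB_LEVEL_REFERENCE):
//
//     +------------------------------+  offset 0
//     | 128 PVACB pointers           |
//     +------------------------------+  offset VACB_LEVEL_BLOCK_SIZE
//     | 64 LIST_ENTRY Bcb listheads  |
//     +------------------------------+  offset VACB_LEVEL_BLOCK_SIZE * 2
//     | VACB_LEVEL_REFERENCE         |
//     +------------------------------+
//
// Listhead k serves views 2k and 2k+1, so clearing the low bit of Index and
// biasing by one block size lands exactly on the right listhead:
//

#if 0

typedef struct _LIST_ENTRY_SKETCH {
    struct _LIST_ENTRY_SKETCH *Flink;
    struct _LIST_ENTRY_SKETCH *Blink;
} LIST_ENTRY_SKETCH;

#define BLOCK_SIZE (128 * sizeof(void *))   /* assumed VACB_LEVEL_BLOCK_SIZE */

LIST_ENTRY_SKETCH *
ListHeadForIndex (
    void **VacbArray,
    unsigned Index
    )
{
    //
    // &VacbArray[Index & ~1u] is the even pointer of the pair; one block
    // size further on sits the LIST_ENTRY shared by that pair of views.
    //

    return (LIST_ENTRY_SKETCH *)((char *)&VacbArray[Index & ~1u] + BLOCK_SIZE);
}

#endif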

VOID
CcAdjustVacbLevelLockCount (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset,
    IN LONG Adjustment
    )

/*++

Routine Description:

    This routine may be called to adjust the lock count of the bottom Vacb level when
    Bcbs are inserted or deleted.  If the count goes to zero, the level will be
    eliminated.  The bottom level must exist, or we crash!

Arguments:

    SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the Vacb
                     is desired.

    FileOffset - Supplies the fileOffset corresponding to the desired Vacb.

    Adjustment - Generally -1 or +1.

Return Value:

    None.

Environment:

    CcVacbSpinLock should be held on entry.

--*/

{
    ULONG Level, Shift;
    PVACB *VacbArray;
    LONGLONG OriginalFileOffset = FileOffset;

    //
    // Initialize variables controlling our descent into the hierarchy.
    //

    Level = 0;
    Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT;
    VacbArray = SharedCacheMap->Vacbs;

    //
    // Caller must have verified that we have a hierarchy, otherwise this routine
    // would fail.
    //

    ASSERT(SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL);

    //
    // Loop to calculate how many levels we have and how much we have to
    // shift to index into the first level.
    //

    do {
        Level += 1;
        Shift += VACB_LEVEL_SHIFT;
    } while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift));

    //
    // Now descend the tree to the bottom level to get the caller's Vacb.
    //

    Shift -= VACB_LEVEL_SHIFT;

    do {
        VacbArray = (PVACB *)VacbArray[(ULONG)(FileOffset >> Shift)];
        Level -= 1;
        FileOffset &= ((LONGLONG)1 << Shift) - 1;
        Shift -= VACB_LEVEL_SHIFT;
    } while (Level != 0);

    //
    // Now we have reached the final level, do the adjustment.
    //

    ReferenceVacbLevel( SharedCacheMap, VacbArray, Level, Adjustment, FALSE );

    //
    // Now, if we decremented the count to 0, then force the collapse to happen by
    // upping count and resetting to NULL.  Then smash OriginalFileOffset to be
    // the first entry so we do not recalculate!
    //

    if (!IsVacbLevelReferenced( SharedCacheMap, VacbArray, Level )) {

        ReferenceVacbLevel( SharedCacheMap, VacbArray, Level, 1, TRUE );

        OriginalFileOffset &= ~(VACB_SIZE_OF_FIRST_LEVEL - 1);
        CcSetVacbLargeOffset( SharedCacheMap, OriginalFileOffset, VACB_SPECIAL_DEREFERENCE );
    }
}

VOID
CcCalculateVacbLevelLockCount (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PVACB *VacbArray,
    IN ULONG Level
    )

/*++

Routine Description:

    This routine may be called to calculate or recalculate the lock count on a
    given Vacb level array.  It is called, for example, when we are extending a
    section up to the point where we activate multilevel logic and want to start
    keeping the count.

Arguments:

    SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the Vacb
                     is desired.

    VacbArray - The Vacb Level array to recalculate

    Level - Supplies 0 for the bottom level, nonzero otherwise.

Return Value:

    None.

Environment:

    CcVacbSpinLock should be held on entry.

--*/

{
    PBCB Bcb;
    ULONG Index;
    LONG Count = 0;
    PVACB *VacbTemp = VacbArray;
    PVACB_LEVEL_REFERENCE VacbReference;

    //
    // First loop through to count how many Vacb pointers are in use.
    //

    for (Index = 0; Index <= VACB_LAST_INDEX_FOR_LEVEL; Index++) {
        if (*(VacbTemp++) != NULL) {
            Count += 1;
        }
    }

    //
    // If this is a metadata stream, we also have to count the Bcbs in the
    // corresponding listheads.
    //

    if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) && (Level == 0)) {

        //
        // Pick up the Blink of the first listhead, casting it to a Bcb.
        //

        Bcb = (PBCB)CONTAINING_RECORD(((PLIST_ENTRY)VacbTemp)->Blink, BCB, BcbLinks);
        Index = 0;

        //
        // Now loop through the list.  For each Bcb we see, increment the count,
        // and for each listhead, increment Index.  We are done when we hit the
        // last listhead, which is actually the next listhead past the ones in this
        // block.
        //

        do {

            if (Bcb->NodeTypeCode == CACHE_NTC_BCB) {
                Count += 1;
            } else {
                Index += 1;
            }

            Bcb = (PBCB)CONTAINING_RECORD(Bcb->BcbLinks.Blink, BCB, BcbLinks);

        } while (Index <= (VACB_LAST_INDEX_FOR_LEVEL / 2));
    }

    //
    // Store the count and get out...  (by hand, don't touch the special count)
    //

    VacbReference = VacbLevelReference( SharedCacheMap, VacbArray, Level );
    VacbReference->Reference = Count;
}

PVACB
CcGetVacbLargeOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset
    )

/*++

Routine Description:

    This routine may be called to return the Vacb for the specified FileOffset.
    It should only be called if the SectionSize is greater than VACB_SIZE_OF_FIRST_LEVEL.

Arguments:

    SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the Vacb
                     is desired.

    FileOffset - Supplies the fileOffset corresponding to the desired Vacb.

Return Value:

    Returns the desired Vacb pointer or NULL if there is none.

Environment:

    CcVacbSpinLock should be held on entry.

--*/

{
    ULONG Level, Shift;
    PVACB *VacbArray;
    PVACB Vacb;

    //
    // Initialize variables controlling our descent into the hierarchy.
    //

    Level = 0;
    Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT;
    VacbArray = SharedCacheMap->Vacbs;

    //
    // Caller must have verified that we have a hierarchy, otherwise this routine
    // would fail.
    //

    ASSERT(SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL);

    //
    // Loop to calculate how many levels we have and how much we have to
    // shift to index into the first level.
    //

    do {
        Level += 1;
        Shift += VACB_LEVEL_SHIFT;
    } while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift));

    //
    // Now descend the tree to the bottom level to get the caller's Vacb.
    //

    Shift -= VACB_LEVEL_SHIFT;

    while (((Vacb = (PVACB)VacbArray[FileOffset >> Shift]) != NULL) && (Level != 0)) {
        Level -= 1;
        VacbArray = (PVACB *)Vacb;
        FileOffset &= ((LONGLONG)1 << Shift) - 1;
        Shift -= VACB_LEVEL_SHIFT;
    }

    //
    // If the Vacb we exited with is not NULL, we want to make sure it looks OK.
    //

    ASSERT(Vacb == NULL || ((Vacb >= CcVacbs) && (Vacb < CcBeyondVacbs)));

    return Vacb;
}
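
//
// A worked example of the sizing and descent arithmetic above, illustrative
// only.  It assumes the conventional values VACB_OFFSET_SHIFT == 18 (256KB
// views) and VACB_LEVEL_SHIFT == 7 (128 pointers per block), which make
// VACB_SIZE_OF_FIRST_LEVEL 32MB.  For a 1GB section the sizing loop runs
// once, leaving Level == 1 (a two-deep tree); FileOffset 0x12345678 then
// indexes the top block at (0x12345678 >> 25) == 9, masks down to 0x345678,
// and indexes the bottom block at (0x345678 >> 18) == 13.
//

#if 0

#include <stdio.h>

#define OFFSET_SHIFT 18                 /* assumed VACB_OFFSET_SHIFT */
#define LEVEL_SHIFT  7                  /* assumed VACB_LEVEL_SHIFT  */

int
main (void)
{
    long long SectionSize = 1LL << 30;  /* 1GB section */
    long long FileOffset = 0x12345678;
    unsigned Level = 0;
    unsigned Shift = OFFSET_SHIFT + LEVEL_SHIFT;

    //
    // Same sizing loop as above: find how many levels cover the section.
    //

    do {
        Level += 1;
        Shift += LEVEL_SHIFT;
    } while (SectionSize > (1LL << Shift));

    Shift -= LEVEL_SHIFT;

    //
    // Walk down, printing the index consumed at each level; the line for
    // level 0 is the index of the Vacb pointer itself.  Prints:
    //
    //     level 1: index 9
    //     level 0: index 13
    //

    for (;;) {

        printf( "level %u: index %u\n", Level, (unsigned)(FileOffset >> Shift) );

        if (Level == 0) {
            break;
        }

        Level -= 1;
        FileOffset &= (1LL << Shift) - 1;
        Shift -= LEVEL_SHIFT;
    }

    return 0;
}

#endif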

VOID
CcSetVacbLargeOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset,
    IN PVACB Vacb
    )

/*++

Routine Description:

    This routine may be called to set the specified Vacb pointer for the specified FileOffset.
    It should only be called if the SectionSize is greater than VACB_SIZE_OF_FIRST_LEVEL.

    For a non-null Vacb, intermediate Vacb levels will be added as necessary, and if the lowest
    level has Bcb listheads, these will also be added.  For this case the caller must acquire
    the spinlock by calling CcPrefillVacbLevelZone specifying the worst-case number of levels
    required.

    For a null Vacb pointer, the tree is pruned of all Vacb levels that go empty.  If the lowest
    level has Bcb listheads, then they are removed.  The caller should subsequently call
    CcDrainVacbLevelZone once the spinlock is released to actually free some of this zone to the
    pool.

Arguments:

    SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the Vacb
                     is desired.

    FileOffset - Supplies the fileOffset corresponding to the desired Vacb.

    Vacb - Supplies the Vacb pointer to store, NULL to clear the entry, or a special
           value such as VACB_SPECIAL_DEREFERENCE to adjust the level reference counts.

Return Value:

    None.

Environment:

    CcVacbSpinLock should be held on entry.

--*/

{
    ULONG Level, Shift;
    PVACB *VacbArray, *NextVacbArray;
    ULONG Index;
    ULONG SavedIndexes[VACB_NUMBER_OF_LEVELS];
    PVACB *SavedVacbArrays[VACB_NUMBER_OF_LEVELS];
    PLIST_ENTRY PredecessorListHead, SuccessorListHead, CurrentListHead;
    LOGICAL AllocatingBcbListHeads, Special = FALSE;
    LONGLONG OriginalFileOffset = FileOffset;
    ULONG SavedLevels = 0;

    //
    // Initialize variables controlling our descent into the hierarchy.
    //

    Level = 0;
    Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT;
    VacbArray = SharedCacheMap->Vacbs;

    //
    // Caller must have verified that we have a hierarchy, otherwise this routine
    // would fail.
    //

    ASSERT(SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL);

    //
    // Loop to calculate how many levels we have and how much we have to
    // shift to index into the first level.
    //

    do {
        Level += 1;
        Shift += VACB_LEVEL_SHIFT;
    } while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift));

    //
    // Now descend the tree to the bottom level to set the caller's Vacb.
    //

    Shift -= VACB_LEVEL_SHIFT;

    do {

        //
        // Decrement back to the level that describes the size we are within.
        //

        Level -= 1;

        //
        // Calculate the index into the Vacb block for this level.
        //

        Index = (ULONG)(FileOffset >> Shift);
        ASSERT(Index <= VACB_LAST_INDEX_FOR_LEVEL);

        //
        // We save Index and VacbArray at each level, for the case that we
        // are collapsing and deallocating blocks below.
        //

        SavedIndexes[SavedLevels] = Index;
        SavedVacbArrays[SavedLevels] = VacbArray;
        SavedLevels += 1;

        //
        // Get block address for next level.
        //

        NextVacbArray = (PVACB *)VacbArray[Index];

        //
        // If it is NULL then we have to allocate the next level to fill it in.
        //

        if (NextVacbArray == NULL) {

            //
            // We better not be thinking we're dereferencing a level if the level
            // doesn't currently exist.
            //

            ASSERT( Vacb != VACB_SPECIAL_DEREFERENCE );

            AllocatingBcbListHeads = FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) && (Level == 0);

            //
            // This is only valid if we are setting a nonzero pointer!
            //

            ASSERT(Vacb != NULL);

            NextVacbArray = CcAllocateVacbLevel(AllocatingBcbListHeads);

            //
            // If we allocated Bcb Listheads, we must link them in.
            //

            if (AllocatingBcbListHeads) {

                ULONG i;

                //
                // Find our predecessor.
                //

                PredecessorListHead = CcGetBcbListHeadLargeOffset( SharedCacheMap, OriginalFileOffset, FALSE );

                //
                // If he is followed by any Bcbs, they "belong" to him, and we have to
                // skip over them.
                //

                while (((PBCB)CONTAINING_RECORD(PredecessorListHead->Blink, BCB, BcbLinks))->NodeTypeCode ==
                       CACHE_NTC_BCB) {
                    PredecessorListHead = (PLIST_ENTRY)PredecessorListHead->Blink;
                }

                //
                // Point to the first newly allocated listhead.
                //

                CurrentListHead = (PLIST_ENTRY)((PCHAR)NextVacbArray + VACB_LEVEL_BLOCK_SIZE);

                //
                // Link first new listhead to predecessor.
                //

                SuccessorListHead = PredecessorListHead->Blink;
                PredecessorListHead->Blink = CurrentListHead;
                CurrentListHead->Flink = PredecessorListHead;

                //
                // Now loop to link all of the new listheads together.
                //

                for (i = 0; i < ((VACB_LEVEL_BLOCK_SIZE / sizeof(LIST_ENTRY) - 1)); i++) {
                    CurrentListHead->Blink = CurrentListHead + 1;
                    CurrentListHead += 1;
                    CurrentListHead->Flink = CurrentListHead - 1;
                }

                //
                // Finally link the last new listhead to the successor.
                //

                CurrentListHead->Blink = SuccessorListHead;
                SuccessorListHead->Flink = CurrentListHead;
            }

            VacbArray[Index] = (PVACB)NextVacbArray;

            //
            // Increment the reference count.  Note that Level right now properly indicates
            // what level NextVacbArray is at, not VacbArray.
            //

            ReferenceVacbLevel( SharedCacheMap, VacbArray, Level + 1, 1, FALSE );
        }

        //
        // Now make this one our current pointer, and mask away the extraneous high-order
        // FileOffset bits for this level and reduce the shift count.
        //

        VacbArray = NextVacbArray;
        FileOffset &= ((LONGLONG)1 << Shift) - 1;
        Shift -= VACB_LEVEL_SHIFT;

        //
        // Loop until we hit the bottom level.
        //

    } while (Level != 0);

    if (Vacb < VACB_SPECIAL_FIRST_VALID) {

        //
        // Now calculate the index for the bottom level and store the caller's Vacb pointer.
        //

        Index = (ULONG)(FileOffset >> Shift);
        VacbArray[Index] = Vacb;

    //
    // Handle the special actions.
    //

    } else {

        Special = TRUE;

        //
        // Induce the dereference.
        //

        if (Vacb == VACB_SPECIAL_DEREFERENCE) {
            Vacb = NULL;
        }
    }

    //
    // If he is storing a nonzero pointer, just reference the level.
    //

    if (Vacb != NULL) {

        ASSERT( !(Special && Level != 0) );

        ReferenceVacbLevel( SharedCacheMap, VacbArray, Level, 1, Special );

    //
    // Otherwise we are storing a NULL pointer, and we have to see if we can collapse
    // the tree by deallocating empty blocks of pointers.
    //

    } else {

        //
        // Loop until doing all possible collapse except for the top level.
        //

        while (TRUE) {

            ReferenceVacbLevel( SharedCacheMap, VacbArray, Level, -1, Special );

            //
            // If this was a special dereference, then recognize that this was
            // the only one.  The rest, as we tear up the tree, are regular
            // (calculable) references.
            //

            Special = FALSE;

            //
            // Now, if we have an empty block (other than the top one), then we should free the
            // block and keep looping.
            //

            if (!IsVacbLevelReferenced( SharedCacheMap, VacbArray, Level ) && (SavedLevels != 0)) {

                SavedLevels -= 1;

                //
                // First see if we have Bcb Listheads to delete and if so, we have to unlink
                // the whole block first.
                //

                AllocatingBcbListHeads = FALSE;

                if ((Level++ == 0) && FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) {

                    AllocatingBcbListHeads = TRUE;

                    PredecessorListHead = ((PLIST_ENTRY)((PCHAR)VacbArray + VACB_LEVEL_BLOCK_SIZE))->Flink;
                    SuccessorListHead = ((PLIST_ENTRY)((PCHAR)VacbArray + (VACB_LEVEL_BLOCK_SIZE * 2) - sizeof(LIST_ENTRY)))->Blink;
                    PredecessorListHead->Blink = SuccessorListHead;
                    SuccessorListHead->Flink = PredecessorListHead;
                }

                //
                // Free the unused block and then pick up the saved parent pointer array and
                // index and erase the pointer to this block.
                //

                CcDeallocateVacbLevel( VacbArray, AllocatingBcbListHeads );

                Index = SavedIndexes[SavedLevels];
                VacbArray = SavedVacbArrays[SavedLevels];
                VacbArray[Index] = NULL;

            //
            // No more collapsing if we hit a block that still has pointers, or we hit the root.
            //

            } else {

                break;
            }
        }
    }
}
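
//
// The collapse loop above amounts to reference-counted teardown along a
// saved path.  A minimal stand-alone sketch (hypothetical names; Path and
// Index play the roles of SavedVacbArrays and SavedIndexes):
//

#if 0

#include <stdlib.h>

#define FANOUT 128                      /* assumed entries per level block */

typedef struct _NODE {
    void *Child[FANOUT];
    long Reference;                     /* count of non-null children */
} NODE;

//
// Path[0] is the root, Path[Depth] the bottom node; Index[i] is the slot
// taken in Path[i] to reach Path[i + 1].  The first decrement accounts for
// the entry the caller just cleared in the bottom node; each later decrement
// accounts for a freed child block.
//

static void
Collapse (
    NODE **Path,
    unsigned *Index,
    unsigned Depth
    )
{
    for (;;) {

        Path[Depth]->Reference -= 1;

        //
        // Stop at the first node that is still referenced, or at the root,
        // which is never freed here.
        //

        if (Path[Depth]->Reference != 0 || Depth == 0) {
            break;
        }

        free( Path[Depth] );
        Path[Depth - 1]->Child[Index[Depth - 1]] = NULL;
        Depth -= 1;
    }
}

#endif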

VOID
CcGetActiveVacb (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    OUT PVACB *Vacb,
    OUT PULONG Page,
    OUT PULONG Dirty
    )

/*++

Routine Description:

    This routine retrieves and clears the active page hint from a shared cache map.

    Originally, this routine was a macro.  To reduce the nonpaged footprint of the
    system we want to page as much as possible, and it turns out this was the only
    reason a substantial part of the cache manager wasn't pageable.

Arguments:

    SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the active
                     Vacb is desired.

    Vacb - Receives the active Vacb

    Page - Receives the active Page #

    Dirty - Receives ACTIVE_PAGE_IS_DIRTY if the page has dirty data

Return Value:

    None.

Environment:

    Passive.

--*/

{
    KIRQL Irql;

    ExAcquireFastLock(&SharedCacheMap->ActiveVacbSpinLock, &Irql);

    *Vacb = SharedCacheMap->ActiveVacb;

    if (*Vacb != NULL) {
        *Page = SharedCacheMap->ActivePage;
        SharedCacheMap->ActiveVacb = NULL;
        *Dirty = SharedCacheMap->Flags & ACTIVE_PAGE_IS_DIRTY;
    }

    ExReleaseFastLock(&SharedCacheMap->ActiveVacbSpinLock, Irql);
}
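
//
// A user-mode analogue of the hint handoff above, illustrative only
// (hypothetical names): the getter claims the hint by swapping it out under
// the lock, so exactly one caller ends up owning the active page.
//

#if 0

#include <stddef.h>
#include <pthread.h>

static pthread_mutex_t HintLock = PTHREAD_MUTEX_INITIALIZER;
static void *ActiveHint = NULL;

void *
ClaimHint (void)
{
    void *Hint;

    pthread_mutex_lock( &HintLock );

    Hint = ActiveHint;
    ActiveHint = NULL;                  /* leave nothing behind to reuse */

    pthread_mutex_unlock( &HintLock );

    return Hint;
}

#endif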

VOID
CcSetActiveVacb (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN OUT PVACB *Vacb,
    IN ULONG Page,
    IN ULONG Dirty
    )

/*++

Routine Description:

    This routine sets the active page hint for a shared cache map.

    Originally, this routine was a macro.  To reduce the nonpaged footprint of the
    system we want to page as much as possible, and it turns out this was the only
    reason a substantial part of the cache manager wasn't pageable.

Arguments:

    SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the active
                     Vacb is desired.

    Vacb - Supplies the new active Vacb

    Page - Supplies the new active Page #

    Dirty - Supplies ACTIVE_PAGE_IS_DIRTY if the page has dirty data

Return Value:

    None.

Environment:

    Passive.

--*/

{
    KIRQL Irql;

    //
    // When setting dirty, when we set ACTIVE_PAGE_IS_DIRTY the first time,
    // we increment the dirty counts, and they never get decremented until
    // CcFreeActiveVacb.  If we are trying to set and there is already an
    // active Vacb *or* we are trying to set a clean one and the flag above
    // is set, we do not allow it, and we just free the vacb (we only want
    // to handle the clean transition in one place).
    //
    // MP & UP cases are separately defined, because I do not trust the compiler
    // to otherwise generate the optimal UP code.
    //

    //
    // In the MP case, we test if we are setting the page dirty, because then
    // we must acquire CcMasterSpinLock to diddle CcDirtyPages.
    //

    //
    // In the UP case, any FastLock will do, so we just use the ActiveVacb lock, and do not
    // explicitly acquire CcMasterSpinLock.
    //

#if !defined(NT_UP)

    if (Dirty) {
        CcAcquireMasterLock(&Irql);
        ExAcquireSpinLockAtDpcLevel(&SharedCacheMap->ActiveVacbSpinLock);
    } else {
        ExAcquireSpinLock(&SharedCacheMap->ActiveVacbSpinLock, &Irql);
    }

#else

    ExAcquireFastLock(&SharedCacheMap->ActiveVacbSpinLock, &Irql);

#endif

    do {

        if (SharedCacheMap->ActiveVacb == NULL) {

            if ((SharedCacheMap->Flags & ACTIVE_PAGE_IS_DIRTY) != Dirty) {

                if (Dirty) {

                    SharedCacheMap->ActiveVacb = *Vacb;
                    SharedCacheMap->ActivePage = Page;
                    *Vacb = NULL;
                    SetFlag(SharedCacheMap->Flags, ACTIVE_PAGE_IS_DIRTY);

                    CcTotalDirtyPages += 1;
                    SharedCacheMap->DirtyPages += 1;

                    if (SharedCacheMap->DirtyPages == 1) {

                        PLIST_ENTRY Blink;
                        PLIST_ENTRY Entry;
                        PLIST_ENTRY Flink;
                        PLIST_ENTRY Head;

                        Entry = &SharedCacheMap->SharedCacheMapLinks;
                        Blink = Entry->Blink;
                        Flink = Entry->Flink;
                        Blink->Flink = Flink;
                        Flink->Blink = Blink;

                        Head = &CcDirtySharedCacheMapList.SharedCacheMapLinks;
                        Blink = Head->Blink;
                        Entry->Flink = Head;
                        Entry->Blink = Blink;
                        Blink->Flink = Entry;
                        Head->Blink = Entry;

                        if (!LazyWriter.ScanActive) {

                            LazyWriter.ScanActive = TRUE;

#if !defined(NT_UP)
                            ExReleaseSpinLockFromDpcLevel(&SharedCacheMap->ActiveVacbSpinLock);
                            CcReleaseMasterLock(Irql);
#else
                            ExReleaseFastLock(&SharedCacheMap->ActiveVacbSpinLock, Irql);
#endif

                            KeSetTimer( &LazyWriter.ScanTimer,
                                        CcFirstDelay,
                                        &LazyWriter.ScanDpc );

                            break;
                        }
                    }
                }

            } else {

                SharedCacheMap->ActiveVacb = *Vacb;
                SharedCacheMap->ActivePage = Page;
                *Vacb = NULL;
            }
        }

#if !defined(NT_UP)

        if (Dirty) {
            ExReleaseSpinLockFromDpcLevel(&SharedCacheMap->ActiveVacbSpinLock);
            CcReleaseMasterLock(Irql);
        } else {
            ExReleaseSpinLock(&SharedCacheMap->ActiveVacbSpinLock, Irql);
        }

#else

        ExReleaseFastLock(&SharedCacheMap->ActiveVacbSpinLock, Irql);

#endif

        if (*Vacb != NULL) {
            CcFreeActiveVacb( SharedCacheMap, *Vacb, Page, Dirty );
        }

    } while (FALSE);
}
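
//
// The take-or-refuse policy above reduces to a small decision table: the
// slot is taken only when it is empty and either the dirtiness matches or
// this is the first clean-to-dirty transition; otherwise the caller keeps
// its page and frees it itself, so the dirty bookkeeping happens in exactly
// one place.  A minimal sketch (hypothetical names, locking omitted):
//

#if 0

#include <stddef.h>

typedef struct _HINT {
    void *Page;
    int Dirty;                          /* sticky once set, like the flag */
} HINT;

//
// Returns 1 if the hint took ownership of *Page (and nulled it), 0 if the
// caller must free the page itself.
//

static int
TrySetHint (
    HINT *Hint,
    void **Page,
    int Dirty
    )
{
    if (Hint->Page != NULL) {
        return 0;                       /* already an active page */
    }

    if (Hint->Dirty != Dirty) {

        if (!Dirty) {
            return 0;                   /* clean set over sticky dirty flag */
        }

        Hint->Dirty = 1;                /* first dirty transition: count here */
    }

    Hint->Page = *Page;
    *Page = NULL;

    return 1;
}

#endif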