Leaked source code of windows server 2003
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

3987 lines
122 KiB

  1. /*++
  2. Copyright (c) 1990 Microsoft Corporation
  3. Module Name:
  4. fssup.c
  5. Abstract:
  6. This module implements the File System support routines for the
  7. Cache subsystem.
  8. Author:
  9. Tom Miller [TomM] 4-May-1990
  10. Revision History:
  11. --*/
  12. #include "cc.h"
  13. //
  14. // The Bug check file id for this module
  15. //
  16. #define BugCheckFileId (CACHE_BUG_CHECK_FSSUP)
  17. //
  18. // Define our debug constant
  19. //
  20. #define me 0x00000001
  21. //
  22. // For your debugging pleasure, if the flag doesn't move! (Currently not used)
  23. //
  24. #define IsSyscacheFile(FO) (((FO) != NULL) && \
  25. (*(PUSHORT)(FO)->FsContext == 0X705) && \
  26. FlagOn(*(PULONG)((PCHAR)(FO)->FsContext + 0x48), 0x80000000))
  27. extern POBJECT_TYPE IoFileObjectType;
  28. extern ULONG MmLargeSystemCache;
  29. VOID
  30. CcUnmapAndPurge(
  31. IN PSHARED_CACHE_MAP SharedCacheMap
  32. );
  33. VOID
  34. CcDeleteMbcb(
  35. IN PSHARED_CACHE_MAP SharedCacheMap
  36. );
  37. VOID
  38. CcDeleteBcbs (
  39. IN PSHARED_CACHE_MAP SharedCacheMap
  40. );
  41. VOID
  42. CcPurgeAndClearCacheSection (
  43. IN PSHARED_CACHE_MAP SharedCacheMap,
  44. IN PLARGE_INTEGER FileOffset
  45. );
  46. #ifdef ALLOC_PRAGMA
  47. #pragma alloc_text(INIT,CcInitializeCacheManager)
  48. #pragma alloc_text(PAGE,CcZeroData)
  49. #endif
  50. BOOLEAN
  51. CcInitializeCacheManager (
  52. )
  53. /*++
  54. Routine Description:
  55. This routine must be called during system initialization before the
  56. first call to any file system, to allow the Cache Manager to initialize
  57. its global data structures. This routine has no dependencies on other
  58. system components being initialized.
  59. Arguments:
  60. None
  61. Return Value:
  62. TRUE if initialization was successful
  63. --*/
  64. {
  65. CLONG i;
  66. ULONG Index;
  67. PGENERAL_LOOKASIDE Lookaside;
  68. USHORT NumberOfItems;
  69. PKPRCB Prcb;
  70. PWORK_QUEUE_ITEM WorkItem;
  71. #ifdef CCDBG_LOCK
  72. KeInitializeSpinLock( &CcDebugTraceLock );
  73. #endif
  74. #if DBG
  75. CcBcbCount = 0;
  76. InitializeListHead( &CcBcbList );
  77. #endif
  78. //
  79. // Figure out the timeout clock tick for the lazy writer.
  80. //
  81. CcIdleDelayTick = LAZY_WRITER_IDLE_DELAY / KeQueryTimeIncrement();
  82. //
  83. // Initialize shared cache map list structures
  84. //
  85. InitializeListHead( &CcCleanSharedCacheMapList );
  86. InitializeListHead( &CcDirtySharedCacheMapList.SharedCacheMapLinks );
  87. CcDirtySharedCacheMapList.Flags = IS_CURSOR;
  88. InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
  89. &CcLazyWriterCursor.SharedCacheMapLinks );
  90. CcLazyWriterCursor.Flags = IS_CURSOR;
  91. //
  92. // Initialize worker thread structures
  93. //
  94. InitializeListHead( &CcIdleWorkerThreadList );
  95. InitializeListHead( &CcExpressWorkQueue );
  96. InitializeListHead( &CcRegularWorkQueue );
  97. InitializeListHead( &CcPostTickWorkQueue );
  98. //
  99. // Set the number of worker threads based on the system size.
  100. //
  101. CcCapturedSystemSize = MmQuerySystemSize();
  102. if (CcNumberWorkerThreads == 0) {
  103. switch (CcCapturedSystemSize) {
  104. case MmSmallSystem:
  105. CcNumberWorkerThreads = ExCriticalWorkerThreads - 1;
  106. CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
  107. CcAggressiveZeroThreshold = 1;
  108. break;
  109. case MmMediumSystem:
  110. CcNumberWorkerThreads = ExCriticalWorkerThreads - 1;
  111. CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
  112. CcAggressiveZeroThreshold = 2;
  113. break;
  114. case MmLargeSystem:
  115. CcNumberWorkerThreads = ExCriticalWorkerThreads - 2;
  116. CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4 +
  117. MmNumberOfPhysicalPages / 8;
  118. CcAggressiveZeroThreshold = 4;
  119. break;
  120. default:
  121. CcNumberWorkerThreads = 1;
  122. CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
  123. }
  124. if (MmSystemCacheWs.MaximumWorkingSetSize > ((4*1024*1024)/PAGE_SIZE)) {
  125. CcDirtyPageThreshold = (ULONG)(MmSystemCacheWs.MaximumWorkingSetSize -
  126. ((2*1024*1024)/PAGE_SIZE));
  127. }
  128. CcDirtyPageTarget = CcDirtyPageThreshold / 2 +
  129. CcDirtyPageThreshold / 4;
  130. }
  131. CcAggressiveZeroCount = 0;
  132. //
  133. // Now allocate and initialize the above number of worker thread
  134. // items.
  135. //
  136. for (i = 0; i < CcNumberWorkerThreads; i++) {
  137. WorkItem = ExAllocatePoolWithTag( NonPagedPool, sizeof(WORK_QUEUE_ITEM), 'qWcC' );
  138. if (WorkItem == NULL) {
  139. CcBugCheck( 0, 0, 0 );
  140. }
  141. //
  142. // Initialize the work queue item and insert in our queue
  143. // of potential worker threads.
  144. //
  145. ExInitializeWorkItem( WorkItem, CcWorkerThread, WorkItem );
  146. InsertTailList( &CcIdleWorkerThreadList, &WorkItem->List );
  147. }
  148. //
  149. // Initialize the Lazy Writer thread structure, and start him up.
  150. //
  151. RtlZeroMemory( &LazyWriter, sizeof(LAZY_WRITER) );
  152. InitializeListHead( &LazyWriter.WorkQueue );
  153. //
  154. // Initialize the Scan Dpc and Timer.
  155. //
  156. KeInitializeDpc( &LazyWriter.ScanDpc, &CcScanDpc, NULL );
  157. KeInitializeTimer( &LazyWriter.ScanTimer );
  158. //
  159. // Now initialize the lookaside list for allocating Work Queue entries.
  160. //
  161. switch ( CcCapturedSystemSize ) {
  162. //
  163. // ~512 bytes
  164. //
  165. case MmSmallSystem :
  166. NumberOfItems = 32;
  167. break;
  168. //
  169. // ~1k bytes
  170. //
  171. case MmMediumSystem :
  172. NumberOfItems = 64;
  173. break;
  174. //
  175. // ~2k bytes
  176. //
  177. case MmLargeSystem :
  178. NumberOfItems = 128;
  179. if (MmIsThisAnNtAsSystem()) {
  180. NumberOfItems += 128;
  181. }
  182. break;
  183. }
  184. ExInitializeSystemLookasideList( &CcTwilightLookasideList,
  185. NonPagedPool,
  186. sizeof( WORK_QUEUE_ENTRY ),
  187. 'kWcC',
  188. NumberOfItems,
  189. &ExSystemLookasideListHead );
  190. //
  191. // Initialize the per processor nonpaged lookaside lists and descriptors.
  192. //
  193. for (Index = 0; Index < (ULONG)KeNumberProcessors; Index += 1) {
  194. Prcb = KiProcessorBlock[Index];
  195. //
  196. // Initialize the large IRP per processor lookaside pointers.
  197. //
  198. Prcb->PPLookasideList[LookasideTwilightList].L = &CcTwilightLookasideList;
  199. Lookaside = ExAllocatePoolWithTag( NonPagedPool,
  200. sizeof(GENERAL_LOOKASIDE),
  201. 'KWcC');
  202. if (Lookaside != NULL) {
  203. ExInitializeSystemLookasideList( Lookaside,
  204. NonPagedPool,
  205. sizeof( WORK_QUEUE_ENTRY ),
  206. 'KWcC',
  207. NumberOfItems,
  208. &ExSystemLookasideListHead );
  209. } else {
  210. Lookaside = &CcTwilightLookasideList;
  211. }
  212. Prcb->PPLookasideList[LookasideTwilightList].P = Lookaside;
  213. }
  214. //
  215. // Initialize the Deferred Write List.
  216. //
  217. KeInitializeSpinLock( &CcDeferredWriteSpinLock );
  218. InitializeListHead( &CcDeferredWrites );
  219. //
  220. // Initialize the Vacbs.
  221. //
  222. CcInitializeVacbs();
  223. return TRUE;
  224. }
  225. VOID
  226. CcInitializeCacheMap (
  227. IN PFILE_OBJECT FileObject,
  228. IN PCC_FILE_SIZES FileSizes,
  229. IN BOOLEAN PinAccess,
  230. IN PCACHE_MANAGER_CALLBACKS Callbacks,
  231. IN PVOID LazyWriteContext
  232. )
  233. /*++
  234. Routine Description:
  235. This routine is intended to be called by File Systems only. It
  236. initializes the cache maps for data caching. It should be called
  237. every time a file is opened or created, and NO_INTERMEDIATE_BUFFERING
  238. was specified as FALSE.
  239. Arguments:
  240. FileObject - A pointer to the newly-created file object.
  241. FileSizes - A pointer to AllocationSize, FileSize and ValidDataLength
  242. for the file. ValidDataLength should contain MAXLONGLONG if
  243. valid data length tracking and callbacks are not desired.
  244. PinAccess - FALSE if file will be used exclusively for Copy and Mdl
  245. access, or TRUE if file will be used for Pin access.
  246. (Files for Pin access are not limited in size as the caller
  247. must access multiple areas of the file at once.)
  248. Callbacks - Structure of callbacks used by the Lazy Writer
  249. LazyWriteContext - Parameter to be passed in to above routine.
  250. Return Value:
  251. None. If an error occurs, this routine will Raise the status.
  252. --*/
  253. {
  254. KIRQL OldIrql;
  255. PSHARED_CACHE_MAP SharedCacheMap;
  256. PVOID CacheMapToFree = NULL;
  257. CC_FILE_SIZES LocalSizes;
  258. LOGICAL WeSetBeingCreated = FALSE;
  259. LOGICAL SharedListOwned = FALSE;
  260. LOGICAL MustUninitialize = FALSE;
  261. LOGICAL WeCreated = FALSE;
  262. PPRIVATE_CACHE_MAP PrivateCacheMap;
  263. NTSTATUS Status = STATUS_SUCCESS;
  264. DebugTrace(+1, me, "CcInitializeCacheMap:\n", 0 );
  265. DebugTrace( 0, me, " FileObject = %08lx\n", FileObject );
  266. DebugTrace( 0, me, " FileSizes = %08lx\n", FileSizes );
  267. //
  268. // Make a local copy of the passed in file sizes before acquiring
  269. // the spin lock.
  270. //
  271. LocalSizes = *FileSizes;
  272. //
  273. // If no FileSize was given, set to one byte before maximizing below.
  274. //
  275. if (LocalSizes.AllocationSize.QuadPart == 0) {
  276. LocalSizes.AllocationSize.LowPart += 1;
  277. }
  278. //
  279. // If caller has Write access or will allow write, then round
  280. // size to next create modulo. (***Temp*** there may be too many
  281. // apps that end up allowing shared write, thanks to our Dos heritage,
  282. // to keep that part of the check in.)
  283. //
  284. if (FileObject->WriteAccess /*|| FileObject->SharedWrite */) {
  285. LocalSizes.AllocationSize.QuadPart = LocalSizes.AllocationSize.QuadPart + (LONGLONG)(DEFAULT_CREATE_MODULO - 1);
  286. LocalSizes.AllocationSize.LowPart &= ~(DEFAULT_CREATE_MODULO - 1);
  287. } else {
  288. LocalSizes.AllocationSize.QuadPart = LocalSizes.AllocationSize.QuadPart + (LONGLONG)(VACB_MAPPING_GRANULARITY - 1);
  289. LocalSizes.AllocationSize.LowPart &= ~(VACB_MAPPING_GRANULARITY - 1);
  290. }
  291. //
  292. // Do the allocate of the SharedCacheMap, based on an unsafe test,
  293. // while not holding a spinlock. If the allocation fails, it's ok
  294. // to fail the request even though the test was unsafe.
  295. //
  296. if (FileObject->SectionObjectPointer->SharedCacheMap == NULL) {
  297. restart:
  298. ASSERT (CacheMapToFree == NULL);
  299. SharedCacheMap = ExAllocatePoolWithTag( NonPagedPool, sizeof(SHARED_CACHE_MAP), 'cScC' );
  300. if (SharedCacheMap == NULL) {
  301. DebugTrace( 0, 0, "Failed to allocate SharedCacheMap\n", 0 );
  302. ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
  303. }
  304. //
  305. // Stash a copy of it so we can free it in the error path below.
  306. //
  307. CacheMapToFree = SharedCacheMap;
  308. //
  309. // Zero the SharedCacheMap and fill in the nonzero portions later.
  310. //
  311. RtlZeroMemory( SharedCacheMap, sizeof(SHARED_CACHE_MAP) );
  312. #if OPEN_COUNT_LOG
  313. SharedCacheMap->OpenCountLog.Size = sizeof(SharedCacheMap->OpenCountLog.Log)/sizeof(CC_OPEN_COUNT_LOG_ENTRY);
  314. #endif
  315. //
  316. // Now initialize the Shared Cache Map.
  317. //
  318. SharedCacheMap->NodeTypeCode = CACHE_NTC_SHARED_CACHE_MAP;
  319. SharedCacheMap->NodeByteSize = sizeof(SHARED_CACHE_MAP);
  320. SharedCacheMap->FileObject = FileObject;
  321. SharedCacheMap->FileSize = LocalSizes.FileSize;
  322. SharedCacheMap->ValidDataLength = LocalSizes.ValidDataLength;
  323. SharedCacheMap->ValidDataGoal = LocalSizes.ValidDataLength;
  324. // SharedCacheMap->Section set below
  325. //
  326. // Initialize the spin locks.
  327. //
  328. KeInitializeSpinLock( &SharedCacheMap->ActiveVacbSpinLock );
  329. KeInitializeSpinLock( &SharedCacheMap->BcbSpinLock );
  330. ExInitializePushLock( &SharedCacheMap->VacbPushLock );
  331. if (PinAccess) {
  332. SetFlag(SharedCacheMap->Flags, PIN_ACCESS);
  333. }
  334. //
  335. // If this file has FO_SEQUENTIAL_ONLY set, then remember that
  336. // in the SharedCacheMap.
  337. //
  338. if (FlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY)) {
  339. SetFlag(SharedCacheMap->Flags, ONLY_SEQUENTIAL_ONLY_SEEN);
  340. }
  341. //
  342. // Do the round-robin allocation of the spinlock for the shared
  343. // cache map. Note the manipulation of the next
  344. // counter is safe, since we have the CcMasterSpinLock
  345. // exclusive.
  346. //
  347. InitializeListHead( &SharedCacheMap->BcbList );
  348. SharedCacheMap->Callbacks = Callbacks;
  349. SharedCacheMap->LazyWriteContext = LazyWriteContext;
  350. //
  351. // Initialize listhead for all PrivateCacheMaps
  352. //
  353. InitializeListHead( &SharedCacheMap->PrivateList );
  354. }
  355. //
  356. // Serialize Creation/Deletion of all Shared CacheMaps
  357. //
  358. SharedListOwned = TRUE;
  359. CcAcquireMasterLock( &OldIrql );
  360. //
  361. // Check for second initialization of same file object
  362. //
  363. if (FileObject->PrivateCacheMap != NULL) {
  364. DebugTrace( 0, 0, "CacheMap already initialized\n", 0 );
  365. CcReleaseMasterLock( OldIrql );
  366. if (CacheMapToFree != NULL) {
  367. ExFreePool(CacheMapToFree);
  368. }
  369. DebugTrace(-1, me, "CcInitializeCacheMap -> VOID\n", 0 );
  370. return;
  371. }
  372. //
  373. // Get current Shared Cache Map pointer indirectly off of the file object.
  374. // (The actual pointer is typically in a file system data structure, such
  375. // as an Fcb.)
  376. //
  377. SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
  378. //
  379. // If there is no SharedCacheMap, then we must create a section and
  380. // the SharedCacheMap structure.
  381. //
  382. if (SharedCacheMap == NULL) {
  383. //
  384. // Insert the new SharedCacheMap.
  385. //
  386. if (CacheMapToFree == NULL) {
  387. CcReleaseMasterLock( OldIrql );
  388. SharedListOwned = FALSE;
  389. goto restart;
  390. }
  391. SharedCacheMap = CacheMapToFree;
  392. CacheMapToFree = NULL;
  393. //
  394. // Insert the new Shared Cache Map in the global list
  395. //
  396. //
  397. // Note: We do NOT use the common CcInsertIntoCleanSharedCacheMapList
  398. // routine here because this shared cache map does not meet the
  399. // validation conditions we check for in that routine since it is
  400. // not finished being initialized.
  401. //
  402. InsertTailList( &CcCleanSharedCacheMapList,
  403. &SharedCacheMap->SharedCacheMapLinks );
  404. WeCreated = TRUE;
  405. //
  406. // Finally, store the pointer to the Shared Cache Map back
  407. // via the indirect pointer in the File Object.
  408. //
  409. FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
  410. //
  411. // We must reference this file object so that it cannot go away
  412. // until we do CcUninitializeCacheMap below. Note we cannot
  413. // find or rely on the FileObject that Memory Management has,
  414. // although normally it will be this same one anyway.
  415. //
  416. ObReferenceObject ( FileObject );
  417. } else {
  418. //
  419. // If this file has FO_SEQUENTIAL_ONLY clear, then remember that
  420. // in the SharedCacheMap.
  421. //
  422. if (!FlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY)) {
  423. ClearFlag(SharedCacheMap->Flags, ONLY_SEQUENTIAL_ONLY_SEEN);
  424. }
  425. }
  426. //
  427. // If this file is opened for random access, remember this in
  428. // the SharedCacheMap.
  429. //
  430. if (FlagOn(FileObject->Flags, FO_RANDOM_ACCESS)) {
  431. SetFlag(SharedCacheMap->Flags, RANDOM_ACCESS_SEEN);
  432. }
  433. //
  434. // Make sure that no one is trying to lazy delete it in the case
  435. // that the Cache Map was already there.
  436. //
  437. ClearFlag(SharedCacheMap->Flags, TRUNCATE_REQUIRED);
  438. //
  439. // In case there has been a CcUnmapAndPurge call, we check here if we
  440. // if we need to recreate the section and map it.
  441. //
  442. if ((SharedCacheMap->Vacbs == NULL) &&
  443. !FlagOn(SharedCacheMap->Flags, BEING_CREATED)) {
  444. //
  445. // Increment the OpenCount on the CacheMap.
  446. //
  447. CcIncrementOpenCount( SharedCacheMap, 'onnI' );
  448. //
  449. // We still want anyone else to wait.
  450. //
  451. SetFlag(SharedCacheMap->Flags, BEING_CREATED);
  452. //
  453. // If there is a create event, then this must be the path where we
  454. // we were only unmapped. We will just clear it here again in case
  455. // someone needs to wait again this time too.
  456. //
  457. if (SharedCacheMap->CreateEvent != NULL) {
  458. KeInitializeEvent( SharedCacheMap->CreateEvent,
  459. NotificationEvent,
  460. FALSE );
  461. }
  462. //
  463. // Release global resource
  464. //
  465. CcReleaseMasterLock( OldIrql );
  466. SharedListOwned = FALSE;
  467. //
  468. // Signify we have incremented the open count.
  469. //
  470. MustUninitialize = TRUE;
  471. //
  472. // Signify we have marked BEING_CREATED in the CacheMap flags.
  473. //
  474. WeSetBeingCreated = TRUE;
  475. //
  476. // We have to test this, because the section may only be unmapped.
  477. //
  478. if (SharedCacheMap->Section == NULL) {
  479. //
  480. // Call MM to create a section for this file, for the calculated
  481. // section size. Note that we have the choice in this service to
  482. // pass in a FileHandle or a FileObject pointer, but not both.
  483. // Use the pointer as it results in much faster performance.
  484. //
  485. DebugTrace( 0, mm, "MmCreateSection:\n", 0 );
  486. DebugTrace2(0, mm, " MaximumSize = %08lx, %08lx\n",
  487. LocalSizes.AllocationSize.LowPart,
  488. LocalSizes.AllocationSize.HighPart );
  489. DebugTrace( 0, mm, " FileObject = %08lx\n", FileObject );
  490. SharedCacheMap->Status = MmCreateSection( &SharedCacheMap->Section,
  491. SECTION_MAP_READ
  492. | SECTION_MAP_WRITE
  493. | SECTION_QUERY,
  494. NULL,
  495. &LocalSizes.AllocationSize,
  496. PAGE_READWRITE,
  497. SEC_COMMIT,
  498. NULL,
  499. FileObject );
  500. DebugTrace( 0, mm, " <Section = %08lx\n", SharedCacheMap->Section );
  501. if (!NT_SUCCESS( SharedCacheMap->Status )){
  502. DebugTrace( 0, 0, "Error from MmCreateSection = %08lx\n",
  503. SharedCacheMap->Status );
  504. SharedCacheMap->Section = NULL;
  505. Status = FsRtlNormalizeNtstatus( SharedCacheMap->Status,
  506. STATUS_UNEXPECTED_MM_CREATE_ERR );
  507. goto exitfinally;
  508. }
  509. ObDeleteCapturedInsertInfo(SharedCacheMap->Section);
  510. //
  511. // If this is a stream file object, then no user can map it,
  512. // and we should keep the modified page writer out of it.
  513. //
  514. if (!FlagOn(((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags2,
  515. FSRTL_FLAG2_DO_MODIFIED_WRITE) &&
  516. (FileObject->FsContext2 == NULL)) {
  517. MmDisableModifiedWriteOfSection( FileObject->SectionObjectPointer );
  518. CcAcquireMasterLock( &OldIrql );
  519. SetFlag(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED);
  520. CcReleaseMasterLock( OldIrql );
  521. }
  522. //
  523. // Create the Vacb array.
  524. //
  525. Status = CcCreateVacbArray( SharedCacheMap, LocalSizes.AllocationSize );
  526. if (!NT_SUCCESS(Status)) {
  527. goto exitfinally;
  528. }
  529. }
  530. //
  531. // If the section already exists, we still have to call MM to
  532. // extend, in case it is not large enough.
  533. //
  534. else {
  535. if ( LocalSizes.AllocationSize.QuadPart > SharedCacheMap->SectionSize.QuadPart ) {
  536. DebugTrace( 0, mm, "MmExtendSection:\n", 0 );
  537. DebugTrace( 0, mm, " Section = %08lx\n", SharedCacheMap->Section );
  538. DebugTrace2(0, mm, " Size = %08lx, %08lx\n",
  539. LocalSizes.AllocationSize.LowPart,
  540. LocalSizes.AllocationSize.HighPart );
  541. Status = MmExtendSection( SharedCacheMap->Section,
  542. &LocalSizes.AllocationSize,
  543. TRUE );
  544. if (!NT_SUCCESS(Status)) {
  545. DebugTrace( 0, 0, "Error from MmExtendSection, Status = %08lx\n",
  546. Status );
  547. Status = FsRtlNormalizeNtstatus( Status,
  548. STATUS_UNEXPECTED_MM_EXTEND_ERR );
  549. goto exitfinally;
  550. }
  551. }
  552. //
  553. // Extend the Vacb array.
  554. //
  555. Status = CcExtendVacbArray( SharedCacheMap, LocalSizes.AllocationSize );
  556. if (!NT_SUCCESS(Status)) {
  557. goto exitfinally;
  558. }
  559. }
  560. //
  561. // Now show that we are all done and resume any waiters.
  562. //
  563. CcAcquireMasterLock( &OldIrql );
  564. ClearFlag(SharedCacheMap->Flags, BEING_CREATED);
  565. if (SharedCacheMap->CreateEvent != NULL) {
  566. KeSetEvent( SharedCacheMap->CreateEvent, 0, FALSE );
  567. }
  568. CcReleaseMasterLock( OldIrql );
  569. WeSetBeingCreated = FALSE;
  570. }
  571. //
  572. // Else if the section is already there, we make sure it is large
  573. // enough by calling CcExtendCacheSection.
  574. //
  575. else {
  576. //
  577. // If the SharedCacheMap is currently being created we have
  578. // to optionally create and wait on an event for it. Note that
  579. // the only safe time to delete the event is in
  580. // CcUninitializeCacheMap, because we otherwise have no way of
  581. // knowing when everyone has reached the KeWaitForSingleObject.
  582. //
  583. if (FlagOn(SharedCacheMap->Flags, BEING_CREATED)) {
  584. if (SharedCacheMap->CreateEvent == NULL) {
  585. SharedCacheMap->CreateEvent = (PKEVENT)ExAllocatePoolWithTag( NonPagedPool,
  586. sizeof(KEVENT),
  587. 'vEcC' );
  588. if (SharedCacheMap->CreateEvent == NULL) {
  589. DebugTrace( 0, 0, "Failed to allocate CreateEvent\n", 0 );
  590. CcReleaseMasterLock( OldIrql );
  591. SharedListOwned = FALSE;
  592. Status = STATUS_INSUFFICIENT_RESOURCES;
  593. goto exitfinally;
  594. }
  595. KeInitializeEvent( SharedCacheMap->CreateEvent,
  596. NotificationEvent,
  597. FALSE );
  598. }
  599. //
  600. // Increment the OpenCount on the CacheMap.
  601. //
  602. CcIncrementOpenCount( SharedCacheMap, 'ecnI' );
  603. //
  604. // Release global resource before waiting
  605. //
  606. CcReleaseMasterLock( OldIrql );
  607. SharedListOwned = FALSE;
  608. MustUninitialize = TRUE;
  609. DebugTrace( 0, 0, "Waiting on CreateEvent\n", 0 );
  610. KeWaitForSingleObject( SharedCacheMap->CreateEvent,
  611. Executive,
  612. KernelMode,
  613. FALSE,
  614. (PLARGE_INTEGER)NULL);
  615. //
  616. // If the real creator got an error, then we must bomb
  617. // out too.
  618. //
  619. if (!NT_SUCCESS(SharedCacheMap->Status)) {
  620. Status = FsRtlNormalizeNtstatus( SharedCacheMap->Status,
  621. STATUS_UNEXPECTED_MM_CREATE_ERR );
  622. goto exitfinally;
  623. }
  624. }
  625. else {
  626. //
  627. // Increment the OpenCount on the CacheMap.
  628. //
  629. CcIncrementOpenCount( SharedCacheMap, 'esnI' );
  630. //
  631. // Release global resource
  632. //
  633. CcReleaseMasterLock( OldIrql );
  634. SharedListOwned = FALSE;
  635. MustUninitialize = TRUE;
  636. }
  637. }
  638. if (CacheMapToFree != NULL) {
  639. ExFreePool( CacheMapToFree );
  640. CacheMapToFree = NULL;
  641. }
  642. //
  643. // Now allocate (if local one already in use) and initialize
  644. // the Private Cache Map.
  645. //
  646. PrivateCacheMap = &SharedCacheMap->PrivateCacheMap;
  647. //
  648. // See if we should allocate a PrivateCacheMap while not holding
  649. // a spinlock.
  650. //
  651. if (PrivateCacheMap->NodeTypeCode != 0) {
  652. restart2:
  653. CacheMapToFree = ExAllocatePoolWithTag( NonPagedPool, sizeof(PRIVATE_CACHE_MAP), 'cPcC' );
  654. if (CacheMapToFree == NULL) {
  655. DebugTrace( 0, 0, "Failed to allocate PrivateCacheMap\n", 0 );
  656. Status = STATUS_INSUFFICIENT_RESOURCES;
  657. goto exitfinally;
  658. }
  659. }
  660. //
  661. // Insert the new PrivateCacheMap in the list off the SharedCacheMap.
  662. //
  663. SharedListOwned = TRUE;
  664. CcAcquireMasterLock( &OldIrql );
  665. //
  666. // Now make sure there is still no PrivateCacheMap, and if so just get out.
  667. //
  668. if (FileObject->PrivateCacheMap == NULL) {
  669. //
  670. // Is the local one already in use?
  671. //
  672. if (PrivateCacheMap->NodeTypeCode != 0) {
  673. //
  674. // Use the one allocated above, if there is one, else go to pool now.
  675. //
  676. if (CacheMapToFree == NULL) {
  677. CcReleaseMasterLock( OldIrql );
  678. SharedListOwned = FALSE;
  679. goto restart2;
  680. }
  681. PrivateCacheMap = CacheMapToFree;
  682. CacheMapToFree = NULL;
  683. }
  684. RtlZeroMemory( PrivateCacheMap, sizeof(PRIVATE_CACHE_MAP) );
  685. PrivateCacheMap->NodeTypeCode = CACHE_NTC_PRIVATE_CACHE_MAP;
  686. PrivateCacheMap->FileObject = FileObject;
  687. PrivateCacheMap->ReadAheadMask = PAGE_SIZE - 1;
  688. //
  689. // Initialize the spin lock.
  690. //
  691. KeInitializeSpinLock( &PrivateCacheMap->ReadAheadSpinLock );
  692. InsertTailList( &SharedCacheMap->PrivateList, &PrivateCacheMap->PrivateLinks );
  693. FileObject->PrivateCacheMap = PrivateCacheMap;
  694. } else {
  695. //
  696. // We raced with another initializer for the same fileobject and must
  697. // drop our (to this point speculative) opencount.
  698. //
  699. ASSERT( SharedCacheMap->OpenCount > 1 );
  700. CcDecrementOpenCount( SharedCacheMap, 'rpnI' );
  701. SharedCacheMap = NULL;
  702. }
  703. MustUninitialize = FALSE;
  704. exitfinally:
  705. //
  706. // See if we got an error and must uninitialize the SharedCacheMap
  707. //
  708. if (MustUninitialize) {
  709. if (!SharedListOwned) {
  710. CcAcquireMasterLock( &OldIrql );
  711. }
  712. if (WeSetBeingCreated) {
  713. if (SharedCacheMap->CreateEvent != NULL) {
  714. KeSetEvent( SharedCacheMap->CreateEvent, 0, FALSE );
  715. }
  716. ClearFlag(SharedCacheMap->Flags, BEING_CREATED);
  717. }
  718. //
  719. // Now release our open count.
  720. //
  721. CcDecrementOpenCount( SharedCacheMap, 'umnI' );
  722. if ((SharedCacheMap->OpenCount == 0) &&
  723. !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
  724. (SharedCacheMap->DirtyPages == 0)) {
  725. //
  726. // It is neccesary to eliminate the structure now. We should
  727. // be guaranteed that our dereference will not result in close
  728. // due to the caller's reference on the fileobject, unlike the
  729. // comment in the original code, below, would indicate.
  730. //
  731. // Not removing this structure can result in problems if the file
  732. // is also mapped and the mapped page writer extends VDL. An FS
  733. // will use CcSetFileSizes and cause us to issue a recursive flush
  734. // of the same range, resulting in a self-colliding page flush and
  735. // a deadlock.
  736. //
  737. // We also think that file extension/truncation in the interim
  738. // (if the section create failed) would result in an inconsistent
  739. // "resurrected" cache map if we managed to use the one we have
  740. // now. Note CcSetFileSizes aborts if the section is NULL.
  741. //
  742. CcDeleteSharedCacheMap( SharedCacheMap, OldIrql, FALSE );
  743. #if 0
  744. //
  745. // On PinAccess it is safe and necessary to eliminate
  746. // the structure immediately.
  747. //
  748. if (PinAccess) {
  749. CcDeleteSharedCacheMap( SharedCacheMap, OldIrql, FALSE );
  750. //
  751. // If it is not PinAccess, we must lazy delete, because
  752. // we could get into a deadlock trying to acquire the
  753. // stream exclusive when we dereference the file object.
  754. //
  755. } else {
  756. //
  757. // Move it to the dirty list so the lazy write scan will
  758. // see it.
  759. //
  760. RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
  761. InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
  762. &SharedCacheMap->SharedCacheMapLinks );
  763. //
  764. // Make sure the Lazy Writer will wake up, because we
  765. // want him to delete this SharedCacheMap.
  766. //
  767. LazyWriter.OtherWork = TRUE;
  768. if (!LazyWriter.ScanActive) {
  769. CcScheduleLazyWriteScan( FALSE );
  770. }
  771. CcReleaseMasterLock( OldIrql );
  772. }
  773. #endif
  774. } else {
  775. CcReleaseMasterLock( OldIrql );
  776. }
  777. SharedListOwned = FALSE;
  778. } else if (SharedCacheMap != NULL) {
  779. PCACHE_UNINITIALIZE_EVENT CUEvent, EventNext;
  780. //
  781. // If we did not create this SharedCacheMap, then there is a
  782. // possibility that it is in the dirty list. Once we are sure
  783. // we have the spinlock, just make sure it is in the clean list
  784. // if there are no dirty bytes and the open count is nonzero.
  785. // (The latter test is almost guaranteed, of course, but we check
  786. // it to be safe.)
  787. //
  788. if (!SharedListOwned) {
  789. CcAcquireMasterLock( &OldIrql );
  790. SharedListOwned = TRUE;
  791. }
  792. if (!WeCreated &&
  793. (SharedCacheMap->DirtyPages == 0) &&
  794. (SharedCacheMap->OpenCount != 0)) {
  795. RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
  796. CcInsertIntoCleanSharedCacheMapList( SharedCacheMap );
  797. }
  798. //
  799. // If there is a process waiting on an uninitialize on this
  800. // cache map to complete, let the thread that is waiting go,
  801. // since the uninitialize is now complete.
  802. //
  803. CUEvent = SharedCacheMap->UninitializeEvent;
  804. while (CUEvent != NULL) {
  805. EventNext = CUEvent->Next;
  806. KeSetEvent(&CUEvent->Event, 0, FALSE);
  807. CUEvent = EventNext;
  808. }
  809. SharedCacheMap->UninitializeEvent = NULL;
  810. ClearFlag( SharedCacheMap->Flags, WAITING_FOR_TEARDOWN );
  811. }
  812. //
  813. // Release global resource
  814. //
  815. if (SharedListOwned) {
  816. CcReleaseMasterLock( OldIrql );
  817. }
  818. if (CacheMapToFree != NULL) {
  819. ExFreePool(CacheMapToFree);
  820. }
  821. if (!NT_SUCCESS(Status)) {
  822. DebugTrace(-1, me, "CcInitializeCacheMap -> RAISING EXCEPTION\n", 0 );
  823. ExRaiseStatus(Status);
  824. }
  825. DebugTrace(-1, me, "CcInitializeCacheMap -> VOID\n", 0 );
  826. return;
  827. }
BOOLEAN
CcUninitializeCacheMap (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER TruncateSize OPTIONAL,
    IN PCACHE_UNINITIALIZE_EVENT UninitializeEvent OPTIONAL
    )

/*++

Routine Description:

    This routine uninitializes the previously initialized Shared and Private
    Cache Maps.  This routine is only intended to be called by File Systems.
    It should be called when the File System receives a cleanup call on the
    File Object.

    A File System which supports data caching must always call this routine
    whenever it closes a file, whether the caller opened the file with
    NO_INTERMEDIATE_BUFFERING as FALSE or not.  This is because the final
    cleanup of a file related to truncation or deletion of the file can
    only occur on the last close, whether the last closer cached the file
    or not.  When CcUninitializeCacheMap is called on a file object for which
    CcInitializeCacheMap was never called, the call has a benign effect
    iff no one has truncated or deleted the file; otherwise the necessary
    cleanup relating to the truncate or close is performed.

    In summary, CcUninitializeCacheMap does the following:

        If the caller had Write or Delete access, the cache is flushed.
        (This could change with lazy writing.)

        If a Cache Map was initialized on this File Object, it is
        uninitialized (unmap any views, delete section, and delete
        Cache Map structures).

        On the last Cleanup, if the file has been deleted, the
        Section is forced closed.  If the file has been truncated, then
        the truncated pages are purged from the cache.

Arguments:

    FileObject - File Object which was previously supplied to
        CcInitializeCacheMap.

    TruncateSize - If specified, the file was truncated to the specified
        size, and the cache should be purged accordingly.

    UninitializeEvent - If specified, then the provided event will be set
        to the signalled state when the actual flush is completed.  This
        is only of interest to file systems that require that they be
        notified when a cache flush operation has completed.  Due to
        network protocol restrictions, it is critical that network file
        systems know exactly when a cache flush operation completes; by
        specifying this event, they can be notified when the cache
        section is finally purged if the section is "lazy-deleted".

ReturnValue:

    FALSE if Section was not closed.
    TRUE if Section was closed.

--*/

{
    KIRQL OldIrql;
    PSHARED_CACHE_MAP SharedCacheMap;
    ULONG ActivePage;
    ULONG PageIsDirty;
    PVACB ActiveVacb = NULL;
    BOOLEAN SectionClosed = FALSE;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    DebugTrace(+1, me, "CcUninitializeCacheMap:\n", 0 );
    DebugTrace( 0, me, " FileObject = %08lx\n", FileObject );
    DebugTrace( 0, me, " &TruncateSize = %08lx\n", TruncateSize );

    //
    // Serialize Creation/Deletion of all Shared CacheMaps
    //

    CcAcquireMasterLock( &OldIrql );

    //
    // Get pointer to SharedCacheMap via File Object.
    //

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;

    //
    // Decrement Open Count on SharedCacheMap, if we did a cached open.
    // Also unmap PrivateCacheMap if it is mapped and deallocate it.
    //

    if (PrivateCacheMap != NULL) {

        ASSERT( PrivateCacheMap->FileObject == FileObject );

        CcDecrementOpenCount( SharedCacheMap, 'ninU' );

        //
        // Remove PrivateCacheMap from list in SharedCacheMap.
        //

        RemoveEntryList( &PrivateCacheMap->PrivateLinks );

        //
        // Free local or allocated PrivateCacheMap.  If the private map is
        // the one embedded in the SharedCacheMap, just mark it unused
        // (NodeTypeCode = 0) and NULL the local pointer so that the
        // pool-free paths below are skipped; pool-allocated maps are
        // freed later, after the master lock has been released.
        //

        if (PrivateCacheMap == &SharedCacheMap->PrivateCacheMap) {
            PrivateCacheMap->NodeTypeCode = 0;
            PrivateCacheMap = NULL;
        }

        FileObject->PrivateCacheMap = (PPRIVATE_CACHE_MAP)NULL;
    }

    //
    // Now if we have a SharedCacheMap whose Open Count went to 0, we
    // have some additional cleanup.
    //

    if (SharedCacheMap != NULL) {

        //
        // If a Truncate Size was specified, then remember that we want to
        // truncate the FileSize and purge the unneeded pages when OpenCount
        // goes to 0.
        //

        if (ARGUMENT_PRESENT(TruncateSize)) {

            if ( (TruncateSize->QuadPart == 0) && (SharedCacheMap->FileSize.QuadPart != 0) ) {

                SetFlag(SharedCacheMap->Flags, TRUNCATE_REQUIRED);

            } else if (IsListEmpty(&SharedCacheMap->PrivateList)) {

                //
                // If this is the last guy, I can drop the file size down
                // now.
                //

                SharedCacheMap->FileSize = *TruncateSize;
            }
        }

        //
        // If other file objects are still using this SharedCacheMap,
        // then we are done now.
        //

        if (SharedCacheMap->OpenCount != 0) {

            DebugTrace(-1, me, "SharedCacheMap OpenCount != 0\n", 0);

            //
            // If the caller specified an event to be set when
            // the cache uninitialize is completed, set the event
            // now, because the uninitialize is complete for this file.
            // (Note, we make him wait if he is the last guy.)
            //

            if (ARGUMENT_PRESENT(UninitializeEvent)) {

                if (!IsListEmpty(&SharedCacheMap->PrivateList)) {

                    KeSetEvent(&UninitializeEvent->Event, 0, FALSE);

                } else {

                    //
                    // Last private map is gone: queue the event onto the
                    // SharedCacheMap so it is signalled at final teardown.
                    //

                    UninitializeEvent->Next = SharedCacheMap->UninitializeEvent;
                    SharedCacheMap->UninitializeEvent = UninitializeEvent;
                }
            }

            CcReleaseMasterLock( OldIrql );

            //
            // Free PrivateCacheMap now that we no longer have the spinlock.
            //

            if (PrivateCacheMap != NULL) {
                ExFreePool( PrivateCacheMap );
            }

            DebugTrace(-1, me, "CcUnitializeCacheMap -> %02lx\n", FALSE );

            return FALSE;
        }

        //
        // Remove the private write flag synchronously.  Even though a
        // private writer is also opening the file exclusively, the
        // shared cache map is not going away synchronously and we
        // cannot let a non private writer re-reference the scm in
        // this state.  Their data will never be written!
        //

        if (FlagOn(SharedCacheMap->Flags, PRIVATE_WRITE)) {
            ClearFlag(SharedCacheMap->Flags, PRIVATE_WRITE | DISABLE_WRITE_BEHIND);
            MmEnableModifiedWriteOfSection( FileObject->SectionObjectPointer );
        }

        //
        // The private cache map list better be empty!
        //

        ASSERT(IsListEmpty(&SharedCacheMap->PrivateList));

        //
        // Queue the caller's "uninitialize complete" event in the shared
        // cache map so that CcDeleteSharedCacheMap will signal it.
        //

        if (ARGUMENT_PRESENT(UninitializeEvent)) {
            UninitializeEvent->Next = SharedCacheMap->UninitializeEvent;
            SharedCacheMap->UninitializeEvent = UninitializeEvent;
        }

        //
        // We are in the process of deleting this cache map.  If the
        // Lazy Writer is active or the Bcb list is not empty or the Lazy
        // Writer will hit this SharedCacheMap because we are purging
        // the file to 0, then get out and let the Lazy Writer clean
        // up.  If a write through was forced, queue a lazy write to
        // update the file sizes.
        //

        if ((!FlagOn(SharedCacheMap->Flags, PIN_ACCESS) &&
             !ARGUMENT_PRESENT(UninitializeEvent))

                ||

            FlagOn(SharedCacheMap->Flags, WRITE_QUEUED)

                ||

            (SharedCacheMap->DirtyPages != 0)

                ||

            FlagOn(SharedCacheMap->Flags, FORCED_WRITE_THROUGH)) {

            //
            // Move it to the dirty list so the lazy write scan will
            // see it.
            //

            if (!FlagOn(SharedCacheMap->Flags, WRITE_QUEUED)) {
                RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
                InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                                &SharedCacheMap->SharedCacheMapLinks );
            }

            //
            // Make sure the Lazy Writer will wake up, because we
            // want him to delete this SharedCacheMap.
            //

            LazyWriter.OtherWork = TRUE;
            if (!LazyWriter.ScanActive) {
                CcScheduleLazyWriteScan( FALSE );
            }

            //
            // Get the active Vacb if we are going to lazy delete, to
            // free it for someone who can use it.  (Master lock is still
            // held here; the Vacb itself is freed after release, below.)
            //

            GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );

            DebugTrace(-1, me, "SharedCacheMap has Bcbs and not purging to 0\n", 0);

            CcReleaseMasterLock( OldIrql );

            ASSERT (SectionClosed == FALSE);
        }
        else {

            //
            // Now we can delete the SharedCacheMap.  If there are any Bcbs,
            // then we must be truncating to 0, and they will also be deleted.
            // On return the Shared Cache Map List Spinlock will be released.
            //

            CcDeleteSharedCacheMap( SharedCacheMap, OldIrql, FALSE );
            SectionClosed = TRUE;
        }
    }

    //
    // No Shared Cache Map.  To make the file go away, we still need to
    // purge the section, if one exists.  (And we still need to release
    // our global list first to avoid deadlocks.)
    //

    else {
        if (ARGUMENT_PRESENT(TruncateSize) &&
            ( TruncateSize->QuadPart == 0 ) &&
            (*(PCHAR *)FileObject->SectionObjectPointer != NULL)) {

            CcReleaseMasterLock( OldIrql );

            DebugTrace( 0, mm, "MmPurgeSection:\n", 0 );
            DebugTrace( 0, mm, " SectionObjectPointer = %08lx\n",
                        FileObject->SectionObjectPointer );
            DebugTrace2(0, mm, " Offset = %08lx\n",
                        TruncateSize->LowPart,
                        TruncateSize->HighPart );

            //
            // 0 Length means to purge from the TruncateSize on.
            //

            CcPurgeCacheSection( FileObject->SectionObjectPointer,
                                 TruncateSize,
                                 0,
                                 FALSE );
        }
        else {
            CcReleaseMasterLock( OldIrql );
        }

        //
        // If the caller specified an event to be set when
        // the cache uninitialize is completed, set the event
        // now, because the uninitialize is complete for this file.
        //

        if (ARGUMENT_PRESENT(UninitializeEvent)) {
            KeSetEvent(&UninitializeEvent->Event, 0, FALSE);
        }
    }

    //
    // Free the active vacb, if we found one.  (Done outside the master
    // lock; note the SharedCacheMap is taken from the Vacb itself.)
    //

    if (ActiveVacb != NULL) {

        CcFreeActiveVacb( ActiveVacb->SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
    }

    //
    // Free PrivateCacheMap now that we no longer have the spinlock.
    //

    if (PrivateCacheMap != NULL) {
        ExFreePool( PrivateCacheMap );
    }

    DebugTrace(-1, me, "CcUnitializeCacheMap -> %02lx\n", SectionClosed );

    return SectionClosed;
}
VOID
CcWaitForUninitializeCacheMap (
    IN PFILE_OBJECT FileObject
    )

/*++

Routine Description:

    This routine is called to wait for the uninitialization of this FileObject's
    SharedCacheMap to complete.  If we are in the process of tearing down the
    SharedCacheMap, this routine will wait for that work to complete.  If this
    SharedCacheMap is referenced by another file object initiating caching of
    this stream, the wait will end.

    This routine will wait for residual references to a data section
    caused by a SharedCacheMap to go away.  If this SharedCacheMap still needs
    to reference the data section, then the SharedCacheMap reference on the
    section will remain when this call returns.

Arguments:

    FileObject - The file of interest for which the caller wants to ensure any
        residual references on the sections backing this file due to the
        Cache Manager are released.

Return Value:

    None.

--*/

{
    KIRQL OldIrql;
    PSHARED_CACHE_MAP SharedCacheMap;
    CACHE_UNINITIALIZE_EVENT UninitializeEvent;    // stack-based wait block linked into the SCM
    BOOLEAN ShouldWait = FALSE;
    LARGE_INTEGER Timeout;
    NTSTATUS Status;

    DebugTrace(+1, me, "CcWaitForUninitializeCacheMap:\n", 0 );
    DebugTrace( 0, me, " FileObject = %08lx\n", FileObject );

    //
    // First, do an unprotected check to see if a SharedCacheMap exists
    // for this file.  If not, we've got no more work to do and avoid
    // acquiring the master lock.
    //

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL) {

        return;
    }

    //
    // Initialize event that we may have to wait on if we are in the process
    // of uninitializing the SharedCacheMap.
    //

    KeInitializeEvent( &UninitializeEvent.Event,
                       NotificationEvent,
                       FALSE );

    //
    // Serialize Creation/Deletion of all Shared CacheMaps
    //

    CcAcquireMasterLock( &OldIrql );

    //
    // Get pointer to SharedCacheMap via File Object.  (Re-read under the
    // lock; the unprotected check above may be stale.)
    //

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    //
    // If we have a SharedCacheMap, we will check the OpenCount to see if
    // we are in the process of uninitializing the SharedCacheMap and
    // should therefore wait for that work to complete.
    //

    if (SharedCacheMap != NULL) {

        //
        // If the OpenCount on the SharedCacheMap is zero or the list
        // of private cache maps is empty, we are in the process of
        // uninitializing this SharedCacheMap.  Link our event into the
        // SharedCacheMap's UninitializeEvent list so that we will be signaled
        // when the work is completed or the SharedCacheMap is referenced by
        // another file before it is torn down.
        //

        if (SharedCacheMap->OpenCount == 0 ||
            IsListEmpty( &SharedCacheMap->PrivateList )) {

            DebugTrace(-1, me, "SharedCacheMap OpenCount == 0 or PrivateList is empty\n", 0);

            ShouldWait = TRUE;

            SetFlag( SharedCacheMap->Flags, WAITING_FOR_TEARDOWN );

            UninitializeEvent.Next = SharedCacheMap->UninitializeEvent;
            SharedCacheMap->UninitializeEvent = &UninitializeEvent;

            //
            // Give the lazy write scan a kick to get it to start doing its
            // scan right now.
            //

            CcScheduleLazyWriteScan( TRUE );
        }
    }

    //
    // Release the lock because we are finished with the SharedCacheMap.
    //

    CcReleaseMasterLock( OldIrql );

    if (!ShouldWait) {

        //
        // We shouldn't wait or try to force this teardown sooner, so just
        // return now.

        goto exit;
    }

    //
    // We will now wait for the event to get signaled.  We've given the lazy
    // write scan a kick so it should process this right away ahead of any
    // other outstanding work.  We should get signaled as soon as the flush
    // has been completed.  The relative timeout is ten minutes.
    //

    Timeout.QuadPart = (LONGLONG)-(10 * 60 * NANO_FULL_SECOND);

    Status = KeWaitForSingleObject( &UninitializeEvent.Event,
                                    Executive,
                                    KernelMode,
                                    FALSE,
                                    &Timeout );

    if (Status == STATUS_TIMEOUT) {

        PCACHE_UNINITIALIZE_EVENT CUEvent;

        //
        // We weren't signaled, so grab the master spin lock and remove
        // this event from the shared cache map if it is still around.
        // (Our event is on the stack, so it MUST be unlinked before we
        // return.)
        //

        CcAcquireMasterLock( &OldIrql );

        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        if (SharedCacheMap != NULL) {

            //
            // We've got a shared cache map, so take our UninitializeEvent
            // out of the list.  Since this is a singly-linked list, we've
            // got to search, but the list shouldn't be long.  The initial
            // CONTAINING_RECORD treats the list head's pointer as the
            // first Next link so the loop can unlink the head uniformly.
            //

            CUEvent = CONTAINING_RECORD( &SharedCacheMap->UninitializeEvent,
                                         CACHE_UNINITIALIZE_EVENT,
                                         Next );

            while (CUEvent->Next != NULL) {

                if (CUEvent->Next == &UninitializeEvent) {

                    CUEvent->Next = UninitializeEvent.Next;
                    break;
                }

                CUEvent = CUEvent->Next;
            }

            ClearFlag( SharedCacheMap->Flags, WAITING_FOR_TEARDOWN );

            //
            // All done, so release the master lock.
            //

            CcReleaseMasterLock( OldIrql );

        } else {

            //
            // Release the master lock and wait again on the event.  If the
            // shared cache map is no longer around, another thread is
            // in CcDeleteSharedCacheMap and will be walking the event list
            // to signal this event very soon.  (This wait has no timeout.)
            //

            CcReleaseMasterLock( OldIrql );

            KeWaitForSingleObject( &UninitializeEvent.Event,
                                   Executive,
                                   KernelMode,
                                   FALSE,
                                   NULL );
        }
    }

exit:

    DebugTrace(-1, me, "CcWaitForUninitializeCacheMap\n", 0 );

    return;
}
  1246. //
  1247. // Internal support routine.
  1248. //
VOID
CcDeleteBcbs (
    IN PSHARED_CACHE_MAP SharedCacheMap
    )

/*++

Routine Description:

    This routine may be called to delete all Bcbs for a stream.

    External synchronization must be acquired to guarantee no
    active pin on any bcb.

Arguments:

    SharedCacheMap - Pointer to SharedCacheMap.

Return Value:

    None.

--*/

{
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry;
    PBCB Bcb;

    //
    // If there are Bcbs, then empty the list.  None of them can be pinned now!
    // Either the file is being truncated, in which case synchronization with
    // the lazy writer must have been externally achieved, or the file is being
    // closed down and nothing should be able to get a fresh reference on this
    // shared cache map.
    //

    NextEntry = SharedCacheMap->BcbList.Flink;
    while (NextEntry != &SharedCacheMap->BcbList) {

        Bcb = (PBCB)CONTAINING_RECORD( NextEntry,
                                       BCB,
                                       BcbLinks );

        //
        // Capture the successor before this Bcb is unlinked/freed.
        //

        NextEntry = Bcb->BcbLinks.Flink;

        //
        // Skip over the pendaflex entries, only removing true Bcbs
        // so that level teardown doesn't need to special case unhooking
        // the pendaflex.  This has the side benefit of dramatically
        // reducing write traffic to memory on teardown of large files.
        //

        if (Bcb->NodeTypeCode == CACHE_NTC_BCB) {

            ASSERT( Bcb->PinCount == 0 );

            RemoveEntryList( &Bcb->BcbLinks );

            //
            // For large metadata streams we unlock the Vacb level when
            // removing.  We do not need spinlocks since no other thread
            // can be accessing this list when we are deleting the
            // SharedCacheMap.
            //

            CcUnlockVacbLevel( SharedCacheMap, Bcb->FileOffset.QuadPart );

            //
            // There is a small window where the data could still be mapped
            // if (for example) the Lazy Writer collides with a CcCopyWrite
            // in the foreground, and then someone calls CcUninitializeCacheMap
            // while the Lazy Writer is active.  This is because the Lazy
            // Writer biases the pin count.  Deal with that here.
            //

            if (Bcb->BaseAddress != NULL) {
                CcFreeVirtualAddress( Bcb->Vacb );
            }

#if LIST_DBG
            //
            // Debug routines used to remove Bcbs from the global list
            //

            OldIrql = KeAcquireQueuedSpinLock( LockQueueBcbLock );

            if (Bcb->CcBcbLinks.Flink != NULL) {

                RemoveEntryList( &Bcb->CcBcbLinks );
                CcBcbCount -= 1;
            }

            KeReleaseQueuedSpinLock( LockQueueBcbLock, OldIrql );
#endif

            //
            // If the Bcb is dirty, we have to synchronize with the Lazy Writer
            // and reduce the total number of dirty.  (Master lock is held
            // only around the dirty-page accounting.)
            //

            CcAcquireMasterLock( &OldIrql );
            if (Bcb->Dirty) {

                CcDeductDirtyPages( SharedCacheMap, Bcb->ByteLength >> PAGE_SHIFT );
            }

            CcReleaseMasterLock( OldIrql );

            CcDeallocateBcb( Bcb );
        }
    }
}
  1330. //
  1331. // Internal support routine.
  1332. //
VOID
FASTCALL
CcDeleteSharedCacheMap (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN KIRQL ListIrql,
    IN ULONG ReleaseFile
    )

/*++

Routine Description:

    The specified SharedCacheMap is removed from the global list of
    SharedCacheMap's and deleted with all of its related structures.
    Other objects which were referenced in CcInitializeCacheMap are
    dereferenced here.

    NOTE: The CcMasterSpinLock must already be acquired
          on entry.  It is released on return.

Arguments:

    SharedCacheMap - Pointer to Cache Map to delete

    ListIrql - priority to restore to when releasing shared cache map list

    ReleaseFile - Supplied as nonzero if file was acquired exclusive and
                  should be released.

ReturnValue:

    None.

--*/

{
    LIST_ENTRY LocalList;
    PFILE_OBJECT FileObject;
    PVACB ActiveVacb;
    ULONG ActivePage;
    ULONG PageIsDirty;

    DebugTrace(+1, me, "CcDeleteSharedCacheMap:\n", 0 );
    DebugTrace( 0, me, " SharedCacheMap = %08lx\n", SharedCacheMap );

    //
    // Remove it from the global list and clear the pointer to it via
    // the File Object.
    //

    RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );

    //
    // Zero pointer to SharedCacheMap.  Once we have cleared the pointer,
    // we can/must release the global list to avoid deadlocks.  Setting
    // WRITE_QUEUED here keeps the Lazy Writer away from this map.
    //

    FileObject = SharedCacheMap->FileObject;
    FileObject->SectionObjectPointer->SharedCacheMap = (PSHARED_CACHE_MAP)NULL;
    SetFlag( SharedCacheMap->Flags, WRITE_QUEUED );

    //
    // The OpenCount is 0, but we still need to flush out any dangling
    // cache read or writes.
    //

    if ((SharedCacheMap->VacbActiveCount != 0) || (SharedCacheMap->NeedToZero != NULL)) {

        //
        // We will put it in a local list and set a flag
        // to keep the Lazy Writer away from it, so that we can rip it out
        // below if someone manages to sneak in and set something dirty, etc.
        // If the file system does not synchronize cleanup calls with an
        // exclusive on the stream, then this case is possible.
        //

        InitializeListHead( &LocalList );
        InsertTailList( &LocalList, &SharedCacheMap->SharedCacheMapLinks );

        //
        // If there is an active Vacb, then nuke it now (before waiting!).
        //

        GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );

        CcReleaseMasterLock( ListIrql );

        //
        // No point in saying the page is dirty (which can cause an allocation
        // failure), since we are deleting this SharedCacheMap anyway.
        //

        CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, FALSE );

        while (SharedCacheMap->VacbActiveCount != 0) {
            CcWaitOnActiveCount( SharedCacheMap );
        }

        //
        // Now in case we hit the rare path where someone moved the
        // SharedCacheMap again, do a remove again now.  It may be
        // from our local list or it may be from the dirty list,
        // but who cares?  The important thing is to remove it in
        // the case it was the dirty list, since we will delete it
        // below.
        //

        CcAcquireMasterLock( &ListIrql );
        RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
    }

    CcReleaseMasterLock( ListIrql );

    //
    // If there are Bcbs, then empty the list.
    //
    // I really wonder how often we have Bcbs at teardown.  This is
    // a lot of work that could be avoided otherwise.
    //

    if (!IsListEmpty( &SharedCacheMap->BcbList )) {
        CcDeleteBcbs( SharedCacheMap );
    }

    //
    // Call local routine to unmap, and purge if necessary.
    //

    CcUnmapAndPurge( SharedCacheMap );

    //
    // Now release the file now that the purge is done.
    //

    if (ReleaseFile) {
        FsRtlReleaseFile( SharedCacheMap->FileObject );
    }

    //
    // Dereference our pointer to the Section and FileObject
    // (We have to test the Section pointer since CcInitializeCacheMap
    // calls this routine for error recovery.  Release our global
    // resource before dereferencing the FileObject to avoid deadlocks.)
    //

    if (SharedCacheMap->Section != NULL) {
        ObDereferenceObject( SharedCacheMap->Section );
    }
    ObDereferenceObject( FileObject );

    //
    // If there is an Mbcb, deduct any dirty pages and deallocate.
    //

    if (SharedCacheMap->Mbcb != NULL) {
        CcDeleteMbcb( SharedCacheMap );
    }

    //
    // If there was an uninitialize event specified for this shared cache
    // map, then set it to the signalled state, indicating that we are
    // removing the section and deleting the shared cache map.  Every
    // event chained through the Next links is signalled.
    //

    if (SharedCacheMap->UninitializeEvent != NULL) {
        PCACHE_UNINITIALIZE_EVENT CUEvent, EventNext;

        CUEvent = SharedCacheMap->UninitializeEvent;

        while (CUEvent != NULL) {
            EventNext = CUEvent->Next;
            KeSetEvent(&CUEvent->Event, 0, FALSE);
            CUEvent = EventNext;
        }
    }

    //
    // Now delete the Vacb vector, unless it is the small embedded array.
    //

    if ((SharedCacheMap->Vacbs != &SharedCacheMap->InitialVacbs[0])

            &&

        (SharedCacheMap->Vacbs != NULL)) {

        //
        // If there are Vacb levels, then the Vacb Array better be in an empty state.
        //

        ASSERT((SharedCacheMap->SectionSize.QuadPart <= VACB_SIZE_OF_FIRST_LEVEL) ||
               !IsVacbLevelReferenced( SharedCacheMap, SharedCacheMap->Vacbs, 1 ));

        ExFreePool( SharedCacheMap->Vacbs );
    }

    //
    // If an event had to be allocated for this SharedCacheMap,
    // deallocate it.  (The embedded Event is not pool and must not
    // be freed.)
    //

    if ((SharedCacheMap->CreateEvent != NULL) && (SharedCacheMap->CreateEvent != &SharedCacheMap->Event)) {
        ExFreePool( SharedCacheMap->CreateEvent );
    }
    if ((SharedCacheMap->WaitOnActiveCount != NULL) && (SharedCacheMap->WaitOnActiveCount != &SharedCacheMap->Event)) {
        ExFreePool( SharedCacheMap->WaitOnActiveCount );
    }

    //
    // Deallocate the storage for the SharedCacheMap.
    //

    ExFreePool( SharedCacheMap );

    DebugTrace(-1, me, "CcDeleteSharedCacheMap -> VOID\n", 0 );

    return;
}
  1494. VOID
  1495. CcSetFileSizes (
  1496. IN PFILE_OBJECT FileObject,
  1497. IN PCC_FILE_SIZES FileSizes
  1498. )
  1499. /*++
  1500. Routine Description:
  1501. This routine must be called whenever a file has been extended to reflect
  1502. this extension in the cache maps and underlying section. Calling this
  1503. routine has a benign effect if the current size of the section is
  1504. already greater than or equal to the new AllocationSize.
  1505. This routine must also be called whenever the FileSize for a file changes
  1506. to reflect these changes in the Cache Manager.
  1507. This routine seems rather large, but in the normal case it only acquires
  1508. a spinlock, updates some fields, and exits. Less often it will either
  1509. extend the section, or truncate/purge the file, but it would be unexpected
  1510. to do both. On the other hand, the idea of this routine is that it does
  1511. "everything" required when AllocationSize or FileSize change.
  1512. Arguments:
  1513. FileObject - A file object for which CcInitializeCacheMap has been
  1514. previously called.
  1515. FileSizes - A pointer to AllocationSize, FileSize and ValidDataLength
  1516. for the file. AllocationSize is ignored if it is not larger
  1517. than the current section size (i.e., it is ignored unless it
  1518. has grown). ValidDataLength is not used.
  1519. Return Value:
  1520. None
  1521. --*/
  1522. {
  1523. LARGE_INTEGER NewSectionSize;
  1524. LARGE_INTEGER NewFileSize;
  1525. LARGE_INTEGER NewValidDataLength;
  1526. IO_STATUS_BLOCK IoStatus;
  1527. PSHARED_CACHE_MAP SharedCacheMap;
  1528. NTSTATUS Status;
  1529. KIRQL OldIrql;
  1530. PVACB ActiveVacb;
  1531. ULONG ActivePage;
  1532. ULONG PageIsDirty;
  1533. DebugTrace(+1, me, "CcSetFileSizes:\n", 0 );
  1534. DebugTrace( 0, me, " FileObject = %08lx\n", FileObject );
  1535. DebugTrace( 0, me, " FileSizes = %08lx\n", FileSizes );
  1536. //
  1537. // Make a local copy of the new file size and section size.
  1538. //
  1539. NewSectionSize = FileSizes->AllocationSize;
  1540. NewFileSize = FileSizes->FileSize;
  1541. NewValidDataLength = FileSizes->ValidDataLength;
  1542. //
  1543. // Serialize Creation/Deletion of all Shared CacheMaps
  1544. //
  1545. CcAcquireMasterLock( &OldIrql );
  1546. //
  1547. // Get pointer to SharedCacheMap via File Object.
  1548. //
  1549. SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
  1550. //
  1551. // If the file is not cached, just get out.
  1552. //
  1553. if ((SharedCacheMap == NULL) || (SharedCacheMap->Section == NULL)) {
  1554. CcReleaseMasterLock( OldIrql );
  1555. //
  1556. // Let's try to purge the file incase this is a truncate. In the
  1557. // vast majority of cases when there is no shared cache map, there
  1558. // is no data section either, so this call will eventually be
  1559. // no-oped in Mm.
  1560. //
  1561. // First flush the first page we are keeping, if it has data, before
  1562. // we throw it away.
  1563. //
  1564. if (NewFileSize.LowPart & (PAGE_SIZE - 1)) {
  1565. MmFlushSection( FileObject->SectionObjectPointer, &NewFileSize, 1, &IoStatus, FALSE );
  1566. }
  1567. CcPurgeCacheSection( FileObject->SectionObjectPointer,
  1568. &NewFileSize,
  1569. 0,
  1570. FALSE );
  1571. DebugTrace(-1, me, "CcSetFileSizes -> VOID\n", 0 );
  1572. return;
  1573. }
  1574. //
  1575. // Make call a Noop if file is not mapped, or section already big enough.
  1576. //
  1577. if ( NewSectionSize.QuadPart > SharedCacheMap->SectionSize.QuadPart ) {
  1578. //
  1579. // Increment open count to make sure the SharedCacheMap stays around,
  1580. // then release the spinlock so that we can call Mm.
  1581. //
  1582. CcIncrementOpenCount( SharedCacheMap, '1fSS' );
  1583. CcReleaseMasterLock( OldIrql );
  1584. //
  1585. // Round new section size to pages.
  1586. //
  1587. NewSectionSize.QuadPart = NewSectionSize.QuadPart + (LONGLONG)(DEFAULT_EXTEND_MODULO - 1);
  1588. NewSectionSize.LowPart &= ~(DEFAULT_EXTEND_MODULO - 1);
  1589. //
  1590. // Call MM to extend the section.
  1591. //
  1592. DebugTrace( 0, mm, "MmExtendSection:\n", 0 );
  1593. DebugTrace( 0, mm, " Section = %08lx\n", SharedCacheMap->Section );
  1594. DebugTrace2(0, mm, " Size = %08lx, %08lx\n",
  1595. NewSectionSize.LowPart, NewSectionSize.HighPart );
  1596. Status = MmExtendSection( SharedCacheMap->Section, &NewSectionSize, TRUE );
  1597. if (NT_SUCCESS(Status)) {
  1598. //
  1599. // Extend the Vacb array.
  1600. //
  1601. Status = CcExtendVacbArray( SharedCacheMap, NewSectionSize );
  1602. }
  1603. else {
  1604. DebugTrace( 0, 0, "Error from MmExtendSection, Status = %08lx\n",
  1605. Status );
  1606. Status = FsRtlNormalizeNtstatus( Status,
  1607. STATUS_UNEXPECTED_MM_EXTEND_ERR );
  1608. }
  1609. //
  1610. // Serialize again to decrement the open count.
  1611. //
  1612. CcAcquireMasterLock( &OldIrql );
  1613. CcDecrementOpenCount( SharedCacheMap, '1fSF' );
  1614. if ((SharedCacheMap->OpenCount == 0) &&
  1615. !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
  1616. (SharedCacheMap->DirtyPages == 0)) {
  1617. //
  1618. // Move to the dirty list.
  1619. //
  1620. RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
  1621. InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
  1622. &SharedCacheMap->SharedCacheMapLinks );
  1623. //
  1624. // Make sure the Lazy Writer will wake up, because we
  1625. // want him to delete this SharedCacheMap.
  1626. //
  1627. LazyWriter.OtherWork = TRUE;
  1628. if (!LazyWriter.ScanActive) {
  1629. CcScheduleLazyWriteScan( FALSE );
  1630. }
  1631. }
  1632. //
  1633. // If section or VACB extension failed, raise an
  1634. // exception to our caller.
  1635. //
  1636. if (!NT_SUCCESS(Status)) {
  1637. CcReleaseMasterLock( OldIrql );
  1638. ExRaiseStatus( Status );
  1639. }
  1640. //
  1641. // It is now very unlikely that we have any more work to do, but since
  1642. // the spinlock is already held, check again if we are cached.
  1643. //
  1644. //
  1645. // Get pointer to SharedCacheMap via File Object.
  1646. //
  1647. SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
  1648. //
  1649. // If the file is not cached, just get out.
  1650. //
  1651. if (SharedCacheMap == NULL) {
  1652. CcReleaseMasterLock( OldIrql );
  1653. DebugTrace(-1, me, "CcSetFileSizes -> VOID\n", 0 );
  1654. return;
  1655. }
  1656. }
  1657. //
  1658. // If we are shrinking either of these two sizes, then we must free the
  1659. // active page, since it may be locked.
  1660. //
  1661. CcIncrementOpenCount( SharedCacheMap, '2fSS' );
  1662. if ( ( NewFileSize.QuadPart < SharedCacheMap->ValidDataGoal.QuadPart ) ||
  1663. ( NewFileSize.QuadPart < SharedCacheMap->FileSize.QuadPart )) {
  1664. GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
  1665. if ((ActiveVacb != NULL) || (SharedCacheMap->NeedToZero != NULL)) {
  1666. CcReleaseMasterLock( OldIrql );
  1667. CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
  1668. //
  1669. // Serialize again to reduce ValidDataLength. It cannot change
  1670. // because the caller must have the file exclusive.
  1671. //
  1672. CcAcquireMasterLock( &OldIrql );
  1673. }
  1674. }
  1675. //
  1676. // If the section did not grow, see if the file system supports
  1677. // ValidDataLength, then update the valid data length in the file system.
  1678. //
  1679. if ( SharedCacheMap->ValidDataLength.QuadPart != MAXLONGLONG ) {
  1680. if ( NewFileSize.QuadPart < SharedCacheMap->ValidDataLength.QuadPart ) {
  1681. SharedCacheMap->ValidDataLength = NewFileSize;
  1682. }
  1683. //
  1684. // Update our notion of ValidDataGoal (how far the file has been
  1685. // written in the cache) with caller's ValidDataLength. (Our
  1686. // ValidDataLength controls when we issue ValidDataLength callbacks.)
  1687. //
  1688. SharedCacheMap->ValidDataGoal = NewValidDataLength;
  1689. }
  1690. //
  1691. // On truncate, be nice guys and actually purge away user data from
  1692. // the cache. However, the PinAccess check is important to avoid deadlocks
  1693. // in Ntfs.
  1694. //
  1695. // It is also important to check the Vacb Active count. The caller
  1696. // must have the file exclusive, therefore, no one else can be actively
  1697. // doing anything in the file. Normally the Active count will be zero
  1698. // (like in a normal call from Set File Info), and we can go ahead and
  1699. // truncate. However, if the active count is nonzero, chances are this
  1700. // very thread has something pinned or mapped, and we will deadlock if
  1701. // we try to purge and wait for the count to go zero. A rare case of
  1702. // this which deadlocked DaveC on Christmas Day of 1992, is where Ntfs
  1703. // was trying to convert an attribute from resident to nonresident - which
  1704. // is a good example of a case where the purge was not needed.
  1705. //
  1706. if ( (NewFileSize.QuadPart < SharedCacheMap->FileSize.QuadPart ) &&
  1707. !FlagOn(SharedCacheMap->Flags, PIN_ACCESS) &&
  1708. (SharedCacheMap->VacbActiveCount == 0)) {
  1709. //
  1710. // Release the spinlock so that we can call Mm.
  1711. //
  1712. CcReleaseMasterLock( OldIrql );
  1713. //
  1714. // If we are actually truncating to zero (a size which has particular
  1715. // meaning to the Lazy Writer scan!) then we must reset the Mbcb/Bcbs,
  1716. // if there are any, so that we do not keep dirty pages around forever.
  1717. //
  1718. if (NewFileSize.QuadPart == 0) {
  1719. if (SharedCacheMap->Mbcb != NULL) {
  1720. CcDeleteMbcb( SharedCacheMap );
  1721. }
  1722. if (!IsListEmpty( &SharedCacheMap->BcbList )) {
  1723. CcDeleteBcbs( SharedCacheMap );
  1724. }
  1725. }
  1726. CcPurgeAndClearCacheSection( SharedCacheMap, &NewFileSize );
  1727. //
  1728. // Serialize again to decrement the open count.
  1729. //
  1730. CcAcquireMasterLock( &OldIrql );
  1731. }
  1732. CcDecrementOpenCount( SharedCacheMap, '2fSF' );
  1733. SharedCacheMap->FileSize = NewFileSize;
  1734. if ((SharedCacheMap->OpenCount == 0) &&
  1735. !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
  1736. (SharedCacheMap->DirtyPages == 0)) {
  1737. //
  1738. // Move to the dirty list.
  1739. //
  1740. RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
  1741. InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
  1742. &SharedCacheMap->SharedCacheMapLinks );
  1743. //
  1744. // Make sure the Lazy Writer will wake up, because we
  1745. // want him to delete this SharedCacheMap.
  1746. //
  1747. LazyWriter.OtherWork = TRUE;
  1748. if (!LazyWriter.ScanActive) {
  1749. CcScheduleLazyWriteScan( FALSE );
  1750. }
  1751. }
  1752. CcReleaseMasterLock( OldIrql );
  1753. DebugTrace(-1, me, "CcSetFileSizes -> VOID\n", 0 );
  1754. return;
  1755. }
VOID
CcPurgeAndClearCacheSection (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PLARGE_INTEGER FileOffset
    )

/*++

Routine Description:

    This routine calls CcPurgeCacheSection after zeroing the end of any
    partial page at the start of the range.  If the file is not cached
    it flushes this page before the purge.

Arguments:

    SharedCacheMap - Pointer to the SharedCacheMap of the stream to purge.

    FileOffset - Offset from which file should be purged - rounded down
                 to page boundary.  For a PRIVATE_WRITE stream the low bit
                 of this pointer carries the "awareness" flag (see below).

Return Value:

    None.

--*/

{
    ULONG TempLength, Length;
    LARGE_INTEGER LocalFileOffset;
    IO_STATUS_BLOCK IoStatus;
    PVOID TempVa;
    PVACB Vacb;
    LOGICAL ZeroSucceeded = TRUE;

    //
    // Awareness is indicated by the lowbit of the fileoffset pointer.
    // Non-awareness of a private write stream results in a no-op.
    //

    if (FlagOn( SharedCacheMap->Flags, PRIVATE_WRITE )) {
        if (((ULONG_PTR)FileOffset & 1) == 0) {
            return;
        }

        // Strip the awareness bit to recover the real pointer.

        FileOffset = (PLARGE_INTEGER)((ULONG_PTR)FileOffset ^ 1);
    }

    //
    // If a range was specified, then we have to see if we need to
    // save any user data before purging.
    //

    if ((FileOffset->LowPart & (PAGE_SIZE - 1)) != 0) {

        //
        // Switch to LocalFileOffset.  We do it this way because we
        // still pass it on as an optional parameter.
        //

        LocalFileOffset = *FileOffset;
        FileOffset = &LocalFileOffset;

        //
        // If the file is cached, then we can actually zero the data to
        // be purged in memory, and not purge those pages.  This is a huge
        // savings, because sometimes the flushes in the other case cause
        // us to kill lots of stack, time and I/O doing CcZeroData in especially
        // large user-mapped files.
        //

        if ((SharedCacheMap->Section != NULL) &&
            (SharedCacheMap->Vacbs != NULL)) {

            //
            // First zero the first page we are keeping, if it has data, and
            // adjust FileOffset and Length to allow it to stay.
            //
            // TempLength is the tail of the partial page being kept,
            // i.e. from FileOffset up to the next page boundary.
            //

            TempLength = PAGE_SIZE - (FileOffset->LowPart & (PAGE_SIZE - 1));

            TempVa = CcGetVirtualAddress( SharedCacheMap, *FileOffset, &Vacb, &Length );

            try {

                //
                // Do not map and zero the page if we are not reducing our notion
                // of Valid Data, because that does two bad things.  First
                // CcSetDirtyInMask will arbitrarily smash up ValidDataGoal
                // (causing a potential invalid CcSetValidData call).  Secondly,
                // if the Lazy Writer writes the last page ahead of another flush
                // through MM, then the file system will never see a write from
                // MM, and will not include the last page in ValidDataLength on
                // disk.
                //

                RtlZeroMemory( TempVa, TempLength );

            } except (EXCEPTION_EXECUTE_HANDLER) {

                //
                // If we get an exception here, it means TempVa was not valid
                // and we got an error trying to page that data in from the
                // backing file.  If that is the case, then we don't need zero
                // the end of this file because the file system will take
                // care of that.  We will just swallow the exception here
                // and continue.  If we couldn't zero this range, we don't
                // want to mark that we made data dirty, so remember that
                // this operation failed.
                //

                ZeroSucceeded = FALSE;
            }

            if (ZeroSucceeded) {

                if (FileOffset->QuadPart <= SharedCacheMap->ValidDataGoal.QuadPart) {

                    //
                    // Make sure the Lazy Writer writes it.
                    //

                    CcSetDirtyInMask( SharedCacheMap, FileOffset, TempLength );

                //
                // Otherwise, we are mapped, so make sure at least that Mm
                // knows the page is dirty since we zeroed it.
                //

                } else {

                    MmSetAddressRangeModified( TempVa, 1 );
                }

                //
                // Advance past the zeroed tail so the purge below leaves
                // the partial page in place.
                //

                FileOffset->QuadPart += (LONGLONG)TempLength;
            }

            //
            // If we get any kind of error, like failing to read the page from
            // the network, just charge on.  Note that we only read it in order
            // to zero it and avoid the flush below, so if we cannot read it
            // there is really no stale data problem.
            //

            CcFreeVirtualAddress( Vacb );

        } else {

            //
            // First flush the first page we are keeping, if it has data, before
            // we throw it away.
            //

            MmFlushSection( SharedCacheMap->FileObject->SectionObjectPointer, FileOffset, 1, &IoStatus, FALSE );
        }
    }

    //
    // Purge from the (possibly advanced) offset to the end of the file;
    // Length == 0 means "to end of file" for CcPurgeCacheSection.
    //

    CcPurgeCacheSection( SharedCacheMap->FileObject->SectionObjectPointer,
                         FileOffset,
                         0,
                         FALSE );
}
BOOLEAN
CcPurgeCacheSection (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN UninitializeCacheMaps
    )

/*++

Routine Description:

    This routine may be called to force a purge of the cache section,
    even if it is cached.  Note, if a user has the file mapped, then the purge
    will *not* take effect, and this must be considered part of normal application
    interaction.  The purpose of purge is to throw away potentially nonzero
    data, so that it will be read in again and presumably zeroed.  This is
    not really a security issue, but rather an effort to not confuse the
    application when it sees nonzero data.  We cannot help the fact that
    a user-mapped view forces us to hang on to stale data.

    This routine is intended to be called whenever previously written
    data is being truncated from the file, and the file is not being
    deleted.

    The file must be acquired exclusive in order to call this routine.

Arguments:

    SectionObjectPointer - A pointer to the Section Object Pointers
                           structure in the nonpaged Fcb.

    FileOffset - Offset from which file should be purged - rounded down
                 to page boundary.  If NULL, purge the entire file.
                 For a PRIVATE_WRITE stream the low bit of this pointer
                 carries the "awareness" flag.

    Length - Defines the length of the byte range to purge, starting at
             FileOffset.  This parameter is ignored if FileOffset is
             specified as NULL.  If FileOffset is specified and Length
             is 0, then purge from FileOffset to the end of the file.

    UninitializeCacheMaps - If TRUE, we should uninitialize all the private
                            cache maps before purging the data.

ReturnValue:

    FALSE - if the section was not successfully purged
    TRUE - if the section was successfully purged

--*/

{
    KIRQL OldIrql;
    PSHARED_CACHE_MAP SharedCacheMap;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    ULONG ActivePage;
    ULONG PageIsDirty;
    BOOLEAN PurgeWorked = TRUE;
    PVACB Vacb = NULL;

    DebugTrace(+1, me, "CcPurgeCacheSection:\n", 0 );
    DebugTrace( 0, mm, " SectionObjectPointer = %08lx\n", SectionObjectPointer );
    DebugTrace2(0, me, " FileOffset = %08lx, %08lx\n",
                ARGUMENT_PRESENT(FileOffset) ? FileOffset->LowPart
                : 0,
                ARGUMENT_PRESENT(FileOffset) ? FileOffset->HighPart
                : 0 );
    DebugTrace( 0, me, " Length = %08lx\n", Length );

    //
    // If you want us to uninitialize cache maps, the RtlZeroMemory paths
    // below depend on actually having to purge something after zeroing.
    //

    ASSERT(!UninitializeCacheMaps || (Length == 0) || (Length >= PAGE_SIZE * 2));

    //
    // Serialize Creation/Deletion of all Shared CacheMaps
    //

    CcAcquireMasterLock( &OldIrql );

    //
    // Get pointer to SharedCacheMap via File Object.
    //

    SharedCacheMap = SectionObjectPointer->SharedCacheMap;

    //
    // Increment open count to make sure the SharedCacheMap stays around,
    // then release the spinlock so that we can call Mm.
    //

    if (SharedCacheMap != NULL) {

        //
        // Awareness is indicated by the lowbit of the fileoffset pointer.
        // Non-awareness of a private write stream results in a no-op.
        //

        if (FlagOn( SharedCacheMap->Flags, PRIVATE_WRITE )) {
            if (((ULONG_PTR)FileOffset & 1) == 0) {
                CcReleaseMasterLock( OldIrql );
                return TRUE;
            }

            // Strip the awareness bit to recover the real pointer.

            FileOffset = (PLARGE_INTEGER)((ULONG_PTR)FileOffset ^ 1);
        }

        CcIncrementOpenCount( SharedCacheMap, 'scPS' );

        //
        // If there is an active Vacb, then nuke it now (before waiting!).
        //

        GetActiveVacbAtDpcLevel( SharedCacheMap, Vacb, ActivePage, PageIsDirty );
    }

    CcReleaseMasterLock( OldIrql );

    if (Vacb != NULL) {

        CcFreeActiveVacb( SharedCacheMap, Vacb, ActivePage, PageIsDirty );
    }

    //
    // Increment open count to make sure the SharedCacheMap stays around,
    // then release the spinlock so that we can call Mm.
    //

    if (SharedCacheMap != NULL) {

        //
        // Now loop to make sure that no one is currently caching the file.
        //

        if (UninitializeCacheMaps) {

            while (!IsListEmpty( &SharedCacheMap->PrivateList )) {

                PrivateCacheMap = CONTAINING_RECORD( SharedCacheMap->PrivateList.Flink,
                                                     PRIVATE_CACHE_MAP,
                                                     PrivateLinks );

                CcUninitializeCacheMap( PrivateCacheMap->FileObject, NULL, NULL );
            }
        }

        //
        // Now, let's unmap and purge here.
        //
        // We still need to wait for any dangling cache read or writes.
        //
        // In fact we have to loop and wait because the lazy writer can
        // sneak in and do an CcGetVirtualAddressIfMapped, and we are not
        // synchronized.
        //

        while ((SharedCacheMap->Vacbs != NULL) &&
               !CcUnmapVacbArray( SharedCacheMap, FileOffset, Length, FALSE )) {

            CcWaitOnActiveCount( SharedCacheMap );
        }
    }

    //
    // Purge failures are extremely rare if there are no user mapped sections.
    // However, it is possible that we will get one from our own mapping, if
    // the file is being lazy deleted from a previous open.  For that case
    // we wait here until the purge succeeds, so that we are not left with
    // old user file data.  Although Length is actually invariant in this loop,
    // we do need to keep checking that we are allowed to truncate in case a
    // user maps the file during a delay.
    //

    while (!(PurgeWorked = MmPurgeSection(SectionObjectPointer,
                                          FileOffset,
                                          Length,
                                          (BOOLEAN)((SharedCacheMap !=NULL) &&
                                                    ARGUMENT_PRESENT(FileOffset)))) &&
           (Length == 0) &&
           MmCanFileBeTruncated(SectionObjectPointer, FileOffset)) {

        (VOID)KeDelayExecutionThread( KernelMode, FALSE, &CcCollisionDelay );
    }

    //
    // Reduce the open count on the SharedCacheMap if there was one.
    //

    if (SharedCacheMap != NULL) {

        //
        // Serialize again to decrement the open count.
        //

        CcAcquireMasterLock( &OldIrql );

        CcDecrementOpenCount( SharedCacheMap, 'scPF' );

        if ((SharedCacheMap->OpenCount == 0) &&
            !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
            (SharedCacheMap->DirtyPages == 0)) {

            //
            // Move to the dirty list.
            //

            RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
            InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                            &SharedCacheMap->SharedCacheMapLinks );

            //
            // Make sure the Lazy Writer will wake up, because we
            // want him to delete this SharedCacheMap.
            //

            LazyWriter.OtherWork = TRUE;
            if (!LazyWriter.ScanActive) {
                CcScheduleLazyWriteScan( FALSE );
            }
        }

        CcReleaseMasterLock( OldIrql );
    }

    DebugTrace(-1, me, "CcPurgeCacheSection -> %02lx\n", PurgeWorked );

    return PurgeWorked;
}
  2049. //
  2050. // Internal support routine.
  2051. //
  2052. VOID
  2053. CcUnmapAndPurge(
  2054. IN PSHARED_CACHE_MAP SharedCacheMap
  2055. )
  2056. /*++
  2057. Routine Description:
  2058. This routine may be called to unmap and purge a section, causing Memory
  2059. Management to throw the pages out and reset his notion of file size.
  2060. Arguments:
  2061. SharedCacheMap - Pointer to SharedCacheMap of section to purge.
  2062. Return Value:
  2063. None.
  2064. --*/
  2065. {
  2066. PFILE_OBJECT FileObject;
  2067. FileObject = SharedCacheMap->FileObject;
  2068. //
  2069. // Unmap all Vacbs
  2070. //
  2071. if (SharedCacheMap->Vacbs != NULL) {
  2072. (VOID)CcUnmapVacbArray( SharedCacheMap, NULL, 0, FALSE );
  2073. }
  2074. //
  2075. // Now that the file is unmapped, we can purge the truncated
  2076. // pages from memory, if TRUNCATE_REQUIRED. Note that since the
  2077. // entire section is being purged (FileSize == NULL), the purge
  2078. // and subsequent delete of the SharedCacheMap should drop
  2079. // all references on the section and file object clearing the
  2080. // way for the Close Call and actual file delete to occur
  2081. // immediately.
  2082. //
  2083. if (FlagOn(SharedCacheMap->Flags, TRUNCATE_REQUIRED)) {
  2084. DebugTrace( 0, mm, "MmPurgeSection:\n", 0 );
  2085. DebugTrace( 0, mm, " SectionObjectPointer = %08lx\n",
  2086. FileObject->SectionObjectPointer );
  2087. DebugTrace2(0, mm, " Offset = %08lx\n",
  2088. SharedCacheMap->FileSize.LowPart,
  2089. SharedCacheMap->FileSize.HighPart );
  2090. CcPurgeCacheSection( FileObject->SectionObjectPointer,
  2091. NULL,
  2092. 0,
  2093. FALSE );
  2094. }
  2095. }
VOID
CcDeleteMbcb(
    IN PSHARED_CACHE_MAP SharedCacheMap
    )

/*++

Routine Description:

    This routine may be called to reset the Mbcb for a stream to say
    there are no dirty pages, and free all auxillary allocation.

Arguments:

    SharedCacheMap - Pointer to SharedCacheMap.

Return Value:

    None.

--*/

{
    PMBCB Mbcb;
    PBITMAP_RANGE BitmapRange;
    KLOCK_QUEUE_HANDLE LockHandle;
    ULONG DoDrain = FALSE;
    PLIST_ENTRY NextEntry;
    LIST_ENTRY BitmapRangesToFree;    // ranges queued here are freed after the lock is dropped

    InitializeListHead( &BitmapRangesToFree );

    KeAcquireInStackQueuedSpinLock( &SharedCacheMap->BcbSpinLock, &LockHandle );

    Mbcb = SharedCacheMap->Mbcb;

    //
    // Is there an Mbcb?
    //

    if (Mbcb != NULL) {

        //
        // First deduct the dirty pages we are getting rid of.
        //
        // The master lock is taken at DPC level because the queued
        // spinlock above has already raised IRQL.
        //

        CcAcquireMasterLockAtDpcLevel();
        CcDeductDirtyPages( SharedCacheMap, Mbcb->DirtyPages );
        CcReleaseMasterLockFromDpcLevel();

        //
        // Now loop through all of the ranges.
        //

        while (!IsListEmpty(&Mbcb->BitmapRanges)) {

            //
            // Get next range and remove it from the list.
            //

            BitmapRange = (PBITMAP_RANGE)CONTAINING_RECORD( Mbcb->BitmapRanges.Flink,
                                                            BITMAP_RANGE,
                                                            Links );

            RemoveEntryList( &BitmapRange->Links );

            //
            // If there is a bitmap, and it is not the initial embedded one, then
            // delete it.
            //

            if ((BitmapRange->Bitmap != NULL) &&
                (BitmapRange->Bitmap != (PULONG)&Mbcb->BitmapRange2)) {

                DoDrain = TRUE;

                //
                // Usually the bitmap is all zeros at this point, but it may not be.
                // Bitmap blocks are recycled through the Vacb level zone, so they
                // must be returned zeroed.
                //

                if (BitmapRange->DirtyPages != 0) {
                    RtlZeroMemory( BitmapRange->Bitmap, MBCB_BITMAP_BLOCK_SIZE );
                }

                CcAcquireVacbLockAtDpcLevel();
                CcDeallocateVacbLevel( (PVACB *)BitmapRange->Bitmap, FALSE );
                CcReleaseVacbLockFromDpcLevel();
            }

            //
            // If the range is not one of the initial embedded ranges, then delete it.
            // (Embedded ranges live inside the MBCB structure itself, so the
            // address comparison against the MBCB's extent identifies them.)
            //

            if ((BitmapRange < (PBITMAP_RANGE)Mbcb) ||
                (BitmapRange >= (PBITMAP_RANGE)((PCHAR)Mbcb + sizeof(MBCB)))) {

                InsertTailList( &BitmapRangesToFree, &BitmapRange->Links );
            }
        }

        //
        // Zero the pointer and get out.
        //

        SharedCacheMap->Mbcb = NULL;

        KeReleaseInStackQueuedSpinLock( &LockHandle );

        //
        // Free all the pool now that no locks are held.
        //

        while (!IsListEmpty(&BitmapRangesToFree)) {
            NextEntry = RemoveHeadList( &BitmapRangesToFree );
            BitmapRange = CONTAINING_RECORD ( NextEntry,
                                              BITMAP_RANGE,
                                              Links );
            ExFreePool( BitmapRange );
        }

        //
        // Now delete the Mbcb.
        //

        CcDeallocateBcb( (PBCB)Mbcb );

    } else {

        KeReleaseInStackQueuedSpinLock( &LockHandle );
    }

    if (DoDrain) {
        CcDrainVacbLevelZone();
    }
}
  2191. VOID
  2192. CcSetDirtyPageThreshold (
  2193. IN PFILE_OBJECT FileObject,
  2194. IN ULONG DirtyPageThreshold
  2195. )
  2196. /*++
  2197. Routine Description:
  2198. This routine may be called to set a dirty page threshold for this
  2199. stream. The write throttling will kick in whenever the file system
  2200. attempts to exceed the dirty page threshold for this file.
  2201. Arguments:
  2202. FileObject - Supplies file object for the stream
  2203. DirtyPageThreshold - Supplies the dirty page threshold for this stream,
  2204. or 0 for no threshold.
  2205. Return Value:
  2206. None
  2207. Environment:
  2208. The caller must guarantee exclusive access to the FsRtl header flags,
  2209. for example, by calling this routine once during create of the structure
  2210. containing the header. Then it would call the routine again when actually
  2211. caching the stream.
  2212. --*/
  2213. {
  2214. PSHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
  2215. if (SharedCacheMap != NULL) {
  2216. SharedCacheMap->DirtyPageThreshold = DirtyPageThreshold;
  2217. }
  2218. //
  2219. // Test the flag before setting, in case the caller is no longer properly
  2220. // synchronized.
  2221. //
  2222. if (!FlagOn(((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->Flags,
  2223. FSRTL_FLAG_LIMIT_MODIFIED_PAGES)) {
  2224. SetFlag(((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->Flags,
  2225. FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
  2226. }
  2227. }
VOID
CcZeroEndOfLastPage (
    IN PFILE_OBJECT FileObject
    )

/*++

Routine Description:

    This routine is only called by Mm before mapping a user view to
    a section.  If there is an uninitialized page at the end of the
    file, we zero it by freeing that page.

Parameters:

    FileObject - File object for section to be mapped

Return Value:

    None

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    ULONG ActivePage;
    ULONG PageIsDirty;
    KIRQL OldIrql;
    PVOID NeedToZero = NULL;
    PVACB ActiveVacb = NULL;
    IO_STATUS_BLOCK Iosb;
    BOOLEAN PurgeResult;
    BOOLEAN ReferencedCacheMap = FALSE;

    //
    // See if we have an active Vacb, that we need to free.
    //

    FsRtlAcquireFileExclusive( FileObject );
    CcAcquireMasterLock( &OldIrql );
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    if (SharedCacheMap != NULL) {

        //
        // See if there is an active vacb.
        //

        if ((SharedCacheMap->ActiveVacb != NULL) || ((NeedToZero = SharedCacheMap->NeedToZero) != NULL)) {

            //
            // Reference the map so it cannot go away once the master
            // lock is dropped below.
            //

            CcIncrementOpenCount( SharedCacheMap, 'peZS' );
            ReferencedCacheMap = TRUE;
            GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
        }
    }

    CcReleaseMasterLock( OldIrql );

    //
    // Remember in FsRtl header there is a user section.
    // If this is an advanced header then also acquire the mutex to access
    // this field.
    //

    if (FlagOn( ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags,
                FSRTL_FLAG_ADVANCED_HEADER )) {

        ExAcquireFastMutex( ((PFSRTL_ADVANCED_FCB_HEADER)FileObject->FsContext)->FastMutex );

        SetFlag( ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags,
                 FSRTL_FLAG_USER_MAPPED_FILE );

        ExReleaseFastMutex( ((PFSRTL_ADVANCED_FCB_HEADER)FileObject->FsContext)->FastMutex );

    } else {

        SetFlag( ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags,
                 FSRTL_FLAG_USER_MAPPED_FILE );
    }

    //
    // Free the active vacb now so we don't deadlock if we have to purge
    //

    if ((ActiveVacb != NULL) || (NeedToZero != NULL)) {

        CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
    }

    if (FlagOn( ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags2, FSRTL_FLAG2_PURGE_WHEN_MAPPED )) {

        if (FileObject->SectionObjectPointer->SharedCacheMap) {
            ASSERT( ((PSHARED_CACHE_MAP)(FileObject->SectionObjectPointer->SharedCacheMap))->VacbActiveCount == 0 );
        }

        CcFlushCache( FileObject->SectionObjectPointer, NULL, 0, &Iosb );

        //
        // Only purge if the flush was successful so we don't lose user data
        //

        if (Iosb.Status == STATUS_SUCCESS) {
            PurgeResult = CcPurgeCacheSection( FileObject->SectionObjectPointer, NULL, 0, FALSE );
        }

        if (FileObject->SectionObjectPointer->SharedCacheMap) {
            ASSERT( ((PSHARED_CACHE_MAP)(FileObject->SectionObjectPointer->SharedCacheMap))->VacbActiveCount == 0 );
        }
    }

    FsRtlReleaseFile( FileObject );

    //
    // If the file is cached and we have a Vacb to free, we need to
    // use the lazy writer callback to synchronize so no one will be
    // extending valid data.
    //

    if (ReferencedCacheMap) {

        //
        // Serialize again to decrement the open count.
        //

        CcAcquireMasterLock( &OldIrql );

        CcDecrementOpenCount( SharedCacheMap, 'peZF' );

        if ((SharedCacheMap->OpenCount == 0) &&
            !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
            (SharedCacheMap->DirtyPages == 0)) {

            //
            // Move to the dirty list.
            //

            RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
            InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                            &SharedCacheMap->SharedCacheMapLinks );

            //
            // Make sure the Lazy Writer will wake up, because we
            // want him to delete this SharedCacheMap.
            //

            LazyWriter.OtherWork = TRUE;
            if (!LazyWriter.ScanActive) {
                CcScheduleLazyWriteScan( FALSE );
            }
        }

        CcReleaseMasterLock( OldIrql );
    }
}
  2338. BOOLEAN
  2339. CcZeroData (
  2340. IN PFILE_OBJECT FileObject,
  2341. IN PLARGE_INTEGER StartOffset,
  2342. IN PLARGE_INTEGER EndOffset,
  2343. IN BOOLEAN Wait
  2344. )
  2345. /*++
  2346. Routine Description:
  2347. This routine attempts to zero the specified file data and deliver the
  2348. correct I/O status.
  2349. If the caller does not want to block (such as for disk I/O), then
  2350. Wait should be supplied as FALSE. If Wait was supplied as FALSE and
  2351. it is currently impossible to zero all of the requested data without
  2352. blocking, then this routine will return FALSE. However, if the
  2353. required space is immediately accessible in the cache and no blocking is
  2354. required, this routine zeros the data and returns TRUE.
  2355. If the caller supplies Wait as TRUE, then this routine is guaranteed
  2356. to zero the data and return TRUE. If the correct space is immediately
  2357. accessible in the cache, then no blocking will occur. Otherwise,
  2358. the necessary work will be initiated to read and/or free cache data,
  2359. and the caller will be blocked until the data can be received.
  2360. File system Fsd's should typically supply Wait = TRUE if they are
  2361. processing a synchronous I/O requests, or Wait = FALSE if they are
  2362. processing an asynchronous request.
  2363. File system threads should supply Wait = TRUE.
  2364. IMPORTANT NOTE: File systems which call this routine must be prepared
  2365. to handle a special form of a write call where the Mdl is already
  2366. supplied. Namely, if Irp->MdlAddress is supplied, the file system
  2367. must check the low order bit of Irp->MdlAddress->ByteOffset. If it
  2368. is set, that means that the Irp was generated in this routine and
  2369. the file system must do two things:
  2370. Decrement Irp->MdlAddress->ByteOffset and Irp->UserBuffer
  2371. Clear Irp->MdlAddress immediately prior to completing the
  2372. request, as this routine expects to reuse the Mdl and
  2373. ultimately deallocate the Mdl itself.
  2374. Arguments:
  2375. FileObject - pointer to the FileObject for which a range of bytes
  2376. is to be zeroed. This FileObject may either be for
  2377. a cached file or a noncached file. If the file is
  2378. not cached, then WriteThrough must be TRUE and
  2379. StartOffset and EndOffset must be on sector boundaries.
  2380. StartOffset - Start offset in file to be zeroed.
  2381. EndOffset - End offset in file to be zeroed.
  2382. Wait - FALSE if caller may not block, TRUE otherwise (see description
  2383. above)
  2384. Return Value:
  2385. FALSE - if Wait was supplied as FALSE and the data was not zeroed.
  2386. TRUE - if the data has been zeroed.
  2387. Raises:
  2388. STATUS_INSUFFICIENT_RESOURCES - If a pool allocation failure occurs.
  2389. This can only occur if Wait was specified as TRUE. (If Wait is
  2390. specified as FALSE, and an allocation failure occurs, this
  2391. routine simply returns FALSE.)
  2392. --*/
  2393. {
  2394. PSHARED_CACHE_MAP SharedCacheMap;
  2395. PVOID CacheBuffer;
  2396. LARGE_INTEGER FOffset;
  2397. LARGE_INTEGER ToGo;
  2398. ULONG ZeroBytes, ZeroTransfer;
  2399. ULONG SectorMask;
  2400. ULONG i;
  2401. BOOLEAN WriteThrough;
  2402. BOOLEAN AggressiveZero = FALSE;
  2403. ULONG SavedState = 0;
  2404. ULONG MaxZerosInCache = MAX_ZEROS_IN_CACHE;
  2405. ULONG NumberOfColors = 1;
  2406. PBCB Bcb = NULL;
  2407. PCHAR Zeros = NULL;
  2408. PMDL ZeroMdl = NULL;
  2409. ULONG MaxBytesMappedInMdl = 0;
  2410. BOOLEAN Result = TRUE;
  2411. PPFN_NUMBER Page;
  2412. ULONG SavedByteCount;
  2413. LARGE_INTEGER SizeLeft;
  2414. DebugTrace(+1, me, "CcZeroData\n", 0 );
  2415. WriteThrough = (BOOLEAN)(((FileObject->Flags & FO_WRITE_THROUGH) != 0) ||
  2416. (FileObject->PrivateCacheMap == NULL));
  2417. //
  2418. // If the caller specified Wait, but the FileObject is WriteThrough,
  2419. // then we need to just get out.
  2420. //
  2421. if (WriteThrough && !Wait) {
  2422. DebugTrace(-1, me, "CcZeroData->FALSE (WriteThrough && !Wait)\n", 0 );
  2423. return FALSE;
  2424. }
  2425. SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
  2426. SectorMask = IoGetRelatedDeviceObject(FileObject)->SectorSize - 1;
  2427. FOffset = *StartOffset;
  2428. //
  2429. // Calculate how much to zero this time.
  2430. //
  2431. ToGo.QuadPart = EndOffset->QuadPart - FOffset.QuadPart;
  2432. //
  2433. // This magic number is what the fastpaths throttle on, and they will present
  2434. // non-sector aligned zeroing requests. As long as we will always handle them
  2435. // on the cached path, we are OK.
  2436. //
  2437. // If we will not make the cached path, the request must be aligned.
  2438. //
  2439. ASSERT( ToGo.QuadPart <= 0x2000 ||
  2440. ((ToGo.LowPart & SectorMask) == 0 &&
  2441. (FOffset.LowPart & SectorMask) == 0));
  2442. //
  2443. // We will only do zeroing in the cache if the caller is using a
  2444. // cached file object, and did not specify WriteThrough. We are
  2445. // willing to zero some data in the cache if our total is not too
  2446. // much, or there is sufficient available pages.
  2447. //
  2448. if (((ToGo.QuadPart <= 0x2000) ||
  2449. (MmAvailablePages >= ((MAX_ZEROS_IN_CACHE / PAGE_SIZE) * 4))) && !WriteThrough) {
  2450. try {
  2451. while (MaxZerosInCache != 0) {
  2452. ULONG ReceivedLength;
  2453. LARGE_INTEGER BeyondLastByte;
  2454. if ( ToGo.QuadPart > (LONGLONG)MaxZerosInCache ) {
  2455. //
  2456. // If Wait == FALSE, then there is no point in getting started,
  2457. // because we would have to start all over again zeroing with
  2458. // Wait == TRUE, since we would fall out of this loop and
  2459. // start synchronously writing pages to disk.
  2460. //
  2461. if (!Wait) {
  2462. DebugTrace(-1, me, "CcZeroData -> FALSE\n", 0 );
  2463. try_return( Result = FALSE );
  2464. }
  2465. }
  2466. else {
  2467. MaxZerosInCache = ToGo.LowPart;
  2468. }
  2469. //
  2470. // Call local routine to Map or Access the file data, then zero the data,
  2471. // then call another local routine to free the data. If we cannot map
  2472. // the data because of a Wait condition, return FALSE.
  2473. //
  2474. // Note that this call may result in an exception, however, if it
  2475. // does no Bcb is returned and this routine has absolutely no
  2476. // cleanup to perform. Therefore, we do not have a try-finally
  2477. // and we allow the possibility that we will simply be unwound
  2478. // without notice.
  2479. //
  2480. if (!CcPinFileData( FileObject,
  2481. &FOffset,
  2482. MaxZerosInCache,
  2483. FALSE,
  2484. TRUE,
  2485. Wait,
  2486. &Bcb,
  2487. &CacheBuffer,
  2488. &BeyondLastByte )) {
  2489. DebugTrace(-1, me, "CcZeroData -> FALSE\n", 0 );
  2490. try_return( Result = FALSE );
  2491. }
  2492. //
  2493. // Calculate how much data is described by Bcb starting at our desired
  2494. // file offset. If it is more than we need, we will zero the whole thing
  2495. // anyway.
  2496. //
  2497. ReceivedLength = (ULONG)(BeyondLastByte.QuadPart - FOffset.QuadPart );
  2498. //
  2499. // Now attempt to allocate an Mdl to describe the mapped data.
  2500. //
  2501. ZeroMdl = IoAllocateMdl( CacheBuffer,
  2502. ReceivedLength,
  2503. FALSE,
  2504. FALSE,
  2505. NULL );
  2506. if (ZeroMdl == NULL) {
  2507. ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
  2508. }
  2509. //
  2510. // It is necessary to probe and lock the pages, or else
  2511. // the pages may not still be in memory when we do the
  2512. // MmSetAddressRangeModified for the dirty Bcb.
  2513. //
  2514. MmDisablePageFaultClustering(&SavedState);
  2515. MmProbeAndLockPages( ZeroMdl, KernelMode, IoReadAccess );
  2516. MmEnablePageFaultClustering(SavedState);
  2517. SavedState = 0;
  2518. //
  2519. // Assume we did not get all the data we wanted, and set FOffset
  2520. // to the end of the returned data, and advance buffer pointer.
  2521. //
  2522. FOffset = BeyondLastByte;
  2523. //
  2524. // Figure out how many bytes we are allowed to zero in the cache.
  2525. // Note it is possible we have zeroed a little more than our maximum,
  2526. // because we hit an existing Bcb that extended beyond the range.
  2527. //
  2528. if (MaxZerosInCache <= ReceivedLength) {
  2529. MaxZerosInCache = 0;
  2530. }
  2531. else {
  2532. MaxZerosInCache -= ReceivedLength;
  2533. }
  2534. //
  2535. // Now set the Bcb dirty. We have to explicitly set the address
  2536. // range modified here, because that work otherwise gets deferred
  2537. // to the Lazy Writer.
  2538. //
  2539. MmSetAddressRangeModified( CacheBuffer, ReceivedLength );
  2540. CcSetDirtyPinnedData( Bcb, NULL );
  2541. //
  2542. // Unmap the data now
  2543. //
  2544. CcUnpinFileData( Bcb, FALSE, UNPIN );
  2545. Bcb = NULL;
  2546. //
2547. // Unlock and free the Mdl (we only loop back if we crossed
2548. // a 256KB boundary).
  2549. //
  2550. MmUnlockPages( ZeroMdl );
  2551. IoFreeMdl( ZeroMdl );
  2552. ZeroMdl = NULL;
  2553. }
  2554. try_exit: NOTHING;
  2555. } finally {
  2556. if (SavedState != 0) {
  2557. MmEnablePageFaultClustering(SavedState);
  2558. }
  2559. //
  2560. // Clean up only necessary in abnormal termination.
  2561. //
  2562. if (Bcb != NULL) {
  2563. CcUnpinFileData( Bcb, FALSE, UNPIN );
  2564. }
  2565. //
  2566. // Since the last thing in the above loop which can
  2567. // fail is the MmProbeAndLockPages, we only need to
  2568. // free the Mdl here.
  2569. //
  2570. if (ZeroMdl != NULL) {
  2571. IoFreeMdl( ZeroMdl );
  2572. }
  2573. }
  2574. //
  2575. // If hit a wait condition above, return it now.
  2576. //
  2577. if (!Result) {
  2578. return FALSE;
  2579. }
  2580. //
2581. // If we finished, get out now.
  2582. //
  2583. if ( FOffset.QuadPart >= EndOffset->QuadPart ) {
  2584. return TRUE;
  2585. }
  2586. }
  2587. //
  2588. // We either get here because we decided above not to zero anything in
  2589. // the cache directly, or else we zeroed up to our maximum and still
  2590. // have some left to zero direct to the file on disk. In either case,
  2591. // we will now zero from FOffset to *EndOffset, and then flush this
  2592. // range in case the file is cached/mapped, and there are modified
  2593. // changes in memory.
  2594. //
  2595. //
  2596. // Round FOffset and EndOffset up to sector boundaries, since
  2597. // we will be doing disk I/O, and calculate size left.
  2598. //
  2599. ASSERT( (FOffset.LowPart & SectorMask) == 0 );
  2600. FOffset.QuadPart += (LONGLONG)SectorMask;
  2601. FOffset.LowPart &= ~SectorMask;
  2602. SizeLeft.QuadPart = EndOffset->QuadPart + (LONGLONG)SectorMask;
  2603. SizeLeft.LowPart &= ~SectorMask;
  2604. SizeLeft.QuadPart -= FOffset.QuadPart;
  2605. ASSERT( (FOffset.LowPart & SectorMask) == 0 );
  2606. ASSERT( (SizeLeft.LowPart & SectorMask) == 0 );
  2607. if (SizeLeft.QuadPart == 0) {
  2608. return TRUE;
  2609. }
  2610. //
  2611. // try-finally to guarantee cleanup.
  2612. //
  2613. try {
  2614. //
  2615. // Allocate a page to hold the zeros we will write, and
  2616. // zero it.
  2617. //
  2618. ZeroBytes = NumberOfColors * PAGE_SIZE;
  2619. if (SizeLeft.HighPart == 0 && SizeLeft.LowPart < ZeroBytes) {
  2620. ZeroBytes = SizeLeft.LowPart;
  2621. }
  2622. Zeros = (PCHAR)ExAllocatePoolWithTag( NonPagedPoolCacheAligned, ZeroBytes, 'eZcC' );
  2623. if (Zeros != NULL) {
  2624. //
  2625. // Allocate and initialize an Mdl to describe the zeros
  2626. // we need to transfer. Allocate to cover the maximum
  2627. // size required, and we will use and reuse it in the
  2628. // loop below, initialized correctly.
  2629. //
  2630. if (SizeLeft.HighPart == 0 && SizeLeft.LowPart < MAX_ZERO_TRANSFER) {
  2631. ZeroTransfer = SizeLeft.LowPart;
  2632. } else {
  2633. //
  2634. // See how aggressive we can afford to be.
  2635. //
  2636. if (InterlockedIncrement( &CcAggressiveZeroCount ) <= CcAggressiveZeroThreshold) {
  2637. AggressiveZero = TRUE;
  2638. ZeroTransfer = MAX_ZERO_TRANSFER;
  2639. } else {
  2640. InterlockedDecrement( &CcAggressiveZeroCount );
  2641. ZeroTransfer = MIN_ZERO_TRANSFER;
  2642. }
  2643. }
  2644. //
2645. // Since the maximum zero may start at a very aggressive level, fall back
  2646. // until we really have to give up. Since filter drivers, filesystems and
  2647. // even storage drivers may need to map this Mdl, we have to pre-map it
  2648. // into system space so that we know enough PTEs are available. We also
  2649. // need to throttle our consumption of virtual addresses based on the size
  2650. // of the system and the number of parallel instances of this work outstanding.
  2651. // This may be a bit of overkill, but since running out of PTEs is a fatal
  2652. // event for the rest of the system, try to help out while still being fast.
  2653. //
  2654. while (TRUE) {
  2655. //
  2656. // Spin down trying to get an MDL which can describe our operation.
  2657. //
  2658. while (TRUE) {
  2659. ZeroMdl = IoAllocateMdl( Zeros, ZeroTransfer, FALSE, FALSE, NULL );
  2660. //
  2661. // Throttle ourselves to what we've physically allocated. Note that
  2662. // we could have started with an odd multiple of this number. If we
  2663. // tried for exactly that size and failed, we're toast.
  2664. //
  2665. if (ZeroMdl || ZeroTransfer == ZeroBytes) {
  2666. break;
  2667. }
  2668. Fall_Back:
  2669. //
  2670. // Fallback by half and round down to a sector multiple.
  2671. //
  2672. ZeroTransfer /= 2;
  2673. ZeroTransfer &= ~SectorMask;
  2674. if (ZeroTransfer < ZeroBytes) {
  2675. ZeroTransfer = ZeroBytes;
  2676. }
  2677. ASSERT( (ZeroTransfer & SectorMask) == 0 && ZeroTransfer != 0);
  2678. }
  2679. if (ZeroMdl == NULL) {
  2680. ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
  2681. }
  2682. //
  2683. // If we have throttled all the way down, stop and just build a
  2684. // simple MDL describing our previous allocation.
  2685. //
  2686. if (ZeroTransfer == ZeroBytes) {
  2687. MmBuildMdlForNonPagedPool( ZeroMdl );
  2688. break;
  2689. }
  2690. //
  2691. // Now we will temporarily lock the allocated pages
  2692. // only, and then replicate the page frame numbers through
  2693. // the entire Mdl to keep writing the same pages of zeros.
  2694. //
  2695. // It would be nice if Mm exported a way for us to not have
  2696. // to pull the Mdl apart and rebuild it ourselves, but this
2697. // is so bizarre a purpose as to be tolerable.
  2698. //
  2699. SavedByteCount = ZeroMdl->ByteCount;
  2700. ZeroMdl->ByteCount = ZeroBytes;
  2701. MmBuildMdlForNonPagedPool( ZeroMdl );
  2702. ZeroMdl->MdlFlags &= ~MDL_SOURCE_IS_NONPAGED_POOL;
  2703. ZeroMdl->MdlFlags |= MDL_PAGES_LOCKED;
  2704. ZeroMdl->MappedSystemVa = NULL;
  2705. ZeroMdl->ByteCount = SavedByteCount;
  2706. Page = MmGetMdlPfnArray( ZeroMdl );
  2707. for (i = NumberOfColors;
  2708. i < (ADDRESS_AND_SIZE_TO_SPAN_PAGES( 0, SavedByteCount ));
  2709. i++) {
  2710. *(Page + i) = *(Page + i - NumberOfColors);
  2711. }
  2712. if (MmGetSystemAddressForMdlSafe( ZeroMdl, LowPagePriority ) == NULL) {
  2713. //
  2714. // Blow away this Mdl and trim for the retry. Since it didn't
  2715. // get mapped, there is nothing fancy to do.
  2716. //
  2717. IoFreeMdl( ZeroMdl );
  2718. goto Fall_Back;
  2719. }
  2720. break;
  2721. }
  2722. //
  2723. // We failed to allocate the space we wanted, so we will go to
  2724. // half of a page and limp along.
  2725. //
  2726. } else {
  2727. //
  2728. // Of course, if we have a device which has large sectors, that defines
  2729. // the lower limit of our attempt.
  2730. //
  2731. if (IoGetRelatedDeviceObject(FileObject)->SectorSize < PAGE_SIZE / 2) {
  2732. ZeroBytes = PAGE_SIZE / 2;
  2733. Zeros = (PCHAR)ExAllocatePoolWithTag( NonPagedPoolCacheAligned, ZeroBytes, 'eZcC' );
  2734. }
  2735. //
  2736. // If we cannot get even that much, then let's write a sector at a time.
  2737. //
  2738. if (Zeros == NULL) {
  2739. ZeroBytes = IoGetRelatedDeviceObject(FileObject)->SectorSize;
  2740. Zeros = (PCHAR)ExAllocatePoolWithTag( NonPagedPoolCacheAligned, ZeroBytes, 'eZcC' );
  2741. //
  2742. // If we cannot get even the minimum, we have to give up.
  2743. //
  2744. if (Zeros == NULL) {
  2745. ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
  2746. }
  2747. }
  2748. //
  2749. // Allocate and initialize an Mdl to describe the zeros
  2750. // we need to transfer. Allocate to cover the maximum
  2751. // size required, and we will use and reuse it in the
  2752. // loop below, initialized correctly.
  2753. //
  2754. ZeroTransfer = ZeroBytes;
  2755. ZeroMdl = IoAllocateMdl( Zeros, ZeroBytes, FALSE, FALSE, NULL );
  2756. ASSERT( (ZeroTransfer & SectorMask) == 0 );
  2757. if (ZeroMdl == NULL) {
  2758. ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
  2759. }
  2760. //
  2761. // Now we will lock and map the allocated pages.
  2762. //
  2763. MmBuildMdlForNonPagedPool( ZeroMdl );
  2764. ASSERT( ZeroMdl->MappedSystemVa == Zeros );
  2765. }
  2766. //
  2767. // Zero the buffer now.
  2768. //
  2769. RtlZeroMemory( Zeros, ZeroBytes );
  2770. //
2771. // We have a mapped and zeroed range backed by an MDL to use. Note the
  2772. // size we have for cleanup, since we will possibly wind this down
  2773. // over the operation.
  2774. //
  2775. ASSERT( MmGetSystemAddressForMdl(ZeroMdl) );
  2776. MaxBytesMappedInMdl = ZeroMdl->ByteCount;
  2777. //
  2778. // Now loop to write buffers full of zeros through to the file
  2779. // until we reach the starting Vbn for the transfer.
  2780. //
  2781. ASSERT( ZeroTransfer != 0 &&
  2782. (ZeroTransfer & SectorMask) == 0 &&
  2783. (SizeLeft.LowPart & SectorMask) == 0 );
  2784. while ( SizeLeft.QuadPart != 0 ) {
  2785. IO_STATUS_BLOCK IoStatus;
  2786. NTSTATUS Status;
  2787. KEVENT Event;
  2788. //
  2789. // See if we really need to write that many zeros, and
  2790. // trim the size back if not.
  2791. //
  2792. if ( (LONGLONG)ZeroTransfer > SizeLeft.QuadPart ) {
  2793. ZeroTransfer = SizeLeft.LowPart;
  2794. }
  2795. //
  2796. // (Re)initialize the kernel event to FALSE.
  2797. //
  2798. KeInitializeEvent( &Event, NotificationEvent, FALSE );
  2799. //
  2800. // Initiate and wait for the synchronous transfer.
  2801. //
  2802. ZeroMdl->ByteCount = ZeroTransfer;
  2803. Status = IoSynchronousPageWrite( FileObject,
  2804. ZeroMdl,
  2805. &FOffset,
  2806. &Event,
  2807. &IoStatus );
  2808. //
  2809. // If pending is returned (which is a successful status),
  2810. // we must wait for the request to complete.
  2811. //
  2812. if (Status == STATUS_PENDING) {
  2813. KeWaitForSingleObject( &Event,
  2814. Executive,
  2815. KernelMode,
  2816. FALSE,
  2817. (PLARGE_INTEGER)NULL);
  2818. }
  2819. //
  2820. // If we got an error back in Status, then the Iosb
  2821. // was not written, so we will just copy the status
  2822. // there, then test the final status after that.
  2823. //
  2824. if (!NT_SUCCESS(Status)) {
  2825. ExRaiseStatus( Status );
  2826. }
  2827. if (!NT_SUCCESS(IoStatus.Status)) {
  2828. ExRaiseStatus( IoStatus.Status );
  2829. }
  2830. //
  2831. // If we succeeded, then update where we are at by how much
  2832. // we wrote, and loop back to see if there is more.
  2833. //
  2834. FOffset.QuadPart = FOffset.QuadPart + (LONGLONG)ZeroTransfer;
  2835. SizeLeft.QuadPart = SizeLeft.QuadPart - (LONGLONG)ZeroTransfer;
  2836. }
  2837. }
  2838. finally{
  2839. //
  2840. // Clean up anything from zeroing pages on a noncached
  2841. // write.
  2842. //
  2843. if (ZeroMdl != NULL) {
  2844. if ((MaxBytesMappedInMdl != 0) &&
  2845. !FlagOn(ZeroMdl->MdlFlags, MDL_SOURCE_IS_NONPAGED_POOL)) {
  2846. ZeroMdl->ByteCount = MaxBytesMappedInMdl;
  2847. MmUnmapLockedPages (ZeroMdl->MappedSystemVa, ZeroMdl);
  2848. }
  2849. IoFreeMdl( ZeroMdl );
  2850. }
  2851. if (AggressiveZero) {
  2852. InterlockedDecrement( &CcAggressiveZeroCount );
  2853. }
  2854. if (Zeros != NULL) {
  2855. ExFreePool( Zeros );
  2856. }
  2857. DebugTrace(-1, me, "CcZeroData -> TRUE\n", 0 );
  2858. }
  2859. return TRUE;
  2860. }
  2861. PFILE_OBJECT
  2862. CcGetFileObjectFromSectionPtrs (
  2863. IN PSECTION_OBJECT_POINTERS SectionObjectPointer
  2864. )
  2865. /*++
  2866. This routine may be used to retrieve a pointer to the FileObject that the
  2867. Cache Manager is using for a given file from the Section Object Pointers
  2868. in the nonpaged File System structure Fcb. The use of this function is
  2869. intended for exceptional use unrelated to the processing of user requests,
  2870. when the File System would otherwise not have a FileObject at its disposal.
  2871. An example is for mount verification.
  2872. Note that the File System is responsible for insuring that the File
  2873. Object does not go away while in use. It is impossible for the Cache
  2874. Manager to guarantee this.
  2875. Arguments:
  2876. SectionObjectPointer - A pointer to the Section Object Pointers
  2877. structure in the nonpaged Fcb.
  2878. Return Value:
  2879. Pointer to the File Object, or NULL if the file is not cached or no
  2880. longer cached
  2881. --*/
  2882. {
  2883. KIRQL OldIrql;
  2884. PFILE_OBJECT FileObject = NULL;
  2885. //
  2886. // Serialize with Creation/Deletion of all Shared CacheMaps
  2887. //
  2888. CcAcquireMasterLock( &OldIrql );
  2889. if (SectionObjectPointer->SharedCacheMap != NULL) {
  2890. FileObject = ((PSHARED_CACHE_MAP)SectionObjectPointer->SharedCacheMap)->FileObject;
  2891. }
  2892. CcReleaseMasterLock( OldIrql );
  2893. return FileObject;
  2894. }
  2895. PFILE_OBJECT
  2896. CcGetFileObjectFromBcb (
  2897. IN PVOID Bcb
  2898. )
  2899. /*++
  2900. This routine may be used to retrieve a pointer to the FileObject that the
  2901. Cache Manager is using for a given file from a Bcb of that file.
  2902. Note that the File System is responsible for insuring that the File
  2903. Object does not go away while in use. It is impossible for the Cache
  2904. Manager to guarantee this.
  2905. Arguments:
  2906. Bcb - A pointer to the pinned Bcb.
  2907. Return Value:
  2908. Pointer to the File Object, or NULL if the file is not cached or no
  2909. longer cached
  2910. --*/
  2911. {
  2912. return ((PBCB)Bcb)->SharedCacheMap->FileObject;
  2913. }