Leaked source code of windows server 2003
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

12300 lines
408 KiB

  1. /*++
  2. Copyright (c) 1991 Microsoft Corporation
  3. Module Name:
  4. DevIoSup.c
  5. Abstract:
  6. This module implements the low lever disk read/write support for Ntfs
  7. Author:
  8. Brian Andrew BrianAn
  9. Tom Miller TomM
  10. Revision History:
  11. --*/
  12. #include "NtfsProc.h"
  13. #include <ntddft.h>
  14. #include <ntddvol.h>
  15. #ifdef NTFS_RWC_DEBUG
  16. extern BOOLEAN NtfsBreakOnConflict;
  17. #endif
  18. //
  19. // Number of pages to allocate a mdl on the stack for
  20. //
  21. #define NTFS_MDL_TRANSFER_PAGES 0x10
  22. //
  23. // The Bug check file id for this module
  24. //
  25. #define BugCheckFileId (NTFS_BUG_CHECK_DEVIOSUP)
  26. //
  27. // Local debug trace level
  28. //
  29. #define Dbg (DEBUG_TRACE_DEVIOSUP)
  30. //
  31. // Define a tag for general pool allocations from this module
  32. //
  33. #undef MODULE_POOL_TAG
  34. #define MODULE_POOL_TAG ('DFtN')
  35. //
  36. // We need a special test for success, whenever we are seeing if we should
  37. // hot fix, because the FT driver returns one of two success codes if a read or
  38. // write failed to only one of the members.
  39. //
  40. #define FT_SUCCESS(STS) (NT_SUCCESS(STS) && \
  41. ((STS) != STATUS_FT_READ_RECOVERY_FROM_BACKUP) && \
  42. ((STS) != STATUS_FT_WRITE_RECOVERY))
  43. //
  44. // Boolean to control whether we output HotFix information to the debugger.
  45. //
  46. #if DBG
  47. BOOLEAN NtfsHotFixTrace = FALSE;
  48. #define HotFixTrace(X) {if (NtfsHotFixTrace) KdPrint(X);}
  49. #else
  50. #define HotFixTrace(X) {NOTHING;}
  51. #endif
  52. //
  53. // Boolean to indicate whether to break on a decompress error
  54. //
  55. #if (defined BRIANDBG || defined SYSCACHE_DEBUG)
  56. BOOLEAN NtfsStopOnDecompressError = TRUE;
  57. #else
  58. BOOLEAN NtfsStopOnDecompressError = FALSE;
  59. #endif
  60. //
  61. // Macro to collect the Disk IO stats.
  62. //
  63. #define CollectDiskIoStats(VCB,SCB,FUNCTION,COUNT) { \
  64. PFILESYSTEM_STATISTICS FsStats = &(VCB)->Statistics[KeGetCurrentProcessorNumber()].Common; \
  65. ASSERT((SCB)->Fcb != NULL); \
  66. if (NtfsIsTypeCodeUserData( (SCB)->AttributeTypeCode ) && \
  67. !FlagOn( (SCB)->Fcb->FcbState, FCB_STATE_SYSTEM_FILE )) { \
  68. if ((FUNCTION) == IRP_MJ_WRITE) { \
  69. FsStats->UserDiskWrites += (COUNT); \
  70. } else { \
  71. FsStats->UserDiskReads += (COUNT); \
  72. } \
  73. } else if ((SCB) != (VCB)->LogFileScb) { \
  74. if ((FUNCTION) == IRP_MJ_WRITE) { \
  75. FsStats->MetaDataDiskWrites += (COUNT); \
  76. } else { \
  77. FsStats->MetaDataDiskReads += (COUNT); \
  78. } \
  79. } \
  80. }
  81. //
  82. // Define a context for holding the context the compression state
  83. // for buffers.
  84. //
  85. typedef struct COMPRESSION_CONTEXT {
  86. //
  87. // Pointer to allocated compression buffer, and its length
  88. //
  89. PUCHAR CompressionBuffer;
  90. ULONG CompressionBufferLength;
  91. //
  92. // Saved fields from originating Irp
  93. //
  94. PMDL SavedMdl;
  95. PVOID SavedUserBuffer;
  96. //
  97. // System Buffer pointer and offset in the System (user's) buffer
  98. //
  99. PVOID SystemBuffer;
  100. ULONG SystemBufferOffset;
  101. //
  102. // IoRuns array in use. This array may be extended one time
  103. // in NtfsPrepareBuffers.
  104. //
  105. PIO_RUN IoRuns;
  106. ULONG AllocatedRuns;
  107. //
  108. // Workspace pointer, so that cleanup can occur in the caller.
  109. //
  110. PVOID WorkSpace;
  111. //
  112. // Write acquires the Scb.
  113. //
  114. BOOLEAN ScbAcquired;
  115. BOOLEAN FinishBuffersNeeded;
  116. //
  117. // If this field is TRUE, it means the data has been copied from the
  118. // system buffer to the compression buffer, and further operations,
  119. // like compression, should look to the compression buffer for their
  120. // source data.
  121. //
  122. BOOLEAN DataTransformed;
  123. } COMPRESSION_CONTEXT, *PCOMPRESSION_CONTEXT;
  124. //
  125. // Local support routines
  126. //
  127. VOID
  128. NtfsAllocateCompressionBuffer (
  129. IN PIRP_CONTEXT IrpContext,
  130. IN PSCB ThisScb,
  131. IN PIRP Irp,
  132. IN PCOMPRESSION_CONTEXT CompressionContext,
  133. IN OUT PULONG CompressionBufferLength
  134. );
  135. VOID
  136. NtfsDeallocateCompressionBuffer (
  137. IN PIRP Irp,
  138. IN PCOMPRESSION_CONTEXT CompressionContext,
  139. IN BOOLEAN Reinitialize
  140. );
  141. LONG
  142. NtfsCompressionFilter (
  143. IN PIRP_CONTEXT IrpContext,
  144. IN PEXCEPTION_POINTERS ExceptionPointer
  145. );
  146. ULONG
  147. NtfsPrepareBuffers (
  148. IN PIRP_CONTEXT IrpContext,
  149. IN PIRP Irp,
  150. IN PSCB Scb,
  151. IN PVBO StartingVbo,
  152. IN ULONG ByteCount,
  153. IN ULONG StreamFlags,
  154. IN OUT PBOOLEAN Wait,
  155. OUT PULONG NumberRuns,
  156. OUT PCOMPRESSION_CONTEXT CompressionContext
  157. );
  158. NTSTATUS
  159. NtfsFinishBuffers (
  160. IN PIRP_CONTEXT IrpContext,
  161. IN PIRP Irp,
  162. IN PSCB Scb,
  163. IN PVBO StartingVbo,
  164. IN ULONG ByteCount,
  165. IN ULONG NumberRuns,
  166. IN PCOMPRESSION_CONTEXT CompressionContext,
  167. IN ULONG StreamFlags
  168. );
  169. VOID
  170. NtfsMultipleAsync (
  171. IN PIRP_CONTEXT IrpContext,
  172. IN PDEVICE_OBJECT DeviceObject,
  173. IN PIRP MasterIrp,
  174. IN ULONG MultipleIrpCount,
  175. IN PIO_RUN IoRuns,
  176. IN UCHAR IrpSpFlags
  177. );
  178. VOID
  179. NtfsSingleAsync (
  180. IN PIRP_CONTEXT IrpContext,
  181. IN PDEVICE_OBJECT DeviceObject,
  182. IN LBO StartingLbo,
  183. IN ULONG ByteCount,
  184. IN PIRP Irp,
  185. IN UCHAR MajorFunction,
  186. IN UCHAR IrpSpFlags
  187. );
  188. VOID
  189. NtfsWaitSync (
  190. IN PIRP_CONTEXT IrpContext
  191. );
  192. NTSTATUS
  193. NtfsMultiAsyncCompletionRoutine (
  194. IN PDEVICE_OBJECT DeviceObject,
  195. IN PIRP Irp,
  196. IN PVOID Contxt
  197. );
  198. NTSTATUS
  199. NtfsMultiSyncCompletionRoutine (
  200. IN PDEVICE_OBJECT DeviceObject,
  201. IN PIRP Irp,
  202. IN PVOID Contxt
  203. );
  204. NTSTATUS
  205. NtfsSingleAsyncCompletionRoutine (
  206. IN PDEVICE_OBJECT DeviceObject,
  207. IN PIRP Irp,
  208. IN PVOID Contxt
  209. );
  210. NTSTATUS
  211. NtfsSingleSyncCompletionRoutine (
  212. IN PDEVICE_OBJECT DeviceObject,
  213. IN PIRP Irp,
  214. IN PVOID Contxt
  215. );
  216. NTSTATUS
  217. NtfsPagingFileCompletionRoutine (
  218. IN PDEVICE_OBJECT DeviceObject,
  219. IN PIRP Irp,
  220. IN PVOID MasterIrp
  221. );
  222. NTSTATUS
  223. NtfsPagingFileNoAllocCompletionRoutine (
  224. IN PDEVICE_OBJECT DeviceObject,
  225. IN PIRP Irp,
  226. IN PVOID Context
  227. );
  228. VOID
  229. NtfsSingleNonAlignedSync (
  230. IN PIRP_CONTEXT IrpContext,
  231. IN PVCB Vcb,
  232. IN PSCB Scb,
  233. IN PUCHAR Buffer,
  234. IN VBO Vbo,
  235. IN LBO Lbo,
  236. IN ULONG ByteCount,
  237. IN PIRP Irp
  238. );
  239. NTSTATUS
  240. NtfsEncryptBuffers (
  241. IN PIRP_CONTEXT IrpContext,
  242. IN PIRP Irp,
  243. IN PSCB Scb,
  244. IN VBO StartingVbo,
  245. IN ULONG NumberRuns,
  246. IN PCOMPRESSION_CONTEXT CompressionContext
  247. );
  248. VOID
  249. NtfsFixDataError (
  250. IN PIRP_CONTEXT IrpContext,
  251. IN PSCB Scb,
  252. IN PDEVICE_OBJECT DeviceObject,
  253. IN PIRP MasterIrp,
  254. IN ULONG MultipleIrpCount,
  255. IN PIO_RUN IoRuns,
  256. IN UCHAR IrpSpFlags
  257. );
  258. VOID
  259. NtfsPostHotFix(
  260. IN PIRP Irp,
  261. IN PLONGLONG BadVbo,
  262. IN LONGLONG BadLbo,
  263. IN ULONG ByteLength,
  264. IN BOOLEAN DelayIrpCompletion
  265. );
  266. VOID
  267. NtfsPerformHotFix (
  268. IN PIRP_CONTEXT IrpContext
  269. );
  270. BOOLEAN
  271. NtfsGetReservedBuffer (
  272. IN PFCB ThisFcb,
  273. OUT PVOID *Buffer,
  274. OUT PULONG Length,
  275. IN UCHAR Need2
  276. );
  277. BOOLEAN
  278. NtfsFreeReservedBuffer (
  279. IN PVOID Buffer
  280. );
  281. LONG
  282. NtfsDefragExceptionFilter (
  283. IN PIRP_CONTEXT IrpContext OPTIONAL,
  284. IN PEXCEPTION_POINTERS ExceptionPointer,
  285. IN OUT PULONG DeletePendingFailureCountsLeft
  286. );
  287. #ifdef ALLOC_PRAGMA
  288. #pragma alloc_text(PAGE, NtfsReadFromPlex)
  289. #pragma alloc_text(PAGE, NtfsDefragFile)
  290. #endif
  291. INLINE
  292. BOOLEAN
  293. NtfsZeroEndOfBuffer (
  294. IN PIRP Irp,
  295. IN PNTFS_IO_CONTEXT Context
  296. )
  297. /*++
  298. Routine Description:
  299. This routine Zeros the end of an async transfer. Because the transfer is done
  300. in sector sized chunks there will be garbage data from the end of file size to
  301. the sector boundary. If there are any errors they will stored in the IoStatus field
  302. of the irp. We're going to allow out of resource errors in this path because its async.
  303. Only the synchronous paging paths have a guarantee of fwd. progress
  304. Arguments:
  305. Irp - Pointer to the Irp for which the buffer is to be zeroed
  306. Device - device which contains the vcb
  307. Context - io context which has the original operation bounds
  308. Return Value:
  309. TRUE if successful
  310. --*/
  311. {
  312. PIO_STACK_LOCATION IrpSp;
  313. PDEVICE_OBJECT DeviceObject;
  314. PVCB Vcb;
  315. PVOID SystemBuffer;
  316. ULONG RoundedTransfer;
  317. UCHAR Buffer[sizeof( MDL ) + sizeof( PFN_NUMBER ) * (NTFS_MDL_TRANSFER_PAGES + 1)];
  318. PMDL PartialMdl = (PMDL) Buffer;
  319. IrpSp = IoGetCurrentIrpStackLocation( Irp );
  320. DeviceObject = IrpSp->DeviceObject;
  321. //
  322. // Zero the difference between filesize and data read if necc. on reads
  323. //
  324. if ((IrpSp->MajorFunction == IRP_MJ_READ) &&
  325. (Context->Wait.Async.RequestedByteCount < IrpSp->Parameters.Read.Length)) {
  326. Vcb = &((PVOLUME_DEVICE_OBJECT) DeviceObject)->Vcb;
  327. ASSERT( Vcb->NodeTypeCode == NTFS_NTC_VCB );
  328. RoundedTransfer = BlockAlign( Context->Wait.Async.RequestedByteCount, (LONG)Vcb->BytesPerSector );
  329. if (RoundedTransfer > Context->Wait.Async.RequestedByteCount) {
  330. MmInitializeMdl( PartialMdl, NULL, NTFS_MDL_TRANSFER_PAGES * PAGE_SIZE );
  331. IoBuildPartialMdl( Irp->MdlAddress, PartialMdl, Add2Ptr( MmGetMdlBaseVa( Irp->MdlAddress ), MmGetMdlByteOffset( Irp->MdlAddress ) + Context->Wait.Async.RequestedByteCount ), RoundedTransfer - Context->Wait.Async.RequestedByteCount );
  332. //
  333. // Now map that last page
  334. //
  335. SystemBuffer = MmGetSystemAddressForMdlSafe( PartialMdl, NormalPagePriority );
  336. if (SystemBuffer == NULL) {
  337. //
  338. // We're an async path so we can return out of resources
  339. //
  340. Irp->IoStatus.Status = STATUS_INSUFFICIENT_RESOURCES;
  341. return FALSE;
  342. }
  343. #ifdef BENL_DBG
  344. // KdPrint(( "NTFS: Zero %x %x %x\n", MmGetMdlByteOffset( Irp->MdlAddress ), RoundedTransfer, Context->Wait.Async.RequestedByteCount ));
  345. #endif
  346. //
  347. // Zero the end of the transfer between expected size and read size. If the mdl is not
  348. // on a page boundary this will all be offset by the MdlByteOffset
  349. //
  350. RtlZeroMemory( SystemBuffer, RoundedTransfer - Context->Wait.Async.RequestedByteCount );
  351. MmPrepareMdlForReuse( PartialMdl );
  352. }
  353. }
  354. return TRUE;
  355. }
  356. VOID
  357. NtfsLockUserBuffer (
  358. IN PIRP_CONTEXT IrpContext,
  359. IN OUT PIRP Irp,
  360. IN LOCK_OPERATION Operation,
  361. IN ULONG BufferLength
  362. )
  363. /*++
  364. Routine Description:
  365. This routine locks the specified buffer for the specified type of
  366. access. The file system requires this routine since it does not
  367. ask the I/O system to lock its buffers for direct I/O. This routine
  368. may only be called from the Fsd while still in the user context.
  369. Arguments:
  370. Irp - Pointer to the Irp for which the buffer is to be locked.
  371. Operation - IoWriteAccess for read operations, or IoReadAccess for
  372. write operations.
  373. BufferLength - Length of user buffer.
  374. Return Value:
  375. None
  376. --*/
  377. {
  378. PMDL Mdl = NULL;
  379. ASSERT_IRP_CONTEXT( IrpContext );
  380. ASSERT_IRP( Irp );
  381. if (Irp->MdlAddress == NULL) {
  382. //
  383. // Allocate the Mdl, and Raise if we fail.
  384. //
  385. Mdl = IoAllocateMdl( Irp->UserBuffer, BufferLength, FALSE, FALSE, Irp );
  386. if (Mdl == NULL) {
  387. NtfsRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES, NULL, NULL );
  388. }
  389. //
  390. // Now probe the buffer described by the Irp. If we get an exception,
  391. // deallocate the Mdl and return the appropriate "expected" status.
  392. //
  393. try {
  394. MmProbeAndLockPages( Mdl, Irp->RequestorMode, Operation );
  395. } except(EXCEPTION_EXECUTE_HANDLER) {
  396. NTSTATUS Status;
  397. Status = GetExceptionCode();
  398. IoFreeMdl( Mdl );
  399. Irp->MdlAddress = NULL;
  400. NtfsRaiseStatus( IrpContext,
  401. FsRtlIsNtstatusExpected(Status) ? Status : STATUS_INVALID_USER_BUFFER,
  402. NULL,
  403. NULL );
  404. }
  405. }
  406. //
  407. // And return to our caller
  408. //
  409. return;
  410. }
  411. PVOID
  412. NtfsMapUserBuffer (
  413. IN OUT PIRP Irp,
  414. IN MM_PAGE_PRIORITY Priority
  415. )
  416. /*++
  417. Routine Description:
  418. This routine conditionally maps the user buffer for the current I/O
  419. request in the specified mode. If the buffer is already mapped, it
  420. just returns its address.
  421. Arguments:
  422. Irp - Pointer to the Irp for the request.
  423. Priority - priority of the pages should be normalpagepriority unless its a metadata page
  424. in which case it can be high priority
  425. Return Value:
  426. Mapped address
  427. --*/
  428. {
  429. PVOID SystemBuffer;
  430. //
  431. // All paging i/o is high priority
  432. //
  433. if (FlagOn( Irp->Flags, IRP_PAGING_IO )) {
  434. Priority = HighPagePriority;
  435. }
  436. //
  437. // If there is no Mdl, then we must be in the Fsd, and we can simply
  438. // return the UserBuffer field from the Irp.
  439. //
  440. if (Irp->MdlAddress == NULL) {
  441. return Irp->UserBuffer;
  442. } else {
  443. //
  444. // MM can return NULL if there are no system ptes.
  445. //
  446. if ((SystemBuffer = MmGetSystemAddressForMdlSafe( Irp->MdlAddress, Priority )) == NULL) {
  447. ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
  448. }
  449. return SystemBuffer;
  450. }
  451. }
  452. PVOID
  453. NtfsMapUserBufferNoRaise (
  454. IN OUT PIRP Irp,
  455. IN MM_PAGE_PRIORITY Priority
  456. )
  457. /*++
  458. Routine Description:
  459. This routine conditionally maps the user buffer for the current I/O
  460. request in the specified mode. If the buffer is already mapped, it
  461. just returns its address.
  462. Arguments:
  463. Irp - Pointer to the Irp for the request.
  464. Priority - priority of the pages should be normalpagepriority unless its a metadata page
  465. in which case it can be high priority
  466. Return Value:
  467. Mapped address
  468. --*/
  469. {
  470. //
  471. // All paging i/o is high priority
  472. //
  473. if (FlagOn( Irp->Flags, IRP_PAGING_IO )) {
  474. Priority = HighPagePriority;
  475. }
  476. //
  477. // If there is no Mdl, then we must be in the Fsd, and we can simply
  478. // return the UserBuffer field from the Irp.
  479. //
  480. if (Irp->MdlAddress == NULL) {
  481. return Irp->UserBuffer;
  482. } else {
  483. //
  484. // MM can return NULL if there are no system ptes.
  485. //
  486. return MmGetSystemAddressForMdlSafe( Irp->MdlAddress, Priority );
  487. }
  488. }
  489. VOID
  490. NtfsFillIrpBuffer (
  491. IN PIRP_CONTEXT IrpContext,
  492. IN PIRP Irp,
  493. IN ULONG ByteCount,
  494. IN ULONG Offset,
  495. IN UCHAR Pattern
  496. )
  497. /*++
  498. Routine Description:
  499. Fill a range in the buffer contained within an irp with a given pattern
  500. Arguments:
  501. IrpContext - If present this an IrpContext put on the caller's stack
  502. to avoid having to allocate it from pool.
  503. Irp - Supplies the Irp being processed
  504. ByteCount - bytes to zero
  505. Offset - Offset within the irp's buffer to begin zeroing at
  506. Pattern - Pattern to fill the buffer with
  507. Return Value:
  508. NTSTATUS - The FSD status for the IRP
  509. --*/
  510. {
  511. PVOID SystemBuffer;
  512. PVCB Vcb = IrpContext->Vcb;
  513. UCHAR Buffer[sizeof( MDL ) + sizeof( PFN_NUMBER ) * 2];
  514. PMDL PartialMdl = (PMDL) Buffer;
  515. ULONG FillCount = ByteCount;
  516. //
  517. // First attempt to directly map the user's buffer
  518. //
  519. SystemBuffer = NtfsMapUserBufferNoRaise( Irp, NormalPagePriority );
  520. //
  521. // If there weren't pte in the system cache we'll use the reserved mapping instead
  522. //
  523. if (!SystemBuffer) {
  524. ASSERT( Irp->MdlAddress != NULL );
  525. MmInitializeMdl( PartialMdl, NULL, 2 * PAGE_SIZE );
  526. ExAcquireFastMutexUnsafe( &Vcb->ReservedMappingMutex );
  527. while (FillCount > 0) {
  528. IoBuildPartialMdl( Irp->MdlAddress, PartialMdl, Add2Ptr( MmGetMdlBaseVa( Irp->MdlAddress ), MmGetMdlByteOffset( Irp->MdlAddress ) + Offset + ByteCount - FillCount ), min( PAGE_SIZE, FillCount ));
  529. SystemBuffer = MmMapLockedPagesWithReservedMapping( Vcb->ReservedMapping,
  530. RESERVE_POOL_TAG,
  531. PartialMdl,
  532. MmCached );
  533. ASSERT( SystemBuffer != NULL );
  534. try {
  535. RtlFillMemory( SystemBuffer, min( PAGE_SIZE, FillCount), Pattern );
  536. } except( EXCEPTION_EXECUTE_HANDLER ) {
  537. MmUnmapReservedMapping( Vcb->ReservedMapping, RESERVE_POOL_TAG, PartialMdl );
  538. MmPrepareMdlForReuse( PartialMdl );
  539. ExReleaseFastMutexUnsafe( &Vcb->ReservedMappingMutex );
  540. NtfsRaiseStatus( IrpContext, STATUS_INVALID_USER_BUFFER, NULL, NULL );
  541. }
  542. if (FillCount >= PAGE_SIZE) {
  543. FillCount -= PAGE_SIZE;
  544. } else {
  545. FillCount = 0;
  546. }
  547. MmUnmapReservedMapping( Vcb->ReservedMapping, RESERVE_POOL_TAG, PartialMdl );
  548. MmPrepareMdlForReuse( PartialMdl );
  549. SystemBuffer = NULL;
  550. }
  551. ExReleaseFastMutexUnsafe( &Vcb->ReservedMappingMutex );
  552. } else {
  553. try {
  554. RtlFillMemory( Add2Ptr( SystemBuffer, Offset ), ByteCount, Pattern );
  555. } except( EXCEPTION_EXECUTE_HANDLER ) {
  556. NtfsRaiseStatus( IrpContext, STATUS_INVALID_USER_BUFFER, NULL, NULL );
  557. }
  558. }
  559. }
  560. NTSTATUS
  561. NtfsVolumeDasdIo (
  562. IN PIRP_CONTEXT IrpContext,
  563. IN PIRP Irp,
  564. IN PSCB DasdScb,
  565. IN PCCB Ccb,
  566. IN VBO StartingVbo,
  567. IN ULONG ByteCount
  568. )
  569. /*++
  570. Routine Description:
  571. This routine performs the non-cached disk io for Volume Dasd, as described
  572. in its parameters.
  573. Arguments:
  574. IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
  575. Irp - Supplies the requesting Irp.
  576. Scb - Supplies the DasdScb for the volume - we don't use vcb to find this
  577. since the vcb maybe dismounted
  578. Ccb - flag in it used to track whether to flush the volume
  579. StartingVbo - Starting offset within the file for the operation.
  580. ByteCount - The lengh of the operation.
  581. Return Value:
  582. The result of the Io operation. STATUS_PENDING if this is an asynchronous
  583. open.
  584. --*/
  585. {
  586. NTSTATUS Status = STATUS_SUCCESS;
  587. PIO_STACK_LOCATION IrpSp = IoGetCurrentIrpStackLocation( Irp );
  588. BOOLEAN AcquiredVcb = FALSE;
  589. BOOLEAN AcquiredScb = FALSE;
  590. LOGICAL Dismounted;
  591. PAGED_CODE();
  592. DebugTrace( +1, Dbg, ("NtfsVolumeDasdIo\n") );
  593. DebugTrace( 0, Dbg, ("Irp = %08lx\n", Irp) );
  594. DebugTrace( 0, Dbg, ("MajorFunction = %08lx\n", IrpContext->MajorFunction) );
  595. DebugTrace( 0, Dbg, ("Vcb = %08lx\n", DasdScb->Vcb) );
  596. DebugTrace( 0, Dbg, ("StartingVbo = %016I64x\n", StartingVbo) );
  597. DebugTrace( 0, Dbg, ("ByteCount = %08lx\n", ByteCount) );
  598. //
  599. // Acquire the vcb if we'll flush based on the ccb flag - this test is
  600. // safe since its off the ccb. Acquire the dasd handle shared otherwise
  601. // use the appropriate object to capture the volume mount state
  602. //
  603. if (FlagOn( Ccb->Flags, CCB_FLAG_FLUSH_VOLUME_ON_IO )) {
  604. NtfsAcquireExclusiveVcb( IrpContext, DasdScb->Vcb, TRUE );
  605. Dismounted = !FlagOn( DasdScb->Vcb->VcbState, VCB_STATE_VOLUME_MOUNTED );
  606. AcquiredVcb = TRUE;
  607. } else {
  608. if (!NtfsAcquireSharedScbWaitForEx( IrpContext, DasdScb )) {
  609. NtfsRaiseStatus( IrpContext, STATUS_CANT_WAIT, &DasdScb->Fcb->FileReference, DasdScb->Fcb );
  610. }
  611. Dismounted = FlagOn( DasdScb->ScbState, SCB_STATE_VOLUME_DISMOUNTED );
  612. AcquiredScb = TRUE;
  613. }
  614. try {
  615. //
  616. // If this is the handle that locked the volume its still ok to use
  617. // even if dismounted. We don't necc. own the vcb but since the volume is def. dismounted
  618. // at this point if we aren't the handle in question either the value will be null or not us
  619. // so we're ok in either case
  620. //
  621. if (Dismounted &&
  622. (ClearFlag( (ULONG_PTR)DasdScb->Vcb->FileObjectWithVcbLocked, 1 ) != (ULONG_PTR)IrpSp->FileObject)) {
  623. Status = STATUS_VOLUME_DISMOUNTED;
  624. leave;
  625. }
  626. //
  627. // Do delayed volume flush if required
  628. //
  629. if (FlagOn( Ccb->Flags, CCB_FLAG_FLUSH_VOLUME_ON_IO )) {
  630. ASSERT( IrpContext->ExceptionStatus == STATUS_SUCCESS );
  631. //
  632. // No need to purge or lock the volume while flushing. NtfsFlushVolume
  633. // will acquire the vcb exclusive
  634. //
  635. Status = NtfsFlushVolume( IrpContext, DasdScb->Vcb, TRUE, FALSE, TRUE, FALSE );
  636. //
  637. // Ignore corruption errors while flushing
  638. //
  639. if (!NT_SUCCESS( Status ) && (Status != STATUS_FILE_CORRUPT_ERROR)) {
  640. //
  641. // Report the error that there is an data section blocking the flush by returning
  642. // sharing violation. Otherwise Win32 callers will get INVALID_PARAMETER.
  643. //
  644. if (Status == STATUS_UNABLE_TO_DELETE_SECTION) {
  645. Status = STATUS_SHARING_VIOLATION;
  646. }
  647. NtfsRaiseStatus( IrpContext, Status, NULL, NULL );
  648. }
  649. ClearFlag( Ccb->Flags, CCB_FLAG_FLUSH_VOLUME_ON_IO );
  650. }
  651. //
  652. // For nonbuffered I/O, we need the buffer locked in all
  653. // cases.
  654. //
  655. // This call may raise. If this call succeeds and a subsequent
  656. // condition is raised, the buffers are unlocked automatically
  657. // by the I/O system when the request is completed, via the
  658. // Irp->MdlAddress field.
  659. //
  660. NtfsLockUserBuffer( IrpContext,
  661. Irp,
  662. (IrpContext->MajorFunction == IRP_MJ_READ) ?
  663. IoWriteAccess : IoReadAccess,
  664. ByteCount );
  665. //
  666. // Read or write the data
  667. //
  668. NtfsSingleAsync( IrpContext,
  669. DasdScb->Vcb->TargetDeviceObject,
  670. StartingVbo,
  671. ByteCount,
  672. Irp,
  673. IrpContext->MajorFunction,
  674. 0 );
  675. if (!FlagOn( IrpContext->State, IRP_CONTEXT_STATE_WAIT )) {
  676. //
  677. // We can get rid of the IrpContext now.
  678. //
  679. IrpContext->Union.NtfsIoContext = NULL;
  680. NtfsCleanupIrpContext( IrpContext, TRUE );
  681. DebugTrace( -1, Dbg, ("NtfsVolumeDasdIo -> STATUS_PENDING\n") );
  682. Status = STATUS_PENDING;
  683. leave;
  684. }
  685. //
  686. // Wait for the result
  687. //
  688. NtfsWaitSync( IrpContext );
  689. Status = Irp->IoStatus.Status;
  690. DebugTrace( -1, Dbg, ("NtfsVolumeDasdIo -> %08lx\n", Irp->IoStatus.Status) );
  691. } finally {
  692. if (AcquiredVcb) {
  693. NtfsReleaseVcb( IrpContext, DasdScb->Vcb );
  694. }
  695. if (AcquiredScb) {
  696. NtfsReleaseScb( IrpContext, DasdScb );
  697. }
  698. }
  699. return Status;
  700. }
  701. VOID
  702. NtfsPagingFileIoWithNoAllocation (
  703. IN PIRP_CONTEXT IrpContext,
  704. IN PIRP Irp,
  705. IN PSCB Scb,
  706. IN VBO StartingVbo,
  707. IN ULONG ByteCount
  708. )
  709. /*++
  710. Routine Description:
  711. This routine performs the non-cached disk io described in its parameters.
  712. This routine nevers blocks, and should only be used with the paging
  713. file since no completion processing is performed. This version does not allocate
  714. any memory so it guarantees fwd progress
  715. Arguments:
  716. IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
  717. Irp - Supplies the requesting Irp.
  718. Scb - Supplies the file to act on.
  719. StartingVbo - Starting offset within the file for the operation.
  720. ByteCount - The lengh of the operation.
  721. Return Value:
  722. None.
  723. --*/
  724. {
  725. UCHAR Buffer[sizeof( MDL ) + sizeof( PFN_NUMBER ) * (NTFS_MDL_TRANSFER_PAGES + 1)];
  726. PMDL PartialMdl = (PMDL) Buffer;
  727. PMDL MasterMdl = Irp->MdlAddress;
  728. LONGLONG ThisClusterCount;
  729. ULONG ThisByteCount;
  730. LCN ThisLcn;
  731. LBO ThisLbo;
  732. VCN ThisVcn;
  733. PIO_STACK_LOCATION IrpSp;
  734. ULONG BufferOffset;
  735. PVCB Vcb = Scb->Vcb;
  736. ULONG ClusterOffset;
  737. VCN BeyondLastCluster;
  738. NTSTATUS Status;
  739. KEVENT Event;
  740. //
  741. // Initialize some locals.
  742. //
  743. BufferOffset = 0;
  744. ClusterOffset = (ULONG) StartingVbo & Vcb->ClusterMask;
  745. BeyondLastCluster = LlClustersFromBytes( Vcb, StartingVbo + ByteCount );
  746. KeInitializeEvent( &Event, SynchronizationEvent, FALSE );
  747. RtlZeroMemory( Buffer, sizeof( Buffer ) );
  748. ThisVcn = LlClustersFromBytesTruncate( Vcb, StartingVbo );
  749. while (ByteCount > 0) {
  750. //
  751. // Try to lookup the next run
  752. // Paging files reads/ writes should always be correct. If
  753. // we didn't find the allocation, something bad has happened.
  754. //
  755. if (!NtfsLookupNtfsMcbEntry( &Scb->Mcb,
  756. ThisVcn,
  757. &ThisLcn,
  758. &ThisClusterCount,
  759. NULL,
  760. NULL,
  761. NULL,
  762. NULL )) {;
  763. NtfsBugCheck( 0, 0, 0 );
  764. }
  765. //
  766. // Adjust from Lcn to Lbo.
  767. //
  768. ThisLbo = LlBytesFromClusters( Vcb, ThisLcn ) + ClusterOffset;
  769. //
  770. // If next run is larger than we need, "ya get what you need".
  771. //
  772. ThisByteCount = BytesFromClusters( Vcb, (ULONG) ThisClusterCount ) - ClusterOffset;
  773. if (ThisVcn + ThisClusterCount >= BeyondLastCluster) {
  774. ThisByteCount = ByteCount;
  775. }
  776. //
  777. // Now that we have properly bounded this piece of the
  778. // transfer, it is time to read/write it NTFS_MDL_TRANSFER_PAGES pages at a time.
  779. //
  780. while (ThisByteCount > 0) {
  781. ULONG TransferSize = min( NTFS_MDL_TRANSFER_PAGES * PAGE_SIZE, ThisByteCount );
  782. //
  783. // The partial mdl is on the stack
  784. //
  785. PartialMdl->Size = sizeof( Buffer );
  786. IoBuildPartialMdl( MasterMdl,
  787. PartialMdl,
  788. Add2Ptr( Irp->UserBuffer, BufferOffset ),
  789. TransferSize );
  790. Irp->MdlAddress = PartialMdl;
  791. IrpSp = IoGetNextIrpStackLocation( Irp );
  792. //
  793. // Setup the Stack location to do a read from the disk driver.
  794. //
  795. IrpSp->MajorFunction = IrpContext->MajorFunction;
  796. IrpSp->Parameters.Read.Length = TransferSize;
  797. IrpSp->Parameters.Read.ByteOffset.QuadPart = ThisLbo;
  798. IoSetCompletionRoutine( Irp, NtfsPagingFileNoAllocCompletionRoutine, &Event, TRUE, TRUE, TRUE );
  799. Status = IoCallDriver( Vcb->TargetDeviceObject, Irp );
  800. if (Status == STATUS_PENDING) {
  801. KeWaitForSingleObject( &Event, Executive, KernelMode, FALSE, NULL );
  802. Status = Irp->IoStatus.Status;
  803. }
  804. ASSERT( Status != STATUS_INSUFFICIENT_RESOURCES );
  805. if (!FT_SUCCESS( Irp->IoStatus.Status )) {
  806. BOOLEAN DataLost = TRUE;
  807. if (!FsRtlIsTotalDeviceFailure( Status ) &&
  808. (Status != STATUS_VERIFY_REQUIRED)) {
  809. //
  810. // We don't want to try to hotfix READ errors on the paging file
  811. // because of deadlock possibilities with MM. Instead we'll just
  812. // return the error for MM to deal with. Chances are that
  813. // MM (eg. MiWaitForInPageComplete) will bugcheck anyway,
  814. // but it's still nicer than walking right into the deadlock.
  815. //
  816. if (IrpSp->MajorFunction != IRP_MJ_READ) {
  817. if ((Irp->IoStatus.Status == STATUS_FT_READ_RECOVERY_FROM_BACKUP) ||
  818. (Irp->IoStatus.Status == STATUS_FT_WRITE_RECOVERY)) {
  819. //
  820. // We got the data down on part of the mirror so we can do the fix
  821. // asynchronously
  822. //
  823. DataLost = FALSE;
  824. }
  825. //
  826. // Start an async hotfix
  827. //
  828. try {
  829. NtfsPostHotFix( Irp,
  830. &StartingVbo,
  831. ThisLbo,
  832. TransferSize,
  833. FALSE );
  834. } except( GetExceptionCode() == STATUS_INSUFFICIENT_RESOURCES ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH ) {
  835. //
  836. // If we don't have enough memory to post the hotfix - so be it
  837. // continue on
  838. //
  839. NtfsMinimumExceptionProcessing( IrpContext );
  840. }
  841. }
  842. }
  843. //
  844. // If mm needs to rewrite the data return back the error
  845. //
  846. if (DataLost) {
  847. Irp->MdlAddress = MasterMdl;
  848. NtfsCompleteRequest( NULL, Irp, Irp->IoStatus.Status );
  849. return;
  850. }
  851. }
  852. //
  853. // Now adjust everything for the next pass through the loop
  854. //
  855. StartingVbo += TransferSize;
  856. BufferOffset += TransferSize;
  857. ByteCount -= TransferSize;
  858. ThisByteCount -= TransferSize;
  859. ThisLbo += TransferSize;
  860. }
  861. //
  862. // Now adjust everything for the next pass through the loop but
  863. // break out now if all the irps have been created for the io.
  864. //
  865. ClusterOffset = 0;
  866. ThisVcn += ThisClusterCount;
  867. }
  868. //
  869. // Finally restore back the fields and complete the original irp
  870. //
  871. Irp->MdlAddress = MasterMdl;
  872. NtfsCompleteRequest( NULL, Irp, Irp->IoStatus.Status );
  873. }
VOID
NtfsPagingFileIo (
    IN PIRP_CONTEXT IrpContext,
    IN PIRP Irp,
    IN PSCB Scb,
    IN VBO StartingVbo,
    IN ULONG ByteCount
    )

/*++

Routine Description:

    This routine performs the non-cached disk io described in its parameters.
    This routine never blocks, and should only be used with the paging
    file since no completion processing is performed.

    If the transfer covers a single run, the caller's Irp is forwarded
    directly to the disk driver.  Otherwise one associated Irp (with a
    partial Mdl) is built per run and all of them are sent down; the I/O
    manager completes the master Irp when the associated Irps complete.
    If any allocation fails part-way through, the routine falls back to
    NtfsPagingFileIoWithNoAllocation for the whole original transfer.

Arguments:

    IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.

    Irp - Supplies the requesting Irp.

    Scb - Supplies the file to act on.

    StartingVbo - Starting offset within the file for the operation.

    ByteCount - The length of the operation.

Return Value:

    None.

--*/

{
    LONGLONG ThisClusterCount;
    ULONG ThisByteCount;

    LCN ThisLcn;
    LBO ThisLbo;
    VCN ThisVcn;

    PIRP AssocIrp;
    PIRP ContextIrp;
    PIO_STACK_LOCATION IrpSp;
    ULONG BufferOffset;
    PDEVICE_OBJECT DeviceObject;
    PFILE_OBJECT FileObject;
    PDEVICE_OBJECT OurDeviceObject;
    PVCB Vcb = Scb->Vcb;

    //
    // Queue of associated Irps built before any are sent to the driver,
    // so a mid-loop failure can unwind them all in the finally clause.
    //

    LIST_ENTRY AssociatedIrps;
    ULONG AssociatedIrpCount;

    ULONG ClusterOffset;
    VCN BeyondLastCluster;

    //
    // Remember the original request in case we must fall back to the
    // no-allocation path below.
    //

    VBO OriginalStartingVbo = StartingVbo;
    ULONG OriginalByteCount = ByteCount;

    ClearFlag( Vcb->Vpb->RealDevice->Flags, DO_VERIFY_VOLUME ); //****ignore verify for now

    //
    // Check whether we want to set the low order bit in the Irp to pass
    // as a context value to the completion routine.
    //
    // NOTE(review): the completion context is the Irp pointer with bit 0
    // used as a "hot fix underway" tag; the completion routine presumably
    // strips and tests that bit.
    //

    ContextIrp = Irp;

    if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_HOTFIX_UNDERWAY )) {

        SetFlag( ((ULONG_PTR) ContextIrp), 0x1 );
    }

    //
    // Check that we are sector aligned.
    //

    ASSERT( (((ULONG)StartingVbo) & (Vcb->BytesPerSector - 1)) == 0 );

    //
    // Initialize some locals.
    //

    BufferOffset = 0;
    ClusterOffset = (ULONG) StartingVbo & Vcb->ClusterMask;
    DeviceObject = Vcb->TargetDeviceObject;
    BeyondLastCluster = LlClustersFromBytes( Vcb, StartingVbo + ByteCount );

    //
    // Try to lookup the first run.  If there is just a single run,
    // we may just be able to pass it on.
    //

    ThisVcn = LlClustersFromBytesTruncate( Vcb, StartingVbo );

    //
    // Paging files reads/ writes should always be correct.  If we didn't
    // find the allocation, something bad has happened.
    //

    if (!NtfsLookupNtfsMcbEntry( &Scb->Mcb,
                                 ThisVcn,
                                 &ThisLcn,
                                 &ThisClusterCount,
                                 NULL,
                                 NULL,
                                 NULL,
                                 NULL )) {

        NtfsRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR, NULL, Scb->Fcb );
    }

    //
    // Adjust from Lcn to Lbo.  ClusterOffset accounts for a transfer that
    // starts part-way into the first cluster.
    //

    ThisLbo = LlBytesFromClusters( Vcb, ThisLcn ) + ClusterOffset;

    //
    // Now set up the Irp->IoStatus.  It will be modified by the
    // multi-completion routine in case of error or verify required.
    //

    Irp->IoStatus.Status = STATUS_SUCCESS;
    Irp->IoStatus.Information = ByteCount;

    //
    // Save the FileObject.
    //

    IrpSp = IoGetCurrentIrpStackLocation( Irp );
    FileObject = IrpSp->FileObject;
    OurDeviceObject = IrpSp->DeviceObject;

    //
    // See if the write covers a single valid run, and if so pass
    // it on.
    //

    if (ThisVcn + ThisClusterCount >= BeyondLastCluster) {

        DebugTrace( 0, Dbg, ("Passing Irp on to Disk Driver\n") );

        //
        // We use our stack location to store request information in a
        // rather strange way, to give us enough context to post a
        // hot fix on error.  It's ok, because it is our stack location!
        //
        // The Key field carries the starting Vbo so the completion
        // routine can map a failed Lbo back to a file offset.
        //

        IrpSp->Parameters.Read.ByteOffset.QuadPart = ThisLbo;
        IrpSp->Parameters.Read.Key = ((ULONG)StartingVbo);

        //
        // Set up the completion routine address in our stack frame.
        // This is only invoked on error or cancel, and just copies
        // the error Status into master irp's iosb.
        //

        IoSetCompletionRoutine( Irp,
                                &NtfsPagingFileCompletionRoutine,
                                ContextIrp,
                                (BOOLEAN)!FlagOn(Vcb->VcbState, VCB_STATE_NO_SECONDARY_AVAILABLE),
                                TRUE,
                                TRUE );

        //
        // Setup the next IRP stack location for the disk driver beneath us.
        //

        IrpSp = IoGetNextIrpStackLocation( Irp );

        //
        // Setup the Stack location to do a read from the disk driver.
        //

        IrpSp->MajorFunction = IrpContext->MajorFunction;
        IrpSp->Parameters.Read.Length = ByteCount;
        IrpSp->Parameters.Read.ByteOffset.QuadPart = ThisLbo;

        //
        // Issue the read/write request
        //
        // If IoCallDriver returns an error, it has completed the Irp
        // and the error will be dealt with as a normal IO error.
        //

        (VOID)IoCallDriver( DeviceObject, Irp );

        DebugTrace( -1, Dbg, ("NtfsPagingFileIo -> VOID\n") );
        return;
    }

    //
    // Loop while there are still byte writes to satisfy.  Always keep the
    // associated irp count one up, so that the master irp won't get
    // completed prematurly.
    //

    try {

        //
        // We will allocate and initialize all of the Irps and then send
        // them down to the driver.  We will queue them off of our
        // AssociatedIrp queue.
        //

        InitializeListHead( &AssociatedIrps );
        AssociatedIrpCount = 0;

        while (TRUE) {

            //
            // Reset this for unwinding purposes
            //

            AssocIrp = NULL;

            //
            // If next run is larger than we need, "ya get what you need".
            //

            ThisByteCount = BytesFromClusters( Vcb, (ULONG) ThisClusterCount ) - ClusterOffset;

            if (ThisVcn + ThisClusterCount >= BeyondLastCluster) {

                ThisByteCount = ByteCount;
            }

            //
            // Now that we have properly bounded this piece of the
            // transfer, it is time to read/write it.
            //
            // StackSize + 1 leaves room for our own stack location in
            // addition to those needed by the driver stack beneath us.
            //

            AssocIrp = IoMakeAssociatedIrp( Irp, (CCHAR)(DeviceObject->StackSize + 1) );

            if (AssocIrp == NULL) {

                break;
            }

            //
            // Now add the Irp to our queue of Irps.
            //

            InsertTailList( &AssociatedIrps, &AssocIrp->Tail.Overlay.ListEntry );

            //
            // Allocate and build a partial Mdl for the request.
            //

            {
                PMDL Mdl;

                Mdl = IoAllocateMdl( (PCHAR)Irp->UserBuffer + BufferOffset,
                                     ThisByteCount,
                                     FALSE,
                                     FALSE,
                                     AssocIrp );

                if (Mdl == NULL) {

                    break;
                }

                IoBuildPartialMdl( Irp->MdlAddress,
                                   Mdl,
                                   Add2Ptr( Irp->UserBuffer, BufferOffset ),
                                   ThisByteCount );
            }

            AssociatedIrpCount += 1;

            //
            // Get the first IRP stack location in the associated Irp
            //

            IoSetNextIrpStackLocation( AssocIrp );
            IrpSp = IoGetCurrentIrpStackLocation( AssocIrp );

            //
            // We use our stack location to store request information in a
            // rather strange way, to give us enough context to post a
            // hot fix on error.  It's ok, because it is our stack location!
            //

            IrpSp->MajorFunction = IrpContext->MajorFunction;
            IrpSp->Parameters.Read.Length = ThisByteCount;
            IrpSp->Parameters.Read.ByteOffset.QuadPart = ThisLbo;
            IrpSp->Parameters.Read.Key = ((ULONG)StartingVbo);
            IrpSp->FileObject = FileObject;
            IrpSp->DeviceObject = OurDeviceObject;

            //
            // Set up the completion routine address in our stack frame.
            // This is only invoked on error or cancel, and just copies
            // the error Status into master irp's iosb.
            //

            IoSetCompletionRoutine( AssocIrp,
                                    &NtfsPagingFileCompletionRoutine,
                                    ContextIrp,
                                    (BOOLEAN)!FlagOn(Vcb->VcbState, VCB_STATE_NO_SECONDARY_AVAILABLE),
                                    TRUE,
                                    TRUE );

            //
            // Setup the next IRP stack location in the associated Irp for the disk
            // driver beneath us.
            //

            IrpSp = IoGetNextIrpStackLocation( AssocIrp );

            //
            // Setup the Stack location to do a read from the disk driver.
            //

            IrpSp->MajorFunction = IrpContext->MajorFunction;
            IrpSp->Parameters.Read.Length = ThisByteCount;
            IrpSp->Parameters.Read.ByteOffset.QuadPart = ThisLbo;

            //
            // Now adjust everything for the next pass through the loop but
            // break out now if all the irps have been created for the io.
            //

            StartingVbo += ThisByteCount;
            BufferOffset += ThisByteCount;
            ByteCount -= ThisByteCount;
            ClusterOffset = 0;
            ThisVcn += ThisClusterCount;

            if (ByteCount == 0) {

                break;
            }

            //
            // Try to lookup the next run (if we are not done).
            // Paging files reads/ writes should always be correct.  If
            // we didn't find the allocation, something bad has happened.
            //

            if (!NtfsLookupNtfsMcbEntry( &Scb->Mcb,
                                         ThisVcn,
                                         &ThisLcn,
                                         &ThisClusterCount,
                                         NULL,
                                         NULL,
                                         NULL,
                                         NULL )) {;

                NtfsBugCheck( 0, 0, 0 );
            }

            ThisLbo = LlBytesFromClusters( Vcb, ThisLcn );

        } // while (ByteCount != 0)

        if (ByteCount == 0) {

            //
            // We have now created all of the Irps that we need.  We will set the
            // Irp count in the master Irp and then fire off the associated irps.
            //

            Irp->AssociatedIrp.IrpCount = AssociatedIrpCount;

            while (!IsListEmpty( &AssociatedIrps )) {

                AssocIrp = CONTAINING_RECORD( AssociatedIrps.Flink,
                                              IRP,
                                              Tail.Overlay.ListEntry );

                RemoveHeadList( &AssociatedIrps );

                (VOID) IoCallDriver( DeviceObject, AssocIrp );
            }

        } else {

            //
            // An Irp or Mdl allocation failed above; do the transfer
            // synchronously without any further allocation.
            //

            NtfsPagingFileIoWithNoAllocation( IrpContext, Irp, Scb, OriginalStartingVbo, OriginalByteCount );
        }

    } finally {

        DebugUnwind( NtfsPagingFileIo );

        //
        // In the case of an error we must clean up any of the associated Irps
        // we have created.
        //

        while (!IsListEmpty( &AssociatedIrps )) {

            AssocIrp = CONTAINING_RECORD( AssociatedIrps.Flink,
                                          IRP,
                                          Tail.Overlay.ListEntry );

            RemoveHeadList( &AssociatedIrps );

            if (AssocIrp->MdlAddress != NULL) {

                IoFreeMdl( AssocIrp->MdlAddress );
                AssocIrp->MdlAddress = NULL;
            }

            IoFreeIrp( AssocIrp );
        }
    }

    DebugTrace( -1, Dbg, ("NtfsPagingFileIo -> VOID\n") );
    return;
}
  1175. BOOLEAN
  1176. NtfsIsReadAheadThread (
  1177. )
  1178. /*++
  1179. Routine Description:
  1180. This routine returns whether the current thread is doing read ahead.
  1181. Arguments:
  1182. None
  1183. Return Value:
  1184. FALSE - if the thread is not doing read ahead
  1185. TRUE - if the thread is doing read ahead
  1186. --*/
  1187. {
  1188. PREAD_AHEAD_THREAD ReadAheadThread;
  1189. PVOID CurrentThread = PsGetCurrentThread();
  1190. KIRQL OldIrql;
  1191. OldIrql = KeAcquireQueuedSpinLock( LockQueueNtfsStructLock );
  1192. ReadAheadThread = (PREAD_AHEAD_THREAD)NtfsData.ReadAheadThreads.Flink;
  1193. //
  1194. // Scan for our thread, stopping at the end of the list or on the first
  1195. // NULL. We can stop on the first NULL, since when we free an entry
  1196. // we move it to the end of the list.
  1197. //
  1198. while ((ReadAheadThread != (PREAD_AHEAD_THREAD)&NtfsData.ReadAheadThreads) &&
  1199. (ReadAheadThread->Thread != NULL)) {
  1200. //
  1201. // Get out if we see our thread.
  1202. //
  1203. if (ReadAheadThread->Thread == CurrentThread) {
  1204. KeReleaseQueuedSpinLock( LockQueueNtfsStructLock, OldIrql );
  1205. return TRUE;
  1206. }
  1207. ReadAheadThread = (PREAD_AHEAD_THREAD)ReadAheadThread->Links.Flink;
  1208. }
  1209. KeReleaseQueuedSpinLock( LockQueueNtfsStructLock, OldIrql );
  1210. return FALSE;
  1211. }
  1212. //
  1213. // Internal support routine
  1214. //
VOID
NtfsAllocateCompressionBuffer (
    IN PIRP_CONTEXT IrpContext,
    IN PSCB ThisScb,
    IN PIRP Irp,
    IN PCOMPRESSION_CONTEXT CompressionContext,
    IN OUT PULONG CompressionBufferLength
    )

/*++

Routine Description:

    This routine allocates a compression buffer of the desired length, and
    describes it with an Mdl.  It updates the Irp to describe the new buffer.

    The caller's original MdlAddress/UserBuffer are preserved in the
    compression context so they can be restored later by
    NtfsDeallocateCompressionBuffer.

    Note that whoever allocates the CompressionContext must initially zero it.

Arguments:

    ThisScb - The stream where the IO is taking place.

    Irp - Irp for the current request

    CompressionContext - Pointer to the compression context for the request.

    CompressionBufferLength - Supplies length required for the compression buffer.
        Returns length available.

Return Value:

    None.  Raises (via NtfsCreateMdlAndBuffer) if the buffer cannot be
    allocated.

--*/

{
    PMDL Mdl;

    //
    // If no compression buffer is allocated, or it is too small, then we must
    // take action here.  Otherwise the existing buffer is reused as-is.
    //

    if (*CompressionBufferLength > CompressionContext->CompressionBufferLength) {

        //
        // If there already is an Mdl, then there must also be a compression
        // buffer (since we are part of main-line processing), and we must
        // free these first.
        //

        if (CompressionContext->SavedMdl != NULL) {

            //
            // Restore the byte count for which the Mdl was created, and free it.
            //

            Irp->MdlAddress->ByteCount = CompressionContext->CompressionBufferLength;
            NtfsDeleteMdlAndBuffer( Irp->MdlAddress,
                                    CompressionContext->CompressionBuffer );

            //
            // Restore the Mdl and UserBuffer fields in the Irp.
            //

            Irp->MdlAddress = CompressionContext->SavedMdl;
            Irp->UserBuffer = CompressionContext->SavedUserBuffer;
            CompressionContext->SavedMdl = NULL;
            CompressionContext->CompressionBuffer = NULL;
        }

        CompressionContext->CompressionBufferLength = *CompressionBufferLength;

        //
        // Allocate the compression buffer or raise.  Writes may need two
        // reserved buffers (source and target of compression); reads only one.
        //

        NtfsCreateMdlAndBuffer( IrpContext,
                                ThisScb,
                                (UCHAR) ((IrpContext->MajorFunction == IRP_MJ_WRITE) ?
                                          RESERVED_BUFFER_TWO_NEEDED :
                                          RESERVED_BUFFER_ONE_NEEDED),
                                &CompressionContext->CompressionBufferLength,
                                &Mdl,
                                &CompressionContext->CompressionBuffer );

        //
        // Finally save the Mdl and Buffer fields from the Irp, and replace
        // with the ones we just allocated.
        //

        CompressionContext->SavedMdl = Irp->MdlAddress;
        CompressionContext->SavedUserBuffer = Irp->UserBuffer;
        Irp->MdlAddress = Mdl;
        Irp->UserBuffer = CompressionContext->CompressionBuffer;
    }

    //
    // Update the caller's length field in all cases.
    //

    *CompressionBufferLength = CompressionContext->CompressionBufferLength;
}
  1290. //
  1291. // Internal support routine
  1292. //
  1293. VOID
  1294. NtfsDeallocateCompressionBuffer (
  1295. IN PIRP Irp,
  1296. IN PCOMPRESSION_CONTEXT CompressionContext,
  1297. IN BOOLEAN Reinitialize
  1298. )
  1299. /*++
  1300. Routine Description:
  1301. This routine peforms all necessary cleanup for a compressed I/O, as described
  1302. by the compression context.
  1303. Arguments:
  1304. Irp - Irp for the current request
  1305. CompressionContext - Pointer to the compression context for the request.
  1306. Reinitialize - TRUE if we plan to continue using this context.
  1307. Return Value:
  1308. None.
  1309. --*/
  1310. {
  1311. //
  1312. // If there is a saved mdl, then we have to restore the original
  1313. // byte count it was allocated with and free it. Then restore the
  1314. // Irp fields we modified.
  1315. //
  1316. if (CompressionContext->SavedMdl != NULL) {
  1317. Irp->MdlAddress->ByteCount = CompressionContext->CompressionBufferLength;
  1318. NtfsDeleteMdlAndBuffer( Irp->MdlAddress,
  1319. CompressionContext->CompressionBuffer );
  1320. } else {
  1321. NtfsDeleteMdlAndBuffer( NULL,
  1322. CompressionContext->CompressionBuffer );
  1323. }
  1324. //
  1325. // If there is a saved mdl, then we have to restore the original
  1326. // byte count it was allocated with and free it. Then restore the
  1327. // Irp fields we modified.
  1328. //
  1329. if (CompressionContext->SavedMdl != NULL) {
  1330. Irp->MdlAddress = CompressionContext->SavedMdl;
  1331. Irp->UserBuffer = CompressionContext->SavedUserBuffer;
  1332. }
  1333. //
  1334. // If there is a work space structure allocated, free it.
  1335. //
  1336. if (CompressionContext->WorkSpace != NULL) {
  1337. NtfsDeleteMdlAndBuffer( NULL, CompressionContext->WorkSpace );
  1338. }
  1339. //
  1340. // If are reinitializing the structure then clear the fields which
  1341. // we have already cleaned up.
  1342. //
  1343. if (Reinitialize) {
  1344. CompressionContext->SavedMdl = NULL;
  1345. CompressionContext->SavedUserBuffer = NULL;
  1346. CompressionContext->CompressionBuffer = NULL;
  1347. CompressionContext->WorkSpace = NULL;
  1348. CompressionContext->CompressionBufferLength = 0;
  1349. //
  1350. // Delete any allocate IoRuns array if we are done.
  1351. //
  1352. } else if (CompressionContext->AllocatedRuns != NTFS_MAX_PARALLEL_IOS) {
  1353. NtfsFreePool( CompressionContext->IoRuns );
  1354. }
  1355. }
  1356. //
  1357. // Internal support routine
  1358. //
  1359. LONG
  1360. NtfsCompressionFilter (
  1361. IN PIRP_CONTEXT IrpContext,
  1362. IN PEXCEPTION_POINTERS ExceptionPointer
  1363. )
  1364. {
  1365. UNREFERENCED_PARAMETER( IrpContext );
  1366. UNREFERENCED_PARAMETER( ExceptionPointer );
  1367. ASSERT( FsRtlIsNtstatusExpected( ExceptionPointer->ExceptionRecord->ExceptionCode ) );
  1368. return EXCEPTION_EXECUTE_HANDLER;
  1369. }
  1370. //
  1371. // Internal support routine
  1372. //
  1373. ULONG
  1374. NtfsPrepareBuffers (
  1375. IN PIRP_CONTEXT IrpContext,
  1376. IN PIRP Irp,
  1377. IN PSCB Scb,
  1378. IN PVBO StartingVbo,
  1379. IN ULONG ByteCount,
  1380. IN ULONG StreamFlags,
  1381. IN OUT PBOOLEAN Wait,
  1382. OUT PULONG NumberRuns,
  1383. OUT PCOMPRESSION_CONTEXT CompressionContext
  1384. )
  1385. /*++
  1386. Routine Description:
  1387. This routine prepares the buffers for a noncached transfer, and fills
  1388. in the IoRuns array to describe all of the separate transfers which must
  1389. take place.
  1390. For compressed reads, the exact size of the compressed data is
  1391. calculated by scanning the run information, and a buffer is allocated
  1392. to receive the compressed data.
  1393. For compressed writes, an estimate is made on how large of a compressed
  1394. buffer will be required. Then the compression is performed, as much as
  1395. possible, into the compressed buffer which was allocated.
  1396. Arguments:
  1397. IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
  1398. Irp - Supplies the requesting Irp.
  1399. Scb - Supplies the stream file to act on.
  1400. StartingVbo - The starting point for the operation.
  1401. ByteCount - The lengh of the operation.
  1402. NumberRuns - Returns the number of runs filled in to the IoRuns array.
  1403. CompressionContext - Returns information related to the compression
  1404. to be cleaned up after the transfer.
  1405. StreamFlags - Supplies either 0 or some combination of COMPRESSED_STREAM
  1406. and ENCRYPTED_STREAM
  1407. Return Value:
  1408. Returns uncompressed bytes remaining to be processed, or 0 if all buffers
  1409. are prepared in the IoRuns and CompressionContext.
  1410. --*/
  1411. {
  1412. PVOID RangePtr;
  1413. ULONG Index;
  1414. LBO NextLbo;
  1415. LCN NextLcn;
  1416. VBO TempVbo;
  1417. ULONG NextLcnOffset;
  1418. VCN StartingVcn;
  1419. ULONG NextByteCount;
  1420. ULONG ReturnByteCount;
  1421. ULONG TrimmedByteCount;
  1422. LONGLONG NextClusterCount;
  1423. BOOLEAN NextIsAllocated;
  1424. BOOLEAN SparseWrite = FALSE;
  1425. BOOLEAN OriginalSparseWrite = FALSE;
  1426. ULONG BufferOffset;
  1427. ULONG StructureSize;
  1428. ULONG UsaOffset;
  1429. ULONG BytesInIoRuns;
  1430. BOOLEAN StopForUsa;
  1431. PVOID SystemBuffer;
  1432. ULONG CompressionUnit, CompressionUnitInClusters;
  1433. ULONG CompressionUnitOffset;
  1434. ULONG CompressedSize, FinalCompressedSize;
  1435. LONGLONG FinalCompressedClusters;
  1436. ULONG LastStartUsaIoRun;
  1437. LOGICAL ReadRequest;
  1438. PIO_STACK_LOCATION IrpSp;
  1439. PIO_RUN IoRuns;
  1440. NTSTATUS Status;
  1441. VBO StartVbo = *StartingVbo;
  1442. PVCB Vcb = Scb->Vcb;
  1443. PAGED_CODE();
  1444. //
  1445. // Initialize some locals.
  1446. //
  1447. IoRuns = CompressionContext->IoRuns;
  1448. *NumberRuns = 0;
  1449. IrpSp = IoGetCurrentIrpStackLocation(Irp);
  1450. ReadRequest = (LOGICAL)((IrpContext->MajorFunction == IRP_MJ_READ) ||
  1451. ((IrpContext->MajorFunction == IRP_MJ_FILE_SYSTEM_CONTROL) &&
  1452. (IrpContext->MinorFunction == IRP_MN_USER_FS_REQUEST) &&
  1453. (IrpSp->Parameters.FileSystemControl.FsControlCode == FSCTL_READ_FROM_PLEX)));
  1454. //
  1455. // For nonbuffered I/O, we need the buffer locked in all
  1456. // cases.
  1457. //
  1458. // This call may raise. If this call succeeds and a subsequent
  1459. // condition is raised, the buffers are unlocked automatically
  1460. // by the I/O system when the request is completed, via the
  1461. // Irp->MdlAddress field.
  1462. //
  1463. ASSERT( FIELD_OFFSET(IO_STACK_LOCATION, Parameters.Read.Length) ==
  1464. FIELD_OFFSET(IO_STACK_LOCATION, Parameters.Write.Length) );
  1465. NtfsLockUserBuffer( IrpContext,
  1466. Irp,
  1467. ReadRequest ?
  1468. IoWriteAccess : IoReadAccess,
  1469. IrpSp->Parameters.Read.Length );
  1470. //
  1471. // Normally the Mdl BufferOffset picks up from where we last left off.
  1472. // However, for those cases where we have called NtfsAllocateCompressionBuffer,
  1473. // for a scratch buffer, we always reset to offset 0.
  1474. //
  1475. BufferOffset = CompressionContext->SystemBufferOffset;
  1476. if (CompressionContext->SavedMdl != NULL) {
  1477. BufferOffset = 0;
  1478. }
  1479. //
  1480. // Check if this request wants to drive the IO directly from the Mcb. This is
  1481. // the case for all Scb's without a compression unit or for reads of uncompressed
  1482. // files or compressed reads. Also proceed with sparse writes optimistically
  1483. // assuming the compression unit is allocated.
  1484. //
  1485. if ((ReadRequest) ?
  1486. //
  1487. // Trust Mcb on reads of uncompressed files or reading compressed data.
  1488. //
  1489. ((Scb->CompressionUnit == 0) ||
  1490. !FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_COMPRESSION_MASK ) ||
  1491. FlagOn( StreamFlags, COMPRESSED_STREAM )) :
  1492. //
  1493. // Trust Mcb (optimistically) for writes of uncompressed sparse files.
  1494. //
  1495. ((Scb->CompressionUnit == 0) ||
  1496. (OriginalSparseWrite =
  1497. SparseWrite = FlagOn( Scb->AttributeFlags,
  1498. ATTRIBUTE_FLAG_COMPRESSION_MASK | ATTRIBUTE_FLAG_SPARSE ) == ATTRIBUTE_FLAG_SPARSE))) {
  1499. ASSERT( (ReadRequest) ||
  1500. (IrpContext->MajorFunction == IRP_MJ_WRITE) ||
  1501. FlagOn( StreamFlags, COMPRESSED_STREAM ) );
  1502. ASSERT( (Scb->CompressionUnit == 0) ||
  1503. NtfsIsTypeCodeCompressible( Scb->AttributeTypeCode ) );
  1504. //
  1505. // If this is a Usa-protected structure and we are reading, figure out
  1506. // what units we want to access it in.
  1507. //
  1508. TrimmedByteCount = 0;
  1509. if ((Scb->EncryptionContext != NULL) &&
  1510. (IrpContext->MajorFunction == IRP_MJ_WRITE)) {
  1511. //
  1512. // For an encrypted file, we will be allocating a new buffer in the irp
  1513. // so the entries in the ioruns array should have offsets relative to
  1514. // this new buffer.
  1515. //
  1516. if (ByteCount > LARGE_BUFFER_SIZE) {
  1517. //
  1518. // Trim to LARGE_BUFFER_SIZE and remember the amount trimmed
  1519. // to add back to byte count later.
  1520. //
  1521. TrimmedByteCount = ByteCount - LARGE_BUFFER_SIZE;
  1522. ByteCount = LARGE_BUFFER_SIZE;
  1523. DebugTrace( 0, Dbg, ("\nTrimming ByteCount by %x", TrimmedByteCount) );
  1524. }
  1525. }
  1526. StructureSize = ByteCount;
  1527. if (FlagOn(Scb->ScbState, SCB_STATE_USA_PRESENT) &&
  1528. (ReadRequest)) {
  1529. //
  1530. // Get the the number of blocks, based on what type of stream it is.
  1531. // First check for Mft or Log file.
  1532. //
  1533. if (Scb->Header.NodeTypeCode == NTFS_NTC_SCB_MFT) {
  1534. ASSERT((Scb == Vcb->MftScb) || (Scb == Vcb->Mft2Scb));
  1535. StructureSize = Vcb->BytesPerFileRecordSegment;
  1536. //
  1537. // Otherwise it is an index, so we can get the count out of the Scb.
  1538. //
  1539. } else if (Scb->Header.NodeTypeCode != NTFS_NTC_SCB_DATA) {
  1540. StructureSize = Scb->ScbType.Index.BytesPerIndexBuffer;
  1541. }
  1542. //
  1543. // Remember the last index in the IO runs array which will allow us to
  1544. // read in a full USA structure in the worst case.
  1545. //
  1546. LastStartUsaIoRun = ClustersFromBytes( Vcb, StructureSize );
  1547. if (LastStartUsaIoRun > NTFS_MAX_PARALLEL_IOS) {
  1548. LastStartUsaIoRun = 0;
  1549. } else {
  1550. LastStartUsaIoRun = NTFS_MAX_PARALLEL_IOS - LastStartUsaIoRun;
  1551. }
  1552. }
  1553. BytesInIoRuns = 0;
  1554. UsaOffset = 0;
  1555. StopForUsa = FALSE;
  1556. while ((ByteCount != 0) && (*NumberRuns != NTFS_MAX_PARALLEL_IOS) && !StopForUsa) {
  1557. //
  1558. // Lookup next run
  1559. //
  1560. StartingVcn = LlClustersFromBytesTruncate( Vcb, StartVbo );
  1561. //
  1562. // If another writer is modifying the Mcb of a sparse file then we need
  1563. // to serialize our lookup.
  1564. //
  1565. if (FlagOn( Scb->ScbState, SCB_STATE_PROTECT_SPARSE_MCB )) {
  1566. NtfsPurgeFileRecordCache( IrpContext );
  1567. NtfsAcquireSharedScb( IrpContext, Scb );
  1568. try {
  1569. NextIsAllocated = NtfsLookupAllocation( IrpContext,
  1570. Scb,
  1571. StartingVcn,
  1572. &NextLcn,
  1573. &NextClusterCount,
  1574. NULL,
  1575. NULL );
  1576. } finally {
  1577. NtfsReleaseScb( IrpContext, Scb );
  1578. }
  1579. } else {
  1580. //
  1581. // Purge because lookupallocation may acquire the scb main if it needs to load
  1582. // which will be first main acquire and can be blocked behind an acquireallfiles
  1583. //
  1584. NtfsPurgeFileRecordCache( IrpContext );
  1585. NextIsAllocated = NtfsLookupAllocation( IrpContext,
  1586. Scb,
  1587. StartingVcn,
  1588. &NextLcn,
  1589. &NextClusterCount,
  1590. NULL,
  1591. NULL );
  1592. }
  1593. ASSERT( NextIsAllocated ||
  1594. FlagOn( Vcb->VcbState, VCB_STATE_RESTART_IN_PROGRESS ) ||
  1595. FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_SPARSE ) ||
  1596. (Scb == Vcb->MftScb) ||
  1597. FlagOn( StreamFlags, COMPRESSED_STREAM | ENCRYPTED_STREAM ) );
  1598. //
  1599. // If this is a sparse write we need to deal with cases where
  1600. // the run is not allocated OR the last run in this transfer
  1601. // was unallocated but this run is allocated.
  1602. //
  1603. if (SparseWrite) {
  1604. //
  1605. // If the current run is not allocated then break out of the loop.
  1606. //
  1607. if (!NextIsAllocated) {
  1608. //
  1609. // Convert to synchronous since we need to allocate space
  1610. //
  1611. if (*Wait == FALSE) {
  1612. *Wait = TRUE;
  1613. SetFlag( IrpContext->State, IRP_CONTEXT_STATE_WAIT );
  1614. ClearFlag( IrpContext->Union.NtfsIoContext->Flags, NTFS_IO_CONTEXT_ASYNC );
  1615. KeInitializeEvent( &IrpContext->Union.NtfsIoContext->Wait.SyncEvent,
  1616. NotificationEvent,
  1617. FALSE );
  1618. }
  1619. break;
  1620. }
  1621. //
  1622. // Deal with the case where the last run in this transfer was not allocated.
  1623. // In that case we would have allocated a compression buffer and stored
  1624. // the original Mdl into the compression context. Since this is an allocated
  1625. // range we can use the original user buffer and Mdl. Restore these
  1626. // back into the original irp now.
  1627. //
  1628. // If this file is encrypted, we do NOT want to change the buffer offset,
  1629. // because this offset will be stored as the first IoRun's buffer offset, and
  1630. // encrypt buffers will add the system buffer offset to that, and end up
  1631. // passing a bad buffer to the encryption driver. Besides, it's inefficient
  1632. // to deallocate the buffer, since encrypt buffers will have to reallocate it.
  1633. //
  1634. if ((CompressionContext->SavedMdl != NULL) &&
  1635. (Scb->EncryptionContext == NULL)) {
  1636. NtfsDeallocateCompressionBuffer( Irp, CompressionContext, TRUE );
  1637. BufferOffset = CompressionContext->SystemBufferOffset;
  1638. }
  1639. }
  1640. //
  1641. // Adjust from NextLcn to Lbo. NextByteCount may overflow out of 32 bits
  1642. // but we will catch that below when we compare clusters.
  1643. //
  1644. NextLcnOffset = ((ULONG)StartVbo) & Vcb->ClusterMask;
  1645. NextByteCount = BytesFromClusters( Vcb, (ULONG)NextClusterCount ) - NextLcnOffset;
  1646. //
  1647. // If next run is larger than we need, "ya get what you need".
  1648. // Note that after this we are guaranteed that the HighPart of
  1649. // NextByteCount is 0.
  1650. //
  1651. if ((ULONG)NextClusterCount >= ClustersFromBytes( Vcb, ByteCount + NextLcnOffset )) {
  1652. NextByteCount = ByteCount;
  1653. }
  1654. //
  1655. // If the byte count is zero then we will spin indefinitely. Raise
  1656. // corrupt here so the system doesn't hang.
  1657. //
  1658. if (NextByteCount == 0) {
  1659. NtfsRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR, NULL, Scb->Fcb );
  1660. }
  1661. //
  1662. // If this is a USA-protected structure, broken up in
  1663. // multiple runs, then we want to guarantee that we do
  1664. // not end up in the middle of a Usa-protected structure in the read path.
  1665. // Therefore, on the first run we will calculate the
  1666. // initial UsaOffset. Then in the worst case it can
  1667. // take the remaining four runs to finish the Usa structure.
  1668. //
  1669. // On the first subsequent run to complete a Usa structure,
  1670. // we set the count to end exactly on a Usa boundary.
  1671. //
  1672. if (FlagOn( Scb->ScbState, SCB_STATE_USA_PRESENT ) &&
  1673. (ReadRequest)) {
  1674. //
  1675. // So long as we know there are more IO runs left than the maximum
  1676. // number needed for the USA structure just maintain the current
  1677. // Usa offset.
  1678. //
  1679. if (*NumberRuns < LastStartUsaIoRun) {
  1680. UsaOffset = (UsaOffset + NextByteCount) & (StructureSize - 1);
  1681. //
  1682. // Now we will stop on the next Usa boundary, but we may not
  1683. // have it yet.
  1684. //
  1685. } else {
  1686. if ((NextByteCount + UsaOffset) >= StructureSize) {
  1687. NextByteCount = ((NextByteCount + UsaOffset) & ~(StructureSize - 1)) -
  1688. (UsaOffset & (StructureSize - 1));
  1689. StopForUsa = TRUE;
  1690. }
  1691. UsaOffset += NextByteCount;
  1692. }
  1693. }
  1694. //
  1695. // Only fill in the run array if the run is allocated.
  1696. //
  1697. if (NextIsAllocated) {
  1698. //
  1699. // Adjust if the Lcn offset (if we have one) and isn't zero.
  1700. //
  1701. NextLbo = LlBytesFromClusters( Vcb, NextLcn );
  1702. NextLbo = NextLbo + NextLcnOffset;
  1703. //
  1704. // Now that we have properly bounded this piece of the
  1705. // transfer, it is time to write it.
  1706. //
  1707. // We remember each piece of a parallel run by saving the
  1708. // essential information in the IoRuns array. The tranfers
  1709. // are started up in parallel below.
  1710. //
  1711. IoRuns[*NumberRuns].StartingVbo = StartVbo;
  1712. IoRuns[*NumberRuns].StartingLbo = NextLbo;
  1713. IoRuns[*NumberRuns].BufferOffset = BufferOffset;
  1714. IoRuns[*NumberRuns].ByteCount = NextByteCount;
  1715. BytesInIoRuns += NextByteCount;
  1716. *NumberRuns += 1;
  1717. } else if (ReadRequest) {
  1718. SystemBuffer = Add2Ptr( NtfsMapUserBuffer( Irp, NormalPagePriority ), BufferOffset );
  1719. //
  1720. // If this is not a compressed stream then fill this range with zeroes.
  1721. // Also if this is a sparse, non-compressed stream then check if we need to
  1722. // reserve clusters.
  1723. //
  1724. if (!FlagOn( StreamFlags, COMPRESSED_STREAM )) {
  1725. #ifdef SYSCACHE_DEBUG
  1726. if (ScbIsBeingLogged( Scb )) {
  1727. FsRtlLogSyscacheEvent( Scb, SCE_ZERO_NC, SCE_FLAG_NON_CACHED | SCE_FLAG_READ | SCE_FLAG_PREPARE_BUFFERS, StartVbo + BufferOffset, NextByteCount, 0 );
  1728. }
  1729. #endif
  1730. RtlZeroMemory( SystemBuffer, NextByteCount );
  1731. if (FlagOn( Irp->Flags, IRP_PAGING_IO ) &&
  1732. FlagOn( Scb->Header.Flags, FSRTL_FLAG_USER_MAPPED_FILE ) &&
  1733. (FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_COMPRESSION_MASK | ATTRIBUTE_FLAG_SPARSE ) == ATTRIBUTE_FLAG_SPARSE)) {
  1734. if (!NtfsReserveClusters( IrpContext,
  1735. Scb,
  1736. StartVbo,
  1737. NextByteCount )) {
  1738. NtfsRaiseStatus( IrpContext, STATUS_DISK_FULL, NULL, NULL );
  1739. }
  1740. }
  1741. //
  1742. // If it is compressed then make sure the range begins with a zero in
  1743. // case MM passed a non-zeroed buffer. Then the compressed read/write
  1744. // routines will know the chunk begins with a zero.
  1745. //
  1746. } else {
  1747. *((PULONG) SystemBuffer) = 0;
  1748. }
  1749. }
  1750. //
  1751. // Now adjust everything for the next pass through the loop.
  1752. //
  1753. StartVbo = StartVbo + NextByteCount;
  1754. BufferOffset += NextByteCount;
  1755. ByteCount -= NextByteCount;
  1756. }
  1757. //
  1758. // Let's remember about those bytes we trimmed off above. We have more
  1759. // bytes remaining than we think, and we didn't transfer as much, so we
  1760. // need to back up where we start the next transfer.
  1761. //
  1762. if (TrimmedByteCount != 0) {
  1763. DebugTrace( 0,
  1764. Dbg,
  1765. ("\nByteCount + TrimmedByteCount = %x + %x = %x",
  1766. ByteCount,
  1767. TrimmedByteCount,
  1768. ByteCount + TrimmedByteCount) );
  1769. DebugTrace( 0,
  1770. Dbg,
  1771. ("\nStartVbo - TrimmedByteCount = %I64x - %x = %I64x",
  1772. StartVbo,
  1773. TrimmedByteCount,
  1774. StartVbo - TrimmedByteCount) );
  1775. ByteCount += TrimmedByteCount;
  1776. }
  1777. //
  1778. // If this is a sparse write and the start of the write is unallocated then drop
  1779. // down to the compressed path below. Otherwise do the IO we found.
  1780. //
  1781. if (!SparseWrite || (BytesInIoRuns != 0)) {
  1782. return ByteCount;
  1783. }
  1784. }
  1785. ASSERT( Scb->Header.NodeTypeCode == NTFS_NTC_SCB_DATA );
  1786. //
  1787. // Initialize the compression parameters.
  1788. //
  1789. CompressionUnit = Scb->CompressionUnit;
  1790. CompressionUnitInClusters = ClustersFromBytes(Vcb, CompressionUnit);
  1791. CompressionUnitOffset = 0;
  1792. if (CompressionUnit != 0) {
  1793. CompressionUnitOffset = ((ULONG)StartVbo) & (CompressionUnit - 1);
  1794. }
  1795. //
  1796. // We want to make sure and wait to get byte count and things correctly.
  1797. //
  1798. if (!FlagOn(IrpContext->State, IRP_CONTEXT_STATE_WAIT)) {
  1799. NtfsRaiseStatus( IrpContext, STATUS_CANT_WAIT, NULL, NULL );
  1800. }
  1801. //
  1802. // Handle the compressed read case.
  1803. //
  1804. if (IrpContext->MajorFunction == IRP_MJ_READ) {
  1805. //
  1806. // If we have not already mapped the user buffer, then do it.
  1807. //
  1808. if (CompressionContext->SystemBuffer == NULL) {
  1809. CompressionContext->SystemBuffer = NtfsMapUserBuffer( Irp, NormalPagePriority );
  1810. }
  1811. BytesInIoRuns = 0;
  1812. //
  1813. // Adjust StartVbo and ByteCount by the offset.
  1814. //
  1815. ((ULONG)StartVbo) -= CompressionUnitOffset;
  1816. ByteCount += CompressionUnitOffset;
  1817. //
  1818. // Capture this value for maintaining the byte count to
  1819. // return.
  1820. //
  1821. ReturnByteCount = ByteCount;
  1822. //
  1823. // Now, the ByteCount we actually have to process has to
  1824. // be rounded up to the next compression unit.
  1825. //
  1826. ByteCount = BlockAlign( ByteCount, (LONG)CompressionUnit );
  1827. //
  1828. // Make sure we never try to handle more than a LARGE_BUFFER_SIZE
  1829. // at once, forcing our caller to call back.
  1830. //
  1831. if (ByteCount > LARGE_BUFFER_SIZE) {
  1832. ByteCount = LARGE_BUFFER_SIZE;
  1833. }
  1834. //
  1835. // In case we find no allocation....
  1836. //
  1837. IoRuns[0].ByteCount = 0;
  1838. while (ByteCount != 0) {
  1839. //
  1840. // Try to lookup the first run. If there is just a single run,
  1841. // we may just be able to pass it on.
  1842. //
  1843. ASSERT( !FlagOn( ((ULONG) StartVbo), Vcb->ClusterMask ));
  1844. StartingVcn = LlClustersFromBytesTruncate( Vcb, StartVbo );
  1845. NextIsAllocated = NtfsLookupAllocation( IrpContext,
  1846. Scb,
  1847. StartingVcn,
  1848. &NextLcn,
  1849. &NextClusterCount,
  1850. NULL,
  1851. NULL );
  1852. #if (defined(NTFS_RWCMP_TRACE))
  1853. ASSERT(!IsSyscache(Scb) || (NextClusterCount < 16) || !NextIsAllocated);
  1854. #endif
  1855. //
  1856. // Adjust from NextLcn to Lbo.
  1857. //
  1858. // If next run is larger than we need, "ya get what you need".
  1859. // Note that after this we are guaranteed that the HighPart of
  1860. // NextByteCount is 0.
  1861. //
  1862. if ((ULONG)NextClusterCount >= ClustersFromBytes( Vcb, ByteCount )) {
  1863. NextByteCount = ByteCount;
  1864. } else {
  1865. NextByteCount = BytesFromClusters( Vcb, (ULONG)NextClusterCount );
  1866. }
  1867. //
  1868. // Adjust if the Lcn offset isn't zero.
  1869. //
  1870. NextLbo = LlBytesFromClusters( Vcb, NextLcn );
  1871. //
  1872. // Only fill in the run array if the run is allocated.
  1873. //
  1874. if (NextIsAllocated) {
  1875. //
  1876. // If the Lbos are contiguous, then we can do a contiguous
  1877. // transfer, so we just increase the current byte count.
  1878. //
  1879. if ((*NumberRuns != 0) && (NextLbo ==
  1880. (IoRuns[*NumberRuns - 1].StartingLbo +
  1881. (IoRuns[*NumberRuns - 1].ByteCount)))) {
  1882. //
  1883. // Stop on the first compression unit boundary after the
  1884. // penultimate run in the default io array.
  1885. //
  1886. if (*NumberRuns >= NTFS_MAX_PARALLEL_IOS - 1) {
  1887. //
  1888. // First, if we are beyond the penultimate run and we are starting
  1889. // a run in a different compression unit than the previous
  1890. // run, then we can just break out and not use the current
  1891. // run. (*NumberRuns has not yet been incremented.)
  1892. // In order for it to be in the same run it can't begin at
  1893. // offset 0 in the compression unit and it must be contiguous
  1894. // with the virtual end of the previous run.
  1895. // The only case where this can happen in the running system is
  1896. // if there is a file record boundary in the middle of the
  1897. // compression unit.
  1898. //
  1899. if ((*NumberRuns > NTFS_MAX_PARALLEL_IOS - 1) &&
  1900. (!FlagOn( (ULONG) StartVbo, CompressionUnit - 1 ) ||
  1901. (StartVbo != (IoRuns[*NumberRuns - 1].StartingVbo +
  1902. IoRuns[*NumberRuns - 1].ByteCount)))) {
  1903. break;
  1904. //
  1905. // Else detect the case where this run ends on or
  1906. // crosses a compression unit boundary. In this case,
  1907. // just make sure the run stops on a compression unit
  1908. // boundary, and break out to return it.
  1909. //
  1910. } else if ((((ULONG) StartVbo & (CompressionUnit - 1)) + NextByteCount) >=
  1911. CompressionUnit) {
  1912. NextByteCount -= (((ULONG)StartVbo) + NextByteCount) & (CompressionUnit - 1);
  1913. BytesInIoRuns += NextByteCount;
  1914. if (ReturnByteCount > NextByteCount) {
  1915. ReturnByteCount -= NextByteCount;
  1916. } else {
  1917. ReturnByteCount = 0;
  1918. }
  1919. IoRuns[*NumberRuns - 1].ByteCount += NextByteCount;
  1920. break;
  1921. }
  1922. }
  1923. IoRuns[*NumberRuns - 1].ByteCount += NextByteCount;
  1924. //
  1925. // Otherwise it is time to start a new run, if there is space for one.
  1926. //
  1927. } else {
  1928. //
  1929. // If we have filled up the current I/O runs array, then we
  1930. // will grow it once to a size which would allow the worst
  1931. // case compression unit (all noncontiguous clusters) to
  1932. // start at index NTFS_MAX_PARALLEL_IOS - 1.
  1933. // The following if statement enforces
  1934. // this case as the worst case. With 16 clusters per compression
  1935. // unit, the theoretical maximum number of parallel I/Os
  1936. // would be 16 + NTFS_MAX_PARALLEL_IOS - 1, since we stop on the
  1937. // first compression unit boundary after the penultimate run.
  1938. // Normally, of course we will do much fewer.
  1939. //
  1940. if ((*NumberRuns == NTFS_MAX_PARALLEL_IOS) &&
  1941. (CompressionContext->AllocatedRuns == NTFS_MAX_PARALLEL_IOS)) {
  1942. PIO_RUN NewIoRuns;
  1943. NewIoRuns = NtfsAllocatePool( NonPagedPool,
  1944. (CompressionUnitInClusters + NTFS_MAX_PARALLEL_IOS - 1) * sizeof(IO_RUN) );
  1945. RtlCopyMemory( NewIoRuns,
  1946. CompressionContext->IoRuns,
  1947. NTFS_MAX_PARALLEL_IOS * sizeof(IO_RUN) );
  1948. IoRuns = CompressionContext->IoRuns = NewIoRuns;
  1949. CompressionContext->AllocatedRuns = CompressionUnitInClusters + NTFS_MAX_PARALLEL_IOS - 1;
  1950. }
  1951. //
  1952. // We remember each piece of a parallel run by saving the
  1953. // essential information in the IoRuns array. The transfers
  1954. // will be started up in parallel below.
  1955. //
  1956. ASSERT(*NumberRuns < CompressionContext->AllocatedRuns);
  1957. IoRuns[*NumberRuns].StartingVbo = StartVbo;
  1958. IoRuns[*NumberRuns].StartingLbo = NextLbo;
  1959. IoRuns[*NumberRuns].BufferOffset = BufferOffset;
  1960. IoRuns[*NumberRuns].ByteCount = NextByteCount;
  1961. if ((*NumberRuns + 1) < CompressionContext->AllocatedRuns) {
  1962. IoRuns[*NumberRuns + 1].ByteCount = 0;
  1963. }
  1964. //
  1965. // Stop on the first compression unit boundary after the
  1966. // penultimate run in the default array.
  1967. //
  1968. if (*NumberRuns >= NTFS_MAX_PARALLEL_IOS - 1) {
  1969. //
  1970. // First, if we are beyond penultimate run and we are starting
  1971. // a run in a different compression unit than the previous
  1972. // run, then we can just break out and not use the current
  1973. // run. (*NumberRuns has not yet been incremented.)
  1974. //
  1975. if ((*NumberRuns > NTFS_MAX_PARALLEL_IOS - 1) &&
  1976. ((((ULONG)StartVbo) & ~(CompressionUnit - 1)) !=
  1977. ((((ULONG)IoRuns[*NumberRuns - 1].StartingVbo) +
  1978. IoRuns[*NumberRuns - 1].ByteCount - 1) &
  1979. ~(CompressionUnit - 1)))) {
  1980. break;
  1981. //
  1982. // Else detect the case where this run ends on or
  1983. // crosses a compression unit boundary. In this case,
  1984. // just make sure the run stops on a compression unit
  1985. // boundary, and break out to return it.
  1986. //
  1987. } else if ((((ULONG)StartVbo) & ~(CompressionUnit - 1)) !=
  1988. ((((ULONG)StartVbo) + NextByteCount) & ~(CompressionUnit - 1))) {
  1989. NextByteCount -= (((ULONG)StartVbo) + NextByteCount) & (CompressionUnit - 1);
  1990. IoRuns[*NumberRuns].ByteCount = NextByteCount;
  1991. BytesInIoRuns += NextByteCount;
  1992. if (ReturnByteCount > NextByteCount) {
  1993. ReturnByteCount -= NextByteCount;
  1994. } else {
  1995. ReturnByteCount = 0;
  1996. }
  1997. *NumberRuns += 1;
  1998. break;
  1999. }
  2000. }
  2001. *NumberRuns += 1;
  2002. }
  2003. BytesInIoRuns += NextByteCount;
  2004. BufferOffset += NextByteCount;
  2005. }
  2006. //
  2007. // Now adjust everything for the next pass through the loop.
  2008. //
  2009. StartVbo += NextByteCount;
  2010. ByteCount -= NextByteCount;
  2011. if (ReturnByteCount > NextByteCount) {
  2012. ReturnByteCount -= NextByteCount;
  2013. } else {
  2014. ReturnByteCount = 0;
  2015. }
  2016. }
  2017. //
  2018. // Allocate the compressed buffer if it is not already allocated.
  2019. //
  2020. if (BytesInIoRuns < CompressionUnit) {
  2021. BytesInIoRuns = CompressionUnit;
  2022. }
  2023. NtfsAllocateCompressionBuffer( IrpContext, Scb, Irp, CompressionContext, &BytesInIoRuns );
  2024. return ReturnByteCount;
  2025. //
  2026. // Otherwise handle the compressed write case
  2027. //
  2028. } else {
  2029. LONGLONG SavedValidDataToDisk;
  2030. PUCHAR UncompressedBuffer;
  2031. ULONG UncompressedOffset;
  2032. ULONG ClusterOffset;
  2033. BOOLEAN NoopRange;
  2034. ULONG CompressedOffset;
  2035. PBCB Bcb;
  2036. ASSERT(IrpContext->MajorFunction == IRP_MJ_WRITE);
  2037. //
  2038. // Adjust StartVbo and ByteCount by the offset.
  2039. //
  2040. ((ULONG)StartVbo) -= CompressionUnitOffset;
  2041. ByteCount += CompressionUnitOffset;
  2042. //
  2043. // Maintain additional bytes to be returned in ReturnByteCount,
  2044. // and adjust this if we are larger than a LARGE_BUFFER_SIZE.
  2045. //
  2046. ReturnByteCount = 0;
  2047. if (ByteCount > LARGE_BUFFER_SIZE) {
  2048. ReturnByteCount = ByteCount - LARGE_BUFFER_SIZE;
  2049. ByteCount = LARGE_BUFFER_SIZE;
  2050. }
  2051. CompressedSize = ByteCount;
  2052. if (!FlagOn( StreamFlags, COMPRESSED_STREAM ) && (CompressionUnit != 0)) {
  2053. //
  2054. // To reduce pool consumption, make an educated/optimistic guess on
  2055. // how much pool we need to store the compressed data. If we are wrong
  2056. // we will just have to do some more I/O.
  2057. //
  2058. CompressedSize = BlockAlign( ByteCount, (LONG)CompressionUnit );
  2059. CompressedSize += Vcb->BytesPerCluster;
  2060. if (CompressedSize > LARGE_BUFFER_SIZE) {
  2061. CompressedSize = LARGE_BUFFER_SIZE;
  2062. }
  2063. //
  2064. // Allocate the compressed buffer if it is not already allocated, and this
  2065. // isn't the compressed stream.
  2066. //
  2067. if (SparseWrite &&
  2068. (CompressionContext->SystemBuffer == NULL)) {
  2069. CompressionContext->SystemBuffer = NtfsMapUserBuffer( Irp, NormalPagePriority );
  2070. }
  2071. //
  2072. // At this point BufferOffset should always be 0.
  2073. //
  2074. BufferOffset = 0;
  2075. NtfsAllocateCompressionBuffer( IrpContext, Scb, Irp, CompressionContext, &CompressedSize );
  2076. CompressionContext->DataTransformed = TRUE;
  2077. }
  2078. //
  2079. // Loop to compress the user's buffer.
  2080. //
  2081. CompressedOffset = 0;
  2082. UncompressedOffset = 0;
  2083. Bcb = NULL;
  2084. try {
  2085. BOOLEAN ChangeAllocation;
  2086. ULONG SparseFileBias;
  2087. //
  2088. // Loop as long as we will not overflow our compressed buffer, and we
  2089. // are also guaranteed that we will not overflow the extended IoRuns array
  2090. // in the worst case (and as long as we have more write to satisfy!).
  2091. //
  2092. while ((ByteCount != 0) && (*NumberRuns <= NTFS_MAX_PARALLEL_IOS - 1) &&
  2093. (((CompressedOffset + CompressionUnit) <= CompressedSize) ||
  2094. FlagOn( StreamFlags, COMPRESSED_STREAM ))) {
  2095. LONGLONG SizeToCompress;
  2096. //
  2097. // State variables to determine a reallocate range.
  2098. //
  2099. VCN DeleteVcn;
  2100. LONGLONG DeleteCount;
  2101. LONGLONG AllocateCount;
  2102. DeleteCount = 0;
  2103. AllocateCount = 0;
  2104. NoopRange = FALSE;
  2105. SparseFileBias = 0;
  2106. ClusterOffset = 0;
  2107. //
  2108. // Assume we are only compressing to FileSize, or else
  2109. // reduce to one compression unit. The maximum compression size
  2110. // we can accept is saving at least one cluster.
  2111. //
  2112. NtfsAcquireFsrtlHeader( Scb );
  2113. //
  2114. // If this is a compressed stream then we may need to go past file size.
  2115. //
  2116. if (FlagOn( StreamFlags, COMPRESSED_STREAM)) {
  2117. SizeToCompress = BlockAlign( Scb->Header.FileSize.QuadPart, (LONG)CompressionUnit );
  2118. SizeToCompress -= StartVbo;
  2119. } else {
  2120. SizeToCompress = Scb->Header.FileSize.QuadPart - StartVbo;
  2121. }
  2122. NtfsReleaseFsrtlHeader( Scb );
  2123. //
  2124. // It is possible that if this is the lazy writer that the file
  2125. // size was rolled back from a cached write which is aborting.
  2126. // In that case we either truncate the write or can exit this
  2127. // loop if there is nothing left to write.
  2128. //
  2129. if (SizeToCompress <= 0) {
  2130. ByteCount = 0;
  2131. break;
  2132. }
  2133. //
  2134. // Note if CompressionUnit is 0, then we do not need SizeToCompress.
  2135. //
  2136. if (SizeToCompress > CompressionUnit) {
  2137. SizeToCompress = (LONGLONG)CompressionUnit;
  2138. }
  2139. #ifdef COMPRESS_ON_WIRE
  2140. //
  2141. // For the normal uncompressed stream, map the data and compress it
  2142. // into the allocated buffer.
  2143. //
  2144. if (!FlagOn( StreamFlags, COMPRESSED_STREAM )) {
  2145. #endif
  2146. //
  2147. // If this is a sparse write then we zero the beginning and
  2148. // end of the compression unit as needed and copy in the user
  2149. // data.
  2150. //
  2151. if (SparseWrite) {
  2152. //
  2153. // Use local variables to position ourselves in the
  2154. // compression context buffer and user system buffer.
  2155. // We'll reuse StructureSize to show the number of
  2156. // user bytes copied to the buffer.
  2157. //
  2158. SystemBuffer = Add2Ptr( CompressionContext->SystemBuffer,
  2159. CompressionContext->SystemBufferOffset + UncompressedOffset );
  2160. UncompressedBuffer = Add2Ptr( CompressionContext->CompressionBuffer,
  2161. BufferOffset );
  2162. //
  2163. // Zero the beginning of the compression buffer if necessary.
  2164. //
  2165. if (CompressionUnitOffset != 0) {
  2166. #ifdef SYSCACHE_DEBUG
  2167. if (ScbIsBeingLogged( Scb )) {
  2168. FsRtlLogSyscacheEvent( Scb, SCE_ZERO_NC, SCE_FLAG_NON_CACHED | SCE_FLAG_PREPARE_BUFFERS | SCE_FLAG_WRITE, StartVbo, CompressionUnitOffset, 0 );
  2169. }
  2170. #endif
  2171. RtlZeroMemory( UncompressedBuffer, CompressionUnitOffset );
  2172. UncompressedBuffer += CompressionUnitOffset;
  2173. }
  2174. //
  2175. // Now copy the user data into the buffer.
  2176. //
  2177. if ((ULONG) SizeToCompress < ByteCount) {
  2178. StructureSize = (ULONG) BlockAlign( SizeToCompress, (LONG)Vcb->BytesPerSector ) - CompressionUnitOffset;
  2179. } else {
  2180. StructureSize = ByteCount - CompressionUnitOffset;
  2181. }
  2182. RtlCopyMemory( UncompressedBuffer,
  2183. SystemBuffer,
  2184. StructureSize );
  2185. //
  2186. // It may be necessary to zero the end of the buffer.
  2187. //
  2188. if ((ULONG) SizeToCompress > ByteCount) {
  2189. #ifdef SYSCACHE_DEBUG
  2190. if (ScbIsBeingLogged( Scb )) {
  2191. FsRtlLogSyscacheEvent( Scb, SCE_ZERO_NC, SCE_FLAG_NON_CACHED | SCE_FLAG_PREPARE_BUFFERS | SCE_FLAG_WRITE, StartVbo + StructureSize, SizeToCompress - ByteCount, 1 );
  2192. }
  2193. #endif
  2194. RtlZeroMemory( Add2Ptr( UncompressedBuffer, StructureSize ),
  2195. (ULONG) SizeToCompress - ByteCount );
  2196. }
  2197. FinalCompressedSize = CompressionUnit;
  2198. Status = STATUS_SUCCESS;
  2199. } else {
  2200. UncompressedBuffer = NULL;
  2201. if (CompressionUnit != 0) {
  2202. //
  2203. // Map the aligned range, set it dirty, and flush. We have to
  2204. // loop, because the Cache Manager limits how much and over what
  2205. // boundaries we can map. Only do this if there a file
  2206. // object. Otherwise we will assume we are writing the
  2207. // clusters directly to disk (via NtfsWriteClusters).
  2208. //
  2209. if (Scb->FileObject != NULL) {
  2210. CcMapData( Scb->FileObject,
  2211. (PLARGE_INTEGER)&StartVbo,
  2212. (ULONG)SizeToCompress,
  2213. TRUE,
  2214. &Bcb,
  2215. &UncompressedBuffer );
  2216. #ifdef MAPCOUNT_DBG
  2217. IrpContext->MapCount += 1;
  2218. #endif
  2219. } else {
  2220. UncompressedBuffer = MmGetSystemAddressForMdlSafe( CompressionContext->SavedMdl, NormalPagePriority );
  2221. if (UncompressedBuffer == NULL) {
  2222. NtfsRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES, NULL, NULL );
  2223. }
  2224. }
  2225. //
  2226. // If we have not already allocated the workspace, then do it. We don't
  2227. // need the workspace if the file is not compressed (i.e. sparse).
  2228. //
  2229. if ((CompressionContext->WorkSpace == NULL) &&
  2230. FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_COMPRESSION_MASK )) {
  2231. ULONG CompressWorkSpaceSize;
  2232. ULONG FragmentWorkSpaceSize;
  2233. (VOID) RtlGetCompressionWorkSpaceSize( (USHORT)((Scb->AttributeFlags & ATTRIBUTE_FLAG_COMPRESSION_MASK) + 1),
  2234. &CompressWorkSpaceSize,
  2235. &FragmentWorkSpaceSize );
  2236. //
  2237. // It is critical to ask for the work space buffer. It is the only
  2238. // one large enough to hold the bigger ia64 pointers.
  2239. //
  2240. NtfsCreateMdlAndBuffer( IrpContext,
  2241. Scb,
  2242. RESERVED_BUFFER_WORKSPACE_NEEDED,
  2243. &CompressWorkSpaceSize,
  2244. NULL,
  2245. &CompressionContext->WorkSpace );
  2246. }
  2247. }
  2248. try {
  2249. //
  2250. // If we are moving an uncompressed file, then do not compress
  2251. //
  2252. if (CompressionUnit == 0) {
  2253. FinalCompressedSize = ByteCount;
  2254. Status = STATUS_SUCCESS;
  2255. //
  2256. // If we are writing compressed, compress it now.
  2257. //
  2258. } else if (!FlagOn(Scb->ScbState, SCB_STATE_WRITE_COMPRESSED) ||
  2259. ((Status =
  2260. RtlCompressBuffer( (USHORT)((Scb->AttributeFlags & ATTRIBUTE_FLAG_COMPRESSION_MASK) + 1),
  2261. UncompressedBuffer,
  2262. (ULONG)SizeToCompress,
  2263. CompressionContext->CompressionBuffer + CompressedOffset,
  2264. (CompressionUnit - Vcb->BytesPerCluster),
  2265. NTFS_CHUNK_SIZE,
  2266. &FinalCompressedSize,
  2267. CompressionContext->WorkSpace )) ==
  2268. STATUS_BUFFER_TOO_SMALL)) {
  2269. //
  2270. // If it did not compress, just copy it over, sigh. This looks bad,
  2271. // but it should virtually never occur assuming compression is working
  2272. // ok. In the case where FileSize is in this unit, make sure we
  2273. // at least copy to a sector boundary.
  2274. //
  2275. FinalCompressedSize = CompressionUnit;
  2276. if (!SparseWrite) {
  2277. RtlCopyMemory( CompressionContext->CompressionBuffer + CompressedOffset,
  2278. UncompressedBuffer,
  2279. (ULONG)BlockAlign( SizeToCompress, (LONG)Vcb->BytesPerSector));
  2280. }
  2281. ASSERT(FinalCompressedSize <= (CompressedSize - CompressedOffset));
  2282. Status = STATUS_SUCCESS;
  2283. }
  2284. //
  2285. // Probably Gary's compression routine faulted, but blame it on
  2286. // the user buffer!
  2287. //
  2288. } except(NtfsCompressionFilter(IrpContext, GetExceptionInformation())) {
  2289. NtfsRaiseStatus( IrpContext, STATUS_INVALID_USER_BUFFER, NULL, NULL );
  2290. }
  2291. }
  2292. //
  2293. // For the compressed stream, we need to scan the compressed data
  2294. // to see how much we actually have to write.
  2295. //
  2296. #ifdef COMPRESS_ON_WIRE
  2297. } else {
  2298. //
  2299. // Don't walk off the end of the data being written, because that
  2300. // would cause bogus faults in the compressed stream.
  2301. //
  2302. if (SizeToCompress > ByteCount) {
  2303. SizeToCompress = ByteCount;
  2304. }
  2305. //
  2306. // Map the compressed data.
  2307. //
  2308. CcMapData( Scb->Header.FileObjectC,
  2309. (PLARGE_INTEGER)&StartVbo,
  2310. (ULONG)SizeToCompress,
  2311. TRUE,
  2312. &Bcb,
  2313. &UncompressedBuffer );
  2314. #ifdef MAPCOUNT_DBG
  2315. IrpContext->MapCount++;
  2316. #endif
  2317. FinalCompressedSize = 0;
  2318. //
  2319. // Loop until we get an error or stop advancing.
  2320. //
  2321. RangePtr = UncompressedBuffer + CompressionUnit;
  2322. do {
  2323. Status = RtlDescribeChunk( (USHORT)((Scb->AttributeFlags & ATTRIBUTE_FLAG_COMPRESSION_MASK) + 1),
  2324. &UncompressedBuffer,
  2325. (PUCHAR)RangePtr,
  2326. (PUCHAR *)&SystemBuffer,
  2327. &CompressedSize );
  2328. //
  2329. // Remember if we see any nonzero chunks
  2330. //
  2331. FinalCompressedSize |= CompressedSize;
  2332. } while (NT_SUCCESS(Status));
  2333. //
  2334. // If we terminated on anything but STATUS_NO_MORE_ENTRIES, we
  2335. // somehow picked up some bad data.
  2336. //
  2337. if (Status != STATUS_NO_MORE_ENTRIES) {
  2338. ASSERT(Status == STATUS_NO_MORE_ENTRIES);
  2339. NtfsRaiseStatus( IrpContext, Status, NULL, NULL );
  2340. }
  2341. Status = STATUS_SUCCESS;
  2342. //
  2343. // If we got any nonzero chunks, then calculate size of buffer to write.
  2344. // (Size does not include terminating Ushort of 0.)
  2345. //
  2346. if (FinalCompressedSize != 0) {
  2347. FinalCompressedSize = BlockAlignTruncate( (ULONG_PTR)UncompressedBuffer, (ULONG)CompressionUnit );
  2348. //
  2349. // If the Lazy Writer is writing beyond the end of the compression
  2350. // unit (there are dirty pages at the end of the compression unit)
  2351. // then we can throw this data away.
  2352. //
  2353. if (FinalCompressedSize < CompressionUnitOffset) {
  2354. //
  2355. // Set up to move to the next compression unit.
  2356. //
  2357. NoopRange = TRUE;
  2358. ChangeAllocation = FALSE;
  2359. //
  2360. // Set TempVbo to the compression unit offset. The
  2361. // number of bytes to skip over is the remaining
  2362. // bytes in a compression unit.
  2363. //
  2364. TempVbo = CompressionUnitOffset;
  2365. //
  2366. // If the Lazy Writer does not have the beginning of the compression
  2367. // unit then raise out of here and wait for the write which includes
  2368. // the beginning.
  2369. //
  2370. } else if (CompressionUnitOffset != 0) {
  2371. #if defined(COMPRESS_ON_WIRE) && defined(NTFS_RWC_DEBUG)
  2372. ASSERT( !NtfsBreakOnConflict ||
  2373. (Scb->LazyWriteThread[1] == PsGetCurrentThread()) );
  2374. #endif
  2375. NtfsRaiseStatus( IrpContext, STATUS_FILE_LOCK_CONFLICT, NULL, NULL );
  2376. //
  2377. // If we saw more chunks than our writer is trying to write (it
  2378. // more or less has to be the Lazy Writer), then we need to reject
  2379. // this request and assume he will come back later for the entire
  2380. // amount. This could be a problem for WRITE_THROUGH.
  2381. //
  2382. } else if (FinalCompressedSize > ByteCount) {
  2383. #ifdef NTFS_RWC_DEBUG
  2384. ASSERT( !NtfsBreakOnConflict ||
  2385. (Scb->LazyWriteThread[1] == PsGetCurrentThread()) );
  2386. ASSERT( Scb->LazyWriteThread[1] == PsGetCurrentThread() );
  2387. #endif
  2388. NtfsRaiseStatus( IrpContext, STATUS_FILE_LOCK_CONFLICT, NULL, NULL );
  2389. }
  2390. }
  2391. }
  2392. #endif
  2393. NtfsUnpinBcb( IrpContext, &Bcb );
  2394. //
  2395. // Round the FinalCompressedSize up to a cluster boundary now.
  2396. //
  2397. FinalCompressedSize = (FinalCompressedSize + Vcb->BytesPerCluster - 1) &
  2398. ~(Vcb->BytesPerCluster - 1);
  2399. //
  2400. // If the Status was not success, then we have to do something.
  2401. //
  2402. if (Status != STATUS_SUCCESS) {
  2403. //
  2404. // If it was actually an error, then we will raise out of
  2405. // here.
  2406. //
  2407. if (!NT_SUCCESS(Status)) {
  2408. NtfsRaiseStatus( IrpContext, Status, NULL, NULL );
  2409. //
  2410. // If the buffer compressed to all zeros, then we will
  2411. // not allocate anything.
  2412. //
  2413. } else if (Status == STATUS_BUFFER_ALL_ZEROS) {
  2414. FinalCompressedSize = 0;
  2415. }
  2416. }
  2417. if (!NoopRange) {
  2418. StartingVcn = LlClustersFromBytesTruncate( Vcb, StartVbo );
  2419. //
  2420. // Time to get the Scb if we do not have it already. We
  2421. // need to serialize our changes of the Mcb.
  2422. // N.B. -- We may _not_ always be the top level request.
  2423. // Converting a compressed stream to nonresident can
  2424. // send us down this path with Irp != OriginatingIrp.
  2425. //
  2426. if (!CompressionContext->ScbAcquired) {
  2427. NtfsPurgeFileRecordCache( IrpContext );
  2428. NtfsAcquireExclusiveScb( IrpContext, Scb );
  2429. CompressionContext->ScbAcquired = TRUE;
  2430. }
  2431. NextIsAllocated = NtfsLookupAllocation( IrpContext,
  2432. Scb,
  2433. StartingVcn,
  2434. &NextLcn,
  2435. &NextClusterCount,
  2436. NULL,
  2437. NULL );
  2438. //
  2439. // If this originally was a sparse write but we were defragging
  2440. // then we need to be careful if the range is unallocated. In
  2441. // that case we really need to do the full sparse support. Break
  2442. // out of the loop at this point and perform the IO with
  2443. // the ranges we already have.
  2444. //
  2445. if (!NextIsAllocated && OriginalSparseWrite && !SparseWrite) {
  2446. break;
  2447. }
  2448. //
  2449. // If the StartingVcn is allocated, we always have to check
  2450. // if we need to delete something, or if in the unusual case
  2451. // there is a hole there smaller than a compression unit.
  2452. //
  2453. // If this is a sparse write then we never have anything to
  2454. // deallocate.
  2455. //
  2456. FinalCompressedClusters = ClustersFromBytes( Vcb, FinalCompressedSize );
  2457. ChangeAllocation = FALSE;
  2458. if (SparseWrite) {
  2459. //
  2460. // It is possible that the compression unit has been allocated since we
  2461. // tested allocation when we entered this routine. If so we can
  2462. // write directly to disk in the allocated range. We need to
  2463. // modify the range being written however.
  2464. //
  2465. if (NextIsAllocated) {
  2466. //
  2467. // Move forward to the beginning of this write.
  2468. //
  2469. SparseFileBias = CompressionUnitOffset;
  2470. ((ULONG) StartVbo) += CompressionUnitOffset;
  2471. CompressedOffset += CompressionUnitOffset;
  2472. BufferOffset += CompressionUnitOffset;
  2473. FinalCompressedSize -= CompressionUnitOffset;
  2474. if (FinalCompressedSize > (ByteCount - CompressionUnitOffset)) {
  2475. FinalCompressedSize = (ByteCount - CompressionUnitOffset);
  2476. }
  2477. StartingVcn = LlClustersFromBytesTruncate( Vcb, StartVbo );
  2478. //
  2479. // Remember that we might not be on a cluster boundary at this point.
  2480. //
  2481. ClusterOffset = (ULONG) StartVbo & Vcb->ClusterMask;
  2482. //
  2483. // Look up the correct range on the disk.
  2484. //
  2485. NextIsAllocated = NtfsLookupAllocation( IrpContext,
  2486. Scb,
  2487. StartingVcn,
  2488. &NextLcn,
  2489. &NextClusterCount,
  2490. NULL,
  2491. NULL );
  2492. ASSERT( NextIsAllocated );
  2493. } else {
  2494. //
  2495. // Set the Scb flag to indicate we need to serialize non-cached IO
  2496. // with the Mcb.
  2497. //
  2498. SetFlag( Scb->ScbState, SCB_STATE_PROTECT_SPARSE_MCB );
  2499. }
  2500. } else if (NextIsAllocated || (NextClusterCount < CompressionUnitInClusters)) {
  2501. VCN TempClusterCount;
  2502. //
  2503. // If we need fewer clusters than allocated, then just allocate them.
  2504. // But if we need more clusters, then deallocate all the ones we have
  2505. // now, otherwise we could corrupt file data if we back out a write
  2506. // after actually having written the sectors. (For example, we could
  2507. // extend from 5 to 6 clusters and write 6 clusters of compressed data.
  2508. // If we have to back that out we will have a 6-cluster pattern of
  2509. // compressed data with one sector deallocated!).
  2510. //
  2511. NextIsAllocated = NextIsAllocated &&
  2512. (NextClusterCount >= FinalCompressedClusters);
  2513. //
  2514. // If we are cleaning up a hole, or the next run is unuseable,
  2515. // then make sure we just delete it rather than sliding the
  2516. // tiny run up with SplitMcb. Note that we have the Scb exclusive,
  2517. // and that since all compressed files go through the cache, we
  2518. // know that the dirty pages can't go away even if we spin out
  2519. // of here with ValidDataToDisk bumped up too high.
  2520. //
  2521. SavedValidDataToDisk = Scb->ValidDataToDisk;
  2522. if (!NextIsAllocated && ((StartVbo + CompressionUnit) > Scb->ValidDataToDisk)) {
  2523. Scb->ValidDataToDisk = StartVbo + CompressionUnit;
  2524. }
  2525. //
  2526. // Also, we need to handle the case where a range within
  2527. // ValidDataToDisk is fully allocated. If we are going to compress
  2528. // now, then we have the same problem with failing after writing
  2529. // the compressed data out, i.e., because we are fully allocated
  2530. // we would see the data as uncompressed after an abort, yet we
  2531. // have written compressed data. We do not implement the entire
  2532. // loop necessary to really see if the compression unit is fully
  2533. // allocated - we just verify that NextClusterCount is less than
  2534. // a compression unit and that the next run is not allocated. Just
  2535. // because the next contiguous run is also allocated does not guarantee
  2536. // that the compression unit is fully allocated, but maybe we will
  2537. // get some small defrag gain by reallocating what we need in a
  2538. // single run.
  2539. //
  2540. NextIsAllocated = NextIsAllocated &&
  2541. ((StartVbo >= Scb->ValidDataToDisk) ||
  2542. (FinalCompressedClusters == CompressionUnitInClusters) ||
  2543. ((NextClusterCount < CompressionUnitInClusters) &&
  2544. (!NtfsLookupAllocation( IrpContext,
  2545. Scb,
  2546. StartingVcn + NextClusterCount,
  2547. &NextLbo,
  2548. &TempClusterCount,
  2549. NULL,
  2550. NULL ) ||
  2551. (NextLbo != UNUSED_LCN))));
  2552. //
  2553. // If we are not keeping any allocation, or we need less
  2554. // than a compression unit, then call NtfsDeleteAllocation.
  2555. //
  2556. if (!NextIsAllocated ||
  2557. (FinalCompressedClusters < CompressionUnitInClusters)) {
  2558. //
  2559. // Skip this explicit delete if we are rewriting within
  2560. // ValidDataToDisk. We know we won't be doing a SplitMcb.
  2561. //
  2562. DeleteVcn = StartingVcn;
  2563. if (NextIsAllocated) {
  2564. DeleteVcn += FinalCompressedClusters;
  2565. }
  2566. DeleteCount = CompressionUnit;
  2567. if (CompressionUnit == 0) {
  2568. DeleteCount = ByteCount;
  2569. }
  2570. DeleteCount = LlClustersFromBytes( Vcb, DeleteCount );
  2571. //
  2572. // Take the explicit DeleteAllocation path if there is a chance
  2573. // we might do a SplitMcb. This is true for a compressed write
  2574. // which extends into a new compression unit.
  2575. //
  2576. if ((CompressionUnit != 0) &&
  2577. ((StartingVcn + DeleteCount) >
  2578. LlClustersFromBytesTruncate( Vcb,
  2579. ((Scb->ValidDataToDisk + CompressionUnit - 1) &
  2580. ~((LONGLONG) (CompressionUnit - 1))) ))) {
  2581. NtfsDeleteAllocation( IrpContext,
  2582. IrpSp->FileObject,
  2583. Scb,
  2584. DeleteVcn,
  2585. StartingVcn + DeleteCount - 1,
  2586. TRUE,
  2587. FALSE );
  2588. //
  2589. // Set the DeleteCount to 0 so we know there is no other deallocate
  2590. // to do.
  2591. //
  2592. DeleteCount = 0;
  2593. //
  2594. // Bias the DeleteCount by the number of clusters into the compression
  2595. // unit we are beginning.
  2596. //
  2597. } else {
  2598. DeleteCount -= (DeleteVcn - StartingVcn);
  2599. ASSERT( DeleteCount >= 0 );
  2600. }
  2601. ChangeAllocation = TRUE;
  2602. }
  2603. Scb->ValidDataToDisk = SavedValidDataToDisk;
  2604. }
  2605. //
  2606. // Now deal with the case where we do need to allocate space.
  2607. //
  2608. TempVbo = StartVbo;
  2609. if (FinalCompressedSize != 0) {
  2610. //
  2611. // If this compression unit is not (sufficiently) allocated, then
  2612. // do it now.
  2613. //
  2614. if (!NextIsAllocated ||
  2615. ((NextClusterCount < FinalCompressedClusters) && !SparseWrite)) {
  2616. AllocateCount = FinalCompressedClusters;
  2617. } else {
  2618. AllocateCount = 0;
  2619. }
  2620. //
  2621. // Now call our reallocate routine to do the work.
  2622. //
  2623. if ((DeleteCount != 0) || (AllocateCount != 0)) {
  2624. #ifdef SYSCACHE_DEBUG
  2625. if (ScbIsBeingLogged( Scb )) {
  2626. FsRtlLogSyscacheEvent( Scb, SCE_ADD_ALLOCATION, SCE_FLAG_PREPARE_BUFFERS, StartingVcn, AllocateCount, DeleteCount );
  2627. }
  2628. #endif
  2629. NtfsReallocateRange( IrpContext,
  2630. Scb,
  2631. DeleteVcn,
  2632. DeleteCount,
  2633. StartingVcn,
  2634. AllocateCount,
  2635. NULL );
  2636. ChangeAllocation = TRUE;
  2637. }
  2638. //
  2639. // If we added space, something may have moved, so we must
  2640. // look up our position and get a new index. Also relookup
  2641. // to get a rangeptr and index
  2642. //
  2643. NtfsLookupAllocation( IrpContext,
  2644. Scb,
  2645. StartingVcn,
  2646. &NextLcn,
  2647. &NextClusterCount,
  2648. &RangePtr,
  2649. &Index );
  2650. //
  2651. // Now loop to update the IoRuns array.
  2652. //
  2653. CompressedOffset += FinalCompressedSize;
  2654. while (FinalCompressedSize != 0) {
  2655. LONGLONG RunOffset;
  2656. //
  2657. // Get the actual number of clusters being written.
  2658. //
  2659. FinalCompressedClusters = ClustersFromBytes( Vcb, FinalCompressedSize );
  2660. //
  2661. // Try to lookup the first run. If there is just a single run,
  2662. // we may just be able to pass it on. Index into the Mcb directly
  2663. // for greater speed.
  2664. //
  2665. NextIsAllocated = NtfsGetSequentialMcbEntry( &Scb->Mcb,
  2666. &RangePtr,
  2667. Index,
  2668. &StartingVcn,
  2669. &NextLcn,
  2670. &NextClusterCount );
  2671. //
  2672. // It is possible that we could walk across an Mcb boundary and the
  2673. // following entry isn't loaded. In that case we want to look the
  2674. // up the allocation specifically to force the Mcb load.
  2675. //
  2676. if (Index == MAXULONG) {
  2677. //
  2678. // A failure on NtfsGetSequentialMcbEntry above will modify StartingVcn.
  2679. // Recalculate here based on TempVbo.
  2680. //
  2681. StartingVcn = LlClustersFromBytesTruncate( Vcb, TempVbo );
  2682. NextIsAllocated = NtfsLookupAllocation( IrpContext,
  2683. Scb,
  2684. StartingVcn,
  2685. &NextLcn,
  2686. &NextClusterCount,
  2687. &RangePtr,
  2688. &Index );
  2689. ASSERT( NextIsAllocated );
  2690. NextIsAllocated = NtfsGetSequentialMcbEntry( &Scb->Mcb,
  2691. &RangePtr,
  2692. Index,
  2693. &StartingVcn,
  2694. &NextLcn,
  2695. &NextClusterCount );
  2696. }
  2697. Index += 1;
  2698. ASSERT(NextIsAllocated);
  2699. ASSERT(NextLcn != UNUSED_LCN);
  2700. //
  2701. // Our desired Vcn could be in the middle of this run, so do
  2702. // some adjustments.
  2703. //
  2704. RunOffset = Int64ShraMod32(TempVbo, Vcb->ClusterShift) - StartingVcn;
  2705. ASSERT( ((PLARGE_INTEGER)&RunOffset)->HighPart >= 0 );
  2706. ASSERT( NextClusterCount > RunOffset );
  2707. NextLcn = NextLcn + RunOffset;
  2708. NextClusterCount = NextClusterCount - RunOffset;
  2709. //
  2710. // Adjust from NextLcn to Lbo. NextByteCount may overflow out of 32 bits
  2711. // but we will catch that below when we compare clusters.
  2712. //
  2713. NextLbo = LlBytesFromClusters( Vcb, NextLcn ) + ClusterOffset;
  2714. NextByteCount = BytesFromClusters( Vcb, (ULONG)NextClusterCount );
  2715. //
  2716. // If next run is larger than we need, "ya get what you need".
  2717. // Note that after this we are guaranteed that the HighPart of
  2718. // NextByteCount is 0.
  2719. //
  2720. if (NextClusterCount >= FinalCompressedClusters) {
  2721. NextByteCount = FinalCompressedSize;
  2722. }
  2723. //
  2724. // If the Lbos are contiguous, then we can do a contiguous
  2725. // transfer, so we just increase the current byte count.
  2726. // For compressed streams, note however that the BufferOffset
  2727. // may not be contiguous!
  2728. //
  2729. if ((*NumberRuns != 0) &&
  2730. (NextLbo == (IoRuns[*NumberRuns - 1].StartingLbo +
  2731. IoRuns[*NumberRuns - 1].ByteCount)) &&
  2732. (BufferOffset == (IoRuns[*NumberRuns - 1].BufferOffset +
  2733. IoRuns[*NumberRuns - 1].ByteCount))) {
  2734. IoRuns[*NumberRuns - 1].ByteCount += NextByteCount;
  2735. //
  2736. // Otherwise it is time to start a new run, if there is space for one.
  2737. //
  2738. } else {
  2739. //
  2740. // If we have filled up the current I/O runs array, then we
  2741. // will grow it once to a size which would allow the worst
  2742. // case compression unit (all noncontiguous clusters) to
  2743. // start at the penultimate index. The following if
  2744. // statement enforces this case as the worst case. With 16
  2745. // clusters per compression unit, the theoretical maximum
  2746. // number of parallel I/Os would be 16 + NTFS_MAX_PARALLEL_IOS - 1,
  2747. // since we stop on the first compression unit
  2748. // boundary after the penultimate run. Normally, of course we
  2749. // will do much fewer.
  2750. //
  2751. if ((*NumberRuns == NTFS_MAX_PARALLEL_IOS) &&
  2752. (CompressionContext->AllocatedRuns == NTFS_MAX_PARALLEL_IOS)) {
  2753. PIO_RUN NewIoRuns;
  2754. NewIoRuns = NtfsAllocatePool( NonPagedPool,
  2755. (CompressionUnitInClusters + NTFS_MAX_PARALLEL_IOS - 1) * sizeof(IO_RUN) );
  2756. RtlCopyMemory( NewIoRuns,
  2757. CompressionContext->IoRuns,
  2758. NTFS_MAX_PARALLEL_IOS * sizeof(IO_RUN) );
  2759. IoRuns = CompressionContext->IoRuns = NewIoRuns;
  2760. CompressionContext->AllocatedRuns = CompressionUnitInClusters + NTFS_MAX_PARALLEL_IOS - 1;
  2761. }
  2762. //
  2763. // We remember each piece of a parallel run by saving the
  2764. // essential information in the IoRuns array. The tranfers
  2765. // will be started up in parallel below.
  2766. //
  2767. IoRuns[*NumberRuns].StartingVbo = TempVbo;
  2768. IoRuns[*NumberRuns].StartingLbo = NextLbo;
  2769. IoRuns[*NumberRuns].BufferOffset = BufferOffset;
  2770. IoRuns[*NumberRuns].ByteCount = NextByteCount;
  2771. *NumberRuns += 1;
  2772. }
  2773. //
  2774. // Now adjust everything for the next pass through the loop.
  2775. //
  2776. BufferOffset += NextByteCount;
  2777. TempVbo = TempVbo + NextByteCount;
  2778. FinalCompressedSize -= NextByteCount;
  2779. ClusterOffset = 0;
  2780. }
  2781. } else if (DeleteCount != 0) {
  2782. //
  2783. // Call our reallocate routine.
  2784. //
  2785. NtfsReallocateRange( IrpContext,
  2786. Scb,
  2787. DeleteVcn,
  2788. DeleteCount,
  2789. 0,
  2790. 0,
  2791. NULL );
  2792. ChangeAllocation = TRUE;
  2793. }
  2794. }
  2795. //
  2796. // For the compressed stream, we need to advance the buffer offset to the
  2797. // end of a compression unit, so that if adjacent compression units are
  2798. // being written, we correctly advance over the unused clusters in the
  2799. // compressed stream.
  2800. //
  2801. if (FlagOn(StreamFlags, COMPRESSED_STREAM)) {
  2802. BufferOffset += CompressionUnit - (ULONG)(TempVbo & (CompressionUnit - 1));
  2803. }
  2804. //
  2805. // If this is the unnamed data stream then we need to update
  2806. // the total allocated size.
  2807. //
  2808. if (ChangeAllocation &&
  2809. FlagOn( Scb->ScbState, SCB_STATE_UNNAMED_DATA ) &&
  2810. (Scb->Fcb->Info.AllocatedLength != Scb->TotalAllocated)) {
  2811. Scb->Fcb->Info.AllocatedLength = Scb->TotalAllocated;
  2812. SetFlag( Scb->Fcb->InfoFlags, FCB_INFO_CHANGED_ALLOC_SIZE );
  2813. }
  2814. UncompressedOffset += CompressionUnit - CompressionUnitOffset;
  2815. //
  2816. // Now reduce the byte counts by the compression unit we just
  2817. // transferred.
  2818. //
  2819. if ((CompressionUnit != 0) && (ByteCount > CompressionUnit)) {
  2820. StartVbo += (CompressionUnit - SparseFileBias);
  2821. ByteCount -= CompressionUnit;
  2822. } else {
  2823. StartVbo += (ByteCount - SparseFileBias);
  2824. ByteCount = 0;
  2825. leave;
  2826. }
  2827. CompressionUnitOffset = 0;
  2828. }
  2829. } finally {
  2830. NtfsUnpinBcb( IrpContext, &Bcb );
  2831. }
  2832. //
  2833. // See if we need to advance ValidDataToDisk.
  2834. //
  2835. if (FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_COMPRESSION_MASK ) &&
  2836. (StartVbo > Scb->ValidDataToDisk)) {
  2837. ASSERT( (Scb->ScbSnapshot != NULL) && (Scb->ScbSnapshot->ValidDataToDisk == Scb->ValidDataToDisk) );
  2838. Scb->ValidDataToDisk = StartVbo;
  2839. }
  2840. return ByteCount + ReturnByteCount;
  2841. }
  2842. }
  2843. //
  2844. // Internal support routine
  2845. //
  2846. NTSTATUS
  2847. NtfsFinishBuffers (
  2848. IN PIRP_CONTEXT IrpContext,
  2849. IN PIRP Irp,
  2850. IN PSCB Scb,
  2851. IN PVBO StartingVbo,
  2852. IN ULONG ByteCount,
  2853. IN ULONG NumberRuns,
  2854. IN PCOMPRESSION_CONTEXT CompressionContext,
  2855. IN ULONG StreamFlags
  2856. )
  2857. /*++
  2858. Routine Description:
  2859. This routine performs post processing for noncached transfers of
  2860. compressed or encrypted data. For reads, the decompression actually takes
  2861. place here. For reads and writes, all necessary cleanup operations are
  2862. performed.
  2863. Arguments:
  2864. IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
  2865. Irp - Supplies the requesting Irp.
  2866. Scb - Supplies the stream file to act on.
  2867. StartingVbo - The starting point for the operation.
  2868. ByteCount - The lengh of the operation.
  2869. CompressionContext - Supplies information related to the compression
  2870. filled in by NtfsPrepareBuffers.
  2871. StreamFlags - Supplies either 0 or some combination of COMPRESSED_STREAM
  2872. and ENCRYPTED_STREAM
  2873. Return Value:
  2874. Status from the operation
  2875. --*/
  2876. {
  2877. VCN CurrentVcn, NextVcn, BeyondLastVcn;
  2878. LCN NextLcn;
  2879. ULONG Run;
  2880. ULONG NextByteCount;
  2881. LONGLONG NextClusterCount;
  2882. LARGE_INTEGER OffsetWithinFile;
  2883. BOOLEAN NextIsAllocated;
  2884. BOOLEAN AlreadyFilled;
  2885. PVOID SystemBuffer = NULL;
  2886. ULONG CompressionUnit, CompressionUnitInClusters;
  2887. ULONG StartingOffset, UncompressedOffset, CompressedOffset;
  2888. ULONG CompressedSize;
  2889. LONGLONG UncompressedSize;
  2890. LONGLONG CurrentAllocatedClusterCount;
  2891. NTSTATUS Status = STATUS_SUCCESS;
  2892. PVCB Vcb = Scb->Vcb;
  2893. PAGED_CODE();
  2894. //
  2895. // If this is a normal termination of a read, then let's give him the
  2896. // data...
  2897. //
  2898. ASSERT( (Scb->CompressionUnit != 0) ||
  2899. (Scb->EncryptionContext != NULL) ||
  2900. FlagOn( StreamFlags, COMPRESSED_STREAM ) );
  2901. //
  2902. // We never want to be here if this is the read raw encrypted data case.
  2903. //
  2904. ASSERT( !FlagOn( StreamFlags, ENCRYPTED_STREAM ) );
  2905. if (IrpContext->MajorFunction == IRP_MJ_READ) {
  2906. //
  2907. // If there is an encryption context then transform the data.
  2908. //
  2909. if ((Scb->EncryptionContext != NULL) &&
  2910. (NtfsData.EncryptionCallBackTable.AfterReadProcess != NULL)) {
  2911. ASSERT ( NtfsIsTypeCodeEncryptible( Scb->AttributeTypeCode ) );
  2912. //
  2913. // If the compression context has a buffer then we will use that.
  2914. //
  2915. if (CompressionContext->CompressionBuffer != NULL) {
  2916. SystemBuffer = CompressionContext->CompressionBuffer;
  2917. } else {
  2918. SystemBuffer = NtfsMapUserBuffer( Irp, NormalPagePriority );
  2919. }
  2920. //
  2921. // Now look at each run of real data heading coming from the disk and
  2922. // let the encryption driver decrypt it.
  2923. //
  2924. for ( Run = 0; Run < NumberRuns; Run++ ) {
  2925. OffsetWithinFile.QuadPart = CompressionContext->IoRuns[Run].StartingVbo;
  2926. Status = NtfsData.EncryptionCallBackTable.AfterReadProcess(
  2927. Add2Ptr(SystemBuffer, CompressionContext->IoRuns[Run].BufferOffset),
  2928. &OffsetWithinFile,
  2929. CompressionContext->IoRuns[Run].ByteCount,
  2930. Scb->EncryptionContext);
  2931. if (!NT_SUCCESS( Status )) {
  2932. return Status;
  2933. }
  2934. }
  2935. if (!NT_SUCCESS( Status )) {
  2936. return Status;
  2937. }
  2938. }
  2939. //
  2940. // There may be a compression unit but there is no completion to do
  2941. // i.e this is an uncompressed sparse file.
  2942. // We might be operating on an encrypted file as well.
  2943. // In either case just exit if the file is not compressed.
  2944. //
  2945. if (!FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_COMPRESSION_MASK )) {
  2946. if (SystemBuffer != NULL) {
  2947. KeFlushIoBuffers( Irp->MdlAddress, TRUE, FALSE );
  2948. }
  2949. return STATUS_SUCCESS;
  2950. }
  2951. ASSERT( Scb->CompressionUnit != 0 );
  2952. if (!FlagOn( StreamFlags, COMPRESSED_STREAM )) {
  2953. //
  2954. // Initialize remaining context for the loop.
  2955. //
  2956. CompressionUnit = Scb->CompressionUnit;
  2957. CompressionUnitInClusters = ClustersFromBytes(Vcb, CompressionUnit);
  2958. CompressedOffset = 0;
  2959. UncompressedOffset = 0;
  2960. Status = STATUS_SUCCESS;
  2961. //
  2962. // Map the user buffer.
  2963. //
  2964. SystemBuffer = (PVOID)((PCHAR)CompressionContext->SystemBuffer +
  2965. CompressionContext->SystemBufferOffset);
  2966. //
  2967. // Calculate the first Vcn and offset within the compression
  2968. // unit of the start of the transfer, and lookup the first
  2969. // run.
  2970. //
  2971. StartingOffset = *((PULONG)StartingVbo) & (CompressionUnit - 1);
  2972. CurrentVcn = LlClustersFromBytes(Vcb, *StartingVbo - StartingOffset);
  2973. NextIsAllocated =
  2974. NtfsLookupAllocation( IrpContext,
  2975. Scb,
  2976. CurrentVcn,
  2977. &NextLcn,
  2978. &CurrentAllocatedClusterCount,
  2979. NULL,
  2980. NULL );
  2981. //
  2982. // Set NextIsAllocated and NextLcn as the Mcb package would, to show if
  2983. // we are off the end.
  2984. //
  2985. if (!NextIsAllocated) {
  2986. NextLcn = UNUSED_LCN;
  2987. }
  2988. NextIsAllocated = (BOOLEAN)(CurrentAllocatedClusterCount < (MAXLONGLONG - CurrentVcn));
  2989. //
  2990. // If this is actually a hole or there was no entry in the Mcb, then
  2991. // set CurrentAllocatedClusterCount to zero so we will always make the first
  2992. // pass in the embedded while loop below.
  2993. //
  2994. if (!NextIsAllocated || (NextLcn == UNUSED_LCN)) {
  2995. CurrentAllocatedClusterCount = 0;
  2996. }
  2997. //
  2998. // Prepare for the initial Mcb scan below by pretending that the
  2999. // next run has been looked up, and is a contiguous run of 0 clusters!
  3000. //
  3001. NextVcn = CurrentVcn + CurrentAllocatedClusterCount;
  3002. NextClusterCount = 0;
  3003. //
  3004. // Remember the last Vcn we should look up.
  3005. //
  3006. BeyondLastVcn = BlockAlign( *StartingVbo + ByteCount, (LONG)CompressionUnit );
  3007. BeyondLastVcn = LlClustersFromBytesTruncate( Vcb, BeyondLastVcn );
  3008. //
  3009. // Loop to return the data.
  3010. //
  3011. while (ByteCount != 0) {
  3012. //
  3013. // Loop to determine the compressed size of the next compression
  3014. // unit. I.e., loop until we either find the end of the current
  3015. // range of contiguous Vcns, or until we find that the current
  3016. // compression unit is fully allocated.
  3017. //
  3018. while (NextIsAllocated &&
  3019. (CurrentAllocatedClusterCount < CompressionUnitInClusters) &&
  3020. ((CurrentVcn + CurrentAllocatedClusterCount) == NextVcn)) {
  3021. if ((CurrentVcn + CurrentAllocatedClusterCount) > NextVcn) {
  3022. NtfsRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR, NULL, Scb->Fcb );
  3023. }
  3024. CurrentAllocatedClusterCount = CurrentAllocatedClusterCount + NextClusterCount;
  3025. //
  3026. // Loop to find the next allocated Vcn, or the end of the Mcb.
  3027. // None of the interfaces using RangePtr and Index as inputs
  3028. // can be used here, such as NtfsGetSequentialMcbEntry, because
  3029. // we do not have the Scb main resource acquired, and writers can
  3030. // be moving stuff around in parallel.
  3031. //
  3032. while (TRUE) {
  3033. //
  3034. // Set up NextVcn for next call
  3035. //
  3036. NextVcn += NextClusterCount;
  3037. //
  3038. // Exit if we are past the end of the range being decompressed.
  3039. //
  3040. if (NextVcn >= BeyondLastVcn) {
  3041. NextIsAllocated = TRUE;
  3042. break;
  3043. }
  3044. NextIsAllocated = NtfsLookupAllocation( IrpContext,
  3045. Scb,
  3046. NextVcn,
  3047. &NextLcn,
  3048. &NextClusterCount,
  3049. NULL,
  3050. NULL );
  3051. //
  3052. // Set NextIsAllocated and NextLcn as the Mcb package would, to show if
  3053. // we are off the end.
  3054. //
  3055. if (!NextIsAllocated) {
  3056. NextLcn = UNUSED_LCN;
  3057. }
  3058. NextIsAllocated = (BOOLEAN)(NextClusterCount < (MAXLONGLONG - NextVcn));
  3059. //
  3060. // Get out if we hit the end or see something allocated.
  3061. //
  3062. if (!NextIsAllocated || (NextLcn != UNUSED_LCN)) {
  3063. break;
  3064. }
  3065. }
  3066. }
  3067. //
  3068. // The compression unit is fully allocated.
  3069. //
  3070. if (CurrentAllocatedClusterCount >= CompressionUnitInClusters) {
  3071. CompressedSize = CompressionUnit;
  3072. CurrentAllocatedClusterCount = CurrentAllocatedClusterCount - CompressionUnitInClusters;
  3073. //
  3074. // Otherwise calculate how much is allocated at the current Vcn
  3075. // (if any).
  3076. //
  3077. } else {
  3078. CompressedSize = BytesFromClusters(Vcb, (ULONG)CurrentAllocatedClusterCount);
  3079. CurrentAllocatedClusterCount = 0;
  3080. }
  3081. //
  3082. // The next time through this loop, we will be working on the next
  3083. // compression unit.
  3084. //
  3085. CurrentVcn = CurrentVcn + CompressionUnitInClusters;
  3086. //
  3087. // Calculate uncompressed size of the desired fragment, or
  3088. // entire compression unit.
  3089. //
  3090. NtfsAcquireFsrtlHeader( Scb );
  3091. UncompressedSize = Scb->Header.FileSize.QuadPart -
  3092. (*StartingVbo + UncompressedOffset);
  3093. NtfsReleaseFsrtlHeader( Scb );
  3094. if (UncompressedSize > CompressionUnit) {
  3095. (ULONG)UncompressedSize = CompressionUnit;
  3096. }
  3097. //
  3098. // Calculate how much we want now, based on StartingOffset and
  3099. // ByteCount.
  3100. //
  3101. NextByteCount = CompressionUnit - StartingOffset;
  3102. if (NextByteCount > ByteCount) {
  3103. NextByteCount = ByteCount;
  3104. }
  3105. //
  3106. // Practice safe access
  3107. //
  3108. try {
  3109. //
  3110. // There were no clusters allocated, return 0's.
  3111. //
  3112. AlreadyFilled = FALSE;
  3113. if (CompressedSize == 0) {
  3114. RtlZeroMemory( (PUCHAR)SystemBuffer + UncompressedOffset,
  3115. NextByteCount );
  3116. //
  3117. // The compression unit was fully allocated, just copy.
  3118. //
  3119. } else if (CompressedSize == CompressionUnit) {
  3120. RtlCopyMemory( (PUCHAR)SystemBuffer + UncompressedOffset,
  3121. CompressionContext->CompressionBuffer +
  3122. CompressedOffset + StartingOffset,
  3123. NextByteCount );
  3124. //
  3125. // Caller does not want the entire compression unit, decompress
  3126. // a fragment.
  3127. //
  3128. } else if (NextByteCount < CompressionUnit) {
  3129. //
  3130. // If we have not already allocated the workspace, then do it.
  3131. //
  3132. if (CompressionContext->WorkSpace == NULL) {
  3133. ULONG CompressWorkSpaceSize;
  3134. ULONG FragmentWorkSpaceSize;
  3135. ASSERT((Scb->AttributeFlags & ATTRIBUTE_FLAG_COMPRESSION_MASK) != 0);
  3136. (VOID) RtlGetCompressionWorkSpaceSize( (USHORT)((Scb->AttributeFlags & ATTRIBUTE_FLAG_COMPRESSION_MASK) + 1),
  3137. &CompressWorkSpaceSize,
  3138. &FragmentWorkSpaceSize );
  3139. //
  3140. // Allocate first from non-paged, then paged. The typical
  3141. // size of this workspace is just over a single page so
  3142. // if both allocations fail then the system is running
  3143. // a reduced capacity. Return an error to the user
  3144. // and let him retry.
  3145. //
  3146. CompressionContext->WorkSpace = NtfsAllocatePoolWithTagNoRaise( NonPagedPool, FragmentWorkSpaceSize, 'wftN' );
  3147. if (CompressionContext->WorkSpace == NULL) {
  3148. CompressionContext->WorkSpace =
  3149. NtfsAllocatePool( PagedPool, FragmentWorkSpaceSize );
  3150. }
  3151. }
  3152. while (TRUE) {
  3153. Status =
  3154. RtlDecompressFragment( (USHORT)((Scb->AttributeFlags & ATTRIBUTE_FLAG_COMPRESSION_MASK) + 1),
  3155. (PUCHAR)SystemBuffer + UncompressedOffset,
  3156. NextByteCount,
  3157. CompressionContext->CompressionBuffer + CompressedOffset,
  3158. CompressedSize,
  3159. StartingOffset,
  3160. (PULONG)&UncompressedSize,
  3161. CompressionContext->WorkSpace );
  3162. ASSERT(NT_SUCCESS( Status ) || !NtfsStopOnDecompressError);
  3163. if (NT_SUCCESS(Status)) {
  3164. RtlZeroMemory( (PUCHAR)SystemBuffer + UncompressedOffset + (ULONG)UncompressedSize,
  3165. NextByteCount - (ULONG)UncompressedSize );
  3166. break;
  3167. } else {
  3168. //
  3169. // The compressed buffer could have been bad. We need to fill
  3170. // it with a pattern and get on with life. Someone could be
  3171. // faulting it in just to overwrite it, or it could be a rare
  3172. // case of corruption. We fill the data with a pattern, but
  3173. // we must return success so a pagefault will succeed. We
  3174. // do this once, then loop back to decompress what we can.
  3175. //
  3176. Status = STATUS_SUCCESS;
  3177. if (!AlreadyFilled) {
  3178. RtlFillMemory( (PUCHAR)SystemBuffer + UncompressedOffset,
  3179. NextByteCount,
  3180. 0xDF );
  3181. AlreadyFilled = TRUE;
  3182. } else {
  3183. break;
  3184. }
  3185. }
  3186. }
  3187. //
  3188. // Decompress the entire compression unit.
  3189. //
  3190. } else {
  3191. ASSERT( StartingOffset == 0 );
  3192. while (TRUE) {
  3193. Status =
  3194. RtlDecompressBuffer( (USHORT)((Scb->AttributeFlags & ATTRIBUTE_FLAG_COMPRESSION_MASK) + 1),
  3195. (PUCHAR)SystemBuffer + UncompressedOffset,
  3196. NextByteCount,
  3197. CompressionContext->CompressionBuffer + CompressedOffset,
  3198. CompressedSize,
  3199. (PULONG)&UncompressedSize );
  3200. ASSERT(NT_SUCCESS( Status ) || !NtfsStopOnDecompressError);
  3201. if (NT_SUCCESS(Status)) {
  3202. RtlZeroMemory( (PUCHAR)SystemBuffer + UncompressedOffset + (ULONG)UncompressedSize,
  3203. NextByteCount - (ULONG)UncompressedSize );
  3204. break;
  3205. } else {
  3206. //
  3207. // The compressed buffer could have been bad. We need to fill
  3208. // it with a pattern and get on with life. Someone could be
  3209. // faulting it in just to overwrite it, or it could be a rare
  3210. // case of corruption. We fill the data with a pattern, but
  3211. // we must return success so a pagefault will succeed. We
  3212. // do this once, then loop back to decompress what we can.
  3213. //
  3214. Status = STATUS_SUCCESS;
  3215. if (!AlreadyFilled) {
  3216. RtlFillMemory( (PUCHAR)SystemBuffer + UncompressedOffset,
  3217. NextByteCount,
  3218. 0xDB );
  3219. AlreadyFilled = TRUE;
  3220. } else {
  3221. break;
  3222. }
  3223. }
  3224. }
  3225. }
  3226. //
  3227. // If its an unexpected error then
  3228. // Probably Gary's decompression routine faulted, but blame it on
  3229. // the user buffer!
  3230. //
  3231. } except(NtfsCompressionFilter(IrpContext, GetExceptionInformation())) {
  3232. Status = GetExceptionCode();
  3233. if (!FsRtlIsNtstatusExpected( Status )) {
  3234. Status = STATUS_INVALID_USER_BUFFER;
  3235. }
  3236. }
  3237. if (!NT_SUCCESS(Status)) {
  3238. break;
  3239. }
  3240. //
  3241. // Advance these fields for the next pass through.
  3242. //
  3243. StartingOffset = 0;
  3244. UncompressedOffset += NextByteCount;
  3245. CompressedOffset += CompressedSize;
  3246. ByteCount -= NextByteCount;
  3247. }
  3248. //
  3249. // We now flush the user's buffer to memory.
  3250. //
  3251. KeFlushIoBuffers( CompressionContext->SavedMdl, TRUE, FALSE );
  3252. }
  3253. //
  3254. // For compressed writes we just checkpoint the transaction and
  3255. // free all snapshots and resources, then get the Scb back. Only do this if the
  3256. // request is for the same Irp as the original Irp. We don't want to checkpoint
  3257. // if called from NtfsWriteClusters.
  3258. //
  3259. } else if (Irp == IrpContext->OriginatingIrp) {
  3260. if (CompressionContext->ScbAcquired) {
  3261. BOOLEAN Reinsert = FALSE;
  3262. NtfsCheckpointCurrentTransaction( IrpContext );
  3263. //
  3264. // We want to empty the exclusive Fcb list but still hold
  3265. // the current file. Go ahead and remove it from the exclusive
  3266. // list and reinsert it after freeing the other entries.
  3267. //
  3268. while (!IsListEmpty(&IrpContext->ExclusiveFcbList)) {
  3269. //
  3270. // If this is the Scb for this Fcb then remove it from the list.
  3271. // We have to preserve the number of times this Fcb may have been
  3272. // acquired outside of PrepareBuffers.
  3273. //
  3274. if ((PFCB)CONTAINING_RECORD( IrpContext->ExclusiveFcbList.Flink,
  3275. FCB,
  3276. ExclusiveFcbLinks ) == Scb->Fcb) {
  3277. RemoveEntryList( &Scb->Fcb->ExclusiveFcbLinks );
  3278. Reinsert = TRUE;
  3279. } else {
  3280. NtfsReleaseFcb( IrpContext,
  3281. (PFCB)CONTAINING_RECORD(IrpContext->ExclusiveFcbList.Flink,
  3282. FCB,
  3283. ExclusiveFcbLinks ));
  3284. }
  3285. }
  3286. ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_RELEASE_USN_JRNL |
  3287. IRP_CONTEXT_FLAG_RELEASE_MFT );
  3288. if (Reinsert) {
  3289. InsertHeadList( &IrpContext->ExclusiveFcbList,
  3290. &Scb->Fcb->ExclusiveFcbLinks );
  3291. //
  3292. // Release the Scb if we acquired it in PrepareBuffers. It is
  3293. // important that we have released the Scb before going back
  3294. // and faulting into the data section. Otherwise we could
  3295. // hit a collided page fault deadlock.
  3296. //
  3297. NtfsReleaseScb( IrpContext, Scb );
  3298. CompressionContext->ScbAcquired = FALSE;
  3299. }
  3300. }
  3301. }
  3302. return Status;
  3303. }
  3304. PMDL
  3305. NtfsLockFileRange (
  3306. IN PIRP_CONTEXT IrpContext,
  3307. IN PSCB Scb,
  3308. IN LONGLONG Offset,
  3309. IN ULONG Length
  3310. )
  3311. /*++
  3312. Routine Description:
  3313. This function maps the given range of file into the cachemanager space and
  3314. then probes and locks it down
  3315. Arguments:
  3316. Scb - Supplies the stream file to act on.
  3317. Offset - The starting point to be probed and locked
  3318. Length - The lengh of the operation.
  3319. Return Value:
  3320. PMDL - a mdl representing the locked area - this mdl must be unlocked and freed by the caller
  3321. --*/
  3322. {
  3323. NTSTATUS Status;
  3324. PBCB Bcb;
  3325. PVOID Buffer;
  3326. PMDL Mdl = NULL;
  3327. //
  3328. // File must be cached
  3329. //
  3330. ASSERT( Scb->FileObject != NULL);
  3331. //
  3332. // Map the offset into the address space
  3333. //
  3334. CcMapData( Scb->FileObject, (PLARGE_INTEGER)&Offset, Length, TRUE, &Bcb, &Buffer );
  3335. #ifdef MAPCOUNT_DBG
  3336. IrpContext->MapCount++;
  3337. #endif
  3338. //
  3339. // Lock the data into memory Don't tell Mm here that we plan to write it, as he sets
  3340. // dirty now and at the unlock below if we do.
  3341. //
  3342. try {
  3343. //
  3344. // Now attempt to allocate an Mdl to describe the mapped data.
  3345. //
  3346. Mdl = IoAllocateMdl( Buffer, Length, FALSE, FALSE, NULL );
  3347. if (Mdl == NULL) {
  3348. NtfsRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES, NULL, NULL );
  3349. }
  3350. MmProbeAndLockPages( Mdl, KernelMode, IoReadAccess );
  3351. //
  3352. // Catch any raises here and clean up appropriately.
  3353. //
  3354. } except(EXCEPTION_EXECUTE_HANDLER) {
  3355. Status = GetExceptionCode();
  3356. CcUnpinData( Bcb );
  3357. #ifdef MAPCOUNT_DBG
  3358. IrpContext->MapCount--;
  3359. #endif
  3360. if (Mdl != NULL) {
  3361. IoFreeMdl( Mdl );
  3362. Mdl = NULL;
  3363. }
  3364. NtfsRaiseStatus( IrpContext,
  3365. FsRtlIsNtstatusExpected(Status) ? Status : STATUS_UNEXPECTED_IO_ERROR,
  3366. NULL,
  3367. NULL );
  3368. }
  3369. CcUnpinData( Bcb );
  3370. #ifdef MAPCOUNT_DBG
  3371. IrpContext->MapCount--;
  3372. #endif
  3373. return Mdl;
  3374. }
  3375. VOID
  3376. NtfsZeroEndOfSector (
  3377. IN PIRP_CONTEXT IrpContext,
  3378. IN PIRP Irp,
  3379. IN PSCB Scb,
  3380. IN LONGLONG Offset,
  3381. IN BOOLEAN Cached
  3382. )
  3383. /*++
  3384. Routine Description:
This function zeroes from the given offset to the next sector boundary directly
on disk. Particularly if the file is cached, the caller must synchronize in some fashion
to prevent the sector from being written at the same time through other paths - i.e.,
own IoAtEof or the paging resource exclusive. Also, this should only be called for non-sparse / non-compressed files
  3389. Arguments:
  3390. Scb - Supplies the stream file to act on.
  3391. Offset - The starting offset to zero to its sector boundary
  3392. Return Value:
  3393. None - raises on error
  3394. --*/
  3395. {
  3396. PVCB Vcb = Scb->Fcb->Vcb;
  3397. ULONG BufferLength = Vcb->BytesPerSector;
  3398. PMDL Mdl = NULL;
  3399. PMDL OriginalMdl = Irp->MdlAddress;
  3400. PVOID Buffer = NULL;
  3401. LCN Lcn;
  3402. LONGLONG ClusterCount;
  3403. LONGLONG LogicalOffset;
  3404. LONGLONG VirtualOffset;
  3405. PIO_STACK_LOCATION IrpSp = IoGetCurrentIrpStackLocation( Irp );
  3406. NTSTATUS Status;
  3407. LOGICAL Wait = FlagOn( IrpContext->State, IRP_CONTEXT_STATE_WAIT );
  3408. ASSERT( !FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_COMPRESSION_MASK ) );
  3409. //
  3410. // Decide whether to use cached or noncached path
  3411. //
  3412. if (Cached) {
  3413. NtfsCreateMdlAndBuffer( IrpContext,
  3414. Scb,
  3415. RESERVED_BUFFER_ONE_NEEDED,
  3416. &BufferLength,
  3417. &Mdl,
  3418. &Buffer );
  3419. try {
  3420. RtlZeroMemory( Buffer, Vcb->BytesPerSector - (LONG)(Offset % Vcb->BytesPerSector) );
  3421. CcCopyWrite( IrpSp->FileObject, (PLARGE_INTEGER)&Offset, Vcb->BytesPerSector - (LONG)(Offset % Vcb->BytesPerSector), TRUE, Buffer );
  3422. } finally {
  3423. NtfsDeleteMdlAndBuffer( Mdl, Buffer );
  3424. }
  3425. } else {
  3426. //
  3427. // Find the lcn that contains the cluster in question
  3428. //
  3429. if (NtfsLookupAllocation( IrpContext, Scb, LlClustersFromBytesTruncate( Vcb, Offset ), &Lcn, &ClusterCount, NULL, NULL )) {
  3430. try {
  3431. //
  3432. // Set calls to be temp. synchronous
  3433. //
  3434. SetFlag( IrpContext->State, IRP_CONTEXT_STATE_WAIT );
  3435. NtfsCreateMdlAndBuffer( IrpContext,
  3436. Scb,
  3437. RESERVED_BUFFER_ONE_NEEDED,
  3438. &BufferLength,
  3439. &Mdl,
  3440. &Buffer );
  3441. Irp->MdlAddress = Mdl;
  3442. //
  3443. // The logical offset on disk is at the lcn we found + the offset within that cluster of the
  3444. // offset rounded down to the nearest sector
  3445. //
  3446. LogicalOffset = LlBytesFromClusters( Vcb, Lcn ) + Offset - BlockAlignTruncate( Offset, (LONG)Vcb->BytesPerCluster );
  3447. LogicalOffset = BlockAlignTruncate( LogicalOffset, (LONG)Vcb->BytesPerSector );
  3448. //
  3449. // First read the sector
  3450. //
  3451. NtfsSingleAsync( IrpContext,
  3452. Vcb->TargetDeviceObject,
  3453. LogicalOffset,
  3454. Vcb->BytesPerSector,
  3455. Irp,
  3456. IRP_MJ_READ,
  3457. 0 );
  3458. NtfsWaitSync( IrpContext );
  3459. NtfsNormalizeAndCleanupTransaction( IrpContext,
  3460. &Irp->IoStatus.Status,
  3461. TRUE,
  3462. STATUS_UNEXPECTED_IO_ERROR );
  3463. //
  3464. // Decrypt the buffer if its encrypted
  3465. //
  3466. if ((Scb->EncryptionContext != NULL) &&
  3467. (NtfsData.EncryptionCallBackTable.AfterReadProcess != NULL)) {
  3468. VirtualOffset = BlockAlignTruncate( Offset, (LONG)Vcb->BytesPerSector );
  3469. Status = NtfsData.EncryptionCallBackTable.AfterReadProcess( Buffer,
  3470. (PLARGE_INTEGER)&VirtualOffset,
  3471. Vcb->BytesPerSector,
  3472. Scb->EncryptionContext );
  3473. if (!NT_SUCCESS( Status )) {
  3474. NtfsRaiseStatus( IrpContext, Status, &Scb->Fcb->FileReference, Scb->Fcb );
  3475. }
  3476. }
  3477. //
  3478. // Clear return info field
  3479. //
  3480. Irp->IoStatus.Information = 0;
  3481. //
  3482. // Zero out the remainder of the sector
  3483. //
  3484. RtlZeroMemory( Add2Ptr( Buffer, (LONG)(Offset % Vcb->BytesPerSector )), Vcb->BytesPerSector - (LONG)(Offset % Vcb->BytesPerSector) );
  3485. //
  3486. // Re-ecrypt the buffer if its encrypted
  3487. //
  3488. if ((Scb->EncryptionContext != NULL) &&
  3489. (NtfsData.EncryptionCallBackTable.BeforeWriteProcess != NULL)) {
  3490. Status = NtfsData.EncryptionCallBackTable.BeforeWriteProcess( Buffer,
  3491. Buffer,
  3492. (PLARGE_INTEGER)&VirtualOffset,
  3493. Vcb->BytesPerSector,
  3494. Scb->EncryptionContext );
  3495. if (!NT_SUCCESS( Status )) {
  3496. NtfsRaiseStatus( IrpContext, Status, &Scb->Fcb->FileReference, Scb->Fcb );
  3497. }
  3498. }
  3499. //
  3500. // Rewrite the sector back down
  3501. //
  3502. NtfsSingleAsync( IrpContext,
  3503. Vcb->TargetDeviceObject,
  3504. LogicalOffset,
  3505. Vcb->BytesPerSector,
  3506. Irp,
  3507. IRP_MJ_WRITE,
  3508. 0 );
  3509. NtfsWaitSync( IrpContext );
  3510. } finally {
  3511. //
  3512. // Reset to original wait state
  3513. //
  3514. if (!Wait) {
  3515. ClearFlag( IrpContext->State, IRP_CONTEXT_STATE_WAIT );
  3516. }
  3517. NtfsDeleteMdlAndBuffer( Mdl, Buffer );
  3518. Irp->MdlAddress = OriginalMdl;
  3519. }
  3520. }
  3521. }
  3522. return;
  3523. }
NTSTATUS
NtfsNonCachedIo (
    IN PIRP_CONTEXT IrpContext,
    IN PIRP Irp,
    IN PSCB Scb,
    IN VBO StartingVbo,
    IN ULONG ByteCount,
    IN ULONG StreamFlags
    )

/*++

Routine Description:

    This routine performs the non-cached disk io described in its parameters.
    The choice of a single run is made if possible, otherwise multiple runs
    are executed.

    Sparse files are supported.  If "holes" are encountered, then the user
    buffer is zeroed over the specified range.  This should only happen on
    reads during normal operation, but it can also happen on writes during
    restart, in which case it is also appropriate to zero the buffer.

Arguments:

    IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.

    Irp - Supplies the requesting Irp.

    Scb - Supplies the stream file to act on.

    StartingVbo - The starting point for the operation.

    ByteCount - The length of the operation.

    StreamFlags - Supplies either 0 or some combination of COMPRESSED_STREAM
        and ENCRYPTED_STREAM.

Return Value:

    The status of the transfer - STATUS_PENDING is returned for asynchronous
    requests that were successfully passed down.

--*/

{
    ULONG OriginalByteCount, RemainingByteCount;
    ULONG NumberRuns;
    IO_RUN IoRuns[NTFS_MAX_PARALLEL_IOS];
    COMPRESSION_CONTEXT CompressionContext;
    NTSTATUS Status = STATUS_SUCCESS;
    PMDL Mdl = NULL;
    LONGLONG LfsStartingVbo;
    PVCB Vcb = Scb->Fcb->Vcb;
    BOOLEAN Wait;
    UCHAR IrpSpFlags = 0;

#ifdef PERF_STATS
    BOOLEAN CreateNewFile = FALSE;
    BOOLEAN TrackIos = FALSE;
    LARGE_INTEGER StartIo;
    LARGE_INTEGER Now;
    PTOP_LEVEL_CONTEXT TopLevelContext = NtfsGetTopLevelContext();
#endif

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsNonCachedIo\n") );
    DebugTrace( 0, Dbg, ("Irp = %08lx\n", Irp) );
    DebugTrace( 0, Dbg, ("MajorFunction = %08lx\n", IrpContext->MajorFunction) );
    DebugTrace( 0, Dbg, ("Scb = %08lx\n", Scb) );
    DebugTrace( 0, Dbg, ("StartingVbo = %016I64x\n", StartingVbo) );
    DebugTrace( 0, Dbg, ("ByteCount = %08lx\n", ByteCount) );

    //
    //  Initialize some locals.
    //

    OriginalByteCount = ByteCount;
    Wait = (BOOLEAN) FlagOn( IrpContext->State, IRP_CONTEXT_STATE_WAIT );

    //
    //  Check if we need to do sequential writes.  Streams marked
    //  MODIFIED_NO_WRITE (e.g. log-related streams) must reach the disk
    //  in order and write-through.
    //

    if ((IrpContext->MajorFunction == IRP_MJ_WRITE) &&
        FlagOn( Scb->ScbState, SCB_STATE_MODIFIED_NO_WRITE )) {

        IrpSpFlags = SL_FT_SEQUENTIAL_WRITE | SL_WRITE_THROUGH;
    }

#ifdef PERF_STATS
    {
        //
        //  Walk back to the real top-level context (skipping the cache-manager
        //  marker values) to find out whether this I/O is on behalf of a
        //  create-new-file operation or one whose I/Os are being tracked.
        //

        if ((ULONG_PTR)TopLevelContext >= FSRTL_MAX_TOP_LEVEL_IRP_FLAG &&
            (ULONG_PTR)TopLevelContext != (FSRTL_CACHE_TOP_LEVEL_IRP | 0x80000000)) {

            if (TopLevelContext->SavedTopLevelIrp &&
                (ULONG_PTR)TopLevelContext->SavedTopLevelIrp >= FSRTL_MAX_TOP_LEVEL_IRP_FLAG &&
                (ULONG_PTR)TopLevelContext->SavedTopLevelIrp != (FSRTL_CACHE_TOP_LEVEL_IRP | 0x80000000) &&
                (((PTOP_LEVEL_CONTEXT)TopLevelContext->SavedTopLevelIrp)->Ntfs == 0x5346544e)) {

                TopLevelContext = (PTOP_LEVEL_CONTEXT) TopLevelContext->SavedTopLevelIrp;
            }

            if ((TopLevelContext->ThreadIrpContext->MajorFunction == IRP_MJ_CREATE) &&
                (TopLevelContext->ThreadIrpContext->MinorFunction == IRP_MN_CREATE_NEW)) {

                CreateNewFile = TRUE;
            }

            if (FlagOn( TopLevelContext->ThreadIrpContext->State, IRP_CONTEXT_STATE_TRACK_IOS )) {

                TrackIos = TRUE;
            }
        }
    }
#endif

    //
    //  Prepare the (first set) of buffers for I/O.
    //

    RtlZeroMemory( &CompressionContext, sizeof(COMPRESSION_CONTEXT) );
    CompressionContext.IoRuns = IoRuns;
    CompressionContext.AllocatedRuns = NTFS_MAX_PARALLEL_IOS;

    //
    //  Post-processing (decompress/decrypt) is required for compressed or
    //  encrypted streams, except when the caller is explicitly operating on
    //  the raw encrypted stream.
    //

    CompressionContext.FinishBuffersNeeded =
        ((Scb->CompressionUnit != 0) || (Scb->EncryptionContext != NULL)) &&
        !FlagOn( StreamFlags, ENCRYPTED_STREAM );

    try {

        //
        //  If this is a write to a compressed file, we want to make sure here
        //  that any fragments of compression units get locked in memory, so
        //  no one will be reading them into the cache while we are mucking with
        //  the Mcb, etc.  We do this right here at the top so that we have
        //  more stack(!), and we get this over with before we have to acquire
        //  the Scb exclusive.
        //

        if ((IrpContext->MajorFunction == IRP_MJ_WRITE) &&
            (Scb->CompressionUnit != 0) &&
            FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_COMPRESSION_MASK )) {

            LONGLONG TempOffset;
            LONGLONG TempRange;
            ULONG CompressionUnit = Scb->CompressionUnit;

#ifdef COMPRESS_ON_WIRE

            //
            //  For a compressed stream, just make sure the stream exists.
            //

            if (FlagOn( StreamFlags, COMPRESSED_STREAM )) {

                if (Scb->Header.FileObjectC == NULL) {

                    NtfsCreateInternalCompressedStream( IrpContext, Scb, FALSE, NULL );

                    //
                    //  If there is no one who will cause this stream to
                    //  be dereferenced then add an entry on the delayed
                    //  close queue for this.  We can do this test without
                    //  worrying about synchronization since it is OK to have
                    //  an extra entry in the delayed queue.
                    //

                    if ((Scb->CleanupCount == 0) &&
                        (Scb->Fcb->DelayedCloseCount == 0)) {

                        NtfsAddScbToFspClose( IrpContext, Scb, TRUE );
                    }
                }

                //
                //  This better be paging I/O, because we ignore the caller's buffer
                //  and write the entire compression unit out of the section.
                //
                //  We don't want to map in the data in the case where we are called
                //  from write clusters because MM is creating the section for the
                //  file.  Otherwise we will deadlock when Cc tries to create the
                //  section.
                //

            }
#endif

            if (
#ifdef COMPRESS_ON_WIRE
                !FlagOn( StreamFlags, COMPRESSED_STREAM ) &&
#endif
                ((Irp == IrpContext->OriginatingIrp) ||
                 (Scb->NonpagedScb->SegmentObject.SharedCacheMap != NULL))) {

                PMDL *TempMdl;

                if (Scb->FileObject == NULL) {

                    NtfsCreateInternalAttributeStream( IrpContext,
                                                       Scb,
                                                       FALSE,
                                                       &NtfsInternalUseFile[NONCACHEDIO_FILE_NUMBER] );

                    //
                    //  If there is no one who will cause this stream to
                    //  be dereferenced then add an entry on the delayed
                    //  close queue for this.  We can do this test without
                    //  worrying about synchronization since it is OK to have
                    //  an extra entry in the delayed queue.
                    //

                    if ((Scb->CleanupCount == 0) &&
                        (Scb->Fcb->DelayedCloseCount == 0)) {

                        NtfsAddScbToFspClose( IrpContext, Scb, TRUE );
                    }
                }

                //
                //  Lock the entire range rounded to its compression unit boundaries.
                //  First round the start of the range down to a compression unit and then
                //  round the top of the range up to one.  Each NtfsLockFileRange call is
                //  limited to a single VACB mapping granule, so walk the range in
                //  granule-sized pieces, chaining the resulting Mdls.
                //

                TempOffset = BlockAlignTruncate( StartingVbo, (LONG)CompressionUnit );
                TempRange = BlockAlign( StartingVbo + ByteCount, (LONG)CompressionUnit );
                TempMdl = &Mdl;

                do {

                    LONGLONG MapBoundary;
                    ULONG Range;

                    MapBoundary = BlockAlign( TempOffset + 1, VACB_MAPPING_GRANULARITY );
                    Range = (ULONG) min( TempRange - TempOffset, MapBoundary - TempOffset );

                    *TempMdl = NtfsLockFileRange( IrpContext,
                                                  Scb,
                                                  TempOffset,
                                                  Range );

                    TempOffset += Range;
                    TempMdl = &((*TempMdl)->Next );

                } while ( TempOffset != TempRange );

            } else {

                //
                //  This had better be a convert to non-resident.
                //

                ASSERT( StartingVbo == 0 );
                ASSERT( ByteCount <= Scb->CompressionUnit );
            }
        }

        //
        //  Check if we need to trim the write for the log file.
        //

        if ((PAGE_SIZE != LFS_DEFAULT_LOG_PAGE_SIZE) &&
            (Scb == Vcb->LogFileScb) &&
            (IrpContext->MajorFunction == IRP_MJ_WRITE)) {

            LfsStartingVbo = StartingVbo;

            LfsCheckWriteRange( &Vcb->LfsWriteData, &LfsStartingVbo, &ByteCount );

            //
            //  If the byte count is now zero then exit this routine.
            //

            if (ByteCount == 0) {

                Irp->IoStatus.Status = STATUS_SUCCESS;
                Irp->IoStatus.Information = ByteCount;

                DebugTrace( -1, Dbg, ("NtfsNonCachedIo -> %08lx\n", Irp->IoStatus.Status) );

                try_return( Status = Irp->IoStatus.Status );
            }

            //
            //  Adjust the buffer offset in the compression context if necessary.
            //

            CompressionContext.SystemBufferOffset = (ULONG) (LfsStartingVbo - StartingVbo);
            StartingVbo = LfsStartingVbo;
        }

        RemainingByteCount = NtfsPrepareBuffers( IrpContext,
                                                 Irp,
                                                 Scb,
                                                 &StartingVbo,
                                                 ByteCount,
                                                 StreamFlags,
                                                 &Wait,
                                                 &NumberRuns,
                                                 &CompressionContext );

        //
        //  If we are writing to an encrypted stream, now is the
        //  time to do the encryption, before we pass the buffer
        //  down to the disk driver below us.
        //

        if ((Scb->EncryptionContext != NULL) &&
            (IrpContext->MajorFunction == IRP_MJ_WRITE) &&
            (NtfsData.EncryptionCallBackTable.BeforeWriteProcess != NULL) &&
            (!FlagOn( StreamFlags, ENCRYPTED_STREAM ))) {

            ASSERT ( NtfsIsTypeCodeEncryptible( Scb->AttributeTypeCode ) );
            ASSERT( NumberRuns > 0 );

            NtfsEncryptBuffers( IrpContext,
                                Irp,
                                Scb,
                                StartingVbo,
                                NumberRuns,
                                &CompressionContext );
        }

        ASSERT( RemainingByteCount < ByteCount );

        if (FlagOn(Irp->Flags, IRP_PAGING_IO)) {

            CollectDiskIoStats(Vcb, Scb, IrpContext->MajorFunction, NumberRuns);
        }

        //
        //  See if the write covers a single valid run, and if so pass
        //  it on.  Notice that if there is a single run but it does not
        //  begin at the beginning of the buffer then we will still need to
        //  allocate an associated Irp for this.
        //

        if ((RemainingByteCount == 0) &&

            (((NumberRuns == 1) &&
              (CompressionContext.IoRuns[0].BufferOffset == 0)) ||

             (NumberRuns == 0))) {

            DebugTrace( 0, Dbg, ("Passing Irp on to Disk Driver\n") );

            //
            //  See if there is an allocated run.
            //

            if (NumberRuns == 1) {

                DebugTrace( 0, Dbg, ("One run\n") );

                //
                //  Now set up the Irp->IoStatus.  It will be modified by the
                //  completion routine in case of error or verify required.
                //

                Irp->IoStatus.Status = STATUS_SUCCESS;

                //
                //  We will continuously retry the I/O if we get a verify required
                //  back and can verify the volume.
                //

                while (TRUE) {

                    //
                    //  Do the I/O and wait for it to finish.
                    //

#ifdef PERF_STATS
                    if (TrackIos) {
                        TopLevelContext->ThreadIrpContext->Ios += 1;
                    }

                    if (CreateNewFile) {
                        InterlockedIncrement( &IrpContext->Vcb->IosPerCreates );
                        //KeQueryTickCount( &StartIo );
                        StartIo = KeQueryPerformanceCounter( NULL );
                    }
#endif
                    NtfsSingleAsync( IrpContext,
                                     Vcb->TargetDeviceObject,
                                     CompressionContext.IoRuns[0].StartingLbo,
                                     CompressionContext.IoRuns[0].ByteCount,
                                     Irp,
                                     IrpContext->MajorFunction,
                                     IrpSpFlags );

                    //
                    //  If this is an asynch transfer we return STATUS_PENDING.
                    //  The completion routine owns the Irp from here; the
                    //  finally clause below still runs for cleanup.
                    //

                    if (!Wait) {

                        DebugTrace( -1, Dbg, ("NtfsNonCachedIo -> STATUS_PENDING\n") );

                        try_return(Status = STATUS_PENDING);

                    } else {

                        NtfsWaitSync( IrpContext );
#ifdef PERF_STATS
                        if (CreateNewFile) {
                            //KeQueryTickCount( &Now );
                            Now = KeQueryPerformanceCounter( NULL );
                            IrpContext->Vcb->TimePerCreateIos += Now.QuadPart - StartIo.QuadPart;
                        }
#endif

                    }

                    //
                    //  If we didn't get a verify required back then break out of
                    //  this loop.
                    //

                    if (Irp->IoStatus.Status != STATUS_VERIFY_REQUIRED) { break; }

                    //
                    //  Otherwise we need to verify the volume, and if it doesn't
                    //  verify correctly then we dismount the volume and raise our
                    //  error.
                    //

                    if (!NtfsPerformVerifyOperation( IrpContext, Vcb )) {

                        //**** NtfsPerformDismountOnVcb( IrpContext, Vcb, TRUE, NULL );
                        ClearFlag( Vcb->VcbState, VCB_STATE_VOLUME_MOUNTED );

                        NtfsRaiseStatus( IrpContext, STATUS_FILE_INVALID, NULL, NULL );
                    }

                    //
                    //  The volume verified correctly so now clear the verify bit
                    //  and try the I/O again.
                    //

                    ClearFlag( Vcb->Vpb->RealDevice->Flags, DO_VERIFY_VOLUME );

                    //
                    //  Reset the status before retrying.
                    //

                    Irp->IoStatus.Status = STATUS_SUCCESS;
                }

                //
                //  See if we need to do a hot fix.  Hotfix if the request failed
                //  (except if called from WriteClusters) or we couldn't revert
                //  a USA block.
                //

                if ((!FT_SUCCESS( Irp->IoStatus.Status ) &&
                     ((IrpContext->MajorFunction != IRP_MJ_WRITE) ||
                      (Irp == IrpContext->OriginatingIrp))) ||

                    (FlagOn(Scb->ScbState, SCB_STATE_USA_PRESENT) &&
                     (IrpContext->MajorFunction == IRP_MJ_READ) &&
                     !NtfsVerifyAndRevertUsaBlock( IrpContext,
                                                   Scb,
                                                   Irp,
                                                   NULL,
                                                   0,
                                                   OriginalByteCount,
                                                   StartingVbo ))) {

                    //
                    //  Try to fix the problem.
                    //

                    NtfsFixDataError( IrpContext,
                                      Scb,
                                      Vcb->TargetDeviceObject,
                                      Irp,
                                      1,
                                      CompressionContext.IoRuns,
                                      IrpSpFlags );
                }

            //
            //  Show that we successfully read zeros for the deallocated range.
            //

            } else {

                Irp->IoStatus.Status = STATUS_SUCCESS;
                Irp->IoStatus.Information = ByteCount;
            }

            DebugTrace( -1, Dbg, ("NtfsNonCachedIo -> %08lx\n", Irp->IoStatus.Status) );

            try_return( Status = Irp->IoStatus.Status );
        }

        //
        //  If there are bytes remaining and we cannot wait, then we must
        //  post this request unless we are doing paging io.  Paging I/O is
        //  instead converted to a synchronous request right here.
        //

        if (!Wait && (RemainingByteCount != 0)) {

            if (!FlagOn( Irp->Flags, IRP_PAGING_IO )) {

                NtfsRaiseStatus( IrpContext, STATUS_CANT_WAIT, NULL, NULL );
            }

            Wait = TRUE;
            SetFlag( IrpContext->State, IRP_CONTEXT_STATE_WAIT );

            ClearFlag( IrpContext->Union.NtfsIoContext->Flags, NTFS_IO_CONTEXT_ASYNC );

            KeInitializeEvent( &IrpContext->Union.NtfsIoContext->Wait.SyncEvent,
                               NotificationEvent,
                               FALSE );
        }

        //
        //  Now set up the Irp->IoStatus.  It will be modified by the
        //  multi-completion routine in case of error or verify required.
        //

        Irp->IoStatus.Status = STATUS_SUCCESS;

        //
        //  Loop while there are still byte writes to satisfy.
        //

        while (TRUE) {

            //
            //  We will continuously retry the I/O if we get a verify required
            //  back and can verify the volume.  Note that we could have ended
            //  on a hole, and have no runs left.
            //

            if (NumberRuns != 0) {

                while (TRUE) {

#ifdef PERF_STATS
                    if (TrackIos) {
                        TopLevelContext->ThreadIrpContext->Ios += 1;
                    }

                    if (CreateNewFile) {
                        InterlockedIncrement( &IrpContext->Vcb->IosPerCreates );
                        //KeQueryTickCount( &StartIo );
                        StartIo = KeQueryPerformanceCounter( NULL );
                    }
#endif

                    //
                    //  Do the I/O and wait for it to finish.
                    //

                    NtfsMultipleAsync( IrpContext,
                                       Vcb->TargetDeviceObject,
                                       Irp,
                                       NumberRuns,
                                       CompressionContext.IoRuns,
                                       IrpSpFlags );

                    //
                    //  If this is an asynchronous transfer, then return STATUS_PENDING.
                    //

                    if (!Wait) {

                        DebugTrace( -1, Dbg, ("NtfsNonCachedIo -> STATUS_PENDING\n") );

                        try_return( Status = STATUS_PENDING );
                    }

                    NtfsWaitSync( IrpContext );

#ifdef PERF_STATS
                    if (CreateNewFile) {
                        // KeQueryTickCount( &Now );
                        Now = KeQueryPerformanceCounter( NULL );
                        IrpContext->Vcb->TimePerCreateIos += Now.QuadPart - StartIo.QuadPart;
                    }
#endif

#ifdef SYSCACHE_DEBUG
                    if (ScbIsBeingLogged( Scb ) && (IrpContext->MajorFunction == IRP_MJ_WRITE)) {
                        FsRtlLogSyscacheEvent( Scb, SCE_WRITE, SCE_FLAG_SUB_WRITE, CompressionContext.IoRuns[NumberRuns-1].StartingVbo, CompressionContext.IoRuns[NumberRuns-1].ByteCount, Irp->IoStatus.Status );
                    }
#endif

                    //
                    //  If we didn't get a verify required back then break out of
                    //  this loop.
                    //

                    if (Irp->IoStatus.Status != STATUS_VERIFY_REQUIRED) { break; }

                    //
                    //  Otherwise we need to verify the volume, and if it doesn't
                    //  verify correctly then we dismount the volume and raise our
                    //  error.
                    //

                    if (!NtfsPerformVerifyOperation( IrpContext, Vcb )) {

                        //**** NtfsPerformDismountOnVcb( IrpContext, Vcb, TRUE, NULL );
                        ClearFlag( Vcb->VcbState, VCB_STATE_VOLUME_MOUNTED );

                        NtfsRaiseStatus( IrpContext, STATUS_FILE_INVALID, NULL, NULL );
                    }

                    //
                    //  The volume verified correctly so now clear the verify bit
                    //  and try the I/O again.
                    //

                    ClearFlag( Vcb->Vpb->RealDevice->Flags, DO_VERIFY_VOLUME );

                    //
                    //  Reset the status before retrying.
                    //

                    Irp->IoStatus.Status = STATUS_SUCCESS;
                }

                //
                //  See if we need to do a hot fix.  Hotfix if the request failed
                //  (except if called from WriteClusters) or we couldn't revert
                //  a USA block.
                //

                if ((!FT_SUCCESS( Irp->IoStatus.Status ) &&
                     ((IrpContext->MajorFunction != IRP_MJ_WRITE) ||
                      (Irp == IrpContext->OriginatingIrp))) ||

                    (FlagOn(Scb->ScbState, SCB_STATE_USA_PRESENT) &&
                     (IrpContext->MajorFunction == IRP_MJ_READ) &&
                     !NtfsVerifyAndRevertUsaBlock( IrpContext,
                                                   Scb,
                                                   Irp,
                                                   NULL,
                                                   CompressionContext.IoRuns[0].BufferOffset,
                                                   OriginalByteCount -
                                                   CompressionContext.IoRuns[0].BufferOffset -
                                                   RemainingByteCount,
                                                   StartingVbo ))) {

                    //
                    //  Try to fix the problem.
                    //

                    NtfsFixDataError( IrpContext,
                                      Scb,
                                      Vcb->TargetDeviceObject,
                                      Irp,
                                      NumberRuns,
                                      CompressionContext.IoRuns,
                                      IrpSpFlags );
                }
            }

            //
            //  Stop on the first failure, or when the transfer is complete.
            //

            if (!NT_SUCCESS(Irp->IoStatus.Status) || (RemainingByteCount == 0)) { break; }

            //
            //  Post-process (decompress/decrypt) the portion just transferred
            //  before advancing to the next set of runs.
            //

            if (CompressionContext.FinishBuffersNeeded) {

                Irp->IoStatus.Status =
                Status =
                NtfsFinishBuffers( IrpContext,
                                   Irp,
                                   Scb,
                                   &StartingVbo,
                                   ByteCount - RemainingByteCount,
                                   NumberRuns,
                                   &CompressionContext,
                                   StreamFlags );

                if (!NT_SUCCESS(Irp->IoStatus.Status)) { break; }
            }

            //
            //  Advance the Vbo and system-buffer offset past the bytes just
            //  transferred, then prepare the next set of runs.
            //

            StartingVbo = StartingVbo + (ByteCount - RemainingByteCount);

            CompressionContext.SystemBufferOffset += ByteCount - RemainingByteCount;

            ByteCount = RemainingByteCount;

            //
            //  Reset this boolean for each iteration.
            //

            CompressionContext.DataTransformed = FALSE;

            RemainingByteCount = NtfsPrepareBuffers( IrpContext,
                                                     Irp,
                                                     Scb,
                                                     &StartingVbo,
                                                     ByteCount,
                                                     StreamFlags,
                                                     &Wait,
                                                     &NumberRuns,
                                                     &CompressionContext );

            //
            //  If we are writing to an encrypted stream, now is the
            //  time to do the encryption, before we pass the buffer
            //  down to the disk driver below us.
            //

            if ((Scb->EncryptionContext != NULL) &&
                (IrpContext->MajorFunction == IRP_MJ_WRITE) &&
                (NtfsData.EncryptionCallBackTable.BeforeWriteProcess != NULL) &&
                (!FlagOn( StreamFlags, ENCRYPTED_STREAM ))) {

                ASSERT ( NtfsIsTypeCodeEncryptible( Scb->AttributeTypeCode ) );
                ASSERT( NumberRuns > 0 );

                NtfsEncryptBuffers( IrpContext,
                                    Irp,
                                    Scb,
                                    StartingVbo,
                                    NumberRuns,
                                    &CompressionContext );
            }

            ASSERT( RemainingByteCount < ByteCount );

            if (FlagOn(Irp->Flags, IRP_PAGING_IO)) {

                CollectDiskIoStats(Vcb, Scb, IrpContext->MajorFunction, NumberRuns);
            }
        }

        Status = Irp->IoStatus.Status;

    try_exit: NOTHING;

    } finally {

        //
        //  If this is a compressed file and we got success, go do our normal
        //  post processing.
        //

        if (CompressionContext.FinishBuffersNeeded &&
            NT_SUCCESS(Status) &&
            (Status != STATUS_PENDING) &&
            !AbnormalTermination() ) {

            Irp->IoStatus.Status =
            Status =
            NtfsFinishBuffers( IrpContext,
                               Irp,
                               Scb,
                               &StartingVbo,
                               ByteCount - RemainingByteCount,
                               NumberRuns,
                               &CompressionContext,
                               StreamFlags );
        }

        //
        //  For writes, free any Mdls which may have been used to lock
        //  compression-unit fragments at the top of this routine.
        //

        if (Mdl != NULL) {

            PMDL TempMdl;

            do {

                TempMdl = Mdl->Next;
                MmUnlockPages( Mdl );
                IoFreeMdl( Mdl );
                Mdl = TempMdl;

            } while (Mdl != NULL);
        }

        //
        //  Cleanup the compression context.
        //

        NtfsDeallocateCompressionBuffer( Irp, &CompressionContext, FALSE );
    }

    //
    //  Now set up the final byte count if we got success.
    //

    if (Wait && NT_SUCCESS(Status)) {

        Irp->IoStatus.Information = OriginalByteCount;
    }

    DebugTrace( -1, Dbg, ("NtfsNonCachedIo -> %08lx\n", Status) );

    return Status;
}
  4121. VOID
  4122. NtfsNonCachedNonAlignedIo (
  4123. IN PIRP_CONTEXT IrpContext,
  4124. IN PIRP Irp,
  4125. IN PSCB Scb,
  4126. IN VBO StartingVbo,
  4127. IN ULONG ByteCount
  4128. )
  4129. /*++
  4130. Routine Description:
  4131. This routine performs the non-cached disk io described in its parameters.
  4132. This routine differs from the above in that the range does not have to be
  4133. sector aligned. This accomplished with the use of intermediate buffers.
  4134. devices where the sector is 1024 and callers have generated 512 byte aligned i/o.
  4135. This accomplished with the use of intermediate buffers.
  4136. Currently only read is supported.
  4137. Arguments:
  4138. IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
  4139. Irp - Supplies the requesting Irp.
  4140. Scb - Provides the stream to act on.
  4141. StartingVbo - The starting point for the operation.
  4142. ByteCount - The lengh of the operation.
  4143. Return Value:
  4144. None.
  4145. --*/
  4146. {
  4147. //
  4148. // Declare some local variables for enumeration through the
  4149. // runs of the file, and an array to store parameters for
  4150. // parallel I/Os
  4151. //
  4152. LBO NextLbo;
  4153. LCN NextLcn;
  4154. ULONG NextLcnOffset;
  4155. LONGLONG NextClusterCount;
  4156. BOOLEAN NextIsAllocated;
  4157. ULONG SectorOffset;
  4158. ULONG SectorSize;
  4159. ULONG BytesToCopy;
  4160. ULONG OriginalByteCount;
  4161. ULONG TailByteCount;
  4162. VBO OriginalStartingVbo;
  4163. PUCHAR UserBuffer;
  4164. PUCHAR DiskBuffer = NULL;
  4165. PMDL Mdl;
  4166. PMDL SavedMdl;
  4167. PVOID SavedUserBuffer;
  4168. PVCB Vcb = Scb->Vcb;
  4169. PAGED_CODE();
  4170. DebugTrace( +1, Dbg, ("NtfsNonCachedNonAlignedRead\n") );
  4171. DebugTrace( 0, Dbg, ("Irp = %08lx\n", Irp) );
  4172. DebugTrace( 0, Dbg, ("MajorFunction = %08lx\n", IrpContext->MajorFunction) );
  4173. DebugTrace( 0, Dbg, ("Scb = %08lx\n", Scb) );
  4174. DebugTrace( 0, Dbg, ("StartingVbo = %016I64x\n", StartingVbo) );
  4175. DebugTrace( 0, Dbg, ("ByteCount = %08lx\n", ByteCount) );
  4176. //
  4177. // Currently only read is supported.
  4178. //
  4179. ASSERT(IoGetCurrentIrpStackLocation(Irp)->MajorFunction != IRP_MJ_WRITE);
  4180. //
  4181. // This code assumes the file is uncompressed. Sparse files are supported.
  4182. // Before we assert that the file is uncompressed, assert that our test is
  4183. // going to be properly serialized. We'll also be testing for the sparse
  4184. // attribute in the main code path, so we really need to be serialized here.
  4185. //
  4186. ASSERT( NtfsIsSharedScb( Scb ) ||
  4187. ((Scb->Header.PagingIoResource != NULL) && NtfsIsSharedScbPagingIo( Scb )) );
  4188. ASSERT( !FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_COMPRESSION_MASK ) );
  4189. //
  4190. // Initialize some locals.
  4191. //
  4192. OriginalByteCount = ByteCount;
  4193. OriginalStartingVbo = StartingVbo;
  4194. SectorSize = Vcb->BytesPerSector;
  4195. //
  4196. // For nonbuffered I/O, we need the buffer locked in all
  4197. // cases.
  4198. //
  4199. // This call may raise. If this call succeeds and a subsequent
  4200. // condition is raised, the buffers are unlocked automatically
  4201. // by the I/O system when the request is completed, via the
  4202. // Irp->MdlAddress field.
  4203. //
  4204. NtfsLockUserBuffer( IrpContext,
  4205. Irp,
  4206. IoWriteAccess,
  4207. IoGetCurrentIrpStackLocation(Irp)->Parameters.Read.Length );
  4208. UserBuffer = NtfsMapUserBuffer( Irp, NormalPagePriority );
  4209. //
  4210. // Allocate the local buffer. Round to pages to avoid any device alignment
  4211. // problems.
  4212. //
  4213. DiskBuffer = NtfsAllocatePool( NonPagedPool,
  4214. (ULONG) ROUND_TO_PAGES( SectorSize ));
  4215. //
  4216. // We use a try block here to ensure the buffer is freed, and to
  4217. // fill in the correct byte count in the Iosb.Information field.
  4218. //
  4219. try {
  4220. //
  4221. // If the beginning of the request was not aligned correctly, read in
  4222. // the first part first.
  4223. //
  4224. SectorOffset = ((ULONG)StartingVbo) & (SectorSize - 1);
  4225. if (SectorOffset != 0) {
  4226. //
  4227. // Try to lookup the first run.
  4228. //
  4229. NextIsAllocated = NtfsLookupAllocation( IrpContext,
  4230. Scb,
  4231. Int64ShraMod32( StartingVbo, Vcb->ClusterShift ),
  4232. &NextLcn,
  4233. &NextClusterCount,
  4234. NULL,
  4235. NULL );
  4236. //
  4237. // If this is a sparse file and we've been asked to read in a
  4238. // deallocated range, we need to fill in the buffer with some
  4239. // zeroes and there's nothing to really read from the disk.
  4240. // If this isn't a sparse file, and this range isn't allocated,
  4241. // the file and/or mcb is corrupt.
  4242. //
  4243. if (!NextIsAllocated) {
  4244. if (FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_SPARSE )) {
  4245. RtlZeroMemory( DiskBuffer + SectorOffset,
  4246. SectorSize - SectorOffset );
  4247. } else {
  4248. NtfsRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR, NULL, Scb->Fcb );
  4249. }
  4250. } else {
  4251. //
  4252. // Adjust for any Lcn offset to the start of the sector we want.
  4253. //
  4254. NextLcnOffset = ((ULONG)StartingVbo) & ~(SectorSize - 1);
  4255. NextLcnOffset &= Vcb->ClusterMask;
  4256. NextLbo = Int64ShllMod32(NextLcn, Vcb->ClusterShift);
  4257. NextLbo = NextLbo + NextLcnOffset;
  4258. NtfsSingleNonAlignedSync( IrpContext,
  4259. Vcb,
  4260. Scb,
  4261. DiskBuffer,
  4262. StartingVbo + NextLcnOffset,
  4263. NextLbo,
  4264. SectorSize,
  4265. Irp );
  4266. if (!NT_SUCCESS( Irp->IoStatus.Status )) {
  4267. try_return( NOTHING );
  4268. }
  4269. }
  4270. //
  4271. // Now copy the part of the first sector that we want to the user
  4272. // buffer.
  4273. //
  4274. BytesToCopy = (ByteCount >= SectorSize - SectorOffset
  4275. ? SectorSize - SectorOffset
  4276. : ByteCount);
  4277. RtlCopyMemory( UserBuffer,
  4278. DiskBuffer + SectorOffset,
  4279. BytesToCopy );
  4280. StartingVbo = StartingVbo + BytesToCopy;
  4281. ByteCount -= BytesToCopy;
  4282. if (ByteCount == 0) {
  4283. try_return( NOTHING );
  4284. }
  4285. }
  4286. ASSERT( (((ULONG)StartingVbo) & (SectorSize - 1)) == 0 );
  4287. //
  4288. // If there is a tail part that is not sector aligned, read it.
  4289. //
  4290. TailByteCount = (ByteCount & (SectorSize - 1));
  4291. if (TailByteCount != 0) {
  4292. VBO LastSectorVbo;
  4293. LastSectorVbo = BlockAlignTruncate( StartingVbo + ByteCount, (LONG)SectorSize );
  4294. //
  4295. // Try to lookup the last part of the requested range.
  4296. //
  4297. NextIsAllocated = NtfsLookupAllocation( IrpContext,
  4298. Scb,
  4299. Int64ShraMod32( LastSectorVbo, Vcb->ClusterShift ),
  4300. &NextLcn,
  4301. &NextClusterCount,
  4302. NULL,
  4303. NULL );
  4304. //
  4305. // If this is a sparse file and we've been asked to read in a
  4306. // deallocated range, we need to fill in the buffer with some
  4307. // zeroes and there's nothing to really read from the disk.
  4308. // If this isn't a sparse file, and this range isn't allocated,
  4309. // the file and/or mcb is corrupt.
  4310. //
  4311. if (!NextIsAllocated) {
  4312. if (FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_SPARSE )) {
  4313. RtlZeroMemory( DiskBuffer,
  4314. TailByteCount );
  4315. } else {
  4316. NtfsRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR, NULL, Scb->Fcb );
  4317. }
  4318. } else {
  4319. //
  4320. // Adjust for any Lcn offset.
  4321. //
  4322. NextLcnOffset = ((ULONG)LastSectorVbo) & Vcb->ClusterMask;
  4323. NextLbo = Int64ShllMod32(NextLcn, Vcb->ClusterShift);
  4324. NextLbo = NextLbo + NextLcnOffset;
  4325. NtfsSingleNonAlignedSync( IrpContext,
  4326. Vcb,
  4327. Scb,
  4328. DiskBuffer,
  4329. LastSectorVbo,
  4330. NextLbo,
  4331. SectorSize,
  4332. Irp );
  4333. if (!NT_SUCCESS( Irp->IoStatus.Status )) {
  4334. try_return( NOTHING );
  4335. }
  4336. }
  4337. //
  4338. // Now copy over the part of this last sector that we need.
  4339. //
  4340. BytesToCopy = TailByteCount;
  4341. UserBuffer += (ULONG)(LastSectorVbo - OriginalStartingVbo);
  4342. RtlCopyMemory( UserBuffer, DiskBuffer, BytesToCopy );
  4343. ByteCount -= BytesToCopy;
  4344. if (ByteCount == 0) {
  4345. try_return( NOTHING );
  4346. }
  4347. }
  4348. ASSERT( ((((ULONG)StartingVbo) | ByteCount) & (SectorSize - 1)) == 0 );
  4349. //
  4350. // Now build a Mdl describing the sector aligned balance of the transfer,
  4351. // and put it in the Irp, and read that part.
  4352. //
  4353. SavedMdl = Irp->MdlAddress;
  4354. Irp->MdlAddress = NULL;
  4355. SavedUserBuffer = Irp->UserBuffer;
  4356. Irp->UserBuffer = (PUCHAR)MmGetMdlVirtualAddress( SavedMdl ) +
  4357. (ULONG)(StartingVbo - OriginalStartingVbo);
  4358. Mdl = IoAllocateMdl(Irp->UserBuffer,
  4359. ByteCount,
  4360. FALSE,
  4361. FALSE,
  4362. Irp);
  4363. if (Mdl == NULL) {
  4364. Irp->MdlAddress = SavedMdl;
  4365. Irp->UserBuffer = SavedUserBuffer;
  4366. NtfsRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES, NULL, NULL );
  4367. }
  4368. IoBuildPartialMdl(SavedMdl,
  4369. Mdl,
  4370. Irp->UserBuffer,
  4371. ByteCount);
  4372. //
  4373. // Try to read in the pages.
  4374. //
  4375. try {
  4376. NtfsNonCachedIo( IrpContext,
  4377. Irp,
  4378. Scb,
  4379. StartingVbo,
  4380. ByteCount,
  4381. 0 );
  4382. } finally {
  4383. IoFreeMdl( Irp->MdlAddress );
  4384. Irp->MdlAddress = SavedMdl;
  4385. Irp->UserBuffer = SavedUserBuffer;
  4386. }
  4387. try_exit: NOTHING;
  4388. } finally {
  4389. NtfsFreePool( DiskBuffer );
  4390. if ( !AbnormalTermination() && NT_SUCCESS(Irp->IoStatus.Status) ) {
  4391. Irp->IoStatus.Information = OriginalByteCount;
  4392. //
  4393. // We now flush the user's buffer to memory.
  4394. //
  4395. KeFlushIoBuffers( Irp->MdlAddress, TRUE, FALSE );
  4396. }
  4397. }
  4398. DebugTrace( -1, Dbg, ("NtfsNonCachedNonAlignedRead -> VOID\n") );
  4399. return;
  4400. }
BOOLEAN
NtfsVerifyAndRevertUsaBlock (
    IN PIRP_CONTEXT IrpContext,
    IN PSCB Scb,
    IN PIRP Irp OPTIONAL,
    IN PVOID SystemBuffer OPTIONAL,
    IN ULONG Offset,
    IN ULONG Length,
    IN LONGLONG FileOffset
    )

/*++

Routine Description:

    This routine will revert the bytes in all of the structures protected by
    update sequence arrays.  It copies the bytes from each Usa to the
    separate blocks protected.

    If a structure does not verify correctly, then its signature is set
    to BaadSignature (or HoleSignature, for an unallocated range of a
    non-logfile stream).

Arguments:

    IrpContext - Context of the current request; also supplies the Vcb
        whose reserved mapping is used when the user buffer cannot be mapped.

    Scb - The scb being read.

    Irp - Contains the buffer to be reverted if specified; if not supplied,
        SystemBuffer must be.  Exactly one of Irp/SystemBuffer is present.

    SystemBuffer - Contains the buffer if Irp is NULL.

    Offset - Offset within the buffer to be reverted.

    Length - Length of the buffer to be reverted starting at the offset.

    FileOffset - Offset within the file the buffer originates from.

Return Value:

    FALSE - if at least one block did not verify correctly and received a BaadSignature.

    TRUE - if no blocks received a BaadSignature.

--*/

{
    PMULTI_SECTOR_HEADER MultiSectorHeader;
    PUSHORT SequenceArray;
    PUSHORT SequenceNumber;
    ULONG StructureSize;
    USHORT CountBlocks;
    PUSHORT ProtectedUshort;
    PVCB Vcb = Scb->Vcb;
    ULONG BytesLeft = Length;

    //
    //  A small MDL is built on the stack and reused (via the reserved
    //  mapping) whenever the user buffer cannot be mapped directly.
    //  NOTE(review): the PFN array here holds 2 entries for a mapping
    //  initialized to span 2 * PAGE_SIZE - presumably the actual mapped
    //  length (one sector / one USA structure, < 2 pages) keeps this
    //  within bounds; confirm against MmInitializeMdl requirements.
    //
    UCHAR Buffer[sizeof( MDL ) + sizeof( PFN_NUMBER ) * 2];
    PMDL PartialMdl = (PMDL) Buffer;
    BOOLEAN ReservedMapping = FALSE;
    BOOLEAN Result = TRUE;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsVerifyAndRevertUsaBlock: Entered\n") );

    //
    //  Exactly one of Irp / SystemBuffer must be supplied.
    //
    ASSERT( (ARGUMENT_PRESENT( Irp ) && !ARGUMENT_PRESENT( SystemBuffer )) ||
            (!ARGUMENT_PRESENT( Irp ) && ARGUMENT_PRESENT( SystemBuffer )) );

    //
    //  Cast the buffer pointer to a Multi-Sector-Header and verify that this
    //  block has been initialized.
    //
    if (ARGUMENT_PRESENT( Irp )) {
        SystemBuffer = NtfsMapUserBufferNoRaise( Irp, HighPagePriority );
    }

    //
    //  We can't map the user buffer due to low resources - so switch to using the reserved
    //  mapping instead.  The reserved mapping is a shared, pre-allocated VA range
    //  guarded by the ReservedMappingMutex; we only map one sector here to read
    //  the multi-sector header.
    //
    if (SystemBuffer == NULL) {

        ExAcquireFastMutexUnsafe( &Vcb->ReservedMappingMutex );
        ReservedMapping = TRUE;
        MmInitializeMdl( PartialMdl, NULL, 2 * PAGE_SIZE );
        IoBuildPartialMdl( Irp->MdlAddress, PartialMdl, Add2Ptr( MmGetMdlBaseVa( Irp->MdlAddress ), MmGetMdlByteOffset( Irp->MdlAddress ) + Offset ), Vcb->BytesPerSector );
        MultiSectorHeader = (PMULTI_SECTOR_HEADER) MmMapLockedPagesWithReservedMapping( IrpContext->Vcb->ReservedMapping,
                                                                                        RESERVE_POOL_TAG,
                                                                                        PartialMdl,
                                                                                        MmCached );
        ASSERT( MultiSectorHeader != NULL );

    } else {

        MultiSectorHeader = (PMULTI_SECTOR_HEADER)Add2Ptr( SystemBuffer, Offset );
    }

    //
    //  Get the the number of blocks, based on what type of stream it is.
    //  First check for Mft or Log file.
    //
    if (Scb->Header.NodeTypeCode == NTFS_NTC_SCB_MFT) {

        ASSERT((Scb == Vcb->MftScb) || (Scb == Vcb->Mft2Scb));

        StructureSize = Vcb->BytesPerFileRecordSegment;

    } else if (Scb->Header.NodeTypeCode == NTFS_NTC_SCB_DATA) {

        ASSERT( Scb == Vcb->LogFileScb );

        //
        //  On the first pass through the log file, we see all -1,
        //  and we just want to let it go.
        //
        if (*(PULONG)&MultiSectorHeader->Signature == MAXULONG) {

            //
            //  Use the structure size given us by Lfs if present.
            //
            StructureSize = Vcb->LfsWriteData.LfsStructureSize;

        //
        //  Use the current size in the USA
        //
        } else {

            CountBlocks = (USHORT)(MultiSectorHeader->UpdateSequenceArraySize - 1);
            StructureSize = CountBlocks * SEQUENCE_NUMBER_STRIDE;

            //
            //  Check for plausibility and otherwise use page size.
            //
            if ((StructureSize != 0x1000) && (StructureSize != 0x2000) && (StructureSize != PAGE_SIZE)) {

                StructureSize = PAGE_SIZE;
            }
        }

    //
    //  Otherwise it is an index, so we can get the count out of the Scb.
    //
    } else {

        StructureSize = Scb->ScbType.Index.BytesPerIndexBuffer;

        ASSERT((StructureSize == 0x800) || (StructureSize == 0x1000) || (StructureSize == 0x400));
        ASSERT((Length & (StructureSize - 1)) == 0);
    }

    //
    //  We're done with the mapped buffer so release the reserved mapping if we used them
    //
    if (ReservedMapping) {

        MmUnmapReservedMapping( Vcb->ReservedMapping, RESERVE_POOL_TAG, PartialMdl );
        MmPrepareMdlForReuse( PartialMdl );
        ExReleaseFastMutexUnsafe( &Vcb->ReservedMappingMutex );
        ReservedMapping = FALSE;
        MultiSectorHeader = NULL;
    }

    //
    //  A zero structure size can only come from the virgin-log case above
    //  (Lfs has not yet told us the log structure size); nothing to verify.
    //
    if (StructureSize == 0) {

        ASSERT( Scb == Vcb->LogFileScb );

        DebugTrace( -1, Dbg, ("NtfsVerifyAndRevertUsaBlock: (Virgin Log)\n") );

        return TRUE;
    }

    ASSERTMSG( "ReservedMapping should be large enough for this structure\n", StructureSize < 2 * PAGE_SIZE );

    CountBlocks = (USHORT)(StructureSize / SEQUENCE_NUMBER_STRIDE);

    //
    //  Loop through all of the multi-sector blocks in this transfer.
    //
    do {

        //
        //  First find our location in the MultiSectorHeader - use reserve mappings
        //  if we haven't got a system buffer
        //
        if (!SystemBuffer) {

            ExAcquireFastMutexUnsafe( &Vcb->ReservedMappingMutex );
            ReservedMapping = TRUE;
            IoBuildPartialMdl( Irp->MdlAddress,
                               PartialMdl,
                               Add2Ptr( MmGetMdlBaseVa( Irp->MdlAddress ), MmGetMdlByteOffset( Irp->MdlAddress ) + Offset + Length - BytesLeft),
                               StructureSize );
            MultiSectorHeader = (PMULTI_SECTOR_HEADER) MmMapLockedPagesWithReservedMapping( IrpContext->Vcb->ReservedMapping,
                                                                                            RESERVE_POOL_TAG,
                                                                                            PartialMdl,
                                                                                            MmCached );
            ASSERT( MultiSectorHeader != NULL );

        } else {

            MultiSectorHeader = (PMULTI_SECTOR_HEADER)Add2Ptr( SystemBuffer, Offset + Length - BytesLeft );
        }

        //
        //  Uninitialized log file pages always must contain MAXULONG, which is
        //  not a valid signature.  Do not do the check if we see MAXULONG.  Also
        //  since we may have read random uninitialized data, we must check every
        //  possible field that could cause us to fault or go outside of the block,
        //  and also not check in this case.
        //

        //
        //  For 0 or MAXULONG we assume the value is "expected", and we do not
        //  want to replace with the BaadSignature, just move on.
        //
        if ((*(PULONG)&MultiSectorHeader->Signature == MAXULONG) ||
            (*(PULONG)&MultiSectorHeader->Signature == 0)) {

            NOTHING;

        //
        //  Only trust the USA if every field used to locate it is sane:
        //  the block count matches, the array offset is even, lies beyond
        //  the header, fits in the first stride, and the whole structure
        //  fits in the remaining transfer.
        //
        } else if ((CountBlocks == (USHORT)(MultiSectorHeader->UpdateSequenceArraySize - 1)) &&
                   !FlagOn(MultiSectorHeader->UpdateSequenceArrayOffset, 1) &&
                   (MultiSectorHeader->UpdateSequenceArrayOffset >= sizeof( MULTI_SECTOR_HEADER )) &&
                   (MultiSectorHeader->UpdateSequenceArrayOffset < SEQUENCE_NUMBER_STRIDE) &&
                   (StructureSize <= BytesLeft)) {

            ULONG CountToGo;

            CountToGo = CountBlocks;

            //
            //  Compute the array offset and recover the current sequence number.
            //
            SequenceNumber = (PUSHORT)Add2Ptr( MultiSectorHeader,
                                               MultiSectorHeader->UpdateSequenceArrayOffset );

            SequenceArray = SequenceNumber + 1;

            //
            //  We now walk through each block, and insure that the last byte in each
            //  block matches the sequence number.
            //
            ProtectedUshort = (PUSHORT) (Add2Ptr( MultiSectorHeader,
                                                  SEQUENCE_NUMBER_STRIDE - sizeof( USHORT )));

            //
            //  Loop to test for the correct sequence numbers and restore the
            //  sequence numbers.
            //
            do {

                //
                //  If the sequence number does not check, then raise if the record
                //  is not allocated.  If we do not raise, i.e. the routine returns,
                //  then smash the signature so we can easily tell the record is not
                //  allocated.
                //
                if (*ProtectedUshort != *SequenceNumber) {

                    //
                    //  We do nothing except exit if this is the log file and
                    //  the signature is the chkdsk signature.
                    //
                    if ((Scb != Vcb->LogFileScb) ||
                        (*(PULONG)MultiSectorHeader->Signature != *(PULONG)ChkdskSignature)) {

                        //
                        //  If this is the Mft or an index buffer and all of the data for this file
                        //  record is contained in the verified range of the
                        //  record then don't mark it bad.
                        //
                        //  NOTE(review): the 'continue' below re-evaluates the
                        //  do-while condition (decrementing CountToGo) with the
                        //  same ProtectedUshort, so it effectively spins out the
                        //  remaining iterations without marking the record bad.
                        //
                        if ((Scb == Vcb->MftScb) || (Scb == Vcb->Mft2Scb)) {

                            PFILE_RECORD_SEGMENT_HEADER FileRecord;

                            FileRecord = (PFILE_RECORD_SEGMENT_HEADER) MultiSectorHeader;

                            if (FileRecord->FirstFreeByte < ((CountBlocks - CountToGo) * SEQUENCE_NUMBER_STRIDE)) {

                                continue;
                            }

                        } else if (*(PULONG)MultiSectorHeader->Signature == *(PULONG)IndexSignature ) {

                            PINDEX_ALLOCATION_BUFFER IndexBuffer;

                            IndexBuffer = (PINDEX_ALLOCATION_BUFFER) MultiSectorHeader;

                            if (IndexBuffer->IndexHeader.FirstFreeByte < ((CountBlocks - CountToGo) * SEQUENCE_NUMBER_STRIDE)) {

                                continue;
                            }
                        }

                        *(PULONG)MultiSectorHeader->Signature = *(PULONG)BaadSignature;
                        Result = FALSE;
                    }

                    break;

                } else {

                    //
                    //  Sequence number checks: revert the protected short from
                    //  the Usa back into the end of this stride block.
                    //
                    *ProtectedUshort = *SequenceArray++;
                }

                ProtectedUshort += (SEQUENCE_NUMBER_STRIDE / sizeof( USHORT ));

            } while (--CountToGo != 0);

        //
        //  If this is the log file, we report an error unless the current
        //  signature is the chkdsk signature.
        //
        } else if (Scb == Vcb->LogFileScb) {

            if (*(PULONG)MultiSectorHeader->Signature != *(PULONG)ChkdskSignature) {

                *(PULONG)MultiSectorHeader->Signature = *(PULONG)BaadSignature;
                Result = FALSE;
            }

        } else {

            VCN Vcn;
            LCN Lcn;
            LONGLONG ClusterCount;
            BOOLEAN IsAllocated;

            Vcn = LlClustersFromBytesTruncate( Vcb, FileOffset );

            //
            //  Release the reserved buffer before calling - NtfsLookupAllocation
            //  may block and we must not hold the mapping mutex across it.
            //
            if (ReservedMapping) {

                MmUnmapReservedMapping( Vcb->ReservedMapping, RESERVE_POOL_TAG, PartialMdl );
                MmPrepareMdlForReuse( PartialMdl );
                ExReleaseFastMutexUnsafe( &Vcb->ReservedMappingMutex );
                MultiSectorHeader = NULL;
                ReservedMapping = FALSE;
            }

            IsAllocated = NtfsLookupAllocation( IrpContext,
                                                Scb,
                                                Vcn,
                                                &Lcn,
                                                &ClusterCount,
                                                NULL,
                                                NULL );

            //
            //  Re-map the current structure if we are working through the
            //  reserved mapping.
            //
            if (!SystemBuffer) {

                ExAcquireFastMutexUnsafe( &Vcb->ReservedMappingMutex );
                ReservedMapping = TRUE;
                IoBuildPartialMdl( Irp->MdlAddress, PartialMdl, Add2Ptr( MmGetMdlBaseVa( Irp->MdlAddress ), MmGetMdlByteOffset( Irp->MdlAddress ) + Offset + Length - BytesLeft), StructureSize );
                MultiSectorHeader = MmMapLockedPagesWithReservedMapping( IrpContext->Vcb->ReservedMapping,
                                                                         RESERVE_POOL_TAG,
                                                                         PartialMdl,
                                                                         MmCached );
                ASSERT( MultiSectorHeader != NULL );
            }

            //
            //  A fully deallocated range of a sparse/index stream gets the
            //  hole signature; anything else with a bogus header is bad.
            //
            if (!IsAllocated &&
                ( ClusterCount >= LlClustersFromBytes( Vcb, StructureSize))) {

                *(PULONG)MultiSectorHeader->Signature = *(PULONG)HoleSignature;

            } else {

                *(PULONG)MultiSectorHeader->Signature = *(PULONG)BaadSignature;
                Result = FALSE;
            }
        }

        //
        //  Release the reserve mapping before looping
        //
        if (ReservedMapping) {

            MmUnmapReservedMapping( Vcb->ReservedMapping, RESERVE_POOL_TAG, PartialMdl );
            MmPrepareMdlForReuse( PartialMdl );
            ExReleaseFastMutexUnsafe( &Vcb->ReservedMappingMutex );
            MultiSectorHeader = NULL;
            ReservedMapping = FALSE;
        }

        //
        //  Advance to the next Usa-protected structure in the transfer.
        //
        if (BytesLeft > StructureSize) {

            BytesLeft -= StructureSize;

        } else {

            BytesLeft = 0;
        }

        FileOffset = FileOffset + StructureSize;

    } while (BytesLeft != 0);

    DebugTrace( -1, Dbg, ("NtfsVerifyAndRevertUsaBlock: Exit\n") );

    return Result;
}
VOID
NtfsTransformUsaBlock (
    IN PSCB Scb,
    IN OUT PVOID SystemBuffer,
    IN OUT PVOID Buffer,
    IN ULONG Length
    )

/*++

Routine Description:

    This routine will implement Usa protection for all structures of the
    transfer passed described by the caller.  It does so by copying the last
    short in each block of each Usa-protected structure to the
    Usa and storing the current sequence number into each of these bytes.

    It also increments the sequence number in the Usa, both in the outgoing
    buffer and in the caller's original (system) buffer, so the two stay in
    agreement.

Arguments:

    Scb - Scb for the stream being written; determines the Usa structure size
        (file record, log record, or index buffer).

    SystemBuffer - Pointer to the caller's original copy of the same data;
        only its sequence number field is incremented in step with Buffer.

    Buffer - This is the pointer to the start of the structure to transform.

    Length - This is the maximum size for the structure.

Return Value:

    None.

--*/

{
    PMULTI_SECTOR_HEADER MultiSectorHeader;
    PUSHORT SequenceArray;
    PUSHORT SequenceNumber;
    ULONG StructureSize;
    USHORT CountBlocks;
    PUSHORT ProtectedUshort;
    PVCB Vcb = Scb->Vcb;
    ULONG BytesLeft = Length;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsTransformUsaBlock: Entered\n") );

    //
    //  Cast the buffer pointer to a Multi-Sector-Header and verify that this
    //  block has been initialized.
    //
    MultiSectorHeader = (PMULTI_SECTOR_HEADER) Buffer;

    //
    //  Get the the number of blocks, based on what type of stream it is.
    //  First check for Mft or Log file.
    //
    if (Scb->Header.NodeTypeCode == NTFS_NTC_SCB_MFT) {

        ASSERT((Scb == Vcb->MftScb) || (Scb == Vcb->Mft2Scb));

        StructureSize = Vcb->BytesPerFileRecordSegment;

    } else if (Scb->Header.NodeTypeCode == NTFS_NTC_SCB_DATA) {

        //
        //  For the log file, use the value that Lfs has stored in the
        //  Lfs WRITE_DATA structure.
        //
        ASSERT( Scb == Vcb->LogFileScb );
        ASSERT( Vcb->LfsWriteData.LfsStructureSize != 0 );

        StructureSize = Vcb->LfsWriteData.LfsStructureSize;

    //
    //  Otherwise it is an index, so we can get the count out of the Scb.
    //
    } else {

        StructureSize = Scb->ScbType.Index.BytesPerIndexBuffer;

        ASSERT((StructureSize == 0x800) || (StructureSize == 0x1000) || (StructureSize == 0x400));
        ASSERT((Length & (StructureSize - 1)) == 0);
    }

    CountBlocks = (USHORT)(StructureSize / SEQUENCE_NUMBER_STRIDE);

    //
    //  Loop through all of the multi-sector blocks in this transfer.
    //
    do {

        //
        //  Any uninitialized structures will begin with BaadSignature or
        //  MAXULONG, as guaranteed by the Revert routine above.  Also skip
        //  hole-signature structures and anything whose Usa offset fields
        //  are implausible.
        //
        if ((*(PULONG)&MultiSectorHeader->Signature != *(PULONG)BaadSignature) &&
            (*(PULONG)&MultiSectorHeader->Signature != *(PULONG)HoleSignature) &&
            (*(PULONG)&MultiSectorHeader->Signature != MAXULONG) &&
            ((MultiSectorHeader->UpdateSequenceArrayOffset & 1) == 0) &&
            (MultiSectorHeader->UpdateSequenceArrayOffset >= sizeof( MULTI_SECTOR_HEADER )) &&
            (MultiSectorHeader->UpdateSequenceArrayOffset < SEQUENCE_NUMBER_STRIDE)) {

            ULONG CountToGo = CountBlocks;

            //
            //  Compute the array offset and recover the current sequence number.
            //
            SequenceNumber = (PUSHORT)Add2Ptr( MultiSectorHeader,
                                               MultiSectorHeader->UpdateSequenceArrayOffset );

            //
            //  Increment sequence number before the write, both in the buffer
            //  going out and in the original buffer pointed to by SystemBuffer.
            //  Skip numbers with all 0's and all 1's because 0's are produced by
            //  by common failure cases and -1 is used by hot fix.
            //
            do {

                *SequenceNumber += 1;
                *(PUSHORT)Add2Ptr( SystemBuffer,
                                   MultiSectorHeader->UpdateSequenceArrayOffset ) += 1;

            } while ((*SequenceNumber == 0) || (*SequenceNumber == 0xFFFF));

            SequenceArray = SequenceNumber + 1;

            //
            //  We now walk through each block to copy each protected short
            //  to the sequence array, and replacing it by the incremented
            //  sequence number.
            //
            ProtectedUshort = (PUSHORT) (Add2Ptr( MultiSectorHeader,
                                                  SEQUENCE_NUMBER_STRIDE - sizeof( USHORT )));

            //
            //  Loop to test for the correct sequence numbers and restore the
            //  sequence numbers.
            //
            do {

                *SequenceArray++ = *ProtectedUshort;
                *ProtectedUshort = *SequenceNumber;

                ProtectedUshort += (SEQUENCE_NUMBER_STRIDE / sizeof( USHORT ));

            } while (--CountToGo != 0);
        }

        //
        //  Now adjust all pointers and counts before looping back.
        //
        MultiSectorHeader = (PMULTI_SECTOR_HEADER)Add2Ptr( MultiSectorHeader,
                                                           StructureSize );
        SystemBuffer = Add2Ptr( SystemBuffer, StructureSize );
        BytesLeft -= StructureSize;

    } while (BytesLeft != 0);

    DebugTrace( -1, Dbg, ("NtfsTransformUsaBlock: Exit -> %08lx\n", StructureSize) );

    return;
}
VOID
NtfsCreateMdlAndBuffer (
    IN PIRP_CONTEXT IrpContext,
    IN PSCB ThisScb,
    IN UCHAR NeedTwoBuffers,
    IN OUT PULONG Length,
    OUT PMDL *Mdl OPTIONAL,
    OUT PVOID *Buffer
    )

/*++

Routine Description:

    This routine will allocate a buffer and create an Mdl which describes
    it.  This buffer and Mdl can then be used for an I/O operation, the
    pages will be locked in memory.

    This routine is intended to be used for cases where large I/Os are
    required.  It attempts to avoid allocations errors and bugchecks by
    using a reserved buffer scheme.  In order for this scheme to work without
    deadlocks, the calling thread must have all resources acquired that it
    will need prior to doing the I/O.  I.e., this routine itself may acquire
    a resource which must work as an end resource.

    Examples of callers to this routine are noncached writes to USA streams,
    and noncached reads and writes to compressed streams.  One case to be
    aware of is the case where a noncached compressed write needs to fault
    in the rest of a compression unit, in order to write the entire unit.
    In an extreme case the noncached writer will allocated one reserved buffer,
    and the noncached read of the rest of the compression unit may need to
    recursively acquire the resource in this routine and allocate the other
    reserved buffer.

Arguments:

    IrpContext - Context of the current request; used to raise on failure.

    ThisScb - Scb for the file where the IO is occurring.

    NeedTwoBuffers - Indicates that this is the request for the a buffer for
        a transaction which may need two buffers.  A value of RESERVED_BUFFER_ONE_NEEDED means only 1
        buffer is needed.  A value of RESERVED_BUFFER_TWO_NEEDED or RESERVED_BUFFER_WORKSPACE_NEEDED
        indicates that we need two buffers and either ReservedBuffer1 or ReservedBuffer2 should be acquired.

    Length - This is the length needed for this buffer, returns (possibly larger)
        length allocated.

    Mdl - This is the address to store the address of the Mdl created.

    Buffer - This is the address to store the address of the buffer allocated.

Return Value:

    None.  Raises STATUS_INSUFFICIENT_RESOURCES or STATUS_FILE_LOCK_CONFLICT
    on failure (the latter as a retry signal to break potential deadlocks).

--*/

{
    PVOID TempBuffer;
    PMDL TempMdl;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsCreateMdlAndBuffer: Entered\n") );

    ASSERT( (NeedTwoBuffers == RESERVED_BUFFER_WORKSPACE_NEEDED) ?
            (*Length <= WORKSPACE_BUFFER_SIZE) :
            (*Length <= LARGE_BUFFER_SIZE) );

    TempBuffer = NULL;
    TempMdl = NULL;

    //
    //  If this thread already owns a buffer then call to get the second.
    //
    //  If there have been no allocation failures recently, and
    //  we can use at least half of a big buffer, then go for
    //  one of our preallocated buffers first.
    //
    if ((NtfsReservedBufferThread == (PVOID) PsGetCurrentThread()) ||
        ((*Length >= LARGE_BUFFER_SIZE / 2) && !NtfsBufferAllocationFailure)) {

        //
        //  If we didn't get one then try from pool.
        //
        if (!NtfsGetReservedBuffer( ThisScb->Fcb, &TempBuffer, Length, NeedTwoBuffers )) {

            TempBuffer = NtfsAllocatePoolWithTagNoRaise( NonPagedPoolCacheAligned, *Length, '9ftN' );
        }

    //
    //  Otherwise try to allocate from pool and then get a reserved buffer if
    //  there have been no allocation errors recently.
    //
    } else {

        TempBuffer = NtfsAllocatePoolWithTagNoRaise( NonPagedPoolCacheAligned, *Length, '9ftN' );

        if ((TempBuffer == NULL) && !NtfsBufferAllocationFailure) {

            NtfsGetReservedBuffer( ThisScb->Fcb, &TempBuffer, Length, NeedTwoBuffers );
        }
    }

    //
    //  If we could not allocate a buffer from pool, then
    //  we must stake our claim to a reserved buffer.
    //
    //  We would like to queue the requests which need a single buffer because
    //  they won't be completely blocked by the owner of multiple buffers.
    //  But if this thread wants multiple buffers and there is already a
    //  thread with multiple buffers then fail this request with FILE_LOCK_CONFLICT
    //  in case the current thread is holding some resource needed by the
    //  existing owner.
    //
    if (TempBuffer == NULL) {

        ExAcquireResourceExclusiveLite( &NtfsReservedBufferResource, TRUE );

        //
        //  Show that we have gotten an allocation failure
        //
        NtfsBufferAllocationFailure = TRUE;

        //
        //  Loop here until we get a buffer or abort the current request.
        //
        while (TRUE) {

            KeDelayExecutionThread( KernelMode, FALSE, &NtfsShortDelay );

            if (NtfsGetReservedBuffer( ThisScb->Fcb, &TempBuffer, Length, NeedTwoBuffers )) {

                //
                //  Clear the failure flag only if no one else is waiting
                //  for the resource behind us.
                //
                if (ExGetExclusiveWaiterCount( &NtfsReservedBufferResource ) == 0) {

                    NtfsBufferAllocationFailure = FALSE;
                }

                ExReleaseResourceLite( &NtfsReservedBufferResource );
                break;
            }

            //
            //  We will perform some deadlock detection here and raise
            //  STATUS_FILE_LOCK conflict in order to retry this request if
            //  anyone is queued behind the resource.  Deadlocks can occur
            //  under the following circumstances when another thread is
            //  blocked behind this resource:
            //
            //      - Current thread needs two buffers.  We can't block the
            //          Needs1 guy which may need to complete before the
            //          current Needs2 guy can proceed.  Exception is case
            //          where current thread already has a buffer and we
            //          have a recursive 2 buffer case.  In this case we
            //          are only waiting for the third buffer to become
            //          available.
            //
            //      - Current thread is the lazy writer.  Lazy writer will
            //          need buffer for USA transform.  He also can own
            //          the BCB resource that might be needed by the current
            //          owner of a buffer.
            //
            //      - Current thread is operating on the same Fcb as the owner
            //          of any of the buffers.
            //
            //
            //  If the current thread already owns one of the two buffers then
            //  always allow him to loop.  Otherwise perform deadlock detection
            //  if we need 2 buffers or this this is the lazy writer or we
            //  are trying to get the same Fcb already owned by the 2 buffer guy.
            //
            if ((PsGetCurrentThread() != NtfsReservedBufferThread) &&
                (NeedTwoBuffers ||

#ifdef COMPRESS_ON_WIRE
                 (ThisScb->LazyWriteThread[0] == PsGetCurrentThread()) ||
                 (ThisScb->LazyWriteThread[1] == PsGetCurrentThread()) ||
#else
                 (NtfsGetTopLevelContext()->SavedTopLevelIrp == (PIRP)FSRTL_CACHE_TOP_LEVEL_IRP) ||
#endif
                 (ThisScb->Fcb == NtfsReserved12Fcb))) {

                //
                //  If no one is waiting then see if we can continue waiting.
                //
                if (ExGetExclusiveWaiterCount( &NtfsReservedBufferResource ) == 0) {

                    //
                    //  If there is no one waiting behind us and there is no current
                    //  multi-buffer owner, then try again here.
                    //
                    if (NtfsReservedBufferThread == NULL) {

                        continue;
                    }

                    NtfsBufferAllocationFailure = FALSE;
                }

                ExReleaseResourceLite( &NtfsReservedBufferResource );

                NtfsRaiseStatus( IrpContext, STATUS_FILE_LOCK_CONFLICT, NULL, NULL );
            }
        }
    }

    //
    //  Use a try-finally to facilitate cleanup.
    //
    try {

        if (ARGUMENT_PRESENT(Mdl)) {

            //
            //  Allocate an Mdl for this buffer.
            //
            TempMdl = IoAllocateMdl( TempBuffer,
                                     *Length,
                                     FALSE,
                                     FALSE,
                                     NULL );

            if (TempMdl == NULL) {

                NtfsRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES, NULL, NULL );
            }

            //
            //  Lock the new Mdl in memory.
            //
            MmBuildMdlForNonPagedPool( TempMdl );

            *Mdl = TempMdl;
        }

    } finally {

        DebugUnwind( NtfsCreateMdlAndBuffer );

        //
        //  If abnormal termination, back out anything we've done.
        //
        if (AbnormalTermination()) {

            NtfsDeleteMdlAndBuffer( TempMdl, TempBuffer );

        //
        //  Otherwise, give the Mdl and buffer to the caller.
        //
        } else {

            *Buffer = TempBuffer;
        }

        DebugTrace( -1, Dbg, ("NtfsCreateMdlAndBuffer: Exit\n") );
    }

    return;
}
VOID
NtfsDeleteMdlAndBuffer (
    IN PMDL Mdl OPTIONAL,
    IN PVOID Buffer OPTIONAL
    )

/*++

Routine Description:

    This routine frees an Mdl and/or buffer previously obtained from
    NtfsCreateMdlAndBuffer.  The buffer is returned to the reserved
    buffer pool if it came from there, otherwise it is freed to pool.

    (Either argument may be NULL, in which case it is ignored.)

Arguments:

    Mdl - Address of Mdl to free.

    Buffer - Address of the buffer to free.

Return Value:

    None.

--*/

{
    //
    //  Free Mdl if there is one
    //
    if (Mdl != NULL) {

        IoFreeMdl( Mdl );
    }

    //
    //  Free reserved buffer or pool.  NtfsFreeReservedBuffer returns FALSE
    //  if the buffer is not one of the preallocated reserved buffers, in
    //  which case it must have come from pool.
    //
    if (Buffer != NULL) {

        if (!NtfsFreeReservedBuffer( Buffer )) {

            NtfsFreePool( Buffer );
        }
    }
}
PMDL
NtfsBuildZeroMdl (
    IN PIRP_CONTEXT IrpContext,
    IN ULONG Length,
    OUT PVOID *Buffer
    )

/*++

Routine Description:

    Create an efficient mdl that describes a given length of zeros.  We'll only
    allocate a one page buffer and make a mdl that maps all the pages back to the single
    physical page.  We'll default to a smaller size buffer down to 1 PAGE if memory
    is tight.  The caller should check the Mdl->ByteCount to see the true size.

Arguments:

    IrpContext - Context of the current request; supplies the Vcb sector size
        used when shrinking the requested length.

    Length - The desired length of the zero buffer.  We may return less than this.

    Buffer - This returns the nonpaged pool buffer we've allocated - the caller
        should free it after he frees the returned MDL.

Return Value:

    a MDL if successfull / NULL if not.

--*/

{
    PMDL ZeroMdl;
    ULONG SavedByteCount;
    PPFN_NUMBER Page;
    ULONG i;

    //
    //  Allocate and zero the single physical page that will back the
    //  entire Mdl.
    //
    *Buffer = (PCHAR) NtfsAllocatePoolNoRaise( NonPagedPool, PAGE_SIZE );
    if (!*Buffer) {
        return NULL;
    }
    RtlZeroMemory( *Buffer, PAGE_SIZE );

    while (TRUE) {

        //
        //  Spin down trying to get an MDL which can describe our operation.
        //
        while (TRUE) {

            ZeroMdl = IoAllocateMdl( *Buffer, Length, FALSE, FALSE, NULL );

            //
            //  Throttle ourselves to what we've physically allocated.  Note that
            //  we could have started with an odd multiple of this number.  If we
            //  tried for exactly that size and failed, we're toast.
            //
            if (ZeroMdl || (Length <= PAGE_SIZE)) {

                break;
            }

            //
            //  Fallback by half and round down to a sector multiple.
            //
            Length = BlockAlignTruncate( Length / 2, (LONG)IrpContext->Vcb->BytesPerSector );
            if (Length < PAGE_SIZE) {
                Length = PAGE_SIZE;
            }
        }

        if (ZeroMdl == NULL) {
            NtfsFreePool( *Buffer );
            *Buffer = NULL;
            return NULL;
        }

        //
        //  If we have throttled all the way down, stop and just build a
        //  simple MDL describing our previous allocation.
        //
        if (Length == PAGE_SIZE) {

            MmBuildMdlForNonPagedPool( ZeroMdl );
            break;
        }

        //
        //  Now we will temporarily lock the allocated pages
        //  only, and then replicate the page frame numbers through
        //  the entire Mdl to keep writing the same pages of zeros.
        //
        //  It would be nice if Mm exported a way for us to not have
        //  to pull the Mdl apart and rebuild it ourselves, but this
        //  is so bizzare a purpose as to be tolerable.
        //
        //  NOTE(review): this manipulates MDL internals directly
        //  (flags, MappedSystemVa, PFN array) - any change to the order
        //  of these statements risks breaking the resulting Mdl.
        //
        SavedByteCount = ZeroMdl->ByteCount;
        ZeroMdl->ByteCount = PAGE_SIZE;
        MmBuildMdlForNonPagedPool( ZeroMdl );

        ZeroMdl->MdlFlags &= ~MDL_SOURCE_IS_NONPAGED_POOL;
        ZeroMdl->MdlFlags |= MDL_PAGES_LOCKED;
        ZeroMdl->MappedSystemVa = NULL;
        ZeroMdl->ByteCount = SavedByteCount;

        //
        //  Replicate the first (and only real) page frame number across
        //  every slot in the PFN array so the whole byte count maps to
        //  the single zeroed page.
        //
        Page = MmGetMdlPfnArray( ZeroMdl );
        for (i = 0; i < (ADDRESS_AND_SIZE_TO_SPAN_PAGES( 0, SavedByteCount )); i++) {
            *(Page + i) = *(Page);
        }

        break;
    }

    return ZeroMdl;
}
VOID
NtfsWriteClusters (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN PSCB Scb,
    IN VBO StartingVbo,
    IN PVOID Buffer,
    IN ULONG ClusterCount
    )

/*++

Routine Description:

    This routine is called to write clusters directly to a file.  It is
    needed when converting a resident attribute to non-resident when
    we can't initialize through the cache manager.  This happens when
    we receive a SetEndOfFile from MM when creating a section for
    a resident file.

    The write is performed synchronously (the IRP_CONTEXT_STATE_WAIT flag
    is forced on) on a private IRP, in chunks if mdl allocation is tight.

Arguments:

    IrpContext - Context for the request; its io context and major function
        are temporarily swapped here and restored on exit.

    Vcb - Vcb for this device.

    Scb - Stream being written.

    StartingVbo - This is the starting offset to write to.

    Buffer - Buffer containing the data to write.  If NULL, zeros are
        written instead (not legal for compressed/encrypted streams).

    ClusterCount - This is the number of clusters to write.

Return Value:

    None.  This routine will raise if the operation is unsuccessful.

--*/

{
    PIRP NewIrp = NULL;
    UCHAR MajorFunction = IrpContext->MajorFunction;
    BOOLEAN LockedUserBuffer = FALSE;
    PNTFS_IO_CONTEXT PreviousContext;
    ULONG State;
    ULONG ByteCount = BytesFromClusters( Vcb, ClusterCount );
    ULONG OriginalByteCount = ByteCount;
    PMDL Mdl = NULL;
    NTFS_IO_CONTEXT LocalContext;
    BOOLEAN ZeroBuffer = FALSE;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsWriteClusters: Entered\n") );
    DebugTrace( 0, Dbg, ("StartingVbo -> %016I64x\n", StartingVbo) );
    DebugTrace( 0, Dbg, ("Buffer -> %08lx\n", Buffer) );
    DebugTrace( 0, Dbg, ("ClusterCount -> %08lx\n", ClusterCount) );

    //
    //  Force this operation to be synchronous.
    //

    SetFlag( IrpContext->State, IRP_CONTEXT_STATE_WAIT );

    //
    //  Swap out the old Io context block.  We use a stack-local context so
    //  we must also clear the ALLOC flag (saved in State for restoration).
    //

    PreviousContext = IrpContext->Union.NtfsIoContext;

    IrpContext->Union.NtfsIoContext = &LocalContext;
    State = IrpContext->State;
    ClearFlag( IrpContext->State, IRP_CONTEXT_STATE_ALLOC_IO_CONTEXT );

    //
    //  Use a try-finally so we can clean up properly.
    //

    try {

        PIO_STACK_LOCATION IrpSp;

        RtlZeroMemory( IrpContext->Union.NtfsIoContext, sizeof( NTFS_IO_CONTEXT ));
        KeInitializeEvent( &IrpContext->Union.NtfsIoContext->Wait.SyncEvent,
                           NotificationEvent,
                           FALSE );

        NewIrp = IoBuildAsynchronousFsdRequest( IRP_MJ_WRITE,
                                                Vcb->Vpb->DeviceObject,
                                                Buffer,
                                                ByteCount,
                                                (PLARGE_INTEGER)&StartingVbo,
                                                NULL );

        if (NewIrp == NULL) {
            NtfsRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES, NULL, NULL );
        }

        //
        //  We now have an Irp, we want to make it look as though it is part of
        //  the current call.  We need to adjust the Irp stack to update this.
        //

        IoSetNextIrpStackLocation( NewIrp );

        //
        //  Check if we're writing zeros
        //

        if (Buffer == NULL) {

            //
            //  This won't work for compression or encryption because they manipulate
            //  the input buffer
            //

            ASSERT( !FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_COMPRESSION_MASK | ATTRIBUTE_FLAG_ENCRYPTED ) );

            //
            //  Build a reusable zero mdl (may describe fewer bytes than asked;
            //  the loop below writes it repeatedly).  Buffer now owns the pool
            //  allocation and is freed in the finally clause.
            //

            Mdl = NtfsBuildZeroMdl( IrpContext, ByteCount, &Buffer );
            if (!Mdl) {
                NtfsRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES, NULL, NULL );
            }
            ZeroBuffer = TRUE;
        }

        //
        //  Loop and do the write in chunks
        //

        while (ByteCount != 0) {

            ULONG Size = ByteCount;

            if (!ZeroBuffer) {

                //
                //  Attempt to allocate a mdl - reducing the size if we fail until
                //  we're at a page size
                //

                do {

                    Mdl = IoAllocateMdl( Add2Ptr( Buffer, OriginalByteCount - ByteCount ), Size, FALSE, FALSE, NULL );
                    if (Mdl == NULL) {
                        Size = BlockAlignTruncate( Size / 2, (LONG)Vcb->BytesPerSector );
                    }
                } while ((Mdl == NULL) && (Size >= PAGE_SIZE));

                if (!Mdl) {
                    NtfsRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES, NULL, NULL );
                }

                //
                //  Now probe the buffer described by the Irp.  If we get an exception,
                //  deallocate the Mdl and return the appropriate "expected" status.
                //

                try {

                    MmProbeAndLockPages( Mdl, NewIrp->RequestorMode, IoReadAccess );

                } except(EXCEPTION_EXECUTE_HANDLER) {

                    NTSTATUS Status;

                    Status = GetExceptionCode();

                    IoFreeMdl( Mdl );

                    NtfsRaiseStatus( IrpContext,
                                     FsRtlIsNtstatusExpected(Status) ? Status : STATUS_INVALID_USER_BUFFER,
                                     NULL,
                                     NULL );
                }
                LockedUserBuffer = TRUE;

            } else {

                //
                //  Zero case: reuse the single zero mdl, clamped to what it
                //  actually describes.
                //

                Size = min( ByteCount, Mdl->ByteCount );
            }

            //
            //  Put our buffer/Mdl into the Irp and update the offset and length
            //

            if (!ZeroBuffer) {
                NewIrp->UserBuffer = Add2Ptr( Buffer, OriginalByteCount - ByteCount );
            }
            NewIrp->MdlAddress = Mdl;

            IrpSp = IoGetCurrentIrpStackLocation( NewIrp );
            IrpSp->DeviceObject = Vcb->Vpb->DeviceObject;
            IrpSp->Parameters.Write.Length = Size;
            IrpSp->Parameters.Write.ByteOffset.QuadPart = StartingVbo;

            //
            //  Put the write code into the IrpContext.
            //

            IrpContext->MajorFunction = IRP_MJ_WRITE;

            //
            //  Write the data to the disk.
            //

            NtfsNonCachedIo( IrpContext,
                             NewIrp,
                             Scb,
                             StartingVbo,
                             Size,
                             0 );

            //
            //  If we encountered an error or didn't write all the bytes, then
            //  raise the error code.  We use the IoStatus in the Irp instead of
            //  our structure since this Irp will not be completed.
            //

            if (!NT_SUCCESS( NewIrp->IoStatus.Status )) {

                DebugTrace( 0, Dbg, ("Couldn't write clusters to disk -> %08lx\n", NewIrp->IoStatus.Status) );

                NtfsRaiseStatus( IrpContext, NewIrp->IoStatus.Status, NULL, NULL );

            } else if (NewIrp->IoStatus.Information != Size) {

                //
                //  NOTE(review): "byes" below is a long-standing typo for
                //  "bytes" in this DBG-only trace string; left untouched here
                //  since it is runtime text.
                //

                DebugTrace( 0, Dbg, ("Couldn't write all byes to disk\n") );
                NtfsRaiseStatus( IrpContext, STATUS_UNEXPECTED_IO_ERROR, NULL, NULL );
            }

            //
            //  Cleanup the MDL - only the per-chunk locked mdl; the zero mdl
            //  is reused across iterations and freed by our caller's path.
            //

            if (LockedUserBuffer) {
                MmUnlockPages( NewIrp->MdlAddress );
                LockedUserBuffer = FALSE;
                IoFreeMdl( NewIrp->MdlAddress );
            }
            NewIrp->MdlAddress = NULL;

            //
            //  Adjust offset and length
            //

            ByteCount -= Size;
            StartingVbo += Size;
        }

    } finally {

        DebugUnwind( NtfsWriteClusters );

        //
        //  Recover the Io Context and remember if it is from pool.
        //

        IrpContext->Union.NtfsIoContext = PreviousContext;
        SetFlag( IrpContext->State, FlagOn( State, IRP_CONTEXT_STATE_ALLOC_IO_CONTEXT ) );

        IrpContext->MajorFunction = MajorFunction;

        //
        //  If we allocated an Irp, we need to deallocate it.  We also
        //  have to return the correct function code to the Irp Context.
        //

        if (NewIrp != NULL) {

            //
            //  If there is an Mdl we free that first.  On an abnormal unwind
            //  the pages may still be locked (LockedUserBuffer).
            //

            if (NewIrp->MdlAddress != NULL) {

                if (LockedUserBuffer) {
                    MmUnlockPages( NewIrp->MdlAddress );
                }

                IoFreeMdl( NewIrp->MdlAddress );
            }

            IoFreeIrp( NewIrp );
        }

        //
        //  In the zero-fill case Buffer points at our own pool allocation
        //  from NtfsBuildZeroMdl, not the caller's buffer.
        //

        if (ZeroBuffer && Buffer) {
            NtfsFreePool( Buffer );
        }

        DebugTrace( -1, Dbg, ("NtfsWriteClusters: Exit\n") );
    }

    return;
}
  5346. //
  5347. // Local support routine
  5348. //
VOID
NtfsMultipleAsync (
    IN PIRP_CONTEXT IrpContext,
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP MasterIrp,
    IN ULONG MultipleIrpCount,
    IN PIO_RUN IoRuns,
    IN UCHAR IrpSpFlags
    )

/*++

Routine Description:

    This routine first does the initial setup required of a Master IRP that is
    going to be completed using associated IRPs.  This routine should not
    be used if only one async request is needed, instead the single read/write
    async routines should be called.

    A context parameter is initialized, to serve as a communications area
    between here and the common completion routine.  This initialization
    includes allocation of a spinlock.  The spinlock is deallocated in the
    NtfsWaitSync routine, so it is essential that the caller insure that
    this routine is always called under all circumstances following a call
    to this routine.

    Next this routine reads or writes one or more contiguous sectors from
    a device asynchronously, and is used if there are multiple reads for a
    master IRP.  A completion routine is used to synchronize with the
    completion of all of the I/O requests started by calls to this routine.

    Also, prior to calling this routine the caller must initialize the
    IoStatus field in the Context, with the correct success status and byte
    count which are expected if all of the parallel transfers complete
    successfully.  After return this status will be unchanged if all requests
    were, in fact, successful.  However, if one or more errors occur, the
    IoStatus will be modified to reflect the error status and byte count
    from the first run (by Vbo) which encountered an error.  I/O status
    from all subsequent runs will not be indicated.

Arguments:

    IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.

    DeviceObject - Supplies the device to be read

    MasterIrp - Supplies the master Irp.

    MultipleIrpCount - Supplies the number of multiple async requests
        that will be issued against the master irp.

    IoRuns - Supplies an array containing the Vbo, Lbo, BufferOffset, and
        ByteCount for all the runs to executed in parallel.

    IrpSpFlags - Flags to set in the irp stack location for the i/o - i.e write through

Return Value:

    None.

--*/

{
    PIRP Irp;
    PIO_STACK_LOCATION IrpSp;
    PMDL Mdl;
    PNTFS_IO_CONTEXT Context;
    ULONG TotalByteCount = 0;
    ULONG UnwindRunCount = 0;
    BOOLEAN Wait;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsMultipleAsync\n") );
    DebugTrace( 0, Dbg, ("MajorFunction = %08lx\n", IrpContext->MajorFunction) );
    DebugTrace( 0, Dbg, ("DeviceObject = %08lx\n", DeviceObject) );
    DebugTrace( 0, Dbg, ("MasterIrp = %08lx\n", MasterIrp) );
    DebugTrace( 0, Dbg, ("MultipleIrpCount = %08lx\n", MultipleIrpCount) );
    DebugTrace( 0, Dbg, ("IoRuns = %08lx\n", IoRuns) );

    //
    //  Set up things according to whether this is truely async.
    //

    Wait = (BOOLEAN) FlagOn( IrpContext->State, IRP_CONTEXT_STATE_WAIT );

    Context = IrpContext->Union.NtfsIoContext;

    try {

        //
        //  Initialize Context, for use in Read/Write Multiple Asynch.
        //

        Context->MasterIrp = MasterIrp;

        //
        //  Iterate through the runs, doing everything that can fail
        //

        for ( UnwindRunCount = 0;
              UnwindRunCount < MultipleIrpCount;
              UnwindRunCount++ ) {

            //
            //  Create an associated IRP, making sure there is one stack entry for
            //  us, as well.
            //
            //  SavedIrp is NULLed first so the unwind loop in the finally
            //  clause can tell which runs actually got an Irp.
            //

            IoRuns[UnwindRunCount].SavedIrp = NULL;

            Irp = IoMakeAssociatedIrp( MasterIrp, (CCHAR)(DeviceObject->StackSize + 1) );

            if (Irp == NULL) {
                NtfsRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES, NULL, NULL );
            }

            IoRuns[UnwindRunCount].SavedIrp = Irp;

            //
            //  Allocate and build a partial Mdl for the request.
            //

            Mdl = IoAllocateMdl( (PCHAR)MasterIrp->UserBuffer +
                                 IoRuns[UnwindRunCount].BufferOffset,
                                 IoRuns[UnwindRunCount].ByteCount,
                                 FALSE,
                                 FALSE,
                                 Irp );

            if (Mdl == NULL) {
                NtfsRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES, NULL, NULL );
            }

            //
            //  Sanity Check
            //

            ASSERT( Mdl == Irp->MdlAddress );

            IoBuildPartialMdl( MasterIrp->MdlAddress,
                               Mdl,
                               (PCHAR)MasterIrp->UserBuffer +
                               IoRuns[UnwindRunCount].BufferOffset,
                               IoRuns[UnwindRunCount].ByteCount );

            //
            //  Get the first IRP stack location in the associated Irp
            //

            IoSetNextIrpStackLocation( Irp );
            IrpSp = IoGetCurrentIrpStackLocation( Irp );

            //
            //  Setup the Stack location to describe our read.
            //

            IrpSp->MajorFunction = IrpContext->MajorFunction;
            IrpSp->Parameters.Read.Length = IoRuns[UnwindRunCount].ByteCount;
            IrpSp->Parameters.Read.ByteOffset.QuadPart = IoRuns[UnwindRunCount].StartingVbo;

            //
            //  If this Irp is the result of a WriteThough operation,
            //  tell the device to write it through.
            //

            if (FlagOn(IrpContext->State, IRP_CONTEXT_STATE_WRITE_THROUGH)) {
                SetFlag( IrpSp->Flags, SL_WRITE_THROUGH );
            }

            //
            //  Set up the completion routine address in our stack frame.
            //  Sync callers get the event-signalling routine; async callers
            //  get the routine that completes the master irp itself.
            //

            IoSetCompletionRoutine( Irp,
                                    (Wait
                                     ? &NtfsMultiSyncCompletionRoutine
                                     : &NtfsMultiAsyncCompletionRoutine),
                                    Context,
                                    TRUE,
                                    TRUE,
                                    TRUE );

            //
            //  Setup the next IRP stack location in the associated Irp for the disk
            //  driver beneath us.
            //

            IrpSp = IoGetNextIrpStackLocation( Irp );

            //
            //  Setup the Stack location to do a read from the disk driver.
            //

            IrpSp->MajorFunction = IrpContext->MajorFunction;
            IrpSp->Flags = IrpSpFlags;
            IrpSp->Parameters.Read.Length = IoRuns[UnwindRunCount].ByteCount;
            IrpSp->Parameters.Read.ByteOffset.QuadPart = IoRuns[UnwindRunCount].StartingLbo;

            TotalByteCount += IoRuns[UnwindRunCount].ByteCount;
        }

        //
        //  We only need to set the associated IRP count in the master irp to
        //  make it a master IRP.  But we set the count to one more than our
        //  caller requested, because we do not want the I/O system to complete
        //  the I/O.  We also set our own count.
        //

        Context->IrpCount = MultipleIrpCount;
        MasterIrp->AssociatedIrp.IrpCount = MultipleIrpCount;

        IrpSp = IoGetCurrentIrpStackLocation( MasterIrp );
        IrpSp->Parameters.Read.Length = TotalByteCount;

        if (Wait) {

            MasterIrp->AssociatedIrp.IrpCount += 1;

        } else {

            //
            //  Convert the resource ownership to async before we do the i/o if
            //  we haven't already
            //
            //  The low two bits of the thread id are used as a marker that the
            //  owner has already been converted (see ExSetResourceOwnerPointer
            //  contract), so this runs at most once per context.
            //

            if (IrpContext->Union.NtfsIoContext->Wait.Async.Resource &&
                !FlagOn( IrpContext->Union.NtfsIoContext->Wait.Async.ResourceThreadId, 3 )) {

                ASSERT( NtfsIsSharedResource( IrpContext->Union.NtfsIoContext->Wait.Async.Resource ) == 1 );

                IrpContext->Union.NtfsIoContext->Wait.Async.ResourceThreadId = (ERESOURCE_THREAD)MasterIrp | 3;
                ExSetResourceOwnerPointer( IrpContext->Union.NtfsIoContext->Wait.Async.Resource, (PVOID)IrpContext->Union.NtfsIoContext->Wait.Async.ResourceThreadId );
            }
        }

        //
        //  Now that all the dangerous work is done, issue the Io requests
        //

        for (UnwindRunCount = 0;
             UnwindRunCount < MultipleIrpCount;
             UnwindRunCount++) {

            Irp = IoRuns[UnwindRunCount].SavedIrp;

            //
            //  If IoCallDriver returns an error, it has completed the Irp
            //  and the error will be caught by our completion routines
            //  and dealt with as a normal IO error.
            //

            (VOID)IoCallDriver( DeviceObject, Irp );
        }

    } finally {

        ULONG i;

        DebugUnwind( NtfsMultipleAsync );

        //
        //  Only allocating the spinlock, making the associated Irps
        //  and allocating the Mdls can fail.
        //

        if (AbnormalTermination()) {

            //
            //  Unwind - free every associated Irp (and its Mdl) built so far,
            //  including the one for the run that raised (hence <=).
            //

            for (i = 0; i <= UnwindRunCount; i++) {

                if ((Irp = IoRuns[i].SavedIrp) != NULL) {

                    if (Irp->MdlAddress != NULL) {
                        IoFreeMdl( Irp->MdlAddress );
                    }

                    IoFreeIrp( Irp );
                }
            }
        }

        //
        //  And return to our caller
        //

        DebugTrace( -1, Dbg, ("NtfsMultipleAsync -> VOID\n") );
    }

    return;
}
  5564. //
  5565. // Local support routine
  5566. //
VOID
NtfsSingleAsync (
    IN PIRP_CONTEXT IrpContext,
    IN PDEVICE_OBJECT DeviceObject,
    IN LBO Lbo,
    IN ULONG ByteCount,
    IN PIRP Irp,
    IN UCHAR MajorFunction,
    IN UCHAR IrpSpFlags
    )

/*++

Routine Description:

    This routine reads or writes one or more contiguous sectors from a device
    asynchronously, and is used if there is only one read necessary to
    complete the IRP.  It implements the read by simply filling
    in the next stack frame in the Irp, and passing it on.  The transfer
    occurs to the single buffer originally specified in the user request.

Arguments:

    IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.

    DeviceObject - Supplies the device to read

    Lbo - Supplies the starting Logical Byte Offset to begin reading from

    ByteCount - Supplies the number of bytes to read from the device

    Irp - Supplies the master Irp to associated with the async
        request.

    MajorFunction - IRP_MJ_READ || IRP_MJ_WRITE

    IrpSpFlags - flags to set in the irp stack location for the i/o like write through

Return Value:

    None.

--*/

{
    PIO_STACK_LOCATION IrpSp;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsSingleAsync\n") );
    DebugTrace( 0, Dbg, ("MajorFunction = %08lx\n", IrpContext->MajorFunction) );
    DebugTrace( 0, Dbg, ("DeviceObject = %08lx\n", DeviceObject) );
    DebugTrace( 0, Dbg, ("Lbo = %016I64x\n", Lbo) );
    DebugTrace( 0, Dbg, ("ByteCount = %08lx\n", ByteCount) );
    DebugTrace( 0, Dbg, ("Irp = %08lx\n", Irp) );

    //
    //  Set up the completion routine address in our stack frame.
    //  Sync callers get the event-signalling routine; async callers get the
    //  routine that completes the request.  This must happen before the irp
    //  is handed to the driver below.
    //

    IoSetCompletionRoutine( Irp,
                            (FlagOn( IrpContext->State, IRP_CONTEXT_STATE_WAIT )
                             ? &NtfsSingleSyncCompletionRoutine
                             : &NtfsSingleAsyncCompletionRoutine),
                            IrpContext->Union.NtfsIoContext,
                            TRUE,
                            TRUE,
                            TRUE );

    //
    //  Setup the next IRP stack location in the associated Irp for the disk
    //  driver beneath us.
    //

    IrpSp = IoGetNextIrpStackLocation( Irp );

    //
    //  Setup the Stack location to do a read from the disk driver.
    //

    IrpSp->MajorFunction = MajorFunction;
    IrpSp->Parameters.Read.Length = ByteCount;
    IrpSp->Parameters.Read.ByteOffset.QuadPart = Lbo;
    IrpSp->Flags = IrpSpFlags;

    //
    //  If this Irp is the result of a WriteThough operation,
    //  tell the device to write it through.
    //

    if (FlagOn(IrpContext->State, IRP_CONTEXT_STATE_WRITE_THROUGH)) {
        SetFlag( IrpSp->Flags, SL_WRITE_THROUGH );
    }

    //
    //  Convert the resource ownership to async before we do the i/o if
    //  we haven't already
    //
    //  The low two bits of the thread id mark an already-converted owner
    //  (ExSetResourceOwnerPointer convention), so this happens at most once.
    //

    if (!FlagOn( IrpContext->State, IRP_CONTEXT_STATE_WAIT ) &&
        IrpContext->Union.NtfsIoContext->Wait.Async.Resource &&
        !FlagOn( IrpContext->Union.NtfsIoContext->Wait.Async.ResourceThreadId, 3 )) {

        ASSERT( NtfsIsSharedResource( IrpContext->Union.NtfsIoContext->Wait.Async.Resource ) == 1 );

        IrpContext->Union.NtfsIoContext->Wait.Async.ResourceThreadId = (ERESOURCE_THREAD)Irp | 3;
        ExSetResourceOwnerPointer( IrpContext->Union.NtfsIoContext->Wait.Async.Resource, (PVOID)IrpContext->Union.NtfsIoContext->Wait.Async.ResourceThreadId );
    }

    //
    //  Issue the Io request
    //

    //
    //  If IoCallDriver returns an error, it has completed the Irp
    //  and the error will be caught by our completion routines
    //  and dealt with as a normal IO error.
    //

    (VOID)IoCallDriver( DeviceObject, Irp );

    //
    //  And return to our caller
    //

    DebugTrace( -1, Dbg, ("NtfsSingleAsync -> VOID\n") );

    return;
}
  5661. //
  5662. // Local support routine
  5663. //
  5664. VOID
  5665. NtfsWaitSync (
  5666. IN PIRP_CONTEXT IrpContext
  5667. )
  5668. /*++
  5669. Routine Description:
  5670. This routine waits for one or more previously started I/O requests
  5671. from the above routines, by simply waiting on the event.
  5672. Arguments:
  5673. Context - Pointer to Context used in previous call(s) to be waited on.
  5674. Return Value:
  5675. None
  5676. --*/
  5677. {
  5678. PAGED_CODE();
  5679. DebugTrace( +1, Dbg, ("NtfsWaitSync: Entered\n") );
  5680. KeWaitForSingleObject( &IrpContext->Union.NtfsIoContext->Wait.SyncEvent,
  5681. Executive,
  5682. KernelMode,
  5683. FALSE,
  5684. NULL );
  5685. KeClearEvent( &IrpContext->Union.NtfsIoContext->Wait.SyncEvent );
  5686. DebugTrace( -1, Dbg, ("NtfsWaitSync -> VOID\n") );
  5687. }
  5688. //
  5689. // Local support routine.
  5690. //
NTSTATUS
NtfsMultiAsyncCompletionRoutine (
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp,
    IN PVOID Contxt
    )

/*++

Routine Description:

    This is the completion routine for all asynchronous reads and writes
    started via NtfsMultipleAsynch.  It must synchronize its operation for
    multiprocessor environments with itself on all other processors, via
    a spin lock found via the Context parameter.

    The completion routine has has the following responsibilities:

        If the individual request was completed with an error, then
        this completion routine must see if this is the first error
        (essentially by Vbo), and if so it must correctly reduce the
        byte count and remember the error status in the Context.

        When the last associated irp completes (the shared IrpCount
        reaches zero), it finishes the master irp: on success it fixes
        up the IoStatus and file object flags; on a hot-fixable failure
        of a non-paging request it posts the whole request to the Fsp
        instead of completing it.

Arguments:

    DeviceObject - Pointer to the file system device object.

    Irp - Pointer to the associated Irp which is being completed.  (This
        Irp will no longer be accessible after this routine returns.)

    Contxt - The context parameter which was specified for all of
        the multiple asynch I/O requests for this MasterIrp.

Return Value:

    STATUS_SUCCESS to let the I/O system complete the associated irp, or
    STATUS_MORE_PROCESSING_REQUIRED when we posted the master irp and must
    tear the associated irp down ourselves.

--*/

{
    PNTFS_IO_CONTEXT Context = Contxt;
    PIRP MasterIrp = Context->MasterIrp;
    PIO_STACK_LOCATION IrpSp = IoGetCurrentIrpStackLocation( Irp );
    BOOLEAN CompleteRequest = TRUE;

    UNREFERENCED_PARAMETER( DeviceObject );

    DebugTrace( +1, Dbg, ("NtfsMultiAsyncCompletionRoutine, Context = %08lx\n", Context) );

    //
    //  If we got an error (or verify required), remember it in the Irp
    //
    //  (MasterIrp was already captured in the initializer above; this
    //  assignment is redundant but harmless.)
    //

    MasterIrp = Context->MasterIrp;

    if (!NT_SUCCESS( Irp->IoStatus.Status )) {

        MasterIrp->IoStatus = Irp->IoStatus;

        //
        //  Track any lower drivers that fail a paging file operation insuff. resources
        //

        if ((Irp->IoStatus.Status == STATUS_INSUFFICIENT_RESOURCES) &&
            FlagOn( Context->Flags, NTFS_IO_CONTEXT_PAGING_IO ) &&
            (IrpSp->MajorFunction == IRP_MJ_READ)) {

            NtfsFailedHandedOffPagingReads += 1;
        }
    }

    //
    //  Decrement IrpCount and see if it goes to zero - only the thread that
    //  takes it to zero performs the master-irp finishing work below.
    //

    if (InterlockedDecrement( &Context->IrpCount ) == 0) {

        PERESOURCE Resource;
        ERESOURCE_THREAD ResourceThreadId;

        //
        //  Capture the resource values out of the context to prevent
        //  colliding with the Fsp thread if we post this.
        //

        Resource = Context->Wait.Async.Resource;
        ResourceThreadId = Context->Wait.Async.ResourceThreadId;

        //
        //  Mark the master Irp pending
        //

        IoMarkIrpPending( MasterIrp );

        //
        //  If this request was successful or we posted an async paging io
        //  request then complete this irp.
        //

        if (FT_SUCCESS( MasterIrp->IoStatus.Status )) {

            //
            //  Do any necc. zeroing for read requests - if it fails then just complete
            //  the irp ZeroEndOfBuffer will put the error into the irp iostatus
            //

            if (NtfsZeroEndOfBuffer( MasterIrp, Context )) {

                MasterIrp->IoStatus.Information =
                    Context->Wait.Async.RequestedByteCount;

                //
                //  Go ahead an mark the File object to indicate that we performed
                //  either a read or write if this is not a paging io operation.
                //

                if (!FlagOn( Context->Flags, NTFS_IO_CONTEXT_PAGING_IO ) &&
                    (IrpSp->FileObject != NULL)) {

                    if (IrpSp->MajorFunction == IRP_MJ_READ) {

                        SetFlag( IrpSp->FileObject->Flags, FO_FILE_FAST_IO_READ );

                    } else {

                        SetFlag( IrpSp->FileObject->Flags, FO_FILE_MODIFIED );
                    }
                }
            }

        //
        //  If we had an error and will hot fix, we simply post the entire
        //  request.
        //

        } else if (!FlagOn( Context->Flags, NTFS_IO_CONTEXT_PAGING_IO )) {

            PIRP_CONTEXT IrpContext = NULL;

            //
            //  We need an IrpContext and then have to post the request.
            //  Use a try_except in case we fail the request for an IrpContext.
            //

            CompleteRequest = FALSE;

            try {

                NtfsInitializeIrpContext( MasterIrp, TRUE, &IrpContext );
                IrpContext->Union.NtfsIoContext = Context;
                SetFlag( IrpContext->State, IRP_CONTEXT_STATE_ALLOC_IO_CONTEXT );

                NtfsPostRequest( IrpContext, MasterIrp );

            } except( EXCEPTION_EXECUTE_HANDLER ) {

                //
                //  Just give up.
                //

                CompleteRequest = TRUE;

                if (IrpContext) {

                    //
                    //  We cleanup the context below.
                    //

                    IrpContext->Union.NtfsIoContext = NULL;
                    NtfsCleanupIrpContext( IrpContext, TRUE );
                }
            }
        }

        //
        //  Now release the resource - using the captured values, since the
        //  posted Fsp thread may already own the context.
        //

        if (Resource != NULL) {

            ExReleaseResourceForThreadLite( Resource,
                                            ResourceThreadId );
        }

        if (CompleteRequest) {

            //
            //  and finally, free the context record.
            //

            ExFreeToNPagedLookasideList( &NtfsIoContextLookasideList, Context );
        }
    }

    DebugTrace( -1, Dbg, ("NtfsMultiAsyncCompletionRoutine\n") );

    //
    //  Return more processing required if we don't want the Irp to go away.
    //

    if (CompleteRequest) {

        return STATUS_SUCCESS;

    } else {

        //
        //  We need to cleanup the associated Irp and its Mdl.
        //

        IoFreeMdl( Irp->MdlAddress );
        IoFreeIrp( Irp );

        return STATUS_MORE_PROCESSING_REQUIRED;
    }
}
  5842. //
  5843. // Local support routine.
  5844. //
NTSTATUS
NtfsMultiSyncCompletionRoutine (
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp,
    IN PVOID Contxt
    )

/*++

Routine Description:

    This is the completion routine for all synchronous reads and writes
    started via NtfsMultipleAsynch.  It must synchronize its operation for
    multiprocessor environments with itself on all other processors, via
    a spin lock found via the Context parameter.

    The completion routine has has the following responsibilities:

        If the individual request was completed with an error, then
        this completion routine must see if this is the first error
        (essentially by Vbo), and if so it must correctly reduce the
        byte count and remember the error status in the Context.

        When the shared IrpCount is decremented to zero (i.e. this was
        the last outstanding associated request), it sets the event in
        the Context parameter to signal the waiting caller that all of
        the asynch requests are done.

Arguments:

    DeviceObject - Pointer to the file system device object.

    Irp - Pointer to the associated Irp which is being completed.  (This
        Irp will no longer be accessible after this routine returns.)

    Contxt - The context parameter which was specified for all of
        the multiple asynch I/O requests for this MasterIrp.

Return Value:

    The routine returns STATUS_MORE_PROCESSING_REQUIRED so that we can
    immediately complete the Master Irp without being in a race condition
    with the IoCompleteRequest thread trying to decrement the IrpCount in
    the Master Irp.

--*/

{
    PNTFS_IO_CONTEXT Context = Contxt;
    PIRP MasterIrp = Context->MasterIrp;

    DebugTrace( +1, Dbg, ("NtfsMultiSyncCompletionRoutine, Context = %08lx\n", Context) );

    //
    //  If we got an error (or verify required), remember it in the Irp
    //
    //  (MasterIrp was already captured in the initializer above; this
    //  assignment is redundant but harmless.)
    //

    MasterIrp = Context->MasterIrp;

    if (!NT_SUCCESS( Irp->IoStatus.Status )) {

        MasterIrp->IoStatus = Irp->IoStatus;

        //
        //  Track any lower drivers that fail a paging file operation insuff. resources
        //

        if ((Irp->IoStatus.Status == STATUS_INSUFFICIENT_RESOURCES) &&
            FlagOn( Context->Flags, NTFS_IO_CONTEXT_PAGING_IO ) &&
            (IoGetCurrentIrpStackLocation( Irp )->MajorFunction == IRP_MJ_READ)) {

            NtfsFailedHandedOffPagingReads += 1;
        }
    }

    //
    //  We must do this here since IoCompleteRequest won't get a chance
    //  on this associated Irp.
    //

    IoFreeMdl( Irp->MdlAddress );
    IoFreeIrp( Irp );

    //
    //  Wake the waiter in NtfsWaitSync once the last associated irp is done.
    //

    if (InterlockedDecrement(&Context->IrpCount) == 0) {

        KeSetEvent( &Context->Wait.SyncEvent, 0, FALSE );
    }

    DebugTrace( -1, Dbg, ("NtfsMultiSyncCompletionRoutine -> STATUS_MORE_PROCESSING_REQUIRED\n") );

    return STATUS_MORE_PROCESSING_REQUIRED;

    UNREFERENCED_PARAMETER( DeviceObject );
}
  5909. //
  5910. // Local support routine.
  5911. //
NTSTATUS
NtfsSingleAsyncCompletionRoutine (
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp,
    IN PVOID Contxt
    )

/*++

Routine Description:

    This is the completion routine for all asynchronous reads and writes
    started via NtfsSingleAsynch.

    The completion routine has the following responsibilities:

        On success, fill in Irp->IoStatus.Information with the requested
        byte count (after zeroing any tail beyond file size on reads) and
        mark the file object as read-from/modified for non-paging I/O.

        On a non-paging-I/O failure, post the entire request to the Fsp
        so a hot fix can be attempted, transferring ownership of the
        NTFS_IO_CONTEXT to the newly built IrpContext.

        Release the resource (if any) captured in the context and, unless
        the request was posted, free the context back to its lookaside list.

Arguments:

    DeviceObject - Pointer to the file system device object.

    Irp - Pointer to the Irp for this request.  (This Irp will no longer
        be accessible after this routine returns.)

    Contxt - The context parameter which was specified in the call to
        NtfsSingleAsynch.

Return Value:

    STATUS_SUCCESS when the Irp should go on to normal completion;
    STATUS_MORE_PROCESSING_REQUIRED when the request was posted for
    hot fixing and completion must be halted here.

--*/

{
    PNTFS_IO_CONTEXT Context = Contxt;
    PIO_STACK_LOCATION IrpSp = IoGetCurrentIrpStackLocation( Irp );
    BOOLEAN CompleteRequest = TRUE;
    PERESOURCE Resource;
    ERESOURCE_THREAD ResourceThreadId;

    UNREFERENCED_PARAMETER( DeviceObject );

    DebugTrace( +1, Dbg, ("NtfsSingleAsyncCompletionRoutine, Context = %08lx\n", Context) );

    //
    // Capture the resource values out of the context to prevent
    // colliding with the Fsp thread if we post this.  (Once the request
    // is posted the Fsp thread may free the context at any time.)
    //

    Resource = Context->Wait.Async.Resource;
    ResourceThreadId = Context->Wait.Async.ResourceThreadId;

    //
    // Mark the Irp pending
    //

    IoMarkIrpPending( Irp );

    //
    // Fill in the information field correctedly if this worked.
    //

    if (FT_SUCCESS( Irp->IoStatus.Status )) {

        //
        // Zero the difference between filesize and data read if necc. on reads
        // if it fails just complete the irp - ZeroEndOfBuffer will put the error into the
        // irp
        //

        if (NtfsZeroEndOfBuffer( Irp, Context )) {

            Irp->IoStatus.Information = Context->Wait.Async.RequestedByteCount;

            //
            // Go ahead an mark the File object to indicate that we performed
            // either a read or write.  Only done for non-paging I/O that has
            // a file object in the current stack location.
            //

            if (!FlagOn( Context->Flags, NTFS_IO_CONTEXT_PAGING_IO ) &&
                (IrpSp->FileObject != NULL)) {

                if (IrpSp->MajorFunction == IRP_MJ_READ) {

                    SetFlag( IrpSp->FileObject->Flags, FO_FILE_FAST_IO_READ );

                } else {

                    SetFlag( IrpSp->FileObject->Flags, FO_FILE_MODIFIED );
                }
            }
        }

    //
    // If we had an error and will hot fix, we simply post the entire
    // request.
    //

    } else if (!FlagOn( Context->Flags, NTFS_IO_CONTEXT_PAGING_IO )) {

        PIRP_CONTEXT IrpContext = NULL;

        //
        // We need an IrpContext and then have to post the request.
        // Use a try_except in case we fail the request for an IrpContext.
        //

        CompleteRequest = FALSE;

        try {

            NtfsInitializeIrpContext( Irp, TRUE, &IrpContext );

            //
            // Hand the Io context over to the IrpContext; the posted
            // request now owns it, so it must not be freed below.
            //

            IrpContext->Union.NtfsIoContext = Context;
            SetFlag( IrpContext->State, IRP_CONTEXT_STATE_ALLOC_IO_CONTEXT );

            NtfsPostRequest( IrpContext, Irp );

        } except( EXCEPTION_EXECUTE_HANDLER ) {

            //
            // Just give up.
            //

            CompleteRequest = TRUE;

            if (IrpContext) {

                //
                // We cleanup the context below.  Detach it first so the
                // IrpContext cleanup does not free it out from under us.
                //

                IrpContext->Union.NtfsIoContext = NULL;

                NtfsCleanupIrpContext( IrpContext, TRUE );
            }
        }

    } else if ((Irp->IoStatus.Status == STATUS_INSUFFICIENT_RESOURCES) &&
               (IrpSp->MajorFunction == IRP_MJ_READ)) {

        //
        // Track any lower drivers that fail a paging file operation insuff. resources
        //

        NtfsFailedHandedOffPagingReads += 1;
    }

    //
    // Now release the resource, using the thread id captured above since
    // this completion routine may run in an arbitrary thread context.
    //

    if (Resource != NULL) {

        ExReleaseResourceForThreadLite( Resource,
                                        ResourceThreadId );
    }

    //
    // and finally, free the context record (unless ownership was handed
    // to the posted request above).
    //

    DebugTrace( -1, Dbg, ("NtfsSingleAsyncCompletionRoutine -> STATUS_SUCCESS\n") );

    if (CompleteRequest) {

        ExFreeToNPagedLookasideList( &NtfsIoContextLookasideList, Context );
        return STATUS_SUCCESS;

    } else {

        return STATUS_MORE_PROCESSING_REQUIRED;
    }
}
  6032. //
  6033. // Local support routine.
  6034. //
  6035. NTSTATUS
  6036. NtfsSingleSyncCompletionRoutine (
  6037. IN PDEVICE_OBJECT DeviceObject,
  6038. IN PIRP Irp,
  6039. IN PVOID Contxt
  6040. )
  6041. /*++
  6042. Routine Description:
  6043. This is the completion routine for all reads and writes started via
  6044. NtfsSingleAsynch.
  6045. The completion routine has has the following responsibilities:
  6046. Copy the I/O status from the Irp to the Context, since the Irp
  6047. will no longer be accessible.
  6048. It sets the event in the Context parameter to signal the caller
  6049. that all of the asynch requests are done.
  6050. Arguments:
  6051. DeviceObject - Pointer to the file system device object.
  6052. Irp - Pointer to the Irp for this request. (This Irp will no longer
  6053. be accessible after this routine returns.)
  6054. Contxt - The context parameter which was specified in the call to
  6055. NtfsSingleAsynch.
  6056. Return Value:
  6057. The routine returns STATUS_MORE_PROCESSING_REQUIRED so that we can
  6058. immediately complete the Master Irp without being in a race condition
  6059. with the IoCompleteRequest thread trying to decrement the IrpCount in
  6060. the Master Irp.
  6061. --*/
  6062. {
  6063. PNTFS_IO_CONTEXT Context = Contxt;
  6064. PIO_STACK_LOCATION IrpSp = IoGetCurrentIrpStackLocation( Irp );
  6065. //
  6066. // Track any lower drivers that fail a paging file operation insuff. resources
  6067. //
  6068. if ((Irp->IoStatus.Status == STATUS_INSUFFICIENT_RESOURCES) &&
  6069. FlagOn( Context->Flags, NTFS_IO_CONTEXT_PAGING_IO ) &&
  6070. (IrpSp->MajorFunction == IRP_MJ_READ)) {
  6071. NtfsFailedHandedOffPagingReads += 1;
  6072. }
  6073. KeSetEvent( &Context->Wait.SyncEvent, 0, FALSE );
  6074. DebugTrace( -1, Dbg, ("NtfsSingleCompletionRoutine -> STATUS_MORE_PROCESSING_REQUIRED\n") );
  6075. return STATUS_MORE_PROCESSING_REQUIRED;
  6076. UNREFERENCED_PARAMETER( DeviceObject );
  6077. }
  6078. //
  6079. // Local support routine.
  6080. //
  6081. NTSTATUS
  6082. NtfsPagingFileCompletionRoutine (
  6083. IN PDEVICE_OBJECT DeviceObject,
  6084. IN PIRP Irp,
  6085. IN PVOID MasterIrp
  6086. )
  6087. /*++
  6088. Routine Description:
  6089. This is the completion routine for all reads and writes started via
  6090. NtfsPagingFileIo.
  6091. The completion routine has has the following responsibility:
  6092. Since the individual request was completed with an error,
  6093. this completion routine must stuff it into the master irp.
  6094. Arguments:
  6095. DeviceObject - Pointer to the file system device object.
  6096. Irp - Pointer to the associated Irp which is being completed. (This
  6097. Irp will no longer be accessible after this routine returns.)
  6098. MasterIrp - Pointer to the master Irp. The low order bit in this value will
  6099. be set if a higher level call is performing a hot-fix.
  6100. Return Value:
  6101. Always returns STATUS_SUCCESS.
  6102. --*/
  6103. {
  6104. PIO_STACK_LOCATION IrpSp = IoGetCurrentIrpStackLocation( Irp );
  6105. UNREFERENCED_PARAMETER( DeviceObject );
  6106. DebugTrace( +1, Dbg, ("NtfsPagingFileCompletionRoutine, MasterIrp = %08lx\n", MasterIrp) );
  6107. ASSERT( (Irp->IoStatus.Status != STATUS_INSUFFICIENT_RESOURCES) ||
  6108. (IrpSp->Parameters.Read.Length > PAGE_SIZE) );
  6109. if (!FT_SUCCESS( Irp->IoStatus.Status )) {
  6110. //
  6111. // Track any lower drivers that fail a paging file operation insuff. resources
  6112. //
  6113. if (Irp->IoStatus.Status == STATUS_INSUFFICIENT_RESOURCES) {
  6114. NtfsFailedHandedOffPagingFileOps += 1;
  6115. }
  6116. if (!FsRtlIsTotalDeviceFailure( Irp->IoStatus.Status ) &&
  6117. (Irp->IoStatus.Status != STATUS_VERIFY_REQUIRED) &&
  6118. !FlagOn( (ULONG_PTR) MasterIrp, 0x1 )) {
  6119. if (Irp->IoStatus.Status == STATUS_FT_READ_RECOVERY_FROM_BACKUP) {
  6120. //
  6121. // If the volume manager has actually completed the read
  6122. // from a backup, there's little point in telling MM about that.
  6123. //
  6124. Irp->IoStatus.Status = STATUS_SUCCESS;
  6125. }
  6126. //
  6127. // We don't want to try to hotfix READ errors on the paging file
  6128. // because of deadlock possibilities with MM. Instead we'll just
  6129. // return the error for MM to deal with. Chances are that
  6130. // MM (eg. MiWaitForInPageComplete) will bugcheck anyway,
  6131. // but it's still nicer than walking right into the deadlock.
  6132. // We also only asynchronously fix write errors and just return the error
  6133. // back for mm to retry elsewhere
  6134. //
  6135. if (IrpSp->MajorFunction != IRP_MJ_READ) {
  6136. VBO BadVbo;
  6137. BadVbo = IrpSp->Parameters.Read.Key;
  6138. NtfsPostHotFix( Irp,
  6139. &BadVbo,
  6140. IrpSp->Parameters.Read.ByteOffset.QuadPart,
  6141. IrpSp->Parameters.Read.Length,
  6142. FALSE );
  6143. }
  6144. }
  6145. //
  6146. // If we got an error (or verify required), remember it in the Irp
  6147. //
  6148. ClearFlag( (ULONG_PTR) MasterIrp, 0x1 );
  6149. ((PIRP) MasterIrp)->IoStatus = Irp->IoStatus;
  6150. }
  6151. DebugTrace( -1, Dbg, ("NtfsPagingFileCompletionRoutine => (STATUS_SUCCESS)\n") );
  6152. return STATUS_SUCCESS;
  6153. }
  6154. //
  6155. // Local support routine.
  6156. //
  6157. NTSTATUS
  6158. NtfsPagingFileNoAllocCompletionRoutine (
  6159. IN PDEVICE_OBJECT DeviceObject,
  6160. IN PIRP Irp,
  6161. IN PVOID Context
  6162. )
  6163. /*++
  6164. Routine Description:
  6165. This is the completion routine for all reads and writes started via
  6166. NtfsPagingFileIoNoAllocation.
  6167. The completion routine signals back to the main routine and stops processing
  6168. Arguments:
  6169. DeviceObject - Pointer to the file system device object.
  6170. Irp - Pointer to the associated Irp which is being completed. (This
  6171. Irp will no longer be accessible after this routine returns.)
  6172. Context - Actually the event to signal
  6173. Return Value:
  6174. Always returns STATUS_SUCCESS.
  6175. --*/
  6176. {
  6177. PKEVENT Event = (PKEVENT) Context;
  6178. PIO_STACK_LOCATION IrpSp = IoGetCurrentIrpStackLocation( Irp );
  6179. ASSERT( (Irp->IoStatus.Status != STATUS_INSUFFICIENT_RESOURCES) ||
  6180. (IrpSp->Parameters.Read.Length > PAGE_SIZE) );
  6181. //
  6182. // Track any lower drivers that fail a paging file operation insuff. resources
  6183. //
  6184. if (Irp->IoStatus.Status == STATUS_INSUFFICIENT_RESOURCES) {
  6185. NtfsFailedHandedOffPagingFileOps += 1;
  6186. }
  6187. KeSetEvent( Event, IO_NO_INCREMENT, FALSE );
  6188. return STATUS_MORE_PROCESSING_REQUIRED;
  6189. UNREFERENCED_PARAMETER( DeviceObject );
  6190. UNREFERENCED_PARAMETER( Irp );
  6191. }
  6192. //
  6193. // Local support routine
  6194. //
VOID
NtfsSingleNonAlignedSync (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN PSCB Scb,
    IN PUCHAR Buffer,
    IN VBO Vbo,
    IN LBO Lbo,
    IN ULONG ByteCount,
    IN PIRP Irp
    )

/*++

Routine Description:

    This routine reads or writes one or more contiguous sectors from a device
    Synchronously, and does so to a buffer that must come from non paged
    pool.  It saves a pointer to the Irp's original Mdl, and creates a new
    one describing the given buffer.  It implements the read by simply filling
    in the next stack frame in the Irp, and passing it on.  The transfer
    occurs to the single buffer originally specified in the user request.

    On an I/O error the routine attempts a hot fix via NtfsFixDataError
    before cleaning up.  The original Mdl is always restored on exit.

    Currently, only reads are supported.

Arguments:

    IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.

    Vcb - Supplies the device to read

    Scb - Supplies the Scb to read

    Buffer - Supplies a buffer from non-paged pool.

    Vbo - Supplies the starting Virtual Block Offset to begin reading from

    Lbo - Supplies the starting Logical Block Offset to begin reading from

    ByteCount - Supplies the number of bytes to read from the device

    Irp - Supplies the master Irp to associated with the async
          request.  (The synchronization context lives in
          IrpContext->Union.NtfsIoContext.)

Return Value:

    None.  Raises STATUS_INSUFFICIENT_RESOURCES if the temporary Mdl
    cannot be allocated.

--*/

{
    PIO_STACK_LOCATION IrpSp;
    PMDL Mdl;
    PMDL SavedMdl;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsSingleNonAlignedSync\n") );
    DebugTrace( 0, Dbg, ("MajorFunction = %08lx\n", IrpContext->MajorFunction) );
    DebugTrace( 0, Dbg, ("Vcb = %08lx\n", Vcb) );
    DebugTrace( 0, Dbg, ("Buffer = %08lx\n", Buffer) );
    DebugTrace( 0, Dbg, ("Lbo = %016I64x\n", Lbo) );
    DebugTrace( 0, Dbg, ("ByteCount = %08lx\n", ByteCount) );
    DebugTrace( 0, Dbg, ("Irp = %08lx\n", Irp) );

    //
    // Create a new Mdl describing the buffer, saving the current one in the
    // Irp
    //

    SavedMdl = Irp->MdlAddress;

    Irp->MdlAddress = 0;

    Mdl = IoAllocateMdl( Buffer,
                         ByteCount,
                         FALSE,
                         FALSE,
                         Irp );

    if (Mdl == NULL) {

        //
        // Put the original Mdl back before raising so the Irp is intact.
        //

        Irp->MdlAddress = SavedMdl;

        NtfsRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES, NULL, NULL );
    }

    //
    // Lock the new Mdl in memory.  If the probe raises, undo the Mdl
    // substitution before letting the exception propagate.
    //

    try {

        MmProbeAndLockPages( Mdl, KernelMode, IoWriteAccess );

    } finally {

        if (AbnormalTermination()) {

            IoFreeMdl( Mdl );
            Irp->MdlAddress = SavedMdl;
        }
    }

    //
    // Set up the completion routine address in our stack frame.
    //

    IoSetCompletionRoutine( Irp,
                            &NtfsSingleSyncCompletionRoutine,
                            IrpContext->Union.NtfsIoContext,
                            TRUE,
                            TRUE,
                            TRUE );

    //
    // Setup the next IRP stack location in the associated Irp for the disk
    // driver beneath us.
    //

    IrpSp = IoGetNextIrpStackLocation( Irp );

    //
    // Setup the Stack location to do a read from the disk driver.
    //

    IrpSp->MajorFunction = IrpContext->MajorFunction;
    IrpSp->Parameters.Read.Length = ByteCount;
    IrpSp->Parameters.Read.ByteOffset.QuadPart = Lbo;

    //
    // Initialize the Kernel Event in the context structure so that the
    // caller can wait on it.  Set remaining pointers to NULL.
    //

    KeInitializeEvent( &IrpContext->Union.NtfsIoContext->Wait.SyncEvent,
                       NotificationEvent,
                       FALSE );

    //
    // Issue the read request
    //
    // If IoCallDriver returns an error, it has completed the Irp
    // and the error will be caught by our completion routines
    // and dealt with as a normal IO error.
    //

    try {

        (VOID)IoCallDriver( Vcb->TargetDeviceObject, Irp );

        NtfsWaitSync( IrpContext );

        //
        // See if we need to do a hot fix.
        //

        if (!FT_SUCCESS(Irp->IoStatus.Status)) {

            IO_RUN IoRun;

            //
            // Describe this transfer as a single run for the fixer.
            //

            IoRun.StartingVbo = Vbo;
            IoRun.StartingLbo = Lbo;
            IoRun.BufferOffset = 0;
            IoRun.ByteCount = ByteCount;
            IoRun.SavedIrp = NULL;

            //
            // Try to fix the problem
            //

            NtfsFixDataError( IrpContext,
                              Scb,
                              Vcb->TargetDeviceObject,
                              Irp,
                              1,
                              &IoRun,
                              0 );
        }

    } finally {

        //
        // Unlock and free our temporary Mdl, then restore the caller's
        // original Mdl; this order must be preserved.
        //

        MmUnlockPages( Mdl );
        IoFreeMdl( Mdl );
        Irp->MdlAddress = SavedMdl;
    }

    //
    // And return to our caller
    //

    DebugTrace( -1, Dbg, ("NtfsSingleNonAlignedSync -> VOID\n") );

    return;
}
  6336. //
  6337. // Local support routine
  6338. //
  6339. NTSTATUS
  6340. NtfsEncryptBuffers (
  6341. IN PIRP_CONTEXT IrpContext,
  6342. IN PIRP Irp,
  6343. IN PSCB Scb,
  6344. IN VBO StartingVbo,
  6345. IN ULONG NumberRuns,
  6346. IN PCOMPRESSION_CONTEXT CompressionContext
  6347. )
  6348. /*++
  6349. Routine Description:
  6350. This routine is called by NtfsPrepareBuffers during a write
  6351. operation on an encrypted file. It allocates a compression
  6352. buffer if necessary and calls the encyrption callout routine
  6353. to compress each run of data in the CompressionContext.
  6354. Arguments:
  6355. Irp - Supplies the requesting Irp.
  6356. Scb - Supplies the stream file to act on.
  6357. StartingVbo - The starting point for the operation.
  6358. ByteCount - The lengh of the operation.
  6359. NumberRuns - The size of the IoRuns array in the compression context.
  6360. CompressionContext - Supplies the CompressionContext for this stream.
  6361. Return Value:
  6362. None.
  6363. --*/
  6364. {
  6365. ULONG Run;
  6366. ULONG BufferSize;
  6367. LARGE_INTEGER OffsetWithinFile;
  6368. PIO_RUN IoRun;
  6369. PUCHAR SourceBuffer;
  6370. PUCHAR DestinationBuffer;
  6371. NTSTATUS Status;
  6372. ASSERT( NumberRuns > 0 );
  6373. ASSERT( IrpContext->MajorFunction == IRP_MJ_WRITE );
  6374. //
  6375. // These functions are just for debugging purposes. We need to call them
  6376. // somewhere so the compiler doesn't optimize them out as unreferenced functions.
  6377. //
  6378. #ifdef EFSDBG
  6379. if (CompressionContext->SystemBufferOffset != 0) {
  6380. DebugTrace( 0, Dbg, ("\nEncryptBuffers: SystemBufferOffset = %x", CompressionContext->SystemBufferOffset) );
  6381. }
  6382. #endif
  6383. //
  6384. // If we have not already mapped the user buffer, then do so.
  6385. //
  6386. if (CompressionContext->SystemBuffer == NULL) {
  6387. CompressionContext->SystemBuffer = NtfsMapUserBuffer( Irp, NormalPagePriority );
  6388. }
  6389. //
  6390. // For uncompressed files, we may not have a buffer allocated yet.
  6391. // The buffer needs to be big enough for this entire transfer.
  6392. // It must be big enough to go from StartingVbo for this
  6393. // transfer to the end of the last iorun for this transfer.
  6394. //
  6395. BufferSize = (ULONG) ((CompressionContext->IoRuns[NumberRuns-1].StartingVbo +
  6396. CompressionContext->IoRuns[NumberRuns-1].ByteCount) -
  6397. StartingVbo);
  6398. if (BufferSize > LARGE_BUFFER_SIZE) {
  6399. BufferSize = LARGE_BUFFER_SIZE;
  6400. }
  6401. //
  6402. // If the data already got transformed, the buffer should still be allocated.
  6403. //
  6404. ASSERT( (!CompressionContext->DataTransformed) ||
  6405. (CompressionContext->CompressionBuffer != NULL) );
  6406. //
  6407. // This function conveniently only allocates/reallocates the buffer
  6408. // if there is not one allocated yet or if the existing one is not
  6409. // big enough.
  6410. //
  6411. NtfsAllocateCompressionBuffer( IrpContext,
  6412. Scb,
  6413. Irp,
  6414. CompressionContext,
  6415. &BufferSize );
  6416. //
  6417. // If the data has already be transformed into the compression buffer, for
  6418. // a compressed or sparse file, for instance, we want to work with the
  6419. // transformed data. Otherwise, we need to pluck it directly out of the
  6420. // system buffer.
  6421. //
  6422. if (CompressionContext->DataTransformed) {
  6423. SourceBuffer = DestinationBuffer = CompressionContext->CompressionBuffer;
  6424. } else {
  6425. SourceBuffer = Add2Ptr( CompressionContext->SystemBuffer, CompressionContext->SystemBufferOffset );
  6426. DestinationBuffer = CompressionContext->CompressionBuffer;
  6427. }
  6428. //
  6429. // Now look at each run of real data heading to the disk and
  6430. // let the encryption driver encrypt it.
  6431. //
  6432. for (Run = 0; Run < NumberRuns; Run++) {
  6433. IoRun = &CompressionContext->IoRuns[Run];
  6434. OffsetWithinFile.QuadPart = IoRun->StartingVbo;
  6435. Status = NtfsData.EncryptionCallBackTable.BeforeWriteProcess( Add2Ptr(SourceBuffer, IoRun->BufferOffset),
  6436. Add2Ptr(DestinationBuffer, IoRun->BufferOffset),
  6437. &OffsetWithinFile,
  6438. IoRun->ByteCount,
  6439. Scb->EncryptionContext);
  6440. if (!NT_SUCCESS( Status )) {
  6441. return Status;
  6442. }
  6443. }
  6444. return STATUS_SUCCESS;
  6445. }
  6446. VOID
  6447. NtfsFixDataError (
  6448. IN PIRP_CONTEXT IrpContext,
  6449. IN PSCB Scb,
  6450. IN PDEVICE_OBJECT DeviceObject,
  6451. IN PIRP MasterIrp,
  6452. IN ULONG MultipleIrpCount,
  6453. IN PIO_RUN IoRuns,
  6454. IN UCHAR IrpSpFlags
  6455. )
  6456. /*
  6457. Routine Description:
  6458. This routine is called when a read error, write error, or Usa error
  6459. is received when doing noncached I/O on a stream. It attempts to
  6460. recover from Usa errors if FT is present. For bad clusters it attempts
  6461. to isolate the error to one or more bad clusters, for which hot fix
  6462. requests are posted.
  6463. Arguments:
  6464. Scb - Supplies the Scb for the stream which got the error
  6465. DeviceObject - Supplies the Device Object for the stream
  6466. MasterIrp - Supplies the original master Irp for the failing read or write
  6467. MultipleIrpCount - Supplies the number of runs in which the current
  6468. was broken into at the time the error occured.
  6469. IoRuns - Supplies an array describing the runs being accessed at the
  6470. time of the error
  6471. IrpSpFlags - flags to set in irp stack location for the i/o like write_through
  6472. Return Value:
  6473. None
  6474. -*/
  6475. {
  6476. ULONG RunNumber, FtCase;
  6477. ULONG ByteOffset = MAXULONG;
  6478. ULONG ClusterMask;
  6479. ULONG ClustersToRecover;
  6480. ULONG UsaBlockSize;
  6481. PIO_STACK_LOCATION IrpSp;
  6482. PVCB Vcb = Scb->Vcb;
  6483. ULONG BytesPerCluster = Vcb->BytesPerCluster;
  6484. NTSTATUS FinalStatus = STATUS_SUCCESS;
  6485. ULONG AlignedRunNumber = 0;
  6486. ULONG AlignedByteOffset = 0;
  6487. NTSTATUS IrpStatus = MasterIrp->IoStatus.Status;
  6488. PTOP_LEVEL_CONTEXT TopLevelContext;
  6489. PNTFS_IO_CONTEXT Context = IrpContext->Union.NtfsIoContext;
  6490. PMULTI_SECTOR_HEADER MultiSectorHeader;
  6491. UCHAR Buffer[sizeof( MDL ) + sizeof( PFN_NUMBER ) * 2];
  6492. PMDL PartialMdl = (PMDL) Buffer;
  6493. LONGLONG LlTemp1;
  6494. LONGLONG LlTemp2;
  6495. ULONG Priority = NormalPagePriority;
  6496. BOOLEAN SecondaryAvailable;
  6497. BOOLEAN FixingUsaError;
  6498. BOOLEAN FinalPass;
  6499. BOOLEAN ReservedMapping = FALSE;
  6500. PAGED_CODE();
  6501. //
  6502. // First, if the error we got indicates a total device failure, then we
  6503. // just report it rather than trying to hot fix every sector on the volume!
  6504. // Also, do not do hot fix for the read ahead thread, because that is a
  6505. // good way to conceal errors from the App.
  6506. //
  6507. if (FsRtlIsTotalDeviceFailure( MasterIrp->IoStatus.Status ) ||
  6508. (Scb->CompressionUnit != 0)) {
  6509. return;
  6510. }
  6511. //
  6512. // Get out if we got an error and the current thread is doing read ahead.
  6513. //
  6514. if (!NT_SUCCESS( MasterIrp->IoStatus.Status ) && NtfsIsReadAheadThread()) {
  6515. return;
  6516. }
  6517. //
  6518. // Also get out if the top level request came from the fast io path.
  6519. //
  6520. TopLevelContext = NtfsGetTopLevelContext();
  6521. if (TopLevelContext->SavedTopLevelIrp == (PIRP) FSRTL_FAST_IO_TOP_LEVEL_IRP) {
  6522. return;
  6523. }
  6524. //
  6525. // We can't hot fix the mft mirror or the boot file. If we're in here
  6526. // for one of those files, we have to get out now. We'll make sure we
  6527. // aren't trying to hot fix the beginning of the mft itself just before
  6528. // we call NtfsPostHotFix down below.
  6529. //
  6530. ASSERT (Scb != NULL);
  6531. if ((Scb == Vcb->Mft2Scb) ||
  6532. (NtfsEqualMftRef( &Scb->Fcb->FileReference, &BootFileReference ) &&
  6533. (Scb->AttributeTypeCode == $DATA))) {
  6534. return;
  6535. }
  6536. //
  6537. // Determine whether a secondary device is available
  6538. //
  6539. SecondaryAvailable = (BOOLEAN)!FlagOn( Vcb->VcbState, VCB_STATE_NO_SECONDARY_AVAILABLE );
  6540. //
  6541. // Assume that we are recovering from a Usa error, if the MasterIrp has
  6542. // the success status.
  6543. //
  6544. FixingUsaError = FT_SUCCESS( MasterIrp->IoStatus.Status );
  6545. //
  6546. // We cannot fix any Usa errors if there is no secondary. Even if there is
  6547. // a secondary, Usa errors should only occur during restart. If it is not
  6548. // restart we are probably looking at uninitialized data, so don't try to
  6549. // "fix" it.
  6550. //
  6551. if (FixingUsaError &&
  6552. (!SecondaryAvailable || !FlagOn( Vcb->VcbState, VCB_STATE_RESTART_IN_PROGRESS ))) {
  6553. return;
  6554. }
  6555. //
  6556. // If there is no secondary available and this is a user non-cached read then simply
  6557. // return the error. Give this user a chance to re-write the sector himself using
  6558. // non-cached io.
  6559. //
  6560. if (!SecondaryAvailable &&
  6561. (IrpContext->MajorFunction == IRP_MJ_READ) &&
  6562. (FlagOn( MasterIrp->Flags, IRP_PAGING_IO | IRP_NOCACHE ) == IRP_NOCACHE)) {
  6563. return;
  6564. }
  6565. //
  6566. // No hot fixing at all if the volume is read only.
  6567. //
  6568. if (NtfsIsVolumeReadOnly( Vcb )) {
  6569. return;
  6570. }
  6571. //
  6572. // Initialize Context, for use in Read/Write Multiple Asynch.
  6573. //
  6574. ASSERT( Context != NULL );
  6575. Context->MasterIrp = MasterIrp;
  6576. KeInitializeEvent( &Context->Wait.SyncEvent, NotificationEvent, FALSE );
  6577. HotFixTrace(("NtfsFixDataError, MasterIrp: %08lx, MultipleIrpCount: %08lx\n", MasterIrp, MultipleIrpCount));
  6578. HotFixTrace((" IoRuns: %08lx, UsaError: %02lx\n", IoRuns, FixingUsaError));
  6579. HotFixTrace((" Thread: %08lx\n", PsGetCurrentThread()));
  6580. HotFixTrace((" Scb: %08lx BadClusterScb: %08lx\n", Scb, Vcb->BadClusterFileScb));
  6581. //
  6582. // If this is a Usa-protected structure, get the block size now.
  6583. //
  6584. if (FlagOn( Scb->ScbState, SCB_STATE_USA_PRESENT )) {
  6585. //
  6586. // Get the the number of blocks, based on what type of stream it is.
  6587. // First check for Mft or Log file.
  6588. //
  6589. if (Scb->Header.NodeTypeCode == NTFS_NTC_SCB_MFT) {
  6590. ASSERT( (Scb == Vcb->MftScb) || (Scb == Vcb->Mft2Scb) );
  6591. UsaBlockSize = Vcb->BytesPerFileRecordSegment;
  6592. } else if (Scb->Header.NodeTypeCode == NTFS_NTC_SCB_DATA) {
  6593. //
  6594. // For the log file, we will just go a page at a time, which
  6595. // is generally what the log file does. Any USA errors would
  6596. // tend to be only at the logical end of the log file anyway.
  6597. //
  6598. ASSERT( Scb == Vcb->LogFileScb );
  6599. //
  6600. // We need to peek at the page so map it in
  6601. //
  6602. MultiSectorHeader = (PMULTI_SECTOR_HEADER) NtfsMapUserBufferNoRaise( MasterIrp, HighPagePriority );
  6603. //
  6604. // We can't map the user buffer due to low resources - so switch to using the reserved
  6605. // mapping instead
  6606. //
  6607. if (MultiSectorHeader == NULL) {
  6608. ExAcquireFastMutexUnsafe( &Vcb->ReservedMappingMutex );
  6609. ReservedMapping = TRUE;
  6610. MmInitializeMdl( PartialMdl, NULL, 2 * PAGE_SIZE );
  6611. IoBuildPartialMdl( MasterIrp->MdlAddress, PartialMdl, Add2Ptr( MmGetMdlBaseVa( MasterIrp->MdlAddress ), MmGetMdlByteOffset( MasterIrp->MdlAddress )), Vcb->BytesPerSector );
  6612. MultiSectorHeader = (PMULTI_SECTOR_HEADER) MmMapLockedPagesWithReservedMapping( IrpContext->Vcb->ReservedMapping,
  6613. RESERVE_POOL_TAG,
  6614. PartialMdl,
  6615. MmCached );
  6616. ASSERT( MultiSectorHeader != NULL );
  6617. }
  6618. //
  6619. // For the log file, assume it is right in the record, use that
  6620. // if we get a plausible number, else use page size.
  6621. //
  6622. RunNumber = MultiSectorHeader->UpdateSequenceArraySize - 1;
  6623. UsaBlockSize = RunNumber * SEQUENCE_NUMBER_STRIDE;
  6624. if ((UsaBlockSize != 0x1000) && (UsaBlockSize != 0x2000) && (UsaBlockSize != PAGE_SIZE)) {
  6625. UsaBlockSize = PAGE_SIZE;
  6626. }
  6627. //
  6628. // Drop the reserved mapping - since we're done with the multi-sector header
  6629. //
  6630. if (ReservedMapping) {
  6631. MmUnmapReservedMapping( Vcb->ReservedMapping, RESERVE_POOL_TAG, PartialMdl );
  6632. MmPrepareMdlForReuse( PartialMdl );
  6633. ExReleaseFastMutexUnsafe( &Vcb->ReservedMappingMutex );
  6634. ReservedMapping = FALSE;
  6635. MultiSectorHeader = NULL;
  6636. }
  6637. //
  6638. // Otherwise it is an index, so we can get the count out of the Scb.
  6639. //
  6640. } else {
  6641. UsaBlockSize = Scb->ScbType.Index.BytesPerIndexBuffer;
  6642. }
  6643. //
  6644. // Verify the maximum of UsaBlockSize and cluster size.
  6645. //
  6646. if (BytesPerCluster > UsaBlockSize) {
  6647. //
  6648. // Determine which is smaller the cluster size or the
  6649. // size of the buffer being read.
  6650. //
  6651. IrpSp = IoGetCurrentIrpStackLocation( MasterIrp );
  6652. UsaBlockSize = IrpSp->Parameters.Read.Length;
  6653. if (UsaBlockSize > BytesPerCluster) {
  6654. UsaBlockSize = BytesPerCluster;
  6655. }
  6656. }
  6657. }
  6658. //
  6659. // We know we got a failure in the given transfer, which could be any size.
  6660. // We first want to localize the error to the failing cluster(s).
  6661. //
  6662. // We do this in the following nested loops:
  6663. //
  6664. // do (for the entire transfer, 32 clusters at a time)
  6665. //
  6666. // for (primary, secondary if available, primary again if necessary)
  6667. //
  6668. // for (each run)
  6669. //
  6670. // for (each cluster)
  6671. //
  6672. // The inner-most two loops above have the ability to restart on successive
  6673. // 32-cluster boundaries, relative to the first cluster in the transfer.
  6674. // For the Ft case, where there is a secondary device available, clusters
  6675. // are blocked out of a mask as errors are found and corrected, so they
  6676. // do not have to be read in successive passes; Usa errors are blocked out
  6677. // of the mask immediately, while for I/O errors we force ourselves to read
  6678. // both copies to locate the error, only reading the primary again if the
  6679. // secondary contained the error.
  6680. //
  6681. //
  6682. // Loop through the entire transfer, 32 clusters at a time. The innermost
  6683. // loops will terminate on 32 cluster boundaries, so the outermost loop
  6684. // will simply keep looping until we exhaust the IoRuns array.
  6685. //
  6686. do {
  6687. //
  6688. // Initialize the clusters to recover to "all".
  6689. //
  6690. ClustersToRecover = MAXULONG;
  6691. FinalPass = FALSE;
  6692. //
  6693. // For these 32 clusters, loop through primary, secondary (if available),
  6694. // and primary again (only reading when necessary).
  6695. //
  6696. for (FtCase = 0; !FinalPass; FtCase++) {
  6697. //
  6698. // Calculate whether this is the final pass or not.
  6699. //
  6700. FinalPass = !SecondaryAvailable ||
  6701. (FtCase == 2) ||
  6702. (IrpContext->MajorFunction == IRP_MJ_WRITE);
  6703. //
  6704. // Initialize the current cluster mask for cluster 0
  6705. //
  6706. ClusterMask = 1;
  6707. //
  6708. // Loop through all of the runs in the IoRuns array, or until the
  6709. // ClusterMask indicates that we hit a 32 cluster boundary.
  6710. //
  6711. for (RunNumber = AlignedRunNumber;
  6712. (RunNumber < MultipleIrpCount) && (ClusterMask != 0);
  6713. (ClusterMask != 0) ? RunNumber++ : 0) {
  6714. //
  6715. // Loop through all of the clusters within this run, or until
  6716. // the ClusterMask indicates that we hit a 32 cluster boundary.
  6717. //
  6718. for (ByteOffset = (RunNumber == AlignedRunNumber) ? AlignedByteOffset : 0;
  6719. (ByteOffset < IoRuns[RunNumber].ByteCount) && (ClusterMask != 0);
  6720. ByteOffset += BytesPerCluster, ClusterMask <<= 1) {
  6721. LONGLONG StartingVbo, StartingLbo;
  6722. PIRP Irp;
  6723. PMDL Mdl;
  6724. BOOLEAN LowFileRecord;
  6725. FT_SPECIAL_READ SpecialRead;
  6726. ULONG Length;
  6727. HotFixTrace(("Doing ByteOffset: %08lx for FtCase: %02lx\n",
  6728. (((ULONG)IoRuns[RunNumber].StartingVbo) + ByteOffset),
  6729. FtCase));
  6730. //
  6731. // If this cluster no longer needs to be recovered, we can
  6732. // skip it.
  6733. //
  6734. if ((ClustersToRecover & ClusterMask) == 0) {
  6735. continue;
  6736. }
  6737. //
  6738. // Temporarily get the 64-bit byte offset into StartingVbo, then
  6739. // calculate the actual StartingLbo and StartingVbo.
  6740. //
  6741. StartingVbo = ByteOffset;
  6742. StartingLbo = IoRuns[RunNumber].StartingLbo + StartingVbo;
  6743. StartingVbo = IoRuns[RunNumber].StartingVbo + StartingVbo;
  6744. //
  6745. // If the file is compressed, then NtfsPrepareBuffers builds
  6746. // an IoRuns array where it compresses contiguous Lcns, and
  6747. // the Vcns do not always line up correctly. But we know there
  6748. // must be a corresponding Vcn for every Lcn in the stream,
  6749. // and that that Vcn can only be >= to the Vcn we have just
  6750. // calculated from the IoRuns array. Therefore, since performance
  6751. // of hotfix is not the issue here, we use the following simple
  6752. // loop to sequentially scan the Mcb for a matching Vcn for
  6753. // the current Lcn.
  6754. //
  6755. if (Scb->CompressionUnit != 0) {
  6756. VCN TempVcn;
  6757. LCN TempLcn, LcnOut;
  6758. TempLcn = LlClustersFromBytes( Vcb, StartingLbo );
  6759. TempVcn = LlClustersFromBytes( Vcb, StartingVbo );
  6760. //
  6761. // Scan to the end of the Mcb (we assert below this
  6762. // did not happen) or until we find a Vcn with the
  6763. // Lcn we currently want to read.
  6764. //
  6765. while (NtfsLookupNtfsMcbEntry( &Scb->Mcb,
  6766. TempVcn,
  6767. &LcnOut,
  6768. NULL,
  6769. NULL,
  6770. NULL,
  6771. NULL,
  6772. NULL )
  6773. &&
  6774. (LcnOut != TempLcn)) {
  6775. TempVcn = TempVcn + 1;
  6776. }
  6777. ASSERT(LcnOut == TempLcn);
  6778. StartingVbo = LlBytesFromClusters( Vcb, TempVcn );
  6779. }
  6780. LowFileRecord = (Scb == Vcb->MftScb) && (((PLARGE_INTEGER)&StartingVbo)->HighPart == 0);
  6781. //
  6782. // Calculate the amount to actually read.
  6783. //
  6784. Length = IoRuns[RunNumber].ByteCount - ByteOffset;
  6785. if (Length > BytesPerCluster) {
  6786. Length = BytesPerCluster;
  6787. }
  6788. //
  6789. // Loop while verify required, or we find we really
  6790. // do not have an FT device.
  6791. //
  6792. while (TRUE) {
  6793. //
  6794. // Create an associated IRP, making sure there is one stack entry for
  6795. // us, as well.
  6796. //
  6797. Irp = IoMakeAssociatedIrp( MasterIrp, (CCHAR)(DeviceObject->StackSize + 1) );
  6798. if (Irp == NULL) {
  6799. //
  6800. // We return the error status in the Master irp when
  6801. // we were called.
  6802. //
  6803. MasterIrp->IoStatus.Status = IrpStatus;
  6804. return;
  6805. }
  6806. //
  6807. // Allocate and build a partial Mdl for the request.
  6808. //
  6809. Mdl = IoAllocateMdl( (PCHAR)MasterIrp->UserBuffer + IoRuns[RunNumber].BufferOffset + ByteOffset,
  6810. Length,
  6811. FALSE,
  6812. FALSE,
  6813. Irp );
  6814. if (Mdl == NULL) {
  6815. IoFreeIrp(Irp);
  6816. //
  6817. // We return the error status in the Master irp when
  6818. // we were called.
  6819. //
  6820. MasterIrp->IoStatus.Status = IrpStatus;
  6821. return;
  6822. }
  6823. //
  6824. // Sanity Check
  6825. //
  6826. ASSERT( Mdl == Irp->MdlAddress );
  6827. IoBuildPartialMdl( MasterIrp->MdlAddress,
  6828. Mdl,
  6829. (PCHAR)MasterIrp->UserBuffer +
  6830. IoRuns[RunNumber].BufferOffset + ByteOffset,
  6831. Length );
  6832. //
  6833. // Get the first IRP stack location in the associated Irp
  6834. //
  6835. IoSetNextIrpStackLocation( Irp );
  6836. IrpSp = IoGetCurrentIrpStackLocation( Irp );
  6837. //
  6838. // Setup the Stack location to describe our read.
  6839. //
  6840. IrpSp->MajorFunction = IrpContext->MajorFunction;
  6841. IrpSp->Parameters.Read.Length = Length;
  6842. IrpSp->Parameters.Read.ByteOffset.QuadPart = StartingVbo;
  6843. //
  6844. // Set up the completion routine address in our stack frame.
  6845. //
  6846. IoSetCompletionRoutine( Irp,
  6847. &NtfsMultiSyncCompletionRoutine,
  6848. Context,
  6849. TRUE,
  6850. TRUE,
  6851. TRUE );
  6852. //
  6853. // Setup the next IRP stack location in the associated Irp for the disk
  6854. // driver beneath us.
  6855. //
  6856. IrpSp = IoGetNextIrpStackLocation( Irp );
  6857. //
  6858. // Setup the Stack location to do a normal read or write.
  6859. //
  6860. if ((IrpContext->MajorFunction == IRP_MJ_WRITE) || !SecondaryAvailable) {
  6861. IrpSp->MajorFunction = IrpContext->MajorFunction;
  6862. IrpSp->Flags = IrpSpFlags;
  6863. IrpSp->Parameters.Read.ByteOffset.QuadPart = StartingLbo;
  6864. IrpSp->Parameters.Read.Length = Length;
  6865. //
  6866. // Otherwise we are supposed to read from the primary or secondary
  6867. // on an FT drive.
  6868. //
  6869. } else {
  6870. IrpSp->MajorFunction = IRP_MJ_DEVICE_CONTROL;
  6871. if (FtCase != 1) {
  6872. IrpSp->Parameters.DeviceIoControl.IoControlCode = FT_PRIMARY_READ;
  6873. } else {
  6874. IrpSp->Parameters.DeviceIoControl.IoControlCode = FT_SECONDARY_READ;
  6875. }
  6876. Irp->AssociatedIrp.SystemBuffer = &SpecialRead;
  6877. SpecialRead.ByteOffset.QuadPart = StartingLbo;
  6878. SpecialRead.Length = Length;
  6879. }
  6880. //
  6881. // We only need to set the associated IRP count in the master irp to
  6882. // make it a master IRP. But we set the count to one more than our
  6883. // caller requested, because we do not want the I/O system to complete
  6884. // the I/O. We also set our own count.
  6885. //
  6886. Context->IrpCount = 1;
  6887. MasterIrp->AssociatedIrp.IrpCount = 2;
  6888. //
  6889. // MtfsMultiCompletionRoutine only modifies the status on errors,
  6890. // so we have to reset to success before each call.
  6891. //
  6892. MasterIrp->IoStatus.Status = STATUS_SUCCESS;
  6893. //
  6894. // If IoCallDriver returns an error, it has completed the Irp
  6895. // and the error will be caught by our completion routines
  6896. // and dealt with as a normal IO error.
  6897. //
  6898. HotFixTrace(("Calling driver with Irp: %08lx\n", Irp));
  6899. KeClearEvent( &Context->Wait.SyncEvent );
  6900. (VOID)IoCallDriver( DeviceObject, Irp );
  6901. //
  6902. // Now wait for it.
  6903. //
  6904. NtfsWaitSync( IrpContext );
  6905. HotFixTrace(("Request completion status: %08lx\n", MasterIrp->IoStatus.Status));
  6906. //
  6907. // If we were so lucky to get a verify required, then
  6908. // spin our wheels here a while.
  6909. //
  6910. if (MasterIrp->IoStatus.Status == STATUS_VERIFY_REQUIRED) {
  6911. //
  6912. // Otherwise we need to verify the volume, and if it doesn't
  6913. // verify correctly then we dismount the volume and report
  6914. // our error.
  6915. //
  6916. if (!NtfsPerformVerifyOperation( IrpContext, Vcb )) {
  6917. //**** NtfsPerformDismountOnVcb( IrpContext, Vcb, TRUE, NULL );
  6918. ClearFlag( Vcb->VcbState, VCB_STATE_VOLUME_MOUNTED );
  6919. MasterIrp->IoStatus.Status = STATUS_FILE_INVALID;
  6920. return;
  6921. }
  6922. //
  6923. // The volume verified correctly so now clear the verify bit
  6924. // and try and I/O again
  6925. //
  6926. ClearFlag( Vcb->Vpb->RealDevice->Flags, DO_VERIFY_VOLUME );
  6927. //
  6928. // We may have assumed that there was a secondary available
  6929. // and there is not. We can only tell from getting this code.
  6930. // Indicate there is no secondary and that we will be only
  6931. // making one pass.
  6932. //
  6933. } else if (MasterIrp->IoStatus.Status == STATUS_INVALID_DEVICE_REQUEST) {
  6934. ASSERT((IrpContext->MajorFunction != IRP_MJ_WRITE) && SecondaryAvailable);
  6935. SetFlag(Vcb->VcbState, VCB_STATE_NO_SECONDARY_AVAILABLE);
  6936. SecondaryAvailable = FALSE;
  6937. FinalPass = TRUE;
  6938. //
  6939. // If the secondary is offline then there is nothing to recover.
  6940. //
  6941. } else if (MasterIrp->IoStatus.Status == STATUS_FT_MISSING_MEMBER) {
  6942. //
  6943. // FTDISK will return this error if they are in initialization.
  6944. // Then we don't want to set VCB_STATE_NO_SECONDARY_AVAILABLE but
  6945. // will need to check whether we really want to hotfix.
  6946. //
  6947. SecondaryAvailable = FALSE;
  6948. FinalPass = TRUE;
  6949. //
  6950. // Otherwise we got success or another error and we should proceed.
  6951. //
  6952. } else {
  6953. break;
  6954. }
  6955. }
  6956. //
  6957. // Check again if we really want to perform the hot-fix in the event the status
  6958. // of the secondary has changed.
  6959. //
  6960. if (!SecondaryAvailable &&
  6961. (IrpContext->MajorFunction == IRP_MJ_READ) &&
  6962. (FlagOn( MasterIrp->Flags, IRP_PAGING_IO | IRP_NOCACHE ) == IRP_NOCACHE)) {
  6963. MasterIrp->IoStatus.Status = IrpStatus;
  6964. return;
  6965. }
  6966. if (!FT_SUCCESS(MasterIrp->IoStatus.Status)) {
  6967. BOOLEAN IsHotFixPage;
  6968. //
  6969. // Calculate whether or not this is the hot fix thread itself
  6970. // (i.e., executing NtfsPerformHotFix).
  6971. //
  6972. IsHotFixPage = NtfsIsTopLevelHotFixScb( Scb );
  6973. LlTemp1 = StartingVbo >> PAGE_SHIFT; //**** crock for x86 compiler bug
  6974. LlTemp2 = NtfsGetTopLevelHotFixVcn() >> PAGE_SHIFT; //**** crock for x86 compiler bug
  6975. if (!IsHotFixPage ||
  6976. LlTemp1 != LlTemp2) {
  6977. IsHotFixPage = FALSE;
  6978. }
  6979. //
  6980. // If the entire device manages to fail in the middle of this,
  6981. // get out.
  6982. //
  6983. if (FsRtlIsTotalDeviceFailure(MasterIrp->IoStatus.Status)) {
  6984. MasterIrp->IoStatus.Status = IrpStatus;
  6985. return;
  6986. }
  6987. //
  6988. // If this is not a write, fill the cluster with -1 for the
  6989. // event that we ultimately never find good data. This is
  6990. // for security reasons (cannot show anyone the data that
  6991. // happens to be in the buffer now), signature reasons (let
  6992. // -1 designate read errors, as opposed to 0's which occur
  6993. // on ValidDataLength cases), and finally if we fail to read
  6994. // a bitmap, we must consider all clusters allocated if we
  6995. // wish to continue to use the volume before chkdsk sees it.
  6996. //
  6997. if (IrpContext->MajorFunction == IRP_MJ_READ) {
  6998. NtfsFillIrpBuffer( IrpContext, MasterIrp, Length, IoRuns[RunNumber].BufferOffset + ByteOffset, 0xFF );
  6999. //
  7000. // If this is file system metadata, then we better mark the
  7001. // volume corrupt.
  7002. //
  7003. if (FinalPass &&
  7004. FlagOn(Scb->ScbState, SCB_STATE_MODIFIED_NO_WRITE) &&
  7005. (!LowFileRecord || (((ULONG)StartingVbo >= PAGE_SIZE) &&
  7006. ((ULONG)StartingVbo >= (ULONG)((VOLUME_DASD_NUMBER + 1) << Vcb->MftShift))))) {
  7007. NtfsPostVcbIsCorrupt( IrpContext, 0, NULL, NULL );
  7008. }
  7009. //
  7010. // If this is a Usa-protected file, or the bitmap,
  7011. // then we will try to procede with our 0xFF pattern
  7012. // above rather than returning an error to our caller.
  7013. // The Usa guy will get a Usa error, and the bitmap
  7014. // will safely say that everything is allocated until
  7015. // chkdsk can fix it up.
  7016. //
  7017. if (FlagOn(Scb->ScbState, SCB_STATE_USA_PRESENT) ||
  7018. (Scb == Vcb->BitmapScb)) {
  7019. MasterIrp->IoStatus.Status = STATUS_SUCCESS;
  7020. }
  7021. }
  7022. //
  7023. // If we are not the page being hot fixed, we want to post the
  7024. // hot fix and possibly remember the final status.
  7025. //
  7026. if (!IsHotFixPage) {
  7027. //
  7028. // If we got a media error, post the hot fix now. We expect
  7029. // to post at most one hot fix in this routine. When we post
  7030. // it it will serialize on the current stream. Do not attempt
  7031. // hot fixes during restart, or if we do not have the bad
  7032. // cluster file yet.
  7033. //
  7034. if (!FlagOn( Vcb->VcbState, VCB_STATE_RESTART_IN_PROGRESS ) &&
  7035. (Vcb->BadClusterFileScb != NULL) &&
  7036. (!LowFileRecord ||
  7037. ((ULONG)StartingVbo >= Vcb->Mft2Scb->Header.FileSize.LowPart))) {
  7038. NtfsPostHotFix( MasterIrp,
  7039. &StartingVbo,
  7040. StartingLbo,
  7041. BytesPerCluster,
  7042. FALSE );
  7043. }
  7044. //
  7045. // Now see if we ended up with an error on this cluster, and handle
  7046. // it accordingly.
  7047. //
  7048. // If we are the one actually trying to fix this error,
  7049. // then we need to get success so that we can make the page
  7050. // valid with whatever good data we have and flush data
  7051. // to its new location.
  7052. //
  7053. // Currently we will not try to figure out if the error
  7054. // is actually on the Scb (not to mention the sector) that
  7055. // we are hot fixing, assuming that the best thing is to
  7056. // just try to charge on.
  7057. //
  7058. if (FinalPass) {
  7059. //
  7060. // Make sure he gets the error (if we still have an
  7061. // error (see above).
  7062. //
  7063. if (!FT_SUCCESS(MasterIrp->IoStatus.Status)) {
  7064. FinalStatus = MasterIrp->IoStatus.Status;
  7065. }
  7066. }
  7067. }
  7068. }
  7069. //
  7070. // If this is a Usa-protected stream, we now perform end of
  7071. // Usa processing. (Otherwise do end of cluster processing
  7072. // below.)
  7073. //
  7074. if (FlagOn(Scb->ScbState, SCB_STATE_USA_PRESENT)) {
  7075. ULONG NextOffset = IoRuns[RunNumber].BufferOffset + ByteOffset + Length;
  7076. //
  7077. // If we are not at the end of a Usa block, there is no work
  7078. // to do now.
  7079. //
  7080. if ((NextOffset & (UsaBlockSize - 1)) == 0) {
  7081. HotFixTrace(("May be verifying UsaBlock\n"));
  7082. //
  7083. // If the Usa block is ok, we may be able to knock the
  7084. // corresponding sectors out of the ClustersToRecover mask.
  7085. //
  7086. if ((IrpContext->MajorFunction != IRP_MJ_READ) ||
  7087. NtfsVerifyAndRevertUsaBlock( IrpContext,
  7088. Scb,
  7089. MasterIrp,
  7090. NULL,
  7091. NextOffset - UsaBlockSize,
  7092. UsaBlockSize,
  7093. StartingVbo - (UsaBlockSize - Length) )) {
  7094. //
  7095. // If we are only fixing a Usa error anyway, or this is
  7096. // the final pass or at least not the first pass, then
  7097. // we can remove these clusters from the recover mask.
  7098. //
  7099. if (FixingUsaError || FinalPass || (FtCase != 0)) {
  7100. ULONG ShiftCount = UsaBlockSize >> Vcb->ClusterShift;
  7101. ClustersToRecover -= (ClusterMask * 2) -
  7102. (ClusterMask >> (ShiftCount - 1));
  7103. }
  7104. //
  7105. // Note, that even if we get a Usa error, we want to
  7106. // update the byte count on the final pass, because
  7107. // our reader expects that.
  7108. //
  7109. } else if (FinalPass) {
  7110. HotFixTrace(("Verify may have failed\n"));
  7111. }
  7112. }
  7113. //
  7114. // Perform end of cluster processing if not a Usa-protected stream.
  7115. //
  7116. } else {
  7117. //
  7118. // If the read succeeded and this is the final pass or at least
  7119. // not the first pass, we can take this cluster out of the cluster
  7120. // to recover mask.
  7121. //
  7122. if (FT_SUCCESS(MasterIrp->IoStatus.Status) && (FinalPass || (FtCase != 0))) {
  7123. ClustersToRecover -= ClusterMask;
  7124. }
  7125. }
  7126. }
  7127. }
  7128. }
  7129. //
  7130. // Assume we terminated the inner loops because we hit a 32 cluster boundary,
  7131. // and advance our alignment points.
  7132. //
  7133. AlignedRunNumber = RunNumber;
  7134. //
  7135. // We should have updated ByteOffset above (Prefast initialization).
  7136. //
  7137. ASSERT( ByteOffset != MAXULONG );
  7138. AlignedByteOffset = ByteOffset;
  7139. } while (RunNumber < MultipleIrpCount);
  7140. //
  7141. // Now put the final status in the MasterIrp and return
  7142. //
  7143. MasterIrp->IoStatus.Status = FinalStatus;
  7144. if (!NT_SUCCESS(FinalStatus)) {
  7145. MasterIrp->IoStatus.Information = 0;
  7146. }
  7147. HotFixTrace(("NtfsFixDataError returning IoStatus = %08lx, %08lx\n",
  7148. MasterIrp->IoStatus.Status,
  7149. MasterIrp->IoStatus.Information));
  7150. return;
  7151. }
VOID
NtfsPostHotFix (
    IN PIRP Irp,
    IN PLONGLONG BadVbo,
    IN LONGLONG BadLbo,
    IN ULONG ByteLength,
    IN BOOLEAN DelayIrpCompletion
    )

/*++

Routine Description:

    This routine posts a hot fix request to a worker thread.  It has to be posted,
    because we cannot expect to be able to acquire the resources we need exclusive
    when the bad cluster is discovered.

Arguments:

    Irp - The Irp for a read or write request which got the error

    BadVbo - The Vbo of the bad cluster for the read or write request

    BadLbo - The Lbo of the bad cluster

    ByteLength - Length to hot fix

    DelayIrpCompletion - TRUE if the Irp should not be completed until the hot
                         fix is done.

Return Value:

    None

--*/

{
    PIRP_CONTEXT HotFixIrpContext = NULL;
    PVOLUME_DEVICE_OBJECT VolumeDeviceObject;
    PIO_STACK_LOCATION IrpSp = IoGetCurrentIrpStackLocation(Irp);
    PFILE_OBJECT FileObject = IrpSp->FileObject;

    HotFixTrace(("NTFS: Posting hotfix on file object: %08lx\n", FileObject));

    //
    // Allocate an IrpContext to post the hot fix to a worker thread.
    //

    NtfsInitializeIrpContext( Irp, FALSE, &HotFixIrpContext );

    //
    // First reference the file object so that it will not go away
    // until the hot fix is done.  (We cannot increment the CloseCount
    // in the Scb, since we are not properly synchronized.)
    //

    ObReferenceObject( FileObject );

    //
    // Smuggle the hot fix parameters to the worker routine in IrpContext
    // fields that are otherwise unused on this path: OriginatingIrp carries
    // the referenced file object, and the ScbSnapshot fields carry the bad
    // Vbo, the bad Lbo, and the byte length.  NtfsPerformHotFix unpacks
    // these with the matching casts, so the exact fields and casts used
    // here form a private contract with that routine.
    //

    HotFixIrpContext->OriginatingIrp = (PIRP)FileObject;
    HotFixIrpContext->ScbSnapshot.AllocationSize = *BadVbo;
    HotFixIrpContext->ScbSnapshot.FileSize = BadLbo;

    //
    // Only the low 32 bits of ValidDataLength hold the byte length; the
    // high 32 bits are used below (32-bit builds only) to hold the Irp
    // pointer when completion is being delayed.
    //

    ((ULONG)HotFixIrpContext->ScbSnapshot.ValidDataLength) = ByteLength;

    if (DelayIrpCompletion) {

#ifdef _WIN64

        //
        // (fcf) The IrpToComplete pointer is stashed into the high half of a
        // LONGLONG.  This is problematic on WIN64 (a pointer no longer fits
        // in 32 bits), so we have to store it somewhere else on 64-bit
        // platforms.  IrpContext->SharedScb is unused in this codepath
        // (asserted below), so we'll use that.
        //
        // Its possible that this change could be made for 32-bit platforms as
        // well, if only to avoid this conditional compilation, but I would
        // prefer the original authors to sanity-check this first.
        //
        // See also NtfsPerformHotFix() where this pointer is extracted.
        //

        ASSERT(HotFixIrpContext->SharedScbSize == 0);
        ASSERT(HotFixIrpContext->SharedScb == NULL);

        (PIRP)HotFixIrpContext->SharedScb = Irp;

#else // !_WIN64

        //
        // 32-bit build: the Irp pointer fits in the high 32 bits of the
        // repurposed ValidDataLength field.
        //

        ((PLARGE_INTEGER)&HotFixIrpContext->ScbSnapshot.ValidDataLength)->HighPart = (ULONG)Irp;

#endif // _WIN64

    } else {

        //
        // No Irp to complete later; zero the high part so NtfsPerformHotFix
        // sees a NULL IrpToComplete.
        //

        ((PLARGE_INTEGER)&HotFixIrpContext->ScbSnapshot.ValidDataLength)->HighPart = 0;
    }

    //
    // Locate the volume device object and Vcb that we are trying to access
    //

    VolumeDeviceObject = (PVOLUME_DEVICE_OBJECT)IrpSp->DeviceObject;
    HotFixIrpContext->Vcb = &VolumeDeviceObject->Vcb;

    //
    // Send it off.....  Queue to the critical work queue so the fix is not
    // starved behind ordinary delayed work items.
    //

    RtlZeroMemory( &HotFixIrpContext->WorkQueueItem, sizeof( WORK_QUEUE_ITEM ) );

    ExInitializeWorkItem( &HotFixIrpContext->WorkQueueItem,
                          (PWORKER_THREAD_ROUTINE)NtfsPerformHotFix,
                          (PVOID)HotFixIrpContext );

    ExQueueWorkItem( &HotFixIrpContext->WorkQueueItem, CriticalWorkQueue );
}
  7232. VOID
  7233. NtfsPerformHotFix (
  7234. IN PIRP_CONTEXT IrpContext
  7235. )
  7236. /*++
  7237. Routine Description:
  7238. This routine implements implements a hot fix that was scheduled
  7239. above, extracting its parameters from the IrpContext initialized
  7240. above. The hot fix must be for a contiguous range of Lcns (usually 1).
  7241. Arguments:
  7242. IrpContext - Supplies the IrpContext with the hot fix information
  7243. Return Value:
  7244. None.
  7245. --*/
  7246. {
  7247. TOP_LEVEL_CONTEXT TopLevelContext;
  7248. PTOP_LEVEL_CONTEXT ThreadTopLevelContext;
  7249. ATTRIBUTE_ENUMERATION_CONTEXT Context;
  7250. TYPE_OF_OPEN TypeOfOpen;
  7251. PVCB Vcb;
  7252. PFCB Fcb;
  7253. PSCB Scb;
  7254. PCCB Ccb;
  7255. PSCB BadClusterScb;
  7256. VCN BadVcn;
  7257. LCN LcnTemp, BadLcn;
  7258. LONGLONG ClusterCount;
  7259. NTSTATUS Status;
  7260. PVOID Buffer;
  7261. PIRP IrpToComplete;
  7262. ULONG ClustersToFix;
  7263. PBCB Bcb = NULL;
  7264. ERESOURCE_THREAD BcbOwner = 0;
  7265. BOOLEAN PerformFullCleanup = TRUE;
  7266. NTSTATUS CompletionStatus = STATUS_SUCCESS;
  7267. PSCB OriginalScb = NULL;
  7268. PSCB NewScb = NULL;
  7269. BOOLEAN PagingFile;
  7270. //
  7271. // Extract a description of the cluster to be fixed.
  7272. //
  7273. PFILE_OBJECT FileObject = (PFILE_OBJECT)IrpContext->OriginatingIrp;
  7274. VBO BadVbo = *(PVBO)&IrpContext->ScbSnapshot.AllocationSize;
  7275. PAGED_CODE();
  7276. //
  7277. // Reset the shared fields
  7278. //
  7279. InitializeListHead( &IrpContext->RecentlyDeallocatedQueue );
  7280. InitializeListHead( &IrpContext->ExclusiveFcbList );
  7281. ThreadTopLevelContext = NtfsInitializeTopLevelIrp( &TopLevelContext, TRUE, FALSE );
  7282. ASSERT( ThreadTopLevelContext == &TopLevelContext );
  7283. ASSERT( FlagOn( IrpContext->State, IRP_CONTEXT_STATE_ALLOC_FROM_POOL ));
  7284. NtfsUpdateIrpContextWithTopLevel( IrpContext, ThreadTopLevelContext );
  7285. //
  7286. // Initialize our local variables
  7287. //
  7288. TypeOfOpen = NtfsDecodeFileObject( IrpContext, FileObject, &Vcb, &Fcb, &Scb, &Ccb, FALSE );
  7289. BadClusterScb = Vcb->BadClusterFileScb;
  7290. BadVcn = LlClustersFromBytesTruncate( Vcb, BadVbo );
  7291. BadLcn = LlClustersFromBytesTruncate( Vcb, IrpContext->ScbSnapshot.FileSize );
  7292. ClustersToFix = ClustersFromBytes( Vcb, ((ULONG)IrpContext->ScbSnapshot.ValidDataLength) );
  7293. #ifdef _WIN64
  7294. //
  7295. // See comments in NtfsPostHotFix() regarding the location of IrpToComplete.
  7296. //
  7297. ASSERT(IrpContext->SharedScbSize == 0);
  7298. IrpToComplete = (PIRP)IrpContext->SharedScb;
  7299. //
  7300. // Reset SharedScb back to NULL just to be safe.
  7301. //
  7302. IrpContext->SharedScb = NULL;
  7303. #else // !_WIN64
  7304. IrpToComplete = (PIRP)(((PLARGE_INTEGER)&IrpContext->ScbSnapshot.ValidDataLength)->HighPart);
  7305. #endif
  7306. //
  7307. // Remember the status to complete the original Irp with.
  7308. //
  7309. if (IrpToComplete != NULL) {
  7310. CompletionStatus = IrpToComplete->IoStatus.Status;
  7311. }
  7312. NtfsInitializeAttributeContext( &Context );
  7313. //
  7314. // Set up for synchronous operation
  7315. //
  7316. SetFlag( IrpContext->State, IRP_CONTEXT_STATE_WAIT );
  7317. //
  7318. // Show that we are performing a HotFix. Note we are not processing
  7319. // an Irp now.
  7320. //
  7321. IrpContext->OriginatingIrp = NULL;
  7322. TopLevelContext.VboBeingHotFixed = BadVbo;
  7323. TopLevelContext.ScbBeingHotFixed = Scb;
  7324. //
  7325. // Acquire the Vcb before acquiring the paging Io resource.
  7326. //
  7327. NtfsAcquireExclusiveVcb( IrpContext, Vcb, TRUE );
  7328. ASSERT( 1 == ExIsResourceAcquiredSharedLite( &Vcb->Resource ) );
  7329. //
  7330. // While we're holding the Vcb, let's make sure the volume is still mounted.
  7331. // If it isn't mounted, we need to clean up and get out.
  7332. //
  7333. if (!FlagOn( Vcb->VcbState, VCB_STATE_VOLUME_MOUNTED )) {
  7334. NtfsCleanupAttributeContext( IrpContext, &Context );
  7335. NtfsReleaseVcb( IrpContext, Vcb );
  7336. NtfsCompleteRequest( IrpContext, IrpToComplete, CompletionStatus );
  7337. return;
  7338. }
  7339. //
  7340. // Acquire the paging io resource for this Fcb if it exists.
  7341. //
  7342. if (Scb->Header.PagingIoResource != NULL) {
  7343. NtfsAcquireExclusivePagingIo( IrpContext, Fcb );
  7344. }
  7345. //
  7346. // Just because we are hot fixing one file, it is possible that someone
  7347. // will log to another file and try to lookup Lcns. So we will acquire
  7348. // all files. Example: Hot fix is in Mft, and SetFileInfo has only the
  7349. // file acquired, and will log something to the Mft, and cause Lcns to be
  7350. // looked up.
  7351. //
  7352. NtfsAcquireAllFiles( IrpContext, Vcb, TRUE, FALSE, FALSE );
  7353. //
  7354. // For the bitmap - acquire again to explicitly get it on the exclsuive list
  7355. // and release the initial acquire
  7356. //
  7357. if (Scb == Vcb->BitmapScb) {
  7358. ASSERT( NtfsIsExclusiveScb( Scb ) && (NtfsIsSharedScb( Scb ) == 1));
  7359. NtfsAcquireExclusiveFcb( IrpContext, Scb->Fcb, Scb, ACQUIRE_HOLD_BITMAP );
  7360. NtfsReleaseResource( IrpContext, Fcb );
  7361. ASSERT( NtfsIsExclusiveScb( Scb ) && (NtfsIsSharedScb( Scb ) == 1) &&
  7362. (Scb->Fcb->ExclusiveFcbLinks.Flink != NULL));
  7363. }
  7364. //
  7365. // Don't attempt to hotfix if the scb is deleted
  7366. //
  7367. if (!FlagOn( Scb->ScbState, SCB_STATE_ATTRIBUTE_DELETED )) {
  7368. //
  7369. // Catch all exceptions. Note, we should not get any I/O error exceptions
  7370. // on our device.
  7371. //
  7372. try {
  7373. PagingFile = FlagOn( Fcb->FcbState, FCB_STATE_PAGING_FILE ) && FlagOn( Scb->ScbState, SCB_STATE_UNNAMED_DATA );
  7374. //
  7375. // Hotfixing the paging file is tricky because paging file i/o acquires no resources
  7376. // So we create a shadow scb to do the work in
  7377. //
  7378. if (PagingFile) {
  7379. UNICODE_STRING Mirror;
  7380. BOOLEAN Existing;
  7381. VCN Vcn;
  7382. LCN Lcn;
  7383. #ifdef BENL_DBG
  7384. KdPrint(( "NTFS: hotfixing pagefile\n "));
  7385. #endif
  7386. Mirror.Length = Mirror.MaximumLength = 12;
  7387. Mirror.Buffer = L"Mirror";
  7388. NewScb = NtfsCreateScb( IrpContext, Scb->Fcb, $DATA, &Mirror, FALSE, &Existing );
  7389. ASSERT( Existing == FALSE );
  7390. ASSERT( FlagOn( NewScb->ScbState, SCB_STATE_NONPAGED ));
  7391. //
  7392. // Null out the name so we think it points to real unnamed $data
  7393. //
  7394. NewScb->AttributeName.Length = 0;
  7395. //
  7396. // Now update the mirror from the attribute to get the header info and
  7397. // snapshot it
  7398. //
  7399. NtfsUpdateScbFromAttribute( IrpContext, NewScb, NULL );
  7400. NtfsSnapshotScb( IrpContext, NewScb );
  7401. //
  7402. // Load the real scb's mcb cluster info into the mirror
  7403. //
  7404. for (Vcn = 0; Vcn < LlClustersFromBytes( Vcb, Scb->Header.AllocationSize.QuadPart ); Vcn += ClusterCount ) {
  7405. if (NtfsLookupNtfsMcbEntry( &Scb->Mcb, Vcn, &Lcn, &ClusterCount, NULL, NULL, NULL, NULL )) {
  7406. NtfsAddNtfsMcbEntry( &NewScb->Mcb, Vcn, Lcn, ClusterCount, FALSE );
  7407. } else {
  7408. ASSERTMSG( "Missing range in paging file.\n", FALSE );
  7409. break;
  7410. }
  7411. }
  7412. OriginalScb = Scb;
  7413. Scb = NewScb;
  7414. }
  7415. for (; ClustersToFix != 0; ClustersToFix -= 1) {
  7416. //
  7417. // Lookup the bad cluster to see if it is already in the bad cluster
  7418. // file, and do nothing if it is.
  7419. //
  7420. if (!NtfsLookupAllocation( IrpContext,
  7421. BadClusterScb,
  7422. BadLcn,
  7423. &LcnTemp,
  7424. &ClusterCount,
  7425. NULL,
  7426. NULL ) &&
  7427. NtfsLookupAllocation( IrpContext,
  7428. Scb,
  7429. BadVcn,
  7430. &LcnTemp,
  7431. &ClusterCount,
  7432. NULL,
  7433. NULL ) &&
  7434. (LcnTemp == BadLcn)) {
  7435. //
  7436. // Pin the bad cluster in memory, so that we will not lose whatever data
  7437. // we have for it. (This data will be the correct data if we are talking
  7438. // to the FT driver or got a write error, otherwise it may be all -1's.)
  7439. //
  7440. // Do not try to do this if we are holding on to the original Irp, as that
  7441. // will cause a collided page wait deadlock.
  7442. //
  7443. if (IrpToComplete == NULL) {
  7444. ULONG Count = 100;
  7445. NtfsCreateInternalAttributeStream( IrpContext,
  7446. Scb,
  7447. FALSE,
  7448. &NtfsInternalUseFile[PERFORMHOTFIX_FILE_NUMBER] );
  7449. //
  7450. // We loop as long as we get an data error. We want our
  7451. // thread to read from the disk because we will recognize
  7452. // an I/O request started in PerformHotFix and ignore the
  7453. // data error. The cases where we do get an error will
  7454. // probably be from Mm intercepting this request because
  7455. // of a collided read with another thread.
  7456. //
  7457. do {
  7458. Status = STATUS_SUCCESS;
  7459. try {
  7460. NtfsPinStream( IrpContext, Scb, BadVbo, Vcb->BytesPerCluster, &Bcb, &Buffer );
  7461. } except ((!FsRtlIsNtstatusExpected( Status = GetExceptionCode())
  7462. || FsRtlIsTotalDeviceFailure( Status ))
  7463. ? EXCEPTION_CONTINUE_SEARCH
  7464. : EXCEPTION_EXECUTE_HANDLER) {
  7465. NOTHING;
  7466. }
  7467. } while (Count-- && (Status != STATUS_SUCCESS));
  7468. if (Status != STATUS_SUCCESS) {
  7469. NtfsRaiseStatus( IrpContext, Status, NULL, NULL );
  7470. }
  7471. }
  7472. //
  7473. // If we're hotfixing the logfile set the owner bcb owner to thread & 0x1 so
  7474. // we don't run into trouble if the logged changes to it use the same page
  7475. // Lfs will also set the bcb owner and our release will fail because the threadowner
  7476. // has been changed
  7477. //
  7478. if (Scb == Vcb->LogFileScb) {
  7479. BcbOwner = (ERESOURCE_THREAD) (((ULONG_PTR) PsGetCurrentThread()) | 1);
  7480. CcSetBcbOwnerPointer( Bcb, (PVOID)BcbOwner );
  7481. }
  7482. //
  7483. // Now deallocate the bad cluster in this stream in the bitmap only,
  7484. // since in general we do not support sparse deallocation in the file
  7485. // record. We will update the allocation below.
  7486. //
  7487. #if DBG
  7488. KdPrint(("NTFS: Freeing Bad Vcn: %08lx, %08lx\n", ((ULONG)BadVcn), ((PLARGE_INTEGER)&BadVcn)->HighPart));
  7489. #endif
  7490. //
  7491. // Deallocate clusters directly - so the change is only in memory
  7492. // Because we're not using the normal NtfsDeleteAllocation its necc. to
  7493. // manually create the snapshots that will correctly unload the modified range in
  7494. // case of a raise
  7495. //
  7496. NtfsSnapshotScb( IrpContext, Scb );
  7497. if (BadVcn < Scb->ScbSnapshot->LowestModifiedVcn) {
  7498. Scb->ScbSnapshot->LowestModifiedVcn = BadVcn;
  7499. }
  7500. if (BadVcn > Scb->ScbSnapshot->HighestModifiedVcn) {
  7501. Scb->ScbSnapshot->HighestModifiedVcn = BadVcn;
  7502. }
  7503. NtfsDeallocateClusters( IrpContext,
  7504. Vcb,
  7505. Scb,
  7506. BadVcn,
  7507. BadVcn,
  7508. &Scb->TotalAllocated );
  7509. //
  7510. // Look up the bad cluster attribute.
  7511. //
  7512. NtfsLookupAttributeForScb( IrpContext, BadClusterScb, NULL, &Context );
  7513. //
  7514. // Now append this cluster to the bad cluster file
  7515. //
  7516. #if DBG
  7517. KdPrint(("NTFS: Retiring Bad Lcn: %08lx, %08lx\n", ((ULONG)BadLcn), ((PLARGE_INTEGER)&BadLcn)->HighPart));
  7518. #endif
  7519. NtfsAddBadCluster( IrpContext, Vcb, BadLcn );
  7520. //
  7521. // Now update the file record for the bad cluster file to
  7522. // show the new cluster.
  7523. //
  7524. NtfsAddAttributeAllocation( IrpContext,
  7525. BadClusterScb,
  7526. &Context,
  7527. &BadLcn,
  7528. (PVCN)&Li1 );
  7529. //
  7530. // Now reallocate a cluster to the original stream to replace the bad cluster.
  7531. //
  7532. HotFixTrace(("NTFS: Reallocating Bad Vcn\n"));
  7533. NtfsAddAllocation( IrpContext, NULL, Scb, BadVcn, (LONGLONG)1, FALSE, NULL );
  7534. //
  7535. // Unpin the pages now so that the flush won't block if we are hot-fixing the Mft.
  7536. //
  7537. NtfsCleanupAttributeContext( IrpContext, &Context );
  7538. //
  7539. // Now that there is a new home for the data, mark the page dirty, unpin
  7540. // it and flush it out to its new home.
  7541. //
  7542. if (IrpToComplete == NULL) {
  7543. LONGLONG BiasedBadVbo = BadVbo;
  7544. CcSetDirtyPinnedData( Bcb, NULL );
  7545. if (Scb != Vcb->LogFileScb) {
  7546. NtfsUnpinBcb( IrpContext, &Bcb );
  7547. } else {
  7548. NtfsUnpinBcbForThread( IrpContext, &Bcb, BcbOwner );
  7549. }
  7550. //
  7551. // Flush the stream. Ignore the status - if we get something like
  7552. // a log file full, the Lazy Writer will eventually write the page.
  7553. // Bias the write if this is the Usn Journal.
  7554. //
  7555. if (FlagOn( Scb->ScbPersist, SCB_PERSIST_USN_JOURNAL )) {
  7556. BiasedBadVbo -= Scb->Vcb->UsnCacheBias;
  7557. }
  7558. #ifdef _WIN64
  7559. //
  7560. // Currently, we cannot hotfix the $logfile on ia64 as the
7561. // flush below will cause an AV because NtfsCheckWriteRange is not
7562. // capable of handling a call all the way from this routine
  7563. // as the last flush file offset can be very different from
  7564. // the bad vcn file offset. Instead we let someone else
  7565. // do the flush. The $logfile data will be bad and drive may
7566. // get marked dirty but we will recover on the next round
  7567. // as the bad cluster would have been replaced.
  7568. //
  7569. if (Scb != Vcb->LogFileScb) {
  7570. #endif
  7571. (VOID)NtfsFlushUserStream( IrpContext, Scb, &BiasedBadVbo, 1 );
  7572. #ifdef _WIN64
  7573. }
  7574. #endif
  7575. }
  7576. //
  7577. // Commit the transaction.
  7578. //
  7579. NtfsCommitCurrentTransaction( IrpContext );
  7580. //
  7581. // Now that the data is flushed to its new location, we will write the
  7582. // hot fix record. We don't write the log record if we are
  7583. // fixing the logfile. Instead we explicitly flush the Mft record
  7584. // for the log file. The log file is one file where we expect
  7585. // to be able to read the mapping pairs on restart.
  7586. //
  7587. if (Scb == Vcb->LogFileScb) {
  7588. if (Vcb->MftScb->FileObject != NULL) {
  7589. CcFlushCache( &Vcb->MftScb->NonpagedScb->SegmentObject,
  7590. &Li0,
  7591. Vcb->BytesPerFileRecordSegment * ATTRIBUTE_DEF_TABLE_NUMBER,
  7592. NULL );
  7593. }
  7594. } else {
  7595. (VOID) NtfsWriteLog( IrpContext,
  7596. Scb,
  7597. NULL,
  7598. HotFix,
  7599. NULL,
  7600. 0,
  7601. Noop,
  7602. NULL,
  7603. 0,
  7604. LlBytesFromClusters( Vcb, BadVcn ),
  7605. 0,
  7606. 0,
  7607. Vcb->BytesPerCluster );
  7608. //
  7609. // And we have to commit that one, too.
  7610. //
  7611. NtfsCommitCurrentTransaction( IrpContext );
  7612. }
  7613. //
  7614. // Now flush the log to insure that the hot fix gets remembered,
  7615. // especially important if this is the paging file.
  7616. //
  7617. LfsFlushToLsn( Vcb->LogHandle, LiMax );
  7618. HotFixTrace(("NTFS: Bad Cluster replaced\n"));
  7619. }
  7620. //
  7621. // Get ready for another possible pass through the loop
  7622. //
  7623. BadVcn = BadVcn + 1;
  7624. BadLcn = BadLcn + 1;
  7625. ASSERT( NULL == Bcb );
  7626. }
  7627. //
  7628. // Move the in memory allocation from the mirror of the paging file
7629. // back to the real scb in an atomic manner
  7630. //
  7631. if (NewScb != NULL) {
  7632. NtfsSwapMcbs( &NewScb->Mcb, &OriginalScb->Mcb );
  7633. NtfsDeleteScb( IrpContext, &NewScb );
  7634. Scb = OriginalScb;
  7635. }
  7636. } except(NtfsExceptionFilter( IrpContext, GetExceptionInformation() )) {
  7637. NTSTATUS ExceptionCode = GetExceptionCode();
  7638. //
  7639. // We are not prepared to have our IrpContext requeued, so just
  7640. // consider these cases to be bad luck. We will put a status of
  7641. // data error in the irp context and pass that code to the process
  7642. // exception routine.
  7643. //
  7644. if ((ExceptionCode == STATUS_LOG_FILE_FULL) ||
  7645. (ExceptionCode == STATUS_CANT_WAIT)) {
  7646. ExceptionCode = IrpContext->ExceptionStatus = STATUS_DATA_ERROR;
  7647. }
  7648. //
  7649. // We won't be calling ReleaseAllFiles. Decrement the Acquire count
  7650. // before releasing the Fcbs.
  7651. //
  7652. ASSERT( Vcb->AcquireFilesCount != 0 );
  7653. Vcb->AcquireFilesCount -= 1;
  7654. //
  7655. // Cleanup the temporary mirror scb (if there is one) while we have an
  7656. // irpcontext
  7657. //
  7658. if (NewScb != NULL) {
  7659. NtfsDeleteScb( IrpContext, &NewScb );
  7660. Scb = OriginalScb;
  7661. }
  7662. NtfsProcessException( IrpContext, NULL, ExceptionCode );
  7663. //
  7664. // The IrpContext is really gone now.
  7665. //
  7666. IrpContext = NULL;
  7667. PerformFullCleanup = FALSE;
  7668. ASSERT( IoGetTopLevelIrp() != (PIRP) &TopLevelContext );
  7669. }
  7670. }
  7671. //
  7672. // Let any errors be handled in the except clause above, however we
  7673. // cleanup on the way out, because for example we need the IrpContext
  7674. // still in the except clause.
  7675. //
  7676. try {
  7677. NtfsCleanupAttributeContext( IrpContext, &Context );
  7678. if (Scb != Vcb->LogFileScb) {
  7679. NtfsUnpinBcb( IrpContext, &Bcb );
  7680. } else {
  7681. NtfsUnpinBcbForThread( IrpContext, &Bcb, BcbOwner );
  7682. }
  7683. //
  7684. // If we aborted this operation then all of the file resources have
  7685. // already been released.
  7686. //
  7687. if (PerformFullCleanup) {
  7688. NtfsReleaseAllFiles( IrpContext, Vcb, FALSE );
  7689. NtfsReleaseVcb( IrpContext, Vcb );
  7690. //
  7691. // The files have been released but not the Vcb or the volume bitmap.
  7692. //
  7693. } else {
  7694. if ((Vcb->BitmapScb != NULL) && NtfsIsExclusiveScb( Vcb->BitmapScb )) {
  7695. NtfsReleaseResource( IrpContext, Vcb->BitmapScb );
  7696. }
  7697. //
  7698. // We need to release the Vcb twice since we specifically acquire
  7699. // it once and then again with all the files.
  7700. //
  7701. NtfsReleaseVcb( IrpContext, Vcb );
  7702. NtfsReleaseVcb( IrpContext, Vcb );
  7703. }
  7704. ObDereferenceObject( FileObject );
  7705. //
  7706. // The IrpContext and Irp will already be NULL if they have been completed already.
  7707. //
  7708. NtfsCompleteRequest( IrpContext, IrpToComplete, CompletionStatus );
  7709. } except(EXCEPTION_EXECUTE_HANDLER) {
  7710. NOTHING;
  7711. }
  7712. ASSERT( IoGetTopLevelIrp() != (PIRP) &TopLevelContext );
  7713. }
BOOLEAN
NtfsGetReservedBuffer (
    IN PFCB ThisFcb,
    OUT PVOID *Buffer,
    OUT PULONG Length,
    IN UCHAR Need2
    )

/*++

Routine Description:

    This routine allocates the reserved buffers depending on the needs of
    the caller.  If the caller might require two buffers then we will allocate
    buffers 1 or 2.  Otherwise we can allocate any of the three.

    All of the bookkeeping globals touched here (NtfsReservedBufferThread,
    NtfsReserved1Thread/2Thread/3Thread, NtfsReserved12Fcb, NtfsReserved3Fcb,
    NtfsReservedInUse, NtfsReserved2Count) are protected by
    NtfsReservedBufferMutex.  Bits 1, 2 and 4 of NtfsReservedInUse track
    buffers 1, 2 and 3 respectively.  Buffer 2 is reference counted
    (NtfsReserved2Count) because it can be granted recursively to the
    thread that already owns the buffer 1/2 pair.

Arguments:

    ThisFcb - This is the Fcb where the io is occurring.

    Buffer - Address to store the address of the allocated buffer.

    Length - Address to store the length of the returned buffer.

    Need2 - Zero if only one buffer needed.  Either 1 or 2 if two buffers
        might be needed.  Buffer 2 can be acquired recursively.  If buffer
        1 is needed and the current thread already owns buffer 1 then
        grant buffer three instead.

Return Value:

    BOOLEAN - Indicates whether the buffer was acquired.

--*/

{
    BOOLEAN Allocated = FALSE;
    PVOID CurrentThread;

    //
    // Capture the current thread and the Fcb for the file we are acquiring
    // the buffer for.
    //

    CurrentThread = (PVOID) PsGetCurrentThread();

    ExAcquireFastMutexUnsafe( &NtfsReservedBufferMutex );

    //
    // If we need two buffers then allocate either buffer 1 or buffer 2.
    // We allow this caller to get a buffer if
    //
    //      - He already owns one of these buffers (or)
    //
    //      - Neither of the 2 buffers are allocated (and)
    //      - No other thread has a buffer on behalf of this file
    //

    if (Need2) {

        //
        // NOTE: once this test succeeds we record this thread as the
        // exclusive owner of the 1/2 pair even if the specific buffer
        // requested below turns out to be unavailable.  That is benign:
        // the unavailable case can only arise when this thread was
        // already the recorded owner (otherwise bits 1 and 2 were clear
        // and the grant below succeeds).
        //

        if ((NtfsReservedBufferThread == CurrentThread) ||
            (!FlagOn( NtfsReservedInUse, 3 ) &&
             ((NtfsReserved3Fcb != ThisFcb) ||
              (NtfsReserved3Thread == CurrentThread)))) {

            NtfsReservedBufferThread = CurrentThread;
            NtfsReserved12Fcb = ThisFcb;

            //
            // Check whether the caller wants buffer 1 or buffer 2.
            //

            if (Need2 == RESERVED_BUFFER_TWO_NEEDED) {

                //
                // If we don't own buffer 1 then reserve it now.
                //

                if (!FlagOn( NtfsReservedInUse, 1 )) {

                    NtfsReserved1Thread = CurrentThread;
                    SetFlag( NtfsReservedInUse, 1 );

                    *Buffer = NtfsReserved1;
                    *Length = LARGE_BUFFER_SIZE;
                    Allocated = TRUE;

                //
                // Buffer 1 is already held (by this thread) - fall back
                // to buffer 3 if it is free.
                //

                } else if (!FlagOn( NtfsReservedInUse, 4 )) {

                    NtfsReserved3Fcb = ThisFcb;
                    NtfsReserved3Thread = CurrentThread;
                    SetFlag( NtfsReservedInUse, 4 );

                    *Buffer = NtfsReserved3;
                    *Length = LARGE_BUFFER_SIZE;
                    Allocated = TRUE;
                }

            } else {

                //
                // Workspace request - grant buffer 2 and bump its
                // recursion count.  Note the workspace grant returns the
                // smaller WORKSPACE_BUFFER_SIZE length.
                //

                ASSERT( Need2 == RESERVED_BUFFER_WORKSPACE_NEEDED );

                NtfsReserved2Thread = CurrentThread;
                SetFlag( NtfsReservedInUse, 2 );

                *Buffer = NtfsReserved2;
                *Length = WORKSPACE_BUFFER_SIZE;
                NtfsReserved2Count += 1;
                Allocated = TRUE;
            }
        }

    //
    // We only need 1 buffer.  If this thread is the exclusive owner then
    // we know it is safe to use buffer 2.  The data in this buffer doesn't
    // need to be preserved across a recursive call.
    //

    } else if (NtfsReservedBufferThread == CurrentThread) {

        NtfsReserved2Thread = CurrentThread;
        SetFlag( NtfsReservedInUse, 2 );

        *Buffer = NtfsReserved2;
        *Length = LARGE_BUFFER_SIZE;
        NtfsReserved2Count += 1;
        Allocated = TRUE;

    //
    // We only need 1 buffer.  Try for buffer 3 first.
    //

    } else if (!FlagOn( NtfsReservedInUse, 4)) {

        //
        // Check if the owner of the first two buffers is operating in the
        // same file but is a different thread.  We can't grant another buffer
        // for a different stream in the same file.
        //

        if (ThisFcb != NtfsReserved12Fcb) {

            NtfsReserved3Fcb = ThisFcb;
            NtfsReserved3Thread = CurrentThread;
            SetFlag( NtfsReservedInUse, 4 );

            *Buffer = NtfsReserved3;
            *Length = LARGE_BUFFER_SIZE;
            Allocated = TRUE;
        }

    //
    // If there is no exclusive owner then we can use either of the first
    // two buffers.  Note that getting one of the first two buffers will
    // lock out the guy who needs two buffers.
    //

    } else if (NtfsReservedBufferThread == NULL) {

        if (!FlagOn( NtfsReservedInUse, 2 )) {

            NtfsReserved2Thread = CurrentThread;
            SetFlag( NtfsReservedInUse, 2 );

            *Buffer = NtfsReserved2;
            *Length = LARGE_BUFFER_SIZE;
            NtfsReserved2Count += 1;
            Allocated = TRUE;

        } else if (!FlagOn( NtfsReservedInUse, 1 )) {

            NtfsReserved1Thread = CurrentThread;
            SetFlag( NtfsReservedInUse, 1 );

            *Buffer = NtfsReserved1;
            *Length = LARGE_BUFFER_SIZE;
            Allocated = TRUE;
        }
    }

    ExReleaseFastMutexUnsafe(&NtfsReservedBufferMutex);
    return Allocated;
}
  7846. BOOLEAN
  7847. NtfsFreeReservedBuffer (
  7848. IN PVOID Buffer
  7849. )
  7850. {
  7851. BOOLEAN Deallocated = FALSE;
  7852. ExAcquireFastMutexUnsafe(&NtfsReservedBufferMutex);
  7853. if (Buffer == NtfsReserved1) {
  7854. ASSERT( FlagOn( NtfsReservedInUse, 1 ));
  7855. ClearFlag( NtfsReservedInUse, 1 );
  7856. NtfsReserved1Thread = NULL;
  7857. if (!FlagOn( NtfsReservedInUse, 2)) {
  7858. NtfsReservedBufferThread = NULL;
  7859. NtfsReserved12Fcb = NULL;
  7860. }
  7861. Deallocated = TRUE;
  7862. } else if (Buffer == NtfsReserved2) {
  7863. ASSERT( FlagOn( NtfsReservedInUse, 2 ));
  7864. NtfsReserved2Count -= 1;
  7865. if (NtfsReserved2Count == 0) {
  7866. ClearFlag( NtfsReservedInUse, 2 );
  7867. NtfsReserved2Thread = NULL;
  7868. if (!FlagOn( NtfsReservedInUse, 1)) {
  7869. NtfsReservedBufferThread = NULL;
  7870. NtfsReserved12Fcb = NULL;
  7871. }
  7872. }
  7873. Deallocated = TRUE;
  7874. } else if (Buffer == NtfsReserved3) {
  7875. ASSERT( FlagOn( NtfsReservedInUse, 4 ));
  7876. ClearFlag( NtfsReservedInUse, 4 );
  7877. Deallocated = TRUE;
  7878. NtfsReserved3Thread = NULL;
  7879. NtfsReserved3Fcb = NULL;
  7880. }
  7881. ExReleaseFastMutexUnsafe(&NtfsReservedBufferMutex);
  7882. return Deallocated;
  7883. }
  7884. NTSTATUS
  7885. NtfsDefragFile (
  7886. IN PIRP_CONTEXT IrpContext,
  7887. IN PIRP Irp
  7888. )
  7889. /*++
  7890. Routine Description:
  7891. Direct defrag. This routines modifies the input buffer to track progress. So the
  7892. FSCTL must always be buffered.
  7893. Arguments:
  7894. Irp - Supplies the Irp being processed.
  7895. Return Value:
  7896. NTSTATUS - The return status for the operation.
  7897. --*/
  7898. {
  7899. NTSTATUS Status;
  7900. PIO_STACK_LOCATION IrpSp;
  7901. PIO_STACK_LOCATION NextIrpSp;
  7902. ULONG FsControlCode;
  7903. PFILE_OBJECT FileObject;
  7904. TYPE_OF_OPEN TypeOfOpen;
  7905. PVCB Vcb;
  7906. PFCB Fcb;
  7907. PSCB Scb;
  7908. PCCB Ccb;
  7909. ATTRIBUTE_ENUMERATION_CONTEXT AttrContext;
  7910. #if defined( _WIN64 )
  7911. MOVE_FILE_DATA MoveDataLocal;
  7912. #endif
  7913. PMOVE_FILE_DATA MoveData;
  7914. LONGLONG FileOffset;
  7915. PMDL Mdl = NULL;
  7916. BOOLEAN AcquiredScb = FALSE;
  7917. BOOLEAN AcquiredAllFiles = FALSE;
  7918. BOOLEAN AcquiredVcb = FALSE;
  7919. ULONG DeletePendingFailureCountsLeft;
  7920. extern POBJECT_TYPE *IoFileObjectType;
  7921. PVOID Buffer = NULL;
  7922. ULONG BufferLength;
  7923. NTFS_IO_CONTEXT NtfsIoContext;
  7924. BOOLEAN AcquiredBitmap = FALSE;
  7925. BOOLEAN AcquiredMft = FALSE;
  7926. BOOLEAN FreeRecentlyDeallocated = FALSE;
  7927. BOOLEAN IoctlSupported = TRUE;
  7928. PAGED_CODE( );
  7929. //
  7930. // Always make this synchronous for MoveFile
  7931. // We should never be in the FSP for this. Otherwise the user handle
  7932. // is invalid. Also disable quota accounting since defrag doesn't affect it
  7933. // Otherwise we might trigger it while moving attributes around due to mapping pair
  7934. // changes and deadlock
  7935. //
  7936. SetFlag( IrpContext->State, IRP_CONTEXT_STATE_WAIT | IRP_CONTEXT_STATE_QUOTA_DISABLE );
  7937. ASSERT( !FlagOn( IrpContext->State, IRP_CONTEXT_STATE_IN_FSP ));
  7938. //
  7939. // Get the current Irp stack location and save some references.
  7940. //
  7941. IrpSp = IoGetCurrentIrpStackLocation( Irp );
  7942. NextIrpSp = IoGetNextIrpStackLocation( Irp );
  7943. FsControlCode = IrpSp->Parameters.FileSystemControl.FsControlCode;
  7944. DebugTrace( +1, Dbg, ("NtfsMoveFile, FsControlCode = %08lx\n", FsControlCode) );
  7945. //
  7946. // Extract and decode the file object and check for type of open.
  7947. //
  7948. TypeOfOpen = NtfsDecodeFileObject( IrpContext, IrpSp->FileObject, &Vcb, &Fcb, &Scb, &Ccb, TRUE );
  7949. if ((Ccb == NULL) || !FlagOn( Ccb->AccessFlags, MANAGE_VOLUME_ACCESS )) {
  7950. NtfsCompleteRequest( IrpContext, Irp, STATUS_ACCESS_DENIED );
  7951. return STATUS_ACCESS_DENIED;
  7952. }
  7953. #if defined(_WIN64)
  7954. //
  7955. // Win32/64 thunking code
  7956. //
  7957. if (IoIs32bitProcess( Irp )) {
  7958. PMOVE_FILE_DATA32 MoveData32;
  7959. if (IrpSp->Parameters.FileSystemControl.InputBufferLength < sizeof( MOVE_FILE_DATA32 )) {
  7960. NtfsCompleteRequest( IrpContext, Irp, STATUS_BUFFER_TOO_SMALL );
  7961. return STATUS_BUFFER_TOO_SMALL;
  7962. }
  7963. MoveData32 = (PMOVE_FILE_DATA32) Irp->AssociatedIrp.SystemBuffer;
  7964. MoveDataLocal.ClusterCount = MoveData32->ClusterCount;
  7965. MoveDataLocal.FileHandle = (HANDLE)(ULONG_PTR)(LONG) MoveData32->FileHandle;
  7966. MoveDataLocal.StartingLcn.QuadPart = MoveData32->StartingLcn.QuadPart;
  7967. MoveDataLocal.StartingVcn.QuadPart = MoveData32->StartingVcn.QuadPart;
  7968. MoveData = &MoveDataLocal;
  7969. } else {
  7970. #endif
  7971. //
  7972. // Get the input buffer pointer and check its length.
  7973. //
  7974. if (IrpSp->Parameters.FileSystemControl.InputBufferLength < sizeof( MOVE_FILE_DATA )) {
  7975. NtfsCompleteRequest( IrpContext, Irp, STATUS_BUFFER_TOO_SMALL );
  7976. return STATUS_BUFFER_TOO_SMALL;
  7977. }
  7978. MoveData = Irp->AssociatedIrp.SystemBuffer;
  7979. #if defined(_WIN64)
  7980. }
  7981. #endif
  7982. //
  7983. // Try to get a pointer to the file object from the handle passed in.
  7984. // Remember that we need to dereference this as some point but don't
  7985. // do it right away in case some gets in before we acquire it.
  7986. //
  7987. //
  7988. // NOTE: if the rdr ever allows this to be done remotely we'll have to
  7989. // change our verification since Irp->RequestorNode would be kernel but we'd
  7990. // still need to verify the handle
  7991. //
  7992. Status = ObReferenceObjectByHandle( MoveData->FileHandle,
  7993. 0,
  7994. *IoFileObjectType,
  7995. Irp->RequestorMode,
  7996. &FileObject,
  7997. NULL );
  7998. if (!NT_SUCCESS(Status)) {
  7999. NtfsCompleteRequest( IrpContext, Irp, Status );
  8000. return Status;
  8001. }
  8002. //
  8003. // Check that this file object is opened on the same volume as the
  8004. // DASD handle used to call this routine.
  8005. //
  8006. if (FileObject->Vpb != Vcb->Vpb) {
  8007. ObDereferenceObject( FileObject );
  8008. NtfsCompleteRequest( IrpContext, Irp, STATUS_INVALID_PARAMETER );
  8009. return STATUS_INVALID_PARAMETER;
  8010. }
  8011. //
  8012. // Now decode this FileObject. We don't care to raise on dismounts here
  8013. // because we check for that further down anyway. Hence, RaiseOnError=FALSE.
  8014. //
  8015. TypeOfOpen = NtfsDecodeFileObject( IrpContext, FileObject, &Vcb, &Fcb, &Scb, &Ccb, FALSE );
  8016. //
  8017. // Limit the files we will allow defragging to. We can't defrag a file which needs
  8018. // its own mapping to write log records (volume bitmap). We also eliminate the
  8019. // log file and usn journal. For the MFT we disallow moving the first 16 non-user files
  8020. //
  8021. if (((TypeOfOpen != UserFileOpen) &&
  8022. (TypeOfOpen != UserDirectoryOpen) &&
  8023. (TypeOfOpen != UserViewIndexOpen)) ||
  8024. FlagOn( Fcb->FcbState, FCB_STATE_PAGING_FILE ) ||
  8025. ((NtfsSegmentNumber( &Fcb->FileReference ) < ATTRIBUTE_DEF_TABLE_NUMBER) &&
  8026. ((NtfsSegmentNumber( &Fcb->FileReference ) != MASTER_FILE_TABLE_NUMBER) ||
  8027. (MoveData->StartingVcn.QuadPart < LlClustersFromBytes( Vcb, FIRST_USER_FILE_NUMBER * Vcb->BytesPerFileRecordSegment )))) ||
  8028. FlagOn( Fcb->FcbState, FCB_STATE_USN_JOURNAL ) ||
  8029. NtfsEqualMftRef( &Fcb->FileReference, &BitmapFileReference )) {
  8030. ObDereferenceObject( FileObject );
  8031. NtfsCompleteRequest( IrpContext, Irp, STATUS_INVALID_PARAMETER );
  8032. return STATUS_INVALID_PARAMETER;
  8033. }
  8034. //
  8035. // Disallow defragging on a read-only volume
  8036. //
  8037. if (NtfsIsVolumeReadOnly( Vcb )) {
  8038. NtfsCompleteRequest( IrpContext, Irp, STATUS_MEDIA_WRITE_PROTECTED );
  8039. return STATUS_MEDIA_WRITE_PROTECTED;
  8040. }
  8041. //
  8042. // Verify that the start Vcn, Lcn and cluster count are valid values.
  8043. //
  8044. if ((MoveData->StartingVcn.QuadPart < 0) ||
  8045. (MoveData->StartingVcn.QuadPart + MoveData->ClusterCount < MoveData->ClusterCount) ||
  8046. (Vcb->MaxClusterCount < MoveData->StartingVcn.QuadPart + MoveData->ClusterCount) ||
  8047. (MoveData->StartingLcn.QuadPart < 0) ||
  8048. (MoveData->StartingLcn.QuadPart >= Vcb->TotalClusters)) {
  8049. ObDereferenceObject( FileObject );
  8050. NtfsCompleteRequest( IrpContext, Irp, STATUS_INVALID_PARAMETER );
  8051. return STATUS_INVALID_PARAMETER;
  8052. }
  8053. NtfsInitializeAttributeContext( &AttrContext );
  8054. try {
  8055. //
  8056. // For system files we need the vcb to test for dismounted volumes
  8057. //
  8058. if (FlagOn( Scb->Fcb->FcbState, FCB_STATE_SYSTEM_FILE )) {
  8059. NtfsAcquireExclusiveVcb( IrpContext, Vcb, TRUE );
  8060. AcquiredVcb = TRUE;
  8061. if (!FlagOn(Vcb->VcbState, VCB_STATE_VOLUME_MOUNTED )) {
  8062. try_return( Status = STATUS_VOLUME_DISMOUNTED );
  8063. }
  8064. }
  8065. //
  8066. // We now want to acquire the Scb to check if we can continue. It is
  8067. // important to test whether this Scb has a paging io resource, not
  8068. // whether the Fcb has one. Consider the case where a directory has
  8069. // a named data stream in it -- the Fcb will have a paging io resource,
  8070. // but the index root Scb will not. In that case it would be a mistake
  8071. // to acquire the Fcb's paging io resource, since that will not serialize
  8072. // this operation with NtfsAcquireFileForCcFlush.
  8073. //
  8074. SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_ACQUIRE_PAGING );
  8075. NtfsAcquireFcbWithPaging( IrpContext, Scb->Fcb, 0 );
  8076. AcquiredScb = TRUE;
  8077. if (FlagOn( Scb->ScbState, SCB_STATE_VOLUME_DISMOUNTED )) {
  8078. try_return( Status = STATUS_VOLUME_DISMOUNTED );
  8079. }
  8080. //
  8081. // Check for the deny defrag being set
  8082. //
  8083. if (FlagOn( Scb->ScbPersist, SCB_PERSIST_DENY_DEFRAG ) && !FlagOn( Ccb->Flags, CCB_FLAG_DENY_DEFRAG )) {
  8084. try_return( Status = STATUS_ACCESS_DENIED );
  8085. }
  8086. //
  8087. // Initialize the header if necc. If the attribute doesn't exist
  8088. // just leave - for instance an index allocation buffer
  8089. //
  8090. if (!NtfsLookupAttributeByName( IrpContext,
  8091. Fcb,
  8092. &Fcb->FileReference,
  8093. Scb->AttributeTypeCode,
  8094. &Scb->AttributeName,
  8095. 0,
  8096. FALSE,
  8097. &AttrContext )) {
  8098. try_return( Status = STATUS_SUCCESS );
  8099. }
  8100. if (!FlagOn( Scb->ScbState, SCB_STATE_HEADER_INITIALIZED )) {
  8101. NtfsUpdateScbFromAttribute( IrpContext, Scb, NtfsFoundAttribute( &AttrContext ) );
  8102. }
  8103. if ((TypeOfOpen == UserDirectoryOpen) || (TypeOfOpen == UserViewIndexOpen)) {
  8104. //
  8105. // Initialize the Index information in the Scb if not done yet for indices.
  8106. //
  8107. if (Scb->ScbType.Index.BytesPerIndexBuffer == 0) {
  8108. NtfsCleanupAttributeContext( IrpContext, &AttrContext );
  8109. NtfsInitializeAttributeContext( &AttrContext );
  8110. if (!NtfsLookupAttributeByName( IrpContext,
  8111. Fcb,
  8112. &Fcb->FileReference,
  8113. $INDEX_ROOT,
  8114. &Scb->AttributeName,
  8115. 0,
  8116. FALSE,
  8117. &AttrContext )) {
  8118. ASSERTMSG("Could not find Index Root for Scb\n", FALSE);
  8119. NtfsRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR, NULL, Scb->Fcb );
  8120. }
  8121. NtfsUpdateIndexScbFromAttribute( IrpContext, Scb, NtfsFoundAttribute(&AttrContext), FALSE );
  8122. }
  8123. //
  8124. // Mark the irpcontext so we don't recursively push the index root while defragging
  8125. // the index. If we hit this on retry the force push flag will be set and we can safely
  8126. // pre-push the index
  8127. //
  8128. if (FlagOn( IrpContext->State, IRP_CONTEXT_STATE_FORCE_PUSH )) {
  8129. NtfsPushIndexRoot( IrpContext, Scb );
  8130. }
  8131. SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_DEFERRED_PUSH );
  8132. }
  8133. //
  8134. // Cleanup the attribute context now to remove bcbs
  8135. //
  8136. NtfsCleanupAttributeContext( IrpContext, &AttrContext );
  8137. //
  8138. // If the stream is resident then we can return SUCCESS immediately.
  8139. // If the starting point is beyond file allocation then we can also
  8140. // return immediately.
  8141. //
  8142. FileOffset = (LONGLONG) LlBytesFromClusters( Vcb, MoveData->StartingVcn.QuadPart );
  8143. ASSERT( FileOffset >= 0 );
  8144. if (FlagOn( Scb->ScbState, SCB_STATE_ATTRIBUTE_RESIDENT ) ||
  8145. (Scb->Header.AllocationSize.QuadPart < FileOffset)) {
  8146. try_return( Status = STATUS_SUCCESS );
  8147. }
  8148. //
  8149. // Setup the intermediate buffer
  8150. //
  8151. ASSERT( LARGE_BUFFER_SIZE >= Vcb->BytesPerCluster );
  8152. if (LARGE_BUFFER_SIZE > Vcb->BytesPerCluster) {
  8153. BufferLength = LARGE_BUFFER_SIZE;
  8154. } else {
  8155. BufferLength = Vcb->BytesPerCluster;
  8156. }
  8157. IrpContext->Union.NtfsIoContext = &NtfsIoContext;
  8158. RtlZeroMemory( IrpContext->Union.NtfsIoContext, sizeof( NTFS_IO_CONTEXT ));
  8159. KeInitializeEvent( &IrpContext->Union.NtfsIoContext->Wait.SyncEvent,
  8160. NotificationEvent,
  8161. FALSE );
  8162. DeletePendingFailureCountsLeft = 10;
  8163. NtfsReleaseFcbWithPaging( IrpContext, Scb->Fcb );
  8164. AcquiredScb = FALSE;
  8165. if (AcquiredVcb) {
  8166. NtfsReleaseVcb( IrpContext, Vcb );
  8167. AcquiredVcb = FALSE;
  8168. }
  8169. if (IrpContext->TransactionId != 0) {
  8170. ASSERT( !AcquiredAllFiles );
  8171. //
  8172. // Complete the request which commits the pending
  8173. // transaction if there is one and releases of the
  8174. // acquired resources. The IrpContext will not
  8175. // be deleted because the no delete flag is set.
  8176. //
  8177. SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_DONT_DELETE | IRP_CONTEXT_FLAG_RETAIN_FLAGS );
  8178. NtfsCompleteRequest( IrpContext, NULL, STATUS_SUCCESS );
  8179. }
  8180. //
  8181. // Main loop - while there are more clusters requested to move try to move them
  8182. //
  8183. while (MoveData->ClusterCount > 0) {
  8184. LCN Lcn;
  8185. LONGLONG ClusterCount;
  8186. LONGLONG TransferSize;
  8187. LONGLONG TransferClusters;
  8188. try {
  8189. try {
  8190. //
8191. // If necc. grab all the files to synchronize with any transactions
  8192. // flush the log and try to free recently deallocated clusters
  8193. //
  8194. if (FreeRecentlyDeallocated) {
  8195. FreeRecentlyDeallocated = FALSE;
  8196. try {
  8197. NtfsPurgeFileRecordCache( IrpContext );
  8198. //
  8199. // Acquire all files to flush the log file and free recently deallocated.
  8200. // Note the flush may raise, normally log file full, which will get rid of
  8201. // the recently deallocated in a less efficient manner.
  8202. //
  8203. NtfsAcquireAllFiles( IrpContext, IrpContext->Vcb, FALSE, FALSE, FALSE );
  8204. AcquiredAllFiles = TRUE;
  8205. //
  8206. // Since we've dropped and reacquired all thes file, we must retest
  8207. // whether the volume has been dismounted. Use the vcb since acquireallfiles
  8208. // grabs it
  8209. //
  8210. if (!FlagOn( IrpContext->Vcb->VcbState, VCB_STATE_VOLUME_MOUNTED )) {
  8211. //
  8212. // Raise we don't try to acquire the Scb exclusive in the try-finally
  8213. // below. We only hold this resource shared from the AcquireAllFiles
  8214. // above. It is OK to clear the REALLOCATE_ON_WRITE bit somewhat
  8215. // unsynchronized since we will never touch this file again.
  8216. //
  8217. NtfsRaiseStatus( IrpContext, STATUS_VOLUME_DISMOUNTED, NULL, NULL );
  8218. }
  8219. LfsFlushToLsn( IrpContext->Vcb->LogHandle, LiMax );
  8220. NtfsFreeRecentlyDeallocated( IrpContext, IrpContext->Vcb, &LiMax, TRUE );
  8221. } finally {
  8222. if (AcquiredAllFiles) {
  8223. NtfsReleaseAllFiles( IrpContext, IrpContext->Vcb, FALSE );
  8224. AcquiredAllFiles = FALSE;
  8225. }
  8226. }
  8227. }
  8228. //
  8229. // Purge anything left in cache because we hold nothing at this point
  8230. //
  8231. NtfsPurgeFileRecordCache( IrpContext );
  8232. //
  8233. // For system files we need the vcb to test for dismounted volumes
  8234. //
  8235. if (FlagOn( Scb->Fcb->FcbState, FCB_STATE_SYSTEM_FILE )) {
  8236. NtfsAcquireExclusiveVcb( IrpContext, Vcb, TRUE );
  8237. AcquiredVcb = TRUE;
  8238. if (!FlagOn(Vcb->VcbState, VCB_STATE_VOLUME_MOUNTED )) {
  8239. try_return( Status = STATUS_VOLUME_DISMOUNTED );
  8240. }
  8241. }
  8242. //
  8243. // Reacquire everything for the defrag mft case + the mft flush
  8244. // resource so we know lazy writes aren't active while we're doing stuff
  8245. //
  8246. if (NtfsSegmentNumber( &Fcb->FileReference ) == MASTER_FILE_TABLE_NUMBER) {
  8247. NtfsAcquireAllFiles( IrpContext, Vcb, TRUE, FALSE, FALSE );
  8248. AcquiredAllFiles = TRUE;
  8249. ExAcquireResourceExclusiveLite( &Vcb->MftFlushResource, TRUE );
  8250. } else {
  8251. NtfsAcquireFcbWithPaging( IrpContext, Scb->Fcb, 0 );
  8252. AcquiredScb = TRUE;
  8253. //
  8254. // Since we've dropped and reacquired the Scb, we must retest
  8255. // whether the volume has been dismounted.
  8256. //
  8257. if (FlagOn( Scb->ScbState, SCB_STATE_VOLUME_DISMOUNTED )) {
  8258. try_return( Status = STATUS_VOLUME_DISMOUNTED );
  8259. }
  8260. }
  8261. //
  8262. // If we acquired all the files above now do the work to check for free space in the mft
  8263. //
  8264. if (AcquiredAllFiles && (Vcb->MftScb->ScbType.Mft.RecordAllocationContext.NumberOfFreeBits <= 1)) {
  8265. MFT_SEGMENT_REFERENCE FileNumber;
  8266. #ifdef BENL_DBG
  8267. KdPrint(( "NTFS: too few mft records: 0x%x\n", Vcb->MftScb->ScbType.Mft.RecordAllocationContext.NumberOfFreeBits ));
  8268. #endif
  8269. FileNumber = NtfsAllocateMftRecord( IrpContext, Vcb, FALSE );
  8270. ASSERT( 0 == FileNumber.SegmentNumberHighPart );
  8271. NtfsDeallocateMftRecord( IrpContext, Vcb, FileNumber.SegmentNumberLowPart );
  8272. NtfsCheckpointCurrentTransaction( IrpContext );
  8273. #ifdef BENL_DBG
  8274. KdPrint(( "NTFS: after corection mft records: 0x%x\n", Vcb->MftScb->ScbType.Mft.RecordAllocationContext.NumberOfFreeBits ));
  8275. #endif
  8276. ASSERT( Vcb->MftScb->ScbType.Mft.RecordAllocationContext.NumberOfFreeBits > 1 );
  8277. }
  8278. //
  8279. // Check if the attribute was deleted in between
  8280. //
  8281. if (FlagOn( Scb->ScbState, SCB_STATE_ATTRIBUTE_DELETED)) {
  8282. try_return( Status = STATUS_FILE_DELETED );
  8283. }
  8284. //
  8285. // Leave if after regaining the file locks we are out of range
  8286. //
  8287. if (MoveData->StartingVcn.QuadPart > LlClustersFromBytes( Vcb, Scb->Header.AllocationSize.QuadPart )) {
  8288. break;
  8289. }
  8290. //
  8291. // Check if this range of allocation exists - if not we can skip any work
  8292. //
  8293. if (NtfsLookupAllocation( IrpContext, Scb, MoveData->StartingVcn.QuadPart, &Lcn, &ClusterCount, NULL, NULL )) {
  8294. //
  8295. // Now loop over the current range moving pieces of it
  8296. //
  8297. while ((MoveData->ClusterCount > 0) && (ClusterCount > 0)) {
  8298. LONGLONG UpperBound;
  8299. if (ClusterCount > MoveData->ClusterCount) {
  8300. TransferSize = LlBytesFromClusters( Vcb, MoveData->ClusterCount );
  8301. } else {
  8302. TransferSize = LlBytesFromClusters( Vcb, ClusterCount );
  8303. }
  8304. if (TransferSize > BufferLength ) {
  8305. TransferSize = BufferLength;
  8306. }
  8307. TransferClusters = LlClustersFromBytesTruncate( Vcb, TransferSize );
  8308. //
  8309. // Reserve the new cluster if it falls within volume range
  8310. //
  8311. if (MoveData->StartingLcn.QuadPart + TransferClusters > Vcb->TotalClusters) {
  8312. NtfsRaiseStatus( IrpContext, STATUS_ALREADY_COMMITTED, NULL, NULL );
  8313. }
  8314. NtfsPreAllocateClusters( IrpContext, Vcb, MoveData->StartingLcn.QuadPart, TransferClusters, &AcquiredBitmap, &AcquiredMft );
  8315. //
  8316. // Only actually transfer ranges within VDD or VDL - for those between
  8317. // VDD and allocation size just reallocate. Use VDD for data streams
  8318. // for all others that don't update VDD use VDL
  8319. //
  8320. if (($DATA == Scb->AttributeTypeCode) &&
  8321. !FlagOn( Scb->ScbState, SCB_STATE_MODIFIED_NO_WRITE ) &&
  8322. FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_COMPRESSION_MASK)) {
  8323. //
  8324. // Modified no write streams don't use VDD. The only data
  8325. // stream currently like this is $Secure and $UsnJrnl which are not
  8326. // defraggable
  8327. //
  8328. UpperBound = LlClustersFromBytes( Vcb, Scb->ValidDataToDisk );
  8329. } else {
  8330. UpperBound = LlClustersFromBytes( Vcb, Scb->Header.ValidDataLength.QuadPart );
  8331. }
  8332. if (MoveData->StartingVcn.QuadPart <= UpperBound) {
  8333. //
  8334. // Call the storage and see if they support
  8335. // the copy data ioctl - this allows lower drivers to
  8336. // implement a more efficient version of the copy and participate
  8337. // particularly in volsnap's case in the defrag
  8338. //
  8339. if (IoctlSupported) {
  8340. DISK_COPY_DATA_PARAMETERS CopyData;
  8341. CopyData.SourceOffset.QuadPart = LlBytesFromClusters( Vcb, Lcn );
  8342. CopyData.DestinationOffset.QuadPart = LlBytesFromClusters( Vcb, MoveData->StartingLcn.QuadPart );
  8343. CopyData.CopyLength.QuadPart = TransferSize;
  8344. CopyData.Reserved = 0;
  8345. Status = NtfsDeviceIoControl( IrpContext,
  8346. Vcb->TargetDeviceObject,
  8347. IOCTL_DISK_COPY_DATA,
  8348. &CopyData,
  8349. sizeof( CopyData ),
  8350. NULL,
  8351. 0,
  8352. NULL );
  8353. }
  8354. if (!IoctlSupported || !NT_SUCCESS( Status )) {
  8355. Status = STATUS_SUCCESS;
  8356. IoctlSupported = FALSE;
  8357. NtfsCreateMdlAndBuffer( IrpContext,
  8358. Scb,
  8359. RESERVED_BUFFER_ONE_NEEDED,
  8360. &BufferLength,
  8361. &Mdl,
  8362. &Buffer );
  8363. Irp->MdlAddress = Mdl;
  8364. //
  8365. // First read the cluster
  8366. //
  8367. NtfsSingleAsync( IrpContext,
  8368. Vcb->TargetDeviceObject,
  8369. LlBytesFromClusters( Vcb, Lcn ),
  8370. (ULONG)TransferSize,
  8371. Irp,
  8372. IRP_MJ_READ,
  8373. 0 );
  8374. NtfsWaitSync( IrpContext );
  8375. NtfsNormalizeAndCleanupTransaction( IrpContext,
  8376. &Irp->IoStatus.Status,
  8377. TRUE,
  8378. STATUS_UNEXPECTED_IO_ERROR );
  8379. //
  8380. // Clear return info field
  8381. //
  8382. Irp->IoStatus.Information = 0;
  8383. //
  8384. // Then transfer it to the new location
  8385. //
  8386. NtfsSingleAsync( IrpContext,
  8387. Vcb->TargetDeviceObject,
  8388. LlBytesFromClusters( Vcb, MoveData->StartingLcn.QuadPart ),
  8389. (ULONG)TransferSize,
  8390. Irp,
  8391. IRP_MJ_WRITE,
  8392. 0 );
  8393. NtfsWaitSync( IrpContext );
  8394. NtfsNormalizeAndCleanupTransaction( IrpContext,
  8395. &Irp->IoStatus.Status,
  8396. TRUE,
  8397. STATUS_UNEXPECTED_IO_ERROR );
  8398. Irp->IoStatus.Information = 0;
  8399. //
  8400. // Release the buffer before calling lfs which may also need the reserved buffer
  8401. //
  8402. NtfsDeleteMdlAndBuffer( Mdl, Buffer );
  8403. Irp->MdlAddress = NULL;
  8404. Buffer = NULL;
  8405. }
  8406. }
  8407. //
  8408. // Finally reallocate the cluster in the scb and checkpoint it
  8409. //
  8410. NtfsReallocateRange( IrpContext, Scb, MoveData->StartingVcn.QuadPart, TransferClusters, MoveData->StartingVcn.QuadPart, TransferClusters, &MoveData->StartingLcn.QuadPart );
  8411. NtfsCheckpointCurrentTransaction( IrpContext );
  8412. ASSERT( IrpContext->TransactionId == 0 );
  8413. if (AcquiredBitmap) {
  8414. NtfsReleaseScb( IrpContext, Vcb->BitmapScb );
  8415. AcquiredBitmap = FALSE;
  8416. }
  8417. if (AcquiredMft) {
  8418. NtfsReleaseScb( IrpContext, Vcb->MftScb );
  8419. AcquiredMft = FALSE;
  8420. }
  8421. MoveData->StartingLcn.QuadPart += TransferClusters;
  8422. MoveData->StartingVcn.QuadPart += TransferClusters;
  8423. MoveData->ClusterCount -= (ULONG)TransferClusters;
  8424. ClusterCount -= TransferClusters;
  8425. Lcn += TransferClusters;
  8426. } // endwhile loop over lcn range
  8427. } else {
  8428. //
  8429. // This is a hole skip over it
  8430. //
  8431. MoveData->StartingVcn.QuadPart += ClusterCount;
  8432. if (ClusterCount > MoveData->ClusterCount) {
  8433. MoveData->ClusterCount = 0;
  8434. } else {
  8435. MoveData->ClusterCount -= (ULONG)ClusterCount;
  8436. }
  8437. }
  8438. } except( NtfsDefragExceptionFilter( IrpContext, GetExceptionInformation(), &DeletePendingFailureCountsLeft )) {
  8439. //
  8440. // Cleanup the delete pending failure and next time through the loop
  8441. // try to free the recently deallocated clusters to allow the cluster to be deleted
  8442. //
  8443. NtfsMinimumExceptionProcessing( IrpContext );
  8444. IrpContext->ExceptionStatus = STATUS_SUCCESS;
  8445. FreeRecentlyDeallocated = TRUE;
  8446. }
  8447. } finally {
  8448. //
  8449. // Unlock the file and let anyone else access the file before
  8450. // looping back.
  8451. //
  8452. if (Buffer != NULL) {
  8453. NtfsDeleteMdlAndBuffer( Mdl, Buffer );
  8454. Irp->MdlAddress = NULL;
  8455. Buffer = NULL;
  8456. }
  8457. if (AcquiredBitmap) {
  8458. NtfsReleaseScb( IrpContext, Vcb->BitmapScb );
  8459. AcquiredBitmap = FALSE;
  8460. }
  8461. if (AcquiredMft) {
  8462. NtfsReleaseScb( IrpContext, Vcb->MftScb );
  8463. AcquiredMft = FALSE;
  8464. }
  8465. if (AcquiredScb) {
  8466. NtfsReleaseFcbWithPaging( IrpContext, Scb->Fcb );
  8467. AcquiredScb = FALSE;
  8468. }
  8469. if (AcquiredAllFiles) {
  8470. ExReleaseResourceLite( &Vcb->MftFlushResource );
  8471. NtfsReleaseAllFiles( IrpContext, Vcb, FALSE );
  8472. AcquiredAllFiles = FALSE;
  8473. }
  8474. if (AcquiredVcb) {
  8475. NtfsReleaseVcb( IrpContext, Vcb );
  8476. AcquiredVcb = FALSE;
  8477. }
  8478. }
  8479. } // endwhile
  8480. Status = STATUS_SUCCESS;
  8481. try_exit: NOTHING;
  8482. } finally {
  8483. DebugUnwind( NtfsDefragFile );
  8484. NtfsCleanupAttributeContext( IrpContext, &AttrContext );
  8485. IrpContext->Union.NtfsIoContext = NULL;
  8486. ASSERT( !AbnormalTermination() || (IrpContext->ExceptionStatus != STATUS_SUCCESS) );
  8487. ASSERT( (Buffer == NULL) &&
  8488. !AcquiredBitmap &&
  8489. !AcquiredMft &&
  8490. !AcquiredAllFiles );
  8491. if (AcquiredScb) {
  8492. NtfsReleaseFcbWithPaging( IrpContext, Scb->Fcb );
  8493. }
  8494. if (AcquiredVcb) {
  8495. NtfsReleaseVcb( IrpContext, Vcb );
  8496. AcquiredVcb = FALSE;
  8497. }
  8498. //
  8499. // Remove our reference on the users file object.
  8500. //
  8501. ObDereferenceObject( FileObject );
  8502. }
  8503. NtfsCompleteRequest( IrpContext, Irp, Status );
  8504. return Status;
  8505. }
  8506. LONG
  8507. NtfsDefragExceptionFilter (
  8508. IN PIRP_CONTEXT IrpContext OPTIONAL,
  8509. IN PEXCEPTION_POINTERS ExceptionPointer,
  8510. IN OUT PULONG DeletePendingFailureCountsLeft
  8511. )
  8512. /*++
  8513. Routine Description:
  8514. Exception handler for defrag - pass on for all exceptions other than delete pending
  8515. in that case if there the number of retries left is > 0 execute the handler
  8516. Arguments:
  8517. ExceptionPointer - Supplies the exception record to being checked.
  8518. DeletePendingFailureCountsLeft - how many more times to retry a delete pending
  8519. Return Value:
  8520. ULONG - returns EXCEPTION_EXECUTE_HANDLER or CONTINUE_SEARCH
  8521. --*/
  8522. {
  8523. UNREFERENCED_PARAMETER( IrpContext );
  8524. if ((STATUS_DELETE_PENDING == ExceptionPointer->ExceptionRecord->ExceptionCode)) {
  8525. *DeletePendingFailureCountsLeft -= 1;
  8526. if ((*DeletePendingFailureCountsLeft) > 0) {
  8527. return EXCEPTION_EXECUTE_HANDLER;
  8528. } else {
  8529. return EXCEPTION_CONTINUE_SEARCH;
  8530. }
  8531. } else {
  8532. return EXCEPTION_CONTINUE_SEARCH;
  8533. }
  8534. }
  8535. //
  8536. // Because of protocol limitations in CIFS which uses 16 bits,
  8537. // redirector can't currently accept buffer sizes larger than 64K.
  8538. //
  8539. #define RDR_BUFFER_SIZE_LIMIT 0x00010000L
NTSTATUS
NtfsReadFromPlex(
    IN PIRP_CONTEXT IrpContext,
    IN PIRP Irp
    )

/*++

Routine Description:

    This implements directed reads from plexes.  Given an offset, a length
    and a plexnumber along with a handle to a file or a volume, this
    coordinates reads from an underlying dynamic (mirrored) volume manager.

    For a volume open the request is forwarded directly to the volume
    manager as a single IOCTL_VOLUME_READ_PLEX.  For a file open the file's
    allocation is walked with NtfsPrepareBuffers and one IOCTL is issued per
    contiguous on-disk run.

    Note that we ignore the VcbState flag VCB_STATE_NO_SECONDARY_AVAILABLE
    altogether and let the lower level driver respond.

Arguments:

    IrpContext - Supplies the IrpContext to process

    Irp - Incoming FSCTL IRP.

Return Value:

    Status SUCCESS on success, otherwise the relevant error code.

--*/

{
    PPLEX_READ_DATA_REQUEST ReadData;
    PIO_STACK_LOCATION IrpSp;
    ULONG InputBufferLength;
    ULONG UserBufferLength;
    NTSTATUS Status = STATUS_SUCCESS;
    BOOLEAN Wait = TRUE;
    ULONG NumberOfRuns, RemainingByteCount;
    COMPRESSION_CONTEXT CompContext;
    TYPE_OF_OPEN TypeOfOpen;
    IO_RUN IoRuns[NTFS_MAX_PARALLEL_IOS];
    VBO ByteOffset;
    ULONG ByteCount;
    ULONG BytesToEof;
    ULONG LastReadByteCount;
    ULONG CurByteCount;
    LOGICAL AcquiredScb = FALSE;
    VOLUME_READ_PLEX_INPUT NplexRead;
    PVCB Vcb;
    PSCB Scb;
    PFCB Fcb;
    PCCB Ccb;

    //
    //  Extract and decode the file object
    //

    IrpSp = IoGetCurrentIrpStackLocation( Irp );

    TypeOfOpen = NtfsDecodeFileObject( IrpContext,
                                       IrpSp->FileObject,
                                       &Vcb,
                                       &Fcb,
                                       &Scb,
                                       &Ccb,
                                       FALSE );

    //
    //  FileOpens and VolumeOpens are allowed.
    //

    if ((TypeOfOpen != UserFileOpen) &&
        (TypeOfOpen != UserVolumeOpen)) {

        Status = STATUS_INVALID_PARAMETER;
        NtfsCompleteRequest( IrpContext, Irp, Status );
        DebugTrace( -1, Dbg, ("NtfsReadFromPlex -> %08lx\n", Status) );
        return Status;
    }

    //
    //  This FSCTL is of type METHOD_OUT_DIRECT.  The Io Manager has already
    //  copied the input parameters into the systembuffer field, probed the
    //  output buffer and locked the Mdls for us.  So we can access these fields
    //  without fear.
    //

    ReadData = (PPLEX_READ_DATA_REQUEST)Irp->AssociatedIrp.SystemBuffer;

    if (ReadData == NULL) {

        Status = STATUS_INVALID_PARAMETER;
        NtfsCompleteRequest( IrpContext, Irp, Status );
        DebugTrace( -1, Dbg, ("NtfsReadFromPlex -> %08lx\n", Status) );
        return Status;
    }

    ByteOffset = ReadData->ByteOffset.QuadPart;
    ByteCount = ReadData->ByteLength;

    //
    //  Now, do the grunt work and clean up within a try finally.
    //

    try {

        //
        //  Sanity check the read length.
        //
        //  NOTE: we come back to this label (with a re-aligned ByteCount and
        //  the Scb released) after trimming the request to file size below,
        //  so all the validation is re-run against the adjusted values.
        //

        check_values:

        CurByteCount = 0;
        BytesToEof = 0;
        Irp->IoStatus.Information = 0;

        if ((ByteCount > MAXLONGLONG - ByteOffset) ||

            //
            //  File offsets should be cluster aligned
            //

            ((TypeOfOpen == UserFileOpen) &&
             ((ByteOffset & Vcb->ClusterMask) || (ByteCount & Vcb->ClusterMask))) ||

            //
            //  Volume offsets should be sector aligned
            //

            ((TypeOfOpen == UserVolumeOpen) &&
             (((ULONG)ByteOffset & (Vcb->BytesPerSector - 1)) || (ByteCount & (Vcb->BytesPerSector - 1))))) {

            Status = STATUS_INVALID_PARAMETER;
            leave;
        }

        //
        //  No-op
        //

        if (ByteCount == 0) {

            ASSERT(Status == STATUS_SUCCESS);
            ASSERT(CurByteCount == ByteCount);
            leave;
        }

        //
        //  Because of protocol limitations in CIFS which uses 16 bits,
        //  redirector can't accept buffer sizes larger than 64K.
        //
        //  This masks off the low 16 bits, so any ByteCount >= 64K
        //  (including exactly 64K) is rejected.
        //

        if (ByteCount & ~(RDR_BUFFER_SIZE_LIMIT - 1L)) {

            Status = STATUS_INVALID_BUFFER_SIZE;
            leave;
        }

        //
        //  Sanity check input/output parameters.
        //

        InputBufferLength = IrpSp->Parameters.FileSystemControl.InputBufferLength;
        UserBufferLength = IrpSp->Parameters.FileSystemControl.OutputBufferLength;

        if ((InputBufferLength < sizeof( PLEX_READ_DATA_REQUEST )) ||
            (UserBufferLength < ByteCount)) {

            Status = STATUS_BUFFER_TOO_SMALL;
            leave;
        }

        //
        //  For volume DASD reads, we just send an IOCTL down...
        //

        if (TypeOfOpen == UserVolumeOpen) {

            NplexRead.ByteOffset.QuadPart = ByteOffset;
            NplexRead.Length = ByteCount;
            NplexRead.PlexNumber = ReadData->PlexNumber;

            Status = NtfsDeviceIoControl( IrpContext,
                                          Vcb->TargetDeviceObject,
                                          IOCTL_VOLUME_READ_PLEX,
                                          &NplexRead,
                                          sizeof( VOLUME_READ_PLEX_INPUT ),
                                          NtfsMapUserBuffer( Irp, NormalPagePriority ),
                                          ByteCount,
                                          &Irp->IoStatus.Information );

            ASSERT(!NT_SUCCESS( Status ) || Irp->IoStatus.Information != 0);
            DebugTrace( 0, Dbg, ("NtfsReadFromPlex: VolumeRead\n") );
            leave;
        }

        NtfsAcquireSharedScb( IrpContext, Scb );
        AcquiredScb = TRUE;

        //
        //  If the volume isn't mounted then fail immediately.
        //

        if (FlagOn( Scb->ScbState, SCB_STATE_VOLUME_DISMOUNTED )) {

            Status = STATUS_VOLUME_DISMOUNTED;
            leave;
        }

        //
        //  We don't get along with encrypted/compressed/sparse things.
        //  ISSUE: supw: actually sparse should be ok, now that i'm using preparebuffers.
        //

        if (FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_ENCRYPTED |
                                         ATTRIBUTE_FLAG_COMPRESSION_MASK |
                                         ATTRIBUTE_FLAG_SPARSE )) {

            DebugTrace( 0, Dbg, ("NtfsReadFromPlex: File encrypted or compressed -> %08lx\n",
                                 STATUS_INVALID_PARAMETER) );
            Status = STATUS_INVALID_PARAMETER;
            leave;
        }

        //
        //  Take the FsRtl header lock so FileSize is stable while we
        //  compare and trim the request against it.
        //

        NtfsAcquireFsrtlHeader( Scb );

        //
        //  Make sure we aren't starting past the end of the file, in which case
        //  we would have nothing to return.
        //

        if (ByteOffset >= Scb->Header.FileSize.QuadPart) {

            DebugTrace( 0, Dbg, ("NtfsReadFromPlex: beyond eof\n") );
            Status = STATUS_END_OF_FILE;
            NtfsReleaseFsrtlHeader( Scb );
            leave;
        }

        //
        //  We can't read beyond filesize.
        //

        if (Scb->Header.FileSize.QuadPart - ByteOffset < ByteCount) {

            //
            //  Remember the exact bytes to end-of-file (used to zero the
            //  tail of the user buffer in the finally clause), then round
            //  the transfer up to a cluster boundary.
            //

            BytesToEof = ByteCount = (ULONG)(Scb->Header.FileSize.QuadPart - ByteOffset);
            ByteCount = ClusterAlign( Vcb, ByteCount );

            //
            //  We need to sanity check ByteCount again, since we rounded it up.
            //

            NtfsReleaseFsrtlHeader( Scb );
            ASSERT( AcquiredScb );
            NtfsReleaseScb( IrpContext, Scb );
            goto check_values;
        }

        NtfsReleaseFsrtlHeader( Scb );

        //
        //  Can't deal with resident files.
        //

        if (FlagOn( Scb->ScbState, SCB_STATE_ATTRIBUTE_RESIDENT )) {

            Status = STATUS_NOT_IMPLEMENTED;
            leave;
        }

        //
        //  PrepareBuffers needs a CompressionContext for the IO_RUN array.
        //

        RtlZeroMemory( &CompContext, sizeof(COMPRESSION_CONTEXT) );
        CompContext.IoRuns = IoRuns;
        CompContext.AllocatedRuns = NTFS_MAX_PARALLEL_IOS;
        CompContext.FinishBuffersNeeded = FALSE;

        //
        //  Get the run information, and send the IOCTL down.
        //

        while (TRUE) {

            ULONG RunCount;
            ULONG_PTR SizeOfThisRead;

            Irp->IoStatus.Status = STATUS_SUCCESS;

            //
            //  Build an array of io runs to do our reads from.
            //

            RemainingByteCount = NtfsPrepareBuffers( IrpContext,
                                                     Irp,
                                                     Scb,
                                                     &ByteOffset,
                                                     ByteCount,
                                                     0,
                                                     &Wait,
                                                     &NumberOfRuns,
                                                     &CompContext );

            ASSERT( RemainingByteCount < ByteCount );
            ASSERT( Wait == TRUE );
            ASSERT( NumberOfRuns > 0 );
            ASSERT( NumberOfRuns > 1 || RemainingByteCount == 0 );

            //
            //  Send synchronous IOCTLs down to do the plex reads.
            //

            for (RunCount = 0;
                 RunCount < NumberOfRuns;
                 RunCount += 1) {

                NplexRead.ByteOffset.QuadPart = CompContext.IoRuns[RunCount].StartingLbo;
                NplexRead.Length = CompContext.IoRuns[RunCount].ByteCount;
                NplexRead.PlexNumber = ReadData->PlexNumber;

                //
                //  While CurByteCOunt keeps track of the total amount of bytes read,
                //  SizeOfThisRead carries the size of the last read done.  This is usually
                //  equal to the IoRuns[].ByteCount.
                //

                SizeOfThisRead = 0;

                ASSERT(CompContext.IoRuns[RunCount].ByteCount > 0);

                Status = NtfsDeviceIoControl( IrpContext,
                                              Vcb->TargetDeviceObject,
                                              IOCTL_VOLUME_READ_PLEX,
                                              &NplexRead,
                                              sizeof(VOLUME_READ_PLEX_INPUT),
                                              Add2Ptr( NtfsMapUserBuffer( Irp, NormalPagePriority ), CurByteCount ),
                                              CompContext.IoRuns[RunCount].ByteCount,
                                              &SizeOfThisRead);

                if (!NT_SUCCESS( Status )) {

                    //
                    //  Success if we read anything at all.
                    //

                    if (CurByteCount != 0) {

                        Status = STATUS_SUCCESS;
                    }

                    leave;
                }

                //
                //  This value was taken from the Iosb.Information field of the subordinate
                //  IRP, and should contain a nonzero value for successful completions.
                //

                ASSERT( (SizeOfThisRead != 0) && ((ULONG) SizeOfThisRead <= CompContext.IoRuns[RunCount].ByteCount) );

                CurByteCount = CurByteCount + (ULONG) SizeOfThisRead;

                //
                //  We don't have any more space left
                //

                if (UserBufferLength <= (ULONG) SizeOfThisRead) {

                    ASSERT( Status == STATUS_SUCCESS );
                    leave;
                }

                UserBufferLength = UserBufferLength - (ULONG) SizeOfThisRead;
            }

            if (RemainingByteCount == 0) {

                ASSERT( Status == STATUS_SUCCESS );
                break;
            }

            //
            //  We have more to read.  Make sure we have enough buffer space.
            //
            //  Advance the file offset and the system buffer offset past
            //  what PrepareBuffers mapped this pass, then loop for the rest.
            //

            LastReadByteCount = ByteCount - RemainingByteCount;
            ByteOffset = ByteOffset + LastReadByteCount;
            CompContext.SystemBufferOffset = CompContext.SystemBufferOffset + LastReadByteCount;
            ByteCount = RemainingByteCount;
        }

    } finally {

        if (AcquiredScb) {

            NtfsReleaseScb( IrpContext, Scb );
        }

        //
        //  If nothing raised then complete the irp.
        //

        if (!AbnormalTermination()) {

            if (NT_SUCCESS( Status )) {

                //
                //  We have to be careful to zero beyond the filesize.
                //
                //  BytesToEof is nonzero only when the request was trimmed
                //  at end-of-file above; anything read past it is cluster
                //  rounding slop that must not leak to the caller.
                //

                if (CurByteCount > BytesToEof) {

                    RtlZeroMemory( Add2Ptr( NtfsMapUserBuffer( Irp, NormalPagePriority ), BytesToEof ),
                                   CurByteCount - BytesToEof );
                    Irp->IoStatus.Information = BytesToEof;

                } else {

                    Irp->IoStatus.Information = CurByteCount;
                }
            }

            NtfsCompleteRequest( IrpContext, Irp, Status );
        }
    }

    DebugTrace( -1, Dbg, ("NtfsReadPlex-> %08lx\n", Status) );
    return Status;
}
  8856. #if EFSDBG
  8857. NTSTATUS
  8858. NtfsDummyEfsRead (
  8859. IN OUT PUCHAR InOutBuffer,
  8860. IN PLARGE_INTEGER Offset,
  8861. IN ULONG BufferSize,
  8862. IN PVOID Context
  8863. )
  8864. {
  8865. #ifndef SYSCACHE
  8866. ULONG LocalOffset = 0;
  8867. #endif
  8868. UNREFERENCED_PARAMETER( Context );
  8869. //
  8870. // Exit cleanly if this is the call that is just there to
  8871. // make sure the compiler doesn't throw this function out.
  8872. //
  8873. if (BufferSize != 0) {
  8874. #ifdef SYSCACHE
  8875. if (FALSE && VerifySyscacheData) {
  8876. FsRtlVerifySyscacheData( NULL,
  8877. InOutBuffer,
  8878. BufferSize,
  8879. Offset->LowPart );
  8880. }
  8881. #else
  8882. ASSERT( (Offset->QuadPart & 0x1ff) == 0 );
  8883. ASSERT( (BufferSize & 0x1ff) == 0 );
  8884. while((LocalOffset + 8) < BufferSize) {
  8885. *((PLONGLONG) Add2Ptr(InOutBuffer, LocalOffset)) ^= (Offset->QuadPart + (LONGLONG) LocalOffset);
  8886. LocalOffset += 0x200;
  8887. }
  8888. // UNREFERENCED_PARAMETER( InOutBuffer );
  8889. // UNREFERENCED_PARAMETER( Offset );
  8890. // UNREFERENCED_PARAMETER( BufferSize );
  8891. #endif
  8892. }
  8893. //
  8894. // Not much to do, decryption is done in place, so we can just leave the bits
  8895. // in the buffer.
  8896. //
  8897. return STATUS_SUCCESS;
  8898. }
  8899. NTSTATUS
  8900. NtfsDummyEfsWrite (
  8901. IN PUCHAR InBuffer,
  8902. OUT PUCHAR OutBuffer,
  8903. IN PLARGE_INTEGER Offset,
  8904. IN ULONG BufferSize,
  8905. IN PUCHAR Context
  8906. )
  8907. {
  8908. #ifndef SYSCACHE
  8909. ULONG LocalOffset = 0;
  8910. #endif
  8911. UNREFERENCED_PARAMETER( Context );
  8912. //
  8913. // Exit cleanly if this is the call that is just there to
  8914. // make sure the compiler doesn't throw this function out.
  8915. //
  8916. if (BufferSize != 0) {
  8917. //
  8918. // Just copy the plaintext to the output buffer.
  8919. //
  8920. RtlCopyMemory( OutBuffer,
  8921. InBuffer,
  8922. BufferSize );
  8923. #ifdef SYSCACHE
  8924. if (FALSE && VerifySyscacheData) {
  8925. FsRtlVerifySyscacheData( NULL,
  8926. OutBuffer,
  8927. BufferSize,
  8928. Offset->LowPart );
  8929. }
  8930. #else
  8931. ASSERT( (Offset->QuadPart & 0x1ff) == 0 );
  8932. ASSERT( (BufferSize & 0x1ff) == 0 );
  8933. while((LocalOffset + 8) < BufferSize) {
  8934. *((PLONGLONG) Add2Ptr(OutBuffer, LocalOffset)) ^= (Offset->QuadPart + (LONGLONG) LocalOffset);
  8935. LocalOffset += 0x200;
  8936. }
  8937. // UNREFERENCED_PARAMETER( Offset );
  8938. #endif
  8939. }
  8940. return STATUS_SUCCESS;
  8941. }
  8942. #endif