Leaked source code of windows server 2003
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

14129 lines
412 KiB

  1. /*++
  2. Copyright (c) 1991 Microsoft Corporation
  3. Module Name:
  4. BitmpSup.c
  5. Abstract:
  6. This module implements the general bitmap allocation & deallocation
  7. routines for Ntfs. It is defined into two main parts the first
  8. section handles the bitmap file for clusters on the disk. The
  9. second part is for bitmap attribute allocation (e.g., the mft bitmap).
  10. So unlike other modules this one has local procedure prototypes and
  11. definitions followed by the exported bitmap file routines, followed
  12. by the local bitmap file routines, and then followed by the bitmap
  13. attribute routines, followed by the local bitmap attribute allocation
  14. routines.
  15. Author:
  16. Gary Kimura [GaryKi] 23-Nov-1991
  17. Revision History:
  18. --*/
  19. #include "NtfsProc.h"
  20. #ifdef NTFS_FRAGMENT_DISK
  21. BOOLEAN NtfsFragmentDisk = FALSE;
  22. ULONG NtfsFragmentLength = 2;
  23. BOOLEAN NtfsFragmentMft = FALSE;
  24. #endif
  25. #ifdef NTFS_CHECK_CACHED_RUNS
  26. BOOLEAN NtfsDoVerifyCachedRuns = FALSE;
  27. #endif
  28. #define NTFS_MFT_ZONE_DEFAULT_SHIFT (3)
  29. #define BITMAP_VOLATILE_FREE_COUNT (0x400)
  30. //
  31. // Define stack overflow threshold.
  32. //
  33. #define OVERFLOW_RECORD_THRESHHOLD (0xF00)
  34. //
  35. // A mask of single bits used to clear and set bits in a byte
  36. //
  37. static UCHAR BitMask[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
  38. //
  39. // Local debug trace level
  40. //
  41. #define Dbg (DEBUG_TRACE_BITMPSUP)
  42. //
  43. // Define a tag for general pool allocations from this module
  44. //
  45. #undef MODULE_POOL_TAG
  46. #define MODULE_POOL_TAG ('BFtN')
  47. #define MIN3(A,B,C) ((((A) < (B)) && ((A) < (C))) ? (A) : ((((B) < (A)) && ((B) < (C))) ? (B) : (C)))
  48. #define CollectAllocateClusterStats(VCB,SIZE,HINT) { \
  49. (VCB)->Statistics->Ntfs.Allocate.Calls += 1; \
  50. (VCB)->Statistics->Ntfs.Allocate.Clusters += (ULONG)(SIZE); \
  51. if (HINT) { (VCB)->Statistics->Ntfs.Allocate.Hints += 1; } \
  52. }
  53. #define IncrementAllocateClusterStats(VCB) { \
  54. (VCB)->Statistics->Ntfs.Allocate.RunsReturned += 1; \
  55. }
  56. #define IncrementHintHonoredStats(VCB,SIZE) { \
  57. (VCB)->Statistics->Ntfs.Allocate.HintsHonored += 1; \
  58. (VCB)->Statistics->Ntfs.Allocate.HintsClusters += (ULONG)(SIZE); \
  59. }
  60. #define IncrementCacheHitStats(VCB,SIZE) { \
  61. (VCB)->Statistics->Ntfs.Allocate.Cache += 1; \
  62. (VCB)->Statistics->Ntfs.Allocate.CacheClusters += (ULONG)(SIZE); \
  63. }
  64. #define IncrementCacheMissStats(VCB,SIZE) { \
  65. (VCB)->Statistics->Ntfs.Allocate.CacheMiss += 1; \
  66. (VCB)->Statistics->Ntfs.Allocate.CacheMissClusters += (ULONG)(SIZE); \
  67. }
  68. //
  69. // Local routines to manage the cached free clusters.
  70. //
  71. BOOLEAN
  72. NtfsLookupCachedLcn (
  73. IN PNTFS_CACHED_RUNS CachedRuns,
  74. IN LCN Lcn,
  75. OUT PLCN StartingLcn,
  76. OUT PLONGLONG RunLength,
  77. OUT PUSHORT Index OPTIONAL
  78. );
  79. BOOLEAN
  80. NtfsGetNextCachedLcn (
  81. IN PNTFS_CACHED_RUNS CachedRuns,
  82. IN USHORT Index,
  83. OUT PLCN StartingLcn,
  84. OUT PLONGLONG RunLength
  85. );
  86. BOOLEAN
  87. NtfsLookupCachedLcnByLength (
  88. IN PNTFS_CACHED_RUNS CachedRuns,
  89. IN LONGLONG Length,
  90. IN BOOLEAN AllowShorter,
  91. IN LCN Lcn,
  92. OUT PLCN StartingLcn,
  93. OUT PLONGLONG RunLength,
  94. OUT PUSHORT Index OPTIONAL
  95. );
  96. VOID
  97. NtfsInsertCachedLcn (
  98. IN PNTFS_CACHED_RUNS CachedRuns,
  99. IN LCN Lcn,
  100. IN LONGLONG Length
  101. );
  102. VOID
  103. NtfsRemoveCachedLcn (
  104. IN PNTFS_CACHED_RUNS CachedRuns,
  105. IN LCN Lcn,
  106. IN LONGLONG Length
  107. );
  108. //
  109. // The following are the internal routines we use to manage this.
  110. //
  111. BOOLEAN
  112. NtfsGrowCachedRuns (
  113. IN PNTFS_CACHED_RUNS CachedRuns
  114. );
  115. VOID
  116. NtfsCompactCachedRuns (
  117. IN PNTFS_CACHED_RUNS CachedRuns,
  118. IN USHORT FirstIndex,
  119. IN USHORT LastIndex,
  120. IN BOOLEAN LcnSortedList
  121. );
  122. VOID
  123. NtfsAddCachedRunMult (
  124. IN PIRP_CONTEXT IrpContext,
  125. IN PVCB Vcb,
  126. IN LCN StartingLcn,
  127. IN RTL_BITMAP_RUN *RunArray,
  128. IN ULONG RunCount
  129. );
  130. VOID
  131. NtfsDeleteCachedRun (
  132. IN PNTFS_CACHED_RUNS CachedRuns,
  133. IN USHORT LcnIndex,
  134. IN USHORT LenIndex
  135. );
  136. VOID
  137. NtfsGrowLengthInCachedLcn (
  138. IN PNTFS_CACHED_RUNS CachedRuns,
  139. IN PNTFS_LCN_CLUSTER_RUN ThisEntry,
  140. IN USHORT LcnIndex
  141. );
  142. VOID
  143. NtfsShrinkLengthInCachedLcn (
  144. IN PNTFS_CACHED_RUNS CachedRuns,
  145. IN PNTFS_LCN_CLUSTER_RUN ThisEntry,
  146. IN USHORT LcnIndex
  147. );
  148. USHORT
  149. NtfsGetCachedLengthInsertionPoint (
  150. IN PNTFS_CACHED_RUNS CachedRuns,
  151. IN LCN Lcn,
  152. IN LONGLONG Length
  153. );
  154. VOID
  155. NtfsInsertCachedRun (
  156. IN PNTFS_CACHED_RUNS CachedRuns,
  157. IN LCN Lcn,
  158. IN LONGLONG Length,
  159. IN USHORT LcnIndex
  160. );
  161. BOOLEAN
  162. NtfsPositionCachedLcn (
  163. IN PNTFS_CACHED_RUNS CachedRuns,
  164. IN LCN Lcn,
  165. OUT PUSHORT Index
  166. );
  167. BOOLEAN
  168. NtfsPositionCachedLcnByLength (
  169. IN PNTFS_CACHED_RUNS CachedRuns,
  170. IN LONGLONG RunLength,
  171. IN PLCN Lcn OPTIONAL,
  172. IN PUSHORT StartIndex OPTIONAL,
  173. IN BOOLEAN SearchForward,
  174. OUT PUSHORT RunIndex
  175. );
  176. #ifdef NTFS_CHECK_CACHED_RUNS
  177. VOID
  178. NtfsVerifyCachedLcnRuns (
  179. IN PNTFS_CACHED_RUNS CachedRuns,
  180. IN USHORT FirstIndex,
  181. IN USHORT LastIndex,
  182. IN BOOLEAN SkipSortCheck,
  183. IN BOOLEAN SkipBinCheck
  184. );
  185. VOID
  186. NtfsVerifyCachedLenRuns (
  187. IN PNTFS_CACHED_RUNS CachedRuns,
  188. IN USHORT FirstIndex,
  189. IN USHORT LastIndex,
  190. IN BOOLEAN SkipSortCheck
  191. );
  192. VOID
  193. NtfsVerifyCachedRuns (
  194. IN PNTFS_CACHED_RUNS CachedRuns,
  195. IN BOOLEAN SkipSortCheck,
  196. IN BOOLEAN SkipBinCheck
  197. );
  198. #endif
  199. //
  200. // Macros to manipulate the cached run structures.
  201. //
  202. //
  203. // VOID
  204. // NtfsModifyCachedBinArray (
  205. // IN PNTFS_CACHED_RUNS CachedRuns,
  206. // IN LONGLONG OldLength
  207. // IN LONGLONG NewLength
  208. // );
  209. //
  210. #define NtfsModifyCachedBinArray(C,OL,NL) { \
  211. ASSERT( (NL) != 0 ); \
  212. ASSERT( (OL) != 0 ); \
  213. if ((OL) <= (C)->Bins) { \
  214. (C)->BinArray[ (OL) - 1 ] -= 1; \
  215. } \
  216. if ((NL) <= (C)->Bins) { \
  217. (C)->BinArray[ (NL) - 1 ] += 1; \
  218. } \
  219. }
  220. //
  221. // Some local manifest constants
  222. //
  223. #define BYTES_PER_PAGE (PAGE_SIZE)
  224. #define BITS_PER_PAGE (BYTES_PER_PAGE * 8)
  225. //
  226. // Local procedure prototypes for direct bitmap manipulation
  227. //
  228. VOID
  229. NtfsAllocateBitmapRun (
  230. IN PIRP_CONTEXT IrpContext,
  231. IN PVCB Vcb,
  232. IN LCN StartingLcn,
  233. IN LONGLONG ClusterCount,
  234. IN BOOLEAN FromCachedRuns
  235. );
  236. VOID
  237. NtfsFreeBitmapRun (
  238. IN PIRP_CONTEXT IrpContext,
  239. IN PVCB Vcb,
  240. IN LCN StartingLcn,
  241. IN OUT PLONGLONG ClusterCount
  242. );
  243. BOOLEAN
  244. NtfsFindFreeBitmapRun (
  245. IN PIRP_CONTEXT IrpContext,
  246. IN PVCB Vcb,
  247. IN LONGLONG NumberToFind,
  248. IN LCN StartingSearchHint,
  249. IN BOOLEAN ReturnAnyLength,
  250. IN BOOLEAN IgnoreMftZone,
  251. OUT PLCN ReturnedLcn,
  252. OUT PLONGLONG ClusterCountFound
  253. );
  254. BOOLEAN
  255. NtfsScanBitmapRange (
  256. IN PIRP_CONTEXT IrpContext,
  257. IN PVCB Vcb,
  258. IN LCN StartLcn,
  259. IN LCN BeyondLcn,
  260. IN LONGLONG NumberToFind,
  261. OUT PLCN ReturnedLcn,
  262. OUT PLONGLONG ClusterCountFound
  263. );
  264. BOOLEAN
  265. NtfsAddRecentlyDeallocated (
  266. IN PVCB Vcb,
  267. IN LCN Lcn,
  268. IN OUT PRTL_BITMAP Bitmap
  269. );
  270. //
  271. // The following two prototype are macros for calling map or pin data
  272. //
  273. // VOID
  274. // NtfsMapPageInBitmap (
  275. // IN PIRP_CONTEXT IrpContext,
  276. // IN PVCB Vcb,
  277. // IN LCN Lcn,
  278. // OUT PLCN StartingLcn,
  279. // IN OUT PRTL_BITMAP Bitmap,
  280. // OUT PBCB *BitmapBcb,
  281. // );
  282. //
  283. // VOID
  284. // NtfsPinPageInBitmap (
  285. // IN PIRP_CONTEXT IrpContext,
  286. // IN PVCB Vcb,
  287. // IN LCN Lcn,
  288. // OUT PLCN StartingLcn,
  289. // IN OUT PRTL_BITMAP Bitmap,
  290. // OUT PBCB *BitmapBcb,
  291. // );
  292. //
  293. #define NtfsMapPageInBitmap(A,B,C,D,E,F) NtfsMapOrPinPageInBitmap(A,B,C,D,E,F,FALSE)
  294. #define NtfsPinPageInBitmap(A,B,C,D,E,F) NtfsMapOrPinPageInBitmap(A,B,C,D,E,F,TRUE)
  295. VOID
  296. NtfsMapOrPinPageInBitmap (
  297. IN PIRP_CONTEXT IrpContext,
  298. IN PVCB Vcb,
  299. IN LCN Lcn,
  300. OUT PLCN StartingLcn,
  301. IN OUT PRTL_BITMAP Bitmap,
  302. OUT PBCB *BitmapBcb,
  303. IN BOOLEAN AlsoPinData
  304. );
  305. //
  306. // Local procedure prototype for doing read ahead on our cached
  307. // run information
  308. //
  309. VOID
  310. NtfsReadAheadCachedBitmap (
  311. IN PIRP_CONTEXT IrpContext,
  312. IN PVCB Vcb,
  313. IN LCN StartingLcn
  314. );
  315. //
  316. // Local procedure prototypes for routines that help us find holes
  317. // that need to be filled with MCBs
  318. //
  319. BOOLEAN
  320. NtfsGetNextHoleToFill (
  321. IN PIRP_CONTEXT IrpContext,
  322. IN PNTFS_MCB Mcb,
  323. IN VCN StartingVcn,
  324. IN VCN EndingVcn,
  325. OUT PVCN VcnToFill,
  326. OUT PLONGLONG ClusterCountToFill,
  327. OUT PLCN PrecedingLcn
  328. );
  329. LONGLONG
  330. NtfsScanMcbForRealClusterCount (
  331. IN PIRP_CONTEXT IrpContext,
  332. IN PNTFS_MCB Mcb,
  333. IN VCN StartingVcn,
  334. IN VCN EndingVcn
  335. );
  336. //
  337. // A local procedure prototype for masking out recently deallocated records
  338. //
  339. BOOLEAN
  340. NtfsAddDeallocatedRecords (
  341. IN PVCB Vcb,
  342. IN PSCB Scb,
  343. IN ULONG StartingIndexOfBitmap,
  344. IN OUT PRTL_BITMAP Bitmap
  345. );
  346. //
  347. // Local procedure prototypes for managing the Mft zone.
  348. //
  349. LCN
  350. NtfsInitializeMftZone (
  351. IN PIRP_CONTEXT IrpContext,
  352. IN PVCB Vcb
  353. );
  354. BOOLEAN
  355. NtfsReduceMftZone (
  356. IN PIRP_CONTEXT IrpContext,
  357. IN PVCB Vcb
  358. );
  359. //
  360. // Local procedure prototype to check the stack usage in the record
  361. // package.
  362. //
  363. VOID
  364. NtfsCheckRecordStackUsage (
  365. IN PIRP_CONTEXT IrpContext
  366. );
  367. //
  368. // Local procedure prototype to check for a continuous volume bitmap run
  369. //
  370. VOID
  371. NtfsRunIsClear (
  372. IN PIRP_CONTEXT IrpContext,
  373. IN PVCB Vcb,
  374. IN LCN StartingLcn,
  375. IN LONGLONG RunLength
  376. );
  377. //
  378. // Local procedure prototypes for managing windows of deleted entries.
  379. //
  380. VOID
  381. NtfsAddDelWindow (
  382. IN PNTFS_CACHED_RUNS CachedRuns,
  383. IN USHORT FirstIndex,
  384. IN USHORT LastIndex,
  385. IN BOOLEAN LcnList
  386. );
  387. PNTFS_DELETED_RUNS
  388. NtfsGetDelWindow (
  389. IN PNTFS_CACHED_RUNS CachedRuns,
  390. IN USHORT FirstIndex,
  391. IN USHORT LastIndex,
  392. IN BOOLEAN LcnList,
  393. OUT PUSHORT WindowIndex OPTIONAL
  394. );
  395. VOID
  396. NtfsShrinkDelWindow (
  397. IN PNTFS_CACHED_RUNS CachedRuns,
  398. IN BOOLEAN ShrinkFromStart,
  399. IN BOOLEAN LcnWindow,
  400. IN USHORT WindowIndex
  401. );
  402. VOID
  403. NtfsDeleteDelWindow (
  404. IN PNTFS_CACHED_RUNS CachedRuns,
  405. IN BOOLEAN LcnWindow,
  406. IN USHORT WindowIndex
  407. );
  408. VOID
  409. NtfsMakeSpaceCachedLcn (
  410. IN PNTFS_CACHED_RUNS CachedRuns,
  411. IN LCN StartingLcn,
  412. IN RTL_BITMAP_RUN *RunArray,
  413. IN ULONG RunCount,
  414. IN PUSHORT LcnSorted OPTIONAL
  415. );
  416. //
  417. // Local procedure prototype for dumping cached bitmap information
  418. //
  419. #ifdef NTFSDBG
  420. ULONG
  421. NtfsDumpCachedMcbInformation (
  422. IN PVCB Vcb
  423. );
  424. #else
  425. #define NtfsDumpCachedMcbInformation(V) (0)
  426. #endif // NTFSDBG
  427. #ifdef ALLOC_PRAGMA
  428. #pragma alloc_text(PAGE, NtfsAddBadCluster)
  429. #pragma alloc_text(PAGE, NtfsAddCachedRun)
  430. #pragma alloc_text(PAGE, NtfsAddCachedRunMult)
  431. #pragma alloc_text(PAGE, NtfsAddDeallocatedRecords)
  432. #pragma alloc_text(PAGE, NtfsAddDelWindow)
  433. #pragma alloc_text(PAGE, NtfsAddRecentlyDeallocated)
  434. #pragma alloc_text(PAGE, NtfsAllocateBitmapRun)
  435. #pragma alloc_text(PAGE, NtfsAllocateClusters)
  436. #pragma alloc_text(PAGE, NtfsAllocateMftReservedRecord)
  437. #pragma alloc_text(PAGE, NtfsAllocateRecord)
  438. #pragma alloc_text(PAGE, NtfsGrowLengthInCachedLcn)
  439. #pragma alloc_text(PAGE, NtfsShrinkLengthInCachedLcn)
  440. #pragma alloc_text(PAGE, NtfsCheckRecordStackUsage)
  441. #pragma alloc_text(PAGE, NtfsCleanupClusterAllocationHints)
  442. #pragma alloc_text(PAGE, NtfsCompactCachedRuns)
  443. #pragma alloc_text(PAGE, NtfsCreateMftHole)
  444. #pragma alloc_text(PAGE, NtfsDeallocateClusters)
  445. #pragma alloc_text(PAGE, NtfsDeallocateRecord)
  446. #pragma alloc_text(PAGE, NtfsDeallocateRecordsComplete)
  447. #pragma alloc_text(PAGE, NtfsDeleteCachedRun)
  448. #pragma alloc_text(PAGE, NtfsDeleteDelWindow)
  449. #pragma alloc_text(PAGE, NtfsFindFreeBitmapRun)
  450. #pragma alloc_text(PAGE, NtfsFindMftFreeTail)
  451. #pragma alloc_text(PAGE, NtfsFreeBitmapRun)
  452. #pragma alloc_text(PAGE, NtfsGetCachedLengthInsertionPoint)
  453. #pragma alloc_text(PAGE, NtfsGetDelWindow)
  454. #pragma alloc_text(PAGE, NtfsGetNextCachedLcn)
  455. #pragma alloc_text(PAGE, NtfsGetNextHoleToFill)
  456. #pragma alloc_text(PAGE, NtfsGrowCachedRuns)
  457. #pragma alloc_text(PAGE, NtfsInitializeCachedRuns)
  458. #pragma alloc_text(PAGE, NtfsInitializeClusterAllocation)
  459. #pragma alloc_text(PAGE, NtfsInitializeMftZone)
  460. #pragma alloc_text(PAGE, NtfsInitializeRecordAllocation)
  461. #pragma alloc_text(PAGE, NtfsInsertCachedLcn)
  462. #pragma alloc_text(PAGE, NtfsInsertCachedRun)
  463. #pragma alloc_text(PAGE, NtfsIsRecordAllocated)
  464. #pragma alloc_text(PAGE, NtfsLookupCachedLcn)
  465. #pragma alloc_text(PAGE, NtfsLookupCachedLcnByLength)
  466. #pragma alloc_text(PAGE, NtfsMakeSpaceCachedLcn)
  467. #pragma alloc_text(PAGE, NtfsMapOrPinPageInBitmap)
  468. #pragma alloc_text(PAGE, NtfsModifyBitsInBitmap)
  469. #pragma alloc_text(PAGE, NtfsPositionCachedLcn)
  470. #pragma alloc_text(PAGE, NtfsPositionCachedLcnByLength)
  471. #pragma alloc_text(PAGE, NtfsPreAllocateClusters)
  472. #pragma alloc_text(PAGE, NtfsReadAheadCachedBitmap)
  473. #pragma alloc_text(PAGE, NtfsReduceMftZone)
  474. #pragma alloc_text(PAGE, NtfsReinitializeCachedRuns)
  475. #pragma alloc_text(PAGE, NtfsRemoveCachedLcn)
  476. #pragma alloc_text(PAGE, NtfsReserveMftRecord)
  477. #pragma alloc_text(PAGE, NtfsRestartClearBitsInBitMap)
  478. #pragma alloc_text(PAGE, NtfsRestartSetBitsInBitMap)
  479. #pragma alloc_text(PAGE, NtfsRunIsClear)
  480. #pragma alloc_text(PAGE, NtfsScanBitmapRange)
  481. #pragma alloc_text(PAGE, NtfsScanEntireBitmap)
  482. #pragma alloc_text(PAGE, NtfsScanMcbForRealClusterCount)
  483. #pragma alloc_text(PAGE, NtfsScanMftBitmap)
  484. #pragma alloc_text(PAGE, NtfsShrinkDelWindow)
  485. #pragma alloc_text(PAGE, NtfsUninitializeCachedRuns)
  486. #pragma alloc_text(PAGE, NtfsUninitializeRecordAllocation)
  487. #ifdef NTFS_CHECK_CACHED_RUNS
  488. #pragma alloc_text(PAGE, NtfsVerifyCachedLcnRuns)
  489. #pragma alloc_text(PAGE, NtfsVerifyCachedLenRuns)
  490. #pragma alloc_text(PAGE, NtfsVerifyCachedRuns)
  491. #endif
  492. #endif
  493. VOID
  494. NtfsInitializeClusterAllocation (
  495. IN PIRP_CONTEXT IrpContext,
  496. IN PVCB Vcb
  497. )
  498. /*++
  499. Routine Description:
  500. This routine initializes the cluster allocation structures within the
  501. specified Vcb. It reads in as necessary the bitmap and scans it for
  502. free space and builds the free space mcb based on this scan.
  503. This procedure is multi-call safe. That is, it can be used to
  504. reinitialize the cluster allocation without first calling the
  505. uninitialize cluster allocation routine.
  506. Arguments:
  507. IrpContext - Supplies the context of the current request
  508. Vcb - Supplies the Vcb being initialized
  509. Return Value:
  510. None.
  511. --*/
  512. {
  513. LONGLONG ClusterCount;
  514. ASSERT_IRP_CONTEXT( IrpContext );
  515. ASSERT_VCB( Vcb );
  516. PAGED_CODE();
  517. DebugTrace( +1, Dbg, ("NtfsInitializeClusterAllocation\n") );
//
// Serialize against all other users of the volume bitmap for the
// duration of the (re)initialization.
//
  518. NtfsAcquireExclusiveScb( IrpContext, Vcb->BitmapScb );
  519. try {
  520. //
  521. // The bitmap file currently doesn't have a paging IO resource.
  522. // Create one here so that we won't serialize synchronization
  523. // of the bitmap package with the lazy writer.
  524. //
// NOTE(review): the same ERESOURCE is deliberately shared by the Scb
// header and its Fcb; this mirrors how the resources are torn down
// elsewhere - confirm before changing either assignment.
  525. Vcb->BitmapScb->Header.PagingIoResource =
  526. Vcb->BitmapScb->Fcb->PagingIoResource = NtfsAllocateEresource();
  527. //
  528. // We didn't mark the Scb for the volume bitmap as MODIFIED_NO_WRITE
  529. // when creating it. Do so now.
  530. //
  531. SetFlag( Vcb->BitmapScb->ScbState, SCB_STATE_MODIFIED_NO_WRITE );
  532. //
  533. // Now call a bitmap routine to scan the entire bitmap. This
  534. // routine will compute the number of free clusters in the
  535. // bitmap and set the largest free runs that we find into the
  536. // cached bitmap structures.
  537. //
  538. NtfsScanEntireBitmap( IrpContext, Vcb, FALSE );
  539. //
  540. // Our last operation is to set the hint lcn which is used by
  541. // our allocation routine as a hint on where to find free space.
  542. // In the running system it is the last lcn that we've allocated.
  543. // But for startup we'll put it to be the first free run that
  544. // is stored in the free space mcb.
  545. //
// Index 0 is the lowest-Lcn cached free run, so the initial hint
// points at the first free space on the volume; the run's length is
// returned into ClusterCount but is not used here.
  546. NtfsGetNextCachedLcn( &Vcb->CachedRuns,
  547. 0,
  548. &Vcb->LastBitmapHint,
  549. &ClusterCount );
  550. NtfsInitializeMftZone( IrpContext, Vcb );
  551. } finally {
  552. DebugUnwind( NtfsInitializeClusterAllocation );
//
// Always release the bitmap Scb, even if one of the calls above
// raised a status (e.g. out of resources during the bitmap scan).
//
  553. NtfsReleaseScb( IrpContext, Vcb->BitmapScb );
  554. }
  555. DebugTrace( -1, Dbg, ("NtfsInitializeClusterAllocation -> VOID\n") );
  556. return;
  557. }
  557. BOOLEAN
  558. NtfsAllocateClusters (
  559. IN PIRP_CONTEXT IrpContext,
  560. IN PVCB Vcb,
  561. IN OUT PSCB Scb,
  562. IN VCN OriginalStartingVcn,
  563. IN BOOLEAN AllocateAll,
  564. IN LONGLONG ClusterCount,
  565. IN PLCN TargetLcn OPTIONAL,
  566. IN OUT PLONGLONG DesiredClusterCount
  567. )
  568. /*++
  569. Routine Description:
  570. This routine allocates disk space. It fills in the unallocated holes in
  571. input mcb with allocated clusters from starting Vcn to the cluster count.
  572. The basic algorithm used by this procedure is as follows:
  573. 1. Compute the EndingVcn from the StartingVcn and cluster count
  574. 2. Compute the real number of clusters needed to allocate by scanning
  575. the mcb from starting to ending vcn seeing where the real holes are
  576. 3. If the real cluster count is greater than the known free cluster count
  577. then the disk is full
  578. 4. Call a routine that takes a starting Vcn, ending Vcn, and the Mcb and
  579. returns the first hole that needs to be filled and while there is a hole
  580. to be filled...
  581. 5. Check if the run preceding the hole that we are trying to fill
  582. has an ending Lcn and if it does then with that Lcn see if we
  583. get a cache hit, if we do then allocate the cluster
  584. 6. If we are still looking then enumerate through the cached free runs
  585. and if we find a suitable one. Allocate the first suitable run we find that
  586. satisfies our request. Also in the loop remember the largest
  587. suitable run we find.
  588. 8. If we are still looking then bite the bullet and scan the bitmap on
  589. the disk for a free run using either the preceding Lcn as a hint if
  590. available or the stored last bitmap hint in the Vcb.
  591. 9. At this point we've located a run of clusters to allocate. To do the
  592. actual allocation we allocate the space from the bitmap, decrement
  593. the number of free clusters left, and update the hint.
  594. 10. Before going back to step 4 we move the starting Vcn to be the point
  595. one after the run we've just allocated.
  596. 11. With the allocation complete we update the last bitmap hint stored in
  597. the Vcb to be the last Lcn we've allocated, and we call a routine
  598. to do the read ahead in the cached bitmap at the ending lcn.
  599. Arguments:
  600. Vcb - Supplies the Vcb used in this operation
  601. Scb - Supplies an Scb whose Mcb contains the current retrieval information
  602. for the file and on exit will contain the updated retrieval
  603. information
  604. StartingVcn - Supplies a starting cluster for us to begin allocation
  605. AllocateAll - If TRUE, allocate all the clusters here. Don't break
  606. up request.
  607. ClusterCount - Supplies the number of clusters to allocate
  608. TargetLcn - If supplied allocate at this lcn rather than searching for free space
  609. used by the movefile defragging code
  610. DesiredClusterCount - Supplies the number of clusters we would like allocated
  611. and will allocate if it doesn't require additional runs. On return
  612. this value is the number of clusters allocated.
  613. Return Value:
  614. FALSE - if no clusters were allocated (they were already allocated)
  615. TRUE - if clusters were allocated
  616. Important Note:
  617. This routine will stop after allocating MAXIMUM_RUNS_AT_ONCE runs, in order
  618. to limit the size of allocating transactions. The caller must be aware that
  619. he may not get all of the space he asked for if the disk is real fragmented.
  620. --*/
  621. {
  622. VCN StartingVcn = OriginalStartingVcn;
  623. VCN EndingVcn;
  624. VCN DesiredEndingVcn;
  625. PNTFS_MCB Mcb = &Scb->Mcb;
  626. LONGLONG RemainingDesiredClusterCount;
  627. VCN VcnToFill;
  628. LONGLONG ClusterCountToFill;
  629. LCN PrecedingLcn;
  630. BOOLEAN FoundClustersToAllocate;
  631. LCN FoundLcn;
  632. LONGLONG FoundClusterCount;
  633. LONGLONG LargestBitmapClusterCount = 0;
  634. BOOLEAN FromCachedRuns;
  635. USHORT RunIndex;
  636. LCN HintLcn;
  637. ULONG LoopCount = 0;
  638. ULONG RunCount = 0;
  639. BOOLEAN ClustersAllocated = FALSE;
  640. BOOLEAN GotAHoleToFill = TRUE;
  641. BOOLEAN FoundRun = FALSE;
  642. BOOLEAN ExtendingMft = FALSE;
  643. BOOLEAN AllocateFromBitmap = FALSE;
  644. ASSERT_IRP_CONTEXT( IrpContext );
  645. ASSERT_VCB( Vcb );
  646. PAGED_CODE();
  647. DebugTrace( +1, Dbg, ("NtfsAllocateClusters\n") );
  648. DebugTrace( 0, Dbg, ("StartVcn = %0I64x\n", StartingVcn) );
  649. DebugTrace( 0, Dbg, ("ClusterCount = %0I64x\n", ClusterCount) );
  650. DebugTrace( 0, Dbg, ("DesiredClusterCount = %0I64x\n", *DesiredClusterCount) );
  651. NtfsAcquireExclusiveScb( IrpContext, Vcb->BitmapScb );
  652. try {
  653. if (FlagOn( Vcb->VcbState, VCB_STATE_RELOAD_FREE_CLUSTERS )) {
  654. NtfsScanEntireBitmap( IrpContext, Vcb, FALSE );
  655. }
  656. //
  657. // Check to see if we are defragmenting
  658. //
  659. if (ARGUMENT_PRESENT( TargetLcn )) {
  660. FoundLcn = *TargetLcn;
  661. //
  662. // Ensure that the run is NOT already allocated
  663. //
  664. NtfsRunIsClear( IrpContext, Vcb, FoundLcn, ClusterCount );
  665. //
  666. // Get the allocation data from the Scb
  667. //
  668. VcnToFill = OriginalStartingVcn;
  669. FoundClusterCount = ClusterCount;
  670. *DesiredClusterCount = ClusterCount;
  671. GotAHoleToFill = FALSE;
  672. ClustersAllocated = TRUE;
  673. FoundRun = TRUE;
  674. FromCachedRuns = FALSE;
  675. //
  676. // Initialize PrecedingLcn in this case to skip any special action.
  677. //
  678. PrecedingLcn = 0;
  679. //
  680. // We already have the allocation so skip over the allocation section
  681. //
  682. goto Defragment;
  683. }
  684. //
  685. // Compute the ending vcn, and the cluster count of how much we really
  686. // need to allocate (based on what is already allocated). Then check if we
  687. // have space on the disk.
  688. //
  689. EndingVcn = (StartingVcn + ClusterCount) - 1;
  690. ClusterCount = NtfsScanMcbForRealClusterCount( IrpContext, Mcb, StartingVcn, EndingVcn );
  691. if ((ClusterCount + IrpContext->DeallocatedClusters) > Vcb->FreeClusters) {
  692. NtfsRaiseStatus( IrpContext, STATUS_DISK_FULL, NULL, NULL );
  693. }
  694. //
  695. // Let's see if it is ok to allocate clusters for this Scb now,
  696. // in case compressed files have over-reserved the space. This
  697. // calculation is done in such a way as to guarantee we do not
  698. // have either of the terms subtracting through zero, even if
  699. // we were to over-reserve the free space on the disk due to a
  700. // hot fix or something. Always satisfy this request if we are
  701. // in the paging IO write path because we know we are using clusters
  702. // already reserved for this stream.
  703. //
  704. NtfsAcquireReservedClusters( Vcb );
  705. //
  706. // Do the fast test to see if there is even a chance of failing the reservation test
  707. // or if we will allocate this space anyway.
  708. // If there is no Irp or this is the Usn journal then allocate the space anyway.
  709. //
  710. if ((ClusterCount + Vcb->TotalReserved > Vcb->FreeClusters) &&
  711. #ifdef BRIANDBG
  712. !NtfsIgnoreReserved &&
  713. #endif
  714. (IrpContext->OriginatingIrp != NULL) &&
  715. !FlagOn( Scb->Fcb->FcbState, FCB_STATE_USN_JOURNAL )) {
  716. //
  717. // If this is not a write then fail this unless this is an fsctl which
  718. // may have reserved space.
  719. //
  720. if (IrpContext->MajorFunction != IRP_MJ_WRITE) {
  721. //
  722. // If this is an Fsctl for a data file then account for the reservation.
  723. // All other non-writes will fail because we already checked whether
  724. // they conflicted with the volume reservation.
  725. //
  726. if ((IrpContext->MajorFunction != IRP_MJ_FILE_SYSTEM_CONTROL) ||
  727. (Scb->Header.NodeTypeCode != NTFS_NTC_SCB_DATA) ||
  728. (ClusterCount + Vcb->TotalReserved - LlClustersFromBytesTruncate( Vcb, Scb->ScbType.Data.TotalReserved ) > Vcb->FreeClusters)) {
  729. NtfsReleaseReservedClusters( Vcb );
  730. NtfsRaiseStatus( IrpContext, STATUS_DISK_FULL, NULL, NULL );
  731. }
  732. //
  733. // If we are in user write path then check the reservation. Otherwise
  734. // satisfy the request. It will be some other stream which supports the
  735. // write (i.e. Mft record for a secondary file record).
  736. //
  737. } else if ((Scb->Header.NodeTypeCode == NTFS_NTC_SCB_DATA) &&
  738. !FlagOn( IrpContext->OriginatingIrp->Flags, IRP_PAGING_IO ) &&
  739. (ClusterCount + Vcb->TotalReserved - LlClustersFromBytesTruncate( Vcb, Scb->ScbType.Data.TotalReserved ) > Vcb->FreeClusters)) {
  740. NtfsReleaseReservedClusters( Vcb );
  741. NtfsRaiseStatus( IrpContext, STATUS_DISK_FULL, NULL, NULL );
  742. }
  743. }
  744. NtfsReleaseReservedClusters( Vcb );
  745. //
  746. // We need to check that the request won't fail because of clusters
  747. // in the recently deallocated lists.
  748. //
  749. if (Vcb->FreeClusters < (Vcb->DeallocatedClusters + ClusterCount)) {
  750. #ifdef PERF_STATS
  751. IrpContext->LogFullReason = LF_DEALLOCATED_CLUSTERS;
  752. #endif
  753. NtfsRaiseStatus( IrpContext, STATUS_LOG_FILE_FULL, NULL, NULL );
  754. }
  755. //
  756. // Remember if we are extending the Mft.
  757. //
  758. if ((Scb == Vcb->MftScb) &&
  759. (LlBytesFromClusters( Vcb, StartingVcn ) == (ULONGLONG) Scb->Header.AllocationSize.QuadPart)) {
  760. ExtendingMft = TRUE;
  761. }
  762. //
  763. // Now compute the desired ending vcn and the real desired cluster count
  764. //
  765. DesiredEndingVcn = (StartingVcn + *DesiredClusterCount) - 1;
  766. RemainingDesiredClusterCount = NtfsScanMcbForRealClusterCount( IrpContext, Mcb, StartingVcn, DesiredEndingVcn );
  767. //
  768. // While there are holes to fill we will do the following loop
  769. //
  770. while ((AllocateAll || (LoopCount < MAXIMUM_RUNS_AT_ONCE))
  771. &&
  772. (GotAHoleToFill = NtfsGetNextHoleToFill( IrpContext,
  773. Mcb,
  774. StartingVcn,
  775. DesiredEndingVcn,
  776. &VcnToFill,
  777. &ClusterCountToFill,
  778. &PrecedingLcn))) {
  779. //
  780. // Assume we will find this in the cached runs array.
  781. //
  782. FromCachedRuns = TRUE;
  783. //
  784. // If this is our first time through the loop then record out bitmap stats
  785. // then always bump up the run count stat.
  786. //
  787. if (!ClustersAllocated) {
  788. CollectAllocateClusterStats( Vcb,
  789. RemainingDesiredClusterCount,
  790. PrecedingLcn != UNUSED_LCN );
  791. }
  792. IncrementAllocateClusterStats( Vcb );
  793. //
  794. // First indicate that we haven't found anything suitable yet
  795. //
  796. FoundClustersToAllocate = FALSE;
  797. //
  798. // Remember that we are will be allocating clusters.
  799. //
  800. ClustersAllocated = TRUE;
  801. //
  802. // Initialize HintLcn to a value that sorts lower than any other
  803. // Lcn. If we have no PrecedingLcn to use as a hint, the
  804. // allocation will preferentially use an Lcn that is as small
  805. // as possible for the desired cluster count. This will left
  806. // pack things as much as possible.
  807. //
  808. HintLcn = UNUSED_LCN;
  809. //
  810. // Check if the preceding lcn is anything other than -1 then with
  811. // that as a hint check if we have a cache hit on a free run
  812. //
  813. if (PrecedingLcn != UNUSED_LCN) {
  814. if (NtfsLookupCachedLcn( &Vcb->CachedRuns,
  815. PrecedingLcn + 1,
  816. &FoundLcn,
  817. &FoundClusterCount,
  818. NULL )) {
  819. //
  820. // Increment the stats and say we've found something to allocate
  821. //
  822. IncrementHintHonoredStats( Vcb, MIN3(FoundClusterCount, RemainingDesiredClusterCount, ClusterCountToFill));
  823. #ifdef NTFS_FRAGMENT_DISK
  824. if (NtfsFragmentMft &&
  825. (Scb == Vcb->MftScb) &&
  826. (FoundClusterCount > 1)) {
  827. FoundLcn += 1;
  828. FoundClusterCount -= 1;
  829. }
  830. #endif
  831. if ((Scb->AttributeTypeCode == $INDEX_ALLOCATION) &&
  832. (FoundClusterCount * Vcb->BytesPerCluster < Scb->ScbType.Index.BytesPerIndexBuffer)) {
  833. } else {
  834. FoundClustersToAllocate = TRUE;
  835. }
  836. }
  837. if (!FoundClustersToAllocate && !ExtendingMft ) {
  838. //
  839. // Set up the hint LCN for the lookup by length
  840. // call below.
  841. //
  842. HintLcn = PrecedingLcn + 1;
  843. }
  844. }
  845. //
  846. // If we are still looking to allocate something then hit the cache.
  847. // Skip this for the Mft zone as we are willing to go to disk for it.
  848. //
  849. while (!FoundClustersToAllocate &&
  850. !ExtendingMft &&
  851. NtfsLookupCachedLcnByLength( &Vcb->CachedRuns,
  852. RemainingDesiredClusterCount,
  853. (BOOLEAN)(Scb->AttributeTypeCode != $INDEX_ALLOCATION),
  854. HintLcn,
  855. &FoundLcn,
  856. &FoundClusterCount,
  857. &RunIndex )) {
  858. if ((FoundLcn < Vcb->MftZoneEnd) &&
  859. ((FoundLcn + FoundClusterCount) > Vcb->MftZoneStart)) {
  860. //
  861. // This run overlaps the Mft zone. Remove the zone from
  862. // the cache.
  863. //
  864. NtfsRemoveCachedLcn( &Vcb->CachedRuns,
  865. Vcb->MftZoneStart,
  866. Vcb->MftZoneEnd - Vcb->MftZoneStart );
  867. //
  868. // Retry the lookup.
  869. //
  870. continue;
  871. }
  872. //
  873. // This run will do.
  874. //
  875. FoundClustersToAllocate = TRUE;
  876. }
  877. //
  878. // this code tries to prevent the paging file allocations
  879. // from becoming fragmented.
  880. //
  881. // if the clusters we just found are smaller than half
  882. // the of the remaining cluster to allocate then we force
  883. // a look at the bitmap.
  884. //
  885. if (FlagOn( Scb->Fcb->FcbState, FCB_STATE_PAGING_FILE ) &&
  886. FoundClustersToAllocate &&
  887. FoundClusterCount < (RemainingDesiredClusterCount >> 1)) {
  888. if (LargestBitmapClusterCount > 0) {
  889. if (LargestBitmapClusterCount >= RemainingDesiredClusterCount) {
  890. FoundClustersToAllocate = FALSE;
  891. }
  892. } else {
  893. FoundClustersToAllocate = FALSE;
  894. }
  895. }
  896. //
  897. // Check if we've allocated from our cache and increment the stats
  898. //
  899. if (FoundClustersToAllocate) {
  900. IncrementCacheHitStats( Vcb,
  901. MIN3( FoundClusterCount,
  902. RemainingDesiredClusterCount,
  903. ClusterCountToFill ));
  904. //
  905. // We've done everything we can with the cached bitmap information so
  906. // now bite the bullet and scan the bitmap for a free cluster. If
  907. // we have an hint lcn then use it otherwise use the hint stored in the
  908. // vcb. But never use a hint that is part of the mft zone, and because
  909. // the mft always has a preceding lcn we know we'll hint in the zone
  910. // for the mft.
  911. //
  912. } else {
  913. BOOLEAN AllocatedFromZone;
  914. BOOLEAN ReturnAnyLength;
  915. //
  916. // The clusters aren't coming from the cached runs array.
  917. //
  918. FromCachedRuns = FALSE;
  919. //
  920. // First check if we have already satisfied the core requirements
  921. // and are now just going for the desired ending vcn. If so then
  922. // we will not waste time hitting the disk
  923. //
  924. if (StartingVcn > EndingVcn) {
  925. //
  926. // Set the loop count to MAXIMUM_RUNS_AT_ONCE to indicate we bailed early
  927. // without finding all of the requested clusters.
  928. //
  929. LoopCount = MAXIMUM_RUNS_AT_ONCE;
  930. break;
  931. }
  932. if (PrecedingLcn != UNUSED_LCN) {
  933. HintLcn = PrecedingLcn + 1;
  934. ReturnAnyLength = TRUE;
  935. } else {
  936. //
  937. // We shouldn't be here if we are extending the Mft.
  938. //
  939. ASSERT( !ExtendingMft );
  940. HintLcn = Vcb->LastBitmapHint;
  941. ReturnAnyLength = FALSE;
  942. if ((HintLcn >= Vcb->MftZoneStart) &&
  943. (HintLcn < Vcb->MftZoneEnd)) {
  944. HintLcn = Vcb->MftZoneEnd;
  945. }
  946. }
  947. AllocatedFromZone = NtfsFindFreeBitmapRun( IrpContext,
  948. Vcb,
  949. ClusterCountToFill,
  950. HintLcn,
  951. ReturnAnyLength,
  952. ExtendingMft,
  953. &FoundLcn,
  954. &FoundClusterCount );
  955. if (LargestBitmapClusterCount == 0) {
  956. //
  957. // remember the first cluster count that we get from
  958. // the bitmap as this will be the largest. this is used
  959. // to optimize the pagefile case.
  960. //
  961. LargestBitmapClusterCount = FoundClusterCount;
  962. }
  963. AllocateFromBitmap = TRUE;
  964. IncrementCacheMissStats(Vcb, MIN3(FoundClusterCount, RemainingDesiredClusterCount, ClusterCountToFill));
  965. if (FoundClusterCount == 0) {
  966. NtfsRaiseStatus( IrpContext, STATUS_DISK_FULL, NULL, NULL );
  967. }
  968. //
  969. // Check if we need to reduce the zone.
  970. //
  971. if (!ExtendingMft) {
  972. if (AllocatedFromZone) {
  973. //
  974. // If there is space to reduce the zone then do so now
  975. // and rescan the bitmap.
  976. //
  977. if (NtfsReduceMftZone( IrpContext, Vcb )) {
  978. FoundClusterCount = 0;
  979. NtfsFindFreeBitmapRun( IrpContext,
  980. Vcb,
  981. ClusterCountToFill,
  982. Vcb->MftZoneEnd,
  983. FALSE,
  984. FALSE,
  985. &FoundLcn,
  986. &FoundClusterCount );
  987. if (FoundClusterCount == 0) {
  988. NtfsRaiseStatus( IrpContext, STATUS_DISK_FULL, NULL, NULL );
  989. }
  990. }
  991. }
  992. //
  993. // We are extending the Mft. If we didn't get a contiguous run then
  994. // set up a new zone.
  995. //
  996. } else if (PrecedingLcn + 1 != FoundLcn) {
  997. NtfsScanEntireBitmap( IrpContext, Vcb, TRUE );
  998. ASSERT( Vcb->CachedRuns.Used != 0 );
  999. FoundLcn = NtfsInitializeMftZone( IrpContext, Vcb );
  1000. NtfsFindFreeBitmapRun( IrpContext,
  1001. Vcb,
  1002. ClusterCountToFill,
  1003. FoundLcn,
  1004. TRUE,
  1005. TRUE,
  1006. &FoundLcn,
  1007. &FoundClusterCount );
  1008. }
  1009. }
  1010. //
  1011. // At this point we have found a run to allocate denoted by the
  1012. // values in FoundLcn and FoundClusterCount. We need to trim back
  1013. // the cluster count to be the amount we really need and then
  1014. // do the allocation. To do the allocation we zap the bitmap,
  1015. // decrement the free count, and add the run to the mcb we're
  1016. // using
  1017. //
  1018. #ifdef NTFS_FRAGMENT_DISK
  1019. if (NtfsFragmentDisk && ((ULONG) FoundClusterCount > NtfsFragmentLength)) {
  1020. FoundLcn += 1;
  1021. FoundClusterCount = NtfsFragmentLength;
  1022. } else if (NtfsFragmentMft &&
  1023. (Scb == Vcb->MftScb) &&
  1024. (FoundClusterCount > NtfsFragmentLength)) {
  1025. FoundLcn += 1;
  1026. FoundClusterCount = NtfsFragmentLength;
  1027. }
  1028. #endif
  1029. if (FoundClusterCount > RemainingDesiredClusterCount) {
  1030. FoundClusterCount = RemainingDesiredClusterCount;
  1031. }
  1032. if (FoundClusterCount > ClusterCountToFill) {
  1033. FoundClusterCount = ClusterCountToFill;
  1034. }
  1035. ASSERT( Vcb->FreeClusters >= FoundClusterCount );
  1036. SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_MODIFIED_BITMAP );
  1037. Defragment:
  1038. NtfsAllocateBitmapRun( IrpContext, Vcb, FoundLcn, FoundClusterCount, FromCachedRuns );
  1039. //
  1040. // Modify the total allocated for this file.
  1041. //
  1042. NtfsAcquireReservedClusters( Vcb );
  1043. Scb->TotalAllocated += (LlBytesFromClusters( Vcb, FoundClusterCount ));
  1044. NtfsReleaseReservedClusters( Vcb );
  1045. //
  1046. // Adjust the count of free clusters. Only store the change in
  1047. // the top level irp context in case of aborts.
  1048. //
  1049. Vcb->FreeClusters -= FoundClusterCount;
  1050. IrpContext->FreeClusterChange -= FoundClusterCount;
  1051. ASSERT_LCN_RANGE_CHECKING( Vcb, (FoundLcn + FoundClusterCount) );
  1052. ASSERT( FoundClusterCount != 0 );
  1053. NtfsAddNtfsMcbEntry( Mcb, VcnToFill, FoundLcn, FoundClusterCount, FALSE );
  1054. //
  1055. // If this is the Mft file then put these into our AddedClusters Mcb
  1056. // as well.
  1057. //
  1058. if (Mcb == &Vcb->MftScb->Mcb) {
  1059. FsRtlAddLargeMcbEntry( &Vcb->MftScb->ScbType.Mft.AddedClusters,
  1060. VcnToFill,
  1061. FoundLcn,
  1062. FoundClusterCount );
  1063. }
  1064. //
  1065. // And update the last bitmap hint, but only if we used the hint to begin with
  1066. //
  1067. if (PrecedingLcn == UNUSED_LCN) {
  1068. Vcb->LastBitmapHint = FoundLcn;
  1069. }
  1070. //
  1071. // Now move the starting Vcn to the Vcn that we've just filled plus the
  1072. // found cluster count
  1073. //
  1074. StartingVcn = VcnToFill + FoundClusterCount;
  1075. LoopCount += 1;
  1076. RunCount += 1;
  1077. if (FoundRun == TRUE) {
  1078. break;
  1079. }
  1080. //
  1081. // Decrement the remaining desired cluster count by the amount we just allocated
  1082. //
  1083. RemainingDesiredClusterCount = RemainingDesiredClusterCount - FoundClusterCount;
  1084. }
  1085. //
  1086. // Now we need to compute the total cluster that we've just allocated
  1087. // We'll call get next hole to fill. If the result is false then we
  1088. // allocated everything. If the result is true then we do some quick
  1089. // math to get the size allocated
  1090. //
  1091. if (GotAHoleToFill && NtfsGetNextHoleToFill( IrpContext,
  1092. Mcb,
  1093. OriginalStartingVcn,
  1094. DesiredEndingVcn,
  1095. &VcnToFill,
  1096. &ClusterCountToFill,
  1097. &PrecedingLcn)) {
  1098. //
  1099. // If this is a sparse file and we didn't get all that we asked for
  1100. // then trim the allocation back to a compression boundary.
  1101. //
  1102. if ((LoopCount >= MAXIMUM_RUNS_AT_ONCE) &&
  1103. !AllocateAll &&
  1104. (FlagOn( Scb->AttributeFlags, ATTRIBUTE_FLAG_COMPRESSION_MASK | ATTRIBUTE_FLAG_SPARSE ) == ATTRIBUTE_FLAG_SPARSE )) {
  1105. ULONG ClustersPerCompressionMask;
  1106. ClustersPerCompressionMask = (1 << Scb->CompressionUnitShift) - 1;
  1107. //
  1108. // We should end on a compression unit boundary.
  1109. //
  1110. if ((ULONG) VcnToFill & ClustersPerCompressionMask) {
  1111. //
  1112. // Back up to a compression unit boundary.
  1113. //
  1114. StartingVcn = VcnToFill & ~((LONGLONG) ClustersPerCompressionMask);
  1115. ASSERT( StartingVcn > OriginalStartingVcn );
  1116. NtfsDeallocateClusters( IrpContext,
  1117. Vcb,
  1118. Scb,
  1119. StartingVcn,
  1120. VcnToFill - 1,
  1121. &Scb->TotalAllocated );
  1122. //
  1123. // We don't want these clusters to be reflected in the clusters
  1124. // deallocated for this transaction. Otherwise our caller may
  1125. // assume he can get them with a log file full.
  1126. //
  1127. IrpContext->DeallocatedClusters -= (VcnToFill - StartingVcn);
  1128. VcnToFill = StartingVcn;
  1129. }
  1130. }
  1131. *DesiredClusterCount = VcnToFill - OriginalStartingVcn;
  1132. }
  1133. //
  1134. // At this point we've allocated everything we were asked to do
  1135. // so now call a routine to read ahead into our cache the disk
  1136. // information at the last lcn we allocated. But only do the readahead
  1137. // if we allocated clusters and we couldn't satisfy the request in one
  1138. // run.
  1139. //
  1140. if (ClustersAllocated &&
  1141. ((RunCount > 1) || AllocateFromBitmap) &&
  1142. (FoundLcn + FoundClusterCount < Vcb->TotalClusters)) {
  1143. NtfsReadAheadCachedBitmap( IrpContext, Vcb, FoundLcn + FoundClusterCount );
  1144. }
  1145. } finally {
  1146. DebugUnwind( NtfsAllocateClusters );
  1147. DebugTrace( 0, Dbg, ("%d\n", NtfsDumpCachedMcbInformation(Vcb)) );
  1148. NtfsReleaseScb(IrpContext, Vcb->BitmapScb);
  1149. }
  1150. DebugTrace( -1, Dbg, ("NtfsAllocateClusters -> %08lx\n", ClustersAllocated) );
  1151. return ClustersAllocated;
  1152. }
VOID
NtfsAddBadCluster (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN LCN Lcn
    )

/*++

Routine Description:

    This routine helps append a bad cluster to the bad cluster file.
    It marks it as allocated in the volume bitmap and also adds
    the Lcn to the MCB for the bad cluster file.  It also removes the
    cluster from the recently-deallocated lists so the bad cluster can
    never be handed back out by the bitmap package.

Arguments:

    Vcb - Supplies the Vcb used in this operation

    Lcn - Supplies the Lcn of the new bad cluster

Return:

    None.

--*/

{
    PNTFS_MCB Mcb;
    LONGLONG FoundLcn;
    LONGLONG FoundClusters;
    PDEALLOCATED_CLUSTERS Clusters;

    ASSERT_IRP_CONTEXT( IrpContext );
    ASSERT_VCB( Vcb );

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsAddBadCluster\n") );
    DebugTrace( 0, Dbg, ("Lcn = %0I64x\n", Lcn) );

    //
    // Reference the bad cluster mcb and grab exclusive access to the
    // bitmap scb
    //

    Mcb = &Vcb->BadClusterFileScb->Mcb;

    NtfsAcquireExclusiveScb( IrpContext, Vcb->BitmapScb );

    try {

        //
        // We are given the bad Lcn so all we need to do is
        // allocate it in the bitmap, and take care of some
        // bookkeeping
        //

        SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_MODIFIED_BITMAP );
        NtfsAllocateBitmapRun( IrpContext, Vcb, Lcn, 1, FALSE );

        //
        // Go ahead and remove this cluster from the recently deallocated arrays.
        // We don't want to give this back to the bitmap package.
        //
        // Best odds are that these are in the active deallocated clusters.
        //
        // Walk the list of deallocated-cluster Mcbs; in each one the clusters
        // are recorded with Vcn == Lcn, so we can look the Lcn up directly.
        // If the Lcn is not found in any list (loop runs back to the list
        // head), there is simply nothing to remove.
        //

        Clusters = (PDEALLOCATED_CLUSTERS)Vcb->DeallocatedClusterListHead.Flink;
        do {

            if (FsRtlLookupLargeMcbEntry( &Clusters->Mcb,
                                          Lcn,
                                          &FoundLcn,
                                          &FoundClusters,
                                          NULL,
                                          NULL,
                                          NULL ) &&
                (FoundLcn != UNUSED_LCN)) {

                FsRtlRemoveLargeMcbEntry( &Clusters->Mcb,
                                          Lcn,
                                          1 );

                //
                // Removing one from Dealloc and Vcb. Operation above
                // could fail leaving entry in Deallocated cluster. OK because the
                // entry is still deallocated this operation will abort.
                //

                Clusters->ClusterCount -= 1;
                Vcb->DeallocatedClusters -= 1;
                break;
            }

            Clusters = (PDEALLOCATED_CLUSTERS)Clusters->Link.Flink;

        } while ( &Clusters->Link != &Vcb->DeallocatedClusterListHead );

        //
        // Account for the cluster we just marked in use, both in the Vcb
        // and in the per-transaction change count used on abort.
        //

        Vcb->FreeClusters -= 1;
        IrpContext->FreeClusterChange -= 1;

        ASSERT_LCN_RANGE_CHECKING( Vcb, (Lcn + 1) );

        //
        // Vcn == Lcn in the bad cluster file.
        //

        NtfsAddNtfsMcbEntry( Mcb, Lcn, Lcn, (LONGLONG)1, FALSE );

    } finally {

        DebugUnwind( NtfsAddBadCluster );

        NtfsReleaseScb(IrpContext, Vcb->BitmapScb);
    }

    DebugTrace( -1, Dbg, ("NtfsAddBadCluster -> VOID\n") );

    return;
}
BOOLEAN
NtfsDeallocateClusters (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN PSCB Scb,
    IN VCN StartingVcn,
    IN VCN EndingVcn,
    OUT PLONGLONG TotalAllocated OPTIONAL
    )

/*++

Routine Description:

    This routine deallocates (i.e., frees) disk space. It free any clusters that
    are specified as allocated in the input mcb with the specified range of starting
    vcn to ending vcn inclusive.

    The basic algorithm used by this procedure is as follows:

    1. With a Vcn value beginning at starting vcn and progressing to ending vcn
       do the following steps...

    2. Lookup the Mcb entry at the vcn this will yield an lcn and a cluster count
       if the entry exists (even if it is a hole).  If the entry does not exist
       then we are completely done because we have run off the end of allocation.

    3. If the entry is a hole (i.e., Lcn == -1) then add the cluster count to
       Vcn and go back to step 1.

    4. At this point we have a real run of clusters that need to be deallocated but
       the cluster count might put us over the ending vcn so adjust the cluster
       count to keep us within the ending vcn.

    5. Now deallocate the clusters from the bitmap, and increment the free cluster
       count stored in the vcb.

    6. Add (i.e., change) any cached bitmap information concerning this run to indicate
       that it is now free.

    7. Remove the run from the mcb.

    8. Add the cluster count that we've just freed to Vcn and go back to step 1.

Arguments:

    Vcb - Supplies the vcb used in this operation

    Scb - Supplies the Scb whose Mcb describes the runs to be deallocated

    StartingVcn - Supplies the vcn to start deallocating at in the input mcb

    EndingVcn - Supplies the vcn to end deallocating at in the input mcb

    TotalAllocated - If specified we will modify the total allocated clusters
        for this file.

Return Value:

    FALSE - if nothing was deallocated.
    TRUE - if some space was deallocated.

--*/

{
    VCN Vcn;
    LCN Lcn;
    LONGLONG ClusterCount;

    //
    // ClustersRemoved tracks clusters that have been inserted into the
    // recently-deallocated Mcb but not yet cleared in the volume bitmap.
    // NtfsFreeBitmapRun drives it back to zero as it frees bits, so the
    // finally clause below can back out the insertion after a partial
    // failure.  LastLcnAdded is only valid while ClustersRemoved != 0.
    //

    LONGLONG ClustersRemoved = 0;
    BOOLEAN ClustersDeallocated = FALSE;
    LCN LastLcnAdded;
    BOOLEAN RaiseLogFull;

    ASSERT_IRP_CONTEXT( IrpContext );
    ASSERT_VCB( Vcb );

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsDeallocateClusters\n") );
    DebugTrace( 0, Dbg, ("StartingVcn = %016I64x\n", StartingVcn) );
    DebugTrace( 0, Dbg, ("EndingVcn = %016I64\n", EndingVcn) );

    NtfsAcquireExclusiveScb( IrpContext, Vcb->BitmapScb );

    try {

        //
        // If the free cluster count is stale then rebuild it from the
        // bitmap before we start adjusting it below.
        //

        if (FlagOn( Vcb->VcbState, VCB_STATE_RELOAD_FREE_CLUSTERS )) {

            NtfsScanEntireBitmap( IrpContext, Vcb, FALSE );
        }

        //
        // The following loop scans through the mcb from starting vcn to ending vcn
        // with a step of cluster count.
        //

        for (Vcn = StartingVcn; Vcn <= EndingVcn; Vcn = Vcn + ClusterCount) {

            //
            // Get the run information from the Mcb, and if this Vcn isn't specified
            // in the mcb then return now to our caller
            //

            if (!NtfsLookupNtfsMcbEntry( &Scb->Mcb, Vcn, &Lcn, &ClusterCount, NULL, NULL, NULL, NULL )) {

                leave;
            }

            //
            // Make sure that the run we just looked up is not a hole otherwise
            // if it is a hole we'll just continue with out loop continue with our
            // loop
            //

            if (Lcn != UNUSED_LCN) {

                PDEALLOCATED_CLUSTERS CurrentClusters;

                ASSERT_LCN_RANGE_CHECKING( Vcb, (Lcn + ClusterCount) );

                //
                // Now we have a real run to deallocate, but it might be too large
                // to check for that the vcn plus cluster count must be less than
                // or equal to the ending vcn plus 1.
                //

                if ((Vcn + ClusterCount) > EndingVcn) {

                    ClusterCount = (EndingVcn - Vcn) + 1;
                }

                //
                // And to hold us off from reallocating the clusters right away we'll
                // add this run to the recently deallocated mcb in the vcb.  If this fails
                // because we are growing the mapping then change the code to
                // LOG_FILE_FULL to empty the mcb.
                //

                RaiseLogFull = FALSE;

                try {

                    CurrentClusters = NtfsGetDeallocatedClusters( IrpContext, Vcb );
                    FsRtlAddLargeMcbEntry( &CurrentClusters->Mcb,
                                           Lcn,
                                           Lcn,
                                           ClusterCount );

                } except ((GetExceptionCode() == STATUS_INSUFFICIENT_RESOURCES) ?
                          EXCEPTION_EXECUTE_HANDLER :
                          EXCEPTION_CONTINUE_SEARCH) {

                    RaiseLogFull = TRUE;
                }

                //
                // Raise outside the exception filter scope so the log-full
                // retry logic sees a clean state.
                //

                if (RaiseLogFull) {

#ifdef PERF_STATS
                    IrpContext->LogFullReason = LF_DEALLOCATED_CLUSTERS_MEM;
#endif
                    NtfsRaiseStatus( IrpContext, STATUS_LOG_FILE_FULL, NULL, NULL );
                }

                //
                // Correct here because we increment only if successfully
                // adding the clusters.  It is also added to dealloc and Vcb together.
                //

                CurrentClusters->ClusterCount += ClusterCount;
                Vcb->DeallocatedClusters += ClusterCount;
                IrpContext->DeallocatedClusters += ClusterCount;

                ClustersRemoved = ClusterCount;
                LastLcnAdded = Lcn + ClusterCount;

                //
                // Now zap the bitmap, increment the free cluster count, and change
                // the cached information on this run to indicate that it is now free
                //

                SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_MODIFIED_BITMAP );
                NtfsFreeBitmapRun( IrpContext, Vcb, Lcn, &ClustersRemoved);
                ASSERT( ClustersRemoved == 0 );
                ClustersDeallocated = TRUE;

                //
                // Reserve newly freed clusters if necc. to maintain balance for
                // mapped data files
                //

                if (($DATA == Scb->AttributeTypeCode) &&
                    (Scb->CompressionUnit != 0) &&
                    FlagOn( Scb->Header.Flags, FSRTL_FLAG_USER_MAPPED_FILE )) {

                    LONGLONG FileOffset;
                    ULONG ByteCount;
                    LONGLONG TempL;

                    TempL = NtfsCalculateNeededReservedSpace( Scb );
                    if (Scb->ScbType.Data.TotalReserved <= TempL) {

                        FileOffset = LlBytesFromClusters( Vcb, Vcn );
                        ByteCount = BytesFromClusters( Vcb, ClusterCount );

                        //
                        // If we're deallocating beyond allocation size as a result of DeallocateInternal
                        // optimization (split and remove at back) compensate.
                        //

                        if (FileOffset >= Scb->Header.AllocationSize.QuadPart ) {
                            FileOffset = Scb->Header.AllocationSize.QuadPart - ByteCount;
                        }

                        //
                        // Round attempted reservation down to needed amount if its larger
                        //

                        if (ByteCount > TempL - Scb->ScbType.Data.TotalReserved) {
                            ByteCount = (ULONG)(TempL - Scb->ScbType.Data.TotalReserved);
                        }

                        NtfsReserveClusters( IrpContext, Scb, FileOffset, ByteCount );
                    }
                }

                //
                // Adjust the count of free clusters and adjust the IrpContext
                // field for the change this transaction.
                //

                Vcb->FreeClusters += ClusterCount;

                //
                // If we had shrunk the Mft zone and there is at least 1/16
                // of the volume now available, then grow the zone back.
                // Acquire MftScb so we can can manipulate its mcb.  Use ex routines so we
                // always drop it at the end in the finally clause.  If we can't get it
                // we'll just skip resizing the zone
                //

                if (FlagOn( Vcb->VcbState, VCB_STATE_REDUCED_MFT ) &&
                    (Int64ShraMod32( Vcb->TotalClusters, 4 ) < Vcb->FreeClusters)) {

                    if (NtfsAcquireResourceExclusive( IrpContext, Vcb->MftScb, FALSE )) {

                        try {
                            NtfsScanEntireBitmap( IrpContext, Vcb, TRUE );
                            NtfsInitializeMftZone( IrpContext, Vcb );
                        } finally {
                            NtfsReleaseResource( IrpContext, Vcb->MftScb );
                        }
                    }
                }

                IrpContext->FreeClusterChange += ClusterCount;

                //
                // Modify the total allocated amount if the pointer is specified.
                //

                if (ARGUMENT_PRESENT( TotalAllocated )) {

                    NtfsAcquireReservedClusters( Vcb );
                    *TotalAllocated -= (LlBytesFromClusters( Vcb, ClusterCount ));

                    //
                    // Clamp at zero so rounding in the caller's byte counts
                    // can never drive the total negative.
                    //

                    if (*TotalAllocated < 0) {
                        *TotalAllocated = 0;
                    }
                    NtfsReleaseReservedClusters( Vcb );
                }

                //
                // Now remove this entry from the mcb and go back to the top of the
                // loop
                //

                NtfsRemoveNtfsMcbEntry( &Scb->Mcb, Vcn, ClusterCount );

                //
                // If this is the Mcb for the Mft file then remember this in the
                // RemovedClusters Mcb.
                //

                if (&Scb->Mcb == &Vcb->MftScb->Mcb) {

                    FsRtlAddLargeMcbEntry( &Vcb->MftScb->ScbType.Mft.RemovedClusters,
                                           Vcn,
                                           Lcn,
                                           ClusterCount );
                }
            }
        }

    } finally {

        DebugUnwind( NtfsDeallocateClusters );
        DebugTrace( 0, Dbg, ("%d\n", NtfsDumpCachedMcbInformation(Vcb)) );

        //
        // Remove the entries from the recently deallocated entries
        // if we didn't modify the bitmap.  ClustersRemoved contains
        // the number we didn't insert in the last attempt to free bits
        // in the bitmap.
        //

        if (ClustersRemoved != 0) {

            PDEALLOCATED_CLUSTERS Clusters = (PDEALLOCATED_CLUSTERS) Vcb->DeallocatedClusterListHead.Flink;

            FsRtlRemoveLargeMcbEntry( &Clusters->Mcb,
                                      LastLcnAdded - ClustersRemoved,
                                      ClustersRemoved );

            //
            // This should be OK.  We are backing out an insert above.
            // Whatever space needed should be present because we are reverting to
            // a known state.
            //

            Clusters->ClusterCount -= ClustersRemoved;
            Vcb->DeallocatedClusters -= ClustersRemoved;
        }

        NtfsReleaseScb( IrpContext, Vcb->BitmapScb );
    }

    DebugTrace( -1, Dbg, ("NtfsDeallocateClusters -> %02lx\n", ClustersDeallocated) );

    return ClustersDeallocated;
}
VOID
NtfsPreAllocateClusters (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN LCN StartingLcn,
    IN LONGLONG ClusterCount,
    OUT PBOOLEAN AcquiredBitmap,
    OUT PBOOLEAN AcquiredMft
    )

/*++

Routine Description:

    This routine pre-allocates clusters in the bitmap within the specified range.
    All changes are made only in memory and neither logged nor written to disk.

    We allow exceptions to flow out possibly with all the files acquired.  At the end we hold
    the bitmap and mft exclusive to mark the reservation if we succeed.  The caller uses
    the two OUT booleans to know which locks must be released on the way out, including
    on the exception path.

Arguments:

    Vcb - Supplies the vcb used in this operation

    StartingLcn - Supplies the starting Lcn index within the bitmap to
        start allocating (i.e., setting to 1).

    ClusterCount - Supplies the number of bits to set to 1 within the bitmap.

    AcquiredBitmap - set to true if we leave with bitmap acquired

    AcquiredMft - set to true if we leave with the mft acquired

Return Value:

    None.

--*/

{
    PAGED_CODE()

    //
    // Lock order is Mft first, then bitmap; each flag is set immediately
    // after its acquire so an exception from the second acquire still
    // reports the first lock as held.
    //

    NtfsAcquireExclusiveScb( IrpContext, Vcb->MftScb );
    *AcquiredMft = TRUE;

    NtfsAcquireExclusiveScb( IrpContext, Vcb->BitmapScb );
    *AcquiredBitmap = TRUE;

    //
    // Verify the requested range is currently free in the volume bitmap.
    // NOTE(review): presumably NtfsRunIsClear raises if any bit in the
    // range is already set - confirm against its definition.
    //

    NtfsRunIsClear( IrpContext, Vcb, StartingLcn, ClusterCount );
}
VOID
NtfsScanEntireBitmap (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN LOGICAL CachedRunsOnly
    )

/*++

Routine Description:

    This routine scans in the entire bitmap, It computes the number of free clusters
    available, and at the same time remembers the largest free runs that it
    then inserts into the cached bitmap structure.

Arguments:

    Vcb - Supplies the vcb used by this operation

    CachedRunsOnly - Indicates that we only want to look for the longest runs.
        When set the free cluster count is left untouched and the scan may be
        skipped entirely if the cached runs are already good enough.

Return Value:

    None.

--*/

{
    LCN Lcn;
    RTL_BITMAP Bitmap;
    PBCB BitmapBcb;

    //
    // StuffAdded tracks whether NtfsAddRecentlyDeallocated replaced
    // Bitmap.Buffer with a pool copy that we must free.
    //

    BOOLEAN StuffAdded = FALSE;

    ASSERT_IRP_CONTEXT( IrpContext );
    ASSERT_VCB( Vcb );

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsScanEntireBitmap\n") );

    BitmapBcb = NULL;

    try {

        //
        // If we are only reloading cached runs then check if there is any real work to do.
        // We don't want to constantly rescan the bitmap if we are growing the Mft and never
        // have any suitable runs available.
        //

        if (CachedRunsOnly) {

            USHORT RunIndex;
            BOOLEAN FoundRun;

            //
            // If there hasn't been a lot of activity freeing clusters then
            // don't do this work unless the cached run structure is empty.
            //

            if (Vcb->ClustersRecentlyFreed < BITMAP_VOLATILE_FREE_COUNT) {

                //
                // Determine if there is a cached run that is at least as
                // large as LongestFreedRun.
                //

                FoundRun = NtfsPositionCachedLcnByLength( &Vcb->CachedRuns,
                                                          Vcb->CachedRuns.LongestFreedRun,
                                                          NULL,
                                                          NULL,
                                                          TRUE,
                                                          &RunIndex );

                if (!FoundRun &&
                    (RunIndex < Vcb->CachedRuns.Used) &&
                    (Vcb->CachedRuns.LengthArray[ RunIndex ] != NTFS_CACHED_RUNS_DEL_INDEX) ) {

                    //
                    // RunIndex points to a larger entry.
                    //

                    FoundRun = TRUE;

                    ASSERT( FoundRun ||
                            (RunIndex >= Vcb->CachedRuns.Used) ||
                            (Vcb->CachedRuns.LengthArray[ RunIndex ] == NTFS_CACHED_RUNS_DEL_INDEX) );
                }

                if (FoundRun) {

                    //
                    // Use the entries we already have.
                    //

                    leave;
                }
            }

        //
        // Set the current total free space to zero and the following loop will compute
        // the actual number of free clusters.
        //

        } else {

            Vcb->FreeClusters = 0;
        }

        //
        // Throw away the current cached runs before rebuilding them from
        // the on-disk bitmap.
        //

        NtfsReinitializeCachedRuns( &Vcb->CachedRuns );

        //
        // For every bitmap page we read it in and check how many free clusters there are.
        // While we have the page in memory we also scan for a large chunks of free space.
        //

        for (Lcn = 0; Lcn < Vcb->TotalClusters; Lcn = Lcn + Bitmap.SizeOfBitMap) {

            LCN StartingLcn;
            RTL_BITMAP_RUN RunArray[64];
            ULONG RunArrayIndex;

            //
            // Read in the bitmap page and make sure that we haven't messed up the math
            //

            if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); StuffAdded = FALSE; }

            NtfsUnpinBcb( IrpContext, &BitmapBcb );
            NtfsMapPageInBitmap( IrpContext, Vcb, Lcn, &StartingLcn, &Bitmap, &BitmapBcb );
            ASSERTMSG("Math wrong for bits per page of bitmap", (Lcn == StartingLcn));

            //
            // Compute the number of clear bits in the bitmap each clear bit denotes
            // a free cluster.
            //

            if (!CachedRunsOnly) {

                Vcb->FreeClusters += RtlNumberOfClearBits( &Bitmap );
            }

            //
            // Now bias the bitmap with the RecentlyDeallocatedMcb so that
            // clusters freed in this transaction are not offered for reuse.
            //

            StuffAdded = NtfsAddRecentlyDeallocated( Vcb, StartingLcn, &Bitmap );

            //
            // Find the 64 longest free runs in the bitmap and add them to the
            // cached bitmap.
            //

            RunArrayIndex = RtlFindClearRuns( &Bitmap, RunArray, 64, TRUE );

            if (RunArrayIndex > 0) {

                NtfsAddCachedRunMult( IrpContext,
                                      Vcb,
                                      Lcn,
                                      RunArray,
                                      RunArrayIndex );
            }
        }

        //
        // The cached runs are now in sync with the bitmap; reset the
        // counters that trigger the next rescan.
        //

        Vcb->ClustersRecentlyFreed = 0;
        Vcb->CachedRuns.LongestFreedRun = 0;

    } finally {

        DebugUnwind( NtfsScanEntireBitmap );

        //
        // Only a clean full scan makes the Vcb free cluster count valid
        // again, so leave the reload flag set otherwise.
        //

        if (!AbnormalTermination() && !CachedRunsOnly) {

            ClearFlag( Vcb->VcbState, VCB_STATE_RELOAD_FREE_CLUSTERS );
        }

        if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); }

        NtfsUnpinBcb( IrpContext, &BitmapBcb );
    }

    DebugTrace( -1, Dbg, ("NtfsScanEntireBitmap -> VOID\n") );

    return;
}
VOID
NtfsModifyBitsInBitmap (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN LONGLONG FirstBit,
    IN LONGLONG BeyondFinalBit,
    IN ULONG RedoOperation,
    IN ULONG UndoOperation
    )

/*++

Routine Description:

    This routine is called to directly modify a specific range of bits in the volume bitmap.
    It should only be called by someone who is directly manipulating the volume bitmap
    (i.e. ExtendVolume).  Each affected page is pinned, a log record is written
    describing the change (and its undo, if requested), and the bits are then
    modified through the restart routines so recovery can replay the operation.

Arguments:

    Vcb - This is the volume being modified.

    FirstBit - First bit in the bitmap to set.

    BeyondFinalBit - Indicates where to stop modifying.

    RedoOperation - Indicates whether we are setting or clearing the bits
        (SetBitsInNonresidentBitMap or ClearBitsInNonresidentBitMap).

    UndoOperation - Indicates whether we need to back out the Redo operation above.
        Noop means no undo record is logged.

Return Value:

    None.

--*/

{
    RTL_BITMAP Bitmap;
    PBCB BitmapBcb = NULL;
    LONGLONG CurrentLcn;
    LONGLONG BaseLcn;
    BITMAP_RANGE BitmapRange;
    PVOID UndoBuffer = NULL;
    ULONG UndoBufferLength = 0;

    PAGED_CODE();

    //
    // Use a try-finally to facilate cleanup.
    //

    try {

        //
        // Loop and perform the necessary operations on each affected page
        // in the bitmap.
        //

        for (CurrentLcn = FirstBit; CurrentLcn < BeyondFinalBit; CurrentLcn = BaseLcn + Bitmap.SizeOfBitMap) {

            //
            // Read in the page of the bitmap.
            //

            NtfsUnpinBcb( IrpContext, &BitmapBcb );
            NtfsPinPageInBitmap( IrpContext, Vcb, CurrentLcn, &BaseLcn, &Bitmap, &BitmapBcb );

            //
            // Determine how many bits to clear on the current page.
            // The range is capped at the end of the page and at
            // BeyondFinalBit, whichever comes first.
            //

            BitmapRange.BitMapOffset = (ULONG) (CurrentLcn - BaseLcn);
            BitmapRange.NumberOfBits = BITS_PER_PAGE - BitmapRange.BitMapOffset;

            if (BitmapRange.NumberOfBits > (ULONG) (BeyondFinalBit - CurrentLcn)) {

                BitmapRange.NumberOfBits = (ULONG) (BeyondFinalBit - CurrentLcn);
            }

            //
            // Write the log record to clear or set the bits.  The undo data
            // is the same BITMAP_RANGE; only the operation code differs.
            //

            if (UndoOperation != Noop) {

                ASSERT( (UndoOperation == SetBitsInNonresidentBitMap) ||
                        (UndoOperation == ClearBitsInNonresidentBitMap) );

                UndoBuffer = &BitmapRange;
                UndoBufferLength = sizeof( BITMAP_RANGE );
            }

            //
            // The log record addresses the page by byte offset and byte
            // length within the bitmap stream, hence the divide-by-8
            // (shift by 3) of the bit offset and bitmap size.
            //

            (VOID)
            NtfsWriteLog( IrpContext,
                          Vcb->BitmapScb,
                          BitmapBcb,
                          RedoOperation,
                          &BitmapRange,
                          sizeof( BITMAP_RANGE ),
                          UndoOperation,
                          UndoBuffer,
                          UndoBufferLength,
                          Int64ShraMod32( BaseLcn, 3 ),
                          0,
                          0,
                          Bitmap.SizeOfBitMap >> 3 );

            //
            // Call the appropriate routine to modify the bits.
            //

            if (RedoOperation == SetBitsInNonresidentBitMap) {

                NtfsRestartSetBitsInBitMap( IrpContext,
                                            &Bitmap,
                                            BitmapRange.BitMapOffset,
                                            BitmapRange.NumberOfBits );

            } else {

                NtfsRestartClearBitsInBitMap( IrpContext,
                                              &Bitmap,
                                              BitmapRange.BitMapOffset,
                                              BitmapRange.NumberOfBits );
            }
        }

    } finally {

        DebugUnwind( NtfsModifyBitsInBitmap );

        NtfsUnpinBcb( IrpContext, &BitmapBcb );
    }

    return;
}
  1737. BOOLEAN
  1738. NtfsCreateMftHole (
  1739. IN PIRP_CONTEXT IrpContext,
  1740. IN PVCB Vcb
  1741. )
  1742. /*++
  1743. Routine Description:
  1744. This routine is called to create a hole within the Mft.
  1745. Arguments:
  1746. Vcb - Vcb for volume.
  1747. Return Value:
  1748. None.
  1749. --*/
  1750. {
  1751. BOOLEAN FoundHole = FALSE;
  1752. PBCB BitmapBcb = NULL;
  1753. BOOLEAN StuffAdded = FALSE;
  1754. RTL_BITMAP Bitmap;
  1755. PUCHAR BitmapBuffer;
  1756. ULONG SizeToMap;
  1757. ULONG BitmapOffset;
  1758. ULONG BitmapSize;
  1759. ULONG BitmapIndex;
  1760. ULONG StartIndex;
  1761. ULONG HoleCount;
  1762. ULONG MftVcn;
  1763. ULONG MftClusterCount;
  1764. PAGED_CODE();
  1765. //
  1766. // Use a try-finally to facilitate cleanup.
  1767. //
  1768. try {
  1769. //
  1770. // Compute the number of records in the Mft file and the full range to
  1771. // pin in the Mft bitmap.
  1772. //
  1773. BitmapIndex = (ULONG) LlFileRecordsFromBytes( Vcb, Vcb->MftScb->Header.FileSize.QuadPart );
  1774. //
  1775. // Knock this index down to a hole boundary.
  1776. //
  1777. BitmapIndex &= Vcb->MftHoleInverseMask;
  1778. //
  1779. // Compute the values for the bitmap.
  1780. //
  1781. BitmapSize = (BitmapIndex + 7) / 8;
  1782. //
  1783. // Convert the index to the number of bits on this page.
  1784. //
  1785. BitmapIndex &= (BITS_PER_PAGE - 1);
  1786. if (BitmapIndex == 0) {
  1787. BitmapIndex = BITS_PER_PAGE;
  1788. }
  1789. //
  1790. // Set the Vcn count to the full size of the bitmap.
  1791. //
  1792. BitmapOffset = (ULONG) ROUND_TO_PAGES( BitmapSize );
  1793. //
  1794. // Loop through all of the pages of the Mft bitmap looking for an appropriate
  1795. // hole.
  1796. //
  1797. while (BitmapOffset != 0) {
  1798. //
  1799. // Move to the beginning of this page.
  1800. //
  1801. BitmapOffset -= BITS_PER_PAGE;
  1802. if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); StuffAdded = FALSE; }
  1803. //
  1804. // Compute the number of bytes to map in the current page.
  1805. //
  1806. SizeToMap = BitmapSize - BitmapOffset;
  1807. if (SizeToMap > PAGE_SIZE) {
  1808. SizeToMap = PAGE_SIZE;
  1809. }
  1810. //
  1811. // Unmap any pages from a previous page and map the current page.
  1812. //
  1813. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  1814. //
  1815. // Initialize the bitmap for this page.
  1816. //
  1817. NtfsMapStream( IrpContext,
  1818. Vcb->MftBitmapScb,
  1819. BitmapOffset,
  1820. SizeToMap,
  1821. &BitmapBcb,
  1822. &BitmapBuffer );
  1823. RtlInitializeBitMap( &Bitmap, (PULONG) BitmapBuffer, SizeToMap * 8 );
  1824. StuffAdded = NtfsAddDeallocatedRecords( Vcb,
  1825. Vcb->MftScb,
  1826. BitmapOffset * 8,
  1827. &Bitmap );
  1828. //
  1829. // Walk through the current page looking for a hole. Continue
  1830. // until we find a hole or have reached the beginning of the page.
  1831. //
  1832. do {
  1833. //
  1834. // Go back one Mft index and look for a clear run.
  1835. //
  1836. BitmapIndex -= 1;
  1837. HoleCount = RtlFindLastBackwardRunClear( &Bitmap,
  1838. BitmapIndex,
  1839. &BitmapIndex );
  1840. //
  1841. // If we couldn't find any run then break out of the loop.
  1842. //
  1843. if (HoleCount == 0) {
  1844. break;
  1845. //
  1846. // If this is too small to make a hole then continue on.
  1847. //
  1848. } else if (HoleCount < Vcb->MftHoleGranularity) {
  1849. BitmapIndex &= Vcb->MftHoleInverseMask;
  1850. continue;
  1851. }
  1852. //
  1853. // Round up the starting index for this clear run and
  1854. // adjust the hole count.
  1855. //
  1856. StartIndex = (BitmapIndex + Vcb->MftHoleMask) & Vcb->MftHoleInverseMask;
  1857. HoleCount -= (StartIndex - BitmapIndex);
  1858. //
  1859. // Round the hole count down to a hole boundary.
  1860. //
  1861. HoleCount &= Vcb->MftHoleInverseMask;
  1862. //
  1863. // If we couldn't find enough records for a hole then
  1864. // go to a previous index.
  1865. //
  1866. if (HoleCount < Vcb->MftHoleGranularity) {
  1867. BitmapIndex &= Vcb->MftHoleInverseMask;
  1868. continue;
  1869. }
  1870. //
  1871. // Convert the hole count to a cluster count.
  1872. //
  1873. if (Vcb->FileRecordsPerCluster == 0) {
  1874. HoleCount <<= Vcb->MftToClusterShift;
  1875. } else {
  1876. HoleCount = 1;
  1877. }
  1878. //
  1879. // Loop by finding the run at the given Vcn and walk through
  1880. // subsequent runs looking for a hole.
  1881. //
  1882. do {
  1883. PVOID RangePtr;
  1884. ULONG McbIndex;
  1885. VCN ThisVcn;
  1886. LCN ThisLcn;
  1887. LONGLONG ThisClusterCount;
  1888. //
  1889. // Find the starting Vcn for this hole and initialize
  1890. // the cluster count for the current hole.
  1891. //
  1892. ThisVcn = StartIndex + (BitmapOffset * 3);
  1893. if (Vcb->FileRecordsPerCluster == 0) {
  1894. ThisVcn <<= Vcb->MftToClusterShift;
  1895. } else {
  1896. ThisVcn >>= Vcb->MftToClusterShift;
  1897. }
  1898. MftVcn = (ULONG) ThisVcn;
  1899. MftClusterCount = 0;
  1900. //
  1901. // Lookup the run at the current Vcn.
  1902. //
  1903. NtfsLookupNtfsMcbEntry( &Vcb->MftScb->Mcb,
  1904. ThisVcn,
  1905. &ThisLcn,
  1906. &ThisClusterCount,
  1907. NULL,
  1908. NULL,
  1909. &RangePtr,
  1910. &McbIndex );
  1911. //
  1912. // Now walk through this bitmap run and look for a run we
  1913. // can deallocate to create a hole.
  1914. //
  1915. do {
  1916. //
  1917. // Go to the next run in the Mcb.
  1918. //
  1919. McbIndex += 1;
  1920. //
  1921. // If this run extends beyond the end of the of the
  1922. // hole then truncate the clusters in this run.
  1923. //
  1924. if (ThisClusterCount > HoleCount) {
  1925. ThisClusterCount = HoleCount;
  1926. HoleCount = 0;
  1927. } else {
  1928. HoleCount -= (ULONG) ThisClusterCount;
  1929. }
  1930. //
  1931. // Check if this run is a hole then clear the count
  1932. // of clusters.
  1933. //
  1934. if (ThisLcn == UNUSED_LCN) {
  1935. //
  1936. // We want to skip this hole. If we have found a
  1937. // hole then we are done. Otherwise we want to
  1938. // find the next range in the Mft starting at the point beyond
  1939. // the current run (which is a hole). Nothing to do if we don't
  1940. // have enough clusters for a full hole.
  1941. //
  1942. if (!FoundHole &&
  1943. (HoleCount >= Vcb->MftClustersPerHole)) {
  1944. //
  1945. // Find the Vcn after the current Mft run.
  1946. //
  1947. ThisVcn += ThisClusterCount;
  1948. //
  1949. // If this isn't on a hole boundary then
  1950. // round up to a hole boundary. Adjust the
  1951. // available clusters for a hole.
  1952. //
  1953. MftVcn = (ULONG) (ThisVcn + Vcb->MftHoleClusterMask);
  1954. MftVcn = (ULONG) ThisVcn & Vcb->MftHoleClusterInverseMask;
  1955. //
  1956. // Now subtract this from the HoleClusterCount.
  1957. //
  1958. HoleCount -= MftVcn - (ULONG) ThisVcn;
  1959. //
  1960. // We need to convert the Vcn at this point to an Mft record
  1961. // number.
  1962. //
  1963. if (Vcb->FileRecordsPerCluster == 0) {
  1964. StartIndex = MftVcn >> Vcb->MftToClusterShift;
  1965. } else {
  1966. StartIndex = MftVcn << Vcb->MftToClusterShift;
  1967. }
  1968. }
  1969. break;
  1970. //
  1971. // We found a run to deallocate.
  1972. //
  1973. } else {
  1974. //
  1975. // Add these clusters to the clusters already found.
  1976. // Set the flag indicating we found a hole if there
  1977. // are enough clusters to create a hole.
  1978. //
  1979. MftClusterCount += (ULONG) ThisClusterCount;
  1980. if (MftClusterCount >= Vcb->MftClustersPerHole) {
  1981. FoundHole = TRUE;
  1982. }
  1983. }
  1984. } while ((HoleCount != 0) &&
  1985. NtfsGetSequentialMcbEntry( &Vcb->MftScb->Mcb,
  1986. &RangePtr,
  1987. McbIndex,
  1988. &ThisVcn,
  1989. &ThisLcn,
  1990. &ThisClusterCount ));
  1991. } while (!FoundHole && (HoleCount >= Vcb->MftClustersPerHole));
  1992. //
  1993. // Round down to a hole boundary for the next search for
  1994. // a hole candidate.
  1995. //
  1996. BitmapIndex &= Vcb->MftHoleInverseMask;
  1997. } while (!FoundHole && (BitmapIndex >= Vcb->MftHoleGranularity));
  1998. //
  1999. // If we found a hole then deallocate the clusters and record
  2000. // the hole count change.
  2001. //
  2002. if (FoundHole) {
  2003. IO_STATUS_BLOCK IoStatus;
  2004. LONGLONG MftFileOffset;
  2005. //
  2006. // We want to flush the data in the Mft out to disk in
  2007. // case a lazywrite comes in during a window where we have
  2008. // removed the allocation but before a possible abort.
  2009. //
  2010. MftFileOffset = LlBytesFromClusters( Vcb, MftVcn );
  2011. //
  2012. // Round the cluster count and hole count down to a hole boundary.
  2013. //
  2014. MftClusterCount &= Vcb->MftHoleClusterInverseMask;
  2015. if (Vcb->FileRecordsPerCluster == 0) {
  2016. HoleCount = MftClusterCount >> Vcb->MftToClusterShift;
  2017. } else {
  2018. HoleCount = MftClusterCount << Vcb->MftToClusterShift;
  2019. }
  2020. CcFlushCache( &Vcb->MftScb->NonpagedScb->SegmentObject,
  2021. (PLARGE_INTEGER) &MftFileOffset,
  2022. BytesFromClusters( Vcb, MftClusterCount ),
  2023. &IoStatus );
  2024. ASSERT( IoStatus.Status == STATUS_SUCCESS );
  2025. //
  2026. // Remove the clusters from the Mcb for the Mft.
  2027. //
  2028. NtfsDeleteAllocation( IrpContext,
  2029. Vcb->MftScb->FileObject,
  2030. Vcb->MftScb,
  2031. MftVcn,
  2032. (LONGLONG) MftVcn + (MftClusterCount - 1),
  2033. TRUE,
  2034. FALSE );
  2035. //
  2036. // Record the change to the hole count.
  2037. //
  2038. Vcb->MftHoleRecords += HoleCount;
  2039. Vcb->MftScb->ScbType.Mft.HoleRecordChange += HoleCount;
  2040. //
  2041. // Exit the loop.
  2042. //
  2043. break;
  2044. }
  2045. //
  2046. // Look at all of the bits on the previous page.
  2047. //
  2048. BitmapIndex = BITS_PER_PAGE;
  2049. }
  2050. } finally {
  2051. DebugUnwind( NtfsCreateMftHole );
  2052. if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); }
  2053. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  2054. }
  2055. return FoundHole;
  2056. }
  2057. BOOLEAN
  2058. NtfsFindMftFreeTail (
  2059. IN PIRP_CONTEXT IrpContext,
  2060. IN PVCB Vcb,
  2061. OUT PLONGLONG FileOffset
  2062. )
  2063. /*++
  2064. Routine Description:
  2065. This routine is called to find the file offset where the run of free records at
  2066. the end of the Mft file begins. If we can't find a minimal run of file records
  2067. we won't perform truncation.
  2068. Arguments:
  2069. Vcb - This is the Vcb for the volume being defragged.
  2070. FileOffset - This is the offset where the truncation may begin.
  2071. Return Value:
  2072. BOOLEAN - TRUE if there is an acceptable candidate for truncation at the end of
  2073. the file FALSE otherwise.
  2074. --*/
  2075. {
  2076. ULONG FinalIndex;
  2077. ULONG BaseIndex;
  2078. ULONG ThisIndex;
  2079. RTL_BITMAP Bitmap;
  2080. PULONG BitmapBuffer;
  2081. BOOLEAN StuffAdded = FALSE;
  2082. BOOLEAN MftTailFound = FALSE;
  2083. PBCB BitmapBcb = NULL;
  2084. PAGED_CODE();
  2085. //
  2086. // Use a try-finally to facilite cleanup.
  2087. //
  2088. try {
  2089. //
  2090. // Find the page and range of the last page of the Mft bitmap.
  2091. //
  2092. FinalIndex = (ULONG)Int64ShraMod32(Vcb->MftScb->Header.FileSize.QuadPart, Vcb->MftShift) - 1;
  2093. BaseIndex = FinalIndex & ~(BITS_PER_PAGE - 1);
  2094. Bitmap.SizeOfBitMap = FinalIndex - BaseIndex + 1;
  2095. //
  2096. // Pin this page. If the last bit is not clear then return immediately.
  2097. //
  2098. NtfsMapStream( IrpContext,
  2099. Vcb->MftBitmapScb,
  2100. (LONGLONG)(BaseIndex / 8),
  2101. (Bitmap.SizeOfBitMap + 7) / 8,
  2102. &BitmapBcb,
  2103. &BitmapBuffer );
  2104. RtlInitializeBitMap( &Bitmap, BitmapBuffer, Bitmap.SizeOfBitMap );
  2105. StuffAdded = NtfsAddDeallocatedRecords( Vcb,
  2106. Vcb->MftScb,
  2107. BaseIndex,
  2108. &Bitmap );
  2109. //
  2110. // If the last bit isn't clear then there is nothing we can do.
  2111. //
  2112. if (RtlCheckBit( &Bitmap, Bitmap.SizeOfBitMap - 1 ) == 1) {
  2113. try_return( NOTHING );
  2114. }
  2115. //
  2116. // Find the final free run of the page.
  2117. //
  2118. RtlFindLastBackwardRunClear( &Bitmap, Bitmap.SizeOfBitMap - 1, &ThisIndex );
  2119. //
  2120. // This Index is a relative value. Adjust by the page offset.
  2121. //
  2122. ThisIndex += BaseIndex;
  2123. //
  2124. // Round up the index to a trucate/extend granularity value.
  2125. //
  2126. ThisIndex += Vcb->MftHoleMask;
  2127. ThisIndex &= Vcb->MftHoleInverseMask;
  2128. if (ThisIndex <= FinalIndex) {
  2129. //
  2130. // Convert this value to a file offset and return it to our caller.
  2131. //
  2132. *FileOffset = LlBytesFromFileRecords( Vcb, ThisIndex );
  2133. MftTailFound = TRUE;
  2134. }
  2135. try_exit: NOTHING;
  2136. } finally {
  2137. DebugUnwind( NtfsFindMftFreeTail );
  2138. if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); }
  2139. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  2140. }
  2141. return MftTailFound;
  2142. }
//
//  Local support routine
//

VOID
NtfsAllocateBitmapRun (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN LCN StartingLcn,
    IN LONGLONG ClusterCount,
    IN BOOLEAN FromCachedRuns
    )

/*++

Routine Description:

    This routine allocates clusters in the bitmap within the specified range.
    Each affected bitmap page is pinned, the change is logged (redo = set,
    undo = clear), the cached-run information is updated, and only then are
    the bits actually set.

Arguments:

    Vcb - Supplies the vcb used in this operation

    StartingLcn - Supplies the starting Lcn index within the bitmap to
        start allocating (i.e., setting to 1).

    ClusterCount - Supplies the number of bits to set to 1 within the
        bitmap.

    FromCachedRuns - Indicates the clusters came from cached information.  Allows
        us to handle the case where the cached runs are corrupt.

Return Value:

    None.

--*/

{
    LCN BaseLcn;
    RTL_BITMAP Bitmap;
    PBCB BitmapBcb;
    ULONG BitOffset;
    ULONG BitsToSet;
    BITMAP_RANGE BitmapRange;

    ASSERT_IRP_CONTEXT( IrpContext );
    ASSERT_VCB( Vcb );

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsAllocateBitmapRun\n") );
    DebugTrace( 0, Dbg, ("StartingLcn = %016I64x\n", StartingLcn) );
    DebugTrace( 0, Dbg, ("ClusterCount = %016I64x\n", ClusterCount) );

    //
    //  Initialize before the try so the finally clause can always unpin.
    //

    BitmapBcb = NULL;

    try {

        //
        //  While the cluster count is greater than zero then we
        //  will loop through reading in a page in the bitmap
        //  setting bits, and then updating cluster count,
        //  and starting lcn
        //

        while (ClusterCount > 0) {

            //
            //  Read in the base containing the starting lcn this will return
            //  a base lcn for the start of the bitmap
            //

            NtfsPinPageInBitmap( IrpContext, Vcb, StartingLcn, &BaseLcn, &Bitmap, &BitmapBcb );

            //
            //  Compute the bit offset within the bitmap of the first bit
            //  we are to set, and also compute the number of bits we need to
            //  set, which is the minimum of the cluster count and the
            //  number of bits left in the bitmap from BitOffset.
            //

            BitOffset = (ULONG)(StartingLcn - BaseLcn);

            if (ClusterCount <= (Bitmap.SizeOfBitMap - BitOffset)) {

                BitsToSet = (ULONG)ClusterCount;

            } else {

                BitsToSet = Bitmap.SizeOfBitMap - BitOffset;
            }

            //
            //  We can only make this check if it is not restart, because we have
            //  no idea whether the update is applied or not.  Raise corrupt if
            //  already set to prevent cross-links.
            //

#ifdef NTFS_CHECK_BITMAP
            if ((Vcb->BitmapCopy != NULL) &&
                !NtfsCheckBitmap( Vcb,
                                  (ULONG) BaseLcn + BitOffset,
                                  BitsToSet,
                                  FALSE )) {

                NtfsBadBitmapCopy( IrpContext, (ULONG) BaseLcn + BitOffset, BitsToSet );
            }
#endif

            //
            //  We hit an unexpected bit set in the bitmap.  The assumption here is that
            //  we got the bit from the cached run information.  If so then simply remove
            //  these clusters from the cached run information and raise CANT_WAIT so the
            //  whole operation is retried.  Otherwise the volume is corrupt.
            //

            if (!RtlAreBitsClear( &Bitmap, BitOffset, BitsToSet )) {

                if (FromCachedRuns) {

                    //
                    //  Clear out the lists.
                    //

#ifdef NTFS_CHECK_CACHED_RUNS
                    ASSERT( FALSE );
#endif

                    NtfsReinitializeCachedRuns( &Vcb->CachedRuns );
                    NtfsRaiseStatus( IrpContext, STATUS_CANT_WAIT, NULL, NULL );
                }

                ASSERTMSG("Cannot set bits that are not clear ", FALSE );
                NtfsRaiseStatus( IrpContext, STATUS_DISK_CORRUPT_ERROR, NULL, NULL );
            }

            //
            //  Now log this change as well.  The log offset is the byte offset
            //  of this page within the bitmap stream (BaseLcn / 8).
            //

            BitmapRange.BitMapOffset = BitOffset;
            BitmapRange.NumberOfBits = BitsToSet;

            (VOID)
            NtfsWriteLog( IrpContext,
                          Vcb->BitmapScb,
                          BitmapBcb,
                          SetBitsInNonresidentBitMap,
                          &BitmapRange,
                          sizeof(BITMAP_RANGE),
                          ClearBitsInNonresidentBitMap,
                          &BitmapRange,
                          sizeof(BITMAP_RANGE),
                          Int64ShraMod32( BaseLcn, 3 ),
                          0,
                          0,
                          Bitmap.SizeOfBitMap >> 3 );

            //
            //  Now that we've logged the change go ahead and remove it from the
            //  free run Mcb.  Do it after it appears in a log record so that
            //  it won't be allocated to another file.
            //

            (VOID)NtfsAddCachedRun( IrpContext,
                                    Vcb,
                                    StartingLcn,
                                    BitsToSet,
                                    RunStateAllocated );

            //
            //  Now set the bits by calling the same routine used at restart.
            //

            NtfsRestartSetBitsInBitMap( IrpContext,
                                        &Bitmap,
                                        BitOffset,
                                        BitsToSet );

#ifdef NTFS_CHECK_BITMAP
            if (Vcb->BitmapCopy != NULL) {

                ULONG BitmapPage;
                ULONG StartBit;

                BitmapPage = ((ULONG) (BaseLcn + BitOffset)) / (PAGE_SIZE * 8);
                StartBit = ((ULONG) (BaseLcn + BitOffset)) & ((PAGE_SIZE * 8) - 1);

                RtlSetBits( Vcb->BitmapCopy + BitmapPage, StartBit, BitsToSet );
            }
#endif

            //
            //  Unpin the Bcb now before possibly looping back.
            //

            NtfsUnpinBcb( IrpContext, &BitmapBcb );

            //
            //  Now decrement the cluster count and increment the starting lcn accordingly
            //

            ClusterCount -= BitsToSet;
            StartingLcn += BitsToSet;
        }

    } finally {

        DebugUnwind( NtfsAllocateBitmapRun );

        NtfsUnpinBcb( IrpContext, &BitmapBcb );
    }

    DebugTrace( -1, Dbg, ("NtfsAllocateBitmapRun -> VOID\n") );

    return;
}
  2302. VOID
  2303. NtfsRestartSetBitsInBitMap (
  2304. IN PIRP_CONTEXT IrpContext,
  2305. IN PRTL_BITMAP Bitmap,
  2306. IN ULONG BitMapOffset,
  2307. IN ULONG NumberOfBits
  2308. )
  2309. /*++
  2310. Routine Description:
  2311. This routine is common to normal operation and restart, and sets a range of
  2312. bits within a single page (as determined by the system which wrote the log
  2313. record) of the volume bitmap.
  2314. Arguments:
  2315. Bitmap - The bit map structure in which to set the bits
  2316. BitMapOffset - Bit offset to set
  2317. NumberOfBits - Number of bits to set
  2318. Return Value:
  2319. None.
  2320. --*/
  2321. {
  2322. UNREFERENCED_PARAMETER( IrpContext );
  2323. PAGED_CODE();
  2324. //
  2325. // Now set the bits and mark the bcb dirty.
  2326. //
  2327. RtlSetBits( Bitmap, BitMapOffset, NumberOfBits );
  2328. }
  2329. //
  2330. // Local support routine
  2331. //
  2332. VOID
  2333. NtfsFreeBitmapRun (
  2334. IN PIRP_CONTEXT IrpContext,
  2335. IN PVCB Vcb,
  2336. IN LCN StartingLcn,
  2337. IN OUT PLONGLONG ClusterCount
  2338. )
  2339. /*++
  2340. Routine Description:
  2341. This routine frees clusters in the bitmap within the specified range.
  2342. Arguments:
  2343. Vcb - Supplies the vcb used in this operation
  2344. StartingLcn - Supplies the starting Lcn index within the bitmap to
  2345. start freeing (i.e., setting to 0).
  2346. ClusterCount - On entry supplies the number of bits to set to 0 within the
  2347. bitmap. On exit contains the number of bits left to insert. This is
  2348. used in the error case to correct the recently deallocated bitmap.
  2349. Return Value:
  2350. None.
  2351. --*/
  2352. {
  2353. LCN BaseLcn;
  2354. RTL_BITMAP Bitmap;
  2355. PBCB BitmapBcb;
  2356. ULONG BitOffset;
  2357. ULONG BitsToClear;
  2358. BITMAP_RANGE BitmapRange;
  2359. ASSERT_IRP_CONTEXT( IrpContext );
  2360. ASSERT_VCB( Vcb );
  2361. PAGED_CODE();
  2362. DebugTrace( +1, Dbg, ("NtfsFreeBitmapRun\n") );
  2363. DebugTrace( 0, Dbg, ("StartingLcn = %016I64\n", StartingLcn) );
  2364. DebugTrace( 0, Dbg, ("ClusterCount = %016I64x\n", *ClusterCount) );
  2365. BitmapBcb = NULL;
  2366. try {
  2367. //
  2368. // Keep track of how volatile the bitmap package is.
  2369. //
  2370. Vcb->ClustersRecentlyFreed += *ClusterCount;
  2371. if (*ClusterCount > Vcb->CachedRuns.LongestFreedRun) {
  2372. Vcb->CachedRuns.LongestFreedRun = *ClusterCount;
  2373. }
  2374. //
  2375. // While the cluster count is greater than zero then we
  2376. // will loop through reading in a page in the bitmap
  2377. // clearing bits, and then updating cluster count,
  2378. // and starting lcn
  2379. //
  2380. while (*ClusterCount > 0) {
  2381. //
  2382. // Read in the base containing the starting lcn this will return
  2383. // a base lcn for the start of the bitmap
  2384. //
  2385. NtfsPinPageInBitmap( IrpContext, Vcb, StartingLcn, &BaseLcn, &Bitmap, &BitmapBcb );
  2386. //
  2387. // Compute the bit offset within the bitmap of the first bit
  2388. // we are to clear, and also compute the number of bits we need to
  2389. // clear, which is the minimum of the cluster count and the
  2390. // number of bits left in the bitmap from BitOffset.
  2391. //
  2392. BitOffset = (ULONG)(StartingLcn - BaseLcn);
  2393. if (*ClusterCount <= Bitmap.SizeOfBitMap - BitOffset) {
  2394. BitsToClear = (ULONG)(*ClusterCount);
  2395. } else {
  2396. BitsToClear = Bitmap.SizeOfBitMap - BitOffset;
  2397. }
  2398. //
  2399. // We can only make this check if it is not restart, because we have
  2400. // no idea whether the update is applied or not. Raise corrupt if
  2401. // these bits aren't set.
  2402. //
  2403. #ifdef NTFS_CHECK_BITMAP
  2404. if ((Vcb->BitmapCopy != NULL) &&
  2405. !NtfsCheckBitmap( Vcb,
  2406. (ULONG) BaseLcn + BitOffset,
  2407. BitsToClear,
  2408. TRUE )) {
  2409. NtfsBadBitmapCopy( IrpContext, (ULONG) BaseLcn + BitOffset, BitsToClear );
  2410. }
  2411. #endif
  2412. //
  2413. // Check if the bits are incorrectly clear.
  2414. //
  2415. if (!RtlAreBitsSet( &Bitmap, BitOffset, BitsToClear )) {
  2416. //
  2417. // Correct thing to do is to ignore the error since the bits are already clear.
  2418. //
  2419. NOTHING;
  2420. //
  2421. // Don't log if the bits are already correct. Otherwise we could set them in the
  2422. // abort path.
  2423. //
  2424. } else {
  2425. //
  2426. // Now log this change as well.
  2427. //
  2428. BitmapRange.BitMapOffset = BitOffset;
  2429. BitmapRange.NumberOfBits = BitsToClear;
  2430. (VOID)
  2431. NtfsWriteLog( IrpContext,
  2432. Vcb->BitmapScb,
  2433. BitmapBcb,
  2434. ClearBitsInNonresidentBitMap,
  2435. &BitmapRange,
  2436. sizeof(BITMAP_RANGE),
  2437. SetBitsInNonresidentBitMap,
  2438. &BitmapRange,
  2439. sizeof(BITMAP_RANGE),
  2440. Int64ShraMod32( BaseLcn, 3 ),
  2441. 0,
  2442. 0,
  2443. Bitmap.SizeOfBitMap >> 3 );
  2444. //
  2445. // Now clear the bits by calling the same routine used at restart.
  2446. //
  2447. NtfsRestartClearBitsInBitMap( IrpContext,
  2448. &Bitmap,
  2449. BitOffset,
  2450. BitsToClear );
  2451. #ifdef NTFS_CHECK_BITMAP
  2452. if (Vcb->BitmapCopy != NULL) {
  2453. ULONG BitmapPage;
  2454. ULONG StartBit;
  2455. BitmapPage = ((ULONG) (BaseLcn + BitOffset)) / (PAGE_SIZE * 8);
  2456. StartBit = ((ULONG) (BaseLcn + BitOffset)) & ((PAGE_SIZE * 8) - 1);
  2457. RtlClearBits( Vcb->BitmapCopy + BitmapPage, StartBit, BitsToClear );
  2458. }
  2459. #endif
  2460. }
  2461. //
  2462. // Unpin the Bcb now before possibly looping back.
  2463. //
  2464. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  2465. //
  2466. // Now decrement the cluster count and increment the starting lcn accordling
  2467. //
  2468. *ClusterCount -= BitsToClear;
  2469. StartingLcn += BitsToClear;
  2470. }
  2471. } finally {
  2472. DebugUnwind( NtfsFreeBitmapRun );
  2473. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  2474. }
  2475. DebugTrace( -1, Dbg, ("NtfsFreeBitmapRun -> VOID\n") );
  2476. return;
  2477. }
  2478. VOID
  2479. NtfsRestartClearBitsInBitMap (
  2480. IN PIRP_CONTEXT IrpContext,
  2481. IN PRTL_BITMAP Bitmap,
  2482. IN ULONG BitMapOffset,
  2483. IN ULONG NumberOfBits
  2484. )
  2485. /*++
  2486. Routine Description:
  2487. This routine is common to normal operation and restart, and clears a range of
  2488. bits within a single page (as determined by the system which wrote the log
  2489. record) of the volume bitmap.
  2490. Arguments:
  2491. Bitmap - Bitmap structure in which to clear the bits
  2492. BitMapOffset - Bit offset to clear
  2493. NumberOfBits - Number of bits to clear
  2494. Return Value:
  2495. None.
  2496. --*/
  2497. {
  2498. UNREFERENCED_PARAMETER( IrpContext );
  2499. PAGED_CODE();
  2500. //
  2501. // Now clear the bits and mark the bcb dirty.
  2502. //
  2503. RtlClearBits( Bitmap, BitMapOffset, NumberOfBits );
  2504. }
  2505. //
  2506. // Local support routine
  2507. //
  2508. BOOLEAN
  2509. NtfsFindFreeBitmapRun (
  2510. IN PIRP_CONTEXT IrpContext,
  2511. IN PVCB Vcb,
  2512. IN LONGLONG NumberToFind,
  2513. IN LCN StartingSearchHint,
  2514. IN BOOLEAN ReturnAnyLength,
  2515. IN BOOLEAN IgnoreMftZone,
  2516. OUT PLCN ReturnedLcn,
  2517. OUT PLONGLONG ClusterCountFound
  2518. )
  2519. /*++
  2520. Routine Description:
  2521. This routine searches the bitmap for free clusters based on the
  2522. hint, and number needed. This routine is actually pretty dumb in
  2523. that it doesn't try for the best fit, we'll assume the caching worked
  2524. and already would have given us a good fit.
  2525. Arguments:
  2526. Vcb - Supplies the vcb used in this operation
  2527. NumberToFind - Supplies the number of clusters that we would
  2528. really like to find
  2529. StartingSearchHint - Supplies an Lcn to start the search from
  2530. ReturnAnyLength - If TRUE then we are more interested in finding
  2531. a run which begins with the StartingSearchHint rather than
  2532. one which matches the length of the run. Case in point is when
  2533. we are trying to append to an existing file (the Mft is a
  2534. critical case).
  2535. ReturnedLcn - Recieves the Lcn of the free run of clusters that
  2536. we were able to find
  2537. IgnoreMftZone - If TRUE then don't adjust the request around the Mft zone.
  2538. ClusterCountFound - Receives the number of clusters in this run
  2539. Return Value:
  2540. BOOLEAN - TRUE if clusters allocated from zone. FALSE otherwise.
  2541. --*/
  2542. {
  2543. RTL_BITMAP Bitmap;
  2544. PVOID BitmapBuffer;
  2545. PBCB BitmapBcb;
  2546. BOOLEAN AllocatedFromZone = FALSE;
  2547. BOOLEAN StuffAdded;
  2548. ULONG Count;
  2549. ULONG RequestedCount;
  2550. ULONG FoundCount;
  2551. //
  2552. // As we walk through the bitmap pages we need to remember
  2553. // exactly where we are in the bitmap stream. We walk through
  2554. // the volume bitmap a page at a time but the current bitmap
  2555. // contained within the current page but may not be the full
  2556. // page.
  2557. //
  2558. // Lcn - Lcn used to find the bitmap page to pin. This Lcn
  2559. // will lie within the page to pin.
  2560. //
  2561. // BaseLcn - Bit offset of the start of the current bitmap in
  2562. // the bitmap stream.
  2563. //
  2564. // LcnFromHint - Bit offset of the start of the page after
  2565. // the page which contains the StartingSearchHint.
  2566. //
  2567. // BitOffset - Offset of found bits from the beginning
  2568. // of the current bitmap.
  2569. //
  2570. LCN Lcn = StartingSearchHint;
  2571. LCN BaseLcn;
  2572. LCN LcnFromHint;
  2573. ULONG BitOffset;
  2574. ULONG StartIndex;
  2575. RTL_BITMAP_RUN RunArray[16];
  2576. ULONG RunArrayIndex;
  2577. ASSERT_IRP_CONTEXT( IrpContext );
  2578. ASSERT_VCB( Vcb );
  2579. PAGED_CODE();
  2580. DebugTrace( +1, Dbg, ("NtfsFindFreeBitmapRun\n") );
  2581. DebugTrace( 0, Dbg, ("NumberToFind = %016I64x\n", NumberToFind) );
  2582. DebugTrace( 0, Dbg, ("StartingSearchHint = %016I64x\n", StartingSearchHint) );
  2583. BitmapBcb = NULL;
  2584. StuffAdded = FALSE;
  2585. try {
  2586. //
  2587. // First trim the number of clusters that we are being asked
  2588. // for to fit in a ulong
  2589. //
  2590. if (NumberToFind > MAXULONG) {
  2591. RequestedCount = Count = MAXULONG;
  2592. } else {
  2593. RequestedCount = Count = (ULONG)NumberToFind;
  2594. }
  2595. //
  2596. // Let's not go past the end of the volume.
  2597. //
  2598. if (Lcn < Vcb->TotalClusters) {
  2599. //
  2600. // Now read in the first bitmap based on the search hint, this will return
  2601. // a base lcn that we can use to compute the real bit off for our hint. We also
  2602. // must bias the bitmap by whatever has been recently deallocated.
  2603. //
  2604. NtfsMapPageInBitmap( IrpContext, Vcb, Lcn, &BaseLcn, &Bitmap, &BitmapBcb );
  2605. LcnFromHint = BaseLcn + Bitmap.SizeOfBitMap;
  2606. StuffAdded = NtfsAddRecentlyDeallocated( Vcb, BaseLcn, &Bitmap );
  2607. BitmapBuffer = Bitmap.Buffer;
  2608. //
  2609. // We don't want to look in the Mft zone if it is at the beginning
  2610. // of this page unless our caller told us to skip any zone checks. Adjust the
  2611. // bitmap so we skip this range.
  2612. //
  2613. if (!IgnoreMftZone &&
  2614. (BaseLcn < Vcb->MftZoneEnd) && (Lcn > Vcb->MftZoneEnd)) {
  2615. //
  2616. // Find the number of bits to swallow. We know this will
  2617. // a multible of bytes since the Mft zone end is always
  2618. // on a ulong boundary.
  2619. //
  2620. BitOffset = (ULONG) (Vcb->MftZoneEnd - BaseLcn);
  2621. //
  2622. // Adjust the bitmap size and buffer to skip this initial
  2623. // range in the Mft zone.
  2624. //
  2625. Bitmap.Buffer = Add2Ptr( Bitmap.Buffer, BitOffset / 8 );
  2626. Bitmap.SizeOfBitMap -= BitOffset;
  2627. BaseLcn = Vcb->MftZoneEnd;
  2628. }
  2629. //
  2630. // The bit offset is from the base of this bitmap to our starting Lcn.
  2631. //
  2632. BitOffset = (ULONG)(Lcn - BaseLcn);
  2633. //
  2634. // Now search the bitmap for a clear number of bits based on our hint
  2635. // If we the returned bitoffset is not -1 then we have a hit.
  2636. //
  2637. if (ReturnAnyLength) {
  2638. //
  2639. // We'd like to find a contiguous run. If we don't then go back and
  2640. // ask for a longer run.
  2641. //
  2642. StartIndex = RtlFindClearBits( &Bitmap, 1, BitOffset );
  2643. if ((StartIndex != -1) &&
  2644. (StartIndex != BitOffset)) {
  2645. BitOffset = RtlFindClearBits( &Bitmap, Count, BitOffset );
  2646. } else {
  2647. BitOffset = StartIndex;
  2648. }
  2649. //
  2650. // We didn't find a contiguous length
  2651. //
  2652. } else {
  2653. BitOffset = RtlFindClearBits( &Bitmap, Count, BitOffset );
  2654. }
  2655. if (BitOffset != -1) {
  2656. //
  2657. // We found a run. If the starting Lcn is our input hint AND
  2658. // we will accept any length then walk forward in the bitmap
  2659. // and find the real length of the run.
  2660. //
  2661. *ReturnedLcn = BitOffset + BaseLcn;
  2662. if (ReturnAnyLength &&
  2663. (*ReturnedLcn == StartingSearchHint)) {
  2664. Count = 0;
  2665. while (TRUE) {
  2666. FoundCount = RtlFindNextForwardRunClear( &Bitmap,
  2667. BitOffset,
  2668. &StartIndex );
  2669. //
  2670. // Verify that we found something and that the offset
  2671. // begins with out start hint.
  2672. //
  2673. if (FoundCount &&
  2674. (BitOffset == StartIndex)) {
  2675. Count += FoundCount;
  2676. if (Count >= RequestedCount) {
  2677. Count = RequestedCount;
  2678. break;
  2679. }
  2680. } else {
  2681. break;
  2682. }
  2683. //
  2684. // Break out if we found enough or the run doesn't
  2685. // extend to the end of the bitmap or we are at
  2686. // the last page of the bitmap.
  2687. //
  2688. if ((StartIndex + FoundCount != Bitmap.SizeOfBitMap) ||
  2689. (BaseLcn + Bitmap.SizeOfBitMap >= Vcb->TotalClusters)) {
  2690. break;
  2691. }
  2692. Lcn = BaseLcn + Bitmap.SizeOfBitMap;
  2693. if (StuffAdded) { NtfsFreePool( BitmapBuffer ); StuffAdded = FALSE; }
  2694. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  2695. NtfsMapPageInBitmap( IrpContext, Vcb, Lcn, &BaseLcn, &Bitmap, &BitmapBcb );
  2696. ASSERTMSG("Math wrong for bits per page of bitmap", (Lcn == BaseLcn));
  2697. StuffAdded = NtfsAddRecentlyDeallocated( Vcb, BaseLcn, &Bitmap );
  2698. BitmapBuffer = Bitmap.Buffer;
  2699. BitOffset = 0;
  2700. }
  2701. }
  2702. *ClusterCountFound = Count;
  2703. //
  2704. // While we have the bitmap let's grab some long runs
  2705. //
  2706. RunArrayIndex = RtlFindClearRuns( &Bitmap, RunArray, 16, TRUE );
  2707. if (RunArrayIndex > 0) {
  2708. NtfsAddCachedRunMult( IrpContext,
  2709. Vcb,
  2710. BaseLcn,
  2711. RunArray,
  2712. RunArrayIndex );
  2713. }
  2714. leave;
  2715. }
  2716. //
  2717. // Well the first try didn't succeed so now just grab the longest free run in the
  2718. // current bitmap, and while we're at it will populate the cached run information
  2719. //
  2720. RunArrayIndex = RtlFindClearRuns( &Bitmap, RunArray, 16, TRUE );
  2721. if (RunArrayIndex > 0) {
  2722. USHORT LocalOffset;
  2723. *ReturnedLcn = RunArray[0].StartingIndex + BaseLcn;
  2724. *ClusterCountFound = RunArray[0].NumberOfBits;
  2725. //
  2726. // There is no point in adding a free run for a range that is
  2727. // about to be consumed, although it won't affect correctness.
  2728. //
  2729. if (*ClusterCountFound > NumberToFind) {
  2730. //
  2731. // Trim off the part of the free run that will be
  2732. // consumed by the caller.
  2733. //
  2734. RunArray[0].StartingIndex += (ULONG)NumberToFind;
  2735. RunArray[0].NumberOfBits -= (ULONG)NumberToFind;
  2736. LocalOffset = 0;
  2737. //
  2738. // Only return the requested amount to the caller.
  2739. //
  2740. *ClusterCountFound = NumberToFind;
  2741. } else {
  2742. //
  2743. // Skip the first entry since the caller will use all of
  2744. // it.
  2745. //
  2746. LocalOffset = 1;
  2747. }
  2748. if (RunArrayIndex > LocalOffset) {
  2749. NtfsAddCachedRunMult( IrpContext,
  2750. Vcb,
  2751. BaseLcn,
  2752. RunArray + LocalOffset,
  2753. RunArrayIndex - LocalOffset );
  2754. }
  2755. leave;
  2756. }
  2757. //
  2758. // Well the current bitmap is full so now simply scan the disk looking
  2759. // for anything that is free, starting with the next bitmap.
  2760. // And again bias the bitmap with recently deallocated clusters.
  2761. // We won't even bother looking for the longest free runs we'll take
  2762. // whatever we can get.
  2763. //
  2764. if (StuffAdded) { NtfsFreePool( BitmapBuffer ); StuffAdded = FALSE; }
  2765. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  2766. Lcn = BaseLcn + Bitmap.SizeOfBitMap;
  2767. //
  2768. // If this is the Mft then scan from the current point to volume end,
  2769. // then back to the beginning.
  2770. //
  2771. if (IgnoreMftZone) {
  2772. //
  2773. // Look in the following ranges. Break out if we find anything.
  2774. //
  2775. // - Current point to end of volume
  2776. // - Start of volume to current
  2777. //
  2778. if (NtfsScanBitmapRange( IrpContext,
  2779. Vcb,
  2780. Lcn,
  2781. Vcb->TotalClusters,
  2782. NumberToFind,
  2783. ReturnedLcn,
  2784. ClusterCountFound )) {
  2785. if ((*ReturnedLcn < Vcb->MftZoneEnd) &&
  2786. (*ReturnedLcn >= Vcb->MftZoneStart)) {
  2787. AllocatedFromZone = TRUE;
  2788. }
  2789. leave;
  2790. }
  2791. if (NtfsScanBitmapRange( IrpContext,
  2792. Vcb,
  2793. 0,
  2794. Lcn,
  2795. NumberToFind,
  2796. ReturnedLcn,
  2797. ClusterCountFound )) {
  2798. if ((*ReturnedLcn < Vcb->MftZoneEnd) &&
  2799. (*ReturnedLcn >= Vcb->MftZoneStart)) {
  2800. AllocatedFromZone = TRUE;
  2801. }
  2802. leave;
  2803. }
  2804. //
  2805. // No luck.
  2806. //
  2807. *ClusterCountFound = 0;
  2808. leave;
  2809. }
  2810. }
  2811. //
  2812. // Check if we are starting before the Mft zone.
  2813. //
  2814. if (Lcn < Vcb->MftZoneStart) {
  2815. //
  2816. // Look in the following ranges. Break out if we find anything.
  2817. //
  2818. // - Current point to Zone start
  2819. // - Zone end to end of volume
  2820. // - Start of volume to current
  2821. //
  2822. if (NtfsScanBitmapRange( IrpContext,
  2823. Vcb,
  2824. Lcn,
  2825. Vcb->MftZoneStart,
  2826. NumberToFind,
  2827. ReturnedLcn,
  2828. ClusterCountFound )) {
  2829. leave;
  2830. }
  2831. if (NtfsScanBitmapRange( IrpContext,
  2832. Vcb,
  2833. Vcb->MftZoneEnd,
  2834. Vcb->TotalClusters,
  2835. NumberToFind,
  2836. ReturnedLcn,
  2837. ClusterCountFound )) {
  2838. leave;
  2839. }
  2840. if (NtfsScanBitmapRange( IrpContext,
  2841. Vcb,
  2842. 0,
  2843. Lcn,
  2844. NumberToFind,
  2845. ReturnedLcn,
  2846. ClusterCountFound )) {
  2847. leave;
  2848. }
  2849. //
  2850. // Check if we are beyond the Mft zone.
  2851. //
  2852. } else if (Lcn > Vcb->MftZoneEnd) {
  2853. //
  2854. // Look in the following ranges. Break out if we find anything.
  2855. //
  2856. // - Current point to end of volume
  2857. // - Start of volume to Zone start
  2858. // - Zone end to current point.
  2859. //
  2860. if (NtfsScanBitmapRange( IrpContext,
  2861. Vcb,
  2862. Lcn,
  2863. Vcb->TotalClusters,
  2864. NumberToFind,
  2865. ReturnedLcn,
  2866. ClusterCountFound )) {
  2867. leave;
  2868. }
  2869. if (NtfsScanBitmapRange( IrpContext,
  2870. Vcb,
  2871. 0,
  2872. Vcb->MftZoneStart,
  2873. NumberToFind,
  2874. ReturnedLcn,
  2875. ClusterCountFound )) {
  2876. leave;
  2877. }
  2878. if (NtfsScanBitmapRange( IrpContext,
  2879. Vcb,
  2880. Vcb->MftZoneEnd,
  2881. Lcn,
  2882. NumberToFind,
  2883. ReturnedLcn,
  2884. ClusterCountFound )) {
  2885. leave;
  2886. }
  2887. //
  2888. // We are starting within the zone. Skip over the zone to check it last.
  2889. //
  2890. } else {
  2891. //
  2892. // Look in the following ranges. Break out if we find anything.
  2893. //
  2894. // - End of zone to end of volume
  2895. // - Start of volume to start of zone
  2896. //
  2897. if (NtfsScanBitmapRange( IrpContext,
  2898. Vcb,
  2899. Vcb->MftZoneEnd,
  2900. Vcb->TotalClusters,
  2901. NumberToFind,
  2902. ReturnedLcn,
  2903. ClusterCountFound )) {
  2904. leave;
  2905. }
  2906. if (NtfsScanBitmapRange( IrpContext,
  2907. Vcb,
  2908. 0,
  2909. Vcb->MftZoneStart,
  2910. NumberToFind,
  2911. ReturnedLcn,
  2912. ClusterCountFound )) {
  2913. leave;
  2914. }
  2915. }
  2916. //
  2917. // We didn't find anything. Let's examine the zone explicitly.
  2918. //
  2919. if (NtfsScanBitmapRange( IrpContext,
  2920. Vcb,
  2921. Vcb->MftZoneStart,
  2922. Vcb->MftZoneEnd,
  2923. NumberToFind,
  2924. ReturnedLcn,
  2925. ClusterCountFound )) {
  2926. AllocatedFromZone = TRUE;
  2927. leave;
  2928. }
  2929. //
  2930. // No luck.
  2931. //
  2932. *ClusterCountFound = 0;
  2933. } finally {
  2934. DebugUnwind( NtfsFindFreeBitmapRun );
  2935. if (StuffAdded) { NtfsFreePool( BitmapBuffer ); }
  2936. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  2937. }
  2938. DebugTrace( 0, Dbg, ("ReturnedLcn <- %016I64x\n", *ReturnedLcn) );
  2939. DebugTrace( 0, Dbg, ("ClusterCountFound <- %016I64x\n", *ClusterCountFound) );
  2940. DebugTrace( -1, Dbg, ("NtfsFindFreeBitmapRun -> VOID\n") );
  2941. return AllocatedFromZone;
  2942. }
  2943. //
  2944. // Local support routine
  2945. //
BOOLEAN
NtfsScanBitmapRange (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN LCN StartLcn,
    IN LCN BeyondLcn,
    IN LONGLONG NumberToFind,
    OUT PLCN ReturnedLcn,
    OUT PLONGLONG ClusterCountFound
    )

/*++

Routine Description:

    This routine will scan a range of the bitmap looking for a free run.
    It is called when we need to limit the bits we are willing to consider
    at a time, typically to skip over the Mft zone.

    As a side effect, any free runs discovered in a scanned page (beyond the
    one being returned) are inserted into the cached-run information on the
    Vcb so later allocations can be satisfied without rescanning the bitmap.

Arguments:

    IrpContext - Context of the originating request.

    Vcb - Volume being scanned.

    StartLcn - First Lcn in the bitmap to consider.

    BeyondLcn - First Lcn in the bitmap past the range we want to consider.

    NumberToFind - Supplies the number of clusters that we would
        really like to find.  Note this is only used to decide how much of
        the first free run to leave in the run cache; the full length of the
        first free run is still returned in ClusterCountFound even when it
        exceeds NumberToFind.

    ReturnedLcn - Start of free range if found.

    ClusterCountFound - Length of free range if found.

Return Value:

    BOOLEAN - TRUE if a free range was found.  FALSE otherwise.

--*/

{
    BOOLEAN FreeRangeFound = FALSE;

    RTL_BITMAP Bitmap;

    //  BitmapBuffer remembers the buffer that must be freed when
    //  NtfsAddRecentlyDeallocated substitutes a private pool copy
    //  (StuffAdded == TRUE), since Bitmap.Buffer itself may later be
    //  advanced with Add2Ptr below.

    PVOID BitmapBuffer;
    ULONG BitOffset;

    PBCB BitmapBcb = NULL;
    BOOLEAN StuffAdded = FALSE;

    LCN BaseLcn;

    RTL_BITMAP_RUN RunArray[16];
    ULONG RunArrayIndex;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsScanBitmapRange...\n") );

    //
    //  The end Lcn might be beyond the end of the bitmap.
    //

    if (BeyondLcn > Vcb->TotalClusters) {

        BeyondLcn = Vcb->TotalClusters;
    }

    //
    //  Use a try-finally to facilitate cleanup.
    //

    try {

        //
        //  Now search the rest of the bitmap starting with right after the mft zone
        //  followed by the mft zone (or the beginning of the disk).  Again take whatever
        //  we can get and not bother with the longest runs.
        //

        while (StartLcn < BeyondLcn) {

            //
            //  Map the bitmap page containing StartLcn and overlay the
            //  recently-deallocated clusters so we don't hand out space
            //  that is still owned by an uncommitted transaction.
            //

            NtfsUnpinBcb( IrpContext, &BitmapBcb );

            NtfsMapPageInBitmap( IrpContext, Vcb, StartLcn, &BaseLcn, &Bitmap, &BitmapBcb );

            StuffAdded = NtfsAddRecentlyDeallocated( Vcb, BaseLcn, &Bitmap );

            BitmapBuffer = Bitmap.Buffer;

            //
            //  Check if we don't want to use the entire page.
            //

            if ((BaseLcn + Bitmap.SizeOfBitMap) > BeyondLcn) {

                Bitmap.SizeOfBitMap = (ULONG) (BeyondLcn - BaseLcn);
            }

            //
            //  Now adjust the starting Lcn if not at the beginning
            //  of the bitmap page.  We know this will be a multiple
            //  of bytes since the MftZoneEnd is always on a ulong
            //  boundary in the bitmap.
            //

            if (BaseLcn != StartLcn) {

                BitOffset = (ULONG) (StartLcn - BaseLcn);

                Bitmap.SizeOfBitMap -= BitOffset;
                Bitmap.Buffer = Add2Ptr( Bitmap.Buffer, BitOffset / 8 );
                BaseLcn = StartLcn;
            }

            //
            //  Collect up to 16 of the longest clear runs in this (possibly
            //  trimmed) page.  RunArray comes back sorted by length.
            //

            RunArrayIndex = RtlFindClearRuns( &Bitmap, RunArray, 16, TRUE );

            if (RunArrayIndex > 0) {

                USHORT LocalOffset;

                //
                //  Return the longest run found to the caller.
                //

                *ReturnedLcn = RunArray[0].StartingIndex + BaseLcn;
                *ClusterCountFound = RunArray[0].NumberOfBits;
                FreeRangeFound = TRUE;

                //
                //  There is no point in adding a free run for a range that is
                //  about to be consumed, although it won't affect correctness.
                //

                if (*ClusterCountFound > NumberToFind) {

                    //
                    //  Trim off the part of the free run that will be
                    //  consumed by the caller.
                    //

                    RunArray[0].StartingIndex += (ULONG)NumberToFind;
                    RunArray[0].NumberOfBits -= (ULONG)NumberToFind;
                    LocalOffset = 0;

                } else {

                    //
                    //  Skip the first entry since the caller will use all of
                    //  it.
                    //

                    LocalOffset = 1;
                }

                //
                //  Cache whatever runs remain for future allocations.
                //

                if (RunArrayIndex > LocalOffset) {

                    NtfsAddCachedRunMult( IrpContext,
                                          Vcb,
                                          BaseLcn,
                                          RunArray + LocalOffset,
                                          RunArrayIndex - LocalOffset );
                }

                leave;
            }

            //
            //  Nothing free in this page; advance to the next page and
            //  release the private bitmap copy if one was made.
            //

            StartLcn = BaseLcn + Bitmap.SizeOfBitMap;

            if (StuffAdded) { NtfsFreePool( BitmapBuffer ); StuffAdded = FALSE; }
        }

    } finally {

        DebugUnwind( NtfsScanBitmapRange );

        if (StuffAdded) { NtfsFreePool( BitmapBuffer ); StuffAdded = FALSE; }
        NtfsUnpinBcb( IrpContext, &BitmapBcb );

        DebugTrace( -1, Dbg, ("NtfsScanBitmapRange -> %08lx\n", FreeRangeFound) );
    }

    return FreeRangeFound;
}
  3067. //
  3068. // Local support routine
  3069. //
BOOLEAN
NtfsAddRecentlyDeallocated (
    IN PVCB Vcb,
    IN LCN StartingBitmapLcn,
    IN OUT PRTL_BITMAP Bitmap
    )

/*++

Routine Description:

    This routine will modify the input bitmap by removing from it
    any clusters that are in the recently deallocated mcb.  If we
    do add stuff then we will not modify the bitmap buffer itself but
    will allocate a new copy for the bitmap.  The caller is responsible
    for freeing that copy (via NtfsFreePool) when this routine returns
    TRUE.

    We will always protect the boot sector on the disk by marking the
    first 8K as allocated.  This will prevent us from overwriting the
    boot sector if the volume becomes corrupted.

Arguments:

    Vcb - Supplies the Vcb used in this operation

    StartingBitmapLcn - Supplies the Starting Lcn of the bitmap

    Bitmap - Supplies the bitmap being modified

Return Value:

    BOOLEAN - TRUE if the bitmap has been modified (Bitmap->Buffer now
        points at a private pool allocation owned by the caller) and FALSE
        otherwise.

--*/

{
    BOOLEAN Results;
    PVOID NewBuffer;
    LCN EndingBitmapLcn;
    PLARGE_MCB Mcb;
    ULONG i;
    VCN StartingVcn;
    LCN StartingLcn;
    LCN EndingLcn;
    LONGLONG ClusterCount;
    PDEALLOCATED_CLUSTERS DeallocatedClusters;

    ULONG StartingBit;
    ULONG EndingBit;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsAddRecentlyDeallocated...\n") );

    //
    //  Until shown otherwise we will assume that we haven't updated anything
    //

    Results = FALSE;

    //
    //  If this is the first page of the bitmap then mark the first 8K as
    //  allocated.  This will prevent us from accidentally allocating out
    //  of the boot sector even if the bitmap is corrupt.
    //

    if ((StartingBitmapLcn == 0) &&
        !RtlAreBitsSet( Bitmap, 0, ClustersFromBytes( Vcb, 0x2000 ))) {

        //
        //  Make the private copy first; we never write into the mapped
        //  bitmap page directly.  (SizeOfBitMap+7)/8 rounds up to whole
        //  bytes.
        //

        NewBuffer = NtfsAllocatePool(PagedPool, (Bitmap->SizeOfBitMap+7)/8 );
        RtlCopyMemory( NewBuffer, Bitmap->Buffer, (Bitmap->SizeOfBitMap+7)/8 );
        Bitmap->Buffer = NewBuffer;
        Results = TRUE;

        //
        //  Now mark the bits as allocated.
        //

        RtlSetBits( Bitmap, 0, ClustersFromBytes( Vcb, 0x2000 ));
    }

    //
    //  Now compute the ending bitmap lcn for the bitmap
    //

    EndingBitmapLcn = StartingBitmapLcn + (Bitmap->SizeOfBitMap - 1);

    //
    //  For every run in the recently deallocated mcb we will check if it is real and
    //  then check if the run in contained in the bitmap.
    //
    //  There are really six cases to consider:
    //
    //         StartingBitmapLcn                   EndingBitmapLcn
    //               +=================================+
    //
    //
    //   1 -------+                                                EndingLcn
    //
    //   2                                                StartingLcn +--------
    //
    //   3 -------------------+                                      EndingLcn
    //
    //   4                             StartingLcn +-------------------------
    //
    //   5 ---------------------------------------------------------------
    //
    //   6             EndingLcn +-------------------+ StartingLcn
    //
    //
    //  1. EndingLcn is before StartingBitmapLcn which means we haven't
    //     reached the bitmap yet.
    //
    //  2. StartingLcn is after EndingBitmapLcn which means we've gone
    //     beyond the bitmap
    //
    //  3, 4, 5, 6.  There is some overlap between the bitmap and
    //     the run.
    //

    DeallocatedClusters = (PDEALLOCATED_CLUSTERS)Vcb->DeallocatedClusterListHead.Flink;

    do {

        //
        //  Skip this Mcb if it has no entries.
        //

        if (DeallocatedClusters->ClusterCount != 0) {

            Mcb = &DeallocatedClusters->Mcb;

            for (i = 0; FsRtlGetNextLargeMcbEntry( Mcb, i, &StartingVcn, &StartingLcn, &ClusterCount ); i += 1) {

                //
                //  Only identity mappings (Vcn == Lcn) represent real
                //  deallocated runs in this Mcb; presumably other entries
                //  are holes -- TODO confirm against the code that fills
                //  the deallocated cluster Mcbs.
                //

                if (StartingVcn == StartingLcn) {

                    //
                    //  Compute the ending lcn as the starting lcn minus cluster count plus 1.
                    //

                    EndingLcn = (StartingLcn + ClusterCount) - 1;

                    //
                    //  Check if we haven't reached the bitmap yet.
                    //

                    if (EndingLcn < StartingBitmapLcn) {

                        NOTHING;

                    //
                    //  Check if we've gone beyond the bitmap
                    //

                    } else if (EndingBitmapLcn < StartingLcn) {

                        //
                        //  The Mcb runs are in ascending Lcn order here, so
                        //  no later entry in this Mcb can overlap either.
                        //

                        break;

                    //
                    //  Otherwise we overlap with the bitmap in some way
                    //

                    } else {

                        //
                        //  First check if we have never set bit in the bitmap. and if so then
                        //  now is the time to make an private copy of the bitmap buffer
                        //

                        if (Results == FALSE) {

                            NewBuffer = NtfsAllocatePool(PagedPool, (Bitmap->SizeOfBitMap+7)/8 );
                            RtlCopyMemory( NewBuffer, Bitmap->Buffer, (Bitmap->SizeOfBitMap+7)/8 );
                            Bitmap->Buffer = NewBuffer;
                            Results = TRUE;
                        }

                        //
                        //  Now compute the begining and ending bit that we need to set in the bitmap,
                        //  clipping the run to the page boundaries.
                        //

                        StartingBit = (StartingLcn < StartingBitmapLcn ?
                                       0 :
                                       (ULONG)(StartingLcn - StartingBitmapLcn));

                        EndingBit = (EndingLcn > EndingBitmapLcn ?
                                     Bitmap->SizeOfBitMap - 1 :
                                     (ULONG)(EndingLcn - StartingBitmapLcn));

                        //
                        //  And set those bits
                        //

                        RtlSetBits( Bitmap, StartingBit, EndingBit - StartingBit + 1 );
                    }
                }
            }
        }

        DeallocatedClusters = (PDEALLOCATED_CLUSTERS)DeallocatedClusters->Link.Flink;

    } while (&DeallocatedClusters->Link != &Vcb->DeallocatedClusterListHead );

    DebugTrace( -1, Dbg, ("NtfsAddRecentlyDeallocated -> %08lx\n", Results) );

    return Results;
}
  3223. //
  3224. // Local support routine
  3225. //
  3226. VOID
  3227. NtfsMapOrPinPageInBitmap (
  3228. IN PIRP_CONTEXT IrpContext,
  3229. IN PVCB Vcb,
  3230. IN LCN Lcn,
  3231. OUT PLCN StartingLcn,
  3232. IN OUT PRTL_BITMAP Bitmap,
  3233. OUT PBCB *BitmapBcb,
  3234. IN BOOLEAN AlsoPinData
  3235. )
  3236. /*++
  3237. Routine Description:
  3238. This routine reads in a single page of the bitmap file and returns
  3239. an initialized bitmap variable for that page
  3240. Arguments:
  3241. Vcb - Supplies the vcb used in this operation
  3242. Lcn - Supplies the Lcn index in the bitmap that we want to read in
  3243. In other words, this routine reads in the bitmap page containing
  3244. the lcn index
  3245. StartingLcn - Receives the base lcn index of the bitmap that we've
  3246. just read in.
  3247. Bitmap - Receives an initialized bitmap. The memory for the bitmap
  3248. header must be supplied by the caller
  3249. BitmapBcb - Receives the Bcb for the bitmap buffer
  3250. AlsoPinData - Indicates if this routine should also pin the page
  3251. in memory, used if we need to modify the page
  3252. Return Value:
  3253. None.
  3254. --*/
  3255. {
  3256. ULONG BitmapSize;
  3257. PVOID Buffer;
  3258. ASSERT_IRP_CONTEXT( IrpContext );
  3259. ASSERT_VCB( Vcb );
  3260. PAGED_CODE();
  3261. DebugTrace( +1, Dbg, ("NtfsMapOrPinPageInBitmap\n") );
  3262. DebugTrace( 0, Dbg, ("Lcn = %016I64x\n", Lcn) );
  3263. //
  3264. // Compute the starting lcn index of the page we're after
  3265. //
  3266. *StartingLcn = Lcn & ~(BITS_PER_PAGE-1);
  3267. //
  3268. // Compute how many bits there are in the page we need to read
  3269. //
  3270. BitmapSize = (ULONG)(Vcb->TotalClusters - *StartingLcn);
  3271. if (BitmapSize > BITS_PER_PAGE) {
  3272. BitmapSize = BITS_PER_PAGE;
  3273. }
  3274. //
  3275. // Now either Pin or Map the bitmap page, we will add 7 to the bitmap
  3276. // size before dividing it by 8. That way we will ensure we get the last
  3277. // byte read in. For example a bitmap size of 1 through 8 reads in 1 byte
  3278. //
  3279. if (AlsoPinData) {
  3280. NtfsPinStream( IrpContext,
  3281. Vcb->BitmapScb,
  3282. Int64ShraMod32( *StartingLcn, 3 ),
  3283. (BitmapSize+7)/8,
  3284. BitmapBcb,
  3285. &Buffer );
  3286. } else {
  3287. NtfsMapStream( IrpContext,
  3288. Vcb->BitmapScb,
  3289. Int64ShraMod32( *StartingLcn, 3 ),
  3290. (BitmapSize+7)/8,
  3291. BitmapBcb,
  3292. &Buffer );
  3293. }
  3294. //
  3295. // And initialize the bitmap
  3296. //
  3297. RtlInitializeBitMap( Bitmap, Buffer, BitmapSize );
  3298. DebugTrace( 0, Dbg, ("StartingLcn <- %016I64x\n", *StartingLcn) );
  3299. DebugTrace( -1, Dbg, ("NtfsMapOrPinPageInBitmap -> VOID\n") );
  3300. return;
  3301. }
  3302. BOOLEAN
  3303. NtfsAddCachedRun (
  3304. IN PIRP_CONTEXT IrpContext,
  3305. IN PVCB Vcb,
  3306. IN LCN StartingLcn,
  3307. IN LONGLONG ClusterCount,
  3308. IN NTFS_RUN_STATE RunState
  3309. )
  3310. /*++
  3311. Routine Description:
  3312. This procedure adds a new run to the cached free space
  3313. bitmap information.
  3314. Arguments:
  3315. Vcb - Supplies the vcb for this operation
  3316. StartingLcn - Supplies the lcn for the run being added
  3317. ClusterCount - Supplies the number of clusters in the run being added
  3318. RunState - Supplies the state of the run being added. This state
  3319. must be either free or allocated.
  3320. Return Value:
  3321. BOOLEAN - TRUE if more entries can be added to the list, FALSE otherwise.
  3322. --*/
  3323. {
  3324. ASSERT_IRP_CONTEXT( IrpContext );
  3325. ASSERT_VCB( Vcb );
  3326. PAGED_CODE();
  3327. DebugTrace( +1, Dbg, ("NtfsAddCachedRun\n") );
  3328. DebugTrace( 0, Dbg, ("StartingLcn = %016I64x\n", StartingLcn) );
  3329. DebugTrace( 0, Dbg, ("ClusterCount = %016I64x\n", ClusterCount) );
  3330. //
  3331. // Based on whether we are adding a free or allocated run we
  3332. // setup or local variables to a point to the right
  3333. // vcb variables
  3334. //
  3335. if (RunState == RunStateFree) {
  3336. //
  3337. // We better not be setting Lcn 0 free.
  3338. //
  3339. if (StartingLcn == 0) {
  3340. ASSERT( FALSE );
  3341. NtfsRaiseStatus( IrpContext, STATUS_DISK_CORRUPT_ERROR, NULL, NULL );
  3342. }
  3343. //
  3344. // Sanity check that we aren't adding bits beyond the end of the
  3345. // bitmap.
  3346. //
  3347. ASSERT( StartingLcn + ClusterCount <= Vcb->TotalClusters );
  3348. NtfsInsertCachedLcn( &Vcb->CachedRuns,
  3349. StartingLcn,
  3350. ClusterCount );
  3351. } else {
  3352. //
  3353. // Now remove the run from the cached runs because it can potentially already be
  3354. // there.
  3355. //
  3356. NtfsRemoveCachedLcn( &Vcb->CachedRuns,
  3357. StartingLcn,
  3358. ClusterCount );
  3359. }
  3360. DebugTrace( -1, Dbg, ("NtfsAddCachedRun -> VOID\n") );
  3361. return ((Vcb->CachedRuns.Avail - Vcb->CachedRuns.Used + Vcb->CachedRuns.DelLcnCount) > 0);
  3362. }
  3363. #if 0
VOID
NtfsMakeSpaceCachedLcn (
    IN PNTFS_CACHED_RUNS CachedRuns,
    IN LCN StartingLcn,
    IN RTL_BITMAP_RUN *RunArray,
    IN ULONG RunCount,
    IN PUSHORT LcnSorted OPTIONAL
    )

/*++

Routine Description:

    This procedure attempts to make space in the Lcn-sorted array for RunCount
    new entries in the given Lcn range.  This routine will not delete any
    existing entries to create the space because we don't know at this time
    how many will actually end up being inserted into the list.  They may not
    be inserted because their run lengths are too small relative to the
    entries already in the list.  This call is used because it is more
    efficient to create space once for all the entries than to do so
    individually.  In effect, this routine moves windows of deleted entries
    to the desired Lcn position.

    NOTE: this routine is currently compiled out (the enclosing #if 0) and
    is only an empty stub; the body never implemented the behavior above.

Arguments:

    CachedRuns - Pointer to a cached run structure.

    StartingLcn - Supplies the base Lcn for the runs being added

    RunArray - The bit position and length of each of the free runs.
        The array will be sorted according to length.

    RunCount - Supplies the number of runs being added

    LcnSorted - An optional array of RunCount indices that gives the Lcn
        sort order.

Return Value:

    None.

--*/

{
    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsMakeSpaceCachedLcn\n") );

    //  Stub: no work is performed.

    DebugTrace( -1, Dbg, ("NtfsMakeSpaceCachedLcn -> VOID\n") );

    return;
}
  3400. #endif /* 0 */
VOID
NtfsAddCachedRunMult (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN LCN StartingLcn,
    IN PRTL_BITMAP_RUN RunArray,
    IN ULONG RunCount
    )

/*++

Routine Description:

    This procedure adds multiple new runs to the cached free space
    bitmap information.  It is assumed that the new runs fall
    in a close range of Lcn values.  As a rule, these runs come from
    a single page of the bitmap.

    Note that the Lcn pre-sort below is compiled out (#if 0), so LcnSorted
    is always NULL here and every entry takes the generic insert path.

Arguments:

    Vcb - Supplies the vcb for this operation

    StartingLcn - Supplies the base Lcn for the runs being added

    RunArray - The bit position (relative to StartingLcn) and length of
        each of the free runs.  The array will be sorted according to length.

    RunCount - Supplies the number of runs being added

Return Value:

    None.

--*/

{
    USHORT Index1;
    PUSHORT LcnSorted = NULL;

    ASSERT_IRP_CONTEXT( IrpContext );
    ASSERT_VCB( Vcb );

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsAddCachedRunMult\n") );
    DebugTrace( 0, Dbg, ("StartingLcn = %016I64x\n", StartingLcn) );
    DebugTrace( 0, Dbg, ("RunArray = %08lx\n", RunArray) );
    DebugTrace( 0, Dbg, ("RunCount = %08lx\n", RunCount) );

#if 0
    //
    //  Sort the entries by Lcn.  It is often the case that at startup we are
    //  adding entries that will all fall at the end of the Lcn-sorted list.
    //  However, if the entries are not added in Lcn-sorted order there will
    //  likely be some moving around of entries in the Lcn-sorted list that
    //  could be avoided.
    //

    LcnSorted = NtfsAllocatePoolNoRaise( PagedPool, sizeof( USHORT ) * RunCount );

    if (LcnSorted != NULL) {

        USHORT Index2;

        //
        //  Bubble sort the elements.
        //

        for (Index1 = 1, LcnSorted[0] = 0;
             Index1 < RunCount;
             Index1 += 1) {

            for (Index2 = 0; Index2 < Index1; Index2 += 1) {

                if (RunArray[Index1].StartingIndex < RunArray[LcnSorted[Index2]].StartingIndex) {

                    //
                    //  Move the entries from Index2 through Index1 - 1 to the
                    //  right to make space for the current entry.
                    //

                    RtlMoveMemory( LcnSorted + Index2 + 1,
                                   LcnSorted + Index2,
                                   sizeof( USHORT ) * (Index1 - Index2) );
                    break;
                }
            }

            //
            //  Write the index into the correctly sorted location.
            //

            LcnSorted[Index2] = Index1;
        }
    }

    //
    //  Make space in the Lcn-sorted array for these new entries.
    //  This is done in advance because it is more efficient to create
    //  space once for all the entries than to do so individually.
    //  The following routine will not delete any existing entries to
    //  create the space because we don't know at this time how many will
    //  actually end up being inserted into the list.  They may not be
    //  inserted because their run lengths are too small relative to the
    //  entries already in the list.
    //

    NtfsMakeSpaceCachedLcn( &Vcb->CachedRuns,
                            StartingLcn,
                            RunArray,
                            RunCount,
                            LcnSorted );
#endif /* 0 */

    //
    //  Insert the new entries, one NtfsAddCachedRun call per run.  The
    //  return value (whether the cache can take more entries) is ignored.
    //

    for (Index1 = 0; Index1 < RunCount; Index1 += 1) {

        //
        //  If not sorted then do the generic insert.  The gain for the sorted case
        //  that we won't have to do a memory copy for entries we just inserted.
        //  (With the sort compiled out above, LcnSorted is always NULL and
        //  only the else branch runs.)
        //

        if (LcnSorted != NULL) {

            (VOID) NtfsAddCachedRun( IrpContext,
                                     Vcb,
                                     StartingLcn + RunArray[ LcnSorted[ Index1 ]].StartingIndex,
                                     (LONGLONG)RunArray[ LcnSorted[ Index1 ]].NumberOfBits,
                                     RunStateFree );

        } else {

            (VOID) NtfsAddCachedRun( IrpContext,
                                     Vcb,
                                     StartingLcn + RunArray[ Index1 ].StartingIndex,
                                     (LONGLONG)RunArray[ Index1 ].NumberOfBits,
                                     RunStateFree );
        }
    }

    if (LcnSorted != NULL) {

        NtfsFreePool( LcnSorted );
    }

    DebugTrace( -1, Dbg, ("NtfsAddCachedRunMult -> VOID\n") );

    return;
}
  3513. //
  3514. // Local support routine
  3515. //
VOID
NtfsReadAheadCachedBitmap (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN LCN StartingLcn
    )

/*++

Routine Description:

    This routine does a read ahead of the bitmap into the cached bitmap
    starting at the specified starting lcn.  If the starting lcn is already
    covered by the cached run information this is a no-op; otherwise the
    containing bitmap page is scanned and its longest free runs are added
    to the cache.

Arguments:

    Vcb - Supplies the vcb to use in this operation

    StartingLcn - Supplies the starting lcn to use in this read ahead
        operation.

Return Value:

    None.

--*/

{
    RTL_BITMAP Bitmap;
    PBCB BitmapBcb;

    //  TRUE when NtfsAddRecentlyDeallocated replaced Bitmap.Buffer with a
    //  private pool copy that must be freed on exit.

    BOOLEAN StuffAdded;

    LCN BaseLcn;
    ULONG Index;
    LONGLONG Size;

    RTL_BITMAP_RUN RunArray[16];
    ULONG RunArrayIndex;

    ASSERT_IRP_CONTEXT( IrpContext );
    ASSERT_VCB( Vcb );

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsReadAheadCachedBitmap\n") );
    DebugTrace( 0, Dbg, ("StartingLcn = %016I64x\n", StartingLcn) );

    BitmapBcb = NULL;
    StuffAdded = FALSE;

    try {

        //
        //  Check if the lcn index is already in the cached runs info and if it is then
        //  our read ahead is done.
        //

        if (NtfsLookupCachedLcn( &Vcb->CachedRuns,
                                 StartingLcn,
                                 &BaseLcn,
                                 &BaseLcn,
                                 NULL )) {

            try_return( NOTHING );
        }

        //
        //  Map in the page containing the starting lcn and compute the bit index for the
        //  starting lcn within the bitmap.  And bias the bitmap with recently deallocated
        //  clusters.
        //

        NtfsMapPageInBitmap( IrpContext, Vcb, StartingLcn, &BaseLcn, &Bitmap, &BitmapBcb );

        StuffAdded = NtfsAddRecentlyDeallocated( Vcb, BaseLcn, &Bitmap );

        Index = (ULONG)(StartingLcn - BaseLcn);

        //
        //  Now if the index is clear then we can build up the hint at the starting index, we
        //  scan through the bitmap checking the size of the run and then adding the free run
        //  to the cached free space mcb.  Since the bit at Index is clear, the
        //  run found by RtlFindNextForwardRunClear starts exactly at Index,
        //  i.e. at StartingLcn.
        //

        if (RtlCheckBit( &Bitmap, Index ) == 0) {

            Size = RtlFindNextForwardRunClear( &Bitmap, Index, &Index );

            (VOID) NtfsAddCachedRun( IrpContext, Vcb, StartingLcn, (LONGLONG)Size, RunStateFree );
        }

        //
        //  While we have the bitmap loaded we will scan it for a few longest runs
        //

        RunArrayIndex = RtlFindClearRuns( &Bitmap, RunArray, 16, TRUE );

        if (RunArrayIndex > 0) {

            NtfsAddCachedRunMult( IrpContext,
                                  Vcb,
                                  BaseLcn,
                                  RunArray,
                                  RunArrayIndex );
        }

    try_exit: NOTHING;
    } finally {

        DebugUnwind( NtfsReadAheadCachedBitmap );

        if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); }
        NtfsUnpinBcb( IrpContext, &BitmapBcb );
    }

    DebugTrace( -1, Dbg, ("NtfsReadAheadCachedBitmap -> VOID\n") );

    return;
}
  3598. //
  3599. // Local support routine
  3600. //
  3601. BOOLEAN
  3602. NtfsGetNextHoleToFill (
  3603. IN PIRP_CONTEXT IrpContext,
  3604. IN PNTFS_MCB Mcb,
  3605. IN VCN StartingVcn,
  3606. IN VCN EndingVcn,
  3607. OUT PVCN VcnToFill,
  3608. OUT PLONGLONG ClusterCountToFill,
  3609. OUT PLCN PrecedingLcn
  3610. )
  3611. /*++
  3612. Routine Description:
  3613. This routine takes a specified range within an mcb and returns the to
  3614. caller the first run that is not allocated to any lcn within the range
  3615. Arguments:
  3616. Mcb - Supplies the mcb to use in this operation
  3617. StartingVcn - Supplies the starting vcn to search from
  3618. EndingVcn - Supplies the ending vcn to search to
  3619. VcnToFill - Receives the first Vcn within the range that is unallocated
  3620. ClusterCountToFill - Receives the size of the free run
  3621. PrecedingLcn - Receives the Lcn of the allocated cluster preceding the
  3622. free run. If the free run starts at Vcn 0 then the preceding lcn
  3623. is -1.
  3624. Return Value:
  3625. BOOLEAN - TRUE if there is another hole to fill and FALSE otherwise
  3626. --*/
  3627. {
  3628. BOOLEAN Result;
  3629. BOOLEAN McbHit;
  3630. LCN Lcn;
  3631. LONGLONG MaximumRunSize;
  3632. LONGLONG LlTemp1;
  3633. ASSERT_IRP_CONTEXT( IrpContext );
  3634. PAGED_CODE();
  3635. DebugTrace( +1, Dbg, ("NtfsGetNextHoleToFill\n") );
  3636. DebugTrace( 0, Dbg, ("StartingVcn = %016I64x\n", StartingVcn) );
  3637. DebugTrace( 0, Dbg, ("EndingVcn = %016I64x\n", EndingVcn) );
  3638. //
  3639. // We'll first assume that there is not a hole to fill unless
  3640. // the following loop finds one to fill
  3641. //
  3642. Result = FALSE;
  3643. for (*VcnToFill = StartingVcn;
  3644. *VcnToFill <= EndingVcn;
  3645. *VcnToFill += *ClusterCountToFill) {
  3646. //
  3647. // Check if the hole is already filled and it so then do nothing but loop back up
  3648. // to the top of our loop and try again
  3649. //
  3650. if ((McbHit = NtfsLookupNtfsMcbEntry( Mcb, *VcnToFill, &Lcn, ClusterCountToFill, NULL, NULL, NULL, NULL )) &&
  3651. (Lcn != UNUSED_LCN)) {
  3652. NOTHING;
  3653. } else {
  3654. //
  3655. // We have a hole to fill so now compute the maximum size hole that
  3656. // we are allowed to fill and then check if we got an miss on the lookup
  3657. // and need to set cluster count or if the size we got back is too large
  3658. //
  3659. MaximumRunSize = (EndingVcn - *VcnToFill) + 1;
  3660. if (!McbHit || (*ClusterCountToFill > MaximumRunSize)) {
  3661. *ClusterCountToFill = MaximumRunSize;
  3662. }
  3663. //
  3664. // Now set the preceding lcn to either -1 if there isn't a preceding vcn or
  3665. // set it to the lcn of the preceding vcn
  3666. //
  3667. if (*VcnToFill == 0) {
  3668. *PrecedingLcn = UNUSED_LCN;
  3669. } else {
  3670. LlTemp1 = *VcnToFill - 1;
  3671. if (!NtfsLookupNtfsMcbEntry( Mcb, LlTemp1, PrecedingLcn, NULL, NULL, NULL, NULL, NULL )) {
  3672. *PrecedingLcn = UNUSED_LCN;
  3673. }
  3674. }
  3675. //
  3676. // We found a hole so set our result to TRUE and break out of the loop
  3677. //
  3678. Result = TRUE;
  3679. break;
  3680. }
  3681. }
  3682. DebugTrace( 0, Dbg, ("VcnToFill <- %016I64x\n", *VcnToFill) );
  3683. DebugTrace( 0, Dbg, ("ClusterCountToFill <- %016I64x\n", *ClusterCountToFill) );
  3684. DebugTrace( 0, Dbg, ("PrecedingLcn <- %016I64x\n", *PrecedingLcn) );
  3685. DebugTrace( -1, Dbg, ("NtfsGetNextHoleToFill -> %08lx\n", Result) );
  3686. return Result;
  3687. }
  3688. //
  3689. // Local support routine
  3690. //
  3691. LONGLONG
  3692. NtfsScanMcbForRealClusterCount (
  3693. IN PIRP_CONTEXT IrpContext,
  3694. IN PNTFS_MCB Mcb,
  3695. IN VCN StartingVcn,
  3696. IN VCN EndingVcn
  3697. )
  3698. /*++
  3699. Routine Description:
  3700. This routine scans the input mcb within the specified range and returns
  3701. to the caller the exact number of clusters that a really free (i.e.,
  3702. not mapped to any Lcn) within the range.
  3703. Arguments:
  3704. Mcb - Supplies the Mcb used in this operation
  3705. StartingVcn - Supplies the starting vcn to search from
  3706. EndingVcn - Supplies the ending vcn to search to
  3707. Return Value:
  3708. LONGLONG - Returns the number of unassigned clusters from
  3709. StartingVcn to EndingVcn inclusive within the mcb.
  3710. --*/
  3711. {
  3712. LONGLONG FreeCount;
  3713. VCN Vcn;
  3714. LCN Lcn;
  3715. LONGLONG RunSize;
  3716. ASSERT_IRP_CONTEXT( IrpContext );
  3717. PAGED_CODE();
  3718. DebugTrace( +1, Dbg, ("NtfsScanMcbForRealClusterCount\n") );
  3719. DebugTrace( 0, Dbg, ("StartingVcn = %016I64x\n", StartingVcn) );
  3720. DebugTrace( 0, Dbg, ("EndingVcn = %016I64x\n", EndingVcn) );
  3721. //
  3722. // First compute free count as if the entire run is already unallocated
  3723. // and the in the following loop we march through the mcb looking for
  3724. // actual allocation and decrementing the free count appropriately
  3725. //
  3726. FreeCount = (EndingVcn - StartingVcn) + 1;
  3727. for (Vcn = StartingVcn; Vcn <= EndingVcn; Vcn = Vcn + RunSize) {
  3728. //
  3729. // Lookup the mcb entry and if we get back false then we're overrun
  3730. // the mcb and therefore nothing else above it can be allocated.
  3731. //
  3732. if (!NtfsLookupNtfsMcbEntry( Mcb, Vcn, &Lcn, &RunSize, NULL, NULL, NULL, NULL )) {
  3733. break;
  3734. }
  3735. //
  3736. // If the lcn we got back is not -1 then this run is actually already
  3737. // allocated, so first check if the run size puts us over the ending
  3738. // vcn and adjust as necessary and then decrement the free count
  3739. // by the run size
  3740. //
  3741. if (Lcn != UNUSED_LCN) {
  3742. if (RunSize > ((EndingVcn - Vcn) + 1)) {
  3743. RunSize = (EndingVcn - Vcn) + 1;
  3744. }
  3745. FreeCount = FreeCount - RunSize;
  3746. }
  3747. }
  3748. DebugTrace( -1, Dbg, ("NtfsScanMcbForRealClusterCount -> %016I64x\n", FreeCount) );
  3749. return FreeCount;
  3750. }
  3751. //
  3752. // Local support routine, only defined with ntfs debug version
  3753. //
  3754. #ifdef NTFSDBG
  3755. ULONG
  3756. NtfsDumpCachedMcbInformation (
  3757. IN PVCB Vcb
  3758. )
  3759. /*++
  3760. Routine Description:
  3761. This routine dumps out the cached bitmap information
  3762. Arguments:
  3763. Vcb - Supplies the Vcb used by this operation
  3764. Return Value:
  3765. ULONG - 1.
  3766. --*/
  3767. {
  3768. DbgPrint("Dump BitMpSup Information, Vcb@ %08lx\n", Vcb);
  3769. DbgPrint("TotalCluster: %016I64x\n", Vcb->TotalClusters);
  3770. DbgPrint("FreeClusters: %016I64x\n", Vcb->FreeClusters);
  3771. return 1;
  3772. }
  3773. #endif // NTFSDBG
  3774. //
  3775. // The rest of this module implements the record allocation routines
  3776. //
VOID
NtfsInitializeRecordAllocation (
    IN PIRP_CONTEXT IrpContext,
    IN PSCB DataScb,
    IN PATTRIBUTE_ENUMERATION_CONTEXT BitmapAttribute,
    IN ULONG BytesPerRecord,
    IN ULONG ExtendGranularity,
    IN ULONG TruncateGranularity,
    IN OUT PRECORD_ALLOCATION_CONTEXT RecordAllocationContext
    )

/*++

Routine Description:

    This routine initializes the record allocation context used for
    allocating and deallocating fixed sized records from a data stream.

    Note that the bitmap attribute size must always be at least a multiple
    of 32 bits.  However the data scb does not need to contain that many
    records.  If in the course of allocating a new record we discover that
    the data scb is too small we will then add allocation to the data scb.

Arguments:

    DataScb - Supplies the Scb representing the data stream that is being
        divided into fixed sized records with each bit in the bitmap corresponding
        to one record in the data stream

    BitmapAttribute - Supplies the enumeration context for the bitmap
        attribute.  The attribute can either be resident or nonresident
        and this routine will handle both cases properly.

    BytesPerRecord - Supplies the size of the homogenous records that
        that the data stream is being divided into.

    ExtendGranularity - Supplies the number of records (i.e., allocation units
        to extend the data scb by each time).

    TruncateGranularity - Supplies the number of records to use when truncating
        the data scb.  That is if the end of the data stream contains the
        specified number of free records then we truncate.

    RecordAllocationContext - Supplies the memory for an context record that is
        utilized by this record allocation routines.

Return Value:

    None.

--*/

{
    PATTRIBUTE_RECORD_HEADER AttributeRecordHeader;
    RTL_BITMAP Bitmap;

    //
    //  ClearLength/ClearIndex track the trailing clear run of the bitmap
    //  (if any); they are used at the bottom of the routine to deduce the
    //  index of the last set bit.
    //

    ULONG ClearLength;
    ULONG ClearIndex;

    ASSERT_IRP_CONTEXT( IrpContext );
    ASSERT_SCB( DataScb );

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsInitializeRecordAllocation\n") );

    //
    //  Each extend/truncate unit must cover at least one cluster of the
    //  data stream.
    //

    ASSERT( BytesPerRecord * ExtendGranularity >= DataScb->Vcb->BytesPerCluster );
    ASSERT( BytesPerRecord * TruncateGranularity >= DataScb->Vcb->BytesPerCluster );

    //
    //  First zero out the context record except for the data scb.
    //  (Everything from the BitmapScb field onward is wiped; the DataScb
    //  field at the front survives.)
    //

    RtlZeroMemory( &RecordAllocationContext->BitmapScb,
                   sizeof(RECORD_ALLOCATION_CONTEXT) -
                   FIELD_OFFSET( RECORD_ALLOCATION_CONTEXT, BitmapScb ));

    //
    //  And then set the fields in the context record that do not depend on
    //  whether the bitmap attribute is resident or not
    //

    RecordAllocationContext->DataScb = DataScb;
    RecordAllocationContext->BytesPerRecord = BytesPerRecord;
    RecordAllocationContext->ExtendGranularity = ExtendGranularity;
    RecordAllocationContext->TruncateGranularity = TruncateGranularity;

    //
    //  Set up our hint fields.  For the Mft we start allocating past the
    //  reserved system file records.
    //

    RecordAllocationContext->LowestDeallocatedIndex = MAXULONG;

    if (DataScb == DataScb->Vcb->MftScb) {

        RecordAllocationContext->StartingHint = FIRST_USER_FILE_NUMBER;

    } else {

        RecordAllocationContext->StartingHint = 0;
    }

    //
    //  Now get a reference to the bitmap record header and then take two
    //  different paths depending if the bitmap attribute is resident or not
    //

    AttributeRecordHeader = NtfsFoundAttribute(BitmapAttribute);

    if (NtfsIsAttributeResident(AttributeRecordHeader)) {

        ASSERTMSG("bitmap must be multiple quadwords", AttributeRecordHeader->Form.Resident.ValueLength % 8 == 0);

        //
        //  For a resident bitmap attribute the bitmap scb field is null and we
        //  set the bitmap size from the value length.  Also we will initialize
        //  our local bitmap variable and determine the number of free bits
        //  current available.
        //

        RecordAllocationContext->BitmapScb = NULL;
        RecordAllocationContext->CurrentBitmapSize = 8 * AttributeRecordHeader->Form.Resident.ValueLength;

        RtlInitializeBitMap( &Bitmap,
                             (PULONG)NtfsAttributeValue( AttributeRecordHeader ),
                             RecordAllocationContext->CurrentBitmapSize );

        RecordAllocationContext->NumberOfFreeBits = RtlNumberOfClearBits( &Bitmap );

        //
        //  Find the clear run that ends the bitmap (if any); it is used
        //  below to compute IndexOfLastSetBit.
        //

        ClearLength = RtlFindLastBackwardRunClear( &Bitmap,
                                                   RecordAllocationContext->CurrentBitmapSize - 1,
                                                   &ClearIndex );

    } else {

        UNICODE_STRING BitmapName;
        BOOLEAN ReturnedExistingScb;
        PBCB BitmapBcb;
        PVOID BitmapBuffer;

        ASSERTMSG("bitmap must be multiple quadwords", ((ULONG)AttributeRecordHeader->Form.Nonresident.FileSize) % 8 == 0);

        //
        //  For a non resident bitmap attribute we better have been given the
        //  record header for the first part and not something that has spilled
        //  into multiple segment records
        //

        ASSERT( AttributeRecordHeader->Form.Nonresident.LowestVcn == 0 );

        BitmapBcb = NULL;

        try {

            ULONG StartingByte;
            ULONG BitsThisPage;
            ULONG BytesThisPage;
            ULONG RemainingBytes;
            ULONG ThisClearIndex;
            ULONG ThisClearLength;

            //
            //  Create the bitmap scb for the bitmap attribute
            //

            BitmapName.MaximumLength =
            BitmapName.Length = AttributeRecordHeader->NameLength * sizeof( WCHAR );
            BitmapName.Buffer = Add2Ptr(AttributeRecordHeader, AttributeRecordHeader->NameOffset);

            RecordAllocationContext->BitmapScb = NtfsCreateScb( IrpContext,
                                                                DataScb->Fcb,
                                                                AttributeRecordHeader->TypeCode,
                                                                &BitmapName,
                                                                FALSE,
                                                                &ReturnedExistingScb );

            //
            //  Now determine the bitmap size, for now we'll only take bitmap attributes that are
            //  no more than 16 pages large.
            //

            RecordAllocationContext->CurrentBitmapSize = 8 * ((ULONG)AttributeRecordHeader->Form.Nonresident.FileSize);

            //
            //  Create the stream file if not present.
            //

            if (RecordAllocationContext->BitmapScb->FileObject == NULL) {

                NtfsCreateInternalAttributeStream( IrpContext,
                                                   RecordAllocationContext->BitmapScb,
                                                   TRUE,
                                                   &NtfsInternalUseFile[INITIALIZERECORDALLOCATION_FILE_NUMBER] );
            }

            //
            //  Walk through each page of the bitmap and compute the number of set
            //  bits and the last set bit in the bitmap.
            //

            RecordAllocationContext->NumberOfFreeBits = 0;

            RemainingBytes = (ULONG) AttributeRecordHeader->Form.Nonresident.FileSize;
            StartingByte = 0;
            ClearLength = 0;

            while (TRUE) {

                //
                //  Process the bitmap one page (or the final partial page)
                //  at a time.
                //

                BytesThisPage = RemainingBytes;

                if (RemainingBytes > PAGE_SIZE) {

                    BytesThisPage = PAGE_SIZE;
                }

                BitsThisPage = BytesThisPage * 8;

                //
                //  Now map the bitmap data, initialize our local bitmap variable and
                //  calculate the number of free bits currently available
                //

                NtfsUnpinBcb( IrpContext, &BitmapBcb );

                NtfsMapStream( IrpContext,
                               RecordAllocationContext->BitmapScb,
                               (LONGLONG)StartingByte,
                               BytesThisPage,
                               &BitmapBcb,
                               &BitmapBuffer );

                RtlInitializeBitMap( &Bitmap,
                                     BitmapBuffer,
                                     BitsThisPage );

                RecordAllocationContext->NumberOfFreeBits += RtlNumberOfClearBits( &Bitmap );

                //
                //  We are interested in remembering the last set bit in this bitmap.
                //  If the bitmap ends with a clear run then the last set bit is
                //  immediately prior to this clear run.  We need to check each page
                //  as we go through the bitmap to see if a clear run ends at the end
                //  of the current page.
                //

                ThisClearLength = RtlFindLastBackwardRunClear( &Bitmap,
                                                               BitsThisPage - 1,
                                                               &ThisClearIndex );

                //
                //  If there is a run and it ends at the end of the page then
                //  either combine with a previous run or remember that this is the
                //  start of the run.
                //

                if ((ThisClearLength != 0) &&
                    ((ThisClearLength + ThisClearIndex) == BitsThisPage)) {

                    //
                    //  If this is the entire page and the previous page ended
                    //  with a clear run then just extend that run.
                    //

                    if ((ThisClearIndex == 0) && (ClearLength != 0)) {

                        ClearLength += ThisClearLength;

                    //
                    //  Otherwise this is a new clear run.  Bias the starting index
                    //  by the bit offset of this page.
                    //

                    } else {

                        ClearLength = ThisClearLength;
                        ClearIndex = ThisClearIndex + (StartingByte * 8);
                    }

                //
                //  This page does not end with a clear run.
                //

                } else {

                    ClearLength = 0;
                }

                //
                //  If we are not at the end of the bitmap then update our
                //  counters.
                //

                if (RemainingBytes != BytesThisPage) {

                    StartingByte += PAGE_SIZE;
                    RemainingBytes -= PAGE_SIZE;

                } else {

                    break;
                }
            }

        } finally {

            DebugUnwind( NtfsInitializeRecordAllocation );

            //
            //  Always drop the mapping of the last bitmap page, even on an
            //  abnormal unwind.
            //

            NtfsUnpinBcb( IrpContext, &BitmapBcb );
        }
    }

    //
    //  With ClearLength and ClearIndex we can now deduce the last set bit in the
    //  bitmap.  If the bitmap does not end in a clear run then the final bit
    //  of the bitmap is treated as the last set bit.
    //

    if ((ClearLength != 0) && ((ClearLength + ClearIndex) == RecordAllocationContext->CurrentBitmapSize)) {

        RecordAllocationContext->IndexOfLastSetBit = ClearIndex - 1;

    } else {

        RecordAllocationContext->IndexOfLastSetBit = RecordAllocationContext->CurrentBitmapSize - 1;
    }

    DebugTrace( -1, Dbg, ("NtfsInitializeRecordAllocation -> VOID\n") );

    return;
}
  4011. VOID
  4012. NtfsUninitializeRecordAllocation (
  4013. IN PIRP_CONTEXT IrpContext,
  4014. IN OUT PRECORD_ALLOCATION_CONTEXT RecordAllocationContext
  4015. )
  4016. /*++
  4017. Routine Description:
  4018. This routine is used to uninitialize the record allocation context.
  4019. Arguments:
  4020. RecordAllocationContext - Supplies the record allocation context being
  4021. decommissioned.
  4022. Return Value:
  4023. None.
  4024. --*/
  4025. {
  4026. ASSERT_IRP_CONTEXT( IrpContext );
  4027. PAGED_CODE();
  4028. DebugTrace( +1, Dbg, ("NtfsUninitializeRecordAllocation\n") );
  4029. //
  4030. // And then for safe measure zero out the entire record except for the
  4031. // the data Scb.
  4032. //
  4033. RtlZeroMemory( &RecordAllocationContext->BitmapScb,
  4034. sizeof(RECORD_ALLOCATION_CONTEXT) -
  4035. FIELD_OFFSET( RECORD_ALLOCATION_CONTEXT, BitmapScb ));
  4036. DebugTrace( -1, Dbg, ("NtfsUninitializeRecordAllocation -> VOID\n") );
  4037. return;
  4038. }
  4039. ULONG
  4040. NtfsAllocateRecord (
  4041. IN PIRP_CONTEXT IrpContext,
  4042. IN PRECORD_ALLOCATION_CONTEXT RecordAllocationContext,
  4043. IN PATTRIBUTE_ENUMERATION_CONTEXT BitmapAttribute
  4044. )
  4045. /*++
  4046. Routine Description:
  4047. This routine is used to allocate a new record for the specified record
  4048. allocation context.
  4049. It will return the index of a free record in the data scb as denoted by
  4050. the bitmap attribute. If necessary this routine will extend the bitmap
  4051. attribute size (including spilling over to the nonresident case), and
  4052. extend the data scb size.
  4053. On return the record is zeroed.
  4054. Arguments:
  4055. RecordAllocationContext - Supplies the record allocation context used
  4056. in this operation
  4057. BitmapAttribute - Supplies the enumeration context for the bitmap
  4058. attribute. This parameter is ignored if the bitmap attribute is
  4059. non resident, in which case we create an scb for the attribute and
  4060. store a pointer to it in the record allocation context.
  4061. Return Value:
  4062. ULONG - Returns the index of the record just allocated, zero based.
  4063. --*/
  4064. {
  4065. PSCB DataScb;
  4066. LONGLONG DataOffset;
  4067. LONGLONG ClusterCount;
  4068. ULONG BytesPerRecord;
  4069. ULONG ExtendGranularity;
  4070. ULONG TruncateGranularity;
  4071. PULONG CurrentBitmapSize;
  4072. PULONG NumberOfFreeBits;
  4073. PSCB BitmapScb;
  4074. PBCB BitmapBcb;
  4075. RTL_BITMAP Bitmap;
  4076. PUCHAR BitmapBuffer;
  4077. ULONG BitmapOffset;
  4078. ULONG BitmapIndex;
  4079. ULONG BitmapSizeInBytes;
  4080. ULONG BitmapCurrentOffset = 0;
  4081. ULONG BitmapSizeInPages;
  4082. BOOLEAN StuffAdded = FALSE;
  4083. BOOLEAN Rescan;
  4084. ULONG Hint;
  4085. PVCB Vcb;
  4086. ASSERT_IRP_CONTEXT( IrpContext );
  4087. PAGED_CODE();
  4088. DebugTrace( +1, Dbg, ("NtfsAllocateRecord\n") );
  4089. //
  4090. // Synchronize by acquiring the data scb exclusive, as an "end resource".
  4091. // Then use try-finally to insure we free it up.
  4092. //
  4093. DataScb = RecordAllocationContext->DataScb;
  4094. NtfsAcquireExclusiveScb( IrpContext, DataScb );
  4095. try {
  4096. //
  4097. // Remember some values for convenience.
  4098. //
  4099. BytesPerRecord = RecordAllocationContext->BytesPerRecord;
  4100. ExtendGranularity = RecordAllocationContext->ExtendGranularity;
  4101. TruncateGranularity = RecordAllocationContext->TruncateGranularity;
  4102. Vcb = DataScb->Vcb;
  4103. //
  4104. // See if someone made the bitmap nonresident, and we still think
  4105. // it is resident. If so, we must uninitialize and insure reinitialization
  4106. // below.
  4107. //
  4108. if ((RecordAllocationContext->BitmapScb == NULL) &&
  4109. !NtfsIsAttributeResident( NtfsFoundAttribute( BitmapAttribute ))) {
  4110. NtfsUninitializeRecordAllocation( IrpContext,
  4111. RecordAllocationContext );
  4112. RecordAllocationContext->CurrentBitmapSize = MAXULONG;
  4113. }
  4114. //
  4115. // Reinitialize the record context structure if necessary.
  4116. //
  4117. if (RecordAllocationContext->CurrentBitmapSize == MAXULONG) {
  4118. NtfsInitializeRecordAllocation( IrpContext,
  4119. DataScb,
  4120. BitmapAttribute,
  4121. BytesPerRecord,
  4122. ExtendGranularity,
  4123. TruncateGranularity,
  4124. RecordAllocationContext );
  4125. }
  4126. BitmapScb = RecordAllocationContext->BitmapScb;
  4127. CurrentBitmapSize = &RecordAllocationContext->CurrentBitmapSize;
  4128. NumberOfFreeBits = &RecordAllocationContext->NumberOfFreeBits;
  4129. BitmapSizeInBytes = *CurrentBitmapSize / 8;
  4130. Hint = RecordAllocationContext->StartingHint;
  4131. //
  4132. // We will do different operations based on whether the bitmap is resident or nonresident
  4133. // The first case we will handle is the resident bitmap.
  4134. //
  4135. if (BitmapScb == NULL) {
  4136. BOOLEAN SizeExtended = FALSE;
  4137. UCHAR NewByte;
  4138. //
  4139. // Now now initialize the local bitmap variable and hunt for that free bit
  4140. //
  4141. BitmapBuffer = (PUCHAR) NtfsAttributeValue( NtfsFoundAttribute( BitmapAttribute ));
  4142. RtlInitializeBitMap( &Bitmap,
  4143. (PULONG)BitmapBuffer,
  4144. *CurrentBitmapSize );
  4145. StuffAdded = NtfsAddDeallocatedRecords( Vcb, DataScb, 0, &Bitmap );
  4146. BitmapIndex = RtlFindClearBits( &Bitmap, 1, Hint );
  4147. //
  4148. // Check if we have found a free record that can be allocated, If not then extend
  4149. // the size of the bitmap by 64 bits, and set the index to the bit first bit
  4150. // of the extension we just added
  4151. //
  4152. if (BitmapIndex == 0xffffffff) {
  4153. union {
  4154. QUAD Quad;
  4155. UCHAR Uchar[ sizeof(QUAD) ];
  4156. } ZeroQuadWord;
  4157. *(PLARGE_INTEGER)&(ZeroQuadWord.Uchar)[0] = Li0;
  4158. NtfsChangeAttributeValue( IrpContext,
  4159. DataScb->Fcb,
  4160. BitmapSizeInBytes,
  4161. &(ZeroQuadWord.Uchar)[0],
  4162. sizeof( QUAD ),
  4163. TRUE,
  4164. TRUE,
  4165. FALSE,
  4166. TRUE,
  4167. BitmapAttribute );
  4168. BitmapIndex = *CurrentBitmapSize;
  4169. *CurrentBitmapSize += BITMAP_EXTEND_GRANULARITY;
  4170. *NumberOfFreeBits += BITMAP_EXTEND_GRANULARITY;
  4171. BitmapSizeInBytes += (BITMAP_EXTEND_GRANULARITY / 8);
  4172. SizeExtended = TRUE;
  4173. //
  4174. // We now know that the byte value we should start with is 0
  4175. // We cannot safely access the bitmap attribute any more because
  4176. // it may have moved.
  4177. //
  4178. NewByte = 0;
  4179. } else {
  4180. //
  4181. // Capture the current value of the byte for the index if we
  4182. // are not extending. Notice that we always take this from the
  4183. // unbiased original bitmap.
  4184. //
  4185. NewByte = BitmapBuffer[ BitmapIndex / 8 ];
  4186. }
  4187. //
  4188. // Check if we made the Bitmap go non-resident and if so then
  4189. // we will reinitialize the record allocation context and fall through
  4190. // to the non-resident case
  4191. //
  4192. if (SizeExtended && !NtfsIsAttributeResident( NtfsFoundAttribute( BitmapAttribute ))) {
  4193. NtfsUninitializeRecordAllocation( IrpContext,
  4194. RecordAllocationContext );
  4195. NtfsInitializeRecordAllocation( IrpContext,
  4196. DataScb,
  4197. BitmapAttribute,
  4198. BytesPerRecord,
  4199. ExtendGranularity,
  4200. TruncateGranularity,
  4201. RecordAllocationContext );
  4202. BitmapScb = RecordAllocationContext->BitmapScb;
  4203. ASSERT( BitmapScb != NULL );
  4204. //
  4205. // Snapshot the bitmap in case we modify it later on - we automatically
  4206. // snapped the data scb when we acquired it above
  4207. //
  4208. NtfsSnapshotScb( IrpContext, BitmapScb );
  4209. } else {
  4210. //
  4211. // Index is now the free bit so set the bit in the bitmap and also change
  4212. // the byte containing the bit in the attribute. Be careful to set the
  4213. // bit in the byte from the *original* bitmap, and not the one we merged
  4214. // the recently-deallocated bits with.
  4215. //
  4216. ASSERT( !FlagOn( NewByte, BitMask[BitmapIndex % 8]) );
  4217. SetFlag( NewByte, BitMask[BitmapIndex % 8] );
  4218. NtfsChangeAttributeValue( IrpContext,
  4219. DataScb->Fcb,
  4220. BitmapIndex / 8,
  4221. &NewByte,
  4222. 1,
  4223. FALSE,
  4224. FALSE,
  4225. FALSE,
  4226. FALSE,
  4227. BitmapAttribute );
  4228. }
  4229. } else {
  4230. //
  4231. // Snapshot the bitmap in case we modify it later on - we automatically
  4232. // snapped the data scb when we acquired it above
  4233. //
  4234. NtfsSnapshotScb( IrpContext, BitmapScb );
  4235. }
  4236. //
  4237. // Use a loop here to handle the extreme case where extending the allocation
  4238. // of the volume bitmap causes us to renter this routine recursively.
  4239. // In that case the top level guy will fail expecting the first bit to
  4240. // be available in the added clusters. Instead we will return to the
  4241. // top of this loop after extending the bitmap and just do our normal
  4242. // scan.
  4243. //
  4244. while (BitmapScb != NULL) {
  4245. ULONG SizeToPin;
  4246. ULONG HoleIndex;
  4247. BitmapBcb = NULL;
  4248. Rescan = FALSE;
  4249. HoleIndex = 0;
  4250. try {
  4251. if (!FlagOn( BitmapScb->ScbState, SCB_STATE_HEADER_INITIALIZED )) {
  4252. NtfsUpdateScbFromAttribute( IrpContext, BitmapScb, NULL );
  4253. }
  4254. //
  4255. // Snapshot the Scb values in case we change any of them.
  4256. //
  4257. NtfsSnapshotScb( IrpContext, BitmapScb );
  4258. //
  4259. // Create the stream file if not present.
  4260. //
  4261. if (BitmapScb->FileObject == NULL) {
  4262. NtfsCreateInternalAttributeStream( IrpContext,
  4263. BitmapScb,
  4264. FALSE,
  4265. &NtfsInternalUseFile[DEALLOCATERECORD_FILE_NUMBER] );
  4266. }
  4267. //
  4268. // Remember the starting offset for the page containing the hint.
  4269. //
  4270. BitmapCurrentOffset = (Hint / 8) & ~(PAGE_SIZE - 1);
  4271. Hint &= (BITS_PER_PAGE - 1);
  4272. BitmapSizeInPages = (ULONG) ROUND_TO_PAGES( BitmapSizeInBytes );
  4273. //
  4274. // Loop for the size of the bitmap plus one page, so that we will
  4275. // retry the initial page once starting from a hint offset of 0.
  4276. //
  4277. for (BitmapOffset = 0;
  4278. BitmapOffset <= BitmapSizeInPages;
  4279. BitmapOffset += PAGE_SIZE, BitmapCurrentOffset += PAGE_SIZE) {
  4280. ULONG LocalHint;
  4281. //
  4282. // If our current position is past the end of the bitmap
  4283. // then go to the beginning of the bitmap.
  4284. //
  4285. if (BitmapCurrentOffset >= BitmapSizeInBytes) {
  4286. BitmapCurrentOffset = 0;
  4287. }
  4288. //
  4289. // If this is the Mft and there are more than the system
  4290. // files in the first cluster of the Mft then move past
  4291. // the first cluster.
  4292. //
  4293. if ((BitmapCurrentOffset == 0) &&
  4294. (DataScb == Vcb->MftScb) &&
  4295. (Vcb->FileRecordsPerCluster > FIRST_USER_FILE_NUMBER) &&
  4296. (Hint < Vcb->FileRecordsPerCluster)) {
  4297. Hint = Vcb->FileRecordsPerCluster;
  4298. }
  4299. //
  4300. // Calculate the size to read from this point to the end of
  4301. // bitmap, or a page, whichever is less.
  4302. //
  4303. SizeToPin = BitmapSizeInBytes - BitmapCurrentOffset;
  4304. if (SizeToPin > PAGE_SIZE) { SizeToPin = PAGE_SIZE; }
  4305. //
  4306. // Unpin any Bcb from a previous loop.
  4307. //
  4308. if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); StuffAdded = FALSE; }
  4309. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  4310. //
  4311. // Read the desired bitmap page.
  4312. //
  4313. NtfsPinStream( IrpContext,
  4314. BitmapScb,
  4315. (LONGLONG)BitmapCurrentOffset,
  4316. SizeToPin,
  4317. &BitmapBcb,
  4318. &BitmapBuffer );
  4319. //
  4320. // Initialize the bitmap and search for a free bit.
  4321. //
  4322. RtlInitializeBitMap( &Bitmap, (PULONG) BitmapBuffer, SizeToPin * 8 );
  4323. StuffAdded = NtfsAddDeallocatedRecords( Vcb,
  4324. DataScb,
  4325. BitmapCurrentOffset * 8,
  4326. &Bitmap );
  4327. //
  4328. // We make a loop here to test whether the index found is
  4329. // within an Mft hole. We will always use a hole last.
  4330. //
  4331. LocalHint = Hint;
  4332. while (TRUE) {
  4333. BitmapIndex = RtlFindClearBits( &Bitmap, 1, LocalHint );
  4334. //
  4335. // If this is the Mft Scb then check if this is a hole.
  4336. //
  4337. if ((BitmapIndex != 0xffffffff) &&
  4338. (DataScb == Vcb->MftScb)) {
  4339. ULONG ThisIndex;
  4340. ULONG HoleCount;
  4341. ThisIndex = BitmapIndex + (BitmapCurrentOffset * 8);
  4342. if (NtfsIsMftIndexInHole( IrpContext,
  4343. Vcb,
  4344. ThisIndex,
  4345. &HoleCount )) {
  4346. //
  4347. // There is a hole. Save this index if we haven't
  4348. // already saved one. If we can't find an index
  4349. // not part of a hole we will use this instead of
  4350. // extending the file.
  4351. //
  4352. if (HoleIndex == 0) {
  4353. HoleIndex = ThisIndex;
  4354. }
  4355. //
  4356. // Now update the hint and try this page again
  4357. // unless the reaches to the end of the page.
  4358. //
  4359. if (BitmapIndex + HoleCount < SizeToPin * 8) {
  4360. //
  4361. // Bias the bitmap with these Mft holes
  4362. // so the bitmap package doesn't see
  4363. // them if it rescans from the
  4364. // start of the page.
  4365. //
  4366. if (!StuffAdded) {
  4367. PVOID NewBuffer;
  4368. NewBuffer = NtfsAllocatePool(PagedPool, SizeToPin );
  4369. RtlCopyMemory( NewBuffer, Bitmap.Buffer, SizeToPin );
  4370. Bitmap.Buffer = NewBuffer;
  4371. StuffAdded = TRUE;
  4372. }
  4373. RtlSetBits( &Bitmap,
  4374. BitmapIndex,
  4375. HoleCount );
  4376. LocalHint = BitmapIndex + HoleCount;
  4377. continue;
  4378. }
  4379. //
  4380. // Store a -1 in Index to show we don't have
  4381. // anything yet.
  4382. //
  4383. BitmapIndex = 0xffffffff;
  4384. }
  4385. }
  4386. break;
  4387. }
  4388. //
  4389. // If we found something, then leave the loop.
  4390. //
  4391. if (BitmapIndex != 0xffffffff) {
  4392. break;
  4393. }
  4394. //
  4395. // If we get here, we could not find anything in the page of
  4396. // the hint, so clear out the page offset from the hint.
  4397. //
  4398. Hint = 0;
  4399. }
  4400. //
  4401. // Now check if we have located a record that can be allocated, If not then extend
  4402. // the size of the bitmap by 64 bits.
  4403. //
  4404. if (BitmapIndex == 0xffffffff) {
  4405. //
  4406. // Cleanup from previous loop.
  4407. //
  4408. if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); StuffAdded = FALSE; }
  4409. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  4410. //
  4411. // If we have a hole index it means that we found a free record but
  4412. // it exists in a hole. Let's go back to this page and set up
  4413. // to fill in the hole. We will do an unsafe test of the
  4414. // defrag permitted flag. This is OK here because once set it
  4415. // will only go to the non-set state in order to halt
  4416. // future defragging.
  4417. //
  4418. if ((HoleIndex != 0) && FlagOn( Vcb->MftDefragState, VCB_MFT_DEFRAG_PERMITTED )) {
  4419. //
  4420. // Start by filling this hole.
  4421. //
  4422. NtfsCheckRecordStackUsage( IrpContext );
  4423. NtfsFillMftHole( IrpContext, Vcb, HoleIndex );
  4424. //
  4425. // Since filling the Mft hole may cause us to allocate
  4426. // a bit we will go back to the start of the routine
  4427. // and scan starting from the hole we just filled in.
  4428. //
  4429. Hint = HoleIndex;
  4430. Rescan = TRUE;
  4431. try_return( NOTHING );
  4432. } else {
  4433. //
  4434. // Allocate the first bit past the end of the bitmap.
  4435. //
  4436. BitmapIndex = *CurrentBitmapSize & (BITS_PER_PAGE - 1);
  4437. //
  4438. // Now advance the sizes and calculate the size in bytes to
  4439. // read.
  4440. //
  4441. *CurrentBitmapSize += BITMAP_EXTEND_GRANULARITY;
  4442. *NumberOfFreeBits += BITMAP_EXTEND_GRANULARITY;
  4443. //
  4444. // Calculate the size to read from this point to the end of
  4445. // bitmap.
  4446. //
  4447. BitmapSizeInBytes += BITMAP_EXTEND_GRANULARITY / 8;
  4448. BitmapCurrentOffset = BitmapScb->Header.FileSize.LowPart & ~(PAGE_SIZE - 1);
  4449. SizeToPin = BitmapSizeInBytes - BitmapCurrentOffset;
  4450. //
  4451. // Check for allocation first.
  4452. //
  4453. if (BitmapScb->Header.AllocationSize.LowPart < BitmapSizeInBytes) {
  4454. //
  4455. // Calculate number of clusters to next page boundary, and allocate
  4456. // that much.
  4457. //
  4458. ClusterCount = ((BitmapSizeInBytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
  4459. ClusterCount = LlClustersFromBytes( Vcb,
  4460. ((ULONG) ClusterCount - BitmapScb->Header.AllocationSize.LowPart) );
  4461. NtfsCheckRecordStackUsage( IrpContext );
  4462. NtfsAddAllocation( IrpContext,
  4463. BitmapScb->FileObject,
  4464. BitmapScb,
  4465. LlClustersFromBytes( Vcb,
  4466. BitmapScb->Header.AllocationSize.QuadPart ),
  4467. ClusterCount,
  4468. FALSE,
  4469. NULL );
  4470. }
  4471. //
  4472. // Tell the cache manager about the new file size.
  4473. //
  4474. BitmapScb->Header.FileSize.QuadPart = BitmapSizeInBytes;
  4475. CcSetFileSizes( BitmapScb->FileObject,
  4476. (PCC_FILE_SIZES)&BitmapScb->Header.AllocationSize );
  4477. if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); StuffAdded = FALSE; }
  4478. //
  4479. // Read the desired bitmap page.
  4480. //
  4481. NtfsPinStream( IrpContext,
  4482. BitmapScb,
  4483. (LONGLONG) BitmapCurrentOffset,
  4484. SizeToPin,
  4485. &BitmapBcb,
  4486. &BitmapBuffer );
  4487. //
  4488. // If we have just moved to the next page of the bitmap then
  4489. // set this page dirty so it doesn't leave memory while we
  4490. // twiddle valid data length. Otherwise it will be reread after
  4491. // we advance valid data and we will get garbage data from the
  4492. // disk.
  4493. //
  4494. if (FlagOn( BitmapSizeInBytes, PAGE_SIZE - 1 ) <= BITMAP_EXTEND_GRANULARITY / 8) {
  4495. *((volatile ULONG *) BitmapBuffer) = *((PULONG) BitmapBuffer);
  4496. CcSetDirtyPinnedData( BitmapBcb, NULL );
  4497. }
  4498. //
  4499. // Initialize the bitmap.
  4500. //
  4501. RtlInitializeBitMap( &Bitmap, (PULONG) BitmapBuffer, SizeToPin * 8 );
  4502. //
  4503. // Now look up a free bit in this page. We don't trust
  4504. // the index we already had since growing the MftBitmap
  4505. // allocation may have caused another bit in the bitmap
  4506. // to be set.
  4507. //
  4508. BitmapIndex = RtlFindClearBits( &Bitmap, 1, BitmapIndex );
  4509. //
  4510. // Update the ValidDataLength, now that we have read (and possibly
  4511. // zeroed) the page.
  4512. //
  4513. BitmapScb->Header.ValidDataLength.QuadPart = BitmapSizeInBytes;
  4514. NtfsWriteFileSizes( IrpContext,
  4515. BitmapScb,
  4516. &BitmapScb->Header.ValidDataLength.QuadPart,
  4517. TRUE,
  4518. TRUE,
  4519. TRUE );
  4520. }
  4521. }
  4522. //
  4523. // We can only make this check if it is not restart, because we have
  4524. // no idea whether the update is applied or not. Raise corrupt if
  4525. // the bits are not clear to prevent double allocation.
  4526. //
  4527. if (!RtlAreBitsClear( &Bitmap, BitmapIndex, 1 )) {
  4528. ASSERTMSG("Cannot set bits that are not clear ", FALSE );
  4529. NtfsRaiseStatus( IrpContext, STATUS_DISK_CORRUPT_ERROR, NULL, NULL );
  4530. }
  4531. //
  4532. // Set the bit by calling the same routine used at restart.
  4533. // But first check if we should revert back to the orginal bitmap
  4534. // buffer.
  4535. //
  4536. if (StuffAdded) {
  4537. NtfsFreePool( Bitmap.Buffer ); StuffAdded = FALSE;
  4538. Bitmap.Buffer = (PULONG) BitmapBuffer;
  4539. }
  4540. //
  4541. // Now log this change as well.
  4542. //
  4543. {
  4544. BITMAP_RANGE BitmapRange;
  4545. BitmapRange.BitMapOffset = BitmapIndex;
  4546. BitmapRange.NumberOfBits = 1;
  4547. (VOID) NtfsWriteLog( IrpContext,
  4548. BitmapScb,
  4549. BitmapBcb,
  4550. SetBitsInNonresidentBitMap,
  4551. &BitmapRange,
  4552. sizeof(BITMAP_RANGE),
  4553. ClearBitsInNonresidentBitMap,
  4554. &BitmapRange,
  4555. sizeof(BITMAP_RANGE),
  4556. BitmapCurrentOffset,
  4557. 0,
  4558. 0,
  4559. SizeToPin );
  4560. NtfsRestartSetBitsInBitMap( IrpContext,
  4561. &Bitmap,
  4562. BitmapIndex,
  4563. 1 );
  4564. }
  4565. try_exit: NOTHING;
  4566. } finally {
  4567. DebugUnwind( NtfsAllocateRecord );
  4568. //
  4569. // Reinitialize the context on any error.
  4570. //
  4571. if (AbnormalTermination()) {
  4572. RecordAllocationContext->CurrentBitmapSize = MAXULONG;
  4573. }
  4574. if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); StuffAdded = FALSE; }
  4575. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  4576. }
  4577. //
  4578. // If we added Mft allocation then go to the top of the loop.
  4579. //
  4580. if (Rescan) { continue; }
  4581. //
  4582. // The Index at this point is actually relative, so convert it to absolute
  4583. // before rejoining common code.
  4584. //
  4585. BitmapIndex += (BitmapCurrentOffset * 8);
  4586. //
  4587. // Always break out in the normal case.
  4588. //
  4589. break;
  4590. }
  4591. //
  4592. // Now that we've located an index we can subtract the number of free bits in the bitmap
  4593. //
  4594. *NumberOfFreeBits -= 1;
  4595. //
  4596. // Check if we need to extend the data stream.
  4597. //
  4598. DataOffset = UInt32x32To64( BitmapIndex + 1, BytesPerRecord );
  4599. //
  4600. // Now check if we are extending the file. We update the file size and
  4601. // valid data now.
  4602. //
  4603. if (DataOffset > DataScb->Header.FileSize.QuadPart) {
  4604. //
  4605. // Check for allocation first.
  4606. //
  4607. if (DataOffset > DataScb->Header.AllocationSize.QuadPart) {
  4608. //
  4609. // We want to allocate up to the next extend granularity
  4610. // boundary.
  4611. //
  4612. ClusterCount = UInt32x32To64( (BitmapIndex + ExtendGranularity) & ~(ExtendGranularity - 1),
  4613. BytesPerRecord );
  4614. ClusterCount -= DataScb->Header.AllocationSize.QuadPart;
  4615. ClusterCount = LlClustersFromBytesTruncate( Vcb, ClusterCount );
  4616. NtfsCheckRecordStackUsage( IrpContext );
  4617. NtfsAddAllocation( IrpContext,
  4618. DataScb->FileObject,
  4619. DataScb,
  4620. LlClustersFromBytes( Vcb,
  4621. DataScb->Header.AllocationSize.QuadPart ),
  4622. ClusterCount,
  4623. FALSE,
  4624. NULL );
  4625. }
  4626. DataScb->Header.FileSize.QuadPart = DataOffset;
  4627. DataScb->Header.ValidDataLength.QuadPart = DataOffset;
  4628. NtfsWriteFileSizes( IrpContext,
  4629. DataScb,
  4630. &DataScb->Header.ValidDataLength.QuadPart,
  4631. TRUE,
  4632. TRUE,
  4633. TRUE );
  4634. //
  4635. // Tell the cache manager about the new file size.
  4636. //
  4637. CcSetFileSizes( DataScb->FileObject,
  4638. (PCC_FILE_SIZES)&DataScb->Header.AllocationSize );
  4639. //
  4640. // If we didn't extend the file then we have used a free file record in the file.
  4641. // Update our bookeeping count for free file records.
  4642. //
  4643. } else if (DataScb == Vcb->MftScb) {
  4644. DataScb->ScbType.Mft.FreeRecordChange -= 1;
  4645. Vcb->MftFreeRecords -= 1;
  4646. }
  4647. //
  4648. // Now determine if we extended the index of the last set bit
  4649. //
  4650. if ((LONG)BitmapIndex > RecordAllocationContext->IndexOfLastSetBit) {
  4651. RecordAllocationContext->IndexOfLastSetBit = BitmapIndex;
  4652. }
  4653. NtfsReleaseScb( IrpContext, DataScb );
  4654. } finally {
  4655. if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); }
  4656. }
  4657. //
  4658. // Update our hint with this value.
  4659. //
  4660. RecordAllocationContext->StartingHint = BitmapIndex;
  4661. //
  4662. // We shouldn't allocate within the same byte as the reserved index for
  4663. // the Mft.
  4664. //
  4665. ASSERT( (DataScb != DataScb->Vcb->MftScb) ||
  4666. ((BitmapIndex & ~7) != (DataScb->ScbType.Mft.ReservedIndex & ~7)) );
  4667. DebugTrace( -1, Dbg, ("NtfsAllocateRecord -> %08lx\n", BitmapIndex) );
  4668. return BitmapIndex;
  4669. }
VOID
NtfsDeallocateRecord (
    IN PIRP_CONTEXT IrpContext,
    IN PRECORD_ALLOCATION_CONTEXT RecordAllocationContext,
    IN ULONG Index,
    IN PATTRIBUTE_ENUMERATION_CONTEXT BitmapAttribute
    )

/*++

Routine Description:

    This routine is used to deallocate a record from the specified record
    allocation context.  The bit for the record is cleared in the backing
    bitmap (resident attribute value or nonresident bitmap stream), the
    change is logged so it can be redone/undone at restart, and the index
    is remembered in the "recently deallocated" queues so it will not be
    reused within the current transaction.

    If necessary this routine will also shrink the bitmap attribute and
    the data scb (according to the truncation granularity used to initialize
    the allocation context).

Arguments:

    RecordAllocationContext - Supplies the record allocation context used
        in this operation

    Index - Supplies the index of the record to deallocate, zero based.

    BitmapAttribute - Supplies the enumeration context for the bitmap
        attribute.  This parameter is ignored if the bitmap attribute is
        non resident, in which case we create an scb for the attribute and
        store a pointer to it in the record allocation context.

Return Value:

    None.

--*/

{
    PSCB DataScb;
    IO_STATUS_BLOCK Iosb;

    PAGED_CODE();

    ASSERT_IRP_CONTEXT( IrpContext );

    DebugTrace( +1, Dbg, ("NtfsDeallocateRecord\n") );

    //
    //  Synchronize by acquiring the data scb exclusive, as an "end resource".
    //  Then use try-finally to insure we free it up.
    //

    DataScb = RecordAllocationContext->DataScb;
    NtfsAcquireExclusiveScb( IrpContext, DataScb );

    try {

        PVCB Vcb;
        PSCB BitmapScb;
        RTL_BITMAP Bitmap;
        PLONG IndexOfLastSetBit;
        ULONG BytesPerRecord;
        ULONG TruncateGranularity;

        //
        //  ClearIndex receives the start of the trailing clear run when the
        //  bit being cleared was the last set bit; it is only consumed under
        //  that same condition below.
        //

        ULONG ClearIndex;
        ULONG BitmapOffset = 0;

        Vcb = DataScb->Vcb;

        {
            ULONG ExtendGranularity;

            //
            //  Remember the current values in the record context structure.
            //

            BytesPerRecord = RecordAllocationContext->BytesPerRecord;
            TruncateGranularity = RecordAllocationContext->TruncateGranularity;
            ExtendGranularity = RecordAllocationContext->ExtendGranularity;

            //
            //  See if someone made the bitmap nonresident, and we still think
            //  it is resident.  If so, we must uninitialize and insure reinitialization
            //  below.
            //

            if ((RecordAllocationContext->BitmapScb == NULL)
                && !NtfsIsAttributeResident(NtfsFoundAttribute(BitmapAttribute))) {

                NtfsUninitializeRecordAllocation( IrpContext,
                                                  RecordAllocationContext );

                //
                //  MAXULONG in CurrentBitmapSize marks the context as needing
                //  reinitialization (see test just below).
                //

                RecordAllocationContext->CurrentBitmapSize = MAXULONG;
            }

            //
            //  Reinitialize the record context structure if necessary.
            //

            if (RecordAllocationContext->CurrentBitmapSize == MAXULONG) {

                NtfsInitializeRecordAllocation( IrpContext,
                                                DataScb,
                                                BitmapAttribute,
                                                BytesPerRecord,
                                                ExtendGranularity,
                                                TruncateGranularity,
                                                RecordAllocationContext );
            }
        }

        BitmapScb = RecordAllocationContext->BitmapScb;
        IndexOfLastSetBit = &RecordAllocationContext->IndexOfLastSetBit;

        //
        //  We will do different operations based on whether the bitmap is resident or nonresident
        //  The first case will handle the resident bitmap
        //

        if (BitmapScb == NULL) {

            UCHAR NewByte;

            //
            //  Initialize the local bitmap
            //

            RtlInitializeBitMap( &Bitmap,
                                 (PULONG)NtfsAttributeValue( NtfsFoundAttribute( BitmapAttribute )),
                                 RecordAllocationContext->CurrentBitmapSize );

            //
            //  And clear the indicated bit, and also change the byte containing the bit in the
            //  attribute.  Only the single byte holding the bit is rewritten.
            //

            NewByte = ((PUCHAR)Bitmap.Buffer)[ Index / 8 ];

            ASSERT( FlagOn( NewByte, BitMask[Index % 8]) );

            ClearFlag( NewByte, BitMask[Index % 8] );

            NtfsChangeAttributeValue( IrpContext,
                                      DataScb->Fcb,
                                      Index / 8,
                                      &NewByte,
                                      1,
                                      FALSE,
                                      FALSE,
                                      FALSE,
                                      FALSE,
                                      BitmapAttribute );

            //
            //  Now if the bit set just cleared is the same as the index for the last set bit
            //  then we must compute a new last set bit
            //

            if (Index == (ULONG)*IndexOfLastSetBit) {

                RtlFindLastBackwardRunClear( &Bitmap, Index, &ClearIndex );
            }

        } else {

            PBCB BitmapBcb = NULL;

            try {

                ULONG RelativeIndex;
                ULONG SizeToPin;
                PVOID BitmapBuffer;

                //
                //  Snapshot the Scb values in case we change any of them.
                //

                if (!FlagOn( BitmapScb->ScbState, SCB_STATE_HEADER_INITIALIZED )) {

                    NtfsUpdateScbFromAttribute( IrpContext, BitmapScb, NULL );
                }

                NtfsSnapshotScb( IrpContext, BitmapScb );

                //
                //  Create the stream file if not present.
                //

                if (BitmapScb->FileObject == NULL) {

                    NtfsCreateInternalAttributeStream( IrpContext,
                                                       BitmapScb,
                                                       FALSE,
                                                       &NtfsInternalUseFile[DEALLOCATERECORD_FILE_NUMBER] );
                }

                //
                //  Calculate offset and relative index of the bit we will deallocate,
                //  from the nearest page boundary.  (The '/' binds tighter than '&',
                //  so this is the byte offset rounded down to a page boundary.)
                //

                BitmapOffset = Index /8 & ~(PAGE_SIZE - 1);
                RelativeIndex = Index & (BITS_PER_PAGE - 1);

                //
                //  Calculate the size to read from this point to the end of
                //  bitmap, capped at one page.
                //

                SizeToPin = (RecordAllocationContext->CurrentBitmapSize / 8) - BitmapOffset;

                if (SizeToPin > PAGE_SIZE) {

                    SizeToPin = PAGE_SIZE;
                }

                NtfsPinStream( IrpContext,
                               BitmapScb,
                               BitmapOffset,
                               SizeToPin,
                               &BitmapBcb,
                               &BitmapBuffer );

                RtlInitializeBitMap( &Bitmap, BitmapBuffer, SizeToPin * 8 );

                //
                //  We can only make this check if it is not restart, because we have
                //  no idea whether the update is applied or not.  Raise corrupt if
                //  we are trying to clear bits which aren't set.
                //

                if (!RtlAreBitsSet( &Bitmap, RelativeIndex, 1 )) {

                    ASSERTMSG("Cannot clear bits that are not set ", FALSE );
                    NtfsRaiseStatus( IrpContext, STATUS_DISK_CORRUPT_ERROR, NULL, NULL );
                }

                //
                //  Now log this change as well.  The log record carries both the
                //  redo (clear) and undo (set) operations; it must be written
                //  before the bitmap page is actually modified below.
                //

                {
                    BITMAP_RANGE BitmapRange;

                    BitmapRange.BitMapOffset = RelativeIndex;
                    BitmapRange.NumberOfBits = 1;

                    (VOID) NtfsWriteLog( IrpContext,
                                         BitmapScb,
                                         BitmapBcb,
                                         ClearBitsInNonresidentBitMap,
                                         &BitmapRange,
                                         sizeof(BITMAP_RANGE),
                                         SetBitsInNonresidentBitMap,
                                         &BitmapRange,
                                         sizeof(BITMAP_RANGE),
                                         BitmapOffset,
                                         0,
                                         0,
                                         SizeToPin );
                }

                //
                //  Clear the bit by calling the same routine used at restart.
                //

                NtfsRestartClearBitsInBitMap( IrpContext,
                                              &Bitmap,
                                              RelativeIndex,
                                              1 );

                //
                //  Now if the bit set just cleared is the same as the index for the last set bit
                //  then we must compute a new last set bit
                //

                if (Index == (ULONG)*IndexOfLastSetBit) {

                    ULONG ClearLength;

                    ClearLength = RtlFindLastBackwardRunClear( &Bitmap, RelativeIndex, &ClearIndex );

                    //
                    //  If the last page of the bitmap is clear, then loop to
                    //  find the first set bit in the previous page(s).
                    //  When we reach the first page then we exit.  The ClearBit
                    //  value will be 0.  Earlier pages are only mapped (read-only),
                    //  not pinned for write.
                    //

                    while ((ClearLength == (RelativeIndex + 1)) &&
                           (BitmapOffset != 0)) {

                        BitmapOffset -= PAGE_SIZE;
                        RelativeIndex = BITS_PER_PAGE - 1;

                        NtfsUnpinBcb( IrpContext, &BitmapBcb );

                        NtfsMapStream( IrpContext,
                                       BitmapScb,
                                       BitmapOffset,
                                       PAGE_SIZE,
                                       &BitmapBcb,
                                       &BitmapBuffer );

                        RtlInitializeBitMap( &Bitmap, BitmapBuffer, BITS_PER_PAGE );

                        ClearLength = RtlFindLastBackwardRunClear( &Bitmap, RelativeIndex, &ClearIndex );
                    }
                }

            } finally {

                DebugUnwind( NtfsDeallocateRecord );

                NtfsUnpinBcb( IrpContext, &BitmapBcb );
            }
        }

        RecordAllocationContext->NumberOfFreeBits += 1;

        //
        //  Now decide if we need to truncate the allocation.  First check if we need to
        //  set the last set bit index and then check if the new last set bit index is
        //  small enough that we should now truncate the allocation.  We will truncate
        //  if the last set bit index plus the trucate granularity is smaller than
        //  the current number of records in the data scb.
        //
        //  **** For now, we will not truncate the Mft, since we do not synchronize
        //       reads and writes, and a truncate can collide with the Lazy Writer.
        //

        if (Index == (ULONG)*IndexOfLastSetBit) {

            //
            //  ClearIndex is page-relative, so add back the page base (in bits)
            //  to produce the absolute index of the new last set bit.
            //

            *IndexOfLastSetBit = ClearIndex - 1 + (BitmapOffset * 8);

            if ((DataScb != Vcb->MftScb) &&
                (DataScb->Header.AllocationSize.QuadPart >
                 Int32x32To64( *IndexOfLastSetBit + 1 + TruncateGranularity, BytesPerRecord ))) {

                VCN StartingVcn;
                LONGLONG EndOfIndexOffset;
                LONGLONG TruncatePoint;

                //
                //  We can get into a situation where there is so much extra allocation that
                //  we can't delete it without overflowing the log file.  We can't perform
                //  checkpoints in this path so we will forget about truncating in
                //  this path unless this is the first truncate of the data scb.  We
                //  only deallocate a small piece of the allocation.
                //

                TruncatePoint =
                EndOfIndexOffset = Int32x32To64( *IndexOfLastSetBit + 1, BytesPerRecord );

                if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_EXCESS_LOG_FULL )) {

                    //
                    //  Use a fudge factor of 8 to allow for the overused bits in
                    //  the snapshot allocation field.
                    //

                    if (DataScb->Header.AllocationSize.QuadPart + 8 >= DataScb->ScbSnapshot->AllocationSize) {

                        //
                        //  First truncate of this scb in the transaction: limit the
                        //  amount removed to bound the log usage.
                        //

                        TruncatePoint = DataScb->Header.AllocationSize.QuadPart - (MAXIMUM_RUNS_AT_ONCE * Vcb->BytesPerCluster);

                        if (TruncatePoint < EndOfIndexOffset) {

                            TruncatePoint = EndOfIndexOffset;
                        }

                    } else {

                        //
                        //  Already truncated once while the log is stressed; skip
                        //  further truncation by using the current allocation size.
                        //

                        TruncatePoint = DataScb->Header.AllocationSize.QuadPart;
                    }
                }

                //
                //  Force deleted piece to flush first so dirty page dumps are
                //  accurate.  This is only neccessary for indexes
                //

                if (DataScb->AttributeTypeCode == $INDEX_ALLOCATION ) {

                    ASSERT( DataScb->Header.PagingIoResource == NULL );
                    CcFlushCache( &DataScb->NonpagedScb->SegmentObject, (PLARGE_INTEGER)&TruncatePoint, (ULONG)(DataScb->Header.FileSize.QuadPart - TruncatePoint), &Iosb );
                    NtfsNormalizeAndCleanupTransaction( IrpContext, &Iosb.Status, TRUE, STATUS_UNEXPECTED_IO_ERROR );
                }

                StartingVcn = LlClustersFromBytes( Vcb, TruncatePoint );

                NtfsDeleteAllocation( IrpContext,
                                      DataScb->FileObject,
                                      DataScb,
                                      StartingVcn,
                                      MAXLONGLONG,
                                      TRUE,
                                      FALSE );

                //
                //  Now truncate the file sizes to the end of the last allocated record.
                //

                DataScb->Header.ValidDataLength.QuadPart =
                DataScb->Header.FileSize.QuadPart = EndOfIndexOffset;

                NtfsWriteFileSizes( IrpContext,
                                    DataScb,
                                    &DataScb->Header.ValidDataLength.QuadPart,
                                    FALSE,
                                    TRUE,
                                    TRUE );

                //
                //  Tell the cache manager about the new file size.
                //

                CcSetFileSizes( DataScb->FileObject,
                                (PCC_FILE_SIZES)&DataScb->Header.AllocationSize );

                //
                //  We have truncated the index stream.  Update the change count
                //  so that we won't trust any cached index entry information.
                //

                DataScb->ScbType.Index.ChangeCount += 1;
            }
        }

        //
        //  As our final task we need to add this index to the recently deallocated
        //  queues for the Scb and the Irp Context.  First scan through the IrpContext queue
        //  looking for a matching Scb.  If we don't find one then we allocate a new one and insert
        //  it in the appropriate queues and lastly we add our index to the entry
        //

        {
            PDEALLOCATED_RECORDS DeallocatedRecords;
            PLIST_ENTRY Links;

            //
            //  After the following loop either we've found an existing record in the irp context
            //  queue for the appropriate scb or deallocated records is null and we know we need
            //  to create a record
            //

            DeallocatedRecords = NULL;
            for (Links = IrpContext->RecentlyDeallocatedQueue.Flink;
                 Links != &IrpContext->RecentlyDeallocatedQueue;
                 Links = Links->Flink) {

                DeallocatedRecords = CONTAINING_RECORD( Links, DEALLOCATED_RECORDS, IrpContextLinks );

                if (DeallocatedRecords->Scb == DataScb) {

                    break;
                }

                DeallocatedRecords = NULL;
            }

            //
            //  If we need to create a new record then allocate a record and insert it in both queues
            //  and initialize its other fields
            //

            if (DeallocatedRecords == NULL) {

                DeallocatedRecords = (PDEALLOCATED_RECORDS)ExAllocateFromPagedLookasideList( &NtfsDeallocatedRecordsLookasideList );

                InsertTailList( &DataScb->ScbType.Index.RecentlyDeallocatedQueue, &DeallocatedRecords->ScbLinks );
                InsertTailList( &IrpContext->RecentlyDeallocatedQueue, &DeallocatedRecords->IrpContextLinks );
                DeallocatedRecords->Scb = DataScb;
                DeallocatedRecords->NumberOfEntries = DEALLOCATED_RECORD_ENTRIES;
                DeallocatedRecords->NextFreeEntry = 0;
            }

            //
            //  At this point deallocated records points to a record that we are to fill in.
            //  We need to check whether there is space to add this entry.  Otherwise we need
            //  to allocate a larger deallocated record structure from pool.
            //

            if (DeallocatedRecords->NextFreeEntry == DeallocatedRecords->NumberOfEntries) {

                PDEALLOCATED_RECORDS NewDeallocatedRecords;
                ULONG BytesInEntryArray;

                //
                //  Double the number of entries in the current structure and
                //  allocate directly from pool.
                //

                BytesInEntryArray = 2 * DeallocatedRecords->NumberOfEntries * sizeof( ULONG );
                NewDeallocatedRecords = NtfsAllocatePool( PagedPool,
                                                          DEALLOCATED_RECORDS_HEADER_SIZE + BytesInEntryArray );
                RtlZeroMemory( NewDeallocatedRecords, DEALLOCATED_RECORDS_HEADER_SIZE + BytesInEntryArray );

                //
                //  Initialize the structure by copying the existing structure.  Then
                //  update the number of entries field.  (BytesInEntryArray / 2 is
                //  exactly the old entry-array size.)
                //

                RtlCopyMemory( NewDeallocatedRecords,
                               DeallocatedRecords,
                               DEALLOCATED_RECORDS_HEADER_SIZE + (BytesInEntryArray / 2) );
                NewDeallocatedRecords->NumberOfEntries = DeallocatedRecords->NumberOfEntries * 2;

                //
                //  Remove the previous structure from the list and insert the new structure.
                //

                RemoveEntryList( &DeallocatedRecords->ScbLinks );
                RemoveEntryList( &DeallocatedRecords->IrpContextLinks );

                InsertTailList( &DataScb->ScbType.Index.RecentlyDeallocatedQueue,
                                &NewDeallocatedRecords->ScbLinks );
                InsertTailList( &IrpContext->RecentlyDeallocatedQueue,
                                &NewDeallocatedRecords->IrpContextLinks );

                //
                //  Deallocate the previous structure and use the new structure in its place.
                //  Lookaside-sized structures go back to the lookaside list; grown ones
                //  came from pool and are freed there.
                //

                if (DeallocatedRecords->NumberOfEntries == DEALLOCATED_RECORD_ENTRIES) {

                    ExFreeToPagedLookasideList( &NtfsDeallocatedRecordsLookasideList, DeallocatedRecords );

                } else {

                    NtfsFreePool( DeallocatedRecords );
                }

                DeallocatedRecords = NewDeallocatedRecords;
            }

            ASSERT( DeallocatedRecords->NextFreeEntry < DeallocatedRecords->NumberOfEntries );

            DeallocatedRecords->Index[DeallocatedRecords->NextFreeEntry] = Index;
            DeallocatedRecords->NextFreeEntry += 1;
        }

    } finally {

        NtfsReleaseScb( IrpContext, DataScb );
    }

    //
    //  Check if this is the lowest index we've deallocated.  It will be a future starting
    //  hint if so.
    //

    if (RecordAllocationContext->LowestDeallocatedIndex > Index) {

        RecordAllocationContext->LowestDeallocatedIndex = Index;
    }

    DebugTrace( -1, Dbg, ("NtfsDeallocateRecord -> VOID\n") );

    return;
}
  5078. VOID
  5079. NtfsReserveMftRecord (
  5080. IN PIRP_CONTEXT IrpContext,
  5081. IN OUT PVCB Vcb,
  5082. IN PATTRIBUTE_ENUMERATION_CONTEXT BitmapAttribute
  5083. )
  5084. /*++
  5085. Routine Description:
  5086. This routine reserves a record, without actually allocating it, so that the
  5087. record may be allocated later via NtfsAllocateReservedRecord. This support
  5088. is used, for example, to reserve a record for describing Mft extensions in
  5089. the current Mft mapping. Only one record may be reserved at a time.
  5090. Note that even though the reserved record number is returned, it may not
  5091. be used until it is allocated.
  5092. Arguments:
  5093. Vcb - This is the Vcb for the volume. We update flags in the Vcb on
  5094. completion of this operation.
  5095. BitmapAttribute - Supplies the enumeration context for the bitmap
  5096. attribute. This parameter is ignored if the bitmap attribute is
  5097. non resident, in which case we create an scb for the attribute and
  5098. store a pointer to it in the record allocation context.
  5099. Return Value:
  5100. None - We update the Vcb and MftScb during this operation.
  5101. --*/
  5102. {
  5103. PSCB DataScb;
  5104. RTL_BITMAP Bitmap;
  5105. BOOLEAN StuffAdded = FALSE;
  5106. PBCB BitmapBcb = NULL;
  5107. ASSERT_IRP_CONTEXT( IrpContext );
  5108. PAGED_CODE();
  5109. DebugTrace( +1, Dbg, ("NtfsReserveMftRecord\n") );
  5110. //
  5111. // Synchronize by acquiring the data scb exclusive, as an "end resource".
  5112. // Then use try-finally to insure we free it up.
  5113. //
  5114. DataScb = Vcb->MftScb;
  5115. NtfsAcquireExclusiveScb( IrpContext, DataScb );
  5116. try {
  5117. PSCB BitmapScb;
  5118. PULONG CurrentBitmapSize;
  5119. ULONG BitmapSizeInBytes;
  5120. LONGLONG EndOfIndexOffset;
  5121. LONGLONG ClusterCount;
  5122. ULONG Index;
  5123. ULONG BitOffset;
  5124. PVOID BitmapBuffer;
  5125. UCHAR BitmapByte = 0;
  5126. ULONG SizeToPin;
  5127. ULONG BitmapCurrentOffset;
  5128. //
  5129. // See if someone made the bitmap nonresident, and we still think
  5130. // it is resident. If so, we must uninitialize and insure reinitialization
  5131. // below.
  5132. //
  5133. {
  5134. ULONG BytesPerRecord = DataScb->ScbType.Index.RecordAllocationContext.BytesPerRecord;
  5135. ULONG ExtendGranularity = DataScb->ScbType.Index.RecordAllocationContext.ExtendGranularity;
  5136. if ((DataScb->ScbType.Index.RecordAllocationContext.BitmapScb == NULL) &&
  5137. !NtfsIsAttributeResident( NtfsFoundAttribute( BitmapAttribute ))) {
  5138. NtfsUninitializeRecordAllocation( IrpContext,
  5139. &DataScb->ScbType.Index.RecordAllocationContext );
  5140. DataScb->ScbType.Index.RecordAllocationContext.CurrentBitmapSize = MAXULONG;
  5141. }
  5142. //
  5143. // Reinitialize the record context structure if necessary.
  5144. //
  5145. if (DataScb->ScbType.Index.RecordAllocationContext.CurrentBitmapSize == MAXULONG) {
  5146. NtfsInitializeRecordAllocation( IrpContext,
  5147. DataScb,
  5148. BitmapAttribute,
  5149. BytesPerRecord,
  5150. ExtendGranularity,
  5151. ExtendGranularity,
  5152. &DataScb->ScbType.Index.RecordAllocationContext );
  5153. }
  5154. }
  5155. BitmapScb = DataScb->ScbType.Index.RecordAllocationContext.BitmapScb;
  5156. CurrentBitmapSize = &DataScb->ScbType.Index.RecordAllocationContext.CurrentBitmapSize;
  5157. BitmapSizeInBytes = *CurrentBitmapSize / 8;
  5158. //
  5159. // Snapshot the bitmap before possibly modifying it - we own it exclusive through
  5160. // the data scb since they share the same resource but have not snapped it before
  5161. //
  5162. NtfsSnapshotScb( IrpContext, BitmapScb );
  5163. //
  5164. // Loop through the entire bitmap. We always start from the first user
  5165. // file number as our starting point.
  5166. //
  5167. BitOffset = FIRST_USER_FILE_NUMBER;
  5168. for (BitmapCurrentOffset = 0;
  5169. BitmapCurrentOffset < BitmapSizeInBytes;
  5170. BitmapCurrentOffset += PAGE_SIZE) {
  5171. //
  5172. // Calculate the size to read from this point to the end of
  5173. // bitmap, or a page, whichever is less.
  5174. //
  5175. SizeToPin = BitmapSizeInBytes - BitmapCurrentOffset;
  5176. if (SizeToPin > PAGE_SIZE) { SizeToPin = PAGE_SIZE; }
  5177. //
  5178. // Unpin any Bcb from a previous loop.
  5179. //
  5180. if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); StuffAdded = FALSE; }
  5181. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  5182. //
  5183. // Read the desired bitmap page.
  5184. //
  5185. NtfsMapStream( IrpContext,
  5186. BitmapScb,
  5187. BitmapCurrentOffset,
  5188. SizeToPin,
  5189. &BitmapBcb,
  5190. &BitmapBuffer );
  5191. //
  5192. // Initialize the bitmap and search for a free bit.
  5193. //
  5194. RtlInitializeBitMap( &Bitmap, BitmapBuffer, SizeToPin * 8 );
  5195. StuffAdded = NtfsAddDeallocatedRecords( Vcb,
  5196. DataScb,
  5197. BitmapCurrentOffset * 8,
  5198. &Bitmap );
  5199. Index = RtlFindClearBits( &Bitmap, 1, BitOffset );
  5200. //
  5201. // If we found something, then leave the loop.
  5202. //
  5203. if (Index != 0xffffffff) {
  5204. //
  5205. // Remember the byte containing the reserved index.
  5206. //
  5207. BitmapByte = ((PCHAR) Bitmap.Buffer)[Index / 8];
  5208. break;
  5209. }
  5210. //
  5211. // For each subsequent page the page offset is zero.
  5212. //
  5213. BitOffset = 0;
  5214. }
  5215. //
  5216. // Now check if we have located a record that can be allocated, If not then extend
  5217. // the size of the bitmap by 64 bits.
  5218. //
  5219. if (Index == 0xffffffff) {
  5220. //
  5221. // Cleanup from previous loop.
  5222. //
  5223. if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); StuffAdded = FALSE; }
  5224. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  5225. //
  5226. // Calculate the page offset for the next page to pin.
  5227. //
  5228. BitmapCurrentOffset = BitmapSizeInBytes & ~(PAGE_SIZE - 1);
  5229. //
  5230. // Calculate the index of next file record to allocate.
  5231. //
  5232. Index = *CurrentBitmapSize;
  5233. //
  5234. // Now advance the sizes and calculate the size in bytes to
  5235. // read.
  5236. //
  5237. *CurrentBitmapSize += BITMAP_EXTEND_GRANULARITY;
  5238. DataScb->ScbType.Index.RecordAllocationContext.NumberOfFreeBits += BITMAP_EXTEND_GRANULARITY;
  5239. //
  5240. // Calculate the new size of the bitmap in bits and check if we must grow
  5241. // the allocation.
  5242. //
  5243. BitmapSizeInBytes = *CurrentBitmapSize / 8;
  5244. //
  5245. // Check for allocation first.
  5246. //
  5247. if (BitmapScb->Header.AllocationSize.LowPart < BitmapSizeInBytes) {
  5248. //
  5249. // Calculate number of clusters to next page boundary, and allocate
  5250. // that much.
  5251. //
  5252. ClusterCount = ((BitmapSizeInBytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
  5253. ClusterCount = LlClustersFromBytes( Vcb,
  5254. ((ULONG) ClusterCount - BitmapScb->Header.AllocationSize.LowPart) );
  5255. NtfsAddAllocation( IrpContext,
  5256. BitmapScb->FileObject,
  5257. BitmapScb,
  5258. LlClustersFromBytes( Vcb,
  5259. BitmapScb->Header.AllocationSize.QuadPart ),
  5260. ClusterCount,
  5261. FALSE,
  5262. NULL );
  5263. }
  5264. //
  5265. // Tell the cache manager about the new file size.
  5266. //
  5267. BitmapScb->Header.FileSize.QuadPart = BitmapSizeInBytes;
  5268. CcSetFileSizes( BitmapScb->FileObject,
  5269. (PCC_FILE_SIZES)&BitmapScb->Header.AllocationSize );
  5270. //
  5271. // Now read the page in and mark it dirty so that any new range will
  5272. // be zeroed.
  5273. //
  5274. SizeToPin = BitmapSizeInBytes - BitmapCurrentOffset;
  5275. if (SizeToPin > PAGE_SIZE) { SizeToPin = PAGE_SIZE; }
  5276. NtfsPinStream( IrpContext,
  5277. BitmapScb,
  5278. BitmapCurrentOffset,
  5279. SizeToPin,
  5280. &BitmapBcb,
  5281. &BitmapBuffer );
  5282. CcSetDirtyPinnedData( BitmapBcb, NULL );
  5283. //
  5284. // Update the ValidDataLength, now that we have read (and possibly
  5285. // zeroed) the page.
  5286. //
  5287. BitmapScb->Header.ValidDataLength.LowPart = BitmapSizeInBytes;
  5288. NtfsWriteFileSizes( IrpContext,
  5289. BitmapScb,
  5290. &BitmapScb->Header.ValidDataLength.QuadPart,
  5291. TRUE,
  5292. TRUE,
  5293. TRUE );
  5294. } else {
  5295. //
  5296. // The Index at this point is actually relative, so convert it to absolute
  5297. // before rejoining common code.
  5298. //
  5299. Index += (BitmapCurrentOffset * 8);
  5300. }
  5301. //
  5302. // We now have an index. There are three possible states for the file
  5303. // record corresponding to this index within the Mft. They are:
  5304. //
  5305. // - File record could lie beyond the current end of the file.
  5306. // There is nothing to do in this case.
  5307. //
  5308. // - File record is part of a hole in the Mft. In that case
  5309. // we allocate space for it bring it into memory.
  5310. //
  5311. // - File record is already within allocated space. There is nothing
  5312. // to do in that case.
  5313. //
  5314. // We store the index as our reserved index and update the Vcb flags. If
  5315. // the hole filling operation fails then the RestoreScbSnapshots routine
  5316. // will clear these values.
  5317. //
  5318. DataScb->ScbType.Mft.ReservedIndex = Index;
  5319. NtfsAcquireCheckpoint( IrpContext, Vcb );
  5320. SetFlag( Vcb->MftReserveFlags, VCB_MFT_RECORD_RESERVED );
  5321. SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_MFT_REC_RESERVED );
  5322. NtfsReleaseCheckpoint( IrpContext, Vcb );
  5323. if (NtfsIsMftIndexInHole( IrpContext, Vcb, Index, NULL )) {
  5324. //
  5325. // Make sure nothing is left pinned in the bitmap.
  5326. //
  5327. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  5328. //
  5329. // Try to fill the hole in the Mft. We will have this routine
  5330. // raise if unable to fill in the hole.
  5331. //
  5332. NtfsFillMftHole( IrpContext, Vcb, Index );
  5333. }
  5334. //
  5335. // At this point we have the index to reserve and the value of the
  5336. // byte in the bitmap which contains this bit. We make sure the
  5337. // Mft includes the allocation for this index and the other
  5338. // bits within the same byte. This is so we can uninitialize these
  5339. // file records so chkdsk won't look at stale data.
  5340. //
  5341. EndOfIndexOffset = LlBytesFromFileRecords( Vcb, (Index + 8) & ~(7));
  5342. //
  5343. // Now check if we are extending the file. We update the file size and
  5344. // valid data now.
  5345. //
  5346. if (EndOfIndexOffset > DataScb->Header.FileSize.QuadPart) {
  5347. ULONG AddedFileRecords;
  5348. ULONG CurrentIndex;
  5349. //
  5350. // Check for allocation first.
  5351. //
  5352. if (EndOfIndexOffset > DataScb->Header.AllocationSize.QuadPart) {
  5353. ClusterCount = ((Index + DataScb->ScbType.Index.RecordAllocationContext.ExtendGranularity) &
  5354. ~(DataScb->ScbType.Index.RecordAllocationContext.ExtendGranularity - 1));
  5355. ClusterCount = LlBytesFromFileRecords( Vcb, (ULONG) ClusterCount );
  5356. ClusterCount = LlClustersFromBytesTruncate( Vcb,
  5357. ClusterCount - DataScb->Header.AllocationSize.QuadPart );
  5358. NtfsAddAllocation( IrpContext,
  5359. DataScb->FileObject,
  5360. DataScb,
  5361. LlClustersFromBytes( Vcb,
  5362. DataScb->Header.AllocationSize.QuadPart ),
  5363. ClusterCount,
  5364. FALSE,
  5365. NULL );
  5366. }
  5367. //
  5368. // Now we have to figure out how many file records we will be
  5369. // adding and the index of the first record being added.
  5370. //
  5371. CurrentIndex = (ULONG) LlFileRecordsFromBytes( Vcb, DataScb->Header.FileSize.QuadPart );
  5372. AddedFileRecords = (ULONG) (EndOfIndexOffset - DataScb->Header.FileSize.QuadPart);
  5373. AddedFileRecords = FileRecordsFromBytes( Vcb, AddedFileRecords );
  5374. DataScb->Header.FileSize.QuadPart = EndOfIndexOffset;
  5375. DataScb->Header.ValidDataLength.QuadPart = EndOfIndexOffset;
  5376. NtfsWriteFileSizes( IrpContext,
  5377. DataScb,
  5378. &DataScb->Header.ValidDataLength.QuadPart,
  5379. TRUE,
  5380. TRUE,
  5381. TRUE );
  5382. //
  5383. // Tell the cache manager about the new file size.
  5384. //
  5385. CcSetFileSizes( DataScb->FileObject,
  5386. (PCC_FILE_SIZES)&DataScb->Header.AllocationSize );
  5387. //
  5388. // Update our bookeeping to reflect the number of file records
  5389. // added.
  5390. //
  5391. DataScb->ScbType.Mft.FreeRecordChange += AddedFileRecords;
  5392. Vcb->MftFreeRecords += AddedFileRecords;
  5393. //
  5394. // We now have to go through each of the file records added
  5395. // and mark it as not IN_USE. We don't want stale data in this range
  5396. // to ever confuse chkdsk or rescan. These records begin after the
  5397. // current end of file. We won't worry about anything currently
  5398. // in the file because it would already be marked as IN-USE or
  5399. // not correctly. We are only concerned with records which will
  5400. // become part of the valid portion of the file since we will
  5401. // skip them in the normal allocation path (we want to limit
  5402. // disk IO in a file record containing MFT mapping).
  5403. //
  5404. //
  5405. // Chop off the bits which are already part of the file.
  5406. //
  5407. BitmapByte >>= (8 - AddedFileRecords);
  5408. //
  5409. // Now perform the initialization routine for each file record beyond the
  5410. // previous end of the file.
  5411. //
  5412. while (AddedFileRecords) {
  5413. //
  5414. // If not allocated then uninitialize it now.
  5415. //
  5416. if (!FlagOn( BitmapByte, 0x1 )) {
  5417. NtfsInitializeMftHoleRecords( IrpContext,
  5418. Vcb,
  5419. CurrentIndex,
  5420. 1 );
  5421. }
  5422. BitmapByte >>= 1;
  5423. CurrentIndex += 1;
  5424. AddedFileRecords -= 1;
  5425. }
  5426. }
  5427. } finally {
  5428. DebugUnwind( NtfsReserveMftRecord );
  5429. if (StuffAdded) { NtfsFreePool( Bitmap.Buffer ); }
  5430. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  5431. NtfsReleaseScb( IrpContext, DataScb );
  5432. }
  5433. DebugTrace( -1, Dbg, ("NtfsReserveMftRecord -> Exit\n") );
  5434. return;
  5435. }
  5436. ULONG
  5437. NtfsAllocateMftReservedRecord (
  5438. IN PIRP_CONTEXT IrpContext,
  5439. IN PVCB Vcb,
  5440. IN PATTRIBUTE_ENUMERATION_CONTEXT BitmapAttribute
  5441. )
  5442. /*++
  5443. Routine Description:
  5444. This routine allocates a previously reserved record, and returns its
  5445. number.
  5446. Arguments:
  5447. Vcb - This is the Vcb for the volume.
  5448. BitmapAttribute - Supplies the enumeration context for the bitmap
  5449. attribute. This parameter is ignored if the bitmap attribute is
  5450. non resident, in which case we create an scb for the attribute and
  5451. store a pointer to it in the record allocation context.
  5452. Return Value:
  5453. ULONG - Returns the index of the record just reserved, zero based.
  5454. --*/
  5455. {
  5456. PSCB DataScb;
  5457. ULONG ReservedIndex;
  5458. PBCB BitmapBcb = NULL;
  5459. ASSERT_IRP_CONTEXT( IrpContext );
  5460. PAGED_CODE();
  5461. DebugTrace( +1, Dbg, ("NtfsAllocateMftReservedRecord\n") );
  5462. //
  5463. // Synchronize by acquiring the data scb exclusive, as an "end resource".
  5464. // Then use try-finally to insure we free it up.
  5465. //
  5466. DataScb = Vcb->MftScb;
  5467. NtfsAcquireExclusiveScb( IrpContext, DataScb );
  5468. try {
  5469. PSCB BitmapScb;
  5470. ULONG RelativeIndex;
  5471. ULONG SizeToPin;
  5472. RTL_BITMAP Bitmap;
  5473. PVOID BitmapBuffer;
  5474. BITMAP_RANGE BitmapRange;
  5475. ULONG BitmapCurrentOffset = 0;
  5476. //
  5477. // If we are going to allocate file record 15 then do so and set the
  5478. // flags in the IrpContext and Vcb.
  5479. //
  5480. if (!FlagOn( Vcb->MftReserveFlags, VCB_MFT_RECORD_15_USED )) {
  5481. SetFlag( Vcb->MftReserveFlags, VCB_MFT_RECORD_15_USED );
  5482. SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_MFT_REC_15_USED );
  5483. try_return( ReservedIndex = FIRST_USER_FILE_NUMBER - 1 );
  5484. }
  5485. //
  5486. // See if someone made the bitmap nonresident, and we still think
  5487. // it is resident. If so, we must uninitialize and insure reinitialization
  5488. // below.
  5489. //
  5490. {
  5491. ULONG BytesPerRecord = DataScb->ScbType.Index.RecordAllocationContext.BytesPerRecord;
  5492. ULONG ExtendGranularity = DataScb->ScbType.Index.RecordAllocationContext.ExtendGranularity;
  5493. if ((DataScb->ScbType.Index.RecordAllocationContext.BitmapScb == NULL) &&
  5494. !NtfsIsAttributeResident( NtfsFoundAttribute( BitmapAttribute ))) {
  5495. NtfsUninitializeRecordAllocation( IrpContext,
  5496. &DataScb->ScbType.Index.RecordAllocationContext );
  5497. DataScb->ScbType.Index.RecordAllocationContext.CurrentBitmapSize = MAXULONG;
  5498. }
  5499. //
  5500. // Reinitialize the record context structure if necessary.
  5501. //
  5502. if (DataScb->ScbType.Index.RecordAllocationContext.CurrentBitmapSize == MAXULONG) {
  5503. NtfsInitializeRecordAllocation( IrpContext,
  5504. DataScb,
  5505. BitmapAttribute,
  5506. BytesPerRecord,
  5507. ExtendGranularity,
  5508. ExtendGranularity,
  5509. &DataScb->ScbType.Index.RecordAllocationContext );
  5510. }
  5511. }
  5512. BitmapScb = DataScb->ScbType.Index.RecordAllocationContext.BitmapScb;
  5513. ReservedIndex = DataScb->ScbType.Mft.ReservedIndex;
  5514. //
  5515. // Find the start of the page containing the reserved index.
  5516. //
  5517. BitmapCurrentOffset = (ReservedIndex / 8) & ~(PAGE_SIZE - 1);
  5518. RelativeIndex = ReservedIndex & (BITS_PER_PAGE - 1);
  5519. //
  5520. // Calculate the size to read from this point to the end of
  5521. // bitmap, or a page, whichever is less.
  5522. //
  5523. SizeToPin = (DataScb->ScbType.Index.RecordAllocationContext.CurrentBitmapSize / 8)
  5524. - BitmapCurrentOffset;
  5525. if (SizeToPin > PAGE_SIZE) { SizeToPin = PAGE_SIZE; }
  5526. //
  5527. // Read the desired bitmap page.
  5528. //
  5529. NtfsPinStream( IrpContext,
  5530. BitmapScb,
  5531. BitmapCurrentOffset,
  5532. SizeToPin,
  5533. &BitmapBcb,
  5534. &BitmapBuffer );
  5535. //
  5536. // Initialize the bitmap.
  5537. //
  5538. RtlInitializeBitMap( &Bitmap, BitmapBuffer, SizeToPin * 8 );
  5539. //
  5540. // Now log this change as well.
  5541. //
  5542. BitmapRange.BitMapOffset = RelativeIndex;
  5543. BitmapRange.NumberOfBits = 1;
  5544. (VOID) NtfsWriteLog( IrpContext,
  5545. BitmapScb,
  5546. BitmapBcb,
  5547. SetBitsInNonresidentBitMap,
  5548. &BitmapRange,
  5549. sizeof(BITMAP_RANGE),
  5550. ClearBitsInNonresidentBitMap,
  5551. &BitmapRange,
  5552. sizeof(BITMAP_RANGE),
  5553. BitmapCurrentOffset,
  5554. 0,
  5555. 0,
  5556. Bitmap.SizeOfBitMap >> 3 );
  5557. NtfsRestartSetBitsInBitMap( IrpContext, &Bitmap, RelativeIndex, 1 );
  5558. //
  5559. // Now that we've located an index we can subtract the number of free bits in the bitmap
  5560. //
  5561. DataScb->ScbType.Index.RecordAllocationContext.NumberOfFreeBits -= 1;
  5562. //
  5563. // If we didn't extend the file then we have used a free file record in the file.
  5564. // Update our bookeeping count for free file records.
  5565. //
  5566. DataScb->ScbType.Mft.FreeRecordChange -= 1;
  5567. Vcb->MftFreeRecords -= 1;
  5568. //
  5569. // Now determine if we extended the index of the last set bit
  5570. //
  5571. if (ReservedIndex > (ULONG)DataScb->ScbType.Index.RecordAllocationContext.IndexOfLastSetBit) {
  5572. DataScb->ScbType.Index.RecordAllocationContext.IndexOfLastSetBit = ReservedIndex;
  5573. }
  5574. //
  5575. // Clear the fields that indicate we have a reserved index.
  5576. //
  5577. NtfsAcquireCheckpoint( IrpContext, Vcb );
  5578. ClearFlag( Vcb->MftReserveFlags, VCB_MFT_RECORD_RESERVED );
  5579. NtfsReleaseCheckpoint( IrpContext, Vcb );
  5580. DataScb->ScbType.Mft.ReservedIndex = 0;
  5581. try_exit: NOTHING;
  5582. } finally {
  5583. DebugUnwind( NtfsAllocateMftReserveRecord );
  5584. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  5585. NtfsReleaseScb( IrpContext, DataScb );
  5586. }
  5587. DebugTrace( -1, Dbg, ("NtfsAllocateMftReserveRecord -> %08lx\n", ReservedIndex) );
  5588. return ReservedIndex;
  5589. }
  5590. VOID
  5591. NtfsDeallocateRecordsComplete (
  5592. IN PIRP_CONTEXT IrpContext
  5593. )
  5594. /*++
  5595. Routine Description:
  5596. This routine removes recently deallocated record information from
  5597. the Scb structures based on the input irp context.
  5598. Arguments:
  5599. IrpContext - Supplies the Queue of recently deallocate records
  5600. Return Value:
  5601. None.
  5602. --*/
  5603. {
  5604. PDEALLOCATED_RECORDS DeallocatedRecords;
  5605. PAGED_CODE();
  5606. DebugTrace( +1, Dbg, ("NtfsDeallocateRecordsComplete\n") );
  5607. //
  5608. // Now while the irp context's recently deallocated queue is not empty
  5609. // we will grap the first entry off the queue, remove it from both
  5610. // the scb and irp context queue, and free the record
  5611. //
  5612. while (!IsListEmpty( &IrpContext->RecentlyDeallocatedQueue )) {
  5613. DeallocatedRecords = CONTAINING_RECORD( IrpContext->RecentlyDeallocatedQueue.Flink,
  5614. DEALLOCATED_RECORDS,
  5615. IrpContextLinks );
  5616. RemoveEntryList( &DeallocatedRecords->ScbLinks );
  5617. //
  5618. // Reset our hint index if one of the deallocated indexes is suitable.
  5619. //
  5620. if (DeallocatedRecords->Scb->ScbType.Index.RecordAllocationContext.StartingHint >
  5621. DeallocatedRecords->Scb->ScbType.Index.RecordAllocationContext.LowestDeallocatedIndex) {
  5622. DeallocatedRecords->Scb->ScbType.Index.RecordAllocationContext.StartingHint =
  5623. DeallocatedRecords->Scb->ScbType.Index.RecordAllocationContext.LowestDeallocatedIndex;
  5624. }
  5625. //
  5626. // Make sure to reset the LowestDeallocated.
  5627. //
  5628. DeallocatedRecords->Scb->ScbType.Index.RecordAllocationContext.LowestDeallocatedIndex = MAXULONG;
  5629. //
  5630. // Now remove the record from the irp context queue and deallocate the
  5631. // record
  5632. //
  5633. RemoveEntryList( &DeallocatedRecords->IrpContextLinks );
  5634. //
  5635. // If this record is the default size then return it to our private list.
  5636. // Otherwise deallocate it to pool.
  5637. //
  5638. if (DeallocatedRecords->NumberOfEntries == DEALLOCATED_RECORD_ENTRIES) {
  5639. ExFreeToPagedLookasideList( &NtfsDeallocatedRecordsLookasideList, DeallocatedRecords );
  5640. } else {
  5641. NtfsFreePool( DeallocatedRecords );
  5642. }
  5643. }
  5644. DebugTrace( -1, Dbg, ("NtfsDeallocateRecordsComplete -> VOID\n") );
  5645. return;
  5646. }
  5647. BOOLEAN
  5648. NtfsIsRecordAllocated (
  5649. IN PIRP_CONTEXT IrpContext,
  5650. IN PRECORD_ALLOCATION_CONTEXT RecordAllocationContext,
  5651. IN ULONG Index,
  5652. IN PATTRIBUTE_ENUMERATION_CONTEXT BitmapAttribute
  5653. )
  5654. /*++
  5655. Routine Description:
  5656. This routine is used to query if a record is currently allocated for
  5657. the specified record allocation context.
  5658. Arguments:
  5659. RecordAllocationContext - Supplies the record allocation context used
  5660. in this operation
  5661. Index - Supplies the index of the record being queried, zero based.
  5662. BitmapAttribute - Supplies the enumeration context for the bitmap
  5663. attribute. This parameter is ignored if the bitmap attribute is
  5664. non resident, in which case we create an scb for the attribute and
  5665. store a pointer to it in the record allocation context.
  5666. Return Value:
  5667. BOOLEAN - TRUE if the record is currently allocated and FALSE otherwise.
  5668. --*/
  5669. {
  5670. BOOLEAN Results;
  5671. PSCB DataScb;
  5672. PSCB BitmapScb;
  5673. ULONG CurrentBitmapSize;
  5674. PVCB Vcb;
  5675. RTL_BITMAP Bitmap;
  5676. PBCB BitmapBcb = NULL;
  5677. PATTRIBUTE_RECORD_HEADER AttributeRecordHeader;
  5678. ASSERT_IRP_CONTEXT( IrpContext );
  5679. PAGED_CODE();
  5680. DebugTrace( +1, Dbg, ("NtfsIsRecordAllocated\n") );
  5681. //
  5682. // Synchronize by acquiring the data scb exclusive, as an "end resource".
  5683. // Then use try-finally to insure we free it up.
  5684. //
  5685. DataScb = RecordAllocationContext->DataScb;
  5686. NtfsAcquireExclusiveScb( IrpContext, DataScb );
  5687. try {
  5688. Vcb = DataScb->Fcb->Vcb;
  5689. //
  5690. // See if someone made the bitmap nonresident, and we still think
  5691. // it is resident. If so, we must uninitialize and insure reinitialization
  5692. // below.
  5693. //
  5694. BitmapScb = RecordAllocationContext->BitmapScb;
  5695. {
  5696. ULONG ExtendGranularity;
  5697. ULONG BytesPerRecord;
  5698. ULONG TruncateGranularity;
  5699. //
  5700. // Remember the current values in the record context structure.
  5701. //
  5702. BytesPerRecord = RecordAllocationContext->BytesPerRecord;
  5703. TruncateGranularity = RecordAllocationContext->TruncateGranularity;
  5704. ExtendGranularity = RecordAllocationContext->ExtendGranularity;
  5705. if ((BitmapScb == NULL) && !NtfsIsAttributeResident(NtfsFoundAttribute(BitmapAttribute))) {
  5706. NtfsUninitializeRecordAllocation( IrpContext,
  5707. RecordAllocationContext );
  5708. RecordAllocationContext->CurrentBitmapSize = MAXULONG;
  5709. }
  5710. //
  5711. // Reinitialize the record context structure if necessary.
  5712. //
  5713. if (RecordAllocationContext->CurrentBitmapSize == MAXULONG) {
  5714. NtfsInitializeRecordAllocation( IrpContext,
  5715. DataScb,
  5716. BitmapAttribute,
  5717. BytesPerRecord,
  5718. ExtendGranularity,
  5719. TruncateGranularity,
  5720. RecordAllocationContext );
  5721. }
  5722. }
  5723. BitmapScb = RecordAllocationContext->BitmapScb;
  5724. CurrentBitmapSize = RecordAllocationContext->CurrentBitmapSize;
  5725. //
  5726. // We will do different operations based on whether the bitmap is resident or nonresident
  5727. // The first case will handle the resident bitmap
  5728. //
  5729. if (BitmapScb == NULL) {
  5730. UCHAR NewByte;
  5731. //
  5732. // Initialize the local bitmap
  5733. //
  5734. AttributeRecordHeader = NtfsFoundAttribute( BitmapAttribute );
  5735. RtlInitializeBitMap( &Bitmap,
  5736. (PULONG)NtfsAttributeValue( AttributeRecordHeader ),
  5737. CurrentBitmapSize );
  5738. //
  5739. // And check if the indcated bit is Set. If it is set then the record is allocated.
  5740. //
  5741. NewByte = ((PUCHAR)Bitmap.Buffer)[ Index / 8 ];
  5742. Results = BooleanFlagOn( NewByte, BitMask[Index % 8] );
  5743. } else {
  5744. PVOID BitmapBuffer;
  5745. ULONG SizeToMap;
  5746. ULONG RelativeIndex;
  5747. ULONG BitmapCurrentOffset;
  5748. //
  5749. // Calculate Vcn and relative index of the bit we will deallocate,
  5750. // from the nearest page boundary.
  5751. //
  5752. BitmapCurrentOffset = (Index / 8) & ~(PAGE_SIZE - 1);
  5753. RelativeIndex = Index & (BITS_PER_PAGE - 1);
  5754. //
  5755. // Calculate the size to read from this point to the end of
  5756. // bitmap.
  5757. //
  5758. SizeToMap = CurrentBitmapSize / 8 - BitmapCurrentOffset;
  5759. if (SizeToMap > PAGE_SIZE) { SizeToMap = PAGE_SIZE; }
  5760. NtfsMapStream( IrpContext,
  5761. BitmapScb,
  5762. BitmapCurrentOffset,
  5763. SizeToMap,
  5764. &BitmapBcb,
  5765. &BitmapBuffer );
  5766. RtlInitializeBitMap( &Bitmap, BitmapBuffer, SizeToMap * 8 );
  5767. //
  5768. // Now check if the indicated bit is set. If it is set then the record is allocated.
  5769. // no idea whether the update is applied or not.
  5770. //
  5771. Results = RtlAreBitsSet(&Bitmap, RelativeIndex, 1);
  5772. }
  5773. } finally {
  5774. DebugUnwind( NtfsIsRecordDeallocated );
  5775. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  5776. NtfsReleaseScb( IrpContext, DataScb );
  5777. }
  5778. DebugTrace( -1, Dbg, ("NtfsIsRecordAllocated -> %08lx\n", Results) );
  5779. return Results;
  5780. }
  5781. VOID
  5782. NtfsScanMftBitmap (
  5783. IN PIRP_CONTEXT IrpContext,
  5784. IN OUT PVCB Vcb
  5785. )
  5786. /*++
  5787. Routine Description:
  5788. This routine is called during mount to initialize the values related to
  5789. the Mft in the Vcb. These include the number of free records and hole
  5790. records. Also whether we have already used file record 15. We also scan
  5791. the Mft to check whether there is any excess mapping.
  5792. Arguments:
  5793. Vcb - Supplies the Vcb for the volume.
  5794. Return Value:
  5795. None.
  5796. --*/
  5797. {
  5798. PBCB BitmapBcb = NULL;
  5799. ATTRIBUTE_ENUMERATION_CONTEXT AttrContext;
  5800. PAGED_CODE();
  5801. DebugTrace( +1, Dbg, ("NtfsScanMftBitmap...\n") );
  5802. NtfsInitializeAttributeContext( &AttrContext );
  5803. //
  5804. // Use a try-finally to facilitate cleanup.
  5805. //
  5806. try {
  5807. ULONG SizeToMap;
  5808. ULONG FileRecords;
  5809. ULONG RemainingRecords;
  5810. ULONG BitmapCurrentOffset;
  5811. ULONG BitmapBytesToRead;
  5812. PUCHAR BitmapBuffer;
  5813. UCHAR NextByte;
  5814. VCN Vcn;
  5815. LCN Lcn;
  5816. LONGLONG Clusters;
  5817. //
  5818. // Start by walking through the file records for the Mft
  5819. // checking for excess mapping.
  5820. //
  5821. NtfsLookupAttributeForScb( IrpContext, Vcb->MftScb, NULL, &AttrContext );
  5822. //
  5823. // We don't care about the first one. Let's find the rest of them.
  5824. //
  5825. while (NtfsLookupNextAttributeForScb( IrpContext,
  5826. Vcb->MftScb,
  5827. &AttrContext )) {
  5828. PFILE_RECORD_SEGMENT_HEADER FileRecord;
  5829. SetFlag( Vcb->MftReserveFlags, VCB_MFT_RECORD_15_USED );
  5830. FileRecord = NtfsContainingFileRecord( &AttrContext );
  5831. //
  5832. // Now check for the free space.
  5833. //
  5834. if (FileRecord->BytesAvailable - FileRecord->FirstFreeByte < Vcb->MftReserved) {
  5835. NtfsAcquireCheckpoint( IrpContext, Vcb );
  5836. SetFlag( Vcb->MftDefragState, VCB_MFT_DEFRAG_EXCESS_MAP );
  5837. NtfsReleaseCheckpoint( IrpContext, Vcb );
  5838. break;
  5839. }
  5840. }
  5841. //
  5842. // We now want to find the number of free records within the Mft
  5843. // bitmap. We need to figure out how many file records are in
  5844. // the Mft and then map the necessary bytes in the bitmap and
  5845. // find the count of set bits. We will round the bitmap length
  5846. // down to a byte boundary and then look at the last byte
  5847. // separately.
  5848. //
  5849. FileRecords = (ULONG) LlFileRecordsFromBytes( Vcb, Vcb->MftScb->Header.FileSize.QuadPart );
  5850. //
  5851. // Remember how many file records are in the last byte of the bitmap.
  5852. //
  5853. RemainingRecords = FileRecords & 7;
  5854. FileRecords &= ~(7);
  5855. BitmapBytesToRead = FileRecords / 8;
  5856. for (BitmapCurrentOffset = 0;
  5857. BitmapCurrentOffset < BitmapBytesToRead;
  5858. BitmapCurrentOffset += PAGE_SIZE) {
  5859. RTL_BITMAP Bitmap;
  5860. ULONG MapAdjust;
  5861. //
  5862. // Calculate the size to read from this point to the end of
  5863. // bitmap, or a page, whichever is less.
  5864. //
  5865. SizeToMap = BitmapBytesToRead - BitmapCurrentOffset;
  5866. if (SizeToMap > PAGE_SIZE) { SizeToMap = PAGE_SIZE; }
  5867. //
  5868. // If we aren't pinning a full page and have some bits
  5869. // in the next byte then pin an extra byte.
  5870. //
  5871. if ((SizeToMap != PAGE_SIZE) && (RemainingRecords != 0)) {
  5872. MapAdjust = 1;
  5873. } else {
  5874. MapAdjust = 0;
  5875. }
  5876. //
  5877. // Unpin any Bcb from a previous loop.
  5878. //
  5879. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  5880. //
  5881. // Read the desired bitmap page.
  5882. //
  5883. NtfsMapStream( IrpContext,
  5884. Vcb->MftBitmapScb,
  5885. BitmapCurrentOffset,
  5886. SizeToMap + MapAdjust,
  5887. &BitmapBcb,
  5888. &BitmapBuffer );
  5889. //
  5890. // Initialize the bitmap and search for a free bit.
  5891. //
  5892. RtlInitializeBitMap( &Bitmap, (PULONG) BitmapBuffer, SizeToMap * 8 );
  5893. Vcb->MftFreeRecords += RtlNumberOfClearBits( &Bitmap );
  5894. }
  5895. //
  5896. // If there are some remaining bits in the next byte then process
  5897. // them now.
  5898. //
  5899. if (RemainingRecords) {
  5900. PVOID RangePtr;
  5901. ULONG Index;
  5902. //
  5903. // Hopefully this byte is on the same page. Otherwise we will
  5904. // free this page and go to the next. In this case the Vcn will
  5905. // have the correct value because we walked past the end of the
  5906. // current file records already.
  5907. //
  5908. if (SizeToMap == PAGE_SIZE) {
  5909. //
  5910. // Unpin any Bcb from a previous loop.
  5911. //
  5912. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  5913. //
  5914. // Read the desired bitmap page.
  5915. //
  5916. NtfsMapStream( IrpContext,
  5917. Vcb->MftScb->ScbType.Index.RecordAllocationContext.BitmapScb,
  5918. BitmapCurrentOffset,
  5919. 1,
  5920. &BitmapBcb,
  5921. &BitmapBuffer );
  5922. //
  5923. // Set this to the byte prior to the last byte. This will
  5924. // set this to the same state as if on the same page.
  5925. //
  5926. SizeToMap = 0;
  5927. }
  5928. //
  5929. // We look at the next byte in the page and figure out how
  5930. // many bits are set.
  5931. //
  5932. NextByte = *((PUCHAR) Add2Ptr( BitmapBuffer, SizeToMap + 1 ));
  5933. while (RemainingRecords--) {
  5934. if (!FlagOn( NextByte, 0x01 )) {
  5935. Vcb->MftFreeRecords += 1;
  5936. }
  5937. NextByte >>= 1;
  5938. }
  5939. //
  5940. // We are now ready to look for holes within the Mft. We will look
  5941. // through the Mcb for the Mft looking for holes. The holes must
  5942. // always be an integral number of file records.
  5943. //
  5944. RangePtr = NULL;
  5945. Index = 0;
  5946. while (NtfsGetSequentialMcbEntry( &Vcb->MftScb->Mcb,
  5947. &RangePtr,
  5948. Index,
  5949. &Vcn,
  5950. &Lcn,
  5951. &Clusters )) {
  5952. //
  5953. // Look for a hole and count the clusters.
  5954. //
  5955. if (Lcn == UNUSED_LCN) {
  5956. if (Vcb->FileRecordsPerCluster == 0) {
  5957. Vcb->MftHoleRecords += (((ULONG)Clusters) >> Vcb->MftToClusterShift);
  5958. } else {
  5959. Vcb->MftHoleRecords += (((ULONG)Clusters) << Vcb->MftToClusterShift);
  5960. }
  5961. }
  5962. Index += 1;
  5963. }
  5964. }
  5965. } finally {
  5966. DebugUnwind( NtfsScanMftBitmap );
  5967. NtfsCleanupAttributeContext( IrpContext, &AttrContext );
  5968. NtfsUnpinBcb( IrpContext, &BitmapBcb );
  5969. DebugTrace( -1, Dbg, ("NtfsScanMftBitmap...\n") );
  5970. }
  5971. return;
  5972. }
  5973. //
  5974. // Local support routine
  5975. //
  5976. BOOLEAN
  5977. NtfsAddDeallocatedRecords (
  5978. IN PVCB Vcb,
  5979. IN PSCB Scb,
  5980. IN ULONG StartingIndexOfBitmap,
  5981. IN OUT PRTL_BITMAP Bitmap
  5982. )
  5983. /*++
  5984. Routine Description:
  5985. This routine will modify the input bitmap by removing from it
  5986. any records that are in the recently deallocated queue of the scb.
  5987. If we do add stuff then we will not modify the bitmap buffer itself but
  5988. will allocate a new copy for the bitmap.
  5989. Arguments:
  5990. Vcb - Supplies the Vcb for the volume
  5991. Scb - Supplies the Scb used in this operation
  5992. StartingIndexOfBitmap - Supplies the base index to use to bias the bitmap
  5993. Bitmap - Supplies the bitmap being modified
  5994. Return Value:
  5995. BOOLEAN - TRUE if the bitmap has been modified and FALSE
  5996. otherwise.
  5997. --*/
  5998. {
  5999. BOOLEAN Results;
  6000. ULONG EndingIndexOfBitmap;
  6001. PLIST_ENTRY Links;
  6002. PDEALLOCATED_RECORDS DeallocatedRecords;
  6003. ULONG i;
  6004. ULONG Index;
  6005. PVOID NewBuffer;
  6006. ULONG SizeOfBitmapInBytes;
  6007. PAGED_CODE();
  6008. DebugTrace( +1, Dbg, ("NtfsAddDeallocatedRecords...\n") );
  6009. //
  6010. // Until shown otherwise we will assume that we haven't updated anything
  6011. //
  6012. Results = FALSE;
  6013. //
  6014. // Calculate the last index in the bitmap
  6015. //
  6016. EndingIndexOfBitmap = StartingIndexOfBitmap + Bitmap->SizeOfBitMap - 1;
  6017. SizeOfBitmapInBytes = (Bitmap->SizeOfBitMap + 7) / 8;
  6018. //
  6019. // Check if we need to bias the bitmap with the reserved index
  6020. //
  6021. if ((Scb == Vcb->MftScb) &&
  6022. FlagOn( Vcb->MftReserveFlags, VCB_MFT_RECORD_RESERVED ) &&
  6023. (StartingIndexOfBitmap <= Scb->ScbType.Mft.ReservedIndex) &&
  6024. (Scb->ScbType.Mft.ReservedIndex <= EndingIndexOfBitmap)) {
  6025. //
  6026. // The index is a hit so now bias the index with the start of the bitmap
  6027. // and allocate an extra buffer to hold the bitmap
  6028. //
  6029. Index = Scb->ScbType.Mft.ReservedIndex - StartingIndexOfBitmap;
  6030. NewBuffer = NtfsAllocatePool(PagedPool, SizeOfBitmapInBytes );
  6031. RtlCopyMemory( NewBuffer, Bitmap->Buffer, SizeOfBitmapInBytes );
  6032. Bitmap->Buffer = NewBuffer;
  6033. Results = TRUE;
  6034. //
  6035. // And now set the bits in the bitmap to indicate that the record
  6036. // cannot be reallocated yet. Also set the other bits within the
  6037. // same byte so we can put all of the file records for the Mft
  6038. // within the same pages of the Mft.
  6039. //
  6040. ((PUCHAR) Bitmap->Buffer)[ Index / 8 ] = 0xff;
  6041. }
  6042. //
  6043. // Scan through the recently deallocated queue looking for any indexes that
  6044. // we need to modify
  6045. //
  6046. for (Links = Scb->ScbType.Index.RecentlyDeallocatedQueue.Flink;
  6047. Links != &Scb->ScbType.Index.RecentlyDeallocatedQueue;
  6048. Links = Links->Flink) {
  6049. DeallocatedRecords = CONTAINING_RECORD( Links, DEALLOCATED_RECORDS, ScbLinks );
  6050. //
  6051. // For every index in the record check if the index is within the range
  6052. // of the bitmap we are working with
  6053. //
  6054. for (i = 0; i < DeallocatedRecords->NextFreeEntry; i += 1) {
  6055. if ((StartingIndexOfBitmap <= DeallocatedRecords->Index[i]) &&
  6056. (DeallocatedRecords->Index[i] <= EndingIndexOfBitmap)) {
  6057. //
  6058. // The index is a hit so now bias the index with the start of the bitmap
  6059. // and check if we need to allocate an extra buffer to hold the bitmap
  6060. //
  6061. Index = DeallocatedRecords->Index[i] - StartingIndexOfBitmap;
  6062. if (!Results) {
  6063. NewBuffer = NtfsAllocatePool(PagedPool, SizeOfBitmapInBytes );
  6064. RtlCopyMemory( NewBuffer, Bitmap->Buffer, SizeOfBitmapInBytes );
  6065. Bitmap->Buffer = NewBuffer;
  6066. Results = TRUE;
  6067. }
  6068. //
  6069. // And now set the bit in the bitmap to indicate that the record
  6070. // cannot be reallocated yet. It's possible that the bit is
  6071. // already set if we have aborted a transaction which then
  6072. // restores the bit.
  6073. //
  6074. SetFlag( ((PUCHAR)Bitmap->Buffer)[ Index / 8 ], BitMask[Index % 8] );
  6075. }
  6076. }
  6077. }
  6078. //
  6079. // And return to our caller
  6080. //
  6081. DebugTrace( -1, Dbg, ("NtfsAddDeallocatedRecords -> %08lx\n", Results) );
  6082. return Results;
  6083. }
  6084. //
  6085. // Local support routine
  6086. //
  6087. LCN
  6088. NtfsInitializeMftZone (
  6089. IN PIRP_CONTEXT IrpContext,
  6090. IN PVCB Vcb
  6091. )
  6092. /*++
  6093. Routine Description:
  6094. This routine is called to reserve a range of the volume bitmap for use by the
  6095. Mft zone. We first look for a range which is contiguous with the end of the Mft.
  6096. If unavailable we look for a suitable length range out of the cached runs array.
  6097. We expect our caller has loaded the cached runs array with free runs in the volume
  6098. bitmap and also that the Mcb for the Mft is fully loaded.
  6099. Arguments:
  6100. Vcb - This is the Vcb for the volume we are looking for the zone for.
  6101. Return Value:
  6102. LCN - Return the LCN for the first run in the free portion of the zone.
  6103. --*/
  6104. {
  6105. LONGLONG ClusterCount;
  6106. LCN Lcn;
  6107. VCN Vcn;
  6108. LCN ZoneStart;
  6109. LONGLONG MinZoneSize;
  6110. LONGLONG DefaultZoneSize;
  6111. LONGLONG MftClusters;
  6112. BOOLEAN FoundRun;
  6113. PAGED_CODE();
  6114. //
  6115. // We synchronize on the volume bitmap.
  6116. //
  6117. ASSERT( NtfsIsExclusiveScb( Vcb->BitmapScb ));
  6118. ASSERT( NtfsIsExclusiveScb( Vcb->MftScb ));
  6119. DebugTrace( +1, Dbg, ("NtfsInitializeMftZone\n") );
  6120. //
  6121. // Remember the default size of the new zone and the number of clusters in the Mft.
  6122. //
  6123. MinZoneSize = Vcb->TotalClusters >> (NTFS_MFT_ZONE_DEFAULT_SHIFT + 1);
  6124. DefaultZoneSize = (Vcb->TotalClusters >> NTFS_MFT_ZONE_DEFAULT_SHIFT) * NtfsMftZoneMultiplier;
  6125. MftClusters = LlClustersFromBytesTruncate( Vcb, Vcb->MftScb->Header.AllocationSize.QuadPart );
  6126. if (DefaultZoneSize > MftClusters + MinZoneSize) {
  6127. DefaultZoneSize -= MftClusters;
  6128. } else {
  6129. DefaultZoneSize = MinZoneSize;
  6130. }
  6131. //
  6132. // Get the last Lcn for the Mft and check if we can find a contiguous free run.
  6133. //
  6134. FoundRun = NtfsLookupLastNtfsMcbEntry( &Vcb->MftScb->Mcb,
  6135. &Vcn,
  6136. &Lcn );
  6137. ASSERT( FoundRun && (Vcn + 1 >= MftClusters) );
  6138. //
  6139. // Look first in the cached runs array. If not there then look to the disk.
  6140. //
  6141. Lcn += 1;
  6142. if (!NtfsLookupCachedLcn( &Vcb->CachedRuns,
  6143. Lcn,
  6144. &ZoneStart,
  6145. &ClusterCount,
  6146. NULL )) {
  6147. //
  6148. // If there are no free runs then set the zone to a default value.
  6149. //
  6150. if (Vcb->CachedRuns.Used == 0) {
  6151. ZoneStart = Lcn;
  6152. ClusterCount = DefaultZoneSize;
  6153. //
  6154. // There should be a run available in the bitmap.
  6155. //
  6156. } else {
  6157. NtfsFindFreeBitmapRun( IrpContext,
  6158. Vcb,
  6159. DefaultZoneSize,
  6160. Lcn,
  6161. TRUE,
  6162. TRUE,
  6163. &ZoneStart,
  6164. &ClusterCount );
  6165. //
  6166. // If there is no contiguous range then look for the best fit in the cached
  6167. // runs array. Start by asking for half the original zone request. Up it
  6168. // if the current Mft is rather small.
  6169. //
  6170. if (ZoneStart != Lcn) {
  6171. ClusterCount = DefaultZoneSize;
  6172. //
  6173. // Lookup in the cached runs array by length.
  6174. //
  6175. NtfsLookupCachedLcnByLength( &Vcb->CachedRuns,
  6176. ClusterCount,
  6177. TRUE,
  6178. Lcn,
  6179. &ZoneStart,
  6180. &ClusterCount,
  6181. NULL );
  6182. }
  6183. }
  6184. }
  6185. //
  6186. // We now have a value for the zone start and length. Make sure we aren't overreserving the
  6187. // volume. Consider the current size of the Mft and the length of the additional zone.
  6188. //
  6189. if (ClusterCount > DefaultZoneSize) {
  6190. ClusterCount = DefaultZoneSize;
  6191. }
  6192. //
  6193. // Align the zone on ULONG boundary. RtlFindNextForwardRunClear expects the pointers
  6194. // to be ulong aligned.
  6195. //
  6196. Vcb->MftZoneStart = ZoneStart & ~0x1f;
  6197. Vcb->MftZoneEnd = (ZoneStart + ClusterCount + 0x1f) & ~0x1f;
  6198. //
  6199. // Keep it close to total clusters.
  6200. //
  6201. if (Vcb->MftZoneEnd > Vcb->TotalClusters) {
  6202. Vcb->MftZoneEnd = (Vcb->TotalClusters + 0x1f) & ~0x1f;
  6203. }
  6204. ClearFlag( Vcb->VcbState, VCB_STATE_REDUCED_MFT );
  6205. //
  6206. // Remove the Mft zone from the cached runs. We always look to the
  6207. // bitmap directly when extending the Mft.
  6208. //
  6209. NtfsRemoveCachedLcn( &Vcb->CachedRuns,
  6210. Vcb->MftZoneStart,
  6211. Vcb->MftZoneEnd - Vcb->MftZoneStart );
  6212. DebugTrace( -1, Dbg, ("NtfsInitializeMftZone -> VOID\n") );
  6213. return ZoneStart;
  6214. }
  6215. //
  6216. // Local support routine
  6217. //
BOOLEAN
NtfsReduceMftZone (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb
    )

/*++

Routine Description:

    This routine is called when it appears that there is no disk space left on the
    disk except the Mft zone.  We will try to reduce the zone to make space
    available for user files.

Arguments:

    Vcb - Supplies the Vcb for the volume

Return Value:

    BOOLEAN - TRUE if the Mft zone was shrunk.  FALSE otherwise.

--*/

{
    BOOLEAN ReduceMft = FALSE;

    LONGLONG FreeClusters;
    LONGLONG TargetFreeClusters;
    LONGLONG PrevFreeClusters;

    ULONG CurrentOffset;

    LCN Lcn;
    LCN StartingLcn;
    LCN SplitLcn;
    LCN FinalLcn;

    RTL_BITMAP Bitmap;
    PBCB BitmapBcb = NULL;

    PAGED_CODE();

    //
    //  Nothing to do if disk is almost empty.
    //

    if (Vcb->FreeClusters < (4 * MFT_EXTEND_GRANULARITY)) {

        return FALSE;
    }

    //
    //  Use a try-finally to facilitate cleanup.
    //

    try {

        //
        //  Bound our search by the end of the volume.
        //

        FinalLcn = Vcb->MftZoneEnd;

        if (Vcb->MftZoneEnd > Vcb->TotalClusters) {

            FinalLcn = Vcb->TotalClusters;
        }

        //
        //  We want to find the number of free clusters in the Mft zone and
        //  return half of them to the pool of clusters for users files.
        //
        //  First pass: walk the zone one bitmap page at a time and total the
        //  clear (free) bits.
        //

        FreeClusters = 0;

        for (Lcn = Vcb->MftZoneStart;
             Lcn < FinalLcn;
             Lcn = StartingLcn + Bitmap.SizeOfBitMap) {

            NtfsUnpinBcb( IrpContext, &BitmapBcb );
            NtfsMapPageInBitmap( IrpContext, Vcb, Lcn, &StartingLcn, &Bitmap, &BitmapBcb );

            //
            //  Clip the mapped page so we never count bits past the zone end.
            //

            if ((StartingLcn + Bitmap.SizeOfBitMap) > FinalLcn) {

                Bitmap.SizeOfBitMap = (ULONG) (FinalLcn - StartingLcn);
            }

            //
            //  If the page begins before our current Lcn then bias the bitmap
            //  buffer and size so the scan starts at Lcn.  Note Lcn - StartingLcn
            //  is a multiple of 8 here since zone boundaries are ULONG aligned.
            //

            if (StartingLcn != Lcn) {

                Bitmap.SizeOfBitMap -= (ULONG) (Lcn - StartingLcn);
                Bitmap.Buffer = Add2Ptr( Bitmap.Buffer,
                                         (ULONG) (Lcn - StartingLcn) / 8 );

                StartingLcn = Lcn;
            }

            FreeClusters += RtlNumberOfClearBits( &Bitmap );
        }

        //
        //  If we are below our threshold then don't do the split.
        //

        if (FreeClusters < (4 * MFT_EXTEND_GRANULARITY)) {

            try_return( NOTHING );
        }

        //
        //  Now we want to calculate 1/2 of this number of clusters and set the
        //  zone end to this point.
        //

        TargetFreeClusters = Int64ShraMod32( FreeClusters, 1 );

        //
        //  Now look for the page which contains the split point.
        //
        //  Second pass: re-walk the same pages, accumulating free counts, until
        //  the page containing the TargetFreeClusters'th free cluster is found.
        //

        FreeClusters = 0;

        for (Lcn = Vcb->MftZoneStart;
             Lcn < FinalLcn;
             Lcn = StartingLcn + Bitmap.SizeOfBitMap) {

            NtfsUnpinBcb( IrpContext, &BitmapBcb );
            NtfsMapPageInBitmap( IrpContext, Vcb, Lcn, &StartingLcn, &Bitmap, &BitmapBcb );

            if ((StartingLcn + Bitmap.SizeOfBitMap) > FinalLcn) {

                Bitmap.SizeOfBitMap = (ULONG) (FinalLcn - StartingLcn);
            }

            if (StartingLcn != Lcn) {

                Bitmap.SizeOfBitMap -= (ULONG) (Lcn - StartingLcn);
                Bitmap.Buffer = Add2Ptr( Bitmap.Buffer,
                                         (ULONG) (Lcn - StartingLcn) / 8 );

                StartingLcn = Lcn;
            }

            PrevFreeClusters = FreeClusters;
            FreeClusters += RtlNumberOfClearBits( &Bitmap );

            //
            //  Check if we found the page containing the split point.
            //

            if (FreeClusters >= TargetFreeClusters) {

                //
                //  Walk bit by bit within this page until the running count of
                //  free clusters reaches the target.  The loop is bounded because
                //  this page is known to contain at least the remaining count.
                //

                CurrentOffset = 0;

                while (TRUE) {

                    if (!RtlCheckBit( &Bitmap, CurrentOffset )) {

                        PrevFreeClusters += 1;

                        if (PrevFreeClusters == TargetFreeClusters) {

                            break;
                        }
                    }

                    CurrentOffset += 1;
                }

                SplitLcn = Lcn + CurrentOffset;
                ReduceMft = TRUE;
                break;
            }
        }

        //
        //  If we are to reduce the Mft zone then set the split point and exit.
        //  We always round the split point up to a ULONG bitmap boundary so
        //  that the bitmap for the zone is ULONG aligned.  RtlFindNextForwardRunClear
        //  expects the pointers to be ulong aligned.
        //

        if (ReduceMft) {

            Vcb->MftZoneEnd = (SplitLcn + 0x1f) & ~0x1f;

            //
            //  Keep it close to total clusters.
            //

            if (Vcb->MftZoneEnd > Vcb->TotalClusters) {

                Vcb->MftZoneEnd = (Vcb->TotalClusters + 0x1f) & ~0x1f;
            }

            ASSERT( Vcb->MftZoneEnd >= Vcb->MftZoneStart );

            //
            //  Remember that we shrank the zone if the volume is mostly used
            //  (free space below 1/16 of the volume).
            //

            if (Int64ShraMod32( Vcb->TotalClusters, 4 ) > Vcb->FreeClusters) {

                SetFlag( Vcb->VcbState, VCB_STATE_REDUCED_MFT );
            }
        }

    try_exit: NOTHING;
    } finally {

        NtfsUnpinBcb( IrpContext, &BitmapBcb );
    }

    return ReduceMft;
}
  6359. //
  6360. // Local support routine
  6361. //
  6362. VOID
  6363. NtfsCheckRecordStackUsage (
  6364. IN PIRP_CONTEXT IrpContext
  6365. )
  6366. /*++
  6367. Routine Description:
  6368. This routine is called in the record package prior to adding allocation
  6369. to either a data stream or bitmap stream. The purpose is to verify
  6370. that there is room on the stack to perform a log file full in the
  6371. AddAllocation operation. This routine will check the stack space and
  6372. the available log file space and raise LOG_FILE_FULL defensively if
  6373. both of these reach a critical threshold.
  6374. Arguments:
  6375. Return Value:
  6376. None - this routine will raise if necessary.
  6377. --*/
  6378. {
  6379. LOG_FILE_INFORMATION LogFileInfo;
  6380. ULONG InfoSize;
  6381. LONGLONG RemainingLogFile;
  6382. PAGED_CODE();
  6383. //
  6384. // Check the stack usage first.
  6385. //
  6386. if (IoGetRemainingStackSize() < OVERFLOW_RECORD_THRESHHOLD) {
  6387. //
  6388. // Now check the log file space.
  6389. //
  6390. InfoSize = sizeof( LOG_FILE_INFORMATION );
  6391. RtlZeroMemory( &LogFileInfo, InfoSize );
  6392. LfsReadLogFileInformation( IrpContext->Vcb->LogHandle,
  6393. &LogFileInfo,
  6394. &InfoSize );
  6395. //
  6396. // Check that 1/4 of the log file is available.
  6397. //
  6398. if (InfoSize != 0) {
  6399. RemainingLogFile = LogFileInfo.CurrentAvailable - LogFileInfo.TotalUndoCommitment;
  6400. if ((RemainingLogFile <= 0) ||
  6401. (RemainingLogFile < Int64ShraMod32(LogFileInfo.TotalAvailable, 2))) {
  6402. #ifdef PERF_STATS
  6403. IrpContext->LogFullReason = LF_RECORD_STACK_CHECK;
  6404. #endif
  6405. NtfsRaiseStatus( IrpContext, STATUS_LOG_FILE_FULL, NULL, NULL );
  6406. }
  6407. }
  6408. }
  6409. return;
  6410. }
  6411. //
  6412. // Local support routine
  6413. //
VOID
NtfsRunIsClear (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN LCN StartingLcn,
    IN LONGLONG RunLength
    )

/*++

Routine Description:

    This routine verifies that a group of clusters are unallocated.  It raises
    if any cluster in the range is in use: STATUS_ALREADY_COMMITTED if the bit
    is set in the on-disk bitmap, STATUS_DELETE_PENDING if it is only set
    because of a recently deallocated record.

Arguments:

    Vcb - Supplies the Vcb used in this operation

    StartingLcn - Supplies the start of the cluster run

    RunLength - Supplies the length of the cluster run

Return Value:

    None.

--*/

{
    RTL_BITMAP Bitmap;
    PBCB BitmapBcb = NULL;
    BOOLEAN StuffAdded = FALSE;
    LONGLONG BitOffset;
    LONGLONG BitCount;
    LCN BaseLcn;
    LCN Lcn = StartingLcn;
    LONGLONG ValidDataLength;

    ASSERT_IRP_CONTEXT( IrpContext );
    ASSERT_VCB( Vcb );

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsRunIsClear\n") );

    ValidDataLength = Vcb->BitmapScb->Header.ValidDataLength.QuadPart;

    try {

        //
        //  Ensure that StartingLcn is not past the length of the bitmap.
        //  ValidDataLength is in bytes, so times 8 gives the number of
        //  clusters the bitmap describes.
        //

        if (StartingLcn > ValidDataLength * 8) {

            NtfsRaiseStatus( IrpContext, STATUS_INVALID_PARAMETER, NULL, NULL );
        }

        //
        //  Walk the range one bitmap page at a time.
        //

        while (RunLength > 0){

            //
            //  Access the next page of bitmap and update it
            //

            NtfsMapPageInBitmap(IrpContext, Vcb, Lcn, &BaseLcn, &Bitmap, &BitmapBcb);

            //
            //  Get offset into this page and bits to end of this page
            //

            BitOffset = Lcn - BaseLcn;
            BitCount = Bitmap.SizeOfBitMap - BitOffset;

            //
            //  Adjust for bits to end of page
            //

            if (BitCount > RunLength){

                BitCount = RunLength;
            }

            //
            //  If any bit is set get out
            //

            if (!RtlAreBitsClear( &Bitmap, (ULONG)BitOffset, (ULONG)BitCount)) {

                NtfsRaiseStatus( IrpContext, STATUS_ALREADY_COMMITTED, NULL, NULL );
            }

            //
            //  Overlay the recently-deallocated clusters on a private copy of
            //  the bitmap (NtfsAddRecentlyDeallocated replaces Bitmap.Buffer
            //  with pool we must free when it returns TRUE).
            //

            StuffAdded = NtfsAddRecentlyDeallocated(Vcb, BaseLcn, &Bitmap);

            //
            //  Now if anything was added, check if the desired clusters are still
            //  free, else just free the stuff added.
            //

            if (StuffAdded) {

                //
                //  If any bit is set now, raise STATUS_DELETE_PENDING to indicate
                //  that the space will soon be free (or can be made free).
                //

                if (!RtlAreBitsClear( &Bitmap, (ULONG)BitOffset, (ULONG)BitCount)) {

                    NtfsRaiseStatus( IrpContext, STATUS_DELETE_PENDING, NULL, NULL );
                }

                //
                //  Free up resources
                //

                NtfsFreePool(Bitmap.Buffer);
                StuffAdded = FALSE;
            }

            NtfsUnpinBcb( IrpContext, &BitmapBcb );

            //
            //  Decrease remaining bits by amount checked in this page and move Lcn to beginning
            //  of next page
            //

            RunLength = RunLength - BitCount;
            Lcn = BaseLcn + Bitmap.SizeOfBitMap;
        }

    } finally {

        DebugUnwind(NtfsRunIsClear);

        //
        //  Free up resources
        //

        if(StuffAdded){ NtfsFreePool(Bitmap.Buffer); StuffAdded = FALSE; }

        NtfsUnpinBcb( IrpContext, &BitmapBcb );
    }

    DebugTrace( -1, Dbg, ("NtfsRunIsClear -> VOID\n") );

    return;
}
  6512. //
  6513. // Local support routine
  6514. //
  6515. VOID
  6516. NtfsInitializeCachedRuns (
  6517. IN PNTFS_CACHED_RUNS CachedRuns
  6518. )
  6519. /*++
  6520. Routine Description:
  6521. This routine will initialize the cached run information.
  6522. Arguments:
  6523. CachedRuns - Pointer to an uninitialized cached run structure.
  6524. Return Value:
  6525. None - this routine will raise if unable to initialize the structure.
  6526. --*/
  6527. {
  6528. USHORT Index;
  6529. PAGED_CODE();
  6530. DebugTrace( +1, Dbg, ("NtfsInitializeCachedRuns\n") );
  6531. //
  6532. // Initialize the operating parameters.
  6533. //
  6534. CachedRuns->MaximumSize = 9000;
  6535. CachedRuns->MinCount = 100;
  6536. //
  6537. // Allocate pool for the arrays.
  6538. //
  6539. CachedRuns->LcnArray = NtfsAllocatePool( PagedPool,
  6540. sizeof( NTFS_LCN_CLUSTER_RUN ) * NTFS_INITIAL_CACHED_RUNS );
  6541. CachedRuns->LengthArray = NtfsAllocatePool( PagedPool,
  6542. sizeof( USHORT ) * NTFS_INITIAL_CACHED_RUNS );
  6543. //
  6544. // Mark all entries so that they can be detected as deleted.
  6545. //
  6546. for (Index = 0; Index < NTFS_INITIAL_CACHED_RUNS; Index += 1) {
  6547. CachedRuns->LcnArray[Index].RunLength = 0;
  6548. CachedRuns->LcnArray[Index].LengthIndex = NTFS_CACHED_RUNS_DEL_INDEX;
  6549. CachedRuns->LengthArray[Index] = NTFS_CACHED_RUNS_DEL_INDEX;
  6550. }
  6551. CachedRuns->Avail = NTFS_INITIAL_CACHED_RUNS;
  6552. //
  6553. // Allocate space for the histogram of small run lengths.
  6554. //
  6555. CachedRuns->Bins = NTFS_CACHED_RUNS_BIN_COUNT;
  6556. CachedRuns->BinArray = NtfsAllocatePool( PagedPool,
  6557. sizeof( USHORT ) * CachedRuns->Bins );
  6558. RtlZeroMemory( CachedRuns->BinArray,
  6559. sizeof( USHORT ) * CachedRuns->Bins );
  6560. //
  6561. // Allocate space for the windows of deleted entries in the sorted
  6562. // arrays.
  6563. //
  6564. CachedRuns->DelLcnCount = 0;
  6565. CachedRuns->DelLengthCount = 0;
  6566. CachedRuns->DeletedLcnWindows = NtfsAllocatePool( PagedPool,
  6567. sizeof( NTFS_DELETED_RUNS ) * NTFS_CACHED_RUNS_MAX_DEL_WINDOWS );
  6568. CachedRuns->DeletedLengthWindows = NtfsAllocatePool( PagedPool,
  6569. sizeof( NTFS_DELETED_RUNS ) * NTFS_CACHED_RUNS_MAX_DEL_WINDOWS );
  6570. //
  6571. // Create a window of deleted entries to cover the newly allocated
  6572. // entries.
  6573. //
  6574. NtfsAddDelWindow( CachedRuns, 0, CachedRuns->Avail - 1, TRUE );
  6575. NtfsAddDelWindow( CachedRuns, 0, CachedRuns->Avail - 1, FALSE );
  6576. //
  6577. // Clear the in use count.
  6578. //
  6579. CachedRuns->Used = 0;
  6580. //
  6581. // Reset the longest freed run.
  6582. //
  6583. CachedRuns->LongestFreedRun = MAXLONGLONG;
  6584. #ifdef NTFS_CHECK_CACHED_RUNS
  6585. if (NtfsDoVerifyCachedRuns) {
  6586. NtfsVerifyCachedRuns( CachedRuns, FALSE, FALSE );
  6587. }
  6588. #endif
  6589. DebugTrace( -1, Dbg, ("NtfsInitializeCachedRuns -> VOID\n") );
  6590. return;
  6591. }
  6592. //
  6593. // Local support routine
  6594. //
VOID
NtfsReinitializeCachedRuns (
    IN PNTFS_CACHED_RUNS CachedRuns
    )

/*++

Routine Description:

    This routine is called to reinitialize the cached runs array.  The arrays
    are shrunk back to their initial size when possible; an allocation failure
    here is benign and simply leaves the larger (emptied) arrays in place.

Arguments:

    CachedRuns - Pointer to a cached run structure.

Return Value:

    None

--*/

{
    USHORT Index;
    PNTFS_LCN_CLUSTER_RUN NewLcnArray;
    PUSHORT NewLengthArray;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsReinitializeCachedRuns\n") );

#ifdef NTFS_CHECK_CACHED_RUNS
    ASSERT( (CachedRuns->Vcb == NULL) ||
            NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
#endif

    //
    //  Reallocate to get a smaller array.  If we get an allocation failure then simply
    //  empty the larger arrays.
    //

    if (CachedRuns->Avail != NTFS_INITIAL_CACHED_RUNS) {

        NewLcnArray = NtfsAllocatePoolNoRaise( PagedPool,
                                               sizeof( NTFS_LCN_CLUSTER_RUN ) * NTFS_INITIAL_CACHED_RUNS );

        if (NewLcnArray != NULL) {

            //
            //  Allocate the length array.
            //

            NewLengthArray = NtfsAllocatePoolNoRaise( PagedPool,
                                                      sizeof( USHORT ) * NTFS_INITIAL_CACHED_RUNS );

            //
            //  If we didn't get the Length array then simply use what we have already.
            //  Only when BOTH allocations succeed do we swap in the smaller arrays,
            //  so the structure is never left half-replaced.
            //

            if (NewLengthArray == NULL) {

                NtfsFreePool( NewLcnArray );

            //
            //  Otherwise replace the Lcn and length arrays.
            //

            } else {

                NtfsFreePool( CachedRuns->LcnArray );
                CachedRuns->LcnArray = NewLcnArray;

                NtfsFreePool( CachedRuns->LengthArray );
                CachedRuns->LengthArray = NewLengthArray;

                CachedRuns->Avail = NTFS_INITIAL_CACHED_RUNS;
            }
        }
    }

    //
    //  Mark all entries so that they can be detected as deleted.
    //

    for (Index = 0; Index < CachedRuns->Avail; Index += 1) {

        CachedRuns->LcnArray[Index].RunLength = 0;
        CachedRuns->LcnArray[Index].LengthIndex = NTFS_CACHED_RUNS_DEL_INDEX;
        CachedRuns->LengthArray[Index] = NTFS_CACHED_RUNS_DEL_INDEX;
    }

    //
    //  Clear the histogram of run lengths.
    //

    RtlZeroMemory( CachedRuns->BinArray, sizeof( USHORT ) * CachedRuns->Bins );

    //
    //  Clear the list of windows of deleted entries.
    //

    CachedRuns->DelLcnCount = 0;
    CachedRuns->DelLengthCount = 0;

    //
    //  Create a window of deleted entries to cover all of the entries.
    //

    NtfsAddDelWindow( CachedRuns, 0, CachedRuns->Avail - 1, TRUE );
    NtfsAddDelWindow( CachedRuns, 0, CachedRuns->Avail - 1, FALSE );

    //
    //  Clear the in use count.
    //

    CachedRuns->Used = 0;

    //
    //  Reset the longest freed run.
    //

    CachedRuns->LongestFreedRun = MAXLONGLONG;

#ifdef NTFS_CHECK_CACHED_RUNS
    if (NtfsDoVerifyCachedRuns) {

        NtfsVerifyCachedRuns( CachedRuns, FALSE, FALSE );
    }
#endif

    DebugTrace( -1, Dbg, ("NtfsReinitializeCachedRuns -> VOID\n") );

    return;
}
  6685. //
  6686. // Local support routine
  6687. //
  6688. VOID
  6689. NtfsUninitializeCachedRuns (
  6690. IN PNTFS_CACHED_RUNS CachedRuns
  6691. )
  6692. /*++
  6693. Routine Description:
  6694. This routine is called to clean up the cached run information.
  6695. Arguments:
  6696. CachedRuns - Pointer to a cached run structure. Be defensive and check that
  6697. it is really initialized.
  6698. Return Value:
  6699. None
  6700. --*/
  6701. {
  6702. PAGED_CODE();
  6703. DebugTrace( +1, Dbg, ("NtfsUninitializeCachedRuns\n") );
  6704. if (CachedRuns->LcnArray != NULL) {
  6705. NtfsFreePool( CachedRuns->LcnArray );
  6706. CachedRuns->LcnArray = NULL;
  6707. }
  6708. if (CachedRuns->LengthArray != NULL) {
  6709. NtfsFreePool( CachedRuns->LengthArray );
  6710. CachedRuns->LengthArray = NULL;
  6711. }
  6712. if (CachedRuns->BinArray != NULL) {
  6713. NtfsFreePool( CachedRuns->BinArray );
  6714. CachedRuns->BinArray = NULL;
  6715. }
  6716. if (CachedRuns->DeletedLcnWindows != NULL) {
  6717. NtfsFreePool( CachedRuns->DeletedLcnWindows );
  6718. CachedRuns->DeletedLcnWindows = NULL;
  6719. }
  6720. if (CachedRuns->DeletedLengthWindows != NULL) {
  6721. NtfsFreePool( CachedRuns->DeletedLengthWindows );
  6722. CachedRuns->DeletedLengthWindows = NULL;
  6723. }
  6724. CachedRuns->Used = 0;
  6725. CachedRuns->Avail = 0;
  6726. CachedRuns->DelLcnCount = 0;
  6727. CachedRuns->DelLengthCount = 0;
  6728. CachedRuns->Bins = 0;
  6729. DebugTrace( -1, Dbg, ("NtfsUninitializeCachedRuns -> VOID\n") );
  6730. return;
  6731. }
  6732. //
  6733. // Local support routine
  6734. //
  6735. BOOLEAN
  6736. NtfsLookupCachedLcn (
  6737. IN PNTFS_CACHED_RUNS CachedRuns,
  6738. IN LCN Lcn,
  6739. OUT PLCN StartingLcn,
  6740. OUT PLONGLONG RunLength,
  6741. OUT PUSHORT Index OPTIONAL
  6742. )
  6743. /*++
  6744. Routine Description:
  6745. This routine is called to look up a specific Lcn in the cached runs structure.
  6746. If found it will return the entire run. It will also return the index in the
  6747. Lcn array to use as an optimization in a later call.
  6748. Arguments:
  6749. CachedRuns - Pointer to the cached runs structure.
  6750. Lcn - This is the desired Lcn.
  6751. StartingLcn - Address to store the Lcn which begins the run in the cached
  6752. structure. Typically this is the same as the Lcn above.
  6753. RunLength - Address to store the length of the found run.
  6754. Index - If specified we store the index for the run we found. This can be
  6755. used as an optimization if we later remove the run.
  6756. Return Value:
  6757. BOOLEAN - TRUE if the run was found. FALSE otherwise.
  6758. --*/
  6759. {
  6760. PNTFS_LCN_CLUSTER_RUN ThisEntry;
  6761. USHORT FoundIndex;
  6762. BOOLEAN FoundLcn;
  6763. PAGED_CODE();
  6764. DebugTrace( +1, Dbg, ("NtfsLookupCachedLcn\n") );
  6765. #ifdef NTFS_CHECK_CACHED_RUNS
  6766. ASSERT( (CachedRuns->Vcb == NULL) ||
  6767. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  6768. #endif
  6769. //
  6770. // Lookup a run containing the specific Lcn.
  6771. //
  6772. FoundLcn = NtfsPositionCachedLcn( CachedRuns,
  6773. Lcn,
  6774. &FoundIndex );
  6775. //
  6776. // If we found the Lcn then return the full run.
  6777. //
  6778. if (FoundLcn) {
  6779. ThisEntry = CachedRuns->LcnArray + FoundIndex;
  6780. *StartingLcn = ThisEntry->Lcn;
  6781. *RunLength = ThisEntry->RunLength;
  6782. }
  6783. if (ARGUMENT_PRESENT( Index )) {
  6784. *Index = FoundIndex;
  6785. }
  6786. DebugTrace( -1, Dbg, ("NtfsLookupCachedLcn -> %01x\n", FoundLcn) );
  6787. return FoundLcn;
  6788. }
  6789. //
  6790. // Local support routine
  6791. //
  6792. BOOLEAN
  6793. NtfsGetNextCachedLcn (
  6794. IN PNTFS_CACHED_RUNS CachedRuns,
  6795. IN USHORT Index,
  6796. OUT PLCN StartingLcn,
  6797. OUT PLONGLONG RunLength
  6798. )
  6799. /*++
  6800. Routine Description:
  6801. This routine is called to find an entry in the Lcn array by position.
  6802. It is assumed that the entry is not deleted.
  6803. Arguments:
  6804. CachedRuns - Pointer to the cached runs structure.
  6805. Index - Index to look up. It might point beyond the array.
  6806. StartingLcn - Address to store the Lcn at this position.
  6807. RunLength - Address to store the RunLength at this position.
  6808. Return Value:
  6809. BOOLEAN - TRUE if an entry was found, FALSE otherwise.
  6810. --*/
  6811. {
  6812. PNTFS_LCN_CLUSTER_RUN ThisEntry;
  6813. BOOLEAN FoundRun = FALSE;
  6814. PAGED_CODE();
  6815. DebugTrace( +1, Dbg, ("NtfsGetNextCachedLcn\n") );
  6816. #ifdef NTFS_CHECK_CACHED_RUNS
  6817. ASSERT( (CachedRuns->Vcb == NULL) ||
  6818. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  6819. #endif
  6820. //
  6821. // If the input index is within the array then return the run.
  6822. //
  6823. if (Index < CachedRuns->Used) {
  6824. ThisEntry = CachedRuns->LcnArray + Index;
  6825. ASSERT( ThisEntry->RunLength );
  6826. *StartingLcn = ThisEntry->Lcn;
  6827. *RunLength = ThisEntry->RunLength;
  6828. FoundRun = TRUE;
  6829. }
  6830. DebugTrace( -1, Dbg, ("NtfsGetNextCachedLcn -> %01x\n", FoundRun) );
  6831. return FoundRun;
  6832. }
  6833. //
  6834. // Local support routine
  6835. //
BOOLEAN
NtfsLookupCachedLcnByLength (
    IN PNTFS_CACHED_RUNS CachedRuns,
    IN LONGLONG Length,
    IN BOOLEAN AllowShorter,
    IN LCN Lcn,
    OUT PLCN StartingLcn,
    OUT PLONGLONG RunLength,
    OUT PUSHORT Index OPTIONAL
    )

/*++

Routine Description:

    This routine is called to look up a cached run of a certain length.  We
    give caller any run of the given length or longer if possible.  If there
    is no such entry, we will use a shorter entry if allowed.

Arguments:

    CachedRuns - Pointer to the cached run structure.

    Length - Length of the run we are interested in.

    AllowShorter - whether to accept a shorter length run if nothing else is available

    Lcn - We try to find the run which is closest to this Lcn, but has the
        requested Length.

    StartingLcn - Address to store the starting Lcn of the run we found.

    RunLength - Address to store the length of the run we found.

    Index - If specified then this is the index in the RunLength array
        of the entry we found.  It can be used later to remove the entry.

Return Value:

    BOOLEAN - TRUE if an entry was found, FALSE otherwise.

--*/

{
    USHORT FoundIndex;
    PNTFS_LCN_CLUSTER_RUN ThisEntry;
    PNTFS_DELETED_RUNS DelWindow;
    BOOLEAN FoundRun;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsLookupCachedLcnByLength\n") );

#ifdef NTFS_CHECK_CACHED_RUNS
    ASSERT( (CachedRuns->Vcb == NULL) ||
            NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
#endif

    //
    //  Position ourselves for a run of a particular length.
    //

    FoundRun = NtfsPositionCachedLcnByLength( CachedRuns,
                                              Length,
                                              &Lcn,
                                              NULL,
                                              TRUE,
                                              &FoundIndex );

    if (!FoundRun) {

        //
        //  We didn't find a run with the desired length.  However if
        //  we aren't pointing past the end of the array then there
        //  is an entry available we can use (the length-sorted list is
        //  ascending, so this entry is at least as long as requested).
        //

        if (FoundIndex < CachedRuns->Used) {

            FoundRun = TRUE;

        } else if (AllowShorter && (CachedRuns->Used > 0)) {

            //
            //  There are no larger entries, but there might be smaller
            //  ones and the caller has indicated we can use them.  The
            //  entry at the end of the list should be the largest
            //  available.
            //

            FoundIndex = CachedRuns->Used - 1;
            FoundRun = TRUE;
        }

        //
        //  Check and see if there is a suitable element at or near this index.
        //

        if (FoundRun) {

            //
            //  The entry has been deleted.  Get the window of deleted
            //  entries that covers it and see if there is a usable entry on either side.
            //

            if (CachedRuns->LengthArray[ FoundIndex ] == NTFS_CACHED_RUNS_DEL_INDEX) {

                DelWindow = NtfsGetDelWindow( CachedRuns,
                                              FoundIndex,
                                              FoundIndex,
                                              FALSE,
                                              NULL);

                ASSERT( DelWindow );
                ASSERT( DelWindow->StartIndex <= FoundIndex );
                ASSERT( DelWindow->EndIndex >= FoundIndex );

                //
                //  Use the entry just before the start of this window
                //  of deleted entries if one exists.  NOTE(review): this
                //  neighbor is shorter than the requested length; presumably
                //  acceptable because callers treat the result as best-effort.
                //

                if (DelWindow->StartIndex > 0) {

                    FoundIndex = DelWindow->StartIndex - 1;

                //
                //  All of the entries are deleted.
                //

                } else {

                    FoundRun = FALSE;
                }

            //
            //  If we aren't considering a shorter element then this should be a longer one.
            //

            } else {

                ASSERT( AllowShorter ||
                        (CachedRuns->LcnArray[ CachedRuns->LengthArray[ FoundIndex ]].RunLength >= Length) );
            }
        }
    }

    //
    //  If we have a run then return the run information.  FoundIndex is a
    //  position in the length-sorted array; it indirects through LengthArray
    //  to reach the actual run entry.
    //

    if (FoundRun) {

        ThisEntry = CachedRuns->LcnArray + CachedRuns->LengthArray[ FoundIndex ];
        ASSERT( ThisEntry->RunLength != 0 );

        *StartingLcn = ThisEntry->Lcn;
        *RunLength = ThisEntry->RunLength;

        if (ARGUMENT_PRESENT( Index )) {

            *Index = FoundIndex;
        }
    }

#ifdef NTFS_CHECK_CACHED_RUNS
    if (NtfsDoVerifyCachedRuns) {

        NtfsVerifyCachedRuns( CachedRuns, FALSE, FALSE );
    }
#endif

    DebugTrace( -1, Dbg, ("NtfsLookupCachedLcnByLength -> %01x\n", FoundRun) );

    return FoundRun;
}
  6960. //
  6961. // Local support routine
  6962. //
VOID
NtfsAddDelWindow (
    IN PNTFS_CACHED_RUNS CachedRuns,
    IN USHORT FirstIndex,
    IN USHORT LastIndex,
    IN BOOLEAN LcnList
    )

/*++

Routine Description:

    This routine is called to add the given range of indices to a window
    of entries known to be deleted.  If the entries are adjacent to an
    existing window, that window is extended.  Otherwise a new window is
    allocated.  If there is no space in the array to add a new window, the
    list is compacted.  Therefore, callers should be aware that indices may
    change across this call.  However we do guarantee that the indices in
    [FirstIndex..LastIndex] will not move.

    It is assumed that no window already includes the given index range.

Arguments:

    CachedRuns - Pointer to the cached runs structure.

    FirstIndex - Index that marks the start of the range of deleted entries.

    LastIndex - The index of the last entry in the range of deleted entries.

    LcnList - If TRUE, the indices are from the Lcn-sorted list.
        If FALSE, the indices are from the length-sorted list.

Return Value:

    None.

--*/

{
    USHORT WindowIndex;
    PUSHORT Count;
    PNTFS_DELETED_RUNS FirstWindow;
    PNTFS_DELETED_RUNS DelWindow;
    PNTFS_DELETED_RUNS NextWindow;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsAddDelWindow\n") );

#ifdef NTFS_CHECK_CACHED_RUNS
    ASSERT( (CachedRuns->Vcb == NULL) ||
            NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
#endif

    //
    //  Get pointers to the windows we will be updating.  Count and
    //  FirstWindow select either the Lcn-sorted or the length-sorted
    //  deleted-window array; all work below is against that one array.
    //

    if (LcnList) {

        Count = &CachedRuns->DelLcnCount;
        FirstWindow = CachedRuns->DeletedLcnWindows;

    } else {

        Count = &CachedRuns->DelLengthCount;
        FirstWindow = CachedRuns->DeletedLengthWindows;
    }

    ASSERT( *Count <= NTFS_CACHED_RUNS_MAX_DEL_WINDOWS );

    //
    //  This loop normally executes once.  The only repeat path is the
    //  'continue' after NtfsCompactCachedRuns below, which can shuffle
    //  the window array and forces us to recompute the insert position.
    //

    while (TRUE) {

        DebugTrace( 0, Dbg, ("*Count=%04x, FirstIndex=%04x, LastIndex=%04x\n", *Count, FirstIndex, LastIndex) );

        if (*Count != 0) {

            //
            //  Get the window of deleted entries that is closest to the range
            //  of indices we are adding.
            //

            DelWindow = NtfsGetDelWindow(CachedRuns,
                                         FirstIndex,
                                         LastIndex,
                                         LcnList,
                                         &WindowIndex );

            ASSERT( DelWindow != NULL );

            //
            //  Caller guarantees the new range does not overlap any
            //  existing window.
            //

            ASSERT( (DelWindow->EndIndex < FirstIndex) || (DelWindow->StartIndex > LastIndex) );

            DebugTrace( 0, Dbg, ("WindowIndex=%04x, StartIndex=%04x, EndIndex=%04x\n",
                        WindowIndex, DelWindow->StartIndex, DelWindow->EndIndex) );

            //
            //  Check if our range extends this window.
            //

            if (DelWindow->EndIndex == (FirstIndex - 1)) {

                //
                //  Extend this window upwards.
                //

                DebugTrace( 0, Dbg, ("Extend window up from %04x to %04x\n",
                            DelWindow->EndIndex, LastIndex) );

                DelWindow->EndIndex = LastIndex;

                //
                //  If not the last window then check if we ajoin the following window.
                //

                if (WindowIndex < (*Count - 1) ) {

                    NextWindow = DelWindow + 1;
                    ASSERT( NextWindow->StartIndex > LastIndex );

                    if (NextWindow->StartIndex == (LastIndex + 1) ) {

                        //
                        //  Combine these two windows.
                        //

                        DebugTrace( 0, Dbg, ("Combine with next window up to %04x\n",
                                    NextWindow->EndIndex) );

                        DelWindow->EndIndex = NextWindow->EndIndex;

                        //
                        //  Delete the additional window.
                        //

                        NtfsDeleteDelWindow( CachedRuns,
                                             LcnList,
                                             WindowIndex + 1 );
                    }
                }

                //
                //  Extension done — no new window needed.
                //

                break;

            //
            //  Check if we extend this window downwards.
            //

            } else if (DelWindow->StartIndex == (LastIndex + 1)) {

                DebugTrace( 0, Dbg, ("Extend window down from %04x to %04x\n",
                            DelWindow->EndIndex, FirstIndex) );

                DelWindow->StartIndex = FirstIndex;

                //
                //  Check if we join the previous window if present.
                //

                if (WindowIndex > 0) {

                    NextWindow = DelWindow - 1;
                    ASSERT( NextWindow->EndIndex < FirstIndex );

                    if (NextWindow->EndIndex == (FirstIndex - 1) ) {

                        //
                        //  Combine these two windows.
                        //

                        DebugTrace( 0,
                                    Dbg,
                                    ("Combine with prev window up to %04x\n", NextWindow->StartIndex) );

                        NextWindow->EndIndex = DelWindow->EndIndex;

                        //
                        //  Delete the unused window.
                        //

                        NtfsDeleteDelWindow( CachedRuns,
                                             LcnList,
                                             WindowIndex );
                    }
                }

                break;

            //
            //  Add a new window after the window we found.
            //

            } else if (DelWindow->EndIndex < FirstIndex) {

                //
                //  Insert the new window after WindowIndex.
                //

                DebugTrace( 0, Dbg, ("New window at %04x + 1\n", WindowIndex) );
                WindowIndex += 1;

            } else {

                //
                //  Insert the new window at WindowIndex.  (The found window
                //  sorts entirely above our range, so it and everything after
                //  it will be shifted right below.)
                //

                DebugTrace( 0, Dbg, ("New window at %04x\n", WindowIndex) );
            }

        } else {

            //
            //  Just create a new window at index 0.
            //

            DebugTrace( 0, Dbg, ("First new window\n") );
            WindowIndex = 0;
        }

        //
        //  If we reach this point then we need to make a new window.  We have the position
        //  we want to put the window.
        //
        //  If we don't have an available run then compact two of the existing runs.
        //

        if (*Count == NTFS_CACHED_RUNS_MAX_DEL_WINDOWS) {

            DebugTrace( 0, Dbg, ("Compact\n") );

            NtfsCompactCachedRuns( CachedRuns,
                                   FirstIndex,
                                   LastIndex,
                                   LcnList );

            ASSERT( *Count < NTFS_CACHED_RUNS_MAX_DEL_WINDOWS );

            //
            //  Retry the loop to find the correct window position since
            //  compaction may have moved or merged windows.
            //

            continue;
        }

        //
        //  Position ourselves at the insert point.
        //

        DelWindow = FirstWindow + WindowIndex;

        //
        //  Right copy the windows to make a space if we aren't at the end.
        //

        if (WindowIndex < *Count) {

            DebugTrace( 0, Dbg, ("Copy up window indices from %04x, %04x entries\n",
                        WindowIndex,
                        *Count - WindowIndex) );

            RtlMoveMemory( DelWindow + 1,
                           DelWindow,
                           sizeof( NTFS_DELETED_RUNS ) * (*Count - WindowIndex) );
        }

        //
        //  Put the new information in DelWindow
        //

        DelWindow->StartIndex = FirstIndex;
        DelWindow->EndIndex = LastIndex;

        //
        //  Increment the windows count.
        //

        *Count += 1;
        break;
    }

    //
    //  On exit the array we operated on must be non-empty.
    //

    ASSERT( (CachedRuns->DelLcnCount > 0) || !LcnList );
    ASSERT( (CachedRuns->DelLengthCount > 0) || LcnList );

#ifdef NTFS_CHECK_CACHED_RUNS
    if (NtfsDoVerifyCachedRuns) {

        //
        //  Make certain that the windows are in order and don't overlap.
        //  (DelWindow trails NextWindow by one element; adjacent windows
        //  must be separated by at least one live entry or they should
        //  have been merged above.)
        //

        for (WindowIndex = 0, DelWindow = NextWindow = FirstWindow;
             WindowIndex < *Count;
             WindowIndex += 1, NextWindow += 1) {

            ASSERT( NextWindow->StartIndex <= NextWindow->EndIndex );

            if (NextWindow != DelWindow) {

                ASSERT( NextWindow->StartIndex > (DelWindow->EndIndex + 1) );
                DelWindow += 1;
            }
        }
    }
#endif

    DebugTrace( -1, Dbg, ("NtfsAddDelWindow -> VOID\n") );
    return;
}
  7177. //
  7178. // Local support routine
  7179. //
  7180. VOID
  7181. NtfsShrinkDelWindow (
  7182. IN PNTFS_CACHED_RUNS CachedRuns,
  7183. IN BOOLEAN ShrinkFromStart,
  7184. IN BOOLEAN LcnWindow,
  7185. IN USHORT WindowIndex
  7186. )
  7187. /*++
  7188. Routine Description:
  7189. This routine is called to remove one entry from the given window
  7190. of entries known to be deleted.
  7191. Arguments:
  7192. CachedRuns - Pointer to the cached runs structure.
  7193. ShrinkFromStart - If TRUE, remove the first entry in the window.
  7194. If FALSE, remove the last entry in the window.
  7195. LcnWindow - If TRUE, the window is of Lcn indices. If FALSE, the window is
  7196. of length indices.
  7197. WindowIndex - The index of the window.
  7198. Return Value:
  7199. None.
  7200. --*/
  7201. {
  7202. PUSHORT Count;
  7203. PNTFS_DELETED_RUNS DelWindow;
  7204. PAGED_CODE();
  7205. DebugTrace( +1, Dbg, ("NtfsShrinkDelWindow\n") );
  7206. DebugTrace( 0, Dbg, ("WindowIndex %04x\n", WindowIndex) );
  7207. #ifdef NTFS_CHECK_CACHED_RUNS
  7208. ASSERT( (CachedRuns->Vcb == NULL) ||
  7209. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  7210. #endif
  7211. if (LcnWindow) {
  7212. Count = &CachedRuns->DelLcnCount;
  7213. DelWindow = (CachedRuns->DeletedLcnWindows + WindowIndex);
  7214. } else {
  7215. Count = &CachedRuns->DelLengthCount;
  7216. DelWindow = (CachedRuns->DeletedLengthWindows + WindowIndex);
  7217. }
  7218. //
  7219. // Caller better give us something in the correct range.
  7220. //
  7221. ASSERT( WindowIndex < *Count );
  7222. //
  7223. // If the window has a single entry then remove it.
  7224. //
  7225. if (DelWindow->StartIndex == DelWindow->EndIndex) {
  7226. NtfsDeleteDelWindow( CachedRuns,
  7227. LcnWindow,
  7228. WindowIndex );
  7229. //
  7230. // Remove the first entry if desired.
  7231. //
  7232. } else if (ShrinkFromStart) {
  7233. DelWindow->StartIndex += 1;
  7234. //
  7235. // Otherwise the last entry.
  7236. //
  7237. } else {
  7238. DelWindow->EndIndex -= 1;
  7239. }
  7240. #ifdef NTFS_CHECK_CACHED_RUNS
  7241. if (NtfsDoVerifyCachedRuns) {
  7242. PNTFS_DELETED_RUNS FirstWindow;
  7243. PNTFS_DELETED_RUNS NextWindow;
  7244. USHORT Index;
  7245. //
  7246. // Make certain that the windows are in order and don't overlap.
  7247. //
  7248. if (LcnWindow) {
  7249. FirstWindow = CachedRuns->DeletedLcnWindows;
  7250. } else {
  7251. FirstWindow = CachedRuns->DeletedLengthWindows;
  7252. }
  7253. for (Index = 0, DelWindow = NextWindow = FirstWindow;
  7254. Index < *Count;
  7255. Index += 1, NextWindow += 1) {
  7256. ASSERT( NextWindow->StartIndex <= NextWindow->EndIndex );
  7257. if (NextWindow != DelWindow) {
  7258. ASSERT( NextWindow->StartIndex > (DelWindow->EndIndex + 1) );
  7259. DelWindow += 1;
  7260. }
  7261. }
  7262. }
  7263. #endif
  7264. DebugTrace( -1, Dbg, ("NtfsShrinkDelWindow -> VOID\n") );
  7265. return;
  7266. }
  7267. //
  7268. // Local support routine
  7269. //
  7270. VOID
  7271. NtfsDeleteDelWindow (
  7272. IN PNTFS_CACHED_RUNS CachedRuns,
  7273. IN BOOLEAN LcnWindow,
  7274. IN USHORT WindowIndex
  7275. )
  7276. /*++
  7277. Routine Description:
  7278. This routine is called to remove the given window of entries known to
  7279. be deleted.
  7280. Arguments:
  7281. CachedRuns - Pointer to the cached runs structure.
  7282. LcnWindow - If TRUE, the window is of Lcn indices. If FALSE, the window is of length indices.
  7283. WindowIndex - The index of the window.
  7284. Return Value:
  7285. None.
  7286. --*/
  7287. {
  7288. PUSHORT Count;
  7289. PNTFS_DELETED_RUNS FirstWindow;
  7290. PAGED_CODE();
  7291. DebugTrace( +1, Dbg, ("NtfsDeleteDelWindow\n") );
  7292. DebugTrace( 0, Dbg, ("WindowIndex %04x\n", WindowIndex) );
  7293. #ifdef NTFS_CHECK_CACHED_RUNS
  7294. ASSERT( (CachedRuns->Vcb == NULL) ||
  7295. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  7296. #endif
  7297. //
  7298. // Use the correct deleted window array.
  7299. //
  7300. if (LcnWindow) {
  7301. Count = &CachedRuns->DelLcnCount;
  7302. FirstWindow = CachedRuns->DeletedLcnWindows;
  7303. } else {
  7304. Count = &CachedRuns->DelLengthCount;
  7305. FirstWindow = CachedRuns->DeletedLengthWindows;
  7306. }
  7307. //
  7308. // Delete this window by shifting any existing windows from the right.
  7309. //
  7310. if (WindowIndex < (*Count - 1)) {
  7311. //
  7312. // Remove the deleted window.
  7313. //
  7314. DebugTrace( 0,
  7315. Dbg,
  7316. ("Move from WindowIndex %04x, %04x entries\n", WindowIndex + 1, *Count - 1 - WindowIndex) );
  7317. RtlMoveMemory( FirstWindow + WindowIndex,
  7318. FirstWindow + WindowIndex + 1,
  7319. sizeof( NTFS_DELETED_RUNS ) * (*Count - 1 - WindowIndex) );
  7320. }
  7321. //
  7322. // Decrement the windows count.
  7323. //
  7324. *Count -= 1;
  7325. #ifdef NTFS_CHECK_CACHED_RUNS
  7326. if (NtfsDoVerifyCachedRuns) {
  7327. PNTFS_DELETED_RUNS DelWindow;
  7328. PNTFS_DELETED_RUNS NextWindow;
  7329. //
  7330. // Make certain that the windows are in order and don't overlap.
  7331. //
  7332. for (WindowIndex = 0, DelWindow = NextWindow = FirstWindow;
  7333. WindowIndex < *Count;
  7334. WindowIndex += 1, NextWindow += 1) {
  7335. ASSERT( NextWindow->StartIndex <= NextWindow->EndIndex );
  7336. //
  7337. // Check against previous window if not at the first element. We don't allow
  7338. // adjacent windows to touch because they should have merged.
  7339. //
  7340. if (NextWindow != DelWindow) {
  7341. ASSERT( NextWindow->StartIndex > (DelWindow->EndIndex + 1) );
  7342. DelWindow += 1;
  7343. }
  7344. }
  7345. }
  7346. #endif
  7347. DebugTrace( -1, Dbg, ("NtfsDeleteDelWindow -> VOID\n") );
  7348. return;
  7349. }
  7350. //
  7351. // Local support routine
  7352. //
PNTFS_DELETED_RUNS
NtfsGetDelWindow (
    IN PNTFS_CACHED_RUNS CachedRuns,
    IN USHORT FirstIndex,
    IN USHORT LastIndex,
    IN BOOLEAN LcnList,
    OUT PUSHORT WindowIndex OPTIONAL
    )

/*++

Routine Description:

    This routine is called to find the window of entries known to be deleted
    that is closest to the given range of indices.

Arguments:

    CachedRuns - Pointer to the cached runs structure.

    FirstIndex - Index that marks the start of the range.

    LastIndex - The index of the last entry in the range.

    LcnList - If TRUE, the indices are from the Lcn-sorted list.
        If FALSE, the indices are from the length-sorted list.

    WindowIndex - If specified, the index of the returned window is put here.

Return Value:

    PNTFS_DELETED_RUNS - Returns the closest window of deleted entries, or
        NULL is there are no windows.

--*/

{
    USHORT Count;
    USHORT Distance;
    USHORT Max, Min, Current;
    BOOLEAN Overlap = FALSE;
    PNTFS_DELETED_RUNS FirstWindow, NextWindow;
    PNTFS_DELETED_RUNS DelWindow = NULL;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsGetDelWindow\n") );

#ifdef NTFS_CHECK_CACHED_RUNS
    ASSERT( (CachedRuns->Vcb == NULL) ||
            NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
#endif

    //
    //  Get pointers to the windows we will be searching.
    //

    if (LcnList) {

        Count = CachedRuns->DelLcnCount;
        FirstWindow = CachedRuns->DeletedLcnWindows;

    } else {

        Count = CachedRuns->DelLengthCount;
        FirstWindow = CachedRuns->DeletedLengthWindows;
    }

    //
    //  If there are no windows at all we fall through and return NULL.
    //

    if (Count != 0) {

        //
        //  Perform a binary search to find the next element to the right.
        //  We always do at least one comparison to determine if a single element
        //  is to the left or right.
        //

        Min = 0;
        Max = Count - 1;

        while (TRUE) {

            //
            //  The ULONG cast avoids USHORT overflow in (Max + Min).
            //

            Current = (USHORT) (((ULONG) Max + Min) / 2);
            NextWindow = FirstWindow + Current;

            if (LastIndex < NextWindow->StartIndex) {

                //
                //  We are done if Max and Min match.  We test before changing Max
                //  because if Min is still 0 then we've never looked at it.
                //

                if (Min == Max) {

                    break;
                }

                Max = Current;

            } else if (LastIndex > NextWindow->EndIndex) {

                //
                //  Advance Min past this point.
                //

                Min = Current + 1;

                //
                //  Break if past Max.  This should only occur if our range is
                //  past the last window.
                //

                if (Min > Max) {

                    ASSERT( Min == Count );
                    break;
                }

            } else {

                //
                //  Simple case.  This is an overlap.
                //

                Overlap = TRUE;
                Min = Current;
                break;
            }
        }

        //
        //  Now find nearest.  First check if we are beyond the end of the array.
        //

        if (Min == Count) {

            //
            //  The range is past every window; the last window is closest.
            //

            Min = Count - 1;

        //
        //  If we aren't at the first entry and didn't already detect an overlap then
        //  compare adjacent entries.
        //

        } else if ((Min != 0) && !Overlap) {

            //
            //  Min currently names the first window to the right of the
            //  range; DelWindow is the window just below it.
            //

            DelWindow = FirstWindow + Min - 1;
            NextWindow = DelWindow + 1;

            //
            //  Test that there is no overlap with the previous
            //  window.  If no overlap then check for the distance to
            //  the adjacent ranges.
            //

            if (FirstIndex > DelWindow->EndIndex) {

                ASSERT( NextWindow->StartIndex > LastIndex );
                Distance = NextWindow->StartIndex - LastIndex;

                //
                //  Strict '>' means a tie is resolved in favor of the
                //  window on the right.
                //

                if (Distance > FirstIndex - DelWindow->EndIndex) {

                    //
                    //  Move to the previous window.
                    //

                    Min -= 1;
                }

            //
            //  The previous window has an overlap.
            //

            } else {

                Min -= 1;
            }
        }

        if (ARGUMENT_PRESENT( WindowIndex )) {

            *WindowIndex = Min;
        }

        DelWindow = FirstWindow + Min;
        DebugTrace( 0, Dbg, ("Index -> %04x\n", Min) );
    }

    DebugTrace( -1, Dbg, ("NtfsGetDelWindow -> 0x%x\n", DelWindow) );
    return DelWindow;
}
  7483. //
  7484. // Local support routine
  7485. //
  7486. USHORT
  7487. NtfsGetCachedLengthInsertionPoint (
  7488. IN PNTFS_CACHED_RUNS CachedRuns,
  7489. IN LCN Lcn,
  7490. IN LONGLONG Length
  7491. )
  7492. /*++
  7493. Routine Description:
  7494. This routine is called to add a new entry in the Lcn-sorted and
  7495. length-sorted lists. It is assumed that the caller has made certain
  7496. that this new entry will not overlap any existing entries.
  7497. This routine may chose not to add the new entry to the lists. If adding
  7498. this entry would force an equally or more desirable run out of the cache,
  7499. we don't make the change.
  7500. This routine can compact the lists. Therefore, the caller should not
  7501. assume that entries will not move.
  7502. If this routine finds an insertion point and there is already an undeleted
  7503. at that position, the new run sorts before it. If the new run sorts
  7504. higher than the entry at index CachedRuns->Avail - 1, we will return an
  7505. index of CachedRuns->Avail. The caller must check for this case and
  7506. not access an entry beyond the list size.
  7507. Arguments:
  7508. CachedRuns - Pointer to the cached runs structure.
  7509. Lcn - Lcn to insert.
  7510. Length - Length of the run to insert.
  7511. Return Value:
  7512. USHORT - The index into the length-sorted table at which the given Length
  7513. should be inserted. If the entry should not be inserted,
  7514. NTFS_CACHED_RUNS_DEL_INDEX is returned.
  7515. --*/
  7516. {
  7517. BOOLEAN FoundRun;
  7518. USHORT Index;
  7519. LONGLONG RunLength;
  7520. PNTFS_LCN_CLUSTER_RUN ThisEntry;
  7521. PAGED_CODE();
  7522. DebugTrace( +1, Dbg, ("NtfsGetCachedLengthInsertionPoint\n") );
  7523. #ifdef NTFS_CHECK_CACHED_RUNS
  7524. ASSERT( (CachedRuns->Vcb == NULL) ||
  7525. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  7526. #endif
  7527. if ((CachedRuns->Used == CachedRuns->Avail) &&
  7528. (CachedRuns->DelLengthCount == 0) ) {
  7529. //
  7530. // Grow the lists.
  7531. //
  7532. if (!NtfsGrowCachedRuns( CachedRuns )) {
  7533. //
  7534. // We couldn't grow the lists.
  7535. //
  7536. if (CachedRuns->Used == 0) {
  7537. ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
  7538. }
  7539. //
  7540. // Adding this entry will force another one to be deleted.
  7541. // Make sure the new entry is more desirable to add than
  7542. // all existing entries.
  7543. //
  7544. // The check is to make certain that we have more than enough
  7545. // entries of a size smaller than Length such that we would
  7546. // be willing to delete one.
  7547. //
  7548. RunLength = 0;
  7549. for (Index = 0;
  7550. (Index < CachedRuns->Bins) && (Index < (Length - 1) );
  7551. Index += 1) {
  7552. if (CachedRuns->BinArray[ Index ] > CachedRuns->MinCount) {
  7553. //
  7554. // We should delete an entry with RunLength = Index + 1
  7555. //
  7556. RunLength = Index + 1;
  7557. break;
  7558. }
  7559. }
  7560. if (RunLength != 0) {
  7561. //
  7562. // Find an entry of this length.
  7563. //
  7564. FoundRun = NtfsPositionCachedLcnByLength( CachedRuns,
  7565. RunLength,
  7566. NULL,
  7567. NULL,
  7568. TRUE,
  7569. &Index );
  7570. ASSERT( FoundRun );
  7571. ASSERT( (CachedRuns->LengthArray[Index] != NTFS_CACHED_RUNS_DEL_INDEX) &&
  7572. (CachedRuns->LcnArray[CachedRuns->LengthArray[Index]].RunLength != 0) );
  7573. //
  7574. // Delete the entry.
  7575. //
  7576. ThisEntry = CachedRuns->LcnArray + CachedRuns->LengthArray[ Index ];
  7577. NtfsDeleteCachedRun( CachedRuns,
  7578. CachedRuns->LengthArray[ Index ],
  7579. Index );
  7580. } else {
  7581. //
  7582. // Do not add the new entry.
  7583. //
  7584. DebugTrace( -1,
  7585. Dbg,
  7586. ("NtfsGetCachedLengthInsertionPoint -> 0x%x\n", NTFS_CACHED_RUNS_DEL_INDEX) );
  7587. return NTFS_CACHED_RUNS_DEL_INDEX;
  7588. }
  7589. }
  7590. }
  7591. //
  7592. // Get the insertion point for the new entry.
  7593. // If FoundRun is FALSE, the entry pointed to by Index is either deleted
  7594. // or sorts higher than the new one.
  7595. //
  7596. FoundRun = NtfsPositionCachedLcnByLength( CachedRuns,
  7597. Length,
  7598. &Lcn,
  7599. NULL,
  7600. TRUE,
  7601. &Index );
  7602. //
  7603. // Index points to the closest run by Lcn that has a RunLength equal
  7604. // to Length. We need to check to see if the new entry should be
  7605. // inserted before or after it.
  7606. //
  7607. if (FoundRun) {
  7608. ThisEntry = CachedRuns->LcnArray + CachedRuns->LengthArray[ Index ];
  7609. if (ThisEntry->Lcn < Lcn) {
  7610. //
  7611. // The new run should come after this one.
  7612. //
  7613. Index += 1;
  7614. }
  7615. }
  7616. DebugTrace( -1, Dbg, ("NtfsGetCachedLengthInsertionPoint -> 0x%x\n", Index) );
  7617. return Index;
  7618. }
  7619. //
  7620. // Local support routine
  7621. //
  7622. VOID
  7623. NtfsInsertCachedRun (
  7624. IN PNTFS_CACHED_RUNS CachedRuns,
  7625. IN LCN Lcn,
  7626. IN LONGLONG Length,
  7627. IN USHORT LcnIndex
  7628. )
  7629. /*++
  7630. Routine Description:
  7631. This routine is called to add a new entry in the Lcn-sorted and
  7632. length-sorted lists. It is assumed that the caller has made certain
  7633. that this new entry will not overlap any existing entries.
  7634. This routine may chose not to add the new entry to the lists. If adding
  7635. this entry would force an equally or more desirable run out of the cache,
  7636. we don't make the change.
  7637. This routine can compact the lists. Therefore, the caller should not
  7638. assume that entries will not move.
  7639. Arguments:
  7640. CachedRuns - Pointer to the cached runs structure.
  7641. Lcn - Lcn to insert.
  7642. Length - Length of the run to insert.
  7643. LcnIndex - Index into the Lcn-sorted list where this new entry should
  7644. be added. Any non-deleted entry already at this position sorts
  7645. after the new entry.
  7646. Return Value:
  7647. None.
  7648. --*/
  7649. {
  7650. USHORT Count;
  7651. USHORT RunIndex;
  7652. USHORT LenIndex;
  7653. USHORT WindowIndex;
  7654. PNTFS_DELETED_RUNS DelWindow;
  7655. PNTFS_LCN_CLUSTER_RUN ThisEntry;
  7656. PAGED_CODE();
  7657. DebugTrace( +1, Dbg, ("NtfsInsertCachedRun\n") );
  7658. #ifdef NTFS_CHECK_CACHED_RUNS
  7659. ASSERT( (CachedRuns->Vcb == NULL) ||
  7660. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  7661. #endif
  7662. //
  7663. // Find the position in the length-sorted list to insert this new
  7664. // entry. This routine will grow the lists if necessary.
  7665. //
  7666. LenIndex = NtfsGetCachedLengthInsertionPoint( CachedRuns,
  7667. Lcn,
  7668. Length );
  7669. //
  7670. // This entry will not be added to the lists because it would degrade the
  7671. // distribution of the length entries.
  7672. //
  7673. if (LenIndex == NTFS_CACHED_RUNS_DEL_INDEX) {
  7674. return;
  7675. }
  7676. //
  7677. // Find the closest window of deleted entries to LcnIndex.
  7678. //
  7679. DelWindow = NtfsGetDelWindow( CachedRuns,
  7680. LcnIndex,
  7681. LcnIndex,
  7682. TRUE,
  7683. &WindowIndex );
  7684. ASSERT( DelWindow != NULL );
  7685. ASSERT( (DelWindow->EndIndex + 1 < LcnIndex) ||
  7686. (LcnIndex < CachedRuns->Avail) );
  7687. //
  7688. // Move the entries between LcnIndex and the start of the
  7689. // window up to make room for this new entry.
  7690. //
  7691. if (DelWindow->StartIndex > LcnIndex) {
  7692. RtlMoveMemory( CachedRuns->LcnArray + LcnIndex + 1,
  7693. CachedRuns->LcnArray + LcnIndex,
  7694. sizeof( NTFS_LCN_CLUSTER_RUN ) * (DelWindow->StartIndex - LcnIndex) );
  7695. //
  7696. // Update the indices in the Length-sorted list to reflect the
  7697. // move of the lcn-sorted entries.
  7698. //
  7699. for (Count = LcnIndex + 1;
  7700. Count < DelWindow->StartIndex + 1;
  7701. Count += 1) {
  7702. RunIndex = CachedRuns->LcnArray[ Count ].LengthIndex;
  7703. ASSERT( RunIndex != NTFS_CACHED_RUNS_DEL_INDEX );
  7704. CachedRuns->LengthArray[ RunIndex ] += 1;
  7705. }
  7706. //
  7707. // Check if we are using the deleted window at the tail of the array. If
  7708. // so then we just increased the number of entries in use with this
  7709. // right shift.
  7710. //
  7711. if (DelWindow->StartIndex == CachedRuns->Used) {
  7712. CachedRuns->LengthArray[ CachedRuns->Used ] = NTFS_CACHED_RUNS_DEL_INDEX;
  7713. CachedRuns->Used += 1;
  7714. }
  7715. //
  7716. // Update the window.
  7717. //
  7718. NtfsShrinkDelWindow( CachedRuns,
  7719. TRUE,
  7720. TRUE,
  7721. WindowIndex);
  7722. //
  7723. // Check if we need to move entries down to the nearest deleted window.
  7724. //
  7725. } else if ((DelWindow->EndIndex + 1) < LcnIndex) {
  7726. //
  7727. // Update the insertion point to be LcnIndex - 1 and make a gap there
  7728. //
  7729. LcnIndex -= 1;
  7730. //
  7731. // Move the entries between the end of the window and
  7732. // LcnIndex down to make room for this new entry.
  7733. //
  7734. RtlMoveMemory( CachedRuns->LcnArray + DelWindow->EndIndex,
  7735. CachedRuns->LcnArray + DelWindow->EndIndex + 1,
  7736. sizeof( NTFS_LCN_CLUSTER_RUN ) * (LcnIndex - DelWindow->EndIndex) );
  7737. //
  7738. // Update the indices in the Length-sorted list to reflect the
  7739. // move of the lcn-sorted entries.
  7740. //
  7741. for (Count = DelWindow->EndIndex;
  7742. Count < LcnIndex;
  7743. Count += 1) {
  7744. RunIndex = CachedRuns->LcnArray[ Count ].LengthIndex;
  7745. ASSERT( RunIndex != NTFS_CACHED_RUNS_DEL_INDEX );
  7746. CachedRuns->LengthArray[ RunIndex ] -= 1;
  7747. }
  7748. //
  7749. // Update the window.
  7750. //
  7751. NtfsShrinkDelWindow( CachedRuns,
  7752. FALSE,
  7753. TRUE,
  7754. WindowIndex);
  7755. //
  7756. // The window is adjacent to LcnIndex and the entry at LcnIndex
  7757. // sorts higher than the new run. No moves are necessary.
  7758. // Decrement LcnIndex.
  7759. //
  7760. } else if ((DelWindow->EndIndex + 1) == LcnIndex) {
  7761. LcnIndex -= 1;
  7762. //
  7763. // Update the window.
  7764. //
  7765. NtfsShrinkDelWindow( CachedRuns,
  7766. FALSE,
  7767. TRUE,
  7768. WindowIndex);
  7769. } else {
  7770. //
  7771. // The window covers LcnIndex. No moves are necessary.
  7772. //
  7773. if (DelWindow->StartIndex == LcnIndex) {
  7774. //
  7775. // Update the window.
  7776. //
  7777. NtfsShrinkDelWindow( CachedRuns,
  7778. TRUE,
  7779. TRUE,
  7780. WindowIndex);
  7781. } else if (DelWindow->EndIndex == LcnIndex) {
  7782. //
  7783. // Update the window.
  7784. //
  7785. NtfsShrinkDelWindow( CachedRuns,
  7786. FALSE,
  7787. TRUE,
  7788. WindowIndex);
  7789. } else {
  7790. //
  7791. // LcnIndex does not fall on the first or last entry in
  7792. // the window, we will update it to do so. Otherwise we
  7793. // would have to split the window, with no real gain.
  7794. //
  7795. LcnIndex = DelWindow->EndIndex;
  7796. //
  7797. // Update the window.
  7798. //
  7799. NtfsShrinkDelWindow( CachedRuns,
  7800. FALSE,
  7801. TRUE,
  7802. WindowIndex);
  7803. }
  7804. }
  7805. ASSERT( LcnIndex < CachedRuns->Avail );
  7806. ASSERT( LcnIndex <= CachedRuns->Used );
  7807. //
  7808. // Find the closest window of deleted entries to LenIndex.
  7809. //
  7810. DelWindow = NtfsGetDelWindow( CachedRuns,
  7811. LenIndex,
  7812. LenIndex,
  7813. FALSE,
  7814. &WindowIndex);
  7815. ASSERT( DelWindow != NULL );
  7816. ASSERT( (DelWindow->EndIndex < (LenIndex - 1)) ||
  7817. (LenIndex < CachedRuns->Avail) );
  7818. //
  7819. // The window is to the right. Go ahead and
  7820. // move the entries between LenIndex and the start of the
  7821. // window up to make room for this new entry.
  7822. //
  7823. if (DelWindow->StartIndex > LenIndex) {
  7824. RtlMoveMemory( CachedRuns->LengthArray + LenIndex + 1,
  7825. CachedRuns->LengthArray + LenIndex,
  7826. sizeof( USHORT ) * (DelWindow->StartIndex - LenIndex) );
  7827. //
  7828. // Update the indices in the Lcn-sorted list to reflect the
  7829. // move of the length-sorted entries.
  7830. //
  7831. for (Count = LenIndex + 1;
  7832. Count < DelWindow->StartIndex + 1;
  7833. Count += 1) {
  7834. RunIndex = CachedRuns->LengthArray[ Count ];
  7835. ASSERT( RunIndex != NTFS_CACHED_RUNS_DEL_INDEX );
  7836. CachedRuns->LcnArray[ RunIndex ].LengthIndex += 1;
  7837. }
  7838. //
  7839. // We have just increased the number of entries in use with this
  7840. // right shift.
  7841. //
  7842. if (DelWindow->StartIndex == CachedRuns->Used) {
  7843. CachedRuns->LcnArray[ CachedRuns->Used ].RunLength = 0;
  7844. CachedRuns->LcnArray[ CachedRuns->Used ].LengthIndex = NTFS_CACHED_RUNS_DEL_INDEX;
  7845. CachedRuns->Used += 1;
  7846. }
  7847. //
  7848. // Update the window.
  7849. //
  7850. NtfsShrinkDelWindow( CachedRuns,
  7851. TRUE,
  7852. FALSE,
  7853. WindowIndex);
  7854. //
  7855. // The deleted window is to the left. Slide everything to the left and
  7856. // Update the insertion point to be LenIndex - 1 and make a gap there.
  7857. //
  7858. } else if ((DelWindow->EndIndex + 1) < LenIndex) {
  7859. LenIndex -= 1;
  7860. //
  7861. // Move the entries between the end of the window and
  7862. // LenIndex down to make room for this new entry.
  7863. //
  7864. RtlMoveMemory( CachedRuns->LengthArray + DelWindow->EndIndex,
  7865. CachedRuns->LengthArray + DelWindow->EndIndex + 1,
  7866. sizeof( USHORT ) * (LenIndex - DelWindow->EndIndex) );
  7867. //
  7868. // Update the indices in the Lcn-sorted list to reflect the
  7869. // move of the length-sorted entries.
  7870. //
  7871. for (Count = DelWindow->EndIndex;
  7872. Count < LenIndex;
  7873. Count += 1) {
  7874. RunIndex = CachedRuns->LengthArray[ Count ];
  7875. ASSERT( RunIndex != NTFS_CACHED_RUNS_DEL_INDEX );
  7876. CachedRuns->LcnArray[ RunIndex ].LengthIndex -= 1;
  7877. }
  7878. //
  7879. // Update the window.
  7880. //
  7881. NtfsShrinkDelWindow( CachedRuns,
  7882. FALSE,
  7883. FALSE,
  7884. WindowIndex);
  7885. //
  7886. // The window is adjacent to LenIndex and the entry at LenIndex
  7887. // sorts higher than the new run. No moves are necessary.
  7888. // Decrement LenIndex.
  7889. //
  7890. } else if ((DelWindow->EndIndex + 1) == LenIndex) {
  7891. LenIndex -= 1;
  7892. //
  7893. // Update the window.
  7894. //
  7895. NtfsShrinkDelWindow( CachedRuns,
  7896. FALSE,
  7897. FALSE,
  7898. WindowIndex);
  7899. //
  7900. // The window covers LenIndex. No moves are necessary.
  7901. //
  7902. } else {
  7903. if (DelWindow->StartIndex == LenIndex) {
  7904. //
  7905. // Update the window.
  7906. //
  7907. NtfsShrinkDelWindow( CachedRuns,
  7908. TRUE,
  7909. FALSE,
  7910. WindowIndex);
  7911. } else if (DelWindow->EndIndex == LenIndex) {
  7912. //
  7913. // Update the window.
  7914. //
  7915. NtfsShrinkDelWindow( CachedRuns,
  7916. FALSE,
  7917. FALSE,
  7918. WindowIndex);
  7919. } else {
  7920. //
  7921. // LenIndex does not fall on the first or last entry in
  7922. // the window, we will update it to do so. Otherwise we
  7923. // would have to split the window, with no real gain.
  7924. //
  7925. LenIndex = DelWindow->EndIndex;
  7926. //
  7927. // Update the window.
  7928. //
  7929. NtfsShrinkDelWindow( CachedRuns,
  7930. FALSE,
  7931. FALSE,
  7932. WindowIndex);
  7933. }
  7934. }
  7935. ASSERT( LenIndex < CachedRuns->Avail );
  7936. ASSERT( LenIndex <= CachedRuns->Used );
  7937. //
  7938. // Insert the new entry at LcnIndex, LenIndex
  7939. //
  7940. ThisEntry = CachedRuns->LcnArray + LcnIndex;
  7941. ThisEntry->Lcn = Lcn;
  7942. ThisEntry->RunLength = Length;
  7943. ThisEntry->LengthIndex = LenIndex;
  7944. CachedRuns->LengthArray[ LenIndex ] = LcnIndex;
  7945. //
  7946. // Update the count of entries of this size.
  7947. //
  7948. if (Length <= CachedRuns->Bins) {
  7949. CachedRuns->BinArray[ Length - 1 ] += 1;
  7950. }
  7951. //
  7952. // Check if we've grown the number of entries used.
  7953. //
  7954. if (LcnIndex == CachedRuns->Used) {
  7955. //
  7956. // Increase the count of the number of entries in use.
  7957. //
  7958. ASSERT( (CachedRuns->LengthArray[ CachedRuns->Used ] == NTFS_CACHED_RUNS_DEL_INDEX) ||
  7959. (LenIndex == CachedRuns->Used) );
  7960. CachedRuns->Used += 1;
  7961. }
  7962. if (LenIndex == CachedRuns->Used) {
  7963. //
  7964. // Increase the count of the number of entries in use.
  7965. //
  7966. ASSERT( (CachedRuns->LcnArray[ CachedRuns->Used ].RunLength == 0) &&
  7967. (CachedRuns->LcnArray[ CachedRuns->Used ].LengthIndex == NTFS_CACHED_RUNS_DEL_INDEX) );
  7968. CachedRuns->Used += 1;
  7969. }
  7970. #ifdef NTFS_CHECK_CACHED_RUNS
  7971. if (NtfsDoVerifyCachedRuns) {
  7972. NtfsVerifyCachedRuns( CachedRuns, FALSE, FALSE );
  7973. }
  7974. #endif
  7975. DebugTrace( -1, Dbg, ("NtfsInsertCachedRun -> VOID\n") );
  7976. return;
  7977. }
  7978. //
  7979. // Local support routine
  7980. //
  7981. VOID
  7982. NtfsDeleteCachedRun (
  7983. IN PNTFS_CACHED_RUNS CachedRuns,
  7984. IN USHORT LcnIndex,
  7985. IN USHORT LenIndex
  7986. )
  7987. /*++
  7988. Routine Description:
  7989. This routine is called to delete an Lcn run from the cached run arrays.
  7990. It is possible that the lists will be compacted. This will happen if
  7991. we use the last window of deleted entries that we are allowed to cache
  7992. for either the Lcn-sorted or Length-sorted lists. Therefore, callers
  7993. should be aware that indices may change across this call. However we do
  7994. guarantee that the indices LcnIndex and LenIndex will not move.
  7995. Arguments:
  7996. CachedRuns - Pointer to the cached runs structure.
  7997. LcnIndex - The index in the Lcn-sorted list of the entry to be deleted.
  7998. LenIndex - The index in the Length-sorted list of the entry to be deleted.
  7999. Return Value:
  8000. None.
  8001. --*/
  8002. {
  8003. PAGED_CODE();
  8004. DebugTrace( +1, Dbg, ("NtfsDeleteCachedRun\n") );
  8005. #ifdef NTFS_CHECK_CACHED_RUNS
  8006. ASSERT( (CachedRuns->Vcb == NULL) ||
  8007. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  8008. #endif
  8009. ASSERT( LcnIndex != NTFS_CACHED_RUNS_DEL_INDEX );
  8010. ASSERT( LenIndex != NTFS_CACHED_RUNS_DEL_INDEX );
  8011. //
  8012. // Update count of how many entries have this length.
  8013. //
  8014. if (CachedRuns->LcnArray[ LcnIndex ].RunLength <= CachedRuns->Bins) {
  8015. CachedRuns->BinArray[CachedRuns->LcnArray[LcnIndex].RunLength - 1] -= 1;
  8016. }
  8017. //
  8018. // Update the entries so they appear to be deleted.
  8019. //
  8020. CachedRuns->LcnArray[ LcnIndex ].RunLength = 0;
  8021. CachedRuns->LcnArray[ LcnIndex ].LengthIndex = NTFS_CACHED_RUNS_DEL_INDEX;
  8022. CachedRuns->LengthArray[ LenIndex ] = NTFS_CACHED_RUNS_DEL_INDEX;
  8023. //
  8024. // Create windows of deleted entries to cover this newly deleted
  8025. // entry.
  8026. //
  8027. NtfsAddDelWindow( CachedRuns, LcnIndex, LcnIndex, TRUE );
  8028. NtfsAddDelWindow( CachedRuns, LenIndex, LenIndex, FALSE );
  8029. #ifdef NTFS_CHECK_CACHED_RUNS
  8030. //
  8031. // We will not check sort orders in NtfsVerifyCachedRuns because we
  8032. // could be making this call as part of deleting runs that have an
  8033. // overlap with a newly inserted run. This could give false corruption
  8034. // warnings.
  8035. //
  8036. if (NtfsDoVerifyCachedRuns) {
  8037. NtfsVerifyCachedRuns( CachedRuns, TRUE, TRUE );
  8038. }
  8039. #endif
  8040. DebugTrace( -1, Dbg, ("NtfsDeleteCachedRun -> VOID\n") );
  8041. return;
  8042. }
  8043. //
  8044. // Local support routine
  8045. //
  8046. VOID
  8047. NtfsInsertCachedLcn (
  8048. IN PNTFS_CACHED_RUNS CachedRuns,
  8049. IN LCN Lcn,
  8050. IN LONGLONG Length
  8051. )
  8052. /*++
  8053. Routine Description:
  8054. This routine is called to insert an Lcn run into the cached run arrays.
  8055. Arguments:
  8056. CachedRuns - Pointer to the cached runs structure.
  8057. Lcn - Lcn to insert.
  8058. Length - Length of the run to insert.
  8059. Return Value:
  8060. None
  8061. --*/
  8062. {
  8063. USHORT NextIndex;
  8064. USHORT ThisIndex;
  8065. LCN StartingLcn;
  8066. LCN SaveLcn;
  8067. LONGLONG RunLength;
  8068. LONGLONG OldLength = 0;
  8069. LCN EndOfNewRun;
  8070. LCN EndOfThisRun;
  8071. LCN EndOfNextRun;
  8072. BOOLEAN ExtendedEntry = FALSE;
  8073. BOOLEAN ScanForOverlap = FALSE;
  8074. PNTFS_DELETED_RUNS DelWindow;
  8075. PNTFS_LCN_CLUSTER_RUN ThisEntry, NextEntry;
  8076. PAGED_CODE();
  8077. DebugTrace( +1, Dbg, ("NtfsInsertCachedLcn\n") );
  8078. #ifdef NTFS_CHECK_CACHED_RUNS
  8079. ASSERT( (CachedRuns->Vcb == NULL) ||
  8080. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  8081. #endif
  8082. //
  8083. // Return immediately if length is zero.
  8084. //
  8085. if (Length == 0) {
  8086. DebugTrace( -1, Dbg, ("NtfsInsertCachedLcn -> VOID\n") );
  8087. return;
  8088. }
  8089. //
  8090. // Lookup the Lcn at the start of our run.
  8091. //
  8092. NtfsLookupCachedLcn( CachedRuns,
  8093. Lcn,
  8094. &StartingLcn,
  8095. &RunLength,
  8096. &NextIndex );
  8097. //
  8098. // We have a run to insert. We need to deal with the following cases.
  8099. // Our strategy is to position ThisEntry at the position we want to store
  8100. // the resulting run. Then remove any subsequent runs we overlap with, possibly
  8101. // extending the run we are working with.
  8102. //
  8103. //
  8104. // 1 - We can merge with the prior run. Save that position
  8105. // and remove any following slots we overlap with.
  8106. //
  8107. // 2 - We are beyond the array. Simply store our run in
  8108. // this slot.
  8109. //
  8110. // 3 - We don't overlap with the current run. Simply slide
  8111. // the runs up and insert a new entry.
  8112. //
  8113. // 4 - We are contained within the current run. There is nothing
  8114. // we need to do.
  8115. //
  8116. // 5 - We overlap with the current run. Use that slot
  8117. // and remove any following slots we overlap with.
  8118. //
  8119. NextEntry = CachedRuns->LcnArray + NextIndex;
  8120. //
  8121. // Find a previous entry if present.
  8122. //
  8123. ThisIndex = NTFS_CACHED_RUNS_DEL_INDEX;
  8124. if (NextIndex > 0) {
  8125. ThisIndex = NextIndex - 1;
  8126. ThisEntry = CachedRuns->LcnArray + ThisIndex;
  8127. //
  8128. // If the entry has been deleted it must be ignored. Get the
  8129. // window of deleted entries that covers it.
  8130. //
  8131. if (ThisEntry->RunLength == 0) {
  8132. DelWindow = NtfsGetDelWindow( CachedRuns,
  8133. ThisIndex,
  8134. ThisIndex,
  8135. TRUE,
  8136. NULL);
  8137. ASSERT( DelWindow != NULL );
  8138. ASSERT( DelWindow->EndIndex >= ThisIndex );
  8139. ASSERT( DelWindow->StartIndex <= ThisIndex );
  8140. //
  8141. // Move to the entry just before the deleted window.
  8142. //
  8143. if (DelWindow->StartIndex > 0) {
  8144. ThisIndex = DelWindow->StartIndex - 1;
  8145. ThisEntry = CachedRuns->LcnArray + ThisIndex;
  8146. } else {
  8147. //
  8148. // All entries preceding NextEntry are deleted.
  8149. //
  8150. ThisIndex = NTFS_CACHED_RUNS_DEL_INDEX;
  8151. }
  8152. }
  8153. //
  8154. // Capture the end of the run. It's invalid if we don't have
  8155. // a real index but all of the users of this will know that.
  8156. //
  8157. EndOfThisRun = ThisEntry->Lcn + ThisEntry->RunLength;
  8158. }
  8159. //
  8160. // Let's remember the end of the next run if present.
  8161. //
  8162. EndOfNewRun = Lcn + Length;
  8163. if ((NextIndex < CachedRuns->Used) &&
  8164. (NextEntry->RunLength != 0)) {
  8165. EndOfNextRun = NextEntry->Lcn + NextEntry->RunLength;
  8166. }
  8167. //
  8168. // Case 1 - Merge with previous run.
  8169. //
  8170. if ((ThisIndex != NTFS_CACHED_RUNS_DEL_INDEX) &&
  8171. (Lcn == EndOfThisRun)) {
  8172. //
  8173. // Extend the entry in the runs array and remember the
  8174. // new length. We will defer moving the run within the
  8175. // length-sorted array until we know the final length.
  8176. // It is possible that the combined entry overlaps with
  8177. // subsequent entries. If the overlap lands in the middle
  8178. // of the final entry, the length may need to be extended
  8179. // even more.
  8180. //
  8181. Lcn = ThisEntry->Lcn;
  8182. //
  8183. // Remember the original length of the entry.
  8184. //
  8185. OldLength = ThisEntry->RunLength;
  8186. Length += ThisEntry->RunLength;
  8187. ThisEntry->RunLength = Length;
  8188. ExtendedEntry = TRUE;
  8189. ScanForOverlap = TRUE;
  8190. //
  8191. // Case 2 - We are at the end of the array
  8192. // Case 3 - We have a non-overlapping interior entry
  8193. //
  8194. } else if ((NextIndex >= CachedRuns->Used) ||
  8195. (NextEntry->RunLength == 0) ||
  8196. (EndOfNewRun < NextEntry->Lcn)) {
  8197. //
  8198. // Insert the new run in both lists.
  8199. //
  8200. NtfsInsertCachedRun( CachedRuns,
  8201. Lcn,
  8202. Length,
  8203. NextIndex );
  8204. //
  8205. // Case 4 - We are contained within the current entry.
  8206. //
  8207. } else if ((Lcn >= NextEntry->Lcn) &&
  8208. (EndOfNewRun <= EndOfNextRun)) {
  8209. NOTHING;
  8210. //
  8211. // Case 5 - We overlap the next entry. Extend the run to the end of
  8212. // current run if we end early. Extend to the beginning of the
  8213. // run if we need to.
  8214. //
  8215. } else {
  8216. //
  8217. // Remember if we are extending the run backwards.
  8218. //
  8219. if (Lcn < NextEntry->Lcn) {
  8220. //
  8221. // Move the starting point back.
  8222. //
  8223. NextEntry->Lcn = Lcn;
  8224. ExtendedEntry = TRUE;
  8225. OldLength = NextEntry->RunLength;
  8226. }
  8227. //
  8228. // Remember if we go past the end of this run.
  8229. //
  8230. if (EndOfNewRun > EndOfNextRun) {
  8231. ExtendedEntry = TRUE;
  8232. ScanForOverlap = TRUE;
  8233. OldLength = NextEntry->RunLength;
  8234. Length = EndOfNewRun - NextEntry->Lcn;
  8235. //
  8236. // Remember the full new length of this run.
  8237. //
  8238. } else {
  8239. Length = EndOfNextRun - NextEntry->Lcn;
  8240. }
  8241. //
  8242. // Update the entry and position ThisEntry to point to it.
  8243. //
  8244. NextEntry->RunLength = Length;
  8245. ThisEntry = NextEntry;
  8246. ThisIndex = NextIndex;
  8247. }
  8248. //
  8249. // Walk forward to see if we need to join with other entires.
  8250. //
  8251. if (ScanForOverlap) {
  8252. NextIndex = ThisIndex + 1;
  8253. EndOfNewRun = ThisEntry->Lcn + ThisEntry->RunLength;
  8254. while (NextIndex < CachedRuns->Used) {
  8255. NextEntry = CachedRuns->LcnArray + NextIndex;
  8256. //
  8257. // The entry has been deleted and must be ignored. Get the
  8258. // window of deleted entries that covers it.
  8259. //
  8260. if (NextEntry->RunLength == 0) {
  8261. DelWindow = NtfsGetDelWindow( CachedRuns,
  8262. NextIndex,
  8263. NextIndex,
  8264. TRUE,
  8265. NULL );
  8266. ASSERT( DelWindow );
  8267. ASSERT( DelWindow->EndIndex >= NextIndex );
  8268. ASSERT( DelWindow->StartIndex <= NextIndex );
  8269. NextIndex = DelWindow->EndIndex + 1;
  8270. continue;
  8271. }
  8272. //
  8273. // Exit if there is no overlap.
  8274. //
  8275. if (EndOfNewRun < NextEntry->Lcn) {
  8276. break;
  8277. }
  8278. //
  8279. // The runs overlap.
  8280. //
  8281. EndOfNextRun = NextEntry->Lcn + NextEntry->RunLength;
  8282. if (EndOfNewRun < EndOfNextRun) {
  8283. //
  8284. // Extend the new run.
  8285. //
  8286. ThisEntry->RunLength = EndOfNextRun - ThisEntry->Lcn;
  8287. ExtendedEntry = TRUE;
  8288. EndOfNewRun = EndOfNextRun;
  8289. }
  8290. //
  8291. // Delete the run. This can cause compaction to be run and
  8292. // that will require us to have to recalculate ThisIndex.
  8293. //
  8294. SaveLcn = ThisEntry->Lcn;
  8295. NtfsDeleteCachedRun( CachedRuns,
  8296. NextIndex,
  8297. NextEntry->LengthIndex );
  8298. //
  8299. // Check if we should recompute ThisIndex because ThisEntry must have moved
  8300. // during compaction.
  8301. //
  8302. if ((ThisEntry->Lcn != SaveLcn) ||
  8303. (ThisEntry->RunLength == 0) ) {
  8304. NtfsLookupCachedLcn( CachedRuns,
  8305. Lcn,
  8306. &StartingLcn,
  8307. &RunLength,
  8308. &ThisIndex );
  8309. ThisEntry = CachedRuns->LcnArray + ThisIndex;
  8310. //
  8311. // Reset NextIndex to point to the end after ThisIndex. That
  8312. // value may have moved due to compaction.
  8313. //
  8314. NextIndex = ThisIndex + 1;
  8315. }
  8316. if (EndOfNewRun == EndOfNextRun) {
  8317. break;
  8318. }
  8319. }
  8320. }
  8321. //
  8322. // If we changed the existing entry then update the length bins.
  8323. //
  8324. if (ExtendedEntry) {
  8325. NtfsModifyCachedBinArray( CachedRuns,
  8326. OldLength,
  8327. ThisEntry->RunLength );
  8328. //
  8329. // Move the entry to the correct position in the length-sorted array
  8330. //
  8331. NtfsGrowLengthInCachedLcn( CachedRuns,
  8332. ThisEntry,
  8333. ThisIndex );
  8334. }
  8335. #ifdef NTFS_CHECK_CACHED_RUNS
  8336. if (NtfsDoVerifyCachedRuns) {
  8337. NtfsVerifyCachedRuns( CachedRuns, FALSE, FALSE );
  8338. }
  8339. #endif
  8340. DebugTrace( -1, Dbg, ("NtfsInsertCachedLcn -> VOID\n") );
  8341. return;
  8342. }
  8343. //
  8344. // Local support routine
  8345. //
  8346. VOID
  8347. NtfsGrowLengthInCachedLcn (
  8348. IN PNTFS_CACHED_RUNS CachedRuns,
  8349. IN PNTFS_LCN_CLUSTER_RUN ThisEntry,
  8350. IN USHORT LcnIndex
  8351. )
  8352. /*++
  8353. Routine Description:
  8354. This routine is called when a run's length has been increased. This
  8355. routine makes the necessary changes to the length-sorted list.
  8356. Arguments:
  8357. CachedRuns - Pointer to the cached runs structure.
  8358. ThisEntry - Entry whose size is being changed.
  8359. LcnIndex - The index in the Lcn-sorted array of this entry.
  8360. Return Value:
  8361. None
  8362. --*/
  8363. {
  8364. BOOLEAN FoundRun;
  8365. USHORT Index;
  8366. USHORT Count;
  8367. USHORT RunIndex;
  8368. USHORT WindowIndex;
  8369. USHORT FirstWindowIndex;
  8370. PNTFS_LCN_CLUSTER_RUN OldEntry;
  8371. PNTFS_DELETED_RUNS DelWindow;
  8372. PAGED_CODE();
  8373. DebugTrace( +1, Dbg, ("NtfsGrowLengthInCachedLcn\n") );
  8374. DebugTrace( 0, Dbg, ("ThisEntry = %08lx\n", ThisEntry) );
  8375. DebugTrace( 0, Dbg, ("LcnIndex = %04x\n", LcnIndex) );
  8376. DebugTrace( 0, Dbg, ("LengthIndex = %04x\n", ThisEntry->LengthIndex) );
  8377. #ifdef NTFS_CHECK_CACHED_RUNS
  8378. ASSERT( (CachedRuns->Vcb == NULL) ||
  8379. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  8380. #endif
  8381. //
  8382. // Find the new insertion point.
  8383. //
  8384. //
  8385. // Find the nearest non-deleted entry with
  8386. // index > ThisEntry->LengthIndex.
  8387. //
  8388. if (ThisEntry->LengthIndex < (CachedRuns->Used - 1) ) {
  8389. RunIndex = ThisEntry->LengthIndex + 1;
  8390. if (CachedRuns->LengthArray[ RunIndex ] == NTFS_CACHED_RUNS_DEL_INDEX) {
  8391. //
  8392. // The entry has been deleted and must be ignored. Get the
  8393. // window of deleted entries that covers it.
  8394. //
  8395. DelWindow = NtfsGetDelWindow( CachedRuns,
  8396. RunIndex,
  8397. RunIndex,
  8398. FALSE,
  8399. NULL);
  8400. ASSERT( DelWindow != NULL );
  8401. ASSERT( DelWindow->EndIndex >= RunIndex );
  8402. ASSERT( DelWindow->StartIndex <= RunIndex );
  8403. //
  8404. // Set RunIndex to the entry just after this deleted
  8405. // window.
  8406. //
  8407. if (DelWindow->EndIndex < (CachedRuns->Used - 1)) {
  8408. RunIndex = DelWindow->EndIndex + 1;
  8409. //
  8410. // Nothing to do. The entry is still the largest in the
  8411. // list.
  8412. //
  8413. } else {
  8414. RunIndex = NTFS_CACHED_RUNS_DEL_INDEX;
  8415. }
  8416. }
  8417. //
  8418. // Nothing to do. The entry is still the largest in the list.
  8419. //
  8420. } else {
  8421. RunIndex = NTFS_CACHED_RUNS_DEL_INDEX;
  8422. }
  8423. //
  8424. // If the run is possible out of position then compare our length with the following length.
  8425. //
  8426. if (RunIndex != NTFS_CACHED_RUNS_DEL_INDEX) {
  8427. OldEntry = CachedRuns->LcnArray + CachedRuns->LengthArray[ RunIndex ];
  8428. //
  8429. // The entry will move in the list. We need to search for
  8430. // the insertion position in the
  8431. // range [RunIndex..CachedRuns->Used].
  8432. //
  8433. if ((OldEntry->RunLength < ThisEntry->RunLength) ||
  8434. ((OldEntry->RunLength == ThisEntry->RunLength) &&
  8435. (OldEntry->Lcn < ThisEntry->Lcn)) ) {
  8436. //
  8437. // Get the insertion point for the new entry.
  8438. //
  8439. FoundRun = NtfsPositionCachedLcnByLength( CachedRuns,
  8440. ThisEntry->RunLength,
  8441. &ThisEntry->Lcn,
  8442. &RunIndex,
  8443. TRUE,
  8444. &Index );
  8445. //
  8446. // Index points to the closest run by Lcn that has a RunLength
  8447. // equal to Length. We need to check to see if the new entry
  8448. // should be inserted before or after it.
  8449. //
  8450. if (FoundRun) {
  8451. OldEntry = CachedRuns->LcnArray + CachedRuns->LengthArray[ Index ];
  8452. ASSERT( OldEntry->RunLength == ThisEntry->RunLength );
  8453. //
  8454. // The new run should come before this one.
  8455. //
  8456. if (OldEntry->Lcn > ThisEntry->Lcn) {
  8457. //
  8458. // We need to adjust Index downwards.
  8459. //
  8460. Index -= 1;
  8461. }
  8462. } else {
  8463. //
  8464. // The entry pointed to by Index is either deleted or sorts
  8465. // higher than the new one. Move the insertion point back one
  8466. // position.
  8467. //
  8468. Index -= 1;
  8469. }
  8470. //
  8471. // At this point, Index indicates the new position for the entry.
  8472. // Any entry currently at Index sorts lower.
  8473. //
  8474. ASSERT( Index > ThisEntry->LengthIndex );
  8475. if (CachedRuns->LengthArray[ Index ] == NTFS_CACHED_RUNS_DEL_INDEX) {
  8476. //
  8477. // Advance Index to before the start of this window of deleted
  8478. // entries.
  8479. //
  8480. DelWindow = NtfsGetDelWindow( CachedRuns,
  8481. Index,
  8482. Index,
  8483. FALSE,
  8484. NULL);
  8485. ASSERT( DelWindow );
  8486. ASSERT( DelWindow->StartIndex <= Index );
  8487. ASSERT( DelWindow->EndIndex >= Index );
  8488. Index = DelWindow->StartIndex - 1;
  8489. }
  8490. ASSERT( Index > ThisEntry->LengthIndex );
  8491. //
  8492. // Move the entries between ThisEntry->LengthIndex + 1 and Index
  8493. // to the left.
  8494. //
  8495. RtlMoveMemory( CachedRuns->LengthArray + ThisEntry->LengthIndex,
  8496. CachedRuns->LengthArray + ThisEntry->LengthIndex + 1,
  8497. sizeof( USHORT ) * (Index - ThisEntry->LengthIndex) );
  8498. //
  8499. // Update the indices in the Lcn-sorted list to reflect
  8500. // the move of the length-sorted entries.
  8501. //
  8502. for (Count = ThisEntry->LengthIndex, DelWindow = NULL;
  8503. Count < Index;
  8504. Count += 1) {
  8505. RunIndex = CachedRuns->LengthArray[ Count ];
  8506. //
  8507. // Update the Lcn array if the length entry isn't deleted.
  8508. //
  8509. if (RunIndex != NTFS_CACHED_RUNS_DEL_INDEX) {
  8510. CachedRuns->LcnArray[ RunIndex ].LengthIndex = Count;
  8511. } else {
  8512. //
  8513. // Update the window of deleted entries.
  8514. //
  8515. if (DelWindow != NULL) {
  8516. //
  8517. // The window we want must follow the last one we
  8518. // found.
  8519. //
  8520. DelWindow += 1;
  8521. WindowIndex += 1;
  8522. ASSERT( WindowIndex < CachedRuns->DelLengthCount );
  8523. } else {
  8524. //
  8525. // Lookup the window containing the entry. Remember
  8526. // to look for Count + 1 because the window we are
  8527. // seaching for has not yet been updated.
  8528. //
  8529. DelWindow = NtfsGetDelWindow( CachedRuns,
  8530. Count + 1,
  8531. Count + 1,
  8532. FALSE,
  8533. &WindowIndex);
  8534. ASSERT( DelWindow != NULL );
  8535. FirstWindowIndex = WindowIndex;
  8536. }
  8537. ASSERT( DelWindow->StartIndex == (Count + 1) );
  8538. ASSERT( DelWindow->EndIndex < Index );
  8539. //
  8540. // Update the window.
  8541. //
  8542. DelWindow->StartIndex -= 1;
  8543. DelWindow->EndIndex -= 1;
  8544. //
  8545. // Advance Count past window.
  8546. //
  8547. Count = DelWindow->EndIndex;
  8548. }
  8549. }
  8550. //
  8551. // We may have moved the first window to the left such that
  8552. // it should be merged with the preceding window.
  8553. //
  8554. if ((DelWindow != NULL) && (FirstWindowIndex > 0) ) {
  8555. PNTFS_DELETED_RUNS PrevWindow;
  8556. DelWindow = CachedRuns->DeletedLengthWindows + FirstWindowIndex;
  8557. PrevWindow = DelWindow - 1;
  8558. if (PrevWindow->EndIndex == (DelWindow->StartIndex - 1) ) {
  8559. //
  8560. // We need to merge these windows.
  8561. //
  8562. PrevWindow->EndIndex = DelWindow->EndIndex;
  8563. NtfsDeleteDelWindow( CachedRuns,
  8564. FALSE,
  8565. FirstWindowIndex);
  8566. }
  8567. }
  8568. //
  8569. // Update the entries corresponding to ThisEntry;
  8570. //
  8571. CachedRuns->LengthArray[ Index ] = LcnIndex;
  8572. ThisEntry->LengthIndex = Index;
  8573. }
  8574. }
  8575. DebugTrace( 0, Dbg, ("Final LengthIndex = %04x\n", ThisEntry->LengthIndex) );
  8576. #ifdef NTFS_CHECK_CACHED_RUNS
  8577. if (NtfsDoVerifyCachedRuns) {
  8578. NtfsVerifyCachedRuns( CachedRuns, TRUE, TRUE );
  8579. }
  8580. #endif
  8581. DebugTrace( -1, Dbg, ("NtfsGrowLengthInCachedLcn -> VOID\n") );
  8582. return;
  8583. }
  8584. //
  8585. // Local support routine
  8586. //
  8587. VOID
  8588. NtfsShrinkLengthInCachedLcn (
  8589. IN PNTFS_CACHED_RUNS CachedRuns,
  8590. IN PNTFS_LCN_CLUSTER_RUN ThisEntry,
  8591. IN USHORT LcnIndex
  8592. )
  8593. /*++
  8594. Routine Description:
  8595. This routine is called when a run's length has been reduced. This routine
  8596. makes the necessary changes to the length-sorted list.
  8597. Arguments:
  8598. CachedRuns - Pointer to the cached runs structure.
  8599. ThisEntry - Entry whose size is being changed.
  8600. LcnIndex - The index in the Lcn-sorted array of this entry.
  8601. Return Value:
  8602. None
  8603. --*/
  8604. {
  8605. BOOLEAN FoundRun;
  8606. USHORT Index;
  8607. USHORT WindowIndex;
  8608. USHORT Count;
  8609. USHORT RunIndex;
  8610. PNTFS_LCN_CLUSTER_RUN OldEntry;
  8611. PNTFS_DELETED_RUNS DelWindow;
  8612. PAGED_CODE();
  8613. DebugTrace( +1, Dbg, ("NtfsShrinkLengthInCachedLcn\n") );
  8614. DebugTrace( 0, Dbg, ("ThisEntry = %08lx\n", ThisEntry) );
  8615. DebugTrace( 0, Dbg, ("LcnIndex = %04x\n", LcnIndex) );
  8616. DebugTrace( 0, Dbg, ("LengthIndex = %04x\n", ThisEntry->LengthIndex) );
  8617. #ifdef NTFS_CHECK_CACHED_RUNS
  8618. ASSERT( (CachedRuns->Vcb == NULL) ||
  8619. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  8620. #endif
  8621. //
  8622. // Find the nearest non-deleted entry with
  8623. // index < ThisEntry->LengthIndex.
  8624. //
  8625. if (ThisEntry->LengthIndex > 0) {
  8626. RunIndex = ThisEntry->LengthIndex - 1;
  8627. if (CachedRuns->LengthArray[ RunIndex ] == NTFS_CACHED_RUNS_DEL_INDEX) {
  8628. //
  8629. // The entry has been deleted and must be ignored. Get the
  8630. // window of deleted entries that covers it.
  8631. //
  8632. DelWindow = NtfsGetDelWindow( CachedRuns,
  8633. RunIndex,
  8634. RunIndex,
  8635. FALSE,
  8636. NULL);
  8637. ASSERT( DelWindow );
  8638. ASSERT( DelWindow->EndIndex >= RunIndex );
  8639. ASSERT( DelWindow->StartIndex <= RunIndex );
  8640. //
  8641. // Move ahead of this window if possible.
  8642. //
  8643. if (DelWindow->StartIndex > 0) {
  8644. RunIndex = DelWindow->StartIndex - 1;
  8645. //
  8646. // Nothing to do. The entry is still the smallest in the
  8647. // list.
  8648. //
  8649. } else {
  8650. RunIndex = NTFS_CACHED_RUNS_DEL_INDEX;
  8651. }
  8652. }
  8653. //
  8654. // Nothing to do. The entry is still the smallest in the list.
  8655. //
  8656. } else {
  8657. RunIndex = NTFS_CACHED_RUNS_DEL_INDEX;
  8658. }
  8659. //
  8660. // If the run is possible out of position then compare our length with the prior length.
  8661. //
  8662. if (RunIndex != NTFS_CACHED_RUNS_DEL_INDEX) {
  8663. OldEntry = CachedRuns->LcnArray + CachedRuns->LengthArray[ RunIndex ];
  8664. //
  8665. // Check for a conflict with the previous run.
  8666. //
  8667. if ((OldEntry->RunLength > ThisEntry->RunLength) ||
  8668. ((OldEntry->RunLength == ThisEntry->RunLength) &&
  8669. (OldEntry->Lcn > ThisEntry->Lcn)) ) {
  8670. //
  8671. // Get the insertion point for the new entry.
  8672. //
  8673. FoundRun = NtfsPositionCachedLcnByLength( CachedRuns,
  8674. ThisEntry->RunLength,
  8675. &ThisEntry->Lcn,
  8676. &RunIndex,
  8677. FALSE,
  8678. &Index );
  8679. //
  8680. // If found Index points to the closest run by Lcn that has a RunLength
  8681. // equal to Length. We need to check to see if the new entry
  8682. // should be inserted before or after it.
  8683. //
  8684. if (FoundRun) {
  8685. OldEntry = CachedRuns->LcnArray + CachedRuns->LengthArray[ Index ];
  8686. ASSERT( OldEntry->RunLength == ThisEntry->RunLength );
  8687. if (OldEntry->Lcn < ThisEntry->Lcn) {
  8688. //
  8689. // The new run should come after this one.
  8690. // We need to adjust Index upwards.
  8691. //
  8692. Index += 1;
  8693. DebugTrace( 0, Dbg, ("Increment Index to %04x\n", Index) );
  8694. }
  8695. }
  8696. //
  8697. // At this point, Index indicates the new position for the entry.
  8698. // Any entry currently at Index sorts higher.
  8699. //
  8700. ASSERT( Index < ThisEntry->LengthIndex );
  8701. //
  8702. // Advance Index past the end of this window of deleted
  8703. // entries.
  8704. //
  8705. if (CachedRuns->LengthArray[ Index ] == NTFS_CACHED_RUNS_DEL_INDEX) {
  8706. DelWindow = NtfsGetDelWindow( CachedRuns,
  8707. Index,
  8708. Index,
  8709. FALSE,
  8710. NULL);
  8711. ASSERT( DelWindow );
  8712. ASSERT( DelWindow->StartIndex <= Index );
  8713. ASSERT( DelWindow->EndIndex >= Index );
  8714. Index = DelWindow->EndIndex + 1;
  8715. ASSERT( Index < ThisEntry->LengthIndex );
  8716. }
  8717. // Move the entries between Index and ThisEntry->LengthIndex - 1
  8718. // to the right.
  8719. //
  8720. RtlMoveMemory( CachedRuns->LengthArray + Index + 1,
  8721. CachedRuns->LengthArray + Index,
  8722. sizeof( USHORT ) * (ThisEntry->LengthIndex - Index) );
  8723. //
  8724. // Update the indices in the Lcn-sorted list to reflect
  8725. // the move of the length-sorted entries.
  8726. //
  8727. for (Count = Index + 1, DelWindow = NULL;
  8728. Count <= ThisEntry->LengthIndex;
  8729. Count += 1) {
  8730. RunIndex = CachedRuns->LengthArray[ Count ];
  8731. //
  8732. // Update the Lcn array if the length entry isn't deleted.
  8733. //
  8734. if (RunIndex != NTFS_CACHED_RUNS_DEL_INDEX) {
  8735. CachedRuns->LcnArray[ RunIndex ].LengthIndex = Count;
  8736. } else {
  8737. //
  8738. // Update the window of deleted entries.
  8739. //
  8740. if (DelWindow != NULL) {
  8741. //
  8742. // The window we want must follow the last one we
  8743. // found.
  8744. //
  8745. DelWindow += 1;
  8746. WindowIndex += 1;
  8747. ASSERT( WindowIndex < CachedRuns->DelLengthCount );
  8748. //
  8749. // Lookup the window containing the entry. Remeber
  8750. // to look for Count - 1 because the window we are
  8751. // seaching for has not yet been updated.
  8752. //
  8753. } else {
  8754. DelWindow = NtfsGetDelWindow( CachedRuns,
  8755. Count - 1,
  8756. Count - 1,
  8757. FALSE,
  8758. &WindowIndex);
  8759. ASSERT( DelWindow != NULL );
  8760. }
  8761. ASSERT( DelWindow->StartIndex == (Count - 1) );
  8762. ASSERT( DelWindow->EndIndex < ThisEntry->LengthIndex );
  8763. //
  8764. // Update the window.
  8765. //
  8766. DelWindow->StartIndex += 1;
  8767. DelWindow->EndIndex += 1;
  8768. //
  8769. // Advance Count past window.
  8770. //
  8771. Count = DelWindow->EndIndex;
  8772. }
  8773. }
  8774. //
  8775. // We may have moved the last window to the right such that
  8776. // it should be merged with the following window.
  8777. //
  8778. if ((DelWindow != NULL) &&
  8779. ((WindowIndex + 1) < CachedRuns->DelLengthCount)) {
  8780. PNTFS_DELETED_RUNS NextWindow = DelWindow + 1;
  8781. if (DelWindow->EndIndex == (NextWindow->StartIndex - 1) ) {
  8782. //
  8783. // We need to merge these windows.
  8784. //
  8785. DelWindow->EndIndex = NextWindow->EndIndex;
  8786. NtfsDeleteDelWindow( CachedRuns,
  8787. FALSE,
  8788. WindowIndex + 1);
  8789. }
  8790. }
  8791. //
  8792. // Update the entries corresponding to ThisEntry;
  8793. //
  8794. CachedRuns->LengthArray[ Index ] = LcnIndex;
  8795. ThisEntry->LengthIndex = Index;
  8796. }
  8797. }
  8798. DebugTrace( 0, Dbg, ("Final LengthIndex = %04x\n", ThisEntry->LengthIndex) );
  8799. #ifdef NTFS_CHECK_CACHED_RUNS
  8800. if (NtfsDoVerifyCachedRuns) {
  8801. NtfsVerifyCachedRuns( CachedRuns, FALSE, FALSE );
  8802. }
  8803. #endif
  8804. DebugTrace( -1, Dbg, ("NtfsShrinkLengthInCachedLcn -> VOID\n") );
  8805. return;
  8806. }
  8807. //
  8808. // Local support routine
  8809. //
  8810. VOID
  8811. NtfsRemoveCachedLcn (
  8812. IN PNTFS_CACHED_RUNS CachedRuns,
  8813. IN LCN Lcn,
  8814. IN LONGLONG Length
  8815. )
  8816. /*++
  8817. Routine Description:
  8818. This routine is called to remove an entry from the cached run array. The run is not
  8819. guaranteed to be present.
  8820. Arguments:
  8821. CachedRuns - Pointer to the cached runs structure.
  8822. Lcn - Start of run to remove.
  8823. Length - Length of run to remove.
  8824. Return Value:
  8825. None
  8826. --*/
  8827. {
  8828. USHORT Index;
  8829. LCN StartingLcn;
  8830. LCN EndOfExistingRun;
  8831. LCN EndOfInputRun = Lcn + Length;
  8832. LONGLONG RunLength;
  8833. PNTFS_DELETED_RUNS DelWindow;
  8834. PNTFS_LCN_CLUSTER_RUN ThisEntry;
  8835. BOOLEAN FirstFragSmaller = FALSE;
  8836. BOOLEAN DontSplit = FALSE;
  8837. PAGED_CODE();
  8838. DebugTrace( +1, Dbg, ("NtfsRemoveCachedLcn\n") );
  8839. #ifdef NTFS_CHECK_CACHED_RUNS
  8840. ASSERT( (CachedRuns->Vcb == NULL) ||
  8841. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  8842. #endif
  8843. //
  8844. // Return immediately if length is zero.
  8845. //
  8846. if (Length == 0) {
  8847. DebugTrace( -1, Dbg, ("NtfsRemoveCachedLcn -> VOID\n") );
  8848. return;
  8849. }
  8850. //
  8851. // Lookup the run. If we don't find anything then point past the end
  8852. // of the array.
  8853. //
  8854. NtfsLookupCachedLcn( CachedRuns, Lcn, &StartingLcn, &RunLength, &Index );
  8855. //
  8856. // We have several cases to deal with.
  8857. //
  8858. // 1 - This run is past the end of array. Nothing to do.
  8859. // 2 - This run is not in the array. Nothing to do.
  8860. // 3 - This run starts past the beginning of a entry. Resize the entry.
  8861. // 4 - This run contains a complete array entry. Remove the entry.
  8862. // 5 - This run ends before the end of an entry. Resize the entry.
  8863. //
  8864. //
  8865. // Loop to process the case where we encounter several entries.
  8866. // Test for case 1 as the exit condition for the loop.
  8867. //
  8868. while (Index < CachedRuns->Used) {
  8869. ThisEntry = CachedRuns->LcnArray + Index;
  8870. //
  8871. // The entry has been deleted and must be ignored. Get the
  8872. // window of deleted entries that covers it.
  8873. //
  8874. if (ThisEntry->RunLength == 0) {
  8875. DelWindow = NtfsGetDelWindow( CachedRuns,
  8876. Index,
  8877. Index,
  8878. TRUE,
  8879. NULL);
  8880. ASSERT( DelWindow != NULL );
  8881. ASSERT( DelWindow->EndIndex >= Index );
  8882. ASSERT( DelWindow->StartIndex <= Index );
  8883. //
  8884. // Advance the index past the deleted entries.
  8885. //
  8886. Index = DelWindow->EndIndex + 1;
  8887. continue;
  8888. }
  8889. //
  8890. // Remember the range of this run.
  8891. //
  8892. EndOfExistingRun = ThisEntry->Lcn + ThisEntry->RunLength;
  8893. //
  8894. // Case 2 - No overlap.
  8895. //
  8896. if (EndOfInputRun <= ThisEntry->Lcn) {
  8897. break;
  8898. //
  8899. // Case 3 - The run starts beyond the beginning of this run.
  8900. //
  8901. } else if (Lcn > ThisEntry->Lcn) {
  8902. //
  8903. // Reduce the current entry so that is covers only the
  8904. // first fragment and move it to the correct position in
  8905. // the length-sorted array.
  8906. //
  8907. NtfsModifyCachedBinArray( CachedRuns,
  8908. ThisEntry->RunLength,
  8909. Lcn - ThisEntry->Lcn );
  8910. ThisEntry->RunLength = Lcn - ThisEntry->Lcn;
  8911. //
  8912. // Adjust this length in the run length array.
  8913. //
  8914. NtfsShrinkLengthInCachedLcn( CachedRuns,
  8915. ThisEntry,
  8916. Index );
  8917. //
  8918. // We need to split this entry in two. Now reinsert the portion
  8919. // split off.
  8920. //
  8921. if (EndOfInputRun < EndOfExistingRun) {
  8922. //
  8923. // Now create a new entry that covers the second
  8924. // fragment. It should directly follow ThisEntry in the
  8925. // Lcn-sorted list.
  8926. //
  8927. NtfsInsertCachedRun( CachedRuns,
  8928. EndOfInputRun,
  8929. EndOfExistingRun - EndOfInputRun,
  8930. Index + 1);
  8931. //
  8932. // Nothing else to do.
  8933. //
  8934. break;
  8935. //
  8936. // We will trim the tail of this entry.
  8937. //
  8938. } else if (EndOfInputRun > EndOfExistingRun) {
  8939. Lcn = EndOfExistingRun;
  8940. Index += 1;
  8941. } else {
  8942. break;
  8943. }
  8944. //
  8945. // Case 4 - Remove a complete entry.
  8946. //
  8947. } else if (EndOfInputRun >= EndOfExistingRun) {
  8948. ASSERT( Lcn <= ThisEntry->Lcn );
  8949. //
  8950. // Delete the run. This can cause compaction to be run but we
  8951. // are guaranteed that the entry at Index will not move.
  8952. //
  8953. NtfsDeleteCachedRun( CachedRuns,
  8954. Index,
  8955. ThisEntry->LengthIndex );
  8956. //
  8957. // Advance the Lcn if we go past this entry.
  8958. //
  8959. if (EndOfInputRun > EndOfExistingRun) {
  8960. Lcn = EndOfExistingRun;
  8961. } else {
  8962. break;
  8963. }
  8964. //
  8965. // Case 5 - This entry starts at or before the start of the run
  8966. // and ends before the end of the run.
  8967. //
  8968. } else {
  8969. ASSERT( Lcn <= ThisEntry->Lcn );
  8970. ASSERT( EndOfInputRun < EndOfExistingRun );
  8971. //
  8972. // Reduce the current entry so that is covers only the end of the
  8973. // run and move it to the correct position in the length-sorted
  8974. // array.
  8975. //
  8976. NtfsModifyCachedBinArray( CachedRuns,
  8977. ThisEntry->RunLength,
  8978. EndOfExistingRun - EndOfInputRun );
  8979. ThisEntry->RunLength = EndOfExistingRun - EndOfInputRun;
  8980. ThisEntry->Lcn = EndOfInputRun;
  8981. NtfsShrinkLengthInCachedLcn( CachedRuns,
  8982. ThisEntry,
  8983. Index );
  8984. break;
  8985. }
  8986. }
  8987. #ifdef NTFS_CHECK_CACHED_RUNS
  8988. if (NtfsDoVerifyCachedRuns) {
  8989. NtfsVerifyCachedRuns( CachedRuns, FALSE, FALSE );
  8990. }
  8991. #endif
  8992. DebugTrace( -1, Dbg, ("NtfsRemoveCachedLcn -> VOID\n") );
  8993. return;
  8994. }
  8995. //
  8996. // Local support routine
  8997. //
  8998. BOOLEAN
  8999. NtfsGrowCachedRuns (
  9000. IN PNTFS_CACHED_RUNS CachedRuns
  9001. )
  9002. /*++
  9003. Routine Description:
  9004. This routine is called to grow the size of the cached run arrays if
  9005. necessary. We will not exceed the CachedRuns->MaximumSize. It
  9006. is assumed that there are no deleted entries in the arrays. If we can
  9007. grow the arrays, we double the size unless we would grow it by more than
  9008. our max delta. Otherwise we grow it by that amount.
  9009. Arguments:
  9010. CachedRuns - Pointer to the cached runs structure to grow.
  9011. Return Value:
  9012. BOOLEAN - TRUE if we were able to grow the structure, FALSE otherwise.
  9013. --*/
  9014. {
  9015. USHORT NewSize;
  9016. USHORT OldSize = CachedRuns->Avail;
  9017. USHORT Index;
  9018. PNTFS_LCN_CLUSTER_RUN NewLcnArray;
  9019. PUSHORT NewLengthArray;
  9020. PAGED_CODE();
  9021. DebugTrace( +1, Dbg, ("NtfsGrowCachedRuns\n") );
  9022. #ifdef NTFS_CHECK_CACHED_RUNS
  9023. ASSERT( (CachedRuns->Vcb == NULL) ||
  9024. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  9025. #endif
  9026. //
  9027. // Calculate the new size.
  9028. //
  9029. if (CachedRuns->Avail > NTFS_MAX_CACHED_RUNS_DELTA) {
  9030. NewSize = CachedRuns->Avail + NTFS_MAX_CACHED_RUNS_DELTA;
  9031. } else {
  9032. NewSize = CachedRuns->Avail * 2;
  9033. }
  9034. if (NewSize > CachedRuns->MaximumSize) {
  9035. NewSize = CachedRuns->MaximumSize;
  9036. }
  9037. if (NewSize > CachedRuns->Avail) {
  9038. //
  9039. // Allocate the new buffers and copy the previous buffers over.
  9040. //
  9041. NewLcnArray = NtfsAllocatePoolNoRaise( PagedPool,
  9042. sizeof( NTFS_LCN_CLUSTER_RUN ) * NewSize );
  9043. if (NewLcnArray == NULL) {
  9044. DebugTrace( -1, Dbg, ("NtfsGrowCachedRuns -> FALSE\n") );
  9045. return FALSE;
  9046. }
  9047. NewLengthArray = NtfsAllocatePoolNoRaise( PagedPool,
  9048. sizeof( USHORT ) * NewSize );
  9049. if (NewLengthArray == NULL) {
  9050. NtfsFreePool( NewLcnArray );
  9051. DebugTrace( -1, Dbg, ("NtfsGrowCachedRuns -> FALSE\n") );
  9052. return FALSE;
  9053. }
  9054. RtlCopyMemory( NewLcnArray,
  9055. CachedRuns->LcnArray,
  9056. sizeof( NTFS_LCN_CLUSTER_RUN ) * CachedRuns->Used );
  9057. RtlCopyMemory( NewLengthArray,
  9058. CachedRuns->LengthArray,
  9059. sizeof( USHORT ) * CachedRuns->Used );
  9060. //
  9061. // Mark all entries so that they can be detected as deleted.
  9062. //
  9063. for (Index = CachedRuns->Used; Index < NewSize; Index += 1) {
  9064. NewLcnArray[ Index ].RunLength = 0;
  9065. NewLcnArray[ Index ].LengthIndex = NTFS_CACHED_RUNS_DEL_INDEX;
  9066. NewLengthArray[ Index ] = NTFS_CACHED_RUNS_DEL_INDEX;
  9067. }
  9068. //
  9069. // Deallocate the existing buffers and set the cached runs structure
  9070. // to point to the new buffers.
  9071. //
  9072. NtfsFreePool( CachedRuns->LcnArray );
  9073. CachedRuns->LcnArray = NewLcnArray;
  9074. NtfsFreePool( CachedRuns->LengthArray );
  9075. CachedRuns->LengthArray = NewLengthArray;
  9076. //
  9077. // Update the count of available entries.
  9078. //
  9079. CachedRuns->Avail = NewSize;
  9080. //
  9081. // Create a window of deleted entries to cover the newly allocated
  9082. // entries.
  9083. //
  9084. NtfsAddDelWindow( CachedRuns, OldSize, NewSize - 1, TRUE );
  9085. NtfsAddDelWindow( CachedRuns, OldSize, NewSize - 1, FALSE );
  9086. } else {
  9087. DebugTrace( -1, Dbg, ("NtfsGrowCachedRuns -> FALSE\n") );
  9088. return FALSE;
  9089. }
  9090. #ifdef NTFS_CHECK_CACHED_RUNS
  9091. if (NtfsDoVerifyCachedRuns) {
  9092. NtfsVerifyCachedRuns( CachedRuns, FALSE, FALSE );
  9093. }
  9094. #endif
  9095. DebugTrace( -1, Dbg, ("NtfsGrowCachedRuns -> TRUE\n") );
  9096. return TRUE;
  9097. }
//
//  Local support routine
//

VOID
NtfsCompactCachedRuns (
    IN PNTFS_CACHED_RUNS CachedRuns,
    IN USHORT FirstIndex,
    IN USHORT LastIndex,
    IN BOOLEAN LcnSortedList
    )

/*++

Routine Description:

    This routine is called to compact two of the windows of deleted entries
    into a single window.  Note that entries in the given range of indices
    have been marked as deleted, but are not yet in a window of deleted
    entries.  This should not trigger a corruption warning.  To avoid
    confusion, we will be sure not to choose the windows to be compacted
    such that the given range of indices gets moved.

Arguments:

    CachedRuns - Pointer to the cached run structure.

    FirstIndex - Index that marks the start of the newest range of deleted
        entries.

    LastIndex - The index of the last entry in the newest range of deleted
        entries.

    LcnSortedList - If TRUE, the Lcn-sorted list is compacted.
        If FALSE, the length-sorted list is compacted.

Return Value:

    None

--*/

{
    USHORT Gap1;
    USHORT Gap2;
    USHORT RunIndex;
    USHORT Count;
    USHORT GapIndex = NTFS_CACHED_RUNS_DEL_INDEX;
    PUSHORT WindowCount;
    PNTFS_DELETED_RUNS DelWindow;
    PNTFS_DELETED_RUNS PrevWindow;
    PNTFS_DELETED_RUNS Windows;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsCompactCachedRuns\n") );

    ASSERT( FirstIndex != NTFS_CACHED_RUNS_DEL_INDEX );
    ASSERT( LastIndex != NTFS_CACHED_RUNS_DEL_INDEX );

#ifdef NTFS_CHECK_CACHED_RUNS
    ASSERT( (CachedRuns->Vcb == NULL) ||
            NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
#endif

    //
    //  Select the window array for whichever of the two sorted lists we
    //  are compacting.
    //

    if (LcnSortedList) {

        WindowCount = &CachedRuns->DelLcnCount;
        Windows = CachedRuns->DeletedLcnWindows;

    } else {

        WindowCount = &CachedRuns->DelLengthCount;
        Windows = CachedRuns->DeletedLengthWindows;
    }

    //
    //  We must have at least two windows or there is nothing to merge.
    //

    ASSERT( *WindowCount > 1 );

    //
    //  Loop through the windows looking for the smallest gap of non-deleted
    //  entries.  We will not choose a gap the covers [FirstIndex..LastIndex]
    //
    //  Gap1 starts at NTFS_CACHED_RUNS_DEL_INDEX (the maximum USHORT
    //  sentinel) and tracks the smallest gap seen so far; the loop exits
    //  early once a gap of zero or one entries is found since no smaller
    //  useful gap can exist.
    //

    Gap1 = NTFS_CACHED_RUNS_DEL_INDEX;

    for (Count = 1, DelWindow = Windows + 1, PrevWindow = Windows;
         (Count < *WindowCount) && (Gap1 > 1);
         Count += 1, PrevWindow += 1, DelWindow += 1) {

        //
        //  Compute this gap if the exempt range doesn't fall within it.  We want to track the
        //  actual number of entries.
        //

        if ((PrevWindow->StartIndex > LastIndex) ||
            (DelWindow->EndIndex < FirstIndex)) {

            Gap2 = DelWindow->StartIndex - (PrevWindow->EndIndex + 1);

            //
            //  Remember if this gap is our smallest so far.
            //

            if (Gap2 < Gap1) {

                Gap1 = Gap2;
                GapIndex = Count;
            }
        }
    }

    //
    //  Merge the window at GapIndex with the one that precedes it by moving
    //  the non-deleted entries in the gap between them to the start of the
    //  preceding window.
    //

    ASSERT( GapIndex != NTFS_CACHED_RUNS_DEL_INDEX );

    DelWindow = Windows + GapIndex;
    PrevWindow = DelWindow - 1;

    //
    //  Copy the block of entries that we will be keeping
    //  into the insertion point.
    //

    DebugTrace( 0,
                Dbg,
                ("copy %04x entries from=%04x to=%04x\n", Gap1, PrevWindow->EndIndex + 1, PrevWindow->StartIndex) );

    if (LcnSortedList) {

        RtlMoveMemory( CachedRuns->LcnArray + PrevWindow->StartIndex,
                       CachedRuns->LcnArray + PrevWindow->EndIndex + 1,
                       sizeof( NTFS_LCN_CLUSTER_RUN ) * Gap1 );

        //
        //  Update the indices in the Length-sorted list to
        //  reflect the move of the lcn-sorted entries.
        //

        for (Count = 0; Count < Gap1; Count += 1) {

            RunIndex = CachedRuns->LcnArray[ PrevWindow->StartIndex + Count ].LengthIndex;
            ASSERT( RunIndex != NTFS_CACHED_RUNS_DEL_INDEX );

            CachedRuns->LengthArray[ RunIndex ] = PrevWindow->StartIndex + Count;
        }

        //
        //  Mark the entries from the gap that are going to be part of the
        //  merged windows as deleted.
        //
        //  We only need to do this for entries past the end of the gap we are deleting.
        //

        Count = PrevWindow->StartIndex + Gap1;

        if (Count < PrevWindow->EndIndex + 1) {

            Count = PrevWindow->EndIndex + 1;
        }

        while (Count < DelWindow->StartIndex) {

            CachedRuns->LcnArray[ Count ].LengthIndex = NTFS_CACHED_RUNS_DEL_INDEX;
            CachedRuns->LcnArray[ Count ].RunLength = 0;
            Count += 1;
        }

    } else {

        RtlMoveMemory( CachedRuns->LengthArray + PrevWindow->StartIndex,
                       CachedRuns->LengthArray + PrevWindow->EndIndex + 1,
                       sizeof( USHORT ) * Gap1 );

        //
        //  Update the indices in the Lcn-sorted list to reflect
        //  the move of the length-sorted entries.
        //

        for (Count = 0; Count < Gap1; Count += 1) {

            RunIndex = CachedRuns->LengthArray[ PrevWindow->StartIndex + Count ];
            ASSERT( RunIndex != NTFS_CACHED_RUNS_DEL_INDEX );

            CachedRuns->LcnArray[ RunIndex ].LengthIndex = PrevWindow->StartIndex + Count;
        }

        //
        //  Mark the entries from the gap that are going to be part of the
        //  merged windows as deleted.
        //
        //  We only need to do this for entries past the end of the gap we are deleting.
        //

        Count = PrevWindow->StartIndex + Gap1;

        if (Count < PrevWindow->EndIndex + 1) {

            Count = PrevWindow->EndIndex + 1;
        }

        while (Count < DelWindow->StartIndex) {

            CachedRuns->LengthArray[ Count ] = NTFS_CACHED_RUNS_DEL_INDEX;
            Count += 1;
        }
    }

    //
    //  Update the previous window to reflect the larger size.  The kept
    //  entries now sit in front of the merged window.
    //

    ASSERT( (PrevWindow->EndIndex + Gap1 + 1) == DelWindow->StartIndex );

    PrevWindow->StartIndex += Gap1;
    PrevWindow->EndIndex = DelWindow->EndIndex;

    //
    //  Delete DelWindow.
    //

    NtfsDeleteDelWindow( CachedRuns,
                         LcnSortedList,
                         GapIndex);

#ifdef NTFS_CHECK_CACHED_RUNS

    //
    //  We will not check sort orders in NtfsVerifyCachedRuns because we
    //  could be making this call as part of deleting runs that have an
    //  overlap with a newly inserted run.  This could give false corruption
    //  warnings.
    //

    if (LcnSortedList) {

        NtfsVerifyCachedLcnRuns ( CachedRuns,
                                  FirstIndex,
                                  LastIndex,
                                  TRUE,
                                  TRUE );

    } else {

        NtfsVerifyCachedLenRuns ( CachedRuns,
                                  FirstIndex,
                                  LastIndex,
                                  TRUE );
    }
#endif

    DebugTrace( -1, Dbg, ("NtfsCompactCachedRuns -> VOID\n") );
    return;
}
//
//  Local support routine
//

BOOLEAN
NtfsPositionCachedLcn (
    IN PNTFS_CACHED_RUNS CachedRuns,
    IN LCN Lcn,
    OUT PUSHORT Index
    )

/*++

Routine Description:

    This routine is called to position ourselves with an Lcn lookup.  On return
    we will return the index where the current entry should go or where it
    currently resides.  The return value indicates whether the entry is
    present.  The Lcn does not have to be at the beginning of the found run.

    The search is a binary search over the Lcn-sorted array; runs with
    RunLength zero are deleted placeholders and are stepped around using
    the deleted-entry windows.

Arguments:

    CachedRuns - Pointer to the cached run structure.

    Lcn - Lcn we are interested in.

    Index - Address to store the index of the position in the Lcn array.

Return Value:

    BOOLEAN - TRUE if the entry is found, FALSE otherwise.

--*/

{
    USHORT Min, Max, Current;
    PNTFS_LCN_CLUSTER_RUN ThisEntry;
    PNTFS_DELETED_RUNS DelWindow;
    BOOLEAN FoundLcn = FALSE;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsPositionCachedLcn\n") );

#ifdef NTFS_CHECK_CACHED_RUNS
    ASSERT( (CachedRuns->Vcb == NULL) ||
            NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
#endif

    //
    //  Perform a binary search to find the index.  Note we start Max past
    //  the end so don't rely on it being valid.
    //

    Min = 0;
    Max = CachedRuns->Avail;

    while (Min != Max) {

        Current = (USHORT) (((ULONG) Max + Min) / 2);
        ThisEntry = CachedRuns->LcnArray + Current;

        //
        //  The current entry has been deleted and must be ignored.
        //  Get the window of deleted entries that covers Current.
        //

        if (ThisEntry->RunLength == 0) {

            DelWindow = NtfsGetDelWindow( CachedRuns,
                                          Current,
                                          Current,
                                          TRUE,
                                          NULL);

            ASSERT( DelWindow != NULL );
            ASSERT( DelWindow->EndIndex >= Current );
            ASSERT( DelWindow->StartIndex <= Current );

            //
            //  Go to the edges of this deleted entries window to determine
            //  which way we should go.
            //

            //
            //  If the deleted window spans the remaining used runs then move
            //  to the beginning of the window.
            //

            if ((DelWindow->EndIndex + 1) >= CachedRuns->Used ) {

                Max = DelWindow->StartIndex;
                ASSERT( Min <= Max );

            //
            //  If the deleted window is not at index zero then look to the entry
            //  on the left.
            //

            } else if (DelWindow->StartIndex > 0) {

                ThisEntry = CachedRuns->LcnArray + DelWindow->StartIndex - 1;
                ASSERT( ThisEntry->RunLength != 0 );

                if (Lcn < (ThisEntry->Lcn + ThisEntry->RunLength)) {

                    //
                    //  The search should continue from the lower edge of the
                    //  window.
                    //

                    Max = DelWindow->StartIndex;
                    ASSERT( Min <= Max );

                } else {

                    //
                    //  The search should continue from the upper edge of the
                    //  window.
                    //

                    Min = DelWindow->EndIndex + 1;
                    ASSERT( Min <= Max );
                }

            //
            //  The search should continue from the upper edge of the
            //  deleted window.
            //

            } else {

                Min = DelWindow->EndIndex + 1;
                ASSERT( Min <= Max );
            }

            //
            //  Loop back now that Min or Max has been updated.
            //

            continue;
        }

        //
        //  If our Lcn is less than this then move the Max value down.
        //

        if (Lcn < ThisEntry->Lcn) {

            Max = Current;
            ASSERT( Min <= Max );

        //
        //  If our Lcn is outside the range for this entry then move
        //  the Min value up.  Make it one greater than the current
        //  index since we always round the index down.
        //

        } else if (Lcn >= (ThisEntry->Lcn + ThisEntry->RunLength)) {

            Min = Current + 1;
            ASSERT( Min <= Max );

        //
        //  This must be a hit.  The Lcn lies within this run (not
        //  necessarily at its start).
        //

        } else {

            Min = Current;
            FoundLcn = TRUE;
            break;
        }
    }

    *Index = Min;

    //
    //  Check that we are positioned correctly.
    //

#if (DBG || defined( NTFS_FREE_ASSERTS ))
    ThisEntry = CachedRuns->LcnArray + *Index - 1;

    ASSERT( FoundLcn ||
            (*Index == 0) ||
            (ThisEntry->RunLength == 0) ||
            (Lcn >= (ThisEntry->Lcn + ThisEntry->RunLength)) );

    ThisEntry = CachedRuns->LcnArray + *Index;

    ASSERT( FoundLcn ||
            (*Index == CachedRuns->Used) ||
            (ThisEntry->RunLength == 0) ||
            (Lcn < ThisEntry->Lcn) );
#endif

    DebugTrace( -1, Dbg, ("NtfsPositionCachedLcn -> %01x\n", FoundLcn) );
    return FoundLcn;
}
//
//  Local support routine
//

BOOLEAN
NtfsPositionCachedLcnByLength (
    IN PNTFS_CACHED_RUNS CachedRuns,
    IN LONGLONG RunLength,
    IN PLCN Lcn OPTIONAL,
    IN PUSHORT StartIndex OPTIONAL,
    IN BOOLEAN SearchForward,
    OUT PUSHORT RunIndex
    )

/*++

Routine Description:

    This routine is called to search for a run of a particular length.  It
    returns the position of the run being looked for.  If the Lcn is specified
    then the run matching the desired RunLength that is closest to Lcn is
    chosen.

    This routine can be used to determine the insertion position for a new
    run.  The returned Index will be at or adjacent to the new run's position
    in the list.  The caller will have to check which.

    If this routine fails to find a run of the desired length, the returned
    Index will either point to a deleted entry or an entry that is larger or
    past the end of the array.

    ENHANCEMENT - If there is no match for the desired RunLength we currently choose the
    next higher size without checking for the one with the closest Lcn value.
    We could change the routine to restart the loop looking explicitly for the
    larger size so that the best choice in Lcn terms is returned.

Arguments:

    CachedRuns - Pointer to cached run structure.

    RunLength - Run length to look for.

    Lcn - If specified then we try to find the run which is closest to
        this Lcn, but has the requested Length.  If Lcn is UNUSED_LCN, we
        will end up choosing a match with the lowest Lcn as UNUSED_LCN
        is < 0.  This will result in maximum left-packing of the disk.
        If not specified we will randomly allocate matches on the length
        array.

    StartIndex - Optional index where the search should begin.

    SearchForward - If TRUE, the search should begin at StartIndex.  If
        FALSE, the search should end at StartIndex.

    RunIndex - Address to store index where the desired run is or should be.

Return Value:

    BOOLEAN - TRUE if we found a run with the desired RunLength,
        FALSE otherwise.

--*/

{
    USHORT Min, Max, Current, LcnIndex;
    USHORT MinMatch, MaxMatch;
    LONGLONG Distance;
    PNTFS_LCN_CLUSTER_RUN ThisEntry;
    PNTFS_DELETED_RUNS DelWindow;
    BOOLEAN FoundRun = FALSE;

    PAGED_CODE();

    DebugTrace( +1, Dbg, ("NtfsPositionCachedLcnByLength\n") );

#ifdef NTFS_CHECK_CACHED_RUNS
    ASSERT( (CachedRuns->Vcb == NULL) ||
            NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
#endif

    ASSERT( UNUSED_LCN < 0 );

    //
    //  Keep track of whether we are hitting matching length entries during the search.
    //
    //  MinMatch remembers a length match passed on the low side of the
    //  probe point, MaxMatch one passed on the high side; either may stay
    //  at the NTFS_CACHED_RUNS_DEL_INDEX sentinel if no such match is seen.
    //

    MinMatch = MaxMatch = NTFS_CACHED_RUNS_DEL_INDEX;

    //
    //  Binary search to find the first entry which is equal to
    //  or larger than the one we wanted.  Bias the search with the
    //  user's end point if necessary.
    //

    Min = 0;
    Max = CachedRuns->Avail;

    if (ARGUMENT_PRESENT( StartIndex )) {

        if (SearchForward) {

            Min = *StartIndex;

        } else {

            Max = *StartIndex + 1;

            //
            //  The only time this could happen is if we are trying to
            //  find an entry that is larger than the largest in use.
            //  Just use values that will terminate the search.
            //

            if (Max > CachedRuns->Used) {

                Min = Max = CachedRuns->Used;
            }
        }

        ASSERT( Min <= Max );
    }

    while (Min != Max) {

        ASSERT( Min <= Max );

        //
        //  Find the mid-index point along with the Lcn index out of
        //  the length array and the entry in the Lcn array.
        //

        Current = (USHORT) (((ULONG) Max + Min) / 2);
        LcnIndex = CachedRuns->LengthArray[Current];
        ThisEntry = CachedRuns->LcnArray + LcnIndex;

        //
        //  The current entry has been deleted and must be
        //  ignored.  Get the window of deleted entries that
        //  covers Current.
        //

        if (LcnIndex == NTFS_CACHED_RUNS_DEL_INDEX) {

            DelWindow = NtfsGetDelWindow( CachedRuns,
                                          Current,
                                          Current,
                                          FALSE,
                                          NULL);

            ASSERT( DelWindow );
            ASSERT( DelWindow->EndIndex >= Current );
            ASSERT( DelWindow->StartIndex <= Current );

            //
            //  Go to the edges of this deleted entries window to determine
            //  which way we should go.
            //

            //
            //  If this window extends past the end of the used entries
            //  then move to the begining of it.
            //

            if ((DelWindow->EndIndex + 1) >= CachedRuns->Used ) {

                Max = DelWindow->StartIndex;
                ASSERT( Min <= Max );

            //
            //  If this window doesn't start at index zero then determine which
            //  direction to go.
            //

            } else if (DelWindow->StartIndex > 0) {

                //
                //  Point to the entry adjacent to the lower end of the window.
                //

                LcnIndex = CachedRuns->LengthArray[ DelWindow->StartIndex - 1 ];
                ASSERT( LcnIndex != NTFS_CACHED_RUNS_DEL_INDEX );

                ThisEntry = CachedRuns->LcnArray + LcnIndex;
                ASSERT( ThisEntry->RunLength != 0 );

                //
                //  If this entry is longer than we asked for then the search
                //  should continue from the lower edge of the window.
                //

                if (RunLength < ThisEntry->RunLength) {

                    Max = DelWindow->StartIndex;
                    ASSERT( Min <= Max );

                //
                //  The search should continue from the upper edge of the
                //  window if our run length is longer.
                //

                } else if (RunLength > ThisEntry->RunLength) {

                    Min = DelWindow->EndIndex + 1;
                    ASSERT( Min <= Max );

                //
                //  We have found the desired run if our caller didn't specify
                //  an Lcn.
                //

                } else if (!ARGUMENT_PRESENT( Lcn )) {

                    Min = DelWindow->StartIndex - 1;
                    FoundRun = TRUE;
                    break;

                //
                //  If our Lcn is less than the Lcn in the entry then the search
                //  should continue from the lower edge of the window.
                //

                } else if (*Lcn < ThisEntry->Lcn) {

                    Max = DelWindow->StartIndex;
                    ASSERT( Min <= Max );

                //
                //  If the entry overlaps then we have a match.  We already
                //  know our Lcn is >= to the start Lcn of the range from
                //  the test above.
                //

                } else if (*Lcn < (ThisEntry->Lcn + ThisEntry->RunLength)) {

                    Min = DelWindow->StartIndex - 1;
                    FoundRun = TRUE;
                    break;

                //
                //  Move Min past the end of the window.  We'll check later to see
                //  which end is closer.
                //

                } else {

                    Min = DelWindow->EndIndex + 1;
                    MinMatch = DelWindow->StartIndex - 1;
                    ASSERT( Min <= Max );
                    ASSERT( MinMatch != MaxMatch );
                }

            //
            //  The search should continue from the upper edge of the
            //  window.
            //

            } else {

                Min = DelWindow->EndIndex + 1;
                ASSERT( Min <= Max );
            }

            //
            //  Loop back now that Min or Max has been updated.
            //

            continue;
        }

        //
        //  If the run length of this entry is more than we want then
        //  move the Max value down.
        //

        if (RunLength < ThisEntry->RunLength) {

            Max = Current;
            ASSERT( Min <= Max );

        //
        //  If the run length of this entry is less than we want then
        //  move the Min value up.
        //

        } else if (RunLength > ThisEntry->RunLength) {

            Min = Current + 1;
            ASSERT( Min <= Max );

        //
        //  If our caller doesn't care about the Lcn then return this entry to
        //  him.
        //

        } else if (!ARGUMENT_PRESENT( Lcn )) {

            //
            //  The caller doesn't care about the Lcn, or the Lcn falls in
            //  the current run.
            //

            Min = Current;
            FoundRun = TRUE;
            break;

        //
        //  If the Lcn is less than the Lcn in the entry then move Max down.
        //

        } else if (*Lcn < ThisEntry->Lcn) {

            Max = Current;

            if (Current != MinMatch) {

                MaxMatch = Current;
            }

            ASSERT( Min <= Max );
            ASSERT( MinMatch != MaxMatch );

        //
        //  If the entry overlaps then we have a match.  We already
        //  know our Lcn is >= to the start Lcn of the range from
        //  the test above.
        //

        } else if (*Lcn < (ThisEntry->Lcn + ThisEntry->RunLength)) {

            Min = Current;
            FoundRun = TRUE;
            break;

        //
        //  Advance Min past the current point.
        //

        } else {

            Min = Current + 1;
            MinMatch = Current;
            ASSERT( Min <= Max );
            ASSERT( MinMatch != MaxMatch );
        }
    }

    //
    //  If we don't have an exact match then we want to find the nearest point.  We kept track
    //  of the nearest length matches as we went along.
    //

    if (!FoundRun) {

        //
        //  We have a length match if either match entry was updated.  Check for the nearest
        //  distance if they don't match.
        //

        ASSERT( (MinMatch == NTFS_CACHED_RUNS_DEL_INDEX) ||
                (MinMatch != MaxMatch) );

        if (MinMatch != MaxMatch) {

            FoundRun = TRUE;

            //
            //  Make sure our search found one of these.
            //

            ASSERT( (MinMatch == NTFS_CACHED_RUNS_DEL_INDEX) ||
                    (MinMatch <= Min) );
            ASSERT( (MinMatch == NTFS_CACHED_RUNS_DEL_INDEX) ||
                    (MinMatch == Min) ||
                    (MinMatch == Min - 1) ||
                    (CachedRuns->LengthArray[ Min - 1 ] == NTFS_CACHED_RUNS_DEL_INDEX) );

            ASSERT( (MaxMatch == NTFS_CACHED_RUNS_DEL_INDEX) ||
                    (MaxMatch >= Min) );
            ASSERT( (MaxMatch == NTFS_CACHED_RUNS_DEL_INDEX) ||
                    (MaxMatch == Min) ||
                    (MaxMatch == Min + 1) ||
                    (CachedRuns->LengthArray[ Min + 1 ] == NTFS_CACHED_RUNS_DEL_INDEX) );

            //
            //  If the user specified an Lcn then we need to check for the nearest entry.
            //  The distance for the low-side match is measured from the end of
            //  that run (its Lcn plus RunLength) to the requested Lcn.
            //

            if (ARGUMENT_PRESENT( Lcn )) {

                Min = MinMatch;

                if (MaxMatch != NTFS_CACHED_RUNS_DEL_INDEX) {

                    ThisEntry = CachedRuns->LcnArray + CachedRuns->LengthArray[ MaxMatch ];
                    Distance = ThisEntry->Lcn - *Lcn;
                    Min = MaxMatch;

                    if (MinMatch != NTFS_CACHED_RUNS_DEL_INDEX) {

                        ThisEntry = CachedRuns->LcnArray + CachedRuns->LengthArray[ MinMatch ];

                        if (*Lcn - (ThisEntry->Lcn + RunLength) < Distance) {

                            Min = MinMatch;
                        }
                    }
                }
            }
        }
    }

    *RunIndex = Min;

#ifdef NTFS_CHECK_CACHED_RUNS
    if (FoundRun) {

        LcnIndex = CachedRuns->LengthArray[ Min ];
        ASSERT( LcnIndex != NTFS_CACHED_RUNS_DEL_INDEX );

        ThisEntry = CachedRuns->LcnArray + LcnIndex;
        ASSERT( RunLength == ThisEntry->RunLength );
    }
#endif

    DebugTrace( 0, Dbg, ("*RunIndex = %04x\n", *RunIndex) );
    DebugTrace( -1, Dbg, ("NtfsPositionCachedLcnByLength -> %01x\n", FoundRun) );
    return FoundRun;
}
  9734. #ifdef NTFS_CHECK_CACHED_RUNS
  9735. //
  9736. // Local support routine
  9737. //
  9738. VOID
  9739. NtfsVerifyCachedLcnRuns (
  9740. IN PNTFS_CACHED_RUNS CachedRuns,
  9741. IN USHORT FirstIndex,
  9742. IN USHORT LastIndex,
  9743. IN BOOLEAN SkipSortCheck,
  9744. IN BOOLEAN SkipBinCheck
  9745. )
  9746. /*++
  9747. Routine Description:
  9748. This routine is called to verify the state of the cached runs arrays.
  9749. Arguments:
  9750. CachedRuns - Pointer to the cached runs structure
  9751. FirstIndex - Index that marks the start of the newest range of deleted
  9752. entries. This new range will not be in a deleted window yet.
  9753. LastIndex - The index of the last entry in the newest range of deleted
  9754. entries. This new range will not be in a deleted window yet.
  9755. SkipSortCheck - If TRUE, the list may be out of order at this time and
  9756. we should skip the checks for overlapping ranges or length sorts.
  9757. SkipBinCheck - If TRUE, the BinArray may be out of sync and should not
  9758. be checked.
  9759. Return Value:
  9760. None
  9761. --*/
  9762. {
  9763. USHORT Index;
  9764. USHORT BinArray[ NTFS_CACHED_RUNS_BIN_COUNT ];
  9765. USHORT LcnWindowIndex = 0;
  9766. PNTFS_LCN_CLUSTER_RUN ThisEntry;
  9767. PNTFS_LCN_CLUSTER_RUN LastEntry = NULL;
  9768. PNTFS_DELETED_RUNS LcnDelWindow = NULL;
  9769. PNTFS_DELETED_RUNS NextWindow;
  9770. PAGED_CODE();
  9771. DebugTrace( +1, Dbg, ("NtfsVerifyCachedLcnRuns\n") );
  9772. #ifdef NTFS_CHECK_CACHED_RUNS
  9773. ASSERT( (CachedRuns->Vcb == NULL) ||
  9774. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  9775. #endif
  9776. ASSERT( CachedRuns->Used <= CachedRuns->Avail );
  9777. //
  9778. // Initialize the tracking variables.
  9779. //
  9780. RtlZeroMemory( BinArray, NTFS_CACHED_RUNS_BIN_COUNT * sizeof( USHORT ));
  9781. if (CachedRuns->DelLcnCount != 0) {
  9782. LcnDelWindow = CachedRuns->DeletedLcnWindows;
  9783. }
  9784. ASSERT( CachedRuns->DelLcnCount <= NTFS_CACHED_RUNS_MAX_DEL_WINDOWS );
  9785. //
  9786. // Verify that every element in the Lcn-sorted list is correctly
  9787. // ordered. If it's RunLength is 0, make certain its index is
  9788. // recorded in a window of deleted entries. If its LengthIndex is
  9789. // not NTFS_CACHED_RUNS_DEL_INDEX, make sure it refers to an entry in
  9790. // the length-sorted list that refers back to it and is in a window of
  9791. // deleted entries if and only if RunLength is 0.
  9792. //
  9793. for (Index = 0, ThisEntry = CachedRuns->LcnArray;
  9794. Index < CachedRuns->Avail;
  9795. Index += 1, ThisEntry += 1) {
  9796. //
  9797. // This entry is not deleted.
  9798. //
  9799. if (ThisEntry->RunLength != 0) {
  9800. //
  9801. // Better be in the used region with valid indexes.
  9802. //
  9803. ASSERT( Index < CachedRuns->Used );
  9804. ASSERT( ThisEntry->LengthIndex != NTFS_CACHED_RUNS_DEL_INDEX );
  9805. ASSERT( ThisEntry->LengthIndex < CachedRuns->Used );
  9806. ASSERT( ThisEntry->Lcn != UNUSED_LCN );
  9807. //
  9808. // Verify that the entry is not in the current window of deleted
  9809. // entries.
  9810. //
  9811. ASSERT( (LcnDelWindow == NULL) ||
  9812. (LcnDelWindow->StartIndex > Index) );
  9813. //
  9814. // Verify the sort order.
  9815. //
  9816. ASSERT( (LastEntry == NULL) ||
  9817. SkipSortCheck ||
  9818. (ThisEntry->Lcn > (LastEntry->Lcn + LastEntry->RunLength)) );
  9819. LastEntry = ThisEntry;
  9820. //
  9821. // Make certain that the corresponding entry in the Length-sorted
  9822. // list points back to this entry.
  9823. //
  9824. ASSERT( CachedRuns->LengthArray[ ThisEntry->LengthIndex ] == Index );
  9825. //
  9826. // Keep track of how many entries have this length.
  9827. //
  9828. if (ThisEntry->RunLength <= CachedRuns->Bins) {
  9829. BinArray[ ThisEntry->RunLength - 1 ] += 1;
  9830. }
  9831. //
  9832. // This is a deleted entry. Make sure it is in the deleted window array.
  9833. //
  9834. } else {
  9835. ASSERT( ThisEntry->LengthIndex == NTFS_CACHED_RUNS_DEL_INDEX );
  9836. //
  9837. // Verify that the entry is in the current window of deleted
  9838. // entries unless we have excluded this entry.
  9839. //
  9840. if ((FirstIndex != NTFS_CACHED_RUNS_DEL_INDEX) &&
  9841. (LastIndex != NTFS_CACHED_RUNS_DEL_INDEX) &&
  9842. ((FirstIndex > Index) ||
  9843. (LastIndex < Index))) {
  9844. ASSERT( (LcnDelWindow != NULL) &&
  9845. (LcnDelWindow->StartIndex <= Index) &&
  9846. (LcnDelWindow->EndIndex >= Index) );
  9847. }
  9848. //
  9849. // Advance the window of deleted entries if we are at the end.
  9850. //
  9851. if ((LcnDelWindow != NULL) && (LcnDelWindow->EndIndex == Index)) {
  9852. LcnWindowIndex += 1;
  9853. if (LcnWindowIndex < CachedRuns->DelLcnCount) {
  9854. LcnDelWindow += 1;
  9855. } else {
  9856. LcnDelWindow = NULL;
  9857. }
  9858. }
  9859. }
  9860. }
  9861. //
  9862. // We should have walked past all of the deleted entries.
  9863. //
  9864. //
  9865. // Make certain that the windows are in order and don't overlap.
  9866. //
  9867. for (LcnWindowIndex = 0, LcnDelWindow = NextWindow = CachedRuns->DeletedLcnWindows;
  9868. LcnWindowIndex < CachedRuns->DelLcnCount;
  9869. LcnWindowIndex += 1, NextWindow += 1) {
  9870. ASSERT( NextWindow->StartIndex <= NextWindow->EndIndex );
  9871. if (NextWindow != LcnDelWindow) {
  9872. ASSERT( NextWindow->StartIndex > (LcnDelWindow->EndIndex + 1) );
  9873. LcnDelWindow += 1;
  9874. }
  9875. }
  9876. //
  9877. // Verify that the histogram of RunLengths is correct.
  9878. //
  9879. for (Index = 0;
  9880. Index < NTFS_CACHED_RUNS_BIN_COUNT;
  9881. Index += 1) {
  9882. ASSERT( SkipBinCheck || (BinArray[ Index ] == CachedRuns->BinArray[ Index ]) );
  9883. }
  9884. DebugTrace( -1, Dbg, ("NtfsVerifyCachedLcnRuns -> VOID\n") );
  9885. return;
  9886. }
  9887. //
  9888. // Local support routine
  9889. //
  9890. VOID
  9891. NtfsVerifyCachedLenRuns (
  9892. IN PNTFS_CACHED_RUNS CachedRuns,
  9893. IN USHORT FirstIndex,
  9894. IN USHORT LastIndex,
  9895. IN BOOLEAN SkipSortCheck
  9896. )
  9897. /*++
  9898. Routine Description:
  9899. This routine is called to verify the state of the cached runs arrays.
  9900. Arguments:
  9901. CachedRuns - Pointer to the cached runs structure
  9902. FirstIndex - Index that marks the start of the newest range of deleted
  9903. entries. This new range will not be in a deleted window yet.
  9904. LastIndex - The index of the last entry in the newest range of deleted
  9905. entries. This new range will not be in a deleted window yet.
  9906. SkipSortCheck - If TRUE, the list may be out of order at this time and
  9907. we should skip the checks for overlapping ranges or length sorts.
  9908. Return Value:
  9909. None
  9910. --*/
  9911. {
  9912. USHORT Index;
  9913. USHORT LenWindowIndex = 0;
  9914. PNTFS_LCN_CLUSTER_RUN ThisEntry;
  9915. PNTFS_LCN_CLUSTER_RUN LastEntry = NULL;
  9916. PNTFS_DELETED_RUNS LenDelWindow = NULL;
  9917. PNTFS_DELETED_RUNS NextWindow;
  9918. PAGED_CODE();
  9919. DebugTrace( +1, Dbg, ("NtfsVerifyCachedLenRuns\n") );
  9920. #ifdef NTFS_CHECK_CACHED_RUNS
  9921. ASSERT( (CachedRuns->Vcb == NULL) ||
  9922. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  9923. #endif
  9924. ASSERT( CachedRuns->Used <= CachedRuns->Avail );
  9925. //
  9926. // Initialize the tracking variables.
  9927. //
  9928. if (CachedRuns->DelLengthCount != 0) {
  9929. LenDelWindow = CachedRuns->DeletedLengthWindows;
  9930. }
  9931. ASSERT( CachedRuns->DelLengthCount <= NTFS_CACHED_RUNS_MAX_DEL_WINDOWS );
  9932. //
  9933. // Verify that every element in the Length-sorted list is correctly
  9934. // ordered. If it's index is NTFS_CACHED_RUNS_DEL_INDEX, make certain
  9935. // its index is recorded in a window of deleted entries. Otherwise,
  9936. // make certain that its Index refers to an entry in the lcn-sorted list
  9937. // that refers back to it.
  9938. //
  9939. for (Index = 0; Index < CachedRuns->Avail; Index += 1) {
  9940. //
  9941. // Verify any entry not in a deleted window.
  9942. //
  9943. if (CachedRuns->LengthArray[ Index ] != NTFS_CACHED_RUNS_DEL_INDEX) {
  9944. ASSERT( Index < CachedRuns->Used );
  9945. ASSERT( CachedRuns->LengthArray[ Index ] < CachedRuns->Used );
  9946. ThisEntry = CachedRuns->LcnArray + CachedRuns->LengthArray[ Index ];
  9947. //
  9948. // Verify that the corresponding Lcn-sorted entry is not deleted.
  9949. //
  9950. ASSERT( ThisEntry->RunLength != 0 );
  9951. //
  9952. // Verify that the entry is not in the current window of deleted
  9953. // entries.
  9954. //
  9955. ASSERT( (LenDelWindow == NULL) ||
  9956. (LenDelWindow->StartIndex > Index) );
  9957. //
  9958. // Verify the sort order if we have the previous entry.
  9959. //
  9960. ASSERT( (LastEntry == NULL) ||
  9961. SkipSortCheck ||
  9962. (LastEntry->RunLength < ThisEntry->RunLength) ||
  9963. ((LastEntry->RunLength == ThisEntry->RunLength) &&
  9964. (ThisEntry->Lcn > (LastEntry->Lcn + LastEntry->RunLength))) );
  9965. LastEntry = ThisEntry;
  9966. //
  9967. // Make certain that the corresponding entry in the Lcn-sorted
  9968. // list points back to this entry.
  9969. //
  9970. ASSERT( ThisEntry->LengthIndex == Index );
  9971. //
  9972. // The entry is deleted.
  9973. //
  9974. } else {
  9975. //
  9976. // Verify that the entry is in the current window of deleted
  9977. // entries unless we have excluded this entry.
  9978. //
  9979. if ((FirstIndex != NTFS_CACHED_RUNS_DEL_INDEX) &&
  9980. (LastIndex != NTFS_CACHED_RUNS_DEL_INDEX) &&
  9981. ((FirstIndex > Index) ||
  9982. (LastIndex < Index))) {
  9983. //
  9984. // Verify that the entry is in the current window of deleted
  9985. // entries.
  9986. //
  9987. ASSERT( (LenDelWindow != NULL) &&
  9988. (LenDelWindow->StartIndex <= Index) &&
  9989. (LenDelWindow->EndIndex >= Index) );
  9990. }
  9991. }
  9992. //
  9993. // Advance the window of deleted entries if we are at the end.
  9994. //
  9995. if ((LenDelWindow != NULL) && (LenDelWindow->EndIndex == Index)) {
  9996. LenWindowIndex += 1;
  9997. if (LenWindowIndex < CachedRuns->DelLengthCount) {
  9998. LenDelWindow += 1;
  9999. } else {
  10000. LenDelWindow = NULL;
  10001. }
  10002. }
  10003. }
  10004. //
  10005. // We should have walked past all of the deleted entries.
  10006. //
  10007. ASSERT( LenDelWindow == NULL );
  10008. //
  10009. // Make certain that the windows are in order and don't overlap.
  10010. //
  10011. for (LenWindowIndex = 0, LenDelWindow = NextWindow = CachedRuns->DeletedLengthWindows;
  10012. LenWindowIndex < CachedRuns->DelLengthCount;
  10013. LenWindowIndex += 1, NextWindow += 1) {
  10014. ASSERT( NextWindow->StartIndex <= NextWindow->EndIndex );
  10015. if (NextWindow != LenDelWindow) {
  10016. ASSERT( NextWindow->StartIndex > (LenDelWindow->EndIndex + 1) );
  10017. LenDelWindow += 1;
  10018. }
  10019. }
  10020. DebugTrace( -1, Dbg, ("NtfsVerifyCachedLenRuns -> VOID\n") );
  10021. return;
  10022. }
  10023. //
  10024. // Local support routine
  10025. //
  10026. VOID
  10027. NtfsVerifyCachedRuns (
  10028. IN PNTFS_CACHED_RUNS CachedRuns,
  10029. IN BOOLEAN SkipSortCheck,
  10030. IN BOOLEAN SkipBinCheck
  10031. )
  10032. /*++
  10033. Routine Description:
  10034. This routine is called to verify the state of the cached runs arrays.
  10035. Arguments:
  10036. CachedRuns - Pointer to the cached runs structure
  10037. SkipSortCheck - If TRUE, the list may be out of order at this time and
  10038. we should skip the checks for overlapping ranges or length sorts.
  10039. SkipBinCheck - If TRUE, the BinArray may be out of sync and should not
  10040. be checked.
  10041. Return Value:
  10042. None
  10043. --*/
  10044. {
  10045. PAGED_CODE();
  10046. DebugTrace( +1, Dbg, ("NtfsVerifyCachedRuns\n") );
  10047. #ifdef NTFS_CHECK_CACHED_RUNS
  10048. ASSERT( (CachedRuns->Vcb == NULL) ||
  10049. NtfsIsExclusiveScb( CachedRuns->Vcb->BitmapScb ));
  10050. #endif
  10051. NtfsVerifyCachedLcnRuns ( CachedRuns,
  10052. NTFS_CACHED_RUNS_DEL_INDEX,
  10053. NTFS_CACHED_RUNS_DEL_INDEX,
  10054. SkipSortCheck,
  10055. SkipBinCheck );
  10056. NtfsVerifyCachedLenRuns ( CachedRuns,
  10057. NTFS_CACHED_RUNS_DEL_INDEX,
  10058. NTFS_CACHED_RUNS_DEL_INDEX,
  10059. SkipSortCheck );
  10060. DebugTrace( -1, Dbg, ("NtfsVerifyCachedRuns -> VOID\n") );
  10061. return;
  10062. }
  10063. #endif