Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

5023 lines
148 KiB

  1. /*++
  2. Copyright (c) 1990-2000 Microsoft Corporation
  3. Module Name:
  4. AllocSup.c
  5. Abstract:
  6. This module implements the Allocation support routines for Fat.
  7. // @@BEGIN_DDKSPLIT
  8. Author:
  9. DavidGoebel [DavidGoe] 31-Oct-90
  10. Revision History:
  11. DavidGoebel [DavidGoe] 31-Oct-90
  12. Add unwinding support. Some steps had to be reordered, and whether
13. operations could fail carefully considered. In particular, attention
14. was paid to the order of Mcb operations (see note below).
  15. ##### ## # # #### ###### #####
  16. # # # # ## # # # # # #
  17. # # # # # # # # ##### # #
  18. # # ###### # # # # ### # #####
  19. # # # # # ## # # # # #
  20. ##### # # # # #### ###### # #
  21. ______________________________________________
  22. ++++++++++++++++++++++++++++++++++++++++++++++++++|
  23. | |
  24. | The unwinding aspects of this module depend on |
  25. | operational details of the Mcb package. Do not |
  26. | attempt to modify unwind procedures without |
27. | thoroughly understanding the inner workings of |
  28. | the Mcb package. |
  29. | |
  30. ++++++++++++++++++++++++++++++++++++++++++++++++++|
  31. # # ## ##### # # # # # ####
  32. # # # # # # ## # # ## # # #
  33. # # # # # # # # # # # # # #
  34. # ## # ###### ##### # # # # # # # # ###
  35. ## ## # # # # # ## # # ## # #
  36. # # # # # # # # # # # ####
  37. ______________________________________________________
  38. There is also a suspect convention in use due to the way FAT32 was
  39. put into the allocator. We've got four distinct kinds of numbers
  40. you can see being used:
  41. - true volume cluster numbers, ranging from 2 to N
  42. - zero-based volume cluster numbers, ranging from 0 to N-2
  43. - window-relative "true" cluster numbers, ranging from 2 to 10001,
  44. the window size. this is because the hints/allocation within a window
  45. looks like unwindowed FAT12/16.
  46. - window-relative zero-based cluster numbers, ranging from 0 to ffff
  47. Make very sure you realize what kind of number you are looking at. This
  48. is where a bad +/-2 can come back to haunt you for years.
  49. // @@END_DDKSPLIT
  50. --*/
#include "FatProcs.h"

//
// The Bug check file id for this module
//

#define BugCheckFileId                   (FAT_BUG_CHECK_ALLOCSUP)

//
// Local debug trace level
//

#define Dbg                              (DEBUG_TRACE_ALLOCSUP)

//
// Return the smaller of two values (arguments may be evaluated twice).
//

#define FatMin(a, b)    ((a) < (b) ? (a) : (b))

//
// This structure is used by FatLookupFatEntry to remember a pinned page
// of fat.  The caller is responsible for unpinning Bcb (see the
// finally clause of FatLookupFileAllocation).
//

typedef struct _FAT_ENUMERATION_CONTEXT {

    //
    // Virtual byte offset of the currently pinned page of the FAT.
    //
    VBO VboOfPinnedPage;

    //
    // Bcb for the pinned page; NULL when no page is currently pinned.
    //
    PBCB Bcb;

    //
    // Mapped address of the pinned page's data.
    //
    PVOID PinnedPage;

} FAT_ENUMERATION_CONTEXT, *PFAT_ENUMERATION_CONTEXT;
//
// Local support routine prototypes
//

//
// Look up the FAT entry at FatIndex, returning it through FatEntry.
// Context carries a pinned FAT page across successive calls so that a
// chain walk does not re-pin the same page repeatedly; the caller must
// unpin Context->Bcb when done.
//

VOID
FatLookupFatEntry(
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN ULONG FatIndex,
    IN OUT PULONG FatEntry,
    IN OUT PFAT_ENUMERATION_CONTEXT Context
    );

//
// Write a run of ClusterCount FAT entries starting at StartingFatIndex.
// ChainTogether == TRUE links the run into an allocation chain (see the
// FatAllocateClusters macro); FALSE marks the run free (see the
// FatFreeClusters macro).
//

VOID
FatSetFatRun(
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN ULONG StartingFatIndex,
    IN ULONG ClusterCount,
    IN BOOLEAN ChainTogether
    );

//
// Return the base-2 logarithm of Value.  Callers pass power-of-two
// quantities such as BytesPerSector and bytes-per-cluster, and the
// result is later used to reconstruct the value via 1 << log.
//

UCHAR
FatLogOf(
    IN ULONG Value
    );
//
// Debug-only consistency check: the current window's cached free-cluster
// count must equal the number of clear bits in the free cluster bitmap.
//
// Note that the KdPrint below will ONLY fire when the assert does.  Leave it
// alone.
//

#if DBG
#define ASSERT_CURRENT_WINDOW_GOOD(VCB) {                                     \
    ULONG FreeClusterBitMapClear;                                             \
    ASSERT( (VCB)->FreeClusterBitMap.Buffer != NULL );                        \
    FreeClusterBitMapClear = RtlNumberOfClearBits(&(VCB)->FreeClusterBitMap); \
    if ((VCB)->CurrentWindow->ClustersFree != FreeClusterBitMapClear) {       \
        KdPrint(("FAT: ClustersFree %x h != FreeClusterBitMapClear %x h\n",   \
                 (VCB)->CurrentWindow->ClustersFree,                          \
                 FreeClusterBitMapClear));                                    \
    }                                                                         \
    ASSERT( (VCB)->CurrentWindow->ClustersFree == FreeClusterBitMapClear );   \
}
#else
#define ASSERT_CURRENT_WINDOW_GOOD(VCB)
#endif
//
// The following macros provide a convenient way of hiding the details
// of bitmap allocation schemes.
//

//
// VOID
// FatLockFreeClusterBitMap (
//     IN PVCB Vcb
//     );
//
// Acquire the mutex guarding the free cluster bitmap.  The fast mutex
// is taken "unsafe", so the caller must already have APCs disabled
// (asserted below).
//

#define FatLockFreeClusterBitMap(VCB) {                         \
    ASSERT(KeAreApcsDisabled());                                \
    ExAcquireFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
    ASSERT_CURRENT_WINDOW_GOOD(VCB)                             \
}

//
// VOID
// FatUnlockFreeClusterBitMap (
//     IN PVCB Vcb
//     );
//
// Release the mutex guarding the free cluster bitmap, re-verifying the
// window/bitmap invariant first (checked builds only).
//

#define FatUnlockFreeClusterBitMap(VCB) {                       \
    ASSERT_CURRENT_WINDOW_GOOD(VCB)                             \
    ASSERT(KeAreApcsDisabled());                                \
    ExReleaseFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
}

//
// BOOLEAN
// FatIsClusterFree (
//     IN PIRP_CONTEXT IrpContext,
//     IN PVCB Vcb,
//     IN ULONG FatIndex
//     );
//
// TRUE iff the cluster is clear in the free cluster bitmap.  Note the
// bias: bit 0 of the bitmap corresponds to cluster 2, the first real
// cluster on a FAT volume.
//

#define FatIsClusterFree(IRPCONTEXT,VCB,FAT_INDEX)              \
    (RtlCheckBit(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2) == 0)

//
// VOID
// FatFreeClusters (
//     IN PIRP_CONTEXT IrpContext,
//     IN PVCB Vcb,
//     IN ULONG FatIndex,
//     IN ULONG ClusterCount
//     );
//
// Mark a run of FAT entries available, taking the cheaper single-entry
// path when only one cluster is being freed.
//

#define FatFreeClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) {             \
    if ((CLUSTER_COUNT) == 1) {                                               \
        FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_AVAILABLE); \
    } else {                                                                  \
        FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),FALSE);   \
    }                                                                         \
}
//
// VOID
// FatAllocateClusters (
//     IN PIRP_CONTEXT IrpContext,
//     IN PVCB Vcb,
//     IN ULONG FatIndex,
//     IN ULONG ClusterCount
//     );
//
// Chain a run of FAT entries together on disk; a single cluster simply
// becomes an end-of-chain (FAT_CLUSTER_LAST) entry.
//

#define FatAllocateClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) {     \
    if ((CLUSTER_COUNT) == 1) {                                           \
        FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_LAST);  \
    } else {                                                              \
        FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),TRUE);\
    }                                                                     \
}

//
// VOID
// FatUnreserveClusters (
//     IN PIRP_CONTEXT IrpContext,
//     IN PVCB Vcb,
//     IN ULONG FatIndex,
//     IN ULONG ClusterCount
//     );
//
// Clear the run's bits in the free cluster bitmap (bit 0 == cluster 2),
// and pull the allocation hint back if the freed run starts below it.
//

#define FatUnreserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) {                    \
    ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap ); \
    ASSERT( (FAT_INDEX) >= 2);                                                            \
    RtlClearBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT));                \
    if ((FAT_INDEX) < (VCB)->ClusterHint) {                                               \
        (VCB)->ClusterHint = (FAT_INDEX);                                                 \
    }                                                                                     \
}

//
// VOID
// FatReserveClusters (
//     IN PIRP_CONTEXT IrpContext,
//     IN PVCB Vcb,
//     IN ULONG FatIndex,
//     IN ULONG ClusterCount
//     );
//
// Set the run's bits in the free cluster bitmap and advance the
// allocation hint to the first free cluster at or after the end of the
// run.  Handle wrapping the hint back to the front.
//

#define FatReserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) {                      \
    ULONG _AfterRun = (FAT_INDEX) + (CLUSTER_COUNT);                                      \
    ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap ); \
    ASSERT( (FAT_INDEX) >= 2);                                                            \
    RtlSetBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT));                  \
                                                                                          \
    /* If the run ended at the bitmap's edge, wrap to the first cluster. */               \
    if (_AfterRun - 2 >= (VCB)->FreeClusterBitMap.SizeOfBitMap) {                         \
        _AfterRun = 2;                                                                    \
    }                                                                                     \
    if (RtlCheckBit(&(VCB)->FreeClusterBitMap, _AfterRun - 2)) {                          \
        (VCB)->ClusterHint = RtlFindClearBits( &(VCB)->FreeClusterBitMap, 1, _AfterRun - 2) + 2; \
        /* RtlFindClearBits returns 0xFFFFFFFF on failure, making the hint 1; */          \
        /* fall back to the first valid cluster.                              */          \
        if (1 == (VCB)->ClusterHint) {                                                    \
            (VCB)->ClusterHint = 2;                                                       \
        }                                                                                 \
    }                                                                                     \
    else {                                                                                \
        (VCB)->ClusterHint = _AfterRun;                                                   \
    }                                                                                     \
}
//
// ULONG
// FatFindFreeClusterRun (
//     IN PIRP_CONTEXT IrpContext,
//     IN PVCB Vcb,
//     IN ULONG ClusterCount,
//     IN ULONG AlternateClusterHint
//     );
//
// Find a run of ClusterCount free clusters, searching from the hint.
// Do a special check if only one cluster is desired, since the hint
// itself may already be free.  Note RtlFindClearBits yields 0xFFFFFFFF
// on failure, so a failed search produces the (invalid) cluster 1 —
// callers must check for that.
//

#define FatFindFreeClusterRun(IRPCONTEXT,VCB,CLUSTER_COUNT,CLUSTER_HINT) ( \
    (CLUSTER_COUNT == 1) &&                                                \
    FatIsClusterFree((IRPCONTEXT), (VCB), (CLUSTER_HINT)) ?                \
        (CLUSTER_HINT) :                                                   \
        RtlFindClearBits( &(VCB)->FreeClusterBitMap,                       \
                          (CLUSTER_COUNT),                                 \
                          (CLUSTER_HINT) - 2) + 2                          \
    )

//
// FAT32: Define the maximum size of the FreeClusterBitMap to be the
// maximum size of a FAT16 FAT.  If there are more clusters on the
// volume than can be represented by this many bytes of bitmap, the
// FAT will be split into "buckets", each of which does fit.
//
// Note this count is in clusters/bits of bitmap.
//

#define MAX_CLUSTER_BITMAP_SIZE         (1 << 16)

//
// Calculate the window a given cluster number is in (cluster numbers
// are biased by the two reserved FAT entries).
//

#define FatWindowOfCluster(C)           (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)
#ifdef ALLOC_PRAGMA
//
// All allocation-support routines are pageable; place them in the
// paged code section.
//
#pragma alloc_text(PAGE, FatAddFileAllocation)
#pragma alloc_text(PAGE, FatAllocateDiskSpace)
#pragma alloc_text(PAGE, FatDeallocateDiskSpace)
#pragma alloc_text(PAGE, FatExamineFatEntries)
#pragma alloc_text(PAGE, FatInterpretClusterType)
#pragma alloc_text(PAGE, FatLogOf)
#pragma alloc_text(PAGE, FatLookupFatEntry)
#pragma alloc_text(PAGE, FatLookupFileAllocation)
#pragma alloc_text(PAGE, FatLookupFileAllocationSize)
#pragma alloc_text(PAGE, FatMergeAllocation)
#pragma alloc_text(PAGE, FatSetFatEntry)
#pragma alloc_text(PAGE, FatSetFatRun)
#pragma alloc_text(PAGE, FatSetupAllocationSupport)
#pragma alloc_text(PAGE, FatSplitAllocation)
#pragma alloc_text(PAGE, FatTearDownAllocationSupport)
#pragma alloc_text(PAGE, FatTruncateFileAllocation)
#endif
  277. INLINE
  278. ULONG
  279. FatSelectBestWindow(
  280. IN PVCB Vcb
  281. )
  282. /*++
  283. Routine Description:
  284. Choose a window to allocate clusters from. Order of preference is:
  285. 1. First window with >50% free clusters
  286. 2. First empty window
  287. 3. Window with greatest number of free clusters.
  288. Arguments:
  289. Vcb - Supplies the Vcb for the volume
  290. Return Value:
  291. 'Best window' number (index into Vcb->Windows[])
  292. --*/
  293. {
  294. ULONG i, Fave = 0;
  295. ULONG MaxFree = 0;
  296. ULONG FirstEmpty = -1;
  297. ULONG ClustersPerWindow = MAX_CLUSTER_BITMAP_SIZE;
  298. ASSERT( 1 != Vcb->NumberOfWindows);
  299. for (i = 0; i < Vcb->NumberOfWindows; i++) {
  300. if (Vcb->Windows[i].ClustersFree == ClustersPerWindow) {
  301. if (-1 == FirstEmpty) {
  302. //
  303. // Keep note of the first empty window on the disc
  304. //
  305. FirstEmpty = i;
  306. }
  307. }
  308. else if (Vcb->Windows[i].ClustersFree > MaxFree) {
  309. //
  310. // This window has the most free clusters, so far
  311. //
  312. MaxFree = Vcb->Windows[i].ClustersFree;
  313. Fave = i;
  314. //
  315. // If this window has >50% free clusters, then we will take it,
  316. // so don't bother considering more windows.
  317. //
  318. if (MaxFree >= (ClustersPerWindow >> 1)) {
  319. break;
  320. }
  321. }
  322. }
  323. //
  324. // If there were no windows with 50% or more freespace, then select the
  325. // first empty window on the disc, if any - otherwise we'll just go with
  326. // the one with the most free clusters.
  327. //
  328. if ((MaxFree < (ClustersPerWindow >> 1)) && (-1 != FirstEmpty)) {
  329. Fave = FirstEmpty;
  330. }
  331. return Fave;
  332. }
VOID
FatSetupAllocationSupport (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb
    )

/*++

Routine Description:

    This routine fills in the Allocation Support structure in the Vcb.
    Most entries are computed using fat.h macros supplied with data from
    the Bios Parameter Block.  The free cluster count, however, requires
    going to the Fat and actually counting free sectors.  At the same time
    the free cluster bit map is initialized.

Arguments:

    IrpContext - Supplies the context of the current request.

    Vcb - Supplies the Vcb to fill in.

--*/

{
    ULONG BitMapSize;
    PVOID BitMapBuffer;
    ULONG BitIndex;
    PBCB Bcb;
    ULONG Page;
    ULONG Offset;
    ULONG FatIndexBitSize;
    ULONG ClustersDescribableByFat;

    // NOTE(review): BitMapBuffer, Bcb, Page, Offset and FatIndexBitSize are
    // never referenced in this routine's body -- presumably leftovers from an
    // earlier implementation.

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatSetupAllocationSupport\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);

    //
    // Compute a number of fields for Vcb.AllocationSupport
    //

    Vcb->AllocationSupport.RootDirectoryLbo = FatRootDirectoryLbo( &Vcb->Bpb );
    Vcb->AllocationSupport.RootDirectorySize = FatRootDirectorySize( &Vcb->Bpb );
    Vcb->AllocationSupport.FileAreaLbo = FatFileAreaLbo( &Vcb->Bpb );
    Vcb->AllocationSupport.NumberOfClusters = FatNumberOfClusters( &Vcb->Bpb );
    Vcb->AllocationSupport.FatIndexBitSize = FatIndexBitSize( &Vcb->Bpb );
    Vcb->AllocationSupport.LogOfBytesPerSector = FatLogOf(Vcb->Bpb.BytesPerSector);
    Vcb->AllocationSupport.LogOfBytesPerCluster = FatLogOf(FatBytesPerCluster( &Vcb->Bpb ));
    Vcb->AllocationSupport.NumberOfFreeClusters = 0;

    //
    // Deal with a bug in DOS 5 format, if the Fat is not big enough to
    // describe all the clusters on the disk, reduce this number.  We expect
    // that fat32 volumes will not have this problem.
    //
    // Turns out this was not a good assumption.  We have to do this always now.
    //
    // The -2 below accounts for the two reserved entries at the front of
    // the FAT.
    //

    ClustersDescribableByFat = ( ((FatIsFat32(Vcb)? Vcb->Bpb.LargeSectorsPerFat :
                                                    Vcb->Bpb.SectorsPerFat) *
                                  Vcb->Bpb.BytesPerSector * 8)
                                 / FatIndexBitSize(&Vcb->Bpb) ) - 2;

    if (Vcb->AllocationSupport.NumberOfClusters > ClustersDescribableByFat) {

        Vcb->AllocationSupport.NumberOfClusters = ClustersDescribableByFat;
    }

    //
    // Extend the virtual volume file to include the Fat
    //

    {
        CC_FILE_SIZES FileSizes;

        FileSizes.AllocationSize.QuadPart =
        FileSizes.FileSize.QuadPart = (FatReservedBytes( &Vcb->Bpb ) +
                                       FatBytesPerFat( &Vcb->Bpb ));
        FileSizes.ValidDataLength = FatMaxLarge;

        //
        // Only initialize the cache map on first setup; afterwards just
        // resize the existing mapping.
        //

        if ( Vcb->VirtualVolumeFile->PrivateCacheMap == NULL ) {

            CcInitializeCacheMap( Vcb->VirtualVolumeFile,
                                  &FileSizes,
                                  TRUE,
                                  &FatData.CacheManagerNoOpCallbacks,
                                  Vcb );

        } else {

            CcSetFileSizes( Vcb->VirtualVolumeFile, &FileSizes );
        }
    }

    try {

        //
        // Decide how many FAT windows we need.  Only large FAT32 volumes
        // get more than one; each window covers MAX_CLUSTER_BITMAP_SIZE
        // clusters of the volume.
        //

        if (FatIsFat32(Vcb) &&
            Vcb->AllocationSupport.NumberOfClusters > MAX_CLUSTER_BITMAP_SIZE) {

            Vcb->NumberOfWindows = (Vcb->AllocationSupport.NumberOfClusters +
                                    MAX_CLUSTER_BITMAP_SIZE - 1) /
                                   MAX_CLUSTER_BITMAP_SIZE;

            BitMapSize = MAX_CLUSTER_BITMAP_SIZE;

        } else {

            Vcb->NumberOfWindows = 1;

            BitMapSize = Vcb->AllocationSupport.NumberOfClusters;
        }

        Vcb->Windows = FsRtlAllocatePoolWithTag( PagedPool,
                                                 Vcb->NumberOfWindows * sizeof(FAT_WINDOW),
                                                 TAG_FAT_WINDOW );

        RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
                             NULL,
                             0 );

        //
        // Chose a FAT window to begin operation in.
        //

        if (Vcb->NumberOfWindows > 1) {

            //
            // Read the fat and count up free clusters.  We bias by the two reserved
            // entries in the FAT.
            //

            FatExamineFatEntries( IrpContext, Vcb,
                                  2,
                                  Vcb->AllocationSupport.NumberOfClusters + 2 - 1,
                                  TRUE,
                                  NULL,
                                  NULL);

            //
            // Pick a window to begin allocating from
            //

            Vcb->CurrentWindow = &Vcb->Windows[ FatSelectBestWindow( Vcb)];

        } else {

            Vcb->CurrentWindow = &Vcb->Windows[0];

            //
            // Carefully bias ourselves by the two reserved entries in the FAT.
            //

            Vcb->CurrentWindow->FirstCluster = 2;
            Vcb->CurrentWindow->LastCluster = Vcb->AllocationSupport.NumberOfClusters + 2 - 1;
        }

        //
        // Now transition to the FAT window we have chosen.
        //

        FatExamineFatEntries( IrpContext, Vcb,
                              0,
                              0,
                              FALSE,
                              Vcb->CurrentWindow,
                              NULL);

        //
        // Now set the ClusterHint to the first free bit in our favorite
        // window (except the ClusterHint is off by two).  If the bitmap is
        // completely set, RtlFindClearBits returns -1 and we fall back to
        // cluster 2.
        //

        Vcb->ClusterHint =
            (BitIndex = RtlFindClearBits( &Vcb->FreeClusterBitMap, 1, 0 )) != -1 ?
                BitIndex + 2 : 2;

    } finally {

        DebugUnwind( FatSetupAllocationSupport );

        //
        // If we hit an exception, back out.
        //

        if (AbnormalTermination()) {

            FatTearDownAllocationSupport( IrpContext, Vcb );
        }
    }

    return;
}
  474. VOID
  475. FatTearDownAllocationSupport (
  476. IN PIRP_CONTEXT IrpContext,
  477. IN PVCB Vcb
  478. )
  479. /*++
  480. Routine Description:
  481. This routine prepares the volume for closing. Specifically, we must
  482. release the free fat bit map buffer, and uninitialize the dirty fat
  483. Mcb.
  484. Arguments:
  485. Vcb - Supplies the Vcb to fill in.
  486. Return Value:
  487. VOID
  488. --*/
  489. {
  490. DebugTrace(+1, Dbg, "FatTearDownAllocationSupport\n", 0);
  491. DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
  492. PAGED_CODE();
  493. //
  494. // If there are FAT buckets, free them.
  495. //
  496. if ( Vcb->Windows != NULL ) {
  497. ExFreePool( Vcb->Windows );
  498. Vcb->Windows = NULL;
  499. }
  500. //
  501. // Free the memory associated with the free cluster bitmap.
  502. //
  503. if ( Vcb->FreeClusterBitMap.Buffer != NULL ) {
  504. ExFreePool( Vcb->FreeClusterBitMap.Buffer );
  505. //
  506. // NULL this field as an flag.
  507. //
  508. Vcb->FreeClusterBitMap.Buffer = NULL;
  509. }
  510. //
  511. // And remove all the runs in the dirty fat Mcb
  512. //
  513. FatRemoveMcbEntry( Vcb, &Vcb->DirtyFatMcb, 0, 0xFFFFFFFF );
  514. DebugTrace(-1, Dbg, "FatTearDownAllocationSupport -> (VOID)\n", 0);
  515. UNREFERENCED_PARAMETER( IrpContext );
  516. return;
  517. }
  518. VOID
  519. FatLookupFileAllocation (
  520. IN PIRP_CONTEXT IrpContext,
  521. IN PFCB FcbOrDcb,
  522. IN VBO Vbo,
  523. OUT PLBO Lbo,
  524. OUT PULONG ByteCount,
  525. OUT PBOOLEAN Allocated,
  526. OUT PBOOLEAN EndOnMax,
  527. OUT PULONG Index
  528. )
  529. /*++
  530. Routine Description:
  531. This routine looks up the existing mapping of VBO to LBO for a
  532. file/directory. The information it queries is either stored in the
  533. mcb field of the fcb/dcb or it is stored on in the fat table and
  534. needs to be retrieved and decoded, and updated in the mcb.
  535. Arguments:
  536. FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being queried
  537. Vbo - Supplies the VBO whose LBO we want returned
  538. Lbo - Receives the LBO corresponding to the input Vbo if one exists
  539. ByteCount - Receives the number of bytes within the run the run
  540. that correpond between the input vbo and output lbo.
  541. Allocated - Receives TRUE if the Vbo does have a corresponding Lbo
  542. and FALSE otherwise.
  543. EndOnMax - Receives TRUE if the run ends in the maximal FAT cluster,
  544. which results in a fractional bytecount.
  545. Index - Receives the Index of the run
  546. --*/
  547. {
  548. VBO CurrentVbo;
  549. LBO CurrentLbo;
  550. LBO PriorLbo;
  551. VBO FirstVboOfCurrentRun;
  552. LBO FirstLboOfCurrentRun;
  553. BOOLEAN LastCluster;
  554. ULONG Runs;
  555. PVCB Vcb;
  556. FAT_ENTRY FatEntry;
  557. ULONG BytesPerCluster;
  558. ULARGE_INTEGER BytesOnVolume;
  559. FAT_ENUMERATION_CONTEXT Context;
  560. PAGED_CODE();
  561. DebugTrace(+1, Dbg, "FatLookupFileAllocation\n", 0);
  562. DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
  563. DebugTrace( 0, Dbg, " Vbo = %8lx\n", Vbo);
  564. DebugTrace( 0, Dbg, " Lbo = %8lx\n", Lbo);
  565. DebugTrace( 0, Dbg, " ByteCount = %8lx\n", ByteCount);
  566. DebugTrace( 0, Dbg, " Allocated = %8lx\n", Allocated);
  567. Context.Bcb = NULL;
  568. Vcb = FcbOrDcb->Vcb;
  569. *EndOnMax = FALSE;
  570. //
  571. // Check the trivial case that the mapping is already in our
  572. // Mcb.
  573. //
  574. if ( FatLookupMcbEntry(Vcb, &FcbOrDcb->Mcb, Vbo, Lbo, ByteCount, Index) ) {
  575. *Allocated = TRUE;
  576. ASSERT( ByteCount != 0);
  577. //
  578. // Detect the overflow case, trim and claim the condition.
  579. //
  580. if (Vbo + *ByteCount == 0) {
  581. *EndOnMax = TRUE;
  582. }
  583. DebugTrace( 0, Dbg, "Found run in Mcb.\n", 0);
  584. DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
  585. return;
  586. }
  587. //
  588. // Initialize the Vcb, the cluster size, LastCluster, and
  589. // FirstLboOfCurrentRun (to be used as an indication of the first
  590. // iteration through the following while loop).
  591. //
  592. BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
  593. BytesOnVolume.QuadPart = UInt32x32To64( Vcb->AllocationSupport.NumberOfClusters, BytesPerCluster );
  594. LastCluster = FALSE;
  595. FirstLboOfCurrentRun = 0;
  596. //
  597. // Discard the case that the request extends beyond the end of
  598. // allocation. Note that if the allocation size if not known
  599. // AllocationSize is set to 0xffffffff.
  600. //
  601. if ( Vbo >= FcbOrDcb->Header.AllocationSize.LowPart ) {
  602. *Allocated = FALSE;
  603. DebugTrace( 0, Dbg, "Vbo beyond end of file.\n", 0);
  604. DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
  605. return;
  606. }
  607. //
  608. // The Vbo is beyond the last Mcb entry. So we adjust Current Vbo/Lbo
  609. // and FatEntry to describe the beginning of the last entry in the Mcb.
  610. // This is used as initialization for the following loop.
  611. //
  612. // If the Mcb was empty, we start at the beginning of the file with
  613. // CurrentVbo set to 0 to indicate a new run.
  614. //
  615. if (FatLookupLastMcbEntry( Vcb, &FcbOrDcb->Mcb, &CurrentVbo, &CurrentLbo, &Runs )) {
  616. DebugTrace( 0, Dbg, "Current Mcb size = %8lx.\n", CurrentVbo + 1);
  617. CurrentVbo -= (BytesPerCluster - 1);
  618. CurrentLbo -= (BytesPerCluster - 1);
  619. //
  620. // Convert an index to a count.
  621. //
  622. Runs += 1;
  623. } else {
  624. DebugTrace( 0, Dbg, "Mcb empty.\n", 0);
  625. //
  626. // Check for an FcbOrDcb that has no allocation
  627. //
  628. if (FcbOrDcb->FirstClusterOfFile == 0) {
  629. *Allocated = FALSE;
  630. DebugTrace( 0, Dbg, "File has no allocation.\n", 0);
  631. DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
  632. return;
  633. } else {
  634. CurrentVbo = 0;
  635. CurrentLbo = FatGetLboFromIndex( Vcb, FcbOrDcb->FirstClusterOfFile );
  636. FirstVboOfCurrentRun = CurrentVbo;
  637. FirstLboOfCurrentRun = CurrentLbo;
  638. Runs = 0;
  639. DebugTrace( 0, Dbg, "First Lbo of file = %8lx\n", CurrentLbo);
  640. }
  641. }
  642. //
  643. // Now we know that we are looking up a valid Vbo, but it is
  644. // not in the Mcb, which is a monotonically increasing list of
  645. // Vbo's. Thus we have to go to the Fat, and update
  646. // the Mcb as we go. We use a try-finally to unpin the page
  647. // of fat hanging around. Also we mark *Allocated = FALSE, so that
  648. // the caller wont try to use the data if we hit an exception.
  649. //
  650. *Allocated = FALSE;
  651. try {
  652. FatEntry = (FAT_ENTRY)FatGetIndexFromLbo( Vcb, CurrentLbo );
  653. //
  654. // ASSERT that CurrentVbo and CurrentLbo are now cluster alligned.
  655. // The assumption here, is that only whole clusters of Vbos and Lbos
  656. // are mapped in the Mcb.
  657. //
  658. ASSERT( ((CurrentLbo - Vcb->AllocationSupport.FileAreaLbo)
  659. % BytesPerCluster == 0) &&
  660. (CurrentVbo % BytesPerCluster == 0) );
  661. //
  662. // Starting from the first Vbo after the last Mcb entry, scan through
  663. // the Fat looking for our Vbo. We continue through the Fat until we
  664. // hit a noncontiguity beyond the desired Vbo, or the last cluster.
  665. //
  666. while ( !LastCluster ) {
  667. //
  668. // Get the next fat entry, and update our Current variables.
  669. //
  670. FatLookupFatEntry( IrpContext, Vcb, FatEntry, &FatEntry, &Context );
  671. PriorLbo = CurrentLbo;
  672. CurrentLbo = FatGetLboFromIndex( Vcb, FatEntry );
  673. CurrentVbo += BytesPerCluster;
  674. switch ( FatInterpretClusterType( Vcb, FatEntry )) {
  675. //
  676. // Check for a break in the Fat allocation chain.
  677. //
  678. case FatClusterAvailable:
  679. case FatClusterReserved:
  680. case FatClusterBad:
  681. DebugTrace( 0, Dbg, "Break in allocation chain, entry = %d\n", FatEntry);
  682. DebugTrace(-1, Dbg, "FatLookupFileAllocation -> Fat Corrupt. Raise Status.\n", 0);
  683. FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
  684. FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
  685. break;
  686. //
  687. // If this is the last cluster, we must update the Mcb and
  688. // exit the loop.
  689. //
  690. case FatClusterLast:
  691. //
  692. // Assert we know where the current run started. If the
  693. // Mcb was empty when we were called, thenFirstLboOfCurrentRun
  694. // was set to the start of the file. If the Mcb contained an
  695. // entry, then FirstLboOfCurrentRun was set on the first
  696. // iteration through the loop. Thus if FirstLboOfCurrentRun
  697. // is 0, then there was an Mcb entry and we are on our first
  698. // iteration, meaing that the last cluster in the Mcb was
  699. // really the last allocated cluster, but we checked Vbo
  700. // against AllocationSize, and found it OK, thus AllocationSize
  701. // must be too large.
  702. //
  703. // Note that, when we finally arrive here, CurrentVbo is actually
  704. // the first Vbo beyond the file allocation and CurrentLbo is
  705. // meaningless.
  706. //
  707. DebugTrace( 0, Dbg, "Read last cluster of file.\n", 0);
  708. //
  709. // Detect the case of the maximal file. Note that this really isn't
  710. // a proper Vbo - those are zero-based, and this is a one-based number.
  711. // The maximal file, of 2^32 - 1 bytes, has a maximum byte offset of
  712. // 2^32 - 2.
  713. //
  714. // Just so we don't get confused here.
  715. //
  716. if (CurrentVbo == 0) {
  717. *EndOnMax = TRUE;
  718. CurrentVbo -= 1;
  719. }
  720. LastCluster = TRUE;
  721. if (FirstLboOfCurrentRun != 0 ) {
  722. DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
  723. DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
  724. DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
  725. DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
  726. (VOID)FatAddMcbEntry( Vcb,
  727. &FcbOrDcb->Mcb,
  728. FirstVboOfCurrentRun,
  729. FirstLboOfCurrentRun,
  730. CurrentVbo - FirstVboOfCurrentRun );
  731. Runs += 1;
  732. }
  733. //
  734. // Being at the end of allocation, make sure we have found
  735. // the Vbo. If we haven't, seeing as we checked VBO
  736. // against AllocationSize, the real disk allocation is less
  737. // than that of AllocationSize. This comes about when the
  738. // real allocation is not yet known, and AllocaitonSize
  739. // contains MAXULONG.
  740. //
  741. // KLUDGE! - If we were called by FatLookupFileAllocationSize
  742. // Vbo is set to MAXULONG - 1, and AllocationSize to the lookup
  743. // hint. Thus we merrily go along looking for a match that isn't
  744. // there, but in the meantime building an Mcb. If this is
  745. // the case, fill in AllocationSize and return.
  746. //
  747. if ( Vbo == MAXULONG - 1 ) {
  748. *Allocated = FALSE;
  749. FcbOrDcb->Header.AllocationSize.QuadPart = CurrentVbo;
  750. DebugTrace( 0, Dbg, "New file allocation size = %08lx.\n", CurrentVbo);
  751. try_return ( NOTHING );
  752. }
  753. //
  754. // We will lie ever so slightly if we really terminated on the
  755. // maximal byte of a file. It is really allocated.
  756. //
  757. if (Vbo >= CurrentVbo && !*EndOnMax) {
  758. *Allocated = FALSE;
  759. try_return ( NOTHING );
  760. }
  761. break;
  762. //
  763. // This is a continuation in the chain. If the run has a
  764. // discontiguity at this point, update the Mcb, and if we are beyond
  765. // the desired Vbo, this is the end of the run, so set LastCluster
  766. // and exit the loop.
  767. //
  768. case FatClusterNext:
  769. //
  770. // This is the loop check. The Vbo must not be bigger than the size of
  771. // the volume, and the Vbo must not have a) wrapped and b) not been at the
  772. // very last cluster in the chain, for the case of the maximal file.
  773. //
  774. if ( CurrentVbo == 0 ||
  775. (BytesOnVolume.HighPart == 0 && CurrentVbo > BytesOnVolume.LowPart)) {
  776. FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
  777. FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
  778. }
  779. if ( PriorLbo + BytesPerCluster != CurrentLbo ) {
  780. //
  781. // Note that on the first time through the loop
  782. // (FirstLboOfCurrentRun == 0), we don't add the
  783. // run to the Mcb since it curresponds to the last
  784. // run already stored in the Mcb.
  785. //
  786. if ( FirstLboOfCurrentRun != 0 ) {
  787. DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
  788. DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
  789. DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
  790. DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
  791. FatAddMcbEntry( Vcb,
  792. &FcbOrDcb->Mcb,
  793. FirstVboOfCurrentRun,
  794. FirstLboOfCurrentRun,
  795. CurrentVbo - FirstVboOfCurrentRun );
  796. Runs += 1;
  797. }
  798. //
  799. // Since we are at a run boundry, with CurrentLbo and
  800. // CurrentVbo being the first cluster of the next run,
  801. // we see if the run we just added encompases the desired
  802. // Vbo, and if so exit. Otherwise we set up two new
  803. // First*boOfCurrentRun, and continue.
  804. //
  805. if (CurrentVbo > Vbo) {
  806. LastCluster = TRUE;
  807. } else {
  808. FirstVboOfCurrentRun = CurrentVbo;
  809. FirstLboOfCurrentRun = CurrentLbo;
  810. }
  811. }
  812. break;
  813. default:
  814. DebugTrace(0, Dbg, "Illegal Cluster Type.\n", FatEntry);
  815. FatBugCheck( 0, 0, 0 );
  816. break;
  817. } // switch()
  818. } // while()
  819. //
  820. // Load up the return parameters.
  821. //
  822. // On exit from the loop, Vbo still contains the desired Vbo, and
  823. // CurrentVbo is the first byte after the run that contained the
  824. // desired Vbo.
  825. //
  826. *Allocated = TRUE;
  827. *Lbo = FirstLboOfCurrentRun + (Vbo - FirstVboOfCurrentRun);
  828. *ByteCount = CurrentVbo - Vbo;
  829. if (ARGUMENT_PRESENT(Index)) {
  830. //
  831. // Note that Runs only needs to be accurate with respect to where we
  832. // ended. Since partial-lookup cases will occur without exclusive
  833. // synchronization, the Mcb itself may be much bigger by now.
  834. //
  835. *Index = Runs - 1;
  836. }
  837. try_exit: NOTHING;
  838. } finally {
  839. DebugUnwind( FatLookupFileAllocation );
  840. //
  841. // We are done reading the Fat, so unpin the last page of fat
  842. // that is hanging around
  843. //
  844. FatUnpinBcb( IrpContext, Context.Bcb );
  845. DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
  846. }
  847. return;
  848. }
VOID
FatAddFileAllocation (
    IN PIRP_CONTEXT IrpContext,
    IN PFCB FcbOrDcb,
    IN PFILE_OBJECT FileObject OPTIONAL,
    IN ULONG DesiredAllocationSize
    )

/*++

Routine Description:

    This routine adds additional allocation to the specified file/directory.
    Additional allocation is added by appending clusters to the file/directory.

    If the file already has a sufficient allocation then this procedure
    is effectively a noop.

Arguments:

    IrpContext - Supplies the context of the overall request.

    FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified.
               This parameter must not specify the root dcb.

    FileObject - If supplied inform the cache manager of the change.

    DesiredAllocationSize - Supplies the minimum size, in bytes, that we want
                            allocated to the file/directory.

Return Value:

    None.  Raises on failure (e.g. STATUS_DISK_FULL from the underlying
    allocator); on a raise all visible state has been backed out by the
    unwind logic below.

--*/

{
    PVCB Vcb;

    //
    //  NewMcb is initialized (and must be torn down) only in the extension
    //  path; the Unwind* booleans record precisely which steps completed so
    //  the finally clause can back them out in the correct order.  Do not
    //  reorder the steps below without understanding the unwind dependencies
    //  (see the warning in the file header).
    //

    LARGE_MCB NewMcb;
    PLARGE_MCB McbToCleanup = NULL;
    PDIRENT Dirent = NULL;
    ULONG NewAllocation;
    PBCB Bcb = NULL;
    BOOLEAN UnwindWeAllocatedDiskSpace = FALSE;
    BOOLEAN UnwindAllocationSizeSet = FALSE;
    BOOLEAN UnwindCacheManagerInformed = FALSE;
    BOOLEAN UnwindWeInitializedMcb = FALSE;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatAddFileAllocation\n", 0);
    DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
    DebugTrace( 0, Dbg, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize);

    //
    //  If we haven't yet set the correct AllocationSize, do so.
    //

    if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {

        FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
    }

    //
    //  Check for the benign case that the desired allocation is already
    //  within the allocation size.
    //

    if (DesiredAllocationSize <= FcbOrDcb->Header.AllocationSize.LowPart) {

        DebugTrace(0, Dbg, "Desired size within current allocation.\n", 0);

        DebugTrace(-1, Dbg, "FatAddFileAllocation -> (VOID)\n", 0);
        return;
    }

    DebugTrace( 0, Dbg, "InitialAllocation = %08lx.\n", FcbOrDcb->Header.AllocationSize.LowPart);

    //
    //  Get a chunk of disk space that will fulfill our needs.  If there
    //  was no initial allocation, start from the hint in the Vcb, otherwise
    //  try to allocate from the cluster after the initial allocation.
    //
    //  If there was no initial allocation to the file, we can just use the
    //  Mcb in the FcbOrDcb, otherwise we have to use a new one, and merge
    //  it to the one in the FcbOrDcb.
    //

    Vcb = FcbOrDcb->Vcb;

    try {

        if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {

            //
            //  Initial allocation: the file currently owns no clusters, so
            //  the new run(s) go straight into the Fcb's own Mcb and the
            //  dirent's FirstClusterOfFile must be filled in.
            //

            LBO FirstLboOfFile;

            ASSERT( FcbOrDcb->FcbCondition == FcbGood );

            FatGetDirentFromFcbOrDcb( IrpContext,
                                      FcbOrDcb,
                                      &Dirent,
                                      &Bcb );

            ASSERT( Bcb != NULL );

            //
            //  Set this dirty right now since this call can fail.
            //

            FatSetDirtyBcb( IrpContext, Bcb, Vcb, TRUE );

            FatAllocateDiskSpace( IrpContext,
                                  Vcb,
                                  0,
                                  &DesiredAllocationSize,
                                  FALSE,
                                  &FcbOrDcb->Mcb );

            UnwindWeAllocatedDiskSpace = TRUE;
            McbToCleanup = &FcbOrDcb->Mcb;

            //
            //  We have to update the dirent and FcbOrDcb copies of
            //  FirstClusterOfFile since before it was 0
            //

            FatLookupMcbEntry( FcbOrDcb->Vcb,
                               &FcbOrDcb->Mcb,
                               0,
                               &FirstLboOfFile,
                               (PULONG)NULL,
                               NULL );

            DebugTrace( 0, Dbg, "First Lbo of file will be %08lx.\n", FirstLboOfFile );

            FcbOrDcb->FirstClusterOfFile = FatGetIndexFromLbo( Vcb, FirstLboOfFile );

            Dirent->FirstClusterOfFile = (USHORT)FcbOrDcb->FirstClusterOfFile;

            if ( FatIsFat32(Vcb) ) {

                //
                //  FAT32 keeps the upper 16 bits of the cluster number in a
                //  separate dirent field.
                //

                Dirent->FirstClusterOfFileHi = (USHORT)(FcbOrDcb->FirstClusterOfFile >> 16);
            }

            //
            //  Note the size of the allocation we need to tell the cache manager about.
            //

            NewAllocation = DesiredAllocationSize;

        } else {

            //
            //  Extension: allocate into a temporary Mcb so that failure to
            //  grow cache structures below leaves the Fcb's Mcb untouched.
            //

            LBO LastAllocatedLbo;
            VBO DontCare;

            //
            //  Get the first cluster following the current allocation.  It is possible
            //  the Mcb is empty (or short, etc.) so we need to be slightly careful
            //  about making sure we don't lie with the hint.
            //

            (void)FatLookupLastMcbEntry( FcbOrDcb->Vcb, &FcbOrDcb->Mcb, &DontCare, &LastAllocatedLbo, NULL );

            //
            //  Try to get some disk space starting from there.
            //

            NewAllocation = DesiredAllocationSize - FcbOrDcb->Header.AllocationSize.LowPart;

            FsRtlInitializeLargeMcb( &NewMcb, PagedPool );
            UnwindWeInitializedMcb = TRUE;
            McbToCleanup = &NewMcb;

            FatAllocateDiskSpace( IrpContext,
                                  Vcb,
                                  (LastAllocatedLbo != ~0 ?
                                   FatGetIndexFromLbo(Vcb,LastAllocatedLbo + 1) :
                                   0),
                                  &NewAllocation,
                                  FALSE,
                                  &NewMcb );

            UnwindWeAllocatedDiskSpace = TRUE;
        }

        //
        //  Now that we increased the allocation of the file, mark it in the
        //  FcbOrDcb.  Carefully prepare to handle an inability to grow the cache
        //  structures.
        //

        FcbOrDcb->Header.AllocationSize.LowPart += NewAllocation;

        //
        //  Handle the maximal file case, where we may have just wrapped.  Note
        //  that this must be the precise boundary case wrap, i.e. by one byte,
        //  so that the new allocation is actually one byte "less" as far as we're
        //  concerned.  This is important for the extension case.
        //

        if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {

            NewAllocation -= 1;
            FcbOrDcb->Header.AllocationSize.LowPart = 0xffffffff;
        }

        UnwindAllocationSizeSet = TRUE;

        //
        //  Inform the cache manager to increase the section size
        //

        if ( ARGUMENT_PRESENT(FileObject) && CcIsFileCached(FileObject) ) {

            CcSetFileSizes( FileObject,
                            (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
            UnwindCacheManagerInformed = TRUE;
        }

        //
        //  In the extension case, we have held off actually gluing the new
        //  allocation onto the file.  This simplifies exception cleanup since
        //  if it was already added and the section grow failed, we'd have to
        //  do extra work to unglue it.  This way, we can assume that if we
        //  raise the only thing we need to do is deallocate the disk space.
        //
        //  Merge the allocation now.
        //

        if (FcbOrDcb->Header.AllocationSize.LowPart != NewAllocation) {

            //
            //  Tack the new Mcb onto the end of the FcbOrDcb one.
            //

            FatMergeAllocation( IrpContext,
                                Vcb,
                                &FcbOrDcb->Mcb,
                                &NewMcb );
        }

    } finally {

        DebugUnwind( FatAddFileAllocation );

        //
        //  Give FlushFileBuffer a clue here.
        //

        SetFlag(FcbOrDcb->FcbState, FCB_STATE_FLUSH_FAT);

        //
        //  If we were dogged trying to complete this operation, we need to
        //  back various things out.
        //

        if (AbnormalTermination()) {

            //
            //  Pull off the allocation size we tried to add to this object if
            //  we failed to grow cache structures or Mcb structures.
            //

            if (UnwindAllocationSizeSet) {

                FcbOrDcb->Header.AllocationSize.LowPart -= NewAllocation;
            }

            if (UnwindCacheManagerInformed) {

                CcSetFileSizes( FileObject,
                                (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
            }

            //
            //  In the case of initial allocation, we used the Fcb's Mcb and have
            //  to clean that up as well as the FAT chain references.
            //
            //  (AllocationSize.LowPart is zero again here only if this was the
            //  initial-allocation path, since the extension delta was just
            //  subtracted back off above.)
            //

            if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {

                if (Dirent != NULL) {

                    FcbOrDcb->FirstClusterOfFile = 0;
                    Dirent->FirstClusterOfFile = 0;

                    if ( FatIsFat32(Vcb) ) {

                        Dirent->FirstClusterOfFileHi = 0;
                    }
                }
            }

            //
            //  ... and drop the dirent Bcb if we got it.  Do it now
            //  so we can afford to take the exception if we have to.
            //

            FatUnpinBcb( IrpContext, Bcb );

            try {

                //
                //  Note this can re-raise.
                //

                if ( UnwindWeAllocatedDiskSpace ) {

                    FatDeallocateDiskSpace( IrpContext, Vcb, McbToCleanup );
                }

            } finally {

                //
                //  We always want to clean up the non-initial allocation temporary Mcb,
                //  otherwise we have the Fcb's Mcb and we just truncate it away.
                //

                if (UnwindWeInitializedMcb == TRUE) {

                    //
                    //  Note that we already know a raise is in progress.  No danger
                    //  of encountering the normal case code below and doing this again.
                    //

                    FsRtlUninitializeLargeMcb( McbToCleanup );

                } else {

                    if (McbToCleanup) {

                        FsRtlTruncateLargeMcb( McbToCleanup, 0 );
                    }
                }
            }
        }

        DebugTrace(-1, Dbg, "FatAddFileAllocation -> (VOID)\n", 0);
    }

    //
    //  Non-exceptional cleanup we always want to do.  In handling the re-raise possibilities
    //  during exceptions we had to make sure these two steps always happened there beforehand.
    //  So now we handle the usual case.
    //

    FatUnpinBcb( IrpContext, Bcb );

    if (UnwindWeInitializedMcb == TRUE) {

        FsRtlUninitializeLargeMcb( &NewMcb );
    }
}
VOID
FatTruncateFileAllocation (
    IN PIRP_CONTEXT IrpContext,
    IN PFCB FcbOrDcb,
    IN ULONG DesiredAllocationSize
    )

/*++

Routine Description:

    This routine truncates the allocation to the specified file/directory.

    If the file is already smaller than the indicated size then this procedure
    is effectively a noop.

Arguments:

    IrpContext - Supplies the context of the overall request.

    FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
               This parameter must not specify the root dcb.

    DesiredAllocationSize - Supplies the maximum size, in bytes, that we want
                            allocated to the file/directory.  It is rounded
                            up to the nearest cluster.

Return Value:

    None.  May raise; see the unwind commentary below for the state the
    file is left in on failure.

--*/

{
    PVCB Vcb;
    PBCB Bcb = NULL;

    //
    //  RemainingMcb receives the tail of the allocation being chopped off
    //  in the non-zero truncation case; the Unwind* variables let the
    //  finally clause restore in-memory/on-disk state on a raise.
    //

    LARGE_MCB RemainingMcb;
    ULONG BytesPerCluster;
    PDIRENT Dirent = NULL;
    BOOLEAN UpdatedDirent = FALSE;

    ULONG UnwindInitialAllocationSize;
    ULONG UnwindInitialFirstClusterOfFile;
    BOOLEAN UnwindWeAllocatedMcb = FALSE;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatTruncateFileAllocation\n", 0);
    DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
    DebugTrace( 0, Dbg, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize);

    //
    //  If the Fcb isn't in good condition, we have no business whacking around on
    //  the disk after "its" clusters.
    //
    //  Inspired by a Prefix complaint.
    //

    ASSERT( FcbOrDcb->FcbCondition == FcbGood );

    //
    //  If we haven't yet set the correct AllocationSize, do so.
    //

    if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {

        FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
    }

    //
    //  Round up the Desired Allocation Size to the next cluster size
    //

    Vcb = FcbOrDcb->Vcb;

    BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;

    //
    //  Note if the desired allocation is zero, to distinguish this from
    //  the wrap case below.
    //

    if (DesiredAllocationSize != 0) {

        DesiredAllocationSize = (DesiredAllocationSize + (BytesPerCluster - 1)) &
                                ~(BytesPerCluster - 1);

        //
        //  Check for the benign case that the file is already smaller than
        //  the desired truncation.  Note that if it wraps, then a) it was
        //  specifying an offset in the maximally allocatable cluster and
        //  b) we're not asking to extend the file, either.  So stop.
        //

        if (DesiredAllocationSize == 0 ||
            DesiredAllocationSize >= FcbOrDcb->Header.AllocationSize.LowPart) {

            DebugTrace(0, Dbg, "Desired size within current allocation.\n", 0);

            DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
            return;
        }
    }

    UnwindInitialAllocationSize = FcbOrDcb->Header.AllocationSize.LowPart;
    UnwindInitialFirstClusterOfFile = FcbOrDcb->FirstClusterOfFile;

    //
    //  Update the FcbOrDcb allocation size.  If it is now zero, we have the
    //  additional task of modifying the FcbOrDcb and Dirent copies of
    //  FirstClusterInFile.
    //
    //  Note that we must pin the dirent before actually deallocating the
    //  disk space since, in unwind, it would not be possible to reallocate
    //  deallocated disk space as someone else may have reallocated it and
    //  may cause an exception when you try to get some more disk space.
    //  Thus FatDeallocateDiskSpace must be the final dangerous operation.
    //

    try {

        FcbOrDcb->Header.AllocationSize.QuadPart = DesiredAllocationSize;

        //
        //  Special case 0
        //

        if (DesiredAllocationSize == 0) {

            //
            //  We have to update the dirent and FcbOrDcb copies of
            //  FirstClusterOfFile since the file is losing its entire
            //  allocation and the first cluster becomes 0.
            //

            ASSERT( FcbOrDcb->FcbCondition == FcbGood );

            FatGetDirentFromFcbOrDcb( IrpContext, FcbOrDcb, &Dirent, &Bcb );

            ASSERT( Dirent && Bcb );

            Dirent->FirstClusterOfFile = 0;

            if (FatIsFat32(Vcb)) {

                Dirent->FirstClusterOfFileHi = 0;
            }

            FcbOrDcb->FirstClusterOfFile = 0;

            FatSetDirtyBcb( IrpContext, Bcb, Vcb, TRUE );
            UpdatedDirent = TRUE;

            FatDeallocateDiskSpace( IrpContext, Vcb, &FcbOrDcb->Mcb );

            FatRemoveMcbEntry( FcbOrDcb->Vcb, &FcbOrDcb->Mcb, 0, 0xFFFFFFFF );

        } else {

            //
            //  Split the existing allocation into two parts, one we will keep, and
            //  one we will deallocate.
            //

            FsRtlInitializeLargeMcb( &RemainingMcb, PagedPool );
            UnwindWeAllocatedMcb = TRUE;

            FatSplitAllocation( IrpContext,
                                Vcb,
                                &FcbOrDcb->Mcb,
                                DesiredAllocationSize,
                                &RemainingMcb );

            FatDeallocateDiskSpace( IrpContext, Vcb, &RemainingMcb );

            FsRtlUninitializeLargeMcb( &RemainingMcb );
        }

    } finally {

        DebugUnwind( FatTruncateFileAllocation );

        //
        //  Is this really the right backout strategy?  It would be nice if we could
        //  pretend the truncate worked if we knew that the file had gotten into
        //  a consistent state.  Leaving dangled clusters is probably quite preferable.
        //

        if ( AbnormalTermination() ) {

            FcbOrDcb->Header.AllocationSize.LowPart = UnwindInitialAllocationSize;

            if ( (DesiredAllocationSize == 0) && (Dirent != NULL)) {

                if (UpdatedDirent) {

                    //
                    //  If the dirent has been updated ok and marked dirty, then we
                    //  failed in FatDeallocateDiskSpace, and don't know what state
                    //  the on-disc fat chain is in.  So we throw away the mcb,
                    //  and potentially lose a few clusters until the next
                    //  chkdsk.  The operation has succeeded, but the exception
                    //  will still propagate.  5.1
                    //

                    FatRemoveMcbEntry( Vcb, &FcbOrDcb->Mcb, 0, 0xFFFFFFFF );
                    FcbOrDcb->Header.AllocationSize.QuadPart = 0;
                }
                else {

                    //
                    //  The dirent was never dirtied, so restore both copies of
                    //  the first cluster to their pre-truncation values.
                    //

                    Dirent->FirstClusterOfFile = (USHORT)UnwindInitialFirstClusterOfFile;

                    if ( FatIsFat32(Vcb) ) {

                        Dirent->FirstClusterOfFileHi =
                                (USHORT)(UnwindInitialFirstClusterOfFile >> 16);
                    }

                    FcbOrDcb->FirstClusterOfFile = UnwindInitialFirstClusterOfFile;
                }
            }

            if ( UnwindWeAllocatedMcb ) {

                FsRtlUninitializeLargeMcb( &RemainingMcb );
            }

            //
            //  Note that in the non zero truncation case, we will also
            //  leak clusters.  However, apart from this, the in memory and on disc
            //  structures will agree.
            //
        }

        FatUnpinBcb( IrpContext, Bcb );

        //
        //  Give FlushFileBuffer a clue here.
        //

        SetFlag(FcbOrDcb->FcbState, FCB_STATE_FLUSH_FAT);

        DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
    }
}
  1267. VOID
  1268. FatLookupFileAllocationSize (
  1269. IN PIRP_CONTEXT IrpContext,
  1270. IN PFCB FcbOrDcb
  1271. )
  1272. /*++
  1273. Routine Description:
  1274. This routine retrieves the current file allocatio size for the
  1275. specified file/directory.
  1276. Arguments:
  1277. FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
  1278. --*/
  1279. {
  1280. LBO Lbo;
  1281. ULONG ByteCount;
  1282. BOOLEAN DontCare;
  1283. PAGED_CODE();
  1284. DebugTrace(+1, Dbg, "FatLookupAllocationSize\n", 0);
  1285. DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
  1286. //
  1287. // We call FatLookupFileAllocation with Vbo of 0xffffffff - 1.
  1288. //
  1289. FatLookupFileAllocation( IrpContext,
  1290. FcbOrDcb,
  1291. MAXULONG - 1,
  1292. &Lbo,
  1293. &ByteCount,
  1294. &DontCare,
  1295. &DontCare,
  1296. NULL );
  1297. //
  1298. // FileSize was set at Fcb creation time from the contents of the directory entry,
  1299. // and we are only now looking up the real length of the allocation chain. If it
  1300. // cannot be contained, this is trash. Probably more where that came from.
  1301. //
  1302. if (FcbOrDcb->Header.FileSize.LowPart > FcbOrDcb->Header.AllocationSize.LowPart) {
  1303. FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
  1304. FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
  1305. }
  1306. DebugTrace(-1, Dbg, "FatLookupFileAllocationSize -> (VOID)\n", 0);
  1307. return;
  1308. }
  1309. VOID
  1310. FatAllocateDiskSpace (
  1311. IN PIRP_CONTEXT IrpContext,
  1312. IN PVCB Vcb,
  1313. IN ULONG AbsoluteClusterHint,
  1314. IN PULONG ByteCount,
  1315. IN BOOLEAN ExactMatchRequired,
  1316. OUT PLARGE_MCB Mcb
  1317. )
  1318. /*++
  1319. Routine Description:
  1320. This procedure allocates additional disk space and builds an mcb
  1321. representing the newly allocated space. If the space cannot be
  1322. allocated then this procedure raises an appropriate status.
  1323. Searching starts from the hint index in the Vcb unless an alternative
non-zero hint is given in AbsoluteClusterHint. If we are using the
  1325. hint field in the Vcb, it is set to the cluster following our allocation
  1326. when we are done.
  1327. Disk space can only be allocated in cluster units so this procedure
  1328. will round up any byte count to the next cluster boundary.
  1329. Pictorially what is done is the following (where ! denotes the end of
  1330. the fat chain (i.e., FAT_CLUSTER_LAST)):
  1331. Mcb (empty)
  1332. becomes
  1333. Mcb |--a--|--b--|--c--!
  1334. ^
  1335. ByteCount ----------+
  1336. Arguments:
  1337. Vcb - Supplies the VCB being modified
  1338. AbsoluteClusterHint - Supplies an alternate hint index to start the
  1339. search from. If this is zero we use, and update,
  1340. the Vcb hint field.
  1341. ByteCount - Supplies the number of bytes that we are requesting, and
  1342. receives the number of bytes that we got.
  1343. ExactMatchRequired - Caller should set this to TRUE if only the precise run requested
  1344. is acceptable.
  1345. Mcb - Receives the MCB describing the newly allocated disk space. The
  1346. caller passes in an initialized Mcb that is filled in by this procedure.
  1347. Return Value:
  1348. TRUE - Allocated ok
  1349. FALSE - Failed to allocate exactly as requested (=> ExactMatchRequired was TRUE)
  1350. --*/
  1351. {
  1352. UCHAR LogOfBytesPerCluster;
  1353. ULONG BytesPerCluster;
  1354. ULONG StartingCluster;
  1355. ULONG ClusterCount;
  1356. ULONG WindowRelativeHint;
  1357. #if DBG
  1358. ULONG i;
  1359. ULONG PreviousClear;
  1360. #endif
  1361. PFAT_WINDOW Window;
  1362. BOOLEAN Wait;
  1363. BOOLEAN Result = TRUE;
  1364. PAGED_CODE();
  1365. DebugTrace(+1, Dbg, "FatAllocateDiskSpace\n", 0);
  1366. DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
  1367. DebugTrace( 0, Dbg, " *ByteCount = %8lx\n", *ByteCount);
  1368. DebugTrace( 0, Dbg, " Mcb = %8lx\n", Mcb);
  1369. DebugTrace( 0, Dbg, " Hint = %8lx\n", AbsoluteClusterHint);
  1370. ASSERT((AbsoluteClusterHint <= Vcb->AllocationSupport.NumberOfClusters + 2) && (1 != AbsoluteClusterHint));
  1371. //
  1372. // Make sure byte count is not zero
  1373. //
  1374. if (*ByteCount == 0) {
  1375. DebugTrace(0, Dbg, "Nothing to allocate.\n", 0);
  1376. DebugTrace(-1, Dbg, "FatAllocateDiskSpace -> (VOID)\n", 0);
  1377. return;
  1378. }
  1379. //
  1380. // Compute the cluster count based on the byte count, rounding up
  1381. // to the next cluster if there is any remainder. Note that the
// pathological case *ByteCount == 0 has been eliminated above.
  1383. //
  1384. LogOfBytesPerCluster = Vcb->AllocationSupport.LogOfBytesPerCluster;
  1385. BytesPerCluster = 1 << LogOfBytesPerCluster;
  1386. *ByteCount = (*ByteCount + (BytesPerCluster - 1))
  1387. & ~(BytesPerCluster - 1);
  1388. //
  1389. // If ByteCount is NOW zero, then we were asked for the maximal
  1390. // filesize (or at least for bytes in the last allocatable sector).
  1391. //
  1392. if (*ByteCount == 0) {
  1393. *ByteCount = 0xffffffff;
  1394. ClusterCount = 1 << (32 - LogOfBytesPerCluster);
  1395. } else {
  1396. ClusterCount = (*ByteCount >> LogOfBytesPerCluster);
  1397. }
  1398. //
  1399. // Make sure there are enough free clusters to start with, and
  1400. // take them now so that nobody else takes them from us.
  1401. //
  1402. ExAcquireResourceSharedLite(&Vcb->ChangeBitMapResource, TRUE);
  1403. FatLockFreeClusterBitMap( Vcb );
  1404. if (ClusterCount <= Vcb->AllocationSupport.NumberOfFreeClusters) {
  1405. Vcb->AllocationSupport.NumberOfFreeClusters -= ClusterCount;
  1406. } else {
  1407. FatUnlockFreeClusterBitMap( Vcb );
  1408. ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
  1409. DebugTrace(0, Dbg, "Disk Full. Raise Status.\n", 0);
  1410. FatRaiseStatus( IrpContext, STATUS_DISK_FULL );
  1411. }
  1412. //
  1413. // Did the caller supply a hint?
  1414. //
  1415. if ((0 != AbsoluteClusterHint) && (AbsoluteClusterHint < (Vcb->AllocationSupport.NumberOfClusters + 2))) {
  1416. if (Vcb->NumberOfWindows > 1) {
  1417. //
  1418. // If we're being called upon to allocate clusters outside the
  1419. // current window (which happens only via MoveFile), it's a problem.
  1420. // We address this by changing the current window to be the one which
  1421. // contains the alternate cluster hint. Note that if the user's
  1422. // request would cross a window boundary, he doesn't really get what
  1423. // he wanted.
  1424. //
  1425. if (AbsoluteClusterHint < Vcb->CurrentWindow->FirstCluster ||
  1426. AbsoluteClusterHint > Vcb->CurrentWindow->LastCluster) {
  1427. ULONG BucketNum = FatWindowOfCluster( AbsoluteClusterHint );
  1428. ASSERT( BucketNum < Vcb->NumberOfWindows);
  1429. //
  1430. // Drop our shared lock on the ChangeBitMapResource, and pick it up again
  1431. // exclusive in preparation for making the window swap.
  1432. //
  1433. FatUnlockFreeClusterBitMap(Vcb);
  1434. ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
  1435. ExAcquireResourceExclusiveLite(&Vcb->ChangeBitMapResource, TRUE);
  1436. FatLockFreeClusterBitMap(Vcb);
  1437. Window = &Vcb->Windows[BucketNum];
  1438. //
  1439. // Again, test the current window against the one we want - some other
  1440. // thread could have sneaked in behind our backs and kindly set it to the one
  1441. // we need, when we dropped and reacquired the ChangeBitMapResource above.
  1442. //
  1443. if (Window != Vcb->CurrentWindow) {
  1444. try {
  1445. Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
  1446. SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
  1447. //
  1448. // Change to the new window (update Vcb->CurrentWindow) and scan it
  1449. // to build up a freespace bitmap etc.
  1450. //
  1451. FatExamineFatEntries( IrpContext, Vcb,
  1452. 0,
  1453. 0,
  1454. FALSE,
  1455. Window,
  1456. NULL);
  1457. } finally {
  1458. if (!Wait) {
  1459. ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
  1460. }
  1461. if (AbnormalTermination()) {
  1462. //
  1463. // We will have raised as a result of failing to pick up the
  1464. // chunk of the FAT for this window move. Release our resources
  1465. // and return the cluster count to the volume.
  1466. //
  1467. Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
  1468. FatUnlockFreeClusterBitMap( Vcb );
  1469. ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
  1470. }
  1471. }
  1472. }
  1473. }
  1474. //
  1475. // Make the hint cluster number relative to the base of the current window...
  1476. //
// CurrentWindow->FirstCluster is biased by +2 already, so we will lose the
  1478. // bias already in AbsoluteClusterHint. Put it back....
  1479. //
  1480. WindowRelativeHint = AbsoluteClusterHint - Vcb->CurrentWindow->FirstCluster + 2;
  1481. }
  1482. else {
  1483. //
  1484. // Only one 'window', ie fat16/12. No modification necessary.
  1485. //
  1486. WindowRelativeHint = AbsoluteClusterHint;
  1487. }
  1488. }
  1489. else {
  1490. //
  1491. // Either no hint supplied, or it was out of range, so grab one from the Vcb
  1492. //
  1493. // NOTE: Clusterhint in the Vcb is not guaranteed to be set (may be -1)
  1494. //
  1495. WindowRelativeHint = Vcb->ClusterHint;
  1496. AbsoluteClusterHint = 0;
  1497. //
  1498. // Vcb hint may not have been initialized yet. Force to valid cluster.
  1499. //
  1500. if (-1 == WindowRelativeHint) {
  1501. WindowRelativeHint = 2;
  1502. }
  1503. }
  1504. ASSERT((WindowRelativeHint >= 2) && (WindowRelativeHint < Vcb->FreeClusterBitMap.SizeOfBitMap + 2));
  1505. //
  1506. // Keep track of the window we're allocating from, so we can clean
  1507. // up correctly if the current window changes after we unlock the
  1508. // bitmap.
  1509. //
  1510. Window = Vcb->CurrentWindow;
  1511. //
  1512. // Try to find a run of free clusters large enough for us.
  1513. //
  1514. StartingCluster = FatFindFreeClusterRun( IrpContext,
  1515. Vcb,
  1516. ClusterCount,
  1517. WindowRelativeHint );
  1518. //
  1519. // If the above call was successful, we can just update the fat
  1520. // and Mcb and exit. Otherwise we have to look for smaller free
  1521. // runs.
  1522. //
  1523. // This test is a bit funky. Note that the error return from
  1524. // RtlFindClearBits is -1, and adding two to that is 1.
  1525. //
  1526. if ((StartingCluster != 1) &&
  1527. ((0 == AbsoluteClusterHint) || (StartingCluster == WindowRelativeHint))
  1528. ) {
  1529. #if DBG
  1530. PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
  1531. #endif // DBG
  1532. //
  1533. // Take the clusters we found, and unlock the bit map.
  1534. //
  1535. FatReserveClusters(IrpContext, Vcb, StartingCluster, ClusterCount);
  1536. Window->ClustersFree -= ClusterCount;
  1537. StartingCluster += Window->FirstCluster;
  1538. StartingCluster -= 2;
  1539. ASSERT( PreviousClear - ClusterCount == Window->ClustersFree );
  1540. FatUnlockFreeClusterBitMap( Vcb );
  1541. //
  1542. // Note that this call will never fail since there is always
  1543. // room for one entry in an empty Mcb.
  1544. //
  1545. FatAddMcbEntry( Vcb, Mcb,
  1546. 0,
  1547. FatGetLboFromIndex( Vcb, StartingCluster ),
  1548. *ByteCount);
  1549. try {
  1550. //
  1551. // Update the fat.
  1552. //
  1553. FatAllocateClusters(IrpContext, Vcb,
  1554. StartingCluster,
  1555. ClusterCount);
  1556. } finally {
  1557. DebugUnwind( FatAllocateDiskSpace );
  1558. //
  1559. // If the allocate clusters failed, remove the run from the Mcb,
  1560. // unreserve the clusters, and reset the free cluster count.
  1561. //
  1562. if (AbnormalTermination()) {
  1563. FatRemoveMcbEntry( Vcb, Mcb, 0, *ByteCount );
  1564. FatLockFreeClusterBitMap( Vcb );
  1565. // Only clear bits if the bitmap window is the same.
  1566. if (Window == Vcb->CurrentWindow) {
  1567. // Both values (startingcluster and window->firstcluster) are
  1568. // already biased by 2, so will cancel, so we need to add in the 2 again.
  1569. FatUnreserveClusters( IrpContext, Vcb,
  1570. StartingCluster - Window->FirstCluster + 2,
  1571. ClusterCount );
  1572. }
  1573. Window->ClustersFree += ClusterCount;
  1574. Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
  1575. FatUnlockFreeClusterBitMap( Vcb );
  1576. }
  1577. ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
  1578. }
  1579. } else {
  1580. //
  1581. // Note that Index is a zero-based window-relative number. When appropriate
  1582. // it'll get converted into a true cluster number and put in Cluster, which
  1583. // will be a volume relative true cluster number.
  1584. //
  1585. ULONG Index;
  1586. ULONG Cluster;
  1587. ULONG CurrentVbo;
  1588. ULONG PriorLastCluster;
  1589. ULONG BytesFound;
  1590. ULONG ClustersFound = 0;
  1591. ULONG ClustersRemaining;
  1592. BOOLEAN LockedBitMap = FALSE;
  1593. BOOLEAN SelectNextContigWindow = FALSE;
  1594. //
  1595. // Drop our shared lock on the ChangeBitMapResource, and pick it up again
  1596. // exclusive in preparation for making a window swap.
  1597. //
  1598. FatUnlockFreeClusterBitMap(Vcb);
  1599. ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
  1600. ExAcquireResourceExclusiveLite(&Vcb->ChangeBitMapResource, TRUE);
  1601. FatLockFreeClusterBitMap(Vcb);
  1602. LockedBitMap = TRUE;
  1603. try {
  1604. if ( ExactMatchRequired && (1 == Vcb->NumberOfWindows)) {
  1605. //
  1606. // Give up right now, there are no more windows to search! RtlFindClearBits
// searches the whole bitmap, so we would have found any contiguous run
  1608. // large enough.
  1609. //
  1610. try_leave( Result = FALSE);
  1611. }
  1612. //
  1613. // While the request is still incomplete, look for the largest
  1614. // run of free clusters, mark them taken, allocate the run in
  1615. // the Mcb and Fat, and if this isn't the first time through
  1616. // the loop link it to prior run on the fat. The Mcb will
  1617. // coalesce automatically.
  1618. //
  1619. ClustersRemaining = ClusterCount;
  1620. CurrentVbo = 0;
  1621. PriorLastCluster = 0;
  1622. while (ClustersRemaining != 0) {
  1623. //
  1624. // If we just entered the loop, the bit map is already locked
  1625. //
  1626. if ( !LockedBitMap ) {
  1627. FatLockFreeClusterBitMap( Vcb );
  1628. LockedBitMap = TRUE;
  1629. }
  1630. //
  1631. // Find the largest run of free clusters. If the run is
  1632. // bigger than we need, only use what we need. Note that
  1633. // this will then be the last while() iteration.
  1634. //
  1635. // 12/3/95 - David Goebel: need to bias bitmap by 2 bits for the defrag
  1636. // hooks and the below macro became impossible to do without in-line
  1637. // procedures.
  1638. //
  1639. // ClustersFound = FatLongestFreeClusterRun( IrpContext, Vcb, &Index );
  1640. ClustersFound = 0;
  1641. if (!SelectNextContigWindow) {
  1642. if ( 0 != WindowRelativeHint) {
  1643. ULONG Desired = Vcb->FreeClusterBitMap.SizeOfBitMap - (WindowRelativeHint - 2);
  1644. //
  1645. // We will try to allocate contiguously. Try from the current hint the to
  1646. // end of current window. Don't try for more than we actually need.
  1647. //
  1648. if (Desired > ClustersRemaining) {
  1649. Desired = ClustersRemaining;
  1650. }
  1651. if (RtlAreBitsClear( &Vcb->FreeClusterBitMap,
  1652. WindowRelativeHint - 2,
  1653. Desired))
  1654. {
  1655. //
  1656. // Clusters from hint->...windowend are free. Take them.
  1657. //
  1658. Index = WindowRelativeHint - 2;
  1659. ClustersFound = Desired;
  1660. if (FatIsFat32(Vcb)) {
  1661. //
  1662. // We're now up against the end of the current window, so indicate that we
  1663. // want the next window in the sequence next time around. (If we're not up
  1664. // against the end of the window, then we got what we needed and won't be
  1665. // coming around again anyway).
  1666. //
  1667. SelectNextContigWindow = TRUE;
  1668. WindowRelativeHint = 2;
  1669. }
  1670. else {
  1671. //
  1672. // FAT 12/16 - we've run up against the end of the volume. Clear the
  1673. // hint, since we now have no idea where to look.
  1674. //
  1675. WindowRelativeHint = 0;
  1676. }
  1677. #if DBG
  1678. PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
  1679. #endif // DBG
  1680. }
  1681. else {
  1682. if (ExactMatchRequired) {
  1683. //
  1684. // If our caller required an exact match, then we're hosed. Bail out now.
  1685. //
  1686. try_leave( Result = FALSE);
  1687. }
  1688. //
  1689. // Hint failed, drop back to pot luck
  1690. //
  1691. WindowRelativeHint = 0;
  1692. }
  1693. }
  1694. if ((0 == WindowRelativeHint) && (0 == ClustersFound)) {
  1695. if (ClustersRemaining <= Vcb->CurrentWindow->ClustersFree) {
  1696. //
  1697. // The remaining allocation could be satisfied entirely from this
  1698. // window. We will ask only for what we need, to try and avoid
  1699. // unnecessarily fragmenting large runs of space by always using
  1700. // (part of) the largest run we can find. This call will return the
  1701. // first run large enough.
  1702. //
  1703. Index = RtlFindClearBits( &Vcb->FreeClusterBitMap, ClustersRemaining, 0);
  1704. if (-1 != Index) {
  1705. ClustersFound = ClustersRemaining;
  1706. }
  1707. }
  1708. if (0 == ClustersFound) {
  1709. //
  1710. // Still nothing, so just take the largest free run we can find.
  1711. //
  1712. ClustersFound = RtlFindLongestRunClear( &Vcb->FreeClusterBitMap, &Index );
  1713. }
  1714. #if DBG
  1715. PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
  1716. #endif // DBG
  1717. if (ClustersFound >= ClustersRemaining) {
  1718. ClustersFound = ClustersRemaining;
  1719. }
  1720. else {
  1721. //
  1722. // If we just ran up to the end of a window, set up a hint that
  1723. // we'd like the next consecutive window after this one. (FAT32 only)
  1724. //
  1725. if ( ((Index + ClustersFound) == Vcb->FreeClusterBitMap.SizeOfBitMap) &&
  1726. FatIsFat32( Vcb)
  1727. ) {
  1728. SelectNextContigWindow = TRUE;
  1729. WindowRelativeHint = 2;
  1730. }
  1731. }
  1732. }
  1733. }
  1734. if (ClustersFound == 0) {
  1735. ULONG FaveWindow = 0;
  1736. BOOLEAN SelectedWindow;
  1737. //
  1738. // If we found no free clusters on a single-window FAT,
  1739. // there was a bad problem with the free cluster count.
  1740. //
  1741. if (1 == Vcb->NumberOfWindows) {
  1742. FatBugCheck( 0, 5, 0 );
  1743. }
  1744. //
  1745. // Switch to a new bucket. Possibly the next one if we're
  1746. // currently on a roll (allocating contiguously)
  1747. //
  1748. SelectedWindow = FALSE;
  1749. if ( SelectNextContigWindow) {
  1750. ULONG NextWindow;
  1751. NextWindow = (((ULONG)((PUCHAR)Vcb->CurrentWindow - (PUCHAR)Vcb->Windows)) / sizeof( FAT_WINDOW)) + 1;
  1752. if ((NextWindow < Vcb->NumberOfWindows) &&
  1753. ( Vcb->Windows[ NextWindow].ClustersFree > 0)
  1754. ) {
  1755. FaveWindow = NextWindow;
  1756. SelectedWindow = TRUE;
  1757. }
  1758. else {
  1759. if (ExactMatchRequired) {
  1760. //
  1761. // Some dope tried to allocate a run past the end of the volume...
  1762. //
  1763. try_leave( Result = FALSE);
  1764. }
  1765. //
  1766. // Give up on the contiguous allocation attempts
  1767. //
  1768. WindowRelativeHint = 0;
  1769. }
  1770. SelectNextContigWindow = FALSE;
  1771. }
  1772. if (!SelectedWindow) {
  1773. //
  1774. // Select a new window to begin allocating from
  1775. //
  1776. FaveWindow = FatSelectBestWindow( Vcb);
  1777. }
  1778. //
  1779. // By now we'd better have found a window with some free clusters
  1780. //
  1781. if (0 == Vcb->Windows[ FaveWindow].ClustersFree) {
  1782. FatBugCheck( 0, 5, 1 );
  1783. }
  1784. Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
  1785. SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
  1786. FatExamineFatEntries( IrpContext, Vcb,
  1787. 0,
  1788. 0,
  1789. FALSE,
  1790. &Vcb->Windows[FaveWindow],
  1791. NULL);
  1792. if (!Wait) {
  1793. ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
  1794. }
  1795. //
  1796. // Now we'll just go around the loop again, having switched windows,
  1797. // and allocate....
  1798. //
  1799. #if DBG
  1800. PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
  1801. #endif //DBG
  1802. } // if (clustersfound == 0)
  1803. else {
  1804. //
  1805. // Take the clusters we found, convert our index to a cluster number
  1806. // and unlock the bit map.
  1807. //
  1808. Window = Vcb->CurrentWindow;
  1809. FatReserveClusters( IrpContext, Vcb, (Index + 2), ClustersFound );
  1810. Cluster = Index + Window->FirstCluster;
  1811. Window->ClustersFree -= ClustersFound;
  1812. ASSERT( PreviousClear - ClustersFound == Window->ClustersFree );
  1813. FatUnlockFreeClusterBitMap( Vcb );
  1814. LockedBitMap = FALSE;
  1815. //
  1816. // Add the newly alloced run to the Mcb.
  1817. //
  1818. BytesFound = ClustersFound << LogOfBytesPerCluster;
  1819. FatAddMcbEntry( Vcb, Mcb,
  1820. CurrentVbo,
  1821. FatGetLboFromIndex( Vcb, Cluster ),
  1822. BytesFound );
  1823. //
  1824. // Connect the last allocated run with this one, and allocate
  1825. // this run on the Fat.
  1826. //
  1827. if (PriorLastCluster != 0) {
  1828. FatSetFatEntry( IrpContext,
  1829. Vcb,
  1830. PriorLastCluster,
  1831. (FAT_ENTRY)Cluster );
  1832. }
  1833. //
  1834. // Update the fat
  1835. //
  1836. FatAllocateClusters( IrpContext, Vcb, Cluster, ClustersFound );
  1837. //
  1838. // Prepare for the next iteration.
  1839. //
  1840. CurrentVbo += BytesFound;
  1841. ClustersRemaining -= ClustersFound;
  1842. PriorLastCluster = Cluster + ClustersFound - 1;
  1843. }
  1844. } // while (clustersremaining)
  1845. } finally {
  1846. DebugUnwind( FatAllocateDiskSpace );
  1847. ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
  1848. //
  1849. // Is there any unwinding to do?
  1850. //
  1851. if ( AbnormalTermination() || (FALSE == Result)) {
  1852. //
  1853. // Flag to the caller that they're getting nothing
  1854. //
  1855. *ByteCount = 0;
  1856. //
  1857. // There are three places we could have taken this exception:
  1858. // when switching the window (FatExamineFatEntries), adding
  1859. // a found run to the Mcb (FatAddMcbEntry), or when writing
  1860. // the changes to the FAT (FatSetFatEntry). In the first case
  1861. // we don't have anything to unwind before deallocation, and
  1862. // can detect this by seeing if we have the ClusterBitmap
  1863. // mutex out.
  1864. if (!LockedBitMap) {
  1865. FatLockFreeClusterBitMap( Vcb );
  1866. //
  1867. // In these cases, we have the possiblity that the FAT
  1868. // window is still in place and we need to clear the bits.
  1869. // If the Mcb entry isn't there (we raised trying to add
  1870. // it), the effect of trying to remove it is a noop.
  1871. //
  1872. if (Window == Vcb->CurrentWindow) {
  1873. //
  1874. // Cluster reservation works on cluster 2 based window-relative
  1875. // numbers, so we must convert. The subtraction will lose the
  1876. // cluster 2 base, so bias the result.
  1877. //
  1878. FatUnreserveClusters( IrpContext, Vcb,
  1879. (Cluster - Window->FirstCluster) + 2,
  1880. ClustersFound );
  1881. }
  1882. //
  1883. // Note that FatDeallocateDiskSpace will take care of adjusting
  1884. // to account for the entries in the Mcb. All we have to account
  1885. // for is the last run that didn't make it.
  1886. //
  1887. Window->ClustersFree += ClustersFound;
  1888. Vcb->AllocationSupport.NumberOfFreeClusters += ClustersFound;
  1889. FatUnlockFreeClusterBitMap( Vcb );
  1890. FatRemoveMcbEntry( Vcb, Mcb, CurrentVbo, BytesFound );
  1891. } else {
  1892. //
  1893. // Just drop the mutex now - we didn't manage to do anything
  1894. // that needs to be backed out.
  1895. //
  1896. FatUnlockFreeClusterBitMap( Vcb );
  1897. }
  1898. try {
  1899. //
  1900. // Now we have tidied up, we are ready to just send the Mcb
  1901. // off to deallocate disk space
  1902. //
  1903. FatDeallocateDiskSpace( IrpContext, Vcb, Mcb );
  1904. } finally {
  1905. //
  1906. // Now finally (really), remove all the entries from the mcb
  1907. //
  1908. FatRemoveMcbEntry( Vcb, Mcb, 0, 0xFFFFFFFF );
  1909. }
  1910. }
  1911. DebugTrace(-1, Dbg, "FatAllocateDiskSpace -> (VOID)\n", 0);
  1912. } // finally
  1913. }
  1914. return;
  1915. }
VOID
FatDeallocateDiskSpace (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN PLARGE_MCB Mcb
    )

/*++

Routine Description:

    This procedure deallocates the disk space denoted by an input
    mcb. Note that the input MCB does not need to necessarily describe
    a chain that ends with a FAT_CLUSTER_LAST entry.

    Pictorially what is done is the following

        Fat |--a--|--b--|--c--|
        Mcb |--a--|--b--|--c--|

    becomes

        Fat |--0--|--0--|--0--|
        Mcb |--a--|--b--|--c--|

Arguments:

    IrpContext - Supplies the context of the current operation.

    Vcb - Supplies the VCB being modified

    Mcb - Supplies the MCB describing the disk space to deallocate. Note
        that Mcb is unchanged by this procedure.

Return Value:

    None.

--*/

{
    LBO Lbo;
    VBO Vbo;

    ULONG RunsInMcb;
    ULONG ByteCount;
    ULONG ClusterCount;
    ULONG ClusterIndex;
    ULONG McbIndex;

    UCHAR LogOfBytesPerCluster;

    PFAT_WINDOW Window;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatDeallocateDiskSpace\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " Mcb = %8lx\n", Mcb);

    LogOfBytesPerCluster = Vcb->AllocationSupport.LogOfBytesPerCluster;

    RunsInMcb = FsRtlNumberOfRunsInLargeMcb( Mcb );

    //
    // An empty Mcb means there is nothing to deallocate - bail early.
    //

    if ( RunsInMcb == 0 ) {

        DebugTrace(-1, Dbg, "FatDeallocateDiskSpace -> (VOID)\n", 0);
        return;
    }

    try {

        //
        // Run though the Mcb, freeing all the runs in the fat.
        //
        // We do this in two steps (first update the fat, then the bitmap
        // (which can't fail)) to prevent other people from taking clusters
        // that we need to re-allocate in the event of unwind.
        //

        ExAcquireResourceSharedLite(&Vcb->ChangeBitMapResource, TRUE);

        RunsInMcb = FsRtlNumberOfRunsInLargeMcb( Mcb );

        //
        // Pass 1: write FAT_CLUSTER_AVAILABLE into the FAT for every run.
        // This pass can raise; the finally clause below re-allocates any
        // runs already freed if it does.
        //

        for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) {

            FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount );

            //
            // Assert that Fat files have no holes.
            //

            ASSERT( Lbo != 0 );

            //
            // Write FAT_CLUSTER_AVAILABLE to each cluster in the run.
            //

            ClusterCount = ByteCount >> LogOfBytesPerCluster;
            ClusterIndex = FatGetIndexFromLbo( Vcb, Lbo );

            FatFreeClusters( IrpContext, Vcb, ClusterIndex, ClusterCount );
        }

        //
        // From now on, nothing can go wrong .... (as in raise)
        //
        // Pass 2: clear the free-cluster bitmap bits and fix up the
        // per-window and volume-wide free cluster counts.
        //

        FatLockFreeClusterBitMap( Vcb );

        for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) {

            ULONG ClusterEnd;
            ULONG MyStart, MyLength, count;
#if DBG
            ULONG PreviousClear, i;
#endif

            FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount );

            //
            // Mark the bits clear in the FreeClusterBitMap.
            //

            ClusterCount = ByteCount >> LogOfBytesPerCluster;
            ClusterIndex = FatGetIndexFromLbo( Vcb, Lbo );

            Window = Vcb->CurrentWindow;

            //
            // If we've divided the bitmap, elide bitmap manipulation for
            // runs that are outside the current bucket.
            //

            ClusterEnd = ClusterIndex + ClusterCount - 1;

            if (!(ClusterIndex > Window->LastCluster ||
                  ClusterEnd < Window->FirstCluster)) {

                //
                // The run being freed overlaps the current bucket, so we'll
                // have to clear some bits. Compute the intersection of the
                // run with the current window: the run may start before the
                // window, end after it, or both.
                //

                if (ClusterIndex < Window->FirstCluster &&
                    ClusterEnd > Window->LastCluster) {

                    MyStart = Window->FirstCluster;
                    MyLength = Window->LastCluster - Window->FirstCluster + 1;

                } else if (ClusterIndex < Window->FirstCluster) {

                    MyStart = Window->FirstCluster;
                    MyLength = ClusterEnd - Window->FirstCluster + 1;

                } else {

                    //
                    // The range being freed starts in the bucket, and may possibly
                    // extend beyond the bucket.
                    //

                    MyStart = ClusterIndex;

                    if (ClusterEnd <= Window->LastCluster) {

                        MyLength = ClusterCount;

                    } else {

                        MyLength = Window->LastCluster - ClusterIndex + 1;
                    }
                }

                if (MyLength == 0) {

                    continue;
                }

#if DBG
                PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );

                //
                // Verify that the Bits are all really set.
                //

                ASSERT( MyStart + MyLength - Window->FirstCluster <= Vcb->FreeClusterBitMap.SizeOfBitMap );

                for (i = 0; i < MyLength; i++) {

                    ASSERT( RtlCheckBit(&Vcb->FreeClusterBitMap,
                                        MyStart - Window->FirstCluster + i) == 1 );
                }
#endif // DBG

                //
                // Bitmap indices are window-relative and biased by cluster 2.
                //

                FatUnreserveClusters( IrpContext, Vcb,
                                      MyStart - Window->FirstCluster + 2,
                                      MyLength );
            }

            //
            // Adjust the ClustersFree count for each bitmap window, even the ones
            // that are not the current window.
            //

            if (FatIsFat32(Vcb)) {

                Window = &Vcb->Windows[FatWindowOfCluster( ClusterIndex )];

            } else {

                Window = &Vcb->Windows[0];
            }

            MyStart = ClusterIndex;

            //
            // Walk the run window by window, crediting each window with the
            // portion of the run that falls inside it.
            //

            for (MyLength = ClusterCount; MyLength > 0; MyLength -= count) {

                count = FatMin(Window->LastCluster - MyStart + 1, MyLength);
                Window->ClustersFree += count;

                //
                // If this was not the last window this allocation spanned,
                // advance to the next.
                //

                if (MyLength != count) {

                    Window++;
                    MyStart = Window->FirstCluster;
                }
            }

            //
            // Deallocation is now complete. Adjust the free cluster count.
            //

            Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
        }

#if DBG
        if (Vcb->CurrentWindow->ClustersFree !=
            RtlNumberOfClearBits(&Vcb->FreeClusterBitMap)) {

            DbgPrint("%x vs %x\n", Vcb->CurrentWindow->ClustersFree,
                     RtlNumberOfClearBits(&Vcb->FreeClusterBitMap));
            DbgPrint("%x for %x\n", ClusterIndex, ClusterCount);
        }
#endif

        FatUnlockFreeClusterBitMap( Vcb );

    } finally {

        DebugUnwind( FatDeallocateDiskSpace );

        //
        // Is there any unwinding to do?
        //

        ExReleaseResourceLite(&Vcb->ChangeBitMapResource);

        if ( AbnormalTermination() ) {

            LBO Lbo;
            VBO Vbo;

            ULONG Index;
            ULONG Clusters;
            ULONG FatIndex;
            ULONG PriorLastIndex;

            //
            // For each entry we already deallocated, reallocate it,
            // chaining together as nessecary. Note that we continue
            // up to and including the last "for" iteration even though
            // the SetFatRun could not have been successful. This
            // allows us a convienent way to re-link the final successful
            // SetFatRun.
            //
            // It is possible that the reason we got here will prevent us
            // from succeeding in this operation.
            //
            // NOTE: McbIndex still holds the index of the run whose
            // FatFreeClusters raised, so runs [0, McbIndex) were freed.
            //

            PriorLastIndex = 0;

            for (Index = 0; Index <= McbIndex; Index++) {

                FatGetNextMcbEntry(Vcb, Mcb, Index, &Vbo, &Lbo, &ByteCount);

                FatIndex = FatGetIndexFromLbo( Vcb, Lbo );
                Clusters = ByteCount >> LogOfBytesPerCluster;

                //
                // We must always restore the prior iteration's last
                // entry, pointing it to the first cluster of this run.
                //

                if (PriorLastIndex != 0) {

                    FatSetFatEntry( IrpContext,
                                    Vcb,
                                    PriorLastIndex,
                                    (FAT_ENTRY)FatIndex );
                }

                //
                // If this is not the last entry (the one that failed)
                // then reallocate the disk space on the fat.
                //

                if ( Index < McbIndex ) {

                    FatAllocateClusters(IrpContext, Vcb, FatIndex, Clusters);

                    PriorLastIndex = FatIndex + Clusters - 1;
                }
            }
        }

        DebugTrace(-1, Dbg, "FatDeallocateDiskSpace -> (VOID)\n", 0);
    }

    return;
}
VOID
FatSplitAllocation (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN OUT PLARGE_MCB Mcb,
    IN VBO SplitAtVbo,
    OUT PLARGE_MCB RemainingMcb
    )

/*++

Routine Description:

    This procedure takes a single mcb and splits its allocation into
    two separate allocation units. The separation must only be done
    on cluster boundaries, otherwise we bugcheck.

    On the disk this actually works by inserting a FAT_CLUSTER_LAST into
    the last index of the first part being split out.

    Pictorially what is done is the following (where ! denotes the end of
    the fat chain (i.e., FAT_CLUSTER_LAST)):

        Mcb          |--a--|--b--|--c--|--d--|--e--|--f--|
                                        ^
        SplitAtVbo ---------------------+

        RemainingMcb (empty)

    becomes

        Mcb          |--a--|--b--|--c--!

        RemainingMcb |--d--|--e--|--f--|

Arguments:

    IrpContext - Supplies the context of the current operation.

    Vcb - Supplies the VCB being modified

    Mcb - Supplies the MCB describing the allocation being split into
        two parts. Upon return this Mcb now contains the first chain.

    SplitAtVbo - Supplies the VBO of the first byte for the second chain
        that we creating.

    RemainingMcb - Receives the MCB describing the second chain of allocated
        disk space. The caller passes in an initialized Mcb that
        is filled in by this procedure STARTING AT VBO 0.

Return Value:

    None. On failure this routine raises, after gluing the Mcbs back
    together in the finally clause below.

--*/

{
    VBO SourceVbo;
    VBO TargetVbo;
    VBO DontCare;

    LBO Lbo;

    ULONG ByteCount;
    ULONG BytesPerCluster;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatSplitAllocation\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " Mcb = %8lx\n", Mcb);
    DebugTrace( 0, Dbg, " SplitAtVbo = %8lx\n", SplitAtVbo);
    DebugTrace( 0, Dbg, " RemainingMcb = %8lx\n", RemainingMcb);

    BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;

    //
    // Assert that the split point is cluster alligned
    //

    ASSERT( (SplitAtVbo & (BytesPerCluster - 1)) == 0 );

    //
    // We should never be handed an empty source MCB and asked to split
    // at a non zero point.
    //

    ASSERT( !((0 != SplitAtVbo) && (0 == FsRtlNumberOfRunsInLargeMcb( Mcb))));

    //
    // Assert we were given an empty target Mcb.
    //

    //
    // This assert is commented out to avoid hitting in the Ea error
    // path. In that case we will be using the same Mcb's to split the
    // allocation that we used to merge them. The target Mcb will contain
    // the runs that the split will attempt to insert.
    //
    //
    // ASSERT( FsRtlNumberOfRunsInMcb( RemainingMcb ) == 0 );
    //

    try {

        //
        // Move the runs after SplitAtVbo from the souce to the target
        //

        SourceVbo = SplitAtVbo;
        TargetVbo = 0;

        while (FatLookupMcbEntry(Vcb, Mcb, SourceVbo, &Lbo, &ByteCount, NULL)) {

            FatAddMcbEntry( Vcb, RemainingMcb, TargetVbo, Lbo, ByteCount );
            FatRemoveMcbEntry( Vcb, Mcb, SourceVbo, ByteCount );

            TargetVbo += ByteCount;
            SourceVbo += ByteCount;

            //
            // If SourceVbo overflows, we were actually snipping off the end
            // of the maximal file ... and are now done.
            //

            if (SourceVbo == 0) {
                break;
            }
        }

        //
        // Mark the last pre-split cluster as a FAT_LAST_CLUSTER
        //

        if ( SplitAtVbo != 0 ) {

            FatLookupLastMcbEntry( Vcb, Mcb, &DontCare, &Lbo, NULL );

            FatSetFatEntry( IrpContext,
                            Vcb,
                            FatGetIndexFromLbo( Vcb, Lbo ),
                            FAT_CLUSTER_LAST );
        }

    } finally {

        DebugUnwind( FatSplitAllocation );

        //
        // If we got an exception, we must glue back together the Mcbs
        //

        if ( AbnormalTermination() ) {

            //
            // Walk everything we moved into RemainingMcb back onto the
            // source Mcb at its original VBO offset.
            //

            TargetVbo = SplitAtVbo;
            SourceVbo = 0;

            while (FatLookupMcbEntry(Vcb, RemainingMcb, SourceVbo, &Lbo, &ByteCount, NULL)) {

                FatAddMcbEntry( Vcb, Mcb, TargetVbo, Lbo, ByteCount );
                FatRemoveMcbEntry( Vcb, RemainingMcb, SourceVbo, ByteCount );

                TargetVbo += ByteCount;
                SourceVbo += ByteCount;
            }
        }

        DebugTrace(-1, Dbg, "FatSplitAllocation -> (VOID)\n", 0);
    }

    return;
}
VOID
FatMergeAllocation (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN OUT PLARGE_MCB Mcb,
    IN PLARGE_MCB SecondMcb
    )

/*++

Routine Description:

    This routine takes two separate allocations described by two MCBs and
    joins them together into one allocation.

    Pictorially what is done is the following (where ! denotes the end of
    the fat chain (i.e., FAT_CLUSTER_LAST)):

        Mcb       |--a--|--b--|--c--!

        SecondMcb |--d--|--e--|--f--|

    becomes

        Mcb       |--a--|--b--|--c--|--d--|--e--|--f--|

        SecondMcb |--d--|--e--|--f--|

Arguments:

    IrpContext - Supplies the context of the current operation.

    Vcb - Supplies the VCB being modified

    Mcb - Supplies the MCB of the first allocation that is being modified.
        Upon return this Mcb will also describe the newly enlarged
        allocation

    SecondMcb - Supplies the ZERO VBO BASED MCB of the second allocation
        that is being appended to the first allocation. This
        procedure leaves SecondMcb unchanged.

Return Value:

    None. On failure this routine raises, after removing the runs it
    added to Mcb in the finally clause below.

--*/

{
    VBO SpliceVbo;
    LBO SpliceLbo;

    VBO SourceVbo;
    VBO TargetVbo;

    LBO Lbo;

    ULONG ByteCount;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatMergeAllocation\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " Mcb = %8lx\n", Mcb);
    DebugTrace( 0, Dbg, " SecondMcb = %8lx\n", SecondMcb);

    try {

        //
        // Append the runs from SecondMcb to Mcb
        //
        // The splice point is the last byte of the existing allocation;
        // the copied runs begin at the following VBO.
        //

        (void)FatLookupLastMcbEntry( Vcb, Mcb, &SpliceVbo, &SpliceLbo, NULL );

        SourceVbo = 0;
        TargetVbo = SpliceVbo + 1;

        while (FatLookupMcbEntry(Vcb, SecondMcb, SourceVbo, &Lbo, &ByteCount, NULL)) {

            FatAddMcbEntry( Vcb, Mcb, TargetVbo, Lbo, ByteCount );

            SourceVbo += ByteCount;
            TargetVbo += ByteCount;
        }

        //
        // Link the last pre-merge cluster to the first cluster of SecondMcb
        //

        FatLookupMcbEntry( Vcb, SecondMcb, 0, &Lbo, (PULONG)NULL, NULL );

        FatSetFatEntry( IrpContext,
                        Vcb,
                        FatGetIndexFromLbo( Vcb, SpliceLbo ),
                        (FAT_ENTRY)FatGetIndexFromLbo( Vcb, Lbo ) );

    } finally {

        DebugUnwind( FatMergeAllocation );

        //
        // If we got an exception, we must remove the runs added to Mcb
        //
        // NOTE(review): this reads TargetVbo/SpliceVbo; if the very first
        // FatLookupLastMcbEntry call raised before they were assigned,
        // they would be uninitialized here - confirm that call cannot raise.
        //

        if ( AbnormalTermination() ) {

            ULONG CutLength;

            if ((CutLength = TargetVbo - (SpliceVbo + 1)) != 0) {

                FatRemoveMcbEntry( Vcb, Mcb, SpliceVbo + 1, CutLength);
            }
        }

        DebugTrace(-1, Dbg, "FatMergeAllocation -> (VOID)\n", 0);
    }

    return;
}
  2334. //
  2335. // Internal support routine
  2336. //
  2337. CLUSTER_TYPE
  2338. FatInterpretClusterType (
  2339. IN PVCB Vcb,
  2340. IN FAT_ENTRY Entry
  2341. )
  2342. /*++
  2343. Routine Description:
  2344. This procedure tells the caller how to interpret the input fat table
  2345. entry. It will indicate if the fat cluster is available, resereved,
  2346. bad, the last one, or the another fat index. This procedure can deal
  2347. with both 12 and 16 bit fat.
  2348. Arguments:
  2349. Vcb - Supplies the Vcb to examine, yields 12/16 bit info
  2350. Entry - Supplies the fat entry to examine
  2351. Return Value:
  2352. CLUSTER_TYPE - Is the type of the input Fat entry
  2353. --*/
  2354. {
  2355. DebugTrace(+1, Dbg, "InterpretClusterType\n", 0);
  2356. DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
  2357. DebugTrace( 0, Dbg, " Entry = %8lx\n", Entry);
  2358. PAGED_CODE();
  2359. switch(Vcb->AllocationSupport.FatIndexBitSize ) {
  2360. case 32:
  2361. Entry &= FAT32_ENTRY_MASK;
  2362. break;
  2363. case 12:
  2364. ASSERT( Entry <= 0xfff );
  2365. if (Entry >= 0x0ff0) {
  2366. Entry |= 0x0FFFF000;
  2367. }
  2368. break;
  2369. default:
  2370. case 16:
  2371. ASSERT( Entry <= 0xffff );
  2372. if (Entry >= 0x0fff0) {
  2373. Entry |= 0x0FFF0000;
  2374. }
  2375. break;
  2376. }
  2377. if (Entry == FAT_CLUSTER_AVAILABLE) {
  2378. DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterAvailable\n", 0);
  2379. return FatClusterAvailable;
  2380. } else if (Entry < FAT_CLUSTER_RESERVED) {
  2381. DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterNext\n", 0);
  2382. return FatClusterNext;
  2383. } else if (Entry < FAT_CLUSTER_BAD) {
  2384. DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterReserved\n", 0);
  2385. return FatClusterReserved;
  2386. } else if (Entry == FAT_CLUSTER_BAD) {
  2387. DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterBad\n", 0);
  2388. return FatClusterBad;
  2389. } else {
  2390. DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterLast\n", 0);
  2391. return FatClusterLast;
  2392. }
  2393. }
  2394. //
  2395. // Internal support routine
  2396. //
VOID
FatLookupFatEntry (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN ULONG FatIndex,
    IN OUT PULONG FatEntry,
    IN OUT PFAT_ENUMERATION_CONTEXT Context
    )

/*++

Routine Description:

    This routine takes an index into the fat and gives back the value
    in the Fat at this index. At any given time, for a 16 bit fat, this
    routine allows only one page per volume of the fat to be pinned in
    memory. For a 12 bit bit fat, the entire fat (max 6k) is pinned. This
    extra layer of caching makes the vast majority of requests very
    fast. The context for this caching stored in a structure in the Vcb.

Arguments:

    IrpContext - Supplies the context of the current operation.

    Vcb - Supplies the Vcb to examine, yields 12/16 bit info,
        fat access context, etc.

    FatIndex - Supplies the fat index to examine.

    FatEntry - Receives the fat entry pointed to by FatIndex. Note that
        it must point to non-paged pool.

    Context - This structure keeps track of a page of pinned fat between calls.

Return Value:

    None.

--*/

{
    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatLookupFatEntry\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " FatIndex = %4x\n", FatIndex);
    DebugTrace( 0, Dbg, " FatEntry = %8lx\n", FatEntry);

    //
    // Make sure they gave us a valid fat index.
    //

    FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex);

    //
    // Case on 12 or 16 bit fats.
    //
    // In the 12 bit case (mostly floppies) we always have the whole fat
    // (max 6k bytes) pinned during allocation operations. This is possibly
    // a wee bit slower, but saves headaches over fat entries with 8 bits
    // on one page, and 4 bits on the next.
    //
    // The 16 bit case always keeps the last used page pinned until all
    // operations are done and it is unpinned.
    //

    //
    // DEAL WITH 12 BIT CASE
    //

    if (Vcb->AllocationSupport.FatIndexBitSize == 12) {

        //
        // Check to see if the fat is already pinned, otherwise pin it.
        //

        if (Context->Bcb == NULL) {

            FatReadVolumeFile( IrpContext,
                               Vcb,
                               FatReservedBytes( &Vcb->Bpb ),
                               FatBytesPerFat( &Vcb->Bpb ),
                               &Context->Bcb,
                               &Context->PinnedPage );
        }

        //
        // Load the return value.
        //

        FatLookup12BitEntry( Context->PinnedPage, FatIndex, FatEntry );

    } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {

        //
        // DEAL WITH 32 BIT CASE
        //

        ULONG PageEntryOffset;
        ULONG OffsetIntoVolumeFile;

        //
        // Initialize two local variables that help us.
        //
        // OffsetIntoVolumeFile is the byte offset of this entry in the
        // volume file; PageEntryOffset is its ULONG index within the
        // page that contains it.
        //

        OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(FAT_ENTRY);
        PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(FAT_ENTRY);

        //
        // Check to see if we need to read in a new page of fat
        //

        if ((Context->Bcb == NULL) ||
            (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {

            //
            // The entry wasn't in the pinned page, so must we unpin the current
            // page (if any) and read in a new page.
            //

            FatUnpinBcb( IrpContext, Context->Bcb );

            FatReadVolumeFile( IrpContext,
                               Vcb,
                               OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
                               PAGE_SIZE,
                               &Context->Bcb,
                               &Context->PinnedPage );

            Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
        }

        //
        // Grab the fat entry from the pinned page, and return
        //
        // The high reserved nibble of a FAT32 entry is masked off.
        //

        *FatEntry = ((PULONG)(Context->PinnedPage))[PageEntryOffset] & FAT32_ENTRY_MASK;

    } else {

        //
        // DEAL WITH 16 BIT CASE
        //

        ULONG PageEntryOffset;
        ULONG OffsetIntoVolumeFile;

        //
        // Initialize two local variables that help us.
        //

        OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(USHORT);
        PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(USHORT);

        //
        // Check to see if we need to read in a new page of fat
        //

        if ((Context->Bcb == NULL) ||
            (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {

            //
            // The entry wasn't in the pinned page, so must we unpin the current
            // page (if any) and read in a new page.
            //

            FatUnpinBcb( IrpContext, Context->Bcb );

            FatReadVolumeFile( IrpContext,
                               Vcb,
                               OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
                               PAGE_SIZE,
                               &Context->Bcb,
                               &Context->PinnedPage );

            Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
        }

        //
        // Grab the fat entry from the pinned page, and return
        //

        *FatEntry = ((PUSHORT)(Context->PinnedPage))[PageEntryOffset];
    }

    DebugTrace(-1, Dbg, "FatLookupFatEntry -> (VOID)\n", 0);
    return;
}
VOID
FatSetFatEntry (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN ULONG FatIndex,
    IN FAT_ENTRY FatEntry
    )

/*++

Routine Description:

    This routine takes an index into the fat and puts a value in the Fat
    at this index.  The routine special cases 12, 16 and 32 bit fats.  In
    all cases we go to the cache manager for a piece of the fat.

    We have a special form of this call for setting the DOS-style dirty bit.
    Unlike the dirty bit in the boot sector, we do not go to special effort
    to make sure that this hits the disk synchronously - if the system goes
    down in the window between the dirty bit being set in the boot sector
    and the FAT index zero dirty bit being lazy written, then life is tough.

    The only possible scenario is that Win9x may see what it thinks is a clean
    volume that really isn't (hopefully Memphis will pay attention to our dirty
    bit as well).  The dirty bit will get out quickly, and if heavy activity is
    occurring, then the dirty bit should actually be there virtually all of the
    time since the act of cleaning the volume is the "rare" occurance.

    There are synchronization concerns that would crop up if we tried to make
    this synchronous.  This thread may already own the Bcb shared for the first
    sector of the FAT (so we can't get it exclusive for a writethrough).  This
    would require some more serious replumbing to work around than I want to
    consider at this time.

    We can and do, however, synchronously set the bit clean.

    At this point the reader should understand why the NT dirty bit is where it is.

Arguments:

    IrpContext - Supplies the context of the current operation; its WAIT and
        DISABLE_DIRTY flags are temporarily forced on for dirty-bit calls and
        restored in the finally clause below.

    Vcb - Supplies the Vcb to examine, yields 12/16/32 bit info, etc.

    FatIndex - Supplies the destination fat index, or FAT_DIRTY_BIT_INDEX for
        the special clean/dirty-bit form of the call.

    FatEntry - Supplies the source fat entry, or FAT_CLEAN_VOLUME /
        FAT_DIRTY_VOLUME for the special form.

Return Value:

    None.  Raises (via FatRaiseStatus / FatPrepareWriteVolumeFile) on error.

--*/

{
    LBO Lbo;
    PBCB Bcb = NULL;
    ULONG SectorSize;
    ULONG OffsetIntoVolumeFile;
    ULONG WasWait = TRUE;
    BOOLEAN RegularOperation = TRUE;
    BOOLEAN CleaningOperation = FALSE;
    BOOLEAN ReleaseMutex = FALSE;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatSetFatEntry\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " FatIndex = %4x\n", FatIndex);
    DebugTrace( 0, Dbg, " FatEntry = %4x\n", FatEntry);

    //
    // Make sure they gave us a valid fat index if this isn't the special
    // clean-bit modifying call.
    //

    if (FatIndex == FAT_DIRTY_BIT_INDEX) {

        //
        // We are setting the clean bit state.  Of course, we could
        // have corruption that would cause us to try to fiddle the
        // reserved index - we guard against this by having the
        // special entry values use the reserved high 4 bits that
        // we know that we'll never try to set.
        //

        //
        // We don't want to repin the FAT pages involved here.  Just
        // let the lazy writer hit them when it can.
        //

        RegularOperation = FALSE;

        switch (FatEntry) {
        case FAT_CLEAN_VOLUME:

            //
            // The clean entry is the same pattern for all FAT sizes;
            // remember this is a cleaning operation so the Bcb is
            // written through below.
            //

            FatEntry = FAT_CLEAN_ENTRY;
            CleaningOperation = TRUE;
            break;

        case FAT_DIRTY_VOLUME:

            //
            // The dirty pattern differs per FAT width.
            //

            switch (Vcb->AllocationSupport.FatIndexBitSize) {
            case 12:
                FatEntry = FAT12_DIRTY_ENTRY;
                break;

            case 32:
                FatEntry = FAT32_DIRTY_ENTRY;
                break;

            default:
                FatEntry = FAT16_DIRTY_ENTRY;
                break;
            }

            break;

        default:

            //
            // Any other value aimed at the reserved index is corruption.
            //

            FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
            break;
        }

        //
        // Disable dirtying semantics for the duration of this operation.  Force this
        // operation to wait for the duration.  The previous WAIT state is restored
        // in the finally clause.
        //

        WasWait = FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
        SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT | IRP_CONTEXT_FLAG_DISABLE_DIRTY );

    } else {

        //
        // Normal call: the value must fit in the FAT32 entry mask and the
        // index must lie within the volume's cluster range.
        //

        ASSERT( !(FatEntry & ~FAT32_ENTRY_MASK) );
        FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex);
    }

    //
    // Set Sector Size
    //

    SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;

    //
    // Case on 12 or 16 bit fats.
    //
    // In the 12 bit case (mostly floppies) we always have the whole fat
    // (max 6k bytes) pinned during allocation operations. This is possibly
    // a wee bit slower, but saves headaches over fat entries with 8 bits
    // on one page, and 4 bits on the next.
    //
    // In the 16 bit case we only read the page that we need to set the fat
    // entry.
    //

    //
    // DEAL WITH 12 BIT CASE
    //

    try {

        if (Vcb->AllocationSupport.FatIndexBitSize == 12) {

            PVOID PinnedFat;

            //
            // Make sure we have a valid entry
            //

            FatEntry &= 0xfff;

            //
            // We read in the entire fat. Note that using prepare write marks
            // the bcb pre-dirty, so we don't have to do it explicitly.
            //

            OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) + FatIndex * 3 / 2;

            FatPrepareWriteVolumeFile( IrpContext,
                                       Vcb,
                                       FatReservedBytes( &Vcb->Bpb ),
                                       FatBytesPerFat( &Vcb->Bpb ),
                                       &Bcb,
                                       &PinnedFat,
                                       RegularOperation,
                                       FALSE );

            //
            // Mark the sector(s) dirty in the DirtyFatMcb.  This call is
            // complicated somewhat for the 12 bit case since a single
            // entry write can span two sectors (and pages).
            //
            // Get the Lbo for the sector where the entry starts, and add it to
            // the dirty fat Mcb.
            //

            Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);

            FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize);

            //
            // If the entry started on the last byte of the sector, it continues
            // to the next sector, so mark the next sector dirty as well.
            //
            // Note that this entry will simply coalese with the last entry,
            // so this operation cannot fail.  Also if we get this far, we have
            // made it, so no unwinding will be needed.
            //

            if ( (OffsetIntoVolumeFile & (SectorSize - 1)) == (SectorSize - 1) ) {

                Lbo += SectorSize;

                FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
            }

            //
            // Store the entry into the fat; we need a little synchonization
            // here and can't use a spinlock since the bytes might not be
            // resident.  ReleaseMutex tracks ownership so the finally clause
            // can release the mutex if FatSet12BitEntry raises.
            //

            FatLockFreeClusterBitMap( Vcb );
            ReleaseMutex = TRUE;

            FatSet12BitEntry( PinnedFat, FatIndex, FatEntry );

            FatUnlockFreeClusterBitMap( Vcb );
            ReleaseMutex = FALSE;

        } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {

            //
            // DEAL WITH 32 BIT CASE
            //

            PULONG PinnedFatEntry32;

            //
            // Read in a new page of fat
            //

            OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) +
                                   FatIndex * sizeof( FAT_ENTRY );

            FatPrepareWriteVolumeFile( IrpContext,
                                       Vcb,
                                       OffsetIntoVolumeFile,
                                       sizeof(FAT_ENTRY),
                                       &Bcb,
                                       (PVOID *)&PinnedFatEntry32,
                                       RegularOperation,
                                       FALSE );

            //
            // Mark the sector dirty in the DirtyFatMcb
            //

            Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);

            FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize);

            //
            // Store the FatEntry to the pinned page.
            //
            // Preserve the reserved bits in FAT32 entries in the file heap,
            // except for the dirty-bit index, which is stored whole.
            //

#ifdef ALPHA
            FatLockFreeClusterBitMap( Vcb );
            ReleaseMutex = TRUE;
#endif // ALPHA

            if (FatIndex != FAT_DIRTY_BIT_INDEX) {

                *PinnedFatEntry32 = ((*PinnedFatEntry32 & ~FAT32_ENTRY_MASK) | FatEntry);

            } else {

                *PinnedFatEntry32 = FatEntry;
            }

#ifdef ALPHA
            FatUnlockFreeClusterBitMap( Vcb );
            ReleaseMutex = FALSE;
#endif // ALPHA

        } else {

            //
            // DEAL WITH 16 BIT CASE
            //

            PUSHORT PinnedFatEntry;

            //
            // Read in a new page of fat
            //

            OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) +
                                   FatIndex * sizeof(USHORT);

            FatPrepareWriteVolumeFile( IrpContext,
                                       Vcb,
                                       OffsetIntoVolumeFile,
                                       sizeof(USHORT),
                                       &Bcb,
                                       (PVOID *)&PinnedFatEntry,
                                       RegularOperation,
                                       FALSE );

            //
            // Mark the sector dirty in the DirtyFatMcb
            //

            Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);

            FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize);

            //
            // Store the FatEntry to the pinned page.
            //
            // We need extra synchronization here for broken architectures
            // like the ALPHA that don't support atomic 16 bit writes.
            //

#ifdef ALPHA
            FatLockFreeClusterBitMap( Vcb );
            ReleaseMutex = TRUE;
#endif // ALPHA

            *PinnedFatEntry = (USHORT)FatEntry;

#ifdef ALPHA
            FatUnlockFreeClusterBitMap( Vcb );
            ReleaseMutex = FALSE;
#endif // ALPHA
        }

    } finally {

        DebugUnwind( FatSetFatEntry );

        //
        // Re-enable volume dirtying in case this was a dirty bit operation.
        //

        ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_DISABLE_DIRTY );

        //
        // Make this operation asynchronous again if needed.
        //

        if (!WasWait) {

            ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
        }

        //
        // If we still somehow have the Mutex, release it.  This only
        // happens if the try body raised between lock and unlock.
        //

        if (ReleaseMutex) {

            ASSERT( AbnormalTermination() );

            FatUnlockFreeClusterBitMap( Vcb );
        }

        //
        // Unpin the Bcb.  For cleaning operations, we make this write-through
        // by repinning and flushing the repinned Bcb synchronously.
        //

        if (CleaningOperation && Bcb) {

            IO_STATUS_BLOCK IgnoreStatus;

            CcRepinBcb( Bcb );
            CcUnpinData( Bcb );
            DbgDoit( IrpContext->PinCount -= 1 );
            CcUnpinRepinnedBcb( Bcb, TRUE, &IgnoreStatus );

        } else {

            FatUnpinBcb(IrpContext, Bcb);
        }

        DebugTrace(-1, Dbg, "FatSetFatEntry -> (VOID)\n", 0);
    }

    return;
}
  2813. //
  2814. // Internal support routine
  2815. //
VOID
FatSetFatRun (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN ULONG StartingFatIndex,
    IN ULONG ClusterCount,
    IN BOOLEAN ChainTogether
    )

/*++

Routine Description:

    This routine sets a continuous run of clusters in the fat.  If ChainTogether
    is TRUE, then the clusters are linked together as in normal Fat fasion,
    with the last cluster receiving FAT_CLUSTER_LAST.  If ChainTogether is
    FALSE, all the entries are set to FAT_CLUSTER_AVAILABLE, effectively
    freeing all the clusters in the run.

Arguments:

    IrpContext - Supplies the context of the current operation.

    Vcb - Supplies the Vcb to examine, yields 12/16 bit info, etc.

    StartingFatIndex - Supplies the destination fat index.

    ClusterCount - Supplies the number of contiguous clusters to work on.

    ChainTogether - Tells us whether to fill the entries with links, or
        FAT_CLUSTER_AVAILABLE

Return Value:

    VOID

--*/

{
    //
    // MAXCOUNTCLUS limits how many FAT32 entries are updated per chunk
    // (so we don't try to pin megabytes of FAT at once); COUNTSAVEDBCBS
    // is sized to hold the Bcb/buffer pairs for one chunk plus slack for
    // the boot sector page and the NULL end marker.
    //

#define MAXCOUNTCLUS 0x10000
#define COUNTSAVEDBCBS ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)

    //
    // SavedBcbs[i][0] is the pinned Bcb, SavedBcbs[i][1] the mapped buffer
    // for page i; a NULL Bcb terminates the list for the unpin loops.
    //

    PBCB SavedBcbs[COUNTSAVEDBCBS][2];

    ULONG SectorSize;
    ULONG Cluster;

    LBO StartSectorLbo;
    LBO FinalSectorLbo;
    LBO Lbo;

    PVOID PinnedFat;

    ULONG StartingPage;
    BOOLEAN ReleaseMutex = FALSE;

    // Remember the original start so the FAT32 unwind in the finally
    // clause knows how far back to roll changes.
    ULONG SavedStartingFatIndex = StartingFatIndex;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatSetFatRun\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " StartingFatIndex = %8x\n", StartingFatIndex);
    DebugTrace( 0, Dbg, " ClusterCount = %8lx\n", ClusterCount);
    DebugTrace( 0, Dbg, " ChainTogether = %s\n", ChainTogether ? "TRUE":"FALSE");

    //
    // Make sure they gave us a valid fat run.
    //

    FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex);
    FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex + ClusterCount - 1);

    //
    // Check special case
    //

    if (ClusterCount == 0) {

        DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
        return;
    }

    //
    // Set Sector Size
    //

    SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;

    //
    // Case on 12 or 16 bit fats.
    //
    // In the 12 bit case (mostly floppies) we always have the whole fat
    // (max 6k bytes) pinned during allocation operations. This is possibly
    // a wee bit slower, but saves headaches over fat entries with 8 bits
    // on one page, and 4 bits on the next.
    //
    // In the 16 bit case we only read one page at a time, as needed.
    //

    //
    // DEAL WITH 12 BIT CASE
    //

    try {

        if (Vcb->AllocationSupport.FatIndexBitSize == 12) {

            StartingPage = 0;

            //
            // We read in the entire fat.  Note that using prepare write marks
            // the bcb pre-dirty, so we don't have to do it explicitly.
            //

            RtlZeroMemory( &SavedBcbs[0], 2 * sizeof(PBCB) * 2);

            FatPrepareWriteVolumeFile( IrpContext,
                                       Vcb,
                                       FatReservedBytes( &Vcb->Bpb ),
                                       FatBytesPerFat( &Vcb->Bpb ),
                                       &SavedBcbs[0][0],
                                       &PinnedFat,
                                       TRUE,
                                       FALSE );

            //
            // Mark the affected sectors dirty.  Note that FinalSectorLbo is
            // the Lbo of the END of the entry (Thus * 3 + 2).  This makes sure
            // we catch the case of a dirty fat entry straddling a sector boundry.
            //
            // Note that if the first AddMcbEntry succeeds, all following ones
            // will simply coalese, and thus also succeed.
            //

            StartSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + StartingFatIndex * 3 / 2)
                             & ~(SectorSize - 1);

            FinalSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + ((StartingFatIndex +
                             ClusterCount) * 3 + 2) / 2) & ~(SectorSize - 1);

            for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {

                FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
            }

            //
            // Store the entries into the fat; we need a little
            // synchonization here and can't use a spinlock since the bytes
            // might not be resident.
            //

            FatLockFreeClusterBitMap( Vcb );
            ReleaseMutex = TRUE;

            for (Cluster = StartingFatIndex;
                 Cluster < StartingFatIndex + ClusterCount - 1;
                 Cluster++) {

                FatSet12BitEntry( PinnedFat,
                                  Cluster,
                                  ChainTogether ? Cluster + 1 : FAT_CLUSTER_AVAILABLE );
            }

            //
            // Save the last entry
            //

            FatSet12BitEntry( PinnedFat,
                              Cluster,
                              ChainTogether ?
                              FAT_CLUSTER_LAST & 0xfff : FAT_CLUSTER_AVAILABLE );

            FatUnlockFreeClusterBitMap( Vcb );
            ReleaseMutex = FALSE;

        } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {

            //
            // DEAL WITH 32 BIT CASE
            //
            // The run is processed in chunks of at most MAXCOUNTCLUS entries;
            // each pass of this loop pins the pages for one chunk, updates
            // them, and unpins them before moving on.
            //

            for (;;) {

                VBO StartOffsetInVolume;
                VBO FinalOffsetInVolume;
                ULONG Page;
                ULONG FinalCluster;
                PULONG FatEntry;
                ULONG ClusterCountThisRun;

                StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
                                      StartingFatIndex * sizeof(FAT_ENTRY);

                if (ClusterCount > MAXCOUNTCLUS) {
                    ClusterCountThisRun = MAXCOUNTCLUS;
                } else {
                    ClusterCountThisRun = ClusterCount;
                }

                FinalOffsetInVolume = StartOffsetInVolume +
                                      (ClusterCountThisRun - 1) * sizeof(FAT_ENTRY);

                StartingPage = StartOffsetInVolume / PAGE_SIZE;

                {
                    ULONG NumberOfPages;
                    ULONG Offset;

                    NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
                                    (StartOffsetInVolume / PAGE_SIZE) + 1;

                    //
                    // Zero one extra Bcb slot so the unpin loops below see a
                    // NULL end marker after the last pinned page.
                    //

                    RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );

                    for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
                          Page < NumberOfPages;
                          Page++, Offset += PAGE_SIZE ) {

                        FatPrepareWriteVolumeFile( IrpContext,
                                                   Vcb,
                                                   Offset,
                                                   PAGE_SIZE,
                                                   &SavedBcbs[Page][0],
                                                   (PVOID *)&SavedBcbs[Page][1],
                                                   TRUE,
                                                   FALSE );

                        if (Page == 0) {

                            //
                            // Point at the first entry of the run within the
                            // first pinned page.
                            //

                            FatEntry = (PULONG)((PUCHAR)SavedBcbs[0][1] +
                                                (StartOffsetInVolume % PAGE_SIZE));
                        }
                    }
                }

                //
                // Mark the run dirty
                //

                StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
                FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);

                for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {

                    FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO)Lbo, Lbo, SectorSize );
                }

                //
                // Store the entries
                //
                // We need extra synchronization here for broken architectures
                // like the ALPHA that don't support atomic 16 bit writes.
                //

#ifdef ALPHA
                FatLockFreeClusterBitMap( Vcb );
                ReleaseMutex = TRUE;
#endif // ALPHA

                FinalCluster = StartingFatIndex + ClusterCountThisRun - 1;
                Page = 0;

                for (Cluster = StartingFatIndex;
                     Cluster <= FinalCluster;
                     Cluster++, FatEntry++) {

                    //
                    // If we just crossed a page boundry (as opposed to starting
                    // on one), update our idea of FatEntry.

                    if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
                         (Cluster != StartingFatIndex) ) {

                        Page += 1;
                        FatEntry = (PULONG)SavedBcbs[Page][1];
                    }

                    *FatEntry = ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
                                                FAT_CLUSTER_AVAILABLE;
                }

                //
                // Fix up the last entry if we were chaining together.  This is
                // only done on the final chunk (ClusterCount <= MAXCOUNTCLUS);
                // intermediate chunks correctly link to the next chunk's first
                // cluster.
                //

                if ((ClusterCount <= MAXCOUNTCLUS) &&
                    ChainTogether ) {

                    *(FatEntry-1) = FAT_CLUSTER_LAST;
                }

#ifdef ALPHA
                FatUnlockFreeClusterBitMap( Vcb );
                ReleaseMutex = FALSE;
#endif // ALPHA

                {
                    ULONG i = 0;

                    //
                    // Unpin the Bcbs for this chunk, re-NULLing the slots so
                    // the finally clause won't unpin them a second time.
                    //

                    while ( SavedBcbs[i][0] != NULL ) {

                        FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
                        SavedBcbs[i][0] = NULL;

                        i += 1;
                    }
                }

                if (ClusterCount <= MAXCOUNTCLUS) {

                    break;

                } else {

                    //
                    // Advance to the next chunk.  Note StartingFatIndex moving
                    // forward here is what the unwind below measures against
                    // SavedStartingFatIndex.
                    //

                    StartingFatIndex += MAXCOUNTCLUS;
                    ClusterCount -= MAXCOUNTCLUS;
                }
            }

        } else {

            //
            // DEAL WITH 16 BIT CASE
            //

            VBO StartOffsetInVolume;
            VBO FinalOffsetInVolume;
            ULONG Page;
            ULONG FinalCluster;
            PUSHORT FatEntry;

            StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
                                  StartingFatIndex * sizeof(USHORT);

            FinalOffsetInVolume = StartOffsetInVolume +
                                  (ClusterCount - 1) * sizeof(USHORT);

            StartingPage = StartOffsetInVolume / PAGE_SIZE;

            //
            // Read in one page of fat at a time.  We cannot read in the
            // all of the fat we need because of cache manager limitations.
            //
            // SavedBcb was initialized to be able to hold the largest
            // possible number of pages in a fat plus and extra one to
            // accomadate the boot sector, plus one more to make sure there
            // is enough room for the RtlZeroMemory below that needs the mark
            // the first Bcb after all the ones we will use as an end marker.
            //

            {
                ULONG NumberOfPages;
                ULONG Offset;

                NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
                                (StartOffsetInVolume / PAGE_SIZE) + 1;

                RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );

                for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
                      Page < NumberOfPages;
                      Page++, Offset += PAGE_SIZE ) {

                    FatPrepareWriteVolumeFile( IrpContext,
                                               Vcb,
                                               Offset,
                                               PAGE_SIZE,
                                               &SavedBcbs[Page][0],
                                               (PVOID *)&SavedBcbs[Page][1],
                                               TRUE,
                                               FALSE );

                    if (Page == 0) {

                        FatEntry = (PUSHORT)((PUCHAR)SavedBcbs[0][1] +
                                             (StartOffsetInVolume % PAGE_SIZE));
                    }
                }
            }

            //
            // Mark the run dirty
            //

            StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
            FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);

            for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {

                FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
            }

            //
            // Store the entries
            //
            // We need extra synchronization here for broken architectures
            // like the ALPHA that don't support atomic 16 bit writes.
            //

#ifdef ALPHA
            FatLockFreeClusterBitMap( Vcb );
            ReleaseMutex = TRUE;
#endif // ALPHA

            FinalCluster = StartingFatIndex + ClusterCount - 1;
            Page = 0;

            for (Cluster = StartingFatIndex;
                 Cluster <= FinalCluster;
                 Cluster++, FatEntry++) {

                //
                // If we just crossed a page boundry (as opposed to starting
                // on one), update our idea of FatEntry.

                if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
                     (Cluster != StartingFatIndex) ) {

                    Page += 1;
                    FatEntry = (PUSHORT)SavedBcbs[Page][1];
                }

                *FatEntry = (USHORT) (ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
                                                      FAT_CLUSTER_AVAILABLE);
            }

            //
            // Fix up the last entry if we were chaining together
            //

            if ( ChainTogether ) {

                *(FatEntry-1) = (USHORT)FAT_CLUSTER_LAST;
            }

#ifdef ALPHA
            FatUnlockFreeClusterBitMap( Vcb );
            ReleaseMutex = FALSE;
#endif // ALPHA
        }

    } finally {

        ULONG i = 0;

        DebugUnwind( FatSetFatRun );

        //
        // If we still somehow have the Mutex, release it.
        //

        if (ReleaseMutex) {

            ASSERT( AbnormalTermination() );

            FatUnlockFreeClusterBitMap( Vcb );
        }

        //
        // Unpin the Bcbs
        //

        while ( SavedBcbs[i][0] != NULL ) {

            FatUnpinBcb( IrpContext, SavedBcbs[i][0] );

            i += 1;
        }

        //
        // At this point nothing in this finally clause should have raised.
        // So, now comes the unsafe (sigh) stuff.
        //

        if ( AbnormalTermination() &&
             (Vcb->AllocationSupport.FatIndexBitSize == 32) ) {

            //
            // Fat32 unwind
            //
            // This case is more complex because the FAT12 and FAT16 cases
            // pin all the needed FAT pages (128K max), after which it
            // can't fail, before changing any FAT entries.  In the Fat32
            // case, it may not be practical to pin all the needed FAT
            // pages, because that could span many megabytes.  So Fat32
            // attacks in chunks, and if a failure occurs once the first
            // chunk has been updated, we have to back out the updates.
            //
            // The unwind consists of walking back over each FAT entry we
            // have changed, setting it back to the previous value.  Note
            // that the previous value with either be FAT_CLUSTER_AVAILABLE
            // (if ChainTogether==TRUE) or a simple link to the successor
            // (if ChainTogether==FALSE).
            //
            // We concede that any one of these calls could fail too; our
            // objective is to make this case no more likely than the case
            // for a file consisting of multiple disjoint runs.
            //

            while ( StartingFatIndex > SavedStartingFatIndex ) {

                StartingFatIndex--;

                FatSetFatEntry( IrpContext, Vcb, StartingFatIndex,
                                ChainTogether ?
                                StartingFatIndex + 1 : FAT_CLUSTER_AVAILABLE );
            }
        }

        DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
    }

    return;
}
  3196. //
  3197. // Internal support routine
  3198. //
  3199. UCHAR
  3200. FatLogOf (
  3201. IN ULONG Value
  3202. )
  3203. /*++
  3204. Routine Description:
  3205. This routine just computes the base 2 log of an integer. It is only used
  3206. on objects that are know to be powers of two.
  3207. Arguments:
  3208. Value - The value to take the base 2 log of.
  3209. Return Value:
  3210. UCHAR - The base 2 log of Value.
  3211. --*/
  3212. {
  3213. UCHAR Log = 0;
  3214. PAGED_CODE();
  3215. DebugTrace(+1, Dbg, "LogOf\n", 0);
  3216. DebugTrace( 0, Dbg, " Value = %8lx\n", Value);
  3217. //
  3218. // Knock bits off until we we get a one at position 0
  3219. //
  3220. while ( (Value & 0xfffffffe) != 0 ) {
  3221. Log++;
  3222. Value >>= 1;
  3223. }
  3224. //
  3225. // If there was more than one bit set, the file system messed up,
  3226. // Bug Check.
  3227. //
  3228. if (Value != 0x1) {
  3229. DebugTrace( 0, Dbg, "Received non power of 2.\n", 0);
  3230. FatBugCheck( Value, Log, 0 );
  3231. }
  3232. DebugTrace(-1, Dbg, "LogOf -> %8lx\n", Log);
  3233. return Log;
  3234. }
  3235. VOID
  3236. FatExamineFatEntries(
  3237. IN PIRP_CONTEXT IrpContext,
  3238. IN PVCB Vcb,
  3239. IN ULONG StartIndex OPTIONAL,
  3240. IN ULONG EndIndex OPTIONAL,
  3241. IN BOOLEAN SetupWindows,
  3242. IN PFAT_WINDOW SwitchToWindow OPTIONAL,
  3243. IN PULONG BitMapBuffer OPTIONAL
  3244. )
  3245. /*++
  3246. Routine Description:
  3247. This routine handles scanning a segment of the FAT into in-memory structures.
  3248. There are three fundamental cases, with variations depending on the FAT type:
  3249. 1) During volume setup, FatSetupAllocations
  3250. 1a) for FAT12/16, read the FAT into our free clusterbitmap
  3251. 1b) for FAT32, perform the initial scan for window free cluster counts
  3252. 2) Switching FAT32 windows on the fly during system operation
  3253. 3) Reading arbitrary segments of the FAT for the purposes of the GetVolumeBitmap
  3254. call (only for FAT32)
  3255. There really is too much going on in here. At some point this should be
  3256. substantially rewritten.
  3257. Arguments:
  3258. Vcb - Supplies the volume involved
  3259. StartIndex - Supplies the starting cluster, ignored if SwitchToWindow supplied
  3260. EndIndex - Supplies the ending cluster, ignored if SwitchToWindow supplied
  3261. SetupWindows - Indicates if we are doing the initial FAT32 scan
  3262. SwitchToWindow - Supplies the FAT window we are examining and will switch to
  3263. BitMapBuffer - Supplies a specific bitmap to fill in, if not supplied we fill
  3264. in the volume free cluster bitmap if !SetupWindows
  3265. Return Value:
  3266. None. Lots of side effects.
  3267. --*/
  3268. {
  3269. ULONG FatIndexBitSize;
  3270. ULONG Page;
  3271. ULONG Offset;
  3272. ULONG FatIndex;
  3273. FAT_ENTRY FatEntry = FAT_CLUSTER_AVAILABLE;
  3274. FAT_ENTRY FirstFatEntry = FAT_CLUSTER_AVAILABLE;
  3275. PUSHORT FatBuffer;
  3276. PVOID pv;
  3277. PBCB Bcb;
  3278. ULONG EntriesPerWindow;
  3279. ULONG BitIndex;
  3280. ULONG ClustersThisRun;
  3281. ULONG StartIndexOfThisRun;
  3282. PULONG FreeClusterCount = NULL;
  3283. PFAT_WINDOW CurrentWindow = NULL;
  3284. PVOID NewBitMapBuffer = NULL;
  3285. PRTL_BITMAP BitMap = NULL;
  3286. RTL_BITMAP PrivateBitMap;
  3287. enum RunType {
  3288. FreeClusters,
  3289. AllocatedClusters,
  3290. UnknownClusters
  3291. } CurrentRun;
  3292. PAGED_CODE();
  3293. //
  3294. // Now assert correct usage.
  3295. //
  3296. FatIndexBitSize = Vcb->AllocationSupport.FatIndexBitSize;
  3297. ASSERT( !(SetupWindows && (SwitchToWindow || BitMapBuffer)));
  3298. ASSERT( !(SetupWindows && FatIndexBitSize != 32));
  3299. if (Vcb->NumberOfWindows > 1) {
  3300. //
  3301. // FAT32: Calculate the number of FAT entries covered by a window. This is
  3302. // equal to the number of bits in the freespace bitmap, the size of which
  3303. // is hardcoded.
  3304. //
  3305. EntriesPerWindow = MAX_CLUSTER_BITMAP_SIZE;
  3306. } else {
  3307. EntriesPerWindow = Vcb->AllocationSupport.NumberOfClusters;
  3308. }
  3309. //
  3310. // We will also fill in the cumulative count of free clusters for
  3311. // the entire volume. If this is not appropriate, NULL it out
  3312. // shortly.
  3313. //
  3314. FreeClusterCount = &Vcb->AllocationSupport.NumberOfFreeClusters;
  3315. if (SetupWindows) {
  3316. ASSERT(BitMapBuffer == NULL);
  3317. //
  3318. // In this case we're just supposed to scan the fat and set up
  3319. // the information regarding where the buckets fall and how many
  3320. // free clusters are in each.
  3321. //
  3322. // It is fine to monkey with the real windows, we must be able
  3323. // to do this to activate the volume.
  3324. //
  3325. BitMap = NULL;
  3326. CurrentWindow = &Vcb->Windows[0];
  3327. CurrentWindow->FirstCluster = StartIndex;
  3328. CurrentWindow->ClustersFree = 0;
  3329. //
  3330. // We always wish to calculate total free clusters when
  3331. // setting up the FAT windows.
  3332. //
  3333. } else if (BitMapBuffer == NULL) {
  3334. //
  3335. // We will be filling in the free cluster bitmap for the volume.
  3336. // Careful, we can raise out of here and be hopelessly hosed if
  3337. // we built this up in the main bitmap/window itself.
  3338. //
  3339. // For simplicity's sake, we'll do the swap for everyone. FAT32
  3340. // provokes the need since we can't tolerate partial results
  3341. // when switching windows.
  3342. //
  3343. ASSERT( SwitchToWindow );
  3344. CurrentWindow = SwitchToWindow;
  3345. StartIndex = CurrentWindow->FirstCluster;
  3346. EndIndex = CurrentWindow->LastCluster;
  3347. BitMap = &PrivateBitMap;
  3348. NewBitMapBuffer = FsRtlAllocatePoolWithTag( PagedPool,
  3349. (EntriesPerWindow + 7) / 8,
  3350. TAG_FAT_BITMAP );
  3351. RtlInitializeBitMap( &PrivateBitMap,
  3352. NewBitMapBuffer,
  3353. EndIndex - StartIndex + 1);
  3354. if (FatIndexBitSize == 32) {
  3355. //
  3356. // We do not wish count total clusters here.
  3357. //
  3358. FreeClusterCount = NULL;
  3359. }
  3360. } else {
  3361. BitMap = &PrivateBitMap;
  3362. RtlInitializeBitMap(&PrivateBitMap,
  3363. BitMapBuffer,
  3364. EndIndex - StartIndex + 1);
  3365. //
  3366. // We do not count total clusters here.
  3367. //
  3368. FreeClusterCount = NULL;
  3369. }
  3370. //
  3371. // Now, our start index better be in the file heap.
  3372. //
  3373. ASSERT( StartIndex >= 2 );
  3374. //
  3375. // Pick up the initial chunk of the FAT and first entry.
  3376. //
  3377. if (FatIndexBitSize == 12) {
  3378. //
  3379. // We read in the entire fat in the 12 bit case.
  3380. //
  3381. FatReadVolumeFile( IrpContext,
  3382. Vcb,
  3383. FatReservedBytes( &Vcb->Bpb ),
  3384. FatBytesPerFat( &Vcb->Bpb ),
  3385. &Bcb,
  3386. (PVOID *)&FatBuffer );
  3387. FatLookup12BitEntry(FatBuffer, 0, &FirstFatEntry);
  3388. } else {
  3389. //
  3390. // Read in one page of fat at a time. We cannot read in the
  3391. // all of the fat we need because of cache manager limitations.
  3392. //
  3393. ULONG BytesPerEntry = FatIndexBitSize >> 3;
  3394. ULONG EntriesPerPage = PAGE_SIZE / BytesPerEntry;
  3395. Page = (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) / PAGE_SIZE;
  3396. Offset = Page * PAGE_SIZE;
  3397. FatReadVolumeFile( IrpContext,
  3398. Vcb,
  3399. Offset,
  3400. PAGE_SIZE,
  3401. &Bcb,
  3402. &pv);
  3403. if (FatIndexBitSize == 32) {
  3404. FatBuffer = (PUSHORT)((PUCHAR)pv +
  3405. (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) %
  3406. PAGE_SIZE);
  3407. FirstFatEntry = *((PULONG)FatBuffer);
  3408. FirstFatEntry = FirstFatEntry & FAT32_ENTRY_MASK;
  3409. } else {
  3410. FatBuffer = (PUSHORT)((PUCHAR)pv +
  3411. FatReservedBytes(&Vcb->Bpb) % PAGE_SIZE) + 2;
  3412. FirstFatEntry = *FatBuffer;
  3413. }
  3414. }
  3415. CurrentRun = (FirstFatEntry == FAT_CLUSTER_AVAILABLE) ?
  3416. FreeClusters : AllocatedClusters;
  3417. StartIndexOfThisRun = StartIndex;
  3418. try {
  3419. for (FatIndex = StartIndex; FatIndex <= EndIndex; FatIndex++) {
  3420. if (FatIndexBitSize == 12) {
  3421. FatLookup12BitEntry(FatBuffer, FatIndex, &FatEntry);
  3422. } else {
  3423. //
  3424. // If we are setting up the FAT32 windows and have stepped into a new
  3425. // bucket, finalize this one and move forward.
  3426. //
  3427. if (SetupWindows &&
  3428. FatIndex > StartIndex &&
  3429. (FatIndex - 2) % EntriesPerWindow == 0) {
  3430. CurrentWindow->LastCluster = FatIndex - 1;
  3431. if (CurrentRun == FreeClusters) {
  3432. //
  3433. // We must be counting clusters in order to modify the
  3434. // contents of the window.
  3435. //
  3436. ASSERT( FreeClusterCount );
  3437. ClustersThisRun = FatIndex - StartIndexOfThisRun;
  3438. CurrentWindow->ClustersFree += ClustersThisRun;
  3439. if (FreeClusterCount) {
  3440. *FreeClusterCount += ClustersThisRun;
  3441. }
  3442. } else {
  3443. ASSERT(CurrentRun == AllocatedClusters);
  3444. ClustersThisRun = FatIndex - StartIndexOfThisRun;
  3445. }
  3446. StartIndexOfThisRun = FatIndex;
  3447. CurrentRun = UnknownClusters;
  3448. CurrentWindow++;
  3449. CurrentWindow->ClustersFree = 0;
  3450. CurrentWindow->FirstCluster = FatIndex;
  3451. }
  3452. //
  3453. // If we just stepped onto a new page, grab a new pointer.
  3454. //
  3455. if (((ULONG_PTR)FatBuffer & (PAGE_SIZE - 1)) == 0) {
  3456. FatUnpinBcb( IrpContext, Bcb );
  3457. Page++;
  3458. Offset += PAGE_SIZE;
  3459. FatReadVolumeFile( IrpContext,
  3460. Vcb,
  3461. Offset,
  3462. PAGE_SIZE,
  3463. &Bcb,
  3464. &pv );
  3465. FatBuffer = (PUSHORT)pv;
  3466. }
  3467. if (FatIndexBitSize == 32) {
  3468. FatEntry = *((PULONG)FatBuffer)++;
  3469. FatEntry = FatEntry & FAT32_ENTRY_MASK;
  3470. } else {
  3471. FatEntry = *FatBuffer;
  3472. FatBuffer += 1;
  3473. }
  3474. }
  3475. if (CurrentRun == UnknownClusters) {
  3476. CurrentRun = (FatEntry == FAT_CLUSTER_AVAILABLE) ?
  3477. FreeClusters : AllocatedClusters;
  3478. }
  3479. //
  3480. // Are we switching from a free run to an allocated run?
  3481. //
  3482. if (CurrentRun == FreeClusters &&
  3483. FatEntry != FAT_CLUSTER_AVAILABLE) {
  3484. ClustersThisRun = FatIndex - StartIndexOfThisRun;
  3485. if (FreeClusterCount) {
  3486. *FreeClusterCount += ClustersThisRun;
  3487. CurrentWindow->ClustersFree += ClustersThisRun;
  3488. }
  3489. if (BitMap) {
  3490. RtlClearBits( BitMap,
  3491. StartIndexOfThisRun - StartIndex,
  3492. ClustersThisRun );
  3493. }
  3494. CurrentRun = AllocatedClusters;
  3495. StartIndexOfThisRun = FatIndex;
  3496. }
  3497. //
  3498. // Are we switching from an allocated run to a free run?
  3499. //
  3500. if (CurrentRun == AllocatedClusters &&
  3501. FatEntry == FAT_CLUSTER_AVAILABLE) {
  3502. ClustersThisRun = FatIndex - StartIndexOfThisRun;
  3503. if (BitMap) {
  3504. RtlSetBits( BitMap,
  3505. StartIndexOfThisRun - StartIndex,
  3506. ClustersThisRun );
  3507. }
  3508. CurrentRun = FreeClusters;
  3509. StartIndexOfThisRun = FatIndex;
  3510. }
  3511. }
  3512. //
  3513. // Now we have to record the final run we encountered
  3514. //
  3515. ClustersThisRun = FatIndex - StartIndexOfThisRun;
  3516. if (CurrentRun == FreeClusters) {
  3517. if (FreeClusterCount) {
  3518. *FreeClusterCount += ClustersThisRun;
  3519. CurrentWindow->ClustersFree += ClustersThisRun;
  3520. }
  3521. if (BitMap) {
  3522. RtlClearBits( BitMap,
  3523. StartIndexOfThisRun - StartIndex,
  3524. ClustersThisRun );
  3525. }
  3526. } else {
  3527. if (BitMap) {
  3528. RtlSetBits( BitMap,
  3529. StartIndexOfThisRun - StartIndex,
  3530. ClustersThisRun );
  3531. }
  3532. }
  3533. //
  3534. // And finish the last window if we are in setup.
  3535. //
  3536. if (SetupWindows) {
  3537. CurrentWindow->LastCluster = FatIndex - 1;
  3538. }
  3539. //
3540. // Now switch the active window if required. We've successfully gotten everything
  3541. // nailed down.
  3542. //
  3543. // If we were tracking the free cluster count, this means we should update the
  3544. // window. This is the case of FAT12/16 initialization.
  3545. //
  3546. if (SwitchToWindow) {
  3547. if (Vcb->FreeClusterBitMap.Buffer) {
  3548. ExFreePool( Vcb->FreeClusterBitMap.Buffer );
  3549. }
  3550. RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
  3551. NewBitMapBuffer,
  3552. EndIndex - StartIndex + 1 );
  3553. NewBitMapBuffer = NULL;
  3554. Vcb->CurrentWindow = SwitchToWindow;
  3555. Vcb->ClusterHint = -1;
  3556. if (FreeClusterCount) {
  3557. ASSERT( !SetupWindows );
  3558. ASSERT( FatIndexBitSize != 32 );
  3559. Vcb->CurrentWindow->ClustersFree = *FreeClusterCount;
  3560. }
  3561. }
  3562. //
3563. // Make sure plausible things occurred ...
  3564. //
  3565. if (!SetupWindows && BitMapBuffer == NULL) {
  3566. ASSERT_CURRENT_WINDOW_GOOD( Vcb );
  3567. }
  3568. ASSERT(Vcb->AllocationSupport.NumberOfFreeClusters <= Vcb->AllocationSupport.NumberOfClusters);
  3569. } finally {
  3570. //
  3571. // Unpin the last bcb and drop the temporary bitmap buffer if it exists.
  3572. //
  3573. FatUnpinBcb( IrpContext, Bcb);
  3574. if (NewBitMapBuffer) {
  3575. ExFreePool( NewBitMapBuffer );
  3576. }
  3577. }
  3578. }