Source code of Windows XP (NT5)
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

3402 lines
95 KiB

  1. /*++
  2. Copyright (c) 1990 Microsoft Corporation
  3. Module Name:
  4. AllocSup.c
  5. Abstract:
  6. This module implements the Allocation support routines for Rx.
  7. Author:
  8. DavidGoebel [DavidGoe] 31-Oct-90
  9. Revision History:
  10. DavidGoebel [DavidGoe] 31-Oct-90
  11. Add unwinding support. Some steps had to be reordered, and whether
  12. operations could fail was carefully considered. In particular, attention
  13. was paid to the order of Mcb operations (see note below).
  14. ##### ## # # #### ###### #####
  15. # # # # ## # # # # # #
  16. # # # # # # # # ##### # #
  17. # # ###### # # # # ### # #####
  18. # # # # # ## # # # # #
  19. ##### # # # # #### ###### # #
  20. ______________________________________________
  21. ++++++++++++++++++++++++++++++++++++++++++++++++++|
  22. | |
  23. | The unwinding aspects of this module depend on |
  24. | operational details of the Mcb package. Do not |
  25. | attempt to modify unwind procedures without |
  26. | thoroughly understanding the inner workings of |
  27. | the Mcb package. |
  28. | |
  29. ++++++++++++++++++++++++++++++++++++++++++++++++++|
  30. # # ## ##### # # # # # ####
  31. # # # # # # ## # # ## # # #
  32. # # # # # # # # # # # # # #
  33. # ## # ###### ##### # # # # # # # # ###
  34. ## ## # # # # # ## # # ## # #
  35. # # # # # # # # # # # ####
  36. ______________________________________________________
  37. --*/
  38. // ----------------------joejoe-----------found-------------#include "RxProcs.h"
  39. #include "precomp.h"
  40. #pragma hdrstop
  41. //
  42. // The Bug check file id for this module
  43. //
  44. #define BugCheckFileId (RDBSS_BUG_CHECK_ALLOCSUP)
  45. //
  46. // Local debug trace level
  47. //
  48. #define Dbg (DEBUG_TRACE_ALLOCSUP)
  49. //
  50. // Cluster/Index routines implemented in AllocSup.c
  51. //
//
//  Classification of an Rx table entry, as returned by
//  RxInterpretClusterType.  RxLookupFileAllocation treats Available,
//  Reserved and Bad as a broken allocation chain, Last as the end of a
//  file's chain, and Next as a link to the following cluster.
//
typedef enum _CLUSTER_TYPE {
    RxClusterAvailable,                 // cluster is free
    RxClusterReserved,                  // cluster is reserved
    RxClusterBad,                       // cluster is marked unusable
    RxClusterLast,                      // final cluster of a chain
    RxClusterNext                       // entry links to the next cluster
} CLUSTER_TYPE;
  59. //
  60. // This structure is used by RxLookupRxEntry to remember a pinned page
  61. // of rx.
  62. //
typedef struct _RDBSS_ENUMERATION_CONTEXT {
    VBO VboOfPinnedPage;                // VBO of the rx page currently pinned
    PBCB Bcb;                           // Bcb pinning that page (NULL when none pinned)
    PVOID PinnedPage;                   // mapped address of the pinned page
} RDBSS_ENUMERATION_CONTEXT, *PRDBSS_ENUMERATION_CONTEXT;
  68. //
  69. // Local support routine prototypes
  70. //
  71. CLUSTER_TYPE
  72. RxInterpretClusterType (
  73. IN PVCB Vcb,
  74. IN RDBSS_ENTRY Entry
  75. );
  76. VOID
  77. RxLookupRxEntry(
  78. IN PRX_CONTEXT RxContext,
  79. IN PVCB Vcb,
  80. IN ULONG RxIndex,
  81. IN OUT PRDBSS_ENTRY RxEntry,
  82. IN OUT PRDBSS_ENUMERATION_CONTEXT Context
  83. );
  84. VOID
  85. RxSetRxEntry(
  86. IN PRX_CONTEXT RxContext,
  87. IN PVCB Vcb,
  88. IN ULONG RxIndex,
  89. IN RDBSS_ENTRY RxEntry
  90. );
  91. VOID
  92. RxSetRxRun(
  93. IN PRX_CONTEXT RxContext,
  94. IN PVCB Vcb,
  95. IN ULONG StartingRxIndex,
  96. IN ULONG ClusterCount,
  97. IN BOOLEAN ChainTogether
  98. );
  99. UCHAR
  100. RxLogOf(
  101. IN ULONG Value
  102. );
//
//  The following macros provide a convenient way of hiding the details
//  of bitmap allocation schemes.
//

//
//  VOID
//  RxLockFreeClusterBitMap (
//      IN PVCB Vcb
//      );
//
//  Gains exclusive use of the free cluster bit map by waiting on the
//  Vcb's FreeClusterBitMapEvent (a kernel synchronization wait).  Pair
//  with RxUnlockFreeClusterBitMap.
//

#define RxLockFreeClusterBitMap(VCB) {                                      \
    RXSTATUS Status;                                                        \
    Status = KeWaitForSingleObject( &(VCB)->FreeClusterBitMapEvent,         \
                                    Executive,                              \
                                    KernelMode,                             \
                                    FALSE,                                  \
                                    (PLARGE_INTEGER) NULL );                \
    ASSERT( NT_SUCCESS( Status ) );                                         \
}

//
//  VOID
//  RxUnlockFreeClusterBitMap (
//      IN PVCB Vcb
//      );
//
//  Releases the free cluster bit map by signaling the event.  The assert
//  checks the event was previously non-signaled, i.e. the lock was held.
//

#define RxUnlockFreeClusterBitMap(VCB) {                                       \
    ULONG PreviousState;                                                       \
    PreviousState = KeSetEvent( &(VCB)->FreeClusterBitMapEvent, 0, FALSE );    \
    ASSERT( PreviousState == 0 );                                              \
}
//
//  BOOLEAN
//  RxIsClusterFree (
//      IN PRX_CONTEXT RxContext,
//      IN PVCB Vcb,
//      IN ULONG RxIndex
//      );
//
//  TRUE when the bit map bit for RxIndex is clear (clear == free cluster).
//

#define RxIsClusterFree(RXCONTEXT,VCB,RDBSS_INDEX)                  \
                                                                    \
    (RtlCheckBit(&(VCB)->FreeClusterBitMap,(RDBSS_INDEX)) == 0)

//
//  BOOLEAN
//  RxIsClusterAllocated (
//      IN PRX_CONTEXT RxContext,
//      IN PVCB Vcb,
//      IN ULONG RxIndex
//      );
//
//  TRUE when the bit map bit for RxIndex is set (set == allocated/reserved).
//

#define RxIsClusterAllocated(RXCONTEXT,VCB,RDBSS_INDEX)             \
                                                                    \
    (RtlCheckBit(&(VCB)->FreeClusterBitMap,(RDBSS_INDEX)) != 0)
//
//  VOID
//  RxFreeClusters (
//      IN PRX_CONTEXT RxContext,
//      IN PVCB Vcb,
//      IN ULONG RxIndex,
//      IN ULONG ClusterCount
//      );
//
//  Returns the run [RxIndex, RxIndex + ClusterCount) to the free pool by
//  writing RDBSS_CLUSTER_AVAILABLE into the corresponding Rx entries
//  (single entry via RxSetRxEntry, a run via RxSetRxRun with
//  ChainTogether == FALSE).  Bits 0 and 1 of the bit map are the two
//  reserved entries and must always be set; the asserts verify this.
//  The DOUBLE_SPACE_WRITE variant also deallocates the clusters from the
//  double space area when a Dscb is present.
//

#ifdef DOUBLE_SPACE_WRITE
#define RxFreeClusters(RXCONTEXT,VCB,RDBSS_INDEX,CLUSTER_COUNT) {                            \
                                                                                             \
    ASSERTMSG("RxFreeClusters ", RtlCheckBit( &(VCB)->FreeClusterBitMap, 0 ) == 1 );         \
    ASSERTMSG("RxFreeClusters ", RtlCheckBit( &(VCB)->FreeClusterBitMap, 1 ) == 1 );         \
                                                                                             \
    DebugTrace( 0, Dbg, "Free clusters (Index<<16 | Count) (%8lx)\n",                        \
                (RDBSS_INDEX)<<16 | (CLUSTER_COUNT));                                        \
    if ((CLUSTER_COUNT) == 1) {                                                              \
        RxSetRxEntry((RXCONTEXT),(VCB),(RDBSS_INDEX),RDBSS_CLUSTER_AVAILABLE);               \
    } else {                                                                                 \
        RxSetRxRun((RXCONTEXT),(VCB),(RDBSS_INDEX),(CLUSTER_COUNT),FALSE);                   \
    }                                                                                        \
    if ((VCB)->Dscb != NULL) {                                                               \
        RxDblsDeallocateClusters((RXCONTEXT),(VCB)->Dscb,(RDBSS_INDEX),(CLUSTER_COUNT));     \
    }                                                                                        \
}
#else
#define RxFreeClusters(RXCONTEXT,VCB,RDBSS_INDEX,CLUSTER_COUNT) {                            \
                                                                                             \
    ASSERTMSG("RxFreeClusters ", RtlCheckBit( &(VCB)->FreeClusterBitMap, 0 ) == 1 );         \
    ASSERTMSG("RxFreeClusters ", RtlCheckBit( &(VCB)->FreeClusterBitMap, 1 ) == 1 );         \
                                                                                             \
    DebugTrace( 0, Dbg, "Free clusters (Index<<16 | Count) (%8lx)\n",                        \
                (RDBSS_INDEX)<<16 | (CLUSTER_COUNT));                                        \
    if ((CLUSTER_COUNT) == 1) {                                                              \
        RxSetRxEntry((RXCONTEXT),(VCB),(RDBSS_INDEX),RDBSS_CLUSTER_AVAILABLE);               \
    } else {                                                                                 \
        RxSetRxRun((RXCONTEXT),(VCB),(RDBSS_INDEX),(CLUSTER_COUNT),FALSE);                   \
    }                                                                                        \
}
#endif // DOUBLE_SPACE_WRITE
  196. //
  197. // VOID
  198. // RxAllocateClusters (
  199. // IN PRX_CONTEXT RxContext,
  200. // IN PVCB Vcb,
  201. // IN ULONG RxIndex,
  202. // IN ULONG ClusterCount
  203. // );
  204. //
  205. #define RxAllocateClusters(RXCONTEXT,VCB,RDBSS_INDEX,CLUSTER_COUNT) { \
  206. \
  207. ASSERTMSG("RxFreeClusters ", RtlCheckBit( &(VCB)->FreeClusterBitMap, 0 ) == 1 ); \
  208. ASSERTMSG("RxFreeClusters ", RtlCheckBit( &(VCB)->FreeClusterBitMap, 1 ) == 1 ); \
  209. \
  210. DebugTrace( 0, Dbg, "Allocate clusters (Index<<16 | Count) (%8lx)\n", \
  211. (RDBSS_INDEX)<<16 | (CLUSTER_COUNT)); \
  212. if ((CLUSTER_COUNT) == 1) { \
  213. RxSetRxEntry((RXCONTEXT),(VCB),(RDBSS_INDEX),RDBSS_CLUSTER_LAST); \
  214. } else { \
  215. RxSetRxRun((RXCONTEXT),(VCB),(RDBSS_INDEX),(CLUSTER_COUNT),TRUE); \
  216. } \
  217. }
  218. //
  219. // VOID
  220. // RxUnreserveClusters (
  221. // IN PRX_CONTEXT RxContext,
  222. // IN PVCB Vcb,
  223. // IN ULONG RxIndex,
  224. // IN ULONG ClusterCount
  225. // );
  226. //
  227. #define RxUnreserveClusters(RXCONTEXT,VCB,RDBSS_INDEX,CLUSTER_COUNT) { \
  228. \
  229. ASSERTMSG("RxFreeClusters ", RtlCheckBit( &(VCB)->FreeClusterBitMap, 0 ) == 1 ); \
  230. ASSERTMSG("RxFreeClusters ", RtlCheckBit( &(VCB)->FreeClusterBitMap, 1 ) == 1 ); \
  231. \
  232. RtlClearBits(&(VCB)->FreeClusterBitMap,(RDBSS_INDEX),(CLUSTER_COUNT)); \
  233. }
  234. //
  235. // VOID
  236. // RxReserveClusters (
  237. // IN PRX_CONTEXT RxContext,
  238. // IN PVCB Vcb,
  239. // IN ULONG RxIndex,
  240. // IN ULONG ClusterCount
  241. // );
  242. //
  243. #define RxReserveClusters(RXCONTEXT,VCB,RDBSS_INDEX,CLUSTER_COUNT) { \
  244. \
  245. ASSERTMSG("RxFreeClusters ", RtlCheckBit( &(VCB)->FreeClusterBitMap, 0 ) == 1 ); \
  246. ASSERTMSG("RxFreeClusters ", RtlCheckBit( &(VCB)->FreeClusterBitMap, 1 ) == 1 ); \
  247. \
  248. RtlSetBits(&(VCB)->FreeClusterBitMap,(RDBSS_INDEX),(CLUSTER_COUNT)); \
  249. }
//
//  ULONG
//  RxFindFreeClusterRun (
//      IN PRX_CONTEXT RxContext,
//      IN PVCB Vcb,
//      IN ULONG ClusterCount,
//      IN PULONG AlternateClusterHint
//      );
//
//  Searches the free cluster bit map for a run of ClusterCount clear
//  bits, starting from the caller's hint, or from the Vcb's ClusterHint
//  when the caller passes 0.  Result semantics are those of
//  RtlFindClearBits (index of the run, or -1 if no such run exists).
//

#define RxFindFreeClusterRun(RXCONTEXT,VCB,CLUSTER_COUNT,CLUSTER_HINT)       \
                                                                             \
    RtlFindClearBits( &(VCB)->FreeClusterBitMap,                             \
                      (CLUSTER_COUNT),                                       \
                      ((CLUSTER_HINT) != 0)?(CLUSTER_HINT):(VCB)->ClusterHint )

//
//  ULONG
//  RxLongestFreeClusterRun (
//      IN PRX_CONTEXT RxContext,
//      IN PVCB Vcb,
//      IN PULONG RxIndex,
//      );
//
//  Returns the length of the longest run of clear bits in the free
//  cluster bit map, writing its starting index through RxIndex (see
//  RtlFindLongestRunClear).
//

#define RxLongestFreeClusterRun(RXCONTEXT,VCB,RDBSS_INDEX)                   \
                                                                             \
    RtlFindLongestRunClear(&(VCB)->FreeClusterBitMap,(RDBSS_INDEX))
  275. #if DBG
  276. extern KSPIN_LOCK VWRSpinLock;
  277. #endif
  278. #ifdef ALLOC_PRAGMA
  279. #pragma alloc_text(PAGE, RxLookupFileAllocation)
  280. #pragma alloc_text(PAGE, RxAddFileAllocation)
  281. #pragma alloc_text(PAGE, RxAllocateDiskSpace)
  282. #pragma alloc_text(PAGE, RxDeallocateDiskSpace)
  283. #pragma alloc_text(PAGE, RxInterpretClusterType)
  284. #pragma alloc_text(PAGE, RxLogOf)
  285. #pragma alloc_text(PAGE, RxLookupRxEntry)
  286. #pragma alloc_text(PAGE, RxLookupFileAllocationSize)
  287. #pragma alloc_text(PAGE, RxMergeAllocation)
  288. #pragma alloc_text(PAGE, RxSetRxEntry)
  289. #pragma alloc_text(PAGE, RxSetRxRun)
  290. #pragma alloc_text(PAGE, RxSetupAllocationSupport)
  291. #pragma alloc_text(PAGE, RxSplitAllocation)
  292. #pragma alloc_text(PAGE, RxTearDownAllocationSupport)
  293. #pragma alloc_text(PAGE, RxTruncateFileAllocation)
  294. #endif
VOID
RxSetupAllocationSupport (
    IN PRX_CONTEXT RxContext,
    IN PVCB Vcb
    )

/*++

Routine Description:

    This routine fills in the Allocation Support structure in the Vcb.
    Most entries are computed using rx.h macros supplied with data from
    the Bios Parameter Block.  The free cluster count, however, requires
    going to the Rx and actually counting free sectors.  At the same time
    the free cluster bit map is initialized.

Arguments:

    RxContext - Supplies the context of the current operation.

    Vcb - Supplies the Vcb to fill in.

--*/

{
    ULONG BitMapSize;
    PVOID BitMapBuffer;

    //
    //  SavedBcbs points at an array of (Bcb, mapped buffer) pairs, one pair
    //  per pinned page of the Rx.  In the 12 bit case the small on-stack
    //  Bcbs array is used instead of pool.
    //

    PBCB (*SavedBcbs)[2] = NULL;
    PBCB Bcbs[2][2];

    DebugTrace(+1, Dbg, "RxSetupAllocationSupport\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);

    //
    //  Compute a number of fields for Vcb.AllocationSupport
    //

    Vcb->AllocationSupport.RootDirectoryLbo = RxRootDirectoryLbo( &Vcb->Bpb );
    Vcb->AllocationSupport.RootDirectorySize = RxRootDirectorySize( &Vcb->Bpb );
    Vcb->AllocationSupport.FileAreaLbo = RxFileAreaLbo( &Vcb->Bpb );
    Vcb->AllocationSupport.NumberOfClusters = RxNumberOfClusters( &Vcb->Bpb );
    Vcb->AllocationSupport.RxIndexBitSize = RxIndexBitSize( &Vcb->Bpb );
    Vcb->AllocationSupport.LogOfBytesPerSector = RxLogOf(Vcb->Bpb.BytesPerSector);
    Vcb->AllocationSupport.LogOfBytesPerCluster = RxLogOf(
                                                  RxBytesPerCluster( &Vcb->Bpb ));
    Vcb->AllocationSupport.NumberOfFreeClusters = 0;

    //
    //  Deal with a bug in DOS 5 format, if the Rx is not big enough to
    //  describe all the clusters on the disk, reduce this number.
    //

    {
        ULONG ClustersDescribableByRx;

        //
        //  Total bits in the Rx divided by the bits per entry, minus the
        //  two reserved entries, bounds the number of describable clusters.
        //

        ClustersDescribableByRx = ( (Vcb->Bpb.SectorsPerRx *
                                     Vcb->Bpb.BytesPerSector * 8)
                                    / RxIndexBitSize(&Vcb->Bpb) ) - 2;

        if (Vcb->AllocationSupport.NumberOfClusters > ClustersDescribableByRx) {

            KdPrint(("FASTRDBSS: Mounting wierd volume!\n"));

            Vcb->AllocationSupport.NumberOfClusters = ClustersDescribableByRx;
        }
    }

    //
    //  Extend the virtual volume file to include the Rx
    //

    {
        CC_FILE_SIZES FileSizes;

        FileSizes.AllocationSize.QuadPart =
        FileSizes.FileSize.QuadPart =
            RxReservedBytes( &Vcb->Bpb ) + RxBytesPerRx( &Vcb->Bpb );
        FileSizes.ValidDataLength = RxMaxLarge;

        if ( Vcb->VirtualVolumeFile->PrivateCacheMap == NULL ) {

            CcInitializeCacheMap( Vcb->VirtualVolumeFile,
                                  &FileSizes,
                                  TRUE,
                                  &RxData.CacheManagerNoOpCallbacks,
                                  Vcb );

        } else {

            CcSetFileSizes( Vcb->VirtualVolumeFile, &FileSizes );
        }
    }

    try {

        //
        //  Initialize the free cluster BitMap.  The number of bits is the
        //  number of clusters plus the two reserved entries.  Note that
        //  FsRtlAllocatePool will always allocate me something longword aligned.
        //
        //  NOTE(review): the pool size is byte-rounded ((BitMapSize + 7) / 8)
        //  rather than ULONG-rounded, while the Rtl bitmap routines operate
        //  on ULONGs -- presumably pool allocation granularity covers the
        //  trailing bytes, but confirm.
        //

        BitMapSize = Vcb->AllocationSupport.NumberOfClusters + 2;

        BitMapBuffer = FsRtlAllocatePool( PagedPool, (BitMapSize + 7) / 8 );

        RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
                             (PULONG)BitMapBuffer,
                             BitMapSize );

        //
        //  Read the rx and count up free clusters.
        //
        //  Rather than just reading rx entries one at a time, a faster
        //  approach is used.  The entire Rx is read in and we read
        //  through it, keeping track of runs of free and runs of allocated
        //  clusters.  When we switch from free to allocated or vice versa,
        //  the previous run is marked in the bit map.
        //

        {
            ULONG Page;
            ULONG RxIndex;
            RDBSS_ENTRY RxEntry;
            PRDBSS_ENTRY RxBuffer;
            ULONG ClustersThisRun;
            ULONG RxIndexBitSize;
            ULONG StartIndexOfThisRun;
            PULONG FreeClusterCount;

            enum RunType {
                FreeClusters,
                AllocatedClusters
            } CurrentRun;

            //
            //  Keep local copies of these variables around for speed.
            //

            FreeClusterCount = &Vcb->AllocationSupport.NumberOfFreeClusters;
            RxIndexBitSize = Vcb->AllocationSupport.RxIndexBitSize;

            //
            //  Read in one page of rx at a time.  We cannot read in the
            //  all of the rx we need because of cache manager limitations.
            //
            //  SavedBcb was initialized to be able to hold the largest
            //  possible number of pages in a rx plus an extra one to
            //  accommodate the boot sector, plus one more to make sure there
            //  is enough room for the RtlZeroMemory below that needs to mark
            //  the first Bcb after all the ones we will use as an end marker.
            //

            if ( RxIndexBitSize == 16 ) {

                ULONG NumberOfPages;
                ULONG Offset;

                NumberOfPages = ( RxReservedBytes(&Vcb->Bpb) +
                                  RxBytesPerRx(&Vcb->Bpb) +
                                  (PAGE_SIZE - 1) ) / PAGE_SIZE;

                //
                //  Figure out how much memory we will need for the Bcb
                //  buffer and fill it in.
                //

                SavedBcbs = FsRtlAllocatePool( PagedPool,
                                               (NumberOfPages + 1) * sizeof(PBCB) * 2 );

                RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );

                //
                //  Pin every page of the Rx, remembering each (Bcb, buffer)
                //  pair so the finally clause can unpin them.
                //

                for ( Page = 0, Offset = 0;
                      Page < NumberOfPages;
                      Page++, Offset += PAGE_SIZE ) {

                    RxReadVolumeFile( RxContext,
                                      Vcb,
                                      Offset,
                                      PAGE_SIZE,
                                      &SavedBcbs[Page][0],
                                      (PVOID *)&SavedBcbs[Page][1] );
                }

                //
                //  Position RxBuffer at the first data entry: skip the
                //  reserved area, then the two reserved 16-bit entries.
                //

                Page = RxReservedBytes(&Vcb->Bpb) / PAGE_SIZE;

                RxBuffer = (PRDBSS_ENTRY)((PUCHAR)SavedBcbs[Page][1] +
                           RxReservedBytes(&Vcb->Bpb) % PAGE_SIZE) + 2;

            } else {

                //
                //  We read in the entire rx in the 12 bit case.
                //

                SavedBcbs = Bcbs;

                RtlZeroMemory( &SavedBcbs[0][0], 2 * sizeof(PBCB) * 2);

                RxReadVolumeFile( RxContext,
                                  Vcb,
                                  RxReservedBytes( &Vcb->Bpb ),
                                  RxBytesPerRx( &Vcb->Bpb ),
                                  &SavedBcbs[0][0],
                                  (PVOID *)&RxBuffer );
            }

            //
            //  For a rx, we know the first two clusters are always
            //  reserved.  So start an allocated run.
            //

            CurrentRun = AllocatedClusters;
            StartIndexOfThisRun = 0;

            for (RxIndex = 2; RxIndex < BitMapSize; RxIndex++) {

                if (RxIndexBitSize == 12) {

                    RxLookup12BitEntry(RxBuffer, RxIndex, &RxEntry);

                } else {

                    //
                    //  If we just stepped onto a new page, grab a new pointer.
                    //

                    if (((ULONG)RxBuffer & (PAGE_SIZE - 1)) == 0) {

                        Page++;

                        RxBuffer = (PRDBSS_ENTRY)SavedBcbs[Page][1];
                    }

                    RxEntry = *RxBuffer;
                    RxBuffer += 1;
                }

                //
                //  Are we switching from a free run to an allocated run?
                //

                if ((CurrentRun == FreeClusters) &&
                    (RxEntry != RDBSS_CLUSTER_AVAILABLE)) {

                    ClustersThisRun = RxIndex - StartIndexOfThisRun;

                    *FreeClusterCount += ClustersThisRun;

                    RtlClearBits( &Vcb->FreeClusterBitMap,
                                  StartIndexOfThisRun,
                                  ClustersThisRun );

                    CurrentRun = AllocatedClusters;
                    StartIndexOfThisRun = RxIndex;
                }

                //
                //  Are we switching from an allocated run to a free run?
                //

                if ((CurrentRun == AllocatedClusters) &&
                    (RxEntry == RDBSS_CLUSTER_AVAILABLE)) {

                    ClustersThisRun = RxIndex - StartIndexOfThisRun;

                    RtlSetBits( &Vcb->FreeClusterBitMap,
                                StartIndexOfThisRun,
                                ClustersThisRun );

                    CurrentRun = FreeClusters;
                    StartIndexOfThisRun = RxIndex;
                }
            }

            //
            //  Now we have to record the final run we encountered
            //

            ClustersThisRun = RxIndex - StartIndexOfThisRun;

            if ( CurrentRun == FreeClusters ) {

                *FreeClusterCount += ClustersThisRun;

                RtlClearBits( &Vcb->FreeClusterBitMap,
                              StartIndexOfThisRun,
                              ClustersThisRun );

            } else {

                RtlSetBits( &Vcb->FreeClusterBitMap,
                            StartIndexOfThisRun,
                            ClustersThisRun );
            }
        }

        //
        //  The two reserved entries must always read as allocated.
        //

        ASSERT( RtlCheckBit( &Vcb->FreeClusterBitMap, 0 ) == 1 );
        ASSERT( RtlCheckBit( &Vcb->FreeClusterBitMap, 1 ) == 1 );

    } finally {

        ULONG i = 0;

        DebugUnwind( RxSetupAllocationSupport );

        //
        //  If we hit an exception, back out.
        //

        if (AbnormalTermination()) {

            RxTearDownAllocationSupport( RxContext, Vcb );
        }

        //
        //  We are done reading the Rx, so unpin the Bcbs.  The zeroed
        //  entry after the last pinned page acts as the end marker.
        //

        if (SavedBcbs != NULL) {

            while ( SavedBcbs[i][0] != NULL ) {

                RxUnpinBcb( RxContext, SavedBcbs[i][0] );

                i += 1;
            }

            if (SavedBcbs != Bcbs) {

                ExFreePool( SavedBcbs );
            }
        }

        DebugTrace(-1, Dbg, "RxSetupAllocationSupport -> (VOID)\n", 0);
    }

    return;
}
  537. VOID
  538. RxTearDownAllocationSupport (
  539. IN PRX_CONTEXT RxContext,
  540. IN PVCB Vcb
  541. )
  542. /*++
  543. Routine Description:
  544. This routine prepares the volume for closing. Specifically, we must
  545. release the free rx bit map buffer, and uninitialize the dirty rx
  546. Mcb.
  547. Arguments:
  548. Vcb - Supplies the Vcb to fill in.
  549. Return Value:
  550. VOID
  551. --*/
  552. {
  553. DebugTrace(+1, Dbg, "RxTearDownAllocationSupport\n", 0);
  554. DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
  555. //
  556. // Free the memory associated with the free cluster bitmap.
  557. //
  558. if ( Vcb->FreeClusterBitMap.Buffer != NULL ) {
  559. ExFreePool( Vcb->FreeClusterBitMap.Buffer );
  560. //
  561. // NULL this field as an flag.
  562. //
  563. Vcb->FreeClusterBitMap.Buffer = NULL;
  564. }
  565. //
  566. // And remove all the runs in the dirty rx Mcb
  567. //
  568. FsRtlRemoveMcbEntry( &Vcb->DirtyRxMcb, 0, 0xFFFFFFFF );
  569. DebugTrace(-1, Dbg, "RxTearDownAllocationSupport -> (VOID)\n", 0);
  570. UNREFERENCED_PARAMETER( RxContext );
  571. return;
  572. }
VOID
RxLookupFileAllocation (
    IN PRX_CONTEXT RxContext,
    IN PFCB FcbOrDcb,
    IN VBO Vbo,
    OUT PLBO Lbo,
    OUT PULONG ByteCount,
    OUT PBOOLEAN Allocated,
    OUT PULONG Index
    )

/*++

Routine Description:

    This routine looks up the existing mapping of VBO to LBO for a
    file/directory.  The information it queries is either stored in the
    mcb field of the fcb/dcb or it is stored on in the rx table and
    needs to be retrieved and decoded, and updated in the mcb.

Arguments:

    RxContext - Supplies the context of the current operation.

    FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being queried

    Vbo - Supplies the VBO whose LBO we want returned

    Lbo - Receives the LBO corresponding to the input Vbo if one exists

    ByteCount - Receives the number of bytes within the run
        that correspond between the input vbo and output lbo.

    Allocated - Receives TRUE if the Vbo does have a corresponding Lbo
        and FALSE otherwise.

    Index - Receives the Index of the run (optional).

--*/

{
    VBO CurrentVbo;
    LBO CurrentLbo;
    LBO PriorLbo;

    VBO FirstVboOfCurrentRun;
    LBO FirstLboOfCurrentRun;

    BOOLEAN LastCluster;
    ULONG Runs;

    PVCB Vcb;
    RDBSS_ENTRY RxEntry;
    ULONG BytesPerCluster;
    ULONG BytesOnVolume;

    //
    //  Context remembers the page of rx pinned by RxLookupRxEntry so it
    //  can be unpinned in the finally clause below.
    //

    RDBSS_ENUMERATION_CONTEXT Context;

    DebugTrace(+1, Dbg, "RxLookupFileAllocation\n", 0);
    DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
    DebugTrace( 0, Dbg, " Vbo = %8lx\n", Vbo);
    DebugTrace( 0, Dbg, " Lbo = %8lx\n", Lbo);
    DebugTrace( 0, Dbg, " ByteCount = %8lx\n", ByteCount);
    DebugTrace( 0, Dbg, " Allocated = %8lx\n", Allocated);

    Context.Bcb = NULL;

    //
    //  Check the trivial case that the mapping is already in our
    //  Mcb.
    //

    if ( FsRtlLookupMcbEntry(&FcbOrDcb->Mcb, Vbo, Lbo, ByteCount, Index) ) {

        *Allocated = TRUE;

        DebugTrace( 0, Dbg, "Found run in Mcb.\n", 0);
        DebugTrace(-1, Dbg, "RxLookupFileAllocation -> (VOID)\n", 0);
        return;
    }

    //
    //  Initialize the Vcb, the cluster size, LastCluster, and
    //  FirstLboOfCurrentRun (to be used as an indication of the first
    //  iteration through the following while loop).
    //

    Vcb = FcbOrDcb->Vcb;

    BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
    BytesOnVolume = Vcb->AllocationSupport.NumberOfClusters * BytesPerCluster;

    LastCluster = FALSE;
    FirstLboOfCurrentRun = 0;

    //
    //  Discard the case that the request extends beyond the end of
    //  allocation.  Note that if the allocation size is not known
    //  AllocationSize is set to 0xffffffff.
    //

    if ( Vbo >= FcbOrDcb->Header.AllocationSize.LowPart ) {

        *Allocated = FALSE;

        DebugTrace( 0, Dbg, "Vbo beyond end of file.\n", 0);
        DebugTrace(-1, Dbg, "RxLookupFileAllocation -> (VOID)\n", 0);
        return;
    }

    //
    //  The Vbo is beyond the last Mcb entry.  So we adjust Current Vbo/Lbo
    //  and RxEntry to describe the beginning of the last entry in the Mcb.
    //  This is used as initialization for the following loop.
    //
    //  If the Mcb was empty, we start at the beginning of the file with
    //  CurrentVbo set to 0 to indicate a new run.
    //

    if (FsRtlLookupLastMcbEntry(&FcbOrDcb->Mcb, &CurrentVbo, &CurrentLbo)) {

        DebugTrace( 0, Dbg, "Current Mcb size = %8lx.\n", CurrentVbo + 1);

        //
        //  Back up from the last byte to the first byte of that cluster.
        //

        CurrentVbo -= (BytesPerCluster - 1);
        CurrentLbo -= (BytesPerCluster - 1);

        Runs = FsRtlNumberOfRunsInMcb( &FcbOrDcb->Mcb );

    } else {

        DebugTrace( 0, Dbg, "Mcb empty.\n", 0);

        //
        //  Check for an FcbOrDcb that has no allocation
        //

        if (FcbOrDcb->FirstClusterOfFile == 0) {

            *Allocated = FALSE;

            DebugTrace( 0, Dbg, "File has no allocation.\n", 0);
            DebugTrace(-1, Dbg, "RxLookupFileAllocation -> (VOID)\n", 0);
            return;

        } else {

            CurrentVbo = 0;
            CurrentLbo = RxGetLboFromIndex( Vcb, FcbOrDcb->FirstClusterOfFile );
            FirstVboOfCurrentRun = CurrentVbo;
            FirstLboOfCurrentRun = CurrentLbo;

            Runs = 0;

            DebugTrace( 0, Dbg, "First Lbo of file = %8lx\n", CurrentLbo);
        }
    }

    //
    //  Now we know that we are looking up a valid Vbo, but it is
    //  not in the Mcb, which is a monotonically increasing list of
    //  Vbo's.  Thus we have to go to the Rx, and update
    //  the Mcb as we go.  We use a try-finally to unpin the page
    //  of rx hanging around.  Also we mark *Allocated = FALSE, so that
    //  the caller won't try to use the data if we hit an exception.
    //

    *Allocated = FALSE;

    try {

        RxEntry = (RDBSS_ENTRY)RxGetIndexFromLbo( Vcb, CurrentLbo );

        //
        //  ASSERT that CurrentVbo and CurrentLbo are now cluster aligned.
        //  The assumption here, is that only whole clusters of Vbos and Lbos
        //  are mapped in the Mcb.
        //

        ASSERT( ((CurrentLbo - Vcb->AllocationSupport.FileAreaLbo)
                 % BytesPerCluster == 0) &&
                (CurrentVbo % BytesPerCluster == 0) );

        //
        //  Starting from the first Vbo after the last Mcb entry, scan through
        //  the Rx looking for our Vbo.  We continue through the Rx until we
        //  hit a noncontiguity beyond the desired Vbo, or the last cluster.
        //

        while ( !LastCluster ) {

            //
            //  Get the next rx entry, and update our Current variables.
            //

            RxLookupRxEntry( RxContext, Vcb, RxEntry, &RxEntry, &Context );

            PriorLbo = CurrentLbo;
            CurrentLbo = RxGetLboFromIndex( Vcb, RxEntry );
            CurrentVbo += BytesPerCluster;

            switch ( RxInterpretClusterType( Vcb, RxEntry )) {

            //
            //  Check for a break in the Rx allocation chain.
            //

            case RxClusterAvailable:
            case RxClusterReserved:
            case RxClusterBad:

                DebugTrace( 0, Dbg, "Break in allocation chain, entry = %d\n", RxEntry);
                DebugTrace(-1, Dbg, "RxLookupFileAllocation -> Rx Corrupt. Raise Status.\n", 0);

                RxPopUpFileCorrupt( RxContext, FcbOrDcb );

                RxRaiseStatus( RxContext, RxStatus(FILE_CORRUPT_ERROR) );
                break;

            //
            //  If this is the last cluster, we must update the Mcb and
            //  exit the loop.
            //

            case RxClusterLast:

                //
                //  Assert we know where the current run started.  If the
                //  Mcb was empty when we were called, then FirstLboOfCurrentRun
                //  was set to the start of the file.  If the Mcb contained an
                //  entry, then FirstLboOfCurrentRun was set on the first
                //  iteration through the loop.  Thus if FirstLboOfCurrentRun
                //  is 0, then there was an Mcb entry and we are on our first
                //  iteration, meaning that the last cluster in the Mcb was
                //  really the last allocated cluster, but we checked Vbo
                //  against AllocationSize, and found it OK, thus AllocationSize
                //  must be too large.
                //
                //  Note that, when we finally arrive here, CurrentVbo is actually
                //  the first Vbo beyond the file allocation and CurrentLbo is
                //  meaningless.
                //

                DebugTrace( 0, Dbg, "Read last cluster of file.\n", 0);

                LastCluster = TRUE;

                if (FirstLboOfCurrentRun != 0 ) {

                    DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
                    DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
                    DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
                    DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);

                    (VOID)FsRtlAddMcbEntry( &FcbOrDcb->Mcb,
                                            FirstVboOfCurrentRun,
                                            FirstLboOfCurrentRun,
                                            CurrentVbo - FirstVboOfCurrentRun );

                    Runs += 1;
                }

                //
                //  Being at the end of allocation, make sure we have found
                //  the Vbo.  If we haven't, seeing as we checked VBO
                //  against AllocationSize, the real disk allocation is less
                //  than that of AllocationSize.  This comes about when the
                //  real allocation is not yet known, and AllocationSize
                //  contains MAXULONG.
                //
                //  KLUDGE! - If we were called by RxLookupFileAllocationSize
                //  Vbo is set to MAXULONG - 1, and AllocationSize to MAXULONG.
                //  Thus we merrily go along looking for a match that isn't
                //  there, but in the meantime building an Mcb.  If this is
                //  the case, fill in AllocationSize and return.
                //

                if ( Vbo >= CurrentVbo ) {

                    FcbOrDcb->Header.AllocationSize.QuadPart = CurrentVbo;

                    *Allocated = FALSE;

                    DebugTrace( 0, Dbg, "New file allocation size = %08lx.\n", CurrentVbo);

                    try_return ( NOTHING );
                }
                break;

            //
            //  This is a continuation in the chain.  If the run has a
            //  discontiguity at this point, update the Mcb, and if we are beyond
            //  the desired Vbo, this is the end of the run, so set LastCluster
            //  and exit the loop.
            //

            case RxClusterNext:

                //
                //  Do a quick check here for cycles in the Rx that can cause
                //  infinite loops here.
                //

                if ( CurrentVbo > BytesOnVolume ) {

                    RxPopUpFileCorrupt( RxContext, FcbOrDcb );

                    RxRaiseStatus( RxContext, RxStatus(FILE_CORRUPT_ERROR) );
                    break;
                }

                if ( PriorLbo + BytesPerCluster != CurrentLbo ) {

                    //
                    //  Note that on the first time through the loop
                    //  (FirstLboOfCurrentRun == 0), we don't add the
                    //  run to the Mcb since it corresponds to the last
                    //  run already stored in the Mcb.
                    //

                    if ( FirstLboOfCurrentRun != 0 ) {

                        DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
                        DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
                        DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
                        DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);

                        FsRtlAddMcbEntry( &FcbOrDcb->Mcb,
                                          FirstVboOfCurrentRun,
                                          FirstLboOfCurrentRun,
                                          CurrentVbo - FirstVboOfCurrentRun );

                        Runs += 1;
                    }

                    //
                    //  Since we are at a run boundary, with CurrentLbo and
                    //  CurrentVbo being the first cluster of the next run,
                    //  we see if the run we just added encompasses the desired
                    //  Vbo, and if so exit.  Otherwise we set up two new
                    //  First*boOfCurrentRun, and continue.
                    //

                    if (CurrentVbo > Vbo) {

                        LastCluster = TRUE;

                    } else {

                        FirstVboOfCurrentRun = CurrentVbo;
                        FirstLboOfCurrentRun = CurrentLbo;
                    }
                }
                break;

            default:

                DebugTrace(0, Dbg, "Illegal Cluster Type.\n", RxEntry);

                RxBugCheck( 0, 0, 0 );

                break;

            } // switch()
        } // while()

        //
        //  Load up the return parameters.
        //
        //  On exit from the loop, Vbo still contains the desired Vbo, and
        //  CurrentVbo is the first byte after the run that contained the
        //  desired Vbo.
        //

        *Allocated = TRUE;

        *Lbo = FirstLboOfCurrentRun + (Vbo - FirstVboOfCurrentRun);

        *ByteCount = CurrentVbo - Vbo;

        if (ARGUMENT_PRESENT(Index)) {

            *Index = Runs - 1;
        }

    try_exit: NOTHING;

    } finally {

        DebugUnwind( RxLookupFileAllocation );

        //
        //  We are done reading the Rx, so unpin the last page of rx
        //  that is hanging around
        //

        RxUnpinBcb( RxContext, Context.Bcb );

        DebugTrace(-1, Dbg, "RxLookupFileAllocation -> (VOID)\n", 0);
    }

    return;
}
VOID
RxAddFileAllocation (
    IN PRX_CONTEXT RxContext,
    IN PFCB FcbOrDcb,
    IN PFILE_OBJECT FileObject OPTIONAL,
    IN ULONG DesiredAllocationSize
    )

/*++

Routine Description:

    This routine adds additional allocation to the specified file/directory.
    Additional allocation is added by appending clusters to the file/directory.

    If the file already has a sufficient allocation then this procedure
    is effectively a noop.

Arguments:

    RxContext - Supplies the context of the current request.

    FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified.
        This parameter must not specify the root dcb.

    FileObject - If supplied inform the cache manager of the change.

    DesiredAllocationSize - Supplies the minimum size, in bytes, that we want
        allocated to the file/directory.

--*/

{
    PVCB Vcb;

    DebugTrace(+1, Dbg, "RxAddFileAllocation\n", 0);
    DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
    DebugTrace( 0, Dbg, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize);

    //
    // If we haven't yet set the correct AllocationSize, do so.
    // (0xffffffff is the "not yet looked up" sentinel for AllocationSize.)
    //

    if (FcbOrDcb->Header.AllocationSize.LowPart == 0xffffffff) {

        RxLookupFileAllocationSize( RxContext, FcbOrDcb );
    }

    //
    // Check for the benign case that the desired allocation is already
    // within the allocation size.
    //

    if (DesiredAllocationSize <= FcbOrDcb->Header.AllocationSize.LowPart) {

        DebugTrace(0, Dbg, "Desired size within current allocation.\n", 0);

        DebugTrace(-1, Dbg, "RxAddFileAllocation -> (VOID)\n", 0);
        return;
    }

    DebugTrace( 0, Dbg, "InitialAllocation = %08lx.\n", FcbOrDcb->Header.AllocationSize.LowPart);

    //
    // Get a chunk of disk space that will fullfill our needs. If there
    // was no initial allocation, start from the hint in the Vcb, otherwise
    // try to allocate from the cluster after the initial allocation.
    //
    // If there was no initial allocation to the file, we can just use the
    // Mcb in the FcbOrDcb, otherwise we have to use a new one, and merge
    // it to the one in the FcbOrDcb.
    //

    Vcb = FcbOrDcb->Vcb;

    if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {

        PBCB Bcb = NULL;
        PDIRENT Dirent;
        LBO FirstLboOfFile;
        BOOLEAN UnwindWeAllocatedDiskSpace = FALSE;

        try {

            //
            // Pin the dirent first: if we cannot get/dirty it, we fail
            // before any disk space has been allocated and there is
            // nothing to unwind.
            //

            RxGetDirentFromFcbOrDcb( RxContext,
                                     FcbOrDcb,
                                     &Dirent,
                                     &Bcb );

            //
            // Set this dirty right now since this call can fail.
            //

            RxSetDirtyBcb( RxContext, Bcb, Vcb );

            //
            // Hint of 0 means "use (and update) the Vcb cluster hint".
            //

            RxAllocateDiskSpace( RxContext,
                                 Vcb,
                                 0,
                                 &DesiredAllocationSize,
                                 &FcbOrDcb->Mcb );

            UnwindWeAllocatedDiskSpace = TRUE;

            //
            // We have to update the dirent and FcbOrDcb copies of
            // FirstClusterOfFile since before it was 0
            //

            FsRtlLookupMcbEntry( &FcbOrDcb->Mcb,
                                 0,
                                 &FirstLboOfFile,
                                 (PULONG)NULL,
                                 NULL );

            DebugTrace( 0, Dbg, "First Lbo of file will be %08lx.\n", FirstLboOfFile );

            FcbOrDcb->FirstClusterOfFile = RxGetIndexFromLbo( Vcb, FirstLboOfFile );

            FcbOrDcb->Header.AllocationSize.QuadPart = DesiredAllocationSize;

            Dirent->FirstClusterOfFile = (RDBSS_ENTRY)FcbOrDcb->FirstClusterOfFile;

            //
            // Inform the cache manager to increase the section size
            //

            if ( ARGUMENT_PRESENT(FileObject) && CcIsFileCached(FileObject) ) {

                CcSetFileSizes( FileObject,
                                (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
            }

        } finally {

            DebugUnwind( RxAddFileAllocation );

            //
            // On failure after the allocation succeeded, give the disk
            // space back; the Mcb still describes exactly what we took.
            //

            if ( AbnormalTermination() && UnwindWeAllocatedDiskSpace ) {

                RxDeallocateDiskSpace( RxContext, Vcb, &FcbOrDcb->Mcb );
            }

            RxUnpinBcb( RxContext, Bcb );

            DebugTrace(-1, Dbg, "RxAddFileAllocation -> (VOID)\n", 0);
        }

    } else {

        MCB NewMcb;
        LBO LastAllocatedLbo;
        VBO DontCare;
        ULONG NewAllocation;
        BOOLEAN UnwindWeInitializedMcb = FALSE;
        BOOLEAN UnwindWeAllocatedDiskSpace = FALSE;

        try {

            //
            // Get the first cluster following the current allocation
            //

            FsRtlLookupLastMcbEntry( &FcbOrDcb->Mcb, &DontCare, &LastAllocatedLbo);

            //
            // Try to get some disk space starting from there.
            // LastAllocatedLbo + 1 is the first byte past the current
            // allocation, so the hint is the cluster right after it.
            //

            NewAllocation = DesiredAllocationSize - FcbOrDcb->Header.AllocationSize.LowPart;

            FsRtlInitializeMcb( &NewMcb, PagedPool );
            UnwindWeInitializedMcb = TRUE;

            RxAllocateDiskSpace( RxContext,
                                 Vcb,
                                 RxGetIndexFromLbo(Vcb, LastAllocatedLbo + 1),
                                 &NewAllocation,
                                 &NewMcb );

            UnwindWeAllocatedDiskSpace = TRUE;

            //
            // Tack the new Mcb onto the end of the FcbOrDcb one.
            //

            RxMergeAllocation( RxContext,
                               Vcb,
                               &FcbOrDcb->Mcb,
                               &NewMcb );

            //
            // Now that we increased the allocation of the file, mark it in the
            // FcbOrDcb.
            //

            FcbOrDcb->Header.AllocationSize.LowPart += NewAllocation;

            //
            // Inform the cache manager to increase the section size
            //

            if ( ARGUMENT_PRESENT(FileObject) && CcIsFileCached(FileObject) ) {

                CcSetFileSizes( FileObject,
                                (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
            }

        } finally {

            DebugUnwind( RxAddFileAllocation );

            //
            // Detect the case where RxMergeAllocation failed, and
            // Deallocate the disk space.  AllocationSize is only bumped
            // after a successful merge, so if it is still short of the
            // goal the new run was never linked in and must be freed.
            //

            if ( (UnwindWeAllocatedDiskSpace == TRUE) &&
                 (FcbOrDcb->Header.AllocationSize.LowPart < DesiredAllocationSize) ) {

                RxDeallocateDiskSpace( RxContext, Vcb, &NewMcb );
            }

            if (UnwindWeInitializedMcb == TRUE) {

                FsRtlUninitializeMcb( &NewMcb );
            }

            DebugTrace(-1, Dbg, "RxAddFileAllocation -> (VOID)\n", 0);
        }
    }

    //
    // Give FlushFileBuffer a clue here.
    //

    SetFlag(FcbOrDcb->FcbState, FCB_STATE_FLUSH_RDBSS);

    return;
}
VOID
RxTruncateFileAllocation (
    IN PRX_CONTEXT RxContext,
    IN PFCB FcbOrDcb,
    IN ULONG DesiredAllocationSize
    )

/*++

Routine Description:

    This routine truncates the allocation to the specified file/directory.

    If the file is already smaller than the indicated size then this procedure
    is effectively a noop.

Arguments:

    RxContext - Supplies the context of the current request.

    FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
        This parameter must not specify the root dcb.

    DesiredAllocationSize - Supplies the maximum size, in bytes, that we want
        allocated to the file/directory. It is rounded
        up to the nearest cluster.

Return Value:

    VOID - TRUE if the operation completed and FALSE if it had to
        block but could not.

--*/

{
    PVCB Vcb;
    PBCB Bcb = NULL;
    MCB RemainingMcb;
    ULONG BytesPerCluster;
    PDIRENT Dirent = NULL;

    ULONG UnwindInitialAllocationSize;
    ULONG UnwindInitialFirstClusterOfFile;
    BOOLEAN UnwindWeAllocatedMcb = FALSE;

    DebugTrace(+1, Dbg, "RxTruncateFileAllocation\n", 0);
    DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
    DebugTrace( 0, Dbg, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize);

    //
    // If we haven't yet set the correct AllocationSize, do so.
    // (0xffffffff is the "not yet looked up" sentinel.)
    //

    if (FcbOrDcb->Header.AllocationSize.LowPart == 0xffffffff) {

        RxLookupFileAllocationSize( RxContext, FcbOrDcb );
    }

    //
    // Round up the Desired Allocation Size to the next cluster size
    //

    Vcb = FcbOrDcb->Vcb;

    BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;

    DesiredAllocationSize = (DesiredAllocationSize + (BytesPerCluster - 1)) &
                            ~(BytesPerCluster - 1);

    //
    // Check for the benign case that the file is already smaller than
    // the desired truncation.
    //

    if (DesiredAllocationSize >= FcbOrDcb->Header.AllocationSize.LowPart) {

        DebugTrace(0, Dbg, "Desired size within current allocation.\n", 0);

        DebugTrace(-1, Dbg, "RxTruncateFileAllocation -> (VOID)\n", 0);
        return;
    }

    //
    // Snapshot the values we may have to restore on unwind.
    //

    UnwindInitialAllocationSize = FcbOrDcb->Header.AllocationSize.LowPart;
    UnwindInitialFirstClusterOfFile = FcbOrDcb->FirstClusterOfFile;

    //
    // Update the FcbOrDcb allocation size. If it is now zero, we have the
    // additional task of modifying the FcbOrDcb and Dirent copies of
    // FirstClusterInFile.
    //
    // Note that we must pin the dirent before actually deallocating the
    // disk space since, in unwind, it would not be possible to reallocate
    // deallocated disk space as someone else may have reallocated it and
    // may cause an exception when you try to get some more disk space.
    // Thus RxDeallocateDiskSpace must be the final dangerous operation.
    //

    try {

        FcbOrDcb->Header.AllocationSize.QuadPart = DesiredAllocationSize;

        //
        // Special case 0
        //

        if (DesiredAllocationSize == 0) {

            //
            // We have to update the dirent and FcbOrDcb copies of
            // FirstClusterOfFile since before it was 0
            //

            RxGetDirentFromFcbOrDcb( RxContext, FcbOrDcb, &Dirent, &Bcb );

            Dirent->FirstClusterOfFile = 0;
            FcbOrDcb->FirstClusterOfFile = 0;

            RxSetDirtyBcb( RxContext, Bcb, Vcb );

            RxDeallocateDiskSpace( RxContext, Vcb, &FcbOrDcb->Mcb );

            FsRtlRemoveMcbEntry( &FcbOrDcb->Mcb, 0, 0xFFFFFFFF );

        } else {

            //
            // Split the existing allocation into two parts, one we will keep, and
            // one we will deallocate.
            //

            FsRtlInitializeMcb( &RemainingMcb, PagedPool );
            UnwindWeAllocatedMcb = TRUE;

            RxSplitAllocation( RxContext,
                               Vcb,
                               &FcbOrDcb->Mcb,
                               DesiredAllocationSize,
                               &RemainingMcb );

            RxDeallocateDiskSpace( RxContext, Vcb, &RemainingMcb );

            FsRtlUninitializeMcb( &RemainingMcb );
        }

    } finally {

        DebugUnwind( RxTruncateFileAllocation );

        if ( AbnormalTermination() ) {

            //
            // Restore the sizes we snapshotted above.  The dirent fields
            // are only restored if we got far enough to pin the dirent.
            //

            FcbOrDcb->Header.AllocationSize.LowPart = UnwindInitialAllocationSize;

            if ( (DesiredAllocationSize == 0) && (Dirent != NULL)) {

                Dirent->FirstClusterOfFile = (RDBSS_ENTRY)UnwindInitialFirstClusterOfFile;
                FcbOrDcb->FirstClusterOfFile = UnwindInitialFirstClusterOfFile;
            }

            if ( UnwindWeAllocatedMcb ) {

                FsRtlUninitializeMcb( &RemainingMcb );
            }

            //
            // God knows what state we left the disk allocation in.
            // Clear the Mcb.
            //

            FsRtlRemoveMcbEntry( &FcbOrDcb->Mcb, 0, 0xFFFFFFFF );
        }

        RxUnpinBcb( RxContext, Bcb );

        DebugTrace(-1, Dbg, "RxTruncateFileAllocation -> (VOID)\n", 0);
    }

    //
    // Give FlushFileBuffer a clue here.
    //

    SetFlag(FcbOrDcb->FcbState, FCB_STATE_FLUSH_RDBSS);

    return;
}
  1150. VOID
  1151. RxLookupFileAllocationSize (
  1152. IN PRX_CONTEXT RxContext,
  1153. IN PFCB FcbOrDcb
  1154. )
  1155. /*++
  1156. Routine Description:
  1157. This routine retrieves the current file allocatio size for the
  1158. specified file/directory.
  1159. Arguments:
  1160. FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
  1161. --*/
  1162. {
  1163. LBO Lbo;
  1164. ULONG ByteCount;
  1165. BOOLEAN Allocated;
  1166. DebugTrace(+1, Dbg, "RxLookupAllocationSize\n", 0);
  1167. DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
  1168. //
  1169. // We call RxLookupFileAllocation with Vbo of 0xffffffff - 1.
  1170. //
  1171. RxLookupFileAllocation( RxContext,
  1172. FcbOrDcb,
  1173. 0xffffffff - 1,
  1174. &Lbo,
  1175. &ByteCount,
  1176. &Allocated,
  1177. NULL );
  1178. //
  1179. // Assert that we found no allocation.
  1180. //
  1181. ASSERT( Allocated == FALSE );
  1182. DebugTrace(-1, Dbg, "RxLookupFileAllocationSize -> (VOID)\n", 0);
  1183. return;
  1184. }
VOID
RxAllocateDiskSpace (
    IN PRX_CONTEXT RxContext,
    IN PVCB Vcb,
    IN ULONG AlternativeClusterHint,
    IN PULONG ByteCount,
    OUT PMCB Mcb
    )

/*++

Routine Description:

    This procedure allocates additional disk space and builds an mcb
    representing the newly allocated space. If the space cannot be
    allocated then this procedure raises an appropriate status.

    Searching starts from the hint index in the Vcb unless an alternative
    non-zero hint is given in AlternativeClusterHint. If we are using the
    hint field in the Vcb, it is set to the cluster following our allocation
    when we are done.

    Disk space can only be allocated in cluster units so this procedure
    will round up any byte count to the next cluster boundary.

    Pictorially what is done is the following (where ! denotes the end of
    the rx chain (i.e., RDBSS_CLUSTER_LAST)):

        Mcb (empty)

    becomes

        Mcb |--a--|--b--|--c--!

                                ^
        ByteCount ----------+

Arguments:

    RxContext - Supplies the context of the current request.

    Vcb - Supplies the VCB being modified

    AlternativeClusterHint - Supplies an alternative hint index to start the
        search from. If this is zero we use, and update,
        the Vcb hint field.

    ByteCount - Supplies the number of bytes that we are requesting, and
        receives the number of bytes that we got.

    Mcb - Receives the MCB describing the newly allocated disk space. The
        caller passes in an initialized Mcb that is fill in by this procedure.

--*/

{
    UCHAR LogOfBytesPerCluster;
    ULONG BytesPerCluster;
    ULONG StartingCluster;
    ULONG ClusterCount;

#if DBG
    ULONG i;
    ULONG PreviousClear;
#endif

    DebugTrace(+1, Dbg, "RxAllocateDiskSpace\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " *ByteCount = %8lx\n", *ByteCount);
    DebugTrace( 0, Dbg, " Mcb = %8lx\n", Mcb);

    //
    // Make sure byte count is not zero
    //

    if (*ByteCount == 0) {

        DebugTrace(0, Dbg, "Nothing to allocate.\n", 0);

        DebugTrace(-1, Dbg, "RxAllocateDiskSpace -> (VOID)\n", 0);
        return;
    }

    //
    // Compute the cluster count based on the byte count, rounding up
    // to the next cluster if there is any remainder. Note that the
    // pathalogical case BytesCount == 0 has been eliminated above.
    //

    LogOfBytesPerCluster = Vcb->AllocationSupport.LogOfBytesPerCluster;
    BytesPerCluster = 1 << LogOfBytesPerCluster;

    *ByteCount = (*ByteCount + (BytesPerCluster - 1))
                 & ~(BytesPerCluster - 1);

    //
    // If ByteCount is NOW zero, then we rolled over and there is
    // no way we can satisfy the request.
    //

    if (*ByteCount == 0) {

        DebugTrace(0, Dbg, "Disk Full. Raise Status.\n", 0);
        RxRaiseStatus( RxContext, RxStatus(DISK_FULL) );
    }

    ClusterCount = (*ByteCount >> LogOfBytesPerCluster);

    //
    // Make sure there are enough free clusters to start with, and
    // take them so that nobody else does later. Bah Humbug!
    //
    // NOTE: the free cluster count is debited up front; every failure
    // path below must credit it back.
    //

    RxLockFreeClusterBitMap( Vcb );

    if (ClusterCount <= Vcb->AllocationSupport.NumberOfFreeClusters) {

        Vcb->AllocationSupport.NumberOfFreeClusters -= ClusterCount;

    } else {

        RxUnlockFreeClusterBitMap( Vcb );

        DebugTrace(0, Dbg, "Disk Full. Raise Status.\n", 0);
        RxRaiseStatus( RxContext, RxStatus(DISK_FULL) );
    }

    //
    // Try to find a run of free clusters large enough for us.
    // (The bitmap lock is still held here.)
    //

    StartingCluster = RxFindFreeClusterRun( RxContext,
                                            Vcb,
                                            ClusterCount,
                                            AlternativeClusterHint );

    //
    // If the above call was successful, we can just update the rx
    // and Mcb and exit. Otherwise we have to look for smaller free
    // runs.
    //

    if (StartingCluster != 0xffffffff) {

        try {

#if DBG
            //
            // Verify that the Bits are all really zero.
            //

            for (i=0; i<ClusterCount; i++) {
                ASSERT( RtlCheckBit(&Vcb->FreeClusterBitMap,
                                    StartingCluster + i) == 0 );
            }

            PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
#endif // DBG

            //
            // Take the clusters we found, and unlock the bit map.
            //

            RxReserveClusters(RxContext, Vcb, StartingCluster, ClusterCount);

            ASSERT( RtlNumberOfClearBits( &Vcb->FreeClusterBitMap ) ==
                    PreviousClear - ClusterCount );

            RxUnlockFreeClusterBitMap( Vcb );

            //
            // Note that this call will never fail since there is always
            // room for one entry in an empty Mcb.
            //

            FsRtlAddMcbEntry( Mcb,
                              0,
                              RxGetLboFromIndex( Vcb, StartingCluster ),
                              *ByteCount);

            //
            // Update the rx.
            //

            RxAllocateClusters(RxContext, Vcb, StartingCluster, ClusterCount);

            //
            // If we used the Vcb hint index, update it.
            //

            if (AlternativeClusterHint == 0) {

                Vcb->ClusterHint = StartingCluster + ClusterCount;
            }

        } finally {

            DebugUnwind( RxAllocateDiskSpace );

            //
            // If the allocate clusters failed, remove the run from the Mcb,
            // unreserve the clusters, and reset the free cluster count.
            //

            if ( AbnormalTermination() ) {

                FsRtlRemoveMcbEntry( Mcb, 0, *ByteCount );

                RxLockFreeClusterBitMap( Vcb );

#if DBG
                PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
#endif

                RxUnreserveClusters( RxContext, Vcb, StartingCluster, ClusterCount );

                ASSERT( RtlNumberOfClearBits( &Vcb->FreeClusterBitMap ) ==
                        PreviousClear + ClusterCount );

                Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;

                RxUnlockFreeClusterBitMap( Vcb );
            }
        }

    } else {

        ULONG Index;
        ULONG CurrentVbo;
        ULONG PriorLastIndex;

        ULONG BytesFound;
        ULONG ClustersFound;
        ULONG ClustersRemaining;

        try {

            //
            // While the request is still incomplete, look for the largest
            // run of free clusters, mark them taken, allocate the run in
            // the Mcb and Rx, and if this isn't the first time through
            // the loop link it to prior run on the rx. The Mcb will
            // coalesce automatically.
            //

            ClustersRemaining = ClusterCount;
            CurrentVbo = 0;
            PriorLastIndex = 0;

            while (ClustersRemaining != 0) {

                //
                // If we just entered the loop, the bit map is already locked
                // (CurrentVbo == 0 only on the first iteration).
                //

                if ( CurrentVbo != 0 ) {

                    RxLockFreeClusterBitMap( Vcb );
                }

                //
                // Find the largest run of free clusters. If the run is
                // bigger than we need, only use what we need. Note that
                // this will then be the last while() itteration.
                //

                ClustersFound = RxLongestFreeClusterRun( RxContext, Vcb, &Index );

#if DBG
                //
                // Verify that the Bits are all really zero.
                //

                for (i=0; i<ClustersFound; i++) {
                    ASSERT( RtlCheckBit(&Vcb->FreeClusterBitMap,
                                        Index + i) == 0 );
                }

                PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
#endif // DBG

                if (ClustersFound > ClustersRemaining) {

                    ClustersFound = ClustersRemaining;
                }

                //
                // If we found no free cluster, then our Vcb free cluster
                // count is messed up, or our bit map is corrupted, or both.
                // (We already debited the count above, so space must exist.)
                //

                if (ClustersFound == 0) {

                    RxBugCheck( 0, 0, 0 );
                }

                //
                // Take the clusters we found, and unlock the bit map.
                //

                RxReserveClusters( RxContext, Vcb, Index, ClustersFound );

                ASSERT( RtlNumberOfClearBits( &Vcb->FreeClusterBitMap ) ==
                        PreviousClear - ClustersFound );

                RxUnlockFreeClusterBitMap( Vcb );

                //
                // Add the newly alloced run to the Mcb.
                //

                BytesFound = ClustersFound << LogOfBytesPerCluster;

                FsRtlAddMcbEntry( Mcb,
                                  CurrentVbo,
                                  RxGetLboFromIndex( Vcb, Index ),
                                  BytesFound );

                //
                // Connect the last allocated run with this one, and allocate
                // this run on the Rx.
                //

                if (PriorLastIndex != 0) {

                    RxSetRxEntry( RxContext,
                                  Vcb,
                                  PriorLastIndex,
                                  (RDBSS_ENTRY)Index );
                }

                //
                // Update the rx
                //

                RxAllocateClusters( RxContext, Vcb, Index, ClustersFound );

                //
                // Prepare for the next itteration.
                //

                CurrentVbo += BytesFound;

                ClustersRemaining -= ClustersFound;

                PriorLastIndex = Index + ClustersFound - 1;
            }

            //
            // Now all the requested clusters have been allocgated.
            // If we were using the Vcb hint index, update it.
            //

            if (AlternativeClusterHint == 0) {

                Vcb->ClusterHint = Index + ClustersFound;
            }

        } finally {

            DebugUnwind( RxAllocateDiskSpace );

            //
            // Is there any unwinding to do?
            //

            if ( AbnormalTermination() ) {

                //
                // We must have failed during either the add mcb entry or
                // allocate clusters. Thus we always have to unreserve
                // the current run. If the allocate sectors failed, we
                // must also remove the mcb run. We just unconditionally
                // remove the entry since, if it is not there, the effect
                // is benign.
                //

                RxLockFreeClusterBitMap( Vcb );

#if DBG
                PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
#endif

                RxUnreserveClusters( RxContext, Vcb, Index, ClustersFound );

                Vcb->AllocationSupport.NumberOfFreeClusters += ClustersFound;

                ASSERT( RtlNumberOfClearBits( &Vcb->FreeClusterBitMap ) ==
                        PreviousClear + ClustersFound );

                RxUnlockFreeClusterBitMap( Vcb );

                FsRtlRemoveMcbEntry( Mcb, CurrentVbo, BytesFound );

                //
                // Now we have tidyed up, we are ready to just send it
                // off to deallocate disk space
                //

                RxDeallocateDiskSpace( RxContext, Vcb, Mcb );

                //
                // Now finally, remove all the entries from the mcb
                //

                FsRtlRemoveMcbEntry( Mcb, 0, 0xFFFFFFFF );
            }

            DebugTrace(-1, Dbg, "RxAllocateDiskSpace -> (VOID)\n", 0);
        }
    }

    return;
}
VOID
RxDeallocateDiskSpace (
    IN PRX_CONTEXT RxContext,
    IN PVCB Vcb,
    IN PMCB Mcb
    )

/*++

Routine Description:

    This procedure deallocates the disk space denoted by an input
    mcb. Note that the input MCB does not need to necessarily describe
    a chain that ends with a RDBSS_CLUSTER_LAST entry.

    Pictorially what is done is the following

        Rx |--a--|--b--|--c--|
        Mcb |--a--|--b--|--c--|

    becomes

        Rx |--0--|--0--|--0--|
        Mcb |--a--|--b--|--c--|

Arguments:

    RxContext - Supplies the context of the current request.

    Vcb - Supplies the VCB being modified

    Mcb - Supplies the MCB describing the disk space to deallocate. Note
        that Mcb is unchanged by this procedure.

Return Value:

    VOID - TRUE if the operation completed and FALSE if it had to
        block but could not.

--*/

{
    LBO Lbo;
    VBO Vbo;

    ULONG RunsInMcb;
    ULONG ByteCount;
    ULONG ClusterCount;
    ULONG ClusterIndex;
    ULONG McbIndex;

    UCHAR LogOfBytesPerCluster;

    DebugTrace(+1, Dbg, "RxDeallocateDiskSpace\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " Mcb = %8lx\n", Mcb);

    LogOfBytesPerCluster = Vcb->AllocationSupport.LogOfBytesPerCluster;

    RunsInMcb = FsRtlNumberOfRunsInMcb( Mcb );

    if ( RunsInMcb == 0 ) {

        DebugTrace(-1, Dbg, "RxDeallocateDiskSpace -> (VOID)\n", 0);
        return;
    }

    try {

        //
        // Run though the Mcb, freeing all the runs in the rx.
        //
        // We do this in two steps (first update the rx, then the bitmap
        // (which can't fail)) to prevent other people from taking clusters
        // that we need to re-allocate in the event of unwind.
        //

        RunsInMcb = FsRtlNumberOfRunsInMcb( Mcb );

        for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) {

            FsRtlGetNextMcbEntry( Mcb, McbIndex, &Vbo, &Lbo, &ByteCount );

            //
            // Assert that Rx files have no holes.
            //

            ASSERT( Lbo != 0 );

            //
            // Write RDBSS_CLUSTER_AVAILABLE to each cluster in the run.
            //

            ClusterCount = ByteCount >> LogOfBytesPerCluster;
            ClusterIndex = RxGetIndexFromLbo( Vcb, Lbo );

            RxFreeClusters( RxContext, Vcb, ClusterIndex, ClusterCount );
        }

        //
        // From now on, nothing can go wrong .... (as in raise)
        //

        RxLockFreeClusterBitMap( Vcb );

        for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) {

#if DBG
            ULONG PreviousClear;
#endif

            FsRtlGetNextMcbEntry( Mcb, McbIndex, &Vbo, &Lbo, &ByteCount );

            //
            // Write RDBSS_CLUSTER_AVAILABLE to each cluster in the run, and
            // mark the bits clear in the FreeClusterBitMap.
            //

            ClusterCount = ByteCount >> LogOfBytesPerCluster;
            ClusterIndex = RxGetIndexFromLbo( Vcb, Lbo );

#if DBG
            PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
#endif

            RxUnreserveClusters( RxContext, Vcb, ClusterIndex, ClusterCount );

            ASSERT( RtlNumberOfClearBits( &Vcb->FreeClusterBitMap ) ==
                    PreviousClear + ClusterCount );

            //
            // Deallocation is now complete. Adjust the free cluster count.
            //

            Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
        }

        RxUnlockFreeClusterBitMap( Vcb );

    } finally {

        DebugUnwind( RxDeallocateDiskSpace );

        //
        // Is there any unwinding to do?
        //

        if ( AbnormalTermination() ) {

            LBO Lbo;
            VBO Vbo;

            ULONG Index;
            ULONG Clusters;
            ULONG RxIndex;
            ULONG PriorLastIndex;

            //
            // For each entry we already deallocated, reallocate it,
            // chaining together as nessecary. Note that we continue
            // up to and including the last "for" itteration even though
            // the SetRxRun could not have been successful. This
            // allows us a convienent way to re-link the final successful
            // SetRxRun.
            //
            // (Only the first loop above can raise; McbIndex indexes the
            // run that failed.)
            //

            PriorLastIndex = 0;

            for (Index = 0; Index <= McbIndex; Index++) {

                FsRtlGetNextMcbEntry(Mcb, Index, &Vbo, &Lbo, &ByteCount);

                RxIndex = RxGetIndexFromLbo( Vcb, Lbo );
                Clusters = ByteCount >> LogOfBytesPerCluster;

                //
                // We must always restore the prior itteration's last
                // entry, pointing it to the first cluster of this run.
                //

                if (PriorLastIndex != 0) {

                    RxSetRxEntry( RxContext,
                                  Vcb,
                                  PriorLastIndex,
                                  (RDBSS_ENTRY)RxIndex );
                }

                //
                // If this is not the last entry (the one that failed)
                // then reallocate the disk space on the rx.
                //

                if ( Index < McbIndex ) {

                    RxAllocateClusters(RxContext, Vcb, RxIndex, Clusters);

                    PriorLastIndex = RxIndex + Clusters - 1;
                }
            }
        }

        DebugTrace(-1, Dbg, "RxDeallocateDiskSpace -> (VOID)\n", 0);
    }

    return;
}
VOID
RxSplitAllocation (
    IN PRX_CONTEXT RxContext,
    IN PVCB Vcb,
    IN OUT PMCB Mcb,
    IN VBO SplitAtVbo,
    OUT PMCB RemainingMcb
    )

/*++

Routine Description:

    This procedure takes a single mcb and splits its allocation into
    two separate allocation units. The separation must only be done
    on cluster boundaries, otherwise we bugcheck.

    On the disk this actually works by inserting a RDBSS_CLUSTER_LAST into
    the last index of the first part being split out.

    Pictorially what is done is the following (where ! denotes the end of
    the rx chain (i.e., RDBSS_CLUSTER_LAST)):

        Mcb |--a--|--b--|--c--|--d--|--e--|--f--|

                                    ^
        SplitAtVbo ---------------------+

        RemainingMcb (empty)

    becomes

        Mcb |--a--|--b--|--c--!

        RemainingMcb |--d--|--e--|--f--|

Arguments:

    RxContext - Supplies the context of the current request.

    Vcb - Supplies the VCB being modified

    Mcb - Supplies the MCB describing the allocation being split into
        two parts. Upon return this Mcb now contains the first chain.

    SplitAtVbo - Supplies the VBO of the first byte for the second chain
        that we creating.

    RemainingMcb - Receives the MCB describing the second chain of allocated
        disk space. The caller passes in an initialized Mcb that
        is filled in by this procedure STARTING AT VBO 0.

Return Value:

    VOID - TRUE if the operation completed and FALSE if it had to
        block but could not.

--*/

{
    VBO SourceVbo;
    VBO TargetVbo;
    VBO DontCare;

    LBO Lbo;

    ULONG ByteCount;
    ULONG BytesPerCluster;

    DebugTrace(+1, Dbg, "RxSplitAllocation\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " Mcb = %8lx\n", Mcb);
    DebugTrace( 0, Dbg, " SplitAtVbo = %8lx\n", SplitAtVbo);
    DebugTrace( 0, Dbg, " RemainingMcb = %8lx\n", RemainingMcb);

    BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;

    //
    // Assert that the split point is cluster alligned
    //

    ASSERT( (SplitAtVbo & (BytesPerCluster - 1)) == 0 );

    //
    // Assert we were given an empty target Mcb.
    //

    //
    // This assert is commented out to avoid hitting in the Ea error
    // path. In that case we will be using the same Mcb's to split the
    // allocation that we used to merge them. The target Mcb will contain
    // the runs that the split will attempt to insert.
    //
    //
    // ASSERT( FsRtlNumberOfRunsInMcb( RemainingMcb ) == 0 );
    //

    try {

        //
        // Move the runs after SplitAtVbo from the souce to the target.
        // The lookup re-probes at SourceVbo each pass, so removing the
        // run from Mcb and advancing by its ByteCount walks every run
        // at or beyond the split point exactly once.
        //

        SourceVbo = SplitAtVbo;
        TargetVbo = 0;

        while (FsRtlLookupMcbEntry(Mcb, SourceVbo, &Lbo, &ByteCount, NULL)) {

            FsRtlAddMcbEntry( RemainingMcb, TargetVbo, Lbo, ByteCount );

            FsRtlRemoveMcbEntry( Mcb, SourceVbo, ByteCount );

            TargetVbo += ByteCount;
            SourceVbo += ByteCount;
        }

        //
        // Mark the last pre-split cluster as a RDBSS_LAST_CLUSTER
        //

        if ( SplitAtVbo != 0 ) {

            FsRtlLookupLastMcbEntry( Mcb, &DontCare, &Lbo );

            RxSetRxEntry( RxContext,
                          Vcb,
                          RxGetIndexFromLbo( Vcb, Lbo ),
                          RDBSS_CLUSTER_LAST );
        }

    } finally {

        DebugUnwind( RxSplitAllocation );

        //
        // If we got an exception, we must glue back together the Mcbs
        // (move everything we shifted into RemainingMcb back into Mcb
        // at its original Vbo).
        //

        if ( AbnormalTermination() ) {

            TargetVbo = SplitAtVbo;
            SourceVbo = 0;

            while (FsRtlLookupMcbEntry(RemainingMcb, SourceVbo, &Lbo, &ByteCount, NULL)) {

                FsRtlAddMcbEntry( Mcb, TargetVbo, Lbo, ByteCount );

                FsRtlRemoveMcbEntry( RemainingMcb, SourceVbo, ByteCount );

                TargetVbo += ByteCount;
                SourceVbo += ByteCount;
            }
        }

        DebugTrace(-1, Dbg, "RxSplitAllocation -> (VOID)\n", 0);
    }

    return;
}
VOID
RxMergeAllocation (
    IN PRX_CONTEXT RxContext,
    IN PVCB Vcb,
    IN OUT PMCB Mcb,
    IN PMCB SecondMcb
    )

/*++

Routine Description:

    This routine takes two separate allocations described by two MCBs and
    joins them together into one allocation.

    Pictorially what is done is the following (where ! denotes the end of
    the rx chain (i.e., RDBSS_CLUSTER_LAST)):

        Mcb       |--a--|--b--|--c--!

        SecondMcb |--d--|--e--|--f--|

    becomes

        Mcb       |--a--|--b--|--c--|--d--|--e--|--f--|

        SecondMcb |--d--|--e--|--f--|

Arguments:

    RxContext - Supplies the context of the current request.

    Vcb - Supplies the VCB being modified

    Mcb - Supplies the MCB of the first allocation that is being modified.
          Upon return this Mcb will also describe the newly enlarged
          allocation

    SecondMcb - Supplies the ZERO VBO BASED MCB of the second allocation
                that is being appended to the first allocation.  This
                procedure leaves SecondMcb unchanged.

Return Value:

    VOID - This routine returns nothing; failures are reported by raising.

--*/

{
    VBO SpliceVbo;          // Vbo of the last byte currently mapped by Mcb
    LBO SpliceLbo;          // Lbo backing SpliceVbo; the cluster that gets relinked
    VBO SourceVbo;          // read cursor into SecondMcb (zero based)
    VBO TargetVbo;          // append cursor into Mcb (starts just past SpliceVbo)
    LBO Lbo;
    ULONG ByteCount;

    DebugTrace(+1, Dbg, "RxMergeAllocation\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " Mcb = %8lx\n", Mcb);
    DebugTrace( 0, Dbg, " SecondMcb = %8lx\n", SecondMcb);

    try {

        //
        // Append the runs from SecondMcb to Mcb.  Each run keeps its Lbo
        // but is remapped to the Vbo range immediately following the
        // current end of Mcb.
        //

        FsRtlLookupLastMcbEntry( Mcb, &SpliceVbo, &SpliceLbo );

        SourceVbo = 0;
        TargetVbo = SpliceVbo + 1;

        while (FsRtlLookupMcbEntry(SecondMcb, SourceVbo, &Lbo, &ByteCount, NULL)) {

            FsRtlAddMcbEntry( Mcb, TargetVbo, Lbo, ByteCount );

            SourceVbo += ByteCount;
            TargetVbo += ByteCount;
        }

        //
        // Link the last pre-merge cluster to the first cluster of SecondMcb
        // by overwriting its on-disk "last cluster" entry with the index of
        // SecondMcb's first cluster.
        //

        FsRtlLookupMcbEntry( SecondMcb, 0, &Lbo, (PULONG)NULL, NULL );

        RxSetRxEntry( RxContext,
                      Vcb,
                      RxGetIndexFromLbo( Vcb, SpliceLbo ),
                      (RDBSS_ENTRY)RxGetIndexFromLbo( Vcb, Lbo ) );

    } finally {

        DebugUnwind( RxMergeAllocation );

        //
        // If we got an exception, we must remove the runs added to Mcb.
        //
        // NOTE(review): this unwind computes CutLength from SpliceVbo and
        // TargetVbo, which are only meaningful once the lookup/assignments
        // above have executed; it assumes FsRtlLookupLastMcbEntry itself
        // cannot raise -- confirm against the Mcb package contract.
        //

        if ( AbnormalTermination() ) {

            ULONG CutLength;

            if ((CutLength = TargetVbo - (SpliceVbo + 1)) != 0) {

                FsRtlRemoveMcbEntry( Mcb, SpliceVbo + 1, CutLength);
            }
        }

        DebugTrace(-1, Dbg, "RxMergeAllocation -> (VOID)\n", 0);
    }

    return;
}
  1797. //
  1798. // Internal support routine
  1799. //
  1800. CLUSTER_TYPE
  1801. RxInterpretClusterType (
  1802. IN PVCB Vcb,
  1803. IN RDBSS_ENTRY Entry
  1804. )
  1805. /*++
  1806. Routine Description:
  1807. This procedure tells the caller how to interpret the input rx table
  1808. entry. It will indicate if the rx cluster is available, resereved,
  1809. bad, the last one, or the another rx index. This procedure can deal
  1810. with both 12 and 16 bit rx.
  1811. Arguments:
  1812. Vcb - Supplies the Vcb to examine, yields 12/16 bit info
  1813. Entry - Supplies the rx entry to examine
  1814. Return Value:
  1815. CLUSTER_TYPE - Is the type of the input Rx entry
  1816. --*/
  1817. {
  1818. DebugTrace(+1, Dbg, "InterpretClusterType\n", 0);
  1819. DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
  1820. DebugTrace( 0, Dbg, " Entry = %8lx\n", Entry);
  1821. //
  1822. // check for 12 or 16 bit rx
  1823. //
  1824. if (Vcb->AllocationSupport.RxIndexBitSize == 12) {
  1825. //
  1826. // for 12 bit rx check for one of the cluster types, but first
  1827. // make sure we only looking at 12 bits of the entry
  1828. //
  1829. ASSERT( Entry <= 0xfff );
  1830. if (Entry == 0x000) {
  1831. DebugTrace(-1, Dbg, "RxInterpretClusterType -> RxClusterAvailable\n", 0);
  1832. return RxClusterAvailable;
  1833. } else if (Entry < 0xff0) {
  1834. DebugTrace(-1, Dbg, "RxInterpretClusterType -> RxClusterNext\n", 0);
  1835. return RxClusterNext;
  1836. } else if (Entry >= 0xff8) {
  1837. DebugTrace(-1, Dbg, "RxInterpretClusterType -> RxClusterLast\n", 0);
  1838. return RxClusterLast;
  1839. } else if (Entry <= 0xff6) {
  1840. DebugTrace(-1, Dbg, "RxInterpretClusterType -> RxClusterReserved\n", 0);
  1841. return RxClusterReserved;
  1842. } else if (Entry == 0xff7) {
  1843. DebugTrace(-1, Dbg, "RxInterpretClusterType -> RxClusterBad\n", 0);
  1844. return RxClusterBad;
  1845. }
  1846. } else {
  1847. //
  1848. // for 16 bit rx check for one of the cluster types, but first
  1849. // make sure we are only looking at 16 bits of the entry
  1850. //
  1851. ASSERT( Entry <= 0xffff );
  1852. if (Entry == 0x0000) {
  1853. DebugTrace(-1, Dbg, "RxInterpretClusterType -> RxClusterAvailable\n", 0);
  1854. return RxClusterAvailable;
  1855. } else if (Entry < 0xfff0) {
  1856. DebugTrace(-1, Dbg, "RxInterpretClusterType -> RxClusterNext\n", 0);
  1857. return RxClusterNext;
  1858. } else if (Entry >= 0xfff8) {
  1859. DebugTrace(-1, Dbg, "RxInterpretClusterType -> RxClusterLast\n", 0);
  1860. return RxClusterLast;
  1861. } else if (Entry <= 0xfff6) {
  1862. DebugTrace(-1, Dbg, "RxInterpretClusterType -> RxClusterReserved\n", 0);
  1863. return RxClusterReserved;
  1864. } else if (Entry == 0xfff7) {
  1865. DebugTrace(-1, Dbg, "RxInterpretClusterType -> RxClusterBad\n", 0);
  1866. return RxClusterBad;
  1867. }
  1868. }
  1869. }
  1870. //
  1871. // Internal support routine
  1872. //
VOID
RxLookupRxEntry (
    IN PRX_CONTEXT RxContext,
    IN PVCB Vcb,
    IN ULONG RxIndex,
    IN OUT PRDBSS_ENTRY RxEntry,
    IN OUT PRDBSS_ENUMERATION_CONTEXT Context
    )

/*++

Routine Description:

    This routine takes an index into the rx and gives back the value
    in the Rx at this index.  At any given time, for a 16 bit rx, this
    routine allows only one page per volume of the rx to be pinned in
    memory.  For a 12 bit bit rx, the entire rx (max 6k) is pinned.  This
    extra layer of caching makes the vast majority of requests very
    fast.  The context for this caching stored in a structure in the Vcb.

Arguments:

    RxContext - Supplies the context of the current request.

    Vcb - Supplies the Vcb to examine, yields 12/16 bit info,
          rx access context, etc.

    RxIndex - Supplies the rx index to examine.

    RxEntry - Receives the rx entry pointed to by RxIndex.  Note that
              it must point to non-paged pool.

    Context - This structure keeps track of a page of pinned rx between calls.
              The caller owns unpinning Context->Bcb when the enumeration
              is finished.

--*/

{
    DebugTrace(+1, Dbg, "RxLookupRxEntry\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " RxIndex = %4x\n", RxIndex);
    DebugTrace( 0, Dbg, " RxEntry = %8lx\n", RxEntry);

    //
    // Make sure they gave us a valid rx index.
    //

    RxVerifyIndexIsValid(RxContext, Vcb, RxIndex);

    //
    // Case on 12 or 16 bit rxs.
    //
    // In the 12 bit case (mostly floppies) we always have the whole rx
    // (max 6k bytes) pinned during allocation operations.  This is possibly
    // a wee bit slower, but saves headaches over rx entries with 8 bits
    // on one page, and 4 bits on the next.
    //
    // The 16 bit case always keeps the last used page pinned until all
    // operations are done and it is unpinned.
    //

    //
    // DEAL WITH 12 BIT CASE
    //

    if (Vcb->AllocationSupport.RxIndexBitSize == 12) {

        //
        // Check to see if the rx is already pinned, otherwise pin it.
        // A NULL Bcb in the context means nothing is pinned yet.
        //

        if (Context->Bcb == NULL) {

            RxReadVolumeFile( RxContext,
                              Vcb,
                              RxReservedBytes( &Vcb->Bpb ),
                              RxBytesPerRx( &Vcb->Bpb ),
                              &Context->Bcb,
                              &Context->PinnedPage );
        }

        //
        // Load the return value.  The helper extracts the 12-bit entry,
        // which may straddle a byte boundary within the pinned image.
        //

        RxLookup12BitEntry( Context->PinnedPage, RxIndex, RxEntry );

    } else {

        //
        // DEAL WITH 16 BIT CASE
        //

        ULONG PageEntryOffset;          // entry's index within the pinned page
        ULONG OffsetIntoVolumeFile;     // byte offset of the entry in the volume file

        //
        // Initialize two local variables that help us.
        //

        OffsetIntoVolumeFile = RxReservedBytes(&Vcb->Bpb) + RxIndex * sizeof(RDBSS_ENTRY);
        PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(RDBSS_ENTRY);

        //
        // Check to see if we need to read in a new page of rx: either
        // nothing is pinned yet, or the wanted entry lives on a different
        // page than the one currently pinned.
        //

        if ((Context->Bcb == NULL) ||
            (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {

            //
            // The entry wasn't in the pinned page, so must we unpin the current
            // page (if any) and read in a new page.  The unpin must precede
            // the read so we never hold two pages at once.
            //

            RxUnpinBcb( RxContext, Context->Bcb );

            RxReadVolumeFile( RxContext,
                              Vcb,
                              OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
                              PAGE_SIZE,
                              &Context->Bcb,
                              &Context->PinnedPage );

            Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
        }

        //
        // Grab the rx entry from the pinned page, and return
        //

        *RxEntry = ((PRDBSS_ENTRY)(Context->PinnedPage))[PageEntryOffset];
    }

    DebugTrace(-1, Dbg, "RxLookupRxEntry -> (VOID)\n", 0);

    return;
}
  1973. //
  1974. // Internal support routine
  1975. //
VOID
RxSetRxEntry (
    IN PRX_CONTEXT RxContext,
    IN PVCB Vcb,
    IN ULONG RxIndex,
    IN RDBSS_ENTRY RxEntry
    )

/*++

Routine Description:

    This routine takes an index into the rx and puts a value in the Rx
    at this index.  The routine special cases 12 and 16 bit rxs.  In both
    cases we go to the cache manager for a piece of the rx.

Arguments:

    RxContext - Supplies the context of the current request.

    Vcb - Supplies the Vcb to examine, yields 12/16 bit info, etc.

    RxIndex - Supplies the destination rx index.

    RxEntry - Supplies the source rx entry.

--*/

{
    LBO Lbo;
    PBCB Bcb = NULL;                    // pinned buffer; unpinned in the finally clause
    ULONG SectorSize;
    ULONG OffsetIntoVolumeFile;         // byte offset of the entry within the volume file
    BOOLEAN ReleaseMutex = FALSE;       // tracks bitmap-mutex ownership for unwind

    DebugTrace(+1, Dbg, "RxSetRxEntry\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " RxIndex = %4x\n", RxIndex);
    DebugTrace( 0, Dbg, " RxEntry = %4x\n", RxEntry);

    //
    // Make sure they gave us a valid rx index.
    //

    RxVerifyIndexIsValid(RxContext, Vcb, RxIndex);

    //
    // Set Sector Size
    //

    SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;

    //
    // Case on 12 or 16 bit rxs.
    //
    // In the 12 bit case (mostly floppies) we always have the whole rx
    // (max 6k bytes) pinned during allocation operations.  This is possibly
    // a wee bit slower, but saves headaches over rx entries with 8 bits
    // on one page, and 4 bits on the next.
    //
    // In the 16 bit case we only read the page that we need to set the rx
    // entry.
    //

    //
    // DEAL WITH 12 BIT CASE
    //

    try {

        if (Vcb->AllocationSupport.RxIndexBitSize == 12) {

            PVOID PinnedRx;

            //
            // Make sure we have a valid entry (only the low 12 bits
            // are meaningful on a 12 bit volume).
            //

            RxEntry &= 0xfff;

            //
            // We read in the entire rx.  Note that using prepare write marks
            // the bcb pre-dirty, so we don't have to do it explicitly.
            //

            OffsetIntoVolumeFile = RxReservedBytes( &Vcb->Bpb ) + RxIndex * 3 / 2;

            RxPrepareWriteVolumeFile( RxContext,
                                      Vcb,
                                      RxReservedBytes( &Vcb->Bpb ),
                                      RxBytesPerRx( &Vcb->Bpb ),
                                      &Bcb,
                                      &PinnedRx,
                                      FALSE );

            //
            // Mark the sector(s) dirty in the DirtyRxMcb.  This call is
            // complicated somewhat for the 12 bit case since a single
            // entry write can span two sectors (and pages).
            //
            // Get the Lbo for the sector where the entry starts, and add it to
            // the dirty rx Mcb.
            //

            Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);

            FsRtlAddMcbEntry( &Vcb->DirtyRxMcb, Lbo, Lbo, SectorSize);

            //
            // If the entry started on the last byte of the sector, it continues
            // to the next sector, so mark the next sector dirty as well.
            //
            // Note that this entry will simply coalese with the last entry,
            // so this operation cannot fail.  Also if we get this far, we have
            // made it, so no unwinding will be needed.
            //

            if ( (OffsetIntoVolumeFile & (SectorSize - 1)) == (SectorSize - 1) ) {

                Lbo += SectorSize;

                FsRtlAddMcbEntry( &Vcb->DirtyRxMcb, Lbo, Lbo, SectorSize );
            }

            //
            // Store the entry into the rx; we need a little synchonization
            // here and can't use a spinlock since the bytes might not be
            // resident.  ReleaseMutex is toggled around the store so the
            // finally clause knows whether the mutex must be dropped on
            // an unwind.
            //

            RxLockFreeClusterBitMap( Vcb );
            ReleaseMutex = TRUE;

            RxSet12BitEntry( PinnedRx, RxIndex, RxEntry );

            ReleaseMutex = FALSE;
            RxUnlockFreeClusterBitMap( Vcb );

        } else {

            //
            // DEAL WITH 16 BIT CASE
            //

            PRDBSS_ENTRY PinnedRxEntry;

            //
            // Read in a new page of rx (just the single entry's worth is
            // requested; the cache manager pins the containing page).
            //

            OffsetIntoVolumeFile = RxReservedBytes( &Vcb->Bpb ) +
                                   RxIndex * sizeof( RDBSS_ENTRY );

            RxPrepareWriteVolumeFile( RxContext,
                                      Vcb,
                                      OffsetIntoVolumeFile,
                                      sizeof(RDBSS_ENTRY),
                                      &Bcb,
                                      (PVOID *)&PinnedRxEntry,
                                      FALSE );

            //
            // Mark the sector dirty in the DirtyRxMcb
            //

            Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);

            FsRtlAddMcbEntry( &Vcb->DirtyRxMcb, Lbo, Lbo, SectorSize);

            //
            // Store the RxEntry to the pinned page.
            //
            // We need extra synchronization here for broken architectures
            // like the ALPHA that don't support atomic 16 bit writes.
            //

#ifdef ALPHA
            RxLockFreeClusterBitMap( Vcb );
            ReleaseMutex = TRUE;
#endif // ALPHA

#ifdef ALPHA
            *PinnedRxEntry = RxEntry;

            ReleaseMutex = FALSE;
            RxUnlockFreeClusterBitMap( Vcb );
#else
            *PinnedRxEntry = RxEntry;
#endif // ALPHA
        }

    } finally {

        DebugUnwind( RxSetRxEntry );

        //
        // If we still somehow have the Mutex, release it.  This can only
        // happen if we are unwinding out of the store above.
        //

        if (ReleaseMutex) {

            ASSERT( AbnormalTermination() );

            RxUnlockFreeClusterBitMap( Vcb );
        }

        //
        // Unpin the Bcb (a no-op if nothing was pinned before the raise).
        //

        RxUnpinBcb(RxContext, Bcb);

        DebugTrace(-1, Dbg, "RxSetRxEntry -> (VOID)\n", 0);
    }

    return;
}
  2131. //
  2132. // Internal support routine
  2133. //
VOID
RxSetRxRun (
    IN PRX_CONTEXT RxContext,
    IN PVCB Vcb,
    IN ULONG StartingRxIndex,
    IN ULONG ClusterCount,
    IN BOOLEAN ChainTogether
    )

/*++

Routine Description:

    This routine sets a continues run of clusters in the rx.  If ChainTogether
    is TRUE, then the clusters are linked together as in normal Rx fasion,
    with the last cluster receiving RDBSS_CLUSTER_LAST.  If ChainTogether is
    FALSE, all the entries are set to RDBSS_CLUSTER_AVAILABLE, effectively
    freeing all the clusters in the run.

Arguments:

    RxContext - Supplies the context of the current request.

    Vcb - Supplies the Vcb to examine, yields 12/16 bit info, etc.

    StartingRxIndex - Supplies the destination rx index.

    ClusterCount - Supplies the number of contiguous clusters to work on.

    ChainTogether - Tells us whether to fill the entries with links, or
                    RDBSS_CLUSTER_AVAILABLE

Return Value:

    VOID

--*/

{
    //
    // SavedBcbs holds (Bcb, mapped-address) pairs for every page pinned
    // below; the finally clause walks it until the first NULL Bcb, so the
    // array is sized for the worst-case number of rx pages plus slop for
    // the boot sector and a NULL end marker.
    //

    PBCB SavedBcbs[(0x10000 * sizeof(RDBSS_ENTRY) / PAGE_SIZE) + 2][2];

    ULONG SectorSize;
    ULONG Cluster;

    LBO StartSectorLbo;
    LBO FinalSectorLbo;
    LBO Lbo;

    PVOID PinnedRx;

    ULONG StartingPage;     // NOTE(review): assigned in both arms but never
                            // read afterwards -- appears vestigial; confirm.

    BOOLEAN ReleaseMutex = FALSE;

    DebugTrace(+1, Dbg, "RxSetRxRun\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
    DebugTrace( 0, Dbg, " StartingRxIndex = %8x\n", StartingRxIndex);
    DebugTrace( 0, Dbg, " ClusterCount = %8lx\n", ClusterCount);
    DebugTrace( 0, Dbg, " ChainTogether = %s\n", ChainTogether ? "TRUE":"FALSE");

    //
    // Make sure they gave us a valid rx run.
    //

    RxVerifyIndexIsValid(RxContext, Vcb, StartingRxIndex);
    RxVerifyIndexIsValid(RxContext, Vcb, StartingRxIndex + ClusterCount - 1);

    //
    // Check special case
    //

    if (ClusterCount == 0) {

        DebugTrace(-1, Dbg, "RxSetRxRun -> (VOID)\n", 0);
        return;
    }

    //
    // Set Sector Size
    //

    SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;

    //
    // Case on 12 or 16 bit rxs.
    //
    // In the 12 bit case (mostly floppies) we always have the whole rx
    // (max 6k bytes) pinned during allocation operations.  This is possibly
    // a wee bit slower, but saves headaches over rx entries with 8 bits
    // on one page, and 4 bits on the next.
    //
    // In the 16 bit case we only read one page at a time, as needed.
    //

    //
    // DEAL WITH 12 BIT CASE
    //

    try {

        if (Vcb->AllocationSupport.RxIndexBitSize == 12) {

            StartingPage = 0;

            //
            // We read in the entire rx.  Note that using prepare write marks
            // the bcb pre-dirty, so we don't have to do it explicitly.
            //
            // Zero the first two SavedBcbs pairs first so the unwind loop
            // in the finally clause terminates even if the pin raises.
            //

            RtlZeroMemory( &SavedBcbs[0], 2 * sizeof(PBCB) * 2);

            RxPrepareWriteVolumeFile( RxContext,
                                      Vcb,
                                      RxReservedBytes( &Vcb->Bpb ),
                                      RxBytesPerRx( &Vcb->Bpb ),
                                      &SavedBcbs[0][0],
                                      &PinnedRx,
                                      FALSE );

            //
            // Mark the affected sectors dirty.  Note that FinalSectorLbo is
            // the Lbo of the END of the entry (Thus * 3 + 2).  This makes sure
            // we catch the case of a dirty rx entry stragling a sector boundry.
            //
            // Note that if the first AddMcbEntry succeeds, all following ones
            // will simply coalese, and thus also succeed.
            //

            StartSectorLbo = (RxReservedBytes( &Vcb->Bpb ) + StartingRxIndex * 3 / 2)
                             & ~(SectorSize - 1);

            FinalSectorLbo = (RxReservedBytes( &Vcb->Bpb ) + ((StartingRxIndex +
                             ClusterCount) * 3 + 2) / 2) & ~(SectorSize - 1);

            for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {

                FsRtlAddMcbEntry( &Vcb->DirtyRxMcb, Lbo, Lbo, SectorSize );
            }

            //
            // Store the entries into the rx; we need a little
            // synchonization here and can't use a spinlock since the bytes
            // might not be resident.
            //

            RxLockFreeClusterBitMap( Vcb );
            ReleaseMutex = TRUE;

            for (Cluster = StartingRxIndex;
                 Cluster < StartingRxIndex + ClusterCount - 1;
                 Cluster++) {

                RxSet12BitEntry( PinnedRx,
                                 Cluster,
                                 ChainTogether ? Cluster + 1 : RDBSS_CLUSTER_AVAILABLE );
            }

            //
            // Save the last entry (chain terminator, or another free mark).
            //

            RxSet12BitEntry( PinnedRx,
                             Cluster,
                             ChainTogether ?
                             RDBSS_CLUSTER_LAST & 0xfff : RDBSS_CLUSTER_AVAILABLE );

            ReleaseMutex = FALSE;
            RxUnlockFreeClusterBitMap( Vcb );

        } else {

            //
            // DEAL WITH 16 BIT CASE
            //

            VBO StartOffsetInVolume;
            VBO FinalOffsetInVolume;

            ULONG Page;
            ULONG FinalCluster;
            PRDBSS_ENTRY RxEntry;

            StartOffsetInVolume = RxReservedBytes(&Vcb->Bpb) +
                                  StartingRxIndex * sizeof(RDBSS_ENTRY);

            FinalOffsetInVolume = StartOffsetInVolume +
                                  (ClusterCount - 1) * sizeof(RDBSS_ENTRY);

            StartingPage = StartOffsetInVolume / PAGE_SIZE;

            //
            // Read in one page of rx at a time.  We cannot read in the
            // all of the rx we need because of cache manager limitations.
            //
            // SavedBcb was initialized to be able to hold the largest
            // possible number of pages in a rx plus and extra one to
            // accomadate the boot sector, plus one more to make sure there
            // is enough room for the RtlZeroMemory below that needs the mark
            // the first Bcb after all the ones we will use as an end marker.
            //

            {
                ULONG NumberOfPages;
                ULONG Offset;

                NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
                                (StartOffsetInVolume / PAGE_SIZE) + 1;

                RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );

                for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
                      Page < NumberOfPages;
                      Page++, Offset += PAGE_SIZE ) {

                    RxPrepareWriteVolumeFile( RxContext,
                                              Vcb,
                                              Offset,
                                              PAGE_SIZE,
                                              &SavedBcbs[Page][0],
                                              (PVOID *)&SavedBcbs[Page][1],
                                              FALSE );

                    if (Page == 0) {

                        //
                        // Point RxEntry at the first entry of the run within
                        // the first pinned page.
                        //

                        RxEntry = (PRDBSS_ENTRY)((PUCHAR)SavedBcbs[0][1] +
                                  (StartOffsetInVolume % PAGE_SIZE));
                    }
                }
            }

            //
            // Mark the run dirty
            //

            StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
            FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);

            for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {

                FsRtlAddMcbEntry( &Vcb->DirtyRxMcb, Lbo, Lbo, SectorSize );
            }

            //
            // Store the entries
            //
            // We need extra synchronization here for broken architectures
            // like the ALPHA that don't support atomic 16 bit writes.
            //

#ifdef ALPHA
            RxLockFreeClusterBitMap( Vcb );
            ReleaseMutex = TRUE;
#endif // ALPHA

            FinalCluster = StartingRxIndex + ClusterCount - 1;
            Page = 0;

            for (Cluster = StartingRxIndex;
                 Cluster <= FinalCluster;
                 Cluster++, RxEntry++) {

                //
                // If we just crossed a page boundry (as apposed to starting
                // on one), update out idea of RxEntry.
                //

                if ( (((ULONG)RxEntry & (PAGE_SIZE-1)) == 0) &&
                     (Cluster != StartingRxIndex) ) {

                    Page += 1;
                    RxEntry = (PRDBSS_ENTRY)SavedBcbs[Page][1];
                }

                *RxEntry = ChainTogether ? (RDBSS_ENTRY)(Cluster + 1) :
                                           RDBSS_CLUSTER_AVAILABLE;
            }

            //
            // Fix up the last entry if we were chaining together
            // (the loop above linked it to Cluster + 1).
            //

            if ( ChainTogether ) {

                *(RxEntry-1) = RDBSS_CLUSTER_LAST;
            }

#ifdef ALPHA
            ReleaseMutex = FALSE;
            RxUnlockFreeClusterBitMap( Vcb );
#endif // ALPHA
        }

    } finally {

        ULONG i = 0;

        DebugUnwind( RxSetRxRun );

        //
        // If we still somehow have the Mutex, release it.
        //

        if (ReleaseMutex) {

            ASSERT( AbnormalTermination() );

            RxUnlockFreeClusterBitMap( Vcb );
        }

        //
        // Unpin the Bcbs.  The zeroed pair following the last pinned page
        // terminates this loop.
        //

        while ( SavedBcbs[i][0] != NULL ) {

            RxUnpinBcb( RxContext, SavedBcbs[i][0] );

            i += 1;
        }

        DebugTrace(-1, Dbg, "RxSetRxRun -> (VOID)\n", 0);
    }

    return;
}
  2367. //
  2368. // Internal support routine
  2369. //
  2370. UCHAR
  2371. RxLogOf (
  2372. IN ULONG Value
  2373. )
  2374. /*++
  2375. Routine Description:
  2376. This routine just computes the base 2 log of an integer. It is only used
  2377. on objects that are know to be powers of two.
  2378. Arguments:
  2379. Value - The value to take the base 2 log of.
  2380. Return Value:
  2381. UCHAR - The base 2 log of Value.
  2382. --*/
  2383. {
  2384. UCHAR Log = 0;
  2385. DebugTrace(+1, Dbg, "LogOf\n", 0);
  2386. DebugTrace( 0, Dbg, " Value = %8lx\n", Value);
  2387. //
  2388. // Knock bits off until we we get a one at position 0
  2389. //
  2390. while ( (Value & 0xfffffffe) != 0 ) {
  2391. Log++;
  2392. Value >>= 1;
  2393. }
  2394. //
  2395. // If there was more than one bit set, the file system messed up,
  2396. // Bug Check.
  2397. //
  2398. if (Value != 0x1) {
  2399. DebugTrace( 0, Dbg, "Received non power of 2.\n", 0);
  2400. RxBugCheck( Value, Log, 0 );
  2401. }
  2402. DebugTrace(-1, Dbg, "LogOf -> %8lx\n", Log);
  2403. return Log;
  2404. }