Leaked source code of Windows Server 2003

  1. /*++
  2. Copyright (c) 1990 Microsoft Corporation
  3. Module Name:
  4. flushsec.c
  5. Abstract:
  6. This module contains the routines which implement the
  7. NtFlushVirtualMemory service.
  8. Author:
  9. Lou Perazzoli (loup) 8-May-1990
  10. Landy Wang (landyw) 02-June-1997
  11. Revision History:
  12. --*/
  13. #include "mi.h"
  14. PSUBSECTION
  15. MiGetSystemCacheSubsection (
  16. IN PVOID BaseAddress,
  17. OUT PMMPTE *ProtoPte
  18. );
  19. VOID
  20. MiFlushDirtyBitsToPfn (
  21. IN PMMPTE PointerPte,
  22. IN PMMPTE LastPte,
  23. IN PEPROCESS Process,
  24. IN BOOLEAN SystemCache
  25. );
  26. #ifdef ALLOC_PRAGMA
  27. #pragma alloc_text(PAGE,NtFlushVirtualMemory)
  28. #pragma alloc_text(PAGE,MmFlushVirtualMemory)
  29. #endif
  30. extern POBJECT_TYPE IoFileObjectType;
  31. NTSTATUS
  32. NtFlushVirtualMemory (
  33. IN HANDLE ProcessHandle,
  34. IN OUT PVOID *BaseAddress,
  35. IN OUT PSIZE_T RegionSize,
  36. OUT PIO_STATUS_BLOCK IoStatus
  37. )
  38. /*++
  39. Routine Description:
40. This function flushes a range of virtual addresses which map
  41. a data file back into the data file if they have been modified.
  42. Arguments:
  43. ProcessHandle - Supplies an open handle to a process object.
  44. BaseAddress - Supplies a pointer to a variable that will receive
45. the base address of the flushed region. The initial value
  46. of this argument is the base address of the region of the
  47. pages to flush.
  48. RegionSize - Supplies a pointer to a variable that will receive
  49. the actual size in bytes of the flushed region of pages.
  50. The initial value of this argument is rounded up to the
  51. next host-page-size boundary.
  52. If this value is specified as zero, the mapped range from
  53. the base address to the end of the range is flushed.
  54. IoStatus - Returns the value of the IoStatus for the last attempted
  55. I/O operation.
  56. Return Value:
  57. Returns the status
  58. TBS
  59. --*/
  60. {
  61. PEPROCESS Process;
  62. KPROCESSOR_MODE PreviousMode;
  63. NTSTATUS Status;
  64. PVOID CapturedBase;
  65. SIZE_T CapturedRegionSize;
  66. IO_STATUS_BLOCK TemporaryIoStatus;
  67. PAGED_CODE();
  68. PreviousMode = KeGetPreviousMode();
  69. if (PreviousMode != KernelMode) {
  70. //
  71. // Establish an exception handler, probe the specified addresses
  72. // for write access and capture the initial values.
  73. //
  74. try {
  75. ProbeForWritePointer (BaseAddress);
  76. ProbeForWriteUlong_ptr (RegionSize);
  77. ProbeForWriteIoStatus (IoStatus);
  78. //
  79. // Capture the base address.
  80. //
  81. CapturedBase = *BaseAddress;
  82. //
  83. // Capture the region size.
  84. //
  85. CapturedRegionSize = *RegionSize;
  86. } except (EXCEPTION_EXECUTE_HANDLER) {
  87. //
  88. // If an exception occurs during the probe or capture
  89. // of the initial values, then handle the exception and
  90. // return the exception code as the status value.
  91. //
  92. return GetExceptionCode();
  93. }
  94. }
  95. else {
  96. //
  97. // Capture the base address.
  98. //
  99. CapturedBase = *BaseAddress;
  100. //
  101. // Capture the region size.
  102. //
  103. CapturedRegionSize = *RegionSize;
  104. }
  105. //
  106. // Make sure the specified starting and ending addresses are
  107. // within the user part of the virtual address space.
  108. //
  109. if (CapturedBase > MM_HIGHEST_USER_ADDRESS) {
  110. //
  111. // Invalid base address.
  112. //
  113. return STATUS_INVALID_PARAMETER_2;
  114. }
  115. if (((ULONG_PTR)MM_HIGHEST_USER_ADDRESS - (ULONG_PTR)CapturedBase) <
  116. CapturedRegionSize) {
  117. //
  118. // Invalid region size;
  119. //
  120. return STATUS_INVALID_PARAMETER_2;
  121. }
  122. Status = ObReferenceObjectByHandle ( ProcessHandle,
  123. PROCESS_VM_OPERATION,
  124. PsProcessType,
  125. PreviousMode,
  126. (PVOID *)&Process,
  127. NULL );
  128. if (!NT_SUCCESS(Status)) {
  129. return Status;
  130. }
  131. Status = MmFlushVirtualMemory (Process,
  132. &CapturedBase,
  133. &CapturedRegionSize,
  134. &TemporaryIoStatus);
  135. ObDereferenceObject (Process);
  136. //
  137. // Establish an exception handler and write the size and base
  138. // address.
  139. //
  140. try {
  141. *RegionSize = CapturedRegionSize;
  142. *BaseAddress = PAGE_ALIGN (CapturedBase);
  143. *IoStatus = TemporaryIoStatus;
  144. } except (EXCEPTION_EXECUTE_HANDLER) {
  145. }
  146. return Status;
  147. }
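/*
   Illustrative sketch (not part of the original flushsec.c): a minimal
   user-mode caller for this path. kernel32's FlushViewOfFile issues
   NtFlushVirtualMemory on the current process for the given base address
   and length; the file name below is an arbitrary example and error
   handling is kept minimal.
*/
#include <windows.h>
#include <stdio.h>

int main (void)
{
    HANDLE File;
    HANDLE Mapping;
    char *View;

    File = CreateFileA ("example.dat",
                        GENERIC_READ | GENERIC_WRITE,
                        0,
                        NULL,
                        OPEN_EXISTING,
                        FILE_ATTRIBUTE_NORMAL,
                        NULL);

    if (File == INVALID_HANDLE_VALUE) {
        return 1;
    }

    Mapping = CreateFileMappingA (File, NULL, PAGE_READWRITE, 0, 0, NULL);
    View = (Mapping != NULL) ?
               (char *) MapViewOfFile (Mapping, FILE_MAP_WRITE, 0, 0, 0) : NULL;

    if (View != NULL) {

        //
        // Dirty a page, then ask the memory manager to write the modified
        // pages back to the backing file (NtFlushVirtualMemory underneath).
        // A length of zero flushes from the base address to the end of the
        // mapping, mirroring the RegionSize == 0 case documented above.
        //
        View[0] = 'x';

        if (!FlushViewOfFile (View, 0)) {
            printf ("FlushViewOfFile failed %lu\n", GetLastError ());
        }

        UnmapViewOfFile (View);
    }

    if (Mapping != NULL) {
        CloseHandle (Mapping);
    }
    CloseHandle (File);
    return 0;
}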
  148. VOID
  149. MiFlushAcquire (
  150. IN PCONTROL_AREA ControlArea
  151. )
  152. /*++
  153. Routine Description:
  154. This is a helper routine to reference count the control area if needed
  155. during a flush section call to prevent the section object from being
  156. deleted while the flush is ongoing.
  157. Arguments:
  158. ControlArea - Supplies a pointer to the control area.
  159. Return Value:
  160. None.
  161. --*/
  162. {
  163. KIRQL OldIrql;
  164. LOCK_PFN (OldIrql);
  165. ASSERT ((LONG)ControlArea->NumberOfMappedViews >= 1);
  166. ControlArea->NumberOfMappedViews += 1;
  167. UNLOCK_PFN (OldIrql);
  168. }
  169. VOID
  170. MiFlushRelease (
  171. IN PCONTROL_AREA ControlArea
  172. )
  173. /*++
  174. Routine Description:
  175. This is a helper routine to release the control area reference needed
  176. during a flush section call.
  177. Arguments:
  178. ControlArea - Supplies a pointer to the control area.
  179. Return Value:
  180. None.
  181. --*/
  182. {
  183. KIRQL OldIrql;
  184. LOCK_PFN (OldIrql);
  185. ASSERT ((LONG)ControlArea->NumberOfMappedViews >= 1);
  186. ControlArea->NumberOfMappedViews -= 1;
  187. //
  188. // Check to see if the control area should be deleted. This
  189. // will release the PFN lock.
  190. //
  191. MiCheckControlArea (ControlArea, NULL, OldIrql);
  192. }
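/*
   Illustrative sketch (not part of the original flushsec.c): a standalone
   model of the pin/unpin pattern that MiFlushAcquire and MiFlushRelease
   implement on ControlArea->NumberOfMappedViews under the PFN lock. The
   type and function names below are inventions of this sketch, not NT
   definitions, and the locking is omitted.
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct _EXAMPLE_AREA {
    long MappedViews;               /* models ControlArea->NumberOfMappedViews */
} EXAMPLE_AREA;

static void ExampleFlushAcquire (EXAMPLE_AREA *Area)
{
    /* The caller's mapped view guarantees at least one existing reference. */
    assert (Area->MappedViews >= 1);
    Area->MappedViews += 1;
}

static void ExampleFlushRelease (EXAMPLE_AREA *Area)
{
    assert (Area->MappedViews >= 1);
    Area->MappedViews -= 1;

    if (Area->MappedViews == 0) {
        /* Models MiCheckControlArea tearing the control area down at zero. */
        printf ("last reference released - control area deleted\n");
        free (Area);
    }
}

int main (void)
{
    EXAMPLE_AREA *Area = malloc (sizeof *Area);

    if (Area == NULL) {
        return 1;
    }

    Area->MappedViews = 1;          /* the view being flushed */

    ExampleFlushAcquire (Area);     /* pin across the flush */
    /* ... the prototype PTE walk and write-back would happen here ... */
    ExampleFlushRelease (Area);     /* unpin */

    ExampleFlushRelease (Area);     /* the view itself goes away: count hits 0 */
    return 0;
}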
  193. NTSTATUS
  194. MmFlushVirtualMemory (
  195. IN PEPROCESS Process,
  196. IN OUT PVOID *BaseAddress,
  197. IN OUT PSIZE_T RegionSize,
  198. OUT PIO_STATUS_BLOCK IoStatus
  199. )
  200. /*++
  201. Routine Description:
202. This function flushes a range of virtual addresses which map
  203. a data file back into the data file if they have been modified.
204. Note that the modification is this process's view of the pages;
  205. on certain implementations (like the Intel 386), the modify
  206. bit is captured in the PTE and not forced to the PFN database
  207. until the page is removed from the working set. This means
  208. that pages which have been modified by another process will
  209. not be flushed to the data file.
  210. Arguments:
  211. Process - Supplies a pointer to a process object.
  212. BaseAddress - Supplies a pointer to a variable that will receive
  213. the base address of the flushed region. The initial value
  214. of this argument is the base address of the region of the
  215. pages to flush.
  216. RegionSize - Supplies a pointer to a variable that will receive
  217. the actual size in bytes of the flushed region of pages.
  218. The initial value of this argument is rounded up to the
  219. next host-page-size boundary.
  220. If this value is specified as zero, the mapped range from
  221. the base address to the end of the range is flushed.
  222. IoStatus - Returns the value of the IoStatus for the last attempted
  223. I/O operation.
  224. Return Value:
  225. NTSTATUS.
  226. --*/
  227. {
  228. PMMVAD Vad;
  229. PVOID EndingAddress;
  230. PVOID Va;
  231. PEPROCESS CurrentProcess;
  232. BOOLEAN SystemCache;
  233. PCONTROL_AREA ControlArea;
  234. PMMPTE PointerPte;
  235. PMMPTE PointerPde;
  236. PMMPTE PointerPpe;
  237. PMMPTE PointerPxe;
  238. PMMPTE LastPte;
  239. PMMPTE FinalPte;
  240. PSUBSECTION Subsection;
  241. PSUBSECTION LastSubsection;
  242. NTSTATUS Status;
  243. ULONG ConsecutiveFileLockFailures;
  244. ULONG Waited;
  245. LOGICAL EntireRestOfVad;
  246. LOGICAL Attached;
  247. KAPC_STATE ApcState;
  248. PAGED_CODE();
  249. Attached = FALSE;
  250. //
  251. // Determine if the specified base address is within the system
  252. // cache and if so, don't attach, the working set mutex is still
  253. // required to "lock" paged pool pages (proto PTEs) into the
  254. // working set.
  255. //
  256. EndingAddress = (PVOID)(((ULONG_PTR)*BaseAddress + *RegionSize - 1) |
  257. (PAGE_SIZE - 1));
  258. *BaseAddress = PAGE_ALIGN (*BaseAddress);
  259. if (MI_IS_SESSION_ADDRESS (*BaseAddress)) {
  260. //
  261. // Nothing in session space needs flushing.
  262. //
  263. return STATUS_NOT_MAPPED_VIEW;
  264. }
  265. CurrentProcess = PsGetCurrentProcess ();
  266. if (!MI_IS_SYSTEM_CACHE_ADDRESS(*BaseAddress)) {
  267. SystemCache = FALSE;
  268. //
  269. // Attach to the specified process.
  270. //
  271. if (CurrentProcess != Process) {
  272. KeStackAttachProcess (&Process->Pcb, &ApcState);
  273. Attached = TRUE;
  274. }
  275. LOCK_ADDRESS_SPACE (Process);
  276. //
  277. // Make sure the address space was not deleted, if so, return an error.
  278. //
  279. if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
  280. Status = STATUS_PROCESS_IS_TERMINATING;
  281. goto ErrorReturn;
  282. }
  283. Vad = MiLocateAddress (*BaseAddress);
  284. if (Vad == NULL) {
  285. //
  286. // No Virtual Address Descriptor located for Base Address.
  287. //
  288. Status = STATUS_NOT_MAPPED_VIEW;
  289. goto ErrorReturn;
  290. }
  291. if (*RegionSize == 0) {
  292. EndingAddress = MI_VPN_TO_VA_ENDING (Vad->EndingVpn);
  293. EntireRestOfVad = TRUE;
  294. }
  295. else {
  296. EntireRestOfVad = FALSE;
  297. }
  298. if ((Vad->u.VadFlags.PrivateMemory == 1) ||
  299. (MI_VA_TO_VPN (EndingAddress) > Vad->EndingVpn)) {
  300. //
  301. // This virtual address descriptor does not refer to a Segment
  302. // object.
  303. //
  304. Status = STATUS_NOT_MAPPED_VIEW;
  305. goto ErrorReturn;
  306. }
  307. //
  308. // Make sure this VAD maps a data file (not an image file).
  309. //
  310. ControlArea = Vad->ControlArea;
  311. if ((ControlArea->FilePointer == NULL) ||
  312. (Vad->u.VadFlags.ImageMap == 1)) {
  313. //
  314. // This virtual address descriptor does not refer to a Segment
  315. // object.
  316. //
  317. Status = STATUS_NOT_MAPPED_DATA;
  318. goto ErrorReturn;
  319. }
  320. LOCK_WS_UNSAFE (Process);
  321. }
  322. else {
  323. SATISFY_OVERZEALOUS_COMPILER (Vad = NULL);
  324. SATISFY_OVERZEALOUS_COMPILER (ControlArea = NULL);
  325. SATISFY_OVERZEALOUS_COMPILER (EntireRestOfVad = FALSE);
  326. SystemCache = TRUE;
  327. Process = CurrentProcess;
  328. LOCK_WS (Process);
  329. }
  330. PointerPxe = MiGetPxeAddress (*BaseAddress);
  331. PointerPpe = MiGetPpeAddress (*BaseAddress);
  332. PointerPde = MiGetPdeAddress (*BaseAddress);
  333. PointerPte = MiGetPteAddress (*BaseAddress);
  334. LastPte = MiGetPteAddress (EndingAddress);
  335. *RegionSize = (PCHAR)EndingAddress - (PCHAR)*BaseAddress + 1;
  336. retry:
  337. while (!MiDoesPxeExistAndMakeValid (PointerPxe, Process, MM_NOIRQL, &Waited)) {
  338. //
  339. // This page directory parent entry is empty, go to the next one.
  340. //
  341. PointerPxe += 1;
  342. PointerPpe = MiGetVirtualAddressMappedByPte (PointerPxe);
  343. PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe);
  344. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  345. Va = MiGetVirtualAddressMappedByPte (PointerPte);
  346. if (PointerPte > LastPte) {
  347. break;
  348. }
  349. }
  350. while (!MiDoesPpeExistAndMakeValid (PointerPpe, Process, MM_NOIRQL, &Waited)) {
  351. //
  352. // This page directory parent entry is empty, go to the next one.
  353. //
  354. PointerPpe += 1;
  355. PointerPxe = MiGetPteAddress (PointerPpe);
  356. PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe);
  357. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  358. Va = MiGetVirtualAddressMappedByPte (PointerPte);
  359. if (PointerPte > LastPte) {
  360. break;
  361. }
  362. #if (_MI_PAGING_LEVELS >= 4)
  363. if (MiIsPteOnPdeBoundary (PointerPpe)) {
  364. goto retry;
  365. }
  366. #endif
  367. }
  368. Waited = 0;
  369. if (PointerPte <= LastPte) {
  370. while (!MiDoesPdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL, &Waited)) {
  371. //
  372. // No page table page exists for this address.
  373. //
  374. PointerPde += 1;
  375. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  376. if (PointerPte > LastPte) {
  377. break;
  378. }
  379. #if (_MI_PAGING_LEVELS >= 3)
  380. if (MiIsPteOnPdeBoundary (PointerPde)) {
  381. if (MiIsPteOnPpeBoundary (PointerPde)) {
  382. PointerPxe = MiGetPdeAddress (PointerPde);
  383. }
  384. PointerPpe = MiGetPteAddress (PointerPde);
  385. goto retry;
  386. }
  387. #endif
  388. Va = MiGetVirtualAddressMappedByPte (PointerPte);
  389. }
  390. //
  391. // If the PFN lock (and accordingly the WS mutex) was
  392. // released and reacquired we must retry the operation.
  393. //
  394. if ((PointerPte <= LastPte) && (Waited != 0)) {
  395. goto retry;
  396. }
  397. }
  398. MiFlushDirtyBitsToPfn (PointerPte, LastPte, Process, SystemCache);
  399. if (SystemCache) {
  400. //
  401. // No VADs exist for the system cache.
  402. //
  403. UNLOCK_WS (Process);
  404. Subsection = MiGetSystemCacheSubsection (*BaseAddress, &PointerPte);
  405. LastSubsection = MiGetSystemCacheSubsection (EndingAddress, &FinalPte);
  406. //
  407. // Flush the PTEs from the specified section.
  408. //
  409. Status = MiFlushSectionInternal (PointerPte,
  410. FinalPte,
  411. Subsection,
  412. LastSubsection,
  413. FALSE,
  414. TRUE,
  415. IoStatus);
  416. }
  417. else {
  418. //
  419. // Protect against the section being prematurely deleted.
  420. //
  421. MiFlushAcquire (ControlArea);
  422. PointerPte = MiGetProtoPteAddress (Vad, MI_VA_TO_VPN (*BaseAddress));
  423. Subsection = MiLocateSubsection (Vad, MI_VA_TO_VPN(*BaseAddress));
  424. LastSubsection = MiLocateSubsection (Vad, MI_VA_TO_VPN(EndingAddress));
  425. //
  426. // The last subsection is NULL if the section is not fully
  427. // committed. Only allow the flush if the caller said do the whole
  428. // thing, otherwise it's an error.
  429. //
  430. if (LastSubsection == NULL) {
  431. if (EntireRestOfVad == FALSE) {
  432. //
  433. // Caller can only specify the range that is committed or zero
  434. // to indicate the entire range.
  435. //
  436. UNLOCK_WS_AND_ADDRESS_SPACE (Process);
  437. if (Attached == TRUE) {
  438. KeUnstackDetachProcess (&ApcState);
  439. }
  440. MiFlushRelease (ControlArea);
  441. return STATUS_NOT_MAPPED_VIEW;
  442. }
  443. LastSubsection = Subsection;
  444. while (LastSubsection->NextSubsection) {
  445. LastSubsection = LastSubsection->NextSubsection;
  446. }
  447. //
  448. // A memory barrier is needed to read the subsection chains
  449. // in order to ensure the writes to the actual individual
  450. // subsection data structure fields are visible in correct
  451. // order. This avoids the need to acquire any stronger
  452. // synchronization (ie: PFN lock), thus yielding better
  453. // performance and pagability.
  454. //
  455. KeMemoryBarrier ();
  456. FinalPte = LastSubsection->SubsectionBase + LastSubsection->PtesInSubsection - 1;
  457. }
  458. else {
  459. FinalPte = MiGetProtoPteAddress (Vad, MI_VA_TO_VPN (EndingAddress));
  460. }
  461. UNLOCK_WS_AND_ADDRESS_SPACE (Process);
  462. if (Attached == TRUE) {
  463. KeUnstackDetachProcess (&ApcState);
  464. }
  465. //
  466. // Preacquire the file to synchronize the flush.
  467. //
  468. ConsecutiveFileLockFailures = 0;
  469. do {
  470. Status = FsRtlAcquireFileForCcFlushEx (ControlArea->FilePointer);
  471. if (!NT_SUCCESS(Status)) {
  472. break;
  473. }
  474. //
  475. // Flush the PTEs from the specified section.
  476. //
  477. Status = MiFlushSectionInternal (PointerPte,
  478. FinalPte,
  479. Subsection,
  480. LastSubsection,
  481. TRUE,
  482. TRUE,
  483. IoStatus);
  484. //
  485. // Release the file we acquired.
  486. //
  487. FsRtlReleaseFileForCcFlush (ControlArea->FilePointer);
  488. //
  489. // Only try the request more than once if the filesystem told us
  490. // it had a deadlock.
  491. //
  492. if (Status != STATUS_FILE_LOCK_CONFLICT) {
  493. break;
  494. }
  495. ConsecutiveFileLockFailures += 1;
  496. KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmShortTime);
  497. } while (ConsecutiveFileLockFailures < 5);
  498. MiFlushRelease (ControlArea);
  499. }
  500. return Status;
  501. ErrorReturn:
  502. ASSERT (SystemCache == FALSE);
  503. UNLOCK_ADDRESS_SPACE (Process);
  504. if (Attached == TRUE) {
  505. KeUnstackDetachProcess (&ApcState);
  506. }
  507. return Status;
  508. }
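/*
   Illustrative sketch (not part of the original flushsec.c): the rounding
   MmFlushVirtualMemory applies to *BaseAddress and *RegionSize on entry,
   reproduced as standalone arithmetic for a 4K page. The base is truncated
   down to a page boundary, the ending address is rounded up to the last
   byte of its page, and the returned size therefore covers whole pages.
   The example values are arbitrary.
*/
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 0x1000u

int main (void)
{
    uintptr_t Base = 0x12345;       /* caller-supplied base address */
    size_t    Size = 0x1000;        /* caller-supplied region size */

    uintptr_t Ending      = (Base + Size - 1) | (EXAMPLE_PAGE_SIZE - 1);
    uintptr_t AlignedBase = Base & ~((uintptr_t)EXAMPLE_PAGE_SIZE - 1);
    size_t    Flushed     = (size_t)(Ending - AlignedBase) + 1;

    /* Prints base=0x12000 ending=0x13fff size=0x2000: two whole pages. */
    printf ("base=%#zx ending=%#zx size=%#zx\n",
            (size_t)AlignedBase, (size_t)Ending, Flushed);
    return 0;
}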
  509. NTSTATUS
  510. MmFlushSection (
  511. IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
  512. IN PLARGE_INTEGER Offset,
  513. IN SIZE_T RegionSize,
  514. OUT PIO_STATUS_BLOCK IoStatus,
  515. IN ULONG AcquireFile
  516. )
  517. /*++
  518. Routine Description:
  519. This function flushes to the backing file any modified pages within
  520. the specified range of the section.
  521. Arguments:
  522. SectionObjectPointer - Supplies a pointer to the section objects.
  523. Offset - Supplies the offset into the section in which to begin
  524. flushing pages. If this argument is not present, then the
  525. whole section is flushed without regard to the region size
  526. argument.
  527. RegionSize - Supplies the size in bytes to flush. This is rounded
  528. to a page multiple.
  529. IoStatus - Returns the value of the IoStatus for the last attempted
  530. I/O operation.
  531. AcquireFile - Nonzero if the callback should be used to acquire the file.
  532. Return Value:
  533. Returns status of the operation.
  534. --*/
  535. {
  536. PCONTROL_AREA ControlArea;
  537. PMMPTE PointerPte;
  538. PMMPTE LastPte;
  539. KIRQL OldIrql;
  540. UINT64 PteOffset;
  541. UINT64 LastPteOffset;
  542. PSUBSECTION Subsection;
  543. PSUBSECTION TempSubsection;
  544. PSUBSECTION LastSubsection;
  545. PSUBSECTION LastSubsectionWithProtos;
  546. PMAPPED_FILE_SEGMENT Segment;
  547. PETHREAD CurrentThread;
  548. NTSTATUS status;
  549. BOOLEAN OldClusterState;
  550. ULONG ConsecutiveFileLockFailures;
  551. //
  552. // Initialize IoStatus for success, in case we take an early exit.
  553. //
  554. IoStatus->Status = STATUS_SUCCESS;
  555. IoStatus->Information = RegionSize;
  556. LOCK_PFN (OldIrql);
  557. ControlArea = ((PCONTROL_AREA)(SectionObjectPointer->DataSectionObject));
  558. ASSERT ((ControlArea == NULL) || (ControlArea->u.Flags.Image == 0));
  559. if ((ControlArea == NULL) ||
  560. (ControlArea->u.Flags.BeingDeleted) ||
  561. (ControlArea->u.Flags.BeingCreated) ||
  562. (ControlArea->u.Flags.Rom) ||
  563. (ControlArea->NumberOfPfnReferences == 0)) {
  564. //
  565. // This file no longer has an associated segment or is in the
  566. // process of coming or going.
  567. // If the number of PFN references is zero, then this control
  568. // area does not have any valid or transition pages that need
  569. // to be flushed.
  570. //
  571. UNLOCK_PFN (OldIrql);
  572. return STATUS_SUCCESS;
  573. }
  574. //
  575. // Locate the subsection.
  576. //
  577. ASSERT (ControlArea->u.Flags.Image == 0);
  578. ASSERT (ControlArea->u.Flags.GlobalOnlyPerSession == 0);
  579. ASSERT (ControlArea->u.Flags.PhysicalMemory == 0);
  580. Subsection = (PSUBSECTION)(ControlArea + 1);
  581. if (!ARGUMENT_PRESENT (Offset)) {
  582. //
  583. // If the offset is not specified, flush the complete file ignoring
  584. // the region size.
  585. //
  586. ASSERT (ControlArea->FilePointer != NULL);
  587. PteOffset = 0;
  588. LastSubsection = Subsection;
  589. Segment = (PMAPPED_FILE_SEGMENT) ControlArea->Segment;
  590. if (MiIsAddressValid (Segment, TRUE)) {
  591. if (Segment->LastSubsectionHint != NULL) {
  592. LastSubsection = (PSUBSECTION) Segment->LastSubsectionHint;
  593. }
  594. }
  595. while (LastSubsection->NextSubsection != NULL) {
  596. LastSubsection = LastSubsection->NextSubsection;
  597. }
  598. LastPteOffset = LastSubsection->PtesInSubsection - 1;
  599. }
  600. else {
  601. PteOffset = (UINT64)(Offset->QuadPart >> PAGE_SHIFT);
  602. //
  603. // Make sure the PTEs are not in the extended part of the segment.
  604. //
  605. while (PteOffset >= (UINT64) Subsection->PtesInSubsection) {
  606. PteOffset -= Subsection->PtesInSubsection;
  607. if (Subsection->NextSubsection == NULL) {
  608. //
  609. // Past end of mapping, just return success.
  610. //
  611. UNLOCK_PFN (OldIrql);
  612. return STATUS_SUCCESS;
  613. }
  614. Subsection = Subsection->NextSubsection;
  615. }
  616. ASSERT (PteOffset < (UINT64) Subsection->PtesInSubsection);
  617. //
  618. // Locate the address of the last prototype PTE to be flushed.
  619. //
  620. LastPteOffset = PteOffset + (((RegionSize + BYTE_OFFSET(Offset->LowPart)) - 1) >> PAGE_SHIFT);
  621. LastSubsection = Subsection;
  622. while (LastPteOffset >= (UINT64) LastSubsection->PtesInSubsection) {
  623. LastPteOffset -= LastSubsection->PtesInSubsection;
  624. if (LastSubsection->NextSubsection == NULL) {
  625. LastPteOffset = LastSubsection->PtesInSubsection - 1;
  626. break;
  627. }
  628. LastSubsection = LastSubsection->NextSubsection;
  629. }
  630. ASSERT (LastPteOffset < LastSubsection->PtesInSubsection);
  631. }
  632. //
  633. // Try for the fast reference on the first and last subsection.
  634. // If that cannot be gotten, then there are no prototype PTEs for this
  635. // subsection, therefore there is nothing in it to flush so leap forwards.
  636. //
  637. // Note that subsections in between do not need referencing as
  638. // MiFlushSectionInternal is smart enough to skip them if they're
  639. // nonresident.
  640. //
  641. if (MiReferenceSubsection ((PMSUBSECTION)Subsection) == FALSE) {
  642. do {
  643. //
  644. // If this increment would put us past the end offset, then nothing
  645. // to flush, just return success.
  646. //
  647. if (Subsection == LastSubsection) {
  648. UNLOCK_PFN (OldIrql);
  649. return STATUS_SUCCESS;
  650. }
  651. Subsection = Subsection->NextSubsection;
  652. //
  653. // If this increment put us past the end of section, then nothing
  654. // to flush, just return success.
  655. //
  656. if (Subsection == NULL) {
  657. UNLOCK_PFN (OldIrql);
  658. return STATUS_SUCCESS;
  659. }
  660. if ((PMSUBSECTION)Subsection->SubsectionBase == NULL) {
  661. continue;
  662. }
  663. if (MiReferenceSubsection ((PMSUBSECTION)Subsection) == FALSE) {
  664. continue;
  665. }
  666. //
  667. // Start the flush at this subsection which is now referenced.
  668. //
  669. PointerPte = &Subsection->SubsectionBase[0];
  670. break;
  671. } while (TRUE);
  672. }
  673. else {
  674. PointerPte = &Subsection->SubsectionBase[PteOffset];
  675. }
  676. ASSERT (Subsection->SubsectionBase != NULL);
  677. //
  678. // The first subsection is referenced, now reference count the last one.
  679. // If the first is the last, just double reference it anyway as it
  680. // simplifies cleanup later.
  681. //
  682. if (MiReferenceSubsection ((PMSUBSECTION)LastSubsection) == FALSE) {
  683. ASSERT (Subsection != LastSubsection);
  684. TempSubsection = Subsection->NextSubsection;
  685. LastSubsectionWithProtos = NULL;
  686. while (TempSubsection != LastSubsection) {
  687. //
  688. // If this increment put us past the end of section, then nothing
  689. // to flush, just return success.
  690. //
  691. ASSERT (TempSubsection != NULL);
  692. if ((PMSUBSECTION)TempSubsection->SubsectionBase != NULL) {
  693. LastSubsectionWithProtos = TempSubsection;
  694. }
  695. TempSubsection = TempSubsection->NextSubsection;
  696. }
  697. //
  698. // End the flush at this subsection and reference it.
  699. //
  700. if (LastSubsectionWithProtos == NULL) {
  701. ASSERT (Subsection != NULL);
  702. ASSERT (Subsection->SubsectionBase != NULL);
  703. TempSubsection = Subsection;
  704. }
  705. else {
  706. TempSubsection = LastSubsectionWithProtos;
  707. }
  708. if (MiReferenceSubsection ((PMSUBSECTION)TempSubsection) == FALSE) {
  709. ASSERT (FALSE);
  710. }
  711. ASSERT (TempSubsection->SubsectionBase != NULL);
  712. LastSubsection = TempSubsection;
  713. LastPteOffset = LastSubsection->PtesInSubsection - 1;
  714. }
  715. //
  716. // Up the map view count so the control area cannot be deleted
  717. // out from under the call.
  718. //
  719. ControlArea->NumberOfMappedViews += 1;
  720. UNLOCK_PFN (OldIrql);
  721. //
  722. // End the flush at this subsection which is now referenced.
  723. //
  724. LastPte = &LastSubsection->SubsectionBase[LastPteOffset];
  725. CurrentThread = PsGetCurrentThread();
  726. //
  727. // Indicate that disk verify errors should be returned as exceptions.
  728. //
  729. OldClusterState = CurrentThread->ForwardClusterOnly;
  730. CurrentThread->ForwardClusterOnly = TRUE;
  731. //
  732. // Preacquire the file if we are going to synchronize the flush.
  733. //
  734. if (AcquireFile == 0) {
  735. //
  736. // Flush the PTEs from the specified section.
  737. //
  738. status = MiFlushSectionInternal (PointerPte,
  739. LastPte,
  740. Subsection,
  741. LastSubsection,
  742. TRUE,
  743. TRUE,
  744. IoStatus);
  745. }
  746. else {
  747. ConsecutiveFileLockFailures = 0;
  748. do {
  749. status = FsRtlAcquireFileForCcFlushEx (ControlArea->FilePointer);
  750. if (!NT_SUCCESS(status)) {
  751. break;
  752. }
  753. //
  754. // Flush the PTEs from the specified section.
  755. //
  756. status = MiFlushSectionInternal (PointerPte,
  757. LastPte,
  758. Subsection,
  759. LastSubsection,
  760. TRUE,
  761. TRUE,
  762. IoStatus);
  763. //
  764. // Release the file we acquired.
  765. //
  766. FsRtlReleaseFileForCcFlush (ControlArea->FilePointer);
  767. //
  768. // Only try the request more than once if the filesystem told us
  769. // it had a deadlock.
  770. //
  771. if (status != STATUS_FILE_LOCK_CONFLICT) {
  772. break;
  773. }
  774. ConsecutiveFileLockFailures += 1;
  775. KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmShortTime);
  776. } while (ConsecutiveFileLockFailures < 5);
  777. }
  778. CurrentThread->ForwardClusterOnly = OldClusterState;
  779. LOCK_PFN (OldIrql);
  780. MiDecrementSubsections (Subsection, Subsection);
  781. MiDecrementSubsections (LastSubsection, LastSubsection);
  782. ASSERT ((LONG)ControlArea->NumberOfMappedViews >= 1);
  783. ControlArea->NumberOfMappedViews -= 1;
  784. //
  785. // Check to see if the control area should be deleted. This
  786. // will release the PFN lock.
  787. //
  788. MiCheckControlArea (ControlArea, NULL, OldIrql);
  789. return status;
  790. }
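/*
   Illustrative sketch (not part of the original flushsec.c): how a
   filesystem or filter driver typically reaches MmFlushSection. The
   documented route is CcFlushCache on the file's section object pointers;
   the function name and the 64K length below are arbitrary choices of
   this sketch.
*/
#include <ntifs.h>

VOID
ExampleFlushFirst64K (
    IN PFILE_OBJECT FileObject
    )
{
    IO_STATUS_BLOCK IoStatus;
    LARGE_INTEGER Offset;

    Offset.QuadPart = 0;

    //
    // Flush the first 64K of cached/mapped data for this file. This
    // drives the same prototype PTE walk performed by MmFlushSection
    // and MiFlushSectionInternal above.
    //
    CcFlushCache (FileObject->SectionObjectPointer,
                  &Offset,
                  64 * 1024,
                  &IoStatus);

    if (!NT_SUCCESS (IoStatus.Status)) {
        //
        // On failure the Information field cannot be trusted (see the
        // error path in MiFlushSectionInternal).
        //
        KdPrint (("ExampleFlushFirst64K: flush failed %08X\n", IoStatus.Status));
    }
}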
  791. LONGLONG
  792. MiStartingOffset(
  793. IN PSUBSECTION Subsection,
  794. IN PMMPTE PteAddress
  795. )
  796. /*++
  797. Routine Description:
  798. This function calculates the file offset given a subsection and a PTE
  799. offset. Note that images are stored in 512-byte units whereas data is
  800. stored in 4K units.
  801. When this is all debugged, this should be made into a macro.
  802. Arguments:
  803. Subsection - Supplies a subsection to reference for the file address.
  804. PteAddress - Supplies a PTE within the subsection
  805. Return Value:
  806. Returns the file offset to obtain the backing data from.
  807. --*/
  808. {
  809. LONGLONG PteByteOffset;
  810. LARGE_INTEGER StartAddress;
  811. if (Subsection->ControlArea->u.Flags.Image == 1) {
  812. return MI_STARTING_OFFSET ( Subsection,
  813. PteAddress);
  814. }
  815. ASSERT (Subsection->SubsectionBase != NULL);
  816. PteByteOffset = (LONGLONG)((PteAddress - Subsection->SubsectionBase))
  817. << PAGE_SHIFT;
  818. Mi4KStartFromSubsection (&StartAddress, Subsection);
  819. StartAddress.QuadPart = StartAddress.QuadPart << MM4K_SHIFT;
  820. PteByteOffset += StartAddress.QuadPart;
  821. return PteByteOffset;
  822. }
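/*
   Illustrative sketch (not part of the original flushsec.c): the data-file
   branch of the arithmetic above, worked with standalone numbers. For data
   sections the subsection start is kept in 4K units (MM4K_SHIFT is 12), so
   the byte offset backing a prototype PTE is
   (subsection start in 4K units + PTE index within the subsection) << 12.
   The example values are arbitrary.
*/
#include <stdint.h>
#include <stdio.h>

int main (void)
{
    const unsigned PageShift = 12;          /* 4K pages / 4K file units */
    uint64_t SubsectionStart4K = 0x20;      /* subsection begins 128K into the file */
    uint64_t PteIndex = 5;                  /* sixth prototype PTE in the subsection */

    uint64_t FileOffset = (SubsectionStart4K << PageShift) +
                          (PteIndex << PageShift);

    /* Prints 0x25000: 128K plus five pages into the backing file. */
    printf ("file offset = %#llx\n", (unsigned long long) FileOffset);
    return 0;
}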
  823. LARGE_INTEGER
  824. MiEndingOffset(
  825. IN PSUBSECTION Subsection
  826. )
  827. /*++
  828. Routine Description:
  829. This function calculates the last valid file offset in a given subsection.
  830. offset. Note that images are stored in 512-byte units whereas data is
  831. stored in 4K units.
  832. When this is all debugged, this should be made into a macro.
  833. Arguments:
  834. Subsection - Supplies a subsection to reference for the file address.
836. Return Value:
837. Returns the ending file offset of the valid data in the subsection.
  838. --*/
  839. {
  840. LARGE_INTEGER FileByteOffset;
  841. if (Subsection->ControlArea->u.Flags.Image == 1) {
  842. FileByteOffset.QuadPart =
  843. ((UINT64)Subsection->StartingSector + (UINT64)Subsection->NumberOfFullSectors) <<
  844. MMSECTOR_SHIFT;
  845. }
  846. else {
  847. Mi4KStartFromSubsection (&FileByteOffset, Subsection);
  848. FileByteOffset.QuadPart += Subsection->NumberOfFullSectors;
  849. FileByteOffset.QuadPart = FileByteOffset.QuadPart << MM4K_SHIFT;
  850. }
  851. FileByteOffset.QuadPart += Subsection->u.SubsectionFlags.SectorEndOffset;
  852. return FileByteOffset;
  853. }
  854. NTSTATUS
  855. MiFlushSectionInternal (
  856. IN PMMPTE StartingPte,
  857. IN PMMPTE FinalPte,
  858. IN PSUBSECTION FirstSubsection,
  859. IN PSUBSECTION LastSubsection,
  860. IN ULONG Synchronize,
  861. IN LOGICAL WriteInProgressOk,
  862. OUT PIO_STATUS_BLOCK IoStatus
  863. )
  864. /*++
  865. Routine Description:
  866. This function flushes to the backing file any modified pages within
  867. the specified range of the section. The parameters describe the
  868. section's prototype PTEs (start and end) and the subsections
  869. which correspond to the starting and ending PTE.
  870. Each PTE in the subsection between the specified start and end
  871. is examined and if the page is either valid or transition AND
  872. the page has been modified, the modify bit is cleared in the PFN
  873. database and the page is flushed to its backing file.
  874. Arguments:
  875. StartingPte - Supplies a pointer to the first prototype PTE to
  876. be examined for flushing.
  877. FinalPte - Supplies a pointer to the last prototype PTE to be
  878. examined for flushing.
  879. FirstSubsection - Supplies the subsection that contains the
  880. StartingPte.
  881. LastSubsection - Supplies the subsection that contains the
  882. FinalPte.
  883. Synchronize - Supplies TRUE if synchronization with all threads
  884. doing flush operations to this section should occur.
  885. WriteInProgressOk - Supplies TRUE if the caller can tolerate a write
  886. already in progress for any dirty pages.
  887. IoStatus - Returns the value of the IoStatus for the last attempted
  888. I/O operation.
  889. Return Value:
  890. Returns status of the operation.
  891. --*/
  892. {
  893. LOGICAL DroppedPfnLock;
  894. PCONTROL_AREA ControlArea;
  895. PMMPTE PointerPte;
  896. PMMPTE LastPte;
  897. PMMPTE LastWritten;
  898. PMMPTE FirstWritten;
  899. MMPTE PteContents;
  900. PMMPFN Pfn1;
  901. PMMPFN Pfn2;
  902. KIRQL OldIrql;
  903. PMDL Mdl;
  904. KEVENT IoEvent;
  905. PSUBSECTION Subsection;
  906. PMSUBSECTION MappedSubsection;
  907. PPFN_NUMBER Page;
  908. PFN_NUMBER PageFrameIndex;
  909. PPFN_NUMBER LastPage;
  910. NTSTATUS Status;
  911. UINT64 StartingOffset;
  912. UINT64 TempOffset;
  913. LOGICAL WriteNow;
  914. LOGICAL Bail;
  915. PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + (MM_MAXIMUM_DISK_IO_SIZE / PAGE_SIZE) + 1];
  916. ULONG ReflushCount;
  917. ULONG MaxClusterSize;
  918. PFILE_OBJECT FilePointer;
  919. LOGICAL CurrentThreadIsDereferenceThread;
  920. //
  921. // WriteInProgressOk is only FALSE when the segment dereference thread is
  922. // doing a top-level flush just prior to cleaning the section or subsection.
  923. // Note that this flag may be TRUE even for the dereference thread because
  924. // the dereference thread calls filesystems who may then issue a flush.
  925. //
  926. if (WriteInProgressOk == FALSE) {
  927. CurrentThreadIsDereferenceThread = TRUE;
  928. ASSERT (PsGetCurrentThread()->StartAddress == (PVOID)(ULONG_PTR)MiDereferenceSegmentThread);
  929. }
  930. else {
  931. CurrentThreadIsDereferenceThread = FALSE;
  932. //
  933. // This may actually be the dereference thread as the segment deletion
  934. // dereferences the file object potentially calling the filesystem which
  935. // may then issue a CcFlushCache/MmFlushSection. For our purposes,
  936. // lower level flushes in this context are treated as though they
  937. // came from a different thread.
  938. //
  939. }
  940. WriteNow = FALSE;
  941. Bail = FALSE;
  942. IoStatus->Status = STATUS_SUCCESS;
  943. IoStatus->Information = 0;
  944. Mdl = (PMDL)&MdlHack[0];
  945. KeInitializeEvent (&IoEvent, NotificationEvent, FALSE);
  946. FinalPte += 1; // Point to 1 past the last one.
  947. FirstWritten = NULL;
  948. LastWritten = NULL;
  949. LastPage = 0;
  950. Subsection = FirstSubsection;
  951. PointerPte = StartingPte;
  952. ControlArea = FirstSubsection->ControlArea;
  953. FilePointer = ControlArea->FilePointer;
  954. ASSERT ((ControlArea->u.Flags.Image == 0) &&
  955. (FilePointer != NULL) &&
  956. (ControlArea->u.Flags.PhysicalMemory == 0));
  957. //
  958. // Initializing these is not needed for correctness
  959. // but without it the compiler cannot compile this code
  960. // W4 to check for use of uninitialized variables.
  961. //
  962. MappedSubsection = NULL;
  963. StartingOffset = 0;
  964. //
  965. // Try to cluster pages as long as the storage stack can handle it.
  966. //
  967. MaxClusterSize = MmModifiedWriteClusterSize;
  968. LOCK_PFN (OldIrql);
  969. ASSERT (ControlArea->u.Flags.Image == 0);
  970. if (ControlArea->NumberOfPfnReferences == 0) {
  971. //
  972. // No transition or valid prototype PTEs present, hence
  973. // no need to flush anything.
  974. //
  975. UNLOCK_PFN (OldIrql);
  976. return STATUS_SUCCESS;
  977. }
  978. while ((Synchronize) && (ControlArea->FlushInProgressCount != 0)) {
  979. //
  980. // Another thread is currently performing a flush operation on
  981. // this file. Wait for that flush to complete.
  982. //
  983. ControlArea->u.Flags.CollidedFlush = 1;
  984. //
  985. // Keep APCs blocked so no special APCs can be delivered in KeWait
  986. // which would cause the dispatcher lock to be released opening a
  987. // window where this thread could miss a pulse.
  988. //
  989. UNLOCK_PFN_AND_THEN_WAIT (APC_LEVEL);
  990. KeWaitForSingleObject (&MmCollidedFlushEvent,
  991. WrPageOut,
  992. KernelMode,
  993. FALSE,
  994. (PLARGE_INTEGER)&MmOneSecond);
  995. KeLowerIrql (OldIrql);
  996. LOCK_PFN (OldIrql);
  997. }
  998. ControlArea->FlushInProgressCount += 1;
  999. //
  1000. // Clear the deferred entry list as pages from it may get marked modified
  1001. // during the processing. Note that any transition page which is currently
  1002. // clean but has a nonzero reference count may get marked modified if
  1003. // there is a pending transaction and note well that this transaction may
  1004. // complete at any time ! Thus, this case must be carefully handled.
  1005. //
  1006. #if !defined(MI_MULTINODE)
  1007. if (MmPfnDeferredList != NULL) {
  1008. MiDeferredUnlockPages (MI_DEFER_PFN_HELD);
  1009. }
  1010. #else
  1011. //
  1012. // Each and every node's deferred list would have to be checked so
  1013. // we might as well go the long way and just call.
  1014. //
  1015. MiDeferredUnlockPages (MI_DEFER_PFN_HELD);
  1016. #endif
  1017. for (;;) {
  1018. if (LastSubsection != Subsection) {
  1019. //
  1020. // Flush to the last PTE in this subsection.
  1021. //
  1022. LastPte = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
  1023. }
  1024. else {
  1025. //
  1026. // Flush to the end of the range.
  1027. //
  1028. LastPte = FinalPte;
  1029. }
  1030. if (Subsection->SubsectionBase == NULL) {
  1031. //
  1032. // The prototype PTEs for this subsection have either never been
  1033. // created or have been tossed due to memory pressure. Either
  1034. // way, this range can be skipped as there are obviously no
  1035. // dirty pages in it. If there are other dirty pages
  1036. // to be written, write them now as we are skipping over PTEs.
  1037. //
  1038. if (LastWritten != NULL) {
  1039. ASSERT (MappedSubsection != NULL);
  1040. WriteNow = TRUE;
  1041. goto CheckForWrite;
  1042. }
  1043. if (LastSubsection == Subsection) {
  1044. break;
  1045. }
  1046. Subsection = Subsection->NextSubsection;
  1047. PointerPte = Subsection->SubsectionBase;
  1048. continue;
  1049. }
  1050. //
  1051. // Up the number of mapped views to prevent other threads
  1052. // from freeing this to the unused subsection list while we're
  1053. // operating on it.
  1054. //
  1055. MappedSubsection = (PMSUBSECTION) Subsection;
  1056. MappedSubsection->NumberOfMappedViews += 1;
  1057. if (MappedSubsection->DereferenceList.Flink != NULL) {
  1058. //
  1059. // Remove this from the list of unused subsections.
  1060. //
  1061. RemoveEntryList (&MappedSubsection->DereferenceList);
  1062. MI_UNUSED_SUBSECTIONS_COUNT_REMOVE (MappedSubsection);
  1063. MappedSubsection->DereferenceList.Flink = NULL;
  1064. }
  1065. if (CurrentThreadIsDereferenceThread == FALSE) {
  1066. //
  1067. // Set the access bit so an already ongoing trim won't blindly
  1068. // delete the prototype PTEs on completion of a mapped write.
  1069. // This can happen if the current thread dirties some pages and
  1070. // then deletes the view before the trim write finishes - this
  1071. // bit informs the trimming thread that a rescan is needed so
  1072. // that writes are not lost.
  1073. //
  1074. MappedSubsection->u2.SubsectionFlags2.SubsectionAccessed = 1;
  1075. }
  1076. //
  1077. // If the prototype PTEs are paged out or have a share count
  1078. // of 1, they cannot contain any transition or valid PTEs.
  1079. //
  1080. if (!MiCheckProtoPtePageState(PointerPte, OldIrql, &DroppedPfnLock)) {
  1081. PointerPte = (PMMPTE)(((ULONG_PTR)PointerPte | (PAGE_SIZE - 1)) + 1);
  1082. }
  1083. while (PointerPte < LastPte) {
  1084. if (MiIsPteOnPdeBoundary(PointerPte)) {
  1085. //
  1086. // We are on a page boundary, make sure this PTE is resident.
  1087. //
  1088. if (!MiCheckProtoPtePageState(PointerPte, OldIrql, &DroppedPfnLock)) {
  1089. PointerPte = (PMMPTE)((PCHAR)PointerPte + PAGE_SIZE);
  1090. //
  1091. // If there are dirty pages to be written, write them
  1092. // now as we are skipping over PTEs.
  1093. //
  1094. if (LastWritten != NULL) {
  1095. WriteNow = TRUE;
  1096. goto CheckForWrite;
  1097. }
  1098. continue;
  1099. }
  1100. }
  1101. PteContents = *PointerPte;
  1102. if ((PteContents.u.Hard.Valid == 1) ||
  1103. ((PteContents.u.Soft.Prototype == 0) &&
  1104. (PteContents.u.Soft.Transition == 1))) {
  1105. //
  1106. // Prototype PTE in transition, there are 3 possible cases:
  1107. // 1. The page is part of an image which is sharable and
  1108. // refers to the paging file - dereference page file
  1109. // space and free the physical page.
  1110. // 2. The page refers to the segment but is not modified -
  1111. // free the physical page.
  1112. // 3. The page refers to the segment and is modified -
  1113. // write the page to the file and free the physical page.
  1114. //
  1115. if (PteContents.u.Hard.Valid == 1) {
  1116. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
  1117. }
  1118. else {
  1119. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents);
  1120. }
  1121. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1122. ASSERT (Pfn1->OriginalPte.u.Soft.Prototype == 1);
  1123. ASSERT (Pfn1->OriginalPte.u.Hard.Valid == 0);
  1124. //
  1125. // Note that any transition page which is currently clean but
  1126. // has a nonzero reference count may get marked modified if
  1127. // there is a pending transaction and note well that this
  1128. // transaction may complete at any time ! Thus, this case
  1129. // must be carefully handled since the segment dereference
  1130. // thread must be given a collision error for this one as it
  1131. // requires that no pages be dirtied after a successful return.
  1132. //
  1133. if ((CurrentThreadIsDereferenceThread == TRUE) &&
  1134. (Pfn1->u3.e2.ReferenceCount != 0)) {
  1135. #if DBG
  1136. if ((PteContents.u.Hard.Valid != 0) &&
  1137. (MappedSubsection->u2.SubsectionFlags2.SubsectionAccessed == 0) &&
  1138. (ControlArea->u.Flags.Accessed == 0)) {
  1139. DbgPrint ("MM: flushing valid proto, %p %p\n",
  1140. Pfn1, PointerPte);
  1141. DbgBreakPoint ();
  1142. }
  1143. #endif
  1144. PointerPte = LastPte;
  1145. Bail = TRUE;
  1146. if (LastWritten != NULL) {
  1147. WriteNow = TRUE;
  1148. }
  1149. goto CheckForWrite;
  1150. }
  1151. //
  1152. // If the page is modified OR a write is in progress
  1153. // flush it. The write in progress case catches problems
  1154. // where the modified page write continually writes a
  1155. // page and gets errors writing it, by writing pages
  1156. // in this state, the error will be propagated back to
  1157. // the caller.
  1158. //
  1159. if ((Pfn1->u3.e1.Modified == 1) ||
  1160. (Pfn1->u3.e1.WriteInProgress)) {
  1161. if ((WriteInProgressOk == FALSE) &&
  1162. (Pfn1->u3.e1.WriteInProgress)) {
  1163. PointerPte = LastPte;
  1164. Bail = TRUE;
  1165. if (LastWritten != NULL) {
  1166. WriteNow = TRUE;
  1167. }
  1168. goto CheckForWrite;
  1169. }
  1170. if (LastWritten == NULL) {
  1171. //
  1172. // This is the first page of a cluster, initialize
  1173. // the MDL, etc.
  1174. //
  1175. LastPage = (PPFN_NUMBER)(Mdl + 1);
  1176. //
  1177. // Calculate the offset to read into the file.
  1178. // offset = base + ((thispte - basepte) << PAGE_SHIFT)
  1179. //
  1180. StartingOffset = (UINT64) MiStartingOffset (
  1181. Subsection,
  1182. Pfn1->PteAddress);
  1183. MI_INITIALIZE_ZERO_MDL (Mdl);
  1184. Mdl->MdlFlags |= MDL_PAGES_LOCKED;
  1185. Mdl->StartVa =
  1186. (PVOID)ULongToPtr(Pfn1->u3.e1.PageColor << PAGE_SHIFT);
  1187. Mdl->Size = (CSHORT)(sizeof(MDL) +
  1188. (sizeof(PFN_NUMBER) * MaxClusterSize));
  1189. FirstWritten = PointerPte;
  1190. }
  1191. LastWritten = PointerPte;
  1192. Mdl->ByteCount += PAGE_SIZE;
  1193. if (Mdl->ByteCount == (PAGE_SIZE * MaxClusterSize)) {
  1194. WriteNow = TRUE;
  1195. }
  1196. if (PteContents.u.Hard.Valid == 0) {
  1197. //
  1198. // The page is in transition.
  1199. //
  1200. MiUnlinkPageFromList (Pfn1);
  1201. MI_ADD_LOCKED_PAGE_CHARGE_FOR_MODIFIED_PAGE(Pfn1, TRUE, 18);
  1202. }
  1203. else {
  1204. MI_ADD_LOCKED_PAGE_CHARGE(Pfn1, TRUE, 20);
  1205. }
  1206. //
  1207. // Clear the modified bit for this page.
  1208. //
  1209. MI_SET_MODIFIED (Pfn1, 0, 0x22);
  1210. //
  1211. // Up the reference count for the physical page as there
  1212. // is I/O in progress.
  1213. //
  1214. Pfn1->u3.e2.ReferenceCount += 1;
  1215. *LastPage = PageFrameIndex;
  1216. LastPage += 1;
  1217. }
  1218. else {
  1219. //
  1220. // This page was not modified and therefore ends the
  1221. // current write cluster if any. Set WriteNow to TRUE
  1222. // if there is a cluster being built.
  1223. //
  1224. if (LastWritten != NULL) {
  1225. WriteNow = TRUE;
  1226. }
  1227. }
  1228. }
  1229. else {
  1230. //
  1231. // This page was not modified and therefore ends the
  1232. // current write cluster if any. Set WriteNow to TRUE
  1233. // if there is a cluster being built.
  1234. //
  1235. if (LastWritten != NULL) {
  1236. WriteNow = TRUE;
  1237. }
  1238. }
  1239. PointerPte += 1;
  1240. CheckForWrite:
  1241. //
  1242. // Write the current cluster if it is complete,
  1243. // full, or the loop is now complete.
  1244. //
  1245. if ((WriteNow) ||
  1246. ((PointerPte == LastPte) && (LastWritten != NULL))) {
  1247. LARGE_INTEGER EndOfFile;
  1248. //
  1249. // Issue the write request.
  1250. //
  1251. UNLOCK_PFN (OldIrql);
  1252. WriteNow = FALSE;
  1253. //
  1254. // Make sure the write does not go past the
  1255. // end of file. (segment size).
  1256. //
  1257. EndOfFile = MiEndingOffset(Subsection);
  1258. TempOffset = (UINT64) EndOfFile.QuadPart;
  1259. if (StartingOffset + Mdl->ByteCount > TempOffset) {
  1260. ASSERT ((ULONG_PTR)(TempOffset - StartingOffset) >
  1261. (Mdl->ByteCount - PAGE_SIZE));
  1262. Mdl->ByteCount = (ULONG)(TempOffset - StartingOffset);
  1263. }
  1264. ReflushCount = 0;
  1265. while (TRUE) {
  1266. KeClearEvent (&IoEvent);
  1267. Status = IoSynchronousPageWrite (FilePointer,
  1268. Mdl,
  1269. (PLARGE_INTEGER)&StartingOffset,
  1270. &IoEvent,
  1271. IoStatus);
  1272. if (NT_SUCCESS(Status)) {
  1273. //
  1274. // Success was returned, so wait for the i/o event.
  1275. //
  1276. KeWaitForSingleObject (&IoEvent,
  1277. WrPageOut,
  1278. KernelMode,
  1279. FALSE,
  1280. NULL);
  1281. }
  1282. else {
  1283. //
  1284. // Copy the error to the IoStatus, for error
  1285. // handling below.
  1286. //
  1287. IoStatus->Status = Status;
  1288. }
  1289. if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
  1290. MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
  1291. }
  1292. if (MmIsRetryIoStatus(IoStatus->Status)) {
  1293. ReflushCount -= 1;
  1294. if (ReflushCount & MiIoRetryMask) {
  1295. KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&Mm30Milliseconds);
  1296. continue;
  1297. }
  1298. }
  1299. break;
  1300. }
  1301. Page = (PPFN_NUMBER)(Mdl + 1);
  1302. LOCK_PFN (OldIrql);
  1303. if (MiIsPteOnPdeBoundary(PointerPte) == 0) {
  1304. //
  1305. // The next PTE is not in a different page, make
  1306. // sure the PTE for the prototype PTE page was not
  1307. // put in transition while the I/O was in progress.
  1308. // Note the prototype PTE page itself cannot be reused
  1309. // as each outstanding page has a sharecount on it - but
  1310. // the PTE mapping it can be put in transition regardless
  1311. // of sharecount because it is a system page.
  1312. //
  1313. if (MiGetPteAddress (PointerPte)->u.Hard.Valid == 0) {
  1314. MiMakeSystemAddressValidPfn (PointerPte, OldIrql);
  1315. }
  1316. }
  1317. if (NT_SUCCESS(IoStatus->Status)) {
  1318. //
  1319. // The I/O completed successfully, unlock the pages.
  1320. //
  1321. while (Page < LastPage) {
  1322. Pfn2 = MI_PFN_ELEMENT (*Page);
  1323. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF(Pfn2, 19);
  1324. Page += 1;
  1325. }
  1326. }
  1327. else {
  1328. //
  1329. // Don't count on the file system to convey
  1330. // anything in the information field on errors.
  1331. //
  1332. IoStatus->Information = 0;
  1333. //
  1334. // The I/O completed unsuccessfully, unlock the pages
  1335. // and return an error status.
  1336. //
  1337. while (Page < LastPage) {
  1338. Pfn2 = MI_PFN_ELEMENT (*Page);
  1339. //
  1340. // Mark the page dirty again so it can be rewritten.
  1341. //
  1342. MI_SET_MODIFIED (Pfn2, 1, 0x1);
  1343. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (Pfn2, 21);
  1344. Page += 1;
  1345. }
  1346. if ((MmIsRetryIoStatus (IoStatus->Status)) &&
  1347. (MaxClusterSize != 1) &&
  1348. (Mdl->ByteCount > PAGE_SIZE)) {
  1349. //
  1350. // Retries of a cluster have failed, reissue
  1351. // the cluster one page at a time as the
  1352. // storage stack should always be able to
  1353. // make forward progress this way.
  1354. //
  1355. ASSERT (FirstWritten != NULL);
  1356. ASSERT (LastWritten != NULL);
  1357. ASSERT (FirstWritten != LastWritten);
  1358. PointerPte = FirstWritten;
  1359. if (MiGetPteAddress (PointerPte)->u.Hard.Valid == 0) {
  1360. MiMakeSystemAddressValidPfn (PointerPte, OldIrql);
  1361. }
  1362. MaxClusterSize = 1;
  1363. }
  1364. else {
  1365. //
  1366. // Calculate how much was written thus far
  1367. // and add that to the information field
  1368. // of the IOSB.
  1369. //
  1370. IoStatus->Information +=
  1371. (((LastWritten - StartingPte) << PAGE_SHIFT) -
  1372. Mdl->ByteCount);
  1373. LastWritten = NULL;
  1374. //
  1375. // Set this to force termination of the outermost loop.
  1376. //
  1377. Subsection = LastSubsection;
  1378. break;
  1379. }
  1380. } // end if error on i/o
  1381. //
  1382. // As the PFN lock has been released and
  1383. // reacquired, do this loop again as the
  1384. // PTE may have changed state.
  1385. //
  1386. LastWritten = NULL;
  1387. } // end if chunk to write
  1388. } //end while
  1389. ASSERT (MappedSubsection->DereferenceList.Flink == NULL);
  1390. ASSERT (((LONG_PTR)MappedSubsection->NumberOfMappedViews >= 1) ||
  1391. (MappedSubsection->u.SubsectionFlags.SubsectionStatic == 1));
  1392. MappedSubsection->NumberOfMappedViews -= 1;
  1393. if ((MappedSubsection->NumberOfMappedViews == 0) &&
  1394. (MappedSubsection->u.SubsectionFlags.SubsectionStatic == 0)) {
  1395. //
  1396. // Insert this subsection into the unused subsection list.
  1397. //
  1398. InsertTailList (&MmUnusedSubsectionList,
  1399. &MappedSubsection->DereferenceList);
  1400. MI_UNUSED_SUBSECTIONS_COUNT_INSERT (MappedSubsection);
  1401. }
  1402. if ((Bail == TRUE) || (Subsection == LastSubsection)) {
  1403. //
  1404. // The last range has been flushed or we have collided with the
  1405. // mapped page writer. Regardless, exit the top FOR loop
  1406. // and return.
  1407. //
  1408. break;
  1409. }
  1410. Subsection = Subsection->NextSubsection;
  1411. PointerPte = Subsection->SubsectionBase;
  1412. } //end for
  1413. ASSERT (LastWritten == NULL);
  1414. ControlArea->FlushInProgressCount -= 1;
  1415. if ((ControlArea->u.Flags.CollidedFlush == 1) &&
  1416. (ControlArea->FlushInProgressCount == 0)) {
  1417. ControlArea->u.Flags.CollidedFlush = 0;
  1418. KePulseEvent (&MmCollidedFlushEvent, 0, FALSE);
  1419. }
  1420. UNLOCK_PFN (OldIrql);
  1421. if (Bail == TRUE) {
  1422. //
  1423. // This routine collided with the mapped page writer and the caller
  1424. // expects an error for this. Give it to him.
  1425. //
  1426. return STATUS_MAPPED_WRITER_COLLISION;
  1427. }
  1428. return IoStatus->Status;
  1429. }
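/*
   Illustrative sketch (not part of the original flushsec.c): a standalone
   model of the fallback policy in the error path above. When a clustered
   paging write fails with a retryable status, the run is reissued one page
   at a time so the storage stack can always make forward progress. The
   WriteCluster stub stands in for IoSynchronousPageWrite and is an
   invention of this sketch.
*/
#include <stdbool.h>
#include <stdio.h>

/* Stub: pretend multi-page clusters fail with a retryable status. */
static bool WriteCluster (unsigned FirstPage, unsigned PageCount)
{
    (void) FirstPage;
    return PageCount <= 1;                  /* only single-page writes succeed */
}

static bool FlushRange (unsigned FirstPage, unsigned PageCount)
{
    unsigned MaxClusterSize = 16;           /* like MmModifiedWriteClusterSize */
    unsigned Page = FirstPage;

    while (Page < FirstPage + PageCount) {
        unsigned Run = FirstPage + PageCount - Page;

        if (Run > MaxClusterSize) {
            Run = MaxClusterSize;
        }

        if (!WriteCluster (Page, Run)) {
            if (MaxClusterSize != 1 && Run > 1) {
                /* Retryable cluster failure: drop to one page at a time. */
                MaxClusterSize = 1;
                continue;
            }
            return false;                   /* a single page still failed */
        }

        Page += Run;
    }

    return true;
}

int main (void)
{
    printf ("flush %s\n", FlushRange (0, 40) ? "succeeded" : "failed");
    return 0;
}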
  1430. BOOLEAN
  1431. MmPurgeSection (
  1432. IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
  1433. IN PLARGE_INTEGER Offset,
  1434. IN SIZE_T RegionSize,
  1435. IN ULONG IgnoreCacheViews
  1436. )
  1437. /*++
  1438. Routine Description:
  1439. This function determines if any views of the specified section
  1440. are mapped, and if not, purges valid pages (even modified ones)
  1441. from the specified section and returns any used pages to the free
  1442. list. This is accomplished by examining the prototype PTEs
  1443. from the specified offset to the end of the section, and if
  1444. any prototype PTEs are in the transition state, putting the
  1445. prototype PTE back into its original state and putting the
  1446. physical page on the free list.
  1447. NOTE:
  1448. If there is an I/O operation ongoing for one of the pages,
  1449. that page is eliminated from the segment and allowed to "float"
  1450. until the i/o is complete. Once the share count goes to zero
  1451. the page will be added to the free page list.
  1452. Arguments:
  1453. SectionObjectPointer - Supplies a pointer to the section objects.
  1454. Offset - Supplies the offset into the section in which to begin
  1455. purging pages. If this argument is not present, then the
  1456. whole section is purged without regard to the region size
  1457. argument.
  1458. RegionSize - Supplies the size of the region to purge. If this
  1459. is specified as zero and Offset is specified, the
  1460. region from Offset to the end of the file is purged.
  1461. Note: The largest value acceptable for RegionSize is
  1462. 0xFFFF0000;
  1463. IgnoreCacheViews - Supplies FALSE if mapped views in the system
  1464. cache should cause the function to return FALSE.
  1465. This is the normal case.
  1466. Supplies TRUE if mapped views should be ignored
  1467. and the flush should occur. NOTE THAT IF TRUE
  1468. IS SPECIFIED AND ANY DATA PURGED IS CURRENTLY MAPPED
  1469. AND VALID A BUGCHECK WILL OCCUR!!
  1470. Return Value:
  1471. Returns TRUE if either no section exists for the file object or
  1472. the section is not mapped and the purge was done, FALSE otherwise.
  1473. Note that FALSE is returned if during the purge operation, a page
  1474. could not be purged due to a non-zero reference count.
  1475. --*/
  1476. {
  1477. LOGICAL DroppedPfnLock;
  1478. PCONTROL_AREA ControlArea;
  1479. PMAPPED_FILE_SEGMENT Segment;
  1480. PMMPTE PointerPde;
  1481. PMMPTE PointerPte;
  1482. PMMPTE LastPte;
  1483. PMMPTE FinalPte;
  1484. MMPTE PteContents;
  1485. PMMPFN Pfn1;
  1486. PMMPFN Pfn2;
  1487. KIRQL OldIrql;
  1488. UINT64 PteOffset;
  1489. UINT64 LastPteOffset;
  1490. PMSUBSECTION MappedSubsection;
  1491. PSUBSECTION Subsection;
  1492. PSUBSECTION FirstSubsection;
  1493. PSUBSECTION LastSubsection;
  1494. PSUBSECTION TempSubsection;
  1495. PSUBSECTION LastSubsectionWithProtos;
  1496. LARGE_INTEGER LocalOffset;
  1497. BOOLEAN ReturnValue;
  1498. PFN_NUMBER PageFrameIndex;
  1499. PFN_NUMBER PageTableFrameIndex;
  1500. #if DBG
  1501. PFN_NUMBER LastLocked = 0;
  1502. #endif
  1503. //
  1504. // This is needed in case a page is on the mapped page writer list -
  1505. // the PFN lock will need to be released and APCs disabled.
  1506. //
  1507. ASSERT (KeGetCurrentIrql() < DISPATCH_LEVEL);
  1508. //
  1509. // Capture caller's file size, since we may modify it.
  1510. //
  1511. if (ARGUMENT_PRESENT(Offset)) {
  1512. LocalOffset = *Offset;
  1513. Offset = &LocalOffset;
  1514. }
  1515. //
  1516. // See if we can truncate this file to where the caller wants
  1517. // us to.
  1518. //
  1519. if (!MiCanFileBeTruncatedInternal(SectionObjectPointer, Offset, TRUE, &OldIrql)) {
  1520. return FALSE;
  1521. }
  1522. //
  1523. // PFN LOCK IS NOW HELD!
  1524. //
  1525. ControlArea = (PCONTROL_AREA)(SectionObjectPointer->DataSectionObject);
  1526. if ((ControlArea == NULL) || (ControlArea->u.Flags.Rom)) {
  1527. UNLOCK_PFN (OldIrql);
  1528. return TRUE;
  1529. }
  1530. //
  1531. // Even though MiCanFileBeTruncatedInternal returned TRUE, there could
  1532. // still be a system cache mapped view. We cannot truncate if
  1533. // the Cache Manager has a view mapped.
  1534. //
  1535. if ((IgnoreCacheViews == FALSE) &&
  1536. (ControlArea->NumberOfSystemCacheViews != 0)) {
  1537. UNLOCK_PFN (OldIrql);
  1538. return FALSE;
  1539. }
  1540. #if 0
  1541. //
  1542. // Prevent races when the control area is being deleted as the clean
  1543. // path releases the PFN lock midway through. File objects may still have
  1544. // section object pointers and data section objects that point at this
  1545. // control area, hence the purge can be issued.
  1546. //
  1547. // Check for this and fail the purge as the control area (and the section
  1548. // object pointers/data section objects) will be going away momentarily.
  1549. // Note that even though drivers have these data section objects, no one
  1550. // currently has an open section for this control area and no one is
  1551. // allowed to open one until the clean path finishes.
  1552. //
  1553. if (ControlArea->u.Flags.BeingDeleted == 1) {
  1554. UNLOCK_PFN (OldIrql);
  1555. return FALSE;
  1556. }
  1557. #else
  1558. //
  1559. // The above check can be removed as MiCanFileBeTruncatedInternal does
  1560. // the same check, so just assert it below.
  1561. //
  1562. ASSERT (ControlArea->u.Flags.BeingDeleted == 0);
  1563. #endif
  1564. //
  1565. // Purge the section - locate the subsection which
  1566. // contains the PTEs.
  1567. //
  1568. ASSERT (ControlArea->u.Flags.GlobalOnlyPerSession == 0);
  1569. Subsection = (PSUBSECTION)(ControlArea + 1);
  1570. if (!ARGUMENT_PRESENT (Offset)) {
  1571. //
  1572. // If the offset is not specified, flush the complete file ignoring
  1573. // the region size.
  1574. //
  1575. PteOffset = 0;
  1576. RegionSize = 0;
  1577. }
  1578. else {
  1579. PteOffset = (UINT64)(Offset->QuadPart >> PAGE_SHIFT);
  1580. //
  1581. // Make sure the PTEs are not in the extended part of the segment.
  1582. //
  1583. while (PteOffset >= (UINT64) Subsection->PtesInSubsection) {
  1584. PteOffset -= Subsection->PtesInSubsection;
  1585. Subsection = Subsection->NextSubsection;
  1586. if (Subsection == NULL) {
  1587. //
1588. // The offset is at or beyond the end of
1589. // the section; don't purge anything, just return.
  1590. //
  1591. UNLOCK_PFN (OldIrql);
  1592. return TRUE;
  1593. }
  1594. }
  1595. ASSERT (PteOffset < (UINT64) Subsection->PtesInSubsection);
  1596. }
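//
// At this point Subsection and PteOffset identify the prototype PTE
// (by index within the subsection) corresponding to the caller's
// starting offset: the byte offset is converted to a page index and
// walked through the subsection chain, subtracting each subsection's
// PtesInSubsection until the index falls within a single subsection.
//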
  1597. //
  1598. // Locate the address of the last prototype PTE to be flushed.
  1599. //
  1600. if (RegionSize == 0) {
  1601. //
  1602. // Flush to end of section.
  1603. //
  1604. LastSubsection = Subsection;
  1605. Segment = (PMAPPED_FILE_SEGMENT) ControlArea->Segment;
  1606. if (MiIsAddressValid (Segment, TRUE)) {
  1607. if (Segment->LastSubsectionHint != NULL) {
  1608. LastSubsection = (PSUBSECTION) Segment->LastSubsectionHint;
  1609. }
  1610. }
  1611. while (LastSubsection->NextSubsection != NULL) {
  1612. LastSubsection = LastSubsection->NextSubsection;
  1613. }
  1614. LastPteOffset = LastSubsection->PtesInSubsection - 1;
  1615. }
  1616. else {
  1617. //
  1618. // Calculate the end of the region.
  1619. //
  1620. LastPteOffset = PteOffset +
  1621. (((RegionSize + BYTE_OFFSET(Offset->LowPart)) - 1) >> PAGE_SHIFT);
  1622. LastSubsection = Subsection;
  1623. while (LastPteOffset >= (UINT64) LastSubsection->PtesInSubsection) {
  1624. LastPteOffset -= LastSubsection->PtesInSubsection;
  1625. if (LastSubsection->NextSubsection == NULL) {
  1626. LastPteOffset = LastSubsection->PtesInSubsection - 1;
  1627. break;
  1628. }
  1629. LastSubsection = LastSubsection->NextSubsection;
  1630. }
  1631. ASSERT (LastPteOffset < (UINT64) LastSubsection->PtesInSubsection);
  1632. }
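//
// Subsection/PteOffset and LastSubsection/LastPteOffset now bracket the
// range of prototype PTEs to purge. A region that extends beyond the end
// of the section has been clamped to the final PTE of the last subsection.
//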
  1633. //
  1634. // Try for the fast reference on the first and last subsection.
1635. // If the reference cannot be obtained, then there are no prototype PTEs for this
1636. // subsection, so there is nothing in it to flush; skip ahead.
  1637. //
  1638. // Note that subsections in between do not need referencing as
  1639. // the purge is smart enough to skip them if they're nonresident.
  1640. //
  1641. if (MiReferenceSubsection ((PMSUBSECTION)Subsection) == FALSE) {
  1642. do {
  1643. //
  1644. // If this increment would put us past the end offset, then nothing
  1645. // to flush, just return success.
  1646. //
  1647. if (Subsection == LastSubsection) {
  1648. UNLOCK_PFN (OldIrql);
  1649. return TRUE;
  1650. }
  1651. Subsection = Subsection->NextSubsection;
  1652. //
1653. // If this increment put us past the end of the section, then nothing
  1654. // to flush, just return success.
  1655. //
  1656. if (Subsection == NULL) {
  1657. UNLOCK_PFN (OldIrql);
  1658. return TRUE;
  1659. }
  1660. if (MiReferenceSubsection ((PMSUBSECTION)Subsection) == FALSE) {
  1661. continue;
  1662. }
  1663. //
  1664. // Start the flush at this subsection which is now referenced.
  1665. //
  1666. PointerPte = &Subsection->SubsectionBase[0];
  1667. break;
  1668. } while (TRUE);
  1669. }
  1670. else {
  1671. PointerPte = &Subsection->SubsectionBase[PteOffset];
  1672. }
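//
// PointerPte now addresses the first prototype PTE to examine - either
// the caller's starting PTE (if the first subsection had materialized
// prototype PTEs) or the base of the first later subsection whose
// prototype PTEs could be referenced.
//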
  1673. FirstSubsection = Subsection;
  1674. ASSERT (Subsection->SubsectionBase != NULL);
  1675. //
  1676. // The first subsection is referenced, now reference count the last one.
  1677. // If the first is the last, just double reference it anyway as it
  1678. // simplifies cleanup later.
  1679. //
  1680. if (MiReferenceSubsection ((PMSUBSECTION)LastSubsection) == FALSE) {
  1681. ASSERT (Subsection != LastSubsection);
  1682. TempSubsection = Subsection->NextSubsection;
  1683. LastSubsectionWithProtos = NULL;
  1684. while (TempSubsection != LastSubsection) {
  1685. //
1686. // Walk towards the last subsection, remembering the most recent
1687. // subsection encountered whose prototype PTEs are still materialized.
  1688. //
  1689. ASSERT (TempSubsection != NULL);
  1690. if ((PMSUBSECTION)TempSubsection->SubsectionBase != NULL) {
  1691. LastSubsectionWithProtos = TempSubsection;
  1692. }
  1693. TempSubsection = TempSubsection->NextSubsection;
  1694. }
  1695. //
  1696. // End the flush at this subsection and reference it.
  1697. //
  1698. if (LastSubsectionWithProtos == NULL) {
  1699. ASSERT (Subsection != NULL);
  1700. ASSERT (Subsection->SubsectionBase != NULL);
  1701. TempSubsection = Subsection;
  1702. }
  1703. else {
  1704. TempSubsection = LastSubsectionWithProtos;
  1705. }
  1706. if (MiReferenceSubsection ((PMSUBSECTION)TempSubsection) == FALSE) {
  1707. ASSERT (FALSE);
  1708. }
  1709. ASSERT (TempSubsection->SubsectionBase != NULL);
  1710. LastSubsection = TempSubsection;
  1711. LastPteOffset = LastSubsection->PtesInSubsection - 1;
  1712. }
  1713. //
  1714. // End the flush at this subsection which is now referenced.
  1715. //
  1716. // Point final PTE to 1 beyond the end.
  1717. //
  1718. FinalPte = &LastSubsection->SubsectionBase[LastPteOffset + 1];
  1719. //
  1720. // Increment the number of mapped views to
  1721. // prevent the section from being deleted while the purge is
  1722. // in progress.
  1723. //
  1724. ControlArea->NumberOfMappedViews += 1;
  1725. //
  1726. // Set being purged so no one can map a view
  1727. // while the purge is going on.
  1728. //
  1729. ControlArea->u.Flags.BeingPurged = 1;
  1730. ControlArea->u.Flags.WasPurged = 1;
  1731. ReturnValue = TRUE;
  1732. for (;;) {
  1733. if (OldIrql == MM_NOIRQL) {
  1734. LOCK_PFN (OldIrql);
  1735. }
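//
// Each iteration of this loop purges one subsection's worth of prototype
// PTEs. Throughout the loop, OldIrql == MM_NOIRQL indicates the PFN lock
// is not currently held - it is dropped at page table boundaries and
// around waits, and reacquired here at the top of the loop and wherever
// the PFN database must be examined.
//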
  1736. if (LastSubsection != Subsection) {
  1737. //
  1738. // Flush to the last PTE in this subsection.
  1739. //
  1740. LastPte = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
  1741. }
  1742. else {
  1743. //
  1744. // Flush to the end of the range.
  1745. //
  1746. LastPte = FinalPte;
  1747. }
  1748. if (Subsection->SubsectionBase == NULL) {
  1749. //
  1750. // The prototype PTEs for this subsection have either never been
  1751. // created or have been tossed due to memory pressure. Either
  1752. // way, this range can be skipped as there are obviously no
  1753. // pages to purge in this range.
  1754. //
  1755. ASSERT (OldIrql != MM_NOIRQL);
  1756. UNLOCK_PFN (OldIrql);
  1757. OldIrql = MM_NOIRQL;
  1758. goto nextrange;
  1759. }
  1760. //
  1761. // Up the number of mapped views to prevent other threads
1762. // from freeing this subsection to the unused subsection list while we're
  1763. // operating on it.
  1764. //
  1765. MappedSubsection = (PMSUBSECTION) Subsection;
  1766. MappedSubsection->NumberOfMappedViews += 1;
  1767. if (MappedSubsection->DereferenceList.Flink != NULL) {
  1768. //
  1769. // Remove this from the list of unused subsections.
  1770. //
  1771. RemoveEntryList (&MappedSubsection->DereferenceList);
  1772. MI_UNUSED_SUBSECTIONS_COUNT_REMOVE (MappedSubsection);
  1773. MappedSubsection->DereferenceList.Flink = NULL;
  1774. }
  1775. //
  1776. // Set the access bit so an already ongoing trim won't blindly
  1777. // delete the prototype PTEs on completion of a mapped write.
  1778. // This can happen if the current thread dirties some pages and
  1779. // then deletes the view before the trim write finishes - this
  1780. // bit informs the trimming thread that a rescan is needed so
  1781. // that writes are not lost.
  1782. //
  1783. MappedSubsection->u2.SubsectionFlags2.SubsectionAccessed = 1;
  1784. //
  1785. // If the page table page containing the PTEs is not
  1786. // resident, then no PTEs can be in the valid or transition
  1787. // state! Skip over the PTEs.
  1788. //
  1789. if (!MiCheckProtoPtePageState(PointerPte, OldIrql, &DroppedPfnLock)) {
  1790. PointerPte = (PMMPTE)(((ULONG_PTR)PointerPte | (PAGE_SIZE - 1)) + 1);
  1791. }
  1792. while (PointerPte < LastPte) {
  1793. //
  1794. // If the page table page containing the PTEs is not
  1795. // resident, then no PTEs can be in the valid or transition
  1796. // state! Skip over the PTEs.
  1797. //
  1798. if (MiIsPteOnPdeBoundary(PointerPte)) {
  1799. if (!MiCheckProtoPtePageState(PointerPte, OldIrql, &DroppedPfnLock)) {
  1800. PointerPte = (PMMPTE)((PCHAR)PointerPte + PAGE_SIZE);
  1801. continue;
  1802. }
  1803. }
  1804. PteContents = *PointerPte;
  1805. if (PteContents.u.Hard.Valid == 1) {
  1806. //
  1807. // A valid PTE was found, it must be mapped in the
  1808. // system cache. Just exit the loop and return FALSE
  1809. // and let the caller fix this.
  1810. //
  1811. ReturnValue = FALSE;
  1812. break;
  1813. }
  1814. if ((PteContents.u.Soft.Prototype == 0) &&
  1815. (PteContents.u.Soft.Transition == 1)) {
  1816. if (OldIrql == MM_NOIRQL) {
  1817. PointerPde = MiGetPteAddress (PointerPte);
  1818. LOCK_PFN (OldIrql);
  1819. if (PointerPde->u.Hard.Valid == 0) {
  1820. MiMakeSystemAddressValidPfn (PointerPte, OldIrql);
  1821. }
  1822. continue;
  1823. }
  1824. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE(&PteContents);
  1825. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1826. if ((Pfn1->OriginalPte.u.Soft.Prototype != 1) ||
  1827. (Pfn1->OriginalPte.u.Hard.Valid != 0) ||
  1828. (Pfn1->PteAddress != PointerPte)) {
  1829. //
  1830. // The pool containing the prototype PTEs has been
  1831. // corrupted. Pool corruption like this is fatal.
  1832. //
  1833. KeBugCheckEx (POOL_CORRUPTION_IN_FILE_AREA,
  1834. 0x2,
  1835. (ULONG_PTR)PointerPte,
  1836. (ULONG_PTR)Pfn1->PteAddress,
  1837. (ULONG_PTR)PteContents.u.Long);
  1838. }
  1839. #if DBG
  1840. if ((Pfn1->u3.e2.ReferenceCount != 0) &&
  1841. (Pfn1->u3.e1.WriteInProgress == 0)) {
  1842. //
  1843. // There must be an I/O in progress on this page.
  1844. //
  1845. if (MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE(&PteContents) != LastLocked) {
  1846. UNLOCK_PFN (OldIrql);
  1847. LastLocked = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents);
  1848. PointerPde = MiGetPteAddress (PointerPte);
  1849. LOCK_PFN (OldIrql);
  1850. if (PointerPde->u.Hard.Valid == 0) {
  1851. MiMakeSystemAddressValidPfn (PointerPte, OldIrql);
  1852. }
  1853. continue;
  1854. }
  1855. }
  1856. #endif //DBG
  1857. //
1858. // If the modified page writer has the page locked for I/O,
1859. // wait for the I/Os to be completed and the pages
1860. // to be unlocked. This eliminates a race condition
1861. // where the modified page writer locks the pages, then
1862. // a purge occurs and completes before the mapped
1863. // writer thread runs.
  1864. //
  1865. if (Pfn1->u3.e1.WriteInProgress == 1) {
  1866. //
1867. // A deadlock involving three or more threads can occur where:
  1868. //
  1869. // 1. The mapped page writer thread has issued a write
  1870. // and is in the filesystem code waiting for a resource.
  1871. //
  1872. // 2. Thread 2 owns the resource above but is waiting for
  1873. // the filesystem's quota mutex.
  1874. //
  1875. // 3. Thread 3 owns the quota mutex and is right here
1876. // doing a purge from the cache manager when it notices
  1877. // the page to be purged is either already being written
  1878. // or is in the mapped page writer list. If it is
  1879. // already being written everything will unjam. If it
  1880. // is still on the mapped page writer list awaiting
  1881. // processing, then it must be cancelled - otherwise
  1882. // if this thread were to wait, deadlock can occur.
  1883. //
  1884. // The alternative to all this is for the filesystems to
  1885. // always release the quota mutex before purging but the
  1886. // filesystem overhead to do this is substantial.
  1887. //
  1888. if (MiCancelWriteOfMappedPfn (PageFrameIndex, OldIrql) == TRUE) {
  1889. //
  1890. // Stopping any failed writes (even deliberately
1891. // cancelled ones) automatically causes a delay. A
  1892. // successful stop also results in the PFN lock
  1893. // being released and reacquired. So loop back to
  1894. // the top now as the world may have changed.
  1895. //
  1896. if (MiGetPteAddress (PointerPte)->u.Hard.Valid == 0) {
  1897. MiMakeSystemAddressValidPfn (PointerPte, OldIrql);
  1898. }
  1899. continue;
  1900. }
  1901. ASSERT (ControlArea->ModifiedWriteCount != 0);
  1902. ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
  1903. ControlArea->u.Flags.SetMappedFileIoComplete = 1;
  1904. //
  1905. // Keep APCs blocked so no special APCs can be delivered
  1906. // in KeWait which would cause the dispatcher lock to be
  1907. // released opening a window where this thread could miss
  1908. // a pulse.
  1909. //
  1910. UNLOCK_PFN_AND_THEN_WAIT (APC_LEVEL);
  1911. KeWaitForSingleObject (&MmMappedFileIoComplete,
  1912. WrPageOut,
  1913. KernelMode,
  1914. FALSE,
  1915. NULL);
  1916. KeLowerIrql (OldIrql);
  1917. PointerPde = MiGetPteAddress (PointerPte);
  1918. LOCK_PFN (OldIrql);
  1919. if (PointerPde->u.Hard.Valid == 0) {
  1920. MiMakeSystemAddressValidPfn (PointerPte, OldIrql);
  1921. }
  1922. continue;
  1923. }
  1924. if (Pfn1->u3.e1.ReadInProgress == 1) {
  1925. //
1926. // The page is currently being read in from the
1927. // disk. Treat this just like a valid PTE and
1928. // return FALSE.
  1929. //
  1930. ReturnValue = FALSE;
  1931. break;
  1932. }
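//
// This is a transition page with no I/O in progress. Restore the
// prototype PTE to its original (on-disk) contents, drop the control
// area's PFN reference, unlink the page from the standby or modified
// list, mark the PFN deleted and release the share count this page
// held on the page containing its prototype PTE (Pfn1->u4.PteFrame).
//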
  1933. ASSERT (!((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
  1934. (Pfn1->OriginalPte.u.Soft.Transition == 1)));
  1935. MI_WRITE_INVALID_PTE (PointerPte, Pfn1->OriginalPte);
  1936. ASSERT (Pfn1->OriginalPte.u.Hard.Valid == 0);
  1937. ControlArea->NumberOfPfnReferences -= 1;
  1938. ASSERT ((LONG)ControlArea->NumberOfPfnReferences >= 0);
  1939. MiUnlinkPageFromList (Pfn1);
  1940. MI_SET_PFN_DELETED (Pfn1);
  1941. PageTableFrameIndex = Pfn1->u4.PteFrame;
  1942. Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);
  1943. MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);
  1944. //
  1945. // If the reference count for the page is zero, insert
  1946. // it into the free page list, otherwise leave it alone
  1947. // and when the reference count is decremented to zero
  1948. // the page will go to the free list.
  1949. //
  1950. if (Pfn1->u3.e2.ReferenceCount == 0) {
  1951. MiReleasePageFileSpace (Pfn1->OriginalPte);
  1952. MiInsertPageInFreeList (MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents));
  1953. }
  1954. }
  1955. PointerPte += 1;
  1956. if ((MiIsPteOnPdeBoundary(PointerPte)) && (OldIrql != MM_NOIRQL)) {
  1957. //
  1958. // Unlock PFN so large requests will not block other
  1959. // threads on MP systems.
  1960. //
  1961. UNLOCK_PFN (OldIrql);
  1962. OldIrql = MM_NOIRQL;
  1963. }
  1964. }
  1965. if (OldIrql == MM_NOIRQL) {
  1966. LOCK_PFN (OldIrql);
  1967. }
  1968. ASSERT (MappedSubsection->DereferenceList.Flink == NULL);
  1969. ASSERT (((LONG_PTR)MappedSubsection->NumberOfMappedViews >= 1) ||
  1970. (MappedSubsection->u.SubsectionFlags.SubsectionStatic == 1));
  1971. MappedSubsection->NumberOfMappedViews -= 1;
  1972. if ((MappedSubsection->NumberOfMappedViews == 0) &&
  1973. (MappedSubsection->u.SubsectionFlags.SubsectionStatic == 0)) {
  1974. //
  1975. // Insert this subsection into the unused subsection list.
  1976. //
  1977. InsertTailList (&MmUnusedSubsectionList,
  1978. &MappedSubsection->DereferenceList);
  1979. MI_UNUSED_SUBSECTIONS_COUNT_INSERT (MappedSubsection);
  1980. }
  1981. ASSERT (OldIrql != MM_NOIRQL);
  1982. UNLOCK_PFN (OldIrql);
  1983. OldIrql = MM_NOIRQL;
  1984. nextrange:
  1985. if ((LastSubsection != Subsection) && (ReturnValue)) {
  1986. //
  1987. // Get the next subsection in the list.
  1988. //
  1989. Subsection = Subsection->NextSubsection;
  1990. PointerPte = Subsection->SubsectionBase;
  1991. }
  1992. else {
  1993. //
  1994. // The last range has been flushed, exit the top FOR loop
  1995. // and return.
  1996. //
  1997. break;
  1998. }
  1999. }
  2000. if (OldIrql == MM_NOIRQL) {
  2001. LOCK_PFN (OldIrql);
  2002. }
  2003. MiDecrementSubsections (FirstSubsection, FirstSubsection);
  2004. MiDecrementSubsections (LastSubsection, LastSubsection);
  2005. ASSERT ((LONG)ControlArea->NumberOfMappedViews >= 1);
  2006. ControlArea->NumberOfMappedViews -= 1;
  2007. ControlArea->u.Flags.BeingPurged = 0;
  2008. //
  2009. // Check to see if the control area should be deleted. This
  2010. // will release the PFN lock.
  2011. //
  2012. MiCheckControlArea (ControlArea, NULL, OldIrql);
  2013. return ReturnValue;
  2014. }
  2015. BOOLEAN
  2016. MmFlushImageSection (
  2017. IN PSECTION_OBJECT_POINTERS SectionPointer,
  2018. IN MMFLUSH_TYPE FlushType
  2019. )
  2020. /*++
  2021. Routine Description:
  2022. This function determines if any views of the specified image section
  2023. are mapped, and if not, flushes valid pages (even modified ones)
  2024. from the specified section and returns any used pages to the free
  2025. list. This is accomplished by examining the prototype PTEs
  2026. from the specified offset to the end of the section, and if
  2027. any prototype PTEs are in the transition state, putting the
  2028. prototype PTE back into its original state and putting the
  2029. physical page on the free list.
  2030. Arguments:
  2031. SectionPointer - Supplies a pointer to a section object pointers
  2032. within the FCB.
  2033. FlushType - Supplies the type of flush to check for. One of
  2034. MmFlushForDelete or MmFlushForWrite.
  2035. Return Value:
  2036. Returns TRUE if either no section exists for the file object or
  2037. the section is not mapped and the purge was done, FALSE otherwise.
  2038. --*/
  2039. {
  2040. PLIST_ENTRY Next;
  2041. PCONTROL_AREA ControlArea;
  2042. PLARGE_CONTROL_AREA LargeControlArea;
  2043. KIRQL OldIrql;
  2044. LOGICAL state;
  2045. if (FlushType == MmFlushForDelete) {
  2046. //
  2047. // Do a quick check to see if there are any mapped views for
  2048. // the data section. If so, just return FALSE.
  2049. //
  2050. LOCK_PFN (OldIrql);
  2051. ControlArea = (PCONTROL_AREA)(SectionPointer->DataSectionObject);
  2052. if (ControlArea != NULL) {
  2053. if ((ControlArea->NumberOfUserReferences != 0) ||
  2054. (ControlArea->u.Flags.BeingCreated)) {
  2055. UNLOCK_PFN (OldIrql);
  2056. return FALSE;
  2057. }
  2058. }
  2059. UNLOCK_PFN (OldIrql);
  2060. }
  2061. //
  2062. // Check the status of the control area. If the control area is in use
  2063. // or the control area is being deleted, this operation cannot continue.
  2064. //
  2065. state = MiCheckControlAreaStatus (CheckImageSection,
  2066. SectionPointer,
  2067. FALSE,
  2068. &ControlArea,
  2069. &OldIrql);
  2070. if (ControlArea == NULL) {
  2071. return (BOOLEAN) state;
  2072. }
  2073. //
  2074. // PFN LOCK IS NOW HELD!
  2075. //
  2076. //
  2077. // Repeat until there are no more control areas - multiple control areas
  2078. // for the same image section occur to support user global DLLs - these DLLs
  2079. // require data that is shared within a session but not across sessions.
  2080. // Note this can only happen for Hydra.
  2081. //
  2082. do {
  2083. //
  2084. // Set the being deleted flag and up the number of mapped views
  2085. // for the segment. Upping the number of mapped views prevents
  2086. // the segment from being deleted and passed to the deletion thread
  2087. // while we are forcing a delete.
  2088. //
  2089. ControlArea->u.Flags.BeingDeleted = 1;
  2090. ControlArea->NumberOfMappedViews = 1;
  2091. LargeControlArea = NULL;
  2092. if (ControlArea->u.Flags.GlobalOnlyPerSession == 0) {
  2093. NOTHING;
  2094. }
  2095. else if (IsListEmpty(&((PLARGE_CONTROL_AREA)ControlArea)->UserGlobalList)) {
  2096. ASSERT (ControlArea ==
  2097. (PCONTROL_AREA)SectionPointer->ImageSectionObject);
  2098. }
  2099. else {
  2100. //
  2101. // Check if there's only one image section in this control area, so
  2102. // we don't reference the section object pointers as the
  2103. // MiCleanSection call may result in its deletion.
  2104. //
  2105. //
  2106. // There are multiple control areas, bump the reference count
  2107. // on one of them (doesn't matter which one) so that it can't
  2108. // go away. This ensures the section object pointers will stick
  2109. // around even after the calls below so we can safely reloop to
  2110. // flush any other remaining control areas.
  2111. //
  2112. ASSERT (ControlArea->u.Flags.GlobalOnlyPerSession == 1);
  2113. Next = ((PLARGE_CONTROL_AREA)ControlArea)->UserGlobalList.Flink;
  2114. LargeControlArea = CONTAINING_RECORD (Next,
  2115. LARGE_CONTROL_AREA,
  2116. UserGlobalList);
  2117. ASSERT (LargeControlArea->u.Flags.GlobalOnlyPerSession == 1);
  2118. LargeControlArea->NumberOfSectionReferences += 1;
  2119. }
  2120. //
  2121. // This is a page file backed or image segment. The segment is being
  2122. // deleted, remove all references to the paging file and physical
  2123. // memory.
  2124. //
  2125. UNLOCK_PFN (OldIrql);
  2126. MiCleanSection (ControlArea, TRUE);
  2127. //
  2128. // Get the next Hydra control area.
  2129. //
  2130. if (LargeControlArea != NULL) {
  2131. state = MiCheckControlAreaStatus (CheckImageSection,
  2132. SectionPointer,
  2133. FALSE,
  2134. &ControlArea,
  2135. &OldIrql);
  2136. if (!ControlArea) {
  2137. LOCK_PFN (OldIrql);
  2138. LargeControlArea->NumberOfSectionReferences -= 1;
  2139. MiCheckControlArea ((PCONTROL_AREA)LargeControlArea,
  2140. NULL,
  2141. OldIrql);
  2142. }
  2143. else {
  2144. LargeControlArea->NumberOfSectionReferences -= 1;
  2145. MiCheckControlArea ((PCONTROL_AREA)LargeControlArea,
  2146. NULL,
  2147. OldIrql);
  2148. LOCK_PFN (OldIrql);
  2149. }
  2150. }
  2151. else {
  2152. state = TRUE;
  2153. break;
  2154. }
  2155. } while (ControlArea);
  2156. return (BOOLEAN) state;
  2157. }
  2158. VOID
  2159. MiFlushDirtyBitsToPfn (
  2160. IN PMMPTE PointerPte,
  2161. IN PMMPTE LastPte,
  2162. IN PEPROCESS Process,
  2163. IN BOOLEAN SystemCache
  2164. )
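/*++
Routine Description:
This routine examines the specified range of PTEs and, for each valid PTE
whose hardware dirty bit is set, transfers the dirty state to the PFN
database (marking the physical page as modified), clears the dirty bit in
the PTE and records the virtual address so the relevant TB entries can be
flushed in batches. Ranges whose page directory parent, page directory or
page table pages are not resident are skipped.
Arguments:
PointerPte - Supplies the first PTE to examine.
LastPte - Supplies the last PTE to examine (inclusive).
Process - Supplies the relevant process (passed through when validating
paging hierarchy pages for the range).
SystemCache - Supplies TRUE if the range maps the system cache.
Return Value:
None.
--*/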
  2165. {
  2166. KIRQL OldIrql;
  2167. MMPTE PteContents;
  2168. PMMPFN Pfn1;
  2169. PVOID Va;
  2170. PMMPTE PointerPde;
  2171. PMMPTE PointerPpe;
  2172. PMMPTE PointerPxe;
  2173. ULONG Waited;
  2174. MMPTE_FLUSH_LIST PteFlushList;
  2175. PteFlushList.Count = 0;
  2176. Va = MiGetVirtualAddressMappedByPte (PointerPte);
  2177. LOCK_PFN (OldIrql);
  2178. while (PointerPte <= LastPte) {
  2179. PteContents = *PointerPte;
  2180. if ((PteContents.u.Hard.Valid == 1) &&
  2181. (MI_IS_PTE_DIRTY (PteContents))) {
  2182. //
  2183. // Flush the modify bit to the PFN database.
  2184. //
  2185. Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
  2186. MI_SET_MODIFIED (Pfn1, 1, 0x2);
  2187. MI_SET_PTE_CLEAN (PteContents);
  2188. //
  2189. // No need to capture the PTE contents as we are going to
  2190. // write the page anyway and the Modify bit will be cleared
  2191. // before the write is done.
  2192. //
  2193. MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, PteContents);
  2194. if (PteFlushList.Count < MM_MAXIMUM_FLUSH_COUNT) {
  2195. PteFlushList.FlushVa[PteFlushList.Count] = Va;
  2196. PteFlushList.Count += 1;
  2197. }
  2198. }
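//
// Note only the virtual address is recorded here - the TB flush for the
// accumulated addresses is issued at page table boundaries and at the end
// of the range. If more than MM_MAXIMUM_FLUSH_COUNT addresses accumulate,
// the extras are not recorded and MiFlushPteList is assumed to fall back
// to flushing the entire TB.
//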
  2199. Va = (PVOID)((PCHAR)Va + PAGE_SIZE);
  2200. PointerPte += 1;
  2201. if (MiIsPteOnPdeBoundary (PointerPte)) {
  2202. if (PteFlushList.Count != 0) {
  2203. MiFlushPteList (&PteFlushList, SystemCache);
  2204. PteFlushList.Count = 0;
  2205. }
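//
// A page table boundary has been crossed. Walk down the paging
// hierarchy (PXE, then PPE, then PDE) for the new address and skip
// wholesale over any range whose mapping structures are not resident,
// since no PTE in such a range can be valid.
//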
  2206. PointerPde = MiGetPteAddress (PointerPte);
  2207. while (PointerPte <= LastPte) {
  2208. PointerPxe = MiGetPdeAddress (PointerPde);
  2209. PointerPpe = MiGetPteAddress (PointerPde);
  2210. if (!MiDoesPxeExistAndMakeValid (PointerPxe,
  2211. Process,
  2212. OldIrql,
  2213. &Waited)) {
  2214. //
  2215. // No page directory parent page exists for this address.
  2216. //
  2217. PointerPxe += 1;
  2218. PointerPpe = MiGetVirtualAddressMappedByPte (PointerPxe);
  2219. PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe);
  2220. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  2221. }
  2222. else if (!MiDoesPpeExistAndMakeValid (PointerPpe,
  2223. Process,
  2224. OldIrql,
  2225. &Waited)) {
  2226. //
  2227. // No page directory page exists for this address.
  2228. //
  2229. PointerPpe += 1;
  2230. PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe);
  2231. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  2232. }
  2233. else {
  2234. Waited = 0;
  2235. if (!MiDoesPdeExistAndMakeValid (PointerPde,
  2236. Process,
  2237. OldIrql,
  2238. &Waited)) {
  2239. //
  2240. // No page table page exists for this address.
  2241. //
  2242. PointerPde += 1;
  2243. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  2244. }
  2245. else {
  2246. //
  2247. // If the PFN lock (and accordingly the WS mutex) was
  2248. // released and reacquired we must retry the operation.
  2249. //
  2250. if (Waited != 0) {
  2251. continue;
  2252. }
  2253. //
  2254. // The PFN lock has been held since we acquired the
2255. // page directory parent, i.e., this PTE can be operated on
2256. // immediately.
  2257. //
  2258. break;
  2259. }
  2260. }
  2261. }
  2262. Va = MiGetVirtualAddressMappedByPte (PointerPte);
  2263. }
  2264. }
  2265. if (PteFlushList.Count != 0) {
  2266. MiFlushPteList (&PteFlushList, SystemCache);
  2267. }
  2268. UNLOCK_PFN (OldIrql);
  2269. return;
  2270. }
  2271. PSUBSECTION
  2272. MiGetSystemCacheSubsection (
  2273. IN PVOID BaseAddress,
  2274. OUT PMMPTE *ProtoPte
  2275. )
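/*++
Routine Description:
Returns the subsection that maps the specified system cache address and,
via ProtoPte, the prototype PTE which the system cache PTE refers to.
Arguments:
BaseAddress - Supplies the system cache virtual address.
ProtoPte - Receives the prototype PTE mapped by the address.
Return Value:
The subsection mapping the address.
--*/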
  2276. {
  2277. PMMPTE PointerPte;
  2278. PSUBSECTION Subsection;
  2279. PointerPte = MiGetPteAddress (BaseAddress);
  2280. Subsection = MiGetSubsectionAndProtoFromPte (PointerPte, ProtoPte);
  2281. return Subsection;
  2282. }
  2283. LOGICAL
  2284. MiCheckProtoPtePageState (
  2285. IN PMMPTE PrototypePte,
  2286. IN KIRQL OldIrql,
  2287. OUT PLOGICAL DroppedPfnLock
  2288. )
  2289. /*++
  2290. Routine Description:
  2291. Checks the state of the page containing the specified prototype PTE.
  2292. If the page is valid or transition and has transition or valid prototype
2293. PTEs contained within it, TRUE is returned and the page is made valid
  2294. (if transition). Otherwise return FALSE indicating no prototype
  2295. PTEs within this page are of interest.
  2296. Arguments:
  2297. PrototypePte - Supplies a pointer to a prototype PTE within the page.
  2298. OldIrql - Supplies the IRQL the caller acquired the PFN lock at or MM_NOIRQL
  2299. if the caller does not hold the PFN lock.
2300. DroppedPfnLock - Supplies a pointer to a LOGICAL that this routine sets to
2301. TRUE if the PFN lock is released and reacquired.
  2302. Return Value:
  2303. TRUE if the page containing the proto PTE was made resident.
  2304. FALSE otherwise.
  2305. --*/
  2306. {
  2307. PMMPTE PointerPte;
  2308. MMPTE PteContents;
  2309. PFN_NUMBER PageFrameIndex;
  2310. PMMPFN Pfn;
  2311. *DroppedPfnLock = FALSE;
  2312. #if (_MI_PAGING_LEVELS >= 3)
  2313. //
  2314. // First check whether the page directory page is present. Since there
  2315. // is no lazy loading of PPEs, the validity check alone is sufficient.
  2316. //
  2317. PointerPte = MiGetPdeAddress (PrototypePte);
  2318. PteContents = *PointerPte;
  2319. if (PteContents.u.Hard.Valid == 0) {
  2320. return FALSE;
  2321. }
  2322. #endif
  2323. PointerPte = MiGetPteAddress (PrototypePte);
  2324. #if (_MI_PAGING_LEVELS < 3)
  2325. if (PointerPte->u.Hard.Valid == 0) {
  2326. MiCheckPdeForPagedPool (PrototypePte);
  2327. }
  2328. #endif
  2329. PteContents = *PointerPte;
  2330. if (PteContents.u.Hard.Valid == 1) {
  2331. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
  2332. Pfn = MI_PFN_ELEMENT (PageFrameIndex);
  2333. if (Pfn->u2.ShareCount != 1) {
  2334. return TRUE;
  2335. }
  2336. }
  2337. else if ((PteContents.u.Soft.Prototype == 0) &&
  2338. (PteContents.u.Soft.Transition == 1)) {
  2339. //
  2340. // Transition, if on standby or modified, return FALSE.
  2341. //
  2342. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents);
  2343. Pfn = MI_PFN_ELEMENT (PageFrameIndex);
  2344. if (Pfn->u3.e1.PageLocation >= ActiveAndValid) {
  2345. if (OldIrql != MM_NOIRQL) {
  2346. MiMakeSystemAddressValidPfn (PrototypePte, OldIrql);
  2347. *DroppedPfnLock = TRUE;
  2348. }
  2349. return TRUE;
  2350. }
  2351. }
  2352. //
  2353. // Page is not resident or is on standby / modified list.
  2354. //
  2355. return FALSE;
  2356. }