Source code of Windows XP (NT5)


  1. /*++
  2. Copyright (c) 1989 Microsoft Corporation
  3. Module Name:
  4. mmfault.c
  5. Abstract:
  6. This module contains the handlers for access check, page faults
  7. and write faults.
  8. Author:
  9. Lou Perazzoli (loup) 6-Apr-1989
  10. Landy Wang (landyw) 02-June-1997
  11. Revision History:
  12. --*/
  13. #include "mi.h"
  14. #define PROCESS_FOREGROUND_PRIORITY (9)
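//
// Note: PROCESS_FOREGROUND_PRIORITY is the base-priority cutoff used by the
// modified-page-list throttle later in MmAccessFault - faulting threads in
// processes below this priority are delayed for MmHalfSecond, while
// foreground-priority (and higher) processes are delayed for only
// Mm30Milliseconds.
//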
  15. LONG MiDelayPageFaults;
  16. #if DBG
  17. ULONG MmProtoPteVadLookups = 0;
  18. ULONG MmProtoPteDirect = 0;
  19. ULONG MmAutoEvaluate = 0;
  20. PMMPTE MmPteHit = NULL;
  21. extern PVOID PsNtosImageEnd;
  22. ULONG MmInjectUserInpageErrors;
  23. ULONG MmInjectedUserInpageErrors;
  24. ULONG MmInpageFraction = 0x1F; // Fail 1 out of every 32 inpages.
  25. #define MI_INPAGE_BACKTRACE_LENGTH 6
  26. typedef struct _MI_INPAGE_TRACES {
  27. PVOID InstructionPointer;
  28. PETHREAD Thread;
  29. PVOID StackTrace [MI_INPAGE_BACKTRACE_LENGTH];
  30. } MI_INPAGE_TRACES, *PMI_INPAGE_TRACES;
  31. #define MI_INPAGE_TRACE_SIZE 64
  32. LONG MiInpageIndex;
  33. MI_INPAGE_TRACES MiInpageTraces[MI_INPAGE_TRACE_SIZE];
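//
// Overview of the DBG-only inpage error injection support declared above
// (derived from its use in MmAccessFault below): when MmInjectUserInpageErrors
// is set, user inpage faults become candidates for an injected failure
// whenever (TickCount.LowPart & MmInpageFraction) == 0 - with the default
// mask of 0x1F that is roughly 1 out of every 32 faults. Each injected
// failure is recorded by MiSnapInPageError in MiInpageTraces, which is used
// as a circular buffer; the index is masked with (MI_INPAGE_TRACE_SIZE - 1),
// so the table size must be a power of two.
//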
  34. VOID
  35. FORCEINLINE
  36. MiSnapInPageError (
  37. IN PVOID InstructionPointer
  38. )
  39. {
  40. PMI_INPAGE_TRACES Information;
  41. ULONG Index;
  42. ULONG Hash;
  43. Index = InterlockedIncrement(&MiInpageIndex);
  44. Index &= (MI_INPAGE_TRACE_SIZE - 1);
  45. Information = &MiInpageTraces[Index];
  46. Information->InstructionPointer = InstructionPointer;
  47. Information->Thread = PsGetCurrentThread ();
  48. RtlZeroMemory (&Information->StackTrace[0], MI_INPAGE_BACKTRACE_LENGTH * sizeof(PVOID));
  49. RtlCaptureStackBackTrace (0, MI_INPAGE_BACKTRACE_LENGTH, Information->StackTrace, &Hash);
  50. }
  51. #endif
  52. NTSTATUS
  53. MmAccessFault (
  54. IN ULONG_PTR FaultStatus,
  55. IN PVOID VirtualAddress,
  56. IN KPROCESSOR_MODE PreviousMode,
  57. IN PVOID TrapInformation
  58. )
  59. /*++
  60. Routine Description:
  61. This function is called by the kernel on data or instruction
  62. access faults. The access fault was detected due to either
  63. an access violation, a PTE with the present bit clear, or a
  64. valid PTE with the dirty bit clear and a write operation.
  65. Also note that the access violation and the page fault could
  66. occur because of the Page Directory Entry contents as well.
  67. This routine determines what type of fault it is and calls
  68. the appropriate routine to handle the page fault or the write
  69. fault.
  70. Arguments:
  71. FaultStatus - Supplies fault status information bits.
  72. VirtualAddress - Supplies the virtual address which caused the fault.
  73. PreviousMode - Supplies the mode (kernel or user) in which the fault
  74. occurred.
  75. TrapInformation - Opaque information about the trap, interpreted by the
  76. kernel, not Mm. Needed to allow fast interlocked access
  77. to operate correctly.
  78. Return Value:
  79. Returns the status of the fault handling operation. Can be one of:
  80. - Success.
  81. - Access Violation.
  82. - Guard Page Violation.
  83. - In-page Error.
  84. Environment:
  85. Kernel mode, APCs disabled.
  86. --*/
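//
// Caller-side sketch (illustrative only - no specific trap handler is named
// here and the variable names are placeholders): the architecture-specific
// trap handlers forward page faults to this routine and raise an exception
// when it does not return success, roughly:
//
//      status = MmAccessFault (FaultStatus, Va, PreviousMode, TrapFrame);
//      if (!NT_SUCCESS (status)) {
//          ... dispatch an access violation / in-page error exception ...
//      }
//
// Unresolvable faults taken at IRQL above APC_LEVEL are returned as
// (STATUS_IN_PAGE_ERROR | 0x10000000); the reserved bit tells the trap
// handler that the error is fatal (see the uses below).
//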
  87. {
  88. ULONG ProtoProtect;
  89. PMMPTE PointerPxe;
  90. PMMPTE PointerPpe;
  91. PMMPTE PointerPde;
  92. PMMPTE PointerPte;
  93. PMMPTE PointerProtoPte;
  94. ULONG ProtectionCode;
  95. MMPTE TempPte;
  96. PEPROCESS CurrentProcess;
  97. KIRQL PreviousIrql;
  98. NTSTATUS status;
  99. ULONG ProtectCode;
  100. PFN_NUMBER PageFrameIndex;
  101. WSLE_NUMBER WorkingSetIndex;
  102. KIRQL OldIrql;
  103. PMMPFN Pfn1;
  104. PPAGE_FAULT_NOTIFY_ROUTINE NotifyRoutine;
  105. PEPROCESS FaultProcess;
  106. PMMSUPPORT Ws;
  107. LOGICAL SessionAddress;
  108. PVOID UsedPageTableHandle;
  109. ULONG BarrierStamp;
  110. LOGICAL ApcNeeded;
  111. LOGICAL RecheckAccess;
  112. #if (_MI_PAGING_LEVELS < 3)
  113. NTSTATUS SessionStatus;
  114. #endif
  115. PointerProtoPte = NULL;
  116. //
117. // If the address is not canonical then return an access violation, as the
118. // caller (which may be the kernel debugger) is not expecting to get an
119. // unimplemented address bit fault.
  120. //
  121. if (MI_RESERVED_BITS_CANONICAL(VirtualAddress) == FALSE) {
  122. if (PreviousMode == UserMode) {
  123. return STATUS_ACCESS_VIOLATION;
  124. }
  125. if (KeInvalidAccessAllowed(TrapInformation) == TRUE) {
  126. return STATUS_ACCESS_VIOLATION;
  127. }
  128. KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA,
  129. (ULONG_PTR)VirtualAddress,
  130. FaultStatus,
  131. (ULONG_PTR)TrapInformation,
  132. 4);
  133. }
  134. //
  135. // Block APCs and acquire the working set mutex. This prevents any
  136. // changes to the address space and it prevents valid PTEs from becoming
  137. // invalid.
  138. //
  139. CurrentProcess = PsGetCurrentProcess ();
  140. #if DBG
  141. if (MmDebug & MM_DBG_SHOW_FAULTS) {
  142. PETHREAD CurThread;
  143. CurThread = PsGetCurrentThread();
  144. DbgPrint("MM:**access fault - va %p process %p thread %p\n",
  145. VirtualAddress, CurrentProcess, CurThread);
  146. }
  147. #endif //DBG
  148. PreviousIrql = KeGetCurrentIrql ();
  149. //
  150. // Get the pointer to the PDE and the PTE for this page.
  151. //
  152. PointerPte = MiGetPteAddress (VirtualAddress);
  153. PointerPde = MiGetPdeAddress (VirtualAddress);
  154. PointerPpe = MiGetPpeAddress (VirtualAddress);
  155. PointerPxe = MiGetPxeAddress (VirtualAddress);
  156. #if DBG
  157. if (PointerPte == MmPteHit) {
  158. DbgPrint("MM: PTE hit at %p\n", MmPteHit);
  159. DbgBreakPoint();
  160. }
  161. #endif
  162. ApcNeeded = FALSE;
  163. if (PreviousIrql > APC_LEVEL) {
  164. //
165. // The PFN database lock is an executive spin-lock. The pager could
166. // get dirty faults or lock faults while servicing requests when it
167. // already owns the PFN database lock.
  168. //
  169. #if (_MI_PAGING_LEVELS < 3)
  170. MiCheckPdeForPagedPool (VirtualAddress);
  171. if (PointerPde->u.Hard.Valid == 1) {
  172. if (PointerPde->u.Hard.LargePage == 1) {
  173. return STATUS_SUCCESS;
  174. }
  175. }
  176. #endif
  177. if (
  178. #if (_MI_PAGING_LEVELS >= 4)
  179. (PointerPxe->u.Hard.Valid == 0) ||
  180. #endif
  181. #if (_MI_PAGING_LEVELS >= 3)
  182. (PointerPpe->u.Hard.Valid == 0) ||
  183. #endif
  184. (PointerPde->u.Hard.Valid == 0) ||
  185. (PointerPte->u.Hard.Valid == 0)) {
  186. KdPrint(("MM:***PAGE FAULT AT IRQL > 1 Va %p, IRQL %lx\n",
  187. VirtualAddress,
  188. PreviousIrql));
  189. if (TrapInformation != NULL) {
  190. MI_DISPLAY_TRAP_INFORMATION (TrapInformation);
  191. }
  192. //
  193. // use reserved bit to signal fatal error to trap handlers
  194. //
  195. return STATUS_IN_PAGE_ERROR | 0x10000000;
  196. }
  197. if ((MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus)) &&
  198. (PointerPte->u.Hard.CopyOnWrite != 0)) {
  199. KdPrint(("MM:***PAGE FAULT AT IRQL > 1 Va %p, IRQL %lx\n",
  200. VirtualAddress,
  201. PreviousIrql));
  202. if (TrapInformation != NULL) {
  203. MI_DISPLAY_TRAP_INFORMATION (TrapInformation);
  204. }
  205. //
  206. // use reserved bit to signal fatal error to trap handlers
  207. //
  208. return STATUS_IN_PAGE_ERROR | 0x10000000;
  209. }
  210. //
  211. // The PTE is valid and accessible, another thread must
  212. // have faulted the PTE in already, or the access bit
213. // is clear and this is an access fault; blindly set the
  214. // access bit and dismiss the fault.
  215. //
  216. #if DBG
  217. if (MmDebug & MM_DBG_SHOW_FAULTS) {
  218. DbgPrint("MM:no fault found - PTE is %p\n", PointerPte->u.Long);
  219. }
  220. #endif
  221. //
  222. // If PTE mappings with various protections are active and the faulting
  223. // address lies within these mappings, resolve the fault with
  224. // the appropriate protections.
  225. //
  226. if (!IsListEmpty (&MmProtectedPteList)) {
  227. if (MiCheckSystemPteProtection (
  228. MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus),
  229. VirtualAddress) == TRUE) {
  230. return STATUS_SUCCESS;
  231. }
  232. }
  233. if (MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus)) {
  234. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  235. if (((PointerPte->u.Long & MM_PTE_WRITE_MASK) == 0) &&
  236. ((Pfn1->OriginalPte.u.Soft.Protection & MM_READWRITE) == 0)) {
  237. KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY,
  238. (ULONG_PTR)VirtualAddress,
  239. (ULONG_PTR)PointerPte->u.Long,
  240. (ULONG_PTR)TrapInformation,
  241. 10);
  242. }
  243. }
  244. MI_NO_FAULT_FOUND (FaultStatus, PointerPte, VirtualAddress, FALSE);
  245. return STATUS_SUCCESS;
  246. }
  247. if (VirtualAddress >= MmSystemRangeStart) {
  248. //
  249. // This is a fault in the system address space. User
  250. // mode access is not allowed.
  251. //
  252. if (PreviousMode == UserMode) {
  253. return STATUS_ACCESS_VIOLATION;
  254. }
  255. #if (_MI_PAGING_LEVELS >= 4)
  256. if (PointerPxe->u.Hard.Valid == 0) {
  257. if (KeInvalidAccessAllowed(TrapInformation) == TRUE) {
  258. return STATUS_ACCESS_VIOLATION;
  259. }
  260. KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA,
  261. (ULONG_PTR)VirtualAddress,
  262. FaultStatus,
  263. (ULONG_PTR)TrapInformation,
  264. 7);
  265. }
  266. #endif
  267. #if (_MI_PAGING_LEVELS >= 3)
  268. if (PointerPpe->u.Hard.Valid == 0) {
  269. if (KeInvalidAccessAllowed(TrapInformation) == TRUE) {
  270. return STATUS_ACCESS_VIOLATION;
  271. }
  272. KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA,
  273. (ULONG_PTR)VirtualAddress,
  274. FaultStatus,
  275. (ULONG_PTR)TrapInformation,
  276. 5);
  277. }
  278. #endif
  279. RecheckPde:
  280. if (PointerPde->u.Hard.Valid == 1) {
  281. #ifdef _X86_
  282. if (PointerPde->u.Hard.LargePage == 1) {
  283. return STATUS_SUCCESS;
  284. }
  285. #endif //X86
  286. if (PointerPte->u.Hard.Valid == 1) {
  287. //
288. // Session space faults cannot early exit here because the
289. // fault may be a copy-on-write which must be checked for
  290. // and handled below.
  291. //
  292. if (MI_IS_SESSION_ADDRESS (VirtualAddress) == FALSE) {
  293. //
  294. // If PTE mappings with various protections are active
  295. // and the faulting address lies within these mappings,
  296. // resolve the fault with the appropriate protections.
  297. //
  298. if (!IsListEmpty (&MmProtectedPteList)) {
  299. if (MiCheckSystemPteProtection (
  300. MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus),
  301. VirtualAddress) == TRUE) {
  302. return STATUS_SUCCESS;
  303. }
  304. }
  305. //
  306. // Acquire the PFN lock, check to see if the address is
  307. // still valid if writable, update dirty bit.
  308. //
  309. LOCK_PFN (OldIrql);
  310. TempPte = *(volatile MMPTE *)PointerPte;
  311. if (TempPte.u.Hard.Valid == 1) {
  312. Pfn1 = MI_PFN_ELEMENT (TempPte.u.Hard.PageFrameNumber);
  313. if ((MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus)) &&
  314. ((TempPte.u.Long & MM_PTE_WRITE_MASK) == 0) &&
  315. ((Pfn1->OriginalPte.u.Soft.Protection & MM_READWRITE) == 0)) {
  316. KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY,
  317. (ULONG_PTR)VirtualAddress,
  318. (ULONG_PTR)TempPte.u.Long,
  319. (ULONG_PTR)TrapInformation,
  320. 11);
  321. }
  322. MI_NO_FAULT_FOUND (FaultStatus, PointerPte, VirtualAddress, TRUE);
  323. }
  324. UNLOCK_PFN (OldIrql);
  325. return STATUS_SUCCESS;
  326. }
  327. }
  328. #if (_MI_PAGING_LEVELS < 3)
  329. else {
  330. //
  331. // Handle trimmer references to paged pool PTEs where the PDE
  332. // might not be present. Only needed for
333. // MmTrimAllSystemPagableMemory.
  334. //
  335. MiCheckPdeForPagedPool (VirtualAddress);
  336. TempPte = *(volatile MMPTE *)PointerPte;
  337. if (TempPte.u.Hard.Valid == 1) {
  338. return STATUS_SUCCESS;
  339. }
  340. }
  341. #endif
  342. } else {
  343. //
  344. // Due to G-bits in kernel mode code, accesses to paged pool
  345. // PDEs may not fault even though the PDE is not valid. Make
  346. // sure the PDE is valid so PteFrames in the PFN database are
  347. // tracked properly.
  348. //
  349. #if (_MI_PAGING_LEVELS >= 3)
  350. if ((VirtualAddress >= (PVOID)PTE_BASE) && (VirtualAddress < (PVOID)MiGetPteAddress (HYPER_SPACE))) {
  351. //
  352. // This is a user mode PDE entry being faulted in by the Mm
  353. // referencing the page table page. This needs to be done
  354. // with the working set lock so that the PPE validity can be
  355. // relied on throughout the fault processing.
  356. //
  357. // The case when Mm faults in PPE entries by referencing the
  358. // page directory page is correctly handled by falling through
  359. // the below code.
  360. //
  361. goto UserFault;
  362. }
  363. #if defined (_MIALT4K_)
  364. if ((VirtualAddress >= (PVOID)ALT4KB_PERMISSION_TABLE_START) &&
  365. (VirtualAddress < (PVOID)ALT4KB_PERMISSION_TABLE_END)) {
  366. goto UserFault;
  367. }
  368. #endif
  369. #else
  370. MiCheckPdeForPagedPool (VirtualAddress);
  371. #endif
  372. if (PointerPde->u.Hard.Valid == 0) {
  373. if (KeInvalidAccessAllowed(TrapInformation) == TRUE) {
  374. return STATUS_ACCESS_VIOLATION;
  375. }
  376. KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA,
  377. (ULONG_PTR)VirtualAddress,
  378. FaultStatus,
  379. (ULONG_PTR)TrapInformation,
  380. 2);
  381. }
  382. //
  383. // Now that the PDE is valid, go look at the PTE again.
  384. //
  385. goto RecheckPde;
  386. }
  387. #if (_MI_PAGING_LEVELS < 3)
  388. //
  389. // First check to see if it's in the session space data
  390. // structures or page table pages.
  391. //
  392. SessionStatus = MiCheckPdeForSessionSpace (VirtualAddress);
  393. if (SessionStatus == STATUS_ACCESS_VIOLATION) {
  394. //
  395. // This thread faulted on a session space access, but this
  396. // process does not have one. This could be the system
  397. // process attempting to access a working buffer passed
  398. // to it from WIN32K or a driver loaded in session space
  399. // (video, printer, etc).
  400. //
  401. // The system process which contains the worker threads
  402. // NEVER has a session space - if code accidentally queues a
  403. // worker thread that points to a session space buffer, a
  404. // fault will occur. This must be bug checked since drivers
  405. // are responsible for making sure this never occurs.
  406. //
  407. // The only exception to this is when the working set manager
  408. // attaches to a session to age or trim it. However, the
  409. // working set manager will never fault and so the bugcheck
  410. // below is always valid. Note that a worker thread can get
  411. // away with a bad access if it happens while the working set
  412. // manager is attached, but there's really no way to prevent
  413. // this case which is a driver bug anyway.
  414. //
  415. if (KeInvalidAccessAllowed(TrapInformation) == TRUE) {
  416. return STATUS_ACCESS_VIOLATION;
  417. }
  418. KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA,
  419. (ULONG_PTR)VirtualAddress,
  420. FaultStatus,
  421. (ULONG_PTR)TrapInformation,
  422. 6);
  423. }
  424. #endif
  425. //
426. // Fall through to further fault handling.
  427. //
  428. SessionAddress = MI_IS_SESSION_ADDRESS (VirtualAddress);
  429. if (SessionAddress == TRUE ||
  430. ((!MI_IS_PAGE_TABLE_ADDRESS(VirtualAddress)) &&
  431. (!MI_IS_HYPER_SPACE_ADDRESS(VirtualAddress)))) {
  432. if (SessionAddress == FALSE) {
  433. //
  434. // Acquire system working set lock. While this lock
  435. // is held, no pages may go from valid to invalid.
  436. //
  437. // HOWEVER - transition pages may go to valid, but
  438. // may not be added to the working set list. This
  439. // is done in the cache manager support routines to
  440. // shortcut faults on transition prototype PTEs.
  441. //
  442. PETHREAD Thread;
  443. Thread = PsGetCurrentThread();
  444. if (Thread == MmSystemLockOwner) {
  445. if (KeInvalidAccessAllowed(TrapInformation) == TRUE) {
  446. return STATUS_ACCESS_VIOLATION;
  447. }
  448. //
  449. // Recursively trying to acquire the system working set
  450. // fast mutex - cause an IRQL > 1 bug check.
  451. //
  452. return STATUS_IN_PAGE_ERROR | 0x10000000;
  453. }
  454. LOCK_SYSTEM_WS (PreviousIrql, Thread);
  455. }
  456. //
  457. // Note that for session space the below check is done without
  458. // acquiring the session WSL lock. This is because this thread
  459. // may already own it - ie: it may be adding a page to the
  460. // session space working set and the session's working set list is
  461. // not mapped in and causes a fault. The MiCheckPdeForSessionSpace
  462. // call above will fill in the PDE and then we must check the PTE
  463. // below - if that's not present then we couldn't possibly be
  464. // holding the session WSL lock, so we'll acquire it below.
  465. //
  466. #if defined (_X86PAE_)
  467. //
  468. // PAE PTEs are subject to write tearing due to the cache manager
  469. // shortcut routines that insert PTEs without acquiring the working
  470. // set lock. Synchronize here via the PFN lock.
  471. //
  472. LOCK_PFN (OldIrql);
  473. #endif
  474. TempPte = *PointerPte;
  475. #if defined (_X86PAE_)
  476. UNLOCK_PFN (OldIrql);
  477. #endif
  478. //
  479. // If the PTE is valid, make sure we do not have a copy on write.
  480. //
  481. if (TempPte.u.Hard.Valid != 0) {
  482. //
  483. // PTE is already valid, return. Unless it's Hydra where
  484. // kernel mode copy-on-write must be handled properly.
  485. //
  486. LOGICAL FaultHandled;
  487. //
  488. // If PTE mappings with various protections are active
  489. // and the faulting address lies within these mappings,
  490. // resolve the fault with the appropriate protections.
  491. //
  492. if (!IsListEmpty (&MmProtectedPteList)) {
  493. if (MiCheckSystemPteProtection (
  494. MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus),
  495. VirtualAddress) == TRUE) {
  496. return STATUS_SUCCESS;
  497. }
  498. }
  499. FaultHandled = FALSE;
  500. LOCK_PFN (OldIrql);
  501. TempPte = *(volatile MMPTE *)PointerPte;
  502. if (TempPte.u.Hard.Valid == 1) {
  503. Pfn1 = MI_PFN_ELEMENT (TempPte.u.Hard.PageFrameNumber);
  504. if ((MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus)) &&
  505. (TempPte.u.Hard.CopyOnWrite == 0) &&
  506. ((TempPte.u.Long & MM_PTE_WRITE_MASK) == 0) &&
  507. ((Pfn1->OriginalPte.u.Soft.Protection & MM_READWRITE) == 0)) {
  508. KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY,
  509. (ULONG_PTR)VirtualAddress,
  510. (ULONG_PTR)TempPte.u.Long,
  511. (ULONG_PTR)TrapInformation,
  512. 12);
  513. }
  514. //
  515. // Set the dirty bit in the PTE and the page frame.
  516. //
  517. if (SessionAddress == FALSE || TempPte.u.Hard.Write == 1) {
  518. FaultHandled = TRUE;
  519. MI_NO_FAULT_FOUND (FaultStatus, PointerPte, VirtualAddress, TRUE);
  520. }
  521. }
  522. UNLOCK_PFN (OldIrql);
  523. if (SessionAddress == FALSE) {
  524. UNLOCK_SYSTEM_WS (PreviousIrql);
  525. }
  526. if (SessionAddress == FALSE || FaultHandled == TRUE) {
  527. return STATUS_SUCCESS;
  528. }
  529. }
  530. if (SessionAddress == TRUE) {
  531. //
  532. // Acquire the session space working set lock. While this lock
  533. // is held, no session pages may go from valid to invalid.
  534. //
  535. PETHREAD CurrentThread;
  536. CurrentThread = PsGetCurrentThread ();
  537. if (CurrentThread == MmSessionSpace->WorkingSetLockOwner) {
  538. //
  539. // Recursively trying to acquire the session working set
  540. // lock - cause an IRQL > 1 bug check.
  541. //
  542. return STATUS_IN_PAGE_ERROR | 0x10000000;
  543. }
  544. LOCK_SESSION_SPACE_WS (PreviousIrql, CurrentThread);
  545. TempPte = *PointerPte;
  546. //
  547. // The PTE could have become valid while we waited
  548. // for the session space working set lock.
  549. //
  550. if (TempPte.u.Hard.Valid == 1) {
  551. LOCK_PFN (OldIrql);
  552. TempPte = *(volatile MMPTE *)PointerPte;
  553. //
  554. // Check for copy-on-write.
  555. //
  556. if (TempPte.u.Hard.Valid == 1) {
  557. if ((MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus)) &&
  558. (TempPte.u.Hard.Write == 0)) {
  559. //
  560. // Copy on write only for loaded drivers...
  561. //
  562. ASSERT (MI_IS_SESSION_IMAGE_ADDRESS (VirtualAddress));
  563. UNLOCK_PFN (OldIrql);
  564. if (TempPte.u.Hard.CopyOnWrite == 0) {
  565. KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY,
  566. (ULONG_PTR)VirtualAddress,
  567. (ULONG_PTR)TempPte.u.Long,
  568. (ULONG_PTR)TrapInformation,
  569. 13);
  570. }
  571. MiSessionCopyOnWrite (VirtualAddress,
  572. PointerPte);
  573. UNLOCK_SESSION_SPACE_WS (PreviousIrql);
  574. return STATUS_SUCCESS;
  575. }
  576. #if DBG
  577. //
  578. // If we are allowing a store, it better be writable.
  579. //
  580. if (MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus)) {
  581. ASSERT (TempPte.u.Hard.Write == 1);
  582. }
  583. #endif
  584. //
  585. // PTE is already valid, return.
  586. //
  587. MI_NO_FAULT_FOUND (FaultStatus, PointerPte, VirtualAddress, TRUE);
  588. }
  589. UNLOCK_PFN (OldIrql);
  590. UNLOCK_SESSION_SPACE_WS (PreviousIrql);
  591. return STATUS_SUCCESS;
  592. }
  593. }
  594. if (TempPte.u.Soft.Prototype != 0) {
  595. if (MmProtectFreedNonPagedPool == TRUE) {
  596. if (((VirtualAddress >= MmNonPagedPoolStart) &&
  597. (VirtualAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes))) ||
  598. ((VirtualAddress >= MmNonPagedPoolExpansionStart) &&
  599. (VirtualAddress < MmNonPagedPoolEnd))) {
  600. //
  601. // This is an access to previously freed
  602. // non paged pool - bugcheck!
  603. //
  604. if (KeInvalidAccessAllowed(TrapInformation) == TRUE) {
  605. goto AccessViolation;
  606. }
  607. KeBugCheckEx (DRIVER_CAUGHT_MODIFYING_FREED_POOL,
  608. (ULONG_PTR)VirtualAddress,
  609. FaultStatus,
  610. PreviousMode,
  611. 4);
  612. }
  613. }
  614. //
  615. // This is a PTE in prototype format, locate the corresponding
  616. // prototype PTE.
  617. //
  618. PointerProtoPte = MiPteToProto (&TempPte);
  619. if (SessionAddress == TRUE) {
  620. if (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED) {
  621. PointerProtoPte = MiCheckVirtualAddress (VirtualAddress,
  622. &ProtectionCode);
  623. if (PointerProtoPte == NULL) {
  624. UNLOCK_SESSION_SPACE_WS (PreviousIrql);
  625. return STATUS_IN_PAGE_ERROR | 0x10000000;
  626. }
  627. }
  628. else if (TempPte.u.Proto.ReadOnly == 1) {
  629. //
  630. // Writes are not allowed to this page.
  631. //
  632. } else if (MI_IS_SESSION_IMAGE_ADDRESS (VirtualAddress)) {
  633. //
  634. // Copy on write this page.
  635. //
  636. MI_WRITE_INVALID_PTE (PointerPte, PrototypePte);
  637. PointerPte->u.Soft.Protection = MM_EXECUTE_WRITECOPY;
  638. }
  639. }
  640. } else if ((TempPte.u.Soft.Transition == 0) &&
  641. (TempPte.u.Soft.Protection == 0)) {
  642. //
  643. // Page file format. If the protection is ZERO, this
  644. // is a page of free system PTEs - bugcheck!
  645. //
  646. if (KeInvalidAccessAllowed(TrapInformation) == TRUE) {
  647. goto AccessViolation;
  648. }
  649. KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA,
  650. (ULONG_PTR)VirtualAddress,
  651. FaultStatus,
  652. (ULONG_PTR)TrapInformation,
  653. 0);
  654. }
  655. else if (TempPte.u.Soft.Protection == MM_NOACCESS) {
  656. if (KeInvalidAccessAllowed(TrapInformation) == TRUE) {
  657. goto AccessViolation;
  658. }
  659. KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA,
  660. (ULONG_PTR)VirtualAddress,
  661. FaultStatus,
  662. (ULONG_PTR)TrapInformation,
  663. 1);
  664. }
  665. else if (TempPte.u.Soft.Protection == MM_KSTACK_OUTSWAPPED) {
  666. if (KeInvalidAccessAllowed(TrapInformation) == TRUE) {
  667. goto AccessViolation;
  668. }
  669. KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA,
  670. (ULONG_PTR)VirtualAddress,
  671. FaultStatus,
  672. (ULONG_PTR)TrapInformation,
  673. 3);
  674. }
  675. if (SessionAddress == TRUE) {
  676. MM_SESSION_SPACE_WS_LOCK_ASSERT ();
  677. //
  678. // If it's a write to a session space page that is ultimately
  679. // mapped by a prototype PTE, it's a copy-on-write piece of
  680. // a session driver. Since the page isn't even present yet,
  681. // turn the write access into a read access to fault it in.
  682. // We'll get a write fault on the present page when we retry
  683. // the operation at which point we'll sever the copy on write.
  684. //
  685. if ((PointerProtoPte != NULL) &&
  686. (MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus)) &&
  687. (MI_IS_SESSION_IMAGE_ADDRESS (VirtualAddress))) {
  688. MI_CLEAR_FAULT_STATUS (FaultStatus);
  689. }
  690. FaultProcess = HYDRA_PROCESS;
  691. }
  692. else {
  693. FaultProcess = NULL;
  694. if (MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus)) {
  695. if ((TempPte.u.Hard.Valid == 0) && (PointerProtoPte == NULL)) {
  696. if (TempPte.u.Soft.Transition == 1) {
  697. if ((TempPte.u.Trans.Protection & MM_READWRITE) == 0) {
  698. KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY,
  699. (ULONG_PTR)VirtualAddress,
  700. (ULONG_PTR)TempPte.u.Long,
  701. (ULONG_PTR)TrapInformation,
  702. 14);
  703. }
  704. }
  705. else {
  706. if ((TempPte.u.Soft.Protection & MM_READWRITE) == 0) {
  707. KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY,
  708. (ULONG_PTR)VirtualAddress,
  709. (ULONG_PTR)TempPte.u.Long,
  710. (ULONG_PTR)TrapInformation,
  711. 15);
  712. }
  713. }
  714. }
  715. }
  716. }
  717. status = MiDispatchFault (FaultStatus,
  718. VirtualAddress,
  719. PointerPte,
  720. PointerProtoPte,
  721. FaultProcess,
  722. &ApcNeeded);
  723. ASSERT (ApcNeeded == FALSE);
  724. ASSERT (KeGetCurrentIrql() == APC_LEVEL);
  725. if (SessionAddress == TRUE) {
  726. Ws = &MmSessionSpace->Vm;
  727. PageFrameIndex = Ws->PageFaultCount;
  728. MM_SESSION_SPACE_WS_LOCK_ASSERT();
  729. }
  730. else {
  731. Ws = &MmSystemCacheWs;
  732. PageFrameIndex = MmSystemCacheWs.PageFaultCount;
  733. }
  734. if (Ws->Flags.AllowWorkingSetAdjustment == MM_GROW_WSLE_HASH) {
  735. MiGrowWsleHash (Ws);
  736. Ws->Flags.AllowWorkingSetAdjustment = TRUE;
  737. }
  738. if (SessionAddress == TRUE) {
  739. UNLOCK_SESSION_SPACE_WS (PreviousIrql);
  740. }
  741. else {
  742. UNLOCK_SYSTEM_WS (PreviousIrql);
  743. }
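//
// PageFrameIndex was loaded above with the working set's PageFaultCount.
// On every 0x1000th fault, if available pages have dropped below
// MmMoreThanEnoughFreePages + 220, delay this thread for MmShortTime
// (unless it is a memory maker) so the modified page writer can catch up
// when the system cache or this session is faulting heavily.
//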
  744. if (((PageFrameIndex & 0xFFF) == 0) &&
  745. (MmAvailablePages < MmMoreThanEnoughFreePages + 220)) {
  746. //
  747. // The system cache or this session is taking too many faults,
  748. // delay execution so the modified page writer gets a quick
  749. // shot and increase the working set size.
  750. //
  751. if (PsGetCurrentThread()->MemoryMaker == 0) {
  752. KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmShortTime);
  753. }
  754. }
  755. PERFINFO_FAULT_NOTIFICATION(VirtualAddress, TrapInformation);
  756. NotifyRoutine = MmPageFaultNotifyRoutine;
  757. if (NotifyRoutine) {
  758. if (status != STATUS_SUCCESS) {
  759. (*NotifyRoutine) (
  760. status,
  761. VirtualAddress,
  762. TrapInformation
  763. );
  764. }
  765. }
  766. return status;
  767. }
  768. #if (_MI_PAGING_LEVELS < 3)
  769. if (MiCheckPdeForPagedPool (VirtualAddress) == STATUS_WAIT_1) {
  770. return STATUS_SUCCESS;
  771. }
  772. #endif
  773. }
  774. #if (_MI_PAGING_LEVELS >= 3)
  775. UserFault:
  776. #endif
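//
// Before taking the process working set lock, throttle processes that are
// flooding the modified page list: if page fault delays have been requested
// (MiDelayPageFaults), or the modified list is at least 100 pages over
// MmModifiedPageMaximum while available memory is below 1MB worth of pages
// and this process has dirtied more than 64KB worth of pages itself, sleep
// briefly (MmHalfSecond for below-foreground-priority processes,
// Mm30Milliseconds otherwise) and reset the per-process count.
//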
  777. if (MiDelayPageFaults ||
  778. ((MmModifiedPageListHead.Total >= (MmModifiedPageMaximum + 100)) &&
  779. (MmAvailablePages < (1024*1024 / PAGE_SIZE)) &&
  780. (CurrentProcess->ModifiedPageCount > ((64*1024)/PAGE_SIZE)))) {
  781. //
  782. // This process has placed more than 64k worth of pages on the modified
  783. // list. Delay for a short period and set the count to zero.
  784. //
  785. KeDelayExecutionThread (KernelMode,
  786. FALSE,
  787. (CurrentProcess->Pcb.BasePriority < PROCESS_FOREGROUND_PRIORITY) ?
  788. (PLARGE_INTEGER)&MmHalfSecond : (PLARGE_INTEGER)&Mm30Milliseconds);
  789. CurrentProcess->ModifiedPageCount = 0;
  790. }
  791. //
  792. // FAULT IN USER SPACE OR PAGE DIRECTORY/PAGE TABLE PAGES.
  793. //
  794. //
  795. // Block APCs and acquire the working set lock.
  796. //
  797. LOCK_WS (CurrentProcess);
  798. #if DBG
  799. if (PreviousMode == KernelMode) {
  800. #if defined(MM_SHARED_USER_DATA_VA)
  801. if (PAGE_ALIGN(VirtualAddress) != (PVOID) MM_SHARED_USER_DATA_VA) {
  802. #endif
  803. LARGE_INTEGER CurrentTime;
  804. ULONG_PTR InstructionPointer;
  805. if ((MmInjectUserInpageErrors & 0x2) ||
  806. (CurrentProcess->Flags & PS_PROCESS_INJECT_INPAGE_ERRORS)) {
  807. KeQueryTickCount(&CurrentTime);
  808. if ((CurrentTime.LowPart & MmInpageFraction) == 0) {
  809. if (TrapInformation != NULL) {
  810. #if defined(_X86_)
  811. InstructionPointer = ((PKTRAP_FRAME)TrapInformation)->Eip;
  812. #elif defined(_IA64_)
  813. InstructionPointer = ((PKTRAP_FRAME)TrapInformation)->StIIP;
  814. #elif defined(_AMD64_)
  815. InstructionPointer = ((PKTRAP_FRAME)TrapInformation)->Rip;
  816. #else
  817. error
  818. #endif
  819. if (MmInjectUserInpageErrors & 0x1) {
  820. MmInjectedUserInpageErrors += 1;
  821. MiSnapInPageError ((PVOID)InstructionPointer);
  822. status = STATUS_NO_MEMORY;
  823. goto ReturnStatus2;
  824. }
  825. if ((InstructionPointer >= (ULONG_PTR) PsNtosImageBase) &&
  826. (InstructionPointer < (ULONG_PTR) PsNtosImageEnd)) {
  827. MmInjectedUserInpageErrors += 1;
  828. MiSnapInPageError ((PVOID)InstructionPointer);
  829. status = STATUS_NO_MEMORY;
  830. goto ReturnStatus2;
  831. }
  832. }
  833. }
  834. }
  835. #if defined(MM_SHARED_USER_DATA_VA)
  836. }
  837. #endif
  838. }
  839. #endif
  840. #if (_MI_PAGING_LEVELS >= 4)
  841. //
  842. // Locate the Extended Page Directory Parent Entry which maps this virtual
  843. // address and check for accessibility and validity. The page directory
  844. // parent page must be made valid before any other checks are made.
  845. //
  846. if (PointerPxe->u.Hard.Valid == 0) {
  847. //
  848. // If the PXE is zero, check to see if there is a virtual address
  849. // mapped at this location, and if so create the necessary
  850. // structures to map it.
  851. //
  852. if ((PointerPxe->u.Long == MM_ZERO_PTE) ||
  853. (PointerPxe->u.Long == MM_ZERO_KERNEL_PTE)) {
  854. MiCheckVirtualAddress (VirtualAddress, &ProtectCode);
  855. #ifdef LARGE_PAGES
  856. if (ProtectCode == MM_LARGE_PAGES) {
  857. status = STATUS_SUCCESS;
  858. goto ReturnStatus2;
  859. }
  860. #endif //LARGE_PAGES
  861. if (ProtectCode == MM_NOACCESS) {
  862. status = STATUS_ACCESS_VIOLATION;
  863. if (PointerPxe->u.Hard.Valid == 1) {
  864. status = STATUS_SUCCESS;
  865. }
  866. #if DBG
  867. if ((MmDebug & MM_DBG_STOP_ON_ACCVIO) &&
  868. (status == STATUS_ACCESS_VIOLATION)) {
  869. DbgPrint("MM:access violation - %p\n",VirtualAddress);
  870. MiFormatPte(PointerPxe);
  871. DbgBreakPoint();
  872. }
  873. #endif //DEBUG
  874. goto ReturnStatus2;
  875. }
  876. //
  877. // Build a demand zero PXE and operate on it.
  878. //
  879. #if (_MI_PAGING_LEVELS > 4)
  880. ASSERT (FALSE); // UseCounts will need to be kept.
  881. #endif
  882. *PointerPxe = DemandZeroPde;
  883. }
  884. //
  885. // The PXE is not valid, call the page fault routine passing
  886. // in the address of the PXE. If the PXE is valid, determine
  887. // the status of the corresponding PPE.
  888. //
  889. // Note this call may result in ApcNeeded getting set to TRUE.
  890. // This is deliberate as there may be another call to MiDispatchFault
  891. // issued later in this routine and we don't want to lose the APC
  892. // status.
  893. //
  894. status = MiDispatchFault (TRUE, //page table page always written
  895. PointerPpe, // Virtual address
  896. PointerPxe, // PTE (PXE in this case)
  897. NULL,
  898. CurrentProcess,
  899. &ApcNeeded);
  900. #if DBG
  901. if (ApcNeeded == TRUE) {
  902. ASSERT (PsGetCurrentThread()->NestedFaultCount == 0);
  903. ASSERT (PsGetCurrentThread()->ApcNeeded == 0);
  904. }
  905. #endif
  906. ASSERT (KeGetCurrentIrql() == APC_LEVEL);
  907. if (PointerPxe->u.Hard.Valid == 0) {
  908. //
  909. // The PXE is not valid, return the status.
  910. //
  911. goto ReturnStatus1;
  912. }
  913. MI_SET_PAGE_DIRTY (PointerPxe, PointerPde, FALSE);
  914. //
  915. // Now that the PXE is accessible, get the PPE - let this fall
  916. // through.
  917. //
  918. }
  919. #endif
  920. #if (_MI_PAGING_LEVELS >= 3)
  921. //
  922. // Locate the Page Directory Parent Entry which maps this virtual
  923. // address and check for accessibility and validity. The page directory
  924. // page must be made valid before any other checks are made.
  925. //
  926. if (PointerPpe->u.Hard.Valid == 0) {
  927. //
  928. // If the PPE is zero, check to see if there is a virtual address
  929. // mapped at this location, and if so create the necessary
  930. // structures to map it.
  931. //
  932. if ((PointerPpe->u.Long == MM_ZERO_PTE) ||
  933. (PointerPpe->u.Long == MM_ZERO_KERNEL_PTE)) {
  934. MiCheckVirtualAddress (VirtualAddress, &ProtectCode);
  935. #ifdef LARGE_PAGES
  936. if (ProtectCode == MM_LARGE_PAGES) {
  937. status = STATUS_SUCCESS;
  938. goto ReturnStatus2;
  939. }
  940. #endif //LARGE_PAGES
  941. if (ProtectCode == MM_NOACCESS) {
  942. status = STATUS_ACCESS_VIOLATION;
  943. if (PointerPpe->u.Hard.Valid == 1) {
  944. status = STATUS_SUCCESS;
  945. }
  946. #if DBG
  947. if ((MmDebug & MM_DBG_STOP_ON_ACCVIO) &&
  948. (status == STATUS_ACCESS_VIOLATION)) {
  949. DbgPrint("MM:access violation - %p\n",VirtualAddress);
  950. MiFormatPte(PointerPpe);
  951. DbgBreakPoint();
  952. }
  953. #endif //DEBUG
  954. goto ReturnStatus2;
  955. }
  956. #if (_MI_PAGING_LEVELS >= 4)
  957. //
  958. // Increment the count of non-zero page directory parent entries
  959. // for this page directory parent.
  960. //
  961. if (VirtualAddress <= MM_HIGHEST_USER_ADDRESS) {
  962. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (PointerPde);
  963. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  964. }
  965. #endif
  966. //
  967. // Build a demand zero PPE and operate on it.
  968. //
  969. *PointerPpe = DemandZeroPde;
  970. }
  971. //
  972. // The PPE is not valid, call the page fault routine passing
  973. // in the address of the PPE. If the PPE is valid, determine
  974. // the status of the corresponding PDE.
  975. //
  976. // Note this call may result in ApcNeeded getting set to TRUE.
  977. // This is deliberate as there may be another call to MiDispatchFault
  978. // issued later in this routine and we don't want to lose the APC
  979. // status.
  980. //
  981. status = MiDispatchFault (TRUE, //page table page always written
  982. PointerPde, //Virtual address
  983. PointerPpe, // PTE (PPE in this case)
  984. NULL,
  985. CurrentProcess,
  986. &ApcNeeded);
  987. #if DBG
  988. if (ApcNeeded == TRUE) {
  989. ASSERT (PsGetCurrentThread()->NestedFaultCount == 0);
  990. ASSERT (PsGetCurrentThread()->ApcNeeded == 0);
  991. }
  992. #endif
  993. ASSERT (KeGetCurrentIrql() == APC_LEVEL);
  994. if (PointerPpe->u.Hard.Valid == 0) {
  995. //
  996. // The PPE is not valid, return the status.
  997. //
  998. goto ReturnStatus1;
  999. }
  1000. MI_SET_PAGE_DIRTY (PointerPpe, PointerPde, FALSE);
  1001. //
  1002. // Now that the PPE is accessible, get the PDE - let this fall
  1003. // through.
  1004. //
  1005. }
  1006. #endif
  1007. //
  1008. // Locate the Page Directory Entry which maps this virtual
  1009. // address and check for accessibility and validity.
  1010. //
  1011. //
  1012. // Check to see if the page table page (PDE entry) is valid.
  1013. // If not, the page table page must be made valid first.
  1014. //
  1015. if (PointerPde->u.Hard.Valid == 0) {
  1016. //
  1017. // If the PDE is zero, check to see if there is a virtual address
  1018. // mapped at this location, and if so create the necessary
  1019. // structures to map it.
  1020. //
  1021. if ((PointerPde->u.Long == MM_ZERO_PTE) ||
  1022. (PointerPde->u.Long == MM_ZERO_KERNEL_PTE)) {
  1023. MiCheckVirtualAddress (VirtualAddress, &ProtectCode);
  1024. #ifdef LARGE_PAGES
  1025. if (ProtectCode == MM_LARGE_PAGES) {
  1026. status = STATUS_SUCCESS;
  1027. goto ReturnStatus2;
  1028. }
  1029. #endif
  1030. if (ProtectCode == MM_NOACCESS) {
  1031. status = STATUS_ACCESS_VIOLATION;
  1032. #if (_MI_PAGING_LEVELS < 3)
  1033. MiCheckPdeForPagedPool (VirtualAddress);
  1034. #endif
  1035. if (PointerPde->u.Hard.Valid == 1) {
  1036. status = STATUS_SUCCESS;
  1037. }
  1038. #if DBG
  1039. if ((MmDebug & MM_DBG_STOP_ON_ACCVIO) &&
  1040. (status == STATUS_ACCESS_VIOLATION)) {
  1041. DbgPrint("MM:access violation - %p\n",VirtualAddress);
  1042. MiFormatPte(PointerPde);
  1043. DbgBreakPoint();
  1044. }
  1045. #endif
  1046. goto ReturnStatus2;
  1047. }
  1048. #if (_MI_PAGING_LEVELS >= 3)
  1049. //
  1050. // Increment the count of non-zero page directory entries for this
  1051. // page directory.
  1052. //
  1053. if (VirtualAddress <= MM_HIGHEST_USER_ADDRESS) {
  1054. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (PointerPte);
  1055. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  1056. }
  1057. #endif
  1058. //
  1059. // Build a demand zero PDE and operate on it.
  1060. //
  1061. MI_WRITE_INVALID_PTE (PointerPde, DemandZeroPde);
  1062. }
  1063. //
  1064. // The PDE is not valid, call the page fault routine passing
  1065. // in the address of the PDE. If the PDE is valid, determine
  1066. // the status of the corresponding PTE.
  1067. //
  1068. status = MiDispatchFault (TRUE, //page table page always written
  1069. PointerPte, //Virtual address
  1070. PointerPde, // PTE (PDE in this case)
  1071. NULL,
  1072. CurrentProcess,
  1073. &ApcNeeded);
  1074. #if DBG
  1075. if (ApcNeeded == TRUE) {
  1076. ASSERT (PsGetCurrentThread()->NestedFaultCount == 0);
  1077. ASSERT (PsGetCurrentThread()->ApcNeeded == 0);
  1078. }
  1079. #endif
  1080. ASSERT (KeGetCurrentIrql() == APC_LEVEL);
  1081. #if (_MI_PAGING_LEVELS >= 4)
  1082. //
  1083. // Note that the page directory parent page itself could have been
  1084. // paged out or deleted while MiDispatchFault was executing without
  1085. // the working set lock, so this must be checked for here in the PXE.
  1086. //
  1087. if (PointerPxe->u.Hard.Valid == 0) {
  1088. //
  1089. // The PXE is not valid, return the status.
  1090. //
  1091. goto ReturnStatus1;
  1092. }
  1093. #endif
  1094. #if (_MI_PAGING_LEVELS >= 3)
  1095. //
  1096. // Note that the page directory page itself could have been paged out
  1097. // or deleted while MiDispatchFault was executing without the working
  1098. // set lock, so this must be checked for here in the PPE.
  1099. //
  1100. if (PointerPpe->u.Hard.Valid == 0) {
  1101. //
  1102. // The PPE is not valid, return the status.
  1103. //
  1104. goto ReturnStatus1;
  1105. }
  1106. #endif
  1107. if (PointerPde->u.Hard.Valid == 0) {
  1108. //
  1109. // The PDE is not valid, return the status.
  1110. //
  1111. goto ReturnStatus1;
  1112. }
  1113. MI_SET_PAGE_DIRTY (PointerPde, PointerPte, FALSE);
  1114. //
  1115. // Now that the PDE is accessible, get the PTE - let this fall
  1116. // through.
  1117. //
  1118. }
  1119. //
  1120. // The PDE is valid and accessible, get the PTE contents.
  1121. //
  1122. TempPte = *PointerPte;
  1123. if (TempPte.u.Hard.Valid != 0) {
  1124. //
  1125. // The PTE is valid and accessible, is this a write fault
  1126. // copy on write or setting of some dirty bit?
  1127. //
  1128. #if DBG
  1129. if (MmDebug & MM_DBG_PTE_UPDATE) {
  1130. MiFormatPte(PointerPte);
  1131. }
  1132. #endif
  1133. status = STATUS_SUCCESS;
  1134. if (MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus)) {
  1135. //
  1136. // This was a write operation. If the copy on write
  1137. // bit is set in the PTE perform the copy on write,
  1138. // else check to ensure write access to the PTE.
  1139. //
  1140. if (TempPte.u.Hard.CopyOnWrite != 0) {
  1141. MiCopyOnWrite (VirtualAddress, PointerPte);
  1142. status = STATUS_PAGE_FAULT_COPY_ON_WRITE;
  1143. goto ReturnStatus2;
  1144. } else {
  1145. if (TempPte.u.Hard.Write == 0) {
  1146. status = STATUS_ACCESS_VIOLATION;
  1147. }
  1148. }
  1149. } else if (MI_FAULT_STATUS_INDICATES_EXECUTION(FaultStatus)) {
  1150. //
  1151. // Ensure execute access is enabled in the PTE.
  1152. //
  1153. if (!MI_IS_PTE_EXECUTABLE(&TempPte)) {
  1154. status = STATUS_ACCESS_VIOLATION;
  1155. }
  1156. } else {
  1157. //
  1158. // The PTE is valid and accessible, another thread must
  1159. // have faulted the PTE in already, or the access bit
1160. // is clear and this is an access fault; blindly set the
  1161. // access bit and dismiss the fault.
  1162. //
  1163. #if DBG
  1164. if (MmDebug & MM_DBG_SHOW_FAULTS) {
  1165. DbgPrint("MM:no fault found - PTE is %p\n", PointerPte->u.Long);
  1166. }
  1167. #endif
  1168. }
  1169. if (status == STATUS_SUCCESS) {
  1170. LOCK_PFN (OldIrql);
  1171. if (PointerPte->u.Hard.Valid != 0) {
  1172. MI_NO_FAULT_FOUND (FaultStatus, PointerPte, VirtualAddress, TRUE);
  1173. }
  1174. UNLOCK_PFN (OldIrql);
  1175. }
  1176. goto ReturnStatus2;
  1177. }
  1178. //
  1179. // If the PTE is zero, check to see if there is a virtual address
  1180. // mapped at this location, and if so create the necessary
  1181. // structures to map it.
  1182. //
  1183. //
  1184. // Check explicitly for demand zero pages.
  1185. //
  1186. if (TempPte.u.Long == MM_DEMAND_ZERO_WRITE_PTE) {
  1187. MiResolveDemandZeroFault (VirtualAddress,
  1188. PointerPte,
  1189. CurrentProcess,
  1190. 0);
  1191. status = STATUS_PAGE_FAULT_DEMAND_ZERO;
  1192. goto ReturnStatus1;
  1193. }
  1194. RecheckAccess = FALSE;
  1195. if ((TempPte.u.Long == MM_ZERO_PTE) ||
  1196. (TempPte.u.Long == MM_ZERO_KERNEL_PTE)) {
  1197. //
1198. // PTE needs to be evaluated with respect to its virtual
1199. // address descriptor (VAD). At this point there are 3
1200. // possibilities: a bogus address, demand zero, or a reference
1201. // to a prototype PTE.
  1202. //
  1203. PointerProtoPte = MiCheckVirtualAddress (VirtualAddress,
  1204. &ProtectionCode);
  1205. if (ProtectionCode == MM_NOACCESS) {
  1206. status = STATUS_ACCESS_VIOLATION;
  1207. //
  1208. // Check to make sure this is not a page table page for
  1209. // paged pool which needs extending.
  1210. //
  1211. #if (_MI_PAGING_LEVELS < 3)
  1212. MiCheckPdeForPagedPool (VirtualAddress);
  1213. #endif
  1214. if (PointerPte->u.Hard.Valid == 1) {
  1215. status = STATUS_SUCCESS;
  1216. }
  1217. #if DBG
  1218. if ((MmDebug & MM_DBG_STOP_ON_ACCVIO) &&
  1219. (status == STATUS_ACCESS_VIOLATION)) {
  1220. DbgPrint("MM:access vio - %p\n",VirtualAddress);
  1221. MiFormatPte(PointerPte);
  1222. DbgBreakPoint();
  1223. }
  1224. #endif //DEBUG
  1225. goto ReturnStatus2;
  1226. }
  1227. //
  1228. // Increment the count of non-zero page table entries for this
  1229. // page table.
  1230. //
  1231. if (VirtualAddress <= MM_HIGHEST_USER_ADDRESS) {
  1232. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (VirtualAddress);
  1233. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  1234. }
  1235. #if (_MI_PAGING_LEVELS >= 3)
  1236. else if (MI_IS_PAGE_TABLE_ADDRESS(VirtualAddress)) {
  1237. PVOID RealVa;
  1238. RealVa = MiGetVirtualAddressMappedByPte(VirtualAddress);
  1239. if (RealVa <= MM_HIGHEST_USER_ADDRESS) {
  1240. //
  1241. // This is really a page table page. Increment the use count
  1242. // on the appropriate page directory.
  1243. //
  1244. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (VirtualAddress);
  1245. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  1246. }
  1247. }
  1248. #endif
  1249. //
  1250. // Is this page a guard page?
  1251. //
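//
// Guard page handling below: the MM_GUARD_PAGE bit is stripped from the
// PTE protection so the next access to this page will not raise another
// guard page fault, the working set lock is dropped, and
// MiCheckForUserStackOverflow determines whether this was an ordinary user
// stack extension (growing the stack if so) or a guard page violation to
// report to the caller.
//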
  1252. if (ProtectionCode & MM_GUARD_PAGE) {
  1253. //
  1254. // This is a guard page exception.
  1255. //
  1256. PointerPte->u.Soft.Protection = ProtectionCode & ~MM_GUARD_PAGE;
  1257. if (PointerProtoPte != NULL) {
  1258. //
  1259. // This is a prototype PTE, build the PTE to not
  1260. // be a guard page.
  1261. //
  1262. PointerPte->u.Soft.PageFileHigh = MI_PTE_LOOKUP_NEEDED;
  1263. PointerPte->u.Soft.Prototype = 1;
  1264. }
  1265. UNLOCK_WS (CurrentProcess);
  1266. ASSERT (KeGetCurrentIrql() == PreviousIrql);
  1267. if (ApcNeeded == TRUE) {
  1268. ASSERT (PsGetCurrentThread()->NestedFaultCount == 0);
  1269. ASSERT (PsGetCurrentThread()->ApcNeeded == 0);
  1270. ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);
  1271. KeRaiseIrql (APC_LEVEL, &PreviousIrql);
  1272. IoRetryIrpCompletions ();
  1273. KeLowerIrql (PreviousIrql);
  1274. }
  1275. return MiCheckForUserStackOverflow (VirtualAddress);
  1276. }
  1277. if (PointerProtoPte == NULL) {
  1278. //
  1279. // Assert that this is not for a PDE.
  1280. //
  1281. if (PointerPde == MiGetPdeAddress((PVOID)PTE_BASE)) {
  1282. //
  1283. // This PTE is really a PDE, set contents as such.
  1284. //
  1285. MI_WRITE_INVALID_PTE (PointerPte, DemandZeroPde);
  1286. } else {
  1287. PointerPte->u.Soft.Protection = ProtectionCode;
  1288. }
  1289. //
  1290. // If a fork operation is in progress and the faulting thread
  1291. // is not the thread performing the fork operation, block until
  1292. // the fork is completed.
  1293. //
  1294. if (CurrentProcess->ForkInProgress != NULL) {
  1295. if (MiWaitForForkToComplete (CurrentProcess, FALSE) == TRUE) {
  1296. status = STATUS_SUCCESS;
  1297. goto ReturnStatus1;
  1298. }
  1299. }
  1300. LOCK_PFN (OldIrql);
  1301. if (!MiEnsureAvailablePageOrWait (CurrentProcess,
  1302. VirtualAddress)) {
  1303. ULONG Color;
  1304. Color = MI_PAGE_COLOR_VA_PROCESS (VirtualAddress,
  1305. &CurrentProcess->NextPageColor);
  1306. PageFrameIndex = MiRemoveZeroPageIfAny (Color);
  1307. if (PageFrameIndex == 0) {
  1308. PageFrameIndex = MiRemoveAnyPage (Color);
  1309. UNLOCK_PFN (OldIrql);
  1310. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1311. MiZeroPhysicalPage (PageFrameIndex, Color);
  1312. #if MI_BARRIER_SUPPORTED
  1313. //
  1314. // Note the stamping must occur after the page is zeroed.
  1315. //
  1316. MI_BARRIER_STAMP_ZEROED_PAGE (&BarrierStamp);
  1317. Pfn1->u4.PteFrame = BarrierStamp;
  1318. #endif
  1319. LOCK_PFN (OldIrql);
  1320. }
  1321. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1322. CurrentProcess->NumberOfPrivatePages += 1;
  1323. MmInfoCounters.DemandZeroCount += 1;
  1324. //
  1325. // This barrier check is needed after zeroing the page and
  1326. // before setting the PTE valid.
  1327. // Capture it now, check it at the last possible moment.
  1328. //
  1329. BarrierStamp = (ULONG)Pfn1->u4.PteFrame;
  1330. MiInitializePfn (PageFrameIndex, PointerPte, 1);
  1331. UNLOCK_PFN (OldIrql);
  1332. //
  1333. // As this page is demand zero, set the modified bit in the
  1334. // PFN database element and set the dirty bit in the PTE.
  1335. //
  1336. MI_MAKE_VALID_PTE (TempPte,
  1337. PageFrameIndex,
  1338. PointerPte->u.Soft.Protection,
  1339. PointerPte);
  1340. if (TempPte.u.Hard.Write != 0) {
  1341. MI_SET_PTE_DIRTY (TempPte);
  1342. }
  1343. MI_BARRIER_SYNCHRONIZE (BarrierStamp);
  1344. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  1345. ASSERT (Pfn1->u1.Event == 0);
  1346. Pfn1->u1.Event = (PVOID)PsGetCurrentThread();
  1347. WorkingSetIndex = MiLocateAndReserveWsle (&CurrentProcess->Vm);
  1348. MiUpdateWsle (&WorkingSetIndex,
  1349. VirtualAddress,
  1350. MmWorkingSetList,
  1351. Pfn1);
  1352. MI_SET_PTE_IN_WORKING_SET (PointerPte, WorkingSetIndex);
  1353. KeFillEntryTb ((PHARDWARE_PTE)PointerPte,
  1354. VirtualAddress,
  1355. FALSE);
  1356. } else {
  1357. UNLOCK_PFN (OldIrql);
  1358. }
  1359. status = STATUS_PAGE_FAULT_DEMAND_ZERO;
  1360. goto ReturnStatus1;
  1361. }
  1362. //
  1363. // This is a prototype PTE.
  1364. //
  1365. if (ProtectionCode == MM_UNKNOWN_PROTECTION) {
  1366. //
  1367. // The protection field is stored in the prototype PTE.
  1368. //
  1369. PointerPte->u.Long = MiProtoAddressForPte (PointerProtoPte);
  1370. } else {
  1371. MI_WRITE_INVALID_PTE (PointerPte, PrototypePte);
  1372. PointerPte->u.Soft.Protection = ProtectionCode;
  1373. }
  1374. TempPte = *PointerPte;
  1375. } else {
  1376. //
  1377. // The PTE is non-zero and not valid, see if it is a prototype PTE.
  1378. //
  1379. ProtectionCode = MI_GET_PROTECTION_FROM_SOFT_PTE(&TempPte);
  1380. if (TempPte.u.Soft.Prototype != 0) {
  1381. if (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED) {
  1382. #if DBG
  1383. MmProtoPteVadLookups += 1;
  1384. #endif //DBG
  1385. PointerProtoPte = MiCheckVirtualAddress (VirtualAddress,
  1386. &ProtectCode);
  1387. if (PointerProtoPte == NULL) {
  1388. status = STATUS_ACCESS_VIOLATION;
  1389. goto ReturnStatus1;
  1390. }
  1391. } else {
  1392. #if DBG
  1393. MmProtoPteDirect += 1;
  1394. #endif //DBG
  1395. //
  1396. // Protection is in the prototype PTE, indicate an
  1397. // access check should not be performed on the current PTE.
  1398. //
  1399. PointerProtoPte = MiPteToProto (&TempPte);
  1400. ProtectionCode = MM_UNKNOWN_PROTECTION;
  1401. //
  1402. // Check to see if the proto protection has been overridden.
  1403. //
  1404. if (TempPte.u.Proto.ReadOnly != 0) {
  1405. ProtectionCode = MM_READONLY;
  1406. }
  1407. else {
  1408. ProtectionCode = MM_UNKNOWN_PROTECTION;
  1409. if (CurrentProcess->CloneRoot != NULL) {
  1410. RecheckAccess = TRUE;
  1411. }
  1412. }
  1413. }
  1414. }
  1415. }
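//
// At this point ProtectionCode describes the protection to enforce for the
// faulting PTE. MM_UNKNOWN_PROTECTION means the real protection lives in
// the prototype PTE, so no access check is made against the current PTE
// here; for forked processes (CloneRoot != NULL) RecheckAccess was set
// above and the fork clone protection is examined later under the PFN
// lock, where a NO_ACCESS clone protection becomes an access violation.
//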
  1416. if (ProtectionCode != MM_UNKNOWN_PROTECTION) {
  1417. status = MiAccessCheck (PointerPte,
  1418. MI_FAULT_STATUS_INDICATES_WRITE(FaultStatus),
  1419. PreviousMode,
  1420. ProtectionCode,
  1421. FALSE );
  1422. if (status != STATUS_SUCCESS) {
  1423. #if DBG
  1424. if ((MmDebug & MM_DBG_STOP_ON_ACCVIO) && (status == STATUS_ACCESS_VIOLATION)) {
  1425. DbgPrint("MM:access violate - %p\n",VirtualAddress);
  1426. MiFormatPte(PointerPte);
  1427. DbgBreakPoint();
  1428. }
  1429. #endif //DEBUG
  1430. UNLOCK_WS (CurrentProcess);
  1431. ASSERT (KeGetCurrentIrql() == PreviousIrql);
  1432. if (ApcNeeded == TRUE) {
  1433. ASSERT (PsGetCurrentThread()->NestedFaultCount == 0);
  1434. ASSERT (PsGetCurrentThread()->ApcNeeded == 0);
  1435. ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);
  1436. KeRaiseIrql (APC_LEVEL, &PreviousIrql);
  1437. IoRetryIrpCompletions ();
  1438. KeLowerIrql (PreviousIrql);
  1439. }
  1440. //
  1441. // Check to see if this is a guard page violation
  1442. // and if so, should the user's stack be extended.
  1443. //
  1444. if (status == STATUS_GUARD_PAGE_VIOLATION) {
  1445. return MiCheckForUserStackOverflow (VirtualAddress);
  1446. }
  1447. return status;
  1448. }
  1449. }
  1450. //
1451. // Initializing Pfn1 is not needed for
1452. // correctness, but without it the compiler cannot compile this code
1453. // at /W4, which checks for use of uninitialized variables.
  1454. //
  1455. Pfn1 = NULL;
  1456. //
  1457. // This is a page fault, invoke the page fault handler.
  1458. //
  1459. if (PointerProtoPte != NULL) {
  1460. //
  1461. // Lock page containing prototype PTEs in memory by
  1462. // incrementing the reference count for the page.
  1463. //
  1464. ASSERT (!MI_IS_PHYSICAL_ADDRESS(PointerProtoPte));
  1465. PointerPde = MiGetPteAddress (PointerProtoPte);
  1466. LOCK_PFN (OldIrql);
  1467. if (PointerPde->u.Hard.Valid == 0) {
  1468. MiMakeSystemAddressValidPfn (PointerProtoPte);
  1469. }
  1470. if (RecheckAccess == TRUE) {
  1471. //
  1472. // This is a forked process so shared prototype PTEs may actually
  1473. // be fork clone prototypes. These have the protection within the
  1474. // fork clone yet the hardware PTEs always share it. This must be
  1475. // checked here for the case where the NO_ACCESS permission has
  1476. // been put into the fork clone because it would not necessarily
  1477. // be in the hardware PTEs (like it is for normal prototypes).
  1478. //
  1479. // First make sure the proto is in transition or paged out as only
  1480. // these states can be no access.
  1481. //
  1482. if ((PointerProtoPte->u.Hard.Valid == 0) &&
  1483. (PointerProtoPte->u.Soft.Prototype == 0)) {
  1484. ProtoProtect = MI_GET_PROTECTION_FROM_SOFT_PTE (PointerProtoPte);
  1485. if (ProtoProtect == MM_NOACCESS) {
  1486. ASSERT (MiLocateCloneAddress (CurrentProcess, PointerProtoPte) != NULL);
  1487. UNLOCK_PFN (OldIrql);
  1488. UNLOCK_WS (CurrentProcess);
  1489. ASSERT (KeGetCurrentIrql() == PreviousIrql);
  1490. return STATUS_ACCESS_VIOLATION;
  1491. }
  1492. }
  1493. }
  1494. Pfn1 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
  1495. MI_ADD_LOCKED_PAGE_CHARGE(Pfn1, 2);
  1496. Pfn1->u3.e2.ReferenceCount += 1;
  1497. ASSERT (Pfn1->u3.e2.ReferenceCount > 1);
  1498. UNLOCK_PFN (OldIrql);
  1499. }
  1500. status = MiDispatchFault (FaultStatus,
  1501. VirtualAddress,
  1502. PointerPte,
  1503. PointerProtoPte,
  1504. CurrentProcess,
  1505. &ApcNeeded);
  1506. #if DBG
  1507. if (ApcNeeded == TRUE) {
  1508. ASSERT (PsGetCurrentThread()->NestedFaultCount == 0);
  1509. ASSERT (PsGetCurrentThread()->ApcNeeded == 0);
  1510. }
  1511. #endif
  1512. if (PointerProtoPte != NULL) {
  1513. //
  1514. // Unlock page containing prototype PTEs.
  1515. //
  1516. ASSERT (PointerProtoPte != NULL);
  1517. LOCK_PFN (OldIrql);
  1518. //
  1519. // The reference count on the prototype PTE page will always be greater
  1520. // than 1 if it is a genuine prototype PTE pool allocation. However,
  1521. // if it is a fork prototype PTE allocation, it is possible the pool has
  1522. // already been deallocated and in this case, the Pfn1 frame below will
  1523. // be in transition limbo with a share count of 0 and a reference count
  1524. // of 1 awaiting our final dereference below which will put it on the
  1525. // free list.
  1526. //
  1527. ASSERT (Pfn1->u3.e2.ReferenceCount >= 1);
  1528. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (Pfn1, 3);
  1529. UNLOCK_PFN (OldIrql);
  1530. }
  1531. ReturnStatus1:
  1532. ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
  1533. if (CurrentProcess->Vm.Flags.AllowWorkingSetAdjustment == MM_GROW_WSLE_HASH) {
  1534. MiGrowWsleHash (&CurrentProcess->Vm);
  1535. CurrentProcess->Vm.Flags.AllowWorkingSetAdjustment = TRUE;
  1536. }
  1537. ReturnStatus2:
  1538. PageFrameIndex = CurrentProcess->Vm.WorkingSetSize - CurrentProcess->Vm.MinimumWorkingSetSize;
  1539. UNLOCK_WS (CurrentProcess);
  1540. ASSERT (KeGetCurrentIrql() == PreviousIrql);
  1541. if (ApcNeeded == TRUE) {
  1542. ASSERT (PsGetCurrentThread()->NestedFaultCount == 0);
  1543. ASSERT (PsGetCurrentThread()->ApcNeeded == 0);
  1544. ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);
  1545. KeRaiseIrql (APC_LEVEL, &PreviousIrql);
  1546. IoRetryIrpCompletions ();
  1547. KeLowerIrql (PreviousIrql);
  1548. }
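//
// PageFrameIndex was reloaded at ReturnStatus2 with the amount by which
// this process' working set exceeds its minimum. Realtime threads that are
// more than 100 pages over that minimum are delayed below when free pages
// are scarce, giving the trimmer and the modified page writer a chance to
// make pages available.
//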
  1549. if (MmAvailablePages < MmMoreThanEnoughFreePages + 220) {
  1550. if (((SPFN_NUMBER)PageFrameIndex > 100) &&
  1551. (PsGetCurrentThread()->Tcb.Priority >= LOW_REALTIME_PRIORITY)) {
  1552. //
  1553. // This thread is realtime and is well over the process'
  1554. // working set minimum. Delay execution so the trimmer & the
  1555. // modified page writer get a quick shot at making pages.
  1556. //
  1557. KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmShortTime);
  1558. }
  1559. }
  1560. PERFINFO_FAULT_NOTIFICATION(VirtualAddress, TrapInformation);
  1561. NotifyRoutine = MmPageFaultNotifyRoutine;
  1562. if (NotifyRoutine) {
  1563. if (status != STATUS_SUCCESS) {
  1564. (*NotifyRoutine) (
  1565. status,
  1566. VirtualAddress,
  1567. TrapInformation
  1568. );
  1569. }
  1570. }
  1571. return status;
  1572. AccessViolation:
  1573. if (SessionAddress == TRUE) {
  1574. UNLOCK_SESSION_SPACE_WS (PreviousIrql);
  1575. }
  1576. else {
  1577. UNLOCK_SYSTEM_WS (PreviousIrql);
  1578. }
  1579. return STATUS_ACCESS_VIOLATION;
  1580. }