Leaked source code of Windows Server 2003


  1. /*++
  2. Copyright (c) 1990 Microsoft Corporation
  3. Module Name:
  4. mapcache.c
  5. Abstract:
  6. This module contains the routines which implement mapping views
  7. of sections into the system-wide cache.
  8. Author:
  9. Lou Perazzoli (loup) 22-May-1990
  10. Landy Wang (landyw) 02-Jun-1997
  11. Revision History:
  12. --*/
  13. #include "mi.h"
  14. #ifdef ALLOC_PRAGMA
  15. #pragma alloc_text(INIT,MiInitializeSystemCache)
  16. #pragma alloc_text(PAGE,MiAddMappedPtes)
  17. #endif
  18. extern ULONG MmFrontOfList;
  19. #define X256K 0x40000
  20. PMMPTE MmFirstFreeSystemCache;
  21. PMMPTE MmLastFreeSystemCache;
  22. PMMPTE MmSystemCachePteBase;
  23. ULONG MiMapCacheFailures;
  24. LONG
  25. MiMapCacheExceptionFilter (
  26. IN PNTSTATUS Status,
  27. IN PEXCEPTION_POINTERS ExceptionPointer
  28. );
  29. NTSTATUS
  30. MmMapViewInSystemCache (
  31. IN PVOID SectionToMap,
  32. OUT PVOID *CapturedBase,
  33. IN OUT PLARGE_INTEGER SectionOffset,
  34. IN OUT PULONG CapturedViewSize
  35. )
  36. /*++
  37. Routine Description:
  38. This function maps a view in the specified subject process to
  39. the section object. The page protection is identical to that
  40. of the prototype PTE.
  41. This function is a kernel mode interface to allow LPC to map
  42. a section given the section pointer to map.
  43. This routine assumes all arguments have been probed and captured.
  44. Arguments:
  45. SectionToMap - Supplies a pointer to the section object.
  46. BaseAddress - Supplies a pointer to a variable that will receive
  47. the base address of the view. If the initial value
  48. of this argument is not null, then the view will
  49. be allocated starting at the specified virtual
  50. address rounded down to the next 64kb address
  51. boundary. If the initial value of this argument is
  52. null, then the operating system will determine
  53. where to allocate the view using the information
  54. specified by the ZeroBits argument value and the
  55. section allocation attributes (i.e. based and tiled).
  56. SectionOffset - Supplies the offset from the beginning of the section
  57. to the view in bytes. This value must be a multiple
  58. of 256k.
  59. ViewSize - Supplies a pointer to a variable that will receive
  60. the actual size in bytes of the view. The initial
  61. values of this argument specifies the size of the view
  62. in bytes and is rounded up to the next host page size
  63. boundary and must be less than or equal to 256k.
  64. Return Value:
  65. NTSTATUS.
  66. Environment:
  67. Kernel mode, APC_LEVEL or below.
  68. --*/
  69. {
  70. PSECTION Section;
  71. UINT64 PteOffset;
  72. UINT64 LastPteOffset;
  73. KIRQL OldIrql;
  74. PMMPTE PointerPte;
  75. PMMPTE LastPte;
  76. PMMPTE ProtoPte;
  77. PMMPTE LastProto;
  78. PSUBSECTION Subsection;
  79. PCONTROL_AREA ControlArea;
  80. NTSTATUS Status;
  81. ULONG Waited;
  82. MMPTE PteContents;
  83. PFN_NUMBER NumberOfPages;
  84. #if DBG
  85. PMMPTE PointerPte2;
  86. #endif
  87. ASSERT (KeGetCurrentIrql () <= APC_LEVEL);
  88. Section = SectionToMap;
  89. //
  90. // Assert the view size is less than 256k and the section offset
  91. // is aligned on a 256k boundary.
  92. //
  93. ASSERT (*CapturedViewSize <= X256K);
  94. ASSERT ((SectionOffset->LowPart & (X256K - 1)) == 0);
  95. //
  96. // Make sure the section is not an image section or a page file
  97. // backed section.
  98. //
  99. if (Section->u.Flags.Image) {
  100. return STATUS_NOT_MAPPED_DATA;
  101. }
  102. ControlArea = Section->Segment->ControlArea;
  103. ASSERT (*CapturedViewSize != 0);
  104. NumberOfPages = BYTES_TO_PAGES (*CapturedViewSize);
  105. ASSERT (ControlArea->u.Flags.GlobalOnlyPerSession == 0);
  106. if (ControlArea->u.Flags.Rom == 0) {
  107. Subsection = (PSUBSECTION)(ControlArea + 1);
  108. }
  109. else {
  110. Subsection = (PSUBSECTION)((PLARGE_CONTROL_AREA)ControlArea + 1);
  111. }
  112. //
  113. // Calculate the first prototype PTE address.
  114. //
  115. PteOffset = (UINT64)(SectionOffset->QuadPart >> PAGE_SHIFT);
  116. LastPteOffset = PteOffset + NumberOfPages;
  117. //
  118. // Make sure the PTEs are not in the extended part of the segment.
  119. //
  120. while (PteOffset >= (UINT64) Subsection->PtesInSubsection) {
  121. PteOffset -= Subsection->PtesInSubsection;
  122. LastPteOffset -= Subsection->PtesInSubsection;
  123. Subsection = Subsection->NextSubsection;
  124. }
  125. LOCK_PFN (OldIrql);
  126. ASSERT (ControlArea->u.Flags.BeingCreated == 0);
  127. ASSERT (ControlArea->u.Flags.BeingDeleted == 0);
  128. ASSERT (ControlArea->u.Flags.BeingPurged == 0);
  129. //
  130. // Find a free 256k base in the cache.
  131. //
  132. if (MmFirstFreeSystemCache == (PMMPTE)MM_EMPTY_LIST) {
  133. UNLOCK_PFN (OldIrql);
  134. return STATUS_NO_MEMORY;
  135. }
  136. PointerPte = MmFirstFreeSystemCache;
  137. //
  138. // Update next free entry.
  139. //
  140. ASSERT (PointerPte->u.Hard.Valid == 0);
  141. MmFirstFreeSystemCache = MmSystemCachePteBase + PointerPte->u.List.NextEntry;
  142. ASSERT (MmFirstFreeSystemCache <= MiGetPteAddress (MmSystemCacheEnd));
  143. //
  144. // Increment the count of the number of views for the
  145. // section object. This requires the PFN lock to be held.
  146. //
  147. ControlArea->NumberOfMappedViews += 1;
  148. ControlArea->NumberOfSystemCacheViews += 1;
  149. ASSERT (ControlArea->NumberOfSectionReferences != 0);
  150. //
  151. // An unoccupied address range has been found, put the PTEs in
  152. // the range into prototype PTEs.
  153. //
  154. if (ControlArea->FilePointer != NULL) {
  155. //
  156. // Increment the view count for every subsection spanned by this view,
  157. // creating prototype PTEs if needed.
  158. //
  159. // N.B. This call always returns with the PFN lock released !
  160. //
  161. Status = MiAddViewsForSection ((PMSUBSECTION)Subsection,
  162. LastPteOffset,
  163. OldIrql,
  164. &Waited);
  165. ASSERT (KeGetCurrentIrql () <= APC_LEVEL);
  166. if (!NT_SUCCESS (Status)) {
  167. //
  168. // Zero both the next and TB flush stamp PTEs before unmapping so
  169. // the unmap won't hit entries it can't decode.
  170. //
  171. MiMapCacheFailures += 1;
  172. PointerPte->u.List.NextEntry = 0;
  173. (PointerPte + 1)->u.List.NextEntry = 0;
  174. MmUnmapViewInSystemCache (MiGetVirtualAddressMappedByPte (PointerPte),
  175. SectionToMap,
  176. FALSE);
  177. return Status;
  178. }
  179. }
  180. else {
  181. UNLOCK_PFN (OldIrql);
  182. }
  183. if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
  184. KeBugCheckEx (MEMORY_MANAGEMENT,
  185. 0x778,
  186. (ULONG_PTR)PointerPte,
  187. 0,
  188. 0);
  189. }
  190. //
  191. // Check to see if the TB needs to be flushed. Note that due to natural
  192. // TB traffic and the number of system cache views, this is an extremely
  193. // rare operation.
  194. //
  195. if ((PointerPte + 1)->u.List.NextEntry == (KeReadTbFlushTimeStamp() & MM_FLUSH_COUNTER_MASK)) {
  196. KeFlushEntireTb (TRUE, TRUE);
  197. }
  198. //
  199. // Zero this explicitly now since the number of pages may be only 1.
  200. //
  201. (PointerPte + 1)->u.List.NextEntry = 0;
  202. *CapturedBase = MiGetVirtualAddressMappedByPte (PointerPte);
  203. ProtoPte = &Subsection->SubsectionBase[PteOffset];
  204. LastProto = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
  205. LastPte = PointerPte + NumberOfPages;
  206. #if DBG
  207. for (PointerPte2 = PointerPte + 2; PointerPte2 < LastPte; PointerPte2 += 1) {
  208. ASSERT (PointerPte2->u.Long == ZeroKernelPte.u.Long);
  209. }
  210. #endif
  211. while (PointerPte < LastPte) {
  212. if (ProtoPte >= LastProto) {
  213. //
  214. // Handle extended subsections.
  215. //
  216. Subsection = Subsection->NextSubsection;
  217. ProtoPte = Subsection->SubsectionBase;
  218. LastProto = &Subsection->SubsectionBase[
  219. Subsection->PtesInSubsection];
  220. }
  221. PteContents.u.Long = MiProtoAddressForKernelPte (ProtoPte);
  222. MI_WRITE_INVALID_PTE (PointerPte, PteContents);
  223. ASSERT (((ULONG_PTR)PointerPte & (MM_COLOR_MASK << PTE_SHIFT)) ==
  224. (((ULONG_PTR)ProtoPte & (MM_COLOR_MASK << PTE_SHIFT))));
  225. PointerPte += 1;
  226. ProtoPte += 1;
  227. }
  228. return STATUS_SUCCESS;
  229. }
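
The routine above, together with MmUnmapViewInSystemCache further down in this file, forms the map/unmap pair for 256KB system cache views. The following sketch is not part of the leaked file; it only illustrates how a kernel-mode caller might drive that pair using the signatures and constraints documented above. DataSection is assumed to be an already referenced, non-image PSECTION, the offset is 256KB aligned, the view size is nonzero and at most 256KB, and the caller runs at or below APC_LEVEL. MiExampleMapCacheView is a hypothetical name used only for this example.

NTSTATUS
MiExampleMapCacheView (
    IN PSECTION DataSection
    )
{
    PVOID BaseInCache;
    LARGE_INTEGER SectionOffset;
    ULONG ViewSize;
    NTSTATUS Status;

    SectionOffset.QuadPart = 3 * X256K;     // offset must be 256KB aligned
    ViewSize = X256K;                       // nonzero and at most 256KB

    Status = MmMapViewInSystemCache ((PVOID) DataSection,
                                     &BaseInCache,
                                     &SectionOffset,
                                     &ViewSize);

    if (NT_SUCCESS (Status)) {

        //
        // ... access the mapped file data through BaseInCache ...
        //

        //
        // Pass TRUE for AddToFront only if the pages have little reuse
        // value; they are then placed at the front of the standby list.
        //
        MmUnmapViewInSystemCache (BaseInCache, (PVOID) DataSection, FALSE);
    }

    return Status;
}
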
  230. NTSTATUS
  231. MiAddMappedPtes (
  232. IN PMMPTE FirstPte,
  233. IN PFN_NUMBER NumberOfPtes,
  234. IN PCONTROL_AREA ControlArea
  235. )
  236. /*++
  237. Routine Description:
  238. This function maps a view in the current address space to the
  239. specified control area. The page protection is identical to that
  240. of the prototype PTE.
  241. This routine assumes the caller has called MiCheckPurgeAndUpMapCount,
  242. hence the PFN lock is not needed here.
  243. Arguments:
  244. FirstPte - Supplies a pointer to the first PTE of the current address
  245. space to initialize.
  246. NumberOfPtes - Supplies the number of PTEs to initialize.
  247. ControlArea - Supplies the control area to point the PTEs at.
  248. Return Value:
  249. NTSTATUS.
  250. Environment:
  251. Kernel mode.
  252. --*/
  253. {
  254. MMPTE PteContents;
  255. PMMPTE PointerPte;
  256. PMMPTE ProtoPte;
  257. PMMPTE LastProto;
  258. PMMPTE LastPte;
  259. PSUBSECTION Subsection;
  260. NTSTATUS Status;
  261. if ((ControlArea->u.Flags.GlobalOnlyPerSession == 0) &&
  262. (ControlArea->u.Flags.Rom == 0)) {
  263. Subsection = (PSUBSECTION)(ControlArea + 1);
  264. }
  265. else {
  266. Subsection = (PSUBSECTION)((PLARGE_CONTROL_AREA)ControlArea + 1);
  267. }
  268. PointerPte = FirstPte;
  269. ASSERT (NumberOfPtes != 0);
  270. LastPte = FirstPte + NumberOfPtes;
  271. ASSERT (ControlArea->NumberOfMappedViews >= 1);
  272. ASSERT (ControlArea->NumberOfUserReferences >= 1);
  273. ASSERT (ControlArea->u.Flags.HadUserReference == 1);
  274. ASSERT (ControlArea->NumberOfSectionReferences != 0);
  275. ASSERT (ControlArea->u.Flags.BeingCreated == 0);
  276. ASSERT (ControlArea->u.Flags.BeingDeleted == 0);
  277. ASSERT (ControlArea->u.Flags.BeingPurged == 0);
  278. if ((ControlArea->FilePointer != NULL) &&
  279. (ControlArea->u.Flags.Image == 0) &&
  280. (ControlArea->u.Flags.PhysicalMemory == 0)) {
  281. //
  282. // Increment the view count for every subsection spanned by this view.
  283. //
  284. Status = MiAddViewsForSectionWithPfn ((PMSUBSECTION)Subsection,
  285. NumberOfPtes);
  286. if (!NT_SUCCESS (Status)) {
  287. return Status;
  288. }
  289. }
  290. ProtoPte = Subsection->SubsectionBase;
  291. LastProto = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
  292. while (PointerPte < LastPte) {
  293. if (ProtoPte >= LastProto) {
  294. //
  295. // Handle extended subsections.
  296. //
  297. Subsection = Subsection->NextSubsection;
  298. ProtoPte = Subsection->SubsectionBase;
  299. LastProto = &Subsection->SubsectionBase[
  300. Subsection->PtesInSubsection];
  301. }
  302. ASSERT (PointerPte->u.Long == ZeroKernelPte.u.Long);
  303. PteContents.u.Long = MiProtoAddressForKernelPte (ProtoPte);
  304. MI_WRITE_INVALID_PTE (PointerPte, PteContents);
  305. ASSERT (((ULONG_PTR)PointerPte & (MM_COLOR_MASK << PTE_SHIFT)) ==
  306. (((ULONG_PTR)ProtoPte & (MM_COLOR_MASK << PTE_SHIFT))));
  307. PointerPte += 1;
  308. ProtoPte += 1;
  309. }
  310. return STATUS_SUCCESS;
  311. }
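
//
// Note added for orientation (not text from the original file): the loop
// above does not make anything valid. Each view PTE is written with
// MI_WRITE_INVALID_PTE using MiProtoAddressForKernelPte(ProtoPte), i.e. an
// invalid PTE whose contents encode the address of the backing prototype
// PTE, and the ASSERT requires the view PTE and its prototype PTE to agree
// in the MM_COLOR_MASK bits. The pages only become valid later, when the
// addresses are touched and the fault path (or MmCheckCachedPageState
// below) resolves the prototype PTEs.
//
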
  312. VOID
  313. MmUnmapViewInSystemCache (
  314. IN PVOID BaseAddress,
  315. IN PVOID SectionToUnmap,
  316. IN ULONG AddToFront
  317. )
  318. /*++
  319. Routine Description:
  320. This function unmaps a view from the system cache.
  321. NOTE: When this function is called, no pages may be locked in
  322. the cache for the specified view.
  323. Arguments:
  324. BaseAddress - Supplies the base address of the section in the
  325. system cache.
  326. SectionToUnmap - Supplies a pointer to the section which the
  327. base address maps.
  328. AddToFront - Supplies TRUE if the unmapped pages should be
  329. added to the front of the standby list (i.e., their
  330. value in the cache is low). FALSE otherwise.
  331. Return Value:
  332. None.
  333. Environment:
  334. Kernel mode.
  335. --*/
  336. {
  337. PMMPTE PointerPte;
  338. PMMPTE LastPte;
  339. PMMPFN Pfn1;
  340. PMMPFN Pfn2;
  341. PMMPTE FirstPte;
  342. PMMPTE ProtoPte;
  343. PMMPTE PointerPde;
  344. MMPTE ProtoPteContents;
  345. MMPTE PteContents;
  346. KIRQL OldIrql;
  347. WSLE_NUMBER WorkingSetIndex;
  348. PCONTROL_AREA ControlArea;
  349. ULONG WsHeld;
  350. PFN_NUMBER PageFrameIndex;
  351. PFN_NUMBER PageTableFrameIndex;
  352. PMSUBSECTION MappedSubsection;
  353. PMSUBSECTION LastSubsection;
  354. PETHREAD CurrentThread;
  355. #if DBG
  356. PFN_NUMBER i;
  357. PFN_NUMBER j;
  358. PMSUBSECTION SubsectionArray[X256K / PAGE_SIZE];
  359. PMMPTE PteArray[X256K / PAGE_SIZE];
  360. i = 0;
  361. RtlZeroMemory (SubsectionArray, sizeof(SubsectionArray));
  362. RtlCopyMemory (PteArray, MiGetPteAddress (BaseAddress), sizeof (PteArray));
  363. #endif
  364. WsHeld = FALSE;
  365. CurrentThread = PsGetCurrentThread ();
  366. ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
  367. PointerPte = MiGetPteAddress (BaseAddress);
  368. LastPte = PointerPte + (X256K / PAGE_SIZE);
  369. FirstPte = PointerPte;
  370. PageTableFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress (PointerPte));
  371. Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);
  372. //
  373. // Get the control area for the segment which is mapped here.
  374. //
  375. ControlArea = ((PSECTION)SectionToUnmap)->Segment->ControlArea;
  376. LastSubsection = NULL;
  377. ASSERT ((ControlArea->u.Flags.Image == 0) &&
  378. (ControlArea->u.Flags.PhysicalMemory == 0) &&
  379. (ControlArea->u.Flags.GlobalOnlyPerSession == 0));
  380. do {
  381. //
  382. // The cache is organized in chunks of 256k bytes, clear
  383. // the first chunk then check to see if this is the last chunk.
  384. //
  385. // The page table page is always resident for the system cache.
  386. // Check each PTE: it is in one of three states, either valid or
  387. // prototype PTE format or zero.
  388. //
  389. PteContents = *PointerPte;
  390. if (PteContents.u.Hard.Valid == 1) {
  391. //
  392. // The PTE is valid.
  393. //
  394. if (!WsHeld) {
  395. WsHeld = TRUE;
  396. LOCK_SYSTEM_WS (CurrentThread);
  397. continue;
  398. }
  399. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
  400. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  401. WorkingSetIndex = MiLocateWsle (BaseAddress,
  402. MmSystemCacheWorkingSetList,
  403. Pfn1->u1.WsIndex);
  404. MiRemoveWsle (WorkingSetIndex, MmSystemCacheWorkingSetList);
  405. MiReleaseWsle (WorkingSetIndex, &MmSystemCacheWs);
  406. MI_SET_PTE_IN_WORKING_SET (PointerPte, 0);
  407. //
  408. // Decrement the view count for every subsection this view spans.
  409. // But make sure it's only done once per subsection in a given view.
  410. //
  411. // The subsections can only be decremented after all the
  412. // PTEs have been cleared and PFN sharecounts decremented so no
  413. // prototype PTEs will be valid if it is indeed the final subsection
  414. // dereference. This is critical so the dereference segment
  415. // thread doesn't free pool containing valid prototype PTEs.
  416. //
  417. if (ControlArea->FilePointer != NULL) {
  418. ASSERT (Pfn1->u3.e1.PrototypePte);
  419. ASSERT (Pfn1->OriginalPte.u.Soft.Prototype);
  420. if ((LastSubsection != NULL) &&
  421. (Pfn1->PteAddress >= LastSubsection->SubsectionBase) &&
  422. (Pfn1->PteAddress < LastSubsection->SubsectionBase + LastSubsection->PtesInSubsection)) {
  423. NOTHING;
  424. }
  425. else {
  426. MappedSubsection = (PMSUBSECTION)MiGetSubsectionAddress (&Pfn1->OriginalPte);
  427. if (MappedSubsection->ControlArea != ControlArea) {
  428. KeBugCheckEx (MEMORY_MANAGEMENT,
  429. 0x780,
  430. (ULONG_PTR) PointerPte,
  431. (ULONG_PTR) Pfn1,
  432. (ULONG_PTR) Pfn1->OriginalPte.u.Long);
  433. }
  434. ASSERT ((MappedSubsection->NumberOfMappedViews >= 1) ||
  435. (MappedSubsection->u.SubsectionFlags.SubsectionStatic == 1));
  436. if (LastSubsection != MappedSubsection) {
  437. if (LastSubsection != NULL) {
  438. #if DBG
  439. for (j = 0; j < i; j += 1) {
  440. ASSERT (SubsectionArray[j] != MappedSubsection);
  441. }
  442. SubsectionArray[i] = MappedSubsection;
  443. #endif
  444. LOCK_PFN (OldIrql);
  445. MiRemoveViewsFromSection (LastSubsection,
  446. LastSubsection->PtesInSubsection);
  447. UNLOCK_PFN (OldIrql);
  448. }
  449. LastSubsection = MappedSubsection;
  450. }
  451. }
  452. }
  453. LOCK_PFN (OldIrql);
  454. //
  455. // Capture the state of the modified bit for this PTE.
  456. //
  457. MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1);
  458. //
  459. // Decrement the share and valid counts of the page table
  460. // page which maps this PTE.
  461. //
  462. MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);
  463. //
  464. // Decrement the share count for the physical page.
  465. //
  466. #if DBG
  467. if (ControlArea->NumberOfMappedViews == 1) {
  468. ASSERT (Pfn1->u2.ShareCount == 1);
  469. }
  470. #endif
  471. MmFrontOfList = AddToFront;
  472. MiDecrementShareCountInline (Pfn1, PageFrameIndex);
  473. MmFrontOfList = FALSE;
  474. UNLOCK_PFN (OldIrql);
  475. }
  476. else {
  477. ASSERT ((PteContents.u.Long == ZeroKernelPte.u.Long) ||
  478. (PteContents.u.Soft.Prototype == 1));
  479. if (PteContents.u.Soft.Prototype == 1) {
  480. //
  481. // Decrement the view count for every subsection this view
  482. // spans. But make sure it's only done once per subsection
  483. // in a given view.
  484. //
  485. if (ControlArea->FilePointer != NULL) {
  486. ProtoPte = MiPteToProto (&PteContents);
  487. if ((LastSubsection != NULL) &&
  488. (ProtoPte >= LastSubsection->SubsectionBase) &&
  489. (ProtoPte < LastSubsection->SubsectionBase + LastSubsection->PtesInSubsection)) {
  490. NOTHING;
  491. }
  492. else {
  493. PointerPde = MiGetPteAddress (ProtoPte);
  494. LOCK_PFN (OldIrql);
  495. //
  496. // PTE is not valid, check the state of
  497. // the prototype PTE.
  498. //
  499. if (PointerPde->u.Hard.Valid == 0) {
  500. if (WsHeld) {
  501. MiMakeSystemAddressValidPfnSystemWs (ProtoPte,
  502. OldIrql);
  503. }
  504. else {
  505. MiMakeSystemAddressValidPfn (ProtoPte, OldIrql);
  506. }
  507. //
  508. // Page fault occurred, recheck state
  509. // of original PTE.
  510. //
  511. UNLOCK_PFN (OldIrql);
  512. continue;
  513. }
  514. ProtoPteContents = *ProtoPte;
  515. if (ProtoPteContents.u.Hard.Valid == 1) {
  516. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&ProtoPteContents);
  517. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  518. ProtoPte = &Pfn1->OriginalPte;
  519. }
  520. else if ((ProtoPteContents.u.Soft.Transition == 1) &&
  521. (ProtoPteContents.u.Soft.Prototype == 0)) {
  522. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&ProtoPteContents);
  523. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  524. ProtoPte = &Pfn1->OriginalPte;
  525. }
  526. else {
  527. Pfn1 = NULL;
  528. ASSERT (ProtoPteContents.u.Soft.Prototype == 1);
  529. }
  530. MappedSubsection = (PMSUBSECTION)MiGetSubsectionAddress (ProtoPte);
  531. if (MappedSubsection->ControlArea != ControlArea) {
  532. KeBugCheckEx (MEMORY_MANAGEMENT,
  533. 0x781,
  534. (ULONG_PTR) PointerPte,
  535. (ULONG_PTR) Pfn1,
  536. (ULONG_PTR) ProtoPte);
  537. }
  538. ASSERT ((MappedSubsection->NumberOfMappedViews >= 1) ||
  539. (MappedSubsection->u.SubsectionFlags.SubsectionStatic == 1));
  540. if (LastSubsection != MappedSubsection) {
  541. if (LastSubsection != NULL) {
  542. #if DBG
  543. for (j = 0; j < i; j += 1) {
  544. ASSERT (SubsectionArray[j] != MappedSubsection);
  545. }
  546. SubsectionArray[i] = MappedSubsection;
  547. #endif
  548. MiRemoveViewsFromSection (LastSubsection,
  549. LastSubsection->PtesInSubsection);
  550. }
  551. LastSubsection = MappedSubsection;
  552. }
  553. UNLOCK_PFN (OldIrql);
  554. }
  555. }
  556. }
  557. if (WsHeld) {
  558. UNLOCK_SYSTEM_WS ();
  559. WsHeld = FALSE;
  560. }
  561. }
  562. MI_WRITE_INVALID_PTE (PointerPte, ZeroKernelPte);
  563. PointerPte += 1;
  564. BaseAddress = (PVOID)((PCHAR)BaseAddress + PAGE_SIZE);
  565. #if DBG
  566. i += 1;
  567. #endif
  568. } while (PointerPte < LastPte);
  569. if (WsHeld) {
  570. UNLOCK_SYSTEM_WS ();
  571. }
  572. FirstPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
  573. (FirstPte+1)->u.List.NextEntry = (KeReadTbFlushTimeStamp() & MM_FLUSH_COUNTER_MASK);
  574. LOCK_PFN (OldIrql);
  575. //
  576. // Free this entry to the end of the list.
  577. //
  578. MmLastFreeSystemCache->u.List.NextEntry = FirstPte - MmSystemCachePteBase;
  579. MmLastFreeSystemCache = FirstPte;
  580. if (LastSubsection != NULL) {
  581. MiRemoveViewsFromSection (LastSubsection,
  582. LastSubsection->PtesInSubsection);
  583. }
  584. //
  585. // Decrement the number of mapped views for the segment
  586. // and check to see if the segment should be deleted.
  587. //
  588. ControlArea->NumberOfMappedViews -= 1;
  589. ControlArea->NumberOfSystemCacheViews -= 1;
  590. //
  591. // Check to see if the control area (segment) should be deleted.
  592. // This routine releases the PFN lock.
  593. //
  594. MiCheckControlArea (ControlArea, NULL, OldIrql);
  595. return;
  596. }
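
//
// Illustrative note, not part of the original file, assuming the 4KB x86
// page size: each cache view slot spans X256K / PAGE_SIZE == 0x40000 /
// 0x1000 == 64 PTEs. A freed slot is described entirely by its first two
// PTEs, the first stamped MM_EMPTY_PTE_LIST and the second holding the TB
// flush timestamp captured above, and the slot is queued to the tail of
// the free list as a PTE index (FirstPte - MmSystemCachePteBase) rather
// than as a pointer. MmMapViewInSystemCache later compares the saved stamp
// against KeReadTbFlushTimeStamp() and flushes the entire TB only when no
// flush has occurred since the slot was freed.
//
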
  597. VOID
  598. MiRemoveMappedPtes (
  599. IN PVOID BaseAddress,
  600. IN ULONG NumberOfPtes,
  601. IN PCONTROL_AREA ControlArea,
  602. IN PMMSUPPORT Ws
  603. )
  604. /*++
  605. Routine Description:
  606. This function unmaps a view from the system or session view space.
  607. NOTE: When this function is called, no pages may be locked in
  608. the space for the specified view.
  609. Arguments:
  610. BaseAddress - Supplies the base address of the section in the
  611. system or session view space.
  612. NumberOfPtes - Supplies the number of PTEs to unmap.
  613. ControlArea - Supplies the control area mapping the view.
  614. Ws - Supplies the charged working set structures.
  615. Return Value:
  616. None.
  617. Environment:
  618. Kernel mode.
  619. --*/
  620. {
  621. PMMPTE PointerPte;
  622. PMMPTE PointerPde;
  623. PMMPFN Pfn1;
  624. PMMPTE FirstPte;
  625. PMMPTE ProtoPte;
  626. MMPTE PteContents;
  627. KIRQL OldIrql;
  628. WSLE_NUMBER WorkingSetIndex;
  629. ULONG DereferenceSegment;
  630. MMPTE_FLUSH_LIST PteFlushList;
  631. MMPTE ProtoPteContents;
  632. PFN_NUMBER PageFrameIndex;
  633. ULONG WsHeld;
  634. PMMPFN Pfn2;
  635. PFN_NUMBER PageTableFrameIndex;
  636. PMSUBSECTION MappedSubsection;
  637. PMSUBSECTION LastSubsection;
  638. PETHREAD CurrentThread;
  639. CurrentThread = PsGetCurrentThread ();
  640. DereferenceSegment = FALSE;
  641. WsHeld = FALSE;
  642. LastSubsection = NULL;
  643. PteFlushList.Count = 0;
  644. PointerPte = MiGetPteAddress (BaseAddress);
  645. FirstPte = PointerPte;
  646. //
  647. // Get the control area for the segment which is mapped here.
  648. //
  649. while (NumberOfPtes) {
  650. //
  651. // The page table page is always resident for the system space (and
  652. // for a session space) map.
  653. //
  654. // Check each PTE, it is in one of two states, either valid or
  655. // prototype PTE format.
  656. //
  657. PteContents = *PointerPte;
  658. if (PteContents.u.Hard.Valid == 1) {
  659. //
  660. // Lock the working set to prevent races with the trimmer,
  661. // then re-examine the PTE.
  662. //
  663. if (!WsHeld) {
  664. WsHeld = TRUE;
  665. LOCK_WORKING_SET (Ws);
  666. continue;
  667. }
  668. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
  669. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  670. WorkingSetIndex = MiLocateWsle (BaseAddress,
  671. Ws->VmWorkingSetList,
  672. Pfn1->u1.WsIndex);
  673. ASSERT (WorkingSetIndex != WSLE_NULL_INDEX);
  674. MiRemoveWsle (WorkingSetIndex,
  675. Ws->VmWorkingSetList);
  676. MiReleaseWsle (WorkingSetIndex, Ws);
  677. MI_SET_PTE_IN_WORKING_SET (PointerPte, 0);
  678. PointerPde = MiGetPteAddress (PointerPte);
  679. LOCK_PFN (OldIrql);
  680. //
  681. // The PTE is valid.
  682. //
  683. //
  684. // Decrement the view count for every subsection this view spans.
  685. // But make sure it's only done once per subsection in a given view.
  686. //
  687. // The subsections can only be decremented after all the
  688. // PTEs have been cleared and PFN sharecounts decremented so no
  689. // prototype PTEs will be valid if it is indeed the final subsection
  690. // dereference. This is critical so the dereference segment
  691. // thread doesn't free pool containing valid prototype PTEs.
  692. //
  693. if ((Pfn1->u3.e1.PrototypePte) &&
  694. (Pfn1->OriginalPte.u.Soft.Prototype)) {
  695. if ((LastSubsection != NULL) &&
  696. (Pfn1->PteAddress >= LastSubsection->SubsectionBase) &&
  697. (Pfn1->PteAddress < LastSubsection->SubsectionBase + LastSubsection->PtesInSubsection)) {
  698. NOTHING;
  699. }
  700. else {
  701. MappedSubsection = (PMSUBSECTION)MiGetSubsectionAddress (&Pfn1->OriginalPte);
  702. if (LastSubsection != MappedSubsection) {
  703. ASSERT (ControlArea == MappedSubsection->ControlArea);
  704. if ((ControlArea->FilePointer != NULL) &&
  705. (ControlArea->u.Flags.Image == 0) &&
  706. (ControlArea->u.Flags.PhysicalMemory == 0)) {
  707. if (LastSubsection != NULL) {
  708. MiRemoveViewsFromSection (LastSubsection,
  709. LastSubsection->PtesInSubsection);
  710. }
  711. LastSubsection = MappedSubsection;
  712. }
  713. }
  714. }
  715. }
  716. //
  717. // Capture the state of the modified bit for this PTE.
  718. //
  719. MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1);
  720. //
  721. // Flush the TB for this page.
  722. //
  723. if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) {
  724. PteFlushList.FlushVa[PteFlushList.Count] = BaseAddress;
  725. PteFlushList.Count += 1;
  726. }
  727. #if (_MI_PAGING_LEVELS < 3)
  728. //
  729. // The PDE must be carefully checked against the master table
  730. // because the PDEs are all zeroed in process creation. If this
  731. // process has never faulted on any address in this range (all
  732. // references prior and above were filled directly by the TB as
  733. // the PTEs are global on non-Hydra), then the PDE reference
  734. // below to determine the page table frame will be zero.
  735. //
  736. // Note this cannot happen on NT64 as no master table is used.
  737. //
  738. if (PointerPde->u.Long == 0) {
  739. PMMPTE MasterPde;
  740. MasterPde = &MmSystemPagePtes [((ULONG_PTR)PointerPde &
  741. (PD_PER_SYSTEM * (sizeof(MMPTE) * PDE_PER_PAGE) - 1)) / sizeof(MMPTE)];
  742. ASSERT (MasterPde->u.Hard.Valid == 1);
  743. MI_WRITE_VALID_PTE (PointerPde, *MasterPde);
  744. }
  745. #endif
  746. //
  747. // Decrement the share and valid counts of the page table
  748. // page which maps this PTE.
  749. //
  750. PageTableFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPde);
  751. Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);
  752. MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);
  753. //
  754. // Decrement the share count for the physical page.
  755. //
  756. MiDecrementShareCount (Pfn1, PageFrameIndex);
  757. UNLOCK_PFN (OldIrql);
  758. }
  759. else {
  760. ASSERT ((PteContents.u.Long == ZeroKernelPte.u.Long) ||
  761. (PteContents.u.Soft.Prototype == 1));
  762. if (PteContents.u.Soft.Prototype == 1) {
  763. //
  764. // Decrement the view count for every subsection this view
  765. // spans. But make sure it's only done once per subsection
  766. // in a given view.
  767. //
  768. ProtoPte = MiPteToProto (&PteContents);
  769. if ((LastSubsection != NULL) &&
  770. (ProtoPte >= LastSubsection->SubsectionBase) &&
  771. (ProtoPte < LastSubsection->SubsectionBase + LastSubsection->PtesInSubsection)) {
  772. NOTHING;
  773. }
  774. else {
  775. if (WsHeld) {
  776. UNLOCK_WORKING_SET (Ws);
  777. WsHeld = FALSE;
  778. }
  779. //
  780. // PTE is not valid, check the state of the prototype PTE.
  781. //
  782. PointerPde = MiGetPteAddress (ProtoPte);
  783. LOCK_PFN (OldIrql);
  784. if (PointerPde->u.Hard.Valid == 0) {
  785. MiMakeSystemAddressValidPfn (ProtoPte, OldIrql);
  786. //
  787. // Page fault occurred, recheck state of original PTE.
  788. //
  789. UNLOCK_PFN (OldIrql);
  790. continue;
  791. }
  792. ProtoPteContents = *ProtoPte;
  793. if (ProtoPteContents.u.Hard.Valid == 1) {
  794. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&ProtoPteContents);
  795. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  796. ProtoPte = &Pfn1->OriginalPte;
  797. if (ProtoPte->u.Soft.Prototype == 0) {
  798. ProtoPte = NULL;
  799. }
  800. }
  801. else if ((ProtoPteContents.u.Soft.Transition == 1) &&
  802. (ProtoPteContents.u.Soft.Prototype == 0)) {
  803. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&ProtoPteContents);
  804. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  805. ProtoPte = &Pfn1->OriginalPte;
  806. if (ProtoPte->u.Soft.Prototype == 0) {
  807. ProtoPte = NULL;
  808. }
  809. }
  810. else if (ProtoPteContents.u.Soft.Prototype == 1) {
  811. NOTHING;
  812. }
  813. else {
  814. //
  815. // Could be a zero PTE or a demand zero PTE.
  816. // Neither belong to a mapped file.
  817. //
  818. ProtoPte = NULL;
  819. }
  820. if (ProtoPte != NULL) {
  821. MappedSubsection = (PMSUBSECTION)MiGetSubsectionAddress (ProtoPte);
  822. if (LastSubsection != MappedSubsection) {
  823. ASSERT (ControlArea == MappedSubsection->ControlArea);
  824. if ((ControlArea->FilePointer != NULL) &&
  825. (ControlArea->u.Flags.Image == 0) &&
  826. (ControlArea->u.Flags.PhysicalMemory == 0)) {
  827. if (LastSubsection != NULL) {
  828. MiRemoveViewsFromSection (LastSubsection,
  829. LastSubsection->PtesInSubsection);
  830. }
  831. LastSubsection = MappedSubsection;
  832. }
  833. }
  834. }
  835. UNLOCK_PFN (OldIrql);
  836. }
  837. }
  838. }
  839. MI_WRITE_INVALID_PTE (PointerPte, ZeroKernelPte);
  840. PointerPte += 1;
  841. BaseAddress = (PVOID)((PCHAR)BaseAddress + PAGE_SIZE);
  842. NumberOfPtes -= 1;
  843. }
  844. if (WsHeld) {
  845. UNLOCK_WORKING_SET (Ws);
  846. }
  847. if (PteFlushList.Count != 0) {
  848. MiFlushPteList (&PteFlushList, TRUE);
  849. }
  850. if (Ws != &MmSystemCacheWs) {
  851. //
  852. // Session space has no ASN - flush the entire TB.
  853. //
  854. MI_FLUSH_ENTIRE_SESSION_TB (TRUE, TRUE);
  855. }
  856. LOCK_PFN (OldIrql);
  857. if (LastSubsection != NULL) {
  858. MiRemoveViewsFromSection (LastSubsection,
  859. LastSubsection->PtesInSubsection);
  860. }
  861. //
  862. // Decrement the number of user references as the caller upped them
  863. // via MiCheckPurgeAndUpMapCount when this was originally mapped.
  864. //
  865. ControlArea->NumberOfUserReferences -= 1;
  866. //
  867. // Decrement the number of mapped views for the segment
  868. // and check to see if the segment should be deleted.
  869. //
  870. ControlArea->NumberOfMappedViews -= 1;
  871. //
  872. // Check to see if the control area (segment) should be deleted.
  873. // This routine releases the PFN lock.
  874. //
  875. MiCheckControlArea (ControlArea, NULL, OldIrql);
  876. }
  877. VOID
  878. MiInitializeSystemCache (
  879. IN ULONG MinimumWorkingSet,
  880. IN ULONG MaximumWorkingSet
  881. )
  882. /*++
  883. Routine Description:
  884. This routine initializes the system cache working set and
  885. data management structures.
  886. Arguments:
  887. MinimumWorkingSet - Supplies the minimum working set for the system
  888. cache.
  889. MaximumWorkingSet - Supplies the maximum working set size for the
  890. system cache.
  891. Return Value:
  892. None.
  893. Environment:
  894. Kernel mode, called only at phase 0 initialization.
  895. --*/
  896. {
  897. ULONG Color;
  898. ULONG_PTR SizeOfSystemCacheInPages;
  899. ULONG_PTR HunksOf256KInCache;
  900. PMMWSLE WslEntry;
  901. ULONG NumberOfEntriesMapped;
  902. PFN_NUMBER i;
  903. MMPTE PteContents;
  904. PMMPTE PointerPte;
  905. KIRQL OldIrql;
  906. PointerPte = MiGetPteAddress (MmSystemCacheWorkingSetList);
  907. PteContents = ValidKernelPte;
  908. Color = MI_GET_PAGE_COLOR_FROM_PTE (PointerPte);
  909. LOCK_PFN (OldIrql);
  910. i = MiRemoveZeroPage (Color);
  911. PteContents.u.Hard.PageFrameNumber = i;
  912. MI_WRITE_VALID_PTE (PointerPte, PteContents);
  913. MiInitializePfn (i, PointerPte, 1L);
  914. MmResidentAvailablePages -= 1;
  915. UNLOCK_PFN (OldIrql);
  916. #if defined (_WIN64)
  917. MmSystemCacheWsle = (PMMWSLE)(MmSystemCacheWorkingSetList + 1);
  918. #else
  919. MmSystemCacheWsle =
  920. (PMMWSLE)(&MmSystemCacheWorkingSetList->UsedPageTableEntries[0]);
  921. #endif
  922. MmSystemCacheWs.VmWorkingSetList = MmSystemCacheWorkingSetList;
  923. MmSystemCacheWs.WorkingSetSize = 0;
  924. //
  925. // Don't use entry 0 as an index of zero in the PFN database
  926. // means that the page can be assigned to a slot. This is not
  927. // a problem for process working sets as page 0 is private.
  928. //
  929. #if defined (_MI_DEBUG_WSLE)
  930. MmSystemCacheWorkingSetList->Quota = 0;
  931. #endif
  932. MmSystemCacheWorkingSetList->FirstFree = 1;
  933. MmSystemCacheWorkingSetList->FirstDynamic = 1;
  934. MmSystemCacheWorkingSetList->NextSlot = 1;
  935. MmSystemCacheWorkingSetList->HashTable = NULL;
  936. MmSystemCacheWorkingSetList->HashTableSize = 0;
  937. MmSystemCacheWorkingSetList->Wsle = MmSystemCacheWsle;
  938. MmSystemCacheWorkingSetList->HashTableStart =
  939. (PVOID)((PCHAR)PAGE_ALIGN (&MmSystemCacheWorkingSetList->Wsle[MM_MAXIMUM_WORKING_SET]) + PAGE_SIZE);
  940. MmSystemCacheWorkingSetList->HighestPermittedHashAddress = MmSystemCacheStart;
  941. NumberOfEntriesMapped = (ULONG)(((PMMWSLE)((PCHAR)MmSystemCacheWorkingSetList +
  942. PAGE_SIZE)) - MmSystemCacheWsle);
  943. MinimumWorkingSet = NumberOfEntriesMapped - 1;
  944. MmSystemCacheWs.MinimumWorkingSetSize = MinimumWorkingSet;
  945. MmSystemCacheWorkingSetList->LastEntry = MinimumWorkingSet;
  946. if (MaximumWorkingSet <= MinimumWorkingSet) {
  947. MaximumWorkingSet = MinimumWorkingSet + (PAGE_SIZE / sizeof (MMWSLE));
  948. }
  949. MmSystemCacheWs.MaximumWorkingSetSize = MaximumWorkingSet;
  950. //
  951. // Initialize the following slots as free.
  952. //
  953. WslEntry = MmSystemCacheWsle + 1;
  954. for (i = 1; i < NumberOfEntriesMapped; i++) {
  955. //
  956. // Build the free list, note that the first working
  957. // set entries (CurrentEntry) are not on the free list.
  958. // These entries are reserved for the pages which
  959. // map the working set and the page which contains the PDE.
  960. //
  961. WslEntry->u1.Long = (i + 1) << MM_FREE_WSLE_SHIFT;
  962. WslEntry += 1;
  963. }
  964. WslEntry -= 1;
  965. WslEntry->u1.Long = WSLE_NULL_INDEX << MM_FREE_WSLE_SHIFT; // End of list.
  966. MmSystemCacheWorkingSetList->LastInitializedWsle = NumberOfEntriesMapped - 1;
  967. //
  968. // Build a free list structure in the PTEs for the system cache.
  969. //
  970. MmSystemCachePteBase = MI_PTE_BASE_FOR_LOWEST_KERNEL_ADDRESS;
  971. SizeOfSystemCacheInPages = MI_COMPUTE_PAGES_SPANNED (MmSystemCacheStart,
  972. (PCHAR)MmSystemCacheEnd - (PCHAR)MmSystemCacheStart + 1);
  973. HunksOf256KInCache = SizeOfSystemCacheInPages / (X256K / PAGE_SIZE);
  974. PointerPte = MiGetPteAddress (MmSystemCacheStart);
  975. MmFirstFreeSystemCache = PointerPte;
  976. for (i = 0; i < HunksOf256KInCache; i += 1) {
  977. PointerPte->u.List.NextEntry = (PointerPte + (X256K / PAGE_SIZE)) - MmSystemCachePteBase;
  978. PointerPte += X256K / PAGE_SIZE;
  979. }
  980. PointerPte -= X256K / PAGE_SIZE;
  981. #if defined(_X86_)
  982. //
  983. // Add any extended ranges.
  984. //
  985. if (MiSystemCacheEndExtra != MmSystemCacheEnd) {
  986. SizeOfSystemCacheInPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (MiSystemCacheStartExtra,
  987. (PCHAR)MiSystemCacheEndExtra - (PCHAR)MiSystemCacheStartExtra + 1);
  988. HunksOf256KInCache = SizeOfSystemCacheInPages / (X256K / PAGE_SIZE);
  989. if (HunksOf256KInCache) {
  990. PMMPTE PointerPteExtended;
  991. PointerPteExtended = MiGetPteAddress (MiSystemCacheStartExtra);
  992. PointerPte->u.List.NextEntry = PointerPteExtended - MmSystemCachePteBase;
  993. PointerPte = PointerPteExtended;
  994. for (i = 0; i < HunksOf256KInCache; i += 1) {
  995. PointerPte->u.List.NextEntry = (PointerPte + (X256K / PAGE_SIZE)) - MmSystemCachePteBase;
  996. PointerPte += X256K / PAGE_SIZE;
  997. }
  998. PointerPte -= X256K / PAGE_SIZE;
  999. }
  1000. }
  1001. #endif
  1002. PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
  1003. MmLastFreeSystemCache = PointerPte;
  1004. MiAllowWorkingSetExpansion (&MmSystemCacheWs);
  1005. }
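
The routine above threads the free list through the first PTE of each 256KB slot, storing PTE indexes relative to MmSystemCachePteBase and terminating the chain with MM_EMPTY_PTE_LIST. The sketch below is not part of the original file; it walks that chain to count free slots. MiExampleCountFreeCacheSlots is a hypothetical helper, and in the real paths this list is only touched while holding the PFN lock, so a walk like this is assumed to run under the same protection.

ULONG_PTR
MiExampleCountFreeCacheSlots (
    VOID
    )
{
    PMMPTE Pte;
    ULONG_PTR FreeSlots;

    //
    // Hypothetical helper for illustration only.
    //
    FreeSlots = 0;
    Pte = MmFirstFreeSystemCache;

    while (Pte != (PMMPTE)MM_EMPTY_LIST) {

        FreeSlots += 1;

        if (Pte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {

            //
            // This slot is the tail of the chain.
            //
            break;
        }

        Pte = MmSystemCachePteBase + Pte->u.List.NextEntry;
    }

    return FreeSlots;
}
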
  1006. BOOLEAN
  1007. MmCheckCachedPageState (
  1008. IN PVOID SystemCacheAddress,
  1009. IN BOOLEAN SetToZero
  1010. )
  1011. /*++
  1012. Routine Description:
  1013. This routine checks the state of the specified page that is mapped in
  1014. the system cache. If the specified virtual address can be made valid
  1015. (i.e., the page is already in memory), it is made valid and the value
  1016. TRUE is returned.
  1017. If the page is not in memory, and SetToZero is FALSE, the
  1018. value FALSE is returned. However, if SetToZero is TRUE, a page of
  1019. zeroes is materialized for the specified virtual address and the address
  1020. is made valid and the value TRUE is returned.
  1021. This routine is for usage by the cache manager.
  1022. Arguments:
  1023. SystemCacheAddress - Supplies the address of a page mapped in the
  1024. system cache.
  1025. SetToZero - Supplies TRUE if a page of zeroes should be created in the
  1026. case where no page is already mapped.
  1027. Return Value:
  1028. FALSE if touching this page would cause a page fault resulting
  1029. in a page read.
  1030. TRUE if there is a physical page in memory for this address.
  1031. Environment:
  1032. Kernel mode.
  1033. --*/
  1034. {
  1035. PETHREAD Thread;
  1036. MMWSLE WsleMask;
  1037. ULONG Flags;
  1038. PMMPTE PointerPte;
  1039. PMMPTE PointerPde;
  1040. PMMPTE ProtoPte;
  1041. PFN_NUMBER PageFrameIndex;
  1042. WSLE_NUMBER WorkingSetIndex;
  1043. MMPTE TempPte;
  1044. MMPTE ProtoPteContents;
  1045. PMMPFN Pfn1;
  1046. PMMPFN Pfn2;
  1047. KIRQL OldIrql;
  1048. LOGICAL BarrierNeeded;
  1049. ULONG BarrierStamp;
  1050. PSUBSECTION Subsection;
  1051. PFILE_OBJECT FileObject;
  1052. LONGLONG FileOffset;
  1053. PointerPte = MiGetPteAddress (SystemCacheAddress);
  1054. //
  1055. // Make the PTE valid if possible.
  1056. //
  1057. if (PointerPte->u.Hard.Valid == 1) {
  1058. return TRUE;
  1059. }
  1060. BarrierNeeded = FALSE;
  1061. Thread = PsGetCurrentThread ();
  1062. LOCK_SYSTEM_WS (Thread);
  1063. if (PointerPte->u.Hard.Valid == 1) {
  1064. UNLOCK_SYSTEM_WS ();
  1065. return TRUE;
  1066. }
  1067. ASSERT (PointerPte->u.Soft.Prototype == 1);
  1068. ProtoPte = MiPteToProto (PointerPte);
  1069. PointerPde = MiGetPteAddress (ProtoPte);
  1070. LOCK_PFN (OldIrql);
  1071. ASSERT (PointerPte->u.Hard.Valid == 0);
  1072. ASSERT (PointerPte->u.Soft.Prototype == 1);
  1073. //
  1074. // PTE is not valid, check the state of the prototype PTE.
  1075. //
  1076. if (PointerPde->u.Hard.Valid == 0) {
  1077. MiMakeSystemAddressValidPfnSystemWs (ProtoPte, OldIrql);
  1078. //
  1079. // Page fault occurred, recheck state of original PTE.
  1080. //
  1081. if (PointerPte->u.Hard.Valid == 1) {
  1082. goto UnlockAndReturnTrue;
  1083. }
  1084. }
  1085. ProtoPteContents = *ProtoPte;
  1086. if (ProtoPteContents.u.Hard.Valid == 1) {
  1087. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&ProtoPteContents);
  1088. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1089. //
  1090. // The prototype PTE is valid, make the cache PTE
  1091. // valid and add it to the working set.
  1092. //
  1093. TempPte = ProtoPteContents;
  1094. }
  1095. else if ((ProtoPteContents.u.Soft.Transition == 1) &&
  1096. (ProtoPteContents.u.Soft.Prototype == 0)) {
  1097. //
  1098. // Prototype PTE is in the transition state. Remove the page
  1099. // from the page list and make it valid.
  1100. //
  1101. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&ProtoPteContents);
  1102. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1103. if ((Pfn1->u3.e1.ReadInProgress) || (Pfn1->u4.InPageError)) {
  1104. //
  1105. // Collided page fault, return.
  1106. //
  1107. goto UnlockAndReturnTrue;
  1108. }
  1109. if (MmAvailablePages < MM_HIGH_LIMIT) {
  1110. //
  1111. // This can only happen if the system is utilizing
  1112. // a hardware compression cache. This ensures that
  1113. // only a safe amount of the compressed virtual cache
  1114. // is directly mapped so that if the hardware gets
  1115. // into trouble, we can bail it out.
  1116. //
  1117. // The same is true when machines are low on memory - we don't
  1118. // want this thread to gobble up the pages from every modified
  1119. // write that completes because that would starve waiting threads.
  1120. //
  1121. // Just unlock everything here to give the compression
  1122. // reaper a chance to ravage pages and then retry.
  1123. //
  1124. if ((PsGetCurrentThread()->MemoryMaker == 0) ||
  1125. (MmAvailablePages == 0)) {
  1126. goto UnlockAndReturnTrue;
  1127. }
  1128. }
  1129. MiUnlinkPageFromList (Pfn1);
  1130. Pfn1->u3.e2.ReferenceCount += 1;
  1131. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1132. ASSERT (Pfn1->u3.e1.CacheAttribute == MiCached);
  1133. MI_SNAP_DATA (Pfn1, ProtoPte, 1);
  1134. MI_MAKE_VALID_PTE (TempPte,
  1135. PageFrameIndex,
  1136. Pfn1->OriginalPte.u.Soft.Protection,
  1137. NULL );
  1138. MI_WRITE_VALID_PTE (ProtoPte, TempPte);
  1139. //
  1140. // Increment the valid PTE count for the page containing
  1141. // the prototype PTE.
  1142. //
  1143. Pfn2 = MI_PFN_ELEMENT (Pfn1->u4.PteFrame);
  1144. }
  1145. else {
  1146. //
  1147. // Page is not in memory, if a page of zeroes is requested,
  1148. // get a page of zeroes and make it valid.
  1149. //
  1150. if ((SetToZero == FALSE) || (MmAvailablePages < MM_HIGH_LIMIT)) {
  1151. UNLOCK_PFN (OldIrql);
  1152. UNLOCK_SYSTEM_WS ();
  1153. //
  1154. // Fault the page into memory.
  1155. //
  1156. MmAccessFault (FALSE, SystemCacheAddress, KernelMode, NULL);
  1157. return FALSE;
  1158. }
  1159. //
  1160. // Increment the count of Pfn references for the control area
  1161. // corresponding to this file.
  1162. //
  1163. MiGetSubsectionAddress (
  1164. ProtoPte)->ControlArea->NumberOfPfnReferences += 1;
  1165. PageFrameIndex = MiRemoveZeroPage(MI_GET_PAGE_COLOR_FROM_PTE (ProtoPte));
  1166. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1167. //
  1168. // This barrier check is needed after zeroing the page and
  1169. // before setting the PTE (not the prototype PTE) valid.
  1170. // Capture it now, check it at the last possible moment.
  1171. //
  1172. BarrierNeeded = TRUE;
  1173. BarrierStamp = (ULONG)Pfn1->u4.PteFrame;
  1174. MiInitializePfn (PageFrameIndex, ProtoPte, 1);
  1175. Pfn1->u2.ShareCount = 0;
  1176. Pfn1->u3.e1.PrototypePte = 1;
  1177. MI_SNAP_DATA (Pfn1, ProtoPte, 2);
  1178. MI_MAKE_VALID_PTE (TempPte,
  1179. PageFrameIndex,
  1180. Pfn1->OriginalPte.u.Soft.Protection,
  1181. NULL );
  1182. MI_WRITE_VALID_PTE (ProtoPte, TempPte);
  1183. }
  1184. //
  1185. // Increment the share count since the page is being put into a working
  1186. // set.
  1187. //
  1188. Pfn1->u2.ShareCount += 1;
  1189. //
  1190. // Increment the reference count of the page table
  1191. // page for this PTE.
  1192. //
  1193. PointerPde = MiGetPteAddress (PointerPte);
  1194. Pfn2 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
  1195. Pfn2->u2.ShareCount += 1;
  1196. MI_SET_GLOBAL_STATE (TempPte, 1);
  1197. TempPte.u.Hard.Owner = MI_PTE_OWNER_KERNEL;
  1198. if (BarrierNeeded) {
  1199. MI_BARRIER_SYNCHRONIZE (BarrierStamp);
  1200. }
  1201. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  1202. //
  1203. // Capture the original PTE as it is needed for prefetch fault information.
  1204. //
  1205. TempPte = Pfn1->OriginalPte;
  1206. UNLOCK_PFN (OldIrql);
  1207. WsleMask.u1.Long = 0;
  1208. WsleMask.u1.e1.SameProtectAsProto = 1;
  1209. WorkingSetIndex = MiAllocateWsle (&MmSystemCacheWs,
  1210. PointerPte,
  1211. Pfn1,
  1212. WsleMask.u1.Long);
  1213. if (WorkingSetIndex == 0) {
  1214. //
  1215. // No working set entry was available so just trim the page.
  1216. // Note another thread may be writing too so the page must be
  1217. // trimmed instead of just tossed.
  1218. //
  1219. // The protection is in the prototype PTE.
  1220. //
  1221. ASSERT (Pfn1->u3.e1.PrototypePte == 1);
  1222. ASSERT (ProtoPte == Pfn1->PteAddress);
  1223. TempPte.u.Long = MiProtoAddressForPte (ProtoPte);
  1224. MiTrimPte (SystemCacheAddress, PointerPte, Pfn1, NULL, TempPte);
  1225. }
  1226. UNLOCK_SYSTEM_WS ();
  1227. if ((WorkingSetIndex != 0) &&
  1228. (CCPF_IS_PREFETCHER_ACTIVE()) &&
  1229. (TempPte.u.Soft.Prototype == 1)) {
  1230. Subsection = MiGetSubsectionAddress (&TempPte);
  1231. //
  1232. // Log prefetch fault information now that the PFN lock has been
  1233. // released and the PTE has been made valid. This minimizes PFN
  1234. // lock contention, allows CcPfLogPageFault to allocate (and fault
  1235. // on) pool, and allows other threads in this process to execute
  1236. // without faulting on this address.
  1237. //
  1238. FileObject = Subsection->ControlArea->FilePointer;
  1239. FileOffset = MiStartingOffset (Subsection, ProtoPte);
  1240. Flags = 0;
  1241. ASSERT (Subsection->ControlArea->u.Flags.Image == 0);
  1242. if (Subsection->ControlArea->u.Flags.Rom) {
  1243. Flags |= CCPF_TYPE_ROM;
  1244. }
  1245. CcPfLogPageFault (FileObject, FileOffset, Flags);
  1246. }
  1247. return TRUE;
  1248. UnlockAndReturnTrue:
  1249. UNLOCK_PFN (OldIrql);
  1250. UNLOCK_SYSTEM_WS ();
  1251. return TRUE;
  1252. }
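
MmCheckCachedPageState and MmCopyToCachedPage (below) are the two entry points in this module described as being for the cache manager's use. The following sketch is not part of the original file and only illustrates how a caller might combine them under the documented contracts: CacheAddress is assumed to be a page-aligned address inside a view previously mapped by MmMapViewInSystemCache, and UserBuffer a caller-supplied buffer of CountInBytes bytes with CountInBytes no larger than PAGE_SIZE. MiExampleWriteToCacheView is a hypothetical name used only here.

NTSTATUS
MiExampleWriteToCacheView (
    IN PVOID CacheAddress,          // page aligned, inside a mapped view
    IN PVOID UserBuffer,
    IN SIZE_T CountInBytes
    )
{
    NTSTATUS Status;

    //
    // Hypothetical caller for illustration only.
    //
    // Check (without materializing a zero page) whether the target page
    // is already resident; FALSE means touching it would cause a page
    // fault resulting in a page read.
    //
    if (MmCheckCachedPageState (CacheAddress, FALSE) == FALSE) {
        NOTHING;        // a real caller might defer the write here
    }

    //
    // Copy CountInBytes from the user buffer to the start of the page.
    // Per the parameter description, passing FALSE for DontZero requests
    // that the rest of the page be zeroed.
    //
    Status = MmCopyToCachedPage (CacheAddress,
                                 UserBuffer,
                                 0,
                                 CountInBytes,
                                 FALSE);

    if (Status == STATUS_INSUFFICIENT_RESOURCES) {

        //
        // No system PTE was available for the fast path; a caller would
        // fall back to copying through the mapped view directly.
        //
    }

    return Status;
}
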
  1253. NTSTATUS
  1254. MmCopyToCachedPage (
  1255. IN PVOID SystemCacheAddress,
  1256. IN PVOID UserBuffer,
  1257. IN ULONG Offset,
  1258. IN SIZE_T CountInBytes,
  1259. IN BOOLEAN DontZero
  1260. )
  1261. /*++
  1262. Routine Description:
  1263. This routine checks the state of the specified page that is mapped in
  1264. the system cache. If the specified virtual address can be made valid
  1265. (i.e., the page is already in memory), it is made valid and success
  1266. is returned.
  1267. This routine is for usage by the cache manager.
  1268. Arguments:
  1269. SystemCacheAddress - Supplies the address of a page mapped in the system
  1270. cache. This MUST be a page aligned address!
  1271. UserBuffer - Supplies the address of a user buffer to copy into the
  1272. system cache at the specified address + offset.
  1273. Offset - Supplies the offset into the UserBuffer to copy the data.
  1274. CountInBytes - Supplies the byte count to copy from the user buffer.
  1275. DontZero - Supplies TRUE if the buffer should not be zeroed (the
  1276. caller will track zeroing). FALSE if it should be zeroed.
  1277. Return Value:
  1278. NTSTATUS.
  1279. Environment:
  1280. Kernel mode, <= APC_LEVEL.
  1281. --*/
  1282. {
  1283. PMMPTE CopyPte;
  1284. PVOID CopyAddress;
  1285. MMWSLE WsleMask;
  1286. ULONG Flags;
  1287. PMMPTE PointerPte;
  1288. PMMPTE PointerPde;
  1289. PMMPTE ProtoPte;
  1290. PFN_NUMBER PageFrameIndex;
  1291. WSLE_NUMBER WorkingSetIndex;
  1292. MMPTE TempPte;
  1293. MMPTE TempPte2;
  1294. MMPTE ProtoPteContents;
  1295. PMMPFN Pfn1;
  1296. PMMPFN Pfn2;
  1297. KIRQL OldIrql;
  1298. SIZE_T EndFill;
  1299. PVOID Buffer;
  1300. NTSTATUS Status;
  1301. NTSTATUS ExceptionStatus;
  1302. PCONTROL_AREA ControlArea;
  1303. PETHREAD Thread;
  1304. ULONG SavedState;
  1305. PSUBSECTION Subsection;
  1306. PFILE_OBJECT FileObject;
  1307. LONGLONG FileOffset;
  1308. LOGICAL NewPage;
  1309. UNREFERENCED_PARAMETER (DontZero);
  1310. NewPage = FALSE;
  1311. WsleMask.u1.Long = 0;
  1312. Status = STATUS_SUCCESS;
  1313. Pfn1 = NULL;
  1314. Thread = PsGetCurrentThread ();
  1315. SATISFY_OVERZEALOUS_COMPILER (TempPte2.u.Soft.Prototype = 0);
  1316. SATISFY_OVERZEALOUS_COMPILER (ProtoPte = NULL);
  1317. SATISFY_OVERZEALOUS_COMPILER (TempPte.u.Long = 0);
  1318. SATISFY_OVERZEALOUS_COMPILER (Pfn1 = NULL);
  1319. SATISFY_OVERZEALOUS_COMPILER (Pfn2 = NULL);
  1320. SATISFY_OVERZEALOUS_COMPILER (PageFrameIndex = 0);
  1321. ASSERT (((ULONG_PTR)SystemCacheAddress & (PAGE_SIZE - 1)) == 0);
  1322. ASSERT ((CountInBytes + Offset) <= PAGE_SIZE);
  1323. ASSERT (KeGetCurrentIrql() < DISPATCH_LEVEL);
  1324. PointerPte = MiGetPteAddress (SystemCacheAddress);
  1325. if (PointerPte->u.Hard.Valid == 1) {
  1326. goto Copy;
  1327. }
  1328. //
  1329. // Acquire the working set mutex now as it is highly likely we will
  1330. // be inserting this system cache address into the working set list.
  1331. // This allows us to safely recover if no WSLEs are available because
  1332. // it prevents any other threads from locking down the address until
  1333. // we are done here.
  1334. //
  1335. LOCK_SYSTEM_WS (Thread);
  1336. //
  1337. // Note the world may change while we waited for the working set mutex.
  1338. //
  1339. if (PointerPte->u.Hard.Valid == 1) {
  1340. UNLOCK_SYSTEM_WS ();
  1341. goto Copy;
  1342. }
  1343. ASSERT (PointerPte->u.Soft.Prototype == 1);
  1344. ProtoPte = MiPteToProto (PointerPte);
  1345. PointerPde = MiGetPteAddress (ProtoPte);
  1346. LOCK_PFN (OldIrql);
  1347. ASSERT (PointerPte->u.Hard.Valid == 0);
  1348. Recheck:
  1349. if (PointerPte->u.Hard.Valid == 1) {
  1350. if (Pfn1 != NULL) {
  1351. //
  1352. // Toss the page as we won't be needing it after all, another
  1353. // thread has won the race.
  1354. //
  1355. PageFrameIndex = Pfn1 - MmPfnDatabase;
  1356. MiInsertPageInFreeList (PageFrameIndex);
  1357. }
  1358. UNLOCK_PFN (OldIrql);
  1359. UNLOCK_SYSTEM_WS ();
  1360. goto Copy;
  1361. }
  1362. //
  1363. // Make the PTE valid if possible.
  1364. //
  1365. ASSERT (PointerPte->u.Soft.Prototype == 1);
  1366. //
  1367. // PTE is not valid, check the state of the prototype PTE.
  1368. //
  1369. if (PointerPde->u.Hard.Valid == 0) {
  1370. MiMakeSystemAddressValidPfnSystemWs (ProtoPte, OldIrql);
  1371. //
  1372. // Page fault occurred, recheck state of original PTE.
  1373. //
  1374. if (PointerPte->u.Hard.Valid == 1) {
  1375. if (Pfn1 != NULL) {
  1376. //
  1377. // Toss the page as we won't be needing it after all, another
  1378. // thread has won the race.
  1379. //
  1380. PageFrameIndex = Pfn1 - MmPfnDatabase;
  1381. MiInsertPageInFreeList (PageFrameIndex);
  1382. }
  1383. UNLOCK_PFN (OldIrql);
  1384. UNLOCK_SYSTEM_WS ();
  1385. goto Copy;
  1386. }
  1387. }
  1388. ProtoPteContents = *ProtoPte;
  1389. if (ProtoPteContents.u.Hard.Valid == 1) {
  1390. if (Pfn1 != NULL) {
  1391. //
  1392. // Toss the page as we won't be needing it after all, another
  1393. // thread has won the race.
  1394. //
  1395. PageFrameIndex = Pfn1 - MmPfnDatabase;
  1396. MiInsertPageInFreeList (PageFrameIndex);
  1397. }
  1398. //
  1399. // The prototype PTE is valid, make the cache PTE
  1400. // valid and add it to the working set.
  1401. //
  1402. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&ProtoPteContents);
  1403. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1404. //
  1405. // Increment the share count as this prototype PTE will be
  1406. // mapped into the system cache shortly.
  1407. //
  1408. Pfn1->u2.ShareCount += 1;
  1409. TempPte = ProtoPteContents;
  1410. ASSERT (Pfn1->u1.Event != NULL);
  1411. }
  1412. else if ((ProtoPteContents.u.Soft.Transition == 1) &&
  1413. (ProtoPteContents.u.Soft.Prototype == 0)) {
  1414. if (Pfn1 != NULL) {
  1415. //
  1416. // Toss the page as we won't be needing it after all, another
  1417. // thread has won the race.
  1418. //
  1419. PageFrameIndex = Pfn1 - MmPfnDatabase;
  1420. MiInsertPageInFreeList (PageFrameIndex);
  1421. }
  1422. //
  1423. // Prototype PTE is in the transition state. Remove the page
  1424. // from the page list and make it valid.
  1425. //
  1426. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&ProtoPteContents);
  1427. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1428. if ((Pfn1->u3.e1.ReadInProgress) || (Pfn1->u4.InPageError)) {
  1429. //
  1430. // Collided page fault or in page error, try the copy
  1431. // operation incurring a page fault.
  1432. //
  1433. UNLOCK_PFN (OldIrql);
  1434. UNLOCK_SYSTEM_WS ();
  1435. goto Copy;
  1436. }
  1437. ASSERT ((SPFN_NUMBER)MmAvailablePages >= 0);
  1438. if (MmAvailablePages < MM_LOW_LIMIT) {
  1439. //
  1440. // This can only happen if the system is utilizing a hardware
  1441. // compression cache. This ensures that only a safe amount
  1442. // of the compressed virtual cache is directly mapped so that
  1443. // if the hardware gets into trouble, we can bail it out.
  1444. //
  1445. // The same is true when machines are low on memory - we don't
  1446. // want this thread to gobble up the pages from every modified
  1447. // write that completes because that would starve waiting threads.
  1448. //
  1449. if (MiEnsureAvailablePageOrWait (NULL, SystemCacheAddress, OldIrql)) {
  1450. //
  1451. // A wait operation occurred which could have changed the
  1452. // state of the PTE. Recheck the PTE state.
  1453. //
  1454. Pfn1 = NULL;
  1455. goto Recheck;
  1456. }
  1457. }
  1458. MiUnlinkPageFromList (Pfn1);
  1459. Pfn1->u3.e2.ReferenceCount += 1;
  1460. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  1461. MI_SET_MODIFIED (Pfn1, 1, 0x6);
  1462. ASSERT (Pfn1->u2.ShareCount == 0);
  1463. Pfn1->u2.ShareCount += 1;
  1464. MI_SNAP_DATA (Pfn1, ProtoPte, 3);
  1465. MI_MAKE_VALID_PTE (TempPte,
  1466. PageFrameIndex,
  1467. Pfn1->OriginalPte.u.Soft.Protection,
  1468. NULL);
  1469. MI_SET_PTE_DIRTY (TempPte);
  1470. MI_WRITE_VALID_PTE (ProtoPte, TempPte);
  1471. //
  1472. // Do NOT increment the share count for the page containing
  1473. // the prototype PTE because it is already correct (the share
  1474. // count is for both transition & valid PTE entries and this one
  1475. // was transition before we just made it valid).
  1476. //
  1477. }
  1478. else {
  1479. if (Pfn1 == NULL) {
  1480. //
  1481. // Page is not in memory, if a page of zeroes is requested,
  1482. // get a page of zeroes and make it valid.
  1483. //
  1484. if ((MmAvailablePages < MM_HIGH_LIMIT) &&
  1485. (MiEnsureAvailablePageOrWait (NULL, SystemCacheAddress, OldIrql))) {
  1486. //
  1487. // A wait operation occurred which could have changed the
  1488. // state of the PTE. Recheck the PTE state.
  1489. //
  1490. goto Recheck;
  1491. }
  1492. //
  1493. // Remove any page from the list in preparation for receiving
  1494. // the user data.
  1495. //
  1496. PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (ProtoPte));
  1497. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1498. ASSERT (Pfn1->u2.ShareCount == 0);
  1499. ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
  1500. //
  1501. // Temporarily mark the page as bad so that contiguous
  1502. // memory allocators won't steal it when we release
  1503. // the PFN lock below. This also prevents the
  1504. // MiIdentifyPfn code from trying to identify it as
  1505. // we haven't filled in all the fields yet.
  1506. //
  1507. Pfn1->u3.e1.PageLocation = BadPageList;
  1508. //
  1509. // Map the page with a system PTE and do the copy into the page
  1510. // directly. Then retry the whole operation in case another racing
  1511. // syscache-address-accessing thread has raced ahead of us for the
  1512. // same address.
  1513. //
  1514. UNLOCK_PFN (OldIrql);
  1515. UNLOCK_SYSTEM_WS ();
  1516. CopyPte = MiReserveSystemPtes (1, SystemPteSpace);
  1517. if (CopyPte == NULL) {
  1518. //
  1519. // No PTEs available for us to take the fast path, the cache
  1520. // manager will have to copy the data directly.
  1521. //
  1522. LOCK_PFN (OldIrql);
  1523. MiInsertPageInFreeList (PageFrameIndex);
  1524. UNLOCK_PFN (OldIrql);
  1525. return STATUS_INSUFFICIENT_RESOURCES;
  1526. }
  1527. MI_MAKE_VALID_PTE (TempPte,
  1528. PageFrameIndex,
  1529. MM_READWRITE,
  1530. CopyPte);
  1531. MI_SET_PTE_DIRTY (TempPte);
  1532. MI_WRITE_VALID_PTE (CopyPte, TempPte);
  1533. CopyAddress = MiGetVirtualAddressMappedByPte (CopyPte);
  1534. //
  1535. // Zero the memory outside the range we're going to copy.
  1536. //
  1537. if (Offset != 0) {
  1538. RtlZeroMemory (CopyAddress, Offset);
  1539. }
  1540. Buffer = (PVOID)((PCHAR) CopyAddress + Offset);
  1541. EndFill = PAGE_SIZE - (Offset + CountInBytes);
  1542. if (EndFill != 0) {
  1543. RtlZeroMemory ((PVOID)((PCHAR)Buffer + CountInBytes),
  1544. EndFill);
  1545. }
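//
// At this point the layout of the copy page is (illustrative):
//
//   [0, Offset)                          zero-filled
//   [Offset, Offset + CountInBytes)      receives the user data below
//   [Offset + CountInBytes, PAGE_SIZE)   zero-filled
//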
  1546. //
  1547. // Perform the copy of the user buffer into the page under
  1548. // an exception handler.
  1549. //
  1550. MmSavePageFaultReadAhead (Thread, &SavedState);
  1551. MmSetPageFaultReadAhead (Thread, 0);
  1552. ExceptionStatus = STATUS_SUCCESS;
  1553. try {
  1554. RtlCopyBytes (Buffer, UserBuffer, CountInBytes);
  1555. } except (MiMapCacheExceptionFilter (&ExceptionStatus, GetExceptionInformation())) {
  1556. ASSERT (ExceptionStatus != STATUS_MULTIPLE_FAULT_VIOLATION);
  1557. Status = ExceptionStatus;
  1558. }
  1559. MmResetPageFaultReadAhead (Thread, SavedState);
  1560. MiReleaseSystemPtes (CopyPte, 1, SystemPteSpace);
  1561. if (!NT_SUCCESS (Status)) {
  1562. LOCK_PFN (OldIrql);
  1563. MiInsertPageInFreeList (PageFrameIndex);
  1564. UNLOCK_PFN (OldIrql);
  1565. return Status;
  1566. }
  1567. //
  1568. // Recheck everything as the world may have changed while we
  1569. // released our locks. Loop up and see if another thread has
  1570. // already changed things (free our page if so), otherwise
  1571. // we'll use this page the next time through.
  1572. //
  1573. LOCK_SYSTEM_WS (Thread);
  1574. LOCK_PFN (OldIrql);
  1575. goto Recheck;
  1576. }
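//
// A page was already allocated and filled with the user data on an
// earlier pass through this routine (it was removed from all lists
// and marked BadPageList above), so use that page now.
//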
  1577. PageFrameIndex = Pfn1 - MmPfnDatabase;
  1578. ASSERT (Pfn1->u3.e1.PageLocation == BadPageList);
  1579. ASSERT (Pfn1->u2.ShareCount == 0);
  1580. ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
  1581. //
  1582. // Increment the valid PTE count for the page containing
  1583. // the prototype PTE.
  1584. //
  1585. MiInitializePfn (PageFrameIndex, ProtoPte, 1);
  1586. ASSERT (Pfn1->u3.e1.ReadInProgress == 0);
  1587. Pfn1->u3.e1.PrototypePte = 1;
  1588. Pfn1->u1.Event = NULL;
  1589. //
  1590. // Increment the count of PFN references for the control area
  1591. // corresponding to this file.
  1592. //
  1593. ControlArea = MiGetSubsectionAddress (ProtoPte)->ControlArea;
  1594. ControlArea->NumberOfPfnReferences += 1;
  1595. NewPage = TRUE;
  1596. MI_SNAP_DATA (Pfn1, ProtoPte, 4);
  1597. MI_MAKE_VALID_PTE (TempPte,
  1598. PageFrameIndex,
  1599. Pfn1->OriginalPte.u.Soft.Protection,
  1600. NULL);
  1601. MI_SET_PTE_DIRTY (TempPte);
  1602. MI_SET_GLOBAL_STATE (TempPte, 0);
  1603. MI_WRITE_VALID_PTE (ProtoPte, TempPte);
  1604. }
  1605. //
  1606. // Capture prefetch fault information.
  1607. //
  1608. TempPte2 = Pfn1->OriginalPte;
  1609. //
  1610. // Increment the share count of the page table page for this PTE.
  1611. //
  1612. PointerPde = MiGetPteAddress (PointerPte);
  1613. Pfn2 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
  1614. Pfn2->u2.ShareCount += 1;
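//
// Build the system cache view of the page: unlike the prototype PTE
// written above, this mapping is global and owned by kernel mode.
//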
  1615. MI_SET_GLOBAL_STATE (TempPte, 1);
  1616. TempPte.u.Hard.Owner = MI_PTE_OWNER_KERNEL;
  1617. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  1618. ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
  1619. ASSERT (Pfn1->PteAddress == ProtoPte);
  1620. UNLOCK_PFN (OldIrql);
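//
// Insert the page into the system cache working set, noting in the
// working set list entry that the protection comes from the
// prototype PTE.
//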
  1621. WsleMask.u1.e1.SameProtectAsProto = 1;
  1622. WorkingSetIndex = MiAllocateWsle (&MmSystemCacheWs,
  1623. PointerPte,
  1624. Pfn1,
  1625. WsleMask.u1.Long);
  1626. if (WorkingSetIndex == 0) {
  1627. //
  1628. // No working set entry was available so just trim the page.
1629. // Note another thread may be writing to the page as well, so it
1630. // must be trimmed instead of just tossed.
  1631. //
  1632. // The protection is in the prototype PTE.
  1633. //
  1634. ASSERT (Pfn1->u3.e1.PrototypePte == 1);
  1635. ASSERT (ProtoPte == Pfn1->PteAddress);
  1636. TempPte.u.Long = MiProtoAddressForPte (ProtoPte);
  1637. MiTrimPte (SystemCacheAddress, PointerPte, Pfn1, NULL, TempPte);
  1638. }
  1639. UNLOCK_SYSTEM_WS ();
  1640. Copy:
  1641. if (NewPage == FALSE) {
  1642. //
  1643. // Perform the copy since it hasn't been done already.
  1644. //
  1645. MmSavePageFaultReadAhead (Thread, &SavedState);
  1646. MmSetPageFaultReadAhead (Thread, 0);
  1647. //
  1648. // Copy the user buffer into the cache under an exception handler.
  1649. //
  1650. ExceptionStatus = STATUS_SUCCESS;
  1651. Buffer = (PVOID)((PCHAR) SystemCacheAddress + Offset);
  1652. try {
  1653. RtlCopyBytes (Buffer, UserBuffer, CountInBytes);
  1654. } except (MiMapCacheExceptionFilter (&ExceptionStatus, GetExceptionInformation())) {
  1655. ASSERT (ExceptionStatus != STATUS_MULTIPLE_FAULT_VIOLATION);
  1656. Status = ExceptionStatus;
  1657. }
  1658. MmResetPageFaultReadAhead (Thread, SavedState);
  1659. }
  1660. //
1661. // If a virtual address was made directly present (i.e., not via the normal
  1662. // fault mechanisms), then log prefetch fault information now that the
  1663. // PFN lock has been released and the PTE has been made valid. This
  1664. // minimizes PFN lock contention, allows CcPfLogPageFault to allocate
  1665. // (and fault on) pool, and allows other threads in this process to
  1666. // execute without faulting on this address.
  1667. //
  1668. if ((WsleMask.u1.e1.SameProtectAsProto == 1) &&
  1669. (TempPte2.u.Soft.Prototype == 1)) {
  1670. Subsection = MiGetSubsectionAddress (&TempPte2);
  1671. FileObject = Subsection->ControlArea->FilePointer;
  1672. FileOffset = MiStartingOffset (Subsection, ProtoPte);
  1673. Flags = 0;
  1674. ASSERT (Subsection->ControlArea->u.Flags.Image == 0);
  1675. if (Subsection->ControlArea->u.Flags.Rom) {
  1676. Flags |= CCPF_TYPE_ROM;
  1677. }
  1678. CcPfLogPageFault (FileObject, FileOffset, Flags);
  1679. }
  1680. return Status;
  1681. }
  1682. LONG
  1683. MiMapCacheExceptionFilter (
  1684. IN PNTSTATUS Status,
  1685. IN PEXCEPTION_POINTERS ExceptionPointer
  1686. )
  1687. /*++
  1688. Routine Description:
1689. This routine is a filter for exceptions raised while copying data
1690. from the user buffer to the system cache. It stores the
1691. status code from the exception record into the status argument.
1692. In the case of an in-page I/O error it stores the actual I/O
1693. error code, and in the case of an access violation it stores
1694. STATUS_INVALID_USER_BUFFER.
1695. Arguments:
1696. Status - Returns the status from the exception record.
1697. ExceptionPointer - Supplies a pointer to the exception information being checked.
1698. Return Value:
1699. LONG - returns EXCEPTION_EXECUTE_HANDLER.
  1700. --*/
  1701. {
  1702. NTSTATUS local;
  1703. local = ExceptionPointer->ExceptionRecord->ExceptionCode;
  1704. //
  1705. // If the exception is STATUS_IN_PAGE_ERROR, get the I/O error code
  1706. // from the exception record.
  1707. //
  1708. if (local == STATUS_IN_PAGE_ERROR) {
  1709. if (ExceptionPointer->ExceptionRecord->NumberParameters >= 3) {
  1710. local = (NTSTATUS)ExceptionPointer->ExceptionRecord->ExceptionInformation[2];
  1711. }
  1712. }
  1713. if (local == STATUS_ACCESS_VIOLATION) {
  1714. local = STATUS_INVALID_USER_BUFFER;
  1715. }
  1716. *Status = local;
  1717. return EXCEPTION_EXECUTE_HANDLER;
  1718. }
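//
// MiMapCacheExceptionFilter is used by the user-buffer copy paths
// above, in the following manner:
//
//     ExceptionStatus = STATUS_SUCCESS;
//     try {
//         RtlCopyBytes (Buffer, UserBuffer, CountInBytes);
//     } except (MiMapCacheExceptionFilter (&ExceptionStatus,
//                                          GetExceptionInformation ())) {
//         Status = ExceptionStatus;
//     }
//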
  1719. VOID
  1720. MmUnlockCachedPage (
  1721. IN PVOID AddressInCache
  1722. )
  1723. /*++
  1724. Routine Description:
1725. This routine unlocks a previously locked cached page.
  1726. Arguments:
  1727. AddressInCache - Supplies the address where the page was locked
  1728. in the system cache. This must be the same
  1729. address that MmCopyToCachedPage was called with.
  1730. Return Value:
  1731. None.
  1732. --*/
  1733. {
  1734. PMMPTE PointerPte;
  1735. PMMPFN Pfn1;
  1736. KIRQL OldIrql;
  1737. PointerPte = MiGetPteAddress (AddressInCache);
  1738. ASSERT (PointerPte->u.Hard.Valid == 1);
  1739. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  1740. LOCK_PFN (OldIrql);
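//
// The caller's reference on the page (taken when it was locked for
// the copy) must still be present; if not, the reference counts are
// corrupt and the system is bugchecked below.
//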
  1741. if (Pfn1->u3.e2.ReferenceCount <= 1) {
  1742. KeBugCheckEx (MEMORY_MANAGEMENT,
  1743. 0x777,
  1744. (ULONG_PTR)PointerPte->u.Hard.PageFrameNumber,
  1745. Pfn1->u3.e2.ReferenceCount,
  1746. (ULONG_PTR)AddressInCache);
  1747. return;
  1748. }
  1749. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF(Pfn1, 25);
  1750. UNLOCK_PFN (OldIrql);
  1751. return;
  1752. }