Leaked source code of Windows Server 2003

/*++

Copyright (c) 1989  Microsoft Corporation

Module Name:

    iosup.c

Abstract:

    This module contains routines which provide support for the I/O system.

Author:

    Lou Perazzoli (loup) 25-Apr-1989
    Landy Wang (landyw) 02-June-1997

Revision History:

--*/

#include "mi.h"

#undef MmIsRecursiveIoFault

ULONG MiCacheOverride[4];

#if DBG
ULONG MmShowMapOverlaps;
#endif

extern LONG MmTotalSystemDriverPages;

BOOLEAN
MmIsRecursiveIoFault (
    VOID
    );

PVOID
MiAllocateContiguousMemory (
    IN SIZE_T NumberOfBytes,
    IN PFN_NUMBER LowestAcceptablePfn,
    IN PFN_NUMBER HighestAcceptablePfn,
    IN PFN_NUMBER BoundaryPfn,
    IN MEMORY_CACHING_TYPE CacheType,
    PVOID CallingAddress
    );

PVOID
MiMapLockedPagesInUserSpace (
    IN PMDL MemoryDescriptorList,
    IN PVOID StartingVa,
    IN MEMORY_CACHING_TYPE CacheType,
    IN PVOID BaseVa
    );

VOID
MiUnmapLockedPagesInUserSpace (
    IN PVOID BaseAddress,
    IN PMDL MemoryDescriptorList
    );

VOID
MiAddMdlTracker (
    IN PMDL MemoryDescriptorList,
    IN PVOID CallingAddress,
    IN PVOID CallersCaller,
    IN PFN_NUMBER NumberOfPagesToLock,
    IN ULONG Who
    );

KSPIN_LOCK MmIoTrackerLock;
LIST_ENTRY MmIoHeader;

#if DBG
PFN_NUMBER MmIoHeaderCount;
ULONG MmIoHeaderNumberOfEntries;
ULONG MmIoHeaderNumberOfEntriesPeak;
#endif

PCHAR MiCacheStrings[] = {
    "noncached",
    "cached",
    "writecombined",
    "None"
};

typedef struct _PTE_TRACKER {
    LIST_ENTRY ListEntry;
    PMDL Mdl;
    PFN_NUMBER Count;
    PVOID SystemVa;
    PVOID StartVa;
    ULONG Offset;
    ULONG Length;
    ULONG_PTR Page;
    PVOID CallingAddress;
    PVOID CallersCaller;
    BOOLEAN IoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
} PTE_TRACKER, *PPTE_TRACKER;

typedef struct _SYSPTES_HEADER {
    LIST_ENTRY ListHead;
    PFN_NUMBER Count;
    PFN_NUMBER NumberOfEntries;
    PFN_NUMBER NumberOfEntriesPeak;
} SYSPTES_HEADER, *PSYSPTES_HEADER;

ULONG MmTrackPtes = 0;
BOOLEAN MiTrackPtesAborted = FALSE;
SYSPTES_HEADER MiPteHeader;
SLIST_HEADER MiDeadPteTrackerSListHead;
KSPIN_LOCK MiPteTrackerLock;
KSPIN_LOCK MiTrackIoLock;

#if (_MI_PAGING_LEVELS >= 3)
KSPIN_LOCK MiLargePageLock;
RTL_BITMAP MiLargeVaBitMap;
#endif

ULONG MiNonCachedCollisions;

#if DBG
PFN_NUMBER MiCurrentAdvancedPages;
PFN_NUMBER MiAdvancesGiven;
PFN_NUMBER MiAdvancesFreed;
#endif
VOID
MiInsertPteTracker (
    IN PMDL MemoryDescriptorList,
    IN ULONG Flags,
    IN LOGICAL IoMapping,
    IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
    IN PVOID MyCaller,
    IN PVOID MyCallersCaller
    );

VOID
MiRemovePteTracker (
    IN PMDL MemoryDescriptorList OPTIONAL,
    IN PVOID VirtualAddress,
    IN PFN_NUMBER NumberOfPtes
    );

LOGICAL
MiReferenceIoSpace (
    IN PMDL MemoryDescriptorList,
    IN PPFN_NUMBER Page
    );

LOGICAL
MiDereferenceIoSpace (
    IN PMDL MemoryDescriptorList
    );

VOID
MiPhysicalViewInserter (
    IN PEPROCESS Process,
    IN PMI_PHYSICAL_VIEW PhysicalView
    );

VOID
MiZeroAwePageWorker (
    IN PVOID Context
    );

#if DBG
ULONG MiPrintLockedPages;

VOID
MiVerifyLockedPageCharges (
    VOID
    );
#endif

#ifdef ALLOC_PRAGMA
#pragma alloc_text(INIT, MmSetPageProtection)
#pragma alloc_text(INIT, MiInitializeIoTrackers)
#pragma alloc_text(INIT, MiInitializeLargePageSupport)
#pragma alloc_text(PAGE, MmAllocateIndependentPages)
#pragma alloc_text(PAGE, MmFreeIndependentPages)
#pragma alloc_text(PAGE, MmLockPagableDataSection)
#pragma alloc_text(PAGE, MiLookupDataTableEntry)
#pragma alloc_text(PAGE, MmSetBankedSection)
#pragma alloc_text(PAGE, MmProbeAndLockProcessPages)
#pragma alloc_text(PAGE, MmProbeAndLockSelectedPages)
#pragma alloc_text(PAGE, MmMapVideoDisplay)
#pragma alloc_text(PAGE, MmUnmapVideoDisplay)
#pragma alloc_text(PAGE, MmGetSectionRange)
#pragma alloc_text(PAGE, MiMapSinglePage)
#pragma alloc_text(PAGE, MiUnmapSinglePage)
#pragma alloc_text(PAGE, MmAllocateMappingAddress)
#pragma alloc_text(PAGE, MmFreeMappingAddress)
#pragma alloc_text(PAGE, MmAllocateNonCachedMemory)
#pragma alloc_text(PAGE, MmFreeNonCachedMemory)
#pragma alloc_text(PAGE, MmLockPagedPool)
#pragma alloc_text(PAGE, MmLockPagableSectionByHandle)
#pragma alloc_text(PAGE, MiZeroAwePageWorker)
#pragma alloc_text(PAGELK, MmEnablePAT)
#pragma alloc_text(PAGELK, MiUnmapLockedPagesInUserSpace)
#pragma alloc_text(PAGELK, MmAllocatePagesForMdl)
#pragma alloc_text(PAGELK, MiZeroInParallel)
#pragma alloc_text(PAGELK, MmFreePagesFromMdl)
#pragma alloc_text(PAGELK, MmUnlockPagedPool)
#pragma alloc_text(PAGELK, MmGatherMemoryForHibernate)
#pragma alloc_text(PAGELK, MmReturnMemoryForHibernate)
#pragma alloc_text(PAGELK, MmReleaseDumpAddresses)
#pragma alloc_text(PAGELK, MmMapUserAddressesToPage)
#pragma alloc_text(PAGELK, MiPhysicalViewInserter)
#pragma alloc_text(PAGELK, MiPhysicalViewAdjuster)
#pragma alloc_text(PAGEVRFY, MmIsSystemAddressLocked)
#pragma alloc_text(PAGEVRFY, MmAreMdlPagesLocked)
#endif
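
//
// [Editor's illustration - not part of the original file.]
//
// The alloc_text pragmas above assign each routine to a named section:
// INIT code is discarded after system initialization, PAGE code is
// pageable, PAGELK code is pageable but can be locked down at runtime,
// and PAGEVRFY is associated with the Driver Verifier.  A driver uses
// the same pattern; a minimal sketch (MyPagedHelper is hypothetical):
//

#if 0

NTSTATUS MyPagedHelper (VOID);

#ifdef ALLOC_PRAGMA
#pragma alloc_text(PAGE, MyPagedHelper)
#endif

NTSTATUS
MyPagedHelper (
    VOID
    )
{
    //
    // Must run at IRQL < DISPATCH_LEVEL because the code itself is pageable.
    //

    PAGED_CODE ();

    return STATUS_SUCCESS;
}

#endif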
extern POOL_DESCRIPTOR NonPagedPoolDescriptor;

PFN_NUMBER MmMdlPagesAllocated;

KEVENT MmCollidedLockEvent;
LONG MmCollidedLockWait;

BOOLEAN MiWriteCombiningPtes = FALSE;

#if DBG
ULONG MiPrintAwe;
ULONG MmStopOnBadProbe = 1;
#endif

#define MI_PROBE_RAISE_SIZE 16

ULONG MiProbeRaises[MI_PROBE_RAISE_SIZE];

#define MI_INSTRUMENT_PROBE_RAISES(i)       \
        ASSERT (i < MI_PROBE_RAISE_SIZE);   \
        MiProbeRaises[i] += 1;

//
// Note: this should be > 2041 to account for the cache manager's
// aggressive zeroing logic.
//

ULONG MmReferenceCountCheck = MAXUSHORT / 2;

ULONG MiMdlsAdjusted = FALSE;

VOID
MmProbeAndLockPages (
    IN OUT PMDL MemoryDescriptorList,
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )

/*++

Routine Description:

    This routine probes the specified pages, makes the pages resident and
    locks the physical pages mapped by the virtual pages in memory.  The
    Memory descriptor list is updated to describe the physical pages.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
                           (MDL).  The supplied MDL must supply a virtual
                           address, byte offset and length field.  The
                           physical page portion of the MDL is updated when
                           the pages are locked in memory.

    AccessMode - Supplies the access mode in which to probe the arguments.
                 One of KernelMode or UserMode.

    Operation - Supplies the operation type.  One of IoReadAccess,
                IoWriteAccess or IoModifyAccess.

Return Value:

    None - exceptions are raised.

Environment:

    Kernel mode.  APC_LEVEL and below for pagable addresses,
    DISPATCH_LEVEL and below for non-pagable addresses.

--*/
{
    ULONG Processor;
    PPFN_NUMBER Page;
    MMPTE PteContents;
    PMMPTE LastPte;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE PointerPpe;
    PMMPTE PointerPxe;
    PVOID Va;
    PVOID EndVa;
    PVOID AlignedVa;
    PMMPFN Pfn1;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER LastPageFrameIndex;
    PEPROCESS CurrentProcess;
    KIRQL OldIrql;
    PFN_NUMBER NumberOfPagesToLock;
    PFN_NUMBER NumberOfPagesSpanned;
    NTSTATUS status;
    NTSTATUS ProbeStatus;
    PETHREAD Thread;
    ULONG SavedState;
    PMI_PHYSICAL_VIEW PhysicalView;
    PCHAR StartVa;
    PVOID CallingAddress;
    PVOID CallersCaller;
    PAWEINFO AweInfo;
    PEX_PUSH_LOCK PushLock;
    TABLE_SEARCH_RESULT SearchResult;
#if defined (_MIALT4K_)
    MMPTE AltPteContents;
    PMMPTE PointerAltPte;
    PMMPTE LastPointerAltPte;
    PMMPTE AltPointerPte;
    PMMPTE AltPointerPde;
    PMMPTE AltPointerPpe;
    PMMPTE AltPointerPxe;
#endif

    ASSERT (MemoryDescriptorList->ByteCount != 0);
    ASSERT (((ULONG)MemoryDescriptorList->ByteOffset & ~(PAGE_SIZE - 1)) == 0);

    Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);

    ASSERT (((ULONG_PTR)MemoryDescriptorList->StartVa & (PAGE_SIZE - 1)) == 0);

    AlignedVa = (PVOID)MemoryDescriptorList->StartVa;

    ASSERT ((MemoryDescriptorList->MdlFlags & (
                    MDL_PAGES_LOCKED |
                    MDL_MAPPED_TO_SYSTEM_VA |
                    MDL_SOURCE_IS_NONPAGED_POOL |
                    MDL_PARTIAL |
                    MDL_IO_SPACE)) == 0);

    Va = (PCHAR)AlignedVa + MemoryDescriptorList->ByteOffset;
    StartVa = Va;

    //
    // EndVa is one byte past the end of the buffer.  If AccessMode is not
    // kernel, make sure that EndVa is in user space AND the byte count
    // does not cause it to wrap.
    //

    EndVa = (PVOID)((PCHAR)Va + MemoryDescriptorList->ByteCount);

    if ((AccessMode != KernelMode) &&
        ((EndVa > (PVOID)MM_USER_PROBE_ADDRESS) || (Va >= EndVa))) {
        *Page = MM_EMPTY_LIST;
        MI_INSTRUMENT_PROBE_RAISES(0);
        ExRaiseStatus (STATUS_ACCESS_VIOLATION);
        return;
    }

    //
    // You would think there is an optimization which could be performed here:
    // if the operation is for WriteAccess and the complete page is
    // being modified, we can remove the current page, if it is not
    // resident, and substitute a demand zero page.
    // Note, that after analysis by marking the thread and then
    // noting if a page read was done, this rarely occurs.
    //

    Thread = PsGetCurrentThread ();

    NumberOfPagesToLock = ADDRESS_AND_SIZE_TO_SPAN_PAGES (Va,
                                    MemoryDescriptorList->ByteCount);

    ASSERT (NumberOfPagesToLock != 0);

    if (Va <= MM_HIGHEST_USER_ADDRESS) {

        CurrentProcess = PsGetCurrentProcessByThread (Thread);

        if (CurrentProcess->AweInfo != NULL) {

            AweInfo = CurrentProcess->AweInfo;

            //
            // Block APCs to prevent recursive pushlock scenarios as
            // this is not supported.
            //

            KeEnterGuardedRegionThread (&Thread->Tcb);

            PushLock = ExAcquireCacheAwarePushLockShared (AweInfo->PushLock);

            //
            // Provide a fast path for transfers that are within
            // a single AWE region.
            //

            Processor = KeGetCurrentProcessorNumber ();
            PhysicalView = AweInfo->PhysicalViewHint[Processor];

            if ((PhysicalView != NULL) &&
                ((PVOID)StartVa >= MI_VPN_TO_VA (PhysicalView->StartingVpn)) &&
                ((PVOID)((PCHAR)EndVa - 1) <= MI_VPN_TO_VA_ENDING (PhysicalView->EndingVpn))) {
                NOTHING;
            }
            else {

                //
                // Lookup the element and save the result.
                //

                SearchResult = MiFindNodeOrParent (&AweInfo->AweVadRoot,
                                                   MI_VA_TO_VPN (StartVa),
                                                   (PMMADDRESS_NODE *) &PhysicalView);

                if ((SearchResult == TableFoundNode) &&
                    ((PVOID)StartVa >= MI_VPN_TO_VA (PhysicalView->StartingVpn)) &&
                    ((PVOID)((PCHAR)EndVa - 1) <= MI_VPN_TO_VA_ENDING (PhysicalView->EndingVpn))) {
                    AweInfo->PhysicalViewHint[Processor] = PhysicalView;
                }
                else {
                    ExReleaseCacheAwarePushLockShared (PushLock);
                    KeLeaveGuardedRegionThread (&Thread->Tcb);
                    goto DefaultProbeAndLock;
                }
            }

            MemoryDescriptorList->Process = CurrentProcess;

            MemoryDescriptorList->MdlFlags |= (MDL_PAGES_LOCKED | MDL_DESCRIBES_AWE);

            if (PhysicalView->u.LongFlags & MI_PHYSICAL_VIEW_AWE) {

                PointerPte = MiGetPteAddress (StartVa);
                LastPte = MiGetPteAddress ((PCHAR)EndVa - 1);

                do {
                    PteContents = *PointerPte;

                    if (PteContents.u.Hard.Valid == 0) {
                        ExReleaseCacheAwarePushLockShared (PushLock);
                        KeLeaveGuardedRegionThread (&Thread->Tcb);
                        *Page = MM_EMPTY_LIST;
                        MI_INSTRUMENT_PROBE_RAISES(9);
                        status = STATUS_ACCESS_VIOLATION;
                        goto failure2;
                    }

                    PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
                    Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

                    if (Pfn1->AweReferenceCount >= (LONG)MmReferenceCountCheck) {
                        ASSERT (FALSE);
                        ExReleaseCacheAwarePushLockShared (PushLock);
                        KeLeaveGuardedRegionThread (&Thread->Tcb);
                        *Page = MM_EMPTY_LIST;
                        status = STATUS_WORKING_SET_QUOTA;
                        goto failure2;
                    }

                    InterlockedIncrement (&Pfn1->AweReferenceCount);

                    *Page = PageFrameIndex;
                    Page += 1;
                    PointerPte += 1;
                } while (PointerPte <= LastPte);

                ExReleaseCacheAwarePushLockShared (PushLock);
                KeLeaveGuardedRegionThread (&Thread->Tcb);
                return;
            }

            if (PhysicalView->u.LongFlags & MI_PHYSICAL_VIEW_LARGE) {

                //
                // The PTE cannot be referenced (it doesn't exist), but it
                // serves the useful purpose of identifying when we cross
                // PDEs and therefore must recompute the base PFN.
                //

                PointerPte = MiGetPteAddress (StartVa);

                PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (StartVa);
                Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

                do {
                    if (Pfn1->AweReferenceCount >= (LONG)MmReferenceCountCheck) {
                        ASSERT (FALSE);
                        ExReleaseCacheAwarePushLockShared (PushLock);
                        KeLeaveGuardedRegionThread (&Thread->Tcb);
                        *Page = MM_EMPTY_LIST;
                        status = STATUS_WORKING_SET_QUOTA;
                        goto failure2;
                    }

                    InterlockedIncrement (&Pfn1->AweReferenceCount);

                    *Page = PageFrameIndex;

                    NumberOfPagesToLock -= 1;

                    if (NumberOfPagesToLock == 0) {
                        break;
                    }

                    Page += 1;
                    PointerPte += 1;

                    if (!MiIsPteOnPdeBoundary (PointerPte)) {
                        PageFrameIndex += 1;
                        Pfn1 += 1;
                    }
                    else {
                        StartVa = MiGetVirtualAddressMappedByPte (PointerPte);
                        PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (StartVa);
                        Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
                    }
                } while (TRUE);

                ExReleaseCacheAwarePushLockShared (PushLock);
                KeLeaveGuardedRegionThread (&Thread->Tcb);
                return;
            }
        }
    }
DefaultProbeAndLock:

    NumberOfPagesSpanned = NumberOfPagesToLock;

    if (!MI_IS_PHYSICAL_ADDRESS(Va)) {

        ProbeStatus = STATUS_SUCCESS;

        MmSavePageFaultReadAhead (Thread, &SavedState);
        MmSetPageFaultReadAhead (Thread, (ULONG)(NumberOfPagesToLock - 1));

        try {

            do {

                *Page = MM_EMPTY_LIST;

                //
                // Make sure the page is resident.
                //

                *(volatile CHAR *)Va;

                if ((Operation != IoReadAccess) &&
                    (Va <= MM_HIGHEST_USER_ADDRESS)) {

                    //
                    // Probe for write access as well.
                    //

                    ProbeForWriteChar ((PCHAR)Va);
                }

                NumberOfPagesToLock -= 1;

                MmSetPageFaultReadAhead (Thread, (ULONG)(NumberOfPagesToLock - 1));
                Va = (PVOID) (((ULONG_PTR)Va + PAGE_SIZE) & ~(PAGE_SIZE - 1));
                Page += 1;
            } while (Va < EndVa);

            ASSERT (NumberOfPagesToLock == 0);

            Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);

        } except (EXCEPTION_EXECUTE_HANDLER) {
            ProbeStatus = GetExceptionCode();
        }

        //
        // We may still fault again below but it's generally rare.
        // Restore this thread's normal fault behavior now.
        //

        MmResetPageFaultReadAhead (Thread, SavedState);

        if (ProbeStatus != STATUS_SUCCESS) {
            MI_INSTRUMENT_PROBE_RAISES(1);
            MemoryDescriptorList->Process = NULL;
            ExRaiseStatus (ProbeStatus);
            return;
        }

        PointerPte = MiGetPteAddress (StartVa);
    }
    else {

        //
        // Set PointerPte to NULL to indicate this is a physical address range.
        //

        if (Va <= MM_HIGHEST_USER_ADDRESS) {
            PointerPte = MiGetPteAddress (StartVa);
        }
        else {
            PointerPte = NULL;
        }

        *Page = MM_EMPTY_LIST;
    }

    PointerPxe = MiGetPxeAddress (StartVa);
    PointerPpe = MiGetPpeAddress (StartVa);
    PointerPde = MiGetPdeAddress (StartVa);
    Va = AlignedVa;

    ASSERT (Page == (PPFN_NUMBER)(MemoryDescriptorList + 1));

    //
    // Indicate whether this is a read or write operation.
    //

    if (Operation != IoReadAccess) {
        MemoryDescriptorList->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else {
        MemoryDescriptorList->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Initialize MdlFlags (assume the probe will succeed).
    //

    MemoryDescriptorList->MdlFlags |= MDL_PAGES_LOCKED;

    if (Va <= MM_HIGHEST_USER_ADDRESS) {

        //
        // These are user space addresses, check them carefully.
        //

        ASSERT (NumberOfPagesSpanned != 0);

        CurrentProcess = PsGetCurrentProcess ();

        //
        // Initialize the MDL process field (assume the probe will succeed).
        //

        MemoryDescriptorList->Process = CurrentProcess;

        LastPte = MiGetPteAddress ((PCHAR)EndVa - 1);

        InterlockedExchangeAddSizeT (&CurrentProcess->NumberOfLockedPages,
                                     NumberOfPagesSpanned);
    }
    else {

        CurrentProcess = NULL;

        MemoryDescriptorList->Process = NULL;
        Va = (PCHAR)Va + MemoryDescriptorList->ByteOffset;

        NumberOfPagesToLock = ADDRESS_AND_SIZE_TO_SPAN_PAGES (Va,
                                        MemoryDescriptorList->ByteCount);

        if (PointerPte == NULL) {

            //
            // On certain architectures, virtual addresses
            // may be physical and hence have no corresponding PTE.
            //

            PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (Va);
            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

            LastPageFrameIndex = PageFrameIndex + NumberOfPagesToLock;

            //
            // Acquire the PFN database lock.
            //

            LOCK_PFN2 (OldIrql);

            ASSERT ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0);

            //
            // Ensure the systemwide locked pages count remains fluid.
            //

            if (MI_NONPAGABLE_MEMORY_AVAILABLE() <= (SPFN_NUMBER) NumberOfPagesToLock) {

                //
                // This page is for nonpaged privileged code or data and must
                // already be resident so continue onwards.
                //

                MI_INSTRUMENT_PROBE_RAISES(8);
            }

            do {

                //
                // Check to make sure each page is not locked down an
                // unusually high number of times.
                //

                ASSERT (MI_IS_PFN (PageFrameIndex));
                ASSERT (PageFrameIndex <= MmHighestPhysicalPage);

                if (Pfn1->u3.e2.ReferenceCount >= MmReferenceCountCheck) {
                    UNLOCK_PFN2 (OldIrql);
                    ASSERT (FALSE);
                    status = STATUS_WORKING_SET_QUOTA;
                    goto failure;
                }

                MI_ADD_LOCKED_PAGE_CHARGE(Pfn1, TRUE, 0);

                if (MemoryDescriptorList->MdlFlags & MDL_WRITE_OPERATION) {
                    MI_SNAP_DIRTY (Pfn1, 1, 0x99);
                }

                Pfn1->u3.e2.ReferenceCount += 1;

                *Page = PageFrameIndex;
                Page += 1;
                PageFrameIndex += 1;
                Pfn1 += 1;
            } while (PageFrameIndex < LastPageFrameIndex);

            UNLOCK_PFN2 (OldIrql);
            return;
        }

        //
        // Since this operation is to a system address, no need to check for
        // PTE write access below so mark the access as a read so only the
        // operation type (and not where the Va is) needs to be checked in the
        // subsequent loop.
        //

        Operation = IoReadAccess;

        LastPte = MiGetPteAddress ((PCHAR)EndVa - 1);
    }
    LOCK_PFN2 (OldIrql);

    do {

        while (
#if (_MI_PAGING_LEVELS >= 4)
               (PointerPxe->u.Hard.Valid == 0) ||
#endif
#if (_MI_PAGING_LEVELS >= 3)
               (PointerPpe->u.Hard.Valid == 0) ||
#endif
               ((PointerPde->u.Hard.Valid == 0) ||
                (((MI_PDE_MAPS_LARGE_PAGE (PointerPde)) == 0) &&
                 (PointerPte->u.Hard.Valid == 0)))) {

            //
            // The VA is not resident, release the PFN lock and access the
            // page to make it appear.
            //

            UNLOCK_PFN2 (OldIrql);

            MmSavePageFaultReadAhead (Thread, &SavedState);
            MmSetPageFaultReadAhead (Thread, 0);

            Va = MiGetVirtualAddressMappedByPte (PointerPte);

            status = MmAccessFault (FALSE, Va, KernelMode, NULL);

            MmResetPageFaultReadAhead (Thread, SavedState);

            if (!NT_SUCCESS(status)) {
                goto failure;
            }

            LOCK_PFN2 (OldIrql);
        }

        if (MI_PDE_MAPS_LARGE_PAGE (PointerPde)) {
            Va = MiGetVirtualAddressMappedByPte (PointerPte);
            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPde) + MiGetPteOffset (Va);
        }
        else {

            PteContents = *PointerPte;

            //
            // There is a subtle race here where the PTE contents can get
            // zeroed by a thread running on another processor.  This can
            // only happen for an AWE address space because these ranges
            // (deliberately for performance reasons) do not acquire the
            // PFN lock during remap operations.  In this case, one of 2
            // scenarios is possible - either the old PTE is read or the
            // new.  The new may be a zero PTE if the map request was to
            // invalidate *or* non-zero (and valid) if the map request was
            // inserting a new entry.  For the latter, we don't care if we
            // lock the old or new frame here as it's an application bug
            // to provoke this behavior - and regardless of which is used,
            // no corruption can occur because the PFN lock is acquired
            // during an NtFreeUserPhysicalPages.  But the former must be
            // checked for explicitly here.  As a separate note, the
            // PXE/PPE/PDE accesses above are always safe even for the AWE
            // deletion race because these tables are never lazy-allocated
            // for AWE ranges.
            //

            if (PteContents.u.Hard.Valid == 0) {
                ASSERT (PteContents.u.Long == 0);
                ASSERT (PsGetCurrentProcess ()->AweInfo != NULL);
                UNLOCK_PFN2 (OldIrql);
                status = STATUS_ACCESS_VIOLATION;
                goto failure;
            }

#if defined (_MIALT4K_)

            if (PteContents.u.Hard.Cache == MM_PTE_CACHE_RESERVED) {

                //
                // This is a wow64 split page - ie: the individual 4k
                // pages have different permissions, so each 4k page within
                // this native page must be probed individually.
                //
                // Note split pages are generally rare.
                //

                ASSERT (PsGetCurrentProcess()->Wow64Process != NULL);
                ASSERT (EndVa <= MmWorkingSetList->HighestUserAddress);

                Va = MiGetVirtualAddressMappedByPte (PointerPte);

                PointerAltPte = MiGetAltPteAddress (Va);
                LastPointerAltPte = PointerAltPte + (PAGE_SIZE / PAGE_4K) - 1;

                AltPointerPxe = MiGetPxeAddress (PointerAltPte);
                AltPointerPpe = MiGetPpeAddress (PointerAltPte);
                AltPointerPde = MiGetPdeAddress (PointerAltPte);
                AltPointerPte = MiGetPteAddress (PointerAltPte);

#if (_MI_PAGING_LEVELS == 4)
                while ((AltPointerPxe->u.Hard.Valid == 0) ||
                       (AltPointerPpe->u.Hard.Valid == 0) ||
                       (AltPointerPde->u.Hard.Valid == 0) ||
                       (AltPointerPte->u.Hard.Valid == 0))
#elif (_MI_PAGING_LEVELS == 3)
                while ((AltPointerPpe->u.Hard.Valid == 0) ||
                       (AltPointerPde->u.Hard.Valid == 0) ||
                       (AltPointerPte->u.Hard.Valid == 0))
#else
                while ((AltPointerPde->u.Hard.Valid == 0) ||
                       (AltPointerPte->u.Hard.Valid == 0))
#endif
                {
                    //
                    // The ALTPTEs are not resident, release the PFN lock and
                    // access it to make it appear.  Then restart the entire
                    // operation as the PFN lock was released so anything
                    // could have happened to the address space.
                    //

                    UNLOCK_PFN2 (OldIrql);

                    MmSavePageFaultReadAhead (Thread, &SavedState);
                    MmSetPageFaultReadAhead (Thread, 0);

                    status = MmAccessFault (FALSE, PointerAltPte, KernelMode, NULL);

                    MmResetPageFaultReadAhead (Thread, SavedState);

                    if (!NT_SUCCESS(status)) {
                        goto failure;
                    }

                    LOCK_PFN2 (OldIrql);

                    continue;
                }

                //
                // The ALTPTEs are now present and the PFN lock is held again.
                // Examine the individual 4k page states in the ALTPTEs.
                //
                // Note that only the relevant 4k pages can be examined - ie:
                // if the transfer starts in the 2nd 4k of a native page,
                // then don't examine the 1st 4k.  If the transfer ends in
                // the first half of a native page, then don't examine the
                // 2nd 4k.
                //

                ASSERT (PAGE_SIZE == 2 * PAGE_4K);

                if (PAGE_ALIGN (StartVa) == PAGE_ALIGN (Va)) {

                    //
                    // We are in the first page, see if we need to round up.
                    //

                    if (BYTE_OFFSET (StartVa) >= PAGE_4K) {
                        PointerAltPte += 1;
                        Va = (PVOID)((ULONG_PTR)Va + PAGE_4K);
                    }
                }

                if (PAGE_ALIGN ((PCHAR)EndVa - 1) == PAGE_ALIGN (Va)) {

                    //
                    // We are in the last page, see if we need to round down.
                    //

                    if (BYTE_OFFSET ((PCHAR)EndVa - 1) < PAGE_4K) {
                        LastPointerAltPte -= 1;
                    }
                }

                //
                // We better not have rounded up and down in the same page !
                //

                ASSERT (PointerAltPte <= LastPointerAltPte);
                ASSERT (PointerAltPte != NULL);

                do {

                    //
                    // If the sub 4k page is :
                    //
                    // 1 - No access or
                    // 2 - a private not-committed page or
                    // 3 - read only and this is a write operation
                    //
                    // then return an access violation.
                    //

                    AltPteContents = *PointerAltPte;

                    if (AltPteContents.u.Alt.NoAccess != 0) {
                        status = STATUS_ACCESS_VIOLATION;
                        UNLOCK_PFN2 (OldIrql);
                        goto failure;
                    }

                    if ((AltPteContents.u.Alt.Commit == 0) && (AltPteContents.u.Alt.Private != 0)) {
                        status = STATUS_ACCESS_VIOLATION;
                        UNLOCK_PFN2 (OldIrql);
                        goto failure;
                    }

                    if (Operation != IoReadAccess) {

                        //
                        // If the caller is writing and the ALTPTE indicates
                        // it's not writable or copy on write, then AV.
                        //
                        // If it's copy on write, then fall through for
                        // further interrogation.
                        //

                        if ((AltPteContents.u.Alt.Write == 0) &&
                            (AltPteContents.u.Alt.CopyOnWrite == 0)) {
                            status = STATUS_ACCESS_VIOLATION;
                            UNLOCK_PFN2 (OldIrql);
                            goto failure;
                        }
                    }

                    //
                    // If the sub 4k page :
                    //
                    // 1 - has not been accessed yet or
                    // 2 - is demand-fill zero or
                    // 3 - is copy-on-write, and this is a write operation
                    //
                    // then go the long way and see if it can be paged in.
                    //

                    if ((AltPteContents.u.Alt.Accessed == 0) ||
                        (AltPteContents.u.Alt.FillZero != 0) ||
                        ((Operation != IoReadAccess) && (AltPteContents.u.Alt.CopyOnWrite == 1))) {

                        UNLOCK_PFN2 (OldIrql);

                        MmSavePageFaultReadAhead (Thread, &SavedState);
                        MmSetPageFaultReadAhead (Thread, 0);

                        status = MmX86Fault (FALSE, Va, KernelMode, NULL);

                        MmResetPageFaultReadAhead (Thread, SavedState);

                        if (!NT_SUCCESS(status)) {
                            goto failure;
                        }

                        //
                        // Clear PointerAltPte to signify a restart is needed
                        // (because the PFN lock was released so the address
                        // space may have changed).
                        //

                        PointerAltPte = NULL;

                        LOCK_PFN2 (OldIrql);

                        break;
                    }

                    PointerAltPte += 1;
                    Va = (PVOID)((ULONG_PTR)Va + PAGE_4K);

                } while (PointerAltPte <= LastPointerAltPte);

                if (PointerAltPte == NULL) {
                    continue;
                }
            }

#endif

            if (Operation != IoReadAccess) {

                if ((PteContents.u.Long & MM_PTE_WRITE_MASK) == 0) {

                    if (PteContents.u.Long & MM_PTE_COPY_ON_WRITE_MASK) {

                        //
                        // The protection has changed from writable to copy
                        // on write.  This can happen if a fork is in
                        // progress for example.  Restart the operation at
                        // the top.
                        //

                        Va = MiGetVirtualAddressMappedByPte (PointerPte);

                        if (Va <= MM_HIGHEST_USER_ADDRESS) {

                            UNLOCK_PFN2 (OldIrql);

                            MmSavePageFaultReadAhead (Thread, &SavedState);
                            MmSetPageFaultReadAhead (Thread, 0);

                            status = MmAccessFault (TRUE, Va, KernelMode, NULL);

                            MmResetPageFaultReadAhead (Thread, SavedState);

                            if (!NT_SUCCESS(status)) {
                                goto failure;
                            }

                            LOCK_PFN2 (OldIrql);

                            continue;
                        }
                    }

                    //
                    // The caller has made the page protection more
                    // restrictive, this should never be done once the
                    // request has been issued !  Rather than wading
                    // through the PFN database entry to see if it
                    // could possibly work out, give the caller an
                    // access violation.
                    //

#if DBG
                    DbgPrint ("MmProbeAndLockPages: PTE %p %p changed\n",
                              PointerPte,
                              PteContents.u.Long);

                    if (MmStopOnBadProbe) {
                        DbgBreakPoint ();
                    }
#endif
                    UNLOCK_PFN2 (OldIrql);
                    status = STATUS_ACCESS_VIOLATION;
                    goto failure;
                }
            }

            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
        }

        if (MI_IS_PFN (PageFrameIndex)) {

            if (MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) {

                //
                // MDLs cannot be filled with a mixture of real and I/O
                // space page frame numbers.
                //

                MI_INSTRUMENT_PROBE_RAISES(6);
                UNLOCK_PFN2 (OldIrql);
                status = STATUS_ACCESS_VIOLATION;
                goto failure;
            }

            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

            //
            // Check to make sure this page is not locked down an unusually
            // high number of times.
            //

            if (Pfn1->u3.e2.ReferenceCount >= MmReferenceCountCheck) {
                MI_INSTRUMENT_PROBE_RAISES(3);
                UNLOCK_PFN2 (OldIrql);
                ASSERT (FALSE);
                status = STATUS_WORKING_SET_QUOTA;
                goto failure;
            }

            //
            // Ensure the systemwide locked pages count is fluid.
            //

            if (MI_NONPAGABLE_MEMORY_AVAILABLE() <= 0) {

                //
                // If this page is for privileged code/data,
                // then force it in regardless.
                //

                Va = MiGetVirtualAddressMappedByPte (PointerPte);

                if ((Va < MM_HIGHEST_USER_ADDRESS) ||
                    (MI_IS_SYSTEM_CACHE_ADDRESS(Va)) ||
                    ((Va >= MmPagedPoolStart) && (Va <= MmPagedPoolEnd))) {

                    MI_INSTRUMENT_PROBE_RAISES(5);
                    UNLOCK_PFN2 (OldIrql);
                    status = STATUS_WORKING_SET_QUOTA;
                    goto failure;
                }

                MI_INSTRUMENT_PROBE_RAISES(12);
            }

            if (MemoryDescriptorList->MdlFlags & MDL_WRITE_OPERATION) {
                MI_SNAP_DIRTY (Pfn1, 1, 0x98);
            }

            if (MI_ADD_LOCKED_PAGE_CHARGE(Pfn1, FALSE, 0) == FALSE) {

                //
                // If this page is for privileged code/data,
                // then force it in regardless.
                //

                Va = MiGetVirtualAddressMappedByPte (PointerPte);

                if ((Va < MM_HIGHEST_USER_ADDRESS) ||
                    (MI_IS_SYSTEM_CACHE_ADDRESS(Va)) ||
                    ((Va >= MmPagedPoolStart) && (Va <= MmPagedPoolEnd))) {

                    UNLOCK_PFN2 (OldIrql);
                    MI_INSTRUMENT_PROBE_RAISES(10);
                    status = STATUS_WORKING_SET_QUOTA;
                    goto failure;
                }

                MI_ADD_LOCKED_PAGE_CHARGE(Pfn1, TRUE, 0);
            }

            Pfn1->u3.e2.ReferenceCount += 1;
        }
        else {

            //
            // This is an I/O space address - there is no PFN database entry
            // for it, so no reference counts may be modified for these
            // pages.
            //
            // Don't charge page locking for this transfer as it is all
            // physical, just add it to the MDL.
            //

            if (CurrentProcess != NULL) {

                //
                // The VA better be within a \Device\PhysicalMemory VAD.
                //

                if (CurrentProcess->PhysicalVadRoot == NULL) {
#if DBG
                    DbgPrint ("MmProbeAndLockPages: Physical VA0 %p not found\n", Va);
                    DbgBreakPoint ();
#endif
                    UNLOCK_PFN2 (OldIrql);
                    MI_INSTRUMENT_PROBE_RAISES (2);
                    status = STATUS_ACCESS_VIOLATION;
                    goto failure;
                }

                Va = MiGetVirtualAddressMappedByPte (PointerPte);

                SearchResult = MiFindNodeOrParent (CurrentProcess->PhysicalVadRoot,
                                                   MI_VA_TO_VPN (Va),
                                                   (PMMADDRESS_NODE *) &PhysicalView);

                if ((SearchResult == TableFoundNode) &&
                    (PhysicalView->u.LongFlags & (MI_PHYSICAL_VIEW_PHYS))) {

                    ASSERT (PhysicalView->Vad->u.VadFlags.PhysicalMapping == 1);

                    //
                    // The range lies within a physical VAD.
                    //

                    if (Operation != IoReadAccess) {

                        //
                        // Ensure the VAD is writable.  Changing individual
                        // PTE protections in a physical VAD is not allowed.
                        //

                        if ((PhysicalView->Vad->u.VadFlags.Protection & MM_READWRITE) == 0) {
                            MI_INSTRUMENT_PROBE_RAISES(4);
                            UNLOCK_PFN2 (OldIrql);
                            status = STATUS_ACCESS_VIOLATION;
                            goto failure;
                        }
                    }

                    if (((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0) &&
                        (Page != (PPFN_NUMBER)(MemoryDescriptorList + 1))) {

                        //
                        // MDLs cannot be filled with a mixture of real and
                        // I/O space page frame numbers.
                        //

                        MI_INSTRUMENT_PROBE_RAISES(7);
                        UNLOCK_PFN2 (OldIrql);
                        status = STATUS_ACCESS_VIOLATION;
                        goto failure;
                    }
                }
                else {
#if DBG
                    DbgPrint ("MmProbeAndLockPages: Physical VA1 %p not found\n", Va);
                    DbgBreakPoint ();
#endif
                    UNLOCK_PFN2 (OldIrql);
                    MI_INSTRUMENT_PROBE_RAISES (11);
                    status = STATUS_ACCESS_VIOLATION;
                    goto failure;
                }
            }

#if DBG
            //
            // This page is in I/O space, therefore all the argument pages
            // better be.
            //

            if (Page != (PPFN_NUMBER)(MemoryDescriptorList + 1)) {
                ASSERT (!MI_IS_PFN (*(PPFN_NUMBER)(MemoryDescriptorList + 1)));
            }
#endif

            if (((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0) &&
                (CurrentProcess != NULL)) {

                InterlockedExchangeAddSizeT (&CurrentProcess->NumberOfLockedPages,
                                             0 - NumberOfPagesSpanned);
            }

            MemoryDescriptorList->MdlFlags |= MDL_IO_SPACE;
        }

        *Page = PageFrameIndex;

        Page += 1;
        PointerPte += 1;

        if (MiIsPteOnPdeBoundary (PointerPte)) {

            PointerPde += 1;

            if (MiIsPteOnPpeBoundary (PointerPte)) {

                PointerPpe += 1;

                if (MiIsPteOnPxeBoundary (PointerPte)) {
                    PointerPxe += 1;
                }
            }
        }

    } while (PointerPte <= LastPte);

    UNLOCK_PFN2 (OldIrql);

    if (AlignedVa <= MM_HIGHEST_USER_ADDRESS) {

        //
        // User space buffers that reside in I/O space need to be reference
        // counted because SANs will want to reuse the physical space but
        // cannot do this unless it is guaranteed there are no more pending
        // I/Os going from/to it.
        //

        if (MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) {

            if (MiReferenceIoSpace (MemoryDescriptorList, Page) == FALSE) {
                status = STATUS_INSUFFICIENT_RESOURCES;
                goto failure;
            }
        }

        if (MmTrackLockedPages == TRUE) {
            ASSERT (NumberOfPagesSpanned != 0);

            RtlGetCallersAddress (&CallingAddress, &CallersCaller);

            MiAddMdlTracker (MemoryDescriptorList,
                             CallingAddress,
                             CallersCaller,
                             NumberOfPagesSpanned,
                             1);
        }
    }

    return;

failure:

    //
    // An exception occurred.  Unlock the pages locked so far.
    //

    if (MmTrackLockedPages == TRUE) {

        //
        // Adjust the MDL length so that MmUnlockPages only
        // processes the part that was completed.
        //

        ULONG PagesLocked;

        PagesLocked = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartVa,
                                                      MemoryDescriptorList->ByteCount);

        RtlGetCallersAddress (&CallingAddress, &CallersCaller);

        MiAddMdlTracker (MemoryDescriptorList,
                         CallingAddress,
                         CallersCaller,
                         PagesLocked,
                         0);
    }

failure2:

    MmUnlockPages (MemoryDescriptorList);

    //
    // Raise the captured status as an exception to the caller.
    //

    MI_INSTRUMENT_PROBE_RAISES(13);
    ExRaiseStatus (status);

    return;
}
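
//
// [Editor's illustration - not part of the original file.]
//
// A minimal sketch of how a driver typically uses MmProbeAndLockPages to
// pin a user buffer: allocate an MDL describing the buffer, probe and
// lock inside try/except (the routine raises on failure rather than
// returning a status), and balance later with MmUnlockPages/IoFreeMdl.
// SamplePinUserBuffer, UserBuffer and Length are hypothetical names.
//

#if 0

NTSTATUS
SamplePinUserBuffer (
    IN PVOID UserBuffer,
    IN ULONG Length,
    OUT PMDL *MdlOut
    )
{
    PMDL Mdl;

    Mdl = IoAllocateMdl (UserBuffer, Length, FALSE, FALSE, NULL);

    if (Mdl == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    try {
        MmProbeAndLockPages (Mdl, UserMode, IoWriteAccess);
    } except (EXCEPTION_EXECUTE_HANDLER) {

        //
        // The probe raised (access violation, quota, etc.) - clean up.
        //

        IoFreeMdl (Mdl);
        return GetExceptionCode ();
    }

    *MdlOut = Mdl;

    //
    // ... perform the transfer; the caller later unpins with:
    //
    //     MmUnlockPages (Mdl);
    //     IoFreeMdl (Mdl);
    //

    return STATUS_SUCCESS;
}

#endif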
NTKERNELAPI
VOID
MmProbeAndLockProcessPages (
    IN OUT PMDL MemoryDescriptorList,
    IN PEPROCESS Process,
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )

/*++

Routine Description:

    This routine probes and locks the address range specified by
    the MemoryDescriptorList in the specified Process for the AccessMode
    and Operation.

Arguments:

    MemoryDescriptorList - Supplies a pre-initialized MDL that describes the
                           address range to be probed and locked.

    Process - Supplies the address of the process whose address range is
              to be locked.

    AccessMode - Supplies the mode for which the probe should check access
                 to the range.

    Operation - Supplies the type of access for which to check the range.

Return Value:

    None.

--*/

{
    KAPC_STATE ApcState;
    LOGICAL Attached;
    NTSTATUS Status;

    Attached = FALSE;
    Status = STATUS_SUCCESS;

    if (Process != PsGetCurrentProcess ()) {
        KeStackAttachProcess (&Process->Pcb, &ApcState);
        Attached = TRUE;
    }

    try {

        MmProbeAndLockPages (MemoryDescriptorList,
                             AccessMode,
                             Operation);

    } except (EXCEPTION_EXECUTE_HANDLER) {
        Status = GetExceptionCode();
    }

    if (Attached) {
        KeUnstackDetachProcess (&ApcState);
    }

    if (Status != STATUS_SUCCESS) {
        ExRaiseStatus (Status);
    }

    return;
}
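
//
// [Editor's illustration - not part of the original file.]
//
// A sketch of the cross-process variant: given a referenced PEPROCESS
// and a VA valid in that process, the routine attaches to the target
// address space, probes and locks, and detaches internally, so the
// caller only supplies the MDL.  SamplePinRemoteBuffer, TargetProcess,
// RemoteVa and Length are hypothetical names.
//

#if 0

PMDL
SamplePinRemoteBuffer (
    IN PEPROCESS TargetProcess,
    IN PVOID RemoteVa,
    IN ULONG Length
    )
{
    PMDL Mdl;

    Mdl = IoAllocateMdl (RemoteVa, Length, FALSE, FALSE, NULL);

    if (Mdl == NULL) {
        return NULL;
    }

    try {

        //
        // Raises on failure, just like MmProbeAndLockPages.
        //

        MmProbeAndLockProcessPages (Mdl, TargetProcess, UserMode, IoReadAccess);

    } except (EXCEPTION_EXECUTE_HANDLER) {
        IoFreeMdl (Mdl);
        return NULL;
    }

    return Mdl;
}

#endif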
VOID
MiAddMdlTracker (
    IN PMDL MemoryDescriptorList,
    IN PVOID CallingAddress,
    IN PVOID CallersCaller,
    IN PFN_NUMBER NumberOfPagesToLock,
    IN ULONG Who
    )

/*++

Routine Description:

    This routine adds an MDL to the specified process' chain.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
                           (MDL).  The MDL must supply the length.  The
                           physical page portion of the MDL is updated when
                           the pages are locked in memory.

    CallingAddress - Supplies the address of the caller of our caller.

    CallersCaller - Supplies the address of the caller of CallingAddress.

    NumberOfPagesToLock - Specifies the number of pages to lock.

    Who - Specifies which routine is adding the entry.

Return Value:

    None - exceptions are raised.

Environment:

    Kernel mode.  APC_LEVEL and below.

--*/

{
    PEPROCESS Process;
    PLOCK_HEADER LockedPagesHeader;
    PLOCK_TRACKER Tracker;
    PLOCK_TRACKER P;
    PLIST_ENTRY NextEntry;
    KLOCK_QUEUE_HANDLE LockHandle;

    ASSERT (MmTrackLockedPages == TRUE);

    Process = MemoryDescriptorList->Process;

    if (Process == NULL) {
        return;
    }

    LockedPagesHeader = Process->LockedPagesList;

    if (LockedPagesHeader == NULL) {
        return;
    }

    //
    // It's ok to check unsynchronized for aborted tracking as the worst case
    // is just that one more entry gets added which will be freed later
    // anyway.  The main purpose behind aborted tracking is that frees and
    // exits don't mistakenly bugcheck when an entry cannot be found.
    //

    if (LockedPagesHeader->Valid == FALSE) {
        return;
    }

    Tracker = ExAllocatePoolWithTag (NonPagedPool,
                                     sizeof (LOCK_TRACKER),
                                     'kLmM');

    if (Tracker == NULL) {

        //
        // It's ok to set this without synchronization as the worst case
        // is just that a few more entries get added which will be freed
        // later anyway.  The main purpose behind aborted tracking is that
        // frees and exits don't mistakenly bugcheck when an entry cannot
        // be found.
        //

        LockedPagesHeader->Valid = FALSE;

        return;
    }

    Tracker->Mdl = MemoryDescriptorList;
    Tracker->Count = NumberOfPagesToLock;
    Tracker->StartVa = MemoryDescriptorList->StartVa;
    Tracker->Offset = MemoryDescriptorList->ByteOffset;
    Tracker->Length = MemoryDescriptorList->ByteCount;
    Tracker->Page = *(PPFN_NUMBER)(MemoryDescriptorList + 1);

    Tracker->CallingAddress = CallingAddress;
    Tracker->CallersCaller = CallersCaller;

    Tracker->Who = Who;
    Tracker->Process = Process;

    //
    // Update the list for this process.  First make sure it's not already
    // inserted.
    //

    KeAcquireInStackQueuedSpinLock (&LockedPagesHeader->Lock, &LockHandle);

    NextEntry = LockedPagesHeader->ListHead.Flink;
    while (NextEntry != &LockedPagesHeader->ListHead) {

        P = CONTAINING_RECORD (NextEntry,
                               LOCK_TRACKER,
                               ListEntry);

        if (P->Mdl == MemoryDescriptorList) {
            KeBugCheckEx (LOCKED_PAGES_TRACKER_CORRUPTION,
                          0x1,
                          (ULONG_PTR) P,
                          (ULONG_PTR) MemoryDescriptorList,
                          (ULONG_PTR) LockedPagesHeader->Count);
        }

        NextEntry = NextEntry->Flink;
    }

    InsertTailList (&LockedPagesHeader->ListHead, &Tracker->ListEntry);

    LockedPagesHeader->Count += NumberOfPagesToLock;

    KeReleaseInStackQueuedSpinLock (&LockHandle);
}
LOGICAL
MiFreeMdlTracker (
    IN OUT PMDL MemoryDescriptorList,
    IN PFN_NUMBER NumberOfPages
    )

/*++

Routine Description:

    This deletes an MDL from the specified process' chain.  Used specifically
    by MmProbeAndLockSelectedPages () because it builds an MDL in its local
    stack and then copies the requested pages into the real MDL.  This lets
    us track these pages.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
                           (MDL).  The MDL must supply the length.

    NumberOfPages - Supplies the number of pages to be freed.

Return Value:

    TRUE.

Environment:

    Kernel mode.  APC_LEVEL and below.

--*/

{
    KLOCK_QUEUE_HANDLE LockHandle;
    PLOCK_TRACKER Tracker;
    PLIST_ENTRY NextEntry;
    PLOCK_HEADER LockedPagesHeader;
    PPFN_NUMBER Page;
    PVOID PoolToFree;

    ASSERT (MemoryDescriptorList->Process != NULL);

    LockedPagesHeader = (PLOCK_HEADER)MemoryDescriptorList->Process->LockedPagesList;

    if (LockedPagesHeader == NULL) {
        return TRUE;
    }

    PoolToFree = NULL;
    Page = (PPFN_NUMBER) (MemoryDescriptorList + 1);

    KeAcquireInStackQueuedSpinLock (&LockedPagesHeader->Lock, &LockHandle);

    NextEntry = LockedPagesHeader->ListHead.Flink;

    while (NextEntry != &LockedPagesHeader->ListHead) {

        Tracker = CONTAINING_RECORD (NextEntry,
                                     LOCK_TRACKER,
                                     ListEntry);

        if (MemoryDescriptorList == Tracker->Mdl) {

            if (PoolToFree != NULL) {
                KeBugCheckEx (LOCKED_PAGES_TRACKER_CORRUPTION,
                              0x3,
                              (ULONG_PTR) PoolToFree,
                              (ULONG_PTR) Tracker,
                              (ULONG_PTR) MemoryDescriptorList);
            }

            ASSERT (Tracker->Page == *Page);
            ASSERT (Tracker->Count == NumberOfPages);

            RemoveEntryList (NextEntry);

            LockedPagesHeader->Count -= NumberOfPages;

            PoolToFree = (PVOID) Tracker;
        }

        NextEntry = Tracker->ListEntry.Flink;
    }

    KeReleaseInStackQueuedSpinLock (&LockHandle);

    if (PoolToFree == NULL) {

        //
        // A driver is trying to unlock pages that aren't locked.
        //

        if (LockedPagesHeader->Valid == FALSE) {
            return TRUE;
        }

        KeBugCheckEx (PROCESS_HAS_LOCKED_PAGES,
                      1,
                      (ULONG_PTR)MemoryDescriptorList,
                      MemoryDescriptorList->Process->NumberOfLockedPages,
                      (ULONG_PTR)MemoryDescriptorList->Process->LockedPagesList);
    }

    ExFreePool (PoolToFree);

    return TRUE;
}
LOGICAL
MmUpdateMdlTracker (
    IN PMDL MemoryDescriptorList,
    IN PVOID CallingAddress,
    IN PVOID CallersCaller
    )

/*++

Routine Description:

    This routine updates an MDL in the specified process' chain.  Used by
    the I/O system so that proper driver identification can be done even
    when the I/O system is actually locking the pages on the driver's
    behalf.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List.

    CallingAddress - Supplies the address of the caller of our caller.

    CallersCaller - Supplies the address of the caller of CallingAddress.

Return Value:

    TRUE if the MDL was found, FALSE if not.

Environment:

    Kernel mode.  APC_LEVEL and below.

--*/

{
    KLOCK_QUEUE_HANDLE LockHandle;
    PLOCK_TRACKER Tracker;
    PLIST_ENTRY NextEntry;
    PLOCK_HEADER LockedPagesHeader;
    PEPROCESS Process;

    ASSERT (MmTrackLockedPages == TRUE);

    Process = MemoryDescriptorList->Process;

    if (Process == NULL) {
        return FALSE;
    }

    LockedPagesHeader = (PLOCK_HEADER) Process->LockedPagesList;

    if (LockedPagesHeader == NULL) {
        return FALSE;
    }

    KeAcquireInStackQueuedSpinLock (&LockedPagesHeader->Lock, &LockHandle);

    //
    // Walk the list backwards as it's likely the MDL was
    // just recently inserted.
    //

    NextEntry = LockedPagesHeader->ListHead.Blink;

    while (NextEntry != &LockedPagesHeader->ListHead) {

        Tracker = CONTAINING_RECORD (NextEntry,
                                     LOCK_TRACKER,
                                     ListEntry);

        if (MemoryDescriptorList == Tracker->Mdl) {

            ASSERT (Tracker->Page == *(PPFN_NUMBER) (MemoryDescriptorList + 1));

            Tracker->CallingAddress = CallingAddress;
            Tracker->CallersCaller = CallersCaller;

            KeReleaseInStackQueuedSpinLock (&LockHandle);

            return TRUE;
        }

        NextEntry = Tracker->ListEntry.Blink;
    }

    KeReleaseInStackQueuedSpinLock (&LockHandle);

    //
    // The caller is trying to update an MDL that is no longer locked.
    //

    return FALSE;
}
LOGICAL
MiUpdateMdlTracker (
    IN PMDL MemoryDescriptorList,
    IN ULONG AdvancePages
    )

/*++

Routine Description:

    This updates an MDL in the specified process' chain.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List.

    AdvancePages - Supplies the number of pages being advanced.

Return Value:

    TRUE if the MDL was found, FALSE if not.

Environment:

    Kernel mode.  DISPATCH_LEVEL and below.

--*/

{
    KLOCK_QUEUE_HANDLE LockHandle;
    PPFN_NUMBER Page;
    PLOCK_TRACKER Tracker;
    PLIST_ENTRY NextEntry;
    PLOCK_HEADER LockedPagesHeader;
    PEPROCESS Process;

    ASSERT (MmTrackLockedPages == TRUE);

    Process = MemoryDescriptorList->Process;

    if (Process == NULL) {
        return FALSE;
    }

    LockedPagesHeader = (PLOCK_HEADER) Process->LockedPagesList;

    if (LockedPagesHeader == NULL) {
        return FALSE;
    }

    KeAcquireInStackQueuedSpinLock (&LockedPagesHeader->Lock, &LockHandle);

    //
    // Walk the list backwards as it's likely the MDL was
    // just recently inserted.
    //

    NextEntry = LockedPagesHeader->ListHead.Blink;

    while (NextEntry != &LockedPagesHeader->ListHead) {

        Tracker = CONTAINING_RECORD (NextEntry,
                                     LOCK_TRACKER,
                                     ListEntry);

        if (MemoryDescriptorList == Tracker->Mdl) {

            Page = (PPFN_NUMBER) (MemoryDescriptorList + 1);

            ASSERT (Tracker->Page == *Page);
            ASSERT (Tracker->Count > AdvancePages);

            Tracker->Page = *(Page + AdvancePages);
            Tracker->Count -= AdvancePages;

            KeReleaseInStackQueuedSpinLock (&LockHandle);

            return TRUE;
        }

        NextEntry = Tracker->ListEntry.Blink;
    }

    KeReleaseInStackQueuedSpinLock (&LockHandle);

    //
    // The caller is trying to update an MDL that is no longer locked.
    //

    return FALSE;
}
NTKERNELAPI
VOID
MmProbeAndLockSelectedPages (
    IN OUT PMDL MemoryDescriptorList,
    IN PFILE_SEGMENT_ELEMENT SegmentArray,
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )

/*++

Routine Description:

    This routine probes the specified pages, makes the pages resident and
    locks the physical pages mapped by the virtual pages in memory.  The
    Memory descriptor list is updated to describe the physical pages.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
                           (MDL).  The MDL must supply the length.  The
                           physical page portion of the MDL is updated when
                           the pages are locked in memory.

    SegmentArray - Supplies a pointer to a list of buffer segments to be
                   probed and locked.

    AccessMode - Supplies the access mode in which to probe the arguments.
                 One of KernelMode or UserMode.

    Operation - Supplies the operation type.  One of IoReadAccess,
                IoWriteAccess or IoModifyAccess.

Return Value:

    None - exceptions are raised.

Environment:

    Kernel mode.  APC_LEVEL and below.

--*/

{
    NTSTATUS Status;
    PMDL TempMdl;
    PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + 1];
    PPFN_NUMBER Page;
    PFILE_SEGMENT_ELEMENT LastSegment;
    PVOID CallingAddress;
    PVOID CallersCaller;
    ULONG NumberOfPagesToLock;

    PAGED_CODE();

    NumberOfPagesToLock = 0;

    ASSERT (MemoryDescriptorList->ByteCount != 0);
    ASSERT (((ULONG_PTR)MemoryDescriptorList->ByteOffset & ~(PAGE_SIZE - 1)) == 0);

    ASSERT ((MemoryDescriptorList->MdlFlags & (
                    MDL_PAGES_LOCKED |
                    MDL_MAPPED_TO_SYSTEM_VA |
                    MDL_SOURCE_IS_NONPAGED_POOL |
                    MDL_PARTIAL |
                    MDL_IO_SPACE)) == 0);

    //
    // Initialize TempMdl.
    //

    TempMdl = (PMDL) MdlHack;

    //
    // Even systems without 64 bit pointers are required to zero the
    // upper 32 bits of the segment address so use alignment rather
    // than the buffer pointer.
    //

    MmInitializeMdl (TempMdl, SegmentArray->Buffer, PAGE_SIZE);

    Page = (PPFN_NUMBER) (MemoryDescriptorList + 1);

    //
    // Calculate the end of the segment list.
    //

    LastSegment = SegmentArray +
                  BYTES_TO_PAGES (MemoryDescriptorList->ByteCount);

    ASSERT (SegmentArray < LastSegment);

    //
    // Build a small MDL for each segment and call probe and lock pages.
    // Then copy the PFNs to the real MDL.  The first page is processed
    // outside of the try/except to ensure that the flags and process
    // field are correctly set in case MmUnlockPages needs to be called.
    //
    // Note that if the MmProbeAndLockPages of the first page raises an
    // exception, it is not handled here, but instead handed directly to
    // our caller (who must be handling it).
    //

    MmProbeAndLockPages (TempMdl, AccessMode, Operation);

    if (MmTrackLockedPages == TRUE) {

        //
        // Since we move the page from the temp MDL to the real one below
        // and never free the temp one, fix up our accounting now.
        //

        if (MiFreeMdlTracker (TempMdl, 1) == TRUE) {
            NumberOfPagesToLock += 1;
        }
    }

    *Page = *((PPFN_NUMBER) (TempMdl + 1));
    Page += 1;

    //
    // Copy the flags and process fields.
    //

    MemoryDescriptorList->MdlFlags |= TempMdl->MdlFlags;
    MemoryDescriptorList->Process = TempMdl->Process;

    Status = STATUS_SUCCESS;

    SegmentArray += 1;

    try {

        while (SegmentArray < LastSegment) {

            //
            // Even systems without 64 bit pointers are required to zero the
            // upper 32 bits of the segment address so use alignment rather
            // than the buffer pointer.
            //

            TempMdl->StartVa = (PVOID)(ULONG_PTR)SegmentArray->Buffer;
            TempMdl->MdlFlags = 0;

            SegmentArray += 1;

            MmProbeAndLockPages (TempMdl, AccessMode, Operation);

            if (MmTrackLockedPages == TRUE) {

                //
                // Since we move the page from the temp MDL to the real one
                // below and never free the temp one, fix up our accounting
                // now.
                //

                if (MiFreeMdlTracker (TempMdl, 1) == TRUE) {
                    NumberOfPagesToLock += 1;
                }
            }

            *Page = *((PPFN_NUMBER) (TempMdl + 1));
            Page += 1;
        }
    } except (EXCEPTION_EXECUTE_HANDLER) {
        Status = GetExceptionCode ();
        ASSERT (!NT_SUCCESS (Status));
    }

    if (!NT_SUCCESS (Status)) {

        //
        // Adjust the MDL length so that MmUnlockPages only processes
        // the part that was completed.
        //

        MemoryDescriptorList->ByteCount =
            (ULONG) (Page - (PPFN_NUMBER) (MemoryDescriptorList + 1)) << PAGE_SHIFT;

        if (MmTrackLockedPages == TRUE) {
            RtlGetCallersAddress (&CallingAddress, &CallersCaller);

            MiAddMdlTracker (MemoryDescriptorList,
                             CallingAddress,
                             CallersCaller,
                             NumberOfPagesToLock,
                             2);
        }

        MmUnlockPages (MemoryDescriptorList);
        ExRaiseStatus (Status);
    }

    if (MmTrackLockedPages == TRUE) {
        RtlGetCallersAddress (&CallingAddress, &CallersCaller);

        MiAddMdlTracker (MemoryDescriptorList,
                         CallingAddress,
                         CallersCaller,
                         NumberOfPagesToLock,
                         3);
    }

    return;
}
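
//
// [Editor's illustration - not part of the original file.]
//
// A sketch of how a caller might drive MmProbeAndLockSelectedPages, e.g.
// for a scatter/gather style transfer: each FILE_SEGMENT_ELEMENT names
// one page-aligned page, and the MDL's ByteCount covers one page per
// element.  In practice the I/O system builds this MDL internally;
// SamplePinSegments, Segments and SegmentCount are hypothetical names,
// and passing NULL to IoAllocateMdl (yielding a zero byte offset, as the
// ASSERTs above require) is an assumption of this sketch.
//

#if 0

NTSTATUS
SamplePinSegments (
    IN PFILE_SEGMENT_ELEMENT Segments,
    IN ULONG SegmentCount,
    OUT PMDL *MdlOut
    )
{
    PMDL Mdl;

    Mdl = IoAllocateMdl (NULL, SegmentCount * PAGE_SIZE, FALSE, FALSE, NULL);

    if (Mdl == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    try {
        MmProbeAndLockSelectedPages (Mdl, Segments, UserMode, IoWriteAccess);
    } except (EXCEPTION_EXECUTE_HANDLER) {
        IoFreeMdl (Mdl);
        return GetExceptionCode ();
    }

    *MdlOut = Mdl;

    return STATUS_SUCCESS;
}

#endif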
  1525. VOID
  1526. MiDecrementReferenceCountForAwePage (
  1527. IN PMMPFN Pfn1,
  1528. IN LOGICAL PfnHeld
  1529. )
  1530. /*++
  1531. Routine Description:
  1532. This routine decrements the reference count for an AWE-allocated page.
  1533. Descriptor List. If this decrements the count to zero, the page is
  1534. put on the freelist and various resident available and commitment
  1535. counters are updated.
  1536. Arguments:
  1537. Pfn - Supplies a pointer to the PFN database element for the physical
  1538. page to decrement the reference count for.
  1539. PfnHeld - Supplies TRUE if the caller holds the PFN lock.
  1540. Return Value:
  1541. None.
  1542. Environment:
  1543. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  1544. --*/
  1545. {
  1546. KIRQL OldIrql;
  1547. if (PfnHeld == FALSE) {
  1548. LOCK_PFN2 (OldIrql);
  1549. }
  1550. else {
  1551. OldIrql = PASSIVE_LEVEL;
  1552. }
  1553. ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
  1554. ASSERT (Pfn1->AweReferenceCount == 0);
  1555. ASSERT (Pfn1->u4.AweAllocation == 1);
  1556. if (Pfn1->u3.e2.ReferenceCount >= 2) {
  1557. Pfn1->u3.e2.ReferenceCount -= 1;
  1558. if (PfnHeld == FALSE) {
  1559. UNLOCK_PFN2 (OldIrql);
  1560. }
  1561. }
  1562. else {
  1563. //
  1564. // This is the final dereference - the page was sitting in
  1565. // limbo (not on any list) waiting for this last I/O to complete.
  1566. //
  1567. ASSERT (Pfn1->u3.e1.PageLocation != ActiveAndValid);
  1568. ASSERT (Pfn1->u2.ShareCount == 0);
  1569. MiDecrementReferenceCount (Pfn1, MI_PFN_ELEMENT_TO_INDEX (Pfn1));
  1570. if (PfnHeld == FALSE) {
  1571. UNLOCK_PFN2 (OldIrql);
  1572. }
  1573. }
  1574. MI_INCREMENT_RESIDENT_AVAILABLE (1, MM_RESAVAIL_FREE_AWE);
  1575. MiReturnCommitment (1);
  1576. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_MDL_PAGES, 1);
  1577. InterlockedExchangeAddSizeT (&MmMdlPagesAllocated, -1);
  1578. return;
  1579. }
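#if 0
//
// Illustrative sketch only - NOT part of the original source. This is
// the caller-side pattern (used by MmUnlockPages below) that feeds the
// routine above: drive the AWE reference count to zero with a lock-free
// compare-exchange loop, and let whichever thread reaches zero pay for
// the PFN-lock dereference.
//
VOID
SketchDropAweReference (
    IN PMMPFN Pfn1
    )
{
    LONG EntryCount;

    do {
        EntryCount = Pfn1->AweReferenceCount;
        ASSERT (EntryCount > 0);
    } while (InterlockedCompareExchange (&Pfn1->AweReferenceCount,
                                         EntryCount - 1,
                                         EntryCount) != EntryCount);

    if (EntryCount == 1) {

        //
        // This thread drove the count to zero, so it must initiate
        // the final PFN reference count decrement.
        //

        MiDecrementReferenceCountForAwePage (Pfn1, FALSE);
    }
}
#endif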
  1580. SINGLE_LIST_ENTRY MmLockedIoPagesHead;
  1581. LOGICAL
  1582. MiReferenceIoSpace (
  1583. IN OUT PMDL MemoryDescriptorList,
  1584. IN PPFN_NUMBER Page
  1585. )
  1586. /*++
  1587. Routine Description:
  1588. This routine reference counts physical pages which reside in I/O space
  1589. when they are probed on behalf of a user-initiated transfer. These counts
  1590. cannot be kept inside PFN database entries because there are no PFN entries
  1591. for I/O space.
  1592. These counts are kept because SANs will want to reuse the physical space
  1593. but cannot do this unless it is guaranteed there are no more pending I/Os
  1594. going from/to it. If the process has not exited, but the SAN driver has
  1595. unmapped the views to its I/O space, it still needs this as a way to know
  1596. that the application does not have a transfer in progress to the previously
  1597. mapped space.
  1598. Arguments:
  1599. MemoryDescriptorList - Supplies a pointer to the memory descriptor list.
  1600. Page - Supplies a pointer to the PFN just after the end of the MDL.
  1601. Return Value:
  1602. TRUE if the reference counts were updated, FALSE if not.
  1603. Environment:
  1604. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  1605. --*/
  1606. {
  1607. PMDL Tracker;
  1608. KIRQL OldIrql;
  1609. SIZE_T MdlSize;
  1610. MdlSize = (PCHAR)Page - (PCHAR)MemoryDescriptorList;
  1611. Tracker = ExAllocatePoolWithTag (NonPagedPool, MdlSize, 'tImM');
  1612. if (Tracker == NULL) {
  1613. return FALSE;
  1614. }
  1615. RtlCopyMemory ((PVOID) Tracker,
  1616. (PVOID) MemoryDescriptorList,
  1617. MdlSize);
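//
// Note the copied MDL's MappedSystemVa field is repurposed as a
// back-pointer to the caller's original MDL so MiDereferenceIoSpace
// can locate this entry later.
//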
  1618. Tracker->MappedSystemVa = (PVOID) MemoryDescriptorList;
  1619. //
  1620. // Add this transfer to the list.
  1621. //
  1622. ExAcquireSpinLock (&MiTrackIoLock, &OldIrql);
  1623. PushEntryList (&MmLockedIoPagesHead, (PSINGLE_LIST_ENTRY) Tracker);
  1624. ExReleaseSpinLock (&MiTrackIoLock, OldIrql);
  1625. return TRUE;
  1626. }
  1627. NTKERNELAPI
  1628. LOGICAL
  1629. MiDereferenceIoSpace (
  1630. IN OUT PMDL MemoryDescriptorList
  1631. )
  1632. /*++
  1633. Routine Description:
  1634. This routine decrements the reference counts on physical pages which
  1635. reside in I/O space that were previously probed on behalf of a
  1636. user-initiated transfer. These counts cannot be kept inside PFN
  1637. database entries because there are no PFN entries for I/O space.
  1638. These counts are kept because SANs will want to reuse the physical space
  1639. but cannot do this unless it is guaranteed there are no more pending I/Os
  1640. going from/to it. If the process has not exited, but the SAN driver has
  1641. unmapped the views to its I/O space, it still needs this as a way to know
  1642. that the application does not have a transfer in progress to the previously
  1643. mapped space.
  1644. Arguments:
  1645. MemoryDescriptorList - Supplies a pointer to the memory descriptor list.
  1646. Return Value:
  1647. TRUE if the reference counts were updated, FALSE if not.
  1648. Environment:
  1649. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  1650. --*/
  1651. {
  1652. KIRQL OldIrql;
  1653. PMDL PrevEntry;
  1654. PMDL NextEntry;
  1655. PrevEntry = NULL;
  1656. ExAcquireSpinLock (&MiTrackIoLock, &OldIrql);
  1657. NextEntry = (PMDL) MmLockedIoPagesHead.Next;
  1658. while (NextEntry != NULL) {
  1659. if (NextEntry->MappedSystemVa == (PVOID) MemoryDescriptorList) {
  1660. if (PrevEntry != NULL) {
  1661. PrevEntry->Next = NextEntry->Next;
  1662. }
  1663. else {
  1664. MmLockedIoPagesHead.Next = (PSINGLE_LIST_ENTRY) NextEntry->Next;
  1665. }
  1666. ExReleaseSpinLock (&MiTrackIoLock, OldIrql);
  1667. ExFreePool (NextEntry);
  1668. return TRUE;
  1669. }
  1670. PrevEntry = NextEntry;
  1671. NextEntry = NextEntry->Next;
  1672. }
  1673. ExReleaseSpinLock (&MiTrackIoLock, OldIrql);
  1674. return FALSE;
  1675. }
  1676. LOGICAL
  1677. MmIsIoSpaceActive (
  1678. IN PHYSICAL_ADDRESS StartAddress,
  1679. IN SIZE_T NumberOfBytes
  1680. )
  1681. /*++
  1682. Routine Description:
  1683. This routine returns TRUE if any portion of the requested range still has
  1684. an outstanding pending I/O. It is the calling driver's responsibility to
  1685. unmap all usermode mappings to the specified range (so another transfer
  1686. cannot be initiated) prior to calling this API.
  1687. These counts are kept because SANs will want to reuse the physical space
  1688. but cannot do this unless it is guaranteed there are no more pending I/Os
  1689. going from/to it. If the process has not exited, but the SAN driver has
  1690. unmapped the views to its I/O space, it still needs this as a way to know
  1691. that the application does not have a transfer in progress to the previously
  1692. mapped space.
  1693. Arguments:
  1694. StartAddress - Supplies the physical address of the start of the I/O range.
  1695. This MUST NOT be within system DRAM as those pages are not
  1696. tracked by this structure.
  1697. NumberOfBytes - Supplies the number of bytes in the range.
  1698. Return Value:
  1699. TRUE if any page in the range is currently locked for I/O, FALSE if not.
  1700. Environment:
  1701. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  1702. --*/
  1703. {
  1704. KIRQL OldIrql;
  1705. PFN_NUMBER NumberOfPages;
  1706. PPFN_NUMBER Page;
  1707. PPFN_NUMBER LastPage;
  1708. PVOID StartingVa;
  1709. PMDL MemoryDescriptorList;
  1710. PFN_NUMBER StartPage;
  1711. PFN_NUMBER EndPage;
  1712. PHYSICAL_ADDRESS EndAddress;
  1713. ASSERT (NumberOfBytes != 0);
  1714. StartPage = (PFN_NUMBER) (StartAddress.QuadPart >> PAGE_SHIFT);
  1715. EndAddress.QuadPart = StartAddress.QuadPart + NumberOfBytes - 1;
  1716. EndPage = (PFN_NUMBER) (EndAddress.QuadPart >> PAGE_SHIFT);
  1717. #if DBG
  1718. do {
  1719. ASSERT (!MI_IS_PFN (StartPage));
  1720. StartPage += 1;
  1721. } while (StartPage <= EndPage);
  1722. StartPage = (PFN_NUMBER) (StartAddress.QuadPart >> PAGE_SHIFT);
  1723. #endif
  1724. ExAcquireSpinLock (&MiTrackIoLock, &OldIrql);
  1725. MemoryDescriptorList = (PMDL) MmLockedIoPagesHead.Next;
  1726. while (MemoryDescriptorList != NULL) {
  1727. StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
  1728. MemoryDescriptorList->ByteOffset);
  1729. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingVa,
  1730. MemoryDescriptorList->ByteCount);
  1731. ASSERT (NumberOfPages != 0);
  1732. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  1733. LastPage = Page + NumberOfPages;
  1734. do {
  1735. if (*Page == MM_EMPTY_LIST) {
  1736. //
  1737. // There are no more locked pages.
  1738. //
  1739. break;
  1740. }
  1741. if ((*Page >= StartPage) && (*Page <= EndPage)) {
  1742. ExReleaseSpinLock (&MiTrackIoLock, OldIrql);
  1743. return TRUE;
  1744. }
  1745. Page += 1;
  1746. } while (Page < LastPage);
  1747. MemoryDescriptorList = MemoryDescriptorList->Next;
  1748. }
  1749. ExReleaseSpinLock (&MiTrackIoLock, OldIrql);
  1750. return FALSE;
  1751. }
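#if 0
//
// Illustrative sketch only - NOT part of the original source. A
// SAN-style driver might use MmIsIoSpaceActive as shown: first tear
// down all usermode mappings (so no new transfer can be initiated),
// then poll until in-flight I/O to the range drains. The names here
// are hypothetical.
//
LOGICAL
SketchCanRecycleIoRange (
    IN PHYSICAL_ADDRESS SanRangeStart,
    IN SIZE_T SanRangeBytes
    )
{
    //
    // Usermode views must already be unmapped by the caller.
    //

    if (MmIsIoSpaceActive (SanRangeStart, SanRangeBytes)) {
        return FALSE;       // a transfer is still pending - retry later
    }

    return TRUE;            // safe to reuse the physical space
}
#endif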
  1752. VOID
  1753. MmUnlockPages (
  1754. IN OUT PMDL MemoryDescriptorList
  1755. )
  1756. /*++
  1757. Routine Description:
  1758. This routine unlocks physical pages which are described by a Memory
  1759. Descriptor List.
  1760. Arguments:
  1761. MemoryDescriptorList - Supplies a pointer to a memory descriptor list
  1762. (MDL). The supplied MDL must have been supplied
  1763. to MmLockPages to lock the pages down. As the
  1764. pages are unlocked, the MDL is updated.
  1765. Return Value:
  1766. None.
  1767. Environment:
  1768. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  1769. --*/
  1770. {
  1771. LONG EntryCount;
  1772. LONG OriginalCount;
  1773. PVOID OldValue;
  1774. PEPROCESS Process;
  1775. PFN_NUMBER NumberOfPages;
  1776. PPFN_NUMBER Page;
  1777. PPFN_NUMBER LastPage;
  1778. PVOID StartingVa;
  1779. KIRQL OldIrql;
  1780. PMMPFN Pfn1;
  1781. CSHORT MdlFlags;
  1782. PSLIST_ENTRY SingleListEntry;
  1783. PMI_PFN_DEREFERENCE_CHUNK DerefMdl;
  1784. PSLIST_HEADER PfnDereferenceSListHead;
  1785. PSLIST_ENTRY *PfnDeferredList;
  1786. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PAGES_LOCKED) != 0);
  1787. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
  1788. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) == 0);
  1789. ASSERT (MemoryDescriptorList->ByteCount != 0);
  1790. Process = MemoryDescriptorList->Process;
  1791. //
  1792. // Carefully snap a copy of the MDL flags - realize that bits in it may
  1793. // change due to some of the subroutines called below. Only bits that
  1794. // we know can't change are examined in this local copy. This is done
  1795. // to reduce the amount of processing while the PFN lock is held.
  1796. //
  1797. MdlFlags = MemoryDescriptorList->MdlFlags;
  1798. if (MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
  1799. //
  1800. // This MDL has been mapped into system space, unmap now.
  1801. //
  1802. MmUnmapLockedPages (MemoryDescriptorList->MappedSystemVa,
  1803. MemoryDescriptorList);
  1804. }
  1805. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  1806. StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
  1807. MemoryDescriptorList->ByteOffset);
  1808. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingVa,
  1809. MemoryDescriptorList->ByteCount);
  1810. ASSERT (NumberOfPages != 0);
  1811. if (MdlFlags & MDL_DESCRIBES_AWE) {
  1812. ASSERT (Process != NULL);
  1813. ASSERT (Process->AweInfo != NULL);
  1814. LastPage = Page + NumberOfPages;
  1815. //
  1816. // Note neither AWE nor PFN locks are needed for unlocking these MDLs
  1817. // in all but the very rare cases (see below).
  1818. //
  1819. do {
  1820. if (*Page == MM_EMPTY_LIST) {
  1821. //
  1822. // There are no more locked pages - if there were none at all
  1823. // then we're done.
  1824. //
  1825. break;
  1826. }
  1827. ASSERT (MI_IS_PFN (*Page));
  1828. ASSERT (*Page <= MmHighestPhysicalPage);
  1829. Pfn1 = MI_PFN_ELEMENT (*Page);
  1830. do {
  1831. EntryCount = Pfn1->AweReferenceCount;
  1832. ASSERT ((LONG)EntryCount > 0);
  1833. ASSERT (Pfn1->u4.AweAllocation == 1);
  1834. ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
  1835. OriginalCount = InterlockedCompareExchange (&Pfn1->AweReferenceCount,
  1836. EntryCount - 1,
  1837. EntryCount);
  1838. if (OriginalCount == EntryCount) {
  1839. //
  1840. // This thread can be racing against other threads also
  1841. // calling MmUnlockPages and also a thread calling
  1842. // NtFreeUserPhysicalPages. All threads can safely do
  1843. // interlocked decrements on the "AWE reference count".
  1844. // Whichever thread drives it to zero is responsible for
  1845. // decrementing the actual PFN reference count (which may
  1846. // be greater than 1 due to other non-AWE API calls being
  1847. // used on the same page). The thread that drives this
  1848. // reference count to zero must put the page on the actual
  1849. // freelist at that time and decrement various resident
  1850. // available and commitment counters also.
  1851. //
  1852. if (OriginalCount == 1) {
  1853. //
  1854. // This thread has driven the AWE reference count to
  1855. // zero so it must initiate a decrement of the PFN
  1856. // reference count (while holding the PFN lock), etc.
  1857. //
  1858. // This path should be rare since typically I/Os
  1859. // complete before these types of pages are freed by
  1860. // the app.
  1861. //
  1862. MiDecrementReferenceCountForAwePage (Pfn1, FALSE);
  1863. }
  1864. break;
  1865. }
  1866. } while (TRUE);
  1867. Page += 1;
  1868. } while (Page < LastPage);
  1869. MemoryDescriptorList->MdlFlags &= ~(MDL_PAGES_LOCKED | MDL_DESCRIBES_AWE);
  1870. return;
  1871. }
  1872. if ((MmTrackLockedPages == TRUE) && (Process != NULL)) {
  1873. MiFreeMdlTracker (MemoryDescriptorList, NumberOfPages);
  1874. }
  1875. //
  1876. // Only unlock if not I/O space.
  1877. //
  1878. if ((MdlFlags & MDL_IO_SPACE) == 0) {
  1879. if (Process != NULL) {
  1880. ASSERT ((SPFN_NUMBER)Process->NumberOfLockedPages >= 0);
  1881. InterlockedExchangeAddSizeT (&Process->NumberOfLockedPages,
  1882. 0 - NumberOfPages);
  1883. }
  1884. LastPage = Page + NumberOfPages;
  1885. //
  1886. // Calculate PFN addresses and termination without the PFN lock
  1887. // (it's not needed for this) to reduce PFN lock contention.
  1888. //
  1889. ASSERT (sizeof(PFN_NUMBER) == sizeof(PMMPFN));
  1890. do {
  1891. if (*Page == MM_EMPTY_LIST) {
  1892. //
  1893. // There are no more locked pages - if there were none at all
  1894. // then we're done.
  1895. //
  1896. if (Page == (PPFN_NUMBER)(MemoryDescriptorList + 1)) {
  1897. MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
  1898. return;
  1899. }
  1900. LastPage = Page;
  1901. break;
  1902. }
  1903. ASSERT (MI_IS_PFN (*Page));
  1904. ASSERT (*Page <= MmHighestPhysicalPage);
  1905. Pfn1 = MI_PFN_ELEMENT (*Page);
  1906. *Page = (PFN_NUMBER) Pfn1;
  1907. Page += 1;
  1908. } while (Page < LastPage);
  1909. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  1910. //
  1911. // If the MDL can be queued so the PFN acquisition/release can be
  1912. // amortized then do so.
  1913. //
  1914. if (NumberOfPages <= MI_MAX_DEREFERENCE_CHUNK) {
  1915. #if defined(MI_MULTINODE)
  1916. PKNODE Node = KeGetCurrentNode ();
  1917. //
  1918. // The node may change beneath us but that should be fairly
  1919. // infrequent and not worth checking for. Just make sure the
  1920. // same node that gives us a free entry gets the deferred entry
  1921. // back.
  1922. //
  1923. PfnDereferenceSListHead = &Node->PfnDereferenceSListHead;
  1924. #else
  1925. PfnDereferenceSListHead = &MmPfnDereferenceSListHead;
  1926. #endif
  1927. //
  1928. // Pop an entry from the freelist.
  1929. //
  1930. SingleListEntry = InterlockedPopEntrySList (PfnDereferenceSListHead);
  1931. if (SingleListEntry != NULL) {
  1932. DerefMdl = CONTAINING_RECORD (SingleListEntry,
  1933. MI_PFN_DEREFERENCE_CHUNK,
  1934. ListEntry);
  1935. DerefMdl->Flags = MdlFlags;
  1936. DerefMdl->NumberOfPages = (USHORT) (LastPage - Page);
  1937. #if defined (_WIN64)
  1938. //
  1939. // Avoid the majority of the high cost of RtlCopyMemory on
  1940. // 64 bit platforms.
  1941. //
  1942. if (DerefMdl->NumberOfPages == 1) {
  1943. DerefMdl->Pfns[0] = *Page;
  1944. }
  1945. else if (DerefMdl->NumberOfPages == 2) {
  1946. DerefMdl->Pfns[0] = *Page;
  1947. DerefMdl->Pfns[1] = *(Page + 1);
  1948. }
  1949. else
  1950. #endif
  1951. RtlCopyMemory ((PVOID)(&DerefMdl->Pfns[0]),
  1952. (PVOID)Page,
  1953. (LastPage - Page) * sizeof (PFN_NUMBER));
  1954. MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
  1955. //
  1956. // Push this entry on the deferred list.
  1957. //
  1958. #if defined(MI_MULTINODE)
  1959. PfnDeferredList = &Node->PfnDeferredList;
  1960. #else
  1961. PfnDeferredList = &MmPfnDeferredList;
  1962. #endif
  1963. do {
  1964. OldValue = *PfnDeferredList;
  1965. SingleListEntry->Next = OldValue;
  1966. } while (InterlockedCompareExchangePointer (
  1967. PfnDeferredList,
  1968. SingleListEntry,
  1969. OldValue) != OldValue);
  1970. return;
  1971. }
  1972. }
  1973. SingleListEntry = NULL;
  1974. if (MdlFlags & MDL_WRITE_OPERATION) {
  1975. LOCK_PFN2 (OldIrql);
  1976. do {
  1977. //
  1978. // If this was a write operation set the modified bit in the
  1979. // PFN database.
  1980. //
  1981. Pfn1 = (PMMPFN) (*Page);
  1982. MI_SET_MODIFIED (Pfn1, 1, 0x3);
  1983. if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
  1984. (Pfn1->u3.e1.WriteInProgress == 0)) {
  1985. ULONG FreeBit;
  1986. FreeBit = GET_PAGING_FILE_OFFSET (Pfn1->OriginalPte);
  1987. if ((FreeBit != 0) && (FreeBit != MI_PTE_LOOKUP_NEEDED)) {
  1988. MiReleaseConfirmedPageFileSpace (Pfn1->OriginalPte);
  1989. Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
  1990. }
  1991. }
  1992. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF(Pfn1, 1);
  1993. Page += 1;
  1994. } while (Page < LastPage);
  1995. }
  1996. else {
  1997. LOCK_PFN2 (OldIrql);
  1998. do {
  1999. Pfn1 = (PMMPFN) (*Page);
  2000. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF(Pfn1, 1);
  2001. Page += 1;
  2002. } while (Page < LastPage);
  2003. }
  2004. if (NumberOfPages <= MI_MAX_DEREFERENCE_CHUNK) {
  2005. //
  2006. // The only reason this code path is being reached is because
  2007. // a deferred entry was not available so clear the list now.
  2008. //
  2009. MiDeferredUnlockPages (MI_DEFER_PFN_HELD | MI_DEFER_DRAIN_LOCAL_ONLY);
  2010. }
  2011. UNLOCK_PFN2 (OldIrql);
  2012. }
  2013. else {
  2014. MiDereferenceIoSpace (MemoryDescriptorList);
  2015. }
  2016. MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
  2017. return;
  2018. }
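#if 0
//
// Illustrative sketch only - NOT part of the original source. The
// typical lock/map/unlock pairing that exercises the
// MDL_MAPPED_TO_SYSTEM_VA path above; MmUnlockPages unmaps any system
// mapping automatically before dereferencing the pages. The MDL is
// assumed to have been probed and locked already.
//
VOID
SketchTouchLockedPages (
    IN PMDL Mdl
    )
{
    PVOID SystemVa;

    SystemVa = MmGetSystemAddressForMdlSafe (Mdl, NormalPagePriority);

    if (SystemVa != NULL) {
        RtlZeroMemory (SystemVa, MmGetMdlByteCount (Mdl));
    }

    MmUnlockPages (Mdl);
}
#endif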
  2019. VOID
  2020. MiDeferredUnlockPages (
  2021. ULONG Flags
  2022. )
  2023. /*++
  2024. Routine Description:
  2025. This routine unlocks physical pages which were previously described by
  2026. a Memory Descriptor List.
  2027. Arguments:
2028. Flags - Supplies a bitfield of the caller's needs:
  2029. MI_DEFER_PFN_HELD - Indicates the caller holds the PFN lock on entry.
  2030. MI_DEFER_DRAIN_LOCAL_ONLY - Indicates the caller only wishes to drain
  2031. the current processor's queue. This only
  2032. has meaning in NUMA systems.
  2033. Return Value:
  2034. None.
  2035. Environment:
  2036. Kernel mode, PFN database lock *MAY* be held on entry (see Flags).
  2037. --*/
  2038. {
  2039. KIRQL OldIrql = 0;
  2040. ULONG FreeBit;
  2041. ULONG i;
  2042. ULONG ListCount;
  2043. ULONG TotalNodes;
  2044. PFN_NUMBER NumberOfPages;
  2045. PPFN_NUMBER Page;
  2046. PPFN_NUMBER LastPage;
  2047. PMMPFN Pfn1;
  2048. CSHORT MdlFlags;
  2049. PSLIST_ENTRY SingleListEntry;
  2050. PSLIST_ENTRY LastEntry;
  2051. PSLIST_ENTRY FirstEntry;
  2052. PSLIST_ENTRY NextEntry;
  2053. PSLIST_ENTRY VeryLastEntry;
  2054. PMI_PFN_DEREFERENCE_CHUNK DerefMdl;
  2055. PSLIST_HEADER PfnDereferenceSListHead;
  2056. PSLIST_ENTRY *PfnDeferredList;
  2057. #if defined(MI_MULTINODE)
  2058. PKNODE Node;
  2059. #endif
  2060. i = 0;
  2061. ListCount = 0;
  2062. TotalNodes = 1;
  2063. if ((Flags & MI_DEFER_PFN_HELD) == 0) {
  2064. LOCK_PFN2 (OldIrql);
  2065. }
  2066. MM_PFN_LOCK_ASSERT();
  2067. #if defined(MI_MULTINODE)
  2068. if (Flags & MI_DEFER_DRAIN_LOCAL_ONLY) {
  2069. Node = KeGetCurrentNode();
  2070. PfnDeferredList = &Node->PfnDeferredList;
  2071. PfnDereferenceSListHead = &Node->PfnDereferenceSListHead;
  2072. }
  2073. else {
  2074. TotalNodes = KeNumberNodes;
  2075. Node = KeNodeBlock[0];
  2076. PfnDeferredList = &Node->PfnDeferredList;
  2077. PfnDereferenceSListHead = &Node->PfnDereferenceSListHead;
  2078. }
  2079. #else
  2080. PfnDeferredList = &MmPfnDeferredList;
  2081. PfnDereferenceSListHead = &MmPfnDereferenceSListHead;
  2082. #endif
  2083. do {
  2084. if (*PfnDeferredList == NULL) {
  2085. #if !defined(MI_MULTINODE)
  2086. if ((Flags & MI_DEFER_PFN_HELD) == 0) {
  2087. UNLOCK_PFN2 (OldIrql);
  2088. }
  2089. return;
  2090. #else
  2091. i += 1;
  2092. if (i < TotalNodes) {
  2093. Node = KeNodeBlock[i];
  2094. PfnDeferredList = &Node->PfnDeferredList;
  2095. PfnDereferenceSListHead = &Node->PfnDereferenceSListHead;
  2096. continue;
  2097. }
  2098. break;
  2099. #endif
  2100. }
  2101. //
  2102. // Process each deferred unlock entry until they're all done.
  2103. //
  2104. LastEntry = NULL;
  2105. VeryLastEntry = NULL;
  2106. do {
  2107. SingleListEntry = *PfnDeferredList;
  2108. FirstEntry = SingleListEntry;
  2109. do {
  2110. NextEntry = SingleListEntry->Next;
  2111. //
  2112. // Process the deferred entry.
  2113. //
  2114. DerefMdl = CONTAINING_RECORD (SingleListEntry,
  2115. MI_PFN_DEREFERENCE_CHUNK,
  2116. ListEntry);
  2117. MdlFlags = DerefMdl->Flags;
  2118. NumberOfPages = (PFN_NUMBER) DerefMdl->NumberOfPages;
  2119. ASSERT (NumberOfPages <= MI_MAX_DEREFERENCE_CHUNK);
  2120. Page = &DerefMdl->Pfns[0];
  2121. LastPage = Page + NumberOfPages;
  2122. #if DBG
  2123. //
  2124. // Mark the entry as processed so if it mistakenly gets
  2125. // reprocessed, we will assert above.
  2126. //
  2127. DerefMdl->NumberOfPages |= 0x80;
  2128. #endif
  2129. if (MdlFlags & MDL_WRITE_OPERATION) {
  2130. do {
  2131. //
  2132. // If this was a write operation set the modified bit
  2133. // in the PFN database.
  2134. //
  2135. Pfn1 = (PMMPFN) (*Page);
  2136. MI_SET_MODIFIED (Pfn1, 1, 0x4);
  2137. if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
  2138. (Pfn1->u3.e1.WriteInProgress == 0)) {
  2139. FreeBit = GET_PAGING_FILE_OFFSET (Pfn1->OriginalPte);
  2140. if ((FreeBit != 0) && (FreeBit != MI_PTE_LOOKUP_NEEDED)) {
  2141. MiReleaseConfirmedPageFileSpace (Pfn1->OriginalPte);
  2142. Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
  2143. }
  2144. }
  2145. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF(Pfn1, 1);
  2146. Page += 1;
  2147. } while (Page < LastPage);
  2148. }
  2149. else {
  2150. do {
  2151. Pfn1 = (PMMPFN) (*Page);
  2152. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF(Pfn1, 1);
  2153. Page += 1;
  2154. } while (Page < LastPage);
  2155. }
  2156. ListCount += 1;
  2157. //
  2158. // March on to the next entry if there is one.
  2159. //
  2160. if (NextEntry == LastEntry) {
  2161. break;
  2162. }
  2163. SingleListEntry = NextEntry;
  2164. } while (TRUE);
  2165. if (VeryLastEntry == NULL) {
  2166. VeryLastEntry = SingleListEntry;
  2167. }
  2168. if ((*PfnDeferredList == FirstEntry) &&
  2169. (InterlockedCompareExchangePointer (PfnDeferredList,
  2170. NULL,
  2171. FirstEntry) == FirstEntry)) {
  2172. ASSERT (*PfnDeferredList != FirstEntry);
  2173. break;
  2174. }
  2175. ASSERT (*PfnDeferredList != FirstEntry);
  2176. LastEntry = FirstEntry;
  2177. } while (TRUE);
  2178. //
  2179. // Push the processed list chain on the freelist.
  2180. //
  2181. ASSERT (ListCount != 0);
  2182. ASSERT (FirstEntry != NULL);
  2183. ASSERT (VeryLastEntry != NULL);
  2184. #if defined(MI_MULTINODE)
  2185. InterlockedPushListSList (PfnDereferenceSListHead,
  2186. FirstEntry,
  2187. VeryLastEntry,
  2188. ListCount);
  2189. i += 1;
  2190. if (i < TotalNodes) {
  2191. Node = KeNodeBlock[i];
  2192. PfnDeferredList = &Node->PfnDeferredList;
  2193. PfnDereferenceSListHead = &Node->PfnDereferenceSListHead;
  2194. ListCount = 0;
  2195. }
  2196. else {
  2197. break;
  2198. }
  2199. } while (TRUE);
  2200. #else
  2201. } while (FALSE);
  2202. #endif
  2203. if ((Flags & MI_DEFER_PFN_HELD) == 0) {
  2204. UNLOCK_PFN2 (OldIrql);
  2205. }
  2206. #if !defined(MI_MULTINODE)
  2207. //
  2208. // If possible, push the processed chain after releasing the PFN lock.
  2209. //
  2210. InterlockedPushListSList (PfnDereferenceSListHead,
  2211. FirstEntry,
  2212. VeryLastEntry,
  2213. ListCount);
  2214. #endif
  2215. }
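#if 0
//
// Illustrative sketch only - NOT part of the original source. The
// detach idiom used above: atomically swap the deferred list head with
// NULL so the entire chain can be processed without holding a list
// lock (the PFN lock protects only the page updates themselves).
//
PSLIST_ENTRY
SketchDetachDeferredChain (
    IN OUT PSLIST_ENTRY *ListHead
    )
{
    PSLIST_ENTRY Chain;

    do {
        Chain = *ListHead;
        if (Chain == NULL) {
            break;
        }
    } while (InterlockedCompareExchangePointer (ListHead,
                                                NULL,
                                                Chain) != Chain);

    return Chain;
}
#endif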
  2216. VOID
  2217. MmBuildMdlForNonPagedPool (
  2218. IN OUT PMDL MemoryDescriptorList
  2219. )
  2220. /*++
  2221. Routine Description:
  2222. This routine fills in the "pages" portion of the MDL using the PFN
  2223. numbers corresponding to the buffers which reside in non-paged pool.
2224. Unlike MmProbeAndLockPages, there is no corresponding unlock and no
2225. reference counts are incremented, because buffers residing in nonpaged
2226. pool are always resident.
  2227. Arguments:
  2228. MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
  2229. (MDL). The supplied MDL must supply a virtual
  2230. address, byte offset and length field. The
  2231. physical page portion of the MDL is updated when
  2232. the pages are locked in memory. The virtual
  2233. address must be within the non-paged portion
  2234. of the system space.
  2235. Return Value:
  2236. None.
  2237. Environment:
  2238. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  2239. --*/
  2240. {
  2241. PPFN_NUMBER Page;
  2242. PPFN_NUMBER EndPage;
  2243. PMMPTE PointerPte;
  2244. PVOID VirtualAddress;
  2245. PFN_NUMBER PageFrameIndex;
  2246. PFN_NUMBER NumberOfPages;
  2247. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  2248. ASSERT (MemoryDescriptorList->ByteCount != 0);
  2249. ASSERT ((MemoryDescriptorList->MdlFlags & (
  2250. MDL_PAGES_LOCKED |
  2251. MDL_MAPPED_TO_SYSTEM_VA |
  2252. MDL_SOURCE_IS_NONPAGED_POOL |
  2253. MDL_PARTIAL)) == 0);
  2254. MemoryDescriptorList->Process = NULL;
  2255. //
  2256. // Endva is last byte of the buffer.
  2257. //
  2258. MemoryDescriptorList->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
  2259. ASSERT (MmIsNonPagedSystemAddressValid (MemoryDescriptorList->StartVa));
  2260. VirtualAddress = MemoryDescriptorList->StartVa;
  2261. MemoryDescriptorList->MappedSystemVa =
  2262. (PVOID)((PCHAR)VirtualAddress + MemoryDescriptorList->ByteOffset);
  2263. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (MemoryDescriptorList->MappedSystemVa,
  2264. MemoryDescriptorList->ByteCount);
  2265. ASSERT (NumberOfPages != 0);
  2266. EndPage = Page + NumberOfPages;
  2267. if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) {
  2268. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (VirtualAddress);
  2269. do {
  2270. *Page = PageFrameIndex;
  2271. Page += 1;
  2272. PageFrameIndex += 1;
  2273. } while (Page < EndPage);
  2274. }
  2275. else {
  2276. PointerPte = MiGetPteAddress (VirtualAddress);
  2277. do {
  2278. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  2279. *Page = PageFrameIndex;
  2280. Page += 1;
  2281. PointerPte += 1;
  2282. } while (Page < EndPage);
  2283. }
  2284. //
  2285. // Assume either all the frames are in the PFN database (ie: the MDL maps
2286. // pool) or none of them are (ie: the MDL maps dualport RAM).
  2287. //
  2288. if (!MI_IS_PFN (PageFrameIndex)) {
  2289. MemoryDescriptorList->MdlFlags |= MDL_IO_SPACE;
  2290. }
  2291. return;
  2292. }
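#if 0
//
// Illustrative sketch only - NOT part of the original source. Typical
// use of the routine above: describe a nonpaged pool buffer with an
// MDL so it can be handed down a driver stack; no MmUnlockPages is
// needed afterwards because nothing was reference counted. The pool
// tag is made up.
//
PMDL
SketchBuildPoolMdl (
    IN SIZE_T NumberOfBytes
    )
{
    PVOID Buffer;
    PMDL Mdl;

    Buffer = ExAllocatePoolWithTag (NonPagedPool, NumberOfBytes, 'hctS');

    if (Buffer == NULL) {
        return NULL;
    }

    Mdl = IoAllocateMdl (Buffer, (ULONG) NumberOfBytes, FALSE, FALSE, NULL);

    if (Mdl == NULL) {
        ExFreePool (Buffer);
        return NULL;
    }

    MmBuildMdlForNonPagedPool (Mdl);

    return Mdl;
}
#endif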
  2293. VOID
  2294. MiInitializeIoTrackers (
  2295. VOID
  2296. )
  2297. {
  2298. InitializeSListHead (&MiDeadPteTrackerSListHead);
  2299. KeInitializeSpinLock (&MiPteTrackerLock);
  2300. InitializeListHead (&MiPteHeader.ListHead);
  2301. KeInitializeSpinLock (&MmIoTrackerLock);
  2302. InitializeListHead (&MmIoHeader);
  2303. KeInitializeSpinLock (&MiTrackIoLock);
  2304. }
  2305. VOID
  2306. MiInsertPteTracker (
  2307. IN PMDL MemoryDescriptorList,
  2308. IN ULONG Flags,
  2309. IN LOGICAL IoMapping,
  2310. IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
  2311. IN PVOID MyCaller,
  2312. IN PVOID MyCallersCaller
  2313. )
  2314. /*++
  2315. Routine Description:
  2316. This function inserts a PTE tracking block as the caller has just
  2317. consumed system PTEs.
  2318. Arguments:
  2319. MemoryDescriptorList - Supplies a valid Memory Descriptor List.
  2320. Flags - Supplies the following values:
  2321. 0 - Indicates all the fields of the MDL are legitimate and can
  2322. be snapped.
  2323. 1 - Indicates the caller is mapping physically contiguous memory.
  2324. The only valid MDL fields are Page[0] & ByteCount.
  2325. Page[0] contains the PFN start, ByteCount the byte count.
  2326. 2 - Indicates the caller is just reserving mapping PTEs.
  2327. The only valid MDL fields are Page[0] & ByteCount.
  2328. Page[0] contains the pool tag, ByteCount the byte count.
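IoMapping - Supplies TRUE if the PTEs map I/O space, FALSE if they
map system DRAM.
CacheAttribute - Supplies the cache attribute the mapping was made with.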
  2329. MyCaller - Supplies the return address of the caller who consumed the
  2330. system PTEs to map this MDL.
  2331. MyCallersCaller - Supplies the return address of the caller of the caller
  2332. who consumed the system PTEs to map this MDL.
  2333. Return Value:
  2334. None.
  2335. Environment:
  2336. Kernel mode, DISPATCH_LEVEL or below.
  2337. --*/
  2338. {
  2339. KIRQL OldIrql;
  2340. PVOID StartingVa;
  2341. PPTE_TRACKER Tracker;
  2342. PSLIST_ENTRY SingleListEntry;
  2343. PSLIST_ENTRY NextSingleListEntry;
  2344. PFN_NUMBER NumberOfPtes;
  2345. ASSERT (KeGetCurrentIrql() <= DISPATCH_LEVEL);
  2346. if (ExQueryDepthSList (&MiDeadPteTrackerSListHead) < 10) {
  2347. Tracker = (PPTE_TRACKER) InterlockedPopEntrySList (&MiDeadPteTrackerSListHead);
  2348. }
  2349. else {
  2350. SingleListEntry = ExInterlockedFlushSList (&MiDeadPteTrackerSListHead);
  2351. Tracker = (PPTE_TRACKER) SingleListEntry;
  2352. if (SingleListEntry != NULL) {
  2353. SingleListEntry = SingleListEntry->Next;
  2354. while (SingleListEntry != NULL) {
  2355. NextSingleListEntry = SingleListEntry->Next;
  2356. ExFreePool (SingleListEntry);
  2357. SingleListEntry = NextSingleListEntry;
  2358. }
  2359. }
  2360. }
  2361. if (Tracker == NULL) {
  2362. Tracker = ExAllocatePoolWithTag (NonPagedPool,
  2363. sizeof (PTE_TRACKER),
  2364. 'ySmM');
  2365. if (Tracker == NULL) {
  2366. MiTrackPtesAborted = TRUE;
  2367. return;
  2368. }
  2369. }
  2370. switch (Flags) {
  2371. case 0:
  2372. //
  2373. // Regular MDL mapping.
  2374. //
  2375. StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
  2376. MemoryDescriptorList->ByteOffset);
  2377. NumberOfPtes = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
  2378. MemoryDescriptorList->ByteCount);
  2379. Tracker->Mdl = MemoryDescriptorList;
  2380. Tracker->StartVa = MemoryDescriptorList->StartVa;
  2381. Tracker->Offset = MemoryDescriptorList->ByteOffset;
  2382. Tracker->Length = MemoryDescriptorList->ByteCount;
  2383. break;
  2384. case 1:
  2385. //
  2386. // MmMapIoSpace call (ie: physically contiguous mapping).
  2387. //
  2388. StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
  2389. MemoryDescriptorList->ByteOffset);
  2390. NumberOfPtes = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
  2391. MemoryDescriptorList->ByteCount);
  2392. Tracker->Mdl = (PVOID)1;
  2393. break;
  2394. default:
  2395. ASSERT (FALSE);
  2396. // Fall through
  2397. case 2:
  2398. //
2399. // MmAllocateMappingAddress call (ie: currently maps nothing).
  2400. //
  2401. NumberOfPtes = (MemoryDescriptorList->ByteCount >> PAGE_SHIFT);
  2402. Tracker->Mdl = NULL;
  2403. break;
  2404. }
  2405. Tracker->Count = NumberOfPtes;
  2406. Tracker->CallingAddress = MyCaller;
  2407. Tracker->CallersCaller = MyCallersCaller;
  2408. Tracker->SystemVa = MemoryDescriptorList->MappedSystemVa;
  2409. Tracker->Page = *(PPFN_NUMBER)(MemoryDescriptorList + 1);
  2410. Tracker->CacheAttribute = CacheAttribute;
  2411. Tracker->IoMapping = (BOOLEAN) IoMapping;
  2412. ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql);
  2413. InsertHeadList (&MiPteHeader.ListHead, &Tracker->ListEntry);
  2414. MiPteHeader.Count += NumberOfPtes;
  2415. MiPteHeader.NumberOfEntries += 1;
  2416. if (MiPteHeader.NumberOfEntries > MiPteHeader.NumberOfEntriesPeak) {
  2417. MiPteHeader.NumberOfEntriesPeak = MiPteHeader.NumberOfEntries;
  2418. }
  2419. ExReleaseSpinLock (&MiPteTrackerLock, OldIrql);
  2420. }
  2421. VOID
  2422. MiRemovePteTracker (
  2423. IN PMDL MemoryDescriptorList OPTIONAL,
  2424. IN PVOID VirtualAddress,
  2425. IN PFN_NUMBER NumberOfPtes
  2426. )
  2427. /*++
  2428. Routine Description:
  2429. This function removes a PTE tracking block from the lists as the PTEs
  2430. are being freed.
  2431. Arguments:
  2432. MemoryDescriptorList - Supplies a valid Memory Descriptor List.
2433. VirtualAddress - Supplies the address the system PTEs were mapped to.
  2434. NumberOfPtes - Supplies the number of system PTEs allocated.
  2435. Return Value:
  2436. None.
  2437. Environment:
2438. Kernel mode, DISPATCH_LEVEL or below. Locks (including the PFN lock) may be held.
  2439. --*/
  2440. {
  2441. KIRQL OldIrql;
  2442. PPTE_TRACKER Tracker;
  2443. PLIST_ENTRY LastFound;
  2444. PLIST_ENTRY NextEntry;
  2445. LastFound = NULL;
  2446. VirtualAddress = PAGE_ALIGN (VirtualAddress);
  2447. ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql);
  2448. NextEntry = MiPteHeader.ListHead.Flink;
  2449. while (NextEntry != &MiPteHeader.ListHead) {
  2450. Tracker = (PPTE_TRACKER) CONTAINING_RECORD (NextEntry,
  2451. PTE_TRACKER,
  2452. ListEntry.Flink);
  2453. if (VirtualAddress == PAGE_ALIGN (Tracker->SystemVa)) {
  2454. if (LastFound != NULL) {
  2455. //
  2456. // Duplicate map entry.
  2457. //
  2458. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2459. 0x1,
  2460. (ULONG_PTR)Tracker,
  2461. (ULONG_PTR)MemoryDescriptorList,
  2462. (ULONG_PTR)LastFound);
  2463. }
  2464. if (Tracker->Count != NumberOfPtes) {
  2465. //
2466. // Not unmapping the same number of PTEs that were mapped.
  2467. //
  2468. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2469. 0x2,
  2470. (ULONG_PTR)Tracker,
  2471. Tracker->Count,
  2472. NumberOfPtes);
  2473. }
  2474. if ((ARGUMENT_PRESENT (MemoryDescriptorList)) &&
  2475. ((MemoryDescriptorList->MdlFlags & MDL_FREE_EXTRA_PTES) == 0) &&
  2476. (MiMdlsAdjusted == FALSE)) {
  2477. if (Tracker->SystemVa != MemoryDescriptorList->MappedSystemVa) {
  2478. //
  2479. // Not unmapping the same address that was mapped.
  2480. //
  2481. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2482. 0x3,
  2483. (ULONG_PTR)Tracker,
  2484. (ULONG_PTR)Tracker->SystemVa,
  2485. (ULONG_PTR)MemoryDescriptorList->MappedSystemVa);
  2486. }
  2487. if (Tracker->Page != *(PPFN_NUMBER)(MemoryDescriptorList + 1)) {
  2488. //
  2489. // The first page in the MDL has changed since it was mapped.
  2490. //
  2491. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2492. 0x4,
  2493. (ULONG_PTR)Tracker,
  2494. (ULONG_PTR)Tracker->Page,
  2495. (ULONG_PTR) *(PPFN_NUMBER)(MemoryDescriptorList + 1));
  2496. }
  2497. if (Tracker->StartVa != MemoryDescriptorList->StartVa) {
  2498. //
  2499. // Map and unmap don't match up.
  2500. //
  2501. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2502. 0x5,
  2503. (ULONG_PTR)Tracker,
  2504. (ULONG_PTR)Tracker->StartVa,
  2505. (ULONG_PTR)MemoryDescriptorList->StartVa);
  2506. }
  2507. }
  2508. RemoveEntryList (NextEntry);
  2509. LastFound = NextEntry;
  2510. }
  2511. NextEntry = Tracker->ListEntry.Flink;
  2512. }
  2513. if ((LastFound == NULL) && (MiTrackPtesAborted == FALSE)) {
  2514. //
  2515. // Can't unmap something that was never (or isn't currently) mapped.
  2516. //
  2517. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2518. 0x6,
  2519. (ULONG_PTR)MemoryDescriptorList,
  2520. (ULONG_PTR)VirtualAddress,
  2521. (ULONG_PTR)NumberOfPtes);
  2522. }
  2523. MiPteHeader.Count -= NumberOfPtes;
  2524. MiPteHeader.NumberOfEntries -= 1;
  2525. ExReleaseSpinLock (&MiPteTrackerLock, OldIrql);
  2526. //
  2527. // Insert the tracking block into the dead PTE list for later
  2528. // release. Locks (including the PFN lock) may be held on entry, thus the
  2529. // block cannot be directly freed to pool at this time.
  2530. //
  2531. if (LastFound != NULL) {
  2532. InterlockedPushEntrySList (&MiDeadPteTrackerSListHead,
  2533. (PSLIST_ENTRY)LastFound);
  2534. }
  2535. return;
  2536. }
  2537. PVOID
  2538. MiGetHighestPteConsumer (
  2539. OUT PULONG_PTR NumberOfPtes
  2540. )
  2541. /*++
  2542. Routine Description:
  2543. This function examines the PTE tracking blocks and returns the biggest
  2544. consumer.
  2545. Arguments:
2546. NumberOfPtes - Receives the number of system PTEs charged to the biggest consumer.
  2547. Return Value:
  2548. The loaded module entry of the biggest consumer.
  2549. Environment:
  2550. Kernel mode, called during bugcheck only. Many locks may be held.
  2551. --*/
  2552. {
  2553. PPTE_TRACKER Tracker;
  2554. PVOID BaseAddress;
  2555. PFN_NUMBER NumberOfPages;
  2556. PLIST_ENTRY NextEntry;
  2557. PLIST_ENTRY NextEntry2;
  2558. PKLDR_DATA_TABLE_ENTRY DataTableEntry;
  2559. ULONG_PTR Highest;
  2560. ULONG_PTR PagesByThisModule;
  2561. PKLDR_DATA_TABLE_ENTRY HighDataTableEntry;
  2562. *NumberOfPtes = 0;
  2563. //
  2564. // No locks are acquired as this is only called during a bugcheck.
  2565. //
  2566. if ((MmTrackPtes & 0x1) == 0) {
  2567. return NULL;
  2568. }
  2569. if (MiTrackPtesAborted == TRUE) {
  2570. return NULL;
  2571. }
  2572. if (IsListEmpty(&MiPteHeader.ListHead)) {
  2573. return NULL;
  2574. }
  2575. if (PsLoadedModuleList.Flink == NULL) {
  2576. return NULL;
  2577. }
  2578. Highest = 0;
  2579. HighDataTableEntry = NULL;
  2580. NextEntry = PsLoadedModuleList.Flink;
  2581. while (NextEntry != &PsLoadedModuleList) {
  2582. DataTableEntry = CONTAINING_RECORD(NextEntry,
  2583. KLDR_DATA_TABLE_ENTRY,
  2584. InLoadOrderLinks);
  2585. PagesByThisModule = 0;
  2586. //
  2587. // Walk the PTE mapping list and update each driver's counts.
  2588. //
  2589. NextEntry2 = MiPteHeader.ListHead.Flink;
  2590. while (NextEntry2 != &MiPteHeader.ListHead) {
  2591. Tracker = (PPTE_TRACKER) CONTAINING_RECORD (NextEntry2,
  2592. PTE_TRACKER,
  2593. ListEntry.Flink);
  2594. BaseAddress = Tracker->CallingAddress;
  2595. NumberOfPages = Tracker->Count;
  2596. if ((BaseAddress >= DataTableEntry->DllBase) &&
  2597. (BaseAddress < (PVOID)((ULONG_PTR)(DataTableEntry->DllBase) + DataTableEntry->SizeOfImage))) {
  2598. PagesByThisModule += NumberOfPages;
  2599. }
  2600. NextEntry2 = NextEntry2->Flink;
  2601. }
  2602. if (PagesByThisModule > Highest) {
  2603. Highest = PagesByThisModule;
  2604. HighDataTableEntry = DataTableEntry;
  2605. }
  2606. NextEntry = NextEntry->Flink;
  2607. }
  2608. *NumberOfPtes = Highest;
  2609. return (PVOID)HighDataTableEntry;
  2610. }
  2611. MI_PFN_CACHE_ATTRIBUTE
  2612. MiInsertIoSpaceMap (
  2613. IN PVOID BaseVa,
  2614. IN PFN_NUMBER PageFrameIndex,
  2615. IN PFN_NUMBER NumberOfPages,
  2616. IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
  2617. )
  2618. /*++
  2619. Routine Description:
  2620. This function inserts an I/O space tracking block, returning the cache
  2621. type the caller should use. The cache type is different from the input
  2622. cache type if an overlap collision is detected.
  2623. Arguments:
  2624. BaseVa - Supplies the virtual address that will be used for the mapping.
  2625. PageFrameIndex - Supplies the starting physical page number that will be
  2626. mapped.
  2627. NumberOfPages - Supplies the number of pages to map.
  2628. CacheAttribute - Supplies the caller's desired cache attribute.
  2629. Return Value:
  2630. The cache attribute that is safe to use.
  2631. Environment:
  2632. Kernel mode, DISPATCH_LEVEL or below.
  2633. --*/
  2634. {
  2635. KIRQL OldIrql;
  2636. PMMIO_TRACKER Tracker;
  2637. PMMIO_TRACKER Tracker2;
  2638. PLIST_ENTRY NextEntry;
  2639. ULONG Hash;
  2640. ASSERT (KeGetCurrentIrql() <= DISPATCH_LEVEL);
  2641. Tracker = ExAllocatePoolWithTag (NonPagedPool,
  2642. sizeof (MMIO_TRACKER),
  2643. 'ySmM');
  2644. if (Tracker == NULL) {
  2645. return MiNotMapped;
  2646. }
  2647. Tracker->BaseVa = BaseVa;
  2648. Tracker->PageFrameIndex = PageFrameIndex;
  2649. Tracker->NumberOfPages = NumberOfPages;
  2650. Tracker->CacheAttribute = CacheAttribute;
  2651. RtlZeroMemory (&Tracker->StackTrace[0], MI_IO_BACKTRACE_LENGTH * sizeof(PVOID));
  2652. RtlCaptureStackBackTrace (2, MI_IO_BACKTRACE_LENGTH, Tracker->StackTrace, &Hash);
  2653. ASSERT (!MI_IS_PFN (PageFrameIndex));
  2654. ExAcquireSpinLock (&MmIoTrackerLock, &OldIrql);
  2655. //
  2656. // Scan I/O space mappings for duplicate or overlapping entries.
  2657. //
  2658. NextEntry = MmIoHeader.Flink;
  2659. while (NextEntry != &MmIoHeader) {
  2660. Tracker2 = (PMMIO_TRACKER) CONTAINING_RECORD (NextEntry,
  2661. MMIO_TRACKER,
  2662. ListEntry.Flink);
  2663. if ((Tracker->PageFrameIndex < Tracker2->PageFrameIndex + Tracker2->NumberOfPages) &&
  2664. (Tracker->PageFrameIndex + Tracker->NumberOfPages > Tracker2->PageFrameIndex)) {
  2665. #if DBG
  2666. if ((MmShowMapOverlaps & 0x1) ||
  2667. ((Tracker->CacheAttribute != Tracker2->CacheAttribute) &&
  2668. (MmShowMapOverlaps & 0x2))) {
  2669. DbgPrint ("MM: Iospace mapping overlap %p %p\n",
  2670. Tracker,
  2671. Tracker2);
  2672. DbgPrint ("Physical range 0x%p->%p first mapped %s at VA %p\n",
  2673. Tracker2->PageFrameIndex << PAGE_SHIFT,
  2674. (Tracker2->PageFrameIndex + Tracker2->NumberOfPages) << PAGE_SHIFT,
  2675. MiCacheStrings[Tracker2->CacheAttribute],
  2676. Tracker2->BaseVa);
  2677. DbgPrint ("\tCall stack: %p %p %p %p %p %p\n",
  2678. Tracker2->StackTrace[0],
  2679. Tracker2->StackTrace[1],
  2680. Tracker2->StackTrace[2],
  2681. Tracker2->StackTrace[3],
  2682. Tracker2->StackTrace[4],
  2683. Tracker2->StackTrace[5]);
  2684. DbgPrint ("Physical range 0x%p->%p now being mapped %s at VA %p\n",
  2685. Tracker->PageFrameIndex << PAGE_SHIFT,
  2686. (Tracker->PageFrameIndex + Tracker->NumberOfPages) << PAGE_SHIFT,
  2687. MiCacheStrings[Tracker->CacheAttribute],
  2688. Tracker->BaseVa);
  2689. DbgPrint ("\tCall stack: %p %p %p %p %p %p\n",
  2690. Tracker->StackTrace[0],
  2691. Tracker->StackTrace[1],
  2692. Tracker->StackTrace[2],
  2693. Tracker->StackTrace[3],
  2694. Tracker->StackTrace[4],
  2695. Tracker->StackTrace[5]);
  2696. if (MmShowMapOverlaps & 0x80000000) {
  2697. DbgBreakPoint ();
  2698. }
  2699. }
  2700. #endif
  2701. if (Tracker->CacheAttribute != Tracker2->CacheAttribute) {
  2702. MiCacheOverride[3] += 1;
  2703. Tracker->CacheAttribute = Tracker2->CacheAttribute;
  2704. }
  2705. //
  2706. // Don't bother checking for overlapping multiple entries.
  2707. // This would be a very strange driver bug and is already
  2708. // caught by the verifier anyway.
  2709. //
  2710. }
  2711. NextEntry = Tracker2->ListEntry.Flink;
  2712. }
  2713. InsertHeadList (&MmIoHeader, &Tracker->ListEntry);
  2714. #if DBG
  2715. MmIoHeaderCount += NumberOfPages;
  2716. MmIoHeaderNumberOfEntries += 1;
  2717. if (MmIoHeaderNumberOfEntries > MmIoHeaderNumberOfEntriesPeak) {
  2718. MmIoHeaderNumberOfEntriesPeak = MmIoHeaderNumberOfEntries;
  2719. }
  2720. #endif
  2721. ExReleaseSpinLock (&MmIoTrackerLock, OldIrql);
  2722. return Tracker->CacheAttribute;
  2723. }
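#if 0
//
// Illustrative sketch only - NOT part of the original source. The
// overlap scan above uses the standard half-open interval test:
// [A, A + Na) and [B, B + Nb) intersect iff (A < B + Nb) && (A + Na > B).
//
LOGICAL
SketchPfnRangesOverlap (
    IN PFN_NUMBER A,
    IN PFN_NUMBER Na,
    IN PFN_NUMBER B,
    IN PFN_NUMBER Nb
    )
{
    return ((A < B + Nb) && (A + Na > B));
}
#endif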
  2724. VOID
  2725. MiRemoveIoSpaceMap (
  2726. IN PVOID BaseVa,
  2727. IN PFN_NUMBER NumberOfPages
  2728. )
  2729. /*++
  2730. Routine Description:
  2731. This function removes an I/O space tracking block from the lists.
  2732. Arguments:
  2733. BaseVa - Supplies the virtual address that will be used for the unmapping.
  2734. NumberOfPages - Supplies the number of pages to unmap.
  2735. Return Value:
  2736. None.
  2737. Environment:
  2738. Kernel mode, DISPATCH_LEVEL or below.
  2739. --*/
  2740. {
  2741. KIRQL OldIrql;
  2742. PMMIO_TRACKER Tracker;
  2743. PLIST_ENTRY NextEntry;
  2744. PVOID AlignedVa;
  2745. AlignedVa = PAGE_ALIGN (BaseVa);
  2746. ExAcquireSpinLock (&MmIoTrackerLock, &OldIrql);
  2747. NextEntry = MmIoHeader.Flink;
  2748. while (NextEntry != &MmIoHeader) {
  2749. Tracker = (PMMIO_TRACKER) CONTAINING_RECORD (NextEntry,
  2750. MMIO_TRACKER,
  2751. ListEntry.Flink);
  2752. if ((PAGE_ALIGN (Tracker->BaseVa) == AlignedVa) &&
  2753. (Tracker->NumberOfPages == NumberOfPages)) {
  2754. RemoveEntryList (NextEntry);
  2755. #if DBG
  2756. MmIoHeaderCount -= NumberOfPages;
  2757. MmIoHeaderNumberOfEntries -= 1;
  2758. #endif
  2759. ExReleaseSpinLock (&MmIoTrackerLock, OldIrql);
  2760. ExFreePool (Tracker);
  2761. return;
  2762. }
  2763. NextEntry = Tracker->ListEntry.Flink;
  2764. }
  2765. //
  2766. // Can't unmap something that was never (or isn't currently) mapped.
  2767. //
  2768. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2769. 0x400,
  2770. (ULONG_PTR)BaseVa,
  2771. (ULONG_PTR)NumberOfPages,
  2772. 0);
  2773. }
  2774. PVOID
  2775. MiMapSinglePage (
  2776. IN PVOID VirtualAddress OPTIONAL,
  2777. IN PFN_NUMBER PageFrameIndex,
  2778. IN MEMORY_CACHING_TYPE CacheType,
  2779. IN MM_PAGE_PRIORITY Priority
  2780. )
  2781. /*++
  2782. Routine Description:
  2783. This function (re)maps a single system PTE to the specified physical page.
  2784. Arguments:
  2785. VirtualAddress - Supplies the virtual address to map the page frame at.
  2786. NULL indicates a system PTE is needed. Non-NULL supplies
  2787. the virtual address returned by an earlier
  2788. MiMapSinglePage call.
  2789. PageFrameIndex - Supplies the page frame index to map.
  2790. CacheType - Supplies the type of cache mapping to use for the MDL.
  2791. MmCached indicates "normal" kernel or user mappings.
  2792. Priority - Supplies an indication as to how important it is that this
  2793. request succeed under low available PTE conditions.
  2794. Return Value:
  2795. Returns the base address where the page is mapped, or NULL if the
  2796. mapping failed.
  2797. Environment:
  2798. Kernel mode. APC_LEVEL or below.
  2799. --*/
  2800. {
  2801. PMMPTE PointerPte;
  2802. MMPTE TempPte;
  2803. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  2804. PAGED_CODE ();
  2805. UNREFERENCED_PARAMETER (Priority);
  2806. //
  2807. // If this routine is ever changed to allow other than fully cachable
  2808. // requests then checks must be added for large page TB overlaps which
  2809. // can result in this function failing where it cannot today.
  2810. //
  2811. ASSERT (CacheType == MmCached);
  2812. if (VirtualAddress == NULL) {
  2813. PointerPte = MiReserveSystemPtes (1, SystemPteSpace);
  2814. if (PointerPte == NULL) {
  2815. //
  2816. // Not enough system PTES are available.
  2817. //
  2818. return NULL;
  2819. }
  2820. ASSERT (PointerPte->u.Hard.Valid == 0);
  2821. VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
  2822. }
  2823. else {
  2824. ASSERT (MI_IS_PHYSICAL_ADDRESS (VirtualAddress) == 0);
  2825. ASSERT (VirtualAddress >= MM_SYSTEM_RANGE_START);
  2826. PointerPte = MiGetPteAddress (VirtualAddress);
  2827. ASSERT (PointerPte->u.Hard.Valid == 1);
  2828. MI_WRITE_INVALID_PTE (PointerPte, ZeroPte);
  2829. KeFlushSingleTb (VirtualAddress, TRUE);
  2830. }
  2831. TempPte = ValidKernelPte;
  2832. CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, 0);
  2833. switch (CacheAttribute) {
  2834. case MiNonCached:
  2835. MI_DISABLE_CACHING (TempPte);
  2836. break;
  2837. case MiCached:
  2838. break;
  2839. case MiWriteCombined:
  2840. MI_SET_PTE_WRITE_COMBINE (TempPte);
  2841. break;
  2842. default:
  2843. ASSERT (FALSE);
  2844. break;
  2845. }
  2846. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  2847. MI_PREPARE_FOR_NONCACHED (CacheAttribute);
  2848. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  2849. MI_SWEEP_CACHE (CacheAttribute, VirtualAddress, PAGE_SIZE);
  2850. return VirtualAddress;
  2851. }
  2852. PVOID
  2853. MmMapLockedPages (
  2854. IN PMDL MemoryDescriptorList,
  2855. IN KPROCESSOR_MODE AccessMode
  2856. )
  2857. /*++
  2858. Routine Description:
  2859. This function maps physical pages described by a memory descriptor
  2860. list into the system virtual address space or the user portion of
  2861. the virtual address space.
  2862. Arguments:
  2863. MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
  2864. been updated by MmProbeAndLockPages.
  2865. AccessMode - Supplies an indicator of where to map the pages;
  2866. KernelMode indicates that the pages should be mapped in the
  2867. system part of the address space, UserMode indicates the
  2868. pages should be mapped in the user part of the address space.
  2869. Return Value:
  2870. Returns the base address where the pages are mapped. The base address
  2871. has the same offset as the virtual address in the MDL.
  2872. This routine will raise an exception if the processor mode is USER_MODE
  2873. and quota limits or VM limits are exceeded.
  2874. Environment:
  2875. Kernel mode. DISPATCH_LEVEL or below if access mode is KernelMode,
  2876. APC_LEVEL or below if access mode is UserMode.
  2877. --*/
  2878. {
  2879. return MmMapLockedPagesSpecifyCache (MemoryDescriptorList,
  2880. AccessMode,
  2881. MmCached,
  2882. NULL,
  2883. TRUE,
  2884. HighPagePriority);
  2885. }
  2886. VOID
  2887. MiUnmapSinglePage (
  2888. IN PVOID VirtualAddress
  2889. )
  2890. /*++
  2891. Routine Description:
  2892. This routine unmaps a single locked page which was previously mapped via
  2893. an MiMapSinglePage call.
  2894. Arguments:
  2895. VirtualAddress - Supplies the virtual address used to map the page.
  2896. Return Value:
  2897. None.
  2898. Environment:
  2899. Kernel mode. APC_LEVEL or below, base address is within system space.
  2900. --*/
  2901. {
  2902. PMMPTE PointerPte;
  2903. PAGED_CODE ();
  2904. ASSERT (MI_IS_PHYSICAL_ADDRESS (VirtualAddress) == 0);
  2905. ASSERT (VirtualAddress >= MM_SYSTEM_RANGE_START);
  2906. PointerPte = MiGetPteAddress (VirtualAddress);
  2907. MiReleaseSystemPtes (PointerPte, 1, SystemPteSpace);
  2908. return;
  2909. }
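#if 0
//
// Illustrative sketch only - NOT part of the original source. Pairing
// of the two internal single-page routines above; PageFrameIndex is a
// hypothetical caller-supplied frame.
//
VOID
SketchBorrowSinglePageMapping (
    IN PFN_NUMBER PageFrameIndex
    )
{
    PVOID Va;

    Va = MiMapSinglePage (NULL, PageFrameIndex, MmCached, HighPagePriority);

    if (Va != NULL) {
        RtlZeroMemory (Va, PAGE_SIZE);
        MiUnmapSinglePage (Va);
    }
}
#endif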
  2910. PVOID
  2911. MmAllocateMappingAddress (
  2912. IN SIZE_T NumberOfBytes,
  2913. IN ULONG PoolTag
  2914. )
  2915. /*++
  2916. Routine Description:
  2917. This function allocates a system PTE mapping of the requested length
  2918. that can be used later to map arbitrary addresses.
  2919. Arguments:
  2920. NumberOfBytes - Supplies the maximum number of bytes the mapping can span.
  2921. PoolTag - Supplies a pool tag to associate this mapping to the caller.
  2922. Return Value:
2923. Returns a virtual address that can be used for later mappings.
  2924. Environment:
  2925. Kernel mode. PASSIVE_LEVEL.
  2926. --*/
  2927. {
  2928. PPFN_NUMBER Page;
  2929. PMMPTE PointerPte;
  2930. PVOID BaseVa;
  2931. PVOID CallingAddress;
  2932. PVOID CallersCaller;
  2933. PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + 1];
  2934. PMDL MemoryDescriptorList;
  2935. PFN_NUMBER NumberOfPages;
  2936. ASSERT (KeGetCurrentIrql () == PASSIVE_LEVEL);
  2937. //
  2938. // Make sure there are enough PTEs of the requested size.
  2939. // Try to ensure available PTEs inline when we're rich.
  2940. // Otherwise go the long way.
  2941. //
  2942. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (0, NumberOfBytes);
  2943. if (NumberOfPages == 0) {
  2944. RtlGetCallersAddress (&CallingAddress, &CallersCaller);
  2945. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2946. 0x100,
  2947. NumberOfPages,
  2948. PoolTag,
  2949. (ULONG_PTR) CallingAddress);
  2950. }
  2951. //
  2952. // Callers must identify themselves.
  2953. //
  2954. if (PoolTag == 0) {
  2955. return NULL;
  2956. }
  2957. //
  2958. // Leave space to stash the length and tag.
  2959. //
  2960. NumberOfPages += 2;
  2961. PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages, SystemPteSpace);
  2962. if (PointerPte == NULL) {
  2963. //
  2964. // Not enough system PTES are available.
  2965. //
  2966. return NULL;
  2967. }
  2968. //
2969. // Make sure the valid bit is always zero in the stash PTEs: the span is
// stored shifted left one bit and the tag with its low bit clear, so
// neither stash entry can ever look like a valid hardware PTE.
  2970. //
  2971. *(PULONG_PTR)PointerPte = (NumberOfPages << 1);
  2972. PointerPte += 1;
  2973. *(PULONG_PTR)PointerPte = (PoolTag & ~0x1);
  2974. PointerPte += 1;
  2975. BaseVa = MiGetVirtualAddressMappedByPte (PointerPte);
  2976. if (MmTrackPtes & 0x1) {
  2977. RtlGetCallersAddress (&CallingAddress, &CallersCaller);
  2978. MemoryDescriptorList = (PMDL) MdlHack;
  2979. MemoryDescriptorList->MappedSystemVa = BaseVa;
  2980. MemoryDescriptorList->StartVa = (PVOID)(ULONG_PTR)PoolTag;
  2981. MemoryDescriptorList->ByteOffset = 0;
  2982. MemoryDescriptorList->ByteCount = (ULONG)((NumberOfPages - 2) * PAGE_SIZE);
  2983. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  2984. *Page = 0;
  2985. MiInsertPteTracker (MemoryDescriptorList,
  2986. 2,
  2987. FALSE,
  2988. MiCached,
  2989. CallingAddress,
  2990. CallersCaller);
  2991. }
  2992. return BaseVa;
  2993. }
  2994. VOID
  2995. MmFreeMappingAddress (
  2996. IN PVOID BaseAddress,
  2997. IN ULONG PoolTag
  2998. )
  2999. /*++
  3000. Routine Description:
  3001. This routine unmaps a virtual address range previously reserved with
  3002. MmAllocateMappingAddress.
  3003. Arguments:
  3004. BaseAddress - Supplies the base address previously reserved.
  3005. PoolTag - Supplies the caller's identifying tag.
  3006. Return Value:
  3007. None.
  3008. Environment:
  3009. Kernel mode. PASSIVE_LEVEL.
  3010. --*/
  3011. {
  3012. ULONG OriginalPoolTag;
  3013. PFN_NUMBER NumberOfPages;
  3014. PMMPTE PointerBase;
  3015. PMMPTE PointerPte;
  3016. PMMPTE LastPte;
  3017. ASSERT (KeGetCurrentIrql () == PASSIVE_LEVEL);
  3018. ASSERT (!MI_IS_PHYSICAL_ADDRESS (BaseAddress));
  3019. ASSERT (BaseAddress > MM_HIGHEST_USER_ADDRESS);
  3020. PointerPte = MiGetPteAddress (BaseAddress);
  3021. PointerBase = PointerPte - 2;
  3022. OriginalPoolTag = *(PULONG) (PointerPte - 1);
  3023. ASSERT ((OriginalPoolTag & 0x1) == 0);
  3024. if (OriginalPoolTag != (PoolTag & ~0x1)) {
  3025. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3026. 0x101,
  3027. (ULONG_PTR)BaseAddress,
  3028. PoolTag,
  3029. OriginalPoolTag);
  3030. }
  3031. NumberOfPages = *(PULONG_PTR)PointerBase;
  3032. ASSERT ((NumberOfPages & 0x1) == 0);
  3033. NumberOfPages >>= 1;
  3034. if (NumberOfPages <= 2) {
  3035. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3036. 0x102,
  3037. (ULONG_PTR)BaseAddress,
  3038. PoolTag,
  3039. NumberOfPages);
  3040. }
  3041. NumberOfPages -= 2;
  3042. LastPte = PointerPte + NumberOfPages;
  3043. while (PointerPte < LastPte) {
  3044. if (PointerPte->u.Long != 0) {
  3045. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3046. 0x103,
  3047. (ULONG_PTR)BaseAddress,
  3048. PoolTag,
  3049. NumberOfPages);
  3050. }
  3051. PointerPte += 1;
  3052. }
  3053. if (MmTrackPtes & 0x1) {
  3054. MiRemovePteTracker (NULL, BaseAddress, NumberOfPages);
  3055. }
  3056. //
  3057. // Note the tag and size are nulled out when the PTEs are released below
  3058. // so any drivers that try to use their mapping after freeing it get
  3059. // caught immediately.
  3060. //
  3061. MiReleaseSystemPtes (PointerBase, (ULONG)NumberOfPages + 2, SystemPteSpace);
  3062. return;
  3063. }
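#if 0
//
// Illustrative sketch only - NOT part of the original source. The
// intended lifecycle of a reserved mapping for a driver that cannot
// tolerate map failures at I/O time: reserve at init, map/unmap per
// transfer (see MmMapLockedPagesWithReservedMapping below), free at
// unload. The tag and sizes are made up.
//
VOID
SketchReservedMappingLifecycle (
    IN PMDL Mdl             // assumed probed, locked, spanning <= 64K
    )
{
    PVOID Reserve;
    PVOID Va;

    Reserve = MmAllocateMappingAddress (64 * 1024, 'hctS');

    if (Reserve == NULL) {
        return;
    }

    Va = MmMapLockedPagesWithReservedMapping (Reserve, 'hctS', Mdl, MmCached);

    if (Va != NULL) {

        // ... perform the transfer using Va ...

        MmUnmapReservedMapping (Reserve, 'hctS', Mdl);
    }

    MmFreeMappingAddress (Reserve, 'hctS');
}
#endif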
  3064. PVOID
  3065. MmMapLockedPagesWithReservedMapping (
  3066. IN PVOID MappingAddress,
  3067. IN ULONG PoolTag,
  3068. IN PMDL MemoryDescriptorList,
  3069. IN MEMORY_CACHING_TYPE CacheType
  3070. )
  3071. /*++
  3072. Routine Description:
  3073. This function maps physical pages described by a memory descriptor
  3074. list into the system virtual address space.
  3075. Arguments:
  3076. MappingAddress - Supplies a valid mapping address obtained earlier via
  3077. MmAllocateMappingAddress.
  3078. PoolTag - Supplies the caller's identifying tag.
  3079. MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
  3080. been updated by MmProbeAndLockPages.
  3081. CacheType - Supplies the type of cache mapping to use for the MDL.
  3082. MmCached indicates "normal" kernel or user mappings.
  3083. Return Value:
  3084. Returns the base address where the pages are mapped. The base address
  3085. has the same offset as the virtual address in the MDL.
  3086. This routine will return NULL if the cache type requested is incompatible
  3087. with the pages being mapped or if the caller tries to map an MDL that is
  3088. larger than the virtual address range originally reserved.
  3089. Environment:
  3090. Kernel mode. DISPATCH_LEVEL or below. The caller must synchronize usage
  3091. of the argument virtual address space.
  3092. --*/
  3093. {
  3094. KIRQL OldIrql;
  3095. CSHORT IoMapping;
  3096. PFN_NUMBER NumberOfPages;
  3097. PFN_NUMBER VaPageSpan;
  3098. PFN_NUMBER SavedPageCount;
  3099. PPFN_NUMBER Page;
  3100. PMMPTE PointerBase;
  3101. PMMPTE PointerPte;
  3102. PMMPTE LastPte;
  3103. MMPTE TempPte;
  3104. PVOID StartingVa;
  3105. PFN_NUMBER NumberOfPtes;
  3106. PFN_NUMBER PageFrameIndex;
  3107. ULONG OriginalPoolTag;
  3108. PMMPFN Pfn2;
  3109. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  3110. ASSERT (KeGetCurrentIrql () <= DISPATCH_LEVEL);
  3111. StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
  3112. MemoryDescriptorList->ByteOffset);
  3113. ASSERT (MemoryDescriptorList->ByteCount != 0);
  3114. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
  3115. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  3116. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
  3117. MemoryDescriptorList->ByteCount);
  3118. PointerPte = MiGetPteAddress (MappingAddress);
  3119. PointerBase = PointerPte - 2;
  3120. OriginalPoolTag = *(PULONG) (PointerPte - 1);
  3121. ASSERT ((OriginalPoolTag & 0x1) == 0);
  3122. if (OriginalPoolTag != (PoolTag & ~0x1)) {
  3123. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3124. 0x104,
  3125. (ULONG_PTR)MappingAddress,
  3126. PoolTag,
  3127. OriginalPoolTag);
  3128. }
  3129. VaPageSpan = *(PULONG_PTR)PointerBase;
  3130. ASSERT ((VaPageSpan & 0x1) == 0);
  3131. VaPageSpan >>= 1;
  3132. if (VaPageSpan <= 2) {
  3133. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3134. 0x105,
  3135. (ULONG_PTR)MappingAddress,
  3136. PoolTag,
  3137. VaPageSpan);
  3138. }
  3139. if (NumberOfPages > VaPageSpan - 2) {
  3140. //
  3141. // The caller is trying to map an MDL that spans a range larger than
3142. // the reserved mapping! This is a driver bug.
  3143. //
  3144. ASSERT (FALSE);
  3145. return NULL;
  3146. }
  3147. //
  3148. // All the mapping PTEs must be zero.
  3149. //
  3150. LastPte = PointerPte + VaPageSpan - 2;
  3151. while (PointerPte < LastPte) {
  3152. if (PointerPte->u.Long != 0) {
  3153. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3154. 0x107,
  3155. (ULONG_PTR)MappingAddress,
  3156. (ULONG_PTR)PointerPte,
  3157. (ULONG_PTR)LastPte);
  3158. }
  3159. PointerPte += 1;
  3160. }
  3161. PointerPte = PointerBase + 2;
  3162. SavedPageCount = NumberOfPages;
  3163. ASSERT ((MemoryDescriptorList->MdlFlags & (
  3164. MDL_MAPPED_TO_SYSTEM_VA |
  3165. MDL_SOURCE_IS_NONPAGED_POOL |
  3166. MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
  3167. ASSERT ((MemoryDescriptorList->MdlFlags & (
  3168. MDL_PAGES_LOCKED |
  3169. MDL_PARTIAL)) != 0);
  3170. //
  3171. // If a noncachable mapping is requested, none of the pages in the
  3172. // requested MDL can reside in a large page. Otherwise we would be
  3173. // creating an incoherent overlapping TB entry as the same physical
  3174. // page would be mapped by 2 different TB entries with different
  3175. // cache attributes.
  3176. //
  3177. IoMapping = MemoryDescriptorList->MdlFlags & MDL_IO_SPACE;
  3178. CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, IoMapping);
  3179. if (CacheAttribute != MiCached) {
  3180. LOCK_PFN2 (OldIrql);
  3181. do {
  3182. if (*Page == MM_EMPTY_LIST) {
  3183. break;
  3184. }
  3185. PageFrameIndex = *Page;
  3186. if (MI_PAGE_FRAME_INDEX_MUST_BE_CACHED (PageFrameIndex)) {
  3187. UNLOCK_PFN2 (OldIrql);
  3188. MiNonCachedCollisions += 1;
  3189. return NULL;
  3190. }
  3191. Page += 1;
  3192. NumberOfPages -= 1;
  3193. } while (NumberOfPages != 0);
  3194. UNLOCK_PFN2 (OldIrql);
  3195. NumberOfPages = SavedPageCount;
  3196. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  3197. MI_PREPARE_FOR_NONCACHED (CacheAttribute);
  3198. }
  3199. NumberOfPtes = NumberOfPages;
  3200. TempPte = ValidKernelPte;
  3201. MI_ADD_EXECUTE_TO_VALID_PTE_IF_PAE (TempPte);
  3202. switch (CacheAttribute) {
  3203. case MiNonCached:
  3204. MI_DISABLE_CACHING (TempPte);
  3205. break;
  3206. case MiCached:
  3207. break;
  3208. case MiWriteCombined:
  3209. MI_SET_PTE_WRITE_COMBINE (TempPte);
  3210. break;
  3211. default:
  3212. ASSERT (FALSE);
  3213. break;
  3214. }
  3215. OldIrql = HIGH_LEVEL;
  3216. do {
  3217. if (*Page == MM_EMPTY_LIST) {
  3218. break;
  3219. }
  3220. ASSERT (PointerPte->u.Hard.Valid == 0);
  3221. if (IoMapping == 0) {
  3222. Pfn2 = MI_PFN_ELEMENT (*Page);
  3223. ASSERT (Pfn2->u3.e2.ReferenceCount != 0);
  3224. TempPte = ValidKernelPte;
  3225. switch (Pfn2->u3.e1.CacheAttribute) {
  3226. case MiCached:
  3227. if (CacheAttribute != MiCached) {
  3228. //
  3229. // The caller asked for a noncached or writecombined
  3230. // mapping, but the page is already mapped cached by
  3231. // someone else. Override the caller's request in
  3232. // order to keep the TB page attribute coherent.
  3233. //
  3234. MiCacheOverride[0] += 1;
  3235. }
  3236. break;
  3237. case MiNonCached:
  3238. if (CacheAttribute != MiNonCached) {
  3239. //
  3240. // The caller asked for a cached or writecombined
  3241. // mapping, but the page is already mapped noncached
  3242. // by someone else. Override the caller's request
  3243. // in order to keep the TB page attribute coherent.
  3244. //
  3245. MiCacheOverride[1] += 1;
  3246. }
  3247. MI_DISABLE_CACHING (TempPte);
  3248. break;
  3249. case MiWriteCombined:
  3250. if (CacheAttribute != MiWriteCombined) {
  3251. //
  3252. // The caller asked for a cached or noncached
  3253. // mapping, but the page is already mapped
  3254. // writecombined by someone else. Override the
  3255. // caller's request in order to keep the TB page
  3256. // attribute coherent.
  3257. //
  3258. MiCacheOverride[2] += 1;
  3259. }
  3260. MI_SET_PTE_WRITE_COMBINE (TempPte);
  3261. break;
  3262. case MiNotMapped:
  3263. //
  3264. // This better be for a page allocated with
  3265. // MmAllocatePagesForMdl. Otherwise it might be a
  3266. // page on the freelist which could subsequently be
  3267. // given out with a different attribute !
  3268. //
  3269. ASSERT ((Pfn2->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME) ||
  3270. (Pfn2->PteAddress == (PVOID) (ULONG_PTR)(X64K | 0x1)));
  3271. if (OldIrql == HIGH_LEVEL) {
  3272. LOCK_PFN2 (OldIrql);
  3273. }
  3274. switch (CacheAttribute) {
  3275. case MiCached:
  3276. Pfn2->u3.e1.CacheAttribute = MiCached;
  3277. break;
  3278. case MiNonCached:
  3279. Pfn2->u3.e1.CacheAttribute = MiNonCached;
  3280. MI_DISABLE_CACHING (TempPte);
  3281. break;
  3282. case MiWriteCombined:
  3283. Pfn2->u3.e1.CacheAttribute = MiWriteCombined;
  3284. MI_SET_PTE_WRITE_COMBINE (TempPte);
  3285. break;
  3286. default:
  3287. ASSERT (FALSE);
  3288. break;
  3289. }
  3290. break;
  3291. default:
  3292. ASSERT (FALSE);
  3293. break;
  3294. }
  3295. }
  3296. TempPte.u.Hard.PageFrameNumber = *Page;
  3297. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  3298. Page += 1;
  3299. PointerPte += 1;
  3300. NumberOfPages -= 1;
  3301. } while (NumberOfPages != 0);
  3302. if (OldIrql != HIGH_LEVEL) {
  3303. UNLOCK_PFN2 (OldIrql);
  3304. }
  3305. MI_SWEEP_CACHE (CacheAttribute, MappingAddress, SavedPageCount * PAGE_SIZE);
  3306. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
  3307. MemoryDescriptorList->MappedSystemVa = MappingAddress;
  3308. MemoryDescriptorList->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
  3309. if ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) != 0) {
  3310. MemoryDescriptorList->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
  3311. }
  3312. MappingAddress = (PVOID)((PCHAR)MappingAddress + MemoryDescriptorList->ByteOffset);
  3313. return MappingAddress;
  3314. }
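//
// Editorial note: the MiCacheOverride counters bumped above record how often
// a caller's requested attribute was overridden by a page's existing
// attribute: index 0 when the page was already mapped cached, 1 when already
// noncached, 2 when already writecombined. The fourth slot is not touched by
// this routine.
//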
  3315. VOID
  3316. MmUnmapReservedMapping (
  3317. IN PVOID BaseAddress,
  3318. IN ULONG PoolTag,
  3319. IN PMDL MemoryDescriptorList
  3320. )
  3321. /*++
  3322. Routine Description:
  3323. This routine unmaps locked pages which were previously mapped via
  3324. a MmMapLockedPagesWithReservedMapping call.
  3325. Arguments:
  3326. BaseAddress - Supplies the base address where the pages were previously
  3327. mapped.
  3328. PoolTag - Supplies the caller's identifying tag.
  3329. MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
  3330. been updated by MmProbeAndLockPages.
  3331. Return Value:
  3332. None.
  3333. Environment:
  3334. Kernel mode. DISPATCH_LEVEL or below. The caller must synchronize usage
  3335. of the argument virtual address space.
  3336. --*/
  3337. {
  3338. ULONG OriginalPoolTag;
  3339. PFN_NUMBER NumberOfPages;
  3340. PFN_NUMBER ExtraPages;
  3341. PFN_NUMBER VaPageSpan;
  3342. PMMPTE PointerBase;
  3343. PMMPTE LastPte;
  3344. PMMPTE LastMdlPte;
  3345. PVOID StartingVa;
  3346. PVOID VaFlushList[MM_MAXIMUM_FLUSH_COUNT];
  3347. PMMPTE PointerPte;
  3348. PFN_NUMBER i;
  3349. PPFN_NUMBER Page;
  3350. PPFN_NUMBER LastCurrentPage;
  3351. ASSERT (KeGetCurrentIrql () <= DISPATCH_LEVEL);
  3352. ASSERT (MemoryDescriptorList->ByteCount != 0);
  3353. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) != 0);
  3354. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
  3355. ASSERT (!MI_IS_PHYSICAL_ADDRESS (BaseAddress));
  3356. ASSERT (BaseAddress > MM_HIGHEST_USER_ADDRESS);
  3357. PointerPte = MiGetPteAddress (BaseAddress);
  3358. PointerBase = PointerPte - 2;
  3359. OriginalPoolTag = *(PULONG) (PointerPte - 1);
  3360. ASSERT ((OriginalPoolTag & 0x1) == 0);
  3361. if (OriginalPoolTag != (PoolTag & ~0x1)) {
  3362. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3363. 0x108,
  3364. (ULONG_PTR)BaseAddress,
  3365. PoolTag,
  3366. OriginalPoolTag);
  3367. }
  3368. VaPageSpan = *(PULONG_PTR)PointerBase;
  3369. ASSERT ((VaPageSpan & 0x1) == 0);
  3370. VaPageSpan >>= 1;
  3371. if (VaPageSpan <= 2) {
  3372. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3373. 0x109,
  3374. (ULONG_PTR)BaseAddress,
  3375. PoolTag,
  3376. VaPageSpan);
  3377. }
  3378. StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
  3379. MemoryDescriptorList->ByteOffset);
  3380. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
  3381. MemoryDescriptorList->ByteCount);
  3382. if (NumberOfPages > VaPageSpan - 2) {
  3383. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3384. 0x10A,
  3385. (ULONG_PTR)BaseAddress,
  3386. VaPageSpan,
  3387. NumberOfPages);
  3388. }
  3389. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  3390. LastCurrentPage = Page + NumberOfPages;
  3391. if (MemoryDescriptorList->MdlFlags & MDL_FREE_EXTRA_PTES) {
  3392. ExtraPages = *(Page + NumberOfPages);
  3393. ASSERT (ExtraPages <= MiCurrentAdvancedPages);
  3394. ASSERT (NumberOfPages + ExtraPages <= VaPageSpan - 2);
  3395. NumberOfPages += ExtraPages;
  3396. #if DBG
  3397. InterlockedExchangeAddSizeT (&MiCurrentAdvancedPages, 0 - ExtraPages);
  3398. MiAdvancesFreed += ExtraPages;
  3399. #endif
  3400. }
  3401. LastMdlPte = PointerPte + NumberOfPages;
  3402. LastPte = PointerPte + VaPageSpan - 2;
  3403. //
  3404. // The range described by the argument MDL must be mapped.
  3405. //
  3406. while (PointerPte < LastMdlPte) {
  3407. if (PointerPte->u.Hard.Valid == 0) {
  3408. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3409. 0x10B,
  3410. (ULONG_PTR)BaseAddress,
  3411. PoolTag,
  3412. NumberOfPages);
  3413. }
  3414. #if DBG
  3415. ASSERT ((*Page == MI_GET_PAGE_FRAME_FROM_PTE (PointerPte)) ||
  3416. (MemoryDescriptorList->MdlFlags & MDL_FREE_EXTRA_PTES));
  3417. if (((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0) &&
  3418. (Page < LastCurrentPage)) {
  3419. PMMPFN Pfn3;
  3420. Pfn3 = MI_PFN_ELEMENT (*Page);
  3421. ASSERT (Pfn3->u3.e2.ReferenceCount != 0);
  3422. }
  3423. Page += 1;
  3424. #endif
  3425. PointerPte += 1;
  3426. }
  3427. //
  3428. // The range past the argument MDL must be unmapped.
  3429. //
  3430. while (PointerPte < LastPte) {
  3431. if (PointerPte->u.Long != 0) {
  3432. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3433. 0x10C,
  3434. (ULONG_PTR)BaseAddress,
  3435. PoolTag,
  3436. NumberOfPages);
  3437. }
  3438. PointerPte += 1;
  3439. }
  3440. MiZeroMemoryPte (PointerBase + 2, NumberOfPages);
  3441. if (NumberOfPages == 1) {
  3442. KeFlushSingleTb (BaseAddress, TRUE);
  3443. }
  3444. else if (NumberOfPages < MM_MAXIMUM_FLUSH_COUNT) {
  3445. for (i = 0; i < NumberOfPages; i += 1) {
  3446. VaFlushList[i] = BaseAddress;
  3447. BaseAddress = (PVOID)((PCHAR)BaseAddress + PAGE_SIZE);
  3448. }
  3449. KeFlushMultipleTb ((ULONG)NumberOfPages, &VaFlushList[0], TRUE);
  3450. }
  3451. else {
  3452. KeFlushEntireTb (TRUE, TRUE);
  3453. }
  3454. MemoryDescriptorList->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
  3455. MDL_PARTIAL_HAS_BEEN_MAPPED);
  3456. return;
  3457. }
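//
// Editorial note: a minimal driver-side sketch of the reserved-mapping API
// implemented above. The tag 'looP', MAX_TRANSFER_BYTES and the locked Mdl
// are assumed for illustration only.
//
#if 0
PVOID Reserve;
PVOID SystemVa;

Reserve = MmAllocateMappingAddress (MAX_TRANSFER_BYTES, 'looP');   // at init
if (Reserve != NULL) {
    SystemVa = MmMapLockedPagesWithReservedMapping (Reserve,
                                                    'looP',
                                                    Mdl,
                                                    MmCached);
    if (SystemVa != NULL) {                 // NULL: span/attribute mismatch
        // ... access the buffer through SystemVa ...
        MmUnmapReservedMapping (Reserve, 'looP', Mdl);
    }
    MmFreeMappingAddress (Reserve, 'looP'); // at unload
}
#endif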
  3458. NTKERNELAPI
  3459. NTSTATUS
  3460. MmAdvanceMdl (
  3461. IN PMDL Mdl,
  3462. IN ULONG NumberOfBytes
  3463. )
  3464. /*++
  3465. Routine Description:
  3466. This routine takes the specified MDL and "advances" it forward
  3467. by the specified number of bytes. If this causes the MDL to advance
  3468. past the initial page, the pages that are advanced over are immediately
  3469. unlocked and the system VA that maps the MDL is also adjusted (along
  3470. with the user address).
  3471. WARNING ! WARNING ! WARNING !
  3472. This means the caller MUST BE AWARE that the "advanced" pages are
  3473. immediately reused and therefore MUST NOT BE REFERENCED by the caller
3474. once this routine has been called. Likewise for the virtual address,
3475. as that is also being adjusted here.
  3476. Even if the caller has statically allocated this MDL on his local stack,
  3477. he cannot use more than the space currently described by the MDL on return
  3478. from this routine unless he first unmaps the MDL (if it was mapped).
  3479. Otherwise the system PTE lists will be corrupted.
  3480. Arguments:
3481. Mdl - Supplies a valid Memory Descriptor List which has been
3482. updated by MmProbeAndLockPages.
  3483. NumberOfBytes - The number of bytes to advance the MDL by.
  3484. Return Value:
  3485. NTSTATUS.
  3486. --*/
  3487. {
  3488. ULONG i;
  3489. ULONG PageCount;
  3490. ULONG FreeBit;
  3491. ULONG Slush;
  3492. KIRQL OldIrql;
  3493. PPFN_NUMBER Page;
  3494. PPFN_NUMBER NewPage;
  3495. ULONG OffsetPages;
  3496. PEPROCESS Process;
  3497. PMMPFN Pfn1;
  3498. CSHORT MdlFlags;
  3499. PVOID StartingVa;
  3500. PFN_NUMBER NumberOfPages;
  3501. ASSERT (KeGetCurrentIrql () <= DISPATCH_LEVEL);
  3502. ASSERT (Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_SOURCE_IS_NONPAGED_POOL));
  3503. ASSERT (BYTE_OFFSET (Mdl->StartVa) == 0);
  3504. //
  3505. // Disallow advancement past the end of the MDL.
  3506. //
  3507. if (NumberOfBytes >= Mdl->ByteCount) {
  3508. return STATUS_INVALID_PARAMETER_2;
  3509. }
  3510. PageCount = 0;
  3511. MiMdlsAdjusted = TRUE;
  3512. StartingVa = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);
  3513. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingVa, Mdl->ByteCount);
  3514. if (Mdl->ByteOffset != 0) {
  3515. Slush = PAGE_SIZE - Mdl->ByteOffset;
  3516. if (NumberOfBytes < Slush) {
  3517. Mdl->ByteCount -= NumberOfBytes;
  3518. Mdl->ByteOffset += NumberOfBytes;
  3519. //
  3520. // StartVa never includes the byte offset (it's always page-aligned)
  3521. // so don't adjust it here. MappedSystemVa does include byte
  3522. // offsets so do adjust that.
  3523. //
  3524. if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
  3525. Mdl->MappedSystemVa = (PVOID) ((PCHAR)Mdl->MappedSystemVa + NumberOfBytes);
  3526. }
  3527. return STATUS_SUCCESS;
  3528. }
  3529. NumberOfBytes -= Slush;
  3530. Mdl->StartVa = (PVOID) ((PCHAR)Mdl->StartVa + PAGE_SIZE);
  3531. Mdl->ByteOffset = 0;
  3532. Mdl->ByteCount -= Slush;
  3533. if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
  3534. Mdl->MappedSystemVa = (PVOID) ((PCHAR)Mdl->MappedSystemVa + Slush);
  3535. }
  3536. //
  3537. // Up the number of pages (and addresses) that need to slide.
  3538. //
  3539. PageCount += 1;
  3540. }
  3541. //
  3542. // The MDL start is now nicely page aligned. Make sure there's still
  3543. // data left in it (we may have finished it off above), then operate on it.
  3544. //
  3545. if (NumberOfBytes != 0) {
  3546. Mdl->ByteCount -= NumberOfBytes;
  3547. Mdl->ByteOffset = BYTE_OFFSET (NumberOfBytes);
  3548. OffsetPages = NumberOfBytes >> PAGE_SHIFT;
  3549. Mdl->StartVa = (PVOID) ((PCHAR)Mdl->StartVa + (OffsetPages << PAGE_SHIFT));
  3550. PageCount += OffsetPages;
  3551. if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
  3552. Mdl->MappedSystemVa = (PVOID) ((PCHAR)Mdl->MappedSystemVa +
  3553. (OffsetPages << PAGE_SHIFT) +
  3554. Mdl->ByteOffset);
  3555. }
  3556. }
  3557. ASSERT (PageCount <= NumberOfPages);
  3558. if (PageCount != 0) {
  3559. //
  3560. // Slide the page frame numbers forward decrementing reference counts
  3561. // on the ones that are released. Then adjust the mapped system VA
  3562. // (if there is one) to reflect the current frame. Note that the TB
  3563. // does not need to be flushed due to the careful sliding and when
  3564. // the MDL is finally completely unmapped, the extra information
  3565. // added to the MDL here is used to free the entire original PTE
  3566. // mapping range in one chunk so as not to fragment the PTE space.
  3567. //
  3568. Page = (PPFN_NUMBER)(Mdl + 1);
  3569. NewPage = Page;
  3570. Process = Mdl->Process;
  3571. MdlFlags = Mdl->MdlFlags;
  3572. if (Process != NULL) {
  3573. if ((MdlFlags & MDL_PAGES_LOCKED) &&
  3574. ((MdlFlags & MDL_IO_SPACE) == 0)) {
  3575. ASSERT ((MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
  3576. ASSERT ((SPFN_NUMBER)Process->NumberOfLockedPages >= 0);
  3577. InterlockedExchangeAddSizeT (&Process->NumberOfLockedPages,
  3578. 0 - PageCount);
  3579. }
  3580. if (MmTrackLockedPages == TRUE) {
  3581. MiUpdateMdlTracker (Mdl, PageCount);
  3582. }
  3583. }
  3584. LOCK_PFN2 (OldIrql);
  3585. for (i = 0; i < PageCount; i += 1) {
  3586. //
  3587. // Decrement the stale page frames now, this will unlock them
  3588. // resulting in them being immediately reused if necessary.
  3589. //
  3590. if ((MdlFlags & MDL_PAGES_LOCKED) &&
  3591. ((MdlFlags & MDL_IO_SPACE) == 0)) {
  3592. ASSERT ((MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
  3593. Pfn1 = MI_PFN_ELEMENT (*Page);
  3594. if (MdlFlags & MDL_WRITE_OPERATION) {
  3595. //
  3596. // If this was a write operation set the modified bit
  3597. // in the PFN database.
  3598. //
  3599. MI_SET_MODIFIED (Pfn1, 1, 0x3);
  3600. if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
  3601. (Pfn1->u3.e1.WriteInProgress == 0)) {
  3602. FreeBit = GET_PAGING_FILE_OFFSET (Pfn1->OriginalPte);
  3603. if ((FreeBit != 0) && (FreeBit != MI_PTE_LOOKUP_NEEDED)) {
  3604. MiReleaseConfirmedPageFileSpace (Pfn1->OriginalPte);
  3605. Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
  3606. }
  3607. }
  3608. }
  3609. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF(Pfn1, 1);
  3610. }
  3611. Page += 1;
  3612. }
  3613. UNLOCK_PFN2 (OldIrql);
  3614. //
  3615. // Now ripple the remaining pages to the front of the MDL, effectively
  3616. // purging the old ones which have just been released.
  3617. //
  3618. ASSERT (i < NumberOfPages);
  3619. for ( ; i < NumberOfPages; i += 1) {
  3620. if (*Page == MM_EMPTY_LIST) {
  3621. break;
  3622. }
  3623. *NewPage = *Page;
  3624. NewPage += 1;
  3625. Page += 1;
  3626. }
  3627. //
  3628. // If the MDL has been mapped, stash the number of pages advanced
  3629. // at the end of the frame list inside the MDL and mark the MDL as
  3630. // containing extra PTEs to free. Thus when the MDL is finally
  3631. // completely unmapped, this can be used so the entire original PTE
  3632. // mapping range can be freed in one chunk so as not to fragment the
  3633. // PTE space.
  3634. //
  3635. if (MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
  3636. #if DBG
  3637. InterlockedExchangeAddSizeT (&MiCurrentAdvancedPages, PageCount);
  3638. MiAdvancesGiven += PageCount;
  3639. #endif
  3640. if (MdlFlags & MDL_FREE_EXTRA_PTES) {
  3641. //
  3642. // This MDL has already been advanced at least once. Any
  3643. // PTEs from those advancements need to be preserved now.
  3644. //
  3645. ASSERT (*Page <= MiCurrentAdvancedPages - PageCount);
  3646. PageCount += *(PULONG)Page;
  3647. }
  3648. else {
  3649. Mdl->MdlFlags |= MDL_FREE_EXTRA_PTES;
  3650. }
  3651. *NewPage = PageCount;
  3652. }
  3653. }
  3654. return STATUS_SUCCESS;
  3655. }
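//
// Editorial note: a sketch of consuming a header from a locked buffer with
// MmAdvanceMdl above. HEADER_SIZE and Mdl are assumptions; per the warning
// in the routine description, the advanced-over pages may be reused
// immediately.
//
#if 0
NTSTATUS Status;

Status = MmAdvanceMdl (Mdl, HEADER_SIZE);
if (NT_SUCCESS (Status)) {
    // The MDL now describes only the payload; any pointers previously
    // obtained into the advanced-over region are invalid.
}
#endif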
  3656. NTKERNELAPI
  3657. NTSTATUS
  3658. MmProtectMdlSystemAddress (
  3659. IN PMDL MemoryDescriptorList,
  3660. IN ULONG NewProtect
  3661. )
  3662. /*++
  3663. Routine Description:
  3664. This function protects the system address range specified
  3665. by the argument Memory Descriptor List.
  3666. Note the caller must make this MDL mapping readwrite before finally
  3667. freeing (or reusing) it.
  3668. Arguments:
  3669. MemoryDescriptorList - Supplies the MDL describing the virtual range.
  3670. NewProtect - Supplies the protection to set the pages to (PAGE_XX).
  3671. Return Value:
  3672. NTSTATUS.
  3673. Environment:
  3674. Kernel mode, IRQL DISPATCH_LEVEL or below. The caller is responsible for
  3675. synchronizing access to this routine.
  3676. --*/
  3677. {
  3678. KIRQL OldIrql;
  3679. PVOID BaseAddress;
  3680. PVOID SystemVa;
  3681. MMPTE PteContents;
  3682. PMMPTE PointerPte;
  3683. ULONG ProtectionMask;
  3684. #if DBG
  3685. PMMPFN Pfn1;
  3686. PPFN_NUMBER Page;
  3687. #endif
  3688. PFN_NUMBER PageFrameIndex;
  3689. PFN_NUMBER NumberOfPages;
  3690. MMPTE_FLUSH_LIST PteFlushList;
  3691. MMPTE OriginalPte;
  3692. LOGICAL WasValid;
  3693. PMM_PTE_MAPPING Map;
  3694. PMM_PTE_MAPPING MapEntry;
  3695. PMM_PTE_MAPPING FoundMap;
  3696. PLIST_ENTRY NextEntry;
  3697. ASSERT (KeGetCurrentIrql () <= DISPATCH_LEVEL);
  3698. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PAGES_LOCKED) != 0);
  3699. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
  3700. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) == 0);
  3701. ASSERT (MemoryDescriptorList->ByteCount != 0);
  3702. if ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0) {
  3703. return STATUS_NOT_MAPPED_VIEW;
  3704. }
  3705. BaseAddress = MemoryDescriptorList->MappedSystemVa;
  3706. ASSERT (BaseAddress > MM_HIGHEST_USER_ADDRESS);
  3707. ASSERT (!MI_IS_PHYSICAL_ADDRESS (BaseAddress));
  3708. ProtectionMask = MiMakeProtectionMask (NewProtect);
  3709. //
  3710. // No bogus or copy-on-write protections allowed for these.
  3711. //
  3712. if ((ProtectionMask == MM_INVALID_PROTECTION) ||
  3713. (ProtectionMask == MM_GUARD_PAGE) ||
  3714. (ProtectionMask == MM_DECOMMIT) ||
  3715. (ProtectionMask == MM_NOCACHE) ||
  3716. (ProtectionMask == MM_WRITECOPY) ||
  3717. (ProtectionMask == MM_EXECUTE_WRITECOPY)) {
  3718. return STATUS_INVALID_PAGE_PROTECTION;
  3719. }
  3720. PointerPte = MiGetPteAddress (BaseAddress);
  3721. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (BaseAddress,
  3722. MemoryDescriptorList->ByteCount);
  3723. SystemVa = PAGE_ALIGN (BaseAddress);
  3724. //
  3725. // Initializing Map is not needed for correctness
  3726. // but without it the compiler cannot compile this code
  3727. // W4 to check for use of uninitialized variables.
  3728. //
  3729. Map = NULL;
  3730. if (ProtectionMask != MM_READWRITE) {
  3731. Map = ExAllocatePoolWithTag (NonPagedPool,
  3732. sizeof(MM_PTE_MAPPING),
  3733. 'mPmM');
  3734. if (Map == NULL) {
  3735. return STATUS_INSUFFICIENT_RESOURCES;
  3736. }
  3737. Map->SystemVa = SystemVa;
  3738. Map->SystemEndVa = (PVOID)((ULONG_PTR)SystemVa + (NumberOfPages << PAGE_SHIFT));
  3739. Map->Protection = ProtectionMask;
  3740. }
  3741. #if DBG
  3742. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  3743. #endif
  3744. PteFlushList.Count = 0;
  3745. while (NumberOfPages != 0) {
  3746. PteContents = *PointerPte;
  3747. if (PteContents.u.Hard.Valid == 1) {
  3748. WasValid = TRUE;
  3749. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
  3750. OriginalPte = PteContents;
  3751. }
  3752. else if ((PteContents.u.Soft.Transition == 1) &&
  3753. (PteContents.u.Soft.Protection == MM_NOACCESS)) {
  3754. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents);
  3755. WasValid = FALSE;
  3756. #if defined(_IA64_)
  3757. OriginalPte.u.Hard.Cache = PteContents.u.Trans.Rsvd0;
  3758. #else
  3759. OriginalPte.u.Hard.WriteThrough = PteContents.u.Soft.PageFileLow;
  3760. OriginalPte.u.Hard.CacheDisable = (PteContents.u.Soft.PageFileLow >> 1);
  3761. #endif
  3762. }
  3763. else {
  3764. KeBugCheckEx (MEMORY_MANAGEMENT,
  3765. 0x1235,
  3766. (ULONG_PTR)MemoryDescriptorList,
  3767. (ULONG_PTR)PointerPte,
  3768. (ULONG_PTR)PteContents.u.Long);
  3769. }
  3770. #if DBG
  3771. ASSERT (*Page == PageFrameIndex);
  3772. if ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0) {
  3773. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  3774. ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
  3775. }
  3776. Page += 1;
  3777. #endif
  3778. if (ProtectionMask == MM_NOACCESS) {
  3779. //
  3780. // To generate a bugcheck on bogus access: Prototype must stay
  3781. // clear, transition must stay set, protection must stay NO_ACCESS.
  3782. //
  3783. MI_MAKE_VALID_PTE_TRANSITION (PteContents, MM_NOACCESS);
  3784. //
  3785. // Stash the cache attributes into the software PTE so they can
  3786. // be restored later.
  3787. //
  3788. #if defined(_IA64_)
  3789. PteContents.u.Trans.Rsvd0 = OriginalPte.u.Hard.Cache;
  3790. #else
  3791. PteContents.u.Soft.PageFileLow = OriginalPte.u.Hard.WriteThrough;
  3792. PteContents.u.Soft.PageFileLow |= (OriginalPte.u.Hard.CacheDisable << 1);
  3793. #endif
  3794. MI_WRITE_INVALID_PTE (PointerPte, PteContents);
  3795. }
  3796. else {
  3797. MI_MAKE_VALID_PTE (PteContents,
  3798. PageFrameIndex,
  3799. ProtectionMask,
  3800. PointerPte);
  3801. if (ProtectionMask & MM_READWRITE) {
  3802. MI_SET_PTE_DIRTY (PteContents);
  3803. }
  3804. //
  3805. // Extract cache type from the original PTE so it can be preserved.
  3806. // Note that since we only allow protection changes (not caching
  3807. // attribute changes), there is no need to flush or sweep TBs on
  3808. // insertion below.
  3809. //
  3810. #if defined(_IA64_)
  3811. PteContents.u.Hard.Cache = OriginalPte.u.Hard.Cache;
  3812. #else
  3813. PteContents.u.Hard.WriteThrough = OriginalPte.u.Hard.WriteThrough;
  3814. PteContents.u.Hard.CacheDisable = OriginalPte.u.Hard.CacheDisable;
  3815. #endif
  3816. if (WasValid == TRUE) {
  3817. MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, PteContents);
  3818. }
  3819. else {
  3820. MI_WRITE_VALID_PTE (PointerPte, PteContents);
  3821. }
  3822. }
  3823. if ((WasValid == TRUE) &&
  3824. (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT)) {
  3825. PteFlushList.FlushVa[PteFlushList.Count] = BaseAddress;
  3826. PteFlushList.Count += 1;
  3827. }
  3828. BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
  3829. PointerPte += 1;
  3830. NumberOfPages -= 1;
  3831. }
  3832. //
  3833. // Flush the TB entries for any relevant pages.
  3834. //
  3835. if (PteFlushList.Count != 0) {
  3836. MiFlushPteList (&PteFlushList, TRUE);
  3837. }
  3838. if (ProtectionMask != MM_READWRITE) {
  3839. //
  3840. // Insert (or update) the list entry describing this range.
  3841. // Don't bother sorting the list as there will never be many entries.
  3842. //
  3843. FoundMap = NULL;
  3844. OldIrql = KeAcquireSpinLockRaiseToSynch (&MmProtectedPteLock);
  3845. NextEntry = MmProtectedPteList.Flink;
  3846. while (NextEntry != &MmProtectedPteList) {
  3847. MapEntry = CONTAINING_RECORD (NextEntry,
  3848. MM_PTE_MAPPING,
  3849. ListEntry);
  3850. if (MapEntry->SystemVa == SystemVa) {
  3851. ASSERT (MapEntry->SystemEndVa == Map->SystemEndVa);
  3852. MapEntry->Protection = Map->Protection;
  3853. FoundMap = MapEntry;
  3854. break;
  3855. }
  3856. NextEntry = NextEntry->Flink;
  3857. }
  3858. if (FoundMap == NULL) {
  3859. InsertHeadList (&MmProtectedPteList, &Map->ListEntry);
  3860. }
  3861. KeReleaseSpinLock (&MmProtectedPteLock, OldIrql);
  3862. if (FoundMap != NULL) {
  3863. ExFreePool (Map);
  3864. }
  3865. }
  3866. else {
  3867. //
  3868. // If there is an existing list entry describing this range, remove it.
  3869. //
  3870. if (!IsListEmpty (&MmProtectedPteList)) {
  3871. FoundMap = NULL;
  3872. OldIrql = KeAcquireSpinLockRaiseToSynch (&MmProtectedPteLock);
  3873. NextEntry = MmProtectedPteList.Flink;
  3874. while (NextEntry != &MmProtectedPteList) {
  3875. MapEntry = CONTAINING_RECORD (NextEntry,
  3876. MM_PTE_MAPPING,
  3877. ListEntry);
  3878. if (MapEntry->SystemVa == SystemVa) {
  3879. RemoveEntryList (NextEntry);
  3880. FoundMap = MapEntry;
  3881. break;
  3882. }
  3883. NextEntry = NextEntry->Flink;
  3884. }
  3885. KeReleaseSpinLock (&MmProtectedPteLock, OldIrql);
  3886. if (FoundMap != NULL) {
  3887. ExFreePool (FoundMap);
  3888. }
  3889. }
  3890. }
  3891. ASSERT (MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);
  3892. return STATUS_SUCCESS;
  3893. }
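//
// Editorial note: a sketch of using the routine above to guard a mapped MDL
// read-only to catch stray writes, then restoring readwrite before the
// mapping is freed, as the description requires. Mdl is assumed locked and
// mapped into system space.
//
#if 0
NTSTATUS Status;

Status = MmProtectMdlSystemAddress (Mdl, PAGE_READONLY);
if (NT_SUCCESS (Status)) {
    // An errant write through the system mapping now bugchecks with
    // ATTEMPTED_WRITE_TO_READONLY_MEMORY instead of corrupting the buffer.
    Status = MmProtectMdlSystemAddress (Mdl, PAGE_READWRITE);
    ASSERT (NT_SUCCESS (Status));
}
#endif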
  3894. LOGICAL
  3895. MiCheckSystemPteProtection (
  3896. IN ULONG_PTR StoreInstruction,
  3897. IN PVOID VirtualAddress
  3898. )
  3899. /*++
  3900. Routine Description:
  3901. This function determines whether the faulting virtual address lies
  3902. within the non-writable alternate system PTE mappings.
  3903. Arguments:
  3904. StoreInstruction - Supplies nonzero if the operation causes a write into
  3905. memory, zero if not.
  3906. VirtualAddress - Supplies the virtual address which caused the fault.
  3907. Return Value:
  3908. TRUE if the fault was handled by this code (and PTE updated), FALSE if not.
  3909. Environment:
  3910. Kernel mode. Called from the fault handler at any IRQL.
  3911. --*/
  3912. {
  3913. KIRQL OldIrql;
  3914. PMMPTE PointerPte;
  3915. ULONG ProtectionCode;
  3916. PLIST_ENTRY NextEntry;
  3917. PMM_PTE_MAPPING MapEntry;
  3918. //
  3919. // If PTE mappings with various protections are active and the faulting
  3920. // address lies within these mappings, resolve the fault with
  3921. // the appropriate protections.
  3922. //
  3923. if (IsListEmpty (&MmProtectedPteList)) {
  3924. return FALSE;
  3925. }
  3926. OldIrql = KeAcquireSpinLockRaiseToSynch (&MmProtectedPteLock);
  3927. NextEntry = MmProtectedPteList.Flink;
  3928. while (NextEntry != &MmProtectedPteList) {
  3929. MapEntry = CONTAINING_RECORD (NextEntry,
  3930. MM_PTE_MAPPING,
  3931. ListEntry);
  3932. if ((VirtualAddress >= MapEntry->SystemVa) &&
  3933. (VirtualAddress < MapEntry->SystemEndVa)) {
  3934. ProtectionCode = MapEntry->Protection;
  3935. KeReleaseSpinLock (&MmProtectedPteLock, OldIrql);
  3936. PointerPte = MiGetPteAddress (VirtualAddress);
  3937. if (StoreInstruction != 0) {
  3938. if ((ProtectionCode & MM_READWRITE) == 0) {
  3939. KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY,
  3940. (ULONG_PTR)VirtualAddress,
  3941. (ULONG_PTR)PointerPte->u.Long,
  3942. 0,
  3943. 16);
  3944. }
  3945. }
  3946. MI_NO_FAULT_FOUND (StoreInstruction,
  3947. PointerPte,
  3948. VirtualAddress,
  3949. FALSE);
  3950. //
  3951. // Fault was handled directly here, no need for the caller to
  3952. // do anything.
  3953. //
  3954. return TRUE;
  3955. }
  3956. NextEntry = NextEntry->Flink;
  3957. }
  3958. KeReleaseSpinLock (&MmProtectedPteLock, OldIrql);
  3959. return FALSE;
  3960. }
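//
// Editorial note: an illustrative fragment of how a fault handler consults
// the routine above before treating a system-PTE fault as fatal; the
// surrounding dispatch context is assumed.
//
#if 0
if (MiCheckSystemPteProtection (StoreInstruction, VirtualAddress) == TRUE) {
    return STATUS_SUCCESS;      // PTE updated here; retry the access.
}
#endif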
  3961. VOID
  3962. MiInsertPhysicalVadRoot (
  3963. IN PEPROCESS Process,
  3964. IN PMM_AVL_TABLE PhysicalVadRoot
  3965. )
  3966. /*++
  3967. Routine Description:
  3968. This function is a nonpaged wrapper which acquires the PFN lock to insert
  3969. the physical VAD AVL root table into the specified process.
  3970. Arguments:
  3971. Process - Supplies the process to add the physical VAD root to.
  3972. PhysicalVadRoot - Supplies the physical vad root table to link in.
  3973. Return Value:
  3974. None.
  3975. Environment:
  3976. Kernel mode. APC_LEVEL, address space (and optionally working set)
  3977. mutex held.
  3978. --*/
  3979. {
  3980. KIRQL OldIrql;
  3981. ASSERT (KeGetOwnerGuardedMutex (&Process->AddressCreationLock) == KeGetCurrentThread ());
  3982. //
  3983. // Acquire the PFN lock to synchronize with concurrent threads calling
  3984. // MmProbeAndLockPages which examines this table.
  3985. //
  3986. LOCK_PFN (OldIrql);
  3987. ASSERT (Process->PhysicalVadRoot == NULL);
  3988. Process->PhysicalVadRoot = PhysicalVadRoot;
  3989. UNLOCK_PFN (OldIrql);
  3990. return;
  3991. }
  3992. VOID
  3993. MiPhysicalViewInserter (
  3994. IN PEPROCESS Process,
  3995. IN PMI_PHYSICAL_VIEW PhysicalView
  3996. )
  3997. /*++
  3998. Routine Description:
  3999. This function is a nonpaged wrapper which acquires the PFN lock to insert
  4000. a physical VAD into the process chain.
  4001. Arguments:
  4002. Process - Supplies the process to add the physical VAD to.
  4003. PhysicalView - Supplies the physical view data to link in.
  4004. Return Value:
  4005. None.
  4006. Environment:
  4007. Kernel mode. APC_LEVEL, working set and address space mutexes held.
  4008. --*/
  4009. {
  4010. KIRQL OldIrql;
  4011. MmLockPagableSectionByHandle (ExPageLockHandle);
  4012. LOCK_PFN (OldIrql);
  4013. ASSERT (Process->PhysicalVadRoot != NULL);
  4014. MiInsertNode ((PMMADDRESS_NODE)PhysicalView, Process->PhysicalVadRoot);
  4015. UNLOCK_PFN (OldIrql);
  4016. if (PhysicalView->Vad->u.VadFlags.WriteWatch == 1) {
  4017. //
  4018. // Mark this process as forever containing write-watch
  4019. // address space(s).
  4020. //
  4021. PS_SET_BITS (&Process->Flags, PS_PROCESS_FLAGS_USING_WRITE_WATCH);
  4022. }
  4023. MmUnlockPagableImageSection (ExPageLockHandle);
  4024. return;
  4025. }
  4026. VOID
  4027. MiPhysicalViewRemover (
  4028. IN PEPROCESS Process,
  4029. IN PMMVAD Vad
  4030. )
  4031. /*++
  4032. Routine Description:
  4033. This function is a nonpaged wrapper which acquires the PFN lock to remove
  4034. a physical VAD from the process chain.
  4035. Arguments:
  4036. Process - Supplies the process to remove the physical VAD from.
  4037. Vad - Supplies the Vad to remove.
  4038. Return Value:
  4039. None.
  4040. Environment:
  4041. Kernel mode, APC_LEVEL, working set and address space mutexes held.
  4042. --*/
  4043. {
  4044. KIRQL OldIrql;
  4045. PRTL_BITMAP BitMap;
  4046. PMI_PHYSICAL_VIEW PhysicalView;
  4047. ULONG BitMapSize;
  4048. TABLE_SEARCH_RESULT SearchResult;
  4049. LOCK_PFN (OldIrql);
  4050. //
  4051. // Lookup the element and save the result.
  4052. //
  4053. ASSERT (Process->PhysicalVadRoot != NULL);
  4054. SearchResult = MiFindNodeOrParent (Process->PhysicalVadRoot,
  4055. Vad->StartingVpn,
  4056. (PMMADDRESS_NODE *) &PhysicalView);
  4057. ASSERT (SearchResult == TableFoundNode);
  4058. ASSERT (PhysicalView->Vad == Vad);
  4059. MiRemoveNode ((PMMADDRESS_NODE)PhysicalView, Process->PhysicalVadRoot);
  4060. UNLOCK_PFN (OldIrql);
  4061. if (Vad->u.VadFlags.WriteWatch == 1) {
  4062. BitMap = PhysicalView->u.BitMap;
  4063. BitMapSize = sizeof(RTL_BITMAP) + (ULONG)(((BitMap->SizeOfBitMap + 31) / 32) * 4);
  4064. PsReturnProcessNonPagedPoolQuota (Process, BitMapSize);
  4065. ExFreePool (BitMap);
  4066. }
  4067. ExFreePool (PhysicalView);
  4068. return;
  4069. }
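//
// Editorial note: a worked example of the BitMapSize charge computed above,
// assuming a 1MB write-watch VAD, 4KB pages and one bit per page.
//
#if 0
ULONG Bits = (1024 * 1024) / PAGE_SIZE;                     // 256 bits
ULONG Size = sizeof (RTL_BITMAP) + ((Bits + 31) / 32) * 4;  // header + 32 bytes
#endif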
  4070. VOID
  4071. MiPhysicalViewAdjuster (
  4072. IN PEPROCESS Process,
  4073. IN PMMVAD OldVad,
  4074. IN PMMVAD NewVad
  4075. )
  4076. /*++
  4077. Routine Description:
  4078. This function is a nonpaged wrapper which acquires the PFN lock to repoint
  4079. a physical VAD in the process chain.
  4080. Arguments:
  4081. Process - Supplies the process in which to adjust the physical VAD.
4082. OldVad - Supplies the old Vad to replace.
4083. NewVad - Supplies the new Vad to substitute.
  4084. Return Value:
  4085. None.
  4086. Environment:
  4087. Kernel mode, called with APCs disabled, working set mutex held.
  4088. --*/
  4089. {
  4090. KIRQL OldIrql;
  4091. PMI_PHYSICAL_VIEW PhysicalView;
  4092. TABLE_SEARCH_RESULT SearchResult;
  4093. MmLockPagableSectionByHandle (ExPageLockHandle);
  4094. LOCK_PFN (OldIrql);
  4095. //
  4096. // Lookup the element and save the result.
  4097. //
  4098. ASSERT (Process->PhysicalVadRoot != NULL);
  4099. SearchResult = MiFindNodeOrParent (Process->PhysicalVadRoot,
  4100. OldVad->StartingVpn,
  4101. (PMMADDRESS_NODE *) &PhysicalView);
  4102. ASSERT (SearchResult == TableFoundNode);
  4103. ASSERT (PhysicalView->Vad == OldVad);
  4104. PhysicalView->Vad = NewVad;
  4105. UNLOCK_PFN (OldIrql);
  4106. MmUnlockPagableImageSection (ExPageLockHandle);
  4107. return;
  4108. }
  4109. PVOID
  4110. MiMapLockedPagesInUserSpace (
  4111. IN PMDL MemoryDescriptorList,
  4112. IN PVOID StartingVa,
  4113. IN MEMORY_CACHING_TYPE CacheType,
  4114. IN PVOID BaseVa
  4115. )
  4116. /*++
  4117. Routine Description:
  4118. This function maps physical pages described by a memory descriptor
  4119. list into the user portion of the virtual address space.
  4120. Arguments:
  4121. MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
  4122. been updated by MmProbeAndLockPages.
  4123. StartingVa - Supplies the starting address.
  4124. CacheType - Supplies the type of cache mapping to use for the MDL.
  4125. MmCached indicates "normal" user mappings.
  4126. BaseVa - Supplies the base address of the view. If the initial
  4127. value of this argument is not null, then the view will
  4128. be allocated starting at the specified virtual
  4129. address rounded down to the next 64kb address
  4130. boundary. If the initial value of this argument is
  4131. null, then the operating system will determine
  4132. where to allocate the view.
  4133. Return Value:
  4134. Returns the base address where the pages are mapped. The base address
  4135. has the same offset as the virtual address in the MDL.
  4136. This routine will raise an exception if quota limits or VM limits are
  4137. exceeded.
  4138. Environment:
  4139. Kernel mode. APC_LEVEL or below.
  4140. --*/
  4141. {
  4142. KIRQL OldIrql;
  4143. CSHORT IoMapping;
  4144. PFN_NUMBER NumberOfPages;
  4145. PFN_NUMBER SavedPageCount;
  4146. PFN_NUMBER PageFrameIndex;
  4147. PPFN_NUMBER Page;
  4148. PMMPTE PointerPte;
  4149. PMMPTE PointerPde;
  4150. PCHAR Va;
  4151. MMPTE TempPte;
  4152. PVOID EndingAddress;
  4153. PMMVAD_LONG Vad;
  4154. PEPROCESS Process;
  4155. PMMPFN Pfn2;
  4156. PVOID UsedPageTableHandle;
  4157. PMI_PHYSICAL_VIEW PhysicalView;
  4158. NTSTATUS Status;
  4159. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  4160. PMM_AVL_TABLE PhysicalVadRoot;
  4161. PAGED_CODE ();
  4162. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  4163. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
  4164. MemoryDescriptorList->ByteCount);
  4165. //
  4166. // If a noncachable mapping is requested, none of the pages in the
  4167. // requested MDL can reside in a large page. Otherwise we would be
  4168. // creating an incoherent overlapping TB entry as the same physical
  4169. // page would be mapped by 2 different TB entries with different
  4170. // cache attributes.
  4171. //
  4172. IoMapping = MemoryDescriptorList->MdlFlags & MDL_IO_SPACE;
  4173. CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, IoMapping);
  4174. if (CacheAttribute != MiCached) {
  4175. SavedPageCount = NumberOfPages;
  4176. LOCK_PFN (OldIrql);
  4177. do {
  4178. if (*Page == MM_EMPTY_LIST) {
  4179. break;
  4180. }
  4181. PageFrameIndex = *Page;
  4182. if (MI_PAGE_FRAME_INDEX_MUST_BE_CACHED (PageFrameIndex)) {
  4183. UNLOCK_PFN (OldIrql);
  4184. MiNonCachedCollisions += 1;
  4185. ExRaiseStatus (STATUS_INVALID_ADDRESS);
  4186. return NULL;
  4187. }
  4188. Page += 1;
  4189. NumberOfPages -= 1;
  4190. } while (NumberOfPages != 0);
  4191. UNLOCK_PFN (OldIrql);
  4192. NumberOfPages = SavedPageCount;
  4193. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  4194. }
  4195. //
  4196. // Map the pages into the user part of the address as user
  4197. // read/write no-delete.
  4198. //
  4199. Vad = ExAllocatePoolWithTag (NonPagedPool, sizeof(MMVAD_LONG), 'ldaV');
  4200. if (Vad == NULL) {
  4201. ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
  4202. return NULL;
  4203. }
  4204. PhysicalView = (PMI_PHYSICAL_VIEW)ExAllocatePoolWithTag (NonPagedPool,
  4205. sizeof(MI_PHYSICAL_VIEW),
  4206. MI_PHYSICAL_VIEW_KEY);
  4207. if (PhysicalView == NULL) {
  4208. ExFreePool (Vad);
  4209. ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
  4210. return NULL;
  4211. }
  4212. RtlZeroMemory (Vad, sizeof (MMVAD_LONG));
  4213. ASSERT (Vad->ControlArea == NULL);
  4214. ASSERT (Vad->FirstPrototypePte == NULL);
  4215. ASSERT (Vad->u.LongFlags == 0);
  4216. Vad->u.VadFlags.Protection = MM_READWRITE;
  4217. Vad->u.VadFlags.PhysicalMapping = 1;
  4218. Vad->u.VadFlags.PrivateMemory = 1;
  4219. Vad->u2.VadFlags2.LongVad = 1;
  4220. PhysicalView->Vad = (PMMVAD) Vad;
  4221. PhysicalView->u.LongFlags = MI_PHYSICAL_VIEW_PHYS;
  4222. Process = PsGetCurrentProcess ();
  4223. //
  4224. // Make sure the specified starting and ending addresses are
  4225. // within the user part of the virtual address space.
  4226. //
  4227. if (BaseVa != NULL) {
  4228. if (BYTE_OFFSET (BaseVa) != 0) {
  4229. //
  4230. // Invalid base address.
  4231. //
  4232. Status = STATUS_INVALID_ADDRESS;
  4233. goto ErrorReturn;
  4234. }
  4235. EndingAddress = (PVOID)((PCHAR)BaseVa + ((ULONG_PTR)NumberOfPages * PAGE_SIZE) - 1);
  4236. if ((EndingAddress <= BaseVa) || (EndingAddress > MM_HIGHEST_VAD_ADDRESS)) {
  4237. //
  4238. // Invalid region size.
  4239. //
  4240. Status = STATUS_INVALID_ADDRESS;
  4241. goto ErrorReturn;
  4242. }
  4243. LOCK_ADDRESS_SPACE (Process);
  4244. //
  4245. // Make sure the address space was not deleted, if so, return an error.
  4246. //
  4247. if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
  4248. UNLOCK_ADDRESS_SPACE (Process);
  4249. Status = STATUS_PROCESS_IS_TERMINATING;
  4250. goto ErrorReturn;
  4251. }
  4252. //
  4253. // Make sure the address space is not already in use.
  4254. //
  4255. if (MiCheckForConflictingVadExistence (Process, BaseVa, EndingAddress) == TRUE) {
  4256. UNLOCK_ADDRESS_SPACE (Process);
  4257. Status = STATUS_CONFLICTING_ADDRESSES;
  4258. goto ErrorReturn;
  4259. }
  4260. }
  4261. else {
  4262. //
  4263. // Get the address creation mutex.
  4264. //
  4265. LOCK_ADDRESS_SPACE (Process);
  4266. //
  4267. // Make sure the address space was not deleted, if so, return an error.
  4268. //
  4269. if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
  4270. UNLOCK_ADDRESS_SPACE (Process);
  4271. Status = STATUS_PROCESS_IS_TERMINATING;
  4272. goto ErrorReturn;
  4273. }
  4274. Status = MiFindEmptyAddressRange ((ULONG_PTR)NumberOfPages * PAGE_SIZE,
  4275. X64K,
  4276. 0,
  4277. &BaseVa);
  4278. if (!NT_SUCCESS (Status)) {
  4279. UNLOCK_ADDRESS_SPACE (Process);
  4280. goto ErrorReturn;
  4281. }
  4282. EndingAddress = (PVOID)((PCHAR)BaseVa + ((ULONG_PTR)NumberOfPages * PAGE_SIZE) - 1);
  4283. }
  4284. Vad->StartingVpn = MI_VA_TO_VPN (BaseVa);
  4285. Vad->EndingVpn = MI_VA_TO_VPN (EndingAddress);
  4286. PhysicalView->StartingVpn = Vad->StartingVpn;
  4287. PhysicalView->EndingVpn = Vad->EndingVpn;
  4288. PhysicalVadRoot = Process->PhysicalVadRoot;
  4289. //
  4290. // The address space mutex synchronizes the allocation of the
  4291. // EPROCESS PhysicalVadRoot. This table root is not deleted until
  4292. // the process exits.
  4293. //
  4294. if (PhysicalVadRoot == NULL) {
  4295. PhysicalVadRoot = (PMM_AVL_TABLE) ExAllocatePoolWithTag (
  4296. NonPagedPool,
  4297. sizeof (MM_AVL_TABLE),
  4298. MI_PHYSICAL_VIEW_ROOT_KEY);
  4299. if (PhysicalVadRoot == NULL) {
  4300. UNLOCK_ADDRESS_SPACE (Process);
  4301. Status = STATUS_INSUFFICIENT_RESOURCES;
  4302. goto ErrorReturn;
  4303. }
  4304. RtlZeroMemory (PhysicalVadRoot, sizeof (MM_AVL_TABLE));
  4305. ASSERT (PhysicalVadRoot->NumberGenericTableElements == 0);
  4306. PhysicalVadRoot->BalancedRoot.u1.Parent = &PhysicalVadRoot->BalancedRoot;
  4307. MiInsertPhysicalVadRoot (Process, PhysicalVadRoot);
  4308. }
  4309. LOCK_WS_UNSAFE (Process);
  4310. Status = MiInsertVad ((PMMVAD) Vad);
  4311. if (!NT_SUCCESS(Status)) {
  4312. UNLOCK_WS_AND_ADDRESS_SPACE (Process);
  4313. goto ErrorReturn;
  4314. }
  4315. //
4316. // The VAD has been inserted, but the physical view descriptor cannot
4317. // be inserted until the page table page hierarchy is in place. This
4318. // is to prevent races with probes.
  4319. //
  4320. //
  4321. // Create a page table and fill in the mappings for the Vad.
  4322. //
  4323. Va = BaseVa;
  4324. PointerPte = MiGetPteAddress (BaseVa);
  4325. MI_PREPARE_FOR_NONCACHED (CacheAttribute);
  4326. do {
  4327. if (*Page == MM_EMPTY_LIST) {
  4328. break;
  4329. }
  4330. PointerPde = MiGetPteAddress (PointerPte);
  4331. MiMakePdeExistAndMakeValid (PointerPde, Process, MM_NOIRQL);
  4332. ASSERT (PointerPte->u.Hard.Valid == 0);
  4333. //
  4334. // Another zeroed PTE is being made non-zero.
  4335. //
  4336. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (Va);
  4337. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  4338. TempPte = ValidUserPte;
  4339. TempPte.u.Hard.PageFrameNumber = *Page;
  4340. if (IoMapping == 0) {
  4341. Pfn2 = MI_PFN_ELEMENT (*Page);
  4342. ASSERT (Pfn2->u3.e2.ReferenceCount != 0);
  4343. switch (Pfn2->u3.e1.CacheAttribute) {
  4344. case MiCached:
  4345. if (CacheAttribute != MiCached) {
  4346. //
  4347. // The caller asked for a noncached or writecombined
  4348. // mapping, but the page is already mapped cached by
  4349. // someone else. Override the caller's request in
  4350. // order to keep the TB page attribute coherent.
  4351. //
  4352. MiCacheOverride[0] += 1;
  4353. }
  4354. break;
  4355. case MiNonCached:
  4356. if (CacheAttribute != MiNonCached) {
  4357. //
  4358. // The caller asked for a cached or writecombined
  4359. // mapping, but the page is already mapped noncached
  4360. // by someone else. Override the caller's request
  4361. // in order to keep the TB page attribute coherent.
  4362. //
  4363. MiCacheOverride[1] += 1;
  4364. }
  4365. MI_DISABLE_CACHING (TempPte);
  4366. break;
  4367. case MiWriteCombined:
  4368. if (CacheAttribute != MiWriteCombined) {
  4369. //
  4370. // The caller asked for a cached or noncached
  4371. // mapping, but the page is already mapped
  4372. // writecombined by someone else. Override the
  4373. // caller's request in order to keep the TB page
  4374. // attribute coherent.
  4375. //
  4376. MiCacheOverride[2] += 1;
  4377. }
  4378. MI_SET_PTE_WRITE_COMBINE (TempPte);
  4379. break;
  4380. case MiNotMapped:
  4381. //
  4382. // This better be for a page allocated with
  4383. // MmAllocatePagesForMdl. Otherwise it might be a
  4384. // page on the freelist which could subsequently be
  4385. // given out with a different attribute !
  4386. //
  4387. ASSERT ((Pfn2->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME) ||
  4388. (Pfn2->PteAddress == (PVOID) (ULONG_PTR)(X64K | 0x1)));
  4389. switch (CacheAttribute) {
  4390. case MiCached:
  4391. Pfn2->u3.e1.CacheAttribute = MiCached;
  4392. break;
  4393. case MiNonCached:
  4394. Pfn2->u3.e1.CacheAttribute = MiNonCached;
  4395. MI_DISABLE_CACHING (TempPte);
  4396. break;
  4397. case MiWriteCombined:
  4398. Pfn2->u3.e1.CacheAttribute = MiWriteCombined;
  4399. MI_SET_PTE_WRITE_COMBINE (TempPte);
  4400. break;
  4401. default:
  4402. ASSERT (FALSE);
  4403. break;
  4404. }
  4405. break;
  4406. default:
  4407. ASSERT (FALSE);
  4408. break;
  4409. }
  4410. }
  4411. else {
  4412. switch (CacheAttribute) {
  4413. case MiCached:
  4414. break;
  4415. case MiNonCached:
  4416. MI_DISABLE_CACHING (TempPte);
  4417. break;
  4418. case MiWriteCombined:
  4419. MI_SET_PTE_WRITE_COMBINE (TempPte);
  4420. break;
  4421. default:
  4422. ASSERT (FALSE);
  4423. break;
  4424. }
  4425. }
  4426. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  4427. //
  4428. // A PTE just went from not present, not transition to
  4429. // present. The share count and valid count must be
  4430. // updated in the page table page which contains this PTE.
  4431. //
  4432. Pfn2 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
  4433. Pfn2->u2.ShareCount += 1;
  4434. Page += 1;
  4435. PointerPte += 1;
  4436. NumberOfPages -= 1;
  4437. Va += PAGE_SIZE;
  4438. } while (NumberOfPages != 0);
  4439. MI_SWEEP_CACHE (CacheAttribute, BaseVa, MemoryDescriptorList->ByteCount);
  4440. //
  4441. // Insert the physical view descriptor now that the page table page
  4442. // hierarchy is in place. Note probes can find this descriptor immediately.
  4443. //
  4444. MiPhysicalViewInserter (Process, PhysicalView);
  4445. UNLOCK_WS_AND_ADDRESS_SPACE (Process);
  4446. ASSERT (BaseVa != NULL);
  4447. BaseVa = (PVOID)((PCHAR)BaseVa + MemoryDescriptorList->ByteOffset);
  4448. return BaseVa;
  4449. ErrorReturn:
  4450. ExFreePool (Vad);
  4451. ExFreePool (PhysicalView);
  4452. ExRaiseStatus (Status);
  4453. return NULL;
  4454. }
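//
// Editorial note: a sketch of the public path into the routine above. User
// mappings are requested via MmMapLockedPagesSpecifyCache, which raises on
// failure, so callers wrap it in SEH. Mdl is assumed already locked.
//
#if 0
PVOID UserVa = NULL;

__try {
    UserVa = MmMapLockedPagesSpecifyCache (Mdl,
                                           UserMode,
                                           MmCached,
                                           NULL,    // let Mm pick the address
                                           FALSE,   // no bugcheck on failure
                                           NormalPagePriority);
}
__except (EXCEPTION_EXECUTE_HANDLER) {
    UserVa = NULL;
}

if (UserVa != NULL) {
    // ... expose UserVa to the requesting process ...
    MmUnmapLockedPages (UserVa, Mdl);
}
#endif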
  4455. VOID
  4456. MiUnmapLockedPagesInUserSpace (
  4457. IN PVOID BaseAddress,
  4458. IN PMDL MemoryDescriptorList
  4459. )
  4460. /*++
  4461. Routine Description:
  4462. This routine unmaps locked pages which were previously mapped via
  4463. a MmMapLockedPages function.
  4464. Arguments:
  4465. BaseAddress - Supplies the base address where the pages were previously
  4466. mapped.
  4467. MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
  4468. been updated by MmProbeAndLockPages.
  4469. Return Value:
  4470. None.
  4471. Environment:
4472. Kernel mode. APC_LEVEL or below, since the base address unmapped by
4473. this helper is always in user space.
  4474. --*/
  4475. {
  4476. PFN_NUMBER NumberOfPages;
  4477. PPFN_NUMBER Page;
  4478. PMMPTE PointerPte;
  4479. PMMPTE PointerPde;
  4480. #if (_MI_PAGING_LEVELS >= 3)
  4481. PMMPTE PointerPpe;
  4482. #endif
  4483. #if (_MI_PAGING_LEVELS >= 4)
  4484. PMMPTE PointerPxe;
  4485. #endif
  4486. PVOID StartingVa;
  4487. PVOID EndingVa;
  4488. KIRQL OldIrql;
  4489. PMMVAD Vad;
  4490. PMMVAD PreviousVad;
  4491. PMMVAD NextVad;
  4492. PVOID TempVa;
  4493. PEPROCESS Process;
  4494. PMMPFN PageTablePfn;
  4495. PFN_NUMBER PageTablePage;
  4496. PVOID UsedPageTableHandle;
  4497. MMPTE_FLUSH_LIST PteFlushList;
  4498. PteFlushList.Count = 0;
  4499. MmLockPagableSectionByHandle (ExPageLockHandle);
  4500. StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
  4501. MemoryDescriptorList->ByteOffset);
  4502. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  4503. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
  4504. MemoryDescriptorList->ByteCount);
  4505. ASSERT (NumberOfPages != 0);
  4506. PointerPte = MiGetPteAddress (BaseAddress);
  4507. PointerPde = MiGetPdeAddress (BaseAddress);
  4508. //
  4509. // This was mapped into the user portion of the address space and
  4510. // the corresponding virtual address descriptor must be deleted.
  4511. //
  4512. //
  4513. // Get the working set mutex and address creation mutex.
  4514. //
  4515. Process = PsGetCurrentProcess ();
  4516. LOCK_ADDRESS_SPACE (Process);
  4517. Vad = MiLocateAddress (BaseAddress);
  4518. if ((Vad == NULL) || (Vad->u.VadFlags.PhysicalMapping == 0)) {
  4519. UNLOCK_ADDRESS_SPACE (Process);
  4520. MmUnlockPagableImageSection(ExPageLockHandle);
  4521. return;
  4522. }
  4523. PreviousVad = MiGetPreviousVad (Vad);
  4524. NextVad = MiGetNextVad (Vad);
  4525. StartingVa = MI_VPN_TO_VA (Vad->StartingVpn);
  4526. EndingVa = MI_VPN_TO_VA_ENDING (Vad->EndingVpn);
  4527. LOCK_WS_UNSAFE (Process);
  4528. MiPhysicalViewRemover (Process, Vad);
  4529. MiRemoveVad (Vad);
  4530. //
  4531. // Return commitment for page table pages if possible.
  4532. //
  4533. MiReturnPageTablePageCommitment (StartingVa,
  4534. EndingVa,
  4535. Process,
  4536. PreviousVad,
  4537. NextVad);
  4538. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (BaseAddress);
  4539. PageTablePage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPde);
  4540. PageTablePfn = MI_PFN_ELEMENT (PageTablePage);
  4541. //
  4542. // Get the PFN lock so we can safely decrement share and valid
  4543. // counts on page table pages.
  4544. //
  4545. LOCK_PFN (OldIrql);
  4546. do {
  4547. if (*Page == MM_EMPTY_LIST) {
  4548. break;
  4549. }
  4550. ASSERT64 (MiGetPdeAddress(PointerPte)->u.Hard.Valid == 1);
  4551. ASSERT (MiGetPteAddress(PointerPte)->u.Hard.Valid == 1);
  4552. ASSERT (PointerPte->u.Hard.Valid == 1);
  4553. //
  4554. // Another PTE is being zeroed.
  4555. //
  4556. MI_DECREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  4557. MI_WRITE_INVALID_PTE (PointerPte, ZeroPte);
  4558. if (PteFlushList.Count < MM_MAXIMUM_FLUSH_COUNT) {
  4559. PteFlushList.FlushVa[PteFlushList.Count] = BaseAddress;
  4560. PteFlushList.Count += 1;
  4561. }
  4562. MiDecrementShareCountInline (PageTablePfn, PageTablePage);
  4563. PointerPte += 1;
  4564. NumberOfPages -= 1;
  4565. BaseAddress = (PVOID)((PCHAR)BaseAddress + PAGE_SIZE);
  4566. Page += 1;
  4567. if ((MiIsPteOnPdeBoundary(PointerPte)) || (NumberOfPages == 0)) {
  4568. if (PteFlushList.Count != 0) {
  4569. MiFlushPteList (&PteFlushList, FALSE);
  4570. PteFlushList.Count = 0;
  4571. }
  4572. PointerPde = MiGetPteAddress(PointerPte - 1);
  4573. ASSERT (PointerPde->u.Hard.Valid == 1);
  4574. //
  4575. // If all the entries have been eliminated from the previous
  4576. // page table page, delete the page table page itself. Likewise
  4577. // with the page directory and parent pages.
  4578. //
  4579. if (MI_GET_USED_PTES_FROM_HANDLE (UsedPageTableHandle) == 0) {
  4580. ASSERT (PointerPde->u.Long != 0);
  4581. #if (_MI_PAGING_LEVELS >= 3)
  4582. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (PointerPte - 1);
  4583. MI_DECREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  4584. #endif
  4585. TempVa = MiGetVirtualAddressMappedByPte (PointerPde);
  4586. MiDeletePte (PointerPde,
  4587. TempVa,
  4588. FALSE,
  4589. Process,
  4590. NULL,
  4591. NULL,
  4592. OldIrql);
  4593. #if (_MI_PAGING_LEVELS >= 3)
  4594. if ((MiIsPteOnPpeBoundary(PointerPte)) || (NumberOfPages == 0)) {
  4595. PointerPpe = MiGetPteAddress (PointerPde);
  4596. ASSERT (PointerPpe->u.Hard.Valid == 1);
  4597. //
  4598. // If all the entries have been eliminated from the previous
  4599. // page directory page, delete the page directory page too.
  4600. //
  4601. if (MI_GET_USED_PTES_FROM_HANDLE (UsedPageTableHandle) == 0) {
  4602. ASSERT (PointerPpe->u.Long != 0);
  4603. #if (_MI_PAGING_LEVELS >= 4)
  4604. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (PointerPde);
  4605. MI_DECREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  4606. #endif
  4607. TempVa = MiGetVirtualAddressMappedByPte(PointerPpe);
  4608. MiDeletePte (PointerPpe,
  4609. TempVa,
  4610. FALSE,
  4611. Process,
  4612. NULL,
  4613. NULL,
  4614. OldIrql);
  4615. #if (_MI_PAGING_LEVELS >= 4)
  4616. if ((MiIsPteOnPxeBoundary(PointerPte)) || (NumberOfPages == 0)) {
  4617. PointerPxe = MiGetPdeAddress (PointerPde);
  4618. ASSERT (PointerPxe->u.Long != 0);
  4619. if (MI_GET_USED_PTES_FROM_HANDLE (UsedPageTableHandle) == 0) {
  4620. TempVa = MiGetVirtualAddressMappedByPte(PointerPxe);
  4621. MiDeletePte (PointerPxe,
  4622. TempVa,
  4623. FALSE,
  4624. Process,
  4625. NULL,
  4626. NULL,
  4627. OldIrql);
  4628. }
  4629. }
  4630. #endif
  4631. }
  4632. }
  4633. #endif
  4634. }
  4635. if (NumberOfPages == 0) {
  4636. break;
  4637. }
  4638. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (BaseAddress);
  4639. PointerPde += 1;
  4640. PageTablePage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPde);
  4641. PageTablePfn = MI_PFN_ELEMENT (PageTablePage);
  4642. }
  4643. } while (NumberOfPages != 0);
  4644. if (PteFlushList.Count != 0) {
  4645. MiFlushPteList (&PteFlushList, FALSE);
  4646. }
  4647. UNLOCK_PFN (OldIrql);
  4648. UNLOCK_WS_AND_ADDRESS_SPACE (Process);
  4649. ExFreePool (Vad);
  4650. MmUnlockPagableImageSection(ExPageLockHandle);
  4651. return;
  4652. }
  4653. #define MI_LARGE_PAGE_VA_SPACE ((ULONG64)8 * 1024 * 1024 * 1024) // Relatively arbitrary
  4654. #if (_MI_PAGING_LEVELS>=3)
  4655. PVOID MiLargeVaStart;
  4656. ULONG MiLargeVaInUse [(MI_LARGE_PAGE_VA_SPACE / MM_MINIMUM_VA_FOR_LARGE_PAGE) / 32];
  4657. #endif
  4658. VOID
  4659. MiInitializeLargePageSupport (
  4660. VOID
  4661. )
  4662. /*++
  4663. Routine Description:
  4664. This function is called once at system initialization.
  4665. Arguments:
  4666. None.
  4667. Return Value:
  4668. None.
  4669. Environment:
  4670. Kernel mode, INIT time. Resident available pages are not yet initialized,
  4671. but everything else is.
  4672. --*/
  4673. {
  4674. #if (_MI_PAGING_LEVELS>=3)
  4675. ULONG PageColor;
  4676. KIRQL OldIrql;
  4677. MMPTE TempPte;
  4678. PMMPTE PointerPpe;
  4679. PFN_NUMBER PageFrameIndex;
  4680. PFN_NUMBER NumberOfPages;
  4681. MiLargeVaStart = (PVOID)-1;
  4682. RtlInitializeBitMap (&MiLargeVaBitMap,
  4683. MiLargeVaInUse,
  4684. (ULONG) sizeof (MiLargeVaInUse) * 8);
  4685. ASSERT (MmNonPagedPoolEnd != NULL);
  4686. KeInitializeSpinLock (&MiLargePageLock);
  4687. #if (_MI_PAGING_LEVELS>=4)
  4688. PointerPpe = MiGetPxeAddress (MmNonPagedPoolEnd) + 1;
  4689. while (PointerPpe->u.Long != 0) {
  4690. PointerPpe += 1;
  4691. }
  4692. //
  4693. // Allocate the top level extended page directory parent.
  4694. //
  4695. if (MiChargeCommitment (1, NULL) == FALSE) {
  4696. RtlSetAllBits (&MiLargeVaBitMap);
  4697. return;
  4698. }
  4699. MM_TRACK_COMMIT (MM_DBG_COMMIT_LARGE_VA_PAGES, 1);
  4700. ASSERT (PointerPpe->u.Long == 0);
  4701. PointerPpe->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  4702. LOCK_PFN (OldIrql);
  4703. if (MmAvailablePages < MM_HIGH_LIMIT) {
  4704. MiEnsureAvailablePageOrWait (NULL, NULL, OldIrql);
  4705. }
  4706. PageColor = MI_PAGE_COLOR_VA_PROCESS (VirtualAddress,
  4707. &MI_SYSTEM_PAGE_COLOR);
  4708. PageFrameIndex = MiRemoveZeroPage (PageColor);
  4709. MiInitializePfn (PageFrameIndex, PointerPpe, 1);
  4710. UNLOCK_PFN (OldIrql);
  4711. MI_MAKE_VALID_PTE (TempPte, PageFrameIndex, MM_READWRITE, PointerPpe);
  4712. MI_SET_PTE_DIRTY (TempPte);
  4713. MI_WRITE_VALID_PTE (PointerPpe, TempPte);
  4714. PointerPpe = MiGetVirtualAddressMappedByPte (PointerPpe);
  4715. #else
  4716. PointerPpe = MiGetPpeAddress (MmNonPagedPoolEnd) + 1;
  4717. while (PointerPpe->u.Long != 0) {
  4718. PointerPpe += 1;
  4719. }
  4720. #endif
  4721. MiLargeVaStart = MiGetVirtualAddressMappedByPpe (PointerPpe);
  4722. NumberOfPages = (MI_LARGE_PAGE_VA_SPACE / MM_VA_MAPPED_BY_PPE);
  4723. ASSERT (NumberOfPages != 0);
  4724. if (MiChargeCommitment (NumberOfPages, NULL) == FALSE) {
  4725. RtlSetAllBits (&MiLargeVaBitMap);
  4726. return;
  4727. }
  4728. MM_TRACK_COMMIT (MM_DBG_COMMIT_LARGE_VA_PAGES, NumberOfPages);
  4729. do {
  4730. ASSERT (PointerPpe->u.Long == 0);
  4731. PointerPpe->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  4732. LOCK_PFN (OldIrql);
  4733. if (MmAvailablePages < MM_HIGH_LIMIT) {
  4734. MiEnsureAvailablePageOrWait (NULL, NULL, OldIrql);
  4735. }
  4736. PageColor = MI_PAGE_COLOR_VA_PROCESS (VirtualAddress,
  4737. &MI_SYSTEM_PAGE_COLOR);
  4738. PageFrameIndex = MiRemoveZeroPage (PageColor);
  4739. MiInitializePfn (PageFrameIndex, PointerPpe, 1);
  4740. UNLOCK_PFN (OldIrql);
  4741. MI_MAKE_VALID_PTE (TempPte, PageFrameIndex, MM_READWRITE, PointerPpe);
  4742. MI_SET_PTE_DIRTY (TempPte);
  4743. MI_WRITE_VALID_PTE (PointerPpe, TempPte);
  4744. PointerPpe += 1;
  4745. NumberOfPages -= 1;
  4746. } while (NumberOfPages != 0);
  4747. RtlClearAllBits (&MiLargeVaBitMap);
  4748. #else
  4749. //
  4750. // Initialize process tracking so that large page system PTE mappings
  4751. // can be rippled during creation/deletion.
  4752. //
  4753. MiLargePageHyperPte = MiReserveSystemPtes (1, SystemPteSpace);
  4754. if (MiLargePageHyperPte == NULL) {
  4755. MiIssueNoPtesBugcheck (1, SystemPteSpace);
  4756. }
  4757. MiLargePageHyperPte->u.Long = 0;
  4758. InitializeListHead (&MmProcessList);
  4759. InsertTailList (&MmProcessList, &PsGetCurrentProcess()->MmProcessLinks);
  4760. #endif
  4761. return;
  4762. }
  4763. #if !defined (_WIN64)
  4764. PMMPTE MiInitialSystemPageDirectory;
  4765. #endif
  4766. PVOID
  4767. MiMapWithLargePages (
  4768. IN PFN_NUMBER PageFrameIndex,
  4769. IN PFN_NUMBER NumberOfPages,
  4770. IN ULONG Protection,
  4771. IN MEMORY_CACHING_TYPE CacheType
  4772. )
  4773. /*++
  4774. Routine Description:
  4775. This function maps the specified physical address into the non-pagable
  4776. portion of the system address space using large TB entries. If the range
  4777. cannot be mapped using large TB entries then NULL is returned and the
  4778. caller will map it with small TB entries.
  4779. Arguments:
  4780. PageFrameIndex - Supplies the starting page frame index to map.
  4781. NumberOfPages - Supplies the number of pages to map.
4782. Protection - Supplies the page protection (e.g. MM_EXECUTE_READWRITE) for the mapping.
  4783. CacheType - Supplies MmNonCached if the physical address is to be mapped
  4784. as non-cached, MmCached if the address should be cached, and
  4785. MmWriteCombined if the address should be cached and
  4786. write-combined as a frame buffer which is to be used only by
  4787. the video port driver. All other callers should use
  4788. MmUSWCCached. MmUSWCCached is available only if the PAT
  4789. feature is present and available.
  4790. For I/O device registers, this is usually specified
  4791. as MmNonCached.
  4792. Return Value:
  4793. Returns the virtual address which maps the specified physical addresses.
  4794. The value NULL is returned if sufficient large virtual address space for
  4795. the mapping could not be found.
  4796. Environment:
4797. Kernel mode. Should be called at IRQL of APC_LEVEL or below, but
4798. unfortunately callers are coming in at DISPATCH_LEVEL and it's too late to
4799. change the rules now. This means you can never make this routine pagable.
  4800. --*/
  4801. {
  4802. MMPTE TempPde;
  4803. PMMPTE PointerPde;
  4804. PMMPTE LastPde;
  4805. PVOID BaseVa;
  4806. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  4807. KIRQL OldIrql;
  4808. LOGICAL IoMapping;
  4809. #if defined(_WIN64)
  4810. ULONG StartPosition;
  4811. ULONG NumberOfBits;
  4812. #else
  4813. PMMPTE TargetPde;
  4814. PMMPTE TargetPdeBase;
  4815. PMMPTE PointerPdeBase;
  4816. PFN_NUMBER PageDirectoryIndex;
  4817. PEPROCESS Process;
  4818. PEPROCESS CurrentProcess;
  4819. PLIST_ENTRY NextEntry;
  4820. #endif
  4821. #if defined (_X86PAE_)
  4822. ULONG i;
  4823. PMMPTE PaeTop;
  4824. #endif
  4825. ASSERT ((NumberOfPages % (MM_MINIMUM_VA_FOR_LARGE_PAGE >> PAGE_SHIFT)) == 0);
  4826. ASSERT ((PageFrameIndex % (MM_MINIMUM_VA_FOR_LARGE_PAGE >> PAGE_SHIFT)) == 0);
  4827. #ifdef _X86_
  4828. if ((KeFeatureBits & KF_LARGE_PAGE) == 0) {
  4829. return NULL;
  4830. }
  4831. #endif
  4832. CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, TRUE);
  4833. IoMapping = !MI_IS_PFN (PageFrameIndex);
  4834. #if defined(_WIN64)
  4835. NumberOfBits = (ULONG)(NumberOfPages / (MM_MINIMUM_VA_FOR_LARGE_PAGE >> PAGE_SHIFT));
  4836. ExAcquireSpinLock (&MiLargePageLock, &OldIrql);
  4837. StartPosition = RtlFindClearBitsAndSet (&MiLargeVaBitMap,
  4838. NumberOfBits,
  4839. 0);
  4840. ExReleaseSpinLock (&MiLargePageLock, OldIrql);
  4841. if (StartPosition == NO_BITS_FOUND) {
  4842. return NULL;
  4843. }
  4844. BaseVa = (PVOID)((PCHAR)MiLargeVaStart + (StartPosition * MM_MINIMUM_VA_FOR_LARGE_PAGE));
  4845. if (IoMapping) {
  4846. CacheAttribute = MiInsertIoSpaceMap (BaseVa,
  4847. PageFrameIndex,
  4848. NumberOfPages,
  4849. CacheAttribute);
  4850. if (CacheAttribute == MiNotMapped) {
  4851. ExAcquireSpinLock (&MiLargePageLock, &OldIrql);
  4852. RtlClearBits (&MiLargeVaBitMap, StartPosition, NumberOfBits);
  4853. ExReleaseSpinLock (&MiLargePageLock, OldIrql);
  4854. return NULL;
  4855. }
  4856. }
  4857. PointerPde = MiGetPdeAddress (BaseVa);
  4858. #else
  4859. PointerPde = MiReserveAlignedSystemPtes ((ULONG)NumberOfPages,
  4860. SystemPteSpace,
  4861. MM_MINIMUM_VA_FOR_LARGE_PAGE);
  4862. if (PointerPde == NULL) {
  4863. return NULL;
  4864. }
  4865. ASSERT (BYTE_OFFSET (PointerPde) == 0);
  4866. BaseVa = MiGetVirtualAddressMappedByPte (PointerPde);
  4867. ASSERT (((ULONG_PTR)BaseVa & (MM_VA_MAPPED_BY_PDE - 1)) == 0);
  4868. if (IoMapping) {
  4869. CacheAttribute = MiInsertIoSpaceMap (BaseVa,
  4870. PageFrameIndex,
  4871. NumberOfPages,
  4872. CacheAttribute);
  4873. if (CacheAttribute == MiNotMapped) {
  4874. MiReleaseSystemPtes (PointerPde,
  4875. NumberOfPages,
  4876. SystemPteSpace);
  4877. return NULL;
  4878. }
  4879. }
  4880. PointerPde = MiGetPteAddress (PointerPde);
  4881. PointerPdeBase = PointerPde;
  4882. #endif
  4883. MI_MAKE_VALID_PTE (TempPde,
  4884. PageFrameIndex,
  4885. Protection,
  4886. PointerPde);
  4887. MI_SET_PTE_DIRTY (TempPde);
  4888. MI_SET_ACCESSED_IN_PTE (&TempPde, 1);
  4889. #if defined(_X86PAE_)
  4890. if (MiUseGlobalBitInLargePdes == TRUE) {
  4891. TempPde.u.Hard.Global = 1;
  4892. }
  4893. #elif defined(_X86_) || defined (_AMD64_)
  4894. if (ValidKernelPde.u.Long & MM_PTE_GLOBAL_MASK) {
  4895. TempPde.u.Hard.Global = 1;
  4896. }
  4897. #endif
  4898. MI_MAKE_PDE_MAP_LARGE_PAGE (&TempPde);
  4899. switch (CacheAttribute) {
  4900. case MiNonCached:
  4901. MI_DISABLE_LARGE_PTE_CACHING (TempPde);
  4902. break;
  4903. case MiCached:
  4904. break;
  4905. case MiWriteCombined:
  4906. MI_SET_LARGE_PTE_WRITE_COMBINE (TempPde);
  4907. break;
  4908. default:
  4909. ASSERT (FALSE);
  4910. break;
  4911. }
  4912. LastPde = PointerPde + (NumberOfPages / (MM_VA_MAPPED_BY_PDE >> PAGE_SHIFT));
  4913. MI_PREPARE_FOR_NONCACHED (CacheAttribute);
  4914. #if defined(_WIN64)
  4915. while (PointerPde < LastPde) {
  4916. ASSERT (PointerPde->u.Long == 0);
  4917. MI_WRITE_VALID_PTE (PointerPde, TempPde);
  4918. TempPde.u.Hard.PageFrameNumber += (MM_VA_MAPPED_BY_PDE >> PAGE_SHIFT);
  4919. PointerPde += 1;
  4920. }
  4921. #else
  4922. CurrentProcess = PsGetCurrentProcess ();
  4923. LOCK_EXPANSION2 (OldIrql);
  4924. NextEntry = MmProcessList.Flink;
  4925. while (NextEntry != &MmProcessList) {
  4926. Process = CONTAINING_RECORD (NextEntry, EPROCESS, MmProcessLinks);
  4927. // Two process states must be carefully handled here -
  4928. //
  4929. // 1. Processes that are just being created where they are still
  4930. // initializing their page directory, etc.
  4931. //
  4932. // 2. Processes that are outswapped.
  4933. //
  4934. if (Process->Flags & PS_PROCESS_FLAGS_PDE_UPDATE_NEEDED) {
  4935. //
  4936. // The process is further along in process creation or is still
  4937. // outswapped. Either way, an update is already queued so our
  4938. // current changes will be processed later anyway before the process
  4939. // can run so no need to do anything here.
  4940. //
  4941. NOTHING;
  4942. }
  4943. else if (Process->Pcb.DirectoryTableBase[0] == 0) {
  4944. //
  4945. // This process is being created and there is no way to tell where
  4946. // during creation it is (ie: it may be filling PDEs right now!).
  4947. // So just mark the process as needing a PDE update at the
  4948. // beginning of MmInitializeProcessAddressSpace.
  4949. //
  4950. PS_SET_BITS (&Process->Flags, PS_PROCESS_FLAGS_PDE_UPDATE_NEEDED);
  4951. }
  4952. else if (Process->Flags & PS_PROCESS_FLAGS_OUTSWAPPED) {
  4953. //
  4954. // This process is outswapped. Even though the page directory
  4955. // may still be in transition, the process must be inswapped
  4956. // before it can run again, so just mark the process as needing
  4957. // a PDE update at that time.
  4958. //
  4959. PS_SET_BITS (&Process->Flags, PS_PROCESS_FLAGS_PDE_UPDATE_NEEDED);
  4960. }
  4961. else {
  4962. //
  4963. // This process is resident so update the relevant PDEs in its
  4964. // address space right now.
  4965. //
  4966. PointerPde = PointerPdeBase;
  4967. TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
  4968. #if !defined (_X86PAE_)
  4969. PageDirectoryIndex = Process->Pcb.DirectoryTableBase[0] >> PAGE_SHIFT;
  4970. #else
  4971. //
  4972. // The range cannot cross PAE PDPTE entries, but we do need to
  4973. // locate which entry it does lie in.
  4974. //
  4975. PaeTop = Process->PaeTop;
  4976. i = (((ULONG_PTR) PointerPde - PDE_BASE) >> PAGE_SHIFT);
  4977. ASSERT ((PaeTop + i)->u.Hard.Valid == 1);
  4978. PageDirectoryIndex = (PFN_NUMBER)((PaeTop + i)->u.Hard.PageFrameNumber);
  4979. #endif
  4980. TargetPdeBase = (PMMPTE) MiMapPageInHyperSpaceAtDpc (
  4981. CurrentProcess,
  4982. PageDirectoryIndex);
  4983. TargetPde = (PMMPTE)((PCHAR) TargetPdeBase + BYTE_OFFSET (PointerPde));
  4984. while (PointerPde < LastPde) {
  4985. ASSERT (TargetPde->u.Long != 0);
  4986. ASSERT (TargetPde->u.Hard.Valid != 0);
  4987. *TargetPde = TempPde;
  4988. TempPde.u.Hard.PageFrameNumber += (MM_VA_MAPPED_BY_PDE >> PAGE_SHIFT);
  4989. PointerPde += 1;
  4990. TargetPde += 1;
  4991. }
  4992. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, TargetPdeBase);
  4993. }
  4994. NextEntry = NextEntry->Flink;
  4995. }
  4996. UNLOCK_EXPANSION2 (OldIrql);
  4997. #endif
  4998. MI_SWEEP_CACHE (CacheAttribute, BaseVa, NumberOfPages << PAGE_SHIFT);
  4999. //
  5000. // Force all processors to use the latest mappings.
  5001. //
  5002. KeFlushEntireTb (TRUE, TRUE);
  5003. return BaseVa;
  5004. }
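//
// Illustrative sketch (an addition, not part of the original module): on
// 64-bit platforms the VA reservation done by MiMapWithLargePages above is a
// first-fit run search in a bitmap where one bit represents one
// MM_MINIMUM_VA_FOR_LARGE_PAGE-sized slot. The ExampleBitMap and
// ExampleVaStart parameters are hypothetical stand-ins for MiLargeVaBitMap
// and MiLargeVaStart, and the caller is assumed to serialize (the real code
// holds MiLargePageLock around the bitmap operations).
//

static PVOID
MiExampleReserveLargeVa (
    IN PRTL_BITMAP ExampleBitMap,
    IN PVOID ExampleVaStart,
    IN PFN_NUMBER NumberOfPages
    )
{
    ULONG NumberOfBits;
    ULONG StartPosition;

    //
    // Convert the page count into large-page slots - the caller guarantees
    // an exact multiple, as the ASSERTs in MiMapWithLargePages enforce.
    //

    NumberOfBits = (ULONG)(NumberOfPages /
                        (MM_MINIMUM_VA_FOR_LARGE_PAGE >> PAGE_SHIFT));

    //
    // Find a clear run of slots and mark it in use.
    //

    StartPosition = RtlFindClearBitsAndSet (ExampleBitMap, NumberOfBits, 0);

    if (StartPosition == NO_BITS_FOUND) {
        return NULL;
    }

    return (PVOID)((PCHAR)ExampleVaStart +
                ((ULONG_PTR)StartPosition * MM_MINIMUM_VA_FOR_LARGE_PAGE));
}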
  5005. VOID
  5006. MiUnmapLargePages (
  5007. IN PVOID BaseAddress,
  5008. IN SIZE_T NumberOfBytes
  5009. )
  5010. /*++
  5011. Routine Description:
  5012. This function unmaps a range of physical addresses which were previously
  5013. mapped via MiMapWithLargePages.
  5014. Arguments:
  5015. BaseAddress - Supplies the base virtual address where the physical
  5016. address was previously mapped.
  5017. NumberOfBytes - Supplies the number of bytes which were mapped.
  5018. Return Value:
  5019. None.
  5020. Environment:
5021. Kernel mode. Should be called at IRQL of APC_LEVEL or below, but
5022. unfortunately callers are coming in at DISPATCH_LEVEL and it's too late to
5023. change the rules now. This means you can never make this routine pagable.
  5024. --*/
  5025. {
  5026. PMMPTE PointerPde;
  5027. PMMPTE LastPde;
  5028. KIRQL OldIrql;
  5029. #if defined(_WIN64)
  5030. ULONG StartPosition;
  5031. ULONG NumberOfBits;
  5032. #else
  5033. PMMPTE RestorePde;
  5034. PMMPTE TargetPde;
  5035. PMMPTE TargetPdeBase;
  5036. PMMPTE PointerPdeBase;
  5037. PFN_NUMBER PageDirectoryIndex;
  5038. PEPROCESS Process;
  5039. PEPROCESS CurrentProcess;
  5040. PLIST_ENTRY NextEntry;
  5041. #endif
  5042. #if defined (_X86PAE_)
  5043. PMMPTE PaeTop;
  5044. ULONG i;
  5045. #endif
  5046. ASSERT (NumberOfBytes != 0);
  5047. ASSERT (((ULONG_PTR)BaseAddress % MM_MINIMUM_VA_FOR_LARGE_PAGE) == 0);
  5048. ASSERT ((NumberOfBytes % MM_MINIMUM_VA_FOR_LARGE_PAGE) == 0);
  5049. #if defined(_WIN64)
  5050. NumberOfBits = (ULONG)(NumberOfBytes / MM_MINIMUM_VA_FOR_LARGE_PAGE);
  5051. StartPosition = (ULONG)(((ULONG_PTR)BaseAddress - (ULONG_PTR)MiLargeVaStart) / MM_MINIMUM_VA_FOR_LARGE_PAGE);
  5052. ASSERT (RtlAreBitsSet (&MiLargeVaBitMap, StartPosition, NumberOfBits) == TRUE);
  5053. #endif
  5054. PointerPde = MiGetPdeAddress (BaseAddress);
  5055. LastPde = PointerPde + (NumberOfBytes / MM_VA_MAPPED_BY_PDE);
  5056. #if defined(_WIN64)
  5057. while (PointerPde < LastPde) {
  5058. ASSERT (PointerPde->u.Hard.Valid != 0);
  5059. ASSERT (PointerPde->u.Long != 0);
  5060. ASSERT (MI_PDE_MAPS_LARGE_PAGE (PointerPde));
  5061. MI_WRITE_INVALID_PTE (PointerPde, ZeroKernelPte);
  5062. PointerPde += 1;
  5063. }
  5064. #else
  5065. PointerPdeBase = PointerPde;
  5066. CurrentProcess = PsGetCurrentProcess ();
  5067. LOCK_EXPANSION2 (OldIrql);
  5068. NextEntry = MmProcessList.Flink;
  5069. while (NextEntry != &MmProcessList) {
  5070. Process = CONTAINING_RECORD (NextEntry, EPROCESS, MmProcessLinks);
  5071. // Two process states must be carefully handled here -
  5072. //
  5073. // 1. Processes that are just being created where they are still
  5074. // initializing their page directory, etc.
  5075. //
  5076. // 2. Processes that are outswapped.
  5077. //
  5078. if (Process->Flags & PS_PROCESS_FLAGS_PDE_UPDATE_NEEDED) {
  5079. //
  5080. // The process is further along in process creation or is still
  5081. // outswapped. Either way, an update is already queued so our
  5082. // current changes will be processed later anyway before the process
  5083. // can run so no need to do anything here.
  5084. //
  5085. NOTHING;
  5086. }
  5087. else if (Process->Pcb.DirectoryTableBase[0] == 0) {
  5088. //
  5089. // This process is being created and there is no way to tell where
  5090. // during creation it is (ie: it may be filling PDEs right now!).
  5091. // So just mark the process as needing a PDE update at the
  5092. // beginning of MmInitializeProcessAddressSpace.
  5093. //
  5094. PS_SET_BITS (&Process->Flags, PS_PROCESS_FLAGS_PDE_UPDATE_NEEDED);
  5095. }
  5096. else if (Process->Flags & PS_PROCESS_FLAGS_OUTSWAPPED) {
  5097. //
  5098. // This process is outswapped. Even though the page directory
  5099. // may still be in transition, the process must be inswapped
  5100. // before it can run again, so just mark the process as needing
  5101. // a PDE update at that time.
  5102. //
  5103. PS_SET_BITS (&Process->Flags, PS_PROCESS_FLAGS_PDE_UPDATE_NEEDED);
  5104. }
  5105. else {
  5106. //
  5107. // This process is resident so update the relevant PDEs in its
  5108. // address space right now.
  5109. //
  5110. PointerPde = PointerPdeBase;
  5111. #if !defined (_X86PAE_)
  5112. PageDirectoryIndex = Process->Pcb.DirectoryTableBase[0] >> PAGE_SHIFT;
  5113. #else
  5114. //
  5115. // The range cannot cross PAE PDPTE entries, but we do need to
  5116. // locate which entry it does lie in.
  5117. //
  5118. PaeTop = Process->PaeTop;
  5119. i = (((ULONG_PTR) PointerPde - PDE_BASE) >> PAGE_SHIFT);
  5120. ASSERT ((PaeTop + i)->u.Hard.Valid == 1);
  5121. PageDirectoryIndex = (PFN_NUMBER)((PaeTop + i)->u.Hard.PageFrameNumber);
  5122. #endif
  5123. TargetPdeBase = (PMMPTE) MiMapPageInHyperSpaceAtDpc (
  5124. CurrentProcess,
  5125. PageDirectoryIndex);
  5126. TargetPde = (PMMPTE)((PCHAR) TargetPdeBase + BYTE_OFFSET (PointerPde));
  5127. RestorePde = MiInitialSystemPageDirectory + (PointerPde - (PMMPTE)PDE_BASE);
  5128. while (PointerPde < LastPde) {
  5129. ASSERT (TargetPde->u.Long != 0);
  5130. ASSERT (TargetPde->u.Hard.Valid != 0);
  5131. ASSERT (RestorePde->u.Long != 0);
  5132. ASSERT (RestorePde->u.Hard.Valid != 0);
  5133. *TargetPde = *RestorePde;
  5134. PointerPde += 1;
  5135. TargetPde += 1;
  5136. RestorePde += 1;
  5137. }
  5138. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, TargetPdeBase);
  5139. }
  5140. NextEntry = NextEntry->Flink;
  5141. }
  5142. UNLOCK_EXPANSION2 (OldIrql);
  5143. #endif
  5144. //
  5145. // Force all processors to use the latest mappings.
  5146. //
  5147. KeFlushEntireTb (TRUE, TRUE);
  5148. #if defined(_WIN64)
  5149. ExAcquireSpinLock (&MiLargePageLock, &OldIrql);
  5150. RtlClearBits (&MiLargeVaBitMap, StartPosition, NumberOfBits);
  5151. ExReleaseSpinLock (&MiLargePageLock, OldIrql);
  5152. #else
  5153. PointerPde = MiGetVirtualAddressMappedByPte (PointerPdeBase);
  5154. MiReleaseSystemPtes (PointerPde,
  5155. (ULONG)(NumberOfBytes >> PAGE_SHIFT),
  5156. SystemPteSpace);
  5157. #endif
  5158. return;
  5159. }
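//
// Illustrative counterpart to the sketch above (an addition, not part of the
// original module): releasing a large-page VA reservation is the inverse
// bitmap operation, mirroring what MiUnmapLargePages does on 64-bit
// platforms. Parameter names are hypothetical and the caller serializes.
//

static VOID
MiExampleReleaseLargeVa (
    IN PRTL_BITMAP ExampleBitMap,
    IN PVOID ExampleVaStart,
    IN PVOID BaseAddress,
    IN SIZE_T NumberOfBytes
    )
{
    ULONG StartPosition;
    ULONG NumberOfBits;

    //
    // Recompute the slot run from the virtual address, then clear it.
    //

    StartPosition = (ULONG)(((ULONG_PTR)BaseAddress -
            (ULONG_PTR)ExampleVaStart) / MM_MINIMUM_VA_FOR_LARGE_PAGE);

    NumberOfBits = (ULONG)(NumberOfBytes / MM_MINIMUM_VA_FOR_LARGE_PAGE);

    ASSERT (RtlAreBitsSet (ExampleBitMap, StartPosition, NumberOfBits));

    RtlClearBits (ExampleBitMap, StartPosition, NumberOfBits);
}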
  5160. PVOID
  5161. MmMapIoSpace (
  5162. IN PHYSICAL_ADDRESS PhysicalAddress,
  5163. IN SIZE_T NumberOfBytes,
  5164. IN MEMORY_CACHING_TYPE CacheType
  5165. )
  5166. /*++
  5167. Routine Description:
  5168. This function maps the specified physical address into the non-pagable
  5169. portion of the system address space.
  5170. Arguments:
  5171. PhysicalAddress - Supplies the starting physical address to map.
  5172. NumberOfBytes - Supplies the number of bytes to map.
  5173. CacheType - Supplies MmNonCached if the physical address is to be mapped
  5174. as non-cached, MmCached if the address should be cached, and
  5175. MmWriteCombined if the address should be cached and
  5176. write-combined as a frame buffer which is to be used only by
  5177. the video port driver. All other callers should use
  5178. MmUSWCCached. MmUSWCCached is available only if the PAT
  5179. feature is present and available.
  5180. For I/O device registers, this is usually specified
  5181. as MmNonCached.
  5182. Return Value:
  5183. Returns the virtual address which maps the specified physical addresses.
  5184. The value NULL is returned if sufficient virtual address space for
  5185. the mapping could not be found.
  5186. Environment:
5187. Kernel mode. Should be called at IRQL of APC_LEVEL or below, but
5188. unfortunately callers are coming in at DISPATCH_LEVEL and it's too late to
5189. change the rules now. This means you can never make this routine pagable.
  5190. --*/
  5191. {
  5192. KIRQL OldIrql;
  5193. CSHORT IoMapping;
  5194. PMMPFN Pfn1;
  5195. PFN_NUMBER NumberOfPages;
  5196. PFN_NUMBER PageFrameIndex;
  5197. PFN_NUMBER LastPageFrameIndex;
  5198. PMMPTE PointerPte;
  5199. PVOID BaseVa;
  5200. MMPTE TempPte;
  5201. PMDL TempMdl;
  5202. PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + 1];
  5203. PVOID CallingAddress;
  5204. PVOID CallersCaller;
  5205. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  5206. //
5207. // For compatibility with callers from when CacheType used to be passed as a
5208. // BOOLEAN, mask off the upper bits (TRUE == MmCached, FALSE == MmNonCached).
  5209. //
  5210. CacheType &= 0xFF;
  5211. if (CacheType >= MmMaximumCacheType) {
  5212. return NULL;
  5213. }
  5214. #if !defined (_MI_MORE_THAN_4GB_)
  5215. ASSERT (PhysicalAddress.HighPart == 0);
  5216. #endif
  5217. ASSERT (NumberOfBytes != 0);
  5218. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (PhysicalAddress.LowPart,
  5219. NumberOfBytes);
  5220. //
  5221. // See if the first frame is in the PFN database and if so, they all must
  5222. // be. Note since the caller is mapping the frames, they must already be
  5223. // locked so the PFN lock is not needed to protect against a hot-remove.
  5224. //
  5225. PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
  5226. IoMapping = (CSHORT) (!MI_IS_PFN (PageFrameIndex));
  5227. CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, IoMapping);
  5228. if (IoMapping) {
  5229. //
  5230. // If the size and start address are an exact multiple of the
  5231. // minimum large page size, try to use large pages to map the request.
  5232. //
  5233. if (((PhysicalAddress.LowPart % MM_MINIMUM_VA_FOR_LARGE_PAGE) == 0) &&
  5234. ((NumberOfBytes % MM_MINIMUM_VA_FOR_LARGE_PAGE) == 0)) {
  5235. BaseVa = MiMapWithLargePages (PageFrameIndex,
  5236. NumberOfPages,
  5237. MM_EXECUTE_READWRITE,
  5238. CacheType);
  5239. if (BaseVa != NULL) {
  5240. goto Done;
  5241. }
  5242. }
  5243. Pfn1 = NULL;
  5244. }
  5245. else {
  5246. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  5247. }
  5248. PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages, SystemPteSpace);
  5249. if (PointerPte == NULL) {
  5250. return NULL;
  5251. }
  5252. BaseVa = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
  5253. PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
  5254. if (Pfn1 == NULL) {
  5255. CacheAttribute = MiInsertIoSpaceMap (BaseVa,
  5256. PageFrameIndex,
  5257. NumberOfPages,
  5258. CacheAttribute);
  5259. if (CacheAttribute == MiNotMapped) {
  5260. MiReleaseSystemPtes (PointerPte, (ULONG) NumberOfPages, SystemPteSpace);
  5261. return NULL;
  5262. }
  5263. }
  5264. if (CacheAttribute != MiCached) {
  5265. //
  5266. // If a noncachable mapping is requested, none of the pages in the
  5267. // requested MDL can reside in a large page. Otherwise we would be
  5268. // creating an incoherent overlapping TB entry as the same physical
  5269. // page would be mapped by 2 different TB entries with different
  5270. // cache attributes.
  5271. //
  5272. LastPageFrameIndex = PageFrameIndex + NumberOfPages;
  5273. LOCK_PFN2 (OldIrql);
  5274. do {
  5275. if (MI_PAGE_FRAME_INDEX_MUST_BE_CACHED (PageFrameIndex)) {
  5276. UNLOCK_PFN2 (OldIrql);
  5277. MiNonCachedCollisions += 1;
  5278. if (Pfn1 == NULL) {
  5279. MiRemoveIoSpaceMap (BaseVa, NumberOfPages);
  5280. }
  5281. MiReleaseSystemPtes (PointerPte,
  5282. (ULONG) NumberOfPages,
  5283. SystemPteSpace);
  5284. return NULL;
  5285. }
  5286. PageFrameIndex += 1;
  5287. } while (PageFrameIndex < LastPageFrameIndex);
  5288. UNLOCK_PFN2 (OldIrql);
  5289. }
  5290. BaseVa = (PVOID)((PCHAR)BaseVa + BYTE_OFFSET(PhysicalAddress.LowPart));
  5291. TempPte = ValidKernelPte;
  5292. switch (CacheAttribute) {
  5293. case MiNonCached:
  5294. MI_DISABLE_CACHING (TempPte);
  5295. break;
  5296. case MiCached:
  5297. break;
  5298. case MiWriteCombined:
  5299. MI_SET_PTE_WRITE_COMBINE (TempPte);
  5300. break;
  5301. default:
  5302. ASSERT (FALSE);
  5303. break;
  5304. }
  5305. #if defined(_X86_)
  5306. //
  5307. // Set the physical range to the proper caching type. If the PAT feature
  5308. // is supported, then we just use the caching type in the PTE. Otherwise
  5309. // modify the MTRRs if applicable.
  5310. //
  5311. // Note if the cache request is for cached or noncached, don't waste
  5312. // an MTRR on this range because the PTEs can be encoded to provide
  5313. // equivalent functionality.
  5314. //
  5315. if ((MiWriteCombiningPtes == FALSE) && (CacheAttribute == MiWriteCombined)) {
  5316. //
  5317. // If the address is an I/O space address, use MTRRs if possible.
  5318. //
  5319. NTSTATUS Status;
  5320. //
  5321. // If the address is a memory address, don't risk using MTRRs because
  5322. // other pages in the range are likely mapped with differing attributes
  5323. // in the TB and we must not add a conflicting range.
  5324. //
5325. if (Pfn1 != NULL) {
5326. //
5327. // Pfn1 non-NULL means RAM, so no I/O space map entry was inserted above.
5328. //
5329. MiReleaseSystemPtes (PointerPte, (ULONG) NumberOfPages, SystemPteSpace);
5330. return NULL;
5331. }
  5332. //
  5333. // Since the attribute may have been overridden (due to a collision
5334. // with a prior existing mapping), make sure the CacheType is also
  5335. // consistent before editing the MTRRs.
  5336. //
  5337. CacheType = MmWriteCombined;
  5338. Status = KeSetPhysicalCacheTypeRange (PhysicalAddress,
  5339. NumberOfBytes,
  5340. CacheType);
  5341. if (!NT_SUCCESS(Status)) {
  5342. //
  5343. // There's still a problem, fail the request.
  5344. //
  5345. if (Pfn1 == NULL) {
  5346. MiRemoveIoSpaceMap (BaseVa, NumberOfPages);
  5347. }
  5348. MiReleaseSystemPtes(PointerPte, NumberOfPages, SystemPteSpace);
  5349. return NULL;
  5350. }
  5351. //
  5352. // Override the write combine (weak UC) bits in the PTE and
  5353. // instead use a cached attribute. This is because the processor
  5354. // will use the least cachable (ie: functionally safer) attribute
  5355. // of the PTE & MTRR to use - so specifying fully cached for the PTE
  5356. // ensures that the MTRR value will win out.
  5357. //
  5358. TempPte = ValidKernelPte;
  5359. }
  5360. #endif
  5361. MI_ADD_EXECUTE_TO_VALID_PTE_IF_PAE (TempPte);
  5362. PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
  5363. ASSERT ((Pfn1 == MI_PFN_ELEMENT (PageFrameIndex)) || (Pfn1 == NULL));
  5364. OldIrql = HIGH_LEVEL;
  5365. MI_PREPARE_FOR_NONCACHED (CacheAttribute);
  5366. do {
  5367. ASSERT (PointerPte->u.Hard.Valid == 0);
  5368. if (Pfn1 != NULL) {
  5369. ASSERT ((Pfn1->u3.e2.ReferenceCount != 0) ||
  5370. ((Pfn1->u3.e1.Rom == 1) && (CacheType == MmCached)));
  5371. TempPte = ValidKernelPte;
  5372. MI_ADD_EXECUTE_TO_VALID_PTE_IF_PAE (TempPte);
  5373. switch (Pfn1->u3.e1.CacheAttribute) {
  5374. case MiCached:
  5375. if (CacheAttribute != MiCached) {
  5376. //
  5377. // The caller asked for a noncached or writecombined
  5378. // mapping, but the page is already mapped cached by
  5379. // someone else. Override the caller's request in
  5380. // order to keep the TB page attribute coherent.
  5381. //
  5382. MiCacheOverride[0] += 1;
  5383. }
  5384. break;
  5385. case MiNonCached:
  5386. if (CacheAttribute != MiNonCached) {
  5387. //
  5388. // The caller asked for a cached or writecombined
  5389. // mapping, but the page is already mapped noncached
  5390. // by someone else. Override the caller's request
  5391. // in order to keep the TB page attribute coherent.
  5392. //
  5393. MiCacheOverride[1] += 1;
  5394. }
  5395. MI_DISABLE_CACHING (TempPte);
  5396. break;
  5397. case MiWriteCombined:
  5398. if (CacheAttribute != MiWriteCombined) {
  5399. //
  5400. // The caller asked for a cached or noncached
  5401. // mapping, but the page is already mapped
  5402. // writecombined by someone else. Override the
  5403. // caller's request in order to keep the TB page
  5404. // attribute coherent.
  5405. //
  5406. MiCacheOverride[2] += 1;
  5407. }
  5408. MI_SET_PTE_WRITE_COMBINE (TempPte);
  5409. break;
  5410. case MiNotMapped:
  5411. //
  5412. // This better be for a page allocated with
  5413. // MmAllocatePagesForMdl. Otherwise it might be a
  5414. // page on the freelist which could subsequently be
  5415. // given out with a different attribute !
  5416. //
  5417. #if defined (_MI_MORE_THAN_4GB_)
  5418. ASSERT ((Pfn1->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME) ||
  5419. (Pfn1->PteAddress == (PVOID) (ULONG_PTR)(X64K | 0x1)) ||
  5420. (Pfn1->u4.PteFrame == MI_MAGIC_4GB_RECLAIM));
  5421. #else
  5422. ASSERT ((Pfn1->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME) ||
  5423. (Pfn1->PteAddress == (PVOID) (ULONG_PTR)(X64K | 0x1)));
  5424. #endif
  5425. if (OldIrql == HIGH_LEVEL) {
  5426. LOCK_PFN2 (OldIrql);
  5427. }
  5428. switch (CacheAttribute) {
  5429. case MiCached:
  5430. Pfn1->u3.e1.CacheAttribute = MiCached;
  5431. break;
  5432. case MiNonCached:
  5433. Pfn1->u3.e1.CacheAttribute = MiNonCached;
  5434. MI_DISABLE_CACHING (TempPte);
  5435. break;
  5436. case MiWriteCombined:
  5437. Pfn1->u3.e1.CacheAttribute = MiWriteCombined;
  5438. MI_SET_PTE_WRITE_COMBINE (TempPte);
  5439. break;
  5440. default:
  5441. ASSERT (FALSE);
  5442. break;
  5443. }
  5444. break;
  5445. default:
  5446. ASSERT (FALSE);
  5447. break;
  5448. }
  5449. Pfn1 += 1;
  5450. }
  5451. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  5452. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  5453. PointerPte += 1;
  5454. PageFrameIndex += 1;
  5455. NumberOfPages -= 1;
  5456. } while (NumberOfPages != 0);
  5457. if (OldIrql != HIGH_LEVEL) {
  5458. UNLOCK_PFN2 (OldIrql);
  5459. }
  5460. MI_SWEEP_CACHE (CacheAttribute, BaseVa, NumberOfBytes);
  5461. Done:
  5462. if (MmTrackPtes & 0x1) {
  5463. RtlGetCallersAddress (&CallingAddress, &CallersCaller);
  5464. TempMdl = (PMDL) MdlHack;
  5465. TempMdl->MappedSystemVa = BaseVa;
  5466. PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
  5467. *(PPFN_NUMBER)(TempMdl + 1) = PageFrameIndex;
  5468. TempMdl->StartVa = (PVOID)(PAGE_ALIGN((ULONG_PTR)PhysicalAddress.QuadPart));
  5469. TempMdl->ByteOffset = BYTE_OFFSET(PhysicalAddress.LowPart);
  5470. TempMdl->ByteCount = (ULONG)NumberOfBytes;
  5471. CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, IoMapping);
  5472. MiInsertPteTracker (TempMdl,
  5473. 1,
  5474. IoMapping,
  5475. CacheAttribute,
  5476. CallingAddress,
  5477. CallersCaller);
  5478. }
  5479. return BaseVa;
  5480. }
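//
// Illustrative driver-side usage (an addition, not part of the original
// module): a typical MmMapIoSpace consumer maps a device register window
// noncached and unmaps it when done. The physical address, window size and
// register offset below are hypothetical device values.
//

static NTSTATUS
MiExampleMapDeviceRegisters (
    VOID
    )
{
    PHYSICAL_ADDRESS PhysicalAddress;
    PUCHAR RegisterBase;
    ULONG DeviceStatus;

    PhysicalAddress.QuadPart = 0xFED00000;          // hypothetical BAR

    //
    // Device registers are mapped noncached so every access reaches the
    // hardware instead of the processor caches.
    //

    RegisterBase = (PUCHAR) MmMapIoSpace (PhysicalAddress,
                                          0x1000,
                                          MmNonCached);

    if (RegisterBase == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    //
    // Access registers through the READ/WRITE_REGISTER macros so the
    // required ordering semantics are applied.
    //

    DeviceStatus = READ_REGISTER_ULONG ((PULONG)(RegisterBase + 0x10));

    if (DeviceStatus == 0xFFFFFFFF) {

        //
        // All ones usually means the device is absent or hung.
        //

        MmUnmapIoSpace (RegisterBase, 0x1000);
        return STATUS_DEVICE_NOT_READY;
    }

    //
    // The unmap length must match the length passed to MmMapIoSpace.
    //

    MmUnmapIoSpace (RegisterBase, 0x1000);
    return STATUS_SUCCESS;
}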
  5481. VOID
  5482. MmUnmapIoSpace (
  5483. IN PVOID BaseAddress,
  5484. IN SIZE_T NumberOfBytes
  5485. )
  5486. /*++
  5487. Routine Description:
  5488. This function unmaps a range of physical addresses which were previously
  5489. mapped via an MmMapIoSpace function call.
  5490. Arguments:
  5491. BaseAddress - Supplies the base virtual address where the physical
  5492. address was previously mapped.
  5493. NumberOfBytes - Supplies the number of bytes which were mapped.
  5494. Return Value:
  5495. None.
  5496. Environment:
5497. Kernel mode. Should be called at IRQL of APC_LEVEL or below, but
5498. unfortunately callers are coming in at DISPATCH_LEVEL and it's too late to
5499. change the rules now. This means you can never make this routine pagable.
  5500. --*/
  5501. {
  5502. PMMPTE PointerPte;
  5503. PMMPTE PointerPde;
  5504. PFN_NUMBER NumberOfPages;
  5505. PFN_NUMBER PageFrameIndex;
  5506. ASSERT (NumberOfBytes != 0);
  5507. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (BaseAddress, NumberOfBytes);
  5508. if (MmTrackPtes & 0x1) {
  5509. MiRemovePteTracker (NULL, BaseAddress, NumberOfPages);
  5510. }
  5511. PointerPde = MiGetPdeAddress (BaseAddress);
  5512. if (MI_PDE_MAPS_LARGE_PAGE (PointerPde) == 0) {
  5513. PointerPte = MiGetPteAddress (BaseAddress);
  5514. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  5515. if (!MI_IS_PFN (PageFrameIndex)) {
  5516. //
  5517. // The PTEs must be invalidated and the TB flushed *BEFORE*
  5518. // removing the I/O space map. This is because another
  5519. // thread can immediately map the same I/O space with another
  5520. // set of PTEs (and conflicting TB attributes) before we
  5521. // call MiReleaseSystemPtes.
  5522. //
  5523. MiZeroMemoryPte (PointerPte, NumberOfPages);
  5524. if (NumberOfPages == 1) {
  5525. KeFlushSingleTb (BaseAddress, TRUE);
  5526. }
  5527. else {
  5528. KeFlushEntireTb (TRUE, TRUE);
  5529. }
  5530. MiRemoveIoSpaceMap (BaseAddress, NumberOfPages);
  5531. }
  5532. MiReleaseSystemPtes (PointerPte, (ULONG)NumberOfPages, SystemPteSpace);
  5533. }
  5534. else {
  5535. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPde) +
  5536. MiGetPteOffset (BaseAddress);
  5537. if (!MI_IS_PFN (PageFrameIndex)) {
  5538. MiRemoveIoSpaceMap (BaseAddress, NumberOfPages);
  5539. }
  5540. //
  5541. // There is a race here because the I/O space mapping entry has been
  5542. // removed but the TB has not yet been flushed (nor have the PDEs
  5543. // been invalidated). Another thread could request a map in between
  5544. // the above removal and the below invalidation. If this map occurs
  5545. // where a driver provides the wrong page attribute it will not be
  5546. // detected. This is not worth closing as it is a driver bug anyway
  5547. // and you really can't always stop them from hurting themselves if
  5548. // they are determined to do so. Note the alternative of invalidating
  5549. // first isn't attractive because then the same PTEs could be
  5550. // immediately reallocated and the new owner might want to add an
  5551. // I/O space entry before this thread got to remove his. So additional
  5552. // lock serialization would need to be added. Not worth it.
  5553. //
  5554. MiUnmapLargePages (BaseAddress, NumberOfBytes);
  5555. }
  5556. return;
  5557. }
  5558. PVOID
  5559. MiAllocateContiguousMemory (
  5560. IN SIZE_T NumberOfBytes,
  5561. IN PFN_NUMBER LowestAcceptablePfn,
  5562. IN PFN_NUMBER HighestAcceptablePfn,
  5563. IN PFN_NUMBER BoundaryPfn,
  5564. IN MEMORY_CACHING_TYPE CacheType,
  5565. PVOID CallingAddress
  5566. )
  5567. /*++
  5568. Routine Description:
  5569. This function allocates a range of physically contiguous non-paged
  5570. pool. It relies on the fact that non-paged pool is built at
  5571. system initialization time from a contiguous range of physical
  5572. memory. It allocates the specified size of non-paged pool and
  5573. then checks to ensure it is contiguous as pool expansion does
  5574. not maintain the contiguous nature of non-paged pool.
  5575. This routine is designed to be used by a driver's initialization
  5576. routine to allocate a contiguous block of physical memory for
  5577. issuing DMA requests from.
  5578. Arguments:
  5579. NumberOfBytes - Supplies the number of bytes to allocate.
  5580. LowestAcceptablePfn - Supplies the lowest page frame number
  5581. which is valid for the allocation.
  5582. HighestAcceptablePfn - Supplies the highest page frame number
  5583. which is valid for the allocation.
  5584. BoundaryPfn - Supplies the page frame number multiple the allocation must
  5585. not cross. 0 indicates it can cross any boundary.
  5586. CacheType - Supplies the type of cache mapping that will be used for the
  5587. memory.
  5588. CallingAddress - Supplies the calling address of the allocator.
  5589. Return Value:
  5590. NULL - a contiguous range could not be found to satisfy the request.
  5591. NON-NULL - Returns a pointer (virtual address in the nonpaged portion
  5592. of the system) to the allocated physically contiguous
  5593. memory.
  5594. Environment:
  5595. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  5596. --*/
  5597. {
  5598. PVOID BaseAddress;
  5599. PFN_NUMBER SizeInPages;
  5600. PFN_NUMBER LowestPfn;
  5601. PFN_NUMBER HighestPfn;
  5602. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  5603. ASSERT (NumberOfBytes != 0);
  5604. LowestPfn = LowestAcceptablePfn;
  5605. #if defined (_MI_MORE_THAN_4GB_)
  5606. if (MiNoLowMemory != 0) {
  5607. if (HighestAcceptablePfn < MiNoLowMemory) {
  5608. return MiAllocateLowMemory (NumberOfBytes,
  5609. LowestAcceptablePfn,
  5610. HighestAcceptablePfn,
  5611. BoundaryPfn,
  5612. CallingAddress,
  5613. CacheType,
  5614. 'tnoC');
  5615. }
  5616. LowestPfn = MiNoLowMemory;
  5617. }
  5618. #endif
  5619. CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, 0);
  5620. //
  5621. // N.B. This setting of SizeInPages to exactly the request size
  5622. // means the non-NULL return value from MiCheckForContiguousMemory
  5623. // is guaranteed to be the BaseAddress. If this size is ever
  5624. // changed, then the non-NULL return value must be checked and
  5625. // split/returned accordingly.
  5626. //
  5627. SizeInPages = BYTES_TO_PAGES (NumberOfBytes);
  5628. HighestPfn = HighestAcceptablePfn;
  5629. if (CacheAttribute == MiCached) {
  5630. BaseAddress = ExAllocatePoolWithTag (NonPagedPoolCacheAligned,
  5631. NumberOfBytes,
  5632. 'mCmM');
  5633. if (BaseAddress != NULL) {
  5634. if (MiCheckForContiguousMemory (BaseAddress,
  5635. SizeInPages,
  5636. SizeInPages,
  5637. LowestPfn,
  5638. HighestPfn,
  5639. BoundaryPfn,
  5640. CacheAttribute)) {
  5641. return BaseAddress;
  5642. }
  5643. //
  5644. // The allocation from pool does not meet the contiguous
  5645. // requirements. Free the allocation and see if any of
  5646. // the free pool pages do.
  5647. //
  5648. ExFreePool (BaseAddress);
  5649. }
  5650. }
  5651. if (KeGetCurrentIrql() > APC_LEVEL) {
  5652. return NULL;
  5653. }
  5654. BaseAddress = MiFindContiguousMemory (LowestPfn,
  5655. HighestPfn,
  5656. BoundaryPfn,
  5657. SizeInPages,
  5658. CacheType,
  5659. CallingAddress);
  5660. return BaseAddress;
  5661. }
  5662. PVOID
  5663. MmAllocateContiguousMemorySpecifyCache (
  5664. IN SIZE_T NumberOfBytes,
  5665. IN PHYSICAL_ADDRESS LowestAcceptableAddress,
  5666. IN PHYSICAL_ADDRESS HighestAcceptableAddress,
  5667. IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL,
  5668. IN MEMORY_CACHING_TYPE CacheType
  5669. )
  5670. /*++
  5671. Routine Description:
  5672. This function allocates a range of physically contiguous non-cached,
  5673. non-paged memory. This is accomplished by using MmAllocateContiguousMemory
  5674. which uses nonpaged pool virtual addresses to map the found memory chunk.
  5675. Then this function establishes another map to the same physical addresses,
  5676. but this alternate map is initialized as non-cached. All references by
  5677. our caller will be done through this alternate map.
  5678. This routine is designed to be used by a driver's initialization
  5679. routine to allocate a contiguous block of noncached physical memory for
  5680. things like the AGP GART.
  5681. Arguments:
  5682. NumberOfBytes - Supplies the number of bytes to allocate.
  5683. LowestAcceptableAddress - Supplies the lowest physical address
  5684. which is valid for the allocation. For
  5685. example, if the device can only reference
  5686. physical memory in the 8M to 16MB range, this
  5687. value would be set to 0x800000 (8Mb).
  5688. HighestAcceptableAddress - Supplies the highest physical address
  5689. which is valid for the allocation. For
  5690. example, if the device can only reference
  5691. physical memory below 16MB, this
  5692. value would be set to 0xFFFFFF (16Mb - 1).
  5693. BoundaryAddressMultiple - Supplies the physical address multiple this
  5694. allocation must not cross.
  5695. Return Value:
  5696. NULL - a contiguous range could not be found to satisfy the request.
  5697. NON-NULL - Returns a pointer (virtual address in the nonpaged portion
  5698. of the system) to the allocated physically contiguous
  5699. memory.
  5700. Environment:
  5701. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  5702. --*/
  5703. {
  5704. PVOID BaseAddress;
  5705. PFN_NUMBER LowestPfn;
  5706. PFN_NUMBER HighestPfn;
  5707. PFN_NUMBER BoundaryPfn;
  5708. PVOID CallingAddress;
  5709. PVOID CallersCaller;
  5710. RtlGetCallersAddress (&CallingAddress, &CallersCaller);
  5711. ASSERT (NumberOfBytes != 0);
  5712. LowestPfn = (PFN_NUMBER)(LowestAcceptableAddress.QuadPart >> PAGE_SHIFT);
  5713. if (BYTE_OFFSET(LowestAcceptableAddress.LowPart)) {
  5714. LowestPfn += 1;
  5715. }
  5716. if (BYTE_OFFSET(BoundaryAddressMultiple.LowPart)) {
  5717. return NULL;
  5718. }
  5719. BoundaryPfn = (PFN_NUMBER)(BoundaryAddressMultiple.QuadPart >> PAGE_SHIFT);
  5720. HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
  5721. if (HighestPfn > MmHighestPossiblePhysicalPage) {
  5722. HighestPfn = MmHighestPossiblePhysicalPage;
  5723. }
  5724. if (LowestPfn > HighestPfn) {
  5725. //
  5726. // The caller's range is beyond what physically exists, it cannot
  5727. // succeed. Bail now to avoid an expensive fruitless search.
  5728. //
  5729. return NULL;
  5730. }
  5731. BaseAddress = MiAllocateContiguousMemory (NumberOfBytes,
  5732. LowestPfn,
  5733. HighestPfn,
  5734. BoundaryPfn,
  5735. CacheType,
  5736. CallingAddress);
  5737. return BaseAddress;
  5738. }
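//
// Illustrative usage (an addition, not part of the original module): a driver
// needing a 64K DMA buffer below 16MB that must not cross a 64K physical
// boundary could call the routine above as follows. The constraints are
// hypothetical device limits.
//

static PVOID
MiExampleAllocateDmaBuffer (
    VOID
    )
{
    PVOID Buffer;
    PHYSICAL_ADDRESS LowestAddress;
    PHYSICAL_ADDRESS HighestAddress;
    PHYSICAL_ADDRESS Boundary;

    LowestAddress.QuadPart = 0;                 // no lower constraint
    HighestAddress.QuadPart = 0xFFFFFF;         // device only reaches 16MB
    Boundary.QuadPart = 0x10000;                // never cross a 64K line

    Buffer = MmAllocateContiguousMemorySpecifyCache (64 * 1024,
                                                     LowestAddress,
                                                     HighestAddress,
                                                     Boundary,
                                                     MmCached);

    //
    // The physical address to program into the device comes from
    // MmGetPhysicalAddress (Buffer); the buffer is later returned with
    // MmFreeContiguousMemorySpecifyCache (Buffer, 64 * 1024, MmCached).
    //

    return Buffer;
}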
  5739. PVOID
  5740. MmAllocateContiguousMemory (
  5741. IN SIZE_T NumberOfBytes,
  5742. IN PHYSICAL_ADDRESS HighestAcceptableAddress
  5743. )
  5744. /*++
  5745. Routine Description:
  5746. This function allocates a range of physically contiguous non-paged pool.
  5747. This routine is designed to be used by a driver's initialization
  5748. routine to allocate a contiguous block of physical memory for
  5749. issuing DMA requests from.
  5750. Arguments:
  5751. NumberOfBytes - Supplies the number of bytes to allocate.
  5752. HighestAcceptableAddress - Supplies the highest physical address
  5753. which is valid for the allocation. For
  5754. example, if the device can only reference
  5755. physical memory in the lower 16MB this
  5756. value would be set to 0xFFFFFF (16Mb - 1).
  5757. Return Value:
  5758. NULL - a contiguous range could not be found to satisfy the request.
  5759. NON-NULL - Returns a pointer (virtual address in the nonpaged portion
  5760. of the system) to the allocated physically contiguous
  5761. memory.
  5762. Environment:
  5763. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  5764. --*/
  5765. {
  5766. PFN_NUMBER HighestPfn;
  5767. PVOID CallingAddress;
  5768. PVOID VirtualAddress;
  5769. PVOID CallersCaller;
  5770. RtlGetCallersAddress (&CallingAddress, &CallersCaller);
  5771. HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
  5772. if (HighestPfn > MmHighestPossiblePhysicalPage) {
  5773. HighestPfn = MmHighestPossiblePhysicalPage;
  5774. }
  5775. VirtualAddress = MiAllocateContiguousMemory (NumberOfBytes,
  5776. 0,
  5777. HighestPfn,
  5778. 0,
  5779. MmCached,
  5780. CallingAddress);
  5781. return VirtualAddress;
  5782. }
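//
// Illustrative usage of the legacy entrypoint above (an addition, not part of
// the original module): only an upper bound can be specified, so an ISA-style
// 16MB limit looks like this. The one-page size is arbitrary.
//

static PVOID
MiExampleAllocateLegacyBuffer (
    VOID
    )
{
    PHYSICAL_ADDRESS HighestAddress;

    HighestAddress.QuadPart = 0xFFFFFF;         // 16MB - 1

    //
    // The returned address is freed later with MmFreeContiguousMemory.
    //

    return MmAllocateContiguousMemory (PAGE_SIZE, HighestAddress);
}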
  5783. #if defined (_WIN64)
  5784. #define SPECIAL_POOL_ADDRESS(p) \
  5785. ((((p) >= MmSpecialPoolStart) && ((p) < MmSpecialPoolEnd)) || \
  5786. (((p) >= MmSessionSpecialPoolStart) && ((p) < MmSessionSpecialPoolEnd)))
  5787. #else
  5788. #define SPECIAL_POOL_ADDRESS(p) \
  5789. (((p) >= MmSpecialPoolStart) && ((p) < MmSpecialPoolEnd))
  5790. #endif
  5791. VOID
  5792. MmFreeContiguousMemory (
  5793. IN PVOID BaseAddress
  5794. )
  5795. /*++
  5796. Routine Description:
  5797. This function deallocates a range of physically contiguous non-paged
  5798. pool which was allocated with the MmAllocateContiguousMemory function.
  5799. Arguments:
  5800. BaseAddress - Supplies the base virtual address where the physical
  5801. address was previously mapped.
  5802. Return Value:
  5803. None.
  5804. Environment:
  5805. Kernel mode, IRQL of APC_LEVEL or below.
  5806. --*/
  5807. {
  5808. KIRQL OldIrql;
  5809. ULONG SizeInPages;
  5810. PMMPTE PointerPte;
  5811. PFN_NUMBER PageFrameIndex;
  5812. PFN_NUMBER LastPage;
  5813. PMMPFN Pfn1;
  5814. PMMPFN StartPfn;
  5815. PAGED_CODE();
  5816. #if defined (_MI_MORE_THAN_4GB_)
  5817. if (MiNoLowMemory != 0) {
  5818. if (MiFreeLowMemory (BaseAddress, 'tnoC') == TRUE) {
  5819. return;
  5820. }
  5821. }
  5822. #endif
  5823. if (((BaseAddress >= MmNonPagedPoolStart) &&
  5824. (BaseAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes))) ||
  5825. ((BaseAddress >= MmNonPagedPoolExpansionStart) &&
  5826. (BaseAddress < MmNonPagedPoolEnd)) ||
  5827. (SPECIAL_POOL_ADDRESS(BaseAddress))) {
  5828. ExFreePool (BaseAddress);
  5829. }
  5830. else {
  5831. //
  5832. // The contiguous memory being freed may be the target of a delayed
  5833. // unlock. Since these pages may be immediately released, force
  5834. // any pending delayed actions to occur now.
  5835. //
  5836. MiDeferredUnlockPages (0);
  5837. PointerPte = MiGetPteAddress (BaseAddress);
  5838. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  5839. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  5840. if (Pfn1->u3.e1.StartOfAllocation == 0) {
  5841. KeBugCheckEx (BAD_POOL_CALLER,
  5842. 0x60,
  5843. (ULONG_PTR)BaseAddress,
  5844. 0,
  5845. 0);
  5846. }
  5847. StartPfn = Pfn1;
  5848. Pfn1->u3.e1.StartOfAllocation = 0;
  5849. Pfn1 -= 1;
  5850. do {
  5851. Pfn1 += 1;
  5852. ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
  5853. ASSERT (Pfn1->u2.ShareCount == 1);
  5854. ASSERT (Pfn1->PteAddress == PointerPte);
  5855. ASSERT (Pfn1->OriginalPte.u.Long == MM_DEMAND_ZERO_WRITE_PTE);
  5856. ASSERT (Pfn1->u4.PteFrame == MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress(PointerPte)));
  5857. ASSERT (Pfn1->u3.e1.PageLocation == ActiveAndValid);
  5858. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  5859. ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
  5860. ASSERT (Pfn1->u3.e1.PrototypePte == 0);
  5861. MI_SET_PFN_DELETED(Pfn1);
  5862. PointerPte += 1;
  5863. } while (Pfn1->u3.e1.EndOfAllocation == 0);
  5864. Pfn1->u3.e1.EndOfAllocation = 0;
  5865. SizeInPages = (ULONG)(Pfn1 - StartPfn + 1);
  5866. //
  5867. // Notify deadlock verifier that a region that can contain locks
  5868. // will become invalid.
  5869. //
  5870. if (MmVerifierData.Level & DRIVER_VERIFIER_DEADLOCK_DETECTION) {
  5871. VerifierDeadlockFreePool (BaseAddress, SizeInPages << PAGE_SHIFT);
  5872. }
  5873. //
  5874. // Release the mapping.
  5875. //
  5876. MmUnmapIoSpace (BaseAddress, SizeInPages << PAGE_SHIFT);
  5877. //
  5878. // Release the actual pages.
  5879. //
  5880. LastPage = PageFrameIndex + SizeInPages;
  5881. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  5882. LOCK_PFN (OldIrql);
  5883. do {
  5884. MiDecrementShareCount (Pfn1, PageFrameIndex);
  5885. PageFrameIndex += 1;
  5886. Pfn1 += 1;
  5887. } while (PageFrameIndex < LastPage);
  5888. UNLOCK_PFN (OldIrql);
  5889. MI_INCREMENT_RESIDENT_AVAILABLE (SizeInPages, MM_RESAVAIL_FREE_CONTIGUOUS2);
  5890. MiReturnCommitment (SizeInPages);
  5891. }
  5892. }
  5893. VOID
  5894. MmFreeContiguousMemorySpecifyCache (
  5895. IN PVOID BaseAddress,
  5896. IN SIZE_T NumberOfBytes,
  5897. IN MEMORY_CACHING_TYPE CacheType
  5898. )
  5899. /*++
  5900. Routine Description:
  5901. This function deallocates a range of noncached memory in
  5902. the non-paged portion of the system address space.
  5903. Arguments:
5904. BaseAddress - Supplies the base virtual address where the noncached memory resides.
  5905. NumberOfBytes - Supplies the number of bytes allocated to the request.
  5906. This must be the same number that was obtained with
  5907. the MmAllocateContiguousMemorySpecifyCache call.
  5908. CacheType - Supplies the cachetype used when the caller made the
  5909. MmAllocateContiguousMemorySpecifyCache call.
  5910. Return Value:
  5911. None.
  5912. Environment:
  5913. Kernel mode, IRQL of APC_LEVEL or below.
  5914. --*/
  5915. {
  5916. UNREFERENCED_PARAMETER (NumberOfBytes);
  5917. UNREFERENCED_PARAMETER (CacheType);
  5918. MmFreeContiguousMemory (BaseAddress);
  5919. }
  5920. PVOID
  5921. MmAllocateIndependentPages (
  5922. IN SIZE_T NumberOfBytes,
  5923. IN ULONG Node
  5924. )
  5925. /*++
  5926. Routine Description:
  5927. This function allocates a range of virtually contiguous nonpaged pages
  5928. without using superpages. This allows the caller to apply independent
  5929. page protections to each page.
  5930. Arguments:
  5931. NumberOfBytes - Supplies the number of bytes to allocate.
  5932. Node - Supplies the preferred node number for the backing physical pages.
  5933. If pages on the preferred node are not available, any page will
  5934. be used. -1 indicates no preferred node.
  5935. Return Value:
  5936. The virtual address of the memory or NULL if none could be allocated.
  5937. Environment:
  5938. Kernel mode, IRQL of APC_LEVEL or below.
  5939. --*/
  5940. {
  5941. ULONG PageColor;
  5942. PFN_NUMBER NumberOfPages;
  5943. PMMPTE PointerPte;
  5944. MMPTE TempPte;
  5945. PFN_NUMBER PageFrameIndex;
  5946. PVOID BaseAddress;
  5947. KIRQL OldIrql;
  5948. ASSERT ((Node == (ULONG)-1) || (Node < KeNumberNodes));
  5949. NumberOfPages = BYTES_TO_PAGES (NumberOfBytes);
  5950. PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages, SystemPteSpace);
  5951. if (PointerPte == NULL) {
  5952. return NULL;
  5953. }
  5954. if (MiChargeCommitment (NumberOfPages, NULL) == FALSE) {
  5955. MiReleaseSystemPtes (PointerPte, (ULONG)NumberOfPages, SystemPteSpace);
  5956. return NULL;
  5957. }
  5958. BaseAddress = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
  5959. LOCK_PFN (OldIrql);
  5960. if ((SPFN_NUMBER)NumberOfPages > MI_NONPAGABLE_MEMORY_AVAILABLE()) {
  5961. UNLOCK_PFN (OldIrql);
  5962. MiReturnCommitment (NumberOfPages);
  5963. MiReleaseSystemPtes (PointerPte, (ULONG)NumberOfPages, SystemPteSpace);
  5964. return NULL;
  5965. }
  5966. MM_TRACK_COMMIT (MM_DBG_COMMIT_INDEPENDENT_PAGES, NumberOfPages);
  5967. MI_DECREMENT_RESIDENT_AVAILABLE (NumberOfPages, MM_RESAVAIL_ALLOCATE_INDEPENDENT);
  5968. do {
  5969. ASSERT (PointerPte->u.Hard.Valid == 0);
  5970. if (MmAvailablePages < MM_HIGH_LIMIT) {
  5971. MiEnsureAvailablePageOrWait (NULL, NULL, OldIrql);
  5972. }
  5973. if (Node == (ULONG)-1) {
  5974. PageColor = MI_GET_PAGE_COLOR_FROM_PTE (PointerPte);
  5975. }
  5976. else {
  5977. PageColor = (((MI_SYSTEM_PAGE_COLOR++) & MmSecondaryColorMask) |
  5978. (Node << MmSecondaryColorNodeShift));
  5979. }
  5980. PageFrameIndex = MiRemoveAnyPage (PageColor);
  5981. MI_MAKE_VALID_PTE (TempPte,
  5982. PageFrameIndex,
  5983. MM_READWRITE,
  5984. PointerPte);
  5985. MI_SET_PTE_DIRTY (TempPte);
  5986. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  5987. MiInitializePfn (PageFrameIndex, PointerPte, 1);
  5988. PointerPte += 1;
  5989. NumberOfPages -= 1;
  5990. } while (NumberOfPages != 0);
  5991. UNLOCK_PFN (OldIrql);
  5992. NumberOfPages = BYTES_TO_PAGES (NumberOfBytes);
  5993. return BaseAddress;
  5994. }
  5995. BOOLEAN
  5996. MmSetPageProtection (
  5997. IN PVOID VirtualAddress,
  5998. IN SIZE_T NumberOfBytes,
  5999. IN ULONG NewProtect
  6000. )
  6001. /*++
  6002. Routine Description:
  6003. This function sets the specified virtual address range to the desired
  6004. protection. This assumes that the virtual addresses are backed by PTEs
  6005. which can be set (ie: not in kseg0 or large pages).
  6006. Arguments:
  6007. VirtualAddress - Supplies the start address to protect.
  6008. NumberOfBytes - Supplies the number of bytes to set.
  6009. NewProtect - Supplies the protection to set the pages to (PAGE_XX).
  6010. Return Value:
  6011. TRUE if the protection was applied, FALSE if not.
  6012. Environment:
  6013. Kernel mode, IRQL of APC_LEVEL or below.
  6014. --*/
  6015. {
  6016. PFN_NUMBER i;
  6017. PFN_NUMBER NumberOfPages;
  6018. PMMPTE PointerPte;
  6019. MMPTE TempPte;
  6020. MMPTE NewPteContents;
  6021. KIRQL OldIrql;
  6022. ULONG ProtectionMask;
  6023. MMPTE_FLUSH_LIST PteFlushList;
  6024. ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
  6025. if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) {
  6026. return FALSE;
  6027. }
  6028. ProtectionMask = MiMakeProtectionMask (NewProtect);
  6029. if (ProtectionMask == MM_INVALID_PROTECTION) {
  6030. return FALSE;
  6031. }
  6032. PointerPte = MiGetPteAddress (VirtualAddress);
  6033. NumberOfPages = BYTES_TO_PAGES (NumberOfBytes);
  6034. ASSERT (NumberOfPages != 0);
  6035. PteFlushList.Count = 0;
  6036. LOCK_PFN (OldIrql);
  6037. for (i = 0; i < NumberOfPages; i += 1) {
  6038. TempPte = *PointerPte;
  6039. MI_MAKE_VALID_PTE (NewPteContents,
  6040. TempPte.u.Hard.PageFrameNumber,
  6041. ProtectionMask,
  6042. PointerPte);
  6043. NewPteContents.u.Hard.Dirty = TempPte.u.Hard.Dirty;
  6044. MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, NewPteContents);
  6045. if (PteFlushList.Count < MM_MAXIMUM_FLUSH_COUNT) {
  6046. PteFlushList.FlushVa[PteFlushList.Count] =
  6047. (PVOID)((PUCHAR)VirtualAddress + (i << PAGE_SHIFT));
  6048. PteFlushList.Count += 1;
  6049. }
  6050. PointerPte += 1;
  6051. }
  6052. ASSERT (PteFlushList.Count != 0);
  6053. MiFlushPteList (&PteFlushList, TRUE);
  6054. UNLOCK_PFN (OldIrql);
  6055. return TRUE;
  6056. }
  6057. VOID
  6058. MmFreeIndependentPages (
  6059. IN PVOID VirtualAddress,
  6060. IN SIZE_T NumberOfBytes
  6061. )
  6062. /*++
  6063. Routine Description:
  6064. Returns pages previously allocated with MmAllocateIndependentPages.
  6065. Arguments:
  6066. VirtualAddress - Supplies the virtual address to free.
  6067. NumberOfBytes - Supplies the number of bytes to free.
  6068. Return Value:
  6069. None.
  6070. Environment:
  6071. Kernel mode, IRQL of APC_LEVEL or below.
  6072. --*/
  6073. {
  6074. KIRQL OldIrql;
  6075. MMPTE PteContents;
  6076. PMMPTE PointerPte;
  6077. PMMPTE BasePte;
  6078. PMMPTE EndPte;
  6079. PMMPFN Pfn1;
  6080. PMMPFN Pfn2;
  6081. PFN_NUMBER NumberOfPages;
  6082. PFN_NUMBER PageFrameIndex;
  6083. ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
  6084. NumberOfPages = BYTES_TO_PAGES (NumberOfBytes);
  6085. PointerPte = MiGetPteAddress (VirtualAddress);
  6086. BasePte = PointerPte;
  6087. EndPte = PointerPte + NumberOfPages;
  6088. LOCK_PFN (OldIrql);
  6089. do {
  6090. PteContents = *PointerPte;
  6091. ASSERT (PteContents.u.Hard.Valid == 1);
  6092. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
  6093. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  6094. Pfn2 = MI_PFN_ELEMENT (Pfn1->u4.PteFrame);
  6095. MiDecrementShareCount (Pfn2, Pfn1->u4.PteFrame);
  6096. MI_SET_PFN_DELETED (Pfn1);
  6097. MiDecrementShareCount (Pfn1, PageFrameIndex);
  6098. PointerPte += 1;
  6099. } while (PointerPte < EndPte);
  6100. //
  6101. // Update the count of resident available pages.
  6102. //
  6103. UNLOCK_PFN (OldIrql);
  6104. MI_INCREMENT_RESIDENT_AVAILABLE (NumberOfPages, MM_RESAVAIL_FREE_INDEPENDENT);
  6105. //
  6106. // Return PTEs and commitment.
  6107. //
  6108. MiReleaseSystemPtes (BasePte, (ULONG)NumberOfPages, SystemPteSpace);
  6109. MiReturnCommitment (NumberOfPages);
  6110. MM_TRACK_COMMIT (MM_DBG_COMMIT_INDEPENDENT_PAGES, NumberOfPages);
  6111. }
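//
// Illustrative usage (an addition, not part of the original module): the
// independent-page allocator exists precisely so per-page protections can be
// applied, for example to place a read-only guard page after a writable
// page. The two-page layout below is hypothetical.
//

static PVOID
MiExampleAllocateGuardedPage (
    VOID
    )
{
    PUCHAR Base;

    Base = (PUCHAR) MmAllocateIndependentPages (2 * PAGE_SIZE, (ULONG)-1);

    if (Base == NULL) {
        return NULL;
    }

    //
    // Make the second page read-only - stray writes off the end of the
    // first page now fault immediately instead of corrupting a neighbor.
    //

    if (MmSetPageProtection (Base + PAGE_SIZE,
                             PAGE_SIZE,
                             PAGE_READONLY) == FALSE) {

        MmFreeIndependentPages (Base, 2 * PAGE_SIZE);
        return NULL;
    }

    return Base;
}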
  6112. VOID
  6113. MiZeroAwePageWorker (
  6114. IN PVOID Context
  6115. )
  6116. /*++
  6117. Routine Description:
  6118. This routine is the worker routine executed by all processors to
  6119. fan out page zeroing for AWE allocations.
  6120. Arguments:
  6121. Context - Supplies a pointer to the workitem.
  6122. Return Value:
  6123. None.
  6124. Environment:
  6125. Kernel mode.
  6126. --*/
  6127. {
  6128. #if defined(MI_MULTINODE)
  6129. LOGICAL SetIdeal;
  6130. ULONG Processor;
  6131. PEPROCESS DefaultProcess;
  6132. #endif
  6133. PKTHREAD Thread;
  6134. KPRIORITY OldPriority;
  6135. PMMPFN Pfn1;
  6136. SCHAR OldBasePriority;
  6137. PMMPFN PfnNextColored;
  6138. PCOLORED_PAGE_INFO ColoredPageInfo;
  6139. MMPTE TempPte;
  6140. PMMPTE BasePte;
  6141. PMMPTE PointerPte;
  6142. PVOID VirtualAddress;
  6143. PFN_NUMBER PageFrameIndex;
  6144. PFN_COUNT i;
  6145. PFN_COUNT RequestedPtes;
  6146. ColoredPageInfo = (PCOLORED_PAGE_INFO) Context;
  6147. //
  6148. // Use the initiating thread's priority instead of the default system
  6149. // thread priority.
  6150. //
  6151. Thread = KeGetCurrentThread ();
  6152. OldBasePriority = Thread->BasePriority;
  6153. Thread->BasePriority = ColoredPageInfo->BasePriority;
  6154. OldPriority = KeSetPriorityThread (Thread, Thread->BasePriority);
  6155. //
  6156. // Dispatch each worker thread to a processor local to the memory being
  6157. // zeroed.
  6158. //
  6159. #if defined(MI_MULTINODE)
  6160. Processor = 0;
  6161. if (PsInitialSystemProcess != NULL) {
  6162. DefaultProcess = PsInitialSystemProcess;
  6163. }
  6164. else {
  6165. DefaultProcess = PsIdleProcess;
  6166. }
  6167. if (ColoredPageInfo->Affinity != DefaultProcess->Pcb.Affinity) {
  6168. KeFindFirstSetLeftAffinity (ColoredPageInfo->Affinity, &Processor);
  6169. Processor = (CCHAR) KeSetIdealProcessorThread (Thread,
  6170. (CCHAR) Processor);
  6171. SetIdeal = TRUE;
  6172. }
  6173. else {
  6174. SetIdeal = FALSE;
  6175. }
  6176. #endif
  6177. Pfn1 = ColoredPageInfo->PfnAllocation;
  6178. ASSERT (Pfn1 != (PMMPFN) MM_EMPTY_LIST);
  6179. ASSERT (ColoredPageInfo->PagesQueued != 0);
  6180. //
  6181. // Zero all argument pages.
  6182. //
  6183. do {
  6184. ASSERT (ColoredPageInfo->PagesQueued != 0);
  6185. RequestedPtes = ColoredPageInfo->PagesQueued;
  6186. #if !defined (_WIN64)
  6187. //
  6188. // NT64 has an abundance of PTEs so try to map the whole request with
  6189. // a single call. For NT32, this resource needs to be carefully shared.
  6190. //
  6191. if (RequestedPtes > (1024 * 1024) / PAGE_SIZE) {
  6192. RequestedPtes = (1024 * 1024) / PAGE_SIZE;
  6193. }
  6194. #endif
  6195. do {
  6196. BasePte = MiReserveSystemPtes (RequestedPtes, SystemPteSpace);
  6197. if (BasePte != NULL) {
  6198. break;
  6199. }
  6200. RequestedPtes >>= 1;
  6201. } while (RequestedPtes != 0);
  6202. if (BasePte != NULL) {
  6203. //
  6204. // Able to get a reasonable chunk, go for a big zero.
  6205. //
  6206. PointerPte = BasePte;
  6207. MI_MAKE_VALID_PTE (TempPte,
  6208. 0,
  6209. MM_READWRITE,
  6210. PointerPte);
  6211. MI_SET_PTE_DIRTY (TempPte);
  6212. for (i = 0; i < RequestedPtes; i += 1) {
  6213. ASSERT (Pfn1 != (PMMPFN) MM_EMPTY_LIST);
  6214. PageFrameIndex = MI_PFN_ELEMENT_TO_INDEX (Pfn1);
  6215. ASSERT (PointerPte->u.Hard.Valid == 0);
  6216. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  6217. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  6218. PfnNextColored = (PMMPFN) (ULONG_PTR) Pfn1->OriginalPte.u.Long;
  6219. Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  6220. Pfn1 = PfnNextColored;
  6221. PointerPte += 1;
  6222. }
  6223. ColoredPageInfo->PagesQueued -= RequestedPtes;
  6224. VirtualAddress = MiGetVirtualAddressMappedByPte (BasePte);
  6225. KeZeroPages (VirtualAddress, ((ULONG_PTR)RequestedPtes) << PAGE_SHIFT);
  6226. MiReleaseSystemPtes (BasePte, RequestedPtes, SystemPteSpace);
  6227. }
  6228. else {
  6229. //
  6230. // No PTEs left, zero a single page at a time.
  6231. //
  6232. MiZeroPhysicalPage (MI_PFN_ELEMENT_TO_INDEX (Pfn1), 0);
  6233. PfnNextColored = (PMMPFN) (ULONG_PTR) Pfn1->OriginalPte.u.Long;
  6234. Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  6235. Pfn1 = PfnNextColored;
  6236. ColoredPageInfo->PagesQueued -= 1;
  6237. }
  6238. } while (Pfn1 != (PMMPFN) MM_EMPTY_LIST);
  6239. //
  6240. // Let the initiator know we've zeroed our share.
  6241. //
  6242. KeSetEvent (&ColoredPageInfo->Event, 0, FALSE);
  6243. //
  6244. // Restore the entry thread priority and ideal processor - this is critical
  6245. // if we were called directly from the initiator instead of in the
  6246. // context of a system thread.
  6247. //
  6248. #if defined(MI_MULTINODE)
  6249. if (SetIdeal == TRUE) {
  6250. KeSetIdealProcessorThread (Thread, (CCHAR) Processor);
  6251. }
  6252. #endif
  6253. KeSetPriorityThread (Thread, OldPriority);
  6254. Thread->BasePriority = OldBasePriority;
  6255. return;
  6256. }
  6257. VOID
  6258. MiZeroInParallel (
  6259. IN PCOLORED_PAGE_INFO ColoredPageInfoBase
  6260. )
  6261. /*++
  6262. Routine Description:
  6263. This routine zeroes all the free & standby pages, fanning out the work.
  6264. This is done even on UP machines because the worker thread code maps
  6265. large MDLs and is thus better performing than zeroing a single
  6266. page at a time.
  6267. Arguments:
  6268. ColoredPageInfoBase - Supplies the informational structure about which
  6269. pages to zero.
  6270. Return Value:
  6271. None.
  6272. Environment:
  6273. Kernel mode, IRQL of APC_LEVEL or below. The caller must lock down the
  6274. PAGELK section that this routine resides in.
  6275. --*/
  6276. {
  6277. #if defined(MI_MULTINODE)
  6278. ULONG i;
  6279. #endif
  6280. ULONG WaitCount;
  6281. PKEVENT WaitObjects[MAXIMUM_WAIT_OBJECTS];
  6282. KWAIT_BLOCK WaitBlockArray[MAXIMUM_WAIT_OBJECTS];
  6283. KPRIORITY OldPriority;
  6284. SCHAR OldBasePriority;
  6285. NTSTATUS WakeupStatus;
  6286. PETHREAD EThread;
  6287. PKTHREAD Thread;
  6288. OBJECT_ATTRIBUTES ObjectAttributes;
  6289. HANDLE ThreadHandle;
  6290. SCHAR WorkerThreadPriority;
  6291. PCOLORED_PAGE_INFO ColoredPageInfo;
  6292. ULONG Color;
  6293. PMMPFN Pfn1;
  6294. NTSTATUS Status;
  6295. WaitCount = 0;
  6296. EThread = PsGetCurrentThread ();
  6297. Thread = &EThread->Tcb;
  6298. //
  6299. // Raise our priority to the highest non-realtime priority. This
  6300. // usually allows us to spawn all our worker threads without
  6301. // interruption from them, but also doesn't starve the modified or
  6302. // mapped page writers because we are going to access pagable code
  6303. // and data below and during the spawning.
  6304. //
  6305. OldBasePriority = Thread->BasePriority;
  6306. ASSERT ((OldBasePriority >= 1) || (InitializationPhase == 0));
  6307. Thread->BasePriority = LOW_REALTIME_PRIORITY - 1;
  6308. OldPriority = KeSetPriorityThread (Thread, LOW_REALTIME_PRIORITY - 1);
  6309. WorkerThreadPriority = OldBasePriority;
  6310. for (Color = 0; Color < MmSecondaryColors; Color += 1) {
  6311. ColoredPageInfo = &ColoredPageInfoBase[Color];
  6312. Pfn1 = ColoredPageInfo->PfnAllocation;
  6313. if (Pfn1 != (PMMPFN) MM_EMPTY_LIST) {
  6314. //
  6315. // Assume no memory penalty for non-local memory on this
  6316. // machine so there's no need to run with restricted affinity.
  6317. //
  6318. ColoredPageInfo->BasePriority = WorkerThreadPriority;
  6319. #if defined(MI_MULTINODE)
  6320. if (PsInitialSystemProcess != NULL) {
  6321. ColoredPageInfo->Affinity = PsInitialSystemProcess->Pcb.Affinity;
  6322. }
  6323. else {
  6324. ColoredPageInfo->Affinity = PsIdleProcess->Pcb.Affinity;
  6325. }
  6326. for (i = 0; i < KeNumberNodes; i += 1) {
  6327. if (KeNodeBlock[i]->Color == (Color >> MmSecondaryColorNodeShift)) {
  6328. ColoredPageInfo->Affinity = KeNodeBlock[i]->ProcessorMask;
  6329. break;
  6330. }
  6331. }
  6332. #endif
  6333. KeInitializeEvent (&ColoredPageInfo->Event,
  6334. SynchronizationEvent,
  6335. FALSE);
  6336. //
  6337. // Don't spawn threads to zero the memory if we are a system
  6338. // thread. This is because this routine can be called by
  6339. // drivers in the context of the segment dereference thread, so
  6340. // referencing pagable memory here can cause a deadlock.
  6341. //
  6342. if ((IS_SYSTEM_THREAD (EThread)) || (InitializationPhase == 0)) {
  6343. MiZeroAwePageWorker ((PVOID) ColoredPageInfo);
  6344. }
  6345. else {
  6346. InitializeObjectAttributes (&ObjectAttributes,
  6347. NULL,
  6348. 0,
  6349. NULL,
  6350. NULL);
  6351. //
  6352. // We are creating a system thread for each memory
  6353. // node instead of using the executive worker thread
  6354. // pool. This is because we want to run the threads
  6355. // at a lower priority to keep the machine responsive
  6356. // during all this zeroing. And doing this to a worker
  6357. // thread can cause a deadlock as various other
  6358. // components (registry, etc) expect worker threads to be
  6359. // available at a higher priority right away.
  6360. //
  6361. Status = PsCreateSystemThread (&ThreadHandle,
  6362. THREAD_ALL_ACCESS,
  6363. &ObjectAttributes,
  6364. 0L,
  6365. NULL,
  6366. MiZeroAwePageWorker,
  6367. (PVOID) ColoredPageInfo);
  6368. if (NT_SUCCESS(Status)) {
  6369. ZwClose (ThreadHandle);
  6370. }
  6371. else {
  6372. MiZeroAwePageWorker ((PVOID) ColoredPageInfo);
  6373. }
  6374. }
  6375. WaitObjects[WaitCount] = &ColoredPageInfo->Event;
  6376. WaitCount += 1;
  6377. if (WaitCount == MAXIMUM_WAIT_OBJECTS) {
  6378. //
  6379. // Done issuing the first round of workitems,
  6380. // lower priority & wait.
  6381. //
  6382. KeSetPriorityThread (Thread, OldPriority);
  6383. Thread->BasePriority = OldBasePriority;
  6384. WakeupStatus = KeWaitForMultipleObjects (WaitCount,
  6385. &WaitObjects[0],
  6386. WaitAll,
  6387. Executive,
  6388. KernelMode,
  6389. FALSE,
  6390. NULL,
  6391. &WaitBlockArray[0]);
  6392. ASSERT (WakeupStatus == STATUS_SUCCESS);
  6393. WaitCount = 0;
  6394. Thread->BasePriority = LOW_REALTIME_PRIORITY - 1;
  6395. KeSetPriorityThread (Thread, LOW_REALTIME_PRIORITY - 1);
  6396. }
  6397. }
  6398. }
  6399. //
  6400. // Done issuing all the workitems, lower priority & wait.
  6401. //
  6402. KeSetPriorityThread (Thread, OldPriority);
  6403. Thread->BasePriority = OldBasePriority;
  6404. if (WaitCount != 0) {
  6405. WakeupStatus = KeWaitForMultipleObjects (WaitCount,
  6406. &WaitObjects[0],
  6407. WaitAll,
  6408. Executive,
  6409. KernelMode,
  6410. FALSE,
  6411. NULL,
  6412. &WaitBlockArray[0]);
  6413. ASSERT (WakeupStatus == STATUS_SUCCESS);
  6414. }
  6415. return;
  6416. }
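//
// Illustrative sketch (not part of the original source, disabled via #if 0)
// of the batched wait pattern MiZeroInParallel uses above:
// KeWaitForMultipleObjects accepts at most MAXIMUM_WAIT_OBJECTS objects per
// call, so a wait is issued whenever the array fills and once more for any
// remainder. MiExampleWaitForEvents, EventArray and EventCount are
// hypothetical names.
//
#if 0
VOID
MiExampleWaitForEvents (
    IN PKEVENT EventArray,
    IN ULONG EventCount
    )
{
    ULONG i;
    ULONG WaitCount;
    NTSTATUS WakeupStatus;
    PKEVENT WaitObjects[MAXIMUM_WAIT_OBJECTS];
    KWAIT_BLOCK WaitBlockArray[MAXIMUM_WAIT_OBJECTS];

    WaitCount = 0;

    for (i = 0; i < EventCount; i += 1) {

        WaitObjects[WaitCount] = &EventArray[i];
        WaitCount += 1;

        if (WaitCount == MAXIMUM_WAIT_OBJECTS) {

            //
            // The array is full - drain this batch before continuing.
            //
            WakeupStatus = KeWaitForMultipleObjects (WaitCount,
                                                     &WaitObjects[0],
                                                     WaitAll,
                                                     Executive,
                                                     KernelMode,
                                                     FALSE,
                                                     NULL,
                                                     &WaitBlockArray[0]);
            ASSERT (WakeupStatus == STATUS_SUCCESS);
            WaitCount = 0;
        }
    }

    if (WaitCount != 0) {
        WakeupStatus = KeWaitForMultipleObjects (WaitCount,
                                                 &WaitObjects[0],
                                                 WaitAll,
                                                 Executive,
                                                 KernelMode,
                                                 FALSE,
                                                 NULL,
                                                 &WaitBlockArray[0]);
        ASSERT (WakeupStatus == STATUS_SUCCESS);
    }
}
#endif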
  6417. PMDL
  6418. MmAllocatePagesForMdl (
  6419. IN PHYSICAL_ADDRESS LowAddress,
  6420. IN PHYSICAL_ADDRESS HighAddress,
  6421. IN PHYSICAL_ADDRESS SkipBytes,
  6422. IN SIZE_T TotalBytes
  6423. )
  6424. /*++
  6425. Routine Description:
  6426. This routine searches the PFN database for free, zeroed or standby pages
  6427. to satisfy the request. This does not map the pages - it just allocates
  6428. them and puts them into an MDL. It is expected that our caller will
  6429. map the MDL as needed.
  6430. NOTE: this routine may return an MDL mapping a smaller number of bytes
  6431. than the amount requested. It is the caller's responsibility to check the
  6432. MDL upon return for the size actually allocated.
  6433. These pages comprise physical non-paged memory and are zero-filled.
  6434. This routine is designed to be used by an AGP driver to obtain physical
  6435. memory in a specified range since hardware may provide substantial
  6436. performance wins depending on where the backing memory is allocated.
  6437. Because the caller may use these pages for a noncached mapping, care is
  6438. taken to never allocate any pages that reside in a large page (in order
  6439. to prevent TB incoherency of the same page being mapped by multiple
  6440. translations with different attributes).
  6441. Arguments:
  6442. LowAddress - Supplies the low physical address of the first range that
  6443. the allocated pages can come from.
  6444. HighAddress - Supplies the high physical address of the first range that
  6445. the allocated pages can come from.
  6446. SkipBytes - Number of bytes to skip (from the Low Address) to get to the
  6447. next physical address range that allocated pages can come from.
  6448. TotalBytes - Supplies the number of bytes to allocate.
  6449. Return Value:
  6450. MDL - An MDL mapping a range of pages in the specified range.
  6451. This may map less memory than the caller requested if the full amount
  6452. is not currently available.
  6453. NULL - No pages in the specified range OR not enough virtually contiguous
  6454. nonpaged pool for the MDL is available at this time.
  6455. Environment:
  6456. Kernel mode, IRQL of APC_LEVEL or below.
  6457. --*/
  6458. {
  6459. PMDL MemoryDescriptorList;
  6460. PMDL MemoryDescriptorList2;
  6461. PMMPFN Pfn1;
  6462. PMMPFN PfnNextColored;
  6463. PMMPFN PfnNextFlink;
  6464. PMMPFN PfnLastColored;
  6465. KIRQL OldIrql;
  6466. PFN_NUMBER start;
  6467. PFN_NUMBER Page;
  6468. PFN_NUMBER NextPage;
  6469. PFN_NUMBER found;
  6470. PFN_NUMBER BasePage;
  6471. PFN_NUMBER LowPage;
  6472. PFN_NUMBER HighPage;
  6473. PFN_NUMBER SizeInPages;
  6474. PFN_NUMBER MdlPageSpan;
  6475. PFN_NUMBER SkipPages;
  6476. PFN_NUMBER MaxPages;
  6477. PFN_NUMBER PagesExamined;
  6478. PPFN_NUMBER MdlPage;
  6479. ULONG Color;
  6480. PMMCOLOR_TABLES ColorHead;
  6481. MMLISTS MemoryList;
  6482. PFN_NUMBER LowPage1;
  6483. PFN_NUMBER HighPage1;
  6484. LOGICAL PagePlacementOk;
  6485. PFN_NUMBER PageNextColored;
  6486. PFN_NUMBER PageNextFlink;
  6487. PFN_NUMBER PageLastColored;
  6488. PMMPFNLIST ListHead;
  6489. PCOLORED_PAGE_INFO ColoredPageInfoBase;
  6490. PCOLORED_PAGE_INFO ColoredPageInfo;
  6491. ULONG ColorHeadsDrained;
  6492. ULONG NodePassesLeft;
  6493. ULONG ColorCount;
  6494. ULONG BaseColor;
  6495. PFN_NUMBER ZeroCount;
  6496. #if DBG
  6497. PPFN_NUMBER LastMdlPage;
  6498. ULONG FinishedCount;
  6499. PEPROCESS Process;
  6500. #endif
  6501. ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
  6502. //
  6503. // The skip increment must be a page-size multiple.
  6504. //
  6505. if (BYTE_OFFSET(SkipBytes.LowPart)) {
  6506. return NULL;
  6507. }
  6508. LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);
  6509. HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
  6510. if (HighPage > MmHighestPossiblePhysicalPage) {
  6511. HighPage = MmHighestPossiblePhysicalPage;
  6512. }
  6513. //
  6514. // Maximum allocation size is constrained by the MDL ByteCount field.
  6515. //
  6516. if (TotalBytes > (SIZE_T)((ULONG)(MAXULONG - PAGE_SIZE))) {
  6517. TotalBytes = (SIZE_T)((ULONG)(MAXULONG - PAGE_SIZE));
  6518. }
  6519. SizeInPages = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
  6520. SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);
  6521. BasePage = LowPage;
  6522. //
  6523. // Check without the PFN lock as the actual number of pages to get will
  6524. // be recalculated later while holding the lock.
  6525. //
  6526. MaxPages = MI_NONPAGABLE_MEMORY_AVAILABLE() - 1024;
  6527. if ((SPFN_NUMBER)MaxPages <= 0) {
  6528. SizeInPages = 0;
  6529. }
  6530. else if (SizeInPages > MaxPages) {
  6531. SizeInPages = MaxPages;
  6532. }
  6533. if (SizeInPages == 0) {
  6534. return NULL;
  6535. }
  6536. #if DBG
  6537. if (SizeInPages < (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes)) {
  6538. if (MiPrintAwe != 0) {
  6539. DbgPrint("MmAllocatePagesForMdl1: unable to get %p pages, trying for %p instead\n",
  6540. ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes),
  6541. SizeInPages);
  6542. }
  6543. }
  6544. #endif
  6545. //
  6546. // Allocate an MDL to return the pages in.
  6547. //
  6548. do {
  6549. MemoryDescriptorList = MmCreateMdl (NULL,
  6550. NULL,
  6551. SizeInPages << PAGE_SHIFT);
  6552. if (MemoryDescriptorList != NULL) {
  6553. break;
  6554. }
  6555. SizeInPages -= (SizeInPages >> 4);
  6556. } while (SizeInPages != 0);
  6557. if (MemoryDescriptorList == NULL) {
  6558. return NULL;
  6559. }
  6560. //
  6561. // Ensure there is enough commit prior to allocating the pages.
  6562. //
  6563. if (MiChargeCommitment (SizeInPages, NULL) == FALSE) {
  6564. ExFreePool (MemoryDescriptorList);
  6565. return NULL;
  6566. }
  6567. //
  6568. // Allocate a list of colored anchors.
  6569. //
  6570. ColoredPageInfoBase = (PCOLORED_PAGE_INFO) ExAllocatePoolWithTag (
  6571. NonPagedPool,
  6572. MmSecondaryColors * sizeof (COLORED_PAGE_INFO),
  6573. 'ldmM');
  6574. if (ColoredPageInfoBase == NULL) {
  6575. ExFreePool (MemoryDescriptorList);
  6576. MiReturnCommitment (SizeInPages);
  6577. return NULL;
  6578. }
  6579. for (Color = 0; Color < MmSecondaryColors; Color += 1) {
  6580. ColoredPageInfoBase[Color].PfnAllocation = (PMMPFN) MM_EMPTY_LIST;
  6581. ColoredPageInfoBase[Color].PagesQueued = 0;
  6582. }
  6583. MdlPageSpan = SizeInPages;
  6584. //
  6585. // Recalculate the total size while holding the PFN lock.
  6586. //
  6587. start = 0;
  6588. found = 0;
  6589. ZeroCount = 0;
  6590. MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  6591. MmLockPagableSectionByHandle (ExPageLockHandle);
  6592. KeAcquireGuardedMutex (&MmDynamicMemoryMutex);
  6593. LOCK_PFN (OldIrql);
  6594. MiDeferredUnlockPages (MI_DEFER_PFN_HELD);
  6595. MaxPages = MI_NONPAGABLE_MEMORY_AVAILABLE() - 1024;
  6596. if ((SPFN_NUMBER)MaxPages <= 0) {
  6597. SizeInPages = 0;
  6598. }
  6599. else if (SizeInPages > MaxPages) {
  6600. SizeInPages = MaxPages;
  6601. }
  6602. //
  6603. // Systems utilizing memory compression may have more pages on the zero,
  6604. // free and standby lists than we want to give out. Explicitly check
  6605. // MmAvailablePages instead (and recheck whenever the PFN lock is released
  6606. // and reacquired).
  6607. //
  6608. if ((SPFN_NUMBER)SizeInPages > (SPFN_NUMBER)(MmAvailablePages - MM_HIGH_LIMIT)) {
  6609. if (MmAvailablePages > MM_HIGH_LIMIT) {
  6610. SizeInPages = MmAvailablePages - MM_HIGH_LIMIT;
  6611. }
  6612. else {
  6613. SizeInPages = 0;
  6614. }
  6615. }
  6616. if (SizeInPages == 0) {
  6617. UNLOCK_PFN (OldIrql);
  6618. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  6619. MmUnlockPagableImageSection (ExPageLockHandle);
  6620. ExFreePool (MemoryDescriptorList);
  6621. MiReturnCommitment (MdlPageSpan);
  6622. ExFreePool (ColoredPageInfoBase);
  6623. return NULL;
  6624. }
  6625. MM_TRACK_COMMIT (MM_DBG_COMMIT_MDL_PAGES, SizeInPages);
  6626. //
  6627. // Charge resident available pages now for all the pages so the PFN lock
  6628. // can be released between the loops below. Excess charging is returned
  6629. // at the conclusion of the loops.
  6630. //
  6631. InterlockedExchangeAddSizeT (&MmMdlPagesAllocated, SizeInPages);
  6632. MI_DECREMENT_RESIDENT_AVAILABLE (SizeInPages, MM_RESAVAIL_ALLOCATE_FOR_MDL);
  6633. //
  6634. // Grab all zeroed (and then free) pages first directly from the
  6635. // colored lists to avoid multiple walks down these singly linked lists.
  6636. // Then snatch transition pages as needed. In addition to optimizing
  6637. // the speed of the removals this also avoids cannibalizing the page
  6638. // cache unless it's absolutely needed.
  6639. //
  6640. NodePassesLeft = 1;
  6641. ColorCount = MmSecondaryColors;
  6642. BaseColor = 0;
  6643. #if defined(MI_MULTINODE)
  6644. if (KeNumberNodes > 1) {
  6645. PKNODE Node;
  6646. Node = KeGetCurrentNode ();
  6647. if ((Node->FreeCount[ZeroedPageList]) ||
  6648. (Node->FreeCount[FreePageList])) {
  6649. //
  6650. // There are available pages on this node. Restrict search.
  6651. //
  6652. NodePassesLeft = 2;
  6653. ColorCount = MmSecondaryColorMask + 1;
  6654. BaseColor = Node->MmShiftedColor;
  6655. ASSERT(ColorCount == MmSecondaryColors / KeNumberNodes);
  6656. }
  6657. }
  6658. do {
  6659. //
  6660. // Loop: 1st pass restricted to node, 2nd pass unrestricted.
  6661. //
  6662. #endif
  6663. MemoryList = ZeroedPageList;
  6664. do {
  6665. //
  6666. // Scan the zero list and then the free list.
  6667. //
  6668. ASSERT (MemoryList <= FreePageList);
  6669. ListHead = MmPageLocationList[MemoryList];
  6670. //
  6671. // Initialize the loop iteration controls. Clearly pages
  6672. // can be added or removed from the colored lists when we
  6673. // deliberately drop the PFN lock below (just to be a good
  6674. // citizen), but even if we never released the lock, we wouldn't
  6675. // have scanned more than the colorhead count anyway, so
  6676. // this is a much better way to go.
  6677. //
  6678. ColorHeadsDrained = 0;
  6679. PagesExamined = 0;
  6680. ColorHead = &MmFreePagesByColor[MemoryList][BaseColor];
  6681. ColoredPageInfo = &ColoredPageInfoBase[BaseColor];
  6682. for (Color = 0; Color < ColorCount; Color += 1) {
  6683. ASSERT (ColorHead->Count <= MmNumberOfPhysicalPages);
  6684. ColoredPageInfo->PagesLeftToScan = ColorHead->Count;
  6685. if (ColorHead->Count == 0) {
  6686. ColorHeadsDrained += 1;
  6687. }
  6688. ColorHead += 1;
  6689. ColoredPageInfo += 1;
  6690. }
  6691. Color = 0;
  6692. #if defined(MI_MULTINODE)
  6693. Color = (Color & MmSecondaryColorMask) | BaseColor;
  6694. #endif
  6695. ASSERT (Color < MmSecondaryColors);
  6696. do {
  6697. //
  6698. // Scan the current list by color.
  6699. //
  6700. ColorHead = &MmFreePagesByColor[MemoryList][Color];
  6701. ColoredPageInfo = &ColoredPageInfoBase[Color];
  6702. if (NodePassesLeft == 1) {
  6703. //
  6704. // Unrestricted search across all colors.
  6705. //
  6706. Color += 1;
  6707. if (Color >= MmSecondaryColors) {
  6708. Color = 0;
  6709. }
  6710. }
  6711. #if defined(MI_MULTINODE)
  6712. else {
  6713. //
  6714. // Restrict first pass searches to current node.
  6715. //
  6716. ASSERT (NodePassesLeft == 2);
  6717. Color = BaseColor | ((Color + 1) & MmSecondaryColorMask);
  6718. }
  6719. #endif
  6720. if (ColoredPageInfo->PagesLeftToScan == 0) {
  6721. //
  6722. // This colored list has already been completely
  6723. // searched.
  6724. //
  6725. continue;
  6726. }
  6727. if (ColorHead->Flink == MM_EMPTY_LIST) {
  6728. //
  6729. // This colored list is empty.
  6730. //
  6731. ColoredPageInfo->PagesLeftToScan = 0;
  6732. ColorHeadsDrained += 1;
  6733. continue;
  6734. }
  6735. while (ColorHead->Flink != MM_EMPTY_LIST) {
  6736. ASSERT (ColoredPageInfo->PagesLeftToScan != 0);
  6737. ColoredPageInfo->PagesLeftToScan -= 1;
  6738. if (ColoredPageInfo->PagesLeftToScan == 0) {
  6739. ColorHeadsDrained += 1;
  6740. }
  6741. PagesExamined += 1;
  6742. Page = ColorHead->Flink;
  6743. Pfn1 = MI_PFN_ELEMENT(Page);
  6744. ASSERT ((MMLISTS)Pfn1->u3.e1.PageLocation == MemoryList);
  6745. //
  6746. // See if the page is within the caller's page constraints.
  6747. //
  6748. PagePlacementOk = FALSE;
  6749. //
  6750. // Since the caller may do anything with these frames
  6751. // including mapping them uncached or write combined,
  6752. // don't give out frames that are being mapped
  6753. // by (cached) superpages.
  6754. //
  6755. if (Pfn1->u4.MustBeCached == 0) {
  6756. LowPage1 = LowPage;
  6757. HighPage1 = HighPage;
  6758. do {
  6759. if ((Page >= LowPage1) && (Page <= HighPage1)) {
  6760. PagePlacementOk = TRUE;
  6761. break;
  6762. }
  6763. if (SkipPages == 0) {
  6764. break;
  6765. }
  6766. LowPage1 += SkipPages;
  6767. HighPage1 += SkipPages;
  6768. if (HighPage1 > MmHighestPhysicalPage) {
  6769. HighPage1 = MmHighestPhysicalPage;
  6770. }
  6771. } while (LowPage1 <= MmHighestPhysicalPage);
  6772. }
  6773. //
  6774. // The Flink and Blink must be nonzero here for the page
  6775. // to be on the listhead. Only code that scans the
  6776. // MmPhysicalMemoryBlock has to check for the zero case.
  6777. //
  6778. ASSERT (Pfn1->u1.Flink != 0);
  6779. ASSERT (Pfn1->u2.Blink != 0);
  6780. if (PagePlacementOk == FALSE) {
  6781. if (ColoredPageInfo->PagesLeftToScan == 0) {
  6782. //
  6783. // No more pages to scan in this colored chain.
  6784. //
  6785. break;
  6786. }
  6787. //
  6788. // If the colored list has more than one entry then
  6789. // move this page to the end of this colored list.
  6790. //
  6791. PageNextColored = (PFN_NUMBER)Pfn1->OriginalPte.u.Long;
  6792. if (PageNextColored == MM_EMPTY_LIST) {
  6793. //
  6794. // No more pages in this colored chain.
  6795. //
  6796. ColoredPageInfo->PagesLeftToScan = 0;
  6797. ColorHeadsDrained += 1;
  6798. break;
  6799. }
  6800. ASSERT (Pfn1->u1.Flink != 0);
  6801. ASSERT (Pfn1->u1.Flink != MM_EMPTY_LIST);
  6802. ASSERT (Pfn1->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  6803. PfnNextColored = MI_PFN_ELEMENT(PageNextColored);
  6804. ASSERT ((MMLISTS)PfnNextColored->u3.e1.PageLocation == MemoryList);
  6805. ASSERT (PfnNextColored->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  6806. //
  6807. // Adjust the free page list so Page
  6808. // follows PageNextFlink.
  6809. //
  6810. PageNextFlink = Pfn1->u1.Flink;
  6811. PfnNextFlink = MI_PFN_ELEMENT(PageNextFlink);
  6812. ASSERT ((MMLISTS)PfnNextFlink->u3.e1.PageLocation == MemoryList);
  6813. ASSERT (PfnNextFlink->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  6814. PfnLastColored = ColorHead->Blink;
  6815. ASSERT (PfnLastColored != (PMMPFN)MM_EMPTY_LIST);
  6816. ASSERT (PfnLastColored->OriginalPte.u.Long == MM_EMPTY_LIST);
  6817. ASSERT (PfnLastColored->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  6818. ASSERT (PfnLastColored->u2.Blink != MM_EMPTY_LIST);
  6819. ASSERT ((MMLISTS)PfnLastColored->u3.e1.PageLocation == MemoryList);
  6820. PageLastColored = MI_PFN_ELEMENT_TO_INDEX (PfnLastColored);
  6821. if (ListHead->Flink == Page) {
  6822. ASSERT (Pfn1->u2.Blink == MM_EMPTY_LIST);
  6823. ASSERT (ListHead->Blink != Page);
  6824. ListHead->Flink = PageNextFlink;
  6825. PfnNextFlink->u2.Blink = MM_EMPTY_LIST;
  6826. }
  6827. else {
  6828. ASSERT (Pfn1->u2.Blink != MM_EMPTY_LIST);
  6829. ASSERT ((MMLISTS)(MI_PFN_ELEMENT((MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink)))->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  6830. ASSERT ((MMLISTS)(MI_PFN_ELEMENT((MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink)))->u3.e1.PageLocation == MemoryList);
  6831. MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink = PageNextFlink;
  6832. PfnNextFlink->u2.Blink = Pfn1->u2.Blink;
  6833. }
  6834. #if DBG
  6835. if (PfnLastColored->u1.Flink == MM_EMPTY_LIST) {
  6836. ASSERT (ListHead->Blink == PageLastColored);
  6837. }
  6838. #endif
  6839. Pfn1->u1.Flink = PfnLastColored->u1.Flink;
  6840. Pfn1->u2.Blink = PageLastColored;
  6841. if (ListHead->Blink == PageLastColored) {
  6842. ListHead->Blink = Page;
  6843. }
  6844. //
  6845. // Adjust the colored chains.
  6846. //
  6847. if (PfnLastColored->u1.Flink != MM_EMPTY_LIST) {
  6848. ASSERT (MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  6849. ASSERT ((MMLISTS)(MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u3.e1.PageLocation) == MemoryList);
  6850. MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u2.Blink = Page;
  6851. }
  6852. PfnLastColored->u1.Flink = Page;
  6853. ColorHead->Flink = PageNextColored;
  6854. PfnNextColored->u4.PteFrame = MM_EMPTY_LIST;
  6855. Pfn1->OriginalPte.u.Long = MM_EMPTY_LIST;
  6856. Pfn1->u4.PteFrame = PageLastColored;
  6857. ASSERT (PfnLastColored->OriginalPte.u.Long == MM_EMPTY_LIST);
  6858. PfnLastColored->OriginalPte.u.Long = Page;
  6859. ColorHead->Blink = Pfn1;
  6860. continue;
  6861. }
  6862. found += 1;
  6863. ASSERT (Pfn1->u3.e1.ReadInProgress == 0);
  6864. MiUnlinkFreeOrZeroedPage (Pfn1);
  6865. Pfn1->u3.e2.ReferenceCount = 1;
  6866. Pfn1->u2.ShareCount = 1;
  6867. MI_SET_PFN_DELETED(Pfn1);
  6868. Pfn1->u4.PteFrame = MI_MAGIC_AWE_PTEFRAME;
  6869. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  6870. ASSERT (Pfn1->u3.e1.CacheAttribute == MiNotMapped);
  6871. Pfn1->u3.e1.StartOfAllocation = 1;
  6872. Pfn1->u3.e1.EndOfAllocation = 1;
  6873. Pfn1->u4.VerifierAllocation = 0;
  6874. Pfn1->u3.e1.LargeSessionAllocation = 0;
  6875. //
  6876. // Add free pages to the list of pages to be
  6877. // zeroed before returning.
  6878. //
  6879. if (MemoryList == FreePageList) {
  6880. Pfn1->OriginalPte.u.Long = (ULONG_PTR) ColoredPageInfo->PfnAllocation;
  6881. ColoredPageInfo->PfnAllocation = Pfn1;
  6882. ColoredPageInfo->PagesQueued += 1;
  6883. ZeroCount += 1;
  6884. }
  6885. else {
  6886. Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  6887. }
  6888. *MdlPage = Page;
  6889. MdlPage += 1;
  6890. if (found == SizeInPages) {
  6891. //
  6892. // All the pages requested are available.
  6893. //
  6894. #if DBG
  6895. FinishedCount = 0;
  6896. for (Color = 0; Color < ColorCount; Color += 1) {
  6897. if (ColoredPageInfoBase[Color + BaseColor].PagesLeftToScan == 0) {
  6898. FinishedCount += 1;
  6899. }
  6900. }
  6901. ASSERT (FinishedCount == ColorHeadsDrained);
  6902. #endif
  6903. goto pass2_done;
  6904. }
  6905. //
  6906. // March on to the next colored chain so the overall
  6907. // allocation round-robins the page colors.
  6908. //
  6909. PagesExamined = PAGE_SIZE;
  6910. break;
  6911. }
  6912. //
  6913. // If we've held the PFN lock for a while, release it to
  6914. // give DPCs and other processors a chance to run.
  6915. //
  6916. if (PagesExamined >= PAGE_SIZE) {
  6917. UNLOCK_PFN (OldIrql);
  6918. PagesExamined = 0;
  6919. LOCK_PFN (OldIrql);
  6920. }
  6921. //
  6922. // Systems utilizing memory compression may have more
  6923. // pages on the zero, free and standby lists than we
  6924. // want to give out. The same is true when machines
  6925. // are low on memory - we don't want this thread to gobble
  6926. // up the pages from every modified write that completes
  6927. // because that would starve waiting threads.
  6928. //
  6929. // Explicitly check MmAvailablePages instead (and recheck
  6930. // whenever the PFN lock is released and reacquired).
  6931. //
  6932. if (MmAvailablePages < MM_HIGH_LIMIT) {
  6933. goto pass2_done;
  6934. }
  6935. } while (ColorHeadsDrained != ColorCount);
  6936. //
  6937. // Release the PFN lock to give DPCs and other processors
  6938. // a chance to run. Nothing magic about the instructions
  6939. // between the unlock and the relock.
  6940. //
  6941. UNLOCK_PFN (OldIrql);
  6942. #if DBG
  6943. FinishedCount = 0;
  6944. for (Color = 0; Color < ColorCount; Color += 1) {
  6945. if (ColoredPageInfoBase[Color + BaseColor].PagesLeftToScan == 0) {
  6946. FinishedCount += 1;
  6947. }
  6948. }
  6949. ASSERT (FinishedCount == ColorHeadsDrained);
  6950. #endif
  6951. MemoryList += 1;
  6952. LOCK_PFN (OldIrql);
  6953. } while (MemoryList <= FreePageList);
  6954. #if defined(MI_MULTINODE)
  6955. //
  6956. // Expand range to all colors for next pass.
  6957. //
  6958. ColorCount = MmSecondaryColors;
  6959. BaseColor = 0;
  6960. NodePassesLeft -= 1;
  6961. } while (NodePassesLeft != 0);
  6962. #endif
  6963. //
  6964. // Briefly release the PFN lock to give DPCs and other processors
  6965. // a time slice.
  6966. //
  6967. UNLOCK_PFN (OldIrql);
  6968. LOCK_PFN (OldIrql);
  6969. //
  6970. // Walk the transition list looking for pages satisfying the
  6971. // constraints as walking the physical memory block can be draining.
  6972. //
  6973. for (Page = MmStandbyPageListHead.Flink; Page != MM_EMPTY_LIST; Page = NextPage) {
  6974. //
  6975. // Systems utilizing memory compression may have more
  6976. // pages on the zero, free and standby lists than we
  6977. // want to give out. The same is true when machines
  6978. // are low on memory - we don't want this thread to gobble
  6979. // up the pages from every modified write that completes
  6980. // because that would starve waiting threads.
  6981. //
  6982. // Explicitly check MmAvailablePages instead (and recheck whenever
  6983. // the PFN lock is released and reacquired).
  6984. //
  6985. if (MmAvailablePages < MM_HIGH_LIMIT) {
  6986. break;
  6987. }
  6988. Pfn1 = MI_PFN_ELEMENT (Page);
  6989. NextPage = Pfn1->u1.Flink;
  6990. //
  6991. // Since the caller may do anything with these frames including
  6992. // mapping them uncached or write combined, don't give out frames
  6993. // that are being mapped by (cached) superpages.
  6994. //
  6995. if (Pfn1->u4.MustBeCached == 1) {
  6996. continue;
  6997. }
  6998. LowPage1 = LowPage;
  6999. HighPage1 = HighPage;
  7000. PagePlacementOk = FALSE;
  7001. do {
  7002. if ((Page >= LowPage1) && (Page <= HighPage1)) {
  7003. PagePlacementOk = TRUE;
  7004. break;
  7005. }
  7006. if (SkipPages == 0) {
  7007. break;
  7008. }
  7009. LowPage1 += SkipPages;
  7010. HighPage1 += SkipPages;
  7011. if (HighPage1 > MmHighestPhysicalPage) {
  7012. HighPage1 = MmHighestPhysicalPage;
  7013. }
  7014. } while (LowPage1 <= MmHighestPhysicalPage);
  7015. if (PagePlacementOk == TRUE) {
  7016. ASSERT (Pfn1->u3.e1.ReadInProgress == 0);
  7017. found += 1;
  7018. //
  7019. // This page is in the desired range - grab it.
  7020. //
  7021. MiUnlinkPageFromList (Pfn1);
  7022. ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
  7023. MiRestoreTransitionPte (Pfn1);
  7024. Pfn1->u3.e2.ReferenceCount = 1;
  7025. Pfn1->u2.ShareCount = 1;
  7026. MI_SET_PFN_DELETED(Pfn1);
  7027. Pfn1->u4.PteFrame = MI_MAGIC_AWE_PTEFRAME;
  7028. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  7029. ASSERT (Pfn1->u3.e1.CacheAttribute == MiNotMapped);
  7030. Pfn1->u3.e1.StartOfAllocation = 1;
  7031. Pfn1->u3.e1.EndOfAllocation = 1;
  7032. Pfn1->u4.VerifierAllocation = 0;
  7033. Pfn1->u3.e1.LargeSessionAllocation = 0;
  7034. //
  7035. // Add standby pages to the list of pages to be
  7036. // zeroed before returning.
  7037. //
  7038. Color = MI_GET_COLOR_FROM_LIST_ENTRY (Page, Pfn1);
  7039. ColoredPageInfo = &ColoredPageInfoBase[Color];
  7040. Pfn1->OriginalPte.u.Long = (ULONG_PTR) ColoredPageInfo->PfnAllocation;
  7041. ColoredPageInfo->PfnAllocation = Pfn1;
  7042. ColoredPageInfo->PagesQueued += 1;
  7043. ZeroCount += 1;
  7044. *MdlPage = Page;
  7045. MdlPage += 1;
  7046. if (found == SizeInPages) {
  7047. //
  7048. // All the pages requested are available.
  7049. //
  7050. break;
  7051. }
  7052. }
  7053. }
  7054. pass2_done:
  7055. //
  7056. // The full amount was charged up front - remove any excess now.
  7057. //
  7058. UNLOCK_PFN (OldIrql);
  7059. MI_INCREMENT_RESIDENT_AVAILABLE (SizeInPages - found, MM_RESAVAIL_FREE_FOR_MDL_EXCESS);
  7060. InterlockedExchangeAddSizeT (&MmMdlPagesAllocated, 0 - (SizeInPages - found));
  7061. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  7062. MmUnlockPagableImageSection (ExPageLockHandle);
  7063. if (found != MdlPageSpan) {
  7064. ASSERT (found < MdlPageSpan);
  7065. MiReturnCommitment (MdlPageSpan - found);
  7066. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_AWE_EXCESS, MdlPageSpan - found);
  7067. }
  7068. if (found == 0) {
  7069. ExFreePool (ColoredPageInfoBase);
  7070. ExFreePool (MemoryDescriptorList);
  7071. return NULL;
  7072. }
  7073. if (ZeroCount != 0) {
  7074. //
  7075. // Zero all the free & standby pages, fanning out the work. This
  7076. // is done even on UP machines because the worker thread code maps
  7077. // large MDLs and is thus better performing than zeroing a single
  7078. // page at a time.
  7079. //
  7080. MiZeroInParallel (ColoredPageInfoBase);
  7081. //
  7082. // Denote that no pages are left to be zeroed because in addition
  7083. // to zeroing them, we have reset all their OriginalPte fields
  7084. // to demand zero so they cannot be walked by the zeroing loop
  7085. // below.
  7086. //
  7087. ZeroCount = 0;
  7088. }
  7089. ExFreePool (ColoredPageInfoBase);
  7090. MemoryDescriptorList->ByteCount = (ULONG)(found << PAGE_SHIFT);
  7091. if (found != SizeInPages) {
  7092. *MdlPage = MM_EMPTY_LIST;
  7093. }
  7094. //
  7095. // If the number of pages allocated was substantially less than the
  7096. // initial request amount, attempt to allocate a smaller MDL to save
  7097. // pool.
  7098. //
  7099. if ((MdlPageSpan - found) > ((4 * PAGE_SIZE) / sizeof (PFN_NUMBER))) {
  7100. MemoryDescriptorList2 = MmCreateMdl ((PMDL)0,
  7101. (PVOID)0,
  7102. found << PAGE_SHIFT);
  7103. if (MemoryDescriptorList2 != NULL) {
  7104. RtlCopyMemory ((PVOID)(MemoryDescriptorList2 + 1),
  7105. (PVOID)(MemoryDescriptorList + 1),
  7106. found * sizeof (PFN_NUMBER));
  7107. ExFreePool (MemoryDescriptorList);
  7108. MemoryDescriptorList = MemoryDescriptorList2;
  7109. }
  7110. }
  7111. #if DBG
  7112. //
  7113. // Ensure all pages are within the caller's page constraints.
  7114. //
  7115. MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  7116. LastMdlPage = MdlPage + found;
  7117. LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);
  7118. HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
  7119. Process = PsGetCurrentProcess ();
  7120. MmLockPagableSectionByHandle (ExPageLockHandle);
  7121. while (MdlPage < LastMdlPage) {
  7122. Page = *MdlPage;
  7123. PagePlacementOk = FALSE;
  7124. LowPage1 = LowPage;
  7125. HighPage1 = HighPage;
  7126. do {
  7127. if ((Page >= LowPage1) && (Page <= HighPage1)) {
  7128. PagePlacementOk = TRUE;
  7129. break;
  7130. }
  7131. if (SkipPages == 0) {
  7132. break;
  7133. }
  7134. LowPage1 += SkipPages;
  7135. HighPage1 += SkipPages;
  7136. if (LowPage1 > MmHighestPhysicalPage) {
  7137. break;
  7138. }
  7139. if (HighPage1 > MmHighestPhysicalPage) {
  7140. HighPage1 = MmHighestPhysicalPage;
  7141. }
  7142. } while (TRUE);
  7143. #if 0
  7144. //
  7145. // Make sure page really is zero.
  7146. //
  7147. VirtualAddress = MiMapPageInHyperSpace (Process, Page, &OldIrql);
  7148. ASSERT (RtlCompareMemoryUlong (VirtualAddress, PAGE_SIZE, 0) == PAGE_SIZE);
  7149. MiUnmapPageInHyperSpace (Process, VirtualAddress, OldIrql);
  7150. #endif
  7151. ASSERT (PagePlacementOk == TRUE);
  7152. Pfn1 = MI_PFN_ELEMENT(*MdlPage);
  7153. ASSERT (Pfn1->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME);
  7154. MdlPage += 1;
  7155. }
  7156. MmUnlockPagableImageSection (ExPageLockHandle);
  7157. #endif
  7158. //
7159. // Mark the MDL's pages as locked so the kernel-mode caller can
  7160. // map the MDL using MmMapLockedPages* without asserting.
  7161. //
  7162. MemoryDescriptorList->MdlFlags |= MDL_PAGES_LOCKED;
  7163. return MemoryDescriptorList;
  7164. }
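//
// Illustrative usage sketch (not part of the original source, disabled via
// #if 0): how a driver might call MmAllocatePagesForMdl and cope with a
// partial allocation. MiExampleAllocateLowPages is a hypothetical name and
// the 256mb range / 64mb request are arbitrary example values.
//
#if 0
PMDL
MiExampleAllocateLowPages (
    VOID
    )
{
    PMDL Mdl;
    PHYSICAL_ADDRESS LowAddress;
    PHYSICAL_ADDRESS HighAddress;
    PHYSICAL_ADDRESS SkipBytes;

    LowAddress.QuadPart = 0;
    HighAddress.QuadPart = 256 * 1024 * 1024 - 1;
    SkipBytes.QuadPart = 0;

    Mdl = MmAllocatePagesForMdl (LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 64 * 1024 * 1024);

    if (Mdl == NULL) {
        return NULL;
    }

    //
    // The MDL may describe fewer bytes than requested, so check the
    // ByteCount rather than assuming the full amount was obtained.
    //
    if (Mdl->ByteCount < 64 * 1024 * 1024) {
        MmFreePagesFromMdl (Mdl);
        ExFreePool (Mdl);
        return NULL;
    }

    return Mdl;
}
#endif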
  7165. VOID
  7166. MmFreePagesFromMdl (
  7167. IN PMDL MemoryDescriptorList
  7168. )
  7169. /*++
  7170. Routine Description:
  7171. This routine walks the argument MDL freeing each physical page back to
  7172. the PFN database. This is designed to free pages acquired via
  7173. MmAllocatePagesForMdl only.
  7174. Arguments:
  7175. MemoryDescriptorList - Supplies an MDL which contains the pages to be freed.
  7176. Return Value:
  7177. None.
  7178. Environment:
  7179. Kernel mode, IRQL of APC_LEVEL or below.
  7180. --*/
  7181. {
  7182. PMMPFN Pfn1;
  7183. KIRQL OldIrql;
  7184. PVOID StartingAddress;
  7185. PVOID AlignedVa;
  7186. PPFN_NUMBER Page;
  7187. PFN_NUMBER NumberOfPages;
  7188. PFN_NUMBER TotalPages;
  7189. PFN_NUMBER DeltaPages;
  7190. LONG EntryCount;
  7191. LONG OriginalCount;
  7192. ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
  7193. DeltaPages = 0;
  7194. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  7195. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0);
  7196. ASSERT (((ULONG_PTR)MemoryDescriptorList->StartVa & (PAGE_SIZE - 1)) == 0);
  7197. AlignedVa = (PVOID)MemoryDescriptorList->StartVa;
  7198. StartingAddress = (PVOID)((PCHAR)AlignedVa +
  7199. MemoryDescriptorList->ByteOffset);
  7200. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingAddress,
  7201. MemoryDescriptorList->ByteCount);
  7202. TotalPages = NumberOfPages;
  7203. MI_MAKING_MULTIPLE_PTES_INVALID (TRUE);
  7204. MmLockPagableSectionByHandle (ExPageLockHandle);
  7205. LOCK_PFN (OldIrql);
  7206. do {
  7207. if (*Page == MM_EMPTY_LIST) {
  7208. //
  7209. // There are no more locked pages.
  7210. //
  7211. break;
  7212. }
  7213. ASSERT (MI_IS_PFN (*Page));
  7214. ASSERT (*Page <= MmHighestPhysicalPage);
  7215. Pfn1 = MI_PFN_ELEMENT (*Page);
  7216. ASSERT (Pfn1->u2.ShareCount == 1);
  7217. ASSERT (MI_IS_PFN_DELETED (Pfn1) == TRUE);
  7218. ASSERT (MI_PFN_IS_AWE (Pfn1) == TRUE);
  7219. ASSERT (Pfn1->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME);
  7220. Pfn1->u3.e1.StartOfAllocation = 0;
  7221. Pfn1->u3.e1.EndOfAllocation = 0;
  7222. Pfn1->u2.ShareCount = 0;
  7223. #if DBG
  7224. Pfn1->u4.PteFrame -= 1;
  7225. Pfn1->u3.e1.PageLocation = StandbyPageList;
  7226. #endif
  7227. if (Pfn1->u4.AweAllocation == 1) {
  7228. do {
  7229. EntryCount = Pfn1->AweReferenceCount;
  7230. ASSERT ((LONG)EntryCount > 0);
  7231. ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
  7232. OriginalCount = InterlockedCompareExchange (&Pfn1->AweReferenceCount,
  7233. EntryCount - 1,
  7234. EntryCount);
  7235. if (OriginalCount == EntryCount) {
  7236. //
  7237. // This thread can be racing against other threads
  7238. // calling MmUnlockPages. All threads can safely do
  7239. // interlocked decrements on the "AWE reference count".
  7240. // Whichever thread drives it to zero is responsible for
  7241. // decrementing the actual PFN reference count (which may
  7242. // be greater than 1 due to other non-AWE API calls being
  7243. // used on the same page). The thread that drives this
  7244. // reference count to zero must put the page on the actual
  7245. // freelist at that time and decrement various resident
  7246. // available and commitment counters also.
  7247. //
  7248. if (OriginalCount == 1) {
  7249. //
  7250. // This thread has driven the AWE reference count to
  7251. // zero so it must initiate a decrement of the PFN
  7252. // reference count (while holding the PFN lock), etc.
  7253. //
  7254. // This path should be the frequent one since typically
  7255. // I/Os complete before these types of pages are
  7256. // freed by the app.
  7257. //
  7258. MiDecrementReferenceCountForAwePage (Pfn1, TRUE);
  7259. }
  7260. break;
  7261. }
  7262. } while (TRUE);
  7263. }
  7264. else {
  7265. MiDecrementReferenceCountInline (Pfn1, *Page);
  7266. DeltaPages += 1;
  7267. }
  7268. *Page++ = MM_EMPTY_LIST;
  7269. //
  7270. // Nothing magic about the divisor here - just releasing the PFN lock
  7271. // periodically to allow other processors and DPCs a chance to execute.
  7272. //
  7273. if ((NumberOfPages & 0xF) == 0) {
  7274. UNLOCK_PFN (OldIrql);
  7275. LOCK_PFN (OldIrql);
  7276. }
  7277. NumberOfPages -= 1;
  7278. } while (NumberOfPages != 0);
  7279. UNLOCK_PFN (OldIrql);
  7280. MmUnlockPagableImageSection (ExPageLockHandle);
  7281. if (DeltaPages != 0) {
  7282. MI_INCREMENT_RESIDENT_AVAILABLE (DeltaPages, MM_RESAVAIL_FREE_FROM_MDL);
  7283. InterlockedExchangeAddSizeT (&MmMdlPagesAllocated, 0 - DeltaPages);
  7284. MiReturnCommitment (DeltaPages);
  7285. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_MDL_PAGES, DeltaPages);
  7286. }
  7287. MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
  7288. }
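//
// Illustrative sketch (not part of the original source, disabled via #if 0)
// of the lock-free release pattern used on AweReferenceCount above: snapshot
// the count, attempt an interlocked compare exchange, retry on collision,
// and let only the thread that drives the count from 1 to 0 perform the
// final teardown. MiExampleDereference and ReferenceCount are hypothetical.
//
#if 0
VOID
MiExampleDereference (
    IN PLONG ReferenceCount
    )
{
    LONG EntryCount;
    LONG OriginalCount;

    do {
        EntryCount = *ReferenceCount;
        ASSERT (EntryCount > 0);

        OriginalCount = InterlockedCompareExchange (ReferenceCount,
                                                    EntryCount - 1,
                                                    EntryCount);

        if (OriginalCount == EntryCount) {

            if (OriginalCount == 1) {

                //
                // This thread drove the count to zero so only it may
                // tear the object down.
                //
            }
            break;
        }

        //
        // Another thread changed the count in the window - retry.
        //

    } while (TRUE);
}
#endif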
  7289. NTSTATUS
  7290. MmMapUserAddressesToPage (
  7291. IN PVOID BaseAddress,
  7292. IN SIZE_T NumberOfBytes,
  7293. IN PVOID PageAddress
  7294. )
  7295. /*++
  7296. Routine Description:
  7297. This function maps a range of addresses in a physical memory VAD to the
  7298. specified page address. This is typically used by a driver to nicely
  7299. remove an application's access to things like video memory when the
  7300. application is not responding to requests to relinquish it.
7301. // Note the entire range must be currently mapped (i.e., all the PTEs must
  7302. be valid) by the caller.
  7303. Arguments:
  7304. BaseAddress - Supplies the base virtual address where the physical
  7305. address is mapped.
  7306. NumberOfBytes - Supplies the number of bytes to remap to the new address.
  7307. PageAddress - Supplies the virtual address of the page this is remapped to.
  7308. This must be nonpaged memory.
  7309. Return Value:
  7310. Various NTSTATUS codes.
  7311. Environment:
  7312. Kernel mode, IRQL of APC_LEVEL or below.
  7313. --*/
  7314. {
  7315. PMMVAD Vad;
  7316. PMMPTE PointerPte;
  7317. MMPTE PteContents;
  7318. PMMPTE LastPte;
  7319. PEPROCESS Process;
  7320. NTSTATUS Status;
  7321. PVOID EndingAddress;
  7322. PFN_NUMBER PageFrameNumber;
  7323. SIZE_T NumberOfPtes;
  7324. PHYSICAL_ADDRESS PhysicalAddress;
  7325. KIRQL OldIrql;
  7326. PAGED_CODE();
  7327. if (BaseAddress > MM_HIGHEST_USER_ADDRESS) {
  7328. return STATUS_INVALID_PARAMETER_1;
  7329. }
  7330. if ((ULONG_PTR)BaseAddress + NumberOfBytes > (ULONG64)MM_HIGHEST_USER_ADDRESS) {
  7331. return STATUS_INVALID_PARAMETER_2;
  7332. }
  7333. Process = PsGetCurrentProcess();
  7334. EndingAddress = (PVOID)((PCHAR)BaseAddress + NumberOfBytes - 1);
  7335. LOCK_ADDRESS_SPACE (Process);
  7336. //
  7337. // Make sure the address space was not deleted.
  7338. //
  7339. if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
  7340. Status = STATUS_PROCESS_IS_TERMINATING;
  7341. goto ErrorReturn;
  7342. }
  7343. Vad = (PMMVAD)MiLocateAddress (BaseAddress);
  7344. if (Vad == NULL) {
  7345. //
  7346. // No virtual address descriptor located.
  7347. //
  7348. Status = STATUS_MEMORY_NOT_ALLOCATED;
  7349. goto ErrorReturn;
  7350. }
  7351. if (NumberOfBytes == 0) {
  7352. //
  7353. // If the region size is specified as 0, the base address
  7354. // must be the starting address for the region. The entire VAD
  7355. // will then be repointed.
  7356. //
  7357. if (MI_VA_TO_VPN (BaseAddress) != Vad->StartingVpn) {
  7358. Status = STATUS_FREE_VM_NOT_AT_BASE;
  7359. goto ErrorReturn;
  7360. }
  7361. BaseAddress = MI_VPN_TO_VA (Vad->StartingVpn);
  7362. EndingAddress = MI_VPN_TO_VA_ENDING (Vad->EndingVpn);
  7363. NumberOfBytes = (PCHAR)EndingAddress - (PCHAR)BaseAddress + 1;
  7364. }
  7365. //
  7366. // Found the associated virtual address descriptor.
  7367. //
  7368. if (Vad->EndingVpn < MI_VA_TO_VPN (EndingAddress)) {
  7369. //
  7370. // The entire range to remap is not contained within a single
  7371. // virtual address descriptor. Return an error.
  7372. //
  7373. Status = STATUS_INVALID_PARAMETER_2;
  7374. goto ErrorReturn;
  7375. }
  7376. if (Vad->u.VadFlags.PhysicalMapping == 0) {
  7377. //
  7378. // The virtual address descriptor is not a physical mapping.
  7379. //
  7380. Status = STATUS_INVALID_ADDRESS;
  7381. goto ErrorReturn;
  7382. }
  7383. PointerPte = MiGetPteAddress (BaseAddress);
  7384. LastPte = MiGetPteAddress (EndingAddress);
  7385. NumberOfPtes = LastPte - PointerPte + 1;
  7386. //
  7387. // Lock down because the PFN lock is going to be acquired shortly.
  7388. //
  7389. MmLockPagableSectionByHandle(ExPageLockHandle);
  7390. LOCK_WS_UNSAFE (Process);
  7391. PhysicalAddress = MmGetPhysicalAddress (PageAddress);
  7392. PageFrameNumber = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
  7393. PteContents = *PointerPte;
  7394. PteContents.u.Hard.PageFrameNumber = PageFrameNumber;
  7395. #if DBG
  7396. //
  7397. // All the PTEs must be valid or the filling will corrupt the
  7398. // UsedPageTableCounts.
  7399. //
  7400. do {
  7401. ASSERT (PointerPte->u.Hard.Valid == 1);
  7402. PointerPte += 1;
7403. } while (PointerPte <= LastPte);
  7404. PointerPte = MiGetPteAddress (BaseAddress);
  7405. #endif
  7406. //
  7407. // Fill the PTEs and flush at the end - no race here because it doesn't
  7408. // matter whether the user app sees the old or the new data until we
7409. // return (writes going to either page are acceptable prior to return
  7410. // from this function). There is no race with I/O and ProbeAndLockPages
  7411. // because the PFN lock is acquired here.
  7412. //
  7413. LOCK_PFN (OldIrql);
  7414. #if !defined (_X86PAE_)
  7415. MiFillMemoryPte (PointerPte, NumberOfPtes, PteContents.u.Long);
  7416. #else
  7417. //
7418. // Note that PAE PTEs are 64 bits wide and cannot be updated atomically with a single 32-bit store, so each one must be filled with an interlocked exchange.
  7419. //
  7420. do {
  7421. ASSERT (PointerPte->u.Hard.Valid == 1);
7422. InterlockedExchangePte (PointerPte, PteContents.u.Long);
7423. PointerPte += 1;
7424. } while (PointerPte <= LastPte);
  7425. PointerPte = MiGetPteAddress (BaseAddress);
  7426. #endif
  7427. if (NumberOfPtes == 1) {
  7428. KeFlushSingleTb (BaseAddress, FALSE);
  7429. }
  7430. else {
  7431. KeFlushProcessTb (FALSE);
  7432. }
  7433. UNLOCK_PFN (OldIrql);
  7434. UNLOCK_WS_UNSAFE (Process);
  7435. MmUnlockPagableImageSection (ExPageLockHandle);
  7436. Status = STATUS_SUCCESS;
  7437. ErrorReturn:
  7438. UNLOCK_ADDRESS_SPACE (Process);
  7439. return Status;
  7440. }
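//
// Illustrative usage sketch (not part of the original source, disabled via
// #if 0): a video driver revoking an unresponsive application's view of the
// frame buffer by pointing the whole user range at a private dummy page.
// MiExampleRevokeUserView and its parameters are hypothetical; the dummy
// page must be nonpaged, per the routine's contract.
//
#if 0
NTSTATUS
MiExampleRevokeUserView (
    IN PVOID UserFrameBuffer,
    IN SIZE_T ViewSize,
    IN PVOID NonPagedDummyPage
    )
{
    //
    // Must be called in the context of the process owning the mapping,
    // at APC_LEVEL or below.
    //
    return MmMapUserAddressesToPage (UserFrameBuffer,
                                     ViewSize,
                                     NonPagedDummyPage);
}
#endif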
  7441. PHYSICAL_ADDRESS
  7442. MmGetPhysicalAddress (
  7443. IN PVOID BaseAddress
  7444. )
  7445. /*++
  7446. Routine Description:
  7447. This function returns the corresponding physical address for a
  7448. valid virtual address.
  7449. Arguments:
  7450. BaseAddress - Supplies the virtual address for which to return the
  7451. physical address.
  7452. Return Value:
  7453. Returns the corresponding physical address.
  7454. Environment:
  7455. Kernel mode. Any IRQL level.
  7456. --*/
  7457. {
  7458. PMMPTE PointerPte;
  7459. PHYSICAL_ADDRESS PhysicalAddress;
  7460. if (MI_IS_PHYSICAL_ADDRESS(BaseAddress)) {
  7461. PhysicalAddress.QuadPart = MI_CONVERT_PHYSICAL_TO_PFN (BaseAddress);
  7462. }
  7463. else {
  7464. #if (_MI_PAGING_LEVELS>=4)
  7465. PointerPte = MiGetPxeAddress (BaseAddress);
  7466. if (PointerPte->u.Hard.Valid == 0) {
  7467. KdPrint(("MM:MmGetPhysicalAddressFailed base address was %p",
  7468. BaseAddress));
  7469. ZERO_LARGE (PhysicalAddress);
  7470. return PhysicalAddress;
  7471. }
  7472. #endif
  7473. #if (_MI_PAGING_LEVELS>=3)
  7474. PointerPte = MiGetPpeAddress (BaseAddress);
  7475. if (PointerPte->u.Hard.Valid == 0) {
  7476. KdPrint(("MM:MmGetPhysicalAddressFailed base address was %p",
  7477. BaseAddress));
  7478. ZERO_LARGE (PhysicalAddress);
  7479. return PhysicalAddress;
  7480. }
  7481. #endif
  7482. PointerPte = MiGetPdeAddress (BaseAddress);
  7483. if (PointerPte->u.Hard.Valid == 0) {
  7484. KdPrint(("MM:MmGetPhysicalAddressFailed base address was %p",
  7485. BaseAddress));
  7486. ZERO_LARGE (PhysicalAddress);
  7487. return PhysicalAddress;
  7488. }
  7489. if (MI_PDE_MAPS_LARGE_PAGE (PointerPte)) {
  7490. PhysicalAddress.QuadPart = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte) +
  7491. MiGetPteOffset (BaseAddress);
  7492. }
  7493. else {
  7494. PointerPte = MiGetPteAddress (BaseAddress);
  7495. if (PointerPte->u.Hard.Valid == 0) {
  7496. KdPrint(("MM:MmGetPhysicalAddressFailed base address was %p",
  7497. BaseAddress));
  7498. ZERO_LARGE (PhysicalAddress);
  7499. return PhysicalAddress;
  7500. }
  7501. PhysicalAddress.QuadPart = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  7502. }
  7503. }
  7504. PhysicalAddress.QuadPart = PhysicalAddress.QuadPart << PAGE_SHIFT;
  7505. PhysicalAddress.LowPart += BYTE_OFFSET(BaseAddress);
  7506. return PhysicalAddress;
  7507. }
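//
// Worked example (not part of the original source): with 4K pages
// (PAGE_SHIFT == 12), a valid virtual address whose PTE holds page frame
// 0x1234 and whose byte offset is 0x567 yields:
//
//     PhysicalAddress.QuadPart = (0x1234 << 12) + 0x567 = 0x1234567
//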
  7508. PVOID
  7509. MmGetVirtualForPhysical (
  7510. IN PHYSICAL_ADDRESS PhysicalAddress
  7511. )
  7512. /*++
  7513. Routine Description:
  7514. This function returns the corresponding virtual address for a physical
  7515. address whose primary virtual address is in system space.
  7516. Arguments:
  7517. PhysicalAddress - Supplies the physical address for which to return the
  7518. virtual address.
  7519. Return Value:
  7520. Returns the corresponding virtual address.
  7521. Environment:
  7522. Kernel mode. Any IRQL level.
  7523. --*/
  7524. {
  7525. PFN_NUMBER PageFrameIndex;
  7526. PMMPFN Pfn;
  7527. PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
  7528. Pfn = MI_PFN_ELEMENT (PageFrameIndex);
  7529. return (PVOID)((PCHAR)MiGetVirtualAddressMappedByPte (Pfn->PteAddress) +
  7530. BYTE_OFFSET (PhysicalAddress.LowPart));
  7531. }
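//
// Worked example (not part of the original source): for physical address
// 0x1234567 with 4K pages, the page frame is 0x1234567 >> 12 = 0x1234.
// The routine returns the virtual address mapped by that frame's owning
// PTE (looked up through the PFN database) plus the byte offset 0x567.
//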
  7532. //
  7533. // Nonpaged helper routine.
  7534. //
  7535. VOID
  7536. MiMarkMdlPageAttributes (
  7537. IN PMDL Mdl,
  7538. IN PFN_NUMBER NumberOfPages,
  7539. IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
  7540. )
  7541. {
  7542. PMMPFN Pfn1;
  7543. PFN_NUMBER PageFrameIndex;
  7544. PPFN_NUMBER Page;
  7545. Page = (PPFN_NUMBER)(Mdl + 1);
  7546. do {
  7547. PageFrameIndex = *Page;
  7548. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  7549. ASSERT (Pfn1->u3.e1.CacheAttribute == MiNotMapped);
  7550. Pfn1->u3.e1.CacheAttribute = CacheAttribute;
  7551. Page += 1;
  7552. NumberOfPages -= 1;
  7553. } while (NumberOfPages != 0);
  7554. }
  7555. PVOID
  7556. MmAllocateNonCachedMemory (
  7557. IN SIZE_T NumberOfBytes
  7558. )
  7559. /*++
  7560. Routine Description:
  7561. This function allocates a range of noncached memory in
  7562. the non-paged portion of the system address space.
  7563. This routine is designed to be used by a driver's initialization
  7564. routine to allocate a noncached block of virtual memory for
  7565. various device specific buffers.
  7566. Arguments:
  7567. NumberOfBytes - Supplies the number of bytes to allocate.
  7568. Return Value:
  7569. NON-NULL - Returns a pointer (virtual address in the nonpaged portion
  7570. of the system) to the allocated physically contiguous
  7571. memory.
  7572. NULL - The specified request could not be satisfied.
  7573. Environment:
  7574. Kernel mode, IRQL of APC_LEVEL or below.
  7575. --*/
  7576. {
  7577. PPFN_NUMBER Page;
  7578. PMMPTE PointerPte;
  7579. MMPTE TempPte;
  7580. PFN_NUMBER NumberOfPages;
  7581. PFN_NUMBER PageFrameIndex;
  7582. PMDL Mdl;
  7583. PVOID BaseAddress;
  7584. PHYSICAL_ADDRESS LowAddress;
  7585. PHYSICAL_ADDRESS HighAddress;
  7586. PHYSICAL_ADDRESS SkipBytes;
  7587. PFN_NUMBER NumberOfPagesAllocated;
  7588. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  7589. ASSERT (NumberOfBytes != 0);
  7590. #if defined (_WIN64)
  7591. //
  7592. // Maximum allocation size is constrained by the MDL ByteCount field.
  7593. //
  7594. if (NumberOfBytes >= _4gb) {
  7595. return NULL;
  7596. }
  7597. #endif
  7598. NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);
  7599. //
  7600. // Even though an MDL is not needed per se, it is much more convenient
  7601. // to use the routine below because it checks for things like appropriate
  7602. // cachability of the pages, etc. Note that the MDL returned may map
  7603. // fewer pages than requested - check for this and if so, return NULL.
  7604. //
  7605. LowAddress.QuadPart = 0;
  7606. HighAddress.QuadPart = (ULONGLONG)-1;
  7607. SkipBytes.QuadPart = 0;
  7608. Mdl = MmAllocatePagesForMdl (LowAddress,
  7609. HighAddress,
  7610. SkipBytes,
  7611. NumberOfBytes);
  7612. if (Mdl == NULL) {
  7613. return NULL;
  7614. }
  7615. BaseAddress = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);
  7616. NumberOfPagesAllocated = ADDRESS_AND_SIZE_TO_SPAN_PAGES (BaseAddress, Mdl->ByteCount);
  7617. if (NumberOfPages != NumberOfPagesAllocated) {
  7618. ASSERT (NumberOfPages > NumberOfPagesAllocated);
  7619. MmFreePagesFromMdl (Mdl);
  7620. ExFreePool (Mdl);
  7621. return NULL;
  7622. }
  7623. //
  7624. // Obtain enough virtual space to map the pages. Add an extra PTE so the
  7625. // MDL can be stashed now and retrieved on release.
  7626. //
  7627. PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages + 1, SystemPteSpace);
  7628. if (PointerPte == NULL) {
  7629. MmFreePagesFromMdl (Mdl);
  7630. ExFreePool (Mdl);
  7631. return NULL;
  7632. }
  7633. *(PMDL *)PointerPte = Mdl;
  7634. PointerPte += 1;
  7635. BaseAddress = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
  7636. Page = (PPFN_NUMBER)(Mdl + 1);
  7637. MI_MAKE_VALID_PTE (TempPte,
  7638. 0,
  7639. MM_READWRITE,
  7640. PointerPte);
  7641. MI_SET_PTE_DIRTY (TempPte);
  7642. CacheAttribute = MI_TRANSLATE_CACHETYPE (MmNonCached, FALSE);
  7643. switch (CacheAttribute) {
  7644. case MiNonCached:
  7645. MI_DISABLE_CACHING (TempPte);
  7646. break;
  7647. case MiCached:
  7648. break;
  7649. case MiWriteCombined:
  7650. MI_SET_PTE_WRITE_COMBINE (TempPte);
  7651. break;
  7652. default:
  7653. ASSERT (FALSE);
  7654. break;
  7655. }
  7656. MI_PREPARE_FOR_NONCACHED (CacheAttribute);
  7657. do {
  7658. ASSERT (PointerPte->u.Hard.Valid == 0);
  7659. PageFrameIndex = *Page;
  7660. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  7661. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  7662. Page += 1;
  7663. PointerPte += 1;
  7664. NumberOfPages -= 1;
  7665. } while (NumberOfPages != 0);
  7666. MI_SWEEP_CACHE (CacheAttribute, BaseAddress, NumberOfBytes);
  7667. MiMarkMdlPageAttributes (Mdl, NumberOfPagesAllocated, CacheAttribute);
  7668. return BaseAddress;
  7669. }
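//
// Illustrative usage sketch (not part of the original source, disabled via
// #if 0): a driver allocating a noncached buffer at initialization and
// releasing it at unload. MiExampleNonCachedBuffer is a hypothetical name
// and 0x10000 is an arbitrary size; the identical byte count must be passed
// to MmFreeNonCachedMemory.
//
#if 0
VOID
MiExampleNonCachedBuffer (
    VOID
    )
{
    PVOID Buffer;

    Buffer = MmAllocateNonCachedMemory (0x10000);

    if (Buffer != NULL) {

        //
        // ... share the buffer with the device ...
        //

        MmFreeNonCachedMemory (Buffer, 0x10000);
    }
}
#endif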
  7670. VOID
  7671. MmFreeNonCachedMemory (
  7672. IN PVOID BaseAddress,
  7673. IN SIZE_T NumberOfBytes
  7674. )
  7675. /*++
  7676. Routine Description:
  7677. This function deallocates a range of noncached memory in
  7678. the non-paged portion of the system address space.
  7679. Arguments:
  7680. BaseAddress - Supplies the base virtual address where the noncached
  7681. memory resides.
  7682. NumberOfBytes - Supplies the number of bytes allocated to the request.
  7683. This must be the same number that was obtained with
  7684. the MmAllocateNonCachedMemory call.
  7685. Return Value:
  7686. None.
  7687. Environment:
  7688. Kernel mode, IRQL of APC_LEVEL or below.
  7689. --*/
  7690. {
  7691. PMDL Mdl;
  7692. PMMPTE PointerPte;
  7693. PFN_NUMBER NumberOfPages;
  7694. #if DBG
  7695. PFN_NUMBER i;
  7696. PVOID StartingAddress;
  7697. #endif
  7698. ASSERT (NumberOfBytes != 0);
  7699. ASSERT (PAGE_ALIGN (BaseAddress) == BaseAddress);
  7700. MI_MAKING_MULTIPLE_PTES_INVALID (TRUE);
  7701. NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);
  7702. PointerPte = MiGetPteAddress (BaseAddress);
  7703. Mdl = *(PMDL *)(PointerPte - 1);
  7704. #if DBG
  7705. StartingAddress = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);
  7706. i = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingAddress, Mdl->ByteCount);
  7707. ASSERT (NumberOfPages == i);
  7708. #endif
  7709. MmFreePagesFromMdl (Mdl);
  7710. ExFreePool (Mdl);
  7711. MiReleaseSystemPtes (PointerPte - 1,
  7712. (ULONG)NumberOfPages + 1,
  7713. SystemPteSpace);
  7714. return;
  7715. }
  7716. SIZE_T
  7717. MmSizeOfMdl (
  7718. IN PVOID Base,
  7719. IN SIZE_T Length
  7720. )
  7721. /*++
  7722. Routine Description:
  7723. This function returns the number of bytes required for an MDL for a
  7724. given buffer and size.
  7725. Arguments:
  7726. Base - Supplies the base virtual address for the buffer.
  7727. Length - Supplies the size of the buffer in bytes.
  7728. Return Value:
  7729. Returns the number of bytes required to contain the MDL.
  7730. Environment:
  7731. Kernel mode. Any IRQL level.
  7732. --*/
  7733. {
  7734. return( sizeof( MDL ) +
  7735. (ADDRESS_AND_SIZE_TO_SPAN_PAGES( Base, Length ) *
  7736. sizeof( PFN_NUMBER ))
  7737. );
  7738. }
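//
// Worked example (not part of the original source): with 4K pages, a
// buffer at virtual address 0x12345EF0 of length 0x2000 touches pages
// 0x12345, 0x12346 and 0x12347, so ADDRESS_AND_SIZE_TO_SPAN_PAGES returns
// 3 and the MDL requires:
//
//     sizeof (MDL) + 3 * sizeof (PFN_NUMBER)
//
// bytes - 40 (28 + 3 * 4) on 32-bit systems.
//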
  7739. PMDL
  7740. MmCreateMdl (
  7741. IN PMDL MemoryDescriptorList OPTIONAL,
  7742. IN PVOID Base,
  7743. IN SIZE_T Length
  7744. )
  7745. /*++
  7746. Routine Description:
  7747. This function optionally allocates and initializes an MDL.
  7748. Arguments:
  7749. MemoryDescriptorList - Optionally supplies the address of the MDL
  7750. to initialize. If this address is supplied as NULL
  7751. an MDL is allocated from non-paged pool and
  7752. initialized.
  7753. Base - Supplies the base virtual address for the buffer.
  7754. Length - Supplies the size of the buffer in bytes.
  7755. Return Value:
  7756. Returns the address of the initialized MDL.
  7757. Environment:
  7758. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  7759. --*/
  7760. {
  7761. SIZE_T MdlSize;
  7762. #if defined (_WIN64)
  7763. //
  7764. // Since the Length has to fit into the MDL's ByteCount field, ensure it
  7765. // doesn't wrap on 64-bit systems.
  7766. //
  7767. if (Length >= _4gb) {
  7768. return NULL;
  7769. }
  7770. #endif
  7771. MdlSize = MmSizeOfMdl (Base, Length);
  7772. if (!ARGUMENT_PRESENT(MemoryDescriptorList)) {
  7773. MemoryDescriptorList = (PMDL)ExAllocatePoolWithTag (NonPagedPool,
  7774. MdlSize,
  7775. 'ldmM');
  7776. if (MemoryDescriptorList == (PMDL)0) {
  7777. return NULL;
  7778. }
  7779. }
  7780. MmInitializeMdl (MemoryDescriptorList, Base, Length);
  7781. return MemoryDescriptorList;
  7782. }
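//
// Illustrative sketch only: creating an MDL for a nonpaged buffer with the
// routines above. MmSizeOfMdl shows the allocation MmCreateMdl performs
// internally when no caller-supplied MDL is given. ExampleBuffer and
// ExampleLength are hypothetical.
//
#if 0
VOID
ExampleDescribeBuffer (
    IN PVOID ExampleBuffer,
    IN SIZE_T ExampleLength
    )
{
    PMDL Mdl;
    SIZE_T MdlBytes;

    //
    // MdlBytes is sizeof(MDL) plus one PFN_NUMBER per spanned page.
    //

    MdlBytes = MmSizeOfMdl (ExampleBuffer, ExampleLength);
    UNREFERENCED_PARAMETER (MdlBytes);

    Mdl = MmCreateMdl (NULL, ExampleBuffer, ExampleLength);

    if (Mdl != NULL) {

        //
        // For nonpaged pool the physical pages are already resident, so
        // the page frame array can be filled in directly.
        //

        MmBuildMdlForNonPagedPool (Mdl);

        // ... hand the MDL to the I/O path ...

        ExFreePool (Mdl);
    }
}
#endif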
  7783. BOOLEAN
  7784. MmSetAddressRangeModified (
  7785. IN PVOID Address,
  7786. IN SIZE_T Length
  7787. )
  7788. /*++
  7789. Routine Description:
  7790. This routine sets the modified bit in the PFN database for the
  7791. pages that correspond to the specified address range.
  7792. Note that the dirty bit in the PTE is cleared by this operation.
  7793. Arguments:
  7794. Address - Supplies the address of the start of the range. This
  7795. range must reside within the system cache.
  7796. Length - Supplies the length of the range.
  7797. Return Value:
  7798. TRUE if at least one PTE was dirty in the range, FALSE otherwise.
  7799. Environment:
  7800. Kernel mode. APC_LEVEL and below for pagable addresses,
  7801. DISPATCH_LEVEL and below for non-pagable addresses.
  7802. --*/
  7803. {
  7804. PMMPTE PointerPte;
  7805. PMMPTE LastPte;
  7806. PMMPFN Pfn1;
  7807. MMPTE PteContents;
  7808. KIRQL OldIrql;
  7809. PVOID VaFlushList[MM_MAXIMUM_FLUSH_COUNT];
  7810. ULONG Count;
  7811. BOOLEAN Result;
  7812. Count = 0;
  7813. Result = FALSE;
  7814. //
  7815. // Loop on the copy on write case until the page is only
  7816. // writable.
  7817. //
  7818. PointerPte = MiGetPteAddress (Address);
  7819. LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + Length - 1));
  7820. LOCK_PFN2 (OldIrql);
  7821. do {
  7822. PteContents = *PointerPte;
  7823. if (PteContents.u.Hard.Valid == 1) {
  7824. Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
  7825. MI_SET_MODIFIED (Pfn1, 1, 0x5);
  7826. if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
  7827. (Pfn1->u3.e1.WriteInProgress == 0)) {
  7828. MiReleasePageFileSpace (Pfn1->OriginalPte);
  7829. Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
  7830. }
  7831. #ifdef NT_UP
  7832. //
  7833. // On uniprocessor systems no need to flush if this processor
  7834. // doesn't think the PTE is dirty.
  7835. //
  7836. if (MI_IS_PTE_DIRTY (PteContents)) {
  7837. Result = TRUE;
  7838. #else //NT_UP
  7839. Result |= (BOOLEAN)(MI_IS_PTE_DIRTY (PteContents));
  7840. #endif //NT_UP
  7841. //
  7842. // Clear the write bit in the PTE so new writes can be tracked.
  7843. //
  7844. MI_SET_PTE_CLEAN (PteContents);
  7845. MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, PteContents);
  7846. if (Count != MM_MAXIMUM_FLUSH_COUNT) {
  7847. VaFlushList[Count] = Address;
  7848. Count += 1;
  7849. }
  7850. #ifdef NT_UP
  7851. }
  7852. #endif //NT_UP
  7853. }
  7854. PointerPte += 1;
  7855. Address = (PVOID)((PCHAR)Address + PAGE_SIZE);
  7856. } while (PointerPte <= LastPte);
  7857. if (Count != 0) {
  7858. if (Count == 1) {
  7859. KeFlushSingleTb (VaFlushList[0], TRUE);
  7860. }
  7861. else if (Count != MM_MAXIMUM_FLUSH_COUNT) {
  7862. KeFlushMultipleTb (Count, &VaFlushList[0], TRUE);
  7863. }
  7864. else {
  7865. KeFlushEntireTb (FALSE, TRUE);
  7866. }
  7867. }
  7868. UNLOCK_PFN2 (OldIrql);
  7869. return Result;
  7870. }
  7871. PVOID
  7872. MiCheckForContiguousMemory (
  7873. IN PVOID BaseAddress,
  7874. IN PFN_NUMBER BaseAddressPages,
  7875. IN PFN_NUMBER SizeInPages,
  7876. IN PFN_NUMBER LowestPfn,
  7877. IN PFN_NUMBER HighestPfn,
  7878. IN PFN_NUMBER BoundaryPfn,
  7879. IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
  7880. )
  7881. /*++
  7882. Routine Description:
  7883. This routine checks to see if the physical memory mapped
  7884. by the specified BaseAddress for the specified size is
  7885. contiguous and that the first page is greater than or equal to
  7886. the specified LowestPfn and that the last page of the physical memory is
  7887. less than or equal to the specified HighestPfn.
  7888. Arguments:
  7889. BaseAddress - Supplies the base address to start checking at.
  7890. BaseAddressPages - Supplies the number of pages to scan from the
  7891. BaseAddress.
  7892. SizeInPages - Supplies the number of pages in the range.
  7893. LowestPfn - Supplies lowest PFN acceptable as a physical page.
  7894. HighestPfn - Supplies the highest PFN acceptable as a physical page.
  7895. BoundaryPfn - Supplies the PFN multiple the allocation must
  7896. not cross. 0 indicates it can cross any boundary.
  7897. CacheAttribute - Supplies the type of cache mapping that will be used
  7898. for the memory.
  7899. Return Value:
  7900. Returns the usable virtual address within the argument range that the
7901. caller should return to its caller. NULL if there is no usable address.
  7902. Environment:
  7903. Kernel mode, memory management internal.
  7904. --*/
  7905. {
  7906. KIRQL OldIrql;
  7907. PMMPTE PointerPte;
  7908. PMMPTE LastPte;
  7909. PFN_NUMBER PreviousPage;
  7910. PFN_NUMBER Page;
  7911. PFN_NUMBER HighestStartPage;
  7912. PFN_NUMBER LastPage;
  7913. PFN_NUMBER OriginalPage;
  7914. PFN_NUMBER OriginalLastPage;
  7915. PVOID BoundaryAllocation;
  7916. PFN_NUMBER BoundaryMask;
  7917. PFN_NUMBER PageCount;
  7918. MMPTE PteContents;
  7919. BoundaryMask = ~(BoundaryPfn - 1);
  7920. if (LowestPfn > HighestPfn) {
  7921. return NULL;
  7922. }
  7923. if (LowestPfn + SizeInPages <= LowestPfn) {
  7924. return NULL;
  7925. }
  7926. if (LowestPfn + SizeInPages - 1 > HighestPfn) {
  7927. return NULL;
  7928. }
  7929. if (BaseAddressPages < SizeInPages) {
  7930. return NULL;
  7931. }
  7932. if (MI_IS_PHYSICAL_ADDRESS (BaseAddress)) {
  7933. //
  7934. // All physical addresses are by definition cached and therefore do
  7935. // not qualify for our caller.
  7936. //
  7937. if (CacheAttribute != MiCached) {
  7938. return NULL;
  7939. }
  7940. OriginalPage = MI_CONVERT_PHYSICAL_TO_PFN(BaseAddress);
  7941. OriginalLastPage = OriginalPage + BaseAddressPages;
  7942. Page = OriginalPage;
  7943. LastPage = OriginalLastPage;
  7944. //
  7945. // Close the gaps, then examine the range for a fit.
  7946. //
  7947. if (Page < LowestPfn) {
  7948. Page = LowestPfn;
  7949. }
  7950. if (LastPage > HighestPfn + 1) {
  7951. LastPage = HighestPfn + 1;
  7952. }
  7953. HighestStartPage = LastPage - SizeInPages;
  7954. if (Page > HighestStartPage) {
  7955. return NULL;
  7956. }
  7957. if (BoundaryPfn != 0) {
  7958. do {
  7959. if (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask) == 0) {
  7960. //
  7961. // This portion of the range meets the alignment
  7962. // requirements.
  7963. //
  7964. break;
  7965. }
  7966. Page |= (BoundaryPfn - 1);
  7967. Page += 1;
  7968. } while (Page <= HighestStartPage);
  7969. if (Page > HighestStartPage) {
  7970. return NULL;
  7971. }
  7972. BoundaryAllocation = (PVOID)((PCHAR)BaseAddress + ((Page - OriginalPage) << PAGE_SHIFT));
  7973. //
  7974. // The request can be satisfied. Since specific alignment was
  7975. // requested, return the fit now without getting fancy.
  7976. //
  7977. return BoundaryAllocation;
  7978. }
  7979. //
  7980. // If possible return a chunk on the end to reduce fragmentation.
  7981. //
  7982. if (LastPage == OriginalLastPage) {
  7983. return (PVOID)((PCHAR)BaseAddress + ((BaseAddressPages - SizeInPages) << PAGE_SHIFT));
  7984. }
  7985. //
  7986. // The end chunk did not satisfy the requirements. The next best option
  7987. // is to return a chunk from the beginning. Since that's where the search
  7988. // began, just return the current chunk.
  7989. //
  7990. return (PVOID)((PCHAR)BaseAddress + ((Page - OriginalPage) << PAGE_SHIFT));
  7991. }
  7992. //
  7993. // Check the virtual addresses for physical contiguity.
  7994. //
  7995. PointerPte = MiGetPteAddress (BaseAddress);
  7996. LastPte = PointerPte + BaseAddressPages;
  7997. HighestStartPage = HighestPfn + 1 - SizeInPages;
  7998. PageCount = 0;
  7999. //
  8000. // Initializing PreviousPage is not needed for correctness
  8001. // but without it the compiler cannot compile this code
  8002. // W4 to check for use of uninitialized variables.
  8003. //
  8004. PreviousPage = 0;
  8005. while (PointerPte < LastPte) {
  8006. PteContents = *PointerPte;
  8007. ASSERT (PteContents.u.Hard.Valid == 1);
  8008. Page = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
  8009. //
  8010. // Before starting a new run, ensure that it
  8011. // can satisfy the location & boundary requirements (if any).
  8012. //
  8013. if (PageCount == 0) {
  8014. if ((Page >= LowestPfn) &&
  8015. (Page <= HighestStartPage) &&
  8016. ((CacheAttribute == MiCached) || (MI_PFN_ELEMENT (Page)->u4.MustBeCached == 0))) {
  8017. if (BoundaryPfn == 0) {
  8018. PageCount += 1;
  8019. }
  8020. else if (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask) == 0) {
  8021. //
  8022. // This run's physical address meets the alignment
  8023. // requirement.
  8024. //
  8025. PageCount += 1;
  8026. }
  8027. }
  8028. if (PageCount == SizeInPages) {
  8029. if (CacheAttribute != MiCached) {
  8030. //
  8031. // Recheck the cachability while holding the PFN lock.
  8032. //
  8033. LOCK_PFN2 (OldIrql);
  8034. if (MI_PFN_ELEMENT (Page)->u4.MustBeCached == 0) {
  8035. PageCount = 1;
  8036. }
  8037. else {
  8038. PageCount = 0;
  8039. }
  8040. UNLOCK_PFN2 (OldIrql);
  8041. }
  8042. if (PageCount != 0) {
  8043. //
8044. // Success - found a single page satisfying the requirements.
  8045. //
  8046. BaseAddress = MiGetVirtualAddressMappedByPte (PointerPte);
  8047. return BaseAddress;
  8048. }
  8049. }
  8050. PreviousPage = Page;
  8051. PointerPte += 1;
  8052. continue;
  8053. }
  8054. if (Page != PreviousPage + 1) {
  8055. //
  8056. // This page is not physically contiguous. Start over.
  8057. //
  8058. PageCount = 0;
  8059. continue;
  8060. }
  8061. PageCount += 1;
  8062. if (PageCount == SizeInPages) {
  8063. if (CacheAttribute != MiCached) {
  8064. LOCK_PFN2 (OldIrql);
  8065. do {
  8066. if ((MI_PFN_ELEMENT (Page))->u4.MustBeCached == 1) {
  8067. break;
  8068. }
  8069. Page -= 1;
  8070. PageCount -= 1;
  8071. } while (PageCount != 0);
  8072. UNLOCK_PFN2 (OldIrql);
  8073. if (PageCount != 0) {
  8074. PageCount = 0;
  8075. continue;
  8076. }
  8077. PageCount = SizeInPages;
  8078. }
  8079. //
8080. // Success - found a page range satisfying the requirements.
  8081. //
  8082. BaseAddress = MiGetVirtualAddressMappedByPte (PointerPte - PageCount + 1);
  8083. return BaseAddress;
  8084. }
  8085. PreviousPage = Page;
  8086. PointerPte += 1;
  8087. }
  8088. return NULL;
  8089. }
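//
// Worked example (not part of the original source) of the boundary check
// used above: with 4K pages, BoundaryPfn == 16 expresses a 64K boundary.
// BoundaryMask is then ~0xF, and a run starting at PFN 0x12 of length 4
// has (0x12 ^ 0x15) & ~0xF == 0, so pages 0x12..0x15 share the same 64K
// aligned region and the run does not cross the boundary. A run starting
// at PFN 0x1E of length 4 gives (0x1E ^ 0x21) & ~0xF != 0, so the scan
// advances Page to the next boundary multiple via
// Page |= (BoundaryPfn - 1); Page += 1;
//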
  8090. VOID
  8091. MmLockPagableSectionByHandle (
  8092. IN PVOID ImageSectionHandle
  8093. )
  8094. /*++
  8095. Routine Description:
  8096. This routine checks to see if the specified pages are resident in
  8097. the process's working set and if so the reference count for the
8098. page is incremented. This allows the virtual address to be accessed
8099. without getting a hard page fault (have to go to the disk...) except
8100. for the extremely rare case when the page table page is removed from the
8101. working set and migrates to the disk.
8102. If the virtual address is that of the system wide global "cache", the
  8103. virtual address of the "locked" pages is always guaranteed to
  8104. be valid.
  8105. NOTE: This routine is not to be used for general locking of user
  8106. addresses - use MmProbeAndLockPages. This routine is intended for
  8107. well behaved system code like the file system caches which allocates
  8108. virtual addresses for mapping files AND guarantees that the mapping
  8109. will not be modified (deleted or changed) while the pages are locked.
  8110. Arguments:
  8111. ImageSectionHandle - Supplies the value returned by a previous call
  8112. to MmLockPagableDataSection. This is a pointer to
  8113. the section header for the image.
  8114. Return Value:
  8115. None.
  8116. Environment:
  8117. Kernel mode, IRQL of APC_LEVEL or below.
  8118. --*/
  8119. {
  8120. ULONG EntryCount;
  8121. ULONG OriginalCount;
  8122. PKTHREAD CurrentThread;
  8123. PIMAGE_SECTION_HEADER NtSection;
  8124. PVOID BaseAddress;
  8125. ULONG SizeToLock;
  8126. PMMPTE PointerPte;
  8127. PMMPTE LastPte;
  8128. PLONG SectionLockCountPointer;
  8129. if (MI_IS_PHYSICAL_ADDRESS(ImageSectionHandle)) {
  8130. //
  8131. // No need to lock physical addresses.
  8132. //
  8133. return;
  8134. }
  8135. NtSection = (PIMAGE_SECTION_HEADER)ImageSectionHandle;
  8136. BaseAddress = SECTION_BASE_ADDRESS(NtSection);
  8137. SectionLockCountPointer = SECTION_LOCK_COUNT_POINTER (NtSection);
  8138. ASSERT (!MI_IS_SYSTEM_CACHE_ADDRESS(BaseAddress));
  8139. //
  8140. // The address must be within the system space.
  8141. //
  8142. ASSERT (BaseAddress >= MmSystemRangeStart);
  8143. SizeToLock = NtSection->SizeOfRawData;
  8144. //
  8145. // Generally, SizeOfRawData is larger than VirtualSize for each
  8146. // section because it includes the padding to get to the subsection
  8147. // alignment boundary. However, if the image is linked with
  8148. // subsection alignment == native page alignment, the linker will
  8149. // have VirtualSize be much larger than SizeOfRawData because it
  8150. // will account for all the bss.
  8151. //
  8152. if (SizeToLock < NtSection->Misc.VirtualSize) {
  8153. SizeToLock = NtSection->Misc.VirtualSize;
  8154. }
  8155. PointerPte = MiGetPteAddress(BaseAddress);
  8156. LastPte = MiGetPteAddress((PCHAR)BaseAddress + SizeToLock - 1);
  8157. ASSERT (SizeToLock != 0);
  8158. CurrentThread = KeGetCurrentThread ();
  8159. KeEnterCriticalRegionThread (CurrentThread);
  8160. //
  8161. // The lock count values have the following meanings :
  8162. //
  8163. // Value of 0 means unlocked.
  8164. // Value of 1 means lock in progress by another thread.
  8165. // Value of 2 or more means locked.
  8166. //
  8167. // If the value is 1, this thread must block until the other thread's
  8168. // lock operation is complete.
  8169. //
  8170. do {
  8171. EntryCount = *SectionLockCountPointer;
  8172. if (EntryCount != 1) {
  8173. OriginalCount = InterlockedCompareExchange (SectionLockCountPointer,
  8174. EntryCount + 1,
  8175. EntryCount);
  8176. if (OriginalCount == EntryCount) {
  8177. //
  8178. // Success - this is the first thread to update.
  8179. //
  8180. ASSERT (OriginalCount != 1);
  8181. break;
  8182. }
  8183. //
  8184. // Another thread updated the count before this thread's attempt
  8185. // so it's time to start over.
  8186. //
  8187. }
  8188. else {
  8189. //
  8190. // A lock is in progress, wait for it to finish. This should be
  8191. // generally rare, and even in this case, the pulse will usually
  8192. // wake us. A timeout is used so that the wait and the pulse do
  8193. // not need to be interlocked.
  8194. //
  8195. InterlockedIncrement (&MmCollidedLockWait);
  8196. KeWaitForSingleObject (&MmCollidedLockEvent,
  8197. WrVirtualMemory,
  8198. KernelMode,
  8199. FALSE,
  8200. (PLARGE_INTEGER)&MmShortTime);
  8201. InterlockedDecrement (&MmCollidedLockWait);
  8202. }
  8203. } while (TRUE);
  8204. if (OriginalCount >= 2) {
  8205. //
  8206. // Already locked, just return.
  8207. //
  8208. KeLeaveCriticalRegionThread (CurrentThread);
  8209. return;
  8210. }
  8211. ASSERT (OriginalCount == 0);
  8212. ASSERT (*SectionLockCountPointer == 1);
  8213. //
  8214. // Value was 0 when the lock was obtained. It is now 1 indicating
  8215. // a lock is in progress.
  8216. //
  8217. MiLockCode (PointerPte, LastPte, MM_LOCK_BY_REFCOUNT);
  8218. //
  8219. // Set lock count to 2 (it was 1 when this started) and check
  8220. // to see if any other threads tried to lock while this was happening.
  8221. //
  8222. ASSERT (*SectionLockCountPointer == 1);
  8223. OriginalCount = InterlockedIncrement (SectionLockCountPointer);
  8224. ASSERT (OriginalCount >= 2);
  8225. if (MmCollidedLockWait != 0) {
  8226. KePulseEvent (&MmCollidedLockEvent, 0, FALSE);
  8227. }
  8228. //
  8229. // Enable user APCs now that the pulse has occurred. They had to be
  8230. // blocked to prevent any suspensions of this thread as that would
  8231. // stop all waiters indefinitely.
  8232. //
  8233. KeLeaveCriticalRegionThread (CurrentThread);
  8234. return;
  8235. }
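//
// Worked example (not part of the original source) of the lock count
// protocol above: with the count at 0, the first locker CAS-es 0 -> 1,
// performs the MiLockCode work, then increments 1 -> 2. A second thread
// arriving while the count is 1 waits on MmCollidedLockEvent; arriving at
// 2 or more it simply CAS-es N -> N+1 and returns, since the section is
// already resident and referenced.
//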
  8236. VOID
  8237. MiLockCode (
  8238. IN PMMPTE FirstPte,
  8239. IN PMMPTE LastPte,
  8240. IN ULONG LockType
  8241. )
  8242. /*++
  8243. Routine Description:
  8244. This routine checks to see if the specified pages are resident in
  8245. the process's working set and if so the reference count for the
  8246. page is incremented. This allows the virtual address to be accessed
  8247. without getting a hard page fault (have to go to the disk...) except
  8248. for the extremely rare case when the page table page is removed from the
  8249. working set and migrates to the disk.
  8250. If the virtual address is that of the system wide global "cache", the
  8251. virtual address of the "locked" pages is always guaranteed to
  8252. be valid.
  8253. NOTE: This routine is not to be used for general locking of user
  8254. addresses - use MmProbeAndLockPages. This routine is intended for
  8255. well behaved system code like the file system caches which allocates
  8256. virtual addresses for mapping files AND guarantees that the mapping
  8257. will not be modified (deleted or changed) while the pages are locked.
  8258. Arguments:
  8259. FirstPte - Supplies the base address to begin locking.
  8260. LastPte - The last PTE to lock.
  8261. LockType - Supplies either MM_LOCK_BY_REFCOUNT or MM_LOCK_NONPAGE.
  8262. LOCK_BY_REFCOUNT increments the reference count to keep
  8263. the page in memory, LOCK_NONPAGE removes the page from
  8264. the working set so it's locked just like nonpaged pool.
  8265. Return Value:
  8266. None.
  8267. Environment:
  8268. Kernel mode.
  8269. --*/
  8270. {
  8271. PMMPFN Pfn1;
  8272. PMMPTE PointerPte;
  8273. MMPTE TempPte;
  8274. MMPTE PteContents;
  8275. WSLE_NUMBER WorkingSetIndex;
  8276. WSLE_NUMBER SwapEntry;
  8277. PFN_NUMBER PageFrameIndex;
  8278. KIRQL OldIrql;
  8279. LOGICAL SessionSpace;
  8280. PMMWSL WorkingSetList;
  8281. PMMSUPPORT Vm;
  8282. PETHREAD CurrentThread;
  8283. ASSERT (!MI_IS_PHYSICAL_ADDRESS(MiGetVirtualAddressMappedByPte(FirstPte)));
  8284. PointerPte = FirstPte;
  8285. CurrentThread = PsGetCurrentThread ();
  8286. SessionSpace = MI_IS_SESSION_IMAGE_ADDRESS (MiGetVirtualAddressMappedByPte(FirstPte));
  8287. if (SessionSpace == TRUE) {
  8288. Vm = &MmSessionSpace->GlobalVirtualAddress->Vm;
  8289. WorkingSetList = MmSessionSpace->Vm.VmWorkingSetList;
  8290. //
  8291. // Session space is never locked by refcount.
  8292. //
  8293. ASSERT (LockType != MM_LOCK_BY_REFCOUNT);
  8294. }
  8295. else {
  8296. Vm = &MmSystemCacheWs;
  8297. WorkingSetList = NULL;
  8298. }
  8299. LOCK_WORKING_SET (Vm);
  8300. LOCK_PFN (OldIrql);
  8301. do {
  8302. PteContents = *PointerPte;
  8303. ASSERT (PteContents.u.Long != ZeroKernelPte.u.Long);
  8304. if (PteContents.u.Hard.Valid == 1) {
  8305. //
  8306. // This address is already in the system (or session) working set.
  8307. //
  8308. Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
  8309. //
  8310. // Up the reference count so the page cannot be released.
  8311. //
  8312. MI_ADD_LOCKED_PAGE_CHARGE (Pfn1, TRUE, 36);
  8313. Pfn1->u3.e2.ReferenceCount += 1;
  8314. if (LockType != MM_LOCK_BY_REFCOUNT) {
  8315. //
  8316. // If the page is in the system working set, remove it.
  8317. // The system working set lock MUST be owned to check to
  8318. // see if this page is in the working set or not. This
  8319. // is because the pager may have just released the PFN lock,
  8320. // acquired the system lock and is now trying to add the
  8321. // page to the system working set.
  8322. //
  8323. // If the page is in the SESSION working set, it cannot be
  8324. // removed as all these pages are carefully accounted for.
  8325. // Instead move it to the locked portion of the working set
  8326. // if it is not there already.
  8327. //
  8328. if (Pfn1->u1.WsIndex != 0) {
  8329. UNLOCK_PFN (OldIrql);
  8330. if (SessionSpace == TRUE) {
  8331. WorkingSetIndex = MiLocateWsle (
  8332. MiGetVirtualAddressMappedByPte(PointerPte),
  8333. WorkingSetList,
  8334. Pfn1->u1.WsIndex);
  8335. if (WorkingSetIndex >= WorkingSetList->FirstDynamic) {
  8336. SwapEntry = WorkingSetList->FirstDynamic;
  8337. if (WorkingSetIndex != WorkingSetList->FirstDynamic) {
  8338. //
  8339. // Swap this entry with the one at first
  8340. // dynamic. Note that the working set index
  8341. // in the PTE is updated here as well.
  8342. //
  8343. MiSwapWslEntries (WorkingSetIndex,
  8344. SwapEntry,
  8345. Vm,
  8346. FALSE);
  8347. }
  8348. WorkingSetList->FirstDynamic += 1;
  8349. //
  8350. // Indicate that the page is now locked.
  8351. //
  8352. MmSessionSpace->Wsle[SwapEntry].u1.e1.LockedInWs = 1;
  8353. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_LOCK_CODE2, 1);
  8354. InterlockedExchangeAddSizeT (&MmSessionSpace->NonPagablePages, 1);
  8355. LOCK_PFN (OldIrql);
  8356. Pfn1->u1.WsIndex = SwapEntry;
  8357. //
  8358. // Adjust available pages as this page is now not
  8359. // in any working set, just like a non-paged pool
  8360. // page.
  8361. //
  8362. MI_DECREMENT_RESIDENT_AVAILABLE (1, MM_RESAVAIL_ALLOCATE_LOCK_CODE1);
  8363. if (Pfn1->u3.e1.PrototypePte == 0) {
  8364. InterlockedDecrement (&MmTotalSystemDriverPages);
  8365. }
  8366. }
  8367. else {
  8368. ASSERT (MmSessionSpace->Wsle[WorkingSetIndex].u1.e1.LockedInWs == 1);
  8369. LOCK_PFN (OldIrql);
  8370. }
  8371. }
  8372. else {
  8373. MiRemoveWsle (Pfn1->u1.WsIndex, MmSystemCacheWorkingSetList);
  8374. MiReleaseWsle (Pfn1->u1.WsIndex, &MmSystemCacheWs);
  8375. MI_SET_PTE_IN_WORKING_SET (PointerPte, 0);
  8376. LOCK_PFN (OldIrql);
  8377. MI_ZERO_WSINDEX (Pfn1);
  8378. //
  8379. // Adjust available pages as this page is now not in any
  8380. // working set, just like a non-paged pool page.
  8381. //
  8382. MI_DECREMENT_RESIDENT_AVAILABLE (1, MM_RESAVAIL_ALLOCATE_LOCK_CODE2);
  8383. if (Pfn1->u3.e1.PrototypePte == 0) {
  8384. InterlockedDecrement (&MmTotalSystemDriverPages);
  8385. }
  8386. }
  8387. }
  8388. ASSERT (Pfn1->u3.e2.ReferenceCount > 1);
  8389. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (Pfn1, 37);
  8390. }
  8391. }
  8392. else if (PteContents.u.Soft.Prototype == 1) {
  8393. //
  8394. // Page is not in memory and it is a prototype.
  8395. //
  8396. MiMakeSystemAddressValidPfnSystemWs (
  8397. MiGetVirtualAddressMappedByPte(PointerPte), OldIrql);
  8398. continue;
  8399. }
  8400. else if (PteContents.u.Soft.Transition == 1) {
  8401. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents);
  8402. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  8403. if ((Pfn1->u3.e1.ReadInProgress) ||
  8404. (Pfn1->u4.InPageError)) {
  8405. //
  8406. // Page read is ongoing, force a collided fault.
  8407. //
  8408. MiMakeSystemAddressValidPfnSystemWs (
  8409. MiGetVirtualAddressMappedByPte(PointerPte), OldIrql);
  8410. continue;
  8411. }
  8412. //
  8413. // Paged pool is trimmed without regard to sharecounts.
  8414. // This means a paged pool PTE can be in transition while
  8415. // the page is still marked active.
  8416. //
  8417. if (Pfn1->u3.e1.PageLocation == ActiveAndValid) {
  8418. ASSERT (((Pfn1->PteAddress >= MiGetPteAddress(MmPagedPoolStart)) &&
  8419. (Pfn1->PteAddress <= MiGetPteAddress(MmPagedPoolEnd))) ||
  8420. ((Pfn1->PteAddress >= MiGetPteAddress(MmSpecialPoolStart)) &&
  8421. (Pfn1->PteAddress <= MiGetPteAddress(MmSpecialPoolEnd))));
  8422. //
  8423. // Don't increment the valid PTE count for the
  8424. // paged pool page.
  8425. //
  8426. ASSERT (Pfn1->u2.ShareCount != 0);
  8427. ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
  8428. Pfn1->u2.ShareCount += 1;
  8429. }
  8430. else {
  8431. if (MmAvailablePages == 0) {
  8432. //
  8433. // This can only happen if the system is utilizing
  8434. // a hardware compression cache. This ensures that
  8435. // only a safe amount of the compressed virtual cache
  8436. // is directly mapped so that if the hardware gets
  8437. // into trouble, we can bail it out.
  8438. //
  8439. // Just unlock everything here to give the compression
  8440. // reaper a chance to ravage pages and then retry.
  8441. //
  8442. UNLOCK_PFN (OldIrql);
  8443. UNLOCK_WORKING_SET (Vm);
  8444. LOCK_WORKING_SET (Vm);
  8445. LOCK_PFN (OldIrql);
  8446. continue;
  8447. }
  8448. MiUnlinkPageFromList (Pfn1);
  8449. //
  8450. // Increment the reference count and set the share count to 1.
  8451. // Note the reference count may be 1 already if a modified page
  8452. // write is underway. The systemwide locked page charges
  8453. // are correct in either case and nothing needs to be done
  8454. // just yet.
  8455. //
  8456. Pfn1->u3.e2.ReferenceCount += 1;
  8457. Pfn1->u2.ShareCount = 1;
  8458. }
  8459. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  8460. Pfn1->u3.e1.CacheAttribute = MiCached;
  8461. MI_MAKE_VALID_PTE (TempPte,
  8462. PageFrameIndex,
  8463. Pfn1->OriginalPte.u.Soft.Protection,
  8464. PointerPte);
  8465. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  8466. //
8467. // Increment the reference count once for putting it in the
8468. // working set list and once for locking it for I/O.
  8469. //
  8470. if (LockType == MM_LOCK_BY_REFCOUNT) {
  8471. //
  8472. // Lock the page in the working set by upping the
  8473. // reference count.
  8474. //
  8475. MI_ADD_LOCKED_PAGE_CHARGE (Pfn1, TRUE, 34);
  8476. Pfn1->u3.e2.ReferenceCount += 1;
  8477. Pfn1->u1.Event = NULL;
  8478. UNLOCK_PFN (OldIrql);
  8479. WorkingSetIndex = MiAllocateWsle (Vm,
  8480. PointerPte,
  8481. Pfn1,
  8482. 0);
  8483. if (WorkingSetIndex == 0) {
  8484. //
  8485. // No working set entry was available. Another (broken
8486. // or malicious) thread may have already written to this
  8487. // page since the PTE was made valid. So trim the
  8488. // page instead of discarding it.
  8489. //
  8490. // Note the page cannot be a prototype because the
8491. // PTE was in transition above.
  8492. //
  8493. ASSERT (Pfn1->u3.e1.PrototypePte == 0);
  8494. LOCK_PFN (OldIrql);
  8495. //
  8496. // Undo the reference count & locked page charge (if any).
  8497. //
  8498. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (Pfn1, 51);
  8499. UNLOCK_PFN (OldIrql);
  8500. MiTrimPte (MiGetVirtualAddressMappedByPte (PointerPte),
  8501. PointerPte,
  8502. Pfn1,
  8503. NULL,
  8504. ZeroPte);
  8505. //
  8506. // Release all locks so that other threads (like the
  8507. // working set trimmer) can try to freely make memory.
  8508. //
  8509. UNLOCK_WORKING_SET (Vm);
  8510. KeDelayExecutionThread (KernelMode,
  8511. FALSE,
  8512. (PLARGE_INTEGER)&Mm30Milliseconds);
  8513. LOCK_WORKING_SET (Vm);
  8514. LOCK_PFN (OldIrql);
  8515. //
  8516. // Retry the same page now.
  8517. //
  8518. continue;
  8519. }
  8520. LOCK_PFN (OldIrql);
  8521. }
  8522. else {
  8523. //
  8524. // The wsindex field must be zero because the
  8525. // page is not in the system (or session) working set.
  8526. //
  8527. ASSERT (Pfn1->u1.WsIndex == 0);
  8528. //
  8529. // Adjust available pages as this page is now not in any
  8530. // working set, just like a non-paged pool page. On entry
  8531. // this page was in transition so it was part of the
  8532. // available pages by definition.
  8533. //
  8534. MI_DECREMENT_RESIDENT_AVAILABLE (1, MM_RESAVAIL_ALLOCATE_LOCK_CODE3);
  8535. if (Pfn1->u3.e1.PrototypePte == 0) {
  8536. InterlockedDecrement (&MmTotalSystemDriverPages);
  8537. }
  8538. if (SessionSpace == TRUE) {
  8539. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_LOCK_CODE1, 1);
  8540. InterlockedExchangeAddSizeT (&MmSessionSpace->NonPagablePages, 1);
  8541. }
  8542. }
  8543. }
  8544. else {
  8545. //
  8546. // Page is not in memory.
  8547. //
  8548. MiMakeSystemAddressValidPfnSystemWs (
  8549. MiGetVirtualAddressMappedByPte(PointerPte), OldIrql);
  8550. continue;
  8551. }
  8552. PointerPte += 1;
  8553. } while (PointerPte <= LastPte);
  8554. UNLOCK_PFN (OldIrql);
  8555. UNLOCK_WORKING_SET (Vm);
  8556. return;
  8557. }
  8558. NTSTATUS
  8559. MmGetSectionRange (
  8560. IN PVOID AddressWithinSection,
  8561. OUT PVOID *StartingSectionAddress,
  8562. OUT PULONG SizeofSection
  8563. )
  8564. {
  8565. ULONG Span;
  8566. PKTHREAD CurrentThread;
  8567. PKLDR_DATA_TABLE_ENTRY DataTableEntry;
  8568. ULONG i;
  8569. PIMAGE_NT_HEADERS NtHeaders;
  8570. PIMAGE_SECTION_HEADER NtSection;
  8571. NTSTATUS Status;
  8572. ULONG_PTR Rva;
  8573. PAGED_CODE();
  8574. //
  8575. // Search the loaded module list for the data table entry that describes
8576. // the DLL containing this address. It is possible that an entry is not in
  8577. // the list if a failure occurred at a point in loading the DLL just before
  8578. // the data table entry was generated.
  8579. //
  8580. Status = STATUS_NOT_FOUND;
  8581. CurrentThread = KeGetCurrentThread ();
  8582. KeEnterCriticalRegionThread (CurrentThread);
  8583. ExAcquireResourceSharedLite (&PsLoadedModuleResource, TRUE);
  8584. DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, TRUE);
  8585. if (DataTableEntry) {
  8586. Rva = (ULONG_PTR)((PUCHAR)AddressWithinSection - (ULONG_PTR)DataTableEntry->DllBase);
  8587. NtHeaders = (PIMAGE_NT_HEADERS) RtlImageNtHeader (DataTableEntry->DllBase);
  8588. if (NtHeaders == NULL) {
  8589. Status = STATUS_NOT_FOUND;
  8590. goto Finished;
  8591. }
  8592. NtSection = (PIMAGE_SECTION_HEADER)((PCHAR)NtHeaders +
  8593. sizeof(ULONG) +
  8594. sizeof(IMAGE_FILE_HEADER) +
  8595. NtHeaders->FileHeader.SizeOfOptionalHeader
  8596. );
  8597. for (i = 0; i < NtHeaders->FileHeader.NumberOfSections; i += 1) {
  8598. //
  8599. // Generally, SizeOfRawData is larger than VirtualSize for each
  8600. // section because it includes the padding to get to the subsection
  8601. // alignment boundary. However if the image is linked with
  8602. // subsection alignment == native page alignment, the linker will
  8603. // have VirtualSize be much larger than SizeOfRawData because it
  8604. // will account for all the bss.
  8605. //
  8606. Span = NtSection->SizeOfRawData;
  8607. if (Span < NtSection->Misc.VirtualSize) {
  8608. Span = NtSection->Misc.VirtualSize;
  8609. }
  8610. if ((Rva >= NtSection->VirtualAddress) &&
  8611. (Rva < NtSection->VirtualAddress + Span)) {
  8612. //
  8613. // Found it.
  8614. //
  8615. *StartingSectionAddress = (PVOID)
  8616. ((PCHAR) DataTableEntry->DllBase + NtSection->VirtualAddress);
  8617. *SizeofSection = Span;
  8618. Status = STATUS_SUCCESS;
  8619. break;
  8620. }
  8621. NtSection += 1;
  8622. }
  8623. }
  8624. Finished:
  8625. ExReleaseResourceLite (&PsLoadedModuleResource);
  8626. KeLeaveCriticalRegionThread (CurrentThread);
  8627. return Status;
  8628. }
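//
// Illustrative sketch only: querying which image section an address falls
// in via the routine above. ExampleQuerySection is hypothetical; any
// address inside a loaded module (such as a function in a driver) works.
//
#if 0
VOID
ExampleQuerySection (
    IN PVOID AddressWithinDriver
    )
{
    NTSTATUS Status;
    PVOID SectionBase;
    ULONG SectionSize;

    Status = MmGetSectionRange (AddressWithinDriver,
                                &SectionBase,
                                &SectionSize);

    if (NT_SUCCESS (Status)) {

        // SectionBase/SectionSize now span the containing PE section.

    }
}
#endif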
  8629. PVOID
  8630. MmLockPagableDataSection (
  8631. IN PVOID AddressWithinSection
  8632. )
  8633. /*++
  8634. Routine Description:
8635. This function locks into memory the entire section that contains the
8636. specified address. This allows pagable code to be brought into
  8637. memory and to be used as if the code was not really pagable. This
  8638. should not be done with a high degree of frequency.
  8639. Arguments:
  8640. AddressWithinSection - Supplies the address of a function
  8641. contained within a section that should be brought in and locked
  8642. in memory.
  8643. Return Value:
  8644. This function returns a value to be used in a subsequent call to
  8645. MmUnlockPagableImageSection.
  8646. --*/
  8647. {
  8648. ULONG Span;
  8649. PLONG SectionLockCountPointer;
  8650. PKTHREAD CurrentThread;
  8651. PKLDR_DATA_TABLE_ENTRY DataTableEntry;
  8652. ULONG i;
  8653. PIMAGE_NT_HEADERS NtHeaders;
  8654. PIMAGE_SECTION_HEADER NtSection;
  8655. PIMAGE_SECTION_HEADER FoundSection;
  8656. ULONG_PTR Rva;
  8657. PAGED_CODE();
  8658. if (MI_IS_PHYSICAL_ADDRESS(AddressWithinSection)) {
  8659. //
  8660. // Physical address, just return that as the handle.
  8661. //
  8662. return AddressWithinSection;
  8663. }
  8664. //
  8665. // Search the loaded module list for the data table entry that describes
8666. // the DLL containing this address. It is possible that an entry is not in
  8667. // the list if a failure occurred at a point in loading the DLL just before
  8668. // the data table entry was generated.
  8669. //
  8670. FoundSection = NULL;
  8671. CurrentThread = KeGetCurrentThread ();
  8672. KeEnterCriticalRegionThread (CurrentThread);
  8673. ExAcquireResourceSharedLite (&PsLoadedModuleResource, TRUE);
  8674. DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, TRUE);
  8675. Rva = (ULONG_PTR)((PUCHAR)AddressWithinSection - (ULONG_PTR)DataTableEntry->DllBase);
  8676. NtHeaders = (PIMAGE_NT_HEADERS) RtlImageNtHeader (DataTableEntry->DllBase);
  8677. if (NtHeaders == NULL) {
  8678. //
  8679. // This is a firmware entry - no one should be trying to lock these.
  8680. //
  8681. KeBugCheckEx (MEMORY_MANAGEMENT,
  8682. 0x1234,
  8683. (ULONG_PTR)AddressWithinSection,
  8684. 1,
  8685. 0);
  8686. }
  8687. NtSection = (PIMAGE_SECTION_HEADER)((ULONG_PTR)NtHeaders +
  8688. sizeof(ULONG) +
  8689. sizeof(IMAGE_FILE_HEADER) +
  8690. NtHeaders->FileHeader.SizeOfOptionalHeader
  8691. );
  8692. for (i = 0; i < NtHeaders->FileHeader.NumberOfSections; i += 1) {
  8693. //
  8694. // Generally, SizeOfRawData is larger than VirtualSize for each
  8695. // section because it includes the padding to get to the subsection
  8696. // alignment boundary. However, if the image is linked with
  8697. // subsection alignment == native page alignment, the linker will
  8698. // have VirtualSize be much larger than SizeOfRawData because it
  8699. // will account for all the bss.
  8700. //
  8701. Span = NtSection->SizeOfRawData;
  8702. if (Span < NtSection->Misc.VirtualSize) {
  8703. Span = NtSection->Misc.VirtualSize;
  8704. }
  8705. if ((Rva >= NtSection->VirtualAddress) &&
  8706. (Rva < NtSection->VirtualAddress + Span)) {
  8707. FoundSection = NtSection;
  8708. if (SECTION_BASE_ADDRESS(NtSection) != ((PUCHAR)DataTableEntry->DllBase +
  8709. NtSection->VirtualAddress)) {
  8710. //
  8711. // Overwrite the PointerToRelocations field (and on Win64, the
  8712. // PointerToLinenumbers field also) so that it contains
  8713. // the Va of this section.
  8714. //
  8715. // NumberOfRelocations & NumberOfLinenumbers contains
  8716. // the Lock Count for the section.
  8717. //
  8718. SECTION_BASE_ADDRESS(NtSection) = ((PUCHAR)DataTableEntry->DllBase +
  8719. NtSection->VirtualAddress);
  8720. SectionLockCountPointer = SECTION_LOCK_COUNT_POINTER (NtSection);
  8721. *SectionLockCountPointer = 0;
  8722. }
  8723. //
  8724. // Now lock in the code.
  8725. //
  8726. #if DBG
  8727. if (MmDebug & MM_DBG_LOCK_CODE) {
  8728. SectionLockCountPointer = SECTION_LOCK_COUNT_POINTER (NtSection);
  8729. DbgPrint("MM Lock %wZ %8s %p -> %p : %p %3ld.\n",
  8730. &DataTableEntry->BaseDllName,
  8731. NtSection->Name,
  8732. AddressWithinSection,
  8733. NtSection,
  8734. SECTION_BASE_ADDRESS(NtSection),
  8735. *SectionLockCountPointer);
  8736. }
  8737. #endif //DBG
  8738. MmLockPagableSectionByHandle ((PVOID)NtSection);
  8739. break;
  8740. }
  8741. NtSection += 1;
  8742. }
  8743. ExReleaseResourceLite (&PsLoadedModuleResource);
  8744. KeLeaveCriticalRegionThread (CurrentThread);
  8745. if (!FoundSection) {
  8746. KeBugCheckEx (MEMORY_MANAGEMENT,
  8747. 0x1234,
  8748. (ULONG_PTR)AddressWithinSection,
  8749. 0,
  8750. 0);
  8751. }
  8752. return (PVOID)FoundSection;
  8753. }
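//
// Illustrative sketch only: the usual driver pattern for pageable sections.
// The first lock goes through MmLockPagableDataSection; the returned handle
// is cached so later locks can use the cheaper MmLockPagableSectionByHandle.
// ExamplePagedVariable and ExamplePagedSectionHandle are hypothetical.
//
#if 0
extern ULONG ExamplePagedVariable;      // resides in a pageable data section

PVOID ExamplePagedSectionHandle;

VOID
ExampleLockPagedData (
    VOID
    )
{
    if (ExamplePagedSectionHandle == NULL) {
        ExamplePagedSectionHandle =
            MmLockPagableDataSection ((PVOID) &ExamplePagedVariable);
    }
    else {
        MmLockPagableSectionByHandle (ExamplePagedSectionHandle);
    }

    // ... touch the now-resident pageable data ...

    MmUnlockPagableImageSection (ExamplePagedSectionHandle);
}
#endif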
  8754. PKLDR_DATA_TABLE_ENTRY
  8755. MiLookupDataTableEntry (
  8756. IN PVOID AddressWithinSection,
  8757. IN ULONG ResourceHeld
  8758. )
  8759. /*++
  8760. Routine Description:
8761. This function locates the data table entry that maps the specified address.
  8762. Arguments:
  8763. AddressWithinSection - Supplies the address of a function contained
  8764. within the desired module.
  8765. ResourceHeld - Supplies TRUE if the loaded module resource is already held,
  8766. FALSE if not.
  8767. Return Value:
  8768. The address of the loaded module list data table entry that maps the
  8769. argument address.
  8770. --*/
  8771. {
  8772. PKTHREAD CurrentThread;
  8773. PKLDR_DATA_TABLE_ENTRY DataTableEntry;
  8774. PKLDR_DATA_TABLE_ENTRY FoundEntry;
  8775. PLIST_ENTRY NextEntry;
  8776. PAGED_CODE();
  8777. FoundEntry = NULL;
  8778. //
  8779. // Search the loaded module list for the data table entry that describes
8780. // the DLL containing this address. It is possible that an entry is not in
  8781. // the list if a failure occurred at a point in loading the DLL just before
  8782. // the data table entry was generated.
  8783. //
  8784. if (!ResourceHeld) {
  8785. CurrentThread = KeGetCurrentThread ();
  8786. KeEnterCriticalRegionThread (CurrentThread);
  8787. ExAcquireResourceSharedLite (&PsLoadedModuleResource, TRUE);
  8788. }
  8789. else {
  8790. CurrentThread = NULL;
  8791. }
  8792. NextEntry = PsLoadedModuleList.Flink;
  8793. ASSERT (NextEntry != NULL);
  8794. do {
  8795. DataTableEntry = CONTAINING_RECORD(NextEntry,
  8796. KLDR_DATA_TABLE_ENTRY,
  8797. InLoadOrderLinks);
  8798. //
  8799. // Locate the loaded module that contains this address.
  8800. //
  8801. if ( AddressWithinSection >= DataTableEntry->DllBase &&
  8802. AddressWithinSection < (PVOID)((PUCHAR)DataTableEntry->DllBase+DataTableEntry->SizeOfImage) ) {
  8803. FoundEntry = DataTableEntry;
  8804. break;
  8805. }
  8806. NextEntry = NextEntry->Flink;
  8807. } while (NextEntry != &PsLoadedModuleList);
  8808. if (CurrentThread != NULL) {
  8809. ExReleaseResourceLite (&PsLoadedModuleResource);
  8810. KeLeaveCriticalRegionThread (CurrentThread);
  8811. }
  8812. return FoundEntry;
  8813. }
  8814. VOID
  8815. MmUnlockPagableImageSection (
  8816. IN PVOID ImageSectionHandle
  8817. )
  8818. /*++
  8819. Routine Description:
  8820. This function unlocks from memory, the pages locked by a preceding call to
  8821. MmLockPagableDataSection.
  8822. Arguments:
  8823. ImageSectionHandle - Supplies the value returned by a previous call
  8824. to MmLockPagableDataSection.
  8825. Return Value:
  8826. None.
  8827. --*/
  8828. {
  8829. PKTHREAD CurrentThread;
  8830. PIMAGE_SECTION_HEADER NtSection;
  8831. PMMPTE PointerPte;
  8832. PMMPTE LastPte;
  8833. PFN_NUMBER PageFrameIndex;
  8834. PMMPFN Pfn1;
  8835. KIRQL OldIrql;
  8836. PVOID BaseAddress;
  8837. ULONG SizeToUnlock;
  8838. ULONG Count;
  8839. PLONG SectionLockCountPointer;
  8840. if (MI_IS_PHYSICAL_ADDRESS(ImageSectionHandle)) {
  8841. //
  8842. // No need to unlock physical addresses.
  8843. //
  8844. return;
  8845. }
  8846. NtSection = (PIMAGE_SECTION_HEADER)ImageSectionHandle;
  8847. //
  8848. // Address must be in the system working set.
  8849. //
  8850. BaseAddress = SECTION_BASE_ADDRESS(NtSection);
  8851. SectionLockCountPointer = SECTION_LOCK_COUNT_POINTER (NtSection);
  8852. SizeToUnlock = NtSection->SizeOfRawData;
  8853. //
  8854. // Generally, SizeOfRawData is larger than VirtualSize for each
  8855. // section because it includes the padding to get to the subsection
  8856. // alignment boundary. However, if the image is linked with
  8857. // subsection alignment == native page alignment, the linker will
  8858. // have VirtualSize be much larger than SizeOfRawData because it
  8859. // will account for all the bss.
  8860. //
  8861. if (SizeToUnlock < NtSection->Misc.VirtualSize) {
  8862. SizeToUnlock = NtSection->Misc.VirtualSize;
  8863. }
  8864. PointerPte = MiGetPteAddress(BaseAddress);
  8865. LastPte = MiGetPteAddress((PCHAR)BaseAddress + SizeToUnlock - 1);
  8866. CurrentThread = KeGetCurrentThread ();
  8867. //
  8868. // Block user APCs as the initial decrement below could push the count to 1.
  8869. // This puts this thread into the critical path that must finish as all
  8870. // other threads trying to lock the section will be waiting for this thread.
  8871. // Entering a critical region here ensures that a suspend cannot stop us.
  8872. //
  8873. KeEnterCriticalRegionThread (CurrentThread);
  8874. Count = InterlockedDecrement (SectionLockCountPointer);
  8875. if (Count < 1) {
  8876. KeBugCheckEx (MEMORY_MANAGEMENT,
  8877. 0x1010,
  8878. (ULONG_PTR)BaseAddress,
  8879. (ULONG_PTR)NtSection,
  8880. *SectionLockCountPointer);
  8881. }
  8882. if (Count != 1) {
  8883. KeLeaveCriticalRegionThread (CurrentThread);
  8884. return;
  8885. }
  8886. LOCK_PFN2 (OldIrql);
  8887. do {
  8888. ASSERT (PointerPte->u.Hard.Valid == 1);
  8889. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  8890. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  8891. ASSERT (Pfn1->u3.e2.ReferenceCount > 1);
  8892. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (Pfn1, 37);
  8893. PointerPte += 1;
  8894. } while (PointerPte <= LastPte);
  8895. UNLOCK_PFN2 (OldIrql);
  8896. ASSERT (*SectionLockCountPointer == 1);
  8897. Count = InterlockedDecrement (SectionLockCountPointer);
  8898. ASSERT (Count == 0);
  8899. if (MmCollidedLockWait != 0) {
  8900. KePulseEvent (&MmCollidedLockEvent, 0, FALSE);
  8901. }
  8902. //
  8903. // Enable user APCs now that the pulse has occurred. They had to be
  8904. // blocked to prevent any suspensions of this thread as that would
  8905. // stop all waiters indefinitely.
  8906. //
  8907. KeLeaveCriticalRegionThread (CurrentThread);
  8908. return;
  8909. }
  8910. BOOLEAN
  8911. MmIsRecursiveIoFault (
  8912. VOID
  8913. )
  8914. /*++
  8915. Routine Description:
  8916. This function examines the thread's page fault clustering information
  8917. and determines if the current page fault is occurring during an I/O
  8918. operation.
  8919. Arguments:
  8920. None.
  8921. Return Value:
  8922. Returns TRUE if the fault is occurring during an I/O operation,
  8923. FALSE otherwise.
  8924. --*/
  8925. {
  8926. PETHREAD Thread;
  8927. Thread = PsGetCurrentThread ();
  8928. return (BOOLEAN)(Thread->DisablePageFaultClustering |
  8929. Thread->ForwardClusterOnly);
  8930. }
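//
// Illustrative sketch only: how a file system's fault path might consult
// MmIsRecursiveIoFault to avoid faulting again while already servicing
// paging I/O. The status chosen here is a hypothetical example, not a
// statement of actual file-system policy.
//
#if 0
NTSTATUS
ExampleCheckedAccess (
    VOID
    )
{
    if (MmIsRecursiveIoFault ()) {

        //
        // Already inside an I/O-induced fault; fail rather than recurse.
        //

        return STATUS_FILE_LOCK_CONFLICT;
    }

    return STATUS_SUCCESS;
}
#endif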
  8931. VOID
  8932. MmMapMemoryDumpMdl (
  8933. IN OUT PMDL MemoryDumpMdl
  8934. )
  8935. /*++
  8936. Routine Description:
  8937. For use by crash dump routine ONLY. Maps an MDL into a fixed
  8938. portion of the address space. Only 1 MDL can be mapped at a
  8939. time.
  8940. Arguments:
  8941. MemoryDumpMdl - Supplies the MDL to map.
  8942. Return Value:
  8943. None, fields in MDL updated.
  8944. --*/
  8945. {
  8946. PFN_NUMBER NumberOfPages;
  8947. PMMPTE PointerPte;
  8948. PCHAR BaseVa;
  8949. MMPTE TempPte;
  8950. PMMPFN Pfn1;
  8951. PPFN_NUMBER Page;
  8952. NumberOfPages = BYTES_TO_PAGES (MemoryDumpMdl->ByteCount + MemoryDumpMdl->ByteOffset);
  8953. ASSERT (NumberOfPages <= 16);
  8954. PointerPte = MmCrashDumpPte;
  8955. BaseVa = (PCHAR)MiGetVirtualAddressMappedByPte(PointerPte);
  8956. MemoryDumpMdl->MappedSystemVa = (PCHAR)BaseVa + MemoryDumpMdl->ByteOffset;
  8957. TempPte = ValidKernelPte;
  8958. Page = (PPFN_NUMBER)(MemoryDumpMdl + 1);
  8959. //
  8960. // If the pages don't span the entire dump virtual address range,
  8961. // build a barrier. Otherwise use the default barrier provided at the
  8962. // end of the dump virtual address range.
  8963. //
  8964. if (NumberOfPages < 16) {
  8965. MI_WRITE_INVALID_PTE (PointerPte + NumberOfPages, ZeroPte);
  8966. KiFlushSingleTb (BaseVa + (NumberOfPages << PAGE_SHIFT));
  8967. }
  8968. do {
  8969. Pfn1 = MI_PFN_ELEMENT (*Page);
  8970. TempPte = ValidKernelPte;
  8971. switch (Pfn1->u3.e1.CacheAttribute) {
  8972. case MiCached:
  8973. break;
  8974. case MiNonCached:
  8975. MI_DISABLE_CACHING (TempPte);
  8976. break;
  8977. case MiWriteCombined:
  8978. MI_SET_PTE_WRITE_COMBINE (TempPte);
  8979. break;
  8980. default:
  8981. break;
  8982. }
  8983. TempPte.u.Hard.PageFrameNumber = *Page;
  8984. //
  8985. // Note this PTE may be valid or invalid prior to the overwriting here.
  8986. //
  8987. if (PointerPte->u.Hard.Valid == 1) {
  8988. if (PointerPte->u.Long != TempPte.u.Long) {
  8989. MI_WRITE_VALID_PTE_NEW_PAGE (PointerPte, TempPte);
  8990. KiFlushSingleTb (BaseVa);
  8991. }
  8992. }
  8993. else {
  8994. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  8995. }
  8996. Page += 1;
  8997. PointerPte += 1;
  8998. BaseVa += PAGE_SIZE;
  8999. NumberOfPages -= 1;
  9000. } while (NumberOfPages != 0);
  9001. return;
  9002. }
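//
// Illustrative sketch only: how a crash-dump write path could use the
// mapping routine above. The MDL must describe at most 16 pages, and each
// call reuses the same fixed PTE range, so only one MDL mapping is live at
// a time. ExampleDumpWrite is hypothetical.
//
#if 0
VOID
ExampleDumpWrite (
    IN OUT PMDL DumpMdl
    )
{
    ASSERT (BYTES_TO_PAGES (DumpMdl->ByteCount + DumpMdl->ByteOffset) <= 16);

    MmMapMemoryDumpMdl (DumpMdl);

    //
    // DumpMdl->MappedSystemVa is now valid; write the data to disk with
    // the dump driver before mapping the next MDL.
    //
}
#endif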
  9003. VOID
  9004. MmReleaseDumpAddresses (
  9005. IN PFN_NUMBER NumberOfPages
  9006. )
  9007. /*++
  9008. Routine Description:
  9009. For use by hibernate routine ONLY. Puts zeros back into the
  9010. used dump PTEs.
  9011. Arguments:
  9012. NumberOfPages - Supplies the number of PTEs to zero.
  9013. Return Value:
  9014. None.
  9015. --*/
  9016. {
  9017. PVOID BaseVa;
  9018. PMMPTE PointerPte;
  9019. PointerPte = MmCrashDumpPte;
  9020. BaseVa = MiGetVirtualAddressMappedByPte (PointerPte);
  9021. MiZeroMemoryPte (MmCrashDumpPte, NumberOfPages);
  9022. while (NumberOfPages != 0) {
  9023. KiFlushSingleTb (BaseVa);
  9024. BaseVa = (PVOID) ((PCHAR) BaseVa + PAGE_SIZE);
  9025. NumberOfPages -= 1;
  9026. }
  9027. }
  9028. NTSTATUS
  9029. MmSetBankedSection (
  9030. IN HANDLE ProcessHandle,
  9031. IN PVOID VirtualAddress,
  9032. IN ULONG BankLength,
  9033. IN BOOLEAN ReadWriteBank,
  9034. IN PBANKED_SECTION_ROUTINE BankRoutine,
  9035. IN PVOID Context
  9036. )
  9037. /*++
  9038. Routine Description:
  9039. This function declares a mapped video buffer as a banked
9040. section. This allows banked video devices to be supported (i.e., even
9041. though the video controller has a megabyte or so of memory,
9042. only a small bank (like 64k) can be mapped at any one time).
  9043. In order to overcome this problem, the pager handles faults
  9044. to this memory, unmaps the current bank, calls off to the
  9045. video driver and then maps in the new bank.
  9046. This function creates the necessary structures to allow the
  9047. video driver to be called from the pager.
  9048. ********************* NOTE NOTE NOTE *************************
  9049. At this time only read/write banks are supported!
  9050. Arguments:
  9051. ProcessHandle - Supplies a handle to the process in which to
  9052. support the banked video function.
  9053. VirtualAddress - Supplies the virtual address where the video
  9054. buffer is mapped in the specified process.
  9055. BankLength - Supplies the size of the bank.
  9056. ReadWriteBank - Supplies TRUE if the bank is read and write.
  9057. BankRoutine - Supplies a pointer to the routine that should be
  9058. called by the pager.
  9059. Context - Supplies a context to be passed by the pager to the
  9060. BankRoutine.
  9061. Return Value:
  9062. Returns the status of the function.
  9063. Environment:
  9064. Kernel mode, APC_LEVEL or below.
  9065. --*/
  9066. {
  9067. KAPC_STATE ApcState;
  9068. NTSTATUS Status;
  9069. PEPROCESS Process;
  9070. PMMVAD Vad;
  9071. PMMPTE PointerPte;
  9072. PMMPTE LastPte;
  9073. MMPTE TempPte;
  9074. ULONG_PTR size;
  9075. LONG count;
  9076. ULONG NumberOfPtes;
  9077. PMMBANKED_SECTION Bank;
  9078. PAGED_CODE ();
  9079. UNREFERENCED_PARAMETER (ReadWriteBank);
  9080. //
  9081. // Reference the specified process handle for VM_OPERATION access.
  9082. //
  9083. Status = ObReferenceObjectByHandle ( ProcessHandle,
  9084. PROCESS_VM_OPERATION,
  9085. PsProcessType,
  9086. KernelMode,
  9087. (PVOID *)&Process,
  9088. NULL );
  9089. if (!NT_SUCCESS(Status)) {
  9090. return Status;
  9091. }
  9092. KeStackAttachProcess (&Process->Pcb, &ApcState);
  9093. //
  9094. // Get the address creation mutex to block multiple threads from
  9095. // creating or deleting address space at the same time and
  9096. // get the working set mutex so virtual address descriptors can
  9097. // be inserted and walked. Block APCs so an APC which takes a page
  9098. // fault does not corrupt various structures.
  9099. //
  9100. LOCK_ADDRESS_SPACE (Process);
  9101. //
  9102. // Make sure the address space was not deleted, if so, return an error.
  9103. //
  9104. if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
  9105. Status = STATUS_PROCESS_IS_TERMINATING;
  9106. goto ErrorReturn;
  9107. }
  9108. Vad = MiLocateAddress (VirtualAddress);
  9109. if ((Vad == NULL) ||
  9110. (Vad->StartingVpn != MI_VA_TO_VPN (VirtualAddress)) ||
  9111. (Vad->u.VadFlags.PhysicalMapping == 0)) {
  9112. Status = STATUS_NOT_MAPPED_DATA;
  9113. goto ErrorReturn;
  9114. }
  9115. size = PAGE_SIZE + ((Vad->EndingVpn - Vad->StartingVpn) << PAGE_SHIFT);
  9116. if ((size % BankLength) != 0) {
  9117. Status = STATUS_INVALID_VIEW_SIZE;
  9118. goto ErrorReturn;
  9119. }
  9120. count = -1;
  9121. NumberOfPtes = BankLength;
  9122. do {
  9123. NumberOfPtes = NumberOfPtes >> 1;
  9124. count += 1;
  9125. } while (NumberOfPtes != 0);
  9126. //
  9127. // Turn VAD into Banked VAD
  9128. //
  9129. NumberOfPtes = BankLength >> PAGE_SHIFT;
  9130. Bank = ExAllocatePoolWithTag (NonPagedPool,
  9131. sizeof (MMBANKED_SECTION) +
  9132. (NumberOfPtes - 1) * sizeof(MMPTE),
  9133. 'kBmM');
  9134. if (Bank == NULL) {
  9135. Status = STATUS_INSUFFICIENT_RESOURCES;
  9136. goto ErrorReturn;
  9137. }
  9138. Bank->BankShift = PTE_SHIFT + count - PAGE_SHIFT;
  9139. PointerPte = MiGetPteAddress(MI_VPN_TO_VA (Vad->StartingVpn));
  9140. ASSERT (PointerPte->u.Hard.Valid == 1);
  9141. Bank->BasePhysicalPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  9142. Bank->BasedPte = PointerPte;
  9143. Bank->BankSize = BankLength;
  9144. Bank->BankedRoutine = BankRoutine;
  9145. Bank->Context = Context;
  9146. Bank->CurrentMappedPte = PointerPte;
  9147. //
  9148. // Build the template PTEs structure.
  9149. //
  9150. count = 0;
  9151. TempPte = ZeroPte;
  9152. MI_MAKE_VALID_PTE (TempPte,
  9153. Bank->BasePhysicalPage,
  9154. MM_READWRITE,
  9155. PointerPte);
  9156. if (TempPte.u.Hard.Write) {
  9157. MI_SET_PTE_DIRTY (TempPte);
  9158. }
  9159. do {
  9160. Bank->BankTemplate[count] = TempPte;
  9161. TempPte.u.Hard.PageFrameNumber += 1;
  9162. count += 1;
  9163. } while ((ULONG)count < NumberOfPtes );
  9164. LastPte = MiGetPteAddress (MI_VPN_TO_VA (Vad->EndingVpn));
  9165. //
  9166. // Set all PTEs within this range to zero. Any faults within
  9167. // this range will call the banked routine before making the
  9168. // page valid.
  9169. //
  9170. LOCK_WS_UNSAFE (Process);
  9171. ((PMMVAD_LONG) Vad)->u4.Banked = Bank;
  9172. RtlFillMemory (PointerPte,
  9173. (size >> (PAGE_SHIFT - PTE_SHIFT)),
  9174. (UCHAR)ZeroPte.u.Long);
  9175. KeFlushEntireTb (TRUE, TRUE);
  9176. UNLOCK_WS_UNSAFE (Process);
  9177. Status = STATUS_SUCCESS;
  9178. ErrorReturn:
  9179. UNLOCK_ADDRESS_SPACE (Process);
  9180. KeUnstackDetachProcess (&ApcState);
  9181. ObDereferenceObject (Process);
  9182. return Status;
  9183. }
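//
// Illustrative sketch only: the shape of a bank-switch callback registered
// via MmSetBankedSection, assuming the ntddk PBANKED_SECTION_ROUTINE
// prototype (read/write bank ordinals plus the Context supplied above).
// ExampleSwitchBank and the device programming are hypothetical.
//
#if 0
NTSTATUS
ExampleSwitchBank (
    IN ULONG ReadBank,
    IN ULONG WriteBank,
    IN PVOID Context
    )
{
    //
    // Program the video hardware so the requested bank is visible in the
    // mapped window; the pager then revalidates the PTEs for that bank.
    //

    UNREFERENCED_PARAMETER (ReadBank);
    UNREFERENCED_PARAMETER (WriteBank);
    UNREFERENCED_PARAMETER (Context);

    return STATUS_SUCCESS;
}
#endif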
  9184. PVOID
  9185. MmMapVideoDisplay (
  9186. IN PHYSICAL_ADDRESS PhysicalAddress,
  9187. IN SIZE_T NumberOfBytes,
  9188. IN MEMORY_CACHING_TYPE CacheType
  9189. )
  9190. /*++
  9191. Routine Description:
  9192. This function maps the specified physical address into the non-pagable
  9193. portion of the system address space.
  9194. Arguments:
  9195. PhysicalAddress - Supplies the starting physical address to map.
  9196. NumberOfBytes - Supplies the number of bytes to map.
  9197. CacheType - Supplies MmNonCached if the physical address is to be mapped
  9198. as non-cached, MmCached if the address should be cached, and
  9199. MmWriteCombined if the address should be cached and
  9200. write-combined as a frame buffer. For I/O device registers,
  9201. this is usually specified as MmNonCached.
  9202. Return Value:
  9203. Returns the virtual address which maps the specified physical addresses.
  9204. The value NULL is returned if sufficient virtual address space for
  9205. the mapping could not be found.
  9206. Environment:
  9207. Kernel mode, IRQL of APC_LEVEL or below.
  9208. --*/
  9209. {
  9210. PAGED_CODE();
  9211. return MmMapIoSpace (PhysicalAddress, NumberOfBytes, CacheType);
  9212. }
  9213. VOID
  9214. MmUnmapVideoDisplay (
  9215. IN PVOID BaseAddress,
  9216. IN SIZE_T NumberOfBytes
  9217. )
  9218. /*++
  9219. Routine Description:
  9220. This function unmaps a range of physical address which were previously
  9221. mapped via an MmMapVideoDisplay function call.
  9222. Arguments:
  9223. BaseAddress - Supplies the base virtual address where the physical
  9224. address was previously mapped.
  9225. NumberOfBytes - Supplies the number of bytes which were mapped.
  9226. Return Value:
  9227. None.
  9228. Environment:
  9229. Kernel mode, IRQL of APC_LEVEL or below.
  9230. --*/
  9231. {
  9232. MmUnmapIoSpace (BaseAddress, NumberOfBytes);
  9233. return;
  9234. }
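//
// Illustrative sketch only: mapping a frame buffer write-combined through
// the routines above and unmapping it with the same length. The physical
// address and size are hypothetical.
//
#if 0
VOID
ExampleMapFrameBuffer (
    IN PHYSICAL_ADDRESS FrameBufferPhysical,
    IN SIZE_T FrameBufferBytes
    )
{
    PVOID FrameBuffer;

    FrameBuffer = MmMapVideoDisplay (FrameBufferPhysical,
                                     FrameBufferBytes,
                                     MmWriteCombined);

    if (FrameBuffer != NULL) {

        // ... blt to the frame buffer ...

        MmUnmapVideoDisplay (FrameBuffer, FrameBufferBytes);
    }
}
#endif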
  9235. VOID
  9236. MmLockPagedPool (
  9237. IN PVOID Address,
  9238. IN SIZE_T SizeInBytes
  9239. )
  9240. /*++
  9241. Routine Description:
  9242. Locks the specified address (which MUST reside in paged pool) into
  9243. memory until MmUnlockPagedPool is called.
  9244. Arguments:
  9245. Address - Supplies the address in paged pool to lock.
  9246. SizeInBytes - Supplies the size in bytes to lock.
  9247. Return Value:
  9248. None.
  9249. Environment:
  9250. Kernel mode, IRQL of APC_LEVEL or below.
  9251. --*/
  9252. {
  9253. PMMPTE PointerPte;
  9254. PMMPTE LastPte;
  9255. PointerPte = MiGetPteAddress (Address);
  9256. LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + (SizeInBytes - 1)));
  9257. MiLockCode (PointerPte, LastPte, MM_LOCK_BY_REFCOUNT);
  9258. return;
  9259. }
  9260. NTKERNELAPI
  9261. VOID
  9262. MmUnlockPagedPool (
  9263. IN PVOID Address,
  9264. IN SIZE_T SizeInBytes
  9265. )
  9266. /*++
  9267. Routine Description:
  9268. Unlocks paged pool that was locked with MmLockPagedPool.
  9269. Arguments:
  9270. Address - Supplies the address in paged pool to unlock.
  9271. Size - Supplies the size to unlock.
  9272. Return Value:
  9273. None.
  9274. Environment:
  9275. Kernel mode, IRQL of APC_LEVEL or below.
  9276. --*/
  9277. {
  9278. PMMPTE PointerPte;
  9279. PMMPTE LastPte;
  9280. KIRQL OldIrql;
  9281. PFN_NUMBER PageFrameIndex;
  9282. PMMPFN Pfn1;
  9283. MmLockPagableSectionByHandle(ExPageLockHandle);
  9284. PointerPte = MiGetPteAddress (Address);
  9285. LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + (SizeInBytes - 1)));
  9286. LOCK_PFN (OldIrql);
  9287. do {
  9288. ASSERT (PointerPte->u.Hard.Valid == 1);
  9289. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  9290. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  9291. ASSERT (Pfn1->u3.e2.ReferenceCount > 1);
  9292. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (Pfn1, 35);
  9293. PointerPte += 1;
  9294. } while (PointerPte <= LastPte);
  9295. UNLOCK_PFN (OldIrql);
  9296. MmUnlockPagableImageSection(ExPageLockHandle);
  9297. return;
  9298. }
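//
// Illustrative sketch only: bracketing access to a paged pool buffer whose
// pages must stay resident across the access. The buffer is hypothetical;
// the unlock size must match the locked size.
//
#if 0
VOID
ExamplePinPagedPool (
    IN PVOID PagedBuffer,
    IN SIZE_T PagedBufferBytes
    )
{
    MmLockPagedPool (PagedBuffer, PagedBufferBytes);

    // ... the range is now resident and referenced; safe to access ...

    MmUnlockPagedPool (PagedBuffer, PagedBufferBytes);
}
#endif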
  9299. NTKERNELAPI
  9300. ULONG
  9301. MmGatherMemoryForHibernate (
  9302. IN PMDL Mdl,
  9303. IN BOOLEAN Wait
  9304. )
  9305. /*++
  9306. Routine Description:
9307. Finds enough memory to fill in the pages of the MDL for the power
9308. management hibernate function.
  9309. Arguments:
  9310. Mdl - Supplies an MDL, the start VA field should be NULL. The length
  9311. field indicates how many pages to obtain.
  9312. Wait - FALSE to fail immediately if the pages aren't available.
  9313. Return Value:
  9314. TRUE if the MDL could be filled in, FALSE otherwise.
  9315. Environment:
  9316. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  9317. --*/
  9318. {
  9319. KIRQL OldIrql;
  9320. PFN_NUMBER AvailablePages;
  9321. PFN_NUMBER PagesNeeded;
  9322. PPFN_NUMBER Pages;
  9323. PFN_NUMBER i;
  9324. PFN_NUMBER PageFrameIndex;
  9325. PMMPFN Pfn1;
  9326. ULONG status;
  9327. PKTHREAD CurrentThread;
  9328. status = FALSE;
  9329. PagesNeeded = Mdl->ByteCount >> PAGE_SHIFT;
  9330. Pages = (PPFN_NUMBER)(Mdl + 1);
  9331. i = Wait ? 100 : 1;
  9332. CurrentThread = KeGetCurrentThread ();
  9333. KeEnterCriticalRegionThread (CurrentThread);
  9334. InterlockedIncrement (&MiDelayPageFaults);
  9335. do {
  9336. LOCK_PFN2 (OldIrql);
  9337. MiDeferredUnlockPages (MI_DEFER_PFN_HELD);
  9338. //
  9339. // Don't use MmAvailablePages here because if compression hardware is
  9340. // being used we would bail prematurely. Check the lists explicitly
  9341. // in order to provide our caller with the maximum number of pages.
  9342. //
  9343. AvailablePages = MmZeroedPageListHead.Total +
  9344. MmFreePageListHead.Total +
  9345. MmStandbyPageListHead.Total;
  9346. if (AvailablePages > PagesNeeded) {
  9347. //
  9348. // Fill in the MDL.
  9349. //
  9350. do {
  9351. PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (NULL));
  9352. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  9353. #if DBG
  9354. Pfn1->PteAddress = (PVOID) (ULONG_PTR)X64K;
  9355. #endif
  9356. MI_SET_PFN_DELETED (Pfn1);
  9357. Pfn1->u3.e2.ReferenceCount += 1;
  9358. Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  9359. *Pages = PageFrameIndex;
  9360. Pages += 1;
  9361. PagesNeeded -= 1;
  9362. } while (PagesNeeded);
  9363. UNLOCK_PFN2 (OldIrql);
  9364. Mdl->MdlFlags |= MDL_PAGES_LOCKED;
  9365. status = TRUE;
  9366. break;
  9367. }
  9368. UNLOCK_PFN2 (OldIrql);
  9369. //
  9370. // If we're being called at DISPATCH_LEVEL we cannot move pages to
  9371. // the standby list because mutexes must be acquired to do so.
  9372. //
  9373. if (OldIrql > APC_LEVEL) {
  9374. break;
  9375. }
  9376. if (!i) {
  9377. break;
  9378. }
  9379. //
  9380. // Attempt to move pages to the standby list.
  9381. //
  9382. MmEmptyAllWorkingSets ();
  9383. MiFlushAllPages();
  9384. KeDelayExecutionThread (KernelMode,
  9385. FALSE,
  9386. (PLARGE_INTEGER)&Mm30Milliseconds);
  9387. i -= 1;
  9388. } while (TRUE);
  9389. InterlockedDecrement (&MiDelayPageFaults);
  9390. KeLeaveCriticalRegionThread (CurrentThread);
  9391. return status;
  9392. }

NTKERNELAPI
VOID
MmReturnMemoryForHibernate (
    IN PMDL Mdl
    )

/*++

Routine Description:

    Returns the memory that was obtained via MmGatherMemoryForHibernate.

Arguments:

    Mdl - Supplies the MDL whose pages were filled in by
          MmGatherMemoryForHibernate.

Return Value:

    None.

Environment:

    Kernel mode, IRQL of APC_LEVEL or below.

--*/

{
    PMMPFN Pfn1;
    KIRQL OldIrql;
    PPFN_NUMBER Pages;
    PPFN_NUMBER LastPage;

    Pages = (PPFN_NUMBER)(Mdl + 1);
    LastPage = Pages + (Mdl->ByteCount >> PAGE_SHIFT);

    LOCK_PFN2 (OldIrql);

    do {
        Pfn1 = MI_PFN_ELEMENT (*Pages);
        MiDecrementReferenceCount (Pfn1, *Pages);
        Pages += 1;
    } while (Pages < LastPage);

    UNLOCK_PFN2 (OldIrql);
    return;
}
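
//
// Illustrative sketch (not compiled into this module): how a power
// management caller might obtain a run of pages for hibernation and later
// give them back.  The function name and MDL sizing below are hypothetical.
//
#if 0
ULONG
ExampleGatherHibernatePages (
    IN PFN_NUMBER PageCount
    )
{
    PMDL Mdl;
    ULONG Gathered;

    //
    // Allocate an MDL large enough to describe PageCount pages; the start
    // VA is NULL, as MmGatherMemoryForHibernate requires.
    //
    Mdl = MmCreateMdl (NULL, NULL, PageCount << PAGE_SHIFT);
    if (Mdl == NULL) {
        return FALSE;
    }

    Gathered = MmGatherMemoryForHibernate (Mdl, TRUE);

    if (Gathered) {
        // ... write the hibernate data into the gathered pages ...
        MmReturnMemoryForHibernate (Mdl);
    }

    ExFreePool (Mdl);
    return Gathered;
}
#endif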

VOID
MmEnablePAT (
    VOID
    )

/*++

Routine Description:

    This routine enables the page attribute table (PAT) capability for
    individual PTE mappings.

Arguments:

    None.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    MiWriteCombiningPtes = TRUE;
}
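
//
// Illustrative sketch (not compiled into this module): the HAL would
// typically call MmEnablePAT during processor initialization once it has
// verified PAT support.  The function names, including the CPUID-based
// detection helper, are hypothetical.
//
#if 0
VOID
ExampleInitializePat (
    VOID
    )
{
    if (ExampleProcessorSupportsPat ()) {   // hypothetical CPUID feature check
        MmEnablePAT ();
    }
}
#endif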

LOGICAL
MmIsSystemAddressLocked (
    IN PVOID VirtualAddress
    )

/*++

Routine Description:

    This routine determines whether the specified system address is currently
    locked.

    This routine should only be called for debugging purposes, as it is not
    guaranteed upon return to the caller that the address is still locked.
    (The address could easily have been trimmed prior to return).

Arguments:

    VirtualAddress - Supplies the virtual address to check.

Return Value:

    TRUE if the address is locked.  FALSE if not.

Environment:

    DISPATCH_LEVEL or below.  No memory management locks may be held.

--*/

{
    PMMPFN Pfn1;
    KIRQL OldIrql;
    PMMPTE PointerPte;
    PFN_NUMBER PageFrameIndex;

    if (IS_SYSTEM_ADDRESS (VirtualAddress) == FALSE) {
        return FALSE;
    }

    if (MI_IS_PHYSICAL_ADDRESS (VirtualAddress)) {
        return TRUE;
    }

    //
    // Hyperspace and page maps are not treated as locked down.
    //

    if (MI_IS_PROCESS_SPACE_ADDRESS (VirtualAddress) == TRUE) {
        return FALSE;
    }

#if defined (_IA64_)
    if (MI_IS_KERNEL_PTE_ADDRESS (VirtualAddress) == TRUE) {
        return FALSE;
    }
#endif

    PointerPte = MiGetPteAddress (VirtualAddress);

    LOCK_PFN2 (OldIrql);

    if (MiIsAddressValid (VirtualAddress, TRUE) == FALSE) {
        UNLOCK_PFN2 (OldIrql);
        return FALSE;
    }

    PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);

    //
    // Note that the mapped page may not be in the PFN database.  Treat
    // this as locked.
    //

    if (!MI_IS_PFN (PageFrameIndex)) {
        UNLOCK_PFN2 (OldIrql);
        return TRUE;
    }

    Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

    //
    // Check for the page being locked by reference.
    //

    if (Pfn1->u3.e2.ReferenceCount > 1) {
        UNLOCK_PFN2 (OldIrql);
        return TRUE;
    }

    if (Pfn1->u3.e2.ReferenceCount > Pfn1->u2.ShareCount) {
        UNLOCK_PFN2 (OldIrql);
        return TRUE;
    }

    //
    // Check whether the page is locked into the working set.
    //

    if (Pfn1->u1.Event == NULL) {
        UNLOCK_PFN2 (OldIrql);
        return TRUE;
    }

    UNLOCK_PFN2 (OldIrql);

    return FALSE;
}
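
//
// Illustrative sketch (not compiled into this module): a debug-only check
// a component might make before touching a buffer at raised IRQL.  The
// function name is hypothetical.
//
#if 0
VOID
ExampleAssertBufferLocked (
    IN PVOID SystemBuffer
    )
{
    //
    // Advisory only: the answer may be stale by the time it is returned,
    // so this is suitable for ASSERTs rather than for synchronization.
    //
    ASSERT (MmIsSystemAddressLocked (SystemBuffer));
}
#endif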

LOGICAL
MmAreMdlPagesLocked (
    IN PMDL MemoryDescriptorList
    )

/*++

Routine Description:

    This routine determines whether the pages described by the argument
    MDL are currently locked.

    This routine should only be called for debugging purposes, as it is not
    guaranteed upon return to the caller that the pages are still locked.

Arguments:

    MemoryDescriptorList - Supplies the memory descriptor list to check.

Return Value:

    TRUE if ALL the pages described by the argument MDL are locked.
    FALSE if not.

Environment:

    DISPATCH_LEVEL or below.  No memory management locks may be held.

--*/

{
    PFN_NUMBER NumberOfPages;
    PPFN_NUMBER Page;
    PVOID StartingVa;
    PMMPFN Pfn1;
    KIRQL OldIrql;

    //
    // We'd like to assert that MDL_PAGES_LOCKED is set but can't because
    // some drivers have privately constructed MDLs and they never set the
    // bit properly.
    //

    if ((MemoryDescriptorList->MdlFlags & (MDL_IO_SPACE | MDL_SOURCE_IS_NONPAGED_POOL)) != 0) {
        return TRUE;
    }

    StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
                         MemoryDescriptorList->ByteOffset);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
                                                    MemoryDescriptorList->ByteCount);

    Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);

    LOCK_PFN2 (OldIrql);

    do {
        if (*Page == MM_EMPTY_LIST) {

            //
            // There are no more locked pages.
            //

            break;
        }

        //
        // Note that the mapped page may not be in the PFN database.  Treat
        // this as locked.
        //

        if (MI_IS_PFN (*Page)) {

            Pfn1 = MI_PFN_ELEMENT (*Page);

            //
            // Check for the page being locked by reference
            //
            // - or -
            //
            // whether the page is locked into the working set.
            //

            if ((Pfn1->u3.e2.ReferenceCount <= Pfn1->u2.ShareCount) &&
                (Pfn1->u3.e2.ReferenceCount <= 1) &&
                (Pfn1->u1.Event != NULL)) {

                //
                // The page is not locked by reference or in a working set.
                //

                UNLOCK_PFN2 (OldIrql);
                return FALSE;
            }
        }

        Page += 1;
        NumberOfPages -= 1;

    } while (NumberOfPages != 0);

    UNLOCK_PFN2 (OldIrql);

    return TRUE;
}
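
//
// Illustrative sketch (not compiled into this module): a driver verifying,
// in checked builds, that an MDL handed to it in an IRP was properly
// probed and locked before use.  The function name is hypothetical.
//
#if 0
VOID
ExampleValidateIrpMdl (
    IN PIRP Irp
    )
{
    if (Irp->MdlAddress != NULL) {
        ASSERT (MmAreMdlPagesLocked (Irp->MdlAddress));
    }
}
#endif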

#if DBG

VOID
MiVerifyLockedPageCharges (
    VOID
    )

/*++

Routine Description:

    This debug-only routine walks every run in the PFN database, counts the
    pages whose LockCharged bit is set and compares the total against
    MmSystemLockPagesCount, printing the result as directed by the
    MiPrintLockedPages flags.

Arguments:

    None.

Return Value:

    None.

Environment:

    Kernel mode, APC_LEVEL or below.

--*/

{
    PMMPFN Pfn1;
    KIRQL OldIrql;
    PFN_NUMBER start;
    PFN_NUMBER count;
    PFN_NUMBER Page;
    PFN_NUMBER LockCharged;

    if (MiPrintLockedPages == 0) {
        return;
    }

    if (KeGetCurrentIrql () > APC_LEVEL) {
        return;
    }

    start = 0;
    LockCharged = 0;

    KeAcquireGuardedMutex (&MmDynamicMemoryMutex);

    LOCK_PFN (OldIrql);

    do {
        count = MmPhysicalMemoryBlock->Run[start].PageCount;
        Page = MmPhysicalMemoryBlock->Run[start].BasePage;

        if (count != 0) {
            Pfn1 = MI_PFN_ELEMENT (Page);
            do {
                if (Pfn1->u4.LockCharged == 1) {
                    if (MiPrintLockedPages & 0x4) {
                        DbgPrint ("%x ", MI_PFN_ELEMENT_TO_INDEX (Pfn1));
                    }
                    LockCharged += 1;
                }
                count -= 1;
                Pfn1 += 1;
            } while (count != 0);
        }

        start += 1;
    } while (start != MmPhysicalMemoryBlock->NumberOfRuns);

    if (LockCharged != MmSystemLockPagesCount) {
        if (MiPrintLockedPages & 0x1) {
            DbgPrint ("MM: Locked pages MISMATCH %u %u\n",
                      LockCharged, MmSystemLockPagesCount);
        }
    }
    else {
        if (MiPrintLockedPages & 0x2) {
            DbgPrint ("MM: Locked pages ok %u\n",
                      LockCharged);
        }
    }

    UNLOCK_PFN (OldIrql);

    KeReleaseGuardedMutex (&MmDynamicMemoryMutex);

    return;
}
#endif