Source code of Windows XP (NT5)


/*++

Copyright (c) 1989  Microsoft Corporation

Module Name:

    iosup.c

Abstract:

    This module contains routines which provide support for the I/O system.

Author:

    Lou Perazzoli (loup) 25-Apr-1989
    Landy Wang (landyw) 02-June-1997

Revision History:

--*/

#include "mi.h"

#undef MmIsRecursiveIoFault

ULONG MiCacheOverride[3];

extern ULONG MmTotalSystemDriverPages;

BOOLEAN
MmIsRecursiveIoFault (
    VOID
    );

PVOID
MiAllocateContiguousMemory (
    IN SIZE_T NumberOfBytes,
    IN PFN_NUMBER LowestAcceptablePfn,
    IN PFN_NUMBER HighestAcceptablePfn,
    IN PFN_NUMBER BoundaryPfn,
    IN MEMORY_CACHING_TYPE CacheType,
    PVOID CallingAddress
    );

PVOID
MiMapLockedPagesInUserSpace (
    IN PMDL MemoryDescriptorList,
    IN PVOID StartingVa,
    IN MEMORY_CACHING_TYPE CacheType,
    IN PVOID BaseVa
    );

VOID
MiUnmapLockedPagesInUserSpace (
    IN PVOID BaseAddress,
    IN PMDL MemoryDescriptorList
    );

VOID
MiAddMdlTracker (
    IN PMDL MemoryDescriptorList,
    IN PVOID CallingAddress,
    IN PVOID CallersCaller,
    IN PFN_NUMBER NumberOfPagesToLock,
    IN ULONG Who
    );

typedef struct _PTE_TRACKER {
    LIST_ENTRY ListEntry;
    PMDL Mdl;
    PFN_NUMBER Count;
    PVOID SystemVa;
    PVOID StartVa;
    ULONG Offset;
    ULONG Length;
    ULONG_PTR Page;
    PVOID CallingAddress;
    PVOID CallersCaller;
    PVOID PteAddress;
} PTE_TRACKER, *PPTE_TRACKER;

typedef struct _SYSPTES_HEADER {
    LIST_ENTRY ListHead;
    PFN_NUMBER Count;
} SYSPTES_HEADER, *PSYSPTES_HEADER;

ULONG MmTrackPtes = 0;
BOOLEAN MiTrackPtesAborted = FALSE;
SYSPTES_HEADER MiPteHeader;
SLIST_HEADER MiDeadPteTrackerSListHead;
KSPIN_LOCK MiPteTrackerLock;
LOCK_HEADER MmLockedPagesHead;
BOOLEAN MiTrackingAborted = FALSE;
ULONG MiNonCachedCollisions;

#if DBG
PFN_NUMBER MiCurrentAdvancedPages;
PFN_NUMBER MiAdvancesGiven;
PFN_NUMBER MiAdvancesFreed;
#endif

VOID
MiInsertPteTracker (
    IN PPTE_TRACKER PteTracker,
    IN PMDL MemoryDescriptorList,
    IN PFN_NUMBER NumberOfPtes,
    IN PVOID MyCaller,
    IN PVOID MyCallersCaller
    );

VOID
MiRemovePteTracker (
    IN PMDL MemoryDescriptorList OPTIONAL,
    IN PVOID PteAddress,
    IN PFN_NUMBER NumberOfPtes
    );

PPTE_TRACKER
MiReleaseDeadPteTrackers (
    VOID
    );

VOID
MiProtectFreeNonPagedPool (
    IN PVOID VirtualAddress,
    IN ULONG SizeInPages
    );

LOGICAL
MiUnProtectFreeNonPagedPool (
    IN PVOID VirtualAddress,
    IN ULONG SizeInPages
    );

VOID
MiPhysicalViewInserter (
    IN PEPROCESS Process,
    IN PMI_PHYSICAL_VIEW PhysicalView
    );

#if DBG
ULONG MiPrintLockedPages;

VOID
MiVerifyLockedPageCharges (
    VOID
    );
#endif

#ifdef ALLOC_PRAGMA
#pragma alloc_text(INIT, MmSetPageProtection)
#pragma alloc_text(INIT, MmFreeIndependentPages)
#pragma alloc_text(INIT, MiInitializeIoTrackers)
#pragma alloc_text(PAGE, MmAllocateIndependentPages)
#pragma alloc_text(PAGE, MmLockPagableDataSection)
#pragma alloc_text(PAGE, MiLookupDataTableEntry)
#pragma alloc_text(PAGE, MmSetBankedSection)
#pragma alloc_text(PAGE, MmProbeAndLockProcessPages)
#pragma alloc_text(PAGE, MmProbeAndLockSelectedPages)
#pragma alloc_text(PAGE, MmMapVideoDisplay)
#pragma alloc_text(PAGE, MmUnmapVideoDisplay)
#pragma alloc_text(PAGE, MmGetSectionRange)
#pragma alloc_text(PAGE, MiMapSinglePage)
#pragma alloc_text(PAGE, MiUnmapSinglePage)
#pragma alloc_text(PAGE, MmAllocateMappingAddress)
#pragma alloc_text(PAGE, MmFreeMappingAddress)
#pragma alloc_text(PAGE, MmAllocateNonCachedMemory)
#pragma alloc_text(PAGE, MmFreeNonCachedMemory)
#pragma alloc_text(PAGE, MmLockPagedPool)
#pragma alloc_text(PAGE, MmLockPagableSectionByHandle)
#pragma alloc_text(PAGE, MiMapLockedPagesInUserSpace)
#pragma alloc_text(PAGELK, MmEnablePAT)
#pragma alloc_text(PAGELK, MiUnmapLockedPagesInUserSpace)
#pragma alloc_text(PAGELK, MmAllocatePagesForMdl)
#pragma alloc_text(PAGELK, MmFreePagesFromMdl)
#pragma alloc_text(PAGELK, MmUnlockPagedPool)
#pragma alloc_text(PAGELK, MmGatherMemoryForHibernate)
#pragma alloc_text(PAGELK, MmReturnMemoryForHibernate)
#pragma alloc_text(PAGELK, MmReleaseDumpAddresses)
#pragma alloc_text(PAGELK, MmMapUserAddressesToPage)
#pragma alloc_text(PAGELK, MiPhysicalViewInserter)
#pragma alloc_text(PAGELK, MiPhysicalViewAdjuster)
#pragma alloc_text(PAGEVRFY, MmIsSystemAddressLocked)
#pragma alloc_text(PAGEVRFY, MmAreMdlPagesLocked)
#endif

extern POOL_DESCRIPTOR NonPagedPoolDescriptor;

PFN_NUMBER MmMdlPagesAllocated;

KEVENT MmCollidedLockEvent;
LONG MmCollidedLockWait;

SIZE_T MmLockedCode;

BOOLEAN MiWriteCombiningPtes = FALSE;

#ifdef LARGE_PAGES
ULONG MmLargeVideoMapped;
#endif

#if DBG
ULONG MiPrintAwe;
ULONG MmStopOnBadProbe = 1;
#endif

#define MI_PROBE_RAISE_SIZE 10

ULONG MiProbeRaises[MI_PROBE_RAISE_SIZE];

#define MI_INSTRUMENT_PROBE_RAISES(i)       \
        ASSERT (i < MI_PROBE_RAISE_SIZE);   \
        MiProbeRaises[i] += 1;
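
//
// N.B. (added note) This macro expands to two statements and is not
// wrapped in do { } while (0), so it is only safe where a sequence of
// statements is legal - e.g., not as the unbraced body of an if/else.
// Every use in this module invokes it as a standalone statement.
//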
//
// Note: this should be > 2041 to account for the cache manager's
// aggressive zeroing logic.
//

ULONG MmReferenceCountCheck = 2500;

ULONG MiMdlsAdjusted = FALSE;

VOID
MmProbeAndLockPages (
    IN OUT PMDL MemoryDescriptorList,
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )

/*++

Routine Description:

    This routine probes the specified pages, makes the pages resident and
    locks the physical pages mapped by the virtual pages in memory. The
    memory descriptor list is updated to describe the physical pages.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
                           (MDL). The supplied MDL must supply a virtual
                           address, byte offset and length field. The
                           physical page portion of the MDL is updated when
                           the pages are locked in memory.

    AccessMode - Supplies the access mode in which to probe the arguments.
                 One of KernelMode or UserMode.

    Operation - Supplies the operation type. One of IoReadAccess,
                IoWriteAccess or IoModifyAccess.

Return Value:

    None - exceptions are raised.

Environment:

    Kernel mode. APC_LEVEL and below for pagable addresses,
    DISPATCH_LEVEL and below for non-pagable addresses.

--*/
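
//
// A minimal usage sketch (illustrative, not part of the original source):
// a driver typically calls MmProbeAndLockPages inside a try/except block
// because failures are reported by raising an exception, not by returning
// a status. The MDL is assumed to have been built by the caller (e.g.
// with IoAllocateMdl); names here are illustrative only.
//
//     PMDL Mdl;
//
//     Mdl = IoAllocateMdl (UserBuffer, Length, FALSE, FALSE, NULL);
//     if (Mdl == NULL) {
//         return STATUS_INSUFFICIENT_RESOURCES;
//     }
//     try {
//         MmProbeAndLockPages (Mdl, UserMode, IoWriteAccess);
//     } except (EXCEPTION_EXECUTE_HANDLER) {
//         IoFreeMdl (Mdl);
//         return GetExceptionCode ();
//     }
//
//     ... perform the transfer ...
//
//     MmUnlockPages (Mdl);
//     IoFreeMdl (Mdl);
//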
{
    PPFN_NUMBER Page;
    MMPTE PteContents;
    PMMPTE LastPte;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE PointerPpe;
    PMMPTE PointerPxe;
    PVOID Va;
    PVOID EndVa;
    PVOID AlignedVa;
    PMMPFN Pfn1;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER LastPageFrameIndex;
    PEPROCESS CurrentProcess;
    KIRQL OldIrql;
    PFN_NUMBER NumberOfPagesToLock;
    PFN_NUMBER NumberOfPagesSpanned;
    NTSTATUS status;
    NTSTATUS ProbeStatus;
    PETHREAD Thread;
    ULONG SavedState;
    LOGICAL AddressIsPhysical;
    PLIST_ENTRY NextEntry;
    PMI_PHYSICAL_VIEW PhysicalView;
    PCHAR StartVa;
    PVOID CallingAddress;
    PVOID CallersCaller;

#if defined (_MIALT4K_)
    MMPTE AltPteContents;
    PMMPTE PointerAltPte;
    PMMPTE LastPointerAltPte;
    PMMPTE AltPointerPte;
    PMMPTE AltPointerPde;
    PMMPTE AltPointerPpe;
    PMMPTE AltPointerPxe;
#endif

    ASSERT (MemoryDescriptorList->ByteCount != 0);
    ASSERT (((ULONG)MemoryDescriptorList->ByteOffset & ~(PAGE_SIZE - 1)) == 0);

    Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);

    ASSERT (((ULONG_PTR)MemoryDescriptorList->StartVa & (PAGE_SIZE - 1)) == 0);
    AlignedVa = (PVOID)MemoryDescriptorList->StartVa;

    ASSERT ((MemoryDescriptorList->MdlFlags & (
                    MDL_PAGES_LOCKED |
                    MDL_MAPPED_TO_SYSTEM_VA |
                    MDL_SOURCE_IS_NONPAGED_POOL |
                    MDL_PARTIAL |
                    MDL_IO_SPACE)) == 0);

    Va = (PCHAR)AlignedVa + MemoryDescriptorList->ByteOffset;
    StartVa = Va;

    //
    // EndVa is one byte past the end of the buffer. If AccessMode is not
    // KernelMode, make sure EndVa is in user space AND the byte count
    // does not cause it to wrap.
    //

    EndVa = (PVOID)((PCHAR)Va + MemoryDescriptorList->ByteCount);

    if ((AccessMode != KernelMode) &&
        ((EndVa > (PVOID)MM_USER_PROBE_ADDRESS) || (Va >= EndVa))) {

        *Page = MM_EMPTY_LIST;
        MI_INSTRUMENT_PROBE_RAISES(0);
        ExRaiseStatus (STATUS_ACCESS_VIOLATION);
        return;
    }

    //
    // You would think there is an optimization which could be performed
    // here: if the operation is for WriteAccess and the complete page is
    // being modified, the current page could be removed (if not resident)
    // and a demand zero page substituted. However, analysis (marking the
    // thread and noting whether a page read was done) shows this rarely
    // occurs in practice.
    //

    Thread = PsGetCurrentThread ();

    if (!MI_IS_PHYSICAL_ADDRESS(Va)) {

        AddressIsPhysical = FALSE;
        ProbeStatus = STATUS_SUCCESS;

        NumberOfPagesToLock = ADDRESS_AND_SIZE_TO_SPAN_PAGES (Va,
                                        MemoryDescriptorList->ByteCount);
        ASSERT (NumberOfPagesToLock != 0);

        NumberOfPagesSpanned = NumberOfPagesToLock;

        PointerPxe = MiGetPxeAddress (Va);
        PointerPpe = MiGetPpeAddress (Va);
        PointerPde = MiGetPdeAddress (Va);
        PointerPte = MiGetPteAddress (Va);

        MmSavePageFaultReadAhead (Thread, &SavedState);
        MmSetPageFaultReadAhead (Thread, (ULONG)(NumberOfPagesToLock - 1));

        try {

            do {

                *Page = MM_EMPTY_LIST;

                //
                // Make sure the page is resident.
                //

                *(volatile CHAR *)Va;

                if ((Operation != IoReadAccess) &&
                    (Va <= MM_HIGHEST_USER_ADDRESS)) {

                    //
                    // Probe for write access as well.
                    //

                    ProbeForWriteChar ((PCHAR)Va);
                }

                NumberOfPagesToLock -= 1;

                MmSetPageFaultReadAhead (Thread, (ULONG)(NumberOfPagesToLock - 1));
                Va = (PVOID) (((ULONG_PTR)Va + PAGE_SIZE) & ~(PAGE_SIZE - 1));
                Page += 1;

            } while (Va < EndVa);

            ASSERT (NumberOfPagesToLock == 0);

        } except (EXCEPTION_EXECUTE_HANDLER) {
            ProbeStatus = GetExceptionCode();
        }

        //
        // We may still fault again below but it's generally rare.
        // Restore this thread's normal fault behavior now.
        //

        MmResetPageFaultReadAhead (Thread, SavedState);

        if (ProbeStatus != STATUS_SUCCESS) {
            MI_INSTRUMENT_PROBE_RAISES(1);
            MemoryDescriptorList->Process = NULL;
            ExRaiseStatus (ProbeStatus);
            return;
        }
    }
    else {

        AddressIsPhysical = TRUE;
        *Page = MM_EMPTY_LIST;

        //
        // Initializing these is not needed for correctness
        // but without it the compiler cannot compile this code
        // W4 to check for use of uninitialized variables.
        //

        NumberOfPagesSpanned = 0;
        PointerPxe = NULL;
        PointerPpe = NULL;
        PointerPde = NULL;
        PointerPte = NULL;
        SavedState = 0;
    }

    Va = AlignedVa;
    Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);

    //
    // Indicate that this is a write operation.
    //

    if (Operation != IoReadAccess) {
        MemoryDescriptorList->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else {
        MemoryDescriptorList->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Initialize MdlFlags (assume the probe will succeed).
    //

    MemoryDescriptorList->MdlFlags |= MDL_PAGES_LOCKED;

    if (Va <= MM_HIGHEST_USER_ADDRESS) {

        //
        // These are user space addresses, check to see if the
        // working set size will allow these pages to be locked.
        //

        ASSERT (AddressIsPhysical == FALSE);
        ASSERT (NumberOfPagesSpanned != 0);

        CurrentProcess = PsGetCurrentProcess ();

        //
        // Initialize the MDL process field (assume the probe will succeed).
        //

        MemoryDescriptorList->Process = CurrentProcess;

        LastPte = MiGetPteAddress ((PCHAR)EndVa - 1);

        //
        // Acquire the PFN database lock.
        //

        LOCK_PFN2 (OldIrql);

        //
        // Check for a transfer to/from a physical VAD - no reference counts
        // may be modified for these pages.
        //

        if (CurrentProcess->Flags & PS_PROCESS_FLAGS_HAS_PHYSICAL_VAD) {

            //
            // This process has a physical VAD which maps directly to RAM
            // not necessarily present in the PFN database. See if the
            // MDL request intersects this physical VAD.
            //

            NextEntry = CurrentProcess->PhysicalVadList.Flink;
            while (NextEntry != &CurrentProcess->PhysicalVadList) {

                PhysicalView = CONTAINING_RECORD(NextEntry,
                                                 MI_PHYSICAL_VIEW,
                                                 ListEntry);

                if (PhysicalView->Vad->u.VadFlags.PhysicalMapping == 0) {
                    NextEntry = NextEntry->Flink;
                    continue;
                }

                if (StartVa < PhysicalView->StartVa) {

                    if ((PCHAR)EndVa - 1 >= PhysicalView->StartVa) {

                        //
                        // The range encompasses a physical VAD. This is not
                        // allowed.
                        //

                        UNLOCK_PFN2 (OldIrql);
                        MI_INSTRUMENT_PROBE_RAISES(2);
                        MemoryDescriptorList->Process = NULL;
                        MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
                        ExRaiseStatus (STATUS_ACCESS_VIOLATION);
                        return;
                    }

                    NextEntry = NextEntry->Flink;
                    continue;
                }

                if (StartVa <= PhysicalView->EndVa) {

                    //
                    // Ensure that the entire range lies within the VAD.
                    //

                    if ((PCHAR)EndVa - 1 > PhysicalView->EndVa) {

                        //
                        // The range goes past the end of the VAD - not allowed.
                        //

                        UNLOCK_PFN2 (OldIrql);
                        MI_INSTRUMENT_PROBE_RAISES(3);
                        MemoryDescriptorList->Process = NULL;
                        MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
                        ExRaiseStatus (STATUS_ACCESS_VIOLATION);
                        return;
                    }

                    //
                    // The range lies within a physical VAD.
                    //

                    if (Operation != IoReadAccess) {

                        //
                        // Ensure the VAD is writable. Changing individual PTE
                        // protections in a physical VAD is not allowed.
                        //

                        if ((PhysicalView->Vad->u.VadFlags.Protection & MM_READWRITE) == 0) {
                            UNLOCK_PFN2 (OldIrql);
                            MI_INSTRUMENT_PROBE_RAISES(4);
                            MemoryDescriptorList->Process = NULL;
                            MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
                            ExRaiseStatus (STATUS_ACCESS_VIOLATION);
                            return;
                        }
                    }

                    //
                    // Don't charge page locking for this transfer as it is all
                    // physical, just initialize the MDL. Note the pages do not
                    // have to be physically contiguous, so the frames must be
                    // extracted from the PTEs.
                    //
                    // Treat this as an I/O space address and don't allow
                    // operations on addresses not in the PFN database.
                    //

                    LastPte = PointerPte + NumberOfPagesSpanned;

                    do {
                        PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
                        *Page = PageFrameIndex;
                        Page += 1;
                        PointerPte += 1;
                    } while (PointerPte < LastPte);

                    UNLOCK_PFN2 (OldIrql);

                    MemoryDescriptorList->MdlFlags |= (MDL_IO_SPACE | MDL_PAGES_LOCKED);

                    return;
                }

                NextEntry = NextEntry->Flink;
            }
        }

        InterlockedExchangeAddSizeT (&CurrentProcess->NumberOfLockedPages,
                                     NumberOfPagesSpanned);
    }
    else {

        MemoryDescriptorList->Process = NULL;

        if (AddressIsPhysical == TRUE) {

            //
            // On certain architectures, virtual addresses
            // may be physical and hence have no corresponding PTE.
            //

            PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (Va);
            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

            Va = (PCHAR)Va + MemoryDescriptorList->ByteOffset;
            NumberOfPagesToLock = ADDRESS_AND_SIZE_TO_SPAN_PAGES (Va,
                                            MemoryDescriptorList->ByteCount);
            LastPageFrameIndex = PageFrameIndex + NumberOfPagesToLock;

            //
            // Acquire the PFN database lock.
            //

            LOCK_PFN2 (OldIrql);

            ASSERT (PageFrameIndex <= MmHighestPhysicalPage);
            ASSERT ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0);

            //
            // Ensure the systemwide locked pages count remains fluid.
            //

            if (MI_NONPAGABLE_MEMORY_AVAILABLE() <= (SPFN_NUMBER) NumberOfPagesToLock) {

                //
                // If this page is for paged pool or privileged code/data,
                // then force it in regardless.
                //

                if ((Va < MM_HIGHEST_USER_ADDRESS) ||
                    (MI_IS_SYSTEM_CACHE_ADDRESS(Va))) {

                    UNLOCK_PFN2 (OldIrql);
                    MI_INSTRUMENT_PROBE_RAISES(5);
                    MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
                    ExRaiseStatus (STATUS_WORKING_SET_QUOTA);
                    return;
                }
                MI_INSTRUMENT_PROBE_RAISES(8);
            }

            do {

                //
                // Check to make sure each page is not locked down an unusually
                // high number of times.
                //

                if (Pfn1->u3.e2.ReferenceCount >= MmReferenceCountCheck) {
                    UNLOCK_PFN2 (OldIrql);
                    ASSERT (FALSE);
                    status = STATUS_WORKING_SET_QUOTA;
                    goto failure;
                }

                if (MemoryDescriptorList->MdlFlags & MDL_WRITE_OPERATION) {
                    MI_SNAP_DIRTY (Pfn1, 1, 0x99);
                }

                MI_ADD_LOCKED_PAGE_CHARGE(Pfn1, 0);
                Pfn1->u3.e2.ReferenceCount += 1;

                *Page = PageFrameIndex;
                Page += 1;
                PageFrameIndex += 1;
                Pfn1 += 1;

            } while (PageFrameIndex < LastPageFrameIndex);

            UNLOCK_PFN2 (OldIrql);
            return;
        }

        //
        // Since this operation is to a system address, no need to check for
        // PTE write access below so mark the access as a read so only the
        // operation type (and not where the Va is) needs to be checked in the
        // subsequent loop.
        //

        ASSERT (Va > MM_HIGHEST_USER_ADDRESS);
        Operation = IoReadAccess;

        LastPte = MiGetPteAddress ((PCHAR)EndVa - 1);
        LOCK_PFN2 (OldIrql);
    }

    do {

#if (_MI_PAGING_LEVELS==4)
        while ((PointerPxe->u.Hard.Valid == 0) ||
               (PointerPpe->u.Hard.Valid == 0) ||
               (PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
#elif (_MI_PAGING_LEVELS==3)
        while ((PointerPpe->u.Hard.Valid == 0) ||
               (PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
#else
        while ((PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
#endif
        {
            //
            // PDE is not resident, release the PFN lock and access the page
            // to make it appear.
            //

            UNLOCK_PFN2 (OldIrql);

            MmSetPageFaultReadAhead (Thread, 0);

            Va = MiGetVirtualAddressMappedByPte (PointerPte);

            status = MmAccessFault (FALSE, Va, KernelMode, NULL);

            MmResetPageFaultReadAhead (Thread, SavedState);

            if (!NT_SUCCESS(status)) {
                goto failure;
            }

            LOCK_PFN2 (OldIrql);
        }

        PteContents = *PointerPte;

        //
        // There is a subtle race here where the PTE contents can get zeroed
        // by a thread running on another processor. This can only happen
        // for an AWE address space because these ranges (deliberately for
        // performance reasons) do not acquire the PFN lock during remap
        // operations. In this case, one of 2 scenarios is possible -
        // either the old PTE is read or the new. The new may be a zero
        // PTE if the map request was to invalidate *or* non-zero (and
        // valid) if the map request was inserting a new entry. For the
        // latter, we don't care if we lock the old or new frame here as
        // it's an application bug to provoke this behavior - and
        // regardless of which is used, no corruption can occur because
        // the PFN lock is acquired during an NtFreeUserPhysicalPages.
        // But the former must be checked for explicitly here. As a
        // separate note, the PXE/PPE/PDE accesses above are always safe
        // even for the AWE deletion race because these tables
        // are never lazy-allocated for AWE ranges.
        //

        if (PteContents.u.Hard.Valid == 0) {
            ASSERT (PteContents.u.Long == 0);
            ASSERT (PsGetCurrentProcess ()->AweInfo != NULL);
            UNLOCK_PFN2 (OldIrql);
            status = STATUS_ACCESS_VIOLATION;
            goto failure;
        }

#if defined (_MIALT4K_)

        if (PteContents.u.Hard.Cache == MM_PTE_CACHE_RESERVED) {

            //
            // This is a wow64 split page - ie: the individual 4k
            // pages have different permissions, so each 4k page within
            // this native page must be probed individually.
            //
            // Note split pages are generally rare.
            //

            ASSERT (PsGetCurrentProcess()->Wow64Process != NULL);
            ASSERT (EndVa < (PVOID)MM_MAX_WOW64_ADDRESS);

            Va = MiGetVirtualAddressMappedByPte (PointerPte);

            PointerAltPte = MiGetAltPteAddress (Va);
            LastPointerAltPte = PointerAltPte + (PAGE_SIZE / PAGE_4K) - 1;

            AltPointerPxe = MiGetPxeAddress (PointerAltPte);
            AltPointerPpe = MiGetPpeAddress (PointerAltPte);
            AltPointerPde = MiGetPdeAddress (PointerAltPte);
            AltPointerPte = MiGetPteAddress (PointerAltPte);

#if (_MI_PAGING_LEVELS==4)
            while ((AltPointerPxe->u.Hard.Valid == 0) ||
                   (AltPointerPpe->u.Hard.Valid == 0) ||
                   (AltPointerPde->u.Hard.Valid == 0) ||
                   (AltPointerPte->u.Hard.Valid == 0))
#elif (_MI_PAGING_LEVELS==3)
            while ((AltPointerPpe->u.Hard.Valid == 0) ||
                   (AltPointerPde->u.Hard.Valid == 0) ||
                   (AltPointerPte->u.Hard.Valid == 0))
#else
            while ((AltPointerPde->u.Hard.Valid == 0) ||
                   (AltPointerPte->u.Hard.Valid == 0))
#endif
            {
                //
                // The ALTPTEs are not resident, release the PFN lock and
                // access it to make it appear. Then restart the entire
                // operation as the PFN lock was released so anything
                // could have happened to the address space.
                //

                UNLOCK_PFN2 (OldIrql);

                MmSetPageFaultReadAhead (Thread, 0);

                status = MmAccessFault (FALSE, PointerAltPte, KernelMode, NULL);

                MmResetPageFaultReadAhead (Thread, SavedState);

                if (!NT_SUCCESS(status)) {
                    goto failure;
                }

                LOCK_PFN2 (OldIrql);

                continue;
            }

            //
            // The ALTPTEs are now present and the PFN lock is held again.
            // Examine the individual 4k page states in the ALTPTEs.
            //
            // Note that only the relevant 4k pages can be examined - ie:
            // if the transfer starts in the 2nd 4k of a native page,
            // then don't examine the 1st 4k. If the transfer ends in
            // the first half of a native page, then don't examine the
            // 2nd 4k.
            //

            ASSERT (PAGE_SIZE == 2 * PAGE_4K);

            if (PAGE_ALIGN (StartVa) == PAGE_ALIGN (Va)) {

                //
                // We are in the first page, see if we need to round up.
                //

                if (BYTE_OFFSET (StartVa) >= PAGE_4K) {
                    PointerAltPte += 1;
                    Va = (PVOID)((ULONG_PTR)Va + PAGE_4K);
                }
            }

            if (PAGE_ALIGN ((PCHAR)EndVa - 1) == PAGE_ALIGN (Va)) {

                //
                // We are in the last page, see if we need to round down.
                //

                if (BYTE_OFFSET ((PCHAR)EndVa - 1) < PAGE_4K) {
                    LastPointerAltPte -= 1;
                }
            }

            //
            // We better not have rounded up and down in the same page !
            //

            ASSERT (PointerAltPte <= LastPointerAltPte);
            ASSERT (PointerAltPte != NULL);

            do {

                //
                // If the sub 4k page is :
                //
                // 1 - No access or
                // 2 - This is a private not-committed page or
                // 3 - This is write operation and the page is read only
                //
                // then return an access violation.
                //

                AltPteContents = *PointerAltPte;

                if (AltPteContents.u.Alt.NoAccess != 0) {
                    status = STATUS_ACCESS_VIOLATION;
                    UNLOCK_PFN2 (OldIrql);
                    goto failure;
                }

                if ((AltPteContents.u.Alt.Commit == 0) && (AltPteContents.u.Alt.Private != 0)) {
                    status = STATUS_ACCESS_VIOLATION;
                    UNLOCK_PFN2 (OldIrql);
                    goto failure;
                }

                if (Operation != IoReadAccess) {

                    //
                    // If the caller is writing and the ALTPTE indicates
                    // it's not writable or copy on write, then AV.
                    //
                    // If it's copy on write, then fall through for further
                    // interrogation.
                    //

                    if ((AltPteContents.u.Alt.Write == 0) &&
                        (AltPteContents.u.Alt.CopyOnWrite == 0)) {

                        status = STATUS_ACCESS_VIOLATION;
                        UNLOCK_PFN2 (OldIrql);
                        goto failure;
                    }
                }

                //
                // If the sub 4k page is :
                //
                // 1 - has not been accessed yet or
                // 2 - demand-fill zero or
                // 3 - copy-on-write, and this is a write operation
                //
                // then go the long way and see if it can be paged in.
                //

                if ((AltPteContents.u.Alt.Accessed == 0) ||
                    (AltPteContents.u.Alt.FillZero != 0) ||
                    ((Operation != IoReadAccess) && (AltPteContents.u.Alt.CopyOnWrite == 1))) {

                    UNLOCK_PFN2 (OldIrql);

                    MmSetPageFaultReadAhead (Thread, 0);

                    status = MmX86Fault (FALSE, Va, KernelMode, NULL);

                    MmResetPageFaultReadAhead (Thread, SavedState);

                    if (!NT_SUCCESS(status)) {
                        goto failure;
                    }

                    //
                    // Clear PointerAltPte to signify a restart is needed
                    // (because the PFN lock was released so the address
                    // space may have changed).
                    //

                    PointerAltPte = NULL;

                    LOCK_PFN2 (OldIrql);

                    break;
                }

                PointerAltPte += 1;
                Va = (PVOID)((ULONG_PTR)Va + PAGE_4K);

            } while (PointerAltPte <= LastPointerAltPte);

            if (PointerAltPte == NULL) {
                continue;
            }
        }

#endif

        if (Operation != IoReadAccess) {

            if ((PteContents.u.Long & MM_PTE_WRITE_MASK) == 0) {

                if (PteContents.u.Long & MM_PTE_COPY_ON_WRITE_MASK) {

                    //
                    // The protection has changed from writable to copy on
                    // write. This can happen if a fork is in progress for
                    // example. Restart the operation at the top.
                    //

                    Va = MiGetVirtualAddressMappedByPte (PointerPte);

                    if (Va <= MM_HIGHEST_USER_ADDRESS) {

                        UNLOCK_PFN2 (OldIrql);

                        MmSetPageFaultReadAhead (Thread, 0);

                        status = MmAccessFault (FALSE, Va, KernelMode, NULL);

                        MmResetPageFaultReadAhead (Thread, SavedState);

                        if (!NT_SUCCESS(status)) {
                            goto failure;
                        }

                        LOCK_PFN2 (OldIrql);

                        continue;
                    }
                }

                //
                // The caller has made the page protection more
                // restrictive, this should never be done once the
                // request has been issued ! Rather than wading
                // through the PFN database entry to see if it
                // could possibly work out, give the caller an
                // access violation.
                //

#if DBG
                DbgPrint ("MmProbeAndLockPages: PTE %p %p changed\n",
                          PointerPte,
                          PteContents.u.Long);

                if (MmStopOnBadProbe) {
                    DbgBreakPoint ();
                }
#endif
                UNLOCK_PFN2 (OldIrql);
                status = STATUS_ACCESS_VIOLATION;
                goto failure;
            }
        }

        PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);

        if (PageFrameIndex <= MmHighestPhysicalPage) {

            ASSERT ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0);

            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

            //
            // Check to make sure this page is not locked down an unusually
            // high number of times.
            //

            if (Pfn1->u3.e2.ReferenceCount >= MmReferenceCountCheck) {
                UNLOCK_PFN2 (OldIrql);
                ASSERT (FALSE);
                status = STATUS_WORKING_SET_QUOTA;
                goto failure;
            }

            //
            // Ensure the systemwide locked pages count is fluid.
            //

            if (MI_NONPAGABLE_MEMORY_AVAILABLE() <= 0) {

                //
                // If this page is for paged pool or privileged code/data,
                // then force it in regardless.
                //

                Va = MiGetVirtualAddressMappedByPte (PointerPte);

                if ((Va < MM_HIGHEST_USER_ADDRESS) ||
                    (MI_IS_SYSTEM_CACHE_ADDRESS(Va))) {

                    MI_INSTRUMENT_PROBE_RAISES(5);
                    UNLOCK_PFN2 (OldIrql);
                    status = STATUS_WORKING_SET_QUOTA;
                    goto failure;
                }
                MI_INSTRUMENT_PROBE_RAISES(8);
            }

            if (MemoryDescriptorList->MdlFlags & MDL_WRITE_OPERATION) {
                MI_SNAP_DIRTY (Pfn1, 1, 0x98);
            }

            MI_ADD_LOCKED_PAGE_CHARGE(Pfn1, 0);
            Pfn1->u3.e2.ReferenceCount += 1;
        }
        else {

            //
            // This is an I/O space address - don't allow operations
            // on addresses not in the PFN database.
            //

            MemoryDescriptorList->MdlFlags |= MDL_IO_SPACE;
        }

        *Page = PageFrameIndex;

        Page += 1;
        PointerPte += 1;

        if (MiIsPteOnPdeBoundary(PointerPte)) {
            PointerPde += 1;
            if (MiIsPteOnPpeBoundary(PointerPte)) {
                PointerPpe += 1;
                if (MiIsPteOnPxeBoundary(PointerPte)) {
                    PointerPxe += 1;
                }
            }
        }

    } while (PointerPte <= LastPte);

    UNLOCK_PFN2 (OldIrql);

    if ((MmTrackLockedPages == TRUE) && (AlignedVa <= MM_HIGHEST_USER_ADDRESS)) {

        ASSERT (NumberOfPagesSpanned != 0);

        RtlGetCallersAddress (&CallingAddress, &CallersCaller);

        MiAddMdlTracker (MemoryDescriptorList,
                         CallingAddress,
                         CallersCaller,
                         NumberOfPagesSpanned,
                         1);
    }

    return;

failure:

    //
    // An exception occurred. Unlock the pages locked so far.
    //

    if (MmTrackLockedPages == TRUE) {

        //
        // Adjust the MDL length so that MmUnlockPages only
        // processes the part that was completed.
        //

        ULONG PagesLocked;

        PagesLocked = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartVa,
                                                     MemoryDescriptorList->ByteCount);

        RtlGetCallersAddress (&CallingAddress, &CallersCaller);

        MiAddMdlTracker (MemoryDescriptorList,
                         CallingAddress,
                         CallersCaller,
                         PagesLocked,
                         0);
    }

    MmUnlockPages (MemoryDescriptorList);

    //
    // Raise an exception of access violation to the caller.
    //

    MI_INSTRUMENT_PROBE_RAISES(7);
    ExRaiseStatus (status);

    return;
}

NTKERNELAPI
VOID
MmProbeAndLockProcessPages (
    IN OUT PMDL MemoryDescriptorList,
    IN PEPROCESS Process,
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )

/*++

Routine Description:

    This routine probes and locks the address range specified by
    the MemoryDescriptorList in the specified Process for the AccessMode
    and Operation.

Arguments:

    MemoryDescriptorList - Supplies a pre-initialized MDL that describes the
                           address range to be probed and locked.

    Process - Specifies the address of the process whose address range is
              to be locked.

    AccessMode - The mode for which the probe should check access to the range.

    Operation - Supplies the type of access for which to check the range.

Return Value:

    None.

--*/
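
//
// A minimal usage sketch (illustrative, not part of the original source):
// the routine attaches to the target process internally, so the caller
// only needs a valid EPROCESS pointer and the usual try/except wrapping.
//
//     try {
//         MmProbeAndLockProcessPages (Mdl, TargetProcess, UserMode, IoReadAccess);
//     } except (EXCEPTION_EXECUTE_HANDLER) {
//         Status = GetExceptionCode ();
//     }
//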

{
    KAPC_STATE ApcState;
    LOGICAL Attached;
    NTSTATUS Status;

    Attached = FALSE;
    Status = STATUS_SUCCESS;

    if (Process != PsGetCurrentProcess ()) {
        KeStackAttachProcess (&Process->Pcb, &ApcState);
        Attached = TRUE;
    }

    try {

        MmProbeAndLockPages (MemoryDescriptorList,
                             AccessMode,
                             Operation);

    } except (EXCEPTION_EXECUTE_HANDLER) {
        Status = GetExceptionCode();
    }

    if (Attached) {
        KeUnstackDetachProcess (&ApcState);
    }

    if (Status != STATUS_SUCCESS) {
        ExRaiseStatus (Status);
    }

    return;
}

VOID
MiAddMdlTracker (
    IN PMDL MemoryDescriptorList,
    IN PVOID CallingAddress,
    IN PVOID CallersCaller,
    IN PFN_NUMBER NumberOfPagesToLock,
    IN ULONG Who
    )

/*++

Routine Description:

    This routine adds an MDL to the specified process' chain.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
                           (MDL). The MDL must supply the length. The
                           physical page portion of the MDL is updated when
                           the pages are locked in memory.

    CallingAddress - Supplies the address of the caller of our caller.

    CallersCaller - Supplies the address of the caller of CallingAddress.

    NumberOfPagesToLock - Specifies the number of pages to lock.

    Who - Specifies which routine is adding the entry.

Return Value:

    None - exceptions are raised.

Environment:

    Kernel mode. APC_LEVEL and below.

--*/
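
//
// Implementation note (added commentary): each tracker is a LOCK_TRACKER
// allocated from nonpaged pool (tag 'kLmM') and linked onto both the
// per-process list and the systemwide MmLockedPagesHead list, so locked
// pages that are never unlocked can later be attributed to the caller
// recorded here.
//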

{
    KIRQL OldIrql;
    PEPROCESS Process;
    PLOCK_HEADER LockedPagesHeader;
    PLOCK_TRACKER Tracker;
    PLOCK_TRACKER P;
    PLIST_ENTRY NextEntry;

    ASSERT (MmTrackLockedPages == TRUE);

    Process = MemoryDescriptorList->Process;
    if (Process == NULL) {
        return;
    }

    LockedPagesHeader = Process->LockedPagesList;
    if (LockedPagesHeader == NULL) {
        return;
    }

    //
    // It's ok to check unsynchronized for aborted tracking as the worst case
    // is just that one more entry gets added which will be freed later anyway.
    // The main purpose behind aborted tracking is that frees and exits don't
    // mistakenly bugcheck when an entry cannot be found.
    //

    if (MiTrackingAborted == TRUE) {
        return;
    }

    Tracker = ExAllocatePoolWithTag (NonPagedPool,
                                     sizeof (LOCK_TRACKER),
                                     'kLmM');

    if (Tracker == NULL) {

        //
        // It's ok to set this without synchronization as the worst case
        // is just that a few more entries get added which will be freed
        // later anyway. The main purpose behind aborted tracking is that
        // frees and exits don't mistakenly bugcheck when an entry cannot
        // be found.
        //

        MiTrackingAborted = TRUE;

        return;
    }

    Tracker->Mdl = MemoryDescriptorList;
    Tracker->Count = NumberOfPagesToLock;
    Tracker->StartVa = MemoryDescriptorList->StartVa;
    Tracker->Offset = MemoryDescriptorList->ByteOffset;
    Tracker->Length = MemoryDescriptorList->ByteCount;
    Tracker->Page = *(PPFN_NUMBER)(MemoryDescriptorList + 1);
    Tracker->CallingAddress = CallingAddress;
    Tracker->CallersCaller = CallersCaller;
    Tracker->Who = Who;
    Tracker->Process = Process;

    ExAcquireSpinLock (&MiTrackLockedPagesLock, &OldIrql);

    //
    // Update the list for this process. First make sure it's not already
    // inserted.
    //

    NextEntry = LockedPagesHeader->ListHead.Flink;
    while (NextEntry != &LockedPagesHeader->ListHead) {

        P = CONTAINING_RECORD (NextEntry,
                               LOCK_TRACKER,
                               ListEntry);

        if (P->Mdl == MemoryDescriptorList) {
            KeBugCheckEx (LOCKED_PAGES_TRACKER_CORRUPTION,
                          0x1,
                          (ULONG_PTR)P,
                          (ULONG_PTR)MemoryDescriptorList,
                          (ULONG_PTR)MmLockedPagesHead.Count);
        }

        NextEntry = NextEntry->Flink;
    }

    InsertTailList (&LockedPagesHeader->ListHead, &Tracker->ListEntry);
    LockedPagesHeader->Count += NumberOfPagesToLock;

    //
    // Update the systemwide global list. First make sure it's not
    // already inserted.
    //

    NextEntry = MmLockedPagesHead.ListHead.Flink;
    while (NextEntry != &MmLockedPagesHead.ListHead) {

        P = CONTAINING_RECORD(NextEntry,
                              LOCK_TRACKER,
                              GlobalListEntry);

        if (P->Mdl == MemoryDescriptorList) {
            KeBugCheckEx (LOCKED_PAGES_TRACKER_CORRUPTION,
                          0x2,
                          (ULONG_PTR)P,
                          (ULONG_PTR)MemoryDescriptorList,
                          (ULONG_PTR)MmLockedPagesHead.Count);
        }

        NextEntry = NextEntry->Flink;
    }

    InsertTailList (&MmLockedPagesHead.ListHead, &Tracker->GlobalListEntry);
    MmLockedPagesHead.Count += NumberOfPagesToLock;

    ExReleaseSpinLock (&MiTrackLockedPagesLock, OldIrql);
}

LOGICAL
MiFreeMdlTracker (
    IN OUT PMDL MemoryDescriptorList,
    IN PFN_NUMBER NumberOfPages
    )

/*++

Routine Description:

    This deletes an MDL from the specified process' chain. Used specifically
    by MmProbeAndLockSelectedPages () because it builds an MDL on its local
    stack and then copies the requested pages into the real MDL. This lets
    us track these pages.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
                           (MDL). The MDL must supply the length.

    NumberOfPages - Supplies the number of pages to be freed.

Return Value:

    TRUE.

Environment:

    Kernel mode. APC_LEVEL and below.

--*/

{
    KIRQL OldIrql;
    PLOCK_TRACKER Tracker;
    PLIST_ENTRY NextEntry;
    PLOCK_HEADER LockedPagesHeader;
    PPFN_NUMBER Page;
    PLOCK_TRACKER Found;
    PVOID PoolToFree;

    ASSERT (MemoryDescriptorList->Process != NULL);

    LockedPagesHeader = (PLOCK_HEADER)MemoryDescriptorList->Process->LockedPagesList;

    if (LockedPagesHeader == NULL) {
        return TRUE;
    }

    //
    // Initializing PoolToFree is not needed for correctness
    // but without it the compiler cannot compile this code
    // W4 to check for use of uninitialized variables.
    //

    PoolToFree = NULL;
    Found = NULL;
    Page = (PPFN_NUMBER) (MemoryDescriptorList + 1);

    ExAcquireSpinLock (&MiTrackLockedPagesLock, &OldIrql);

    NextEntry = LockedPagesHeader->ListHead.Flink;
    while (NextEntry != &LockedPagesHeader->ListHead) {

        Tracker = CONTAINING_RECORD (NextEntry,
                                     LOCK_TRACKER,
                                     ListEntry);

        if (MemoryDescriptorList == Tracker->Mdl) {

            if (Found != NULL) {
                KeBugCheckEx (LOCKED_PAGES_TRACKER_CORRUPTION,
                              0x3,
                              (ULONG_PTR)Found,
                              (ULONG_PTR)Tracker,
                              (ULONG_PTR)MemoryDescriptorList);
            }

            ASSERT (Tracker->Page == *Page);
            ASSERT (NumberOfPages == Tracker->Count);

            Tracker->Count = (PFN_NUMBER)-1;

            RemoveEntryList (NextEntry);
            LockedPagesHeader->Count -= NumberOfPages;

            RemoveEntryList (&Tracker->GlobalListEntry);
            MmLockedPagesHead.Count -= NumberOfPages;

            Found = Tracker;
            PoolToFree = (PVOID)NextEntry;
        }

        NextEntry = Tracker->ListEntry.Flink;
    }

    ExReleaseSpinLock (&MiTrackLockedPagesLock, OldIrql);

    if (Found == NULL) {

        //
        // A driver is trying to unlock pages that aren't locked.
        //

        if (MiTrackingAborted == TRUE) {
            return TRUE;
        }

        KeBugCheckEx (PROCESS_HAS_LOCKED_PAGES,
                      1,
                      (ULONG_PTR)MemoryDescriptorList,
                      MemoryDescriptorList->Process->NumberOfLockedPages,
                      (ULONG_PTR)MemoryDescriptorList->Process->LockedPagesList);
    }

    ExFreePool (PoolToFree);

    return TRUE;
}

LOGICAL
MmUpdateMdlTracker (
    IN PMDL MemoryDescriptorList,
    IN PVOID CallingAddress,
    IN PVOID CallersCaller
    )

/*++

Routine Description:

    This updates an MDL in the specified process' chain. Used by the I/O
    system so that proper driver identification can be done even when I/O
    is actually locking the pages on their behalf.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List.

    CallingAddress - Supplies the address of the caller of our caller.

    CallersCaller - Supplies the address of the caller of CallingAddress.

Return Value:

    TRUE if the MDL was found, FALSE if not.

Environment:

    Kernel mode. APC_LEVEL and below.

--*/

{
    KIRQL OldIrql;
    PLOCK_TRACKER Tracker;
    PLIST_ENTRY NextEntry;
    PLOCK_HEADER LockedPagesHeader;
    PEPROCESS Process;

    ASSERT (MmTrackLockedPages == TRUE);

    Process = MemoryDescriptorList->Process;
    if (Process == NULL) {
        return FALSE;
    }

    LockedPagesHeader = (PLOCK_HEADER) Process->LockedPagesList;
    if (LockedPagesHeader == NULL) {
        return FALSE;
    }

    ExAcquireSpinLock (&MiTrackLockedPagesLock, &OldIrql);

    //
    // Walk the list backwards as it's likely the MDL was
    // just recently inserted.
    //

    NextEntry = LockedPagesHeader->ListHead.Blink;
    while (NextEntry != &LockedPagesHeader->ListHead) {

        Tracker = CONTAINING_RECORD (NextEntry,
                                     LOCK_TRACKER,
                                     ListEntry);

        if (MemoryDescriptorList == Tracker->Mdl) {
            ASSERT (Tracker->Page == *(PPFN_NUMBER) (MemoryDescriptorList + 1));
            Tracker->CallingAddress = CallingAddress;
            Tracker->CallersCaller = CallersCaller;
            ExReleaseSpinLock (&MiTrackLockedPagesLock, OldIrql);
            return TRUE;
        }

        NextEntry = Tracker->ListEntry.Blink;
    }

    ExReleaseSpinLock (&MiTrackLockedPagesLock, OldIrql);

    //
    // The caller is trying to update an MDL that is no longer locked.
    //

    return FALSE;
}

LOGICAL
MiUpdateMdlTracker (
    IN PMDL MemoryDescriptorList,
    IN ULONG AdvancePages
    )

/*++

Routine Description:

    This updates an MDL in the specified process' chain.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List.

    AdvancePages - Supplies the number of pages being advanced.

Return Value:

    TRUE if the MDL was found, FALSE if not.

Environment:

    Kernel mode. DISPATCH_LEVEL and below.

--*/

{
    PPFN_NUMBER Page;
    KIRQL OldIrql;
    PLOCK_TRACKER Tracker;
    PLIST_ENTRY NextEntry;
    PLOCK_HEADER LockedPagesHeader;
    PEPROCESS Process;

    ASSERT (MmTrackLockedPages == TRUE);

    Process = MemoryDescriptorList->Process;
    if (Process == NULL) {
        return FALSE;
    }

    LockedPagesHeader = (PLOCK_HEADER) Process->LockedPagesList;
    if (LockedPagesHeader == NULL) {
        return FALSE;
    }

    ExAcquireSpinLock (&MiTrackLockedPagesLock, &OldIrql);

    //
    // Walk the list backwards as it's likely the MDL was
    // just recently inserted.
    //

    NextEntry = LockedPagesHeader->ListHead.Blink;
    while (NextEntry != &LockedPagesHeader->ListHead) {

        Tracker = CONTAINING_RECORD (NextEntry,
                                     LOCK_TRACKER,
                                     ListEntry);

        if (MemoryDescriptorList == Tracker->Mdl) {
            Page = (PPFN_NUMBER) (MemoryDescriptorList + 1);
            ASSERT (Tracker->Page == *Page);
            ASSERT (Tracker->Count > AdvancePages);
            Tracker->Page = *(Page + AdvancePages);
            Tracker->Count -= AdvancePages;
            MmLockedPagesHead.Count -= AdvancePages;
            ExReleaseSpinLock (&MiTrackLockedPagesLock, OldIrql);
            return TRUE;
        }

        NextEntry = Tracker->ListEntry.Blink;
    }

    ExReleaseSpinLock (&MiTrackLockedPagesLock, OldIrql);

    //
    // The caller is trying to update an MDL that is no longer locked.
    //

    return FALSE;
}

NTKERNELAPI
VOID
MmProbeAndLockSelectedPages (
    IN OUT PMDL MemoryDescriptorList,
    IN PFILE_SEGMENT_ELEMENT SegmentArray,
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )

/*++

Routine Description:

    This routine probes the specified pages, makes the pages resident and
    locks the physical pages mapped by the virtual pages in memory. The
    memory descriptor list is updated to describe the physical pages.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
                           (MDL). The MDL must supply the length. The
                           physical page portion of the MDL is updated when
                           the pages are locked in memory.

    SegmentArray - Supplies a pointer to a list of buffer segments to be
                   probed and locked.

    AccessMode - Supplies the access mode in which to probe the arguments.
                 One of KernelMode or UserMode.

    Operation - Supplies the operation type. One of IoReadAccess,
                IoWriteAccess or IoModifyAccess.

Return Value:

    None - exceptions are raised.

Environment:

    Kernel mode. APC_LEVEL and below.

--*/
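
//
// A minimal usage sketch (illustrative, not part of the original source):
// each FILE_SEGMENT_ELEMENT supplies one page-sized, page-aligned buffer,
// as used by scatter/gather APIs such as NtReadFileScatter and
// NtWriteFileGather. The MDL's ByteCount determines how many segments
// are consumed; buffer names here are hypothetical.
//
//     FILE_SEGMENT_ELEMENT Segments[2];
//
//     Segments[0].Buffer = PageAlignedBuffer0;   // hypothetical buffers
//     Segments[1].Buffer = PageAlignedBuffer1;
//
//     try {
//         MmProbeAndLockSelectedPages (Mdl, Segments, UserMode, IoReadAccess);
//     } except (EXCEPTION_EXECUTE_HANDLER) {
//         return GetExceptionCode ();
//     }
//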

{
    PMDL TempMdl;
    PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + 1];
    PPFN_NUMBER Page;
    PFILE_SEGMENT_ELEMENT LastSegment;
    PVOID CallingAddress;
    PVOID CallersCaller;
    ULONG NumberOfPagesToLock;

    PAGED_CODE();

    NumberOfPagesToLock = 0;

    ASSERT (MemoryDescriptorList->ByteCount != 0);
    ASSERT (((ULONG_PTR)MemoryDescriptorList->ByteOffset & ~(PAGE_SIZE - 1)) == 0);

    ASSERT ((MemoryDescriptorList->MdlFlags & (
                    MDL_PAGES_LOCKED |
                    MDL_MAPPED_TO_SYSTEM_VA |
                    MDL_SOURCE_IS_NONPAGED_POOL |
                    MDL_PARTIAL |
                    MDL_IO_SPACE)) == 0);

    //
    // Initialize TempMdl.
    //

    TempMdl = (PMDL) &MdlHack;
    MmInitializeMdl (TempMdl, SegmentArray->Buffer, PAGE_SIZE);

    Page = (PPFN_NUMBER) (MemoryDescriptorList + 1);

    //
    // Calculate the end of the segment list.
    //

    LastSegment = SegmentArray +
                  BYTES_TO_PAGES(MemoryDescriptorList->ByteCount);

    ASSERT (SegmentArray < LastSegment);

    //
    // Build a small Mdl for each segment and call probe and lock pages.
    // Then copy the PFNs to the real mdl. The first page is processed
    // outside of the try/finally to ensure that the flags and process
    // field are correctly set in case MmUnlockPages needs to be called.
    //

    //
    // Even systems without 64 bit pointers are required to zero the
    // upper 32 bits of the segment address so use alignment rather
    // than the buffer pointer.
    //

    SegmentArray += 1;

    MmProbeAndLockPages (TempMdl, AccessMode, Operation);

    if (MmTrackLockedPages == TRUE) {

        //
        // Since we move the page from the temp MDL to the real one below
        // and never free the temp one, fixup our accounting now.
        //

        if (MiFreeMdlTracker (TempMdl, 1) == TRUE) {
            NumberOfPagesToLock += 1;
        }
    }

    *Page++ = *((PPFN_NUMBER) (TempMdl + 1));

    //
    // Copy the flags and process fields.
    //

    MemoryDescriptorList->MdlFlags |= TempMdl->MdlFlags;
    MemoryDescriptorList->Process = TempMdl->Process;

    try {

        while (SegmentArray < LastSegment) {

            //
            // Even systems without 64 bit pointers are required to zero the
            // upper 32 bits of the segment address so use alignment rather
            // than the buffer pointer.
            //

            TempMdl->StartVa = (PVOID)(ULONG_PTR)SegmentArray->Buffer;
            TempMdl->MdlFlags = 0;

            SegmentArray += 1;
            MmProbeAndLockPages (TempMdl, AccessMode, Operation);

            if (MmTrackLockedPages == TRUE) {

                //
                // Since we move the page from the temp MDL to the real one
                // below and never free the temp one, fixup our accounting now.
                //

                if (MiFreeMdlTracker (TempMdl, 1) == TRUE) {
                    NumberOfPagesToLock += 1;
                }
            }

            *Page++ = *((PPFN_NUMBER) (TempMdl + 1));
        }

    } finally {

        if (abnormal_termination()) {

            //
            // Adjust the MDL length so that MmUnlockPages only processes
            // the part that was completed.
            //

            MemoryDescriptorList->ByteCount =
                (ULONG) (Page - (PPFN_NUMBER) (MemoryDescriptorList + 1)) << PAGE_SHIFT;

            if (MmTrackLockedPages == TRUE) {
                RtlGetCallersAddress (&CallingAddress, &CallersCaller);

                MiAddMdlTracker (MemoryDescriptorList,
                                 CallingAddress,
                                 CallersCaller,
                                 NumberOfPagesToLock,
                                 2);
            }

            MmUnlockPages (MemoryDescriptorList);
        }
        else if (MmTrackLockedPages == TRUE) {
            RtlGetCallersAddress (&CallingAddress, &CallersCaller);

            MiAddMdlTracker (MemoryDescriptorList,
                             CallingAddress,
                             CallersCaller,
                             NumberOfPagesToLock,
                             3);
        }
    }
}

VOID
MmUnlockPages (
    IN OUT PMDL MemoryDescriptorList
    )

/*++

Routine Description:

    This routine unlocks physical pages which are described by a Memory
    Descriptor List.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a memory descriptor list
                           (MDL). The supplied MDL must have been supplied
                           to MmProbeAndLockPages to lock the pages down.
                           As the pages are unlocked, the MDL is updated.

Return Value:

    None.

Environment:

    Kernel mode, IRQL of DISPATCH_LEVEL or below.

--*/
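
//
// Usage note (added commentary): if the MDL is still mapped into system
// space (MDL_MAPPED_TO_SYSTEM_VA), this routine unmaps it before
// dereferencing the pages - see the MmUnmapLockedPages call below. A
// typical completion path in a driver is simply:
//
//     MmUnlockPages (Mdl);
//     IoFreeMdl (Mdl);
//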
  1408. {
  1409. PVOID OldValue;
  1410. PEPROCESS Process;
  1411. PFN_NUMBER NumberOfPages;
  1412. PPFN_NUMBER Page;
  1413. PPFN_NUMBER LastPage;
  1414. PVOID StartingVa;
  1415. KIRQL OldIrql;
  1416. PMMPFN Pfn1;
  1417. CSHORT MdlFlags;
  1418. PSINGLE_LIST_ENTRY SingleListEntry;
  1419. PMI_PFN_DEREFERENCE_CHUNK DerefMdl;
  1420. PSLIST_HEADER PfnDereferenceSListHead;
  1421. PSINGLE_LIST_ENTRY *PfnDeferredList;
  1422. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PAGES_LOCKED) != 0);
  1423. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
  1424. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) == 0);
  1425. ASSERT (MemoryDescriptorList->ByteCount != 0);
  1426. Process = MemoryDescriptorList->Process;
  1427. //
  1428. // Carefully snap a copy of the MDL flags - realize that bits in it may
  1429. // change due to some of the subroutines called below. Only bits that
  1430. // we know can't change are examined in this local copy. This is done
  1431. // to reduce the amount of processing while the PFN lock is held.
  1432. //
  1433. MdlFlags = MemoryDescriptorList->MdlFlags;
  1434. if (MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
  1435. //
  1436. // This MDL has been mapped into system space, unmap now.
  1437. //
  1438. MmUnmapLockedPages (MemoryDescriptorList->MappedSystemVa,
  1439. MemoryDescriptorList);
  1440. }
  1441. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  1442. StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
  1443. MemoryDescriptorList->ByteOffset);
  1444. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingVa,
  1445. MemoryDescriptorList->ByteCount);
  1446. ASSERT (NumberOfPages != 0);
  1447. if (MmTrackLockedPages == TRUE) {
  1448. if ((Process != NULL) &&
  1449. ((MdlFlags & MDL_IO_SPACE) == 0)) {
  1450. MiFreeMdlTracker (MemoryDescriptorList, NumberOfPages);
  1451. }
  1452. if (MmLockedPagesHead.ListHead.Flink != 0) {
  1453. PLOCK_TRACKER P;
  1454. PLIST_ENTRY NextEntry;
  1455. ExAcquireSpinLock (&MiTrackLockedPagesLock, &OldIrql);
  1456. NextEntry = MmLockedPagesHead.ListHead.Flink;
  1457. while (NextEntry != &MmLockedPagesHead.ListHead) {
  1458. P = CONTAINING_RECORD(NextEntry,
  1459. LOCK_TRACKER,
  1460. GlobalListEntry);
  1461. if (P->Mdl == MemoryDescriptorList) {
  1462. KeBugCheckEx (LOCKED_PAGES_TRACKER_CORRUPTION,
  1463. 0x4,
  1464. (ULONG_PTR)P,
  1465. (ULONG_PTR)MemoryDescriptorList,
  1466. 0);
  1467. }
  1468. NextEntry = NextEntry->Flink;
  1469. }
  1470. ExReleaseSpinLock (&MiTrackLockedPagesLock, OldIrql);
  1471. }
  1472. }
  1473. //
  1474. // Only unlock if not I/O space.
  1475. //
  1476. if ((MdlFlags & MDL_IO_SPACE) == 0) {
  1477. if (Process != NULL) {
  1478. ASSERT ((SPFN_NUMBER)Process->NumberOfLockedPages >= 0);
  1479. InterlockedExchangeAddSizeT (&Process->NumberOfLockedPages,
  1480. 0 - NumberOfPages);
  1481. }
  1482. LastPage = Page + NumberOfPages;
  1483. //
  1484. // Calculate PFN addresses and termination without the PFN lock
  1485. // (it's not needed for this) to reduce PFN lock contention.
  1486. //
  1487. ASSERT (sizeof(PFN_NUMBER) == sizeof(PMMPFN));
  1488. do {
  1489. if (*Page == MM_EMPTY_LIST) {
  1490. //
  1491. // There are no more locked pages - if there were none at all
  1492. // then we're done.
  1493. //
  1494. if (Page == (PPFN_NUMBER)(MemoryDescriptorList + 1)) {
  1495. MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
  1496. return;
  1497. }
  1498. LastPage = Page;
  1499. break;
  1500. }
  1501. ASSERT (*Page <= MmHighestPhysicalPage);
  1502. Pfn1 = MI_PFN_ELEMENT (*Page);
  1503. *Page = (PFN_NUMBER) Pfn1;
  1504. Page += 1;
  1505. } while (Page < LastPage);
  1506. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  1507. //
  1508. // If the MDL can be queued so the PFN acquisition/release can be
  1509. // amortized then do so.
  1510. //
  1511. if (NumberOfPages <= MI_MAX_DEREFERENCE_CHUNK) {
  1512. #if defined(MI_MULTINODE)
  1513. PKNODE Node = KeGetCurrentNode ();
  1514. //
  1515. // The node may change beneath us but that should be fairly
  1516. // infrequent and not worth checking for. Just make sure the
  1517. // same node that gives us a free entry gets the deferred entry
  1518. // back.
  1519. //
  1520. PfnDereferenceSListHead = &Node->PfnDereferenceSListHead;
  1521. #else
  1522. PfnDereferenceSListHead = &MmPfnDereferenceSListHead;
  1523. #endif
  1524. //
  1525. // Pop an entry from the freelist.
  1526. //
  1527. SingleListEntry = InterlockedPopEntrySList (PfnDereferenceSListHead);
  1528. if (SingleListEntry != NULL) {
  1529. DerefMdl = CONTAINING_RECORD (SingleListEntry,
  1530. MI_PFN_DEREFERENCE_CHUNK,
  1531. ListEntry);
  1532. DerefMdl->Flags = MdlFlags;
  1533. DerefMdl->NumberOfPages = (USHORT) (LastPage - Page);
  1534. RtlCopyMemory ((PVOID)(&DerefMdl->Pfns[0]),
  1535. (PVOID)Page,
  1536. (LastPage - Page) * sizeof (PFN_NUMBER));
  1537. MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
  1538. //
  1539. // Push this entry on the deferred list.
  1540. //
  1541. #if defined(MI_MULTINODE)
  1542. PfnDeferredList = &Node->PfnDeferredList;
  1543. #else
  1544. PfnDeferredList = &MmPfnDeferredList;
  1545. #endif
  1546. do {
  1547. OldValue = *PfnDeferredList;
  1548. SingleListEntry->Next = OldValue;
  1549. } while (InterlockedCompareExchangePointer (
  1550. PfnDeferredList,
  1551. SingleListEntry,
  1552. OldValue) != OldValue);
  1553. return;
  1554. }
  1555. }
  1556. SingleListEntry = NULL;
  1557. if (MdlFlags & MDL_WRITE_OPERATION) {
  1558. LOCK_PFN2 (OldIrql);
  1559. do {
  1560. //
  1561. // If this was a write operation set the modified bit in the
  1562. // PFN database.
  1563. //
  1564. Pfn1 = (PMMPFN) (*Page);
  1565. MI_SET_MODIFIED (Pfn1, 1, 0x3);
  1566. if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
  1567. (Pfn1->u3.e1.WriteInProgress == 0)) {
  1568. ULONG FreeBit;
  1569. FreeBit = GET_PAGING_FILE_OFFSET (Pfn1->OriginalPte);
  1570. if ((FreeBit != 0) && (FreeBit != MI_PTE_LOOKUP_NEEDED)) {
  1571. MiReleaseConfirmedPageFileSpace (Pfn1->OriginalPte);
  1572. Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
  1573. }
  1574. }
  1575. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF(Pfn1, 1);
  1576. Page += 1;
  1577. } while (Page < LastPage);
  1578. }
  1579. else {
  1580. LOCK_PFN2 (OldIrql);
  1581. do {
  1582. Pfn1 = (PMMPFN) (*Page);
  1583. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF(Pfn1, 1);
  1584. Page += 1;
  1585. } while (Page < LastPage);
  1586. }
  1587. if (NumberOfPages <= MI_MAX_DEREFERENCE_CHUNK) {
  1588. //
1589. // This code path is only reached because a deferred entry was
1590. // not available, so clear the list now.
  1591. //
  1592. MiDeferredUnlockPages (MI_DEFER_PFN_HELD | MI_DEFER_DRAIN_LOCAL_ONLY);
  1593. }
  1594. UNLOCK_PFN2 (OldIrql);
  1595. }
  1596. MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
  1597. return;
  1598. }
  1599. VOID
  1600. MiDeferredUnlockPages (
  1601. ULONG Flags
  1602. )
  1603. /*++
  1604. Routine Description:
1605. This routine drains the deferred-unlock queues, unlocking physical
1606. pages which were previously described by Memory Descriptor Lists.
  1607. Arguments:
  1608. Flags - Supplies a bitfield of the caller's needs :
  1609. MI_DEFER_PFN_HELD - Indicates the caller holds the PFN lock on entry.
  1610. MI_DEFER_DRAIN_LOCAL_ONLY - Indicates the caller only wishes to drain
  1611. the current processor's queue. This only
  1612. has meaning in NUMA systems.
  1613. Return Value:
  1614. None.
  1615. Environment:
  1616. Kernel mode, PFN database lock *MAY* be held on entry (see Flags).
  1617. --*/
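//
// In outline: MmUnlockPages queues small MDLs' page frames onto a
// deferred list (per-node on NUMA systems) rather than taking the PFN
// lock for each one. This routine walks those queued chunks, drops each
// page's locked charge and reference count, and recycles the processed
// blocks onto the free SList, amortizing one PFN lock acquisition
// across many MDLs.
//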
  1618. {
  1619. KIRQL OldIrql = 0;
  1620. ULONG FreeBit;
  1621. ULONG i;
  1622. ULONG ListCount;
  1623. ULONG TotalNodes;
  1624. PFN_NUMBER NumberOfPages;
  1625. PPFN_NUMBER Page;
  1626. PPFN_NUMBER LastPage;
  1627. PMMPFN Pfn1;
  1628. CSHORT MdlFlags;
  1629. PSINGLE_LIST_ENTRY SingleListEntry;
  1630. PSINGLE_LIST_ENTRY LastEntry;
  1631. PSINGLE_LIST_ENTRY FirstEntry;
  1632. PSINGLE_LIST_ENTRY NextEntry;
  1633. PSINGLE_LIST_ENTRY VeryLastEntry;
  1634. PMI_PFN_DEREFERENCE_CHUNK DerefMdl;
  1635. PSLIST_HEADER PfnDereferenceSListHead;
  1636. PSINGLE_LIST_ENTRY *PfnDeferredList;
  1637. #if defined(MI_MULTINODE)
  1638. PKNODE Node;
  1639. #endif
  1640. i = 0;
  1641. ListCount = 0;
  1642. TotalNodes = 1;
  1643. if ((Flags & MI_DEFER_PFN_HELD) == 0) {
  1644. LOCK_PFN2 (OldIrql);
  1645. }
  1646. MM_PFN_LOCK_ASSERT();
  1647. #if defined(MI_MULTINODE)
  1648. if (Flags & MI_DEFER_DRAIN_LOCAL_ONLY) {
  1649. Node = KeGetCurrentNode();
  1650. PfnDeferredList = &Node->PfnDeferredList;
  1651. PfnDereferenceSListHead = &Node->PfnDereferenceSListHead;
  1652. }
  1653. else {
  1654. TotalNodes = KeNumberNodes;
  1655. Node = KeNodeBlock[0];
  1656. PfnDeferredList = &Node->PfnDeferredList;
  1657. PfnDereferenceSListHead = &Node->PfnDereferenceSListHead;
  1658. }
  1659. #else
  1660. PfnDeferredList = &MmPfnDeferredList;
  1661. PfnDereferenceSListHead = &MmPfnDereferenceSListHead;
  1662. #endif
  1663. do {
  1664. if (*PfnDeferredList == NULL) {
  1665. #if !defined(MI_MULTINODE)
  1666. if ((Flags & MI_DEFER_PFN_HELD) == 0) {
  1667. UNLOCK_PFN2 (OldIrql);
  1668. }
  1669. return;
  1670. #else
  1671. i += 1;
  1672. if (i < TotalNodes) {
  1673. Node = KeNodeBlock[i];
  1674. PfnDeferredList = &Node->PfnDeferredList;
  1675. PfnDereferenceSListHead = &Node->PfnDereferenceSListHead;
  1676. continue;
  1677. }
  1678. break;
  1679. #endif
  1680. }
  1681. //
  1682. // Process each deferred unlock entry until they're all done.
  1683. //
  1684. LastEntry = NULL;
  1685. VeryLastEntry = NULL;
  1686. do {
  1687. SingleListEntry = *PfnDeferredList;
  1688. FirstEntry = SingleListEntry;
  1689. do {
  1690. NextEntry = SingleListEntry->Next;
  1691. //
  1692. // Process the deferred entry.
  1693. //
  1694. DerefMdl = CONTAINING_RECORD (SingleListEntry,
  1695. MI_PFN_DEREFERENCE_CHUNK,
  1696. ListEntry);
  1697. MdlFlags = DerefMdl->Flags;
  1698. NumberOfPages = (PFN_NUMBER) DerefMdl->NumberOfPages;
  1699. ASSERT (NumberOfPages <= MI_MAX_DEREFERENCE_CHUNK);
  1700. Page = &DerefMdl->Pfns[0];
  1701. LastPage = Page + NumberOfPages;
  1702. if (MdlFlags & MDL_WRITE_OPERATION) {
  1703. do {
  1704. //
  1705. // If this was a write operation set the modified bit
  1706. // in the PFN database.
  1707. //
  1708. Pfn1 = (PMMPFN) (*Page);
  1709. MI_SET_MODIFIED (Pfn1, 1, 0x4);
  1710. if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
  1711. (Pfn1->u3.e1.WriteInProgress == 0)) {
  1712. FreeBit = GET_PAGING_FILE_OFFSET (Pfn1->OriginalPte);
  1713. if ((FreeBit != 0) && (FreeBit != MI_PTE_LOOKUP_NEEDED)) {
  1714. MiReleaseConfirmedPageFileSpace (Pfn1->OriginalPte);
  1715. Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
  1716. }
  1717. }
  1718. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF(Pfn1, 1);
  1719. Page += 1;
  1720. } while (Page < LastPage);
  1721. }
  1722. else {
  1723. do {
  1724. Pfn1 = (PMMPFN) (*Page);
  1725. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF(Pfn1, 1);
  1726. Page += 1;
  1727. } while (Page < LastPage);
  1728. }
  1729. ListCount += 1;
  1730. //
  1731. // March on to the next entry if there is one.
  1732. //
  1733. if (NextEntry == LastEntry) {
  1734. break;
  1735. }
  1736. SingleListEntry = NextEntry;
  1737. } while (TRUE);
  1738. if (VeryLastEntry == NULL) {
  1739. VeryLastEntry = SingleListEntry;
  1740. }
  1741. if ((*PfnDeferredList == FirstEntry) &&
  1742. (InterlockedCompareExchangePointer (PfnDeferredList,
  1743. NULL,
  1744. FirstEntry) == FirstEntry)) {
  1745. break;
  1746. }
  1747. LastEntry = FirstEntry;
  1748. } while (TRUE);
  1749. //
  1750. // Push the processed list chain on the freelist.
  1751. //
  1752. ASSERT (ListCount != 0);
  1753. ASSERT (FirstEntry != NULL);
  1754. ASSERT (VeryLastEntry != NULL);
  1755. #if defined(MI_MULTINODE)
  1756. InterlockedPushListSList (PfnDereferenceSListHead,
  1757. FirstEntry,
  1758. VeryLastEntry,
  1759. ListCount);
  1760. i += 1;
  1761. if (i < TotalNodes) {
  1762. Node = KeNodeBlock[i];
  1763. PfnDeferredList = &Node->PfnDeferredList;
  1764. PfnDereferenceSListHead = &Node->PfnDereferenceSListHead;
  1765. ListCount = 0;
  1766. }
  1767. else {
  1768. break;
  1769. }
  1770. } while (TRUE);
  1771. #else
  1772. } while (FALSE);
  1773. #endif
  1774. if ((Flags & MI_DEFER_PFN_HELD) == 0) {
  1775. UNLOCK_PFN2 (OldIrql);
  1776. }
  1777. #if !defined(MI_MULTINODE)
  1778. //
  1779. // If possible, push the processed chain after releasing the PFN lock.
  1780. //
  1781. InterlockedPushListSList (PfnDereferenceSListHead,
  1782. FirstEntry,
  1783. VeryLastEntry,
  1784. ListCount);
  1785. #endif
  1786. }
  1787. VOID
  1788. MmBuildMdlForNonPagedPool (
  1789. IN OUT PMDL MemoryDescriptorList
  1790. )
  1791. /*++
  1792. Routine Description:
  1793. This routine fills in the "pages" portion of the MDL using the PFN
1794. numbers corresponding to the buffer which resides in nonpaged pool.
1795. Unlike MmProbeAndLockPages, there is no corresponding unlock and no
1796. reference counts are incremented, because buffers in nonpaged pool
1797. are always resident.
  1798. Arguments:
  1799. MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
  1800. (MDL). The supplied MDL must supply a virtual
  1801. address, byte offset and length field. The
  1802. physical page portion of the MDL is updated when
  1803. the pages are locked in memory. The virtual
  1804. address must be within the non-paged portion
  1805. of the system space.
  1806. Return Value:
  1807. None.
  1808. Environment:
  1809. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  1810. --*/
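//
// Illustrative usage sketch (not part of this module): a driver with a
// buffer in nonpaged pool typically builds its MDL as follows -
//
//     Mdl = IoAllocateMdl (Buffer, Length, FALSE, FALSE, NULL);
//     if (Mdl != NULL) {
//         MmBuildMdlForNonPagedPool (Mdl);
//     }
//
// No unlock call is needed afterwards; the MDL is simply released with
// IoFreeMdl when the transfer completes.
//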
  1811. {
  1812. PPFN_NUMBER Page;
  1813. PPFN_NUMBER EndPage;
  1814. PMMPTE PointerPte;
  1815. PVOID VirtualAddress;
  1816. PFN_NUMBER PageFrameIndex;
  1817. PFN_NUMBER NumberOfPages;
  1818. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  1819. ASSERT (MemoryDescriptorList->ByteCount != 0);
  1820. ASSERT ((MemoryDescriptorList->MdlFlags & (
  1821. MDL_PAGES_LOCKED |
  1822. MDL_MAPPED_TO_SYSTEM_VA |
  1823. MDL_SOURCE_IS_NONPAGED_POOL |
  1824. MDL_PARTIAL)) == 0);
  1825. MemoryDescriptorList->Process = NULL;
  1826. //
1827. // Endva is the last byte of the buffer.
  1828. //
  1829. MemoryDescriptorList->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
  1830. ASSERT (MmIsNonPagedSystemAddressValid (MemoryDescriptorList->StartVa));
  1831. VirtualAddress = MemoryDescriptorList->StartVa;
  1832. MemoryDescriptorList->MappedSystemVa =
  1833. (PVOID)((PCHAR)VirtualAddress + MemoryDescriptorList->ByteOffset);
  1834. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (MemoryDescriptorList->MappedSystemVa,
  1835. MemoryDescriptorList->ByteCount);
  1836. ASSERT (NumberOfPages != 0);
  1837. EndPage = Page + NumberOfPages;
  1838. if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) {
  1839. PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (VirtualAddress);
  1840. do {
  1841. *Page = PageFrameIndex;
  1842. Page += 1;
  1843. PageFrameIndex += 1;
  1844. } while (Page < EndPage);
  1845. }
  1846. else {
  1847. PointerPte = MiGetPteAddress (VirtualAddress);
  1848. do {
  1849. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  1850. *Page = PageFrameIndex;
  1851. Page += 1;
  1852. PointerPte += 1;
  1853. } while (Page < EndPage);
  1854. }
  1855. //
1856. // Assume that either all the frames are in the PFN database (ie: the
1857. // MDL maps pool) or none of them are (the MDL maps dualport RAM).
1858. // Avoid acquiring a spinlock for the determination since this is a
1859. // hot path, and assume dualport RAM spaces lie above physical memory -
1860. // this may need revisiting for sparse physical spaces.
  1861. //
  1862. if (PageFrameIndex > MmHighestPhysicalPage) {
  1863. MemoryDescriptorList->MdlFlags |= MDL_IO_SPACE;
  1864. }
  1865. return;
  1866. }
  1867. VOID
  1868. MiInitializeIoTrackers (
  1869. VOID
  1870. )
  1871. {
  1872. if (MmTrackPtes != 0) {
  1873. InitializeSListHead (&MiDeadPteTrackerSListHead);
  1874. KeInitializeSpinLock (&MiPteTrackerLock);
  1875. InitializeListHead (&MiPteHeader.ListHead);
  1876. }
  1877. if (MmTrackLockedPages == TRUE) {
  1878. KeInitializeSpinLock (&MiTrackLockedPagesLock);
  1879. InitializeListHead (&MmLockedPagesHead.ListHead);
  1880. }
  1881. }
  1882. VOID
  1883. MiInsertPteTracker (
  1884. IN PPTE_TRACKER Tracker,
  1885. IN PMDL MemoryDescriptorList,
  1886. IN PFN_NUMBER NumberOfPtes,
  1887. IN PVOID MyCaller,
  1888. IN PVOID MyCallersCaller
  1889. )
  1890. /*++
  1891. Routine Description:
  1892. This function inserts a PTE tracking block as the caller has just
  1893. consumed system PTEs.
  1894. Arguments:
  1895. Tracker - Supplies a tracker pool block. This is supplied by the caller
1896. since the MmSystemSpaceLock is held on entry, hence pool
  1897. allocations may not be done here.
  1898. MemoryDescriptorList - Supplies a valid Memory Descriptor List.
  1899. NumberOfPtes - Supplies the number of system PTEs allocated.
  1900. MyCaller - Supplies the return address of the caller who consumed the
  1901. system PTEs to map this MDL.
  1902. MyCallersCaller - Supplies the return address of the caller of the caller
  1903. who consumed the system PTEs to map this MDL.
  1904. Return Value:
  1905. None.
  1906. Environment:
  1907. Kernel mode, DISPATCH_LEVEL or below.
  1908. --*/
  1909. {
  1910. KIRQL OldIrql;
  1911. Tracker->Mdl = MemoryDescriptorList;
  1912. Tracker->SystemVa = MemoryDescriptorList->MappedSystemVa;
  1913. Tracker->Count = NumberOfPtes;
  1914. Tracker->StartVa = MemoryDescriptorList->StartVa;
  1915. Tracker->Offset = MemoryDescriptorList->ByteOffset;
  1916. Tracker->Length = MemoryDescriptorList->ByteCount;
  1917. Tracker->Page = *(PPFN_NUMBER)(MemoryDescriptorList + 1);
  1918. Tracker->CallingAddress = MyCaller;
  1919. Tracker->CallersCaller = MyCallersCaller;
  1920. Tracker->PteAddress = MiGetPteAddress (Tracker->SystemVa);
  1921. ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql);
  1922. MiPteHeader.Count += NumberOfPtes;
  1923. InsertHeadList (&MiPteHeader.ListHead, &Tracker->ListEntry);
  1924. ExReleaseSpinLock (&MiPteTrackerLock, OldIrql);
  1925. }
  1926. VOID
  1927. MiRemovePteTracker (
  1928. IN PMDL MemoryDescriptorList OPTIONAL,
  1929. IN PVOID PteAddress,
  1930. IN PFN_NUMBER NumberOfPtes
  1931. )
  1932. /*++
  1933. Routine Description:
  1934. This function removes a PTE tracking block from the lists as the PTEs
  1935. are being freed.
  1936. Arguments:
1937. MemoryDescriptorList - Supplies a valid Memory Descriptor List (optional).
  1938. PteAddress - Supplies the address the system PTEs were mapped to.
  1939. NumberOfPtes - Supplies the number of system PTEs allocated.
  1940. Return Value:
1941. None. The tracking block is pushed onto the dead PTE list for later
1942. release to pool, since locks (including the PFN lock) may be held here.
  1943. Environment:
  1944. Kernel mode, DISPATCH_LEVEL or below. Locks (including the PFN) may be held.
  1945. --*/
  1946. {
  1947. KIRQL OldIrql;
  1948. PPTE_TRACKER Tracker;
  1949. PFN_NUMBER Page;
  1950. PVOID BaseAddress;
  1951. PLIST_ENTRY LastFound;
  1952. PLIST_ENTRY NextEntry;
  1953. //
1954. // Initializing Page is not needed for correctness, but without it
1955. // the compiler cannot compile this code at warning level W4, which
1956. // checks for use of uninitialized variables.
  1957. //
  1958. Page = 0;
  1959. BaseAddress = MiGetVirtualAddressMappedByPte (PteAddress);
  1960. if (ARGUMENT_PRESENT (MemoryDescriptorList)) {
  1961. Page = *(PPFN_NUMBER)(MemoryDescriptorList + 1);
  1962. }
  1963. LastFound = NULL;
  1964. ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql);
  1965. NextEntry = MiPteHeader.ListHead.Flink;
  1966. while (NextEntry != &MiPteHeader.ListHead) {
  1967. Tracker = (PPTE_TRACKER) CONTAINING_RECORD (NextEntry,
  1968. PTE_TRACKER,
  1969. ListEntry.Flink);
  1970. if (PteAddress == Tracker->PteAddress) {
  1971. if (LastFound != NULL) {
  1972. //
  1973. // Duplicate map entry.
  1974. //
  1975. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1976. 0x1,
  1977. (ULONG_PTR)Tracker,
  1978. (ULONG_PTR)MemoryDescriptorList,
  1979. (ULONG_PTR)LastFound);
  1980. }
  1981. if (Tracker->Count != NumberOfPtes) {
  1982. //
1983. // Not unmapping the same number of PTEs that were mapped.
  1984. //
  1985. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1986. 0x2,
  1987. (ULONG_PTR)Tracker,
  1988. Tracker->Count,
  1989. NumberOfPtes);
  1990. }
  1991. if ((ARGUMENT_PRESENT (MemoryDescriptorList)) &&
  1992. ((MemoryDescriptorList->MdlFlags & MDL_FREE_EXTRA_PTES) == 0) &&
  1993. (MiMdlsAdjusted == FALSE)) {
  1994. if (Tracker->SystemVa != MemoryDescriptorList->MappedSystemVa) {
  1995. //
  1996. // Not unmapping the same address that was mapped.
  1997. //
  1998. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  1999. 0x3,
  2000. (ULONG_PTR)Tracker,
  2001. (ULONG_PTR)Tracker->SystemVa,
  2002. (ULONG_PTR)MemoryDescriptorList->MappedSystemVa);
  2003. }
  2004. if (Tracker->Page != Page) {
  2005. //
  2006. // The first page in the MDL has changed since it was mapped.
  2007. //
  2008. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2009. 0x4,
  2010. (ULONG_PTR)Tracker,
  2011. (ULONG_PTR)Tracker->Page,
  2012. (ULONG_PTR)Page);
  2013. }
  2014. if (Tracker->StartVa != MemoryDescriptorList->StartVa) {
  2015. //
  2016. // Map and unmap don't match up.
  2017. //
  2018. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2019. 0x5,
  2020. (ULONG_PTR)Tracker,
  2021. (ULONG_PTR)Tracker->StartVa,
  2022. (ULONG_PTR)MemoryDescriptorList->StartVa);
  2023. }
  2024. }
  2025. RemoveEntryList (NextEntry);
  2026. LastFound = NextEntry;
  2027. }
  2028. NextEntry = Tracker->ListEntry.Flink;
  2029. }
  2030. if ((LastFound == NULL) && (MiTrackPtesAborted == FALSE)) {
  2031. //
  2032. // Can't unmap something that was never (or isn't currently) mapped.
  2033. //
  2034. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2035. 0x6,
  2036. (ULONG_PTR)MemoryDescriptorList,
  2037. (ULONG_PTR)BaseAddress,
  2038. (ULONG_PTR)NumberOfPtes);
  2039. }
  2040. MiPteHeader.Count -= NumberOfPtes;
  2041. ExReleaseSpinLock (&MiPteTrackerLock, OldIrql);
  2042. //
  2043. // Insert the tracking block into the dead PTE list for later
  2044. // release. Locks (including the PFN lock) may be held on entry, thus the
  2045. // block cannot be directly freed to pool at this time.
  2046. //
  2047. if (LastFound != NULL) {
  2048. InterlockedPushEntrySList (&MiDeadPteTrackerSListHead,
  2049. (PSINGLE_LIST_ENTRY)LastFound);
  2050. }
  2051. return;
  2052. }
  2053. PPTE_TRACKER
  2054. MiReleaseDeadPteTrackers (
  2055. VOID
  2056. )
  2057. /*++
  2058. Routine Description:
  2059. This routine removes tracking blocks from the dead PTE list and frees
  2060. them to pool. One entry is returned (if possible) to the caller to use
  2061. for the next allocation.
  2062. Arguments:
  2063. None.
  2064. Return Value:
  2065. A PTE tracking block or NULL.
  2066. Environment:
  2067. Kernel mode. No locks held.
  2068. --*/
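//
// Recycling policy below: a shallow dead list (depth < 10) is popped a
// single entry at a time so one block is recycled to the caller; a
// deeper list is flushed wholesale - the head is recycled and the rest
// of the chain is freed back to pool.
//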
  2069. {
  2070. LOGICAL ListToProcess;
  2071. PPTE_TRACKER Tracker;
  2072. PSINGLE_LIST_ENTRY SingleListEntry;
  2073. PSINGLE_LIST_ENTRY NextSingleListEntry;
  2074. ASSERT (KeGetCurrentIrql() <= DISPATCH_LEVEL);
  2075. if (ExQueryDepthSList (&MiDeadPteTrackerSListHead) < 10) {
  2076. SingleListEntry = InterlockedPopEntrySList (&MiDeadPteTrackerSListHead);
  2077. ListToProcess = FALSE;
  2078. }
  2079. else {
  2080. SingleListEntry = ExInterlockedFlushSList (&MiDeadPteTrackerSListHead);
  2081. ListToProcess = TRUE;
  2082. }
  2083. if (SingleListEntry == NULL) {
  2084. Tracker = ExAllocatePoolWithTag (NonPagedPool,
  2085. sizeof (PTE_TRACKER),
  2086. 'ySmM');
  2087. if (Tracker == NULL) {
  2088. MiTrackPtesAborted = TRUE;
  2089. }
  2090. return Tracker;
  2091. }
  2092. Tracker = (PPTE_TRACKER) SingleListEntry;
  2093. if (ListToProcess == TRUE) {
  2094. SingleListEntry = SingleListEntry->Next;
  2095. while (SingleListEntry != NULL) {
  2096. NextSingleListEntry = SingleListEntry->Next;
  2097. ExFreePool (SingleListEntry);
  2098. SingleListEntry = NextSingleListEntry;
  2099. }
  2100. }
  2101. return Tracker;
  2102. }
  2103. PVOID
  2104. MiGetHighestPteConsumer (
  2105. OUT PULONG_PTR NumberOfPtes
  2106. )
  2107. /*++
  2108. Routine Description:
  2109. This function examines the PTE tracking blocks and returns the biggest
  2110. consumer.
  2111. Arguments:
2112. NumberOfPtes - Receives the PTE count of the biggest consumer.
  2113. Return Value:
  2114. The loaded module entry of the biggest consumer.
  2115. Environment:
  2116. Kernel mode, called during bugcheck only. Many locks may be held.
  2117. --*/
  2118. {
  2119. PPTE_TRACKER Tracker;
  2120. PVOID BaseAddress;
  2121. PFN_NUMBER NumberOfPages;
  2122. PLIST_ENTRY NextEntry;
  2123. PLIST_ENTRY NextEntry2;
  2124. PKLDR_DATA_TABLE_ENTRY DataTableEntry;
  2125. ULONG_PTR Highest;
  2126. ULONG_PTR PagesByThisModule;
  2127. PKLDR_DATA_TABLE_ENTRY HighDataTableEntry;
  2128. *NumberOfPtes = 0;
  2129. //
  2130. // No locks are acquired as this is only called during a bugcheck.
  2131. //
  2132. if ((MmTrackPtes & 0x1) == 0) {
  2133. return NULL;
  2134. }
  2135. if (MiTrackPtesAborted == TRUE) {
  2136. return NULL;
  2137. }
  2138. if (IsListEmpty(&MiPteHeader.ListHead)) {
  2139. return NULL;
  2140. }
  2141. if (PsLoadedModuleList.Flink == NULL) {
  2142. return NULL;
  2143. }
  2144. Highest = 0;
  2145. HighDataTableEntry = NULL;
  2146. NextEntry = PsLoadedModuleList.Flink;
  2147. while (NextEntry != &PsLoadedModuleList) {
  2148. DataTableEntry = CONTAINING_RECORD(NextEntry,
  2149. KLDR_DATA_TABLE_ENTRY,
  2150. InLoadOrderLinks);
  2151. PagesByThisModule = 0;
  2152. //
  2153. // Walk the PTE mapping list and update each driver's counts.
  2154. //
  2155. NextEntry2 = MiPteHeader.ListHead.Flink;
  2156. while (NextEntry2 != &MiPteHeader.ListHead) {
  2157. Tracker = (PPTE_TRACKER) CONTAINING_RECORD (NextEntry2,
  2158. PTE_TRACKER,
  2159. ListEntry.Flink);
  2160. BaseAddress = Tracker->CallingAddress;
  2161. NumberOfPages = Tracker->Count;
  2162. if ((BaseAddress >= DataTableEntry->DllBase) &&
  2163. (BaseAddress < (PVOID)((ULONG_PTR)(DataTableEntry->DllBase) + DataTableEntry->SizeOfImage))) {
  2164. PagesByThisModule += NumberOfPages;
  2165. }
  2166. NextEntry2 = NextEntry2->Flink;
  2167. }
  2168. if (PagesByThisModule > Highest) {
  2169. Highest = PagesByThisModule;
  2170. HighDataTableEntry = DataTableEntry;
  2171. }
  2172. NextEntry = NextEntry->Flink;
  2173. }
  2174. *NumberOfPtes = Highest;
  2175. return (PVOID)HighDataTableEntry;
  2176. }
  2177. PVOID
  2178. MmMapLockedPagesSpecifyCache (
  2179. IN PMDL MemoryDescriptorList,
  2180. IN KPROCESSOR_MODE AccessMode,
  2181. IN MEMORY_CACHING_TYPE CacheType,
  2182. IN PVOID RequestedAddress,
  2183. IN ULONG BugCheckOnFailure,
  2184. IN MM_PAGE_PRIORITY Priority
  2185. )
  2186. /*++
  2187. Routine Description:
  2188. This function maps physical pages described by a memory descriptor
  2189. list into the system virtual address space or the user portion of
  2190. the virtual address space.
  2191. Arguments:
  2192. MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
  2193. been updated by MmProbeAndLockPages.
  2194. AccessMode - Supplies an indicator of where to map the pages;
  2195. KernelMode indicates that the pages should be mapped in the
  2196. system part of the address space, UserMode indicates the
  2197. pages should be mapped in the user part of the address space.
  2198. CacheType - Supplies the type of cache mapping to use for the MDL.
  2199. MmCached indicates "normal" kernel or user mappings.
  2200. RequestedAddress - Supplies the base user address of the view.
  2201. This is only treated as an address if the AccessMode
  2202. is UserMode. If the initial value of this argument
  2203. is not NULL, then the view will be allocated starting
  2204. at the specified virtual address rounded down to the
2205. nearest 64kb address boundary. If the initial value of
  2206. this argument is NULL, then the operating system
  2207. will determine where to allocate the view.
  2208. If the AccessMode is KernelMode, then this argument is
  2209. treated as a bit field of attributes.
  2210. BugCheckOnFailure - Supplies whether to bugcheck if the mapping cannot be
  2211. obtained. This flag is only checked if the MDL's
  2212. MDL_MAPPING_CAN_FAIL is zero, which implies that the
  2213. default MDL behavior is to bugcheck. This flag then
  2214. provides an additional avenue to avoid the bugcheck.
  2215. Done this way in order to provide WDM compatibility.
  2216. Priority - Supplies an indication as to how important it is that this
  2217. request succeed under low available PTE conditions.
  2218. Return Value:
  2219. Returns the base address where the pages are mapped. The base address
  2220. has the same offset as the virtual address in the MDL.
  2221. This routine will raise an exception if the processor mode is USER_MODE
  2222. and quota limits or VM limits are exceeded.
  2223. Environment:
  2224. Kernel mode. DISPATCH_LEVEL or below if access mode is KernelMode,
  2225. APC_LEVEL or below if access mode is UserMode.
  2226. --*/
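//
// Illustrative call (assumed caller variables, not from this module): a
// driver that prefers failure over a bugcheck under PTE pressure might
// use -
//
//     BaseVa = MmMapLockedPagesSpecifyCache (Mdl,
//                                            KernelMode,
//                                            MmCached,
//                                            NULL,    // no preferred address
//                                            FALSE,   // fail rather than bugcheck
//                                            NormalPagePriority);
//
// A NULL return then indicates the system PTE (or cache attribute)
// checks below failed.
//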
  2227. {
  2228. KIRQL OldIrql;
  2229. CSHORT IoMapping;
  2230. PFN_NUMBER NumberOfPages;
  2231. PFN_NUMBER SavedPageCount;
  2232. PPFN_NUMBER Page;
  2233. PPFN_NUMBER LastPage;
  2234. PMMPTE PointerPte;
  2235. PVOID BaseVa;
  2236. MMPTE TempPte;
  2237. PVOID StartingVa;
  2238. PFN_NUMBER NumberOfPtes;
  2239. PVOID CallingAddress;
  2240. PVOID CallersCaller;
  2241. PVOID Tracker;
  2242. PFN_NUMBER PageFrameIndex;
  2243. PMMPFN Pfn2;
  2244. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  2245. //
  2246. // If this assert fires, the MiPlatformCacheAttributes array
  2247. // initialization needs to be checked.
  2248. //
  2249. ASSERT (MmMaximumCacheType == 6);
  2250. StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
  2251. MemoryDescriptorList->ByteOffset);
  2252. ASSERT (MemoryDescriptorList->ByteCount != 0);
  2253. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
  2254. if (AccessMode == KernelMode) {
  2255. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  2256. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
  2257. MemoryDescriptorList->ByteCount);
  2258. SavedPageCount = NumberOfPages;
  2259. //
  2260. // Map the pages into the system part of the address space as
  2261. // kernel read/write.
  2262. //
  2263. ASSERT ((MemoryDescriptorList->MdlFlags & (
  2264. MDL_MAPPED_TO_SYSTEM_VA |
  2265. MDL_SOURCE_IS_NONPAGED_POOL |
  2266. MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
  2267. ASSERT ((MemoryDescriptorList->MdlFlags & (
  2268. MDL_PAGES_LOCKED |
  2269. MDL_PARTIAL)) != 0);
  2270. //
  2271. // Make sure there are enough PTEs of the requested size.
  2272. // Try to ensure available PTEs inline when we're rich.
  2273. // Otherwise go the long way.
  2274. //
  2275. if ((Priority != HighPagePriority) &&
  2276. ((LONG)(NumberOfPages) > (LONG)MmTotalFreeSystemPtes[SystemPteSpace] - 2048) &&
  2277. (MiGetSystemPteAvailability ((ULONG)NumberOfPages, Priority) == FALSE)) {
  2278. return NULL;
  2279. }
  2280. IoMapping = MemoryDescriptorList->MdlFlags & MDL_IO_SPACE;
  2281. CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, IoMapping);
  2282. //
  2283. // If a noncachable mapping is requested, none of the pages in the
  2284. // requested MDL can reside in a large page. Otherwise we would be
  2285. // creating an incoherent overlapping TB entry as the same physical
  2286. // page would be mapped by 2 different TB entries with different
  2287. // cache attributes.
  2288. //
  2289. if (CacheAttribute != MiCached) {
  2290. LastPage = Page + NumberOfPages;
  2291. do {
  2292. if (*Page == MM_EMPTY_LIST) {
  2293. break;
  2294. }
  2295. PageFrameIndex = *Page;
  2296. if (MI_PAGE_FRAME_INDEX_MUST_BE_CACHED (PageFrameIndex)) {
  2297. MiNonCachedCollisions += 1;
  2298. if (((MemoryDescriptorList->MdlFlags & MDL_MAPPING_CAN_FAIL) == 0) && (BugCheckOnFailure)) {
  2299. KeBugCheckEx (MEMORY_MANAGEMENT,
  2300. 0x1000,
  2301. (ULONG_PTR)MemoryDescriptorList,
  2302. (ULONG_PTR)PageFrameIndex,
  2303. (ULONG_PTR)CacheAttribute);
  2304. }
  2305. return NULL;
  2306. }
  2307. Page += 1;
  2308. } while (Page < LastPage);
  2309. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  2310. }
  2311. PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages,
  2312. SystemPteSpace);
  2313. if (PointerPte == NULL) {
  2314. if (((MemoryDescriptorList->MdlFlags & MDL_MAPPING_CAN_FAIL) == 0) &&
  2315. (BugCheckOnFailure)) {
  2316. MiIssueNoPtesBugcheck ((ULONG)NumberOfPages, SystemPteSpace);
  2317. }
  2318. //
2319. // Not enough system PTEs are available.
  2320. //
  2321. return NULL;
  2322. }
  2323. BaseVa = (PVOID)((PCHAR)MiGetVirtualAddressMappedByPte (PointerPte) +
  2324. MemoryDescriptorList->ByteOffset);
  2325. NumberOfPtes = NumberOfPages;
  2326. TempPte = ValidKernelPte;
  2327. switch (CacheAttribute) {
  2328. case MiNonCached:
  2329. MI_DISABLE_CACHING (TempPte);
  2330. break;
  2331. case MiCached:
  2332. break;
  2333. case MiWriteCombined:
  2334. MI_SET_PTE_WRITE_COMBINE (TempPte);
  2335. break;
  2336. default:
  2337. ASSERT (FALSE);
  2338. break;
  2339. }
  2340. OldIrql = HIGH_LEVEL;
  2341. LastPage = Page + NumberOfPages;
  2342. MI_PREPARE_FOR_NONCACHED (CacheAttribute);
  2343. do {
  2344. if (*Page == MM_EMPTY_LIST) {
  2345. break;
  2346. }
  2347. ASSERT (PointerPte->u.Hard.Valid == 0);
  2348. if (IoMapping == 0) {
  2349. Pfn2 = MI_PFN_ELEMENT (*Page);
  2350. ASSERT (Pfn2->u3.e2.ReferenceCount != 0);
  2351. TempPte = ValidKernelPte;
  2352. switch (Pfn2->u3.e1.CacheAttribute) {
  2353. case MiCached:
  2354. if (CacheAttribute != MiCached) {
  2355. //
  2356. // The caller asked for a noncached or writecombined
  2357. // mapping, but the page is already mapped cached by
  2358. // someone else. Override the caller's request in
  2359. // order to keep the TB page attribute coherent.
  2360. //
  2361. MiCacheOverride[0] += 1;
  2362. }
  2363. break;
  2364. case MiNonCached:
  2365. if (CacheAttribute != MiNonCached) {
  2366. //
  2367. // The caller asked for a cached or writecombined
  2368. // mapping, but the page is already mapped noncached
  2369. // by someone else. Override the caller's request
  2370. // in order to keep the TB page attribute coherent.
  2371. //
  2372. MiCacheOverride[1] += 1;
  2373. }
  2374. MI_DISABLE_CACHING (TempPte);
  2375. break;
  2376. case MiWriteCombined:
  2377. if (CacheAttribute != MiWriteCombined) {
  2378. //
  2379. // The caller asked for a cached or noncached
  2380. // mapping, but the page is already mapped
  2381. // writecombined by someone else. Override the
  2382. // caller's request in order to keep the TB page
  2383. // attribute coherent.
  2384. //
  2385. MiCacheOverride[2] += 1;
  2386. }
  2387. MI_SET_PTE_WRITE_COMBINE (TempPte);
  2388. break;
  2389. case MiNotMapped:
  2390. //
  2391. // This better be for a page allocated with
  2392. // MmAllocatePagesForMdl. Otherwise it might be a
  2393. // page on the freelist which could subsequently be
  2394. // given out with a different attribute !
  2395. //
  2396. ASSERT ((Pfn2->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME) ||
  2397. (Pfn2->PteAddress == (PVOID) (ULONG_PTR)(X64K | 0x1)));
  2398. if (OldIrql == HIGH_LEVEL) {
  2399. LOCK_PFN2 (OldIrql);
  2400. }
  2401. switch (CacheAttribute) {
  2402. case MiCached:
  2403. Pfn2->u3.e1.CacheAttribute = MiCached;
  2404. break;
  2405. case MiNonCached:
  2406. Pfn2->u3.e1.CacheAttribute = MiNonCached;
  2407. MI_DISABLE_CACHING (TempPte);
  2408. break;
  2409. case MiWriteCombined:
  2410. Pfn2->u3.e1.CacheAttribute = MiWriteCombined;
  2411. MI_SET_PTE_WRITE_COMBINE (TempPte);
  2412. break;
  2413. default:
  2414. ASSERT (FALSE);
  2415. break;
  2416. }
  2417. break;
  2418. default:
  2419. ASSERT (FALSE);
  2420. break;
  2421. }
  2422. }
  2423. TempPte.u.Hard.PageFrameNumber = *Page;
  2424. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  2425. Page += 1;
  2426. PointerPte += 1;
  2427. } while (Page < LastPage);
  2428. if (OldIrql != HIGH_LEVEL) {
  2429. UNLOCK_PFN2 (OldIrql);
  2430. }
  2431. MI_SWEEP_CACHE (CacheAttribute, BaseVa, SavedPageCount * PAGE_SIZE);
  2432. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
  2433. MemoryDescriptorList->MappedSystemVa = BaseVa;
  2434. MemoryDescriptorList->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
  2435. if (MmTrackPtes & 0x1) {
  2436. //
  2437. // First free any zombie blocks as no locks are being held.
  2438. //
  2439. Tracker = MiReleaseDeadPteTrackers ();
  2440. if (Tracker != NULL) {
  2441. RtlGetCallersAddress (&CallingAddress, &CallersCaller);
  2442. MiInsertPteTracker (Tracker,
  2443. MemoryDescriptorList,
  2444. NumberOfPtes,
  2445. CallingAddress,
  2446. CallersCaller);
  2447. }
  2448. }
  2449. if ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) != 0) {
  2450. MemoryDescriptorList->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
  2451. }
  2452. return BaseVa;
  2453. }
  2454. return MiMapLockedPagesInUserSpace (MemoryDescriptorList,
  2455. StartingVa,
  2456. CacheType,
  2457. RequestedAddress);
  2458. }
  2459. PVOID
  2460. MiMapSinglePage (
  2461. IN PVOID VirtualAddress OPTIONAL,
  2462. IN PFN_NUMBER PageFrameIndex,
  2463. IN MEMORY_CACHING_TYPE CacheType,
  2464. IN MM_PAGE_PRIORITY Priority
  2465. )
  2466. /*++
  2467. Routine Description:
  2468. This function (re)maps a single system PTE to the specified physical page.
  2469. Arguments:
  2470. VirtualAddress - Supplies the virtual address to map the page frame at.
  2471. NULL indicates a system PTE is needed. Non-NULL supplies
  2472. the virtual address returned by an earlier
  2473. MiMapSinglePage call.
  2474. PageFrameIndex - Supplies the page frame index to map.
  2475. CacheType - Supplies the type of cache mapping to use for the MDL.
  2476. MmCached indicates "normal" kernel or user mappings.
  2477. Priority - Supplies an indication as to how important it is that this
  2478. request succeed under low available PTE conditions.
  2479. Return Value:
  2480. Returns the base address where the page is mapped, or NULL if the
  2481. mapping failed.
  2482. Environment:
  2483. Kernel mode. APC_LEVEL or below.
  2484. --*/
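//
// Typical internal usage: the first call passes a NULL VirtualAddress
// to reserve a system PTE; subsequent calls pass the returned address
// back to retarget that same PTE at a new frame, and MiUnmapSinglePage
// finally releases it.
//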
  2485. {
  2486. PMMPTE PointerPte;
  2487. MMPTE TempPte;
  2488. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  2489. PAGED_CODE ();
  2490. UNREFERENCED_PARAMETER (Priority);
  2491. //
  2492. // If this routine is ever changed to allow other than fully cachable
  2493. // requests then checks must be added for large page TB overlaps which
  2494. // can result in this function failing where it cannot today.
  2495. //
  2496. ASSERT (CacheType == MmCached);
  2497. if (VirtualAddress == NULL) {
  2498. PointerPte = MiReserveSystemPtes (1, SystemPteSpace);
  2499. if (PointerPte == NULL) {
  2500. //
2501. // Not enough system PTEs are available.
  2502. //
  2503. return NULL;
  2504. }
  2505. ASSERT (PointerPte->u.Hard.Valid == 0);
  2506. VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
  2507. }
  2508. else {
  2509. ASSERT (MI_IS_PHYSICAL_ADDRESS (VirtualAddress) == 0);
  2510. ASSERT (VirtualAddress >= MM_SYSTEM_RANGE_START);
  2511. PointerPte = MiGetPteAddress (VirtualAddress);
  2512. ASSERT (PointerPte->u.Hard.Valid == 1);
  2513. MI_WRITE_INVALID_PTE (PointerPte, ZeroPte);
  2514. KeFlushSingleTb (VirtualAddress,
  2515. TRUE,
  2516. TRUE,
  2517. (PHARDWARE_PTE)PointerPte,
  2518. ZeroPte.u.Flush);
  2519. }
  2520. TempPte = ValidKernelPte;
  2521. CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, 0);
  2522. switch (CacheAttribute) {
  2523. case MiNonCached:
  2524. MI_DISABLE_CACHING (TempPte);
  2525. break;
  2526. case MiCached:
  2527. break;
  2528. case MiWriteCombined:
  2529. MI_SET_PTE_WRITE_COMBINE (TempPte);
  2530. break;
  2531. default:
  2532. ASSERT (FALSE);
  2533. break;
  2534. }
  2535. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  2536. MI_PREPARE_FOR_NONCACHED (CacheAttribute);
  2537. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  2538. MI_SWEEP_CACHE (CacheAttribute, VirtualAddress, PAGE_SIZE);
  2539. return VirtualAddress;
  2540. }
  2541. PVOID
  2542. MmMapLockedPages (
  2543. IN PMDL MemoryDescriptorList,
  2544. IN KPROCESSOR_MODE AccessMode
  2545. )
  2546. /*++
  2547. Routine Description:
  2548. This function maps physical pages described by a memory descriptor
  2549. list into the system virtual address space or the user portion of
  2550. the virtual address space.
  2551. Arguments:
  2552. MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
  2553. been updated by MmProbeAndLockPages.
  2554. AccessMode - Supplies an indicator of where to map the pages;
  2555. KernelMode indicates that the pages should be mapped in the
  2556. system part of the address space, UserMode indicates the
  2557. pages should be mapped in the user part of the address space.
  2558. Return Value:
  2559. Returns the base address where the pages are mapped. The base address
  2560. has the same offset as the virtual address in the MDL.
  2561. This routine will raise an exception if the processor mode is USER_MODE
  2562. and quota limits or VM limits are exceeded.
  2563. Environment:
  2564. Kernel mode. DISPATCH_LEVEL or below if access mode is KernelMode,
  2565. APC_LEVEL or below if access mode is UserMode.
  2566. --*/
  2567. {
  2568. return MmMapLockedPagesSpecifyCache (MemoryDescriptorList,
  2569. AccessMode,
  2570. MmCached,
  2571. NULL,
  2572. TRUE,
  2573. HighPagePriority);
  2574. }
  2575. VOID
  2576. MiUnmapSinglePage (
  2577. IN PVOID VirtualAddress
  2578. )
  2579. /*++
  2580. Routine Description:
  2581. This routine unmaps a single locked page which was previously mapped via
  2582. an MiMapSinglePage call.
  2583. Arguments:
  2584. VirtualAddress - Supplies the virtual address used to map the page.
  2585. Return Value:
  2586. None.
  2587. Environment:
  2588. Kernel mode. APC_LEVEL or below, base address is within system space.
  2589. --*/
  2590. {
  2591. PMMPTE PointerPte;
  2592. PAGED_CODE ();
  2593. ASSERT (MI_IS_PHYSICAL_ADDRESS (VirtualAddress) == 0);
  2594. ASSERT (VirtualAddress >= MM_SYSTEM_RANGE_START);
  2595. PointerPte = MiGetPteAddress (VirtualAddress);
  2596. MiReleaseSystemPtes (PointerPte, 1, SystemPteSpace);
  2597. return;
  2598. }
  2599. PVOID
  2600. MmAllocateMappingAddress (
  2601. IN SIZE_T NumberOfBytes,
  2602. IN ULONG PoolTag
  2603. )
  2604. /*++
  2605. Routine Description:
  2606. This function allocates a system PTE mapping of the requested length
  2607. that can be used later to map arbitrary addresses.
  2608. Arguments:
  2609. NumberOfBytes - Supplies the maximum number of bytes the mapping can span.
  2610. PoolTag - Supplies a pool tag to associate this mapping to the caller.
  2611. Return Value:
2612. Returns a virtual address to use for later mappings.
  2613. Environment:
  2614. Kernel mode. PASSIVE_LEVEL.
  2615. --*/
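//
// Illustrative lifecycle sketch (hypothetical 'gaTx' tag): the address
// returned here is reused across transfers -
//
//     Va = MmAllocateMappingAddress (MaxTransferBytes, 'gaTx');
//     ...
//     SystemVa = MmMapLockedPagesWithReservedMapping (Va, 'gaTx',
//                                                     Mdl, MmCached);
//     ... perform the transfer ...
//     MmUnmapReservedMapping (Va, 'gaTx', Mdl);
//     ...
//     MmFreeMappingAddress (Va, 'gaTx');
//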
  2616. {
  2617. PPFN_NUMBER Page;
  2618. PMMPTE PointerPte;
  2619. PVOID BaseVa;
  2620. PVOID CallingAddress;
  2621. PVOID CallersCaller;
  2622. PVOID Tracker;
  2623. PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + 1];
  2624. PMDL MemoryDescriptorList;
  2625. PFN_NUMBER NumberOfPages;
  2626. ASSERT (KeGetCurrentIrql () == PASSIVE_LEVEL);
  2627. //
  2628. // Make sure there are enough PTEs of the requested size.
  2629. // Try to ensure available PTEs inline when we're rich.
  2630. // Otherwise go the long way.
  2631. //
  2632. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (0, NumberOfBytes);
  2633. if (NumberOfPages == 0) {
  2634. RtlGetCallersAddress (&CallingAddress, &CallersCaller);
  2635. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2636. 0x100,
  2637. NumberOfPages,
  2638. PoolTag,
  2639. (ULONG_PTR) CallingAddress);
  2640. }
  2641. //
  2642. // Callers must identify themselves.
  2643. //
  2644. if (PoolTag == 0) {
  2645. return NULL;
  2646. }
  2647. //
  2648. // Leave space to stash the length and tag.
  2649. //
  2650. NumberOfPages += 2;
  2651. PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages, SystemPteSpace);
  2652. if (PointerPte == NULL) {
  2653. //
2654. // Not enough system PTEs are available.
  2655. //
  2656. return NULL;
  2657. }
  2658. //
  2659. // Make sure the valid bit is always zero in the stash PTEs.
  2660. //
  2661. *(PULONG_PTR)PointerPte = (NumberOfPages << 1);
  2662. PointerPte += 1;
  2663. *(PULONG_PTR)PointerPte = (PoolTag & ~0x1);
  2664. PointerPte += 1;
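//
// The front of the reservation is now laid out as follows (the valid
// bit is clear in both stash PTEs since bit 0 of each stashed value is
// forced to zero):
//
//     PTE[0] = NumberOfPages << 1
//     PTE[1] = PoolTag & ~0x1
//     PTE[2] onwards = the caller's mapping PTEs (BaseVa maps here)
//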
  2665. BaseVa = MiGetVirtualAddressMappedByPte (PointerPte);
  2666. if (MmTrackPtes & 0x1) {
  2667. //
  2668. // First free any zombie blocks as no locks are being held.
  2669. //
  2670. Tracker = MiReleaseDeadPteTrackers ();
  2671. if (Tracker != NULL) {
  2672. RtlGetCallersAddress (&CallingAddress, &CallersCaller);
  2673. MemoryDescriptorList = (PMDL) &MdlHack;
  2674. MemoryDescriptorList->MappedSystemVa = BaseVa;
  2675. MemoryDescriptorList->StartVa = (PVOID)(ULONG_PTR)PoolTag;
  2676. MemoryDescriptorList->ByteOffset = 0;
  2677. MemoryDescriptorList->ByteCount = (ULONG)((NumberOfPages - 2) * PAGE_SIZE);
  2678. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  2679. *Page = 0;
  2680. MiInsertPteTracker (Tracker,
  2681. MemoryDescriptorList,
  2682. NumberOfPages - 2,
  2683. CallingAddress,
  2684. CallersCaller);
  2685. }
  2686. }
  2687. return BaseVa;
  2688. }
  2689. VOID
  2690. MmFreeMappingAddress (
  2691. IN PVOID BaseAddress,
  2692. IN ULONG PoolTag
  2693. )
  2694. /*++
  2695. Routine Description:
  2696. This routine unmaps a virtual address range previously reserved with
  2697. MmAllocateMappingAddress.
  2698. Arguments:
  2699. BaseAddress - Supplies the base address previously reserved.
  2700. PoolTag - Supplies the caller's identifying tag.
  2701. Return Value:
  2702. None.
  2703. Environment:
  2704. Kernel mode. PASSIVE_LEVEL.
  2705. --*/
  2706. {
  2707. ULONG OriginalPoolTag;
  2708. PFN_NUMBER NumberOfPages;
  2709. PMMPTE PointerBase;
  2710. PMMPTE PointerPte;
  2711. PMMPTE LastPte;
  2712. ASSERT (KeGetCurrentIrql () == PASSIVE_LEVEL);
  2713. ASSERT (!MI_IS_PHYSICAL_ADDRESS (BaseAddress));
  2714. ASSERT (BaseAddress > MM_HIGHEST_USER_ADDRESS);
  2715. PointerPte = MiGetPteAddress (BaseAddress);
  2716. PointerBase = PointerPte - 2;
  2717. OriginalPoolTag = *(PULONG) (PointerPte - 1);
  2718. ASSERT ((OriginalPoolTag & 0x1) == 0);
  2719. if (OriginalPoolTag != (PoolTag & ~0x1)) {
  2720. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2721. 0x101,
  2722. (ULONG_PTR)BaseAddress,
  2723. PoolTag,
  2724. OriginalPoolTag);
  2725. }
  2726. NumberOfPages = *(PULONG_PTR)PointerBase;
  2727. ASSERT ((NumberOfPages & 0x1) == 0);
  2728. NumberOfPages >>= 1;
  2729. if (NumberOfPages <= 2) {
  2730. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2731. 0x102,
  2732. (ULONG_PTR)BaseAddress,
  2733. PoolTag,
  2734. NumberOfPages);
  2735. }
  2736. NumberOfPages -= 2;
  2737. LastPte = PointerPte + NumberOfPages;
  2738. while (PointerPte < LastPte) {
  2739. if (PointerPte->u.Long != 0) {
  2740. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2741. 0x103,
  2742. (ULONG_PTR)BaseAddress,
  2743. PoolTag,
  2744. NumberOfPages);
  2745. }
  2746. PointerPte += 1;
  2747. }
  2748. if (MmTrackPtes & 0x1) {
  2749. MiRemovePteTracker (NULL,
  2750. PointerBase + 2,
  2751. NumberOfPages);
  2752. }
  2753. //
  2754. // Note the tag and size are nulled out when the PTEs are released below
  2755. // so any drivers that try to use their mapping after freeing it get
  2756. // caught immediately.
  2757. //
  2758. MiReleaseSystemPtes (PointerBase, (ULONG)NumberOfPages + 2, SystemPteSpace);
  2759. return;
  2760. }
  2761. PVOID
  2762. MmMapLockedPagesWithReservedMapping (
  2763. IN PVOID MappingAddress,
  2764. IN ULONG PoolTag,
  2765. IN PMDL MemoryDescriptorList,
  2766. IN MEMORY_CACHING_TYPE CacheType
  2767. )
  2768. /*++
  2769. Routine Description:
  2770. This function maps physical pages described by a memory descriptor
  2771. list into the system virtual address space.
  2772. Arguments:
  2773. MappingAddress - Supplies a valid mapping address obtained earlier via
  2774. MmAllocateMappingAddress.
  2775. PoolTag - Supplies the caller's identifying tag.
  2776. MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
  2777. been updated by MmProbeAndLockPages.
  2778. CacheType - Supplies the type of cache mapping to use for the MDL.
  2779. MmCached indicates "normal" kernel or user mappings.
  2780. Return Value:
  2781. Returns the base address where the pages are mapped. The base address
  2782. has the same offset as the virtual address in the MDL.
  2783. This routine will return NULL if the cache type requested is incompatible
  2784. with the pages being mapped or if the caller tries to map an MDL that is
  2785. larger than the virtual address range originally reserved.
  2786. Environment:
  2787. Kernel mode. DISPATCH_LEVEL or below. The caller must synchronize usage
  2788. of the argument virtual address space.
  2789. --*/
  2790. {
  2791. KIRQL OldIrql;
  2792. CSHORT IoMapping;
  2793. PFN_NUMBER NumberOfPages;
  2794. PFN_NUMBER VaPageSpan;
  2795. PFN_NUMBER SavedPageCount;
  2796. PPFN_NUMBER Page;
  2797. PMMPTE PointerBase;
  2798. PMMPTE PointerPte;
  2799. PMMPTE LastPte;
  2800. MMPTE TempPte;
  2801. PVOID StartingVa;
  2802. PFN_NUMBER NumberOfPtes;
  2803. PFN_NUMBER PageFrameIndex;
  2804. ULONG OriginalPoolTag;
  2805. PMMPFN Pfn2;
  2806. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  2807. ASSERT (KeGetCurrentIrql () <= DISPATCH_LEVEL);
  2808. StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
  2809. MemoryDescriptorList->ByteOffset);
  2810. ASSERT (MemoryDescriptorList->ByteCount != 0);
  2811. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
  2812. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  2813. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
  2814. MemoryDescriptorList->ByteCount);
  2815. PointerPte = MiGetPteAddress (MappingAddress);
  2816. PointerBase = PointerPte - 2;
  2817. OriginalPoolTag = *(PULONG) (PointerPte - 1);
  2818. ASSERT ((OriginalPoolTag & 0x1) == 0);
  2819. if (OriginalPoolTag != (PoolTag & ~0x1)) {
  2820. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2821. 0x104,
  2822. (ULONG_PTR)MappingAddress,
  2823. PoolTag,
  2824. OriginalPoolTag);
  2825. }
  2826. VaPageSpan = *(PULONG_PTR)PointerBase;
  2827. ASSERT ((VaPageSpan & 0x1) == 0);
  2828. VaPageSpan >>= 1;
  2829. if (VaPageSpan <= 2) {
  2830. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2831. 0x105,
  2832. (ULONG_PTR)MappingAddress,
  2833. PoolTag,
  2834. VaPageSpan);
  2835. }
  2836. if (NumberOfPages > VaPageSpan - 2) {
  2837. //
  2838. // The caller is trying to map an MDL that spans a range larger than
2839. // the reserved mapping ! This is a driver bug.
  2840. //
  2841. ASSERT (FALSE);
  2842. return NULL;
  2843. }
  2844. //
  2845. // All the mapping PTEs must be zero.
  2846. //
  2847. LastPte = PointerPte + VaPageSpan - 2;
  2848. while (PointerPte < LastPte) {
  2849. if (PointerPte->u.Long != 0) {
  2850. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  2851. 0x107,
  2852. (ULONG_PTR)MappingAddress,
  2853. (ULONG_PTR)PointerPte,
  2854. (ULONG_PTR)LastPte);
  2855. }
  2856. PointerPte += 1;
  2857. }
  2858. PointerPte = PointerBase + 2;
  2859. SavedPageCount = NumberOfPages;
  2860. ASSERT ((MemoryDescriptorList->MdlFlags & (
  2861. MDL_MAPPED_TO_SYSTEM_VA |
  2862. MDL_SOURCE_IS_NONPAGED_POOL |
  2863. MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
  2864. ASSERT ((MemoryDescriptorList->MdlFlags & (
  2865. MDL_PAGES_LOCKED |
  2866. MDL_PARTIAL)) != 0);
  2867. //
  2868. // If a noncachable mapping is requested, none of the pages in the
  2869. // requested MDL can reside in a large page. Otherwise we would be
  2870. // creating an incoherent overlapping TB entry as the same physical
  2871. // page would be mapped by 2 different TB entries with different
  2872. // cache attributes.
  2873. //
  2874. IoMapping = MemoryDescriptorList->MdlFlags & MDL_IO_SPACE;
  2875. CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, IoMapping);
  2876. if (CacheAttribute != MiCached) {
  2877. do {
  2878. if (*Page == MM_EMPTY_LIST) {
  2879. break;
  2880. }
  2881. PageFrameIndex = *Page;
  2882. if (MI_PAGE_FRAME_INDEX_MUST_BE_CACHED (PageFrameIndex)) {
  2883. MiNonCachedCollisions += 1;
  2884. return NULL;
  2885. }
  2886. Page += 1;
  2887. NumberOfPages -= 1;
  2888. } while (NumberOfPages != 0);
  2889. NumberOfPages = SavedPageCount;
  2890. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  2891. MI_PREPARE_FOR_NONCACHED (CacheAttribute);
  2892. }
  2893. NumberOfPtes = NumberOfPages;
  2894. TempPte = ValidKernelPte;
  2895. switch (CacheAttribute) {
  2896. case MiNonCached:
  2897. MI_DISABLE_CACHING (TempPte);
  2898. break;
  2899. case MiCached:
  2900. break;
  2901. case MiWriteCombined:
  2902. MI_SET_PTE_WRITE_COMBINE (TempPte);
  2903. break;
  2904. default:
  2905. ASSERT (FALSE);
  2906. break;
  2907. }
  2908. OldIrql = HIGH_LEVEL;
  2909. do {
  2910. if (*Page == MM_EMPTY_LIST) {
  2911. break;
  2912. }
  2913. ASSERT (PointerPte->u.Hard.Valid == 0);
  2914. if (IoMapping == 0) {
  2915. Pfn2 = MI_PFN_ELEMENT (*Page);
  2916. ASSERT (Pfn2->u3.e2.ReferenceCount != 0);
  2917. TempPte = ValidKernelPte;
  2918. switch (Pfn2->u3.e1.CacheAttribute) {
  2919. case MiCached:
  2920. if (CacheAttribute != MiCached) {
  2921. //
  2922. // The caller asked for a noncached or writecombined
  2923. // mapping, but the page is already mapped cached by
  2924. // someone else. Override the caller's request in
  2925. // order to keep the TB page attribute coherent.
  2926. //
  2927. MiCacheOverride[0] += 1;
  2928. }
  2929. break;
  2930. case MiNonCached:
  2931. if (CacheAttribute != MiNonCached) {
  2932. //
  2933. // The caller asked for a cached or writecombined
  2934. // mapping, but the page is already mapped noncached
  2935. // by someone else. Override the caller's request
  2936. // in order to keep the TB page attribute coherent.
  2937. //
  2938. MiCacheOverride[1] += 1;
  2939. }
  2940. MI_DISABLE_CACHING (TempPte);
  2941. break;
  2942. case MiWriteCombined:
  2943. if (CacheAttribute != MiWriteCombined) {
  2944. //
  2945. // The caller asked for a cached or noncached
  2946. // mapping, but the page is already mapped
  2947. // writecombined by someone else. Override the
  2948. // caller's request in order to keep the TB page
  2949. // attribute coherent.
  2950. //
  2951. MiCacheOverride[2] += 1;
  2952. }
  2953. MI_SET_PTE_WRITE_COMBINE (TempPte);
  2954. break;
  2955. case MiNotMapped:
  2956. //
  2957. // This better be for a page allocated with
  2958. // MmAllocatePagesForMdl. Otherwise it might be a
  2959. // page on the freelist which could subsequently be
  2960. // given out with a different attribute !
  2961. //
  2962. ASSERT ((Pfn2->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME) ||
  2963. (Pfn2->PteAddress == (PVOID) (ULONG_PTR)(X64K | 0x1)));
  2964. if (OldIrql == HIGH_LEVEL) {
  2965. LOCK_PFN2 (OldIrql);
  2966. }
  2967. switch (CacheAttribute) {
  2968. case MiCached:
  2969. Pfn2->u3.e1.CacheAttribute = MiCached;
  2970. break;
  2971. case MiNonCached:
  2972. Pfn2->u3.e1.CacheAttribute = MiNonCached;
  2973. MI_DISABLE_CACHING (TempPte);
  2974. break;
  2975. case MiWriteCombined:
  2976. Pfn2->u3.e1.CacheAttribute = MiWriteCombined;
  2977. MI_SET_PTE_WRITE_COMBINE (TempPte);
  2978. break;
  2979. default:
  2980. ASSERT (FALSE);
  2981. break;
  2982. }
  2983. break;
  2984. default:
  2985. ASSERT (FALSE);
  2986. break;
  2987. }
  2988. }
  2989. TempPte.u.Hard.PageFrameNumber = *Page;
  2990. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  2991. Page += 1;
  2992. PointerPte += 1;
  2993. NumberOfPages -= 1;
  2994. } while (NumberOfPages != 0);
  2995. if (OldIrql != HIGH_LEVEL) {
  2996. UNLOCK_PFN2 (OldIrql);
  2997. }
  2998. MI_SWEEP_CACHE (CacheAttribute, MappingAddress, SavedPageCount * PAGE_SIZE);
  2999. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
  3000. MemoryDescriptorList->MappedSystemVa = MappingAddress;
  3001. MemoryDescriptorList->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
  3002. if ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) != 0) {
  3003. MemoryDescriptorList->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
  3004. }
  3005. MappingAddress = (PVOID)((PCHAR)MappingAddress + MemoryDescriptorList->ByteOffset);
  3006. return MappingAddress;
  3007. }
  3008. VOID
  3009. MmUnmapReservedMapping (
  3010. IN PVOID BaseAddress,
  3011. IN ULONG PoolTag,
  3012. IN PMDL MemoryDescriptorList
  3013. )
  3014. /*++
  3015. Routine Description:
  3016. This routine unmaps locked pages which were previously mapped via
  3017. a MmMapLockedPagesWithReservedMapping call.
  3018. Arguments:
  3019. BaseAddress - Supplies the base address where the pages were previously
  3020. mapped.
  3021. PoolTag - Supplies the caller's identifying tag.
  3022. MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
  3023. been updated by MmProbeAndLockPages.
  3024. Return Value:
  3025. None.
  3026. Environment:
  3027. Kernel mode. DISPATCH_LEVEL or below. The caller must synchronize usage
  3028. of the argument virtual address space.
  3029. --*/
  3030. {
  3031. ULONG OriginalPoolTag;
  3032. PFN_NUMBER NumberOfPages;
  3033. PFN_NUMBER ExtraPages;
  3034. PFN_NUMBER VaPageSpan;
  3035. PMMPTE PointerBase;
  3036. PMMPTE LastPte;
  3037. PMMPTE LastMdlPte;
  3038. PVOID StartingVa;
  3039. PVOID VaFlushList[MM_MAXIMUM_FLUSH_COUNT];
  3040. PMMPTE PointerPte;
  3041. PFN_NUMBER i;
  3042. PPFN_NUMBER Page;
  3043. PPFN_NUMBER LastCurrentPage;
  3044. ASSERT (KeGetCurrentIrql () <= DISPATCH_LEVEL);
  3045. ASSERT (MemoryDescriptorList->ByteCount != 0);
  3046. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) != 0);
  3047. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
  3048. ASSERT (!MI_IS_PHYSICAL_ADDRESS (BaseAddress));
  3049. ASSERT (BaseAddress > MM_HIGHEST_USER_ADDRESS);
  3050. PointerPte = MiGetPteAddress (BaseAddress);
  3051. PointerBase = PointerPte - 2;
  3052. OriginalPoolTag = *(PULONG) (PointerPte - 1);
  3053. ASSERT ((OriginalPoolTag & 0x1) == 0);
  3054. if (OriginalPoolTag != (PoolTag & ~0x1)) {
  3055. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3056. 0x108,
  3057. (ULONG_PTR)BaseAddress,
  3058. PoolTag,
  3059. OriginalPoolTag);
  3060. }
  3061. VaPageSpan = *(PULONG_PTR)PointerBase;
  3062. ASSERT ((VaPageSpan & 0x1) == 0);
  3063. VaPageSpan >>= 1;
  3064. if (VaPageSpan <= 2) {
  3065. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3066. 0x109,
  3067. (ULONG_PTR)BaseAddress,
  3068. PoolTag,
  3069. VaPageSpan);
  3070. }
  3071. StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
  3072. MemoryDescriptorList->ByteOffset);
  3073. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
  3074. MemoryDescriptorList->ByteCount);
  3075. if (NumberOfPages > VaPageSpan - 2) {
  3076. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3077. 0x10A,
  3078. (ULONG_PTR)BaseAddress,
  3079. VaPageSpan,
  3080. NumberOfPages);
  3081. }
  3082. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  3083. LastCurrentPage = Page + NumberOfPages;
  3084. if (MemoryDescriptorList->MdlFlags & MDL_FREE_EXTRA_PTES) {
  3085. ExtraPages = *(Page + NumberOfPages);
  3086. ASSERT (ExtraPages <= MiCurrentAdvancedPages);
  3087. ASSERT (NumberOfPages + ExtraPages <= VaPageSpan - 2);
  3088. NumberOfPages += ExtraPages;
  3089. #if DBG
  3090. InterlockedExchangeAddSizeT (&MiCurrentAdvancedPages, 0 - ExtraPages);
  3091. MiAdvancesFreed += ExtraPages;
  3092. #endif
  3093. }
  3094. LastMdlPte = PointerPte + NumberOfPages;
  3095. LastPte = PointerPte + VaPageSpan - 2;
  3096. //
  3097. // The range described by the argument MDL must be mapped.
  3098. //
  3099. while (PointerPte < LastMdlPte) {
  3100. if (PointerPte->u.Hard.Valid == 0) {
  3101. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3102. 0x10B,
  3103. (ULONG_PTR)BaseAddress,
  3104. PoolTag,
  3105. NumberOfPages);
  3106. }
  3107. #if DBG
  3108. ASSERT ((*Page == MI_GET_PAGE_FRAME_FROM_PTE (PointerPte)) ||
  3109. (MemoryDescriptorList->MdlFlags & MDL_FREE_EXTRA_PTES));
  3110. if (((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0) &&
  3111. (Page < LastCurrentPage)) {
  3112. PMMPFN Pfn3;
  3113. Pfn3 = MI_PFN_ELEMENT (*Page);
  3114. ASSERT (Pfn3->u3.e2.ReferenceCount != 0);
  3115. }
  3116. Page += 1;
  3117. #endif
  3118. PointerPte += 1;
  3119. }
  3120. //
  3121. // The range past the argument MDL must be unmapped.
  3122. //
  3123. while (PointerPte < LastPte) {
  3124. if (PointerPte->u.Long != 0) {
  3125. KeBugCheckEx (SYSTEM_PTE_MISUSE,
  3126. 0x10C,
  3127. (ULONG_PTR)BaseAddress,
  3128. PoolTag,
  3129. NumberOfPages);
  3130. }
  3131. PointerPte += 1;
  3132. }
  3133. MiFillMemoryPte (PointerBase + 2,
  3134. NumberOfPages * sizeof (MMPTE),
  3135. ZeroPte.u.Long);
  3136. if (NumberOfPages == 1) {
  3137. KeFlushSingleTb (BaseAddress,
  3138. TRUE,
  3139. TRUE,
  3140. (PHARDWARE_PTE)(PointerBase + 2),
  3141. ZeroPte.u.Flush);
  3142. }
  3143. else if (NumberOfPages < MM_MAXIMUM_FLUSH_COUNT) {
  3144. for (i = 0; i < NumberOfPages; i += 1) {
  3145. VaFlushList[i] = BaseAddress;
  3146. BaseAddress = (PVOID)((PCHAR)BaseAddress + PAGE_SIZE);
  3147. }
  3148. KeFlushMultipleTb ((ULONG)NumberOfPages,
  3149. &VaFlushList[0],
  3150. TRUE,
  3151. TRUE,
  3152. NULL,
  3153. *(PHARDWARE_PTE)&ZeroPte.u.Flush);
  3154. }
  3155. else {
  3156. KeFlushEntireTb (TRUE, TRUE);
  3157. }
  3158. MemoryDescriptorList->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
  3159. MDL_PARTIAL_HAS_BEEN_MAPPED);
  3160. return;
  3161. }
  3162. NTKERNELAPI
  3163. NTSTATUS
  3164. MmAdvanceMdl (
  3165. IN PMDL Mdl,
  3166. IN ULONG NumberOfBytes
  3167. )
  3168. /*++
  3169. Routine Description:
  3170. This routine takes the specified MDL and "advances" it forward
  3171. by the specified number of bytes. If this causes the MDL to advance
  3172. past the initial page, the pages that are advanced over are immediately
  3173. unlocked and the system VA that maps the MDL is also adjusted (along
  3174. with the user address).
  3175. WARNING ! WARNING ! WARNING !
  3176. This means the caller MUST BE AWARE that the "advanced" pages are
  3177. immediately reused and therefore MUST NOT BE REFERENCED by the caller
  3178. once this routine has been called. Likewise the virtual address as
  3179. that is also being adjusted here.
  3180. Even if the caller has statically allocated this MDL on his local stack,
  3181. he cannot use more than the space currently described by the MDL on return
  3182. from this routine unless he first unmaps the MDL (if it was mapped).
  3183. Otherwise the system PTE lists will be corrupted.
  3184. Arguments:
  3185. MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
  3186. been updated by MmProbeAndLockPages.
  3187. NumberOfBytes - The number of bytes to advance the MDL by.
  3188. Return Value:
  3189. NTSTATUS.
  3190. --*/
  3191. {
  3192. ULONG i;
  3193. ULONG PageCount;
  3194. ULONG FreeBit;
  3195. ULONG Slush;
  3196. KIRQL OldIrql;
  3197. PPFN_NUMBER Page;
  3198. PPFN_NUMBER NewPage;
  3199. ULONG OffsetPages;
  3200. PEPROCESS Process;
  3201. PMMPFN Pfn1;
  3202. CSHORT MdlFlags;
  3203. PVOID StartingVa;
  3204. PFN_NUMBER NumberOfPages;
  3205. ASSERT (KeGetCurrentIrql () <= DISPATCH_LEVEL);
  3206. ASSERT (Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_SOURCE_IS_NONPAGED_POOL));
  3207. ASSERT (BYTE_OFFSET (Mdl->StartVa) == 0);
  3208. //
  3209. // Disallow advancement past the end of the MDL.
  3210. //
  3211. if (NumberOfBytes >= Mdl->ByteCount) {
  3212. return STATUS_INVALID_PARAMETER_2;
  3213. }
  3214. PageCount = 0;
  3215. MiMdlsAdjusted = TRUE;
  3216. StartingVa = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);
  3217. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingVa, Mdl->ByteCount);
  3218. if (Mdl->ByteOffset != 0) {
  3219. Slush = PAGE_SIZE - Mdl->ByteOffset;
  3220. if (NumberOfBytes < Slush) {
  3221. Mdl->ByteCount -= NumberOfBytes;
  3222. Mdl->ByteOffset += NumberOfBytes;
  3223. //
  3224. // StartVa never includes the byte offset (it's always page-aligned)
  3225. // so don't adjust it here. MappedSystemVa does include byte
  3226. // offsets so do adjust that.
  3227. //
  3228. if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
  3229. Mdl->MappedSystemVa = (PVOID) ((PCHAR)Mdl->MappedSystemVa + NumberOfBytes);
  3230. }
  3231. return STATUS_SUCCESS;
  3232. }
  3233. NumberOfBytes -= Slush;
  3234. Mdl->StartVa = (PVOID) ((PCHAR)Mdl->StartVa + PAGE_SIZE);
  3235. Mdl->ByteOffset = 0;
  3236. Mdl->ByteCount -= Slush;
  3237. if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
  3238. Mdl->MappedSystemVa = (PVOID) ((PCHAR)Mdl->MappedSystemVa + Slush);
  3239. }
  3240. //
  3241. // Up the number of pages (and addresses) that need to slide.
  3242. //
  3243. PageCount += 1;
  3244. }
  3245. //
  3246. // The MDL start is now nicely page aligned. Make sure there's still
  3247. // data left in it (we may have finished it off above), then operate on it.
  3248. //
  3249. if (NumberOfBytes != 0) {
  3250. Mdl->ByteCount -= NumberOfBytes;
  3251. Mdl->ByteOffset = BYTE_OFFSET (NumberOfBytes);
  3252. OffsetPages = NumberOfBytes >> PAGE_SHIFT;
  3253. Mdl->StartVa = (PVOID) ((PCHAR)Mdl->StartVa + (OffsetPages << PAGE_SHIFT));
  3254. PageCount += OffsetPages;
  3255. if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
  3256. Mdl->MappedSystemVa = (PVOID) ((PCHAR)Mdl->MappedSystemVa +
  3257. (OffsetPages << PAGE_SHIFT) +
  3258. Mdl->ByteOffset);
  3259. }
  3260. }
  3261. ASSERT (PageCount <= NumberOfPages);
  3262. if (PageCount != 0) {
  3263. //
  3264. // Slide the page frame numbers forward decrementing reference counts
  3265. // on the ones that are released. Then adjust the mapped system VA
  3266. // (if there is one) to reflect the current frame. Note that the TB
  3267. // does not need to be flushed due to the careful sliding and when
  3268. // the MDL is finally completely unmapped, the extra information
  3269. // added to the MDL here is used to free the entire original PTE
  3270. // mapping range in one chunk so as not to fragment the PTE space.
  3271. //
  3272. Page = (PPFN_NUMBER)(Mdl + 1);
  3273. NewPage = Page;
  3274. Process = Mdl->Process;
  3275. MdlFlags = Mdl->MdlFlags;
  3276. if (Process != NULL) {
  3277. if ((MdlFlags & MDL_PAGES_LOCKED) &&
  3278. ((MdlFlags & MDL_IO_SPACE) == 0)) {
  3279. ASSERT ((MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
  3280. ASSERT ((SPFN_NUMBER)Process->NumberOfLockedPages >= 0);
  3281. InterlockedExchangeAddSizeT (&Process->NumberOfLockedPages,
  3282. 0 - PageCount);
  3283. }
  3284. if (MmTrackLockedPages == TRUE) {
  3285. MiUpdateMdlTracker (Mdl, PageCount);
  3286. }
  3287. }
  3288. LOCK_PFN2 (OldIrql);
  3289. for (i = 0; i < PageCount; i += 1) {
  3290. //
  3291. // Decrement the stale page frames now, this will unlock them
  3292. // resulting in them being immediately reused if necessary.
  3293. //
  3294. if ((MdlFlags & MDL_PAGES_LOCKED) &&
  3295. ((MdlFlags & MDL_IO_SPACE) == 0)) {
  3296. ASSERT ((MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
  3297. Pfn1 = MI_PFN_ELEMENT (*Page);
  3298. if (MdlFlags & MDL_WRITE_OPERATION) {
  3299. //
  3300. // If this was a write operation set the modified bit
  3301. // in the PFN database.
  3302. //
  3303. MI_SET_MODIFIED (Pfn1, 1, 0x3);
  3304. if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
  3305. (Pfn1->u3.e1.WriteInProgress == 0)) {
  3306. FreeBit = GET_PAGING_FILE_OFFSET (Pfn1->OriginalPte);
  3307. if ((FreeBit != 0) && (FreeBit != MI_PTE_LOOKUP_NEEDED)) {
  3308. MiReleaseConfirmedPageFileSpace (Pfn1->OriginalPte);
  3309. Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
  3310. }
  3311. }
  3312. }
  3313. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF(Pfn1, 1);
  3314. }
  3315. Page += 1;
  3316. }
  3317. UNLOCK_PFN2 (OldIrql);
  3318. //
  3319. // Now ripple the remaining pages to the front of the MDL, effectively
  3320. // purging the old ones which have just been released.
  3321. //
  3322. ASSERT (i < NumberOfPages);
  3323. for ( ; i < NumberOfPages; i += 1) {
  3324. if (*Page == MM_EMPTY_LIST) {
  3325. break;
  3326. }
  3327. *NewPage = *Page;
  3328. NewPage += 1;
  3329. Page += 1;
  3330. }
  3331. //
  3332. // If the MDL has been mapped, stash the number of pages advanced
  3333. // at the end of the frame list inside the MDL and mark the MDL as
  3334. // containing extra PTEs to free. Thus when the MDL is finally
  3335. // completely unmapped, this can be used so the entire original PTE
  3336. // mapping range can be freed in one chunk so as not to fragment the
  3337. // PTE space.
  3338. //
  3339. if (MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
  3340. #if DBG
  3341. InterlockedExchangeAddSizeT (&MiCurrentAdvancedPages, PageCount);
  3342. MiAdvancesGiven += PageCount;
  3343. #endif
  3344. if (MdlFlags & MDL_FREE_EXTRA_PTES) {
  3345. //
  3346. // This MDL has already been advanced at least once. Any
  3347. // PTEs from those advancements need to be preserved now.
  3348. //
  3349. ASSERT (*Page <= MiCurrentAdvancedPages - PageCount);
  3350. PageCount += *(PULONG)Page;
  3351. }
  3352. else {
  3353. Mdl->MdlFlags |= MDL_FREE_EXTRA_PTES;
  3354. }
  3355. *NewPage = PageCount;
  3356. }
  3357. }
  3358. return STATUS_SUCCESS;
  3359. }
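//
// Usage sketch (illustrative, not part of the original source): a driver
// that consumes the leading bytes of a locked-down transfer might advance
// the MDL rather than rebuild it.  TransferMdl and ConsumedBytes are
// hypothetical names.
//
//     NTSTATUS Status;
//
//     Status = MmAdvanceMdl (TransferMdl, ConsumedBytes);
//
//     if (NT_SUCCESS (Status)) {
//
//         //
//         // The pages advanced over are already unlocked - neither they
//         // nor any prior system/user VA may be referenced again.
//         //
//     }
//
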
NTKERNELAPI
NTSTATUS
MmProtectMdlSystemAddress (
    IN PMDL MemoryDescriptorList,
    IN ULONG NewProtect
    )

/*++

Routine Description:

    This function protects the system address range specified
    by the argument Memory Descriptor List.

    Note the caller must make this MDL mapping readwrite before finally
    freeing (or reusing) it.

Arguments:

    MemoryDescriptorList - Supplies the MDL describing the virtual range.

    NewProtect - Supplies the protection to set the pages to (PAGE_XX).

Return Value:

    NTSTATUS.

Environment:

    Kernel mode, IRQL DISPATCH_LEVEL or below.  The caller is responsible for
    synchronizing access to this routine.

--*/

{
    KIRQL OldIrql;
    PVOID BaseAddress;
    PVOID SystemVa;
    MMPTE PteContents;
    MMPTE JunkPte;
    PMMPTE PointerPte;
    ULONG ProtectionMask;
#if DBG
    PMMPFN Pfn1;
    PPFN_NUMBER Page;
#endif
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER NumberOfPages;
    MMPTE_FLUSH_LIST PteFlushList;
    MMPTE OriginalPte;
    LOGICAL WasValid;
    PMM_PTE_MAPPING Map;
    PMM_PTE_MAPPING MapEntry;
    PMM_PTE_MAPPING FoundMap;
    PLIST_ENTRY NextEntry;

    ASSERT (KeGetCurrentIrql () <= DISPATCH_LEVEL);
    ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT ((MemoryDescriptorList->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT (MemoryDescriptorList->ByteCount != 0);

    if ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0) {
        return STATUS_NOT_MAPPED_VIEW;
    }

    BaseAddress = MemoryDescriptorList->MappedSystemVa;

    ASSERT (BaseAddress > MM_HIGHEST_USER_ADDRESS);
    ASSERT (!MI_IS_PHYSICAL_ADDRESS (BaseAddress));

    ProtectionMask = MiMakeProtectionMask (NewProtect);

    //
    // No bogus or copy-on-write protections allowed for these.
    //

    if ((ProtectionMask == MM_INVALID_PROTECTION) ||
        (ProtectionMask == MM_GUARD_PAGE) ||
        (ProtectionMask == MM_DECOMMIT) ||
        (ProtectionMask == MM_NOCACHE) ||
        (ProtectionMask == MM_WRITECOPY) ||
        (ProtectionMask == MM_EXECUTE_WRITECOPY)) {

        return STATUS_INVALID_PAGE_PROTECTION;
    }

    PointerPte = MiGetPteAddress (BaseAddress);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (BaseAddress,
                                                    MemoryDescriptorList->ByteCount);

    SystemVa = PAGE_ALIGN (BaseAddress);

    //
    // Initializing Map is not needed for correctness
    // but without it the compiler cannot compile this code
    // W4 to check for use of uninitialized variables.
    //

    Map = NULL;

    if (ProtectionMask != MM_READWRITE) {

        Map = ExAllocatePoolWithTag (NonPagedPool,
                                     sizeof(MM_PTE_MAPPING),
                                     'mPmM');

        if (Map == NULL) {
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        Map->SystemVa = SystemVa;
        Map->SystemEndVa = (PVOID)((ULONG_PTR)SystemVa +
                                   (NumberOfPages << PAGE_SHIFT));
        Map->Protection = ProtectionMask;
    }

#if DBG
    Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
#endif

    PteFlushList.Count = 0;

    while (NumberOfPages != 0) {

        PteContents = *PointerPte;

        if (PteContents.u.Hard.Valid == 1) {
            WasValid = TRUE;
            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
            OriginalPte = PteContents;
        }
        else if ((PteContents.u.Soft.Transition == 1) &&
                 (PteContents.u.Soft.Protection == MM_NOACCESS)) {

            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents);
            WasValid = FALSE;

#if defined(_IA64_)
            OriginalPte.u.Hard.Cache = PteContents.u.Trans.Rsvd0;
#else
            OriginalPte.u.Hard.WriteThrough = PteContents.u.Soft.PageFileLow;
            OriginalPte.u.Hard.CacheDisable = (PteContents.u.Soft.PageFileLow >> 1);
#endif
        }
        else {
            KeBugCheckEx (MEMORY_MANAGEMENT,
                          0x1235,
                          (ULONG_PTR)MemoryDescriptorList,
                          (ULONG_PTR)PointerPte,
                          (ULONG_PTR)PteContents.u.Long);
        }

#if DBG
        ASSERT (*Page == PageFrameIndex);
        if ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0) {
            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
            ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
        }
        Page += 1;
#endif

        if (ProtectionMask == MM_NOACCESS) {

            //
            // To generate a bugcheck on bogus access: Prototype must stay
            // clear, transition must stay set, protection must stay NO_ACCESS.
            //

            MI_MAKE_VALID_PTE_TRANSITION (PteContents, MM_NOACCESS);

            //
            // Stash the cache attributes into the software PTE so they can
            // be restored later.
            //

#if defined(_IA64_)
            PteContents.u.Trans.Rsvd0 = OriginalPte.u.Hard.Cache;
#else
            PteContents.u.Soft.PageFileLow = OriginalPte.u.Hard.WriteThrough;
            PteContents.u.Soft.PageFileLow |= (OriginalPte.u.Hard.CacheDisable << 1);
#endif
        }
        else {

            MI_MAKE_VALID_PTE (PteContents,
                               PageFrameIndex,
                               ProtectionMask,
                               PointerPte);

            if (ProtectionMask & MM_READWRITE) {
                MI_SET_PTE_DIRTY (PteContents);
            }

            //
            // Extract cache type from the original PTE so it can be preserved.
            // Note that since we only allow protection changes (not caching
            // attribute changes), there is no need to flush or sweep TBs on
            // insertion below.
            //

#if defined(_IA64_)
            PteContents.u.Hard.Cache = OriginalPte.u.Hard.Cache;
#else
            PteContents.u.Hard.WriteThrough = OriginalPte.u.Hard.WriteThrough;
            PteContents.u.Hard.CacheDisable = OriginalPte.u.Hard.CacheDisable;
#endif
        }

        *PointerPte = PteContents;

        if ((WasValid == TRUE) &&
            (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT)) {

            PteFlushList.FlushVa[PteFlushList.Count] = BaseAddress;
            PteFlushList.FlushPte[PteFlushList.Count] = &JunkPte;
            PteFlushList.Count += 1;
        }

        BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
        PointerPte += 1;
        NumberOfPages -= 1;
    }

    //
    // Flush the TB entries for any relevant pages.  Note the ZeroPte is
    // not written to the actual PTEs as they have already been set above.
    //

    if (PteFlushList.Count != 0) {
        MiFlushPteList (&PteFlushList, FALSE, ZeroPte);
    }

    if (ProtectionMask != MM_READWRITE) {

        //
        // Insert (or update) the list entry describing this range.
        // Don't bother sorting the list as there will never be many entries.
        //

        FoundMap = NULL;

        OldIrql = KeAcquireSpinLockRaiseToSynch (&MmProtectedPteLock);

        NextEntry = MmProtectedPteList.Flink;

        while (NextEntry != &MmProtectedPteList) {

            MapEntry = CONTAINING_RECORD (NextEntry,
                                          MM_PTE_MAPPING,
                                          ListEntry);

            if (MapEntry->SystemVa == SystemVa) {
                ASSERT (MapEntry->SystemEndVa == Map->SystemEndVa);
                MapEntry->Protection = Map->Protection;
                FoundMap = MapEntry;
                break;
            }

            NextEntry = NextEntry->Flink;
        }

        if (FoundMap == NULL) {
            InsertHeadList (&MmProtectedPteList, &Map->ListEntry);
        }

        KeReleaseSpinLock (&MmProtectedPteLock, OldIrql);

        if (FoundMap != NULL) {
            ExFreePool (Map);
        }
    }
    else {

        //
        // If there is an existing list entry describing this range, remove it.
        //

        if (!IsListEmpty (&MmProtectedPteList)) {

            FoundMap = NULL;

            OldIrql = KeAcquireSpinLockRaiseToSynch (&MmProtectedPteLock);

            NextEntry = MmProtectedPteList.Flink;

            while (NextEntry != &MmProtectedPteList) {

                MapEntry = CONTAINING_RECORD (NextEntry,
                                              MM_PTE_MAPPING,
                                              ListEntry);

                if (MapEntry->SystemVa == SystemVa) {
                    RemoveEntryList (NextEntry);
                    FoundMap = MapEntry;
                    break;
                }

                NextEntry = NextEntry->Flink;
            }

            KeReleaseSpinLock (&MmProtectedPteLock, OldIrql);

            if (FoundMap != NULL) {
                ExFreePool (FoundMap);
            }
        }
    }

    ASSERT (MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

    return STATUS_SUCCESS;
}

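//
// Usage sketch (illustrative, not part of the original source): a driver
// can make a mapped MDL readonly to catch stray writes, but must restore
// readwrite before unmapping or freeing it.  Mdl is a hypothetical MDL
// that has already been probed, locked and mapped into system space.
//
//     NTSTATUS Status;
//
//     Status = MmProtectMdlSystemAddress (Mdl, PAGE_READONLY);
//
//     ... any write through the system mapping now bugchecks ...
//
//     Status = MmProtectMdlSystemAddress (Mdl, PAGE_READWRITE);
//     ASSERT (NT_SUCCESS (Status));
//
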
LOGICAL
MiCheckSystemPteProtection (
    IN ULONG_PTR StoreInstruction,
    IN PVOID VirtualAddress
    )

/*++

Routine Description:

    This function determines whether the faulting virtual address lies
    within the non-writable alternate system PTE mappings.

Arguments:

    StoreInstruction - Supplies nonzero if the operation causes a write into
                       memory, zero if not.

    VirtualAddress - Supplies the virtual address which caused the fault.

Return Value:

    TRUE if the fault was handled by this code (and PTE updated), FALSE if not.

Environment:

    Kernel mode.  Called from the fault handler at any IRQL.

--*/

{
    KIRQL OldIrql;
    PMMPTE PointerPte;
    ULONG ProtectionCode;
    PLIST_ENTRY NextEntry;
    PMM_PTE_MAPPING MapEntry;

    //
    // If PTE mappings with various protections are active and the faulting
    // address lies within these mappings, resolve the fault with
    // the appropriate protections.
    //

    if (IsListEmpty (&MmProtectedPteList)) {
        return FALSE;
    }

    OldIrql = KeAcquireSpinLockRaiseToSynch (&MmProtectedPteLock);

    NextEntry = MmProtectedPteList.Flink;

    while (NextEntry != &MmProtectedPteList) {

        MapEntry = CONTAINING_RECORD (NextEntry,
                                      MM_PTE_MAPPING,
                                      ListEntry);

        if ((VirtualAddress >= MapEntry->SystemVa) &&
            (VirtualAddress < MapEntry->SystemEndVa)) {

            ProtectionCode = MapEntry->Protection;
            KeReleaseSpinLock (&MmProtectedPteLock, OldIrql);

            PointerPte = MiGetPteAddress (VirtualAddress);

            if (StoreInstruction != 0) {
                if ((ProtectionCode & MM_READWRITE) == 0) {
                    KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY,
                                  (ULONG_PTR)VirtualAddress,
                                  (ULONG_PTR)PointerPte->u.Long,
                                  0,
                                  16);
                }
            }

            MI_NO_FAULT_FOUND (StoreInstruction,
                               PointerPte,
                               VirtualAddress,
                               FALSE);

            //
            // Fault was handled directly here, no need for the caller to
            // do anything.
            //

            return TRUE;
        }

        NextEntry = NextEntry->Flink;
    }

    KeReleaseSpinLock (&MmProtectedPteLock, OldIrql);

    return FALSE;
}

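//
// Caller pattern (illustrative, not part of the original source): a fault
// handler is expected to try this resolver before treating a system-space
// fault as fatal, roughly:
//
//     if (MiCheckSystemPteProtection (StoreInstruction, VirtualAddress)) {
//
//         //
//         // PTE already updated; just retry the faulting instruction.
//         //
//
//         return STATUS_SUCCESS;
//     }
//
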
VOID
MiPhysicalViewInserter (
    IN PEPROCESS Process,
    IN PMI_PHYSICAL_VIEW PhysicalView
    )

/*++

Routine Description:

    This function is a nonpaged wrapper which acquires the PFN lock to insert
    a physical VAD into the process chain.

Arguments:

    Process - Supplies the process to add the physical VAD to.

    PhysicalView - Supplies the physical view data to link in.

Return Value:

    None.

Environment:

    Kernel mode.  APC_LEVEL, working set and address space mutexes held.

--*/

{
    KIRQL OldIrql;

    MmLockPagableSectionByHandle (ExPageLockHandle);

    LOCK_PFN (OldIrql);

    InsertTailList (&Process->PhysicalVadList, &PhysicalView->ListEntry);

    if (PhysicalView->Vad->u.VadFlags.WriteWatch == 1) {
        MiActiveWriteWatch += 1;
    }

    if (PhysicalView->Vad->u.VadFlags.PhysicalMapping == 1) {
        PS_SET_BITS (&Process->Flags, PS_PROCESS_FLAGS_HAS_PHYSICAL_VAD);
    }

    UNLOCK_PFN (OldIrql);

    if (PhysicalView->Vad->u.VadFlags.WriteWatch == 1) {

        //
        // Mark this process as forever containing write-watch
        // address space(s).
        //

        if ((Process->Flags & PS_PROCESS_FLAGS_USING_WRITE_WATCH) == 0) {
            PS_SET_BITS (&Process->Flags, PS_PROCESS_FLAGS_USING_WRITE_WATCH);
        }
    }

    MmUnlockPagableImageSection (ExPageLockHandle);
}

VOID
MiPhysicalViewRemover (
    IN PEPROCESS Process,
    IN PMMVAD Vad
    )

/*++

Routine Description:

    This function is a nonpaged wrapper which acquires the PFN lock to remove
    a physical VAD from the process chain.

Arguments:

    Process - Supplies the process to remove the physical VAD from.

    Vad - Supplies the Vad to remove.

Return Value:

    None.

Environment:

    Kernel mode, APC_LEVEL, working set and address space mutexes held.

--*/

{
    KIRQL OldIrql;
    PRTL_BITMAP BitMap;
    PLIST_ENTRY NextEntry;
    PMI_PHYSICAL_VIEW PhysicalView;
    PMI_PHYSICAL_VIEW PhysicalView2;
    ULONG BitMapSize;
    ULONG PhysicalVadCount;

    BitMap = NULL;
    PhysicalVadCount = 0;

    LOCK_PFN (OldIrql);

    NextEntry = Process->PhysicalVadList.Flink;
    while (NextEntry != &Process->PhysicalVadList) {

        PhysicalView = CONTAINING_RECORD(NextEntry,
                                         MI_PHYSICAL_VIEW,
                                         ListEntry);

        if (PhysicalView->Vad == Vad) {

            RemoveEntryList (NextEntry);

            if (Vad->u.VadFlags.WriteWatch == 1) {
                MiActiveWriteWatch -= 1;
                BitMap = PhysicalView->u.BitMap;
                ASSERT (BitMap != NULL);
            }
            else if (Vad->u.VadFlags.PhysicalMapping == 1) {

                ASSERT (Process->Flags & PS_PROCESS_FLAGS_HAS_PHYSICAL_VAD);

                //
                // If this might be the last physical VAD, scan the rest to
                // see.  If so, then mark the process as no longer having
                // any so probe and locks can execute faster.
                //

                if (PhysicalVadCount == 0) {

                    NextEntry = NextEntry->Flink;

                    while (NextEntry != &Process->PhysicalVadList) {

                        //
                        // Examine each remaining view (not the VAD being
                        // removed) for a physical mapping.
                        //

                        PhysicalView2 = CONTAINING_RECORD(NextEntry,
                                                          MI_PHYSICAL_VIEW,
                                                          ListEntry);

                        if (PhysicalView2->Vad->u.VadFlags.PhysicalMapping == 1) {
                            PhysicalVadCount += 1;
                            break;
                        }

                        NextEntry = NextEntry->Flink;
                    }

                    if (PhysicalVadCount == 0) {
                        PS_CLEAR_BITS (&Process->Flags,
                                       PS_PROCESS_FLAGS_HAS_PHYSICAL_VAD);
                    }
                }
            }

            UNLOCK_PFN (OldIrql);

            ExFreePool (PhysicalView);

            if (BitMap != NULL) {
                BitMapSize = sizeof(RTL_BITMAP) +
                             (ULONG)(((BitMap->SizeOfBitMap + 31) / 32) * 4);
                PsReturnProcessNonPagedPoolQuota (Process, BitMapSize);
                ExFreePool (BitMap);
            }

            return;
        }

        if (PhysicalView->Vad->u.VadFlags.PhysicalMapping == 1) {
            PhysicalVadCount += 1;
        }

        NextEntry = NextEntry->Flink;
    }

    ASSERT (FALSE);

    UNLOCK_PFN (OldIrql);
}

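//
// Worked example (illustrative, not part of the original source): for a
// write-watch view of 256MB with 4KB pages, SizeOfBitMap is 65536 bits,
// so the quota returned above is sizeof(RTL_BITMAP) + (65536/32)*4 =
// sizeof(RTL_BITMAP) + 8192 bytes.
//
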
VOID
MiPhysicalViewAdjuster (
    IN PEPROCESS Process,
    IN PMMVAD OldVad,
    IN PMMVAD NewVad
    )

/*++

Routine Description:

    This function is a nonpaged wrapper which acquires the PFN lock to repoint
    a physical VAD in the process chain.

Arguments:

    Process - Supplies the process in which to adjust the physical VAD.

    OldVad - Supplies the old Vad to replace.

    NewVad - Supplies the new Vad to substitute.

Return Value:

    None.

Environment:

    Kernel mode, called with APCs disabled, working set mutex held.

--*/

{
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry;
    PMI_PHYSICAL_VIEW PhysicalView;

    MmLockPagableSectionByHandle (ExPageLockHandle);

    LOCK_PFN (OldIrql);

    NextEntry = Process->PhysicalVadList.Flink;

    while (NextEntry != &Process->PhysicalVadList) {

        PhysicalView = CONTAINING_RECORD(NextEntry,
                                         MI_PHYSICAL_VIEW,
                                         ListEntry);

        if (PhysicalView->Vad == OldVad) {
            PhysicalView->Vad = NewVad;
            UNLOCK_PFN (OldIrql);
            MmUnlockPagableImageSection (ExPageLockHandle);
            return;
        }

        NextEntry = NextEntry->Flink;
    }

    ASSERT (FALSE);

    UNLOCK_PFN (OldIrql);

    MmUnlockPagableImageSection (ExPageLockHandle);
}

PVOID
MiMapLockedPagesInUserSpace (
    IN PMDL MemoryDescriptorList,
    IN PVOID StartingVa,
    IN MEMORY_CACHING_TYPE CacheType,
    IN PVOID BaseVa
    )

/*++

Routine Description:

    This function maps physical pages described by a memory descriptor
    list into the user portion of the virtual address space.

Arguments:

    MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
                           been updated by MmProbeAndLockPages.

    StartingVa - Supplies the starting address.

    CacheType - Supplies the type of cache mapping to use for the MDL.
                MmCached indicates "normal" user mappings.

    BaseVa - Supplies the base address of the view.  If the initial
             value of this argument is not null, then the view will
             be mapped starting at the specified virtual address,
             which must be page aligned.  If the initial value of
             this argument is null, then the operating system will
             determine where to allocate the view.

Return Value:

    Returns the base address where the pages are mapped.  The base address
    has the same offset as the virtual address in the MDL.

    This routine will raise an exception if quota limits or VM limits are
    exceeded.

Environment:

    Kernel mode.  APC_LEVEL or below.

--*/

{
    CSHORT IoMapping;
    PFN_NUMBER NumberOfPages;
    PFN_NUMBER SavedPageCount;
    PFN_NUMBER PageFrameIndex;
    PPFN_NUMBER Page;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PCHAR Va;
    MMPTE TempPte;
    PVOID EndingAddress;
    PMMVAD_LONG Vad;
    PEPROCESS Process;
    PMMPFN Pfn2;
    PVOID UsedPageTableHandle;
    PMI_PHYSICAL_VIEW PhysicalView;
    NTSTATUS Status;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    PAGED_CODE ();

    Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
                                                    MemoryDescriptorList->ByteCount);

    //
    // If a noncachable mapping is requested, none of the pages in the
    // requested MDL can reside in a large page.  Otherwise we would be
    // creating an incoherent overlapping TB entry as the same physical
    // page would be mapped by 2 different TB entries with different
    // cache attributes.
    //

    IoMapping = MemoryDescriptorList->MdlFlags & MDL_IO_SPACE;

    CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, IoMapping);

    if (CacheAttribute != MiCached) {

        SavedPageCount = NumberOfPages;

        do {
            if (*Page == MM_EMPTY_LIST) {
                break;
            }

            PageFrameIndex = *Page;

            if (MI_PAGE_FRAME_INDEX_MUST_BE_CACHED (PageFrameIndex)) {
                MiNonCachedCollisions += 1;
                ExRaiseStatus (STATUS_INVALID_ADDRESS);
                return NULL;
            }

            Page += 1;
            NumberOfPages -= 1;
        } while (NumberOfPages != 0);

        NumberOfPages = SavedPageCount;
        Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
    }

    //
    // Map the pages into the user part of the address as user
    // read/write no-delete.
    //

    Vad = ExAllocatePoolWithTag (NonPagedPool, sizeof(MMVAD_LONG), 'ldaV');

    if (Vad == NULL) {
        ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
        return NULL;
    }

    PhysicalView = (PMI_PHYSICAL_VIEW)ExAllocatePoolWithTag (NonPagedPool,
                                                             sizeof(MI_PHYSICAL_VIEW),
                                                             MI_PHYSICAL_VIEW_KEY);

    if (PhysicalView == NULL) {
        ExFreePool (Vad);
        ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
        return NULL;
    }

    RtlZeroMemory (Vad, sizeof (MMVAD_LONG));

    ASSERT (Vad->ControlArea == NULL);
    ASSERT (Vad->FirstPrototypePte == NULL);
    ASSERT (Vad->u.LongFlags == 0);

    Vad->u.VadFlags.Protection = MM_READWRITE;
    Vad->u.VadFlags.PhysicalMapping = 1;
    Vad->u.VadFlags.PrivateMemory = 1;
    Vad->u2.VadFlags2.LongVad = 1;

    PhysicalView->Vad = (PMMVAD) Vad;
    PhysicalView->u.LongFlags = MI_PHYSICAL_VIEW_PHYS;

    Process = PsGetCurrentProcess ();

    //
    // Make sure the specified starting and ending addresses are
    // within the user part of the virtual address space.
    //

    if (BaseVa != NULL) {

        if (BYTE_OFFSET (BaseVa) != 0) {

            //
            // Invalid base address.
            //

            Status = STATUS_INVALID_ADDRESS;
            goto ErrorReturn;
        }

        EndingAddress = (PVOID)((PCHAR)BaseVa +
                                ((ULONG_PTR)NumberOfPages * PAGE_SIZE) - 1);

        if ((EndingAddress <= BaseVa) ||
            (EndingAddress > MM_HIGHEST_VAD_ADDRESS)) {

            //
            // Invalid region size.
            //

            Status = STATUS_INVALID_ADDRESS;
            goto ErrorReturn;
        }

        LOCK_ADDRESS_SPACE (Process);

        //
        // Make sure the address space was not deleted, if so, return an error.
        //

        if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
            UNLOCK_ADDRESS_SPACE (Process);
            Status = STATUS_PROCESS_IS_TERMINATING;
            goto ErrorReturn;
        }

        //
        // Make sure the address space is not already in use.
        //

        if (MiCheckForConflictingVadExistence (Process,
                                               BaseVa,
                                               EndingAddress) == TRUE) {

            UNLOCK_ADDRESS_SPACE (Process);
            Status = STATUS_CONFLICTING_ADDRESSES;
            goto ErrorReturn;
        }
    }
    else {

        //
        // Get the address creation mutex.
        //

        LOCK_ADDRESS_SPACE (Process);

        //
        // Make sure the address space was not deleted, if so, return an error.
        //

        if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
            UNLOCK_ADDRESS_SPACE (Process);
            Status = STATUS_PROCESS_IS_TERMINATING;
            goto ErrorReturn;
        }

        Status = MiFindEmptyAddressRange ((ULONG_PTR)NumberOfPages * PAGE_SIZE,
                                          X64K,
                                          0,
                                          &BaseVa);

        if (!NT_SUCCESS (Status)) {
            UNLOCK_ADDRESS_SPACE (Process);
            goto ErrorReturn;
        }

        EndingAddress = (PVOID)((PCHAR)BaseVa +
                                ((ULONG_PTR)NumberOfPages * PAGE_SIZE) - 1);
    }

    PhysicalView->StartVa = BaseVa;
    PhysicalView->EndVa = EndingAddress;

    Vad->StartingVpn = MI_VA_TO_VPN (BaseVa);
    Vad->EndingVpn = MI_VA_TO_VPN (EndingAddress);

    LOCK_WS_UNSAFE (Process);

    Status = MiInsertVad ((PMMVAD) Vad);

    if (!NT_SUCCESS(Status)) {
        UNLOCK_WS_AND_ADDRESS_SPACE (Process);
        goto ErrorReturn;
    }

    //
    // The VAD has been inserted, but the physical view descriptor cannot
    // be inserted until the page table page hierarchy is in place.  This
    // is to prevent races with probes.
    //

    //
    // Create a page table and fill in the mappings for the Vad.
    //

    Va = BaseVa;
    PointerPte = MiGetPteAddress (BaseVa);

    MI_PREPARE_FOR_NONCACHED (CacheAttribute);

    do {

        if (*Page == MM_EMPTY_LIST) {
            break;
        }

        PointerPde = MiGetPteAddress (PointerPte);

        MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);

        ASSERT (PointerPte->u.Hard.Valid == 0);

        //
        // Another zeroed PTE is being made non-zero.
        //

        UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (Va);
        MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);

        TempPte = ValidUserPte;
        TempPte.u.Hard.PageFrameNumber = *Page;

        if (IoMapping == 0) {

            Pfn2 = MI_PFN_ELEMENT (*Page);

            ASSERT (Pfn2->u3.e2.ReferenceCount != 0);

            switch (Pfn2->u3.e1.CacheAttribute) {

                case MiCached:
                    if (CacheAttribute != MiCached) {

                        //
                        // The caller asked for a noncached or writecombined
                        // mapping, but the page is already mapped cached by
                        // someone else.  Override the caller's request in
                        // order to keep the TB page attribute coherent.
                        //

                        MiCacheOverride[0] += 1;
                    }
                    break;

                case MiNonCached:
                    if (CacheAttribute != MiNonCached) {

                        //
                        // The caller asked for a cached or writecombined
                        // mapping, but the page is already mapped noncached
                        // by someone else.  Override the caller's request
                        // in order to keep the TB page attribute coherent.
                        //

                        MiCacheOverride[1] += 1;
                    }
                    MI_DISABLE_CACHING (TempPte);
                    break;

                case MiWriteCombined:
                    if (CacheAttribute != MiWriteCombined) {

                        //
                        // The caller asked for a cached or noncached
                        // mapping, but the page is already mapped
                        // writecombined by someone else.  Override the
                        // caller's request in order to keep the TB page
                        // attribute coherent.
                        //

                        MiCacheOverride[2] += 1;
                    }
                    MI_SET_PTE_WRITE_COMBINE (TempPte);
                    break;

                case MiNotMapped:

                    //
                    // This better be for a page allocated with
                    // MmAllocatePagesForMdl.  Otherwise it might be a
                    // page on the freelist which could subsequently be
                    // given out with a different attribute !
                    //

                    ASSERT ((Pfn2->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME) ||
                            (Pfn2->PteAddress == (PVOID) (ULONG_PTR)(X64K | 0x1)));

                    switch (CacheAttribute) {

                        case MiCached:
                            Pfn2->u3.e1.CacheAttribute = MiCached;
                            break;

                        case MiNonCached:
                            Pfn2->u3.e1.CacheAttribute = MiNonCached;
                            MI_DISABLE_CACHING (TempPte);
                            break;

                        case MiWriteCombined:
                            Pfn2->u3.e1.CacheAttribute = MiWriteCombined;
                            MI_SET_PTE_WRITE_COMBINE (TempPte);
                            break;

                        default:
                            ASSERT (FALSE);
                            break;
                    }
                    break;

                default:
                    ASSERT (FALSE);
                    break;
            }
        }
        else {

            switch (CacheAttribute) {

                case MiCached:
                    break;

                case MiNonCached:
                    MI_DISABLE_CACHING (TempPte);
                    break;

                case MiWriteCombined:
                    MI_SET_PTE_WRITE_COMBINE (TempPte);
                    break;

                default:
                    ASSERT (FALSE);
                    break;
            }
        }

        MI_WRITE_VALID_PTE (PointerPte, TempPte);

        //
        // A PTE just went from not present, not transition to
        // present.  The share count and valid count must be
        // updated in the page table page which contains this PTE.
        //

        Pfn2 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
        Pfn2->u2.ShareCount += 1;

        Page += 1;
        PointerPte += 1;
        NumberOfPages -= 1;
        Va += PAGE_SIZE;

    } while (NumberOfPages != 0);

    MI_SWEEP_CACHE (CacheAttribute, BaseVa, MemoryDescriptorList->ByteCount);

    //
    // Insert the physical view descriptor now that the page table page
    // hierarchy is in place.  Note probes can find this descriptor immediately.
    //

    MiPhysicalViewInserter (Process, PhysicalView);

    UNLOCK_WS_AND_ADDRESS_SPACE (Process);

    ASSERT (BaseVa != NULL);

    BaseVa = (PVOID)((PCHAR)BaseVa + MemoryDescriptorList->ByteOffset);

    return BaseVa;

ErrorReturn:

    ExFreePool (Vad);
    ExFreePool (PhysicalView);
    ExRaiseStatus (Status);

    return NULL;
}

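//
// Usage sketch (illustrative, not part of the original source): the path
// above is typically reached via MmMapLockedPagesSpecifyCache with
// AccessMode == UserMode.  Mdl is hypothetical and already locked down.
//
//     PVOID UserVa;
//
//     try {
//         UserVa = MmMapLockedPagesSpecifyCache (Mdl,
//                                                UserMode,
//                                                MmCached,
//                                                NULL,      // let Mm choose
//                                                FALSE,
//                                                NormalPagePriority);
//     } except (EXCEPTION_EXECUTE_HANDLER) {
//         UserVa = NULL;     // quota/VM limit failures raise, per above
//     }
//
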
VOID
MmUnmapLockedPages (
    IN PVOID BaseAddress,
    IN PMDL MemoryDescriptorList
    )

/*++

Routine Description:

    This routine unmaps locked pages which were previously mapped via
    a MmMapLockedPages call.

Arguments:

    BaseAddress - Supplies the base address where the pages were previously
                  mapped.

    MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
                           been updated by MmProbeAndLockPages.

Return Value:

    None.

Environment:

    Kernel mode.  DISPATCH_LEVEL or below if base address is within
    system space; APC_LEVEL or below if base address is user space.

    Note that in some instances the PFN lock is held by the caller.

--*/

{
    PFN_NUMBER NumberOfPages;
    PMMPTE PointerBase;
    PVOID StartingVa;
    PPFN_NUMBER Page;
#if DBG
    PMMPTE PointerPte;
    PFN_NUMBER i;
#endif

    ASSERT (MemoryDescriptorList->ByteCount != 0);
    ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
    ASSERT (!MI_IS_PHYSICAL_ADDRESS (BaseAddress));

    if (BaseAddress > MM_HIGHEST_USER_ADDRESS) {

        StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
                             MemoryDescriptorList->ByteOffset);

        NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
                                                        MemoryDescriptorList->ByteCount);

        PointerBase = MiGetPteAddress (BaseAddress);

        ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) != 0);

#if DBG
        PointerPte = PointerBase;
        i = NumberOfPages;
        Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);

        while (i != 0) {
            ASSERT (PointerPte->u.Hard.Valid == 1);
            ASSERT (*Page == MI_GET_PAGE_FRAME_FROM_PTE (PointerPte));
            if ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0) {
                PMMPFN Pfn3;
                Pfn3 = MI_PFN_ELEMENT (*Page);
                ASSERT (Pfn3->u3.e2.ReferenceCount != 0);
            }
            Page += 1;
            PointerPte += 1;
            i -= 1;
        }
#endif

        if (MemoryDescriptorList->MdlFlags & MDL_FREE_EXTRA_PTES) {
            Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
            Page += NumberOfPages;
            ASSERT (*Page <= MiCurrentAdvancedPages);
            NumberOfPages += *Page;
            PointerBase -= *Page;
#if DBG
            InterlockedExchangeAddSizeT (&MiCurrentAdvancedPages, 0 - *Page);
            MiAdvancesFreed += *Page;
#endif
        }

        if (MmTrackPtes & 0x1) {
            MiRemovePteTracker (MemoryDescriptorList,
                                PointerBase,
                                NumberOfPages);
        }

        MiReleaseSystemPtes (PointerBase, (ULONG)NumberOfPages, SystemPteSpace);

        MemoryDescriptorList->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                                            MDL_PARTIAL_HAS_BEEN_MAPPED |
                                            MDL_FREE_EXTRA_PTES);

        return;
    }

    MiUnmapLockedPagesInUserSpace (BaseAddress,
                                   MemoryDescriptorList);
}

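//
// Usage sketch (illustrative, not part of the original source): the
// canonical map/unmap pairing for a driver-built MDL.  Buffer and Length
// are hypothetical; real code wraps MmProbeAndLockPages in try/except
// because it raises on failure.
//
//     PMDL Mdl;
//     PVOID SystemVa;
//
//     Mdl = IoAllocateMdl (Buffer, Length, FALSE, FALSE, NULL);
//     MmProbeAndLockPages (Mdl, KernelMode, IoReadAccess);
//
//     SystemVa = MmGetSystemAddressForMdlSafe (Mdl, NormalPagePriority);
//
//     ... use SystemVa ...
//
//     MmUnmapLockedPages (SystemVa, Mdl);   // optional; MmUnlockPages
//     MmUnlockPages (Mdl);                  // also unmaps if needed
//     IoFreeMdl (Mdl);
//
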
VOID
MiUnmapLockedPagesInUserSpace (
    IN PVOID BaseAddress,
    IN PMDL MemoryDescriptorList
    )

/*++

Routine Description:

    This routine unmaps locked pages which were previously mapped via
    a MmMapLockedPages function.

Arguments:

    BaseAddress - Supplies the base address where the pages were previously
                  mapped.

    MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
                           been updated by MmProbeAndLockPages.

Return Value:

    None.

Environment:

    Kernel mode.  DISPATCH_LEVEL or below if base address is within system
    space, APC_LEVEL or below if base address is in user space.

--*/

{
    PFN_NUMBER NumberOfPages;
    PPFN_NUMBER Page;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
#if (_MI_PAGING_LEVELS >= 3)
    PMMPTE PointerPpe;
#endif
#if (_MI_PAGING_LEVELS >= 4)
    PMMPTE PointerPxe;
#endif
    PVOID StartingVa;
    KIRQL OldIrql;
    PMMVAD Vad;
    PMMVAD PreviousVad;
    PMMVAD NextVad;
    PVOID TempVa;
    PEPROCESS Process;
    PMMPFN PageTablePfn;
    PFN_NUMBER PageTablePage;
    PVOID UsedPageTableHandle;

    MmLockPagableSectionByHandle (ExPageLockHandle);

    StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
                         MemoryDescriptorList->ByteOffset);

    Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
                                                    MemoryDescriptorList->ByteCount);

    ASSERT (NumberOfPages != 0);

    PointerPte = MiGetPteAddress (BaseAddress);
    PointerPde = MiGetPdeAddress (BaseAddress);

    //
    // This was mapped into the user portion of the address space and
    // the corresponding virtual address descriptor must be deleted.
    //

    //
    // Get the working set mutex and address creation mutex.
    //

    Process = PsGetCurrentProcess ();

    LOCK_ADDRESS_SPACE (Process);

    Vad = MiLocateAddress (BaseAddress);

    if ((Vad == NULL) || (Vad->u.VadFlags.PhysicalMapping == 0)) {
        UNLOCK_ADDRESS_SPACE (Process);
        MmUnlockPagableImageSection(ExPageLockHandle);
        return;
    }

    PreviousVad = MiGetPreviousVad (Vad);
    NextVad = MiGetNextVad (Vad);

    LOCK_WS_UNSAFE (Process);

    MiPhysicalViewRemover (Process, Vad);

    MiRemoveVad (Vad);

    //
    // Return commitment for page table pages if possible.
    //

    MiReturnPageTablePageCommitment (MI_VPN_TO_VA (Vad->StartingVpn),
                                     MI_VPN_TO_VA_ENDING (Vad->EndingVpn),
                                     Process,
                                     PreviousVad,
                                     NextVad);

    UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (BaseAddress);

    PageTablePage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPde);
    PageTablePfn = MI_PFN_ELEMENT (PageTablePage);

    //
    // Get the PFN lock so we can safely decrement share and valid
    // counts on page table pages.
    //

    LOCK_PFN (OldIrql);

    do {

        if (*Page == MM_EMPTY_LIST) {
            break;
        }

        ASSERT64 (MiGetPdeAddress(PointerPte)->u.Hard.Valid == 1);
        ASSERT (MiGetPteAddress(PointerPte)->u.Hard.Valid == 1);
        ASSERT (PointerPte->u.Hard.Valid == 1);

        //
        // Another PTE is being zeroed.
        //

        MI_DECREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);

        KeFlushSingleTb (BaseAddress,
                         TRUE,
                         FALSE,
                         (PHARDWARE_PTE)PointerPte,
                         ZeroPte.u.Flush);

        MiDecrementShareCountInline (PageTablePfn, PageTablePage);

        PointerPte += 1;
        NumberOfPages -= 1;
        BaseAddress = (PVOID)((PCHAR)BaseAddress + PAGE_SIZE);
        Page += 1;

        if ((MiIsPteOnPdeBoundary(PointerPte)) || (NumberOfPages == 0)) {

            PointerPde = MiGetPteAddress(PointerPte - 1);
            ASSERT (PointerPde->u.Hard.Valid == 1);

            //
            // If all the entries have been eliminated from the previous
            // page table page, delete the page table page itself.  Likewise
            // with the page directory and parent pages.
            //

            if (MI_GET_USED_PTES_FROM_HANDLE (UsedPageTableHandle) == 0) {

                ASSERT (PointerPde->u.Long != 0);

#if (_MI_PAGING_LEVELS >= 3)
                UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (PointerPte - 1);
                MI_DECREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
#endif

                TempVa = MiGetVirtualAddressMappedByPte (PointerPde);

                MiDeletePte (PointerPde,
                             TempVa,
                             FALSE,
                             Process,
                             NULL,
                             NULL);

#if (_MI_PAGING_LEVELS >= 3)
                if ((MiIsPteOnPpeBoundary(PointerPte)) || (NumberOfPages == 0)) {

                    PointerPpe = MiGetPteAddress (PointerPde);
                    ASSERT (PointerPpe->u.Hard.Valid == 1);

                    //
                    // If all the entries have been eliminated from the previous
                    // page directory page, delete the page directory page too.
                    //

                    if (MI_GET_USED_PTES_FROM_HANDLE (UsedPageTableHandle) == 0) {

                        ASSERT (PointerPpe->u.Long != 0);

#if (_MI_PAGING_LEVELS >= 4)
                        UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (PointerPde);
                        MI_DECREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
#endif

                        TempVa = MiGetVirtualAddressMappedByPte(PointerPpe);

                        MiDeletePte (PointerPpe,
                                     TempVa,
                                     FALSE,
                                     Process,
                                     NULL,
                                     NULL);

#if (_MI_PAGING_LEVELS >= 4)
                        if ((MiIsPteOnPxeBoundary(PointerPte)) ||
                            (NumberOfPages == 0)) {

                            PointerPxe = MiGetPdeAddress (PointerPde);
                            ASSERT (PointerPxe->u.Long != 0);

                            if (MI_GET_USED_PTES_FROM_HANDLE (UsedPageTableHandle) == 0) {

                                TempVa = MiGetVirtualAddressMappedByPte(PointerPxe);

                                MiDeletePte (PointerPxe,
                                             TempVa,
                                             FALSE,
                                             Process,
                                             NULL,
                                             NULL);
                            }
                        }
#endif
                    }
                }
#endif
            }

            if (NumberOfPages == 0) {
                break;
            }

            UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (BaseAddress);

            PointerPde += 1;

            PageTablePage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPde);
            PageTablePfn = MI_PFN_ELEMENT (PageTablePage);
        }

    } while (NumberOfPages != 0);

    UNLOCK_PFN (OldIrql);

    UNLOCK_WS_AND_ADDRESS_SPACE (Process);

    ExFreePool (Vad);

    MmUnlockPagableImageSection(ExPageLockHandle);

    return;
}

PVOID
MmMapIoSpace (
    IN PHYSICAL_ADDRESS PhysicalAddress,
    IN SIZE_T NumberOfBytes,
    IN MEMORY_CACHING_TYPE CacheType
    )

/*++

Routine Description:

    This function maps the specified physical address into the non-pagable
    portion of the system address space.

Arguments:

    PhysicalAddress - Supplies the starting physical address to map.

    NumberOfBytes - Supplies the number of bytes to map.

    CacheType - Supplies MmNonCached if the physical address is to be mapped
                as non-cached, MmCached if the address should be cached, and
                MmWriteCombined if the address should be cached and
                write-combined as a frame buffer which is to be used only by
                the video port driver.  All other callers should use
                MmUSWCCached.  MmUSWCCached is available only if the PAT
                feature is present and available.

                For I/O device registers, this is usually specified
                as MmNonCached.

Return Value:

    Returns the virtual address which maps the specified physical addresses.
    The value NULL is returned if sufficient virtual address space for
    the mapping could not be found.

Environment:

    Kernel mode.  Should be IRQL of APC_LEVEL or below, but unfortunately
    callers are coming in at DISPATCH_LEVEL and it's too late to change the
    rules now.  This means you can never make this routine pagable.

--*/

{
    KIRQL OldIrql;
    CSHORT IoMapping;
    ULONG Hint;
    PMMPFN Pfn1;
    PFN_NUMBER NumberOfPages;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER LastPageFrameIndex;
    PMMPTE PointerPte;
    PVOID BaseVa;
    MMPTE TempPte;
    PMDL TempMdl;
    PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + 1];
    PPTE_TRACKER Tracker;
    PVOID CallingAddress;
    PVOID CallersCaller;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // For compatibility with when CacheType used to be passed as a BOOLEAN,
    // mask off the upper bits (TRUE == MmCached, FALSE == MmNonCached).
    //

    CacheType &= 0xFF;

    if (CacheType >= MmMaximumCacheType) {
        return NULL;
    }

    //
    // See if the first frame is in the PFN database and if so, they all must
    // be.
    //

    PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);

    Pfn1 = NULL;
    IoMapping = 1;
    Hint = 0;

    if (MiIsPhysicalMemoryAddress (PageFrameIndex, &Hint, TRUE) == TRUE) {
        IoMapping = 0;
        Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
    }

    CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, IoMapping);

#if !defined (_MI_MORE_THAN_4GB_)
    ASSERT (PhysicalAddress.HighPart == 0);
#endif

    ASSERT (NumberOfBytes != 0);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (PhysicalAddress.LowPart,
                                                    NumberOfBytes);

    if (CacheAttribute != MiCached) {

        //
        // If a noncachable mapping is requested, none of the pages in the
        // requested MDL can reside in a large page.  Otherwise we would be
        // creating an incoherent overlapping TB entry as the same physical
        // page would be mapped by 2 different TB entries with different
        // cache attributes.
        //

        PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
        LastPageFrameIndex = PageFrameIndex + NumberOfPages;

        do {
            if (MI_PAGE_FRAME_INDEX_MUST_BE_CACHED (PageFrameIndex)) {
                MiNonCachedCollisions += 1;
                return NULL;
            }
            PageFrameIndex += 1;
        } while (PageFrameIndex < LastPageFrameIndex);
    }

    PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages, SystemPteSpace);

    if (PointerPte == NULL) {
        return NULL;
    }

    BaseVa = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
    BaseVa = (PVOID)((PCHAR)BaseVa + BYTE_OFFSET(PhysicalAddress.LowPart));

    TempPte = ValidKernelPte;

    switch (CacheAttribute) {

        case MiNonCached:
            MI_DISABLE_CACHING (TempPte);
            break;

        case MiCached:
            break;

        case MiWriteCombined:
            MI_SET_PTE_WRITE_COMBINE (TempPte);
            break;

        default:
            ASSERT (FALSE);
            break;
    }

#if defined(_X86_)

    //
    // Set the physical range to the proper caching type.  If the PAT feature
    // is supported, then we just use the caching type in the PTE.  Otherwise
    // modify the MTRRs if applicable.
    //
    // Note if the cache request is for cached or noncached, don't waste
    // an MTRR on this range because the PTEs can be encoded to provide
    // equivalent functionality.
    //

    if ((MiWriteCombiningPtes == FALSE) && (CacheAttribute == MiWriteCombined)) {

        //
        // If the address is an I/O space address, use MTRRs if possible.
        //

        NTSTATUS Status;

        //
        // If the address is a memory address, don't risk using MTRRs because
        // other pages in the range are likely mapped with differing attributes
        // in the TB and we must not add a conflicting range.
        //

        if (Pfn1 != NULL) {
            MiReleaseSystemPtes(PointerPte, NumberOfPages, SystemPteSpace);
            return NULL;
        }

        //
        // Since the attribute may have been overridden (due to a collision
        // with a prior existing mapping), make sure the CacheType is also
        // consistent before editing the MTRRs.
        //

        CacheType = MmWriteCombined;

        Status = KeSetPhysicalCacheTypeRange (PhysicalAddress,
                                              NumberOfBytes,
                                              CacheType);

        if (!NT_SUCCESS(Status)) {

            //
            // There's still a problem, fail the request.
            //

            MiReleaseSystemPtes(PointerPte, NumberOfPages, SystemPteSpace);

            return NULL;
        }

        //
        // Override the write combine (weak UC) bits in the PTE and
        // instead use a cached attribute.  This is because the processor
        // will use the least cachable (ie: functionally safer) attribute
        // of the PTE & MTRR to use - so specifying fully cached for the PTE
        // ensures that the MTRR value will win out.
        //

        TempPte = ValidKernelPte;
    }

#endif

    PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);

    ASSERT ((Pfn1 == MI_PFN_ELEMENT (PageFrameIndex)) || (Pfn1 == NULL));

    Hint = 0;
    OldIrql = HIGH_LEVEL;

    MI_PREPARE_FOR_NONCACHED (CacheAttribute);

    do {
        ASSERT (PointerPte->u.Hard.Valid == 0);

        if (Pfn1 != NULL) {

            ASSERT ((Pfn1->u3.e2.ReferenceCount != 0) ||
                    ((Pfn1->u3.e1.Rom == 1) && (CacheType == MmCached)));

            TempPte = ValidKernelPte;

            switch (Pfn1->u3.e1.CacheAttribute) {

                case MiCached:
                    if (CacheAttribute != MiCached) {

                        //
                        // The caller asked for a noncached or writecombined
                        // mapping, but the page is already mapped cached by
                        // someone else.  Override the caller's request in
                        // order to keep the TB page attribute coherent.
                        //

                        MiCacheOverride[0] += 1;
                    }
                    break;

                case MiNonCached:
                    if (CacheAttribute != MiNonCached) {

                        //
                        // The caller asked for a cached or writecombined
                        // mapping, but the page is already mapped noncached
                        // by someone else.  Override the caller's request
                        // in order to keep the TB page attribute coherent.
                        //

                        MiCacheOverride[1] += 1;
                    }
                    MI_DISABLE_CACHING (TempPte);
                    break;

                case MiWriteCombined:
                    if (CacheAttribute != MiWriteCombined) {

                        //
                        // The caller asked for a cached or noncached
                        // mapping, but the page is already mapped
                        // writecombined by someone else.  Override the
                        // caller's request in order to keep the TB page
                        // attribute coherent.
                        //

                        MiCacheOverride[2] += 1;
                    }
                    MI_SET_PTE_WRITE_COMBINE (TempPte);
                    break;

                case MiNotMapped:

                    //
                    // This better be for a page allocated with
                    // MmAllocatePagesForMdl.  Otherwise it might be a
                    // page on the freelist which could subsequently be
                    // given out with a different attribute !
                    //

#if defined (_MI_MORE_THAN_4GB_)
                    ASSERT ((Pfn1->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME) ||
                            (Pfn1->PteAddress == (PVOID) (ULONG_PTR)(X64K | 0x1)) ||
                            (Pfn1->u4.PteFrame == MI_MAGIC_4GB_RECLAIM));
#else
                    ASSERT ((Pfn1->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME) ||
                            (Pfn1->PteAddress == (PVOID) (ULONG_PTR)(X64K | 0x1)));
#endif

                    if (OldIrql == HIGH_LEVEL) {
                        LOCK_PFN2 (OldIrql);
                    }

                    switch (CacheAttribute) {

                        case MiCached:
                            Pfn1->u3.e1.CacheAttribute = MiCached;
                            break;

                        case MiNonCached:
                            Pfn1->u3.e1.CacheAttribute = MiNonCached;
                            MI_DISABLE_CACHING (TempPte);
                            break;

                        case MiWriteCombined:
                            Pfn1->u3.e1.CacheAttribute = MiWriteCombined;
                            MI_SET_PTE_WRITE_COMBINE (TempPte);
                            break;

                        default:
                            ASSERT (FALSE);
                            break;
                    }
                    break;

                default:
                    ASSERT (FALSE);
                    break;
            }

            Pfn1 += 1;
        }

        TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
        MI_WRITE_VALID_PTE (PointerPte, TempPte);
        PointerPte += 1;
        PageFrameIndex += 1;
        NumberOfPages -= 1;
    } while (NumberOfPages != 0);

    if (OldIrql != HIGH_LEVEL) {
        UNLOCK_PFN2 (OldIrql);
    }

    MI_SWEEP_CACHE (CacheAttribute, BaseVa, NumberOfBytes);

    if (MmTrackPtes & 0x1) {

        //
        // First free any zombie blocks as no locks are being held.
        //

        Tracker = MiReleaseDeadPteTrackers ();

        if (Tracker != NULL) {

            RtlGetCallersAddress (&CallingAddress, &CallersCaller);

            TempMdl = (PMDL) &MdlHack;
            TempMdl->MappedSystemVa = BaseVa;
            TempMdl->StartVa = (PVOID)(ULONG_PTR)PhysicalAddress.QuadPart;
            TempMdl->ByteOffset = BYTE_OFFSET(PhysicalAddress.LowPart);
            TempMdl->ByteCount = (ULONG)NumberOfBytes;

            MiInsertPteTracker (Tracker,
                                TempMdl,
                                ADDRESS_AND_SIZE_TO_SPAN_PAGES (PhysicalAddress.LowPart,
                                                                NumberOfBytes),
                                CallingAddress,
                                CallersCaller);
        }
    }

    return BaseVa;
}

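//
// Usage sketch (illustrative, not part of the original source): mapping a
// device's register block.  RawPa and RegisterLength are hypothetical
// values taken from a translated CmResourceTypeMemory descriptor.
//
//     PVOID Registers;
//
//     Registers = MmMapIoSpace (RawPa, RegisterLength, MmNonCached);
//
//     if (Registers != NULL) {
//         ULONG Version;
//
//         Version = READ_REGISTER_ULONG ((PULONG)Registers);
//
//         ...
//
//         MmUnmapIoSpace (Registers, RegisterLength);
//     }
//
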
VOID
MmUnmapIoSpace (
    IN PVOID BaseAddress,
    IN SIZE_T NumberOfBytes
    )

/*++

Routine Description:

    This function unmaps a range of physical addresses which was previously
    mapped via an MmMapIoSpace function call.

Arguments:

    BaseAddress - Supplies the base virtual address where the physical
                  address was previously mapped.

    NumberOfBytes - Supplies the number of bytes which were mapped.

Return Value:

    None.

Environment:

    Kernel mode.  Should be IRQL of APC_LEVEL or below, but unfortunately
    callers are coming in at DISPATCH_LEVEL and it's too late to change the
    rules now.  This means you can never make this routine pagable.

--*/

{
    PFN_NUMBER NumberOfPages;
    PMMPTE FirstPte;

    ASSERT (NumberOfBytes != 0);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (BaseAddress, NumberOfBytes);
    FirstPte = MiGetPteAddress (BaseAddress);
    MiReleaseSystemPtes (FirstPte, (ULONG)NumberOfPages, SystemPteSpace);

    if (MmTrackPtes & 0x1) {
        MiRemovePteTracker (NULL, FirstPte, NumberOfPages);
    }

    return;
}

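//
// Usage sketch (illustrative, not part of the original source): the
// contiguous allocator below is normally reached through the exported
// MmAllocateContiguousMemorySpecifyCache.  A driver needing a 64KB DMA
// buffer below 16MB that must not cross a 64KB boundary might write:
//
//     PHYSICAL_ADDRESS Lowest;
//     PHYSICAL_ADDRESS Highest;
//     PHYSICAL_ADDRESS Boundary;
//     PVOID Buffer;
//
//     Lowest.QuadPart = 0;
//     Highest.QuadPart = 0xFFFFFF;       // below 16MB
//     Boundary.QuadPart = 0x10000;       // 64KB boundary multiple
//
//     Buffer = MmAllocateContiguousMemorySpecifyCache (0x10000,
//                                                      Lowest,
//                                                      Highest,
//                                                      Boundary,
//                                                      MmCached);
//
//     if (Buffer != NULL) {
//         ...
//         MmFreeContiguousMemorySpecifyCache (Buffer, 0x10000, MmCached);
//     }
//
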
PVOID
MiAllocateContiguousMemory (
    IN SIZE_T NumberOfBytes,
    IN PFN_NUMBER LowestAcceptablePfn,
    IN PFN_NUMBER HighestAcceptablePfn,
    IN PFN_NUMBER BoundaryPfn,
    IN MEMORY_CACHING_TYPE CacheType,
    PVOID CallingAddress
    )

/*++

Routine Description:

    This function allocates a range of physically contiguous non-paged
    pool.  It relies on the fact that non-paged pool is built at
    system initialization time from a contiguous range of physical
    memory.  It allocates the specified size of non-paged pool and
    then checks to ensure it is contiguous as pool expansion does
    not maintain the contiguous nature of non-paged pool.

    This routine is designed to be used by a driver's initialization
    routine to allocate a contiguous block of physical memory for
    issuing DMA requests from.

Arguments:

    NumberOfBytes - Supplies the number of bytes to allocate.

    LowestAcceptablePfn - Supplies the lowest page frame number
                          which is valid for the allocation.

    HighestAcceptablePfn - Supplies the highest page frame number
                           which is valid for the allocation.

    BoundaryPfn - Supplies the page frame number multiple the allocation must
                  not cross.  0 indicates it can cross any boundary.

    CacheType - Supplies the type of cache mapping that will be used for the
                memory.

    CallingAddress - Supplies the calling address of the allocator.

Return Value:

    NULL - a contiguous range could not be found to satisfy the request.

    NON-NULL - Returns a pointer (virtual address in the nonpaged portion
               of the system) to the allocated physically contiguous
               memory.

Environment:

    Kernel mode, IRQL of DISPATCH_LEVEL or below.

--*/

{
    PVOID BaseAddress;
    PFN_NUMBER SizeInPages;
    PFN_NUMBER LowestPfn;
    PFN_NUMBER HighestPfn;
    PFN_NUMBER i;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    ASSERT (NumberOfBytes != 0);

    LowestPfn = LowestAcceptablePfn;

#if defined (_MI_MORE_THAN_4GB_)
    if (MiNoLowMemory != 0) {
        if (HighestAcceptablePfn < MiNoLowMemory) {
            return MiAllocateLowMemory (NumberOfBytes,
                                        LowestAcceptablePfn,
                                        HighestAcceptablePfn,
                                        BoundaryPfn,
                                        CallingAddress,
                                        CacheType,
                                        'tnoC');
        }
        LowestPfn = MiNoLowMemory;
    }
#endif

    CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, 0);

    //
    // N.B. This setting of SizeInPages to exactly the request size
    // means the non-NULL return value from MiCheckForContiguousMemory
    // is guaranteed to be the BaseAddress.  If this size is ever
    // changed, then the non-NULL return value must be checked and
    // split/returned accordingly.
    //

    SizeInPages = BYTES_TO_PAGES (NumberOfBytes);

    HighestPfn = HighestAcceptablePfn;

    if (CacheAttribute == MiCached) {

        BaseAddress = ExAllocatePoolWithTag (NonPagedPoolCacheAligned,
                                             NumberOfBytes,
                                             'mCmM');

        if (BaseAddress != NULL) {

            if (MiCheckForContiguousMemory (BaseAddress,
                                            SizeInPages,
                                            SizeInPages,
                                            LowestPfn,
                                            HighestPfn,
                                            BoundaryPfn,
                                            CacheAttribute)) {

                return BaseAddress;
            }

            //
            // The allocation from pool does not meet the contiguous
            // requirements.  Free the allocation and see if any of
            // the free pool pages do.
            //

            ExFreePool (BaseAddress);
        }
    }

    if (KeGetCurrentIrql() > APC_LEVEL) {
        return NULL;
    }

    BaseAddress = NULL;

    i = 3;

    InterlockedIncrement (&MiDelayPageFaults);

    do {

        BaseAddress = MiFindContiguousMemory (LowestPfn,
                                              HighestPfn,
                                              BoundaryPfn,
                                              SizeInPages,
                                              CacheType,
                                              CallingAddress);

        if ((BaseAddress != NULL) || (i == 0)) {
  4830. break;
  4831. }
  4832. //
  4833. // Attempt to move pages to the standby list. This is done with
4834. // gradually increasing aggressiveness so as not to prematurely
4835. // drain modified writes unless it's truly needed. This is because
4836. // the writing can be expensive performance-wise if drivers
4837. // are calling this routine every few seconds (and some really do).
  4838. //
  4839. switch (i) {
  4840. case 3:
  4841. MmEmptyAllWorkingSets ();
  4842. break;
  4843. case 2:
  4844. MiFlushAllPages ();
  4845. KeDelayExecutionThread (KernelMode,
  4846. FALSE,
  4847. (PLARGE_INTEGER)&MmHalfSecond);
  4848. break;
  4849. default:
  4850. MmEmptyAllWorkingSets ();
  4851. MiFlushAllPages ();
  4852. KeDelayExecutionThread (KernelMode,
  4853. FALSE,
  4854. (PLARGE_INTEGER)&MmOneSecond);
  4855. break;
  4856. }
  4857. i -= 1;
  4858. } while (TRUE);
  4859. InterlockedDecrement (&MiDelayPageFaults);
  4860. return BaseAddress;
  4861. }
  4862. PVOID
  4863. MmAllocateContiguousMemorySpecifyCache (
  4864. IN SIZE_T NumberOfBytes,
  4865. IN PHYSICAL_ADDRESS LowestAcceptableAddress,
  4866. IN PHYSICAL_ADDRESS HighestAcceptableAddress,
  4867. IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL,
  4868. IN MEMORY_CACHING_TYPE CacheType
  4869. )
  4870. /*++
  4871. Routine Description:
  4872. This function allocates a range of physically contiguous non-cached,
  4873. non-paged memory. This is accomplished by using MmAllocateContiguousMemory
  4874. which uses nonpaged pool virtual addresses to map the found memory chunk.
  4875. Then this function establishes another map to the same physical addresses,
  4876. but this alternate map is initialized as non-cached. All references by
  4877. our caller will be done through this alternate map.
  4878. This routine is designed to be used by a driver's initialization
  4879. routine to allocate a contiguous block of noncached physical memory for
  4880. things like the AGP GART.
  4881. Arguments:
  4882. NumberOfBytes - Supplies the number of bytes to allocate.
  4883. LowestAcceptableAddress - Supplies the lowest physical address
  4884. which is valid for the allocation. For
  4885. example, if the device can only reference
  4886. physical memory in the 8M to 16MB range, this
  4887. value would be set to 0x800000 (8Mb).
  4888. HighestAcceptableAddress - Supplies the highest physical address
  4889. which is valid for the allocation. For
  4890. example, if the device can only reference
  4891. physical memory below 16MB, this
  4892. value would be set to 0xFFFFFF (16Mb - 1).
4893. BoundaryAddressMultiple - Supplies the physical address multiple this
4894. allocation must not cross.
CacheType - Supplies the type of cache mapping that will be used for
the memory.
  4895. Return Value:
  4896. NULL - a contiguous range could not be found to satisfy the request.
  4897. NON-NULL - Returns a pointer (virtual address in the nonpaged portion
  4898. of the system) to the allocated physically contiguous
  4899. memory.
  4900. Environment:
  4901. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  4902. --*/
  4903. {
  4904. PVOID BaseAddress;
  4905. PFN_NUMBER LowestPfn;
  4906. PFN_NUMBER HighestPfn;
  4907. PFN_NUMBER BoundaryPfn;
  4908. PVOID CallingAddress;
  4909. PVOID CallersCaller;
  4910. RtlGetCallersAddress (&CallingAddress, &CallersCaller);
  4911. ASSERT (NumberOfBytes != 0);
  4912. LowestPfn = (PFN_NUMBER)(LowestAcceptableAddress.QuadPart >> PAGE_SHIFT);
  4913. if (BYTE_OFFSET(LowestAcceptableAddress.LowPart)) {
  4914. LowestPfn += 1;
  4915. }
  4916. if (BYTE_OFFSET(BoundaryAddressMultiple.LowPart)) {
  4917. return NULL;
  4918. }
  4919. BoundaryPfn = (PFN_NUMBER)(BoundaryAddressMultiple.QuadPart >> PAGE_SHIFT);
  4920. HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
  4921. if (HighestPfn > MmHighestPossiblePhysicalPage) {
  4922. HighestPfn = MmHighestPossiblePhysicalPage;
  4923. }
  4924. if (LowestPfn > HighestPfn) {
  4925. //
  4926. // The caller's range is beyond what physically exists, it cannot
  4927. // succeed. Bail now to avoid an expensive fruitless search.
  4928. //
  4929. return NULL;
  4930. }
  4931. BaseAddress = MiAllocateContiguousMemory (NumberOfBytes,
  4932. LowestPfn,
  4933. HighestPfn,
  4934. BoundaryPfn,
  4935. CacheType,
  4936. CallingAddress);
  4937. return BaseAddress;
  4938. }
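//
// Illustrative sketch (not part of the original module): one way a driver
// might call the routine above. The 8MB-16MB range, the 64KB boundary and
// the buffer size are hypothetical; the same size and cache type must be
// supplied again when the buffer is freed.
//
VOID
ExampleWriteCombinedBuffer (
    VOID
    )
{
    PHYSICAL_ADDRESS LowestAcceptable;
    PHYSICAL_ADDRESS HighestAcceptable;
    PHYSICAL_ADDRESS BoundaryMultiple;
    PVOID Buffer;

    LowestAcceptable.QuadPart = 0x800000;       // 8MB (hypothetical)
    HighestAcceptable.QuadPart = 0xFFFFFF;      // 16MB - 1 (hypothetical)
    BoundaryMultiple.QuadPart = 0x10000;        // never cross a 64KB line

    Buffer = MmAllocateContiguousMemorySpecifyCache (0x10000,
                                                     LowestAcceptable,
                                                     HighestAcceptable,
                                                     BoundaryMultiple,
                                                     MmWriteCombined);

    if (Buffer != NULL) {

        RtlZeroMemory (Buffer, 0x10000);

        //
        // The same size and cache type must accompany the free.
        //

        MmFreeContiguousMemorySpecifyCache (Buffer, 0x10000, MmWriteCombined);
    }
}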
  4939. PVOID
  4940. MmAllocateContiguousMemory (
  4941. IN SIZE_T NumberOfBytes,
  4942. IN PHYSICAL_ADDRESS HighestAcceptableAddress
  4943. )
  4944. /*++
  4945. Routine Description:
  4946. This function allocates a range of physically contiguous non-paged pool.
  4947. This routine is designed to be used by a driver's initialization
  4948. routine to allocate a contiguous block of physical memory for
  4949. issuing DMA requests from.
  4950. Arguments:
  4951. NumberOfBytes - Supplies the number of bytes to allocate.
  4952. HighestAcceptableAddress - Supplies the highest physical address
  4953. which is valid for the allocation. For
  4954. example, if the device can only reference
  4955. physical memory in the lower 16MB this
  4956. value would be set to 0xFFFFFF (16Mb - 1).
  4957. Return Value:
  4958. NULL - a contiguous range could not be found to satisfy the request.
  4959. NON-NULL - Returns a pointer (virtual address in the nonpaged portion
  4960. of the system) to the allocated physically contiguous
  4961. memory.
  4962. Environment:
  4963. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  4964. --*/
  4965. {
  4966. PFN_NUMBER HighestPfn;
  4967. PVOID CallingAddress;
  4968. PVOID VirtualAddress;
  4969. PVOID CallersCaller;
  4970. RtlGetCallersAddress (&CallingAddress, &CallersCaller);
  4971. HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
  4972. if (HighestPfn > MmHighestPossiblePhysicalPage) {
  4973. HighestPfn = MmHighestPossiblePhysicalPage;
  4974. }
  4975. VirtualAddress = MiAllocateContiguousMemory (NumberOfBytes,
  4976. 0,
  4977. HighestPfn,
  4978. 0,
  4979. MmCached,
  4980. CallingAddress);
  4981. return VirtualAddress;
  4982. }
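//
// Illustrative sketch (not part of the original module): the classic use
// of MmAllocateContiguousMemory for an ISA-style device that can only
// address the low 16MB. The device programming step is a hypothetical
// placeholder.
//
VOID
ExampleAllocateLowDmaBuffer (
    VOID
    )
{
    PHYSICAL_ADDRESS HighestAcceptable;
    PHYSICAL_ADDRESS DmaAddress;
    PVOID Buffer;

    HighestAcceptable.QuadPart = 0xFFFFFF;      // 16MB - 1

    Buffer = MmAllocateContiguousMemory (PAGE_SIZE, HighestAcceptable);

    if (Buffer != NULL) {

        //
        // The buffer is virtually (and physically) contiguous, so one
        // MmGetPhysicalAddress call suffices for the whole page.
        //

        DmaAddress = MmGetPhysicalAddress (Buffer);

        // ... program the (hypothetical) device with DmaAddress.LowPart ...

        MmFreeContiguousMemory (Buffer);
    }
}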
  4983. #if defined (_WIN64)
  4984. #define SPECIAL_POOL_ADDRESS(p) \
  4985. ((((p) >= MmSpecialPoolStart) && ((p) < MmSpecialPoolEnd)) || \
  4986. (((p) >= MmSessionSpecialPoolStart) && ((p) < MmSessionSpecialPoolEnd)))
  4987. #else
  4988. #define SPECIAL_POOL_ADDRESS(p) \
  4989. (((p) >= MmSpecialPoolStart) && ((p) < MmSpecialPoolEnd))
  4990. #endif
  4991. VOID
  4992. MmFreeContiguousMemory (
  4993. IN PVOID BaseAddress
  4994. )
  4995. /*++
  4996. Routine Description:
  4997. This function deallocates a range of physically contiguous non-paged
  4998. pool which was allocated with the MmAllocateContiguousMemory function.
  4999. Arguments:
  5000. BaseAddress - Supplies the base virtual address where the physical
  5001. address was previously mapped.
  5002. Return Value:
  5003. None.
  5004. Environment:
  5005. Kernel mode, IRQL of APC_LEVEL or below.
  5006. --*/
  5007. {
  5008. KIRQL OldIrql;
  5009. ULONG SizeInPages;
  5010. PMMPTE PointerPte;
  5011. PFN_NUMBER PageFrameIndex;
  5012. PFN_NUMBER LastPage;
  5013. PMMPFN Pfn1;
  5014. PMMPFN StartPfn;
  5015. PAGED_CODE();
  5016. #if defined (_MI_MORE_THAN_4GB_)
  5017. if (MiNoLowMemory != 0) {
  5018. if (MiFreeLowMemory (BaseAddress, 'tnoC') == TRUE) {
  5019. return;
  5020. }
  5021. }
  5022. #endif
  5023. if (((BaseAddress >= MmNonPagedPoolStart) &&
  5024. (BaseAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes))) ||
  5025. ((BaseAddress >= MmNonPagedPoolExpansionStart) &&
  5026. (BaseAddress < MmNonPagedPoolEnd)) ||
  5027. (SPECIAL_POOL_ADDRESS(BaseAddress))) {
  5028. ExFreePool (BaseAddress);
  5029. }
  5030. else {
  5031. //
  5032. // The contiguous memory being freed may be the target of a delayed
  5033. // unlock. Since these pages may be immediately released, force
  5034. // any pending delayed actions to occur now.
  5035. //
  5036. MiDeferredUnlockPages (0);
  5037. PointerPte = MiGetPteAddress (BaseAddress);
  5038. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  5039. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  5040. if (Pfn1->u3.e1.StartOfAllocation == 0) {
  5041. KeBugCheckEx (BAD_POOL_CALLER,
  5042. 0x60,
  5043. (ULONG_PTR)BaseAddress,
  5044. 0,
  5045. 0);
  5046. }
  5047. StartPfn = Pfn1;
  5048. Pfn1->u3.e1.StartOfAllocation = 0;
  5049. Pfn1 -= 1;
  5050. do {
  5051. Pfn1 += 1;
  5052. ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
  5053. ASSERT (Pfn1->u2.ShareCount == 1);
  5054. ASSERT (Pfn1->PteAddress == PointerPte);
  5055. ASSERT (Pfn1->OriginalPte.u.Long == MM_DEMAND_ZERO_WRITE_PTE);
  5056. ASSERT (Pfn1->u4.PteFrame == MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress(PointerPte)));
  5057. ASSERT (Pfn1->u3.e1.PageLocation == ActiveAndValid);
  5058. ASSERT (Pfn1->u4.VerifierAllocation == 0);
  5059. ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);
  5060. ASSERT (Pfn1->u3.e1.PrototypePte == 0);
  5061. MI_SET_PFN_DELETED(Pfn1);
  5062. PointerPte += 1;
  5063. } while (Pfn1->u3.e1.EndOfAllocation == 0);
  5064. Pfn1->u3.e1.EndOfAllocation = 0;
  5065. SizeInPages = (ULONG)(Pfn1 - StartPfn + 1);
  5066. //
  5067. // Notify deadlock verifier that a region that can contain locks
  5068. // will become invalid.
  5069. //
  5070. if (MmVerifierData.Level & DRIVER_VERIFIER_DEADLOCK_DETECTION) {
  5071. VerifierDeadlockFreePool (BaseAddress, SizeInPages << PAGE_SHIFT);
  5072. }
  5073. //
  5074. // Release the mapping.
  5075. //
  5076. MmUnmapIoSpace (BaseAddress, SizeInPages << PAGE_SHIFT);
  5077. //
  5078. // Release the actual pages.
  5079. //
  5080. LastPage = PageFrameIndex + SizeInPages;
  5081. LOCK_PFN (OldIrql);
  5082. do {
  5083. MiDecrementShareCount (PageFrameIndex);
  5084. PageFrameIndex += 1;
  5085. } while (PageFrameIndex < LastPage);
  5086. MmResidentAvailablePages += SizeInPages;
  5087. MM_BUMP_COUNTER(20, SizeInPages);
  5088. UNLOCK_PFN (OldIrql);
  5089. MiReturnCommitment (SizeInPages);
  5090. }
  5091. }
  5092. VOID
  5093. MmFreeContiguousMemorySpecifyCache (
  5094. IN PVOID BaseAddress,
  5095. IN SIZE_T NumberOfBytes,
  5096. IN MEMORY_CACHING_TYPE CacheType
  5097. )
  5098. /*++
  5099. Routine Description:
  5100. This function deallocates a range of noncached memory in
  5101. the non-paged portion of the system address space.
  5102. Arguments:
5103. BaseAddress - Supplies the base virtual address where the noncached memory was previously allocated.
  5104. NumberOfBytes - Supplies the number of bytes allocated to the request.
  5105. This must be the same number that was obtained with
  5106. the MmAllocateContiguousMemorySpecifyCache call.
5107. CacheType - Supplies the cache type used when the caller made the
  5108. MmAllocateContiguousMemorySpecifyCache call.
  5109. Return Value:
  5110. None.
  5111. Environment:
  5112. Kernel mode, IRQL of APC_LEVEL or below.
  5113. --*/
  5114. {
  5115. UNREFERENCED_PARAMETER (NumberOfBytes);
  5116. UNREFERENCED_PARAMETER (CacheType);
  5117. MmFreeContiguousMemory (BaseAddress);
  5118. }
  5119. PVOID
  5120. MmAllocateIndependentPages (
  5121. IN SIZE_T NumberOfBytes,
  5122. IN ULONG Node
  5123. )
  5124. /*++
  5125. Routine Description:
  5126. This function allocates a range of virtually contiguous nonpaged pages
  5127. without using superpages. This allows the caller to apply independent
  5128. page protections to each page.
  5129. Arguments:
  5130. NumberOfBytes - Supplies the number of bytes to allocate.
  5131. Node - Supplies the preferred node number for the backing physical pages.
  5132. If pages on the preferred node are not available, any page will
  5133. be used. -1 indicates no preferred node.
  5134. Return Value:
  5135. The virtual address of the memory or NULL if none could be allocated.
  5136. Environment:
  5137. Kernel mode, IRQL of APC_LEVEL or below.
  5138. --*/
  5139. {
  5140. ULONG PageColor;
  5141. PFN_NUMBER NumberOfPages;
  5142. PMMPTE PointerPte;
  5143. MMPTE TempPte;
  5144. PFN_NUMBER PageFrameIndex;
  5145. PVOID BaseAddress;
  5146. KIRQL OldIrql;
  5147. ASSERT ((Node == (ULONG)-1) || (Node < KeNumberNodes));
  5148. NumberOfPages = BYTES_TO_PAGES (NumberOfBytes);
  5149. PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages, SystemPteSpace);
  5150. if (PointerPte == NULL) {
  5151. return NULL;
  5152. }
  5153. if (MiChargeCommitment (NumberOfPages, NULL) == FALSE) {
  5154. MiReleaseSystemPtes (PointerPte, (ULONG)NumberOfPages, SystemPteSpace);
  5155. return NULL;
  5156. }
  5157. BaseAddress = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
  5158. LOCK_PFN (OldIrql);
  5159. if ((SPFN_NUMBER)NumberOfPages > MI_NONPAGABLE_MEMORY_AVAILABLE()) {
  5160. UNLOCK_PFN (OldIrql);
  5161. MiReturnCommitment (NumberOfPages);
  5162. MiReleaseSystemPtes (PointerPte, (ULONG)NumberOfPages, SystemPteSpace);
  5163. return NULL;
  5164. }
  5165. MM_TRACK_COMMIT (MM_DBG_COMMIT_INDEPENDENT_PAGES, NumberOfPages);
  5166. MmResidentAvailablePages -= NumberOfPages;
  5167. MM_BUMP_COUNTER(28, NumberOfPages);
  5168. do {
  5169. ASSERT (PointerPte->u.Hard.Valid == 0);
  5170. MiEnsureAvailablePageOrWait (NULL, NULL);
  5171. if (Node == (ULONG)-1) {
  5172. PageColor = MI_GET_PAGE_COLOR_FROM_PTE (PointerPte);
  5173. }
  5174. else {
  5175. PageColor = (((MI_SYSTEM_PAGE_COLOR++) & MmSecondaryColorMask) |
  5176. (Node << MmSecondaryColorNodeShift));
  5177. }
  5178. PageFrameIndex = MiRemoveAnyPage (PageColor);
  5179. MI_MAKE_VALID_PTE (TempPte,
  5180. PageFrameIndex,
  5181. MM_READWRITE,
  5182. PointerPte);
  5183. MI_SET_PTE_DIRTY (TempPte);
  5184. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  5185. MiInitializePfn (PageFrameIndex, PointerPte, 1);
  5186. PointerPte += 1;
  5187. NumberOfPages -= 1;
  5188. } while (NumberOfPages != 0);
  5189. UNLOCK_PFN (OldIrql);
  5190. NumberOfPages = BYTES_TO_PAGES (NumberOfBytes);
  5191. return BaseAddress;
  5192. }
  5193. BOOLEAN
  5194. MmSetPageProtection (
  5195. IN PVOID VirtualAddress,
  5196. IN SIZE_T NumberOfBytes,
  5197. IN ULONG NewProtect
  5198. )
  5199. /*++
  5200. Routine Description:
  5201. This function sets the specified virtual address range to the desired
  5202. protection. This assumes that the virtual addresses are backed by PTEs
  5203. which can be set (ie: not in kseg0 or large pages).
  5204. Arguments:
  5205. VirtualAddress - Supplies the start address to protect.
  5206. NumberOfBytes - Supplies the number of bytes to set.
  5207. NewProtect - Supplies the protection to set the pages to (PAGE_XX).
  5208. Return Value:
  5209. TRUE if the protection was applied, FALSE if not.
  5210. Environment:
  5211. Kernel mode, IRQL of APC_LEVEL or below.
  5212. --*/
  5213. {
  5214. PFN_NUMBER i;
  5215. PFN_NUMBER NumberOfPages;
  5216. PMMPTE PointerPte;
  5217. MMPTE TempPte;
  5218. MMPTE NewPteContents;
  5219. KIRQL OldIrql;
  5220. ULONG ProtectionMask;
  5221. ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
  5222. if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) {
  5223. return FALSE;
  5224. }
  5225. ProtectionMask = MiMakeProtectionMask (NewProtect);
  5226. if (ProtectionMask == MM_INVALID_PROTECTION) {
  5227. return FALSE;
  5228. }
  5229. PointerPte = MiGetPteAddress (VirtualAddress);
  5230. NumberOfPages = BYTES_TO_PAGES (NumberOfBytes);
  5231. LOCK_PFN (OldIrql);
  5232. for (i = 0; i < NumberOfPages; i += 1) {
  5233. TempPte.u.Long = PointerPte->u.Long;
  5234. MI_MAKE_VALID_PTE (NewPteContents,
  5235. TempPte.u.Hard.PageFrameNumber,
  5236. ProtectionMask,
  5237. PointerPte);
  5238. NewPteContents.u.Hard.Dirty = TempPte.u.Hard.Dirty;
  5239. KeFlushSingleTb ((PVOID)((PUCHAR)VirtualAddress + (i << PAGE_SHIFT)),
  5240. TRUE,
  5241. TRUE,
  5242. (PHARDWARE_PTE)PointerPte,
  5243. NewPteContents.u.Flush);
  5244. PointerPte += 1;
  5245. }
  5246. UNLOCK_PFN (OldIrql);
  5247. return TRUE;
  5248. }
  5249. VOID
  5250. MmFreeIndependentPages (
  5251. IN PVOID VirtualAddress,
  5252. IN SIZE_T NumberOfBytes
  5253. )
  5254. /*++
  5255. Routine Description:
  5256. Returns pages previously allocated with MmAllocateIndependentPages.
  5257. Arguments:
  5258. VirtualAddress - Supplies the virtual address to free.
  5259. NumberOfBytes - Supplies the number of bytes to free.
  5260. Return Value:
  5261. None.
  5262. Environment:
  5263. Kernel mode, IRQL of APC_LEVEL or below.
  5264. --*/
  5265. {
  5266. ULONG i;
  5267. KIRQL OldIrql;
  5268. MMPTE PteContents;
  5269. PMMPTE PointerPte;
  5270. PMMPTE BasePte;
  5271. PMMPFN Pfn1;
  5272. PFN_NUMBER NumberOfPages;
  5273. PFN_NUMBER PageFrameIndex;
  5274. ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
  5275. NumberOfPages = BYTES_TO_PAGES (NumberOfBytes);
  5276. PointerPte = MiGetPteAddress (VirtualAddress);
  5277. BasePte = PointerPte;
  5278. LOCK_PFN (OldIrql);
  5279. for (i = 0; i < NumberOfPages; i += 1) {
  5280. PteContents = *PointerPte;
  5281. ASSERT (PteContents.u.Hard.Valid == 1);
  5282. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
  5283. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  5284. MiDecrementShareAndValidCount (Pfn1->u4.PteFrame);
  5285. MI_SET_PFN_DELETED (Pfn1);
  5286. MiDecrementShareCountOnly (MI_GET_PAGE_FRAME_FROM_PTE (&PteContents));
  5287. PointerPte += 1;
  5288. }
  5289. //
  5290. // Update the count of resident available pages.
  5291. //
  5292. MmResidentAvailablePages += NumberOfPages;
  5293. MM_BUMP_COUNTER(30, NumberOfPages);
  5294. UNLOCK_PFN (OldIrql);
  5295. //
  5296. // Return PTEs and commitment.
  5297. //
  5298. MiReleaseSystemPtes (BasePte, (ULONG)NumberOfPages, SystemPteSpace);
  5299. MiReturnCommitment (NumberOfPages);
  5300. MM_TRACK_COMMIT (MM_DBG_COMMIT_INDEPENDENT_PAGES, NumberOfPages);
  5301. }
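//
// Illustrative sketch (not part of the original module) tying the three
// routines above together: because MmAllocateIndependentPages maps each
// page with its own PTE (no superpages), a caller inside Mm can make one
// page of the allocation read-only and later restore it before freeing.
// The two-page size and guard-page idea are hypothetical.
//
VOID
ExampleIndependentPageProtection (
    VOID
    )
{
    PVOID Base;

    Base = MmAllocateIndependentPages (2 * PAGE_SIZE, (ULONG)-1);

    if (Base == NULL) {
        return;
    }

    //
    // Write-protect the second page (eg: to catch stray stores).
    //

    if (MmSetPageProtection ((PVOID)((PUCHAR)Base + PAGE_SIZE),
                             PAGE_SIZE,
                             PAGE_READONLY) == TRUE) {

        //
        // ... use the pages ...
        //

        MmSetPageProtection ((PVOID)((PUCHAR)Base + PAGE_SIZE),
                             PAGE_SIZE,
                             PAGE_READWRITE);
    }

    MmFreeIndependentPages (Base, 2 * PAGE_SIZE);
}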
  5302. PFN_NUMBER MiLastCallLowPage;
  5303. PFN_NUMBER MiLastCallHighPage;
  5304. ULONG MiLastCallColor;
  5305. PMDL
  5306. MmAllocatePagesForMdl (
  5307. IN PHYSICAL_ADDRESS LowAddress,
  5308. IN PHYSICAL_ADDRESS HighAddress,
  5309. IN PHYSICAL_ADDRESS SkipBytes,
  5310. IN SIZE_T TotalBytes
  5311. )
  5312. /*++
  5313. Routine Description:
  5314. This routine searches the PFN database for free, zeroed or standby pages
  5315. to satisfy the request. This does not map the pages - it just allocates
  5316. them and puts them into an MDL. It is expected that our caller will
  5317. map the MDL as needed.
  5318. NOTE: this routine may return an MDL mapping a smaller number of bytes
  5319. than the amount requested. It is the caller's responsibility to check the
  5320. MDL upon return for the size actually allocated.
  5321. These pages comprise physical non-paged memory and are zero-filled.
  5322. This routine is designed to be used by an AGP driver to obtain physical
  5323. memory in a specified range since hardware may provide substantial
  5324. performance wins depending on where the backing memory is allocated.
  5325. Because the caller may use these pages for a noncached mapping, care is
  5326. taken to never allocate any pages that reside in a large page (in order
  5327. to prevent TB incoherency of the same page being mapped by multiple
  5328. translations with different attributes).
  5329. Arguments:
  5330. LowAddress - Supplies the low physical address of the first range that
  5331. the allocated pages can come from.
  5332. HighAddress - Supplies the high physical address of the first range that
  5333. the allocated pages can come from.
  5334. SkipBytes - Number of bytes to skip (from the Low Address) to get to the
  5335. next physical address range that allocated pages can come from.
  5336. TotalBytes - Supplies the number of bytes to allocate.
  5337. Return Value:
  5338. MDL - An MDL mapping a range of pages in the specified range.
  5339. This may map less memory than the caller requested if the full amount
  5340. is not currently available.
  5341. NULL - No pages in the specified range OR not enough virtually contiguous
  5342. nonpaged pool for the MDL is available at this time.
  5343. Environment:
  5344. Kernel mode, IRQL of APC_LEVEL or below.
  5345. --*/
  5346. {
  5347. PMDL MemoryDescriptorList;
  5348. PMDL MemoryDescriptorList2;
  5349. PMMPFN Pfn1;
  5350. PMMPFN PfnNextColored;
  5351. PMMPFN PfnNextFlink;
  5352. PMMPFN PfnLastColored;
  5353. KIRQL OldIrql;
  5354. PFN_NUMBER start;
  5355. PFN_NUMBER count;
  5356. PFN_NUMBER Page;
  5357. PFN_NUMBER NextPage;
  5358. PFN_NUMBER found;
  5359. PFN_NUMBER BasePage;
  5360. PFN_NUMBER LowPage;
  5361. PFN_NUMBER HighPage;
  5362. PFN_NUMBER SizeInPages;
  5363. PFN_NUMBER MdlPageSpan;
  5364. PFN_NUMBER SkipPages;
  5365. PFN_NUMBER MaxPages;
  5366. PPFN_NUMBER MdlPage;
  5367. PPFN_NUMBER LastMdlPage;
  5368. ULONG Color;
  5369. PMMCOLOR_TABLES ColorHead;
  5370. MMLISTS MemoryList;
  5371. PPFN_NUMBER ZeroRunStart[2];
  5372. PPFN_NUMBER ZeroRunEnd[2];
  5373. PFN_NUMBER LowPage1;
  5374. PFN_NUMBER HighPage1;
  5375. LOGICAL PagePlacementOk;
  5376. PFN_NUMBER PageNextColored;
  5377. PFN_NUMBER PageNextFlink;
  5378. PFN_NUMBER PageLastColored;
  5379. PMMPFNLIST ListHead;
  5380. PPFN_NUMBER ColoredPagesLeftToScanBase;
  5381. PPFN_NUMBER ColoredPagesLeftToScan;
  5382. ULONG ColorHeadsDrained;
  5383. ULONG RunsToZero;
  5384. LOGICAL MoreNodePasses;
  5385. ULONG ColorCount;
  5386. ULONG BaseColor;
  5387. #if DBG
  5388. ULONG FinishedCount;
  5389. #endif
  5390. ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
  5391. //
  5392. // The skip increment must be a page-size multiple.
  5393. //
  5394. if (BYTE_OFFSET(SkipBytes.LowPart)) {
  5395. return NULL;
  5396. }
  5397. LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);
  5398. HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
  5399. if (HighPage > MmHighestPossiblePhysicalPage) {
  5400. HighPage = MmHighestPossiblePhysicalPage;
  5401. }
  5402. //
  5403. // Maximum allocation size is constrained by the MDL ByteCount field.
  5404. //
  5405. if (TotalBytes > (SIZE_T)((ULONG)(MAXULONG - PAGE_SIZE))) {
  5406. TotalBytes = (SIZE_T)((ULONG)(MAXULONG - PAGE_SIZE));
  5407. }
  5408. SizeInPages = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
  5409. SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);
  5410. BasePage = LowPage;
  5411. //
  5412. // Check without the PFN lock as the actual number of pages to get will
  5413. // be recalculated later while holding the lock.
  5414. //
  5415. MaxPages = MI_NONPAGABLE_MEMORY_AVAILABLE() - 1024;
  5416. if ((SPFN_NUMBER)MaxPages <= 0) {
  5417. SizeInPages = 0;
  5418. }
  5419. else if (SizeInPages > MaxPages) {
  5420. SizeInPages = MaxPages;
  5421. }
  5422. if (SizeInPages == 0) {
  5423. return NULL;
  5424. }
  5425. #if DBG
  5426. if (SizeInPages < (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes)) {
  5427. if (MiPrintAwe != 0) {
  5428. DbgPrint("MmAllocatePagesForMdl1: unable to get %p pages, trying for %p instead\n",
  5429. ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes),
  5430. SizeInPages);
  5431. }
  5432. }
  5433. #endif
  5434. //
  5435. // Allocate an MDL to return the pages in.
  5436. //
  5437. do {
  5438. MemoryDescriptorList = MmCreateMdl (NULL,
  5439. NULL,
  5440. SizeInPages << PAGE_SHIFT);
  5441. if (MemoryDescriptorList != NULL) {
  5442. break;
  5443. }
  5444. SizeInPages -= (SizeInPages >> 4);
  5445. } while (SizeInPages != 0);
  5446. if (MemoryDescriptorList == NULL) {
  5447. return NULL;
  5448. }
  5449. //
  5450. // Ensure there is enough commit prior to allocating the pages.
  5451. //
  5452. if (MiChargeCommitment (SizeInPages, NULL) == FALSE) {
  5453. ExFreePool (MemoryDescriptorList);
  5454. return NULL;
  5455. }
  5456. //
  5457. // Allocate a list of colored anchors.
  5458. //
  5459. ColoredPagesLeftToScanBase = (PPFN_NUMBER) ExAllocatePoolWithTag (NonPagedPool,
  5460. MmSecondaryColors * sizeof (PFN_NUMBER),
  5461. 'ldmM');
  5462. if (ColoredPagesLeftToScanBase == NULL) {
  5463. ExFreePool (MemoryDescriptorList);
  5464. MiReturnCommitment (SizeInPages);
  5465. return NULL;
  5466. }
  5467. MdlPageSpan = SizeInPages;
  5468. //
  5469. // Recalculate the total size while holding the PFN lock.
  5470. //
  5471. start = 0;
  5472. found = 0;
  5473. MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  5474. RunsToZero = 0;
  5475. MmLockPagableSectionByHandle (ExPageLockHandle);
  5476. ExAcquireFastMutex (&MmDynamicMemoryMutex);
  5477. LOCK_PFN (OldIrql);
  5478. MiDeferredUnlockPages (MI_DEFER_PFN_HELD);
  5479. MaxPages = MI_NONPAGABLE_MEMORY_AVAILABLE() - 1024;
  5480. if ((SPFN_NUMBER)MaxPages <= 0) {
  5481. SizeInPages = 0;
  5482. }
  5483. else if (SizeInPages > MaxPages) {
  5484. SizeInPages = MaxPages;
  5485. }
  5486. //
  5487. // Systems utilizing memory compression may have more pages on the zero,
  5488. // free and standby lists than we want to give out. Explicitly check
  5489. // MmAvailablePages instead (and recheck whenever the PFN lock is released
  5490. // and reacquired).
  5491. //
  5492. if (SizeInPages > MmAvailablePages) {
  5493. SizeInPages = MmAvailablePages;
  5494. }
  5495. if (SizeInPages == 0) {
  5496. UNLOCK_PFN (OldIrql);
  5497. ExReleaseFastMutex (&MmDynamicMemoryMutex);
  5498. MmUnlockPagableImageSection (ExPageLockHandle);
  5499. ExFreePool (MemoryDescriptorList);
  5500. MiReturnCommitment (MdlPageSpan);
  5501. ExFreePool (ColoredPagesLeftToScanBase);
  5502. return NULL;
  5503. }
  5504. MM_TRACK_COMMIT (MM_DBG_COMMIT_MDL_PAGES, SizeInPages);
  5505. if ((MiLastCallLowPage != LowPage) || (MiLastCallHighPage != HighPage)) {
  5506. MiLastCallColor = 0;
  5507. }
  5508. MiLastCallLowPage = LowPage;
  5509. MiLastCallHighPage = HighPage;
  5510. //
  5511. // Charge resident available pages now for all the pages so the PFN lock
  5512. // can be released between the loops below. Excess charging is returned
  5513. // at the conclusion of the loops.
  5514. //
  5515. MmMdlPagesAllocated += SizeInPages;
  5516. MmResidentAvailablePages -= SizeInPages;
  5517. MM_BUMP_COUNTER(34, SizeInPages);
  5518. do {
  5519. //
  5520. // Grab all zeroed (and then free) pages first directly from the
  5521. // colored lists to avoid multiple walks down these singly linked lists.
  5522. // Then snatch transition pages as needed. In addition to optimizing
  5523. // the speed of the removals this also avoids cannibalizing the page
  5524. // cache unless it's absolutely needed.
  5525. //
  5526. MoreNodePasses = FALSE;
  5527. ColorCount = MmSecondaryColors;
  5528. BaseColor = 0;
  5529. #if defined(MI_MULTINODE)
  5530. if (KeNumberNodes > 1) {
  5531. PKNODE Node;
  5532. Node = KeGetCurrentNode();
  5533. if ((Node->FreeCount[ZeroedPageList]) ||
  5534. (Node->FreeCount[FreePageList])) {
  5535. //
  5536. // There are available pages on this node. Restrict search.
  5537. //
  5538. MoreNodePasses = TRUE;
  5539. ColorCount = MmSecondaryColorMask + 1;
  5540. BaseColor = Node->MmShiftedColor;
  5541. ASSERT(ColorCount == MmSecondaryColors / KeNumberNodes);
  5542. }
  5543. }
  5544. do {
  5545. //
  5546. // Loop: 1st pass restricted to node, 2nd pass unrestricted.
  5547. //
  5548. #endif
  5549. MemoryList = ZeroedPageList;
  5550. do {
  5551. //
  5552. // Scan the zero list and then the free list.
  5553. //
  5554. ASSERT (MemoryList <= FreePageList);
  5555. ListHead = MmPageLocationList[MemoryList];
  5556. //
  5557. // Initialize the loop iteration controls. Clearly pages
  5558. // can be added or removed from the colored lists when we
  5559. // deliberately drop the PFN lock below (just to be a good
  5560. // citizen), but even if we never released the lock, we wouldn't
  5561. // have scanned more than the colorhead count anyway, so
  5562. // this is a much better way to go.
  5563. //
  5564. ColorHeadsDrained = 0;
  5565. ColorHead = &MmFreePagesByColor[MemoryList][BaseColor];
  5566. ColoredPagesLeftToScan = &ColoredPagesLeftToScanBase[BaseColor];
  5567. for (Color = 0; Color < ColorCount; Color += 1) {
  5568. ASSERT (ColorHead->Count <= MmNumberOfPhysicalPages);
  5569. *ColoredPagesLeftToScan = ColorHead->Count;
  5570. if (ColorHead->Count == 0) {
  5571. ColorHeadsDrained += 1;
  5572. }
  5573. ColorHead += 1;
  5574. ColoredPagesLeftToScan += 1;
  5575. }
  5576. Color = MiLastCallColor;
  5577. #if defined(MI_MULTINODE)
  5578. Color = (Color & MmSecondaryColorMask) | BaseColor;
  5579. #endif
  5580. ASSERT (Color < MmSecondaryColors);
  5581. do {
  5582. //
  5583. // Scan the current list by color.
  5584. //
  5585. ColorHead = &MmFreePagesByColor[MemoryList][Color];
  5586. ColoredPagesLeftToScan = &ColoredPagesLeftToScanBase[Color];
  5587. if (MoreNodePasses == FALSE) {
  5588. //
  5589. // Unrestricted search across all colors.
  5590. //
  5591. Color += 1;
  5592. if (Color >= MmSecondaryColors) {
  5593. Color = 0;
  5594. }
  5595. }
  5596. #if defined(MI_MULTINODE)
  5597. else {
  5598. //
  5599. // Restrict first pass searches to current node.
  5600. //
  5601. Color = BaseColor |
  5602. ((Color + 1) & MmSecondaryColorMask);
  5603. }
  5604. #endif
  5605. if (*ColoredPagesLeftToScan == 0) {
  5606. //
  5607. // This colored list has already been completely
  5608. // searched.
  5609. //
  5610. continue;
  5611. }
  5612. if (ColorHead->Flink == MM_EMPTY_LIST) {
  5613. //
  5614. // This colored list is empty.
  5615. //
  5616. ColorHeadsDrained += 1;
  5617. *ColoredPagesLeftToScan = 0;
  5618. continue;
  5619. }
  5620. while (ColorHead->Flink != MM_EMPTY_LIST) {
  5621. ASSERT (*ColoredPagesLeftToScan != 0);
  5622. *ColoredPagesLeftToScan = *ColoredPagesLeftToScan - 1;
  5623. if (*ColoredPagesLeftToScan == 0) {
  5624. ColorHeadsDrained += 1;
  5625. }
  5626. Page = ColorHead->Flink;
  5627. Pfn1 = MI_PFN_ELEMENT(Page);
  5628. ASSERT ((MMLISTS)Pfn1->u3.e1.PageLocation == MemoryList);
  5629. //
  5630. // See if the page is within the caller's page constraints.
  5631. //
  5632. PagePlacementOk = FALSE;
  5633. LowPage1 = LowPage;
  5634. HighPage1 = HighPage;
  5635. do {
  5636. if (((Page >= LowPage1) && (Page <= HighPage1)) &&
  5637. (!MI_PAGE_FRAME_INDEX_MUST_BE_CACHED(Page))) {
  5638. PagePlacementOk = TRUE;
  5639. break;
  5640. }
  5641. if (SkipPages == 0) {
  5642. break;
  5643. }
  5644. LowPage1 += SkipPages;
  5645. HighPage1 += SkipPages;
  5646. if (LowPage1 > MmHighestPhysicalPage) {
  5647. break;
  5648. }
  5649. if (HighPage1 > MmHighestPhysicalPage) {
  5650. HighPage1 = MmHighestPhysicalPage;
  5651. }
  5652. } while (TRUE);
  5653. //
  5654. // The Flink and Blink must be nonzero here for the page
  5655. // to be on the listhead. Only code that scans the
  5656. // MmPhysicalMemoryBlock has to check for the zero case.
  5657. //
  5658. ASSERT (Pfn1->u1.Flink != 0);
  5659. ASSERT (Pfn1->u2.Blink != 0);
  5660. if (PagePlacementOk == FALSE) {
  5661. if (*ColoredPagesLeftToScan == 0) {
  5662. //
  5663. // No more pages to scan in this colored chain.
  5664. //
  5665. break;
  5666. }
  5667. //
  5668. // If the colored list has more than one entry then
  5669. // move this page to the end of this colored list.
  5670. //
  5671. PageNextColored = (PFN_NUMBER)Pfn1->OriginalPte.u.Long;
  5672. if (PageNextColored == MM_EMPTY_LIST) {
  5673. //
  5674. // No more pages in this colored chain.
  5675. //
  5676. *ColoredPagesLeftToScan = 0;
  5677. ColorHeadsDrained += 1;
  5678. break;
  5679. }
  5680. ASSERT (Pfn1->u1.Flink != 0);
  5681. ASSERT (Pfn1->u1.Flink != MM_EMPTY_LIST);
  5682. ASSERT (Pfn1->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  5683. PfnNextColored = MI_PFN_ELEMENT(PageNextColored);
  5684. ASSERT ((MMLISTS)PfnNextColored->u3.e1.PageLocation == MemoryList);
  5685. ASSERT (PfnNextColored->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  5686. //
  5687. // Adjust the free page list so Page
  5688. // follows PageNextFlink.
  5689. //
  5690. PageNextFlink = Pfn1->u1.Flink;
  5691. PfnNextFlink = MI_PFN_ELEMENT(PageNextFlink);
  5692. ASSERT ((MMLISTS)PfnNextFlink->u3.e1.PageLocation == MemoryList);
  5693. ASSERT (PfnNextFlink->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  5694. PfnLastColored = ColorHead->Blink;
  5695. ASSERT (PfnLastColored != (PMMPFN)MM_EMPTY_LIST);
  5696. ASSERT (PfnLastColored->OriginalPte.u.Long == MM_EMPTY_LIST);
  5697. ASSERT (PfnLastColored->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  5698. ASSERT (PfnLastColored->u2.Blink != MM_EMPTY_LIST);
  5699. ASSERT ((MMLISTS)PfnLastColored->u3.e1.PageLocation == MemoryList);
  5700. PageLastColored = PfnLastColored - MmPfnDatabase;
  5701. if (ListHead->Flink == Page) {
  5702. ASSERT (Pfn1->u2.Blink == MM_EMPTY_LIST);
  5703. ASSERT (ListHead->Blink != Page);
  5704. ListHead->Flink = PageNextFlink;
  5705. PfnNextFlink->u2.Blink = MM_EMPTY_LIST;
  5706. }
  5707. else {
  5708. ASSERT (Pfn1->u2.Blink != MM_EMPTY_LIST);
  5709. ASSERT ((MMLISTS)(MI_PFN_ELEMENT((MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink)))->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  5710. ASSERT ((MMLISTS)(MI_PFN_ELEMENT((MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink)))->u3.e1.PageLocation == MemoryList);
  5711. MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink = PageNextFlink;
  5712. PfnNextFlink->u2.Blink = Pfn1->u2.Blink;
  5713. }
  5714. #if DBG
  5715. if (PfnLastColored->u1.Flink == MM_EMPTY_LIST) {
  5716. ASSERT (ListHead->Blink == PageLastColored);
  5717. }
  5718. #endif
  5719. Pfn1->u1.Flink = PfnLastColored->u1.Flink;
  5720. Pfn1->u2.Blink = PageLastColored;
  5721. if (ListHead->Blink == PageLastColored) {
  5722. ListHead->Blink = Page;
  5723. }
  5724. //
  5725. // Adjust the colored chains.
  5726. //
  5727. if (PfnLastColored->u1.Flink != MM_EMPTY_LIST) {
  5728. ASSERT (MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
  5729. ASSERT ((MMLISTS)(MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u3.e1.PageLocation) == MemoryList);
  5730. MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u2.Blink = Page;
  5731. }
  5732. PfnLastColored->u1.Flink = Page;
  5733. ColorHead->Flink = PageNextColored;
  5734. Pfn1->OriginalPte.u.Long = MM_EMPTY_LIST;
  5735. ASSERT (PfnLastColored->OriginalPte.u.Long == MM_EMPTY_LIST);
  5736. PfnLastColored->OriginalPte.u.Long = (ULONG)Page;
  5737. ColorHead->Blink = Pfn1;
  5738. continue;
  5739. }
  5740. found += 1;
  5741. ASSERT (Pfn1->u3.e1.ReadInProgress == 0);
  5742. MiUnlinkFreeOrZeroedPage (Page);
  5743. Pfn1->u3.e2.ReferenceCount = 1;
  5744. Pfn1->u2.ShareCount = 1;
  5745. MI_SET_PFN_DELETED(Pfn1);
  5746. Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  5747. Pfn1->u4.PteFrame = MI_MAGIC_AWE_PTEFRAME;
  5748. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  5749. ASSERT (Pfn1->u3.e1.CacheAttribute == MiNotMapped);
  5750. Pfn1->u3.e1.StartOfAllocation = 1;
  5751. Pfn1->u3.e1.EndOfAllocation = 1;
  5752. Pfn1->u4.VerifierAllocation = 0;
  5753. Pfn1->u3.e1.LargeSessionAllocation = 0;
  5754. *MdlPage = Page;
  5755. MdlPage += 1;
  5756. if (found == SizeInPages) {
  5757. //
  5758. // All the pages requested are available.
  5759. //
  5760. if (MemoryList == ZeroedPageList) {
  5761. MiLastCallColor = Color;
  5762. }
  5763. #if DBG
  5764. FinishedCount = 0;
  5765. for (Color = 0; Color < ColorCount; Color += 1) {
  5766. if (ColoredPagesLeftToScanBase[Color + BaseColor] == 0) {
  5767. FinishedCount += 1;
  5768. }
  5769. }
  5770. ASSERT (FinishedCount == ColorHeadsDrained);
  5771. #endif
  5772. goto pass2_done;
  5773. }
  5774. //
  5775. // March on to the next colored chain so the overall
  5776. // allocation round-robins the page colors.
  5777. //
  5778. break;
  5779. }
  5780. //
  5781. // Release the PFN lock to give DPCs and other processors
  5782. // a chance to run.
  5783. //
  5784. UNLOCK_PFN (OldIrql);
  5785. LOCK_PFN (OldIrql);
  5786. //
  5787. // Systems utilizing memory compression may have more
  5788. // pages on the zero, free and standby lists than we
  5789. // want to give out. Explicitly check MmAvailablePages
  5790. // instead (and recheck whenever the PFN lock is released
  5791. // and reacquired).
  5792. //
  5793. if (MmAvailablePages == 0) {
  5794. goto pass2_done;
  5795. }
  5796. } while (ColorHeadsDrained != ColorCount);
  5797. //
  5798. // Release the PFN lock to give DPCs and other processors
  5799. // a chance to run. Nothing magic about the instructions
  5800. // between the unlock and the relock.
  5801. //
  5802. UNLOCK_PFN (OldIrql);
  5803. #if DBG
  5804. FinishedCount = 0;
  5805. for (Color = 0; Color < ColorCount; Color += 1) {
  5806. if (ColoredPagesLeftToScanBase[Color + BaseColor] == 0) {
  5807. FinishedCount += 1;
  5808. }
  5809. }
  5810. ASSERT (FinishedCount == ColorHeadsDrained);
  5811. #endif
  5812. if (MemoryList == ZeroedPageList) {
  5813. ZeroRunStart[RunsToZero] = MdlPage;
  5814. RunsToZero += 1;
  5815. }
  5816. else {
  5817. ZeroRunEnd[RunsToZero - 1] = MdlPage;
  5818. }
  5819. MemoryList += 1;
  5820. if (MemoryList > FreePageList) {
  5821. break;
  5822. }
  5823. LOCK_PFN (OldIrql);
  5824. //
  5825. // Systems utilizing memory compression may have more
  5826. // pages on the zero, free and standby lists than we
  5827. // want to give out. Explicitly check MmAvailablePages
  5828. // instead (and recheck whenever the PFN lock is released
  5829. // and reacquired).
  5830. //
  5831. if (MmAvailablePages == 0) {
  5832. goto pass2_done;
  5833. }
  5834. MiLastCallColor = 0;
  5835. } while (TRUE);
  5836. #if defined(MI_MULTINODE)
  5837. if (MoreNodePasses == FALSE) {
  5838. break;
  5839. }
  5840. //
  5841. // Expand range to all colors for next pass.
  5842. //
  5843. ColorCount = MmSecondaryColors;
  5844. BaseColor = 0;
  5845. MoreNodePasses = FALSE;
  5846. LOCK_PFN (OldIrql);
  5847. //
  5848. // Systems utilizing memory compression may have more
  5849. // pages on the zero, free and standby lists than we
  5850. // want to give out. Explicitly check MmAvailablePages
  5851. // instead (and recheck whenever the PFN lock is released
  5852. // and reacquired).
  5853. //
  5854. if (MmAvailablePages == 0) {
  5855. goto pass2_done;
  5856. }
  5857. } while (TRUE);
  5858. #endif
  5859. //
  5860. // Walk the transition list looking for pages satisfying the
  5861. // constraints as walking the physical memory block can be draining.
  5862. //
  5863. LOCK_PFN (OldIrql);
  5864. count = MmStandbyPageListHead.Total;
  5865. Page = MmStandbyPageListHead.Flink;
  5866. while (count != 0) {
  5867. LowPage1 = LowPage;
  5868. HighPage1 = HighPage;
  5869. PagePlacementOk = FALSE;
  5870. Pfn1 = MI_PFN_ELEMENT (Page);
  5871. do {
  5872. if (((Page >= LowPage1) && (Page <= HighPage1)) &&
  5873. (!MI_PAGE_FRAME_INDEX_MUST_BE_CACHED(Page))) {
  5874. ASSERT (Pfn1->u3.e1.ReadInProgress == 0);
  5875. //
  5876. // Systems utilizing memory compression may have more
  5877. // pages on the zero, free and standby lists than we
  5878. // want to give out. Explicitly check MmAvailablePages
  5879. // instead (and recheck whenever the PFN lock is released
  5880. // and reacquired).
  5881. //
  5882. if (MmAvailablePages == 0) {
  5883. goto pass2_done;
  5884. }
  5885. found += 1;
  5886. //
  5887. // This page is in the desired range - grab it.
  5888. //
  5889. NextPage = Pfn1->u1.Flink;
  5890. MiUnlinkPageFromList (Pfn1);
  5891. MiRestoreTransitionPte (Page);
  5892. Pfn1->u3.e2.ReferenceCount = 1;
  5893. Pfn1->u2.ShareCount = 1;
  5894. MI_SET_PFN_DELETED(Pfn1);
  5895. Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  5896. Pfn1->u4.PteFrame = MI_MAGIC_AWE_PTEFRAME;
  5897. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  5898. ASSERT (Pfn1->u3.e1.CacheAttribute == MiNotMapped);
  5899. Pfn1->u3.e1.StartOfAllocation = 1;
  5900. Pfn1->u3.e1.EndOfAllocation = 1;
  5901. Pfn1->u4.VerifierAllocation = 0;
  5902. Pfn1->u3.e1.LargeSessionAllocation = 0;
  5903. *MdlPage = Page;
  5904. MdlPage += 1;
  5905. if (found == SizeInPages) {
  5906. //
  5907. // All the pages requested are available.
  5908. //
  5909. goto pass2_done;
  5910. }
  5911. PagePlacementOk = TRUE;
  5912. Page = NextPage;
  5913. break;
  5914. }
  5915. if (SkipPages == 0) {
  5916. break;
  5917. }
  5918. LowPage1 += SkipPages;
  5919. HighPage1 += SkipPages;
  5920. if (LowPage1 > MmHighestPhysicalPage) {
  5921. break;
  5922. }
  5923. if (HighPage1 > MmHighestPhysicalPage) {
  5924. HighPage1 = MmHighestPhysicalPage;
  5925. }
  5926. } while (TRUE);
  5927. if (PagePlacementOk == FALSE) {
  5928. Page = Pfn1->u1.Flink;
  5929. }
  5930. count -= 1;
  5931. }
  5932. UNLOCK_PFN (OldIrql);
  5933. if (SkipPages == 0) {
  5934. LOCK_PFN (OldIrql);
  5935. break;
  5936. }
  5937. LowPage += SkipPages;
  5938. HighPage += SkipPages;
  5939. if (LowPage > MmHighestPhysicalPage) {
  5940. LOCK_PFN (OldIrql);
  5941. break;
  5942. }
  5943. if (HighPage > MmHighestPhysicalPage) {
  5944. HighPage = MmHighestPhysicalPage;
  5945. }
  5946. //
  5947. // Reinitialize the zeroed list variable in preparation
  5948. // for another loop.
  5949. //
  5950. MemoryList = ZeroedPageList;
  5951. LOCK_PFN (OldIrql);
  5952. //
  5953. // Systems utilizing memory compression may have more
  5954. // pages on the zero, free and standby lists than we
  5955. // want to give out. Explicitly check MmAvailablePages
  5956. // instead (and recheck whenever the PFN lock is released
  5957. // and reacquired).
  5958. //
  5959. } while (MmAvailablePages != 0);
  5960. pass2_done:
  5961. //
  5962. // The full amount was charged up front - remove any excess now.
  5963. //
  5964. MmMdlPagesAllocated -= (SizeInPages - found);
  5965. MmResidentAvailablePages += (SizeInPages - found);
  5966. MM_BUMP_COUNTER(38, SizeInPages - found);
  5967. UNLOCK_PFN (OldIrql);
  5968. ExReleaseFastMutex (&MmDynamicMemoryMutex);
  5969. MmUnlockPagableImageSection (ExPageLockHandle);
  5970. ExFreePool (ColoredPagesLeftToScanBase);
  5971. if (found != MdlPageSpan) {
  5972. ASSERT (found < MdlPageSpan);
  5973. MiReturnCommitment (MdlPageSpan - found);
  5974. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_AWE_EXCESS, MdlPageSpan - found);
  5975. }
  5976. if (found == 0) {
  5977. ExFreePool (MemoryDescriptorList);
  5978. return NULL;
  5979. }
  5980. MemoryDescriptorList->ByteCount = (ULONG)(found << PAGE_SHIFT);
  5981. if (found != SizeInPages) {
  5982. *MdlPage = MM_EMPTY_LIST;
  5983. }
  5984. //
  5985. // If the number of pages allocated was substantially less than the
  5986. // initial request amount, attempt to allocate a smaller MDL to save
  5987. // pool.
  5988. //
  5989. if ((MdlPageSpan - found) > ((4 * PAGE_SIZE) / sizeof (PFN_NUMBER))) {
  5990. MemoryDescriptorList2 = MmCreateMdl ((PMDL)0,
  5991. (PVOID)0,
  5992. found << PAGE_SHIFT);
  5993. if (MemoryDescriptorList2 != (PMDL)0) {
  5994. ULONG n;
  5995. PFN_NUMBER Diff;
  5996. RtlCopyMemory ((PVOID)(MemoryDescriptorList2 + 1),
  5997. (PVOID)(MemoryDescriptorList + 1),
  5998. found * sizeof (PFN_NUMBER));
  5999. Diff = (PPFN_NUMBER)(MemoryDescriptorList2 + 1) -
  6000. (PPFN_NUMBER)(MemoryDescriptorList + 1);
  6001. for (n = 0; n < RunsToZero; n += 1) {
  6002. ZeroRunStart[n] += Diff;
  6003. ZeroRunEnd[n] += Diff;
  6004. }
  6005. ExFreePool (MemoryDescriptorList);
  6006. MemoryDescriptorList = MemoryDescriptorList2;
  6007. }
  6008. }
  6009. MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  6010. LastMdlPage = MdlPage + found;
  6011. #if DBG
  6012. //
  6013. // Ensure all pages are within the caller's page constraints.
  6014. //
  6015. LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);
  6016. HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
  6017. while (MdlPage < LastMdlPage) {
  6018. Page = *MdlPage;
  6019. PagePlacementOk = FALSE;
  6020. LowPage1 = LowPage;
  6021. HighPage1 = HighPage;
  6022. do {
  6023. if ((Page >= LowPage1) && (Page <= HighPage1)) {
  6024. PagePlacementOk = TRUE;
  6025. break;
  6026. }
  6027. if (SkipPages == 0) {
  6028. break;
  6029. }
  6030. LowPage1 += SkipPages;
  6031. HighPage1 += SkipPages;
  6032. if (LowPage1 > MmHighestPhysicalPage) {
  6033. break;
  6034. }
  6035. if (HighPage1 > MmHighestPhysicalPage) {
  6036. HighPage1 = MmHighestPhysicalPage;
  6037. }
  6038. } while (TRUE);
  6039. ASSERT (PagePlacementOk == TRUE);
  6040. Pfn1 = MI_PFN_ELEMENT(*MdlPage);
  6041. ASSERT (Pfn1->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME);
  6042. MdlPage += 1;
  6043. }
  6044. MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  6045. ASSERT(RunsToZero <= 2);
  6046. #endif
  6047. //
  6048. // Zero any pages that were allocated from the free or standby lists.
  6049. //
  6050. if (RunsToZero) {
  6051. //
  6052. // Lengthen the last run to include the standby pages.
  6053. //
  6054. ZeroRunEnd[RunsToZero - 1] = LastMdlPage;
  6055. while (RunsToZero != 0) {
  6056. RunsToZero -= 1;
  6057. for (MdlPage = ZeroRunStart[RunsToZero];
  6058. MdlPage < ZeroRunEnd[RunsToZero];
  6059. MdlPage += 1) {
  6060. MiZeroPhysicalPage (*MdlPage, 0);
  6061. }
  6062. }
  6063. }
  6064. //
6065. // Mark the MDL's pages as locked so that the kernel-mode caller can
  6066. // map the MDL using MmMapLockedPages* without asserting.
  6067. //
  6068. MemoryDescriptorList->MdlFlags |= MDL_PAGES_LOCKED;
  6069. return MemoryDescriptorList;
  6070. }
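//
// Illustrative sketch (not part of the original module) of the calling
// pattern the routine above expects: the returned MDL may describe fewer
// bytes than requested, so the caller must check ByteCount, and the pages
// are not mapped anywhere until the caller maps the MDL itself. The low
// 4GB constraint and 64KB request size are hypothetical.
//
PVOID
ExampleAllocateAndMapMdlPages (
    OUT PMDL *MdlOut
    )
{
    PHYSICAL_ADDRESS LowAddress;
    PHYSICAL_ADDRESS HighAddress;
    PHYSICAL_ADDRESS SkipBytes;
    PMDL Mdl;
    PVOID SystemVa;

    LowAddress.QuadPart = 0;
    HighAddress.QuadPart = 0xFFFFFFFF;          // anywhere in the low 4GB
    SkipBytes.QuadPart = 0;

    Mdl = MmAllocatePagesForMdl (LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 64 * 1024);

    if (Mdl == NULL) {
        return NULL;
    }

    if (Mdl->ByteCount < 64 * 1024) {

        //
        // Partial allocation - a real caller would cope or retry.
        //
    }

    SystemVa = MmMapLockedPagesSpecifyCache (Mdl,
                                             KernelMode,
                                             MmCached,
                                             NULL,
                                             FALSE,
                                             NormalPagePriority);

    if (SystemVa == NULL) {
        MmFreePagesFromMdl (Mdl);
        ExFreePool (Mdl);
        return NULL;
    }

    *MdlOut = Mdl;
    return SystemVa;
}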
  6071. VOID
  6072. MmFreePagesFromMdl (
  6073. IN PMDL MemoryDescriptorList
  6074. )
  6075. /*++
  6076. Routine Description:
  6077. This routine walks the argument MDL freeing each physical page back to
  6078. the PFN database. This is designed to free pages acquired via
  6079. MmAllocatePagesForMdl only.
  6080. Arguments:
  6081. MemoryDescriptorList - Supplies an MDL which contains the pages to be freed.
  6082. Return Value:
  6083. None.
  6084. Environment:
  6085. Kernel mode, IRQL of APC_LEVEL or below.
  6086. --*/
  6087. {
  6088. PMMPFN Pfn1;
  6089. KIRQL OldIrql;
  6090. PVOID StartingAddress;
  6091. PVOID AlignedVa;
  6092. PPFN_NUMBER Page;
  6093. PFN_NUMBER NumberOfPages;
  6094. PFN_NUMBER TotalPages;
  6095. PFN_NUMBER DeltaPages;
  6096. ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
  6097. MmLockPagableSectionByHandle (ExPageLockHandle);
  6098. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  6099. ASSERT ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0);
  6100. ASSERT (((ULONG_PTR)MemoryDescriptorList->StartVa & (PAGE_SIZE - 1)) == 0);
  6101. AlignedVa = (PVOID)MemoryDescriptorList->StartVa;
  6102. StartingAddress = (PVOID)((PCHAR)AlignedVa +
  6103. MemoryDescriptorList->ByteOffset);
  6104. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingAddress,
  6105. MemoryDescriptorList->ByteCount);
  6106. TotalPages = NumberOfPages;
  6107. //
  6108. // Notify deadlock verifier that a region that can contain locks
  6109. // will become invalid.
  6110. //
  6111. if (MmVerifierData.Level & DRIVER_VERIFIER_DEADLOCK_DETECTION) {
  6112. VerifierDeadlockFreePool (StartingAddress, TotalPages << PAGE_SHIFT);
  6113. }
  6114. MI_MAKING_MULTIPLE_PTES_INVALID (TRUE);
  6115. LOCK_PFN (OldIrql);
  6116. do {
  6117. if (*Page == MM_EMPTY_LIST) {
  6118. //
  6119. // There are no more locked pages.
  6120. //
  6121. break;
  6122. }
  6123. ASSERT (*Page <= MmHighestPhysicalPage);
  6124. Pfn1 = MI_PFN_ELEMENT (*Page);
  6125. ASSERT (Pfn1->u2.ShareCount == 1);
  6126. ASSERT (MI_IS_PFN_DELETED (Pfn1) == TRUE);
  6127. ASSERT (MI_PFN_IS_AWE (Pfn1) == TRUE);
  6128. ASSERT (Pfn1->u4.PteFrame == MI_MAGIC_AWE_PTEFRAME);
  6129. Pfn1->u3.e1.StartOfAllocation = 0;
  6130. Pfn1->u3.e1.EndOfAllocation = 0;
  6131. Pfn1->u2.ShareCount = 0;
  6132. #if DBG
  6133. Pfn1->u4.PteFrame -= 1;
  6134. Pfn1->u3.e1.PageLocation = StandbyPageList;
  6135. #endif
  6136. MiDecrementReferenceCountInline (Pfn1, *Page);
  6137. *Page++ = MM_EMPTY_LIST;
  6138. //
  6139. // Nothing magic about the divisor here - just releasing the PFN lock
  6140. // periodically to allow other processors and DPCs a chance to execute.
  6141. //
  6142. if ((NumberOfPages & 0xFF) == 0) {
  6143. DeltaPages = TotalPages - NumberOfPages;
  6144. MmMdlPagesAllocated -= DeltaPages;
  6145. MmResidentAvailablePages += DeltaPages;
  6146. MM_BUMP_COUNTER(35, DeltaPages);
  6147. UNLOCK_PFN (OldIrql);
  6148. MiReturnCommitment (DeltaPages);
  6149. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_MDL_PAGES, DeltaPages);
  6150. TotalPages -= DeltaPages;
  6151. LOCK_PFN (OldIrql);
  6152. }
  6153. NumberOfPages -= 1;
  6154. } while (NumberOfPages != 0);
  6155. MmMdlPagesAllocated -= TotalPages;
  6156. MmResidentAvailablePages += TotalPages;
  6157. MM_BUMP_COUNTER(35, TotalPages);
  6158. UNLOCK_PFN (OldIrql);
  6159. MmUnlockPagableImageSection (ExPageLockHandle);
  6160. MiReturnCommitment (TotalPages);
  6161. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_MDL_PAGES, TotalPages);
  6162. MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
  6163. }
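//
// Illustrative sketch (not part of the original module) of the matching
// teardown for the allocation sketch above: any mapping must be released
// first, then the physical pages, and finally the MDL itself - which
// MmFreePagesFromMdl deliberately does not free - is returned to pool.
//
VOID
ExampleUnmapAndFreeMdlPages (
    IN PVOID SystemVa,
    IN PMDL Mdl
    )
{
    MmUnmapLockedPages (SystemVa, Mdl);

    MmFreePagesFromMdl (Mdl);

    //
    // The MDL was allocated from pool by MmCreateMdl inside
    // MmAllocatePagesForMdl, so plain ExFreePool releases it.
    //

    ExFreePool (Mdl);
}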
  6164. NTSTATUS
  6165. MmMapUserAddressesToPage (
  6166. IN PVOID BaseAddress,
  6167. IN SIZE_T NumberOfBytes,
  6168. IN PVOID PageAddress
  6169. )
  6170. /*++
  6171. Routine Description:
  6172. This function maps a range of addresses in a physical memory VAD to the
  6173. specified page address. This is typically used by a driver to nicely
  6174. remove an application's access to things like video memory when the
  6175. application is not responding to requests to relinquish it.
  6176. Note the entire range must be currently mapped (ie, all the PTEs must
  6177. be valid) by the caller.
  6178. Arguments:
  6179. BaseAddress - Supplies the base virtual address where the physical
  6180. address is mapped.
  6181. NumberOfBytes - Supplies the number of bytes to remap to the new address.
  6182. PageAddress - Supplies the virtual address of the page this is remapped to.
  6183. This must be nonpaged memory.
  6184. Return Value:
  6185. Various NTSTATUS codes.
  6186. Environment:
  6187. Kernel mode, IRQL of APC_LEVEL or below.
  6188. --*/
  6189. {
  6190. PMMVAD Vad;
  6191. PMMPTE PointerPte;
  6192. MMPTE PteContents;
  6193. PMMPTE LastPte;
  6194. PEPROCESS Process;
  6195. NTSTATUS Status;
  6196. PVOID EndingAddress;
  6197. PFN_NUMBER PageFrameNumber;
  6198. SIZE_T NumberOfPtes;
  6199. PHYSICAL_ADDRESS PhysicalAddress;
  6200. KIRQL OldIrql;
  6201. PAGED_CODE();
  6202. if (BaseAddress > MM_HIGHEST_USER_ADDRESS) {
  6203. return STATUS_INVALID_PARAMETER_1;
  6204. }
  6205. if ((ULONG_PTR)BaseAddress + NumberOfBytes > (ULONG64)MM_HIGHEST_USER_ADDRESS) {
  6206. return STATUS_INVALID_PARAMETER_2;
  6207. }
  6208. Process = PsGetCurrentProcess();
  6209. EndingAddress = (PVOID)((PCHAR)BaseAddress + NumberOfBytes - 1);
  6210. LOCK_ADDRESS_SPACE (Process);
  6211. //
  6212. // Make sure the address space was not deleted.
  6213. //
  6214. if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
  6215. Status = STATUS_PROCESS_IS_TERMINATING;
  6216. goto ErrorReturn;
  6217. }
  6218. Vad = (PMMVAD)MiLocateAddress (BaseAddress);
  6219. if (Vad == NULL) {
  6220. //
  6221. // No virtual address descriptor located.
  6222. //
  6223. Status = STATUS_MEMORY_NOT_ALLOCATED;
  6224. goto ErrorReturn;
  6225. }
  6226. if (NumberOfBytes == 0) {
  6227. //
  6228. // If the region size is specified as 0, the base address
  6229. // must be the starting address for the region. The entire VAD
  6230. // will then be repointed.
  6231. //
  6232. if (MI_VA_TO_VPN (BaseAddress) != Vad->StartingVpn) {
  6233. Status = STATUS_FREE_VM_NOT_AT_BASE;
  6234. goto ErrorReturn;
  6235. }
  6236. BaseAddress = MI_VPN_TO_VA (Vad->StartingVpn);
  6237. EndingAddress = MI_VPN_TO_VA_ENDING (Vad->EndingVpn);
  6238. NumberOfBytes = (PCHAR)EndingAddress - (PCHAR)BaseAddress + 1;
  6239. }
  6240. //
  6241. // Found the associated virtual address descriptor.
  6242. //
  6243. if (Vad->EndingVpn < MI_VA_TO_VPN (EndingAddress)) {
  6244. //
  6245. // The entire range to remap is not contained within a single
  6246. // virtual address descriptor. Return an error.
  6247. //
  6248. Status = STATUS_INVALID_PARAMETER_2;
  6249. goto ErrorReturn;
  6250. }
  6251. if (Vad->u.VadFlags.PhysicalMapping == 0) {
  6252. //
  6253. // The virtual address descriptor is not a physical mapping.
  6254. //
  6255. Status = STATUS_INVALID_ADDRESS;
  6256. goto ErrorReturn;
  6257. }
  6258. PointerPte = MiGetPteAddress (BaseAddress);
  6259. LastPte = MiGetPteAddress (EndingAddress);
  6260. NumberOfPtes = LastPte - PointerPte + 1;
  6261. //
  6262. // Lock down because the PFN lock is going to be acquired shortly.
  6263. //
  6264. MmLockPagableSectionByHandle(ExPageLockHandle);
  6265. LOCK_WS_UNSAFE (Process);
  6266. PhysicalAddress = MmGetPhysicalAddress (PageAddress);
  6267. PageFrameNumber = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
  6268. PteContents = *PointerPte;
  6269. PteContents.u.Hard.PageFrameNumber = PageFrameNumber;
  6270. #if DBG
  6271. //
  6272. // All the PTEs must be valid or the filling will corrupt the
  6273. // UsedPageTableCounts.
  6274. //
6275. do {
6276. ASSERT (PointerPte->u.Hard.Valid == 1);
6277. PointerPte += 1;
6278. } while (PointerPte <= LastPte);
  6279. PointerPte = MiGetPteAddress (BaseAddress);
  6280. #endif
  6281. //
  6282. // Fill the PTEs and flush at the end - no race here because it doesn't
  6283. // matter whether the user app sees the old or the new data until we
// return (writes going to either page are acceptable prior to return
  6285. // from this function). There is no race with I/O and ProbeAndLockPages
  6286. // because the PFN lock is acquired here.
  6287. //
  6288. LOCK_PFN (OldIrql);
  6289. #if !defined (_X86PAE_)
  6290. MiFillMemoryPte (PointerPte,
  6291. NumberOfPtes * sizeof (MMPTE),
  6292. PteContents.u.Long);
  6293. #else
  6294. //
  6295. // Note that the PAE architecture must very carefully fill these PTEs.
  6296. //
do {
ASSERT (PointerPte->u.Hard.Valid == 1);
(VOID)KeInterlockedSwapPte ((PHARDWARE_PTE)PointerPte,
(PHARDWARE_PTE)&PteContents);
PointerPte += 1;
} while (PointerPte <= LastPte);
  6303. PointerPte = MiGetPteAddress (BaseAddress);
  6304. #endif
  6305. if (NumberOfPtes == 1) {
  6306. (VOID)KeFlushSingleTb (BaseAddress,
  6307. TRUE,
  6308. TRUE,
  6309. (PHARDWARE_PTE)PointerPte,
  6310. PteContents.u.Flush);
  6311. }
  6312. else {
  6313. KeFlushEntireTb (TRUE, TRUE);
  6314. }
  6315. UNLOCK_PFN (OldIrql);
  6316. UNLOCK_WS_UNSAFE (Process);
  6317. MmUnlockPagableImageSection (ExPageLockHandle);
  6318. Status = STATUS_SUCCESS;
  6319. ErrorReturn:
  6320. UNLOCK_ADDRESS_SPACE (Process);
  6321. return Status;
  6322. }
  6323. PHYSICAL_ADDRESS
  6324. MmGetPhysicalAddress (
  6325. IN PVOID BaseAddress
  6326. )
  6327. /*++
  6328. Routine Description:
  6329. This function returns the corresponding physical address for a
  6330. valid virtual address.
  6331. Arguments:
  6332. BaseAddress - Supplies the virtual address for which to return the
  6333. physical address.
  6334. Return Value:
  6335. Returns the corresponding physical address.
  6336. Environment:
  6337. Kernel mode. Any IRQL level.
  6338. --*/
  6339. {
  6340. PMMPTE PointerPte;
  6341. PHYSICAL_ADDRESS PhysicalAddress;
  6342. if (MI_IS_PHYSICAL_ADDRESS(BaseAddress)) {
  6343. PhysicalAddress.QuadPart = MI_CONVERT_PHYSICAL_TO_PFN (BaseAddress);
  6344. }
  6345. else {
  6346. PointerPte = MiGetPdeAddress (BaseAddress);
  6347. if (PointerPte->u.Hard.Valid == 0) {
KdPrint(("MM:MmGetPhysicalAddress failed, base address was %p\n",
BaseAddress));
  6350. ZERO_LARGE (PhysicalAddress);
  6351. return PhysicalAddress;
  6352. }
  6353. PointerPte = MiGetPteAddress(BaseAddress);
  6354. if (PointerPte->u.Hard.Valid == 0) {
KdPrint(("MM:MmGetPhysicalAddress failed, base address was %p\n",
BaseAddress));
  6357. ZERO_LARGE (PhysicalAddress);
  6358. return PhysicalAddress;
  6359. }
  6360. PhysicalAddress.QuadPart = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  6361. }
  6362. PhysicalAddress.QuadPart = PhysicalAddress.QuadPart << PAGE_SHIFT;
  6363. PhysicalAddress.LowPart += BYTE_OFFSET(BaseAddress);
  6364. return PhysicalAddress;
  6365. }
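//
// Disabled, illustrative sketch of the calling pattern for the routine
// above. MiSketchGetPhysical and its argument are hypothetical; the
// zero-QuadPart failure convention is the one implemented above.
//
#if 0
static LOGICAL
MiSketchGetPhysical (
    IN PVOID NonPagedBuffer
    )
{
    PHYSICAL_ADDRESS PhysicalAddress;

    PhysicalAddress = MmGetPhysicalAddress (NonPagedBuffer);

    //
    // A zero QuadPart means the PDE or PTE mapping the address was
    // not valid.
    //
    return (LOGICAL)(PhysicalAddress.QuadPart != 0);
}
#endif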
  6366. PVOID
  6367. MmGetVirtualForPhysical (
  6368. IN PHYSICAL_ADDRESS PhysicalAddress
  6369. )
  6370. /*++
  6371. Routine Description:
  6372. This function returns the corresponding virtual address for a physical
  6373. address whose primary virtual address is in system space.
  6374. Arguments:
  6375. PhysicalAddress - Supplies the physical address for which to return the
  6376. virtual address.
  6377. Return Value:
  6378. Returns the corresponding virtual address.
  6379. Environment:
  6380. Kernel mode. Any IRQL level.
  6381. --*/
  6382. {
  6383. PFN_NUMBER PageFrameIndex;
  6384. PMMPFN Pfn;
  6385. PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
  6386. Pfn = MI_PFN_ELEMENT (PageFrameIndex);
  6387. return (PVOID)((PCHAR)MiGetVirtualAddressMappedByPte (Pfn->PteAddress) +
  6388. BYTE_OFFSET (PhysicalAddress.LowPart));
  6389. }
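//
// Disabled, illustrative sketch: a round trip through the two routines
// above. MiSketchRoundTrip is hypothetical and assumes the page's
// primary mapping is a system-space PTE, as the routine requires.
//
#if 0
static PVOID
MiSketchRoundTrip (
    IN PVOID SystemVa
    )
{
    PHYSICAL_ADDRESS PhysicalAddress;

    PhysicalAddress = MmGetPhysicalAddress (SystemVa);

    //
    // Because the PFN database records the mapping PTE, this recovers
    // the original virtual address, byte offset included.
    //
    return MmGetVirtualForPhysical (PhysicalAddress);
}
#endif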
  6390. //
  6391. // Nonpaged helper routine.
  6392. //
  6393. VOID
  6394. MiMarkMdlPageAttributes (
  6395. IN PMDL Mdl,
  6396. IN PFN_NUMBER NumberOfPages,
  6397. IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
  6398. )
  6399. {
  6400. PMMPFN Pfn1;
  6401. PFN_NUMBER PageFrameIndex;
  6402. PPFN_NUMBER Page;
  6403. Page = (PPFN_NUMBER)(Mdl + 1);
  6404. do {
  6405. PageFrameIndex = *Page;
  6406. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  6407. ASSERT (Pfn1->u3.e1.CacheAttribute == MiNotMapped);
  6408. Pfn1->u3.e1.CacheAttribute = CacheAttribute;
  6409. Page += 1;
  6410. NumberOfPages -= 1;
  6411. } while (NumberOfPages != 0);
  6412. }
  6413. PVOID
  6414. MmAllocateNonCachedMemory (
  6415. IN SIZE_T NumberOfBytes
  6416. )
  6417. /*++
  6418. Routine Description:
  6419. This function allocates a range of noncached memory in
  6420. the non-paged portion of the system address space.
  6421. This routine is designed to be used by a driver's initialization
  6422. routine to allocate a noncached block of virtual memory for
  6423. various device specific buffers.
  6424. Arguments:
  6425. NumberOfBytes - Supplies the number of bytes to allocate.
  6426. Return Value:
  6427. NON-NULL - Returns a pointer (virtual address in the nonpaged portion
of the system) to the allocated noncached memory. The
underlying physical pages are not necessarily contiguous.
  6430. NULL - The specified request could not be satisfied.
  6431. Environment:
  6432. Kernel mode, IRQL of APC_LEVEL or below.
  6433. --*/
  6434. {
  6435. PPFN_NUMBER Page;
  6436. PMMPTE PointerPte;
  6437. MMPTE TempPte;
  6438. PFN_NUMBER NumberOfPages;
  6439. PFN_NUMBER PageFrameIndex;
  6440. PMDL Mdl;
  6441. PVOID BaseAddress;
  6442. PHYSICAL_ADDRESS LowAddress;
  6443. PHYSICAL_ADDRESS HighAddress;
  6444. PHYSICAL_ADDRESS SkipBytes;
  6445. PFN_NUMBER NumberOfPagesAllocated;
  6446. MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
  6447. ASSERT (NumberOfBytes != 0);
  6448. #if defined (_WIN64)
  6449. //
  6450. // Maximum allocation size is constrained by the MDL ByteCount field.
  6451. //
  6452. if (NumberOfBytes >= _4gb) {
  6453. return NULL;
  6454. }
  6455. #endif
  6456. NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);
  6457. //
  6458. // Even though an MDL is not needed per se, it is much more convenient
  6459. // to use the routine below because it checks for things like appropriate
  6460. // cachability of the pages, etc. Note that the MDL returned may map
  6461. // fewer pages than requested - check for this and if so, return NULL.
  6462. //
  6463. LowAddress.QuadPart = 0;
  6464. HighAddress.QuadPart = (ULONGLONG)-1;
  6465. SkipBytes.QuadPart = 0;
  6466. Mdl = MmAllocatePagesForMdl (LowAddress,
  6467. HighAddress,
  6468. SkipBytes,
  6469. NumberOfBytes);
  6470. if (Mdl == NULL) {
  6471. return NULL;
  6472. }
  6473. BaseAddress = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);
  6474. NumberOfPagesAllocated = ADDRESS_AND_SIZE_TO_SPAN_PAGES (BaseAddress, Mdl->ByteCount);
  6475. if (NumberOfPages != NumberOfPagesAllocated) {
  6476. ASSERT (NumberOfPages > NumberOfPagesAllocated);
  6477. MmFreePagesFromMdl (Mdl);
  6478. ExFreePool (Mdl);
  6479. return NULL;
  6480. }
  6481. //
  6482. // Obtain enough virtual space to map the pages. Add an extra PTE so the
  6483. // MDL can be stashed now and retrieved on release.
  6484. //
  6485. PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages + 1, SystemPteSpace);
  6486. if (PointerPte == NULL) {
  6487. MmFreePagesFromMdl (Mdl);
  6488. ExFreePool (Mdl);
  6489. return NULL;
  6490. }
  6491. *(PMDL *)PointerPte = Mdl;
  6492. PointerPte += 1;
  6493. BaseAddress = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
  6494. Page = (PPFN_NUMBER)(Mdl + 1);
  6495. MI_MAKE_VALID_PTE (TempPte,
  6496. 0,
  6497. MM_READWRITE,
  6498. PointerPte);
  6499. MI_SET_PTE_DIRTY (TempPte);
  6500. CacheAttribute = MI_TRANSLATE_CACHETYPE (MmNonCached, FALSE);
  6501. switch (CacheAttribute) {
  6502. case MiNonCached:
  6503. MI_DISABLE_CACHING (TempPte);
  6504. break;
  6505. case MiCached:
  6506. break;
  6507. case MiWriteCombined:
  6508. MI_SET_PTE_WRITE_COMBINE (TempPte);
  6509. break;
  6510. default:
  6511. ASSERT (FALSE);
  6512. break;
  6513. }
  6514. MI_PREPARE_FOR_NONCACHED (CacheAttribute);
  6515. do {
  6516. ASSERT (PointerPte->u.Hard.Valid == 0);
  6517. PageFrameIndex = *Page;
  6518. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  6519. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  6520. Page += 1;
  6521. PointerPte += 1;
  6522. NumberOfPages -= 1;
  6523. } while (NumberOfPages != 0);
  6524. MI_SWEEP_CACHE (CacheAttribute, BaseAddress, NumberOfBytes);
  6525. MiMarkMdlPageAttributes (Mdl, NumberOfPagesAllocated, CacheAttribute);
  6526. return BaseAddress;
  6527. }
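//
// Disabled, illustrative sketch of the allocate/free pairing for the
// routine above. The one-page size and MiSketchNonCached are
// hypothetical; note the free must be given the same byte count because
// the MDL pointer is stashed in the PTE just below the returned address.
//
#if 0
static VOID
MiSketchNonCached (
    VOID
    )
{
    PVOID Buffer;

    Buffer = MmAllocateNonCachedMemory (PAGE_SIZE);

    if (Buffer != NULL) {

        //
        // ... use the buffer for device-visible data ...
        //

        MmFreeNonCachedMemory (Buffer, PAGE_SIZE);
    }
}
#endif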
  6528. VOID
  6529. MmFreeNonCachedMemory (
  6530. IN PVOID BaseAddress,
  6531. IN SIZE_T NumberOfBytes
  6532. )
  6533. /*++
  6534. Routine Description:
  6535. This function deallocates a range of noncached memory in
  6536. the non-paged portion of the system address space.
  6537. Arguments:
  6538. BaseAddress - Supplies the base virtual address where the noncached
  6539. memory resides.
  6540. NumberOfBytes - Supplies the number of bytes allocated to the request.
  6541. This must be the same number that was obtained with
  6542. the MmAllocateNonCachedMemory call.
  6543. Return Value:
  6544. None.
  6545. Environment:
  6546. Kernel mode, IRQL of APC_LEVEL or below.
  6547. --*/
  6548. {
  6549. PMDL Mdl;
  6550. PMMPTE PointerPte;
  6551. PFN_NUMBER NumberOfPages;
  6552. #if DBG
  6553. PFN_NUMBER i;
  6554. PVOID StartingAddress;
  6555. #endif
  6556. ASSERT (NumberOfBytes != 0);
  6557. ASSERT (PAGE_ALIGN (BaseAddress) == BaseAddress);
  6558. MI_MAKING_MULTIPLE_PTES_INVALID (TRUE);
  6559. NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);
  6560. PointerPte = MiGetPteAddress (BaseAddress);
  6561. Mdl = *(PMDL *)(PointerPte - 1);
  6562. #if DBG
  6563. StartingAddress = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);
  6564. i = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingAddress, Mdl->ByteCount);
  6565. ASSERT (NumberOfPages == i);
  6566. #endif
  6567. MmFreePagesFromMdl (Mdl);
  6568. ExFreePool (Mdl);
  6569. MiReleaseSystemPtes (PointerPte - 1,
  6570. (ULONG)NumberOfPages + 1,
  6571. SystemPteSpace);
  6572. return;
  6573. }
  6574. SIZE_T
  6575. MmSizeOfMdl (
  6576. IN PVOID Base,
  6577. IN SIZE_T Length
  6578. )
  6579. /*++
  6580. Routine Description:
  6581. This function returns the number of bytes required for an MDL for a
  6582. given buffer and size.
  6583. Arguments:
  6584. Base - Supplies the base virtual address for the buffer.
  6585. Length - Supplies the size of the buffer in bytes.
  6586. Return Value:
  6587. Returns the number of bytes required to contain the MDL.
  6588. Environment:
  6589. Kernel mode. Any IRQL level.
  6590. --*/
  6591. {
  6592. return( sizeof( MDL ) +
  6593. (ADDRESS_AND_SIZE_TO_SPAN_PAGES( Base, Length ) *
  6594. sizeof( PFN_NUMBER ))
  6595. );
  6596. }
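//
// Worked example for the computation above, assuming 4K pages: a buffer
// at offset 0xF00 into a page with Length 0x300 spans two pages
// (0xF00..0xFFF and 0x1000..0x11FF), so sizeof(MDL) plus two
// PFN_NUMBER entries are required. The disabled helper below is
// hypothetical and simply allocates that many bytes.
//
#if 0
static PMDL
MiSketchAllocateMdl (
    IN PVOID Base,
    IN SIZE_T Length
    )
{
    return (PMDL) ExAllocatePoolWithTag (NonPagedPool,
                                         MmSizeOfMdl (Base, Length),
                                         'ldmM');
}
#endif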
  6597. PMDL
  6598. MmCreateMdl (
  6599. IN PMDL MemoryDescriptorList OPTIONAL,
  6600. IN PVOID Base,
  6601. IN SIZE_T Length
  6602. )
  6603. /*++
  6604. Routine Description:
  6605. This function optionally allocates and initializes an MDL.
  6606. Arguments:
  6607. MemoryDescriptorList - Optionally supplies the address of the MDL
  6608. to initialize. If this address is supplied as NULL
  6609. an MDL is allocated from non-paged pool and
  6610. initialized.
  6611. Base - Supplies the base virtual address for the buffer.
  6612. Length - Supplies the size of the buffer in bytes.
  6613. Return Value:
  6614. Returns the address of the initialized MDL.
  6615. Environment:
  6616. Kernel mode, IRQL of DISPATCH_LEVEL or below.
  6617. --*/
  6618. {
  6619. SIZE_T MdlSize;
  6620. #if defined (_WIN64)
  6621. //
  6622. // Since the Length has to fit into the MDL's ByteCount field, ensure it
  6623. // doesn't wrap on 64-bit systems.
  6624. //
  6625. if (Length >= _4gb) {
  6626. return NULL;
  6627. }
  6628. #endif
  6629. MdlSize = MmSizeOfMdl (Base, Length);
  6630. if (!ARGUMENT_PRESENT(MemoryDescriptorList)) {
  6631. MemoryDescriptorList = (PMDL)ExAllocatePoolWithTag (NonPagedPool,
  6632. MdlSize,
  6633. 'ldmM');
  6634. if (MemoryDescriptorList == (PMDL)0) {
  6635. return NULL;
  6636. }
  6637. }
  6638. MmInitializeMdl (MemoryDescriptorList, Base, Length);
  6639. return MemoryDescriptorList;
  6640. }
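//
// Disabled, illustrative sketch: the typical pairing of the routine
// above with MmBuildMdlForNonPagedPool for a nonpaged buffer. The
// helper name is hypothetical and error handling is elided.
//
#if 0
static PMDL
MiSketchDescribeNonPagedBuffer (
    IN PVOID NonPagedBuffer,
    IN SIZE_T Length
    )
{
    PMDL Mdl;

    Mdl = MmCreateMdl (NULL, NonPagedBuffer, Length);

    if (Mdl != NULL) {

        //
        // Fill in the page frame numbers for the nonpaged range.
        //

        MmBuildMdlForNonPagedPool (Mdl);
    }

    return Mdl;
}
#endif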
  6641. BOOLEAN
  6642. MmSetAddressRangeModified (
  6643. IN PVOID Address,
  6644. IN SIZE_T Length
  6645. )
  6646. /*++
  6647. Routine Description:
  6648. This routine sets the modified bit in the PFN database for the
  6649. pages that correspond to the specified address range.
  6650. Note that the dirty bit in the PTE is cleared by this operation.
  6651. Arguments:
  6652. Address - Supplies the address of the start of the range. This
  6653. range must reside within the system cache.
  6654. Length - Supplies the length of the range.
  6655. Return Value:
  6656. TRUE if at least one PTE was dirty in the range, FALSE otherwise.
  6657. Environment:
  6658. Kernel mode. APC_LEVEL and below for pagable addresses,
  6659. DISPATCH_LEVEL and below for non-pagable addresses.
  6660. --*/
  6661. {
  6662. PMMPTE PointerPte;
  6663. PMMPTE LastPte;
  6664. PMMPFN Pfn1;
  6665. PMMPTE FlushPte;
  6666. MMPTE PteContents;
  6667. MMPTE FlushContents;
  6668. KIRQL OldIrql;
  6669. PVOID VaFlushList[MM_MAXIMUM_FLUSH_COUNT];
  6670. ULONG Count;
  6671. BOOLEAN Result;
  6672. Count = 0;
  6673. Result = FALSE;
  6674. //
  6675. // Initializing Flush* is not needed for correctness
// but without it the compiler cannot compile this code at /W4
// due to its checks for use of uninitialized variables.
  6678. //
  6679. FlushPte = NULL;
  6680. FlushContents = ZeroPte;
  6681. //
  6682. // Loop on the copy on write case until the page is only
  6683. // writable.
  6684. //
  6685. PointerPte = MiGetPteAddress (Address);
  6686. LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + Length - 1));
  6687. LOCK_PFN2 (OldIrql);
  6688. do {
  6689. PteContents = *PointerPte;
  6690. if (PteContents.u.Hard.Valid == 1) {
  6691. Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
  6692. MI_SET_MODIFIED (Pfn1, 1, 0x5);
  6693. if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
  6694. (Pfn1->u3.e1.WriteInProgress == 0)) {
  6695. MiReleasePageFileSpace (Pfn1->OriginalPte);
  6696. Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
  6697. }
  6698. #ifdef NT_UP
  6699. //
  6700. // On uniprocessor systems no need to flush if this processor
  6701. // doesn't think the PTE is dirty.
  6702. //
  6703. if (MI_IS_PTE_DIRTY (PteContents)) {
  6704. Result = TRUE;
  6705. #else //NT_UP
  6706. Result |= (BOOLEAN)(MI_IS_PTE_DIRTY (PteContents));
  6707. #endif //NT_UP
  6708. MI_SET_PTE_CLEAN (PteContents);
  6709. MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, PteContents);
  6710. FlushContents = PteContents;
  6711. FlushPte = PointerPte;
  6712. //
  6713. // Clear the write bit in the PTE so new writes can be tracked.
  6714. //
  6715. if (Count != MM_MAXIMUM_FLUSH_COUNT) {
  6716. VaFlushList[Count] = Address;
  6717. Count += 1;
  6718. }
  6719. #ifdef NT_UP
  6720. }
  6721. #endif //NT_UP
  6722. }
  6723. PointerPte += 1;
  6724. Address = (PVOID)((PCHAR)Address + PAGE_SIZE);
  6725. } while (PointerPte <= LastPte);
  6726. if (Count != 0) {
  6727. if (Count == 1) {
  6728. (VOID)KeFlushSingleTb (VaFlushList[0],
  6729. FALSE,
  6730. TRUE,
  6731. (PHARDWARE_PTE)FlushPte,
  6732. FlushContents.u.Flush);
  6733. }
  6734. else if (Count != MM_MAXIMUM_FLUSH_COUNT) {
  6735. KeFlushMultipleTb (Count,
  6736. &VaFlushList[0],
  6737. FALSE,
  6738. TRUE,
  6739. NULL,
  6740. *(PHARDWARE_PTE)&ZeroPte.u.Flush);
  6741. }
  6742. else {
  6743. KeFlushEntireTb (FALSE, TRUE);
  6744. }
  6745. }
  6746. UNLOCK_PFN2 (OldIrql);
  6747. return Result;
  6748. }
  6749. PVOID
  6750. MiCheckForContiguousMemory (
  6751. IN PVOID BaseAddress,
  6752. IN PFN_NUMBER BaseAddressPages,
  6753. IN PFN_NUMBER SizeInPages,
  6754. IN PFN_NUMBER LowestPfn,
  6755. IN PFN_NUMBER HighestPfn,
  6756. IN PFN_NUMBER BoundaryPfn,
  6757. IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute
  6758. )
  6759. /*++
  6760. Routine Description:
  6761. This routine checks to see if the physical memory mapped
  6762. by the specified BaseAddress for the specified size is
  6763. contiguous and that the first page is greater than or equal to
  6764. the specified LowestPfn and that the last page of the physical memory is
  6765. less than or equal to the specified HighestPfn.
  6766. Arguments:
  6767. BaseAddress - Supplies the base address to start checking at.
  6768. BaseAddressPages - Supplies the number of pages to scan from the
  6769. BaseAddress.
  6770. SizeInPages - Supplies the number of pages in the range.
  6771. LowestPfn - Supplies lowest PFN acceptable as a physical page.
  6772. HighestPfn - Supplies the highest PFN acceptable as a physical page.
  6773. BoundaryPfn - Supplies the PFN multiple the allocation must
  6774. not cross. 0 indicates it can cross any boundary.
  6775. CacheAttribute - Supplies the type of cache mapping that will be used
  6776. for the memory.
  6777. Return Value:
  6778. Returns the usable virtual address within the argument range that the
  6779. caller should return to his caller. NULL if there is no usable address.
  6780. Environment:
  6781. Kernel mode, memory management internal.
  6782. --*/
  6783. {
  6784. PMMPTE PointerPte;
  6785. PMMPTE LastPte;
  6786. PFN_NUMBER PreviousPage;
  6787. PFN_NUMBER Page;
  6788. PFN_NUMBER HighestStartPage;
  6789. PFN_NUMBER LastPage;
  6790. PFN_NUMBER OriginalPage;
  6791. PFN_NUMBER OriginalLastPage;
  6792. PVOID BoundaryAllocation;
  6793. PFN_NUMBER BoundaryMask;
  6794. ULONG PageCount;
  6795. MMPTE PteContents;
  6796. BoundaryMask = ~(BoundaryPfn - 1);
  6797. if (LowestPfn > HighestPfn) {
  6798. return NULL;
  6799. }
  6800. if (LowestPfn + SizeInPages <= LowestPfn) {
  6801. return NULL;
  6802. }
  6803. if (LowestPfn + SizeInPages - 1 > HighestPfn) {
  6804. return NULL;
  6805. }
  6806. if (BaseAddressPages < SizeInPages) {
  6807. return NULL;
  6808. }
  6809. if (MI_IS_PHYSICAL_ADDRESS (BaseAddress)) {
  6810. //
  6811. // All physical addresses are by definition cached and therefore do
  6812. // not qualify for our caller.
  6813. //
  6814. if (CacheAttribute != MiCached) {
  6815. return NULL;
  6816. }
  6817. OriginalPage = MI_CONVERT_PHYSICAL_TO_PFN(BaseAddress);
  6818. OriginalLastPage = OriginalPage + BaseAddressPages;
  6819. Page = OriginalPage;
  6820. LastPage = OriginalLastPage;
  6821. //
  6822. // Close the gaps, then examine the range for a fit.
  6823. //
  6824. if (Page < LowestPfn) {
  6825. Page = LowestPfn;
  6826. }
  6827. if (LastPage > HighestPfn + 1) {
  6828. LastPage = HighestPfn + 1;
  6829. }
  6830. HighestStartPage = LastPage - SizeInPages;
  6831. if (Page > HighestStartPage) {
  6832. return NULL;
  6833. }
  6834. if (BoundaryPfn != 0) {
  6835. do {
  6836. if (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask) == 0) {
  6837. //
  6838. // This portion of the range meets the alignment
  6839. // requirements.
  6840. //
  6841. break;
  6842. }
  6843. Page |= (BoundaryPfn - 1);
  6844. Page += 1;
  6845. } while (Page <= HighestStartPage);
  6846. if (Page > HighestStartPage) {
  6847. return NULL;
  6848. }
  6849. BoundaryAllocation = (PVOID)((PCHAR)BaseAddress + ((Page - OriginalPage) << PAGE_SHIFT));
  6850. //
  6851. // The request can be satisfied. Since specific alignment was
  6852. // requested, return the fit now without getting fancy.
  6853. //
  6854. return BoundaryAllocation;
  6855. }
  6856. //
  6857. // If possible return a chunk on the end to reduce fragmentation.
  6858. //
  6859. if (LastPage == OriginalLastPage) {
  6860. return (PVOID)((PCHAR)BaseAddress + ((BaseAddressPages - SizeInPages) << PAGE_SHIFT));
  6861. }
  6862. //
  6863. // The end chunk did not satisfy the requirements. The next best option
  6864. // is to return a chunk from the beginning. Since that's where the search
  6865. // began, just return the current chunk.
  6866. //
  6867. return (PVOID)((PCHAR)BaseAddress + ((Page - OriginalPage) << PAGE_SHIFT));
  6868. }
  6869. //
  6870. // Check the virtual addresses for physical contiguity.
  6871. //
  6872. PointerPte = MiGetPteAddress (BaseAddress);
  6873. LastPte = PointerPte + BaseAddressPages;
  6874. HighestStartPage = HighestPfn + 1 - SizeInPages;
  6875. PageCount = 0;
  6876. //
  6877. // Initializing PreviousPage is not needed for correctness
// but without it the compiler cannot compile this code at /W4
// due to its checks for use of uninitialized variables.
  6880. //
  6881. PreviousPage = 0;
  6882. while (PointerPte < LastPte) {
  6883. PteContents = *PointerPte;
  6884. ASSERT (PteContents.u.Hard.Valid == 1);
  6885. Page = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
  6886. //
  6887. // Before starting a new run, ensure that it
  6888. // can satisfy the location & boundary requirements (if any).
  6889. //
  6890. if (PageCount == 0) {
  6891. if ((Page >= LowestPfn) &&
  6892. (Page <= HighestStartPage) &&
  6893. ((CacheAttribute == MiCached) || (!MI_PAGE_FRAME_INDEX_MUST_BE_CACHED (Page)))) {
  6894. if (BoundaryPfn == 0) {
  6895. PageCount += 1;
  6896. }
  6897. else if (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask) == 0) {
  6898. //
  6899. // This run's physical address meets the alignment
  6900. // requirement.
  6901. //
  6902. PageCount += 1;
  6903. }
  6904. }
  6905. if (PageCount == SizeInPages) {
  6906. //
// Success - found a single page satisfying the requirements.
  6908. //
  6909. BaseAddress = MiGetVirtualAddressMappedByPte (PointerPte);
  6910. return BaseAddress;
  6911. }
  6912. PreviousPage = Page;
  6913. PointerPte += 1;
  6914. continue;
  6915. }
  6916. if (Page != PreviousPage + 1) {
  6917. //
  6918. // This page is not physically contiguous. Start over.
  6919. //
  6920. PageCount = 0;
  6921. continue;
  6922. }
  6923. PageCount += 1;
  6924. if (PageCount == SizeInPages) {
  6925. //
// Success - found a page range satisfying the requirements.
  6927. //
  6928. BaseAddress = MiGetVirtualAddressMappedByPte (PointerPte - PageCount + 1);
  6929. return BaseAddress;
  6930. }
PreviousPage = Page;
PointerPte += 1;
  6932. }
  6933. return NULL;
  6934. }
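//
// Worked example of the boundary test above, with hypothetical values:
// for BoundaryPfn == 0x10 and SizeInPages == 4, a run starting at PFN
// 0x1E fails because (0x1E ^ 0x21) & ~0xF is nonzero, and the
// Page |= (BoundaryPfn - 1); Page += 1 step advances the candidate to
// PFN 0x20, which passes. The disabled helper restates the check.
//
#if 0
static LOGICAL
MiSketchFitsInBoundary (
    IN PFN_NUMBER Page,
    IN PFN_NUMBER SizeInPages,
    IN PFN_NUMBER BoundaryPfn
    )
{
    PFN_NUMBER BoundaryMask;

    BoundaryMask = ~(BoundaryPfn - 1);

    //
    // The first and last pages of the run must fall within the same
    // BoundaryPfn-aligned window.
    //
    return (LOGICAL)(((Page ^ (Page + SizeInPages - 1)) & BoundaryMask) == 0);
}
#endif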
  6935. VOID
  6936. MmLockPagableSectionByHandle (
  6937. IN PVOID ImageSectionHandle
  6938. )
  6939. /*++
  6940. Routine Description:
  6941. This routine checks to see if the specified pages are resident in
  6942. the process's working set and if so the reference count for the
page is incremented. This allows the virtual address to be accessed
without getting a hard page fault (have to go to the disk...) except
for the extremely rare case when the page table page is removed from the
working set and migrates to the disk.
If the virtual address is that of the system wide global "cache", the
  6948. virtual address of the "locked" pages is always guaranteed to
  6949. be valid.
  6950. NOTE: This routine is not to be used for general locking of user
  6951. addresses - use MmProbeAndLockPages. This routine is intended for
  6952. well behaved system code like the file system caches which allocates
  6953. virtual addresses for mapping files AND guarantees that the mapping
  6954. will not be modified (deleted or changed) while the pages are locked.
  6955. Arguments:
  6956. ImageSectionHandle - Supplies the value returned by a previous call
  6957. to MmLockPagableDataSection. This is a pointer to
  6958. the section header for the image.
  6959. Return Value:
  6960. None.
  6961. Environment:
  6962. Kernel mode, IRQL of APC_LEVEL or below.
  6963. --*/
  6964. {
  6965. ULONG EntryCount;
  6966. ULONG OriginalCount;
  6967. PETHREAD CurrentThread;
  6968. PIMAGE_SECTION_HEADER NtSection;
  6969. PVOID BaseAddress;
  6970. ULONG SizeToLock;
  6971. PMMPTE PointerPte;
  6972. PMMPTE LastPte;
  6973. PLONG SectionLockCountPointer;
  6974. if (MI_IS_PHYSICAL_ADDRESS(ImageSectionHandle)) {
  6975. //
  6976. // No need to lock physical addresses.
  6977. //
  6978. return;
  6979. }
  6980. NtSection = (PIMAGE_SECTION_HEADER)ImageSectionHandle;
  6981. BaseAddress = SECTION_BASE_ADDRESS(NtSection);
  6982. SectionLockCountPointer = SECTION_LOCK_COUNT_POINTER (NtSection);
  6983. ASSERT (!MI_IS_SYSTEM_CACHE_ADDRESS(BaseAddress));
  6984. //
  6985. // The address must be within the system space.
  6986. //
  6987. ASSERT (BaseAddress >= MmSystemRangeStart);
  6988. SizeToLock = NtSection->SizeOfRawData;
  6989. PointerPte = MiGetPteAddress(BaseAddress);
  6990. LastPte = MiGetPteAddress((PCHAR)BaseAddress + SizeToLock - 1);
  6991. ASSERT (SizeToLock != 0);
  6992. CurrentThread = PsGetCurrentThread ();
  6993. KeEnterCriticalRegionThread (&CurrentThread->Tcb);
  6994. //
  6995. // The lock count values have the following meanings :
  6996. //
  6997. // Value of 0 means unlocked.
  6998. // Value of 1 means lock in progress by another thread.
  6999. // Value of 2 or more means locked.
  7000. //
  7001. // If the value is 1, this thread must block until the other thread's
  7002. // lock operation is complete.
  7003. //
  7004. do {
  7005. EntryCount = *SectionLockCountPointer;
  7006. if (EntryCount != 1) {
  7007. OriginalCount = InterlockedCompareExchange (SectionLockCountPointer,
  7008. EntryCount + 1,
  7009. EntryCount);
  7010. if (OriginalCount == EntryCount) {
  7011. //
  7012. // Success - this is the first thread to update.
  7013. //
  7014. ASSERT (OriginalCount != 1);
  7015. break;
  7016. }
  7017. //
  7018. // Another thread updated the count before this thread's attempt
  7019. // so it's time to start over.
  7020. //
  7021. }
  7022. else {
  7023. //
  7024. // A lock is in progress, wait for it to finish. This should be
  7025. // generally rare, and even in this case, the pulse will usually
  7026. // wake us. A timeout is used so that the wait and the pulse do
  7027. // not need to be interlocked.
  7028. //
  7029. InterlockedIncrement (&MmCollidedLockWait);
  7030. KeWaitForSingleObject (&MmCollidedLockEvent,
  7031. WrVirtualMemory,
  7032. KernelMode,
  7033. FALSE,
  7034. (PLARGE_INTEGER)&MmShortTime);
  7035. InterlockedDecrement (&MmCollidedLockWait);
  7036. }
  7037. } while (TRUE);
  7038. if (OriginalCount >= 2) {
  7039. //
  7040. // Already locked, just return.
  7041. //
  7042. KeLeaveCriticalRegionThread (&CurrentThread->Tcb);
  7043. return;
  7044. }
  7045. ASSERT (OriginalCount == 0);
  7046. ASSERT (*SectionLockCountPointer == 1);
  7047. //
  7048. // Value was 0 when the lock was obtained. It is now 1 indicating
  7049. // a lock is in progress.
  7050. //
  7051. MiLockCode (PointerPte, LastPte, MM_LOCK_BY_REFCOUNT);
  7052. //
  7053. // Set lock count to 2 (it was 1 when this started) and check
  7054. // to see if any other threads tried to lock while this was happening.
  7055. //
  7056. ASSERT (*SectionLockCountPointer == 1);
  7057. OriginalCount = InterlockedIncrement (SectionLockCountPointer);
  7058. ASSERT (OriginalCount >= 2);
  7059. if (MmCollidedLockWait != 0) {
  7060. KePulseEvent (&MmCollidedLockEvent, 0, FALSE);
  7061. }
  7062. //
  7063. // Enable user APCs now that the pulse has occurred. They had to be
  7064. // blocked to prevent any suspensions of this thread as that would
  7065. // stop all waiters indefinitely.
  7066. //
  7067. KeLeaveCriticalRegionThread (&CurrentThread->Tcb);
  7068. return;
  7069. }
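//
// Disabled, illustrative sketch of the intended calling pattern: a
// driver caches the handle returned by MmLockPagableDataSection once,
// then relocks cheaply by handle. DriverLockHandle is hypothetical.
//
#if 0
static PVOID DriverLockHandle;  // set earlier by MmLockPagableDataSection

static VOID
MiSketchRelockSection (
    VOID
    )
{
    MmLockPagableSectionByHandle (DriverLockHandle);

    //
    // ... the pagable section is now resident and locked ...
    //

    MmUnlockPagableImageSection (DriverLockHandle);
}
#endif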
  7070. VOID
  7071. MiLockCode (
  7072. IN PMMPTE FirstPte,
  7073. IN PMMPTE LastPte,
  7074. IN ULONG LockType
  7075. )
  7076. /*++
  7077. Routine Description:
  7078. This routine checks to see if the specified pages are resident in
  7079. the process's working set and if so the reference count for the
  7080. page is incremented. This allows the virtual address to be accessed
  7081. without getting a hard page fault (have to go to the disk...) except
  7082. for the extremely rare case when the page table page is removed from the
  7083. working set and migrates to the disk.
  7084. If the virtual address is that of the system wide global "cache", the
  7085. virtual address of the "locked" pages is always guaranteed to
  7086. be valid.
  7087. NOTE: This routine is not to be used for general locking of user
  7088. addresses - use MmProbeAndLockPages. This routine is intended for
  7089. well behaved system code like the file system caches which allocates
  7090. virtual addresses for mapping files AND guarantees that the mapping
  7091. will not be modified (deleted or changed) while the pages are locked.
  7092. Arguments:
  7093. FirstPte - Supplies the base address to begin locking.
  7094. LastPte - The last PTE to lock.
  7095. LockType - Supplies either MM_LOCK_BY_REFCOUNT or MM_LOCK_NONPAGE.
  7096. LOCK_BY_REFCOUNT increments the reference count to keep
  7097. the page in memory, LOCK_NONPAGE removes the page from
  7098. the working set so it's locked just like nonpaged pool.
  7099. Return Value:
  7100. None.
  7101. Environment:
  7102. Kernel mode.
  7103. --*/
  7104. {
  7105. PMMPFN Pfn1;
  7106. PMMPTE PointerPte;
  7107. MMPTE TempPte;
  7108. MMPTE PteContents;
  7109. WSLE_NUMBER WorkingSetIndex;
  7110. WSLE_NUMBER SwapEntry;
  7111. PFN_NUMBER PageFrameIndex;
  7112. KIRQL OldIrql;
  7113. KIRQL OldIrqlWs;
  7114. KIRQL DontCareIrql;
  7115. LOGICAL SessionSpace;
  7116. LOGICAL NewlyLocked;
  7117. PMMWSL WorkingSetList;
  7118. PMMSUPPORT Vm;
  7119. PETHREAD CurrentThread;
  7120. ASSERT (!MI_IS_PHYSICAL_ADDRESS(MiGetVirtualAddressMappedByPte(FirstPte)));
  7121. PointerPte = FirstPte;
  7122. CurrentThread = PsGetCurrentThread ();
  7123. SessionSpace = MI_IS_SESSION_IMAGE_ADDRESS (MiGetVirtualAddressMappedByPte(FirstPte));
  7124. if (SessionSpace == TRUE) {
  7125. Vm = &MmSessionSpace->Vm;
  7126. WorkingSetList = MmSessionSpace->Vm.VmWorkingSetList;
  7127. //
  7128. // Session space is never locked by refcount.
  7129. //
  7130. ASSERT (LockType != MM_LOCK_BY_REFCOUNT);
  7131. LOCK_SESSION_SPACE_WS (OldIrqlWs, CurrentThread);
  7132. }
  7133. else {
  7134. //
  7135. // Initializing these is not needed for correctness
// but without it the compiler cannot compile this code at /W4
// due to its checks for use of uninitialized variables.
  7138. //
  7139. WorkingSetList = NULL;
  7140. Vm = NULL;
  7141. LOCK_SYSTEM_WS (OldIrqlWs, CurrentThread);
  7142. }
  7143. LOCK_PFN (OldIrql);
  7144. MmLockedCode += 1 + LastPte - FirstPte;
  7145. do {
  7146. PteContents = *PointerPte;
  7147. ASSERT (PteContents.u.Long != ZeroKernelPte.u.Long);
  7148. if (PteContents.u.Hard.Valid == 1) {
  7149. //
  7150. // This address is already in the system (or session) working set.
  7151. //
  7152. Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
  7153. //
  7154. // Up the reference count so the page cannot be released.
  7155. //
  7156. MI_ADD_LOCKED_PAGE_CHARGE (Pfn1, 36);
  7157. Pfn1->u3.e2.ReferenceCount += 1;
  7158. if (LockType != MM_LOCK_BY_REFCOUNT) {
  7159. //
  7160. // If the page is in the system working set, remove it.
  7161. // The system working set lock MUST be owned to check to
  7162. // see if this page is in the working set or not. This
  7163. // is because the pager may have just released the PFN lock,
  7164. // acquired the system lock and is now trying to add the
  7165. // page to the system working set.
  7166. //
  7167. // If the page is in the SESSION working set, it cannot be
  7168. // removed as all these pages are carefully accounted for.
  7169. // Instead move it to the locked portion of the working set
  7170. // if it is not there already.
  7171. //
  7172. if (Pfn1->u1.WsIndex != 0) {
  7173. UNLOCK_PFN (APC_LEVEL);
  7174. if (SessionSpace == TRUE) {
  7175. WorkingSetIndex = MiLocateWsle (
  7176. MiGetVirtualAddressMappedByPte(PointerPte),
  7177. WorkingSetList,
  7178. Pfn1->u1.WsIndex);
  7179. if (WorkingSetIndex >= WorkingSetList->FirstDynamic) {
  7180. SwapEntry = WorkingSetList->FirstDynamic;
  7181. if (WorkingSetIndex != WorkingSetList->FirstDynamic) {
  7182. //
  7183. // Swap this entry with the one at first
  7184. // dynamic. Note that the working set index
  7185. // in the PTE is updated here as well.
  7186. //
  7187. MiSwapWslEntries (WorkingSetIndex,
  7188. SwapEntry,
  7189. Vm);
  7190. }
  7191. WorkingSetList->FirstDynamic += 1;
  7192. NewlyLocked = TRUE;
  7193. //
  7194. // Indicate that the page is now locked.
  7195. //
  7196. MmSessionSpace->Wsle[SwapEntry].u1.e1.LockedInWs = 1;
  7197. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_LOCK_CODE2, 1);
  7198. MmSessionSpace->NonPagablePages += 1;
  7199. LOCK_PFN (DontCareIrql);
  7200. MM_BUMP_COUNTER(25, 1);
  7201. Pfn1->u1.WsIndex = SwapEntry;
  7202. }
  7203. else {
  7204. NewlyLocked = FALSE;
  7205. ASSERT (MmSessionSpace->Wsle[WorkingSetIndex].u1.e1.LockedInWs == 1);
  7206. LOCK_PFN (DontCareIrql);
  7207. }
  7208. }
  7209. else {
  7210. NewlyLocked = TRUE;
  7211. MiRemoveWsle (Pfn1->u1.WsIndex, MmSystemCacheWorkingSetList);
  7212. MiReleaseWsle (Pfn1->u1.WsIndex, &MmSystemCacheWs);
  7213. MI_SET_PTE_IN_WORKING_SET (PointerPte, 0);
  7214. LOCK_PFN (DontCareIrql);
  7215. MM_BUMP_COUNTER(29, 1);
  7216. MI_ZERO_WSINDEX (Pfn1);
  7217. }
  7218. //
  7219. // Adjust available pages as this page is now not in any
  7220. // working set, just like a non-paged pool page.
  7221. //
  7222. if (NewlyLocked == TRUE) {
  7223. MmResidentAvailablePages -= 1;
  7224. if (Pfn1->u3.e1.PrototypePte == 0) {
  7225. MmTotalSystemDriverPages -= 1;
  7226. }
  7227. }
  7228. }
  7229. ASSERT (Pfn1->u3.e2.ReferenceCount > 1);
  7230. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (Pfn1, 37);
  7231. }
  7232. }
  7233. else if (PteContents.u.Soft.Prototype == 1) {
  7234. //
  7235. // Page is not in memory and it is a prototype.
  7236. //
  7237. MiMakeSystemAddressValidPfnSystemWs (
  7238. MiGetVirtualAddressMappedByPte(PointerPte));
  7239. continue;
  7240. }
  7241. else if (PteContents.u.Soft.Transition == 1) {
  7242. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents);
  7243. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  7244. if ((Pfn1->u3.e1.ReadInProgress) ||
  7245. (Pfn1->u4.InPageError)) {
  7246. //
  7247. // Page read is ongoing, force a collided fault.
  7248. //
  7249. MiMakeSystemAddressValidPfnSystemWs (
  7250. MiGetVirtualAddressMappedByPte(PointerPte));
  7251. continue;
  7252. }
  7253. //
  7254. // Paged pool is trimmed without regard to sharecounts.
  7255. // This means a paged pool PTE can be in transition while
  7256. // the page is still marked active.
  7257. //
  7258. if (Pfn1->u3.e1.PageLocation == ActiveAndValid) {
  7259. ASSERT (((Pfn1->PteAddress >= MiGetPteAddress(MmPagedPoolStart)) &&
  7260. (Pfn1->PteAddress <= MiGetPteAddress(MmPagedPoolEnd))) ||
  7261. ((Pfn1->PteAddress >= MiGetPteAddress(MmSpecialPoolStart)) &&
  7262. (Pfn1->PteAddress <= MiGetPteAddress(MmSpecialPoolEnd))));
  7263. //
  7264. // Don't increment the valid PTE count for the
  7265. // paged pool page.
  7266. //
  7267. ASSERT (Pfn1->u2.ShareCount != 0);
  7268. ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
  7269. Pfn1->u2.ShareCount += 1;
  7270. }
  7271. else {
  7272. if (MmAvailablePages == 0) {
  7273. //
  7274. // This can only happen if the system is utilizing
  7275. // a hardware compression cache. This ensures that
  7276. // only a safe amount of the compressed virtual cache
  7277. // is directly mapped so that if the hardware gets
  7278. // into trouble, we can bail it out.
  7279. //
  7280. // Just unlock everything here to give the compression
  7281. // reaper a chance to ravage pages and then retry.
  7282. //
  7283. UNLOCK_PFN (APC_LEVEL);
  7284. if (SessionSpace == TRUE) {
  7285. UNLOCK_SESSION_SPACE_WS (OldIrqlWs);
  7286. LOCK_SESSION_SPACE_WS (OldIrqlWs, CurrentThread);
  7287. }
  7288. else {
  7289. UNLOCK_SYSTEM_WS (OldIrqlWs);
LOCK_SYSTEM_WS (OldIrqlWs, CurrentThread);
  7291. }
  7292. LOCK_PFN (DontCareIrql);
  7293. continue;
  7294. }
  7295. MiUnlinkPageFromList (Pfn1);
  7296. //
  7297. // Set the reference count and share counts to 1. Note the
  7298. // reference count may be 1 already if a modified page
  7299. // write is underway. The systemwide locked page charges
  7300. // are correct in either case and nothing needs to be done
  7301. // just yet.
  7302. //
  7303. Pfn1->u3.e2.ReferenceCount += 1;
  7304. Pfn1->u2.ShareCount = 1;
  7305. }
  7306. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  7307. Pfn1->u3.e1.CacheAttribute = MiCached;
  7308. MI_MAKE_VALID_PTE (TempPte,
  7309. PageFrameIndex,
  7310. Pfn1->OriginalPte.u.Soft.Protection,
  7311. PointerPte);
  7312. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  7313. //
// Increment the reference count one for putting it in the
  7315. // working set list and one for locking it for I/O.
  7316. //
  7317. if (LockType == MM_LOCK_BY_REFCOUNT) {
  7318. //
  7319. // Lock the page in the working set by upping the
  7320. // reference count.
  7321. //
  7322. MI_ADD_LOCKED_PAGE_CHARGE (Pfn1, 34);
  7323. Pfn1->u3.e2.ReferenceCount += 1;
  7324. Pfn1->u1.Event = (PVOID) CurrentThread;
  7325. UNLOCK_PFN (APC_LEVEL);
  7326. WorkingSetIndex = MiLocateAndReserveWsle (&MmSystemCacheWs);
  7327. MiUpdateWsle (&WorkingSetIndex,
  7328. MiGetVirtualAddressMappedByPte (PointerPte),
  7329. MmSystemCacheWorkingSetList,
  7330. Pfn1);
  7331. MI_SET_PTE_IN_WORKING_SET (PointerPte, WorkingSetIndex);
  7332. LOCK_PFN (DontCareIrql);
  7333. }
  7334. else {
  7335. //
  7336. // The wsindex field must be zero because the
  7337. // page is not in the system (or session) working set.
  7338. //
  7339. ASSERT (Pfn1->u1.WsIndex == 0);
  7340. //
  7341. // Adjust available pages as this page is now not in any
  7342. // working set, just like a non-paged pool page. On entry
  7343. // this page was in transition so it was part of the
  7344. // available pages by definition.
  7345. //
  7346. MmResidentAvailablePages -= 1;
  7347. if (Pfn1->u3.e1.PrototypePte == 0) {
  7348. MmTotalSystemDriverPages -= 1;
  7349. }
  7350. if (SessionSpace == TRUE) {
  7351. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_LOCK_CODE1, 1);
  7352. MmSessionSpace->NonPagablePages += 1;
  7353. }
  7354. MM_BUMP_COUNTER(26, 1);
  7355. }
  7356. }
  7357. else {
  7358. //
  7359. // Page is not in memory.
  7360. //
  7361. MiMakeSystemAddressValidPfnSystemWs (
  7362. MiGetVirtualAddressMappedByPte(PointerPte));
  7363. continue;
  7364. }
  7365. PointerPte += 1;
  7366. } while (PointerPte <= LastPte);
  7367. UNLOCK_PFN (OldIrql);
  7368. if (SessionSpace == TRUE) {
  7369. UNLOCK_SESSION_SPACE_WS (OldIrqlWs);
  7370. }
  7371. else {
  7372. UNLOCK_SYSTEM_WS (OldIrqlWs);
  7373. }
  7374. return;
  7375. }
  7376. NTSTATUS
  7377. MmGetSectionRange (
  7378. IN PVOID AddressWithinSection,
  7379. OUT PVOID *StartingSectionAddress,
  7380. OUT PULONG SizeofSection
  7381. )
  7382. {
  7383. ULONG Span;
  7384. PKTHREAD CurrentThread;
  7385. PKLDR_DATA_TABLE_ENTRY DataTableEntry;
  7386. ULONG i;
  7387. PIMAGE_NT_HEADERS NtHeaders;
  7388. PIMAGE_SECTION_HEADER NtSection;
  7389. NTSTATUS Status;
  7390. ULONG_PTR Rva;
  7391. PAGED_CODE();
  7392. //
// Search the loaded module list for the data table entry that describes
// the DLL containing this address. It is possible that an entry is not in
  7395. // the list if a failure occurred at a point in loading the DLL just before
  7396. // the data table entry was generated.
  7397. //
  7398. Status = STATUS_NOT_FOUND;
  7399. CurrentThread = KeGetCurrentThread ();
  7400. KeEnterCriticalRegionThread (CurrentThread);
  7401. ExAcquireResourceSharedLite (&PsLoadedModuleResource, TRUE);
  7402. DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, TRUE);
  7403. if (DataTableEntry) {
  7404. Rva = (ULONG_PTR)((PUCHAR)AddressWithinSection - (ULONG_PTR)DataTableEntry->DllBase);
  7405. NtHeaders = (PIMAGE_NT_HEADERS)RtlImageNtHeader(DataTableEntry->DllBase);
  7406. NtSection = (PIMAGE_SECTION_HEADER)((PCHAR)NtHeaders +
  7407. sizeof(ULONG) +
  7408. sizeof(IMAGE_FILE_HEADER) +
  7409. NtHeaders->FileHeader.SizeOfOptionalHeader
  7410. );
  7411. for (i = 0; i < NtHeaders->FileHeader.NumberOfSections; i += 1) {
  7412. //
  7413. // Generally, SizeOfRawData is larger than VirtualSize for each
  7414. // section because it includes the padding to get to the subsection
  7415. // alignment boundary. However, on MP kernels where we link with
  7416. // subsection alignment == native page alignment, the linker will
  7417. // have VirtualSize be much larger than SizeOfRawData because it
  7418. // will account for all the bss.
  7419. //
  7420. Span = NtSection->SizeOfRawData;
  7421. if (Span < NtSection->Misc.VirtualSize) {
  7422. Span = NtSection->Misc.VirtualSize;
  7423. }
  7424. if ((Rva >= NtSection->VirtualAddress) &&
  7425. (Rva < NtSection->VirtualAddress + Span)) {
  7426. //
  7427. // Found it.
  7428. //
  7429. *StartingSectionAddress = (PVOID)
  7430. ((PCHAR) DataTableEntry->DllBase + NtSection->VirtualAddress);
  7431. *SizeofSection = NtSection->SizeOfRawData;
  7432. Status = STATUS_SUCCESS;
  7433. break;
  7434. }
  7435. NtSection += 1;
  7436. }
  7437. }
  7438. ExReleaseResourceLite (&PsLoadedModuleResource);
  7439. KeLeaveCriticalRegionThread (CurrentThread);
  7440. return Status;
  7441. }
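//
// Disabled, illustrative sketch of the routine above: recover the
// extent of the PE section containing a code address. The helper and
// its argument are hypothetical.
//
#if 0
static VOID
MiSketchSectionRange (
    IN PVOID CodeAddress
    )
{
    PVOID SectionBase;
    ULONG SectionSize;

    if (NT_SUCCESS (MmGetSectionRange (CodeAddress,
                                       &SectionBase,
                                       &SectionSize))) {

        //
        // SectionBase and SectionSize now span the image section
        // containing CodeAddress.
        //
    }
}
#endif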
  7442. PVOID
  7443. MmLockPagableDataSection (
  7444. IN PVOID AddressWithinSection
  7445. )
  7446. /*++
  7447. Routine Description:
This function locks the entire section that contains the specified
address in memory. This allows pagable code to be brought into
  7450. memory and to be used as if the code was not really pagable. This
  7451. should not be done with a high degree of frequency.
  7452. Arguments:
  7453. AddressWithinSection - Supplies the address of a function
  7454. contained within a section that should be brought in and locked
  7455. in memory.
  7456. Return Value:
  7457. This function returns a value to be used in a subsequent call to
  7458. MmUnlockPagableImageSection.
  7459. --*/
  7460. {
  7461. PLONG SectionLockCountPointer;
  7462. PKTHREAD CurrentThread;
  7463. PKLDR_DATA_TABLE_ENTRY DataTableEntry;
  7464. ULONG i;
  7465. PIMAGE_NT_HEADERS NtHeaders;
  7466. PIMAGE_SECTION_HEADER NtSection;
  7467. PIMAGE_SECTION_HEADER FoundSection;
  7468. ULONG_PTR Rva;
  7469. PAGED_CODE();
  7470. if (MI_IS_PHYSICAL_ADDRESS(AddressWithinSection)) {
  7471. //
  7472. // Physical address, just return that as the handle.
  7473. //
  7474. return AddressWithinSection;
  7475. }
  7476. //
// Search the loaded module list for the data table entry that describes
// the DLL containing this address. It is possible that an entry is not in
  7479. // the list if a failure occurred at a point in loading the DLL just before
  7480. // the data table entry was generated.
  7481. //
  7482. FoundSection = NULL;
  7483. CurrentThread = KeGetCurrentThread ();
  7484. KeEnterCriticalRegionThread (CurrentThread);
  7485. ExAcquireResourceSharedLite (&PsLoadedModuleResource, TRUE);
  7486. DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, TRUE);
  7487. Rva = (ULONG_PTR)((PUCHAR)AddressWithinSection - (ULONG_PTR)DataTableEntry->DllBase);
  7488. NtHeaders = (PIMAGE_NT_HEADERS)RtlImageNtHeader(DataTableEntry->DllBase);
  7489. NtSection = (PIMAGE_SECTION_HEADER)((ULONG_PTR)NtHeaders +
  7490. sizeof(ULONG) +
  7491. sizeof(IMAGE_FILE_HEADER) +
  7492. NtHeaders->FileHeader.SizeOfOptionalHeader
  7493. );
  7494. for (i = 0; i < NtHeaders->FileHeader.NumberOfSections; i += 1) {
  7495. if ( Rva >= NtSection->VirtualAddress &&
  7496. Rva < NtSection->VirtualAddress + NtSection->SizeOfRawData ) {
  7497. FoundSection = NtSection;
  7498. if (SECTION_BASE_ADDRESS(NtSection) != ((PUCHAR)DataTableEntry->DllBase +
  7499. NtSection->VirtualAddress)) {
  7500. //
  7501. // Overwrite the PointerToRelocations field (and on Win64, the
  7502. // PointerToLinenumbers field also) so that it contains
  7503. // the Va of this section.
  7504. //
  7505. // NumberOfRelocations & NumberOfLinenumbers contains
  7506. // the Lock Count for the section.
  7507. //
  7508. SECTION_BASE_ADDRESS(NtSection) = ((PUCHAR)DataTableEntry->DllBase +
  7509. NtSection->VirtualAddress);
  7510. SectionLockCountPointer = SECTION_LOCK_COUNT_POINTER (NtSection);
  7511. *SectionLockCountPointer = 0;
  7512. }
  7513. //
  7514. // Now lock in the code
  7515. //
  7516. #if DBG
  7517. if (MmDebug & MM_DBG_LOCK_CODE) {
  7518. SectionLockCountPointer = SECTION_LOCK_COUNT_POINTER (NtSection);
  7519. DbgPrint("MM Lock %wZ %8s %p -> %p : %p %3ld.\n",
  7520. &DataTableEntry->BaseDllName,
  7521. NtSection->Name,
  7522. AddressWithinSection,
  7523. NtSection,
  7524. SECTION_BASE_ADDRESS(NtSection),
  7525. *SectionLockCountPointer);
  7526. }
  7527. #endif //DBG
  7528. MmLockPagableSectionByHandle ((PVOID)NtSection);
  7529. break;
  7530. }
  7531. NtSection += 1;
  7532. }
  7533. ExReleaseResourceLite (&PsLoadedModuleResource);
  7534. KeLeaveCriticalRegionThread (CurrentThread);
  7535. if (!FoundSection) {
  7536. KeBugCheckEx (MEMORY_MANAGEMENT,
  7537. 0x1234,
  7538. (ULONG_PTR)AddressWithinSection,
  7539. 0,
  7540. 0);
  7541. }
  7542. return (PVOID)FoundSection;
  7543. }
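//
// Disabled, illustrative sketch of the canonical driver usage of the
// routine above: pass the address of any function or datum placed in a
// pagable section, keep the returned handle, and unlock when done.
// MiSketchPagableRoutine and its section placement are hypothetical.
//
#if 0
VOID MiSketchPagableRoutine (VOID);
#pragma alloc_text(PAGE, MiSketchPagableRoutine)

static VOID
MiSketchLockUnlock (
    VOID
    )
{
    PVOID Handle;

    Handle = MmLockPagableDataSection ((PVOID)MiSketchPagableRoutine);

    //
    // The entire PAGE section is now resident and locked.
    //

    MmUnlockPagableImageSection (Handle);
}
#endif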
  7544. PKLDR_DATA_TABLE_ENTRY
  7545. MiLookupDataTableEntry (
  7546. IN PVOID AddressWithinSection,
  7547. IN ULONG ResourceHeld
  7548. )
  7549. /*++
  7550. Routine Description:
This function locates the data table entry that maps the specified address.
  7552. Arguments:
  7553. AddressWithinSection - Supplies the address of a function contained
  7554. within the desired module.
  7555. ResourceHeld - Supplies TRUE if the loaded module resource is already held,
  7556. FALSE if not.
  7557. Return Value:
  7558. The address of the loaded module list data table entry that maps the
  7559. argument address.
  7560. --*/
  7561. {
  7562. PKTHREAD CurrentThread;
  7563. PKLDR_DATA_TABLE_ENTRY DataTableEntry;
  7564. PKLDR_DATA_TABLE_ENTRY FoundEntry;
  7565. PLIST_ENTRY NextEntry;
  7566. PAGED_CODE();
  7567. FoundEntry = NULL;
  7568. //
// Search the loaded module list for the data table entry that describes
// the DLL containing this address. It is possible that an entry is not in
  7571. // the list if a failure occurred at a point in loading the DLL just before
  7572. // the data table entry was generated.
  7573. //
  7574. if (!ResourceHeld) {
  7575. CurrentThread = KeGetCurrentThread ();
  7576. KeEnterCriticalRegionThread (CurrentThread);
  7577. ExAcquireResourceSharedLite (&PsLoadedModuleResource, TRUE);
  7578. }
  7579. else {
  7580. CurrentThread = NULL;
  7581. }
  7582. NextEntry = PsLoadedModuleList.Flink;
  7583. do {
  7584. DataTableEntry = CONTAINING_RECORD(NextEntry,
  7585. KLDR_DATA_TABLE_ENTRY,
  7586. InLoadOrderLinks);
  7587. //
  7588. // Locate the loaded module that contains this address.
  7589. //
  7590. if ( AddressWithinSection >= DataTableEntry->DllBase &&
  7591. AddressWithinSection < (PVOID)((PUCHAR)DataTableEntry->DllBase+DataTableEntry->SizeOfImage) ) {
  7592. FoundEntry = DataTableEntry;
  7593. break;
  7594. }
  7595. NextEntry = NextEntry->Flink;
  7596. } while (NextEntry != &PsLoadedModuleList);
  7597. if (CurrentThread != NULL) {
  7598. ExReleaseResourceLite (&PsLoadedModuleResource);
  7599. KeLeaveCriticalRegionThread (CurrentThread);
  7600. }
  7601. return FoundEntry;
  7602. }
  7603. VOID
  7604. MmUnlockPagableImageSection (
  7605. IN PVOID ImageSectionHandle
  7606. )
  7607. /*++
  7608. Routine Description:
  7609. This function unlocks from memory, the pages locked by a preceding call to
  7610. MmLockPagableDataSection.
  7611. Arguments:
  7612. ImageSectionHandle - Supplies the value returned by a previous call
  7613. to MmLockPagableDataSection.
  7614. Return Value:
  7615. None.
  7616. --*/
  7617. {
  7618. PKTHREAD CurrentThread;
  7619. PIMAGE_SECTION_HEADER NtSection;
  7620. PMMPTE PointerPte;
  7621. PMMPTE LastPte;
  7622. PFN_NUMBER PageFrameIndex;
  7623. PMMPFN Pfn1;
  7624. KIRQL OldIrql;
  7625. PVOID BaseAddress;
  7626. ULONG SizeToUnlock;
  7627. ULONG Count;
  7628. PLONG SectionLockCountPointer;
  7629. if (MI_IS_PHYSICAL_ADDRESS(ImageSectionHandle)) {
  7630. //
  7631. // No need to unlock physical addresses.
  7632. //
  7633. return;
  7634. }
  7635. NtSection = (PIMAGE_SECTION_HEADER)ImageSectionHandle;
  7636. //
  7637. // Address must be in the system working set.
  7638. //
  7639. BaseAddress = SECTION_BASE_ADDRESS(NtSection);
  7640. SectionLockCountPointer = SECTION_LOCK_COUNT_POINTER (NtSection);
  7641. SizeToUnlock = NtSection->SizeOfRawData;
  7642. PointerPte = MiGetPteAddress(BaseAddress);
  7643. LastPte = MiGetPteAddress((PCHAR)BaseAddress + SizeToUnlock - 1);
  7644. CurrentThread = KeGetCurrentThread ();
  7645. //
  7646. // Block user APCs as the initial decrement below could push the count to 1.
  7647. // This puts this thread into the critical path that must finish as all
  7648. // other threads trying to lock the section will be waiting for this thread.
  7649. // Entering a critical region here ensures that a suspend cannot stop us.
  7650. //
  7651. KeEnterCriticalRegionThread (CurrentThread);
  7652. Count = InterlockedDecrement (SectionLockCountPointer);
  7653. if (Count < 1) {
  7654. KeBugCheckEx (MEMORY_MANAGEMENT,
  7655. 0x1010,
  7656. (ULONG_PTR)BaseAddress,
  7657. (ULONG_PTR)NtSection,
  7658. *SectionLockCountPointer);
  7659. }
  7660. if (Count != 1) {
  7661. KeLeaveCriticalRegionThread (CurrentThread);
  7662. return;
  7663. }
  7664. LOCK_PFN2 (OldIrql);
  7665. do {
  7666. ASSERT (PointerPte->u.Hard.Valid == 1);
  7667. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  7668. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  7669. ASSERT (Pfn1->u3.e2.ReferenceCount > 1);
  7670. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (Pfn1, 37);
  7671. PointerPte += 1;
  7672. } while (PointerPte <= LastPte);
  7673. UNLOCK_PFN2 (OldIrql);
  7674. ASSERT (*SectionLockCountPointer == 1);
  7675. Count = InterlockedDecrement (SectionLockCountPointer);
  7676. ASSERT (Count == 0);
  7677. if (MmCollidedLockWait != 0) {
  7678. KePulseEvent (&MmCollidedLockEvent, 0, FALSE);
  7679. }
  7680. //
  7681. // Enable user APCs now that the pulse has occurred. They had to be
  7682. // blocked to prevent any suspensions of this thread as that would
  7683. // stop all waiters indefinitely.
  7684. //
  7685. KeLeaveCriticalRegionThread (CurrentThread);
  7686. return;
  7687. }
  7688. BOOLEAN
  7689. MmIsRecursiveIoFault (
  7690. VOID
  7691. )
  7692. /*++
  7693. Routine Description:
  7694. This function examines the thread's page fault clustering information
  7695. and determines if the current page fault is occurring during an I/O
  7696. operation.
  7697. Arguments:
  7698. None.
  7699. Return Value:
  7700. Returns TRUE if the fault is occurring during an I/O operation,
  7701. FALSE otherwise.
  7702. --*/
  7703. {
  7704. PETHREAD Thread;
  7705. Thread = PsGetCurrentThread ();
  7706. return (BOOLEAN)(Thread->DisablePageFaultClustering |
  7707. Thread->ForwardClusterOnly);
  7708. }
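//
// Disabled, illustrative sketch: a file system can consult the routine
// above in its fault path to avoid recursing into paging I/O. The
// helper and the bail-out status chosen are hypothetical.
//
#if 0
static NTSTATUS
MiSketchFaultGuard (
    VOID
    )
{
    if (MmIsRecursiveIoFault ()) {
        return STATUS_FILE_LOCK_CONFLICT;   // hypothetical bail-out
    }

    return STATUS_SUCCESS;
}
#endif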
  7709. VOID
  7710. MmMapMemoryDumpMdl (
  7711. IN OUT PMDL MemoryDumpMdl
  7712. )
  7713. /*++
  7714. Routine Description:
  7715. For use by crash dump routine ONLY. Maps an MDL into a fixed
  7716. portion of the address space. Only 1 MDL can be mapped at a
  7717. time.
  7718. Arguments:
  7719. MemoryDumpMdl - Supplies the MDL to map.
  7720. Return Value:
  7721. None, fields in MDL updated.
  7722. --*/
  7723. {
  7724. PFN_NUMBER NumberOfPages;
  7725. PMMPTE PointerPte;
  7726. PCHAR BaseVa;
  7727. MMPTE TempPte;
  7728. PPFN_NUMBER Page;
  7729. NumberOfPages = BYTES_TO_PAGES (MemoryDumpMdl->ByteCount + MemoryDumpMdl->ByteOffset);
  7730. ASSERT (NumberOfPages <= 16);
  7731. PointerPte = MmCrashDumpPte;
  7732. BaseVa = (PCHAR)MiGetVirtualAddressMappedByPte(PointerPte);
  7733. MemoryDumpMdl->MappedSystemVa = (PCHAR)BaseVa + MemoryDumpMdl->ByteOffset;
  7734. TempPte = ValidKernelPte;
  7735. Page = (PPFN_NUMBER)(MemoryDumpMdl + 1);
  7736. //
  7737. // If the pages don't span the entire dump virtual address range,
  7738. // build a barrier. Otherwise use the default barrier provided at the
  7739. // end of the dump virtual address range.
  7740. //
  7741. if (NumberOfPages < 16) {
  7742. (PointerPte + NumberOfPages)->u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
  7743. KiFlushSingleTb (TRUE, BaseVa + (NumberOfPages << PAGE_SHIFT));
  7744. }
  7745. do {
  7746. TempPte.u.Hard.PageFrameNumber = *Page;
  7747. //
  7748. // Note this PTE may be valid or invalid prior to the overwriting here.
  7749. //
  7750. *PointerPte = TempPte;
  7751. KiFlushSingleTb (TRUE, BaseVa);
  7752. Page += 1;
  7753. PointerPte += 1;
  7754. BaseVa += PAGE_SIZE;
  7755. NumberOfPages -= 1;
  7756. } while (NumberOfPages != 0);
  7757. return;
  7758. }
  7759. VOID
  7760. MmReleaseDumpAddresses (
  7761. IN PFN_NUMBER Pages
  7762. )
  7763. /*++
  7764. Routine Description:
  7765. For use by hibernate routine ONLY. Puts zeros back into the
  7766. used dump PTEs.
  7767. Arguments:
Pages - Supplies the number of dump PTEs to zero.
  7769. Return Value:
  7770. None
  7771. --*/
  7772. {
  7773. PMMPTE PointerPte;
  7774. PCHAR BaseVa;
  7775. PointerPte = MmCrashDumpPte;
  7776. BaseVa = (PCHAR)MiGetVirtualAddressMappedByPte(PointerPte);
  7777. while (Pages) {
  7778. PointerPte->u.Long = MM_ZERO_PTE;
  7779. KiFlushSingleTb (TRUE, BaseVa);
  7780. PointerPte += 1;
  7781. BaseVa += PAGE_SIZE;
  7782. Pages -= 1;
  7783. }
  7784. }
  7785. NTSTATUS
  7786. MmSetBankedSection (
  7787. IN HANDLE ProcessHandle,
  7788. IN PVOID VirtualAddress,
  7789. IN ULONG BankLength,
  7790. IN BOOLEAN ReadWriteBank,
  7791. IN PBANKED_SECTION_ROUTINE BankRoutine,
  7792. IN PVOID Context
  7793. )
  7794. /*++
  7795. Routine Description:
  7796. This function declares a mapped video buffer as a banked
section. This allows banked video devices to be supported (i.e., even
though the video controller has a megabyte or so of memory,
only a small bank (like 64k) can be mapped at any one time).
  7800. In order to overcome this problem, the pager handles faults
  7801. to this memory, unmaps the current bank, calls off to the
  7802. video driver and then maps in the new bank.
  7803. This function creates the necessary structures to allow the
  7804. video driver to be called from the pager.
  7805. ********************* NOTE NOTE NOTE *************************
  7806. At this time only read/write banks are supported!
  7807. Arguments:
  7808. ProcessHandle - Supplies a handle to the process in which to
  7809. support the banked video function.
  7810. VirtualAddress - Supplies the virtual address where the video
  7811. buffer is mapped in the specified process.
  7812. BankLength - Supplies the size of the bank.
  7813. ReadWriteBank - Supplies TRUE if the bank is read and write.
  7814. BankRoutine - Supplies a pointer to the routine that should be
  7815. called by the pager.
  7816. Context - Supplies a context to be passed by the pager to the
  7817. BankRoutine.
  7818. Return Value:
  7819. Returns the status of the function.
  7820. Environment:
  7821. Kernel mode, APC_LEVEL or below.
  7822. --*/
  7823. {
  7824. KAPC_STATE ApcState;
  7825. NTSTATUS Status;
  7826. PEPROCESS Process;
  7827. PMMVAD Vad;
  7828. PMMPTE PointerPte;
  7829. PMMPTE LastPte;
  7830. MMPTE TempPte;
  7831. ULONG_PTR size;
  7832. LONG count;
  7833. ULONG NumberOfPtes;
  7834. PMMBANKED_SECTION Bank;
  7835. PAGED_CODE ();
  7836. UNREFERENCED_PARAMETER (ReadWriteBank);
  7837. //
  7838. // Reference the specified process handle for VM_OPERATION access.
  7839. //
  7840. Status = ObReferenceObjectByHandle ( ProcessHandle,
  7841. PROCESS_VM_OPERATION,
  7842. PsProcessType,
  7843. KernelMode,
  7844. (PVOID *)&Process,
  7845. NULL );
  7846. if (!NT_SUCCESS(Status)) {
  7847. return Status;
  7848. }
  7849. KeStackAttachProcess (&Process->Pcb, &ApcState);
  7850. //
  7851. // Get the address creation mutex to block multiple threads from
  7852. // creating or deleting address space at the same time and
  7853. // get the working set mutex so virtual address descriptors can
  7854. // be inserted and walked. Block APCs so an APC which takes a page
  7855. // fault does not corrupt various structures.
  7856. //
  7857. LOCK_ADDRESS_SPACE (Process);
  7858. //
  7859. // Make sure the address space was not deleted, if so, return an error.
  7860. //
  7861. if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
  7862. Status = STATUS_PROCESS_IS_TERMINATING;
  7863. goto ErrorReturn;
  7864. }
  7865. Vad = MiLocateAddress (VirtualAddress);
  7866. if ((Vad == NULL) ||
  7867. (Vad->StartingVpn != MI_VA_TO_VPN (VirtualAddress)) ||
  7868. (Vad->u.VadFlags.PhysicalMapping == 0)) {
  7869. Status = STATUS_NOT_MAPPED_DATA;
  7870. goto ErrorReturn;
  7871. }
  7872. size = PAGE_SIZE + ((Vad->EndingVpn - Vad->StartingVpn) << PAGE_SHIFT);
  7873. if ((size % BankLength) != 0) {
  7874. Status = STATUS_INVALID_VIEW_SIZE;
  7875. goto ErrorReturn;
  7876. }
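//
// Editor's note: the loop below computes count = log2(BankLength).
// BankLength is assumed (though not validated here) to be a power of
// two, since the BankShift arithmetic that follows depends on it.
//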
  7877. count = -1;
  7878. NumberOfPtes = BankLength;
  7879. do {
  7880. NumberOfPtes = NumberOfPtes >> 1;
  7881. count += 1;
  7882. } while (NumberOfPtes != 0);
  7883. //
  7884. // Turn VAD into Banked VAD
  7885. //
  7886. NumberOfPtes = BankLength >> PAGE_SHIFT;
  7887. Bank = ExAllocatePoolWithTag (NonPagedPool,
  7888. sizeof (MMBANKED_SECTION) +
  7889. (NumberOfPtes - 1) * sizeof(MMPTE),
  7890. 'kBmM');
  7891. if (Bank == NULL) {
  7892. Status = STATUS_INSUFFICIENT_RESOURCES;
  7893. goto ErrorReturn;
  7894. }
  7895. Bank->BankShift = PTE_SHIFT + count - PAGE_SHIFT;
  7896. PointerPte = MiGetPteAddress(MI_VPN_TO_VA (Vad->StartingVpn));
  7897. ASSERT (PointerPte->u.Hard.Valid == 1);
  7898. Bank->BasePhysicalPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  7899. Bank->BasedPte = PointerPte;
  7900. Bank->BankSize = BankLength;
  7901. Bank->BankedRoutine = BankRoutine;
  7902. Bank->Context = Context;
  7903. Bank->CurrentMappedPte = PointerPte;
  7904. //
  7905. // Build the template PTEs structure.
  7906. //
  7907. count = 0;
  7908. TempPte = ZeroPte;
  7909. MI_MAKE_VALID_PTE (TempPte,
  7910. Bank->BasePhysicalPage,
  7911. MM_READWRITE,
  7912. PointerPte);
  7913. if (TempPte.u.Hard.Write) {
  7914. MI_SET_PTE_DIRTY (TempPte);
  7915. }
  7916. do {
  7917. Bank->BankTemplate[count] = TempPte;
  7918. TempPte.u.Hard.PageFrameNumber += 1;
  7919. count += 1;
  7920. } while ((ULONG)count < NumberOfPtes );
  7921. LastPte = MiGetPteAddress (MI_VPN_TO_VA (Vad->EndingVpn));
  7922. //
  7923. // Set all PTEs within this range to zero. Any faults within
  7924. // this range will call the banked routine before making the
  7925. // page valid.
  7926. //
  7927. LOCK_WS_UNSAFE (Process);
  7928. ((PMMVAD_LONG) Vad)->u4.Banked = Bank;
  7929. RtlFillMemory (PointerPte,
  7930. (size >> (PAGE_SHIFT - PTE_SHIFT)),
  7931. (UCHAR)ZeroPte.u.Long);
  7932. KeFlushEntireTb (TRUE, TRUE);
  7933. UNLOCK_WS_UNSAFE (Process);
  7934. Status = STATUS_SUCCESS;
  7935. ErrorReturn:
  7936. UNLOCK_ADDRESS_SPACE (Process);
  7937. KeUnstackDetachProcess (&ApcState);
  7938. ObDereferenceObject (Process);
  7939. return Status;
  7940. }
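//
// Editor's note: an illustrative sketch (not part of the original
// source) of a video driver registering a banked section. The handle,
// buffer, bank size, callback variable and context are assumptions
// for the example only; the callback prototype comes from the DDK
// headers.
//
#if 0
{
    NTSTATUS Status;
    extern PBANKED_SECTION_ROUTINE HypotheticalBankSwitch;

    Status = MmSetBankedSection (ProcessHandle,
                                 MappedVideoBuffer,
                                 0x10000,               // 64KB bank
                                 TRUE,                  // read/write bank
                                 HypotheticalBankSwitch,
                                 HypotheticalContext);

    if (!NT_SUCCESS (Status)) {
        // fall back to unbanked operation
    }
}
#endif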
  7941. PVOID
  7942. MmMapVideoDisplay (
  7943. IN PHYSICAL_ADDRESS PhysicalAddress,
  7944. IN SIZE_T NumberOfBytes,
  7945. IN MEMORY_CACHING_TYPE CacheType
  7946. )
  7947. /*++
  7948. Routine Description:
  7949. This function maps the specified physical address into the non-pagable
  7950. portion of the system address space.
  7951. Arguments:
  7952. PhysicalAddress - Supplies the starting physical address to map.
  7953. NumberOfBytes - Supplies the number of bytes to map.
  7954. CacheType - Supplies MmNonCached if the physical address is to be mapped
  7955. as non-cached, MmCached if the address should be cached, and
  7956. MmWriteCombined if the address should be cached and
  7957. write-combined as a frame buffer. For I/O device registers,
  7958. this is usually specified as MmNonCached.
  7959. Return Value:
  7960. Returns the virtual address which maps the specified physical addresses.
  7961. The value NULL is returned if sufficient virtual address space for
  7962. the mapping could not be found.
  7963. Environment:
  7964. Kernel mode, IRQL of APC_LEVEL or below.
  7965. --*/
  7966. {
  7967. PMMPTE PointerPte;
  7968. PVOID BaseVa;
  7969. #ifdef LARGE_PAGES
  7970. MMPTE TempPte;
  7971. PFN_NUMBER PageFrameIndex;
  7972. PFN_NUMBER NumberOfPages;
  7973. ULONG size;
  7974. PMMPTE protoPte;
  7975. PMMPTE largePte;
  7976. ULONG pageSize;
  7977. PSUBSECTION Subsection;
  7978. ULONG Alignment;
  7979. ULONG EmPageSize;
7980. #endif // LARGE_PAGES
  7981. ULONG LargePages;
  7982. LargePages = FALSE;
  7983. PointerPte = NULL;
  7984. #if !defined (_MI_MORE_THAN_4GB_)
  7985. ASSERT (PhysicalAddress.HighPart == 0);
  7986. #endif
  7987. PAGED_CODE();
  7988. ASSERT (NumberOfBytes != 0);
  7989. #ifdef LARGE_PAGES
7990. // If this is ever enabled, care must be taken not to insert overlapping
7991. // TB entries with different cache attributes.
  7992. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (PhysicalAddress.LowPart,
  7993. NumberOfBytes);
  7994. TempPte = ValidKernelPte;
  7995. MI_DISABLE_CACHING (TempPte);
  7996. PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
  7997. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  7998. if ((NumberOfBytes > X64K) && (!MmLargeVideoMapped)) {
  7999. size = (NumberOfBytes - 1) >> (PAGE_SHIFT + 1);
  8000. pageSize = PAGE_SIZE;
  8001. while (size != 0) {
  8002. size = size >> 2;
  8003. pageSize = pageSize << 2;
  8004. }
  8005. Alignment = pageSize << 1;
  8006. if (Alignment < MM_VA_MAPPED_BY_PDE) {
  8007. Alignment = MM_VA_MAPPED_BY_PDE;
  8008. }
  8009. #if defined(_IA64_)
  8010. //
  8011. // Convert pageSize to the EM specific page-size field format
  8012. //
  8013. EmPageSize = 0;
  8014. size = pageSize - 1 ;
  8015. while (size) {
  8016. size = size >> 1;
  8017. EmPageSize += 1;
  8018. }
  8019. if (NumberOfBytes > pageSize) {
  8020. if (MmPageSizeInfo & (pageSize << 1)) {
  8021. //
  8022. // if larger page size is supported in the implementation
  8023. //
  8024. pageSize = pageSize << 1;
  8025. EmPageSize += 1;
  8026. }
  8027. else {
  8028. EmPageSize = EmPageSize | pageSize;
  8029. }
  8030. }
  8031. pageSize = EmPageSize;
  8032. #endif
  8033. NumberOfPages = Alignment >> PAGE_SHIFT;
  8034. PointerPte = MiReserveAlignedSystemPtes (NumberOfPages,
  8035. SystemPteSpace,
  8036. Alignment);
  8037. if (PointerPte == NULL) {
  8038. goto MapWithSmallPages;
  8039. }
  8040. protoPte = ExAllocatePoolWithTag (PagedPool,
  8041. sizeof (MMPTE),
  8042. 'bSmM');
  8043. if (protoPte == NULL) {
  8044. MiReleaseSystemPtes(PointerPte, NumberOfPages, SystemPteSpace);
  8045. goto MapWithSmallPages;
  8046. }
  8047. Subsection = ExAllocatePoolWithTag (NonPagedPool,
  8048. sizeof(SUBSECTION) + (4 * sizeof(MMPTE)),
  8049. 'bSmM');
  8050. if (Subsection == NULL) {
  8051. ExFreePool (protoPte);
  8052. MiReleaseSystemPtes(PointerPte, NumberOfPages, SystemPteSpace);
  8053. goto MapWithSmallPages;
  8054. }
  8055. MiFillMemoryPte (PointerPte,
  8056. Alignment >> (PAGE_SHIFT - PTE_SHIFT),
  8057. MM_ZERO_KERNEL_PTE);
  8058. //
  8059. // Build large page descriptor and fill in all the PTEs.
  8060. //
  8061. Subsection->StartingSector = pageSize;
  8062. Subsection->EndingSector = (ULONG)NumberOfPages;
  8063. Subsection->u.LongFlags = 0;
  8064. Subsection->u.SubsectionFlags.LargePages = 1;
  8065. Subsection->u.SubsectionFlags.Protection = MM_READWRITE | MM_NOCACHE;
  8066. Subsection->PtesInSubsection = Alignment;
  8067. Subsection->SubsectionBase = PointerPte;
  8068. largePte = (PMMPTE)(Subsection + 1);
  8069. //
  8070. // Build the first 2 PTEs as entries for the TLB to
  8071. // map the specified physical address.
  8072. //
  8073. *largePte = TempPte;
  8074. largePte += 1;
  8075. if (NumberOfBytes > pageSize) {
  8076. *largePte = TempPte;
  8077. largePte->u.Hard.PageFrameNumber += (pageSize >> PAGE_SHIFT);
  8078. }
  8079. else {
  8080. *largePte = ZeroKernelPte;
  8081. }
  8082. //
  8083. // Build the first prototype PTE as a paging file format PTE
  8084. // referring to the subsection.
  8085. //
  8086. protoPte->u.Long = MiGetSubsectionAddressForPte(Subsection);
  8087. protoPte->u.Soft.Prototype = 1;
  8088. protoPte->u.Soft.Protection = MM_READWRITE | MM_NOCACHE;
  8089. //
  8090. // Set the PTE up for all the user's PTE entries in prototype PTE
  8091. // format pointing to the 3rd prototype PTE.
  8092. //
  8093. TempPte.u.Long = MiProtoAddressForPte (protoPte);
  8094. MI_SET_GLOBAL_STATE (TempPte, 1);
  8095. LargePages = TRUE;
  8096. MmLargeVideoMapped = TRUE;
  8097. }
  8098. if (PointerPte != NULL) {
  8099. BaseVa = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
  8100. BaseVa = (PVOID)((PCHAR)BaseVa + BYTE_OFFSET(PhysicalAddress.LowPart));
  8101. do {
  8102. ASSERT (PointerPte->u.Hard.Valid == 0);
  8103. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  8104. PointerPte += 1;
  8105. NumberOfPages -= 1;
  8106. } while (NumberOfPages != 0);
  8107. }
  8108. else {
  8109. MapWithSmallPages:
  8110. #endif //LARGE_PAGES
  8111. BaseVa = MmMapIoSpace (PhysicalAddress,
  8112. NumberOfBytes,
  8113. CacheType);
  8114. #ifdef LARGE_PAGES
  8115. }
  8116. #endif //LARGE_PAGES
  8117. return BaseVa;
  8118. }
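//
// Editor's note: an illustrative sketch (not part of the original
// source) pairing MmMapVideoDisplay with MmUnmapVideoDisplay. The
// physical address and length are example values only.
//
#if 0
{
    PHYSICAL_ADDRESS FrameBuffer;
    PVOID BaseVa;

    FrameBuffer.QuadPart = 0xE0000000;      // example address only

    BaseVa = MmMapVideoDisplay (FrameBuffer,
                                0x100000,
                                MmWriteCombined);

    if (BaseVa == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    //
    // ... draw into the frame buffer through BaseVa ...
    //

    MmUnmapVideoDisplay (BaseVa, 0x100000);
}
#endif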
  8119. VOID
  8120. MmUnmapVideoDisplay (
  8121. IN PVOID BaseAddress,
  8122. IN SIZE_T NumberOfBytes
  8123. )
  8124. /*++
  8125. Routine Description:
8126. This function unmaps a range of physical addresses that was previously
8127. mapped via an MmMapVideoDisplay function call.
  8128. Arguments:
  8129. BaseAddress - Supplies the base virtual address where the physical
  8130. address was previously mapped.
  8131. NumberOfBytes - Supplies the number of bytes which were mapped.
  8132. Return Value:
  8133. None.
  8134. Environment:
  8135. Kernel mode, IRQL of APC_LEVEL or below.
  8136. --*/
  8137. {
  8138. #ifdef LARGE_PAGES
  8139. PFN_NUMBER NumberOfPages;
  8140. ULONG i;
  8141. PMMPTE FirstPte;
  8142. KIRQL OldIrql;
  8143. PMMPTE LargePte;
  8144. PSUBSECTION Subsection;
  8145. PAGED_CODE();
  8146. ASSERT (NumberOfBytes != 0);
  8147. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (BaseAddress, NumberOfBytes);
  8148. FirstPte = MiGetPteAddress (BaseAddress);
  8149. if ((NumberOfBytes > X64K) && (FirstPte->u.Hard.Valid == 0)) {
  8150. ASSERT (MmLargeVideoMapped);
  8151. LargePte = MiPteToProto (FirstPte);
  8152. Subsection = MiGetSubsectionAddress (LargePte);
  8153. ASSERT (Subsection->SubsectionBase == FirstPte);
  8154. NumberOfPages = Subsection->EndingSector;
  8155. ExFreePool (Subsection);
  8156. ExFreePool (LargePte);
  8157. MmLargeVideoMapped = FALSE;
  8158. KeFillFixedEntryTb ((PHARDWARE_PTE)FirstPte, (PVOID)KSEG0_BASE, LARGE_ENTRY);
  8159. }
  8160. MiReleaseSystemPtes(FirstPte, NumberOfPages, SystemPteSpace);
  8161. return;
  8162. #else // LARGE_PAGES
  8163. MmUnmapIoSpace (BaseAddress, NumberOfBytes);
  8164. return;
  8165. #endif //LARGE_PAGES
  8166. }
  8167. VOID
  8168. MmLockPagedPool (
  8169. IN PVOID Address,
  8170. IN SIZE_T SizeInBytes
  8171. )
  8172. /*++
  8173. Routine Description:
  8174. Locks the specified address (which MUST reside in paged pool) into
  8175. memory until MmUnlockPagedPool is called.
  8176. Arguments:
  8177. Address - Supplies the address in paged pool to lock.
  8178. SizeInBytes - Supplies the size in bytes to lock.
  8179. Return Value:
  8180. None.
  8181. Environment:
  8182. Kernel mode, IRQL of APC_LEVEL or below.
  8183. --*/
  8184. {
  8185. PMMPTE PointerPte;
  8186. PMMPTE LastPte;
  8187. PointerPte = MiGetPteAddress (Address);
  8188. LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + (SizeInBytes - 1)));
  8189. MiLockCode (PointerPte, LastPte, MM_LOCK_BY_REFCOUNT);
  8190. return;
  8191. }
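//
// Editor's note: an illustrative sketch (not part of the original
// source). A driver that must touch a paged pool buffer at raised
// IRQL can bracket the access as below; Buffer and BufferSize are
// assumptions for the example.
//
#if 0
{
    MmLockPagedPool (Buffer, BufferSize);

    //
    // The buffer can now be referenced at DISPATCH_LEVEL without
    // risking a page fault.
    //

    MmUnlockPagedPool (Buffer, BufferSize);
}
#endif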
  8192. NTKERNELAPI
  8193. VOID
  8194. MmUnlockPagedPool (
  8195. IN PVOID Address,
  8196. IN SIZE_T SizeInBytes
  8197. )
  8198. /*++
  8199. Routine Description:
  8200. Unlocks paged pool that was locked with MmLockPagedPool.
  8201. Arguments:
  8202. Address - Supplies the address in paged pool to unlock.
8203. SizeInBytes - Supplies the size in bytes to unlock.
  8204. Return Value:
  8205. None.
  8206. Environment:
  8207. Kernel mode, IRQL of APC_LEVEL or below.
  8208. --*/
  8209. {
  8210. PMMPTE PointerPte;
  8211. PMMPTE LastPte;
  8212. KIRQL OldIrql;
  8213. PFN_NUMBER PageFrameIndex;
  8214. PMMPFN Pfn1;
  8215. MmLockPagableSectionByHandle(ExPageLockHandle);
  8216. PointerPte = MiGetPteAddress (Address);
  8217. LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + (SizeInBytes - 1)));
  8218. LOCK_PFN (OldIrql);
  8219. do {
  8220. ASSERT (PointerPte->u.Hard.Valid == 1);
  8221. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  8222. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  8223. ASSERT (Pfn1->u3.e2.ReferenceCount > 1);
  8224. MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (Pfn1, 35);
  8225. PointerPte += 1;
  8226. } while (PointerPte <= LastPte);
  8227. UNLOCK_PFN (OldIrql);
  8228. MmUnlockPagableImageSection(ExPageLockHandle);
  8229. return;
  8230. }
  8231. NTKERNELAPI
  8232. ULONG
  8233. MmGatherMemoryForHibernate (
  8234. IN PMDL Mdl,
  8235. IN BOOLEAN Wait
  8236. )
  8237. /*++
  8238. Routine Description:
8239. Finds enough memory to fill in the pages of the MDL for the power
8240. management hibernate function.
  8241. Arguments:
  8242. Mdl - Supplies an MDL, the start VA field should be NULL. The length
  8243. field indicates how many pages to obtain.
8244. Wait - Supplies TRUE to retry if the pages aren't immediately available, FALSE to fail at once.
  8245. Return Value:
  8246. TRUE if the MDL could be filled in, FALSE otherwise.
  8247. Environment:
  8248. Kernel mode, IRQL of APC_LEVEL or below.
  8249. --*/
  8250. {
  8251. KIRQL OldIrql;
  8252. PFN_NUMBER AvailablePages;
  8253. PFN_NUMBER PagesNeeded;
  8254. PPFN_NUMBER Pages;
  8255. PFN_NUMBER i;
  8256. PFN_NUMBER PageFrameIndex;
  8257. PMMPFN Pfn1;
  8258. ULONG status;
  8259. status = FALSE;
  8260. PagesNeeded = Mdl->ByteCount >> PAGE_SHIFT;
  8261. Pages = (PPFN_NUMBER)(Mdl + 1);
  8262. i = Wait ? 100 : 1;
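//
// Editor's note: a waiting caller gets up to 100 passes through the
// loop below; with the 30 millisecond delay on each failed pass this
// bounds the total wait at roughly 3 seconds.
//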
  8263. InterlockedIncrement (&MiDelayPageFaults);
  8264. do {
  8265. LOCK_PFN2 (OldIrql);
  8266. MiDeferredUnlockPages (MI_DEFER_PFN_HELD);
  8267. //
  8268. // Don't use MmAvailablePages here because if compression hardware is
  8269. // being used we would bail prematurely. Check the lists explicitly
  8270. // in order to provide our caller with the maximum number of pages.
  8271. //
  8272. AvailablePages = MmZeroedPageListHead.Total +
  8273. MmFreePageListHead.Total +
  8274. MmStandbyPageListHead.Total;
  8275. if (AvailablePages > PagesNeeded) {
  8276. //
  8277. // Fill in the MDL.
  8278. //
  8279. do {
  8280. PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (NULL));
  8281. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  8282. #if DBG
  8283. Pfn1->PteAddress = (PVOID) (ULONG_PTR)X64K;
  8284. #endif
  8285. MI_SET_PFN_DELETED (Pfn1);
  8286. Pfn1->u3.e2.ReferenceCount += 1;
  8287. Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  8288. *Pages = PageFrameIndex;
  8289. Pages += 1;
  8290. PagesNeeded -= 1;
  8291. } while (PagesNeeded);
  8292. UNLOCK_PFN2 (OldIrql);
  8293. Mdl->MdlFlags |= MDL_PAGES_LOCKED;
  8294. status = TRUE;
  8295. break;
  8296. }
  8297. UNLOCK_PFN2 (OldIrql);
  8298. //
  8299. // If we're being called at DISPATCH_LEVEL we cannot move pages to
  8300. // the standby list because mutexes must be acquired to do so.
  8301. //
  8302. if (OldIrql > APC_LEVEL) {
  8303. break;
  8304. }
  8305. if (!i) {
  8306. break;
  8307. }
  8308. //
  8309. // Attempt to move pages to the standby list.
  8310. //
  8311. MmEmptyAllWorkingSets ();
  8312. MiFlushAllPages();
  8313. KeDelayExecutionThread (KernelMode,
  8314. FALSE,
  8315. (PLARGE_INTEGER)&Mm30Milliseconds);
  8316. i -= 1;
  8317. } while (TRUE);
  8318. InterlockedDecrement (&MiDelayPageFaults);
  8319. return status;
  8320. }
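//
// Editor's note: an illustrative sketch (not part of the original
// source) of the documented calling convention (StartVa NULL, length
// expressed in pages). The page count is an example value only.
//
#if 0
{
    PMDL Mdl;
    ULONG Pages = 32;                       // example page count only

    Mdl = MmCreateMdl (NULL, NULL, Pages << PAGE_SHIFT);

    if (Mdl == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    if (MmGatherMemoryForHibernate (Mdl, TRUE) == FALSE) {
        ExFreePool (Mdl);
        return STATUS_NO_MEMORY;
    }

    //
    // ... save data into the physical pages described by the MDL ...
    //

    MmReturnMemoryForHibernate (Mdl);
    ExFreePool (Mdl);
}
#endif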
  8321. NTKERNELAPI
  8322. VOID
  8323. MmReturnMemoryForHibernate (
  8324. IN PMDL Mdl
  8325. )
  8326. /*++
  8327. Routine Description:
8328. Returns memory obtained from MmGatherMemoryForHibernate.
  8329. Arguments:
8330. Mdl - Supplies an MDL, the start VA field should be NULL. The length
8331. field indicates how many pages to return.
  8332. Return Value:
  8333. None.
  8334. Environment:
  8335. Kernel mode, IRQL of APC_LEVEL or below.
  8336. --*/
  8337. {
  8338. KIRQL OldIrql;
  8339. PPFN_NUMBER Pages;
  8340. PPFN_NUMBER LastPage;
  8341. Pages = (PPFN_NUMBER)(Mdl + 1);
  8342. LastPage = Pages + (Mdl->ByteCount >> PAGE_SHIFT);
  8343. LOCK_PFN2 (OldIrql);
  8344. do {
  8345. MiDecrementReferenceCount (*Pages);
  8346. Pages += 1;
  8347. } while (Pages < LastPage);
  8348. UNLOCK_PFN2 (OldIrql);
  8349. return;
  8350. }
  8351. VOID
  8352. MmEnablePAT (
  8353. VOID
  8354. )
  8355. /*++
  8356. Routine Description:
8357. This routine enables the Page Attribute Table (PAT) capability for
8358. individual PTE mappings.
  8359. Arguments:
  8360. None.
  8361. Return Value:
  8362. None.
  8363. Environment:
  8364. Kernel mode.
  8365. --*/
  8366. {
  8367. MiWriteCombiningPtes = TRUE;
  8368. }
  8369. LOGICAL
  8370. MmIsSystemAddressLocked (
  8371. IN PVOID VirtualAddress
  8372. )
  8373. /*++
  8374. Routine Description:
  8375. This routine determines whether the specified system address is currently
  8376. locked.
  8377. This routine should only be called for debugging purposes, as it is not
  8378. guaranteed upon return to the caller that the address is still locked.
  8379. (The address could easily have been trimmed prior to return).
  8380. Arguments:
  8381. VirtualAddress - Supplies the virtual address to check.
  8382. Return Value:
  8383. TRUE if the address is locked. FALSE if not.
  8384. Environment:
  8385. DISPATCH LEVEL or below. No memory management locks may be held.
  8386. --*/
  8387. {
  8388. PMMPFN Pfn1;
  8389. KIRQL OldIrql;
  8390. PMMPTE PointerPte;
  8391. PFN_NUMBER PageFrameIndex;
  8392. if (IS_SYSTEM_ADDRESS (VirtualAddress) == FALSE) {
  8393. return FALSE;
  8394. }
  8395. if (MI_IS_PHYSICAL_ADDRESS (VirtualAddress)) {
  8396. return TRUE;
  8397. }
  8398. //
  8399. // Hyperspace and page maps are not treated as locked down.
  8400. //
  8401. if (MI_IS_PROCESS_SPACE_ADDRESS (VirtualAddress) == TRUE) {
  8402. return FALSE;
  8403. }
  8404. #if defined (_IA64_)
  8405. if (MI_IS_KERNEL_PTE_ADDRESS (VirtualAddress) == TRUE) {
  8406. return FALSE;
  8407. }
  8408. #endif
  8409. LOCK_PFN2 (OldIrql);
  8410. if (MmIsAddressValid (VirtualAddress) == FALSE) {
  8411. UNLOCK_PFN2 (OldIrql);
  8412. return FALSE;
  8413. }
  8414. PointerPte = MiGetPteAddress (VirtualAddress);
  8415. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  8416. //
  8417. // Note that the mapped page may not be in the PFN database. Treat
  8418. // this as locked. There is no way to detect if the PFN database is
  8419. // sparse without walking the loader blocks. Don't bother doing this
  8420. // as few machines are still sparse today.
  8421. //
  8422. if (PageFrameIndex > MmHighestPhysicalPage) {
  8423. UNLOCK_PFN2 (OldIrql);
8424. return TRUE;
  8425. }
  8426. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  8427. //
  8428. // Check for the page being locked by reference.
  8429. //
  8430. if (Pfn1->u3.e2.ReferenceCount > 1) {
  8431. UNLOCK_PFN2 (OldIrql);
  8432. return TRUE;
  8433. }
  8434. if (Pfn1->u3.e2.ReferenceCount > Pfn1->u2.ShareCount) {
  8435. UNLOCK_PFN2 (OldIrql);
  8436. return TRUE;
  8437. }
  8438. //
  8439. // Check whether the page is locked into the working set.
  8440. //
  8441. if (Pfn1->u1.Event == NULL) {
  8442. UNLOCK_PFN2 (OldIrql);
  8443. return TRUE;
  8444. }
  8445. UNLOCK_PFN2 (OldIrql);
  8446. return FALSE;
  8447. }
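//
// Editor's note: because the result is advisory only, a typical use
// (an assumption, not from the original source) is a checked-build
// assertion:
//
// ASSERT (MmIsSystemAddressLocked (SystemVa) == TRUE);
//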
  8448. LOGICAL
  8449. MmAreMdlPagesLocked (
  8450. IN PMDL MemoryDescriptorList
  8451. )
  8452. /*++
  8453. Routine Description:
  8454. This routine determines whether the pages described by the argument
  8455. MDL are currently locked.
  8456. This routine should only be called for debugging purposes, as it is not
  8457. guaranteed upon return to the caller that the pages are still locked.
  8458. Arguments:
  8459. MemoryDescriptorList - Supplies the memory descriptor list to check.
  8460. Return Value:
  8461. TRUE if ALL the pages described by the argument MDL are locked.
  8462. FALSE if not.
  8463. Environment:
  8464. DISPATCH LEVEL or below. No memory management locks may be held.
  8465. --*/
  8466. {
  8467. PFN_NUMBER NumberOfPages;
  8468. PPFN_NUMBER Page;
  8469. PVOID StartingVa;
  8470. PMMPFN Pfn1;
  8471. KIRQL OldIrql;
  8472. //
  8473. // We'd like to assert that MDL_PAGES_LOCKED is set but can't because
  8474. // some drivers have privately constructed MDLs and they never set the
  8475. // bit properly.
  8476. //
  8477. if ((MemoryDescriptorList->MdlFlags & (MDL_IO_SPACE | MDL_SOURCE_IS_NONPAGED_POOL)) != 0) {
  8478. return TRUE;
  8479. }
  8480. StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
  8481. MemoryDescriptorList->ByteOffset);
  8482. NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingVa,
  8483. MemoryDescriptorList->ByteCount);
  8484. Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
  8485. LOCK_PFN2 (OldIrql);
  8486. do {
  8487. if (*Page == MM_EMPTY_LIST) {
  8488. //
  8489. // There are no more locked pages.
  8490. //
  8491. break;
  8492. }
  8493. //
  8494. // Note that the mapped page may not be in the PFN database. Treat
  8495. // this as locked. There is no way to detect if the PFN database is
  8496. // sparse without walking the loader blocks. Don't bother doing this
  8497. // as few machines are still sparse today.
  8498. //
  8499. if (*Page <= MmHighestPhysicalPage) {
  8500. Pfn1 = MI_PFN_ELEMENT (*Page);
  8501. //
  8502. // Check for the page being locked by reference
  8503. //
  8504. // - or -
  8505. //
  8506. // whether the page is locked into the working set.
  8507. //
  8508. if ((Pfn1->u3.e2.ReferenceCount <= Pfn1->u2.ShareCount) &&
  8509. (Pfn1->u3.e2.ReferenceCount <= 1) &&
  8510. (Pfn1->u1.Event != NULL)) {
  8511. //
  8512. // The page is not locked by reference or in a working set.
  8513. //
  8514. UNLOCK_PFN2 (OldIrql);
  8515. return FALSE;
  8516. }
  8517. }
  8518. Page += 1;
  8519. NumberOfPages -= 1;
  8520. } while (NumberOfPages != 0);
  8521. UNLOCK_PFN2 (OldIrql);
  8522. return TRUE;
  8523. }
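//
// Editor's note: likewise advisory; a driver might verify an MDL it
// is about to hand to hardware with a checked-build assertion (an
// assumption, not from the original source):
//
// ASSERT (MmAreMdlPagesLocked (Irp->MdlAddress) == TRUE);
//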
  8524. #if DBG
  8525. VOID
  8526. MiVerifyLockedPageCharges (
  8527. VOID
  8528. )
  8529. {
  8530. PMMPFN Pfn1;
  8531. KIRQL OldIrql;
  8532. PFN_NUMBER start;
  8533. PFN_NUMBER count;
  8534. PFN_NUMBER Page;
  8535. PFN_NUMBER LockCharged;
  8536. if (MiPrintLockedPages == 0) {
  8537. return;
  8538. }
  8539. if (KeGetCurrentIrql() > APC_LEVEL) {
  8540. return;
  8541. }
  8542. start = 0;
  8543. LockCharged = 0;
  8544. ExAcquireFastMutex (&MmDynamicMemoryMutex);
  8545. LOCK_PFN (OldIrql);
  8546. do {
  8547. count = MmPhysicalMemoryBlock->Run[start].PageCount;
  8548. Page = MmPhysicalMemoryBlock->Run[start].BasePage;
  8549. if (count != 0) {
  8550. Pfn1 = MI_PFN_ELEMENT (Page);
  8551. do {
  8552. if (Pfn1->u3.e1.LockCharged == 1) {
  8553. if (MiPrintLockedPages & 0x4) {
  8554. DbgPrint ("%x ", Pfn1 - MmPfnDatabase);
  8555. }
  8556. LockCharged += 1;
  8557. }
  8558. count -= 1;
  8559. Pfn1 += 1;
  8560. } while (count != 0);
  8561. }
  8562. start += 1;
  8563. } while (start != MmPhysicalMemoryBlock->NumberOfRuns);
  8564. if (LockCharged != MmSystemLockPagesCount) {
  8565. if (MiPrintLockedPages & 0x1) {
  8566. DbgPrint ("MM: Locked pages MISMATCH %u %u\n",
  8567. LockCharged, MmSystemLockPagesCount);
  8568. }
  8569. }
  8570. else {
  8571. if (MiPrintLockedPages & 0x2) {
  8572. DbgPrint ("MM: Locked pages ok %u\n",
  8573. LockCharged);
  8574. }
  8575. }
  8576. UNLOCK_PFN (OldIrql);
  8577. ExReleaseFastMutex (&MmDynamicMemoryMutex);
  8578. return;
  8579. }
  8580. #endif