Leaked source code of Windows Server 2003

3392 lines · 101 KiB

  1. /*++
  2. Copyright (c) 1989 Microsoft Corporation
  3. Module Name:
  4. allocvm.c
  5. Abstract:
  6. This module contains the routines which implement the
  7. NtAllocateVirtualMemory service.
  8. Author:
  9. Lou Perazzoli (loup) 22-May-1989
  10. Landy Wang (landyw) 02-June-1997
  11. Revision History:
  12. --*/
  13. #include "mi.h"
  14. #if DBG
  15. PEPROCESS MmWatchProcess;
  16. #endif // DBG
  17. const ULONG MMVADKEY = ' daV'; //Vad
  18. NTSTATUS
  19. MiResetVirtualMemory (
  20. IN PVOID StartingAddress,
  21. IN PVOID EndingAddress,
  22. IN PMMVAD Vad,
  23. IN PEPROCESS Process
  24. );
  25. LOGICAL
  26. MiCreatePageTablesForPhysicalRange (
  27. IN PEPROCESS Process,
  28. IN PVOID StartingAddress,
  29. IN PVOID EndingAddress
  30. );
  31. VOID
  32. MiFlushAcquire (
  33. IN PCONTROL_AREA ControlArea
  34. );
  35. VOID
  36. MiFlushRelease (
  37. IN PCONTROL_AREA ControlArea
  38. );
  39. #ifdef ALLOC_PRAGMA
  40. #pragma alloc_text(PAGE,NtAllocateVirtualMemory)
  41. #pragma alloc_text(PAGE,MmCommitSessionMappedView)
  42. #pragma alloc_text(PAGELK,MiCreatePageTablesForPhysicalRange)
  43. #pragma alloc_text(PAGELK,MiDeletePageTablesForPhysicalRange)
  44. #pragma alloc_text(PAGELK,MiResetVirtualMemory)
  45. #endif
  46. SIZE_T MmTotalProcessCommit; // Only used for debugging
  47. NTSTATUS
  48. NtAllocateVirtualMemory (
  49. IN HANDLE ProcessHandle,
  50. IN OUT PVOID *BaseAddress,
  51. IN ULONG_PTR ZeroBits,
  52. IN OUT PSIZE_T RegionSize,
  53. IN ULONG AllocationType,
  54. IN ULONG Protect
  55. )
  56. /*++
  57. Routine Description:
  58. This function creates a region of pages within the virtual address
  59. space of a subject process.
  60. Arguments:
  61. ProcessHandle - Supplies an open handle to a process object.
  62. BaseAddress - Supplies a pointer to a variable that will receive
  63. the base address of the allocated region of pages.
  64. If the initial value of this argument is not null,
  65. then the region will be allocated starting at the
  66. specified virtual address rounded down to the next
  67. host page size address boundary. If the initial
  68. value of this argument is null, then the operating
  69. system will determine where to allocate the region.
  70. ZeroBits - Supplies the number of high order address bits that
  71. must be zero in the base address of the section view. The
  72. value of this argument must be less than or equal to the
  73. maximum number of zero bits and is only used when memory
  74. management determines where to allocate the view (i.e. when
  75. BaseAddress is null).
  76. If ZeroBits is zero, then no zero bit constraints are applied.
  77. If ZeroBits is greater than 0 and less than 32, then it is
  78. the number of leading zero bits from bit 31. Bits 63:32 are
  79. also required to be zero. This retains compatibility
  80. with 32-bit systems.
  81. If ZeroBits is greater than 32, then it is treated as
  82. a mask and the number of leading zero bits in the mask
  83. is counted. That count then becomes the zero bits argument.
  84. RegionSize - Supplies a pointer to a variable that will receive
  85. the actual size in bytes of the allocated region
  86. of pages. The initial value of this argument
  87. specifies the size in bytes of the region and is
  88. rounded up to the next host page size boundary.
  89. AllocationType - Supplies a set of flags that describe the type
  90. of allocation that is to be performed for the
  91. specified region of pages. Flags are:
  92. MEM_COMMIT - The specified region of pages is to be committed.
  93. MEM_RESERVE - The specified region of pages is to be reserved.
  94. MEM_TOP_DOWN - The specified region should be created at the
  95. highest virtual address possible based on ZeroBits.
  96. MEM_RESET - Reset the state of the specified region so
  97. that if the pages are in a paging file, they
  98. are discarded and if referenced later, pages of zeroes
  99. are materialized.
  100. If the pages are in memory and modified, they are marked
  101. as not modified so they will not be written out to
  102. the paging file. The contents are NOT zeroed.
  103. The Protect argument is ignored, but a valid protection
  104. must be specified.
  105. MEM_PHYSICAL - The specified region of pages will map physical memory
  106. directly via the AWE APIs.
  107. MEM_LARGE_PAGES - The specified region of pages will be allocated from
  108. physically contiguous (non-paged) pages and be mapped
  109. with a large TB entry.
  110. MEM_WRITE_WATCH - The specified private region is to be used for
  111. write-watch purposes.
  112. Protect - Supplies the protection desired for the committed region of pages.
  113. PAGE_NOACCESS - No access to the committed region
  114. of pages is allowed. An attempt to read,
  115. write, or execute the committed region
  116. results in an access violation.
  117. PAGE_EXECUTE - Execute access to the committed
  118. region of pages is allowed. An attempt to
  119. read or write the committed region results in
  120. an access violation.
  121. PAGE_READONLY - Read only and execute access to the
  122. committed region of pages is allowed. An
  123. attempt to write the committed region results
  124. in an access violation.
  125. PAGE_READWRITE - Read, write, and execute access to
  126. the committed region of pages is allowed. If
  127. write access to the underlying section is
  128. allowed, then a single copy of the pages are
  129. shared. Otherwise the pages are shared read
  130. only/copy on write.
  131. PAGE_NOCACHE - The region of pages should be allocated
  132. as non-cachable.
  133. Return Value:
  134. Various NTSTATUS codes.
  135. --*/
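/*
  A minimal usage sketch (editorial addition, not from the original file): this is
  roughly how a user-mode caller drives this service through the ntdll stub of the
  same name, assuming the signature declared above. The reserve-then-commit split
  and the flag/protection values come from the Arguments description; the error
  handling is illustrative only.

      PVOID Base = NULL;
      SIZE_T Size = 0x10000;          // 64 KB; rounded up to page granularity

      // Reserve an address range and let the system pick the base address.
      NTSTATUS St = NtAllocateVirtualMemory (NtCurrentProcess (), &Base, 0, &Size,
                                             MEM_RESERVE, PAGE_READWRITE);

      // Commit the reserved range so the pages can actually be touched.
      if (NT_SUCCESS (St)) {
          St = NtAllocateVirtualMemory (NtCurrentProcess (), &Base, 0, &Size,
                                        MEM_COMMIT, PAGE_READWRITE);
      }
*/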
  136. {
  137. ULONG Locked;
  138. ULONG_PTR Alignment;
  139. PMMVAD Vad;
  140. PMMVAD FoundVad;
  141. PMMVAD PreviousVad;
  142. PMMVAD NextVad;
  143. PEPROCESS Process;
  144. KPROCESSOR_MODE PreviousMode;
  145. PVOID StartingAddress;
  146. PVOID EndingAddress;
  147. NTSTATUS Status;
  148. PVOID TopAddress;
  149. PVOID CapturedBase;
  150. SIZE_T CapturedRegionSize;
  151. SIZE_T NumberOfPages;
  152. PMMPTE PointerPte;
  153. PMMPTE CommitLimitPte;
  154. ULONG ProtectionMask;
  155. PMMPTE LastPte;
  156. PMMPTE PointerPde;
  157. PMMPTE StartingPte;
  158. MMPTE TempPte;
  159. ULONG OldProtect;
  160. SIZE_T QuotaCharge;
  161. SIZE_T QuotaFree;
  162. SIZE_T CopyOnWriteCharge;
  163. LOGICAL Attached;
  164. LOGICAL ChargedExactQuota;
  165. MMPTE DecommittedPte;
  166. ULONG ChangeProtection;
  167. PVOID UsedPageTableHandle;
  168. PUCHAR Va;
  169. LOGICAL ChargedJobCommit;
  170. PMI_PHYSICAL_VIEW PhysicalView;
  171. PRTL_BITMAP BitMap;
  172. ULONG BitMapSize;
  173. ULONG BitMapBits;
  174. KAPC_STATE ApcState;
  175. SECTION Section;
  176. LARGE_INTEGER NewSize;
  177. PCONTROL_AREA ControlArea;
  178. PSEGMENT Segment;
  179. PMM_AVL_TABLE PhysicalVadRoot;
  180. #if defined(_MIALT4K_)
  181. PVOID OriginalBase;
  182. SIZE_T OriginalRegionSize;
  183. PVOID WowProcess;
  184. PVOID StartingAddressFor4k;
  185. PVOID EndingAddressFor4k;
  186. SIZE_T CapturedRegionSizeFor4k;
  187. ULONG OriginalProtectionMask;
  188. ULONG AltFlags;
  189. ULONG NativePageProtection;
  190. #endif
  191. PETHREAD CurrentThread;
  192. PEPROCESS CurrentProcess;
  193. ULONG ExecutePermission;
  194. PAGED_CODE();
  195. Attached = FALSE;
  196. //
  197. // Check the zero bits argument for correctness.
  198. //
  199. #if defined (_WIN64)
  200. if (ZeroBits >= 32) {
  201. //
  202. // ZeroBits is a mask instead of a count. Translate it to a count now.
  203. //
  204. ZeroBits = 64 - RtlFindMostSignificantBit (ZeroBits) -1;
  205. }
  206. else if (ZeroBits) {
  207. ZeroBits += 32;
  208. }
  209. #endif
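//
// For example (illustrative): on a 64-bit build a ZeroBits mask of
// 0x0000FFFFFFFFFFFF has its most significant set bit at index 47,
// so the count above becomes 64 - 47 - 1 = 16 leading zero bits.
//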
  210. if (ZeroBits > MM_MAXIMUM_ZERO_BITS) {
  211. return STATUS_INVALID_PARAMETER_3;
  212. }
  213. //
  214. // Check the AllocationType for correctness.
  215. //
  216. if ((AllocationType & ~(MEM_COMMIT | MEM_RESERVE | MEM_PHYSICAL |
  217. MEM_LARGE_PAGES |
  218. MEM_TOP_DOWN | MEM_RESET | MEM_WRITE_WATCH)) != 0) {
  219. return STATUS_INVALID_PARAMETER_5;
  220. }
  221. //
  222. // One of MEM_COMMIT, MEM_RESET or MEM_RESERVE must be set.
  223. //
  224. if ((AllocationType & (MEM_COMMIT | MEM_RESERVE | MEM_RESET)) == 0) {
  225. return STATUS_INVALID_PARAMETER_5;
  226. }
  227. if ((AllocationType & MEM_RESET) && (AllocationType != MEM_RESET)) {
  228. //
  229. // MEM_RESET may not be used with any other flag.
  230. //
  231. return STATUS_INVALID_PARAMETER_5;
  232. }
  233. if (AllocationType & MEM_LARGE_PAGES) {
  234. //
  235. // Large page address spaces must be committed and cannot be combined
  236. // with physical, reset or write watch.
  237. //
  238. if ((AllocationType & MEM_COMMIT) == 0) {
  239. return STATUS_INVALID_PARAMETER_5;
  240. }
  241. if (AllocationType & (MEM_PHYSICAL | MEM_RESET | MEM_WRITE_WATCH)) {
  242. return STATUS_INVALID_PARAMETER_5;
  243. }
  244. }
  245. if (AllocationType & MEM_WRITE_WATCH) {
  246. //
  247. // Write watch address spaces can only be created with MEM_RESERVE.
  248. //
  249. if ((AllocationType & MEM_RESERVE) == 0) {
  250. return STATUS_INVALID_PARAMETER_5;
  251. }
  252. }
  253. if (AllocationType & MEM_PHYSICAL) {
  254. //
  255. // MEM_PHYSICAL must be used with MEM_RESERVE.
  256. // MEM_TOP_DOWN is optional.
  257. // Anything else is invalid.
  258. //
  259. // This memory is always read-write when allocated.
  260. //
  261. if ((AllocationType & MEM_RESERVE) == 0) {
  262. return STATUS_INVALID_PARAMETER_5;
  263. }
  264. if (AllocationType & ~(MEM_RESERVE | MEM_TOP_DOWN | MEM_PHYSICAL)) {
  265. return STATUS_INVALID_PARAMETER_5;
  266. }
  267. if (Protect != PAGE_READWRITE) {
  268. return STATUS_INVALID_PARAMETER_6;
  269. }
  270. }
  271. //
  272. // Check the protection field.
  273. //
  274. ProtectionMask = MiMakeProtectionMask (Protect);
  275. if (ProtectionMask == MM_INVALID_PROTECTION) {
  276. return STATUS_INVALID_PAGE_PROTECTION;
  277. }
  278. ChangeProtection = FALSE;
  279. CurrentThread = PsGetCurrentThread ();
  280. CurrentProcess = PsGetCurrentProcessByThread (CurrentThread);
  281. PreviousMode = KeGetPreviousModeByThread (&CurrentThread->Tcb);
  282. //
  283. // Establish an exception handler, probe the specified addresses
  284. // for write access and capture the initial values.
  285. //
  286. try {
  287. if (PreviousMode != KernelMode) {
  288. ProbeForWritePointer (BaseAddress);
  289. ProbeForWriteUlong_ptr (RegionSize);
  290. }
  291. //
  292. // Capture the base address.
  293. //
  294. CapturedBase = *BaseAddress;
  295. //
  296. // Capture the region size.
  297. //
  298. CapturedRegionSize = *RegionSize;
  299. } except (ExSystemExceptionFilter()) {
  300. //
  301. // If an exception occurs during the probe or capture
  302. // of the initial values, then handle the exception and
  303. // return the exception code as the status value.
  304. //
  305. return GetExceptionCode();
  306. }
  307. #if defined(_MIALT4K_)
  308. OriginalBase = CapturedBase;
  309. OriginalRegionSize = CapturedRegionSize;
  310. #endif
  311. //
  312. // Make sure the specified starting and ending addresses are
  313. // within the user part of the virtual address space.
  314. //
  315. if (CapturedBase > MM_HIGHEST_VAD_ADDRESS) {
  316. //
  317. // Invalid base address.
  318. //
  319. return STATUS_INVALID_PARAMETER_2;
  320. }
  321. if ((((ULONG_PTR)MM_HIGHEST_VAD_ADDRESS + 1) - (ULONG_PTR)CapturedBase) <
  322. CapturedRegionSize) {
  323. //
  324. // Invalid region size;
  325. //
  326. return STATUS_INVALID_PARAMETER_4;
  327. }
  328. if (CapturedRegionSize == 0) {
  329. //
  330. // Region size cannot be 0.
  331. //
  332. return STATUS_INVALID_PARAMETER_4;
  333. }
  334. //
  335. // Reference the specified process handle for VM_OPERATION access.
  336. //
  337. if (ProcessHandle == NtCurrentProcess()) {
  338. Process = CurrentProcess;
  339. }
  340. else {
  341. Status = ObReferenceObjectByHandle ( ProcessHandle,
  342. PROCESS_VM_OPERATION,
  343. PsProcessType,
  344. PreviousMode,
  345. (PVOID *)&Process,
  346. NULL );
  347. if (!NT_SUCCESS(Status)) {
  348. return Status;
  349. }
  350. }
  351. //
  352. // Check for privilege before attaching to prevent unprivileged apps
  353. // from dumping memory into a privileged process.
  354. //
  355. if (AllocationType & MEM_LARGE_PAGES) {
  356. if (!SeSinglePrivilegeCheck (SeLockMemoryPrivilege, PreviousMode)) {
  357. Status = STATUS_PRIVILEGE_NOT_HELD;
  358. goto ErrorReturn1;
  359. }
  360. }
  361. //
  362. // If the specified process is not the current process, attach
  363. // to the specified process.
  364. //
  365. if (CurrentProcess != Process) {
  366. KeStackAttachProcess (&Process->Pcb, &ApcState);
  367. Attached = TRUE;
  368. }
  369. //
  370. // Add execute permission if necessary.
  371. //
  372. #if defined (_WIN64)
  373. if (Process->Wow64Process == NULL && AllocationType & MEM_COMMIT)
  374. #elif defined (_X86PAE_)
  375. if (AllocationType & MEM_COMMIT)
  376. #else
  377. if (FALSE)
  378. #endif
  379. {
  380. if (Process->Peb != NULL) {
  381. ExecutePermission = 0;
  382. try {
  383. ExecutePermission = Process->Peb->ExecuteOptions & MEM_EXECUTE_OPTION_DATA;
  384. } except (EXCEPTION_EXECUTE_HANDLER) {
  385. Status = GetExceptionCode();
  386. goto ErrorReturn1;
  387. }
  388. if (ExecutePermission != 0) {
  389. switch (Protect & 0xF) {
  390. case PAGE_READONLY:
  391. Protect &= ~PAGE_READONLY;
  392. Protect |= PAGE_EXECUTE_READ;
  393. break;
  394. case PAGE_READWRITE:
  395. Protect &= ~PAGE_READWRITE;
  396. Protect |= PAGE_EXECUTE_READWRITE;
  397. break;
  398. case PAGE_WRITECOPY:
  399. Protect &= ~PAGE_WRITECOPY;
  400. Protect |= PAGE_EXECUTE_WRITECOPY;
  401. break;
  402. default:
  403. break;
  404. }
  405. //
  406. // Recheck protection.
  407. //
  408. ProtectionMask = MiMakeProtectionMask (Protect);
  409. if (ProtectionMask == MM_INVALID_PROTECTION) {
  410. Status = STATUS_INVALID_PAGE_PROTECTION;
  411. goto ErrorReturn1;
  412. }
  413. }
  414. }
  415. }
  416. //
  417. // Get the address creation mutex to block multiple threads from
  418. // creating or deleting address space at the same time and
  419. // get the working set mutex so virtual address descriptors can
  420. // be inserted and walked. Block APCs so an APC which takes a page
  421. // fault does not corrupt various structures.
  422. //
  423. QuotaCharge = 0;
  424. if ((CapturedBase == NULL) || (AllocationType & MEM_RESERVE)) {
  425. //
  426. // PAGE_WRITECOPY is not valid for private pages.
  427. //
  428. if ((Protect & PAGE_WRITECOPY) ||
  429. (Protect & PAGE_EXECUTE_WRITECOPY)) {
  430. Status = STATUS_INVALID_PAGE_PROTECTION;
  431. goto ErrorReturn1;
  432. }
  433. Alignment = X64K;
  434. //
  435. // Reserve the address space.
  436. //
  437. if (CapturedBase == NULL) {
  438. //
  439. // No base address was specified. This MUST be a reserve or
  440. // reserve and commit.
  441. //
  442. CapturedRegionSize = ROUND_TO_PAGES (CapturedRegionSize);
  443. //
  444. // If the number of zero bits is greater than zero, then calculate
  445. // the highest address.
  446. //
  447. if (ZeroBits != 0) {
  448. TopAddress = (PVOID)(((ULONG_PTR)MM_USER_ADDRESS_RANGE_LIMIT) >> ZeroBits);
  449. //
  450. // Keep the top address below the highest user vad address
  451. // regardless.
  452. //
  453. if (TopAddress > MM_HIGHEST_VAD_ADDRESS) {
  454. Status = STATUS_INVALID_PARAMETER_3;
  455. goto ErrorReturn1;
  456. }
  457. }
  458. else {
  459. TopAddress = (PVOID)MM_HIGHEST_VAD_ADDRESS;
  460. }
  461. //
  462. // Check whether the registry indicates that all applications
  463. // should be given virtual address ranges from the highest
  464. // address downwards in order to test 3GB-aware apps on 32-bit
  465. // machines and 64-bit apps on NT64.
  466. //
  467. if (Process->VmTopDown == 1) {
  468. AllocationType |= MEM_TOP_DOWN;
  469. }
  470. //
  471. // Note this calculation assumes the starting address will be
  472. // allocated on at least a page boundary.
  473. //
  474. NumberOfPages = BYTES_TO_PAGES (CapturedRegionSize);
  475. SATISFY_OVERZEALOUS_COMPILER (StartingAddress = NULL);
  476. SATISFY_OVERZEALOUS_COMPILER (EndingAddress = NULL);
  477. if (AllocationType & MEM_LARGE_PAGES) {
  478. #ifdef _X86_
  479. if ((KeFeatureBits & KF_LARGE_PAGE) == 0) {
  480. Status = STATUS_NOT_SUPPORTED;
  481. goto ErrorReturn1;
  482. }
  483. #endif
  484. //
  485. // Ensure the region size meets minimum size and alignment.
  486. //
  487. ASSERT (MM_MINIMUM_VA_FOR_LARGE_PAGE >= X64K);
  488. //
  489. // Ensure the size is a multiple of the minimum large page size.
  490. //
  491. if (CapturedRegionSize % MM_MINIMUM_VA_FOR_LARGE_PAGE) {
  492. Status = STATUS_INVALID_PARAMETER_4;
  493. goto ErrorReturn1;
  494. }
  495. //
  496. // Align the starting address to a natural boundary.
  497. //
  498. Alignment = MM_MINIMUM_VA_FOR_LARGE_PAGE;
  499. }
  500. }
  501. else {
  502. //
  503. // A non-NULL base address was specified. Check to make sure
  504. // the specified base address to ending address is currently
  505. // unused.
  506. //
  507. EndingAddress = (PVOID)(((ULONG_PTR)CapturedBase +
  508. CapturedRegionSize - 1L) | (PAGE_SIZE - 1L));
  509. if (AllocationType & MEM_LARGE_PAGES) {
  510. #ifdef _X86_
  511. if ((KeFeatureBits & KF_LARGE_PAGE) == 0) {
  512. Status = STATUS_NOT_SUPPORTED;
  513. goto ErrorReturn1;
  514. }
  515. #endif
  516. //
  517. // Ensure the region size meets minimum size and alignment.
  518. //
  519. ASSERT (MM_MINIMUM_VA_FOR_LARGE_PAGE >= X64K);
  520. //
  521. // Ensure the size is a multiple of the minimum large page size.
  522. //
  523. if (CapturedRegionSize % MM_MINIMUM_VA_FOR_LARGE_PAGE) {
  524. Status = STATUS_INVALID_PARAMETER_4;
  525. goto ErrorReturn1;
  526. }
  527. //
  528. // Align the starting address to a natural boundary.
  529. //
  530. Alignment = MM_MINIMUM_VA_FOR_LARGE_PAGE;
  531. StartingAddress = (PVOID) MI_ALIGN_TO_SIZE (CapturedBase, Alignment);
  532. }
  533. else {
  534. //
  535. // Align the starting address on a 64k boundary.
  536. //
  537. StartingAddress = (PVOID)MI_64K_ALIGN (CapturedBase);
  538. }
  539. NumberOfPages = BYTES_TO_PAGES ((PCHAR)EndingAddress -
  540. (PCHAR)StartingAddress);
  541. SATISFY_OVERZEALOUS_COMPILER (TopAddress = NULL);
  542. }
  543. BitMapSize = 0;
  544. //
  545. // Allocate resources up front before acquiring mutexes to reduce
  546. // contention.
  547. //
  548. Vad = ExAllocatePoolWithTag (NonPagedPool, sizeof(MMVAD_SHORT), 'SdaV');
  549. if (Vad == NULL) {
  550. Status = STATUS_INSUFFICIENT_RESOURCES;
  551. goto ErrorReturn1;
  552. }
  553. Vad->u.LongFlags = 0;
  554. //
  555. // Calculate the page file quota for this address range.
  556. //
  557. if (AllocationType & MEM_COMMIT) {
  558. QuotaCharge = NumberOfPages;
  559. Vad->u.VadFlags.MemCommit = 1;
  560. }
  561. if (AllocationType & MEM_PHYSICAL) {
  562. Vad->u.VadFlags.UserPhysicalPages = 1;
  563. }
  564. else if (AllocationType & MEM_LARGE_PAGES) {
  565. Vad->u.VadFlags.LargePages = 1;
  566. }
  567. Vad->u.VadFlags.Protection = ProtectionMask;
  568. Vad->u.VadFlags.PrivateMemory = 1;
  569. Vad->u.VadFlags.CommitCharge = QuotaCharge;
  570. SATISFY_OVERZEALOUS_COMPILER (BitMap = NULL);
  571. SATISFY_OVERZEALOUS_COMPILER (PhysicalView = NULL);
  572. if (AllocationType & (MEM_PHYSICAL | MEM_LARGE_PAGES)) {
  573. if (AllocationType & MEM_WRITE_WATCH) {
  574. ExFreePool (Vad);
  575. Status = STATUS_INVALID_PARAMETER_5;
  576. goto ErrorReturn1;
  577. }
  578. if ((Process->AweInfo == NULL) && (MiAllocateAweInfo () == NULL)) {
  579. ExFreePool (Vad);
  580. Status = STATUS_INSUFFICIENT_RESOURCES;
  581. goto ErrorReturn1;
  582. }
  583. PhysicalView = (PMI_PHYSICAL_VIEW) ExAllocatePoolWithTag (
  584. NonPagedPool,
  585. sizeof(MI_PHYSICAL_VIEW),
  586. MI_PHYSICAL_VIEW_KEY);
  587. if (PhysicalView == NULL) {
  588. ExFreePool (Vad);
  589. Status = STATUS_INSUFFICIENT_RESOURCES;
  590. goto ErrorReturn1;
  591. }
  592. PhysicalView->Vad = Vad;
  593. if (AllocationType & MEM_PHYSICAL) {
  594. PhysicalView->u.LongFlags = MI_PHYSICAL_VIEW_AWE;
  595. }
  596. else {
  597. PhysicalView->u.LongFlags = MI_PHYSICAL_VIEW_LARGE;
  598. }
  599. }
  600. else if (AllocationType & MEM_WRITE_WATCH) {
  601. ASSERT (AllocationType & MEM_RESERVE);
  602. #if defined (_WIN64)
  603. if (NumberOfPages >= _4gb) {
  604. //
  605. // The bitmap package only handles 32 bits.
  606. //
  607. ExFreePool (Vad);
  608. Status = STATUS_INSUFFICIENT_RESOURCES;
  609. goto ErrorReturn1;
  610. }
  611. #endif
  612. PhysicalVadRoot = Process->PhysicalVadRoot;
  613. //
  614. // The address space mutex synchronizes the allocation of the
  615. // EPROCESS PhysicalVadRoot. This table root is not deleted until
  616. // the process exits.
  617. //
  618. if (Process->PhysicalVadRoot == NULL) {
  619. PhysicalVadRoot = (PMM_AVL_TABLE) ExAllocatePoolWithTag (
  620. NonPagedPool,
  621. sizeof (MM_AVL_TABLE),
  622. MI_PHYSICAL_VIEW_ROOT_KEY);
  623. if (PhysicalVadRoot == NULL) {
  624. ExFreePool (Vad);
  625. Status = STATUS_INSUFFICIENT_RESOURCES;
  626. goto ErrorReturn1;
  627. }
  628. RtlZeroMemory (PhysicalVadRoot, sizeof (MM_AVL_TABLE));
  629. ASSERT (PhysicalVadRoot->NumberGenericTableElements == 0);
  630. PhysicalVadRoot->BalancedRoot.u1.Parent = &PhysicalVadRoot->BalancedRoot;
  631. LOCK_ADDRESS_SPACE (Process);
  632. if (Process->PhysicalVadRoot == NULL) {
  633. MiInsertPhysicalVadRoot (Process, PhysicalVadRoot);
  634. UNLOCK_ADDRESS_SPACE (Process);
  635. }
  636. else {
  637. UNLOCK_ADDRESS_SPACE (Process);
  638. ExFreePool (PhysicalVadRoot);
  639. }
  640. }
  641. BitMapBits = (ULONG)NumberOfPages;
  642. BitMapSize = sizeof(RTL_BITMAP) + (ULONG)(((BitMapBits + 31) / 32) * 4);
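//
// (Write watch keeps one bit per page in the region; the size computed
// above rounds the bit count up to whole ULONGs and adds the RTL_BITMAP
// header.)
//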
  643. BitMap = ExAllocatePoolWithTag (NonPagedPool, BitMapSize, 'wwmM');
  644. if (BitMap == NULL) {
  645. ExFreePool (Vad);
  646. Status = STATUS_INSUFFICIENT_RESOURCES;
  647. goto ErrorReturn1;
  648. }
  649. //
  650. // Charge quota for the nonpaged pool for the bitmap. This is
  651. // done here rather than by using ExAllocatePoolWithQuota
  652. // so the process object is not referenced by the quota charge.
  653. //
  654. Status = PsChargeProcessNonPagedPoolQuota (Process,
  655. BitMapSize);
  656. if (!NT_SUCCESS(Status)) {
  657. ExFreePool (Vad);
  658. ExFreePool (BitMap);
  659. goto ErrorReturn1;
  660. }
  661. PhysicalView = (PMI_PHYSICAL_VIEW) ExAllocatePoolWithTag (
  662. NonPagedPool,
  663. sizeof(MI_PHYSICAL_VIEW),
  664. MI_WRITEWATCH_VIEW_KEY);
  665. if (PhysicalView == NULL) {
  666. ExFreePool (Vad);
  667. ExFreePool (BitMap);
  668. PsReturnProcessNonPagedPoolQuota (Process, BitMapSize);
  669. Status = STATUS_INSUFFICIENT_RESOURCES;
  670. goto ErrorReturn1;
  671. }
  672. RtlInitializeBitMap (BitMap, (PULONG)(BitMap + 1), BitMapBits);
  673. RtlClearAllBits (BitMap);
  674. PhysicalView->Vad = Vad;
  675. PhysicalView->u.BitMap = BitMap;
  676. Vad->u.VadFlags.WriteWatch = 1;
  677. }
  678. //
  679. // Now acquire mutexes, check ranges and insert.
  680. //
  681. LOCK_ADDRESS_SPACE (Process);
  682. //
  683. // Make sure the address space was not deleted, if so,
  684. // return an error.
  685. //
  686. if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
  687. Status = STATUS_PROCESS_IS_TERMINATING;
  688. goto ErrorReleaseVad;
  689. }
  690. //
  691. // Find a (or validate the) starting address.
  692. //
  693. if (CapturedBase == NULL) {
  694. if (AllocationType & MEM_TOP_DOWN) {
  695. //
  696. // Start from the top of memory downward.
  697. //
  698. Status = MiFindEmptyAddressRangeDown (&Process->VadRoot,
  699. CapturedRegionSize,
  700. TopAddress,
  701. Alignment,
  702. &StartingAddress);
  703. }
  704. else {
  705. Status = MiFindEmptyAddressRange (CapturedRegionSize,
  706. Alignment,
  707. (ULONG)ZeroBits,
  708. &StartingAddress);
  709. }
  710. if (!NT_SUCCESS (Status)) {
  711. goto ErrorReleaseVad;
  712. }
  713. //
  714. // Calculate the ending address based on the top address.
  715. //
  716. EndingAddress = (PVOID)(((ULONG_PTR)StartingAddress +
  717. CapturedRegionSize - 1L) | (PAGE_SIZE - 1L));
  718. if (EndingAddress > TopAddress) {
  719. //
  720. // The allocation does not honor the zero bits argument.
  721. //
  722. Status = STATUS_NO_MEMORY;
  723. goto ErrorReleaseVad;
  724. }
  725. }
  726. else {
  727. //
  728. // See if a VAD overlaps with this starting/ending address pair.
  729. //
  730. if (MiCheckForConflictingVadExistence (Process, StartingAddress, EndingAddress) == TRUE) {
  731. Status = STATUS_CONFLICTING_ADDRESSES;
  732. goto ErrorReleaseVad;
  733. }
  734. }
  735. //
  736. // An unoccupied address range has been found, finish initializing
  737. // the virtual address descriptor to describe this range, then
  738. // insert it into the tree.
  739. //
  740. Vad->StartingVpn = MI_VA_TO_VPN (StartingAddress);
  741. Vad->EndingVpn = MI_VA_TO_VPN (EndingAddress);
  742. LOCK_WS_UNSAFE (Process);
  743. Status = MiInsertVad (Vad);
  744. if (!NT_SUCCESS(Status)) {
  745. UNLOCK_WS_UNSAFE (Process);
  746. ErrorReleaseVad:
  747. //
  748. // The quota charge in InsertVad failed, deallocate the pool
  749. // and return an error.
  750. //
  751. UNLOCK_ADDRESS_SPACE (Process);
  752. ExFreePool (Vad);
  753. if (AllocationType & (MEM_PHYSICAL | MEM_LARGE_PAGES)) {
  754. ExFreePool (PhysicalView);
  755. }
  756. else if (BitMapSize != 0) {
  757. ExFreePool (PhysicalView);
  758. ExFreePool (BitMap);
  759. PsReturnProcessNonPagedPoolQuota (Process, BitMapSize);
  760. }
  761. goto ErrorReturn1;
  762. }
  763. //
  764. // Initialize page directory and table pages for the physical range.
  765. //
  766. if (AllocationType & (MEM_PHYSICAL | MEM_LARGE_PAGES)) {
  767. if (AllocationType & MEM_LARGE_PAGES) {
  768. //
  769. // Temporarily make the VAD protection no access. This allows
  770. // us to safely release the working set mutex while trying to
  771. // find contiguous memory to fill the large page range.
  772. // If another thread tries to access the large page VA range
  773. // before we find (and insert) a contiguous chunk, the thread
  774. // will get an AV.
  775. //
  776. Vad->u.VadFlags.Protection = MM_NOACCESS;
  777. ASSERT (((ULONG_PTR)StartingAddress % MM_MINIMUM_VA_FOR_LARGE_PAGE) == 0);
  778. UNLOCK_WS_UNSAFE (Process);
  779. Status = MiAllocateLargePages (StartingAddress,
  780. EndingAddress);
  781. //
  782. // Restore the correct protection.
  783. //
  784. LOCK_WS_UNSAFE (Process);
  785. Vad->u.VadFlags.Protection = ProtectionMask;
  786. }
  787. else if (MiCreatePageTablesForPhysicalRange (Process,
  788. StartingAddress,
  789. EndingAddress) == FALSE) {
  790. Status = STATUS_INSUFFICIENT_RESOURCES;
  791. }
  792. if (!NT_SUCCESS (Status)) {
  793. ASSERT (!NT_SUCCESS (Status));
  794. PreviousVad = MiGetPreviousVad (Vad);
  795. NextVad = MiGetNextVad (Vad);
  796. MiRemoveVad (Vad);
  797. //
  798. // Return commitment for page table pages if possible.
  799. //
  800. MiReturnPageTablePageCommitment (StartingAddress,
  801. EndingAddress,
  802. Process,
  803. PreviousVad,
  804. NextVad);
  805. UNLOCK_WS_AND_ADDRESS_SPACE (Process);
  806. ExFreePool (Vad);
  807. ExFreePool (PhysicalView);
  808. goto ErrorReturn1;
  809. }
  810. PhysicalView->StartingVpn = Vad->StartingVpn;
  811. PhysicalView->EndingVpn = Vad->EndingVpn;
  812. //
  813. // Insert the physical view into this process' list using a
  814. // nonpaged wrapper since the PFN lock is required.
  815. //
  816. MiAweViewInserter (Process, PhysicalView);
  817. }
  818. else if (BitMapSize != 0) {
  819. PhysicalView->StartingVpn = Vad->StartingVpn;
  820. PhysicalView->EndingVpn = Vad->EndingVpn;
  821. MiPhysicalViewInserter (Process, PhysicalView);
  822. }
  823. //
  824. // Unlock the working set lock, page faults can now be taken.
  825. //
  826. UNLOCK_WS_UNSAFE (Process);
  827. //
  828. // Update the current virtual size in the process header, the
  829. // address space lock protects this operation.
  830. //
  831. CapturedRegionSize = (PCHAR)EndingAddress - (PCHAR)StartingAddress + 1L;
  832. Process->VirtualSize += CapturedRegionSize;
  833. if (Process->VirtualSize > Process->PeakVirtualSize) {
  834. Process->PeakVirtualSize = Process->VirtualSize;
  835. }
  836. #if defined(_MIALT4K_)
  837. if (Process->Wow64Process != NULL) {
  838. if (OriginalBase == NULL) {
  839. OriginalRegionSize = ROUND_TO_4K_PAGES(OriginalRegionSize);
  840. EndingAddress = (PVOID)(((ULONG_PTR) StartingAddress +
  841. OriginalRegionSize - 1L) | (PAGE_4K - 1L));
  842. }
  843. else {
  844. EndingAddress = (PVOID)(((ULONG_PTR)OriginalBase +
  845. OriginalRegionSize - 1L) | (PAGE_4K - 1L));
  846. }
  847. CapturedRegionSize = (PCHAR)EndingAddress - (PCHAR)StartingAddress + 1L;
  848. //
  849. // Set the alternate permission table
  850. //
  851. AltFlags = (AllocationType & MEM_COMMIT) ? ALT_COMMIT : 0;
  852. MiProtectFor4kPage (StartingAddress,
  853. CapturedRegionSize,
  854. ProtectionMask,
  855. ALT_ALLOCATE|AltFlags,
  856. Process);
  857. }
  858. #endif
  859. //
  860. // Release the address space lock, lower IRQL, detach, and dereference
  861. // the process object.
  862. //
  863. UNLOCK_ADDRESS_SPACE(Process);
  864. if (Attached == TRUE) {
  865. KeUnstackDetachProcess (&ApcState);
  866. }
  867. if (ProcessHandle != NtCurrentProcess()) {
  868. ObDereferenceObject (Process);
  869. }
  870. //
  871. // Establish an exception handler and write the size and base
  872. // address.
  873. //
  874. try {
  875. *RegionSize = CapturedRegionSize;
  876. *BaseAddress = StartingAddress;
  877. } except (EXCEPTION_EXECUTE_HANDLER) {
  878. //
  879. // Return success at this point even if the results
  880. // cannot be written.
  881. //
  882. NOTHING;
  883. }
  884. return STATUS_SUCCESS;
  885. }
  886. //
  887. // Commit previously reserved pages. Note that these pages could
  888. // be either private or a section.
  889. //
  890. if (AllocationType == MEM_RESET) {
  891. //
  892. // Round up to page boundaries so good data is not reset.
  893. //
  894. EndingAddress = (PVOID)((ULONG_PTR)PAGE_ALIGN ((ULONG_PTR)CapturedBase +
  895. CapturedRegionSize) - 1);
  896. StartingAddress = (PVOID)PAGE_ALIGN((PUCHAR)CapturedBase + PAGE_SIZE - 1);
  897. if (StartingAddress > EndingAddress) {
  898. Status = STATUS_CONFLICTING_ADDRESSES;
  899. goto ErrorReturn1;
  900. }
  901. }
  902. else {
  903. EndingAddress = (PVOID)(((ULONG_PTR)CapturedBase +
  904. CapturedRegionSize - 1) | (PAGE_SIZE - 1));
  905. StartingAddress = (PVOID)PAGE_ALIGN(CapturedBase);
  906. }
  907. CapturedRegionSize = (PCHAR)EndingAddress - (PCHAR)StartingAddress + 1;
  908. LOCK_ADDRESS_SPACE (Process);
  909. //
  910. // Make sure the address space was not deleted, if so,
  911. // return an error.
  912. //
  913. if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
  914. Status = STATUS_PROCESS_IS_TERMINATING;
  915. goto ErrorReturn0;
  916. }
  917. FoundVad = MiCheckForConflictingVad (Process, StartingAddress, EndingAddress);
  918. if (FoundVad == NULL) {
  919. //
  920. // No virtual address is reserved at the specified base address,
  921. // return an error.
  922. //
  923. Status = STATUS_CONFLICTING_ADDRESSES;
  924. goto ErrorReturn0;
  925. }
  926. if ((FoundVad->u.VadFlags.UserPhysicalPages == 1) ||
  927. (FoundVad->u.VadFlags.LargePages == 1)) {
  928. Status = STATUS_CONFLICTING_ADDRESSES;
  929. goto ErrorReturn0;
  930. }
  931. //
  932. // Ensure that the starting and ending addresses are all within
  933. // the same virtual address descriptor.
  934. //
  935. if ((MI_VA_TO_VPN (StartingAddress) < FoundVad->StartingVpn) ||
  936. (MI_VA_TO_VPN (EndingAddress) > FoundVad->EndingVpn)) {
  937. //
  938. // Not within the section virtual address descriptor,
  939. // return an error.
  940. //
  941. Status = STATUS_CONFLICTING_ADDRESSES;
  942. goto ErrorReturn0;
  943. }
  944. if (FoundVad->u.VadFlags.CommitCharge == MM_MAX_COMMIT) {
  945. //
  946. // This is a special VAD, don't let any commits occur.
  947. //
  948. Status = STATUS_CONFLICTING_ADDRESSES;
  949. goto ErrorReturn0;
  950. }
  951. #if defined(_MIALT4K_)
  952. WowProcess = Process->Wow64Process;
  953. OriginalProtectionMask = 0;
  954. if (WowProcess != NULL) {
  955. OriginalProtectionMask = MiMakeProtectionMask (Protect);
  956. if (OriginalProtectionMask == MM_INVALID_PROTECTION) {
  957. Status = STATUS_INVALID_PAGE_PROTECTION;
  958. goto ErrorReturn0;
  959. }
  960. if (StartingAddress >= MmWorkingSetList->HighestUserAddress) {
  961. Status = STATUS_CONFLICTING_ADDRESSES;
  962. goto ErrorReturn0;
  963. }
  964. //
  965. // If protection changes on this region are allowed then proceed.
  966. //
  967. if (FoundVad->u.VadFlags.NoChange == 0) {
  968. NativePageProtection = MiMakeProtectForNativePage (StartingAddress,
  969. Protect,
  970. Process);
  971. ProtectionMask = MiMakeProtectionMask (NativePageProtection);
  972. if (ProtectionMask == MM_INVALID_PROTECTION) {
  973. Status = STATUS_INVALID_PAGE_PROTECTION;
  974. goto ErrorReturn0;
  975. }
  976. }
  977. }
  978. #endif
  979. if (AllocationType == MEM_RESET) {
  980. Status = MiResetVirtualMemory (StartingAddress,
  981. EndingAddress,
  982. FoundVad,
  983. Process);
  984. UNLOCK_ADDRESS_SPACE (Process);
  985. goto done;
  986. }
  987. if (FoundVad->u.VadFlags.PrivateMemory == 0) {
  988. Status = STATUS_SUCCESS;
  989. //
  990. // The no cache option is not allowed for sections.
  991. //
  992. if (Protect & PAGE_NOCACHE) {
  993. Status = STATUS_INVALID_PAGE_PROTECTION;
  994. goto ErrorReturn0;
  995. }
  996. if (FoundVad->u.VadFlags.NoChange == 1) {
  997. //
  998. // An attempt is made at changing the protection
  999. // of a SEC_NO_CHANGE section.
  1000. //
  1001. Status = MiCheckSecuredVad (FoundVad,
  1002. CapturedBase,
  1003. CapturedRegionSize,
  1004. ProtectionMask);
  1005. if (!NT_SUCCESS (Status)) {
  1006. goto ErrorReturn0;
  1007. }
  1008. }
  1009. if (FoundVad->ControlArea->FilePointer != NULL) {
  1010. if (FoundVad->u2.VadFlags2.ExtendableFile == 0) {
  1011. //
  1012. // Only page file backed sections can be committed.
  1013. //
  1014. Status = STATUS_ALREADY_COMMITTED;
  1015. goto ErrorReturn0;
  1016. }
  1017. //
  1018. // Commit the requested portions of the extendable file.
  1019. //
  1020. RtlZeroMemory (&Section, sizeof(SECTION));
  1021. ControlArea = FoundVad->ControlArea;
  1022. Section.Segment = ControlArea->Segment;
  1023. Section.u.LongFlags = ControlArea->u.LongFlags;
  1024. Section.InitialPageProtection = PAGE_READWRITE;
  1025. NewSize.QuadPart = FoundVad->u2.VadFlags2.FileOffset;
  1026. NewSize.QuadPart = NewSize.QuadPart << 16;
  1027. NewSize.QuadPart += 1 +
  1028. ((PCHAR)EndingAddress - (PCHAR)MI_VPN_TO_VA (FoundVad->StartingVpn));
  1029. //
  1030. // The working set and address space mutexes must be
  1031. // released prior to calling MmExtendSection otherwise
  1032. // a deadlock with the filesystem can occur.
  1033. //
  1034. // Prevent the control area from being deleted while
  1035. // the (potential) extension is ongoing.
  1036. //
  1037. MiFlushAcquire (ControlArea);
  1038. UNLOCK_ADDRESS_SPACE (Process);
  1039. Status = MmExtendSection (&Section, &NewSize, FALSE);
  1040. MiFlushRelease (ControlArea);
  1041. if (NT_SUCCESS(Status)) {
  1042. LOCK_ADDRESS_SPACE (Process);
  1043. //
  1044. // The Vad and/or the control area may have been changed
  1045. // or deleted before the mutexes were regained above.
  1046. // So everything must be revalidated. Note that
  1047. // if anything has changed, success is silently
  1048. // returned just as if the protection change had failed.
  1049. // It is the caller's fault if any of these has gone
  1050. // away and they will suffer.
  1051. //
  1052. if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
  1053. // Status = STATUS_PROCESS_IS_TERMINATING;
  1054. goto ErrorReturn0;
  1055. }
  1056. FoundVad = MiCheckForConflictingVad (Process,
  1057. StartingAddress,
  1058. EndingAddress);
  1059. if (FoundVad == NULL) {
  1060. //
  1061. // No virtual address is reserved at the specified
  1062. // base address, return an error.
  1063. //
  1064. // Status = STATUS_CONFLICTING_ADDRESSES;
  1065. goto ErrorReturn0;
  1066. }
  1067. if (ControlArea != FoundVad->ControlArea) {
  1068. goto ErrorReturn0;
  1069. }
  1070. if ((FoundVad->u.VadFlags.UserPhysicalPages == 1) ||
  1071. (FoundVad->u.VadFlags.LargePages == 1)) {
  1072. // Status = STATUS_CONFLICTING_ADDRESSES;
  1073. goto ErrorReturn0;
  1074. }
  1075. if (FoundVad->u.VadFlags.CommitCharge == MM_MAX_COMMIT) {
  1076. //
  1077. // This is a special VAD, no commits are allowed.
  1078. //
  1079. // Status = STATUS_CONFLICTING_ADDRESSES;
  1080. goto ErrorReturn0;
  1081. }
  1082. //
  1083. // Ensure that the starting and ending addresses are
  1084. // all within the same virtual address descriptor.
  1085. //
  1086. if ((MI_VA_TO_VPN (StartingAddress) < FoundVad->StartingVpn) ||
  1087. (MI_VA_TO_VPN (EndingAddress) > FoundVad->EndingVpn)) {
  1088. //
  1089. // Not within the section virtual address
  1090. // descriptor, return an error.
  1091. //
  1092. // Status = STATUS_CONFLICTING_ADDRESSES;
  1093. goto ErrorReturn0;
  1094. }
  1095. if (FoundVad->u.VadFlags.NoChange == 1) {
  1096. //
  1097. // An attempt is made at changing the protection
  1098. // of a SEC_NO_CHANGE section.
  1099. //
  1100. NTSTATUS Status2;
  1101. Status2 = MiCheckSecuredVad (FoundVad,
  1102. CapturedBase,
  1103. CapturedRegionSize,
  1104. ProtectionMask);
  1105. if (!NT_SUCCESS (Status2)) {
  1106. goto ErrorReturn0;
  1107. }
  1108. }
  1109. if (FoundVad->ControlArea->FilePointer == NULL) {
  1110. goto ErrorReturn0;
  1111. }
  1112. if (FoundVad->u2.VadFlags2.ExtendableFile == 0) {
  1113. goto ErrorReturn0;
  1114. }
  1115. #if defined(_MIALT4K_)
  1116. if (WowProcess != NULL) {
  1117. StartingAddressFor4k = (PVOID)PAGE_4K_ALIGN(OriginalBase);
  1118. EndingAddressFor4k = (PVOID)(((ULONG_PTR)OriginalBase +
  1119. OriginalRegionSize - 1) | (PAGE_4K - 1));
  1120. CapturedRegionSizeFor4k = (ULONG_PTR)EndingAddressFor4k -
  1121. (ULONG_PTR)StartingAddressFor4k + 1L;
  1122. if ((FoundVad->u.VadFlags.ImageMap == 1) ||
  1123. (FoundVad->u2.VadFlags2.CopyOnWrite == 1)) {
  1124. //
  1125. // Only set the MM_PROTECTION_COPY_MASK if the new protection includes
  1126. // MM_PROTECTION_WRITE_MASK, otherwise, it will be considered as MM_READ
  1127. // inside MiProtectFor4kPage().
  1128. //
  1129. if ((OriginalProtectionMask & MM_PROTECTION_WRITE_MASK) == MM_PROTECTION_WRITE_MASK) {
  1130. OriginalProtectionMask |= MM_PROTECTION_COPY_MASK;
  1131. }
  1132. }
  1133. MiProtectFor4kPage (StartingAddressFor4k,
  1134. CapturedRegionSizeFor4k,
  1135. OriginalProtectionMask,
  1136. ALT_COMMIT,
  1137. Process);
  1138. }
  1139. #endif
  1140. MiSetProtectionOnSection (Process,
  1141. FoundVad,
  1142. StartingAddress,
  1143. EndingAddress,
  1144. Protect,
  1145. &OldProtect,
  1146. TRUE,
  1147. &Locked);
  1148. //
  1149. // *** WARNING ***
  1150. //
  1151. // The alternate PTE support routines called by
  1152. // MiSetProtectionOnSection may have deleted the old (small)
  1153. // VAD and replaced it with a different (large) VAD - if so,
  1154. // the old VAD is freed and cannot be referenced.
  1155. //
  1156. UNLOCK_ADDRESS_SPACE (Process);
  1157. }
  1158. goto ErrorReturn1;
  1159. }
  1160. StartingPte = MiGetProtoPteAddress (FoundVad,
  1161. MI_VA_TO_VPN(StartingAddress));
  1162. LastPte = MiGetProtoPteAddress (FoundVad,
  1163. MI_VA_TO_VPN(EndingAddress));
  1164. #if 0
  1165. if (AllocationType & MEM_CHECK_COMMIT_STATE) {
  1166. //
  1167. // Make sure none of the pages are already committed.
  1168. //
  1169. KeAcquireGuardedMutexUnsafe (&MmSectionCommitMutex);
  1170. PointerPte = StartingPte;
  1171. while (PointerPte <= LastPte) {
  1172. //
  1173. // Check to see if the prototype PTE is committed.
  1174. // Note that prototype PTEs cannot be decommitted so
  1175. // the PTEs only need to be checked for zeroes.
  1176. //
  1177. if (PointerPte->u.Long != 0) {
  1178. KeReleaseGuardedMutexUnsafe (&MmSectionCommitMutex);
  1179. UNLOCK_ADDRESS_SPACE (Process);
  1180. Status = STATUS_ALREADY_COMMITTED;
  1181. goto ErrorReturn1;
  1182. }
  1183. PointerPte += 1;
  1184. }
  1185. KeReleaseGuardedMutexUnsafe (&MmSectionCommitMutex);
  1186. }
  1187. #endif //0
  1188. //
  1189. // Check to ensure these pages can be committed if this
  1190. // is a page file backed segment. Note that page file quota
  1191. // has already been charged for this.
  1192. //
  1193. PointerPte = StartingPte;
  1194. QuotaCharge = 1 + LastPte - StartingPte;
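//
// (QuotaCharge starts as the inclusive count of prototype PTEs spanned -
// one commit page per page in the range; pages found already committed
// are credited back below.)
//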
  1195. CopyOnWriteCharge = 0;
  1196. if (MI_IS_PTE_PROTECTION_COPY_WRITE(ProtectionMask)) {
  1197. //
  1198. // If the protection is copy on write, charge for
  1199. // the copy on writes.
  1200. //
  1201. CopyOnWriteCharge = QuotaCharge;
  1202. }
  1203. //
  1204. // Charge commitment for the range.
  1205. //
  1206. ChargedExactQuota = FALSE;
  1207. ChargedJobCommit = FALSE;
  1208. if (CopyOnWriteCharge != 0) {
  1209. Status = PsChargeProcessPageFileQuota (Process, CopyOnWriteCharge);
  1210. if (!NT_SUCCESS (Status)) {
  1211. UNLOCK_ADDRESS_SPACE (Process);
  1212. goto ErrorReturn1;
  1213. }
  1214. //
  1215. // Note this job charging is unusual because it is not
  1216. // followed by an immediate process charge.
  1217. //
  1218. if (Process->CommitChargeLimit) {
  1219. if (Process->CommitCharge + CopyOnWriteCharge > Process->CommitChargeLimit) {
  1220. if (Process->Job) {
  1221. PsReportProcessMemoryLimitViolation ();
  1222. }
  1223. UNLOCK_ADDRESS_SPACE (Process);
  1224. PsReturnProcessPageFileQuota (Process, CopyOnWriteCharge);
  1225. Status = STATUS_COMMITMENT_LIMIT;
  1226. goto ErrorReturn1;
  1227. }
  1228. }
  1229. if (Process->JobStatus & PS_JOB_STATUS_REPORT_COMMIT_CHANGES) {
  1230. if (PsChangeJobMemoryUsage(PS_JOB_STATUS_REPORT_COMMIT_CHANGES, CopyOnWriteCharge) == FALSE) {
  1231. UNLOCK_ADDRESS_SPACE (Process);
  1232. PsReturnProcessPageFileQuota (Process, CopyOnWriteCharge);
  1233. Status = STATUS_COMMITMENT_LIMIT;
  1234. goto ErrorReturn1;
  1235. }
  1236. ChargedJobCommit = TRUE;
  1237. }
  1238. }
  1239. do {
  1240. if (MiChargeCommitment (QuotaCharge + CopyOnWriteCharge, NULL) == TRUE) {
  1241. break;
  1242. }
  1243. //
  1244. // Reduce the charge we are asking for if possible.
  1245. //
  1246. if (ChargedExactQuota == TRUE) {
  1247. //
  1248. // We have already tried for the precise charge,
  1249. // so just return an error.
  1250. //
  1251. KeReleaseGuardedMutexUnsafe (&MmSectionCommitMutex);
  1252. if (CopyOnWriteCharge != 0) {
  1253. if (ChargedJobCommit == TRUE) {
  1254. PsChangeJobMemoryUsage (PS_JOB_STATUS_REPORT_COMMIT_CHANGES, -(SSIZE_T)CopyOnWriteCharge);
  1255. }
  1256. UNLOCK_ADDRESS_SPACE (Process);
  1257. PsReturnProcessPageFileQuota (Process, CopyOnWriteCharge);
  1258. }
  1259. else {
  1260. UNLOCK_ADDRESS_SPACE (Process);
  1261. }
  1262. Status = STATUS_COMMITMENT_LIMIT;
  1263. goto ErrorReturn1;
  1264. }
  1265. //
  1266. // The commitment charging of quota failed, calculate the
  1267. // exact quota taking into account pages that may already be
  1268. // committed and retry the operation.
  1269. //
  1270. KeAcquireGuardedMutexUnsafe (&MmSectionCommitMutex);
  1271. while (PointerPte <= LastPte) {
  1272. //
  1273. // Check to see if the prototype PTE is committed.
  1274. // Note that prototype PTEs cannot be decommitted so
  1275. // PTEs only need to be checked for zeroes.
  1276. //
  1277. if (PointerPte->u.Long != 0) {
  1278. QuotaCharge -= 1;
  1279. }
  1280. PointerPte += 1;
  1281. }
  1282. PointerPte = StartingPte;
  1283. ChargedExactQuota = TRUE;
  1284. //
  1285. // If the entire range is committed then there's nothing to charge.
  1286. //
  1287. if (QuotaCharge + CopyOnWriteCharge == 0) {
  1288. KeReleaseGuardedMutexUnsafe (&MmSectionCommitMutex);
  1289. QuotaFree = 0;
  1290. goto FinishedCharging;
  1291. }
  1292. } while (TRUE);
  1293. if (ChargedExactQuota == FALSE) {
  1294. KeAcquireGuardedMutexUnsafe (&MmSectionCommitMutex);
  1295. }
  1296. //
  1297. // Commit all the pages.
  1298. //
  1299. Segment = FoundVad->ControlArea->Segment;
  1300. TempPte = Segment->SegmentPteTemplate;
  1301. ASSERT (TempPte.u.Long != 0);
  1302. QuotaFree = 0;
  1303. while (PointerPte <= LastPte) {
  1304. if (PointerPte->u.Long != 0) {
  1305. //
  1306. // Page is already committed, back out commitment.
  1307. //
  1308. QuotaFree += 1;
  1309. }
  1310. else {
  1311. MI_WRITE_INVALID_PTE (PointerPte, TempPte);
  1312. }
  1313. PointerPte += 1;
  1314. }
  1315. //
  1316. // Subtract out any excess, then update the segment charges.
  1317. // Note only segment commit is excess - process commit must
  1318. // remain fully charged.
  1319. //
  1320. if (ChargedExactQuota == FALSE) {
  1321. ASSERT (QuotaCharge >= QuotaFree);
  1322. QuotaCharge -= QuotaFree;
  1323. //
  1324. // Return the QuotaFree excess commitment after the
  1325. // mutexes are released to remove needless contention.
  1326. //
  1327. }
  1328. else {
  1329. //
  1330. // Exact quota was charged so zero this to signify
  1331. // there is no excess to return.
  1332. //
  1333. QuotaFree = 0;
  1334. }
  1335. if (QuotaCharge != 0) {
  1336. Segment->NumberOfCommittedPages += QuotaCharge;
  1337. InterlockedExchangeAddSizeT (&MmSharedCommit, QuotaCharge);
  1338. MM_TRACK_COMMIT (MM_DBG_COMMIT_ALLOCVM_SEGMENT, QuotaCharge);
  1339. }
  1340. KeReleaseGuardedMutexUnsafe (&MmSectionCommitMutex);
  1341. //
  1342. // Update the per-process charges.
  1343. //
  1344. if (CopyOnWriteCharge != 0) {
  1345. FoundVad->u.VadFlags.CommitCharge += CopyOnWriteCharge;
  1346. Process->CommitCharge += CopyOnWriteCharge;
  1347. MI_INCREMENT_TOTAL_PROCESS_COMMIT (CopyOnWriteCharge);
  1348. if (Process->CommitCharge > Process->CommitChargePeak) {
  1349. Process->CommitChargePeak = Process->CommitCharge;
  1350. }
  1351. MM_TRACK_COMMIT (MM_DBG_COMMIT_ALLOCVM_PROCESS, CopyOnWriteCharge);
  1352. }
  1353. FinishedCharging:
  1354. #if defined(_MIALT4K_)
  1355. //
  1356. // Update the alternate table before PTEs are created
  1357. // for the protection change.
  1358. //
  1359. if (WowProcess != NULL) {
  1360. StartingAddressFor4k = (PVOID)PAGE_4K_ALIGN(OriginalBase);
  1361. EndingAddressFor4k = (PVOID)(((ULONG_PTR)OriginalBase +
  1362. OriginalRegionSize - 1) | (PAGE_4K - 1));
  1363. CapturedRegionSizeFor4k = (ULONG_PTR)EndingAddressFor4k -
  1364. (ULONG_PTR)StartingAddressFor4k + 1L;
  1365. if ((FoundVad->u.VadFlags.ImageMap == 1) ||
  1366. (FoundVad->u2.VadFlags2.CopyOnWrite == 1)) {
  1367. //
  1368. // Only set the MM_PROTECTION_COPY_MASK if the new protection includes
  1369. // MM_PROTECTION_WRITE_MASK, otherwise, it will be considered as MM_READ
  1370. // inside MiProtectFor4kPage().
  1371. //
  1372. if ((OriginalProtectionMask & MM_PROTECTION_WRITE_MASK) == MM_PROTECTION_WRITE_MASK) {
  1373. OriginalProtectionMask |= MM_PROTECTION_COPY_MASK;
  1374. }
  1375. }
  1376. //
  1377. // Set the alternate permission table.
  1378. //
  1379. MiProtectFor4kPage (StartingAddressFor4k,
  1380. CapturedRegionSizeFor4k,
  1381. OriginalProtectionMask,
  1382. ALT_COMMIT,
  1383. Process);
  1384. }
  1385. else {
  1386. SATISFY_OVERZEALOUS_COMPILER (StartingAddressFor4k = NULL);
  1387. SATISFY_OVERZEALOUS_COMPILER (CapturedRegionSizeFor4k = 0);
  1388. }
  1389. #endif
  1390. //
  1391. // Change all the protections to be protected as specified.
  1392. //
  1393. MiSetProtectionOnSection (Process,
  1394. FoundVad,
  1395. StartingAddress,
  1396. EndingAddress,
  1397. Protect,
  1398. &OldProtect,
  1399. TRUE,
  1400. &Locked);
  1401. //
  1402. // *** WARNING ***
  1403. //
  1404. // The alternate PTE support routines called by
  1405. // MiSetProtectionOnSection may have deleted the old (small)
  1406. // VAD and replaced it with a different (large) VAD - if so,
  1407. // the old VAD is freed and cannot be referenced.
  1408. //
  1409. UNLOCK_ADDRESS_SPACE (Process);
  1410. //
  1411. // Return any excess segment commit that may have been charged.
  1412. //
  1413. if (QuotaFree != 0) {
  1414. MiReturnCommitment (QuotaFree);
  1415. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_ALLOCVM_SEGMENT, QuotaFree);
  1416. }
  1417. if (Attached == TRUE) {
  1418. KeUnstackDetachProcess (&ApcState);
  1419. }
  1420. if (ProcessHandle != NtCurrentProcess()) {
  1421. ObDereferenceObject (Process);
  1422. }
  1423. #if defined(_MIALT4K_)
  1424. if (WowProcess != NULL) {
  1425. CapturedRegionSize = CapturedRegionSizeFor4k;
  1426. StartingAddress = StartingAddressFor4k;
  1427. }
  1428. #endif
  1429. try {
  1430. *RegionSize = CapturedRegionSize;
  1431. *BaseAddress = StartingAddress;
  1432. } except (EXCEPTION_EXECUTE_HANDLER) {
  1433. //
  1434. // Return success at this point even if the results
  1435. // cannot be written.
  1436. //
  1437. NOTHING;
  1438. }
  1439. return STATUS_SUCCESS;
  1440. }
  1441. //
  1442. // PAGE_WRITECOPY is not valid for private pages.
  1443. //
  1444. if ((Protect & PAGE_WRITECOPY) ||
  1445. (Protect & PAGE_EXECUTE_WRITECOPY)) {
  1446. Status = STATUS_INVALID_PAGE_PROTECTION;
  1447. goto ErrorReturn0;
  1448. }
  1449. //
  1450. // Ensure none of the pages are already committed as described
  1451. // in the virtual address descriptor.
  1452. //
  1453. #if 0
  1454. if (AllocationType & MEM_CHECK_COMMIT_STATE) {
  1455. if ( !MiIsEntireRangeDecommitted(StartingAddress,
  1456. EndingAddress,
  1457. FoundVad,
  1458. Process)) {
  1459. //
  1460. // Previously reserved pages have been committed, or
  1461. // an error occurred, release mutex and return status.
  1462. //
  1463. Status = STATUS_ALREADY_COMMITTED;
  1464. goto ErrorReturn0;
  1465. }
  1466. }
  1467. #endif //0
  1468. //
  1469. // Build a demand zero PTE with the proper protection.
  1470. //
  1471. TempPte = ZeroPte;
  1472. TempPte.u.Soft.Protection = ProtectionMask;
  1473. DecommittedPte = ZeroPte;
  1474. DecommittedPte.u.Soft.Protection = MM_DECOMMIT;
  1475. if (FoundVad->u.VadFlags.MemCommit) {
  1476. CommitLimitPte = MiGetPteAddress (MI_VPN_TO_VA (FoundVad->EndingVpn));
  1477. }
  1478. else {
  1479. CommitLimitPte = NULL;
  1480. }
  1481. //
  1482. // The address range has not been committed, commit it now.
  1483. // Note that for private pages, commitment is handled by
  1484. // explicitly updating PTEs to contain Demand Zero entries.
  1485. //
  1486. PointerPde = MiGetPdeAddress (StartingAddress);
  1487. PointerPte = MiGetPteAddress (StartingAddress);
  1488. LastPte = MiGetPteAddress (EndingAddress);
  1489. //
  1490. // Check to ensure these pages can be committed.
  1491. //
  1492. QuotaCharge = 1 + LastPte - PointerPte;
  1493. //
  1494. // Charge quota and commitment for the range.
  1495. //
  1496. ChargedExactQuota = FALSE;
  1497. do {
  1498. ChargedJobCommit = FALSE;
  1499. if (Process->CommitChargeLimit) {
  1500. if (Process->CommitCharge + QuotaCharge > Process->CommitChargeLimit) {
  1501. if (Process->Job) {
  1502. PsReportProcessMemoryLimitViolation ();
  1503. }
  1504. Status = STATUS_COMMITMENT_LIMIT;
  1505. goto Failed;
  1506. }
  1507. }
  1508. if (Process->JobStatus & PS_JOB_STATUS_REPORT_COMMIT_CHANGES) {
  1509. if (PsChangeJobMemoryUsage(PS_JOB_STATUS_REPORT_COMMIT_CHANGES, QuotaCharge) == FALSE) {
  1510. Status = STATUS_COMMITMENT_LIMIT;
  1511. goto Failed;
  1512. }
  1513. ChargedJobCommit = TRUE;
  1514. }
  1515. if (MiChargeCommitment (QuotaCharge, NULL) == FALSE) {
  1516. Status = STATUS_COMMITMENT_LIMIT;
  1517. goto Failed;
  1518. }
  1519. Status = PsChargeProcessPageFileQuota (Process, QuotaCharge);
  1520. if (!NT_SUCCESS (Status)) {
  1521. MiReturnCommitment (QuotaCharge);
  1522. goto Failed;
  1523. }
  1524. MM_TRACK_COMMIT (MM_DBG_COMMIT_ALLOCVM_PROCESS2, QuotaCharge);
  1525. FoundVad->u.VadFlags.CommitCharge += QuotaCharge;
  1526. Process->CommitCharge += QuotaCharge;
  1527. MI_INCREMENT_TOTAL_PROCESS_COMMIT (QuotaCharge);
  1528. if (Process->CommitCharge > Process->CommitChargePeak) {
  1529. Process->CommitChargePeak = Process->CommitCharge;
  1530. }
  1531. //
  1532. // Successful so break out now.
  1533. //
  1534. break;
  1535. Failed:
  1536. //
  1537. // Charging of commitment failed. Release the held mutexes and return
  1538. // the failure status to the user.
  1539. //
  1540. if (ChargedJobCommit == TRUE) {
  1541. PsChangeJobMemoryUsage (PS_JOB_STATUS_REPORT_COMMIT_CHANGES, 0 - QuotaCharge);
  1542. }
  1543. if (ChargedExactQuota == TRUE) {
  1544. //
  1545. // We have already tried for the precise charge,
  1546. // return an error.
  1547. //
  1548. goto ErrorReturn;
  1549. }
  1550. LOCK_WS_UNSAFE (Process);
  1551. //
  1552. // Quota charge failed, calculate the exact quota
  1553. // taking into account pages that may already be
  1554. // committed, subtract this from the total and retry the operation.
  1555. //
  1556. QuotaFree = MiCalculatePageCommitment (StartingAddress,
  1557. EndingAddress,
  1558. FoundVad,
  1559. Process);
  1560. if (QuotaFree == 0) {
  1561. goto ErrorReturn;
  1562. }
  1563. ChargedExactQuota = TRUE;
  1564. QuotaCharge -= QuotaFree;
  1565. ASSERT ((SSIZE_T)QuotaCharge >= 0);
  1566. if (QuotaCharge == 0) {
  1567. //
  1568. // All the pages are already committed so just march on.
  1569. // Explicitly set status to success as code above may have
  1570. // generated a failure status when overcharging.
  1571. //
  1572. Status = STATUS_SUCCESS;
  1573. break;
  1574. }
  1575. } while (TRUE);
  1576. QuotaFree = 0;
  1577. if (ChargedExactQuota == FALSE) {
  1578. LOCK_WS_UNSAFE (Process);
  1579. }
  1580. //
  1581. // Fill in all the page directory and page table pages with the
  1582. // demand zero PTE.
  1583. //
  1584. MiMakePdeExistAndMakeValid (PointerPde, Process, MM_NOIRQL);
  1585. while (PointerPte <= LastPte) {
  1586. if (MiIsPteOnPdeBoundary (PointerPte)) {
  1587. PointerPde = MiGetPteAddress (PointerPte);
  1588. //
  1589. // Pointing to the next page table page, make
  1590. // a page table page exist and make it valid.
  1591. //
  1592. MiMakePdeExistAndMakeValid (PointerPde, Process, MM_NOIRQL);
  1593. }
  1594. if (PointerPte->u.Long == 0) {
  1595. if (PointerPte <= CommitLimitPte) {
  1596. //
  1597. // This page is implicitly committed.
  1598. //
  1599. QuotaFree += 1;
  1600. }
  1601. //
  1602. // Increment the count of non-zero page table entries
  1603. // for this page table and the number of private pages
  1604. // for the process.
  1605. //
  1606. Va = MiGetVirtualAddressMappedByPte (PointerPte);
  1607. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (Va);
  1608. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  1609. MI_WRITE_INVALID_PTE (PointerPte, TempPte);
  1610. }
  1611. else {
  1612. if (PointerPte->u.Long == DecommittedPte.u.Long) {
  1613. //
  1614. // Only commit the page if it is already decommitted.
  1615. //
  1616. MI_WRITE_INVALID_PTE (PointerPte, TempPte);
  1617. }
  1618. else {
  1619. QuotaFree += 1;
  1620. //
  1621. // Make sure the protection for the page is right.
  1622. //
  1623. if (!ChangeProtection &&
  1624. (Protect != MiGetPageProtection (PointerPte,
  1625. Process,
  1626. FALSE))) {
  1627. ChangeProtection = TRUE;
  1628. }
  1629. }
  1630. }
  1631. PointerPte += 1;
  1632. }
  1633. UNLOCK_WS_UNSAFE (Process);
  1634. #if defined(_MIALT4K_)
  1635. if (WowProcess != NULL) {
  1636. StartingAddress = (PVOID) PAGE_4K_ALIGN(OriginalBase);
  1637. EndingAddress = (PVOID)(((ULONG_PTR)OriginalBase +
  1638. OriginalRegionSize - 1) | (PAGE_4K - 1));
  1639. CapturedRegionSize = (ULONG_PTR)EndingAddress -
  1640. (ULONG_PTR)StartingAddress + 1L;
  1641. //
  1642. // Update the alternate permission table.
  1643. //
  1644. MiProtectFor4kPage (StartingAddress,
  1645. CapturedRegionSize,
  1646. OriginalProtectionMask,
  1647. ALT_COMMIT,
  1648. Process);
  1649. }
  1650. #endif
  1651. if ((ChargedExactQuota == FALSE) && (QuotaFree != 0)) {
  1652. FoundVad->u.VadFlags.CommitCharge -= QuotaFree;
  1653. ASSERT ((LONG_PTR)FoundVad->u.VadFlags.CommitCharge >= 0);
  1654. Process->CommitCharge -= QuotaFree;
  1655. UNLOCK_ADDRESS_SPACE (Process);
  1656. MI_INCREMENT_TOTAL_PROCESS_COMMIT (0 - QuotaFree);
  1657. MiReturnCommitment (QuotaFree);
  1658. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_ALLOCVM2, QuotaFree);
  1659. PsReturnProcessPageFileQuota (Process, QuotaFree);
  1660. if (ChargedJobCommit) {
  1661. PsChangeJobMemoryUsage (PS_JOB_STATUS_REPORT_COMMIT_CHANGES, -(SSIZE_T)QuotaFree);
  1662. }
  1663. }
  1664. else {
  1665. UNLOCK_ADDRESS_SPACE (Process);
  1666. }
  1667. //
  1668. // Previously reserved pages have been committed or an error occurred.
  1669. // Detach, dereference process and return status.
  1670. //
  1671. done:
  1672. if (ChangeProtection) {
  1673. PVOID Start;
  1674. SIZE_T Size;
  1675. ULONG LastProtect;
  1676. Start = StartingAddress;
  1677. Size = CapturedRegionSize;
  1678. MiProtectVirtualMemory (Process,
  1679. &Start,
  1680. &Size,
  1681. Protect,
  1682. &LastProtect);
  1683. }
  1684. if (Attached == TRUE) {
  1685. KeUnstackDetachProcess (&ApcState);
  1686. }
  1687. if (ProcessHandle != NtCurrentProcess()) {
  1688. ObDereferenceObject (Process);
  1689. }
  1690. //
  1691. // Establish an exception handler and write the size and base
  1692. // address.
  1693. //
  1694. try {
  1695. *RegionSize = CapturedRegionSize;
  1696. *BaseAddress = StartingAddress;
  1697. } except (EXCEPTION_EXECUTE_HANDLER) {
  1698. return GetExceptionCode();
  1699. }
  1700. return Status;
  1701. ErrorReturn:
  1702. UNLOCK_WS_UNSAFE (Process);
  1703. ErrorReturn0:
  1704. UNLOCK_ADDRESS_SPACE (Process);
  1705. ErrorReturn1:
  1706. if (Attached == TRUE) {
  1707. KeUnstackDetachProcess (&ApcState);
  1708. }
  1709. if (ProcessHandle != NtCurrentProcess()) {
  1710. ObDereferenceObject (Process);
  1711. }
  1712. return Status;
  1713. }
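//
// Illustrative sketch (not part of the original source): a minimal
// caller that reserves address space and then commits part of it with
// a second NtAllocateVirtualMemory call, which drives the commit path
// implemented above. The helper name is hypothetical; MEM_RESERVE,
// MEM_COMMIT and PAGE_READWRITE are the standard NT definitions.
//
#if 0
NTSTATUS
ExampleReserveThenCommit (
    VOID
    )
{
    NTSTATUS Status;
    PVOID BaseAddress;
    SIZE_T RegionSize;

    //
    // Reserve 64KB of address space, letting the system choose the base.
    //
    BaseAddress = NULL;
    RegionSize = 64 * 1024;
    Status = NtAllocateVirtualMemory (NtCurrentProcess (),
                                      &BaseAddress,
                                      0,
                                      &RegionSize,
                                      MEM_RESERVE,
                                      PAGE_READWRITE);
    if (!NT_SUCCESS (Status)) {
        return Status;
    }

    //
    // Commit the first page of the reserved region in place.
    //
    RegionSize = PAGE_SIZE;
    Status = NtAllocateVirtualMemory (NtCurrentProcess (),
                                      &BaseAddress,
                                      0,
                                      &RegionSize,
                                      MEM_COMMIT,
                                      PAGE_READWRITE);
    return Status;
}
#endif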
  1714. NTSTATUS
  1715. MmCommitSessionMappedView (
  1716. IN PVOID MappedAddress,
  1717. IN SIZE_T ViewSize
  1718. )
  1719. /*++
  1720. Routine Description:
  1721. This function commits a region of pages within the session mapped
  1722. view virtual address space.
  1723. Arguments:
  1724. MappedAddress - Supplies the non-NULL address within a session mapped view
  1725. to begin committing pages at. Note the backing section
  1726. must be pagefile backed.
  1727. ViewSize - Supplies the actual size in bytes to be committed.
  1728. Return Value:
  1729. Various NTSTATUS codes.
  1730. --*/
  1731. {
  1732. PSUBSECTION Subsection;
  1733. ULONG_PTR Base16;
  1734. ULONG Hash;
  1735. ULONG Size;
  1736. ULONG count;
  1737. PMMSESSION Session;
  1738. PVOID ViewBaseAddress;
  1739. PVOID StartingAddress;
  1740. PVOID EndingAddress;
  1741. PMMPTE PointerPte;
  1742. PMMPTE LastPte;
  1743. PMMPTE StartingPte;
  1744. MMPTE TempPte;
  1745. SIZE_T QuotaCharge;
  1746. SIZE_T QuotaFree;
  1747. LOGICAL ChargedExactQuota;
  1748. PCONTROL_AREA ControlArea;
  1749. PSEGMENT Segment;
  1750. PAGED_CODE();
  1751. //
  1752. // Make sure the specified starting and ending addresses are
  1753. // within the session view portion of the virtual address space.
  1754. //
  1755. if (((ULONG_PTR)MappedAddress < MiSessionViewStart) ||
  1756. ((ULONG_PTR)MappedAddress >= MiSessionViewStart + MmSessionViewSize)) {
  1757. //
  1758. // Invalid base address.
  1759. //
  1760. return STATUS_INVALID_PARAMETER_1;
  1761. }
  1762. if ((ULONG_PTR)MiSessionViewStart + MmSessionViewSize - (ULONG_PTR)MappedAddress <
  1763. ViewSize) {
  1764. //
1765. // Invalid region size.
  1766. //
  1767. return STATUS_INVALID_PARAMETER_2;
  1768. }
  1769. ASSERT (ViewSize != 0);
  1770. if ((PsGetCurrentProcess()->Flags & PS_PROCESS_FLAGS_IN_SESSION) == 0) {
  1771. return STATUS_NOT_MAPPED_VIEW;
  1772. }
  1773. //
  1774. // Commit previously reserved pages.
  1775. //
  1776. StartingAddress = (PVOID)PAGE_ALIGN (MappedAddress);
  1777. EndingAddress = (PVOID)(((ULONG_PTR)MappedAddress +
  1778. ViewSize - 1) | (PAGE_SIZE - 1));
  1779. ViewSize = (PCHAR)EndingAddress - (PCHAR)StartingAddress + 1;
  1780. ASSERT (MmIsAddressValid (MmSessionSpace) == TRUE);
  1781. Session = &MmSessionSpace->Session;
  1782. ChargedExactQuota = FALSE;
  1783. QuotaCharge = (MiGetPteAddress (EndingAddress) - MiGetPteAddress (StartingAddress) + 1);
  1784. //
1785. // Get the session view mutex to guard against win32k referencing bugs
1786. // where another thread might be trying to delete the view at the same
1787. // time. This also blocks APCs so an APC which takes a page
  1788. // fault does not corrupt various structures.
  1789. //
  1790. count = 0;
  1791. Base16 = (ULONG_PTR)StartingAddress >> 16;
  1792. LOCK_SYSTEM_VIEW_SPACE (Session);
  1793. Hash = (ULONG)(Base16 % Session->SystemSpaceHashKey);
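//
// Probe the hash table linearly from the computed bucket, wrapping at
// the end of the table. If no view containing the range is found by
// the time the table has wrapped twice, the address was never mapped
// and the system bugchecks below.
//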
  1794. do {
  1795. ViewBaseAddress = (PVOID)(Session->SystemSpaceViewTable[Hash].Entry & ~0xFFFF);
  1796. Size = (ULONG) ((Session->SystemSpaceViewTable[Hash].Entry & 0xFFFF) * X64K);
  1797. if ((StartingAddress >= ViewBaseAddress) &&
  1798. (EndingAddress < (PVOID)((PCHAR)ViewBaseAddress + Size))) {
  1799. break;
  1800. }
  1801. Hash += 1;
  1802. if (Hash >= Session->SystemSpaceHashSize) {
  1803. Hash = 0;
  1804. count += 1;
  1805. if (count == 2) {
  1806. KeBugCheckEx (DRIVER_UNMAPPING_INVALID_VIEW,
  1807. (ULONG_PTR)StartingAddress,
  1808. 2,
  1809. 0,
  1810. 0);
  1811. }
  1812. }
  1813. } while (TRUE);
  1814. ControlArea = Session->SystemSpaceViewTable[Hash].ControlArea;
  1815. if (ControlArea->FilePointer != NULL) {
  1816. //
  1817. // Only page file backed sections can be committed.
  1818. //
  1819. UNLOCK_SYSTEM_VIEW_SPACE (Session);
  1820. return STATUS_ALREADY_COMMITTED;
  1821. }
  1822. //
  1823. // Session views always start at the beginning of the file which makes
  1824. // calculating the corresponding prototype PTE here straightforward.
  1825. //
  1826. if ((ControlArea->u.Flags.GlobalOnlyPerSession == 0) &&
  1827. (ControlArea->u.Flags.Rom == 0)) {
  1828. Subsection = (PSUBSECTION)(ControlArea + 1);
  1829. }
  1830. else {
  1831. Subsection = (PSUBSECTION)((PLARGE_CONTROL_AREA)ControlArea + 1);
  1832. }
  1833. StartingPte = Subsection->SubsectionBase;
  1834. StartingPte += (((ULONG_PTR) StartingAddress - (ULONG_PTR) ViewBaseAddress) >> PAGE_SHIFT);
  1835. LastPte = StartingPte + QuotaCharge;
  1836. if (LastPte >= Subsection->SubsectionBase + Subsection->PtesInSubsection) {
  1837. UNLOCK_SYSTEM_VIEW_SPACE (Session);
  1838. return STATUS_INVALID_PARAMETER_2;
  1839. }
  1840. //
  1841. // Charge commitment for the range.
  1842. //
  1843. PointerPte = StartingPte;
  1844. do {
  1845. if (MiChargeCommitment (QuotaCharge, NULL) == TRUE) {
  1846. break;
  1847. }
  1848. //
  1849. // Reduce the charge we are asking for if possible.
  1850. //
  1851. if (ChargedExactQuota == TRUE) {
  1852. //
  1853. // We have already tried for the precise charge,
  1854. // so just return an error.
  1855. //
  1856. KeReleaseGuardedMutexUnsafe (&MmSectionCommitMutex);
  1857. UNLOCK_SYSTEM_VIEW_SPACE (Session);
  1858. return STATUS_COMMITMENT_LIMIT;
  1859. }
  1860. //
1861. // The commitment charge failed; calculate the exact quota,
1862. // taking into account pages that may already be
1863. // committed, and retry the operation.
  1864. //
  1865. KeAcquireGuardedMutexUnsafe (&MmSectionCommitMutex);
  1866. while (PointerPte < LastPte) {
  1867. //
  1868. // Check to see if the prototype PTE is committed.
  1869. // Note that prototype PTEs cannot be decommitted so
  1870. // PTEs only need to be checked for zeroes.
  1871. //
  1872. if (PointerPte->u.Long != 0) {
  1873. QuotaCharge -= 1;
  1874. }
  1875. PointerPte += 1;
  1876. }
  1877. PointerPte = StartingPte;
  1878. ChargedExactQuota = TRUE;
  1879. //
  1880. // If the entire range is committed then there's nothing to charge.
  1881. //
  1882. if (QuotaCharge == 0) {
  1883. KeReleaseGuardedMutexUnsafe (&MmSectionCommitMutex);
  1884. UNLOCK_SYSTEM_VIEW_SPACE (Session);
  1885. return STATUS_SUCCESS;
  1886. }
  1887. } while (TRUE);
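//
// The commitment charge has been resolved. If the exact-charge path
// ran above, MmSectionCommitMutex is already held (it was acquired
// before recounting the prototype PTEs); otherwise acquire it now
// before filling in the prototype PTEs.
//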
  1888. if (ChargedExactQuota == FALSE) {
  1889. KeAcquireGuardedMutexUnsafe (&MmSectionCommitMutex);
  1890. }
  1891. //
  1892. // Commit all the pages.
  1893. //
  1894. Segment = ControlArea->Segment;
  1895. TempPte = Segment->SegmentPteTemplate;
  1896. ASSERT (TempPte.u.Long != 0);
  1897. QuotaFree = 0;
  1898. while (PointerPte < LastPte) {
  1899. if (PointerPte->u.Long != 0) {
  1900. //
  1901. // Page is already committed, back out commitment.
  1902. //
  1903. QuotaFree += 1;
  1904. }
  1905. else {
  1906. MI_WRITE_INVALID_PTE (PointerPte, TempPte);
  1907. }
  1908. PointerPte += 1;
  1909. }
  1910. //
  1911. // Subtract out any excess, then update the segment charges.
  1912. // Note only segment commit is excess - process commit must
  1913. // remain fully charged.
  1914. //
  1915. if (ChargedExactQuota == FALSE) {
  1916. ASSERT (QuotaCharge >= QuotaFree);
  1917. QuotaCharge -= QuotaFree;
  1918. //
  1919. // Return the QuotaFree excess commitment after the
  1920. // mutexes are released to remove needless contention.
  1921. //
  1922. }
  1923. else {
  1924. //
  1925. // Exact quota was charged so zero this to signify
  1926. // there is no excess to return.
  1927. //
  1928. QuotaFree = 0;
  1929. }
  1930. if (QuotaCharge != 0) {
  1931. Segment->NumberOfCommittedPages += QuotaCharge;
  1932. InterlockedExchangeAddSizeT (&MmSharedCommit, QuotaCharge);
  1933. MM_TRACK_COMMIT (MM_DBG_COMMIT_ALLOCVM_SEGMENT, QuotaCharge);
  1934. }
  1935. KeReleaseGuardedMutexUnsafe (&MmSectionCommitMutex);
  1936. //
  1937. // Update the per-process charges.
  1938. //
  1939. UNLOCK_SYSTEM_VIEW_SPACE (Session);
  1940. //
  1941. // Return any excess segment commit that may have been charged.
  1942. //
  1943. if (QuotaFree != 0) {
  1944. MiReturnCommitment (QuotaFree);
  1945. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_ALLOCVM_SEGMENT, QuotaFree);
  1946. }
  1947. return STATUS_SUCCESS;
  1948. }
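//
// Illustrative sketch (not part of the original source): the commitment
// charge computed in MmCommitSessionMappedView above is simply the
// number of pages spanned by the caller's unaligned range. An
// equivalent computation in plain arithmetic, assuming the usual
// PAGE_ALIGN / PAGE_SHIFT / PAGE_SIZE definitions:
//
#if 0
SIZE_T
ExamplePagesSpanned (
    IN PVOID MappedAddress,
    IN SIZE_T ViewSize
    )
{
    PVOID StartingAddress;
    PVOID EndingAddress;

    StartingAddress = PAGE_ALIGN (MappedAddress);
    EndingAddress = (PVOID)(((ULONG_PTR)MappedAddress + ViewSize - 1) |
                                (PAGE_SIZE - 1));

    return (((ULONG_PTR)EndingAddress - (ULONG_PTR)StartingAddress) >>
                                PAGE_SHIFT) + 1;
}
#endif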
  1949. NTSTATUS
  1950. MiResetVirtualMemory (
  1951. IN PVOID StartingAddress,
  1952. IN PVOID EndingAddress,
  1953. IN PMMVAD Vad,
  1954. IN PEPROCESS Process
  1955. )
  1956. /*++
1957. Routine Description:
This routine resets the specified range of committed pages: dirty bits
are cleared, backing page file space is released, and modified pages
are moved to the standby list so their contents can be discarded rather
than written to the paging file.
  1958. Arguments:
  1959. StartingAddress - Supplies the starting address of the range.
  1960. EndingAddress - Supplies the ending address of the range.
  1961. Vad - Supplies the relevant VAD for the range.
  1962. Process - Supplies the current process.
  1963. Return Value:
  1964. NTSTATUS.
  1965. Environment:
  1966. Kernel mode, APCs disabled, AddressCreation mutex held.
  1967. --*/
  1968. {
  1969. PVOID TempVa;
  1970. PMMPTE PointerPte;
  1971. PMMPTE ProtoPte;
  1972. PMMPTE PointerPde;
  1973. PMMPTE PointerPpe;
  1974. PMMPTE PointerPxe;
  1975. PMMPTE LastPte;
  1976. MMPTE PteContents;
  1977. ULONG Waited;
  1978. ULONG First;
  1979. KIRQL OldIrql;
  1980. PMMPFN Pfn1;
  1981. PMMCLONE_BLOCK CloneBlock;
  1982. #if DBG
  1983. PMMCLONE_DESCRIPTOR CloneDescriptor;
  1984. #endif
  1985. MMPTE_FLUSH_LIST PteFlushList;
  1986. #if defined(_X86_) || defined(_AMD64_)
  1987. WSLE_NUMBER WsPfnIndex;
  1988. WSLE_NUMBER WorkingSetIndex;
  1989. #endif
  1990. if (Vad->u.VadFlags.PrivateMemory == 0) {
  1991. if (Vad->ControlArea->FilePointer != NULL) {
  1992. //
  1993. // Only page file backed sections can be reset.
  1994. //
  1995. return STATUS_USER_MAPPED_FILE;
  1996. }
  1997. }
  1998. OldIrql = MM_NOIRQL;
  1999. First = TRUE;
  2000. PointerPte = MiGetPteAddress (StartingAddress);
  2001. LastPte = MiGetPteAddress (EndingAddress);
  2002. PteFlushList.Count = 0;
  2003. MmLockPagableSectionByHandle (ExPageLockHandle);
  2004. //
  2005. // Examine all the PTEs in the range.
  2006. //
  2007. LOCK_WS_UNSAFE (Process);
  2008. while (PointerPte <= LastPte) {
  2009. if (MiIsPteOnPdeBoundary (PointerPte) || (First)) {
  2010. if (PteFlushList.Count != 0) {
  2011. MiFlushPteList (&PteFlushList, FALSE);
  2012. PteFlushList.Count = 0;
  2013. }
  2014. if (MiIsPteOnPpeBoundary (PointerPte) || (First)) {
  2015. if (MiIsPteOnPxeBoundary (PointerPte) || (First)) {
  2016. PointerPxe = MiGetPpeAddress (PointerPte);
  2017. if (!MiDoesPxeExistAndMakeValid (PointerPxe,
  2018. Process,
  2019. OldIrql,
  2020. &Waited)) {
  2021. //
  2022. // This extended page directory parent entry is empty,
  2023. // go to the next one.
  2024. //
  2025. PointerPxe += 1;
  2026. PointerPpe = MiGetVirtualAddressMappedByPte (PointerPxe);
  2027. PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe);
  2028. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  2029. continue;
  2030. }
  2031. }
  2032. PointerPpe = MiGetPdeAddress (PointerPte);
  2033. if (!MiDoesPpeExistAndMakeValid (PointerPpe,
  2034. Process,
  2035. OldIrql,
  2036. &Waited)) {
  2037. //
  2038. // This page directory parent entry is empty,
  2039. // go to the next one.
  2040. //
  2041. PointerPpe += 1;
  2042. PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe);
  2043. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  2044. continue;
  2045. }
  2046. }
  2047. //
2048. // Now pointing at the next page table page; make the
2049. // page table page exist and make it valid.
  2050. //
  2051. First = FALSE;
  2052. PointerPde = MiGetPteAddress (PointerPte);
  2053. if (!MiDoesPdeExistAndMakeValid (PointerPde,
  2054. Process,
  2055. OldIrql,
  2056. &Waited)) {
  2057. //
  2058. // This page directory entry is empty, go to the next one.
  2059. //
  2060. PointerPde += 1;
  2061. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  2062. continue;
  2063. }
  2064. }
  2065. PteContents = *PointerPte;
  2066. ProtoPte = NULL;
  2067. if ((PteContents.u.Hard.Valid == 0) &&
  2068. (PteContents.u.Soft.Prototype == 1)) {
  2069. //
2070. // This is a prototype PTE; evaluate it. Note that being a
2071. // prototype PTE does not guarantee that this is a
  2072. // regular or long VAD - it may be a short VAD in a forked process,
  2073. // so check PrivateMemory before referencing the FirstPrototypePte
  2074. // field.
  2075. //
  2076. if ((Vad->u.VadFlags.PrivateMemory == 0) &&
  2077. (Vad->FirstPrototypePte != NULL)) {
  2078. ProtoPte = MiGetProtoPteAddress (Vad,
  2079. MI_VA_TO_VPN (
  2080. MiGetVirtualAddressMappedByPte (PointerPte)));
  2081. }
  2082. else {
  2083. CloneBlock = (PMMCLONE_BLOCK) MiPteToProto (PointerPte);
  2084. ProtoPte = (PMMPTE) CloneBlock;
  2085. #if DBG
  2086. CloneDescriptor = MiLocateCloneAddress (Process, (PVOID)CloneBlock);
  2087. ASSERT (CloneDescriptor != NULL);
  2088. #endif
  2089. }
  2090. if (OldIrql == MM_NOIRQL) {
  2091. ASSERT (PteFlushList.Count == 0);
  2092. LOCK_PFN (OldIrql);
  2093. ASSERT (OldIrql != MM_NOIRQL);
  2094. }
  2095. //
  2096. // The working set mutex may be released in order to make the
  2097. // prototype PTE which resides in paged pool resident. If this
  2098. // occurs, the page directory and/or page table of the original
  2099. // user address may get trimmed. Account for that here.
  2100. //
  2101. if (MiGetPteAddress (ProtoPte)->u.Hard.Valid == 0) {
  2102. if (PteFlushList.Count != 0) {
  2103. MiFlushPteList (&PteFlushList, FALSE);
  2104. PteFlushList.Count = 0;
  2105. }
  2106. if (MiMakeSystemAddressValidPfnWs (ProtoPte, Process, OldIrql) != 0) {
  2107. //
2108. // The working set mutex and PFN lock were released and
2109. // reacquired; restart from the top.
  2110. //
  2111. First = TRUE;
  2112. continue;
  2113. }
  2114. }
  2115. PteContents = *ProtoPte;
  2116. }
  2117. if (PteContents.u.Hard.Valid == 1) {
  2118. Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
  2119. #if defined(_X86_) || defined(_AMD64_)
  2120. if (!ProtoPte) {
  2121. //
2122. // The processor sets the access bit (and inserts a TB entry)
2123. // automatically whenever the valid bit is set, so clear it here in
2124. // both the PTE and the WSLE so we know this page is a better trim
2125. // candidate should we need the memory. If the access bit is already
2126. // clear then just skip the WSLE search on the premise
2127. // that the page is already getting aged.
  2128. //
  2129. if (MI_GET_ACCESSED_IN_PTE (&PteContents) == 1) {
  2130. MI_SET_ACCESSED_IN_PTE (PointerPte, 0);
  2131. WsPfnIndex = Pfn1->u1.WsIndex;
  2132. TempVa = MiGetVirtualAddressMappedByPte (PointerPte);
  2133. WorkingSetIndex = MiLocateWsle (TempVa,
  2134. MmWorkingSetList,
  2135. WsPfnIndex);
  2136. ASSERT (WorkingSetIndex != WSLE_NULL_INDEX);
  2137. MmWsle[WorkingSetIndex].u1.e1.Age = 3;
  2138. }
  2139. }
  2140. #endif
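//
// The PFN lock must be held before examining the reference count and
// releasing page file space. If it is not held yet, acquire it and
// re-examine this same PTE from the top of the loop (PointerPte is
// deliberately not advanced).
//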
  2141. if (OldIrql == MM_NOIRQL) {
  2142. ASSERT (PteFlushList.Count == 0);
  2143. LOCK_PFN (OldIrql);
  2144. ASSERT (OldIrql != MM_NOIRQL);
  2145. continue;
  2146. }
  2147. if (Pfn1->u3.e2.ReferenceCount == 1) {
  2148. //
  2149. // Only this process has the page mapped.
  2150. //
  2151. MI_SET_MODIFIED (Pfn1, 0, 0x20);
  2152. MiReleasePageFileSpace (Pfn1->OriginalPte);
  2153. Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
  2154. }
  2155. if (!ProtoPte) {
  2156. if (MI_IS_PTE_DIRTY (PteContents)) {
  2157. //
  2158. // Clear the dirty bit and flush TB since it
  2159. // is NOT a prototype PTE.
  2160. //
  2161. MI_SET_ACCESSED_IN_PTE (&PteContents, 0);
  2162. MI_SET_PTE_CLEAN (PteContents);
  2163. MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, PteContents);
  2164. if (PteFlushList.Count < MM_MAXIMUM_FLUSH_COUNT) {
  2165. TempVa = MiGetVirtualAddressMappedByPte (PointerPte);
  2166. PteFlushList.FlushVa[PteFlushList.Count] = TempVa;
  2167. PteFlushList.Count += 1;
  2168. }
  2169. }
  2170. }
  2171. }
  2172. else if (PteContents.u.Soft.Transition == 1) {
  2173. Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
  2174. if (OldIrql == MM_NOIRQL) {
  2175. //
  2176. // This must be a private page (because the PFN lock is not
  2177. // held). If the page is clean, just march on to the next one.
  2178. //
  2179. ASSERT (!ProtoPte);
  2180. ASSERT (PteFlushList.Count == 0);
  2181. if (Pfn1->u3.e1.PageLocation == StandbyPageList) {
  2182. PointerPte += 1;
  2183. continue;
  2184. }
  2185. LOCK_PFN (OldIrql);
  2186. ASSERT (OldIrql != MM_NOIRQL);
  2187. continue;
  2188. }
  2189. if ((Pfn1->u3.e1.PageLocation == ModifiedPageList) &&
  2190. (Pfn1->u3.e2.ReferenceCount == 0)) {
  2191. //
  2192. // Remove from the modified list, release the page
  2193. // file space and insert on the standby list.
  2194. //
  2195. MI_SET_MODIFIED (Pfn1, 0, 0x21);
  2196. MiUnlinkPageFromList (Pfn1);
  2197. MiReleasePageFileSpace (Pfn1->OriginalPte);
  2198. Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
  2199. MiInsertPageInList (&MmStandbyPageListHead,
  2200. MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE(&PteContents));
  2201. }
  2202. }
  2203. else {
  2204. if (PteContents.u.Soft.PageFileHigh != 0) {
  2205. if (OldIrql == MM_NOIRQL) {
  2206. //
  2207. // This must be a private page (because the PFN
  2208. // lock is not held).
  2209. //
  2210. ASSERT (!ProtoPte);
  2211. ASSERT (PteFlushList.Count == 0);
  2212. LOCK_PFN (OldIrql);
  2213. ASSERT (OldIrql != MM_NOIRQL);
  2214. }
  2215. MiReleasePageFileSpace (PteContents);
  2216. if (PteFlushList.Count != 0) {
  2217. MiFlushPteList (&PteFlushList, FALSE);
  2218. PteFlushList.Count = 0;
  2219. }
  2220. if (ProtoPte) {
  2221. ProtoPte->u.Soft.PageFileHigh = 0;
  2222. }
  2223. UNLOCK_PFN (OldIrql);
  2224. OldIrql = MM_NOIRQL;
  2225. if (!ProtoPte) {
  2226. PointerPte->u.Soft.PageFileHigh = 0;
  2227. }
  2228. }
  2229. else {
  2230. if (OldIrql != MM_NOIRQL) {
  2231. if (PteFlushList.Count != 0) {
  2232. MiFlushPteList (&PteFlushList, FALSE);
  2233. PteFlushList.Count = 0;
  2234. }
  2235. UNLOCK_PFN (OldIrql);
  2236. OldIrql = MM_NOIRQL;
  2237. }
  2238. }
  2239. }
  2240. PointerPte += 1;
  2241. }
  2242. if (OldIrql != MM_NOIRQL) {
  2243. if (PteFlushList.Count != 0) {
  2244. MiFlushPteList (&PteFlushList, FALSE);
  2245. }
  2246. UNLOCK_PFN (OldIrql);
  2247. OldIrql = MM_NOIRQL;
  2248. }
  2249. else {
  2250. ASSERT (PteFlushList.Count == 0);
  2251. }
  2252. UNLOCK_WS_UNSAFE (Process);
  2253. MmUnlockPagableImageSection (ExPageLockHandle);
  2254. return STATUS_SUCCESS;
  2255. }
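//
// Illustrative sketch (not part of the original source): this reset
// path is reached when a caller passes MEM_RESET to
// NtAllocateVirtualMemory, indicating the contents of the range are no
// longer needed and should not be written to the paging file. The
// helper name is hypothetical.
//
#if 0
NTSTATUS
ExampleResetRange (
    IN PVOID BaseAddress,
    IN SIZE_T RegionSize
    )
{
    //
    // The protection argument is ignored for MEM_RESET but must still
    // be a valid protection value.
    //
    return NtAllocateVirtualMemory (NtCurrentProcess (),
                                    &BaseAddress,
                                    0,
                                    &RegionSize,
                                    MEM_RESET,
                                    PAGE_READWRITE);
}
#endif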
  2256. LOGICAL
  2257. MiCreatePageTablesForPhysicalRange (
  2258. IN PEPROCESS Process,
  2259. IN PVOID StartingAddress,
  2260. IN PVOID EndingAddress
  2261. )
  2262. /*++
  2263. Routine Description:
  2264. This routine initializes page directory and page table pages for a
  2265. user-controlled physical range of pages.
  2266. Arguments:
  2267. Process - Supplies the current process.
  2268. StartingAddress - Supplies the starting address of the range.
  2269. EndingAddress - Supplies the ending address of the range.
  2270. Return Value:
  2271. TRUE if the page tables were created, FALSE if not.
  2272. Environment:
  2273. Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
  2274. held.
  2275. --*/
  2276. {
  2277. MMPTE PteContents;
  2278. PMMPTE LastPte;
  2279. PMMPTE LastPde;
  2280. PMMPTE LastPpe;
  2281. PMMPTE PointerPte;
  2282. PMMPTE PointerPde;
  2283. PMMPTE PointerPpe;
  2284. PVOID UsedPageTableHandle;
  2285. KIRQL OldIrql;
  2286. PMMPFN Pfn1;
  2287. PFN_NUMBER PagesNeeded;
  2288. PointerPpe = MiGetPpeAddress (StartingAddress);
  2289. PointerPde = MiGetPdeAddress (StartingAddress);
  2290. PointerPte = MiGetPteAddress (StartingAddress);
  2291. LastPpe = MiGetPpeAddress (EndingAddress);
  2292. LastPde = MiGetPdeAddress (EndingAddress);
  2293. LastPte = MiGetPteAddress (EndingAddress);
  2294. //
  2295. // Charge resident available pages for all of the page directory and table
  2296. // pages as they will not be paged until the VAD is freed.
  2297. //
  2298. if (LastPte != PointerPte) {
  2299. PagesNeeded = MI_COMPUTE_PAGES_SPANNED (PointerPte,
  2300. LastPte - PointerPte);
  2301. #if (_MI_PAGING_LEVELS >= 3)
  2302. if (LastPde != PointerPde) {
  2303. PagesNeeded += MI_COMPUTE_PAGES_SPANNED (PointerPde,
  2304. LastPde - PointerPde);
  2305. #if (_MI_PAGING_LEVELS >= 4)
  2306. if (LastPpe != PointerPpe) {
  2307. PagesNeeded += MI_COMPUTE_PAGES_SPANNED (PointerPpe,
  2308. LastPpe - PointerPpe);
  2309. }
  2310. #endif
  2311. }
  2312. #endif
  2313. }
  2314. else {
  2315. PagesNeeded = 1;
  2316. #if (_MI_PAGING_LEVELS >= 3)
  2317. PagesNeeded += 1;
  2318. #endif
  2319. #if (_MI_PAGING_LEVELS >= 4)
  2320. PagesNeeded += 1;
  2321. #endif
  2322. }
  2323. MmLockPagableSectionByHandle (ExPageLockHandle);
  2324. LOCK_PFN (OldIrql);
  2325. if ((SPFN_NUMBER)PagesNeeded > MI_NONPAGABLE_MEMORY_AVAILABLE() - 20) {
  2326. UNLOCK_PFN (OldIrql);
  2327. MmUnlockPagableImageSection (ExPageLockHandle);
  2328. return FALSE;
  2329. }
  2330. MI_DECREMENT_RESIDENT_AVAILABLE (PagesNeeded, MM_RESAVAIL_ALLOCATE_USER_PAGE_TABLE);
  2331. UNLOCK_PFN (OldIrql);
  2332. UsedPageTableHandle = NULL;
  2333. //
  2334. // Fill in all the page table pages with the zero PTE.
  2335. //
  2336. while (PointerPte <= LastPte) {
  2337. if (MiIsPteOnPdeBoundary (PointerPte) || UsedPageTableHandle == NULL) {
  2338. PointerPde = MiGetPteAddress (PointerPte);
  2339. //
2340. // Now pointing at the next page table page; make the
2341. // page table page exist and make it valid.
  2342. //
2343. // Note this ripples sharecounts through the paging hierarchy, so
2344. // there is no need to raise sharecounts explicitly to prevent
2345. // trimming of the page directory (and parent) pages; making the
2346. // page table valid below does this automatically.
  2347. //
  2348. MiMakePdeExistAndMakeValid (PointerPde, Process, MM_NOIRQL);
  2349. //
  2350. // Up the sharecount so the page table page will not get
  2351. // trimmed even if it has no currently valid entries.
  2352. //
  2353. PteContents = *PointerPde;
  2354. Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
  2355. LOCK_PFN (OldIrql);
  2356. Pfn1->u2.ShareCount += 1;
  2357. UNLOCK_PFN (OldIrql);
  2358. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (StartingAddress);
  2359. }
  2360. ASSERT (PointerPte->u.Long == 0);
  2361. //
  2362. // Increment the count of non-zero page table entries
  2363. // for this page table - even though this entry is still zero,
  2364. // this is a special case.
  2365. //
  2366. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  2367. PointerPte += 1;
  2368. StartingAddress = (PVOID)((PUCHAR)StartingAddress + PAGE_SIZE);
  2369. }
  2370. MmUnlockPagableImageSection (ExPageLockHandle);
  2371. return TRUE;
  2372. }
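//
// Illustrative sketch (not part of the original source): the number of
// bottom-level page table pages spanned by a user range, which is what
// the resident available charge above accounts for, can be expressed
// directly as the page directory index span, assuming the standard
// PDI_SHIFT definition. Page directory and parent pages at higher
// paging levels are charged in a similar manner.
//
#if 0
ULONG_PTR
ExamplePageTablesSpanned (
    IN PVOID StartingAddress,
    IN PVOID EndingAddress
    )
{
    return (((ULONG_PTR)EndingAddress >> PDI_SHIFT) -
            ((ULONG_PTR)StartingAddress >> PDI_SHIFT)) + 1;
}
#endif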
  2373. VOID
  2374. MiDeletePageTablesForPhysicalRange (
  2375. IN PVOID StartingAddress,
  2376. IN PVOID EndingAddress
  2377. )
  2378. /*++
  2379. Routine Description:
  2380. This routine deletes page directory and page table pages for a
  2381. user-controlled physical range of pages.
  2382. Even though PTEs may be zero in this range, UsedPageTable counts were
  2383. incremented for these special ranges and must be decremented now.
  2384. Arguments:
  2385. StartingAddress - Supplies the starting address of the range.
  2386. EndingAddress - Supplies the ending address of the range.
  2387. Return Value:
  2388. None.
  2389. Environment:
  2390. Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
  2391. held.
  2392. --*/
  2393. {
  2394. PVOID TempVa;
  2395. MMPTE PteContents;
  2396. PMMPTE LastPte;
  2397. PMMPTE LastPde;
  2398. PMMPTE LastPpe;
  2399. PMMPTE PointerPte;
  2400. PMMPTE PointerPde;
  2401. PFN_NUMBER PagesNeeded;
  2402. PEPROCESS CurrentProcess;
  2403. PVOID UsedPageTableHandle;
  2404. KIRQL OldIrql;
  2405. PMMPFN Pfn1;
  2406. PMMPTE PointerPpe;
  2407. #if (_MI_PAGING_LEVELS >= 4)
  2408. PMMPTE PointerPxe;
  2409. #endif
  2410. CurrentProcess = PsGetCurrentProcess();
  2411. PointerPpe = MiGetPpeAddress (StartingAddress);
  2412. PointerPde = MiGetPdeAddress (StartingAddress);
  2413. PointerPte = MiGetPteAddress (StartingAddress);
  2414. LastPpe = MiGetPpeAddress (EndingAddress);
  2415. LastPde = MiGetPdeAddress (EndingAddress);
  2416. LastPte = MiGetPteAddress (EndingAddress);
  2417. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (StartingAddress);
  2418. //
  2419. // Each PTE is already zeroed - just delete the containing pages.
  2420. //
  2421. // Restore resident available pages for all of the page directory and table
  2422. // pages as they can now be paged again.
  2423. //
  2424. if (LastPte != PointerPte) {
  2425. PagesNeeded = MI_COMPUTE_PAGES_SPANNED (PointerPte,
  2426. LastPte - PointerPte);
  2427. #if (_MI_PAGING_LEVELS >= 3)
  2428. if (LastPde != PointerPde) {
  2429. PagesNeeded += MI_COMPUTE_PAGES_SPANNED (PointerPde,
  2430. LastPde - PointerPde);
  2431. #if (_MI_PAGING_LEVELS >= 4)
  2432. if (LastPpe != PointerPpe) {
  2433. PagesNeeded += MI_COMPUTE_PAGES_SPANNED (PointerPpe,
  2434. LastPpe - PointerPpe);
  2435. }
  2436. #endif
  2437. }
  2438. #endif
  2439. }
  2440. else {
  2441. PagesNeeded = 1;
  2442. #if (_MI_PAGING_LEVELS >= 3)
  2443. PagesNeeded += 1;
  2444. #endif
  2445. #if (_MI_PAGING_LEVELS >= 4)
  2446. PagesNeeded += 1;
  2447. #endif
  2448. }
  2449. MmLockPagableSectionByHandle (ExPageLockHandle);
  2450. LOCK_PFN (OldIrql);
  2451. while (PointerPte <= LastPte) {
  2452. ASSERT (PointerPte->u.Long == 0);
  2453. PointerPte += 1;
  2454. MI_DECREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  2455. if ((MiIsPteOnPdeBoundary(PointerPte)) || (PointerPte > LastPte)) {
  2456. //
  2457. // The virtual address is on a page directory boundary or it is
  2458. // the last address in the entire range.
  2459. //
  2460. // If all the entries have been eliminated from the previous
  2461. // page table page, delete the page table page itself.
  2462. //
  2463. PointerPde = MiGetPteAddress (PointerPte - 1);
  2464. ASSERT (PointerPde->u.Hard.Valid == 1);
  2465. //
  2466. // Down the sharecount on the finished page table page.
  2467. //
  2468. PteContents = *PointerPde;
  2469. Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
  2470. ASSERT (Pfn1->u2.ShareCount > 1);
  2471. Pfn1->u2.ShareCount -= 1;
  2472. //
  2473. // If all the entries have been eliminated from the previous
  2474. // page table page, delete the page table page itself.
  2475. //
  2476. if (MI_GET_USED_PTES_FROM_HANDLE (UsedPageTableHandle) == 0) {
  2477. ASSERT (PointerPde->u.Long != 0);
  2478. #if (_MI_PAGING_LEVELS >= 3)
  2479. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (PointerPte - 1);
  2480. MI_DECREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  2481. #endif
  2482. TempVa = MiGetVirtualAddressMappedByPte(PointerPde);
  2483. MiDeletePte (PointerPde,
  2484. TempVa,
  2485. FALSE,
  2486. CurrentProcess,
  2487. NULL,
  2488. NULL,
  2489. OldIrql);
  2490. #if (_MI_PAGING_LEVELS >= 3)
  2491. if ((MiIsPteOnPpeBoundary(PointerPte)) || (PointerPte > LastPte)) {
  2492. PointerPpe = MiGetPteAddress (PointerPde);
  2493. ASSERT (PointerPpe->u.Hard.Valid == 1);
  2494. //
  2495. // If all the entries have been eliminated from the previous
  2496. // page directory page, delete the page directory page too.
  2497. //
  2498. if (MI_GET_USED_PTES_FROM_HANDLE (UsedPageTableHandle) == 0) {
  2499. ASSERT (PointerPpe->u.Long != 0);
  2500. #if (_MI_PAGING_LEVELS >= 4)
  2501. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (PointerPde);
  2502. MI_DECREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  2503. #endif
  2504. TempVa = MiGetVirtualAddressMappedByPte(PointerPpe);
  2505. MiDeletePte (PointerPpe,
  2506. TempVa,
  2507. FALSE,
  2508. CurrentProcess,
  2509. NULL,
  2510. NULL,
  2511. OldIrql);
  2512. #if (_MI_PAGING_LEVELS >= 4)
  2513. if ((MiIsPteOnPxeBoundary(PointerPte)) || (PointerPte > LastPte)) {
  2514. PointerPxe = MiGetPdeAddress (PointerPde);
  2515. if (MI_GET_USED_PTES_FROM_HANDLE (UsedPageTableHandle) == 0) {
  2516. ASSERT (PointerPxe->u.Long != 0);
  2517. TempVa = MiGetVirtualAddressMappedByPte(PointerPxe);
  2518. MiDeletePte (PointerPxe,
  2519. TempVa,
  2520. FALSE,
  2521. CurrentProcess,
  2522. NULL,
  2523. NULL,
  2524. OldIrql);
  2525. }
  2526. }
  2527. #endif
  2528. }
  2529. }
  2530. #endif
  2531. }
  2532. if (PointerPte > LastPte) {
  2533. break;
  2534. }
  2535. //
2536. // Release the PFN lock. This prevents a single thread
2537. // from keeping other high priority threads blocked while
2538. // a large address range is deleted.
  2539. //
  2540. UNLOCK_PFN (OldIrql);
  2541. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE ((PVOID)((PUCHAR)StartingAddress + PAGE_SIZE));
  2542. LOCK_PFN (OldIrql);
  2543. }
  2544. StartingAddress = (PVOID)((PUCHAR)StartingAddress + PAGE_SIZE);
  2545. }
  2546. UNLOCK_PFN (OldIrql);
  2547. MI_INCREMENT_RESIDENT_AVAILABLE (PagesNeeded, MM_RESAVAIL_FREE_USER_PAGE_TABLE);
  2548. MmUnlockPagableImageSection (ExPageLockHandle);
  2549. //
  2550. // All done, return.
  2551. //
  2552. return;
  2553. }