Leaked source code of windows server 2003
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1701 lines
50 KiB

  1. /*++
  2. Copyright (c) 1989 Microsoft Corporation
  3. Module Name:
  4. freevm.c
  5. Abstract:
  6. This module contains the routines which implement the
  7. NtFreeVirtualMemory service.
  8. Author:
  9. Lou Perazzoli (loup) 22-May-1989
  10. Landy Wang (landyw) 02-June-1997
  11. Revision History:
  12. --*/
  13. #include "mi.h"
  14. #define MEM_CHECK_COMMIT_STATE 0x400000
  15. #define MM_VALID_PTE_SIZE (256)
  16. MMPTE MmDecommittedPte = {MM_DECOMMIT << MM_PROTECT_FIELD_SHIFT};
  17. #if DBG
  18. extern PEPROCESS MmWatchProcess;
  19. #endif // DBG
  20. #ifdef ALLOC_PRAGMA
  21. #pragma alloc_text(PAGE,NtFreeVirtualMemory)
  22. #pragma alloc_text(PAGE,MiIsEntireRangeCommitted)
  23. #endif
  24. VOID
  25. MiProcessValidPteList (
  26. IN PMMPTE *PteList,
  27. IN ULONG Count
  28. );
  29. ULONG
  30. MiDecommitPages (
  31. IN PVOID StartingAddress,
  32. IN PMMPTE EndingPte,
  33. IN PEPROCESS Process,
  34. IN PMMVAD_SHORT Vad
  35. );
  36. NTSTATUS
  37. NtFreeVirtualMemory(
  38. IN HANDLE ProcessHandle,
  39. IN OUT PVOID *BaseAddress,
  40. IN OUT PSIZE_T RegionSize,
  41. IN ULONG FreeType
  42. )
  43. /*++
  44. Routine Description:
  45. This function deletes a region of pages within the virtual address
  46. space of a subject process.
  47. Arguments:
  48. ProcessHandle - An open handle to a process object.
  49. BaseAddress - The base address of the region of pages
  50. to be freed. This value is rounded down to the
  51. next host page address boundary.
  52. RegionSize - A pointer to a variable that will receive
  53. the actual size in bytes of the freed region of
  54. pages. The initial value of this argument is
  55. rounded up to the next host page size boundary.
  56. FreeType - A set of flags that describe the type of
  57. free that is to be performed for the specified
  58. region of pages.
  59. FreeType Flags
  60. MEM_DECOMMIT - The specified region of pages is to be decommitted.
  61. MEM_RELEASE - The specified region of pages is to be released.
  62. Return Value:
  63. NTSTATUS.
  64. --*/
  65. {
  66. KAPC_STATE ApcState;
  67. PMMVAD_SHORT Vad;
  68. PMMVAD_SHORT NewVad;
  69. PMMVAD PreviousVad;
  70. PMMVAD NextVad;
  71. PEPROCESS Process;
  72. KPROCESSOR_MODE PreviousMode;
  73. PVOID StartingAddress;
  74. PVOID EndingAddress;
  75. NTSTATUS Status;
  76. LOGICAL Attached;
  77. SIZE_T CapturedRegionSize;
  78. PVOID CapturedBase;
  79. PMMPTE StartingPte;
  80. PMMPTE EndingPte;
  81. SIZE_T OldQuota;
  82. SIZE_T QuotaCharge;
  83. SIZE_T CommitReduction;
  84. ULONG_PTR OldEnd;
  85. LOGICAL UserPhysicalPages;
  86. #if defined(_MIALT4K_)
  87. PVOID StartingAddress4k;
  88. PVOID EndingAddress4k;
  89. PVOID Wow64Process;
  90. #endif
  91. PETHREAD CurrentThread;
  92. PEPROCESS CurrentProcess;
  93. PAGED_CODE();
  94. //
  95. // Check to make sure FreeType is good.
  96. //
  97. if ((FreeType & ~(MEM_DECOMMIT | MEM_RELEASE)) != 0) {
  98. return STATUS_INVALID_PARAMETER_4;
  99. }
  100. //
  101. // One of MEM_DECOMMIT or MEM_RELEASE must be specified, but not both.
  102. //
  103. if (((FreeType & (MEM_DECOMMIT | MEM_RELEASE)) == 0) ||
  104. ((FreeType & (MEM_DECOMMIT | MEM_RELEASE)) ==
  105. (MEM_DECOMMIT | MEM_RELEASE))) {
  106. return STATUS_INVALID_PARAMETER_4;
  107. }
  108. CurrentThread = PsGetCurrentThread ();
  109. CurrentProcess = PsGetCurrentProcessByThread (CurrentThread);
  110. PreviousMode = KeGetPreviousModeByThread(&CurrentThread->Tcb);
  111. //
  112. // Establish an exception handler, probe the specified addresses
  113. // for write access and capture the initial values.
  114. //
  115. try {
  116. if (PreviousMode != KernelMode) {
  117. ProbeForWritePointer (BaseAddress);
  118. ProbeForWriteUlong_ptr (RegionSize);
  119. }
  120. //
  121. // Capture the base address.
  122. //
  123. CapturedBase = *BaseAddress;
  124. //
  125. // Capture the region size.
  126. //
  127. CapturedRegionSize = *RegionSize;
  128. } except (ExSystemExceptionFilter()) {
  129. //
  130. // If an exception occurs during the probe or capture
  131. // of the initial values, then handle the exception and
  132. // return the exception code as the status value.
  133. //
  134. return GetExceptionCode();
  135. }
  136. //
  137. // Make sure the specified starting and ending addresses are
  138. // within the user part of the virtual address space.
  139. //
  140. if (CapturedBase > MM_HIGHEST_USER_ADDRESS) {
  141. //
  142. // Invalid base address.
  143. //
  144. return STATUS_INVALID_PARAMETER_2;
  145. }
  146. if ((ULONG_PTR)MM_HIGHEST_USER_ADDRESS - (ULONG_PTR)CapturedBase <
  147. CapturedRegionSize) {
  148. //
  149. // Invalid region size;
  150. //
  151. return STATUS_INVALID_PARAMETER_3;
  152. }
  153. EndingAddress = (PVOID)(((LONG_PTR)CapturedBase + CapturedRegionSize - 1) |
  154. (PAGE_SIZE - 1));
  155. StartingAddress = PAGE_ALIGN(CapturedBase);
  156. Attached = FALSE;
  157. if (ProcessHandle == NtCurrentProcess()) {
  158. Process = CurrentProcess;
  159. }
  160. else {
  161. //
  162. // Reference the specified process handle for VM_OPERATION access.
  163. //
  164. Status = ObReferenceObjectByHandle ( ProcessHandle,
  165. PROCESS_VM_OPERATION,
  166. PsProcessType,
  167. PreviousMode,
  168. (PVOID *)&Process,
  169. NULL );
  170. if (!NT_SUCCESS(Status)) {
  171. return Status;
  172. }
  173. //
  174. // If the specified process is not the current process, attach
  175. // to the specified process.
  176. //
  177. if (CurrentProcess != Process) {
  178. KeStackAttachProcess (&Process->Pcb, &ApcState);
  179. Attached = TRUE;
  180. }
  181. }
  182. CommitReduction = 0;
  183. //
  184. // Get the address creation mutex to block multiple threads from
  185. // creating or deleting address space at the same time and
  186. // get the working set mutex so virtual address descriptors can
  187. // be inserted and walked. Block APCs to prevent page faults while
  188. // we own the working set mutex.
  189. //
  190. LOCK_ADDRESS_SPACE (Process);
  191. //
  192. // Make sure the address space was not deleted.
  193. //
  194. if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
  195. Status = STATUS_PROCESS_IS_TERMINATING;
  196. goto ErrorReturn;
  197. }
  198. #if defined(_MIALT4K_)
  199. Wow64Process = Process->Wow64Process;
  200. //
  201. // Initializing these is not needed for correctness, but
  202. // without it the compiler cannot compile this code W4 to check
  203. // for use of uninitialized variables.
  204. //
  205. StartingAddress4k = NULL;
  206. EndingAddress4k = NULL;
  207. if (CapturedRegionSize != 0) {
  208. if (Wow64Process != NULL) {
  209. //
  210. // Adjust Starting/EndingAddress for the native page size.
  211. //
  212. // StartingAddress: if this happened to be 4k aligned, but not
  213. // native aligned, then look at the previous 4k page and if it's
  214. // allocated then align the starting page to the next native
  215. // page, otherwise align it to the current one.
  216. //
  217. // EndingAddress: if this happened to be 4k aligned but not
  218. // native aligned, then look at the next 4k page and if it's
  219. // allocated, then make the ending address the previous
  220. // native page, otherwise make it the current.
  221. //
  222. // This is to ensure VADs are not leaked inside
  223. // the process when releasing partial allocations.
  224. //
  225. ASSERT (StartingAddress == PAGE_ALIGN(StartingAddress));
  226. StartingAddress4k = (PVOID)PAGE_4K_ALIGN(CapturedBase);
  227. if (StartingAddress4k >= MmWorkingSetList->HighestUserAddress) {
  228. //
  229. // The caller's address is not in the WOW64 area, pass it
  230. // through as a native request.
  231. //
  232. Wow64Process = NULL;
  233. goto NativeRequest;
  234. }
  235. EndingAddress4k = (PVOID)(((LONG_PTR)CapturedBase + CapturedRegionSize - 1) |
  236. (PAGE_4K - 1));
  237. if (BYTE_OFFSET (StartingAddress4k) != 0) {
  238. if (MiArePreceding4kPagesAllocated (StartingAddress4k) == TRUE) {
  239. StartingAddress = PAGE_NEXT_ALIGN (StartingAddress4k);
  240. }
  241. }
  242. if (EndingAddress4k >= MmWorkingSetList->HighestUserAddress) {
  243. //
  244. // The caller's address is not in the WOW64 area, pass it
  245. // through as a native request.
  246. //
  247. Wow64Process = NULL;
  248. goto NativeRequest;
  249. }
  250. if (BYTE_OFFSET (EndingAddress4k) != PAGE_SIZE - 1) {
  251. if (MiAreFollowing4kPagesAllocated (EndingAddress4k) == TRUE) {
  252. EndingAddress = (PVOID)((ULONG_PTR)PAGE_ALIGN(EndingAddress4k) - 1);
  253. }
  254. }
  255. if (StartingAddress > EndingAddress) {
  256. //
  257. // There is no need to free native pages.
  258. //
  259. Vad = NULL;
  260. goto FreeAltPages;
  261. }
  262. }
  263. }
  264. NativeRequest:
  265. #endif
  266. Vad = (PMMVAD_SHORT)MiLocateAddress (StartingAddress);
  267. if (Vad == NULL) {
  268. //
  269. // No Virtual Address Descriptor located for Base Address.
  270. //
  271. Status = STATUS_MEMORY_NOT_ALLOCATED;
  272. goto ErrorReturn;
  273. }
  274. //
  275. // Found the associated Virtual Address Descriptor.
  276. //
  277. if (Vad->EndingVpn < MI_VA_TO_VPN (EndingAddress)) {
  278. //
  279. // The entire range to delete is not contained within a single
  280. // virtual address descriptor. Return an error.
  281. //
  282. Status = STATUS_UNABLE_TO_FREE_VM;
  283. goto ErrorReturn;
  284. }
  285. //
  286. // Check to ensure this Vad is deletable. Delete is required
  287. // for both decommit and release.
  288. //
  289. if ((Vad->u.VadFlags.PrivateMemory == 0) ||
  290. (Vad->u.VadFlags.PhysicalMapping == 1)) {
  291. Status = STATUS_UNABLE_TO_DELETE_SECTION;
  292. goto ErrorReturn;
  293. }
  294. if (Vad->u.VadFlags.NoChange == 1) {
  295. //
  296. // An attempt is being made to delete a secured VAD, check
  297. // to see if this deletion is allowed.
  298. //
  299. if (FreeType & MEM_RELEASE) {
  300. //
  301. // Specify the whole range, this solves the problem with
  302. // splitting the VAD and trying to decide where the various
  303. // secure ranges need to go.
  304. //
  305. Status = MiCheckSecuredVad ((PMMVAD)Vad,
  306. MI_VPN_TO_VA (Vad->StartingVpn),
  307. ((Vad->EndingVpn - Vad->StartingVpn) << PAGE_SHIFT) +
  308. (PAGE_SIZE - 1),
  309. MM_SECURE_DELETE_CHECK);
  310. }
  311. else {
  312. Status = MiCheckSecuredVad ((PMMVAD)Vad,
  313. CapturedBase,
  314. CapturedRegionSize,
  315. MM_SECURE_DELETE_CHECK);
  316. }
  317. if (!NT_SUCCESS (Status)) {
  318. goto ErrorReturn;
  319. }
  320. }
  321. UserPhysicalPages = FALSE;
  322. PreviousVad = MiGetPreviousVad (Vad);
  323. NextVad = MiGetNextVad (Vad);
  324. if (FreeType & MEM_RELEASE) {
  325. //
  326. // *****************************************************************
  327. // MEM_RELEASE was specified.
  328. // *****************************************************************
  329. //
  330. //
  331. // The descriptor for the address range is deletable. Remove or split
  332. // the descriptor.
  333. //
  334. //
  335. // If the region size is zero, remove the whole VAD.
  336. //
  337. if (CapturedRegionSize == 0) {
  338. //
  339. // If the region size is specified as 0, the base address
  340. // must be the starting address for the region.
  341. //
  342. if (MI_VA_TO_VPN (CapturedBase) != Vad->StartingVpn) {
  343. Status = STATUS_FREE_VM_NOT_AT_BASE;
  344. goto ErrorReturn;
  345. }
  346. //
  347. // This Virtual Address Descriptor has been deleted.
  348. //
  349. StartingAddress = MI_VPN_TO_VA (Vad->StartingVpn);
  350. EndingAddress = MI_VPN_TO_VA_ENDING (Vad->EndingVpn);
  351. #if defined(_MIALT4K_)
  352. StartingAddress4k = StartingAddress;
  353. EndingAddress4k = EndingAddress;
  354. #endif
  355. //
  356. // Free all the physical pages that this VAD might be mapping.
  357. // Since only the AWE lock synchronizes the remap API, carefully
  358. // remove this VAD from the list first.
  359. //
  360. LOCK_WS_UNSAFE (Process);
  361. if (Vad->u.VadFlags.LargePages == 1) {
  362. MiAweViewRemover (Process, (PMMVAD)Vad);
  363. MiFreeLargePages (MI_VPN_TO_VA (Vad->StartingVpn),
  364. MI_VPN_TO_VA_ENDING (Vad->EndingVpn));
  365. }
  366. else if (Vad->u.VadFlags.UserPhysicalPages == 1) {
  367. MiAweViewRemover (Process, (PMMVAD)Vad);
  368. MiRemoveUserPhysicalPagesVad (Vad);
  369. UserPhysicalPages = TRUE;
  370. }
  371. else if (Vad->u.VadFlags.WriteWatch == 1) {
  372. MiPhysicalViewRemover (Process, (PMMVAD)Vad);
  373. }
  374. MiRemoveVad ((PMMVAD)Vad);
  375. //
  376. // Free the VAD pool after releasing our mutexes
  377. // to reduce contention.
  378. //
  379. }
  380. else {
  381. //
  382. // Region's size was not specified as zero, delete the
  383. // whole VAD or split the VAD.
  384. //
  385. if (MI_VA_TO_VPN (StartingAddress) == Vad->StartingVpn) {
  386. if (MI_VA_TO_VPN (EndingAddress) == Vad->EndingVpn) {
  387. //
  388. // This Virtual Address Descriptor has been deleted.
  389. //
  390. //
  391. // Free all the physical pages that this VAD might be
  392. // mapping. Since only the AWE lock synchronizes the
  393. // remap API, carefully remove this VAD from the list first.
  394. //
  395. LOCK_WS_UNSAFE (Process);
  396. if (Vad->u.VadFlags.LargePages == 1) {
  397. MiAweViewRemover (Process, (PMMVAD)Vad);
  398. MiFreeLargePages (MI_VPN_TO_VA (Vad->StartingVpn),
  399. MI_VPN_TO_VA_ENDING (Vad->EndingVpn));
  400. }
  401. else if (Vad->u.VadFlags.UserPhysicalPages == 1) {
  402. MiAweViewRemover (Process, (PMMVAD)Vad);
  403. MiRemoveUserPhysicalPagesVad (Vad);
  404. UserPhysicalPages = TRUE;
  405. }
  406. else if (Vad->u.VadFlags.WriteWatch == 1) {
  407. MiPhysicalViewRemover (Process, (PMMVAD)Vad);
  408. }
  409. MiRemoveVad ((PMMVAD)Vad);
  410. //
  411. // Free the VAD pool after releasing our mutexes
  412. // to reduce contention.
  413. //
  414. }
  415. else {
  416. if ((Vad->u.VadFlags.UserPhysicalPages == 1) ||
  417. (Vad->u.VadFlags.LargePages == 1) ||
  418. (Vad->u.VadFlags.WriteWatch == 1)) {
  419. //
  420. // Splitting or chopping a physical VAD, large page VAD
  421. // or a write-watch VAD is not allowed.
  422. //
  423. Status = STATUS_FREE_VM_NOT_AT_BASE;
  424. goto ErrorReturn;
  425. }
  426. LOCK_WS_UNSAFE (Process);
  427. //
  428. // This Virtual Address Descriptor has a new starting
  429. // address.
  430. //
  431. CommitReduction = MiCalculatePageCommitment (
  432. StartingAddress,
  433. EndingAddress,
  434. (PMMVAD)Vad,
  435. Process);
  436. Vad->StartingVpn = MI_VA_TO_VPN ((PCHAR)EndingAddress + 1);
  437. Vad->u.VadFlags.CommitCharge -= CommitReduction;
  438. ASSERT ((SSIZE_T)Vad->u.VadFlags.CommitCharge >= 0);
  439. NextVad = (PMMVAD)Vad;
  440. Vad = NULL;
  441. }
  442. }
  443. else {
  444. if ((Vad->u.VadFlags.UserPhysicalPages == 1) ||
  445. (Vad->u.VadFlags.LargePages == 1) ||
  446. (Vad->u.VadFlags.WriteWatch == 1)) {
  447. //
  448. // Splitting or chopping a physical VAD, large page VAD
  449. // or a write-watch VAD is not allowed.
  450. //
  451. Status = STATUS_FREE_VM_NOT_AT_BASE;
  452. goto ErrorReturn;
  453. }
  454. //
  455. // Starting address is greater than start of VAD.
  456. //
  457. if (MI_VA_TO_VPN (EndingAddress) == Vad->EndingVpn) {
  458. //
  459. // Change the ending address of the VAD.
  460. //
  461. LOCK_WS_UNSAFE (Process);
  462. CommitReduction = MiCalculatePageCommitment (
  463. StartingAddress,
  464. EndingAddress,
  465. (PMMVAD)Vad,
  466. Process);
  467. Vad->u.VadFlags.CommitCharge -= CommitReduction;
  468. Vad->EndingVpn = MI_VA_TO_VPN ((PCHAR)StartingAddress - 1);
  469. PreviousVad = (PMMVAD)Vad;
  470. }
  471. else {
  472. //
  473. // Split this VAD as the address range is within the VAD.
  474. //
  475. NewVad = ExAllocatePoolWithTag (NonPagedPool,
  476. sizeof(MMVAD_SHORT),
  477. 'FdaV');
  478. if (NewVad == NULL) {
  479. Status = STATUS_INSUFFICIENT_RESOURCES;
  480. goto ErrorReturn;
  481. }
  482. *NewVad = *Vad;
  483. NewVad->StartingVpn = MI_VA_TO_VPN ((PCHAR)EndingAddress + 1);
  484. //
  485. // Set the commit charge to zero so MiInsertVad will
  486. // not charge commitment for splitting the VAD.
  487. //
  488. NewVad->u.VadFlags.CommitCharge = 0;
  489. OldEnd = Vad->EndingVpn;
  490. LOCK_WS_UNSAFE (Process);
  491. CommitReduction = MiCalculatePageCommitment (
  492. StartingAddress,
  493. EndingAddress,
  494. (PMMVAD)Vad,
  495. Process);
  496. OldQuota = Vad->u.VadFlags.CommitCharge - CommitReduction;
  497. Vad->EndingVpn = MI_VA_TO_VPN ((PCHAR)StartingAddress - 1);
  498. //
  499. // Insert the VAD, this could fail due to quota charges.
  500. //
  501. Status = MiInsertVad ((PMMVAD)NewVad);
  502. if (!NT_SUCCESS(Status)) {
  503. //
  504. // Inserting the Vad failed, reset the original
  505. // Vad, free the new Vad and return an error.
  506. //
  507. Vad->EndingVpn = OldEnd;
  508. UNLOCK_WS_AND_ADDRESS_SPACE (Process);
  509. ExFreePool (NewVad);
  510. goto ErrorReturn2;
  511. }
  512. //
  513. // As we have split the original VAD into 2 separate VADs
  514. // there is no way of knowing what the commit charge
  515. // is for each VAD. Calculate the charge and reset
  516. // each VAD. Note that we also use the previous value
  517. // to make sure the books stay balanced.
  518. //
  519. QuotaCharge = MiCalculatePageCommitment (MI_VPN_TO_VA (Vad->StartingVpn),
  520. (PCHAR)StartingAddress - 1,
  521. (PMMVAD)Vad,
  522. Process);
  523. Vad->u.VadFlags.CommitCharge = QuotaCharge;
  524. //
  525. // Give the remaining charge to the new VAD.
  526. //
  527. NewVad->u.VadFlags.CommitCharge = OldQuota - QuotaCharge;
  528. PreviousVad = (PMMVAD)Vad;
  529. NextVad = (PMMVAD)NewVad;
  530. }
  531. Vad = NULL;
  532. }
  533. }
  534. //
  535. // Return commitment for page table pages if possible.
  536. //
  537. MiReturnPageTablePageCommitment (StartingAddress,
  538. EndingAddress,
  539. Process,
  540. PreviousVad,
  541. NextVad);
  542. if (UserPhysicalPages == TRUE) {
  543. MiDeletePageTablesForPhysicalRange (StartingAddress, EndingAddress);
  544. }
  545. else {
  546. MiDeleteVirtualAddresses (StartingAddress,
  547. EndingAddress,
  548. NULL);
  549. }
  550. UNLOCK_WS_UNSAFE (Process);
  551. CapturedRegionSize = 1 + (PCHAR)EndingAddress - (PCHAR)StartingAddress;
  552. //
  553. // Update the virtual size in the process header.
  554. //
  555. Process->VirtualSize -= CapturedRegionSize;
  556. #if defined(_MIALT4K_)
  557. if (Wow64Process != NULL) {
  558. goto FreeAltPages;
  559. }
  560. #endif
  561. Process->CommitCharge -= CommitReduction;
  562. UNLOCK_ADDRESS_SPACE (Process);
  563. if (CommitReduction != 0) {
  564. MI_INCREMENT_TOTAL_PROCESS_COMMIT (0 - CommitReduction);
  565. ASSERT (Vad == NULL);
  566. PsReturnProcessPageFileQuota (Process, CommitReduction);
  567. MiReturnCommitment (CommitReduction);
  568. if (Process->JobStatus & PS_JOB_STATUS_REPORT_COMMIT_CHANGES) {
  569. PsChangeJobMemoryUsage (PS_JOB_STATUS_REPORT_COMMIT_CHANGES, -(SSIZE_T)CommitReduction);
  570. }
  571. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_NTFREEVM1, CommitReduction);
  572. }
  573. else if (Vad != NULL) {
  574. ExFreePool (Vad);
  575. }
  576. if (Attached == TRUE) {
  577. KeUnstackDetachProcess (&ApcState);
  578. }
  579. if (ProcessHandle != NtCurrentProcess()) {
  580. ObDereferenceObject (Process);
  581. }
  582. //
  583. // Establish an exception handler and write the size and base
  584. // address.
  585. //
  586. try {
  587. *RegionSize = CapturedRegionSize;
  588. *BaseAddress = StartingAddress;
  589. } except (EXCEPTION_EXECUTE_HANDLER) {
  590. //
  591. // An exception occurred, don't take any action (just handle
  592. // the exception and return success.
  593. }
  594. return STATUS_SUCCESS;
  595. }
  596. //
  597. // **************************************************************
  598. //
  599. // MEM_DECOMMIT was specified.
  600. //
  601. // **************************************************************
  602. //
  603. if (Vad->u.VadFlags.UserPhysicalPages == 1) {
  604. //
  605. // Pages from a physical VAD must be released via
  606. // NtFreeUserPhysicalPages, not this routine.
  607. //
  608. Status = STATUS_MEMORY_NOT_ALLOCATED;
  609. goto ErrorReturn;
  610. }
  611. if (Vad->u.VadFlags.LargePages == 1) {
  612. //
  613. // Pages from a large page VAD must be released -
  614. // they cannot be merely decommitted.
  615. //
  616. Status = STATUS_MEMORY_NOT_ALLOCATED;
  617. goto ErrorReturn;
  618. }
  619. //
  620. // Check to ensure the complete range of pages is already committed.
  621. //
  622. if (CapturedRegionSize == 0) {
  623. if (MI_VA_TO_VPN (CapturedBase) != Vad->StartingVpn) {
  624. Status = STATUS_FREE_VM_NOT_AT_BASE;
  625. goto ErrorReturn;
  626. }
  627. EndingAddress = MI_VPN_TO_VA_ENDING (Vad->EndingVpn);
  628. #if defined(_MIALT4K_)
  629. StartingAddress4k = StartingAddress;
  630. EndingAddress4k = EndingAddress;
  631. #endif
  632. }
  633. #if 0
  634. if (FreeType & MEM_CHECK_COMMIT_STATE) {
  635. if ( !MiIsEntireRangeCommitted(StartingAddress,
  636. EndingAddress,
  637. Vad,
  638. Process)) {
  639. //
  640. // The entire range to be decommitted is not committed,
  641. // return an error.
  642. //
  643. Status = STATUS_UNABLE_TO_DECOMMIT_VM;
  644. goto ErrorReturn;
  645. }
  646. }
  647. #endif //0
  648. //
  649. // The address range is entirely committed, decommit it now.
  650. //
  651. //
  652. // Calculate the initial quotas and commit charges for this VAD.
  653. //
  654. StartingPte = MiGetPteAddress (StartingAddress);
  655. EndingPte = MiGetPteAddress (EndingAddress);
  656. CommitReduction = 1 + EndingPte - StartingPte;
  657. LOCK_WS_UNSAFE (Process);
  658. //
  659. // Check to see if the entire range can be decommitted by
  660. // just updating the virtual address descriptor.
  661. //
  662. CommitReduction -= MiDecommitPages (StartingAddress,
  663. EndingPte,
  664. Process,
  665. Vad);
  666. UNLOCK_WS_UNSAFE (Process);
  667. //
  668. // Adjust the quota charges.
  669. //
  670. ASSERT ((LONG)CommitReduction >= 0);
  671. Vad->u.VadFlags.CommitCharge -= CommitReduction;
  672. ASSERT ((LONG)Vad->u.VadFlags.CommitCharge >= 0);
  673. Vad = NULL;
  674. #if defined(_MIALT4K_)
  675. FreeAltPages:
  676. if (Wow64Process != NULL) {
  677. if (FreeType & MEM_RELEASE) {
  678. MiReleaseFor4kPage (StartingAddress4k,
  679. EndingAddress4k,
  680. Process);
  681. }
  682. else {
  683. MiDecommitFor4kPage (StartingAddress4k,
  684. EndingAddress4k,
  685. Process);
  686. }
  687. StartingAddress = StartingAddress4k;
  688. EndingAddress = EndingAddress4k;
  689. }
  690. #endif
  691. Process->CommitCharge -= CommitReduction;
  692. UNLOCK_ADDRESS_SPACE (Process);
  693. if (CommitReduction != 0) {
  694. MI_INCREMENT_TOTAL_PROCESS_COMMIT (0 - CommitReduction);
  695. PsReturnProcessPageFileQuota (Process, CommitReduction);
  696. MiReturnCommitment (CommitReduction);
  697. if (Process->JobStatus & PS_JOB_STATUS_REPORT_COMMIT_CHANGES) {
  698. PsChangeJobMemoryUsage (PS_JOB_STATUS_REPORT_COMMIT_CHANGES, -(SSIZE_T)CommitReduction);
  699. }
  700. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_NTFREEVM2, CommitReduction);
  701. }
  702. else if (Vad != NULL) {
  703. ExFreePool (Vad);
  704. }
  705. if (Attached == TRUE) {
  706. KeUnstackDetachProcess (&ApcState);
  707. }
  708. if (ProcessHandle != NtCurrentProcess()) {
  709. ObDereferenceObject (Process);
  710. }
  711. //
  712. // Establish an exception handler and write the size and base address.
  713. //
  714. try {
  715. *RegionSize = 1 + (PCHAR)EndingAddress - (PCHAR)StartingAddress;
  716. *BaseAddress = StartingAddress;
  717. } except (EXCEPTION_EXECUTE_HANDLER) {
  718. NOTHING;
  719. }
  720. return STATUS_SUCCESS;
  721. ErrorReturn:
  722. UNLOCK_ADDRESS_SPACE (Process);
  723. ErrorReturn2:
  724. if (Attached == TRUE) {
  725. KeUnstackDetachProcess (&ApcState);
  726. }
  727. if (ProcessHandle != NtCurrentProcess()) {
  728. ObDereferenceObject (Process);
  729. }
  730. return Status;
  731. }
  732. ULONG
  733. MiIsEntireRangeCommitted (
  734. IN PVOID StartingAddress,
  735. IN PVOID EndingAddress,
  736. IN PMMVAD Vad,
  737. IN PEPROCESS Process
  738. )
  739. /*++
  740. Routine Description:
  741. This routine examines the range of pages from the starting address
  742. up to and including the ending address and returns TRUE if every
  743. page in the range is committed, FALSE otherwise.
  744. Arguments:
  745. StartingAddress - Supplies the starting address of the range.
  746. EndingAddress - Supplies the ending address of the range.
  747. Vad - Supplies the virtual address descriptor which describes the range.
  748. Process - Supplies the current process.
  749. Return Value:
  750. TRUE if the entire range is committed.
  751. FALSE if any page within the range is not committed.
  752. Environment:
  753. Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
  754. held.
  755. --*/
  756. {
  757. PMMPTE PointerPte;
  758. PMMPTE LastPte;
  759. PMMPTE PointerPde;
  760. PMMPTE PointerPpe;
  761. PMMPTE PointerPxe;
  762. ULONG FirstTime;
  763. ULONG Waited;
  764. PVOID Va;
  765. PAGED_CODE();
  766. FirstTime = TRUE;
  767. PointerPde = MiGetPdeAddress (StartingAddress);
  768. PointerPte = MiGetPteAddress (StartingAddress);
  769. LastPte = MiGetPteAddress (EndingAddress);
  770. //
  771. // Set the Va to the starting address + 8, this solves problems
  772. // associated with address 0 (NULL) being used as a valid virtual
  773. // address and NULL in the VAD commitment field indicating no pages
  774. // are committed.
  775. //
  776. Va = (PVOID)((PCHAR)StartingAddress + 8);
  777. while (PointerPte <= LastPte) {
  778. if (MiIsPteOnPdeBoundary(PointerPte) || (FirstTime)) {
  779. //
  780. // This may be a PXE/PPE/PDE boundary, check to see if all the
  781. // PXE/PPE/PDE pages exist.
  782. //
  783. FirstTime = FALSE;
  784. PointerPde = MiGetPteAddress (PointerPte);
  785. PointerPpe = MiGetPteAddress (PointerPde);
  786. PointerPxe = MiGetPteAddress (PointerPpe);
  787. do {
  788. #if (_MI_PAGING_LEVELS >= 4)
  789. retry:
  790. #endif
  791. while (!MiDoesPxeExistAndMakeValid (PointerPxe, Process, MM_NOIRQL, &Waited)) {
  792. //
  793. // No PPE exists for the starting address, check the VAD
  794. // to see if the pages are committed.
  795. //
  796. PointerPxe += 1;
  797. PointerPpe = MiGetVirtualAddressMappedByPte (PointerPxe);
  798. PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe);
  799. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  800. Va = MiGetVirtualAddressMappedByPte (PointerPte);
  801. if (PointerPte > LastPte) {
  802. //
  803. // Make sure the entire range is committed.
  804. //
  805. if (Vad->u.VadFlags.MemCommit == 0) {
  806. //
  807. // The entire range to be decommitted is not
  808. // committed, return an error.
  809. //
  810. return FALSE;
  811. }
  812. return TRUE;
  813. }
  814. //
  815. // Make sure the range thus far is committed.
  816. //
  817. if (Vad->u.VadFlags.MemCommit == 0) {
  818. //
  819. // The entire range to be decommitted is not committed,
  820. // return an error.
  821. //
  822. return FALSE;
  823. }
  824. }
  825. while (!MiDoesPpeExistAndMakeValid (PointerPpe, Process, MM_NOIRQL, &Waited)) {
  826. //
  827. // No PDE exists for the starting address, check the VAD
  828. // to see if the pages are committed.
  829. //
  830. PointerPpe += 1;
  831. PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe);
  832. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  833. Va = MiGetVirtualAddressMappedByPte (PointerPte);
  834. if (PointerPte > LastPte) {
  835. //
  836. // Make sure the entire range is committed.
  837. //
  838. if (Vad->u.VadFlags.MemCommit == 0) {
  839. //
  840. // The entire range to be decommitted is not
  841. // committed, return an error.
  842. //
  843. return FALSE;
  844. }
  845. return TRUE;
  846. }
  847. //
  848. // Make sure the range thus far is committed.
  849. //
  850. if (Vad->u.VadFlags.MemCommit == 0) {
  851. //
  852. // The entire range to be decommitted is not committed,
  853. // return an error.
  854. //
  855. return FALSE;
  856. }
  857. #if (_MI_PAGING_LEVELS >= 4)
  858. if (MiIsPteOnPdeBoundary (PointerPpe)) {
  859. PointerPxe = MiGetPteAddress (PointerPpe);
  860. goto retry;
  861. }
  862. #endif
  863. }
  864. Waited = 0;
  865. while (!MiDoesPdeExistAndMakeValid (PointerPde, Process, MM_NOIRQL, &Waited)) {
  866. //
  867. // No PDE exists for the starting address, check the VAD
  868. // to see if the pages are committed.
  869. //
  870. PointerPde += 1;
  871. PointerPpe = MiGetPteAddress (PointerPde);
  872. PointerPxe = MiGetPdeAddress (PointerPde);
  873. PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
  874. Va = MiGetVirtualAddressMappedByPte (PointerPte);
  875. if (PointerPte > LastPte) {
  876. //
  877. // Make sure the entire range is committed.
  878. //
  879. if (Vad->u.VadFlags.MemCommit == 0) {
  880. //
  881. // The entire range to be decommitted is not committed,
  882. // return an error.
  883. //
  884. return FALSE;
  885. }
  886. return TRUE;
  887. }
  888. //
  889. // Make sure the range thus far is committed.
  890. //
  891. if (Vad->u.VadFlags.MemCommit == 0) {
  892. //
  893. // The entire range to be decommitted is not committed,
  894. // return an error.
  895. //
  896. return FALSE;
  897. }
  898. #if (_MI_PAGING_LEVELS >= 3)
  899. if (MiIsPteOnPdeBoundary (PointerPde)) {
  900. PointerPpe = MiGetPteAddress (PointerPde);
  901. #if (_MI_PAGING_LEVELS >= 4)
  902. if (MiIsPteOnPpeBoundary (PointerPde)) {
  903. PointerPxe = MiGetPdeAddress (PointerPde);
  904. Waited = 1;
  905. break;
  906. }
  907. #endif
  908. Waited = 1;
  909. break;
  910. }
  911. #endif
  912. }
  913. } while (Waited != 0);
  914. }
  915. //
  916. // The page table page exists, check each PTE for commitment.
  917. //
  918. if (PointerPte->u.Long == 0) {
  919. //
  920. // This page has not been committed, check the VAD.
  921. //
  922. if (Vad->u.VadFlags.MemCommit == 0) {
  923. //
  924. // The entire range to be decommitted is not committed,
  925. // return an error.
  926. //
  927. return FALSE;
  928. }
  929. }
  930. else {
  931. //
  932. // Has this page been explicitly decommitted?
  933. //
  934. if (MiIsPteDecommittedPage (PointerPte)) {
  935. //
  936. // This page has been explicitly decommitted, return an error.
  937. //
  938. return FALSE;
  939. }
  940. }
  941. PointerPte += 1;
  942. Va = (PVOID)((PCHAR)(Va) + PAGE_SIZE);
  943. }
  944. return TRUE;
  945. }
  946. ULONG
  947. MiDecommitPages (
  948. IN PVOID StartingAddress,
  949. IN PMMPTE EndingPte,
  950. IN PEPROCESS Process,
  951. IN PMMVAD_SHORT Vad
  952. )
  953. /*++
  954. Routine Description:
  955. This routine decommits the specified range of pages.
  956. Arguments:
  957. StartingAddress - Supplies the starting address of the range.
  958. EndingPte - Supplies the ending PTE of the range.
  959. Process - Supplies the current process.
  960. Vad - Supplies the virtual address descriptor which describes the range.
  961. Return Value:
  962. Value to reduce commitment by for the VAD.
  963. Environment:
  964. Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
  965. held.
  966. --*/
  967. {
  968. PMMPTE PointerPde;
  969. PMMPTE PointerPte;
  970. PVOID Va;
  971. ULONG CommitReduction;
  972. PMMPTE CommitLimitPte;
  973. KIRQL OldIrql;
  974. PMMPTE ValidPteList[MM_VALID_PTE_SIZE];
  975. ULONG count;
  976. WSLE_NUMBER WorkingSetIndex;
  977. PMMPFN Pfn1;
  978. PMMPFN Pfn2;
  979. WSLE_NUMBER Entry;
  980. MMWSLENTRY Locked;
  981. MMPTE PteContents;
  982. PFN_NUMBER PageTableFrameIndex;
  983. PVOID UsedPageTableHandle;
  984. count = 0;
  985. CommitReduction = 0;
  //
  // CommitReduction counts the pages in this range that were not
  // actually committed (already explicitly decommitted, or zero PTEs
  // beyond the commit limit below) so the caller can reduce the VAD's
  // commit charge by the returned amount.
  //
  // If the VAD was created with MEM_COMMIT, zero PTEs up through the
  // VAD's ending PTE are implicitly committed - capture that limit so
  // zero PTEs can be classified in the loop below.
  //
  986. if (Vad->u.VadFlags.MemCommit) {
  987. CommitLimitPte = MiGetPteAddress (MI_VPN_TO_VA (Vad->EndingVpn));
  988. }
  989. else {
  990. CommitLimitPte = NULL;
  991. }
  992. //
  993. // Decommit each page by setting the PTE to be explicitly
  994. // decommitted. The PTEs cannot be deleted all at once as
  995. // this would set the PTEs to zero which would auto-evaluate
  996. // as committed if referenced by another thread when a page
  997. // table page is being in-paged.
  998. //
  999. PointerPde = MiGetPdeAddress (StartingAddress);
  1000. PointerPte = MiGetPteAddress (StartingAddress);
  1001. Va = StartingAddress;
  1002. //
  1003. // Loop through all the PDEs which map this region and ensure that
  1004. // they exist. If they don't exist create them by touching a
  1005. // PTE mapped by the PDE.
  1006. //
  1007. MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
  1008. while (PointerPte <= EndingPte) {
  1009. if (MiIsPteOnPdeBoundary (PointerPte)) {
  //
  // Crossing into a new page table page: drain the batched valid
  // PTEs gathered from the previous page table page first, then
  // make the new page table page resident and valid.
  //
  1010. PointerPde = MiGetPdeAddress (Va);
  1011. if (count != 0) {
  1012. MiProcessValidPteList (&ValidPteList[0], count);
  1013. count = 0;
  1014. }
  1015. MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
  1016. }
  1017. //
  1018. // The working set lock is held. No PTEs can go from
  1019. // invalid to valid or valid to invalid. Transition
  1020. // PTEs can go from transition to pagefile.
  1021. //
  1022. PteContents = *PointerPte;
  1023. if (PteContents.u.Long != 0) {
  1024. if (PointerPte->u.Long == MmDecommittedPte.u.Long) {
  1025. //
  1026. // This PTE is already decommitted.
  1027. //
  1028. CommitReduction += 1;
  1029. }
  1030. else {
  //
  // Assume the page is private and drop the count now; the
  // forked (prototype) and demand-zero paths below re-increment
  // it because those pages are not charged as private.
  //
  1031. Process->NumberOfPrivatePages -= 1;
  1032. if (PteContents.u.Hard.Valid == 1) {
  1033. //
  1034. // Make sure this is not a forked PTE.
  1035. //
  1036. Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
  1037. if (Pfn1->u3.e1.PrototypePte) {
  1038. LOCK_PFN (OldIrql);
  1039. MiDeletePte (PointerPte,
  1040. Va,
  1041. FALSE,
  1042. Process,
  1043. NULL,
  1044. NULL,
  1045. OldIrql);
  1046. UNLOCK_PFN (OldIrql);
  1047. Process->NumberOfPrivatePages += 1;
  1048. MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
  1049. }
  1050. else {
  1051. //
  1052. // PTE is valid, process later when PFN lock is held.
  1053. //
  // Batch valid PTEs so the PFN lock acquisition and the TB
  // flush are amortized over up to MM_VALID_PTE_SIZE entries.
  //
  1054. if (count == MM_VALID_PTE_SIZE) {
  1055. MiProcessValidPteList (&ValidPteList[0], count);
  1056. count = 0;
  1057. }
  1058. ValidPteList[count] = PointerPte;
  1059. count += 1;
  1060. //
  1061. // Remove address from working set list.
  1062. //
  1063. WorkingSetIndex = Pfn1->u1.WsIndex;
  1064. ASSERT (PAGE_ALIGN(MmWsle[WorkingSetIndex].u1.Long) ==
  1065. Va);
  1066. //
  1067. // Check to see if this entry is locked in the
  1068. // working set or locked in memory.
  1069. //
  1070. Locked = MmWsle[WorkingSetIndex].u1.e1;
  1071. MiRemoveWsle (WorkingSetIndex, MmWorkingSetList);
  1072. //
  1073. // Add this entry to the list of free working set
  1074. // entries and adjust the working set count.
  1075. //
  1076. MiReleaseWsle (WorkingSetIndex, &Process->Vm);
  1077. if ((Locked.LockedInWs == 1) || (Locked.LockedInMemory == 1)) {
  1078. //
  1079. // This entry is locked.
  1080. //
  //
  // The freed slot lay in the locked (static) region below
  // FirstDynamic. Shrink that region by one and, if the
  // freed slot is not the last static entry, swap the last
  // static entry into it so the static region stays
  // contiguous.
  //
  1081. MmWorkingSetList->FirstDynamic -= 1;
  1082. if (WorkingSetIndex != MmWorkingSetList->FirstDynamic) {
  1083. Entry = MmWorkingSetList->FirstDynamic;
  1084. ASSERT (MmWsle[Entry].u1.e1.Valid);
  1085. MiSwapWslEntries (Entry,
  1086. WorkingSetIndex,
  1087. &Process->Vm,
  1088. FALSE);
  1089. }
  1090. }
  1091. MI_SET_PTE_IN_WORKING_SET (PointerPte, 0);
  1092. }
  1093. }
  1094. else if (PteContents.u.Soft.Prototype) {
  1095. //
  1096. // This is a forked PTE, just delete it.
  1097. //
  1098. LOCK_PFN (OldIrql);
  1099. MiDeletePte (PointerPte,
  1100. Va,
  1101. FALSE,
  1102. Process,
  1103. NULL,
  1104. NULL,
  1105. OldIrql);
  1106. UNLOCK_PFN (OldIrql);
  1107. Process->NumberOfPrivatePages += 1;
  1108. MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
  1109. }
  1110. else if (PteContents.u.Soft.Transition == 1) {
  1111. //
  1112. // Transition PTE, get the PFN database lock
  1113. // and reprocess this one.
  1114. //
  1115. LOCK_PFN (OldIrql);
  //
  // Re-read the PTE under the PFN lock - it may have changed
  // from transition to paging file format in the meantime.
  //
  1116. PteContents = *PointerPte;
  1117. if (PteContents.u.Soft.Transition == 1) {
  1118. //
  1119. // PTE is still in transition, delete it.
  1120. //
  1121. Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
  1122. MI_SET_PFN_DELETED (Pfn1);
  1123. PageTableFrameIndex = Pfn1->u4.PteFrame;
  1124. Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);
  1125. MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);
  1126. //
  1127. // Check the reference count for the page, if the
  1128. // reference count is zero, move the page to the
  1129. // free list, if the reference count is not zero,
  1130. // ignore this page. When the reference count
  1131. // goes to zero, it will be placed on the free list.
  1132. //
  1133. if (Pfn1->u3.e2.ReferenceCount == 0) {
  1134. MiUnlinkPageFromList (Pfn1);
  1135. MiReleasePageFileSpace (Pfn1->OriginalPte);
  1136. MiInsertPageInFreeList (MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE(&PteContents));
  1137. }
  1138. }
  1139. else {
  1140. //
  1141. // Page MUST be in page file format!
  1142. //
  1143. ASSERT (PteContents.u.Soft.Valid == 0);
  1144. ASSERT (PteContents.u.Soft.Prototype == 0);
  1145. ASSERT (PteContents.u.Soft.PageFileHigh != 0);
  1146. MiReleasePageFileSpace (PteContents);
  1147. }
  1148. MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
  1149. UNLOCK_PFN (OldIrql);
  1150. }
  1151. else {
  1152. //
  1153. // Must be demand zero or paging file format.
  1154. //
  1155. if (PteContents.u.Soft.PageFileHigh != 0) {
  1156. LOCK_PFN (OldIrql);
  1157. MiReleasePageFileSpace (PteContents);
  1158. UNLOCK_PFN (OldIrql);
  1159. }
  1160. else {
  1161. //
  1162. // Don't subtract out the private page count for
  1163. // a demand zero page.
  1164. //
  1165. Process->NumberOfPrivatePages += 1;
  1166. }
  1167. MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
  1168. }
  1169. }
  1170. }
  1171. else {
  1172. //
  1173. // The PTE is already zero.
  1174. //
  1175. //
  1176. // Increment the count of non-zero page table entries for this
  1177. // page table and the number of private pages for the process.
  1178. //
  1179. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (Va);
  1180. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  //
  // When CommitLimitPte is NULL (the VAD was not created with
  // MEM_COMMIT) this comparison is always TRUE, so every zero PTE
  // counts as never having been committed.
  //
  1181. if (PointerPte > CommitLimitPte) {
  1182. //
  1183. // PTE is not committed.
  1184. //
  1185. CommitReduction += 1;
  1186. }
  1187. MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
  1188. }
  1189. PointerPte += 1;
  1190. Va = (PVOID)((PCHAR)Va + PAGE_SIZE);
  1191. }
  //
  // Drain any batched valid PTEs remaining (PFN accounting + TB flush).
  //
  1192. if (count != 0) {
  1193. MiProcessValidPteList (&ValidPteList[0], count);
  1194. }
  1195. return CommitReduction;
  1196. }
  1197. VOID
  1198. MiProcessValidPteList (
  1199. IN PMMPTE *ValidPteList,
  1200. IN ULONG Count
  1201. )
  1202. /*++
  1203. Routine Description:
  1204. This routine flushes the specified range of valid PTEs.
  1205. Arguments:
  1206. ValidPteList - Supplies a pointer to an array of PTEs to flush.
  1207. Count - Supplies the count of the number of elements in the array.
  1208. Return Value:
  1209. none.
  1210. Environment:
  1211. Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
  1212. held.
  1213. --*/
  1214. {
  1215. ULONG i;
  1216. MMPTE_FLUSH_LIST PteFlushList;
  1217. MMPTE PteContents;
  1218. PMMPFN Pfn1;
  1219. PMMPFN Pfn2;
  1220. PFN_NUMBER PageFrameIndex;
  1221. PFN_NUMBER PageTableFrameIndex;
  1222. KIRQL OldIrql;
  //
  // Count must be at least 1: the do-while loops below execute once
  // unconditionally. The callers in this file only invoke this routine
  // when count != 0.
  //
  1223. i = 0;
  1224. PteFlushList.Count = Count;
  //
  // Record the virtual address mapped by each PTE only when the list
  // fits in the flush structure; for larger counts no addresses are
  // recorded (NOTE(review): presumably MiFlushPteList then falls back
  // to flushing the entire TB - confirm against MiFlushPteList).
  //
  1225. if (Count < MM_MAXIMUM_FLUSH_COUNT) {
  1226. do {
  1227. PteFlushList.FlushVa[i] =
  1228. MiGetVirtualAddressMappedByPte (ValidPteList[i]);
  1229. i += 1;
  1230. } while (i != Count);
  1231. i = 0;
  1232. }
  1233. LOCK_PFN (OldIrql);
  1234. do {
  1235. PteContents = *ValidPteList[i];
  1236. ASSERT (PteContents.u.Hard.Valid == 1);
  1237. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE(&PteContents);
  1238. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1239. //
  1240. // Decrement the share and valid counts of the page table
  1241. // page which maps this PTE.
  1242. //
  1243. PageTableFrameIndex = Pfn1->u4.PteFrame;
  1244. Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);
  1245. MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);
  1246. MI_SET_PFN_DELETED (Pfn1);
  1247. //
  1248. // Decrement the share count for the physical page. As the page
  1249. // is private it will be put on the free list.
  1250. //
  1251. MiDecrementShareCount (Pfn1, PageFrameIndex);
  //
  // Overwrite the valid PTE with the decommitted marker; the stale
  // TB entries are flushed in one batch below, still under the PFN
  // lock.
  //
  1252. *ValidPteList[i] = MmDecommittedPte;
  1253. i += 1;
  1254. } while (i != Count);
  1255. MiFlushPteList (&PteFlushList, FALSE);
  1256. UNLOCK_PFN (OldIrql);
  1257. return;
  1258. }