Source code of Windows XP (NT5)


  1. /*++
  2. Copyright (c) 1989 Microsoft Corporation
  3. Module Name:
  4. wslist.c
  5. Abstract:
  6. This module contains routines which operate on the working
  7. set list structure.
  8. Author:
  9. Lou Perazzoli (loup) 10-Apr-1989
  10. Landy Wang (landyw) 02-Jun-1997
  11. Revision History:
  12. --*/
  13. #include "mi.h"
  14. #pragma alloc_text(INIT, MiInitializeSessionWsSupport)
  15. #pragma alloc_text(PAGE, MmAssignProcessToJob)
  16. #pragma alloc_text(PAGE, MiInitializeWorkingSetList)
  17. #define MM_SYSTEM_CACHE_THRESHOLD ((1024*1024) / PAGE_SIZE)
  18. extern WSLE_NUMBER MmMaximumWorkingSetSize;
  19. ULONG MmSystemCodePage;
  20. ULONG MmSystemCachePage;
  21. ULONG MmPagedPoolPage;
  22. ULONG MmSystemDriverPage;
  23. extern LOGICAL MiReplacing;
  24. #define MM_RETRY_COUNT 2
  25. extern PFN_NUMBER MmTransitionSharedPages;
  26. PFN_NUMBER MmTransitionSharedPagesPeak;
  27. extern LOGICAL MiTrimRemovalPagesOnly;
  28. VOID
  29. MiDoReplacement (
  30. IN PMMSUPPORT WsInfo,
  31. IN LOGICAL MustReplace
  32. );
  33. VOID
  34. MiReplaceWorkingSetEntry (
  35. IN PMMSUPPORT WsInfo,
  36. IN LOGICAL MustReplace
  37. );
  38. VOID
  39. MiCheckWsleHash (
  40. IN PMMWSL WorkingSetList
  41. );
  42. VOID
  43. MiEliminateWorkingSetEntry (
  44. IN WSLE_NUMBER WorkingSetIndex,
  45. IN PMMPTE PointerPte,
  46. IN PMMPFN Pfn,
  47. IN PMMWSLE Wsle
  48. );
  49. ULONG
  50. MiAddWorkingSetPage (
  51. IN PMMSUPPORT WsInfo
  52. );
  53. VOID
  54. MiRemoveWorkingSetPages (
  55. IN PMMWSL WorkingSetList,
  56. IN PMMSUPPORT WsInfo
  57. );
  58. VOID
  59. MiCheckNullIndex (
  60. IN PMMWSL WorkingSetList
  61. );
  62. VOID
  63. MiDumpWsleInCacheBlock (
  64. IN PMMPTE CachePte
  65. );
  66. ULONG
  67. MiDumpPteInCacheBlock (
  68. IN PMMPTE PointerPte
  69. );
  70. #ifdef ALLOC_PRAGMA
  71. #pragma alloc_text(PAGELK, MmAdjustWorkingSetSize)
  72. #pragma alloc_text(PAGELK, MiSessionInitializeWorkingSetList)
  73. #endif
  74. ULONG MiWsleFailures;
  75. WSLE_NUMBER
  76. MiLocateAndReserveWsle (
  77. IN PMMSUPPORT WsInfo
  78. )
  79. /*++
  80. Routine Description:
  81. This function examines the Working Set List for the current
  82. process and locates an entry to contain a new page. If the
  83. working set is not currently at its quota, the new page is
  84. added without removing a page, if the working set is at its
  85. quota a page is removed from the working set and the new
  86. page added in its place.
  87. Arguments:
  88. WsInfo - Supplies a pointer to the working set structure to reserve an entry in.
  89. Return Value:
  90. Returns the working set index which is now reserved for the
  91. next page to be added.
  92. Environment:
  93. Kernel mode, APCs disabled, working set lock. PFN lock NOT held.
  94. --*/
  95. {
  96. WSLE_NUMBER WorkingSetIndex;
  97. PMMWSL WorkingSetList;
  98. PMMWSLE Wsle;
  99. WorkingSetList = WsInfo->VmWorkingSetList;
  100. Wsle = WorkingSetList->Wsle;
  101. //
  102. // Update page fault counts.
  103. //
  104. WsInfo->PageFaultCount += 1;
  105. MmInfoCounters.PageFaultCount += 1;
  106. retry:
  107. //
  108. // Determine if a page should be removed from the working set to make
  109. // room for the new page. If so, remove it.
  110. //
  111. MiDoReplacement (WsInfo, FALSE);
  112. if (WorkingSetList->FirstFree == WSLE_NULL_INDEX) {
  113. //
  114. // Add more pages to the working set list structure.
  115. //
  116. if (MiAddWorkingSetPage (WsInfo) == FALSE) {
  117. //
  118. // No page was added to the working set list structure.
  119. // We must replace a page within this working set.
  120. //
  121. MiDoReplacement (WsInfo, TRUE);
  122. if (WorkingSetList->FirstFree == WSLE_NULL_INDEX) {
  123. MiWsleFailures += 1;
  124. KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmShortTime);
  125. goto retry;
  126. }
  127. }
  128. }
  129. //
  130. // Get the working set entry from the free list.
  131. //
  132. ASSERT (WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle);
  133. ASSERT (WorkingSetList->FirstFree >= WorkingSetList->FirstDynamic);
  134. WorkingSetIndex = WorkingSetList->FirstFree;
  135. WorkingSetList->FirstFree = (WSLE_NUMBER)(Wsle[WorkingSetIndex].u1.Long >> MM_FREE_WSLE_SHIFT);
  136. ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
  137. (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
  138. WsInfo->WorkingSetSize += 1;
  139. if (WsInfo->WorkingSetSize > WsInfo->MinimumWorkingSetSize) {
  140. MmPagesAboveWsMinimum += 1;
  141. }
  142. if (WsInfo->WorkingSetSize > WsInfo->PeakWorkingSetSize) {
  143. WsInfo->PeakWorkingSetSize = WsInfo->WorkingSetSize;
  144. }
  145. if (WsInfo == &MmSystemCacheWs) {
  146. if (WsInfo->WorkingSetSize + MmTransitionSharedPages > MmTransitionSharedPagesPeak) {
  147. MmTransitionSharedPagesPeak = WsInfo->WorkingSetSize + MmTransitionSharedPages;
  148. }
  149. }
  150. if (WorkingSetIndex > WorkingSetList->LastEntry) {
  151. WorkingSetList->LastEntry = WorkingSetIndex;
  152. }
  153. //
  154. // The returned entry is guaranteed to be available at this point.
  155. //
  156. ASSERT (Wsle[WorkingSetIndex].u1.e1.Valid == 0);
  157. return WorkingSetIndex;
  158. }
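
The free list consumed here is threaded through the WSLE array itself: each free entry's u1.Long holds the index of the next free entry shifted left by MM_FREE_WSLE_SHIFT, and WSLE_NULL_INDEX terminates the chain. Below is a minimal user-mode sketch of that shifted-index free list; the constant values and names are illustrative stand-ins, not taken from this file.

#include <assert.h>
#include <stdio.h>

#define FREE_SHIFT   4              /* stand-in for MM_FREE_WSLE_SHIFT */
#define NULL_INDEX   0x0FFFFFFFUL   /* stand-in for WSLE_NULL_INDEX    */
#define ENTRY_COUNT  8

typedef struct _ENTRY {
    unsigned long Long;             /* free entry: next index << FREE_SHIFT */
} ENTRY;

static ENTRY Table[ENTRY_COUNT];
static unsigned long FirstFree;

/* Chain every slot onto the free list, terminating with the sentinel. */
static void BuildFreeList (void)
{
    unsigned long i;

    for (i = 0; i < ENTRY_COUNT - 1; i += 1) {
        Table[i].Long = (i + 1) << FREE_SHIFT;
    }
    Table[ENTRY_COUNT - 1].Long = NULL_INDEX << FREE_SHIFT;
    FirstFree = 0;
}

/* Pop the head of the free list, as FirstFree is consumed above. */
static unsigned long ReserveEntry (void)
{
    unsigned long Index = FirstFree;

    assert (Index != NULL_INDEX);
    FirstFree = Table[Index].Long >> FREE_SHIFT;
    return Index;
}

/* Push a slot back, as MiReleaseWsle and MiFreeWsle do further below. */
static void ReleaseEntry (unsigned long Index)
{
    Table[Index].Long = FirstFree << FREE_SHIFT;
    FirstFree = Index;
}

int main (void)
{
    BuildFreeList ();
    printf ("first reserved index: %lu\n", ReserveEntry ());
    ReleaseEntry (0);
    return 0;
}
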
  159. VOID
  160. MiDoReplacement (
  161. IN PMMSUPPORT WsInfo,
  162. IN LOGICAL MustReplace
  163. )
  164. /*++
  165. Routine Description:
  166. This function determines whether the working set should be
  167. grown or if a page should be replaced. Replacement is
  168. done here if deemed necessary.
  169. Arguments:
  170. WsInfo - Supplies the working set information structure to replace within.
  171. MustReplace - Supplies TRUE if replacement must succeed.
  172. Return Value:
  173. None.
  174. Environment:
  175. Kernel mode, APCs disabled, working set lock. PFN lock NOT held.
  176. --*/
  177. {
  178. WSLE_NUMBER PagesTrimmed;
  179. ULONG MemoryMaker;
  180. PMMWSL WorkingSetList;
  181. WSLE_NUMBER CurrentSize;
  182. LARGE_INTEGER CurrentTime;
  183. PFN_NUMBER Dummy1;
  184. PFN_NUMBER Dummy2;
  185. WSLE_NUMBER Trim;
  186. ULONG TrimAge;
  187. ULONG GrowthSinceLastEstimate;
  188. WorkingSetList = WsInfo->VmWorkingSetList;
  189. GrowthSinceLastEstimate = 1;
  190. PERFINFO_BIGFOOT_REPLACEMENT_CLAIMS(WorkingSetList, WsInfo);
  191. PagesTrimmed = 0;
  192. //
  193. // Determine the number of pages that need to be available to
  194. // grow the working set and how much the quota should be
  195. // boosted if the working set grows over it.
  196. //
  197. // If below the Minimum use the defaults.
  198. //
  199. recheck:
  200. if (WsInfo->WorkingSetSize >= WsInfo->MinimumWorkingSetSize) {
  201. if (WsInfo->Flags.AllowWorkingSetAdjustment == MM_FORCE_TRIM) {
  202. //
  203. // The working set manager cannot attach to this process
  204. // to trim it. Force a trim now and update the working
  205. // set manager's fields properly to indicate a trim occurred.
  206. //
  207. Trim = WsInfo->Claim >>
  208. ((WsInfo->Flags.MemoryPriority == MEMORY_PRIORITY_FOREGROUND)
  209. ? MI_FOREGROUND_CLAIM_AVAILABLE_SHIFT
  210. : MI_BACKGROUND_CLAIM_AVAILABLE_SHIFT);
  211. if (MmAvailablePages < MM_HIGH_LIMIT + 64) {
  212. if (WsInfo->WorkingSetSize > WsInfo->MinimumWorkingSetSize) {
  213. Trim = (WsInfo->WorkingSetSize - WsInfo->MinimumWorkingSetSize) >> 2;
  214. }
  215. TrimAge = MI_PASS4_TRIM_AGE;
  216. }
  217. else {
  218. TrimAge = MI_PASS0_TRIM_AGE;
  219. }
  220. PagesTrimmed += MiTrimWorkingSet (Trim, WsInfo, TrimAge);
  221. MiAgeAndEstimateAvailableInWorkingSet (WsInfo,
  222. TRUE,
  223. NULL,
  224. &Dummy1,
  225. &Dummy2);
  226. KeQuerySystemTime (&CurrentTime);
  227. WsInfo->LastTrimTime = CurrentTime;
  228. WsInfo->Flags.AllowWorkingSetAdjustment = TRUE;
  229. goto recheck;
  230. }
  231. CurrentSize = WsInfo->WorkingSetSize;
  232. ASSERT (CurrentSize <= (WorkingSetList->LastInitializedWsle + 1));
  233. if ((WsInfo->Flags.WorkingSetHard) &&
  234. (CurrentSize >= WsInfo->MaximumWorkingSetSize)) {
  235. //
  236. // This is an enforced working set maximum triggering a replace.
  237. //
  238. MiReplaceWorkingSetEntry (WsInfo, MustReplace);
  239. return;
  240. }
  241. //
  242. // Don't grow if :
  243. // - we're over the max
  244. // - there aren't any pages to take
  245. // - or if we are growing too much in this time interval
  246. // and there isn't much memory available
  247. //
  248. MemoryMaker = PsGetCurrentThread()->MemoryMaker;
  249. if (((CurrentSize > MM_MAXIMUM_WORKING_SET) && (MemoryMaker == 0)) ||
  250. (MmAvailablePages == 0) ||
  251. (MustReplace == TRUE) ||
  252. ((MmAvailablePages < 10000) &&
  253. (MI_WS_GROWING_TOO_FAST(WsInfo)) &&
  254. (MemoryMaker == 0))) {
  255. //
  256. // Can't grow this one.
  257. //
  258. MiReplacing = TRUE;
  259. if (MemoryMaker == 0) {
  260. MiReplaceWorkingSetEntry (WsInfo, MustReplace);
  261. //
  262. // Set the must trim flag because this could be a realtime
  263. // thread where the fault straddles a page boundary. If
  264. // it's realtime, the balance set manager will never get to
  265. // run and the thread will endlessly replace one WSL entry
  266. // with the other half of the straddler. Setting this flag
  267. guarantees that the next fault will force a trim
  268. // and allow a reasonable available page threshold trim
  269. // calculation since GrowthSinceLastEstimate will be
  270. // cleared.
  271. //
  272. WsInfo->Flags.AllowWorkingSetAdjustment = MM_FORCE_TRIM;
  273. GrowthSinceLastEstimate = 0;
  274. }
  275. else {
  276. //
  277. // If we've only trimmed a single page, then don't force
  278. // replacement on the next fault. This prevents a single
  279. // instruction causing alternating faults on the referenced
  280. // code & data in a (realtime) thread from looping endlessly.
  281. //
  282. if (PagesTrimmed > 1) {
  283. WsInfo->Flags.AllowWorkingSetAdjustment = MM_FORCE_TRIM;
  284. }
  285. }
  286. }
  287. }
  288. //
  289. // If there isn't enough memory to allow growth, find a good page
  290. // to remove and remove it.
  291. //
  292. WsInfo->GrowthSinceLastEstimate += GrowthSinceLastEstimate;
  293. return;
  294. }
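
The forced-trim sizing above takes the trim target from the claim unless free memory is very low, in which case a quarter of the pages above the working set minimum are targeted instead. A compact standalone sketch of that decision follows; the shift amounts are illustrative stand-ins for the MI_*_CLAIM_AVAILABLE_SHIFT constants.

#include <stddef.h>

#define FOREGROUND_SHIFT 3   /* illustrative stand-ins for the          */
#define BACKGROUND_SHIFT 1   /* MI_*_CLAIM_AVAILABLE_SHIFT constants    */

/*
 * Return the number of pages to trim: normally a fraction of the claim
 * (pages the working set is estimated not to need), but when free
 * memory is very low, a quarter of everything above the minimum.
 */
size_t ComputeTrim (size_t Claim,
                    int Foreground,
                    size_t WorkingSetSize,
                    size_t Minimum,
                    int MemoryVeryLow)
{
    size_t Trim;

    Trim = Claim >> (Foreground ? FOREGROUND_SHIFT : BACKGROUND_SHIFT);

    if (MemoryVeryLow && WorkingSetSize > Minimum) {
        Trim = (WorkingSetSize - Minimum) >> 2;
    }

    return Trim;
}
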
  295. LOGICAL
  296. MmEnforceWorkingSetLimit (
  297. IN PMMSUPPORT WsInfo,
  298. IN LOGICAL Enable
  299. )
  300. /*++
  301. Routine Description:
  302. This function enables hard enforcement of the working set maximum for
  303. the specified WsInfo.
  304. Arguments:
  305. WsInfo - Supplies the working set info pointer.
  306. Enable - Supplies TRUE if enabling hard enforcement, FALSE if not.
  307. Return Value:
  308. The previous state of the working set enforcement.
  309. Environment:
  310. Kernel mode, APCs disabled. The working set lock must NOT be held.
  311. The caller guarantees that the target WsInfo cannot go away.
  312. --*/
  313. {
  314. KIRQL OldIrql;
  315. LOGICAL PreviousWorkingSetEnforcement;
  316. LOCK_EXPANSION (OldIrql);
  317. PreviousWorkingSetEnforcement = WsInfo->Flags.WorkingSetHard;
  318. WsInfo->Flags.WorkingSetHard = Enable;
  319. UNLOCK_EXPANSION (OldIrql);
  320. #if 0
  321. PEPROCESS CurrentProcess;
  322. //
  323. // Get the working set lock and disable APCs.
  324. // The working set could be trimmed at this point if it is excessive.
  325. //
  326. // The working set lock cannot be acquired at this point without updating
  327. // ps in order to avoid deadlock.
  328. //
  329. if (WsInfo == &MmSystemCacheWs) {
  330. LOCK_SYSTEM_WS (OldIrql2, PsGetCurrentThread ());
  331. UNLOCK_SYSTEM_WS (OldIrql2);
  332. }
  333. else if (WsInfo->u.Flags.SessionSpace == 0) {
  334. CurrentProcess = PsGetCurrentProcess ();
  335. LOCK_WS (CurrentProcess);
  336. UNLOCK_WS (CurrentProcess);
  337. }
  338. #endif
  339. return PreviousWorkingSetEnforcement;
  340. }
  341. VOID
  342. MiReplaceWorkingSetEntry (
  343. IN PMMSUPPORT WsInfo,
  344. IN LOGICAL MustReplace
  345. )
  346. /*++
  347. Routine Description:
  348. This function tries to find a good working set entry to replace.
  349. Arguments:
  350. WsInfo - Supplies the working set info pointer.
  351. MustReplace - Supplies TRUE if replacement must succeed.
  352. Return Value:
  353. None
  354. Environment:
  355. Kernel mode, APCs disabled, working set lock. PFN lock NOT held.
  356. --*/
  357. {
  358. WSLE_NUMBER WorkingSetIndex;
  359. WSLE_NUMBER FirstDynamic;
  360. WSLE_NUMBER LastEntry;
  361. PMMWSL WorkingSetList;
  362. PMMWSLE Wsle;
  363. ULONG NumberOfCandidates;
  364. PMMPTE PointerPte;
  365. WSLE_NUMBER TheNextSlot;
  366. WSLE_NUMBER OldestWorkingSetIndex;
  367. LONG OldestAge;
  368. WorkingSetList = WsInfo->VmWorkingSetList;
  369. Wsle = WorkingSetList->Wsle;
  370. //
  371. // Toss a page out of the working set.
  372. //
  373. LastEntry = WorkingSetList->LastEntry;
  374. FirstDynamic = WorkingSetList->FirstDynamic;
  375. WorkingSetIndex = WorkingSetList->NextSlot;
  376. if (WorkingSetIndex > LastEntry || WorkingSetIndex < FirstDynamic) {
  377. WorkingSetIndex = FirstDynamic;
  378. }
  379. TheNextSlot = WorkingSetIndex;
  380. NumberOfCandidates = 0;
  381. OldestWorkingSetIndex = WSLE_NULL_INDEX;
  382. OldestAge = -1;
  383. while (TRUE) {
  384. //
  385. // Keep track of the oldest page along the way in case we
  386. // don't find one that's >= MI_IMMEDIATE_REPLACEMENT_AGE
  387. // before we've looked at MM_WORKING_SET_LIST_SEARCH
  388. // entries.
  389. //
  390. while (Wsle[WorkingSetIndex].u1.e1.Valid == 0) {
  391. WorkingSetIndex += 1;
  392. if (WorkingSetIndex > LastEntry) {
  393. WorkingSetIndex = FirstDynamic;
  394. }
  395. if (WorkingSetIndex == TheNextSlot && MustReplace == FALSE) {
  396. //
  397. // Entire working set list has been searched, increase
  398. // the working set size.
  399. //
  400. WsInfo->GrowthSinceLastEstimate += 1;
  401. return;
  402. }
  403. }
  404. if (OldestWorkingSetIndex == WSLE_NULL_INDEX) {
  405. //
  406. // First time through, so initialize the OldestWorkingSetIndex
  407. // to the first valid WSLE. As we go along, this will be repointed
  408. // at the oldest candidate we come across.
  409. //
  410. OldestWorkingSetIndex = WorkingSetIndex;
  411. OldestAge = -1;
  412. }
  413. PointerPte = MiGetPteAddress(Wsle[WorkingSetIndex].u1.VirtualAddress);
  414. if (MustReplace == TRUE ||
  415. ((MI_GET_ACCESSED_IN_PTE(PointerPte) == 0) &&
  416. (OldestAge < (LONG) MI_GET_WSLE_AGE(PointerPte, &Wsle[WorkingSetIndex])))) {
  417. //
  418. // This one is not used and it's older.
  419. //
  420. OldestAge = MI_GET_WSLE_AGE(PointerPte, &Wsle[WorkingSetIndex]);
  421. OldestWorkingSetIndex = WorkingSetIndex;
  422. }
  423. //
  424. // If it's old enough or we've searched too much then use this entry.
  425. //
  426. if (MustReplace == TRUE ||
  427. OldestAge >= MI_IMMEDIATE_REPLACEMENT_AGE ||
  428. NumberOfCandidates > MM_WORKING_SET_LIST_SEARCH) {
  429. PERFINFO_PAGE_INFO_REPLACEMENT_DECL();
  430. if (OldestWorkingSetIndex != WorkingSetIndex) {
  431. WorkingSetIndex = OldestWorkingSetIndex;
  432. PointerPte = MiGetPteAddress(Wsle[WorkingSetIndex].u1.VirtualAddress);
  433. }
  434. PERFINFO_GET_PAGE_INFO_REPLACEMENT(PointerPte);
  435. if (MiFreeWsle(WorkingSetIndex, WsInfo, PointerPte)) {
  436. PERFINFO_LOG_WS_REPLACEMENT(WsInfo);
  437. //
  438. // This entry was removed.
  439. //
  440. WorkingSetList->NextSlot = WorkingSetIndex + 1;
  441. break;
  442. }
  443. //
  444. // We failed to remove a page, try the next one.
  445. //
  446. // Clear the OldestWorkingSetIndex so that
  447. // it gets set to the next valid entry above like the
  448. // first time around.
  449. //
  450. WorkingSetIndex = OldestWorkingSetIndex + 1;
  451. OldestWorkingSetIndex = WSLE_NULL_INDEX;
  452. }
  453. else {
  454. WorkingSetIndex += 1;
  455. }
  456. if (WorkingSetIndex > LastEntry) {
  457. WorkingSetIndex = FirstDynamic;
  458. }
  459. NumberOfCandidates += 1;
  460. if (WorkingSetIndex == TheNextSlot && MustReplace == FALSE) {
  461. //
  462. // Entire working set list has been searched, increase
  463. // the working set size.
  464. //
  465. WsInfo->GrowthSinceLastEstimate += 1;
  466. break;
  467. }
  468. }
  469. }
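
The scan above is a bounded clock-style search: it walks the dynamic entries circularly starting at NextSlot, remembers the oldest unreferenced entry seen so far, and stops once a candidate is old enough or the search budget is spent. Below is a standalone sketch of just that selection loop; the constants are illustrative, not the real values of MI_IMMEDIATE_REPLACEMENT_AGE or MM_WORKING_SET_LIST_SEARCH, and the real routine additionally retries if freeing the chosen entry fails.

#include <stddef.h>

#define OLD_ENOUGH     3    /* stand-in for MI_IMMEDIATE_REPLACEMENT_AGE */
#define SEARCH_LIMIT  16    /* stand-in for MM_WORKING_SET_LIST_SEARCH   */

typedef struct _SLOT {
    int Valid;
    int Accessed;           /* accessed bit, as sampled from the PTE     */
    int Age;                /* time since the entry was last referenced  */
} SLOT;

/*
 * Scan circularly from Start, tracking the oldest unreferenced valid
 * slot.  Stop early once a candidate is old enough or too many slots
 * have been examined.  Returns the victim index, or -1 if none found.
 */
int PickVictim (const SLOT *Slots, size_t Count, size_t Start)
{
    size_t Index = Start;
    size_t Candidates = 0;
    int OldestIndex = -1;
    int OldestAge = -1;

    do {
        if (Slots[Index].Valid &&
            Slots[Index].Accessed == 0 &&
            Slots[Index].Age > OldestAge) {

            OldestAge = Slots[Index].Age;
            OldestIndex = (int) Index;
        }

        if (OldestAge >= OLD_ENOUGH || Candidates > SEARCH_LIMIT) {
            return OldestIndex;
        }

        Candidates += 1;
        Index = (Index + 1) % Count;

    } while (Index != Start);

    return OldestIndex;
}
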
  470. ULONG
  471. MiRemovePageFromWorkingSet (
  472. IN PMMPTE PointerPte,
  473. IN PMMPFN Pfn1,
  474. IN PMMSUPPORT WsInfo
  475. )
  476. /*++
  477. Routine Description:
  478. This function removes the page mapped by the specified PTE from
  479. the process's working set list.
  480. Arguments:
  481. PointerPte - Supplies a pointer to the PTE mapping the page to
  482. be removed from the working set list.
  483. Pfn1 - Supplies a pointer to the PFN database element referred to by the PointerPte.
  484. WsInfo - Supplies the working set structure to remove the page from.
  485. Return Value:
  486. Returns TRUE if the specified page was locked in the working set,
  487. FALSE otherwise.
  488. Environment:
  489. Kernel mode, APCs disabled, working set mutex held.
  490. --*/
  491. {
  492. WSLE_NUMBER WorkingSetIndex;
  493. PVOID VirtualAddress;
  494. WSLE_NUMBER Entry;
  495. PVOID SwapVa;
  496. MMWSLENTRY Locked;
  497. PMMWSL WorkingSetList;
  498. PMMWSLE Wsle;
  499. KIRQL OldIrql;
  500. WorkingSetList = WsInfo->VmWorkingSetList;
  501. Wsle = WorkingSetList->Wsle;
  502. VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
  503. WorkingSetIndex = MiLocateWsle (VirtualAddress,
  504. WorkingSetList,
  505. Pfn1->u1.WsIndex);
  506. ASSERT (WorkingSetIndex != WSLE_NULL_INDEX);
  507. LOCK_PFN (OldIrql);
  508. MiEliminateWorkingSetEntry (WorkingSetIndex,
  509. PointerPte,
  510. Pfn1,
  511. Wsle);
  512. UNLOCK_PFN (OldIrql);
  513. //
  514. // Check to see if this entry is locked in the working set
  515. // or locked in memory.
  516. //
  517. Locked = Wsle[WorkingSetIndex].u1.e1;
  518. MiRemoveWsle (WorkingSetIndex, WorkingSetList);
  519. //
  520. // Add this entry to the list of free working set entries
  521. // and adjust the working set count.
  522. //
  523. MiReleaseWsle ((WSLE_NUMBER)WorkingSetIndex, WsInfo);
  524. if ((Locked.LockedInWs == 1) || (Locked.LockedInMemory == 1)) {
  525. //
  526. // This entry is locked.
  527. //
  528. WorkingSetList->FirstDynamic -= 1;
  529. if (WorkingSetIndex != WorkingSetList->FirstDynamic) {
  530. SwapVa = Wsle[WorkingSetList->FirstDynamic].u1.VirtualAddress;
  531. SwapVa = PAGE_ALIGN (SwapVa);
  532. PointerPte = MiGetPteAddress (SwapVa);
  533. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  534. Entry = MiLocateWsle (SwapVa, WorkingSetList, Pfn1->u1.WsIndex);
  535. MiSwapWslEntries (Entry, WorkingSetIndex, WsInfo);
  536. }
  537. return TRUE;
  538. }
  539. else {
  540. ASSERT (WorkingSetIndex >= WorkingSetList->FirstDynamic);
  541. }
  542. return FALSE;
  543. }
  544. VOID
  545. MiReleaseWsle (
  546. IN WSLE_NUMBER WorkingSetIndex,
  547. IN PMMSUPPORT WsInfo
  548. )
  549. /*++
  550. Routine Description:
  551. This function releases a previously reserved working set entry to
  552. be reused. A release occurs when a page fault is retried due to
  553. changes in PTEs and working sets during an I/O operation.
  554. Arguments:
  555. WorkingSetIndex - Supplies the index of the working set entry to release.
  556. WsInfo - Supplies a pointer to the working set structure.
  557. Return Value:
  558. None.
  559. Environment:
  560. Kernel mode, APCs disabled, working set lock held and PFN lock held.
  561. --*/
  562. {
  563. PMMWSL WorkingSetList;
  564. PMMWSLE Wsle;
  565. WorkingSetList = WsInfo->VmWorkingSetList;
  566. Wsle = WorkingSetList->Wsle;
  567. #if DBG
  568. if (WsInfo == &MmSystemCacheWs) {
  569. MM_SYSTEM_WS_LOCK_ASSERT();
  570. }
  571. #endif //DBG
  572. ASSERT (WorkingSetIndex <= WorkingSetList->LastInitializedWsle);
  573. //
  574. // Put the entry on the free list and decrement the current
  575. // size.
  576. //
  577. ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
  578. (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
  579. Wsle[WorkingSetIndex].u1.Long = WorkingSetList->FirstFree << MM_FREE_WSLE_SHIFT;
  580. WorkingSetList->FirstFree = WorkingSetIndex;
  581. ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
  582. (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
  583. if (WsInfo->WorkingSetSize > WsInfo->MinimumWorkingSetSize) {
  584. MmPagesAboveWsMinimum -= 1;
  585. }
  586. WsInfo->WorkingSetSize -= 1;
  587. return;
  588. }
  589. VOID
  590. MiUpdateWsle (
  591. IN OUT PWSLE_NUMBER DesiredIndex,
  592. IN PVOID VirtualAddress,
  593. PMMWSL WorkingSetList,
  594. IN PMMPFN Pfn
  595. )
  596. /*++
  597. Routine Description:
  598. This routine updates a reserved working set entry to place it into
  599. the valid state.
  600. Arguments:
  601. DesiredIndex - Supplies the index of the working set entry to update.
  602. VirtualAddress - Supplies the virtual address which the working set
  603. entry maps.
  604. WorkingSetList - Supplies a pointer to the working set list for the
  605. process (or system cache).
  606. Pfn - Supplies a pointer to the PFN element for the page.
  607. Return Value:
  608. None.
  609. Environment:
  610. Kernel mode, APCs disabled, working set lock held.
  611. --*/
  612. {
  613. PMMWSLE Wsle;
  614. WSLE_NUMBER Index;
  615. WSLE_NUMBER WorkingSetIndex;
  616. //
  617. // The value 0 is invalid. This is due to the fact that the working
  618. // set lock is a process wide lock and two threads in different
  619. // processes could be adding the same physical page to their working
  620. // sets. Each one could see the WsIndex field in the PFN as 0, and
  621. // set the direct bit. To solve this, the WsIndex field is set to
  622. // the current thread pointer.
  623. //
  624. ASSERT (Pfn->u1.WsIndex != 0);
  625. WorkingSetIndex = *DesiredIndex;
  626. Wsle = WorkingSetList->Wsle;
  627. if (WorkingSetList == MmSystemCacheWorkingSetList) {
  628. //
  629. // This assert doesn't hold for NT64 as we can be adding page
  630. // directories and page tables for the system cache WSLE hash tables.
  631. //
  632. ASSERT32 ((VirtualAddress < (PVOID)PTE_BASE) ||
  633. (VirtualAddress >= (PVOID)MM_SYSTEM_SPACE_START));
  634. }
  635. else {
  636. ASSERT ((VirtualAddress < (PVOID)MM_SYSTEM_SPACE_START) ||
  637. (MI_IS_SESSION_ADDRESS (VirtualAddress)));
  638. }
  639. ASSERT (WorkingSetIndex >= WorkingSetList->FirstDynamic);
  640. if (WorkingSetList == MmSystemCacheWorkingSetList) {
  641. MM_SYSTEM_WS_LOCK_ASSERT();
  642. //
  643. // count system space inserts and removals.
  644. //
  645. #if defined(_X86_)
  646. if (MI_IS_SYSTEM_CACHE_ADDRESS(VirtualAddress)) {
  647. MmSystemCachePage += 1;
  648. }
  649. else
  650. #endif
  651. if (VirtualAddress < MmSystemCacheStart) {
  652. MmSystemCodePage += 1;
  653. }
  654. else if (VirtualAddress < MM_PAGED_POOL_START) {
  655. MmSystemCachePage += 1;
  656. }
  657. else if (VirtualAddress < MmNonPagedSystemStart) {
  658. MmPagedPoolPage += 1;
  659. }
  660. else {
  661. MmSystemDriverPage += 1;
  662. }
  663. }
  664. //
  665. // Make the wsle valid, referring to the corresponding virtual
  666. // page number.
  667. //
  668. //
  669. // The value 0 is invalid. This is due to the fact that the working
  670. // set lock is a process wide lock and two threads in different
  671. // processes could be adding the same physical page to their working
  672. // sets. Each one could see the WsIndex field in the PFN as 0, and
  673. // set the direct bit. To solve this, the WsIndex field is set to
  674. // the current thread pointer.
  675. //
  676. ASSERT (Pfn->u1.WsIndex != 0);
  677. #if DBG
  678. if (Pfn->u1.WsIndex <= WorkingSetList->LastInitializedWsle) {
  679. ASSERT ((PAGE_ALIGN(VirtualAddress) !=
  680. PAGE_ALIGN(Wsle[Pfn->u1.WsIndex].u1.VirtualAddress)) ||
  681. (Wsle[Pfn->u1.WsIndex].u1.e1.Valid == 0));
  682. }
  683. #endif //DBG
  684. Wsle[WorkingSetIndex].u1.VirtualAddress = VirtualAddress;
  685. Wsle[WorkingSetIndex].u1.Long &= ~(PAGE_SIZE - 1);
  686. Wsle[WorkingSetIndex].u1.e1.Valid = 1;
  687. if ((ULONG_PTR)Pfn->u1.Event == (ULONG_PTR)PsGetCurrentThread()) {
  688. //
  689. // Directly index into the WSL for this entry via the PFN database
  690. // element.
  691. //
  692. //
  693. // The entire working set index union must be zeroed on NT64. ie:
  694. // The WSLE_NUMBER is currently 32 bits and the PKEVENT is 64 - we
  695. // must zero the top 32 bits as well. So instead of setting the
  696. // WsIndex field, set the overlaid Event field with appropriate casts.
  697. //
  698. Pfn->u1.Event = (PKEVENT) (ULONG_PTR) WorkingSetIndex;
  699. Wsle[WorkingSetIndex].u1.e1.Direct = 1;
  700. return;
  701. }
  702. if (WorkingSetList->HashTable == NULL) {
  703. //
  704. // Try to insert at WsIndex.
  705. //
  706. Index = Pfn->u1.WsIndex;
  707. if ((Index < WorkingSetList->LastInitializedWsle) &&
  708. (Index > WorkingSetList->FirstDynamic) &&
  709. (Index != WorkingSetIndex)) {
  710. if (Wsle[Index].u1.e1.Valid) {
  711. if (Wsle[Index].u1.e1.Direct) {
  712. //
  713. // Only move direct indexed entries.
  714. //
  715. PMMSUPPORT WsInfo;
  716. if (Wsle == MmWsle) {
  717. WsInfo = &PsGetCurrentProcess()->Vm;
  718. }
  719. else if (Wsle == MmSystemCacheWsle) {
  720. WsInfo = &MmSystemCacheWs;
  721. }
  722. else {
  723. WsInfo = &MmSessionSpace->Vm;
  724. }
  725. MiSwapWslEntries (Index, WorkingSetIndex, WsInfo);
  726. WorkingSetIndex = Index;
  727. }
  728. }
  729. else {
  730. //
  731. // On free list, try to remove quickly without walking
  732. // all the free pages.
  733. //
  734. WSLE_NUMBER FreeIndex;
  735. MMWSLE Temp;
  736. FreeIndex = 0;
  737. ASSERT (WorkingSetList->FirstFree >= WorkingSetList->FirstDynamic);
  738. ASSERT (WorkingSetIndex >= WorkingSetList->FirstDynamic);
  739. if (WorkingSetList->FirstFree == Index) {
  740. WorkingSetList->FirstFree = WorkingSetIndex;
  741. Temp = Wsle[WorkingSetIndex];
  742. Wsle[WorkingSetIndex] = Wsle[Index];
  743. Wsle[Index] = Temp;
  744. WorkingSetIndex = Index;
  745. ASSERT (((Wsle[WorkingSetList->FirstFree].u1.Long >> MM_FREE_WSLE_SHIFT)
  746. <= WorkingSetList->LastInitializedWsle) ||
  747. ((Wsle[WorkingSetList->FirstFree].u1.Long >> MM_FREE_WSLE_SHIFT)
  748. == WSLE_NULL_INDEX));
  749. }
  750. else if (Wsle[Index - 1].u1.e1.Valid == 0) {
  751. if ((Wsle[Index - 1].u1.Long >> MM_FREE_WSLE_SHIFT) == Index) {
  752. FreeIndex = Index - 1;
  753. }
  754. }
  755. else if (Wsle[Index + 1].u1.e1.Valid == 0) {
  756. if ((Wsle[Index + 1].u1.Long >> MM_FREE_WSLE_SHIFT) == Index) {
  757. FreeIndex = Index + 1;
  758. }
  759. }
  760. if (FreeIndex != 0) {
  761. //
  762. // Link the Wsle into the free list.
  763. //
  764. Temp = Wsle[WorkingSetIndex];
  765. Wsle[FreeIndex].u1.Long = WorkingSetIndex << MM_FREE_WSLE_SHIFT;
  766. Wsle[WorkingSetIndex] = Wsle[Index];
  767. Wsle[Index] = Temp;
  768. WorkingSetIndex = Index;
  769. ASSERT (((Wsle[FreeIndex].u1.Long >> MM_FREE_WSLE_SHIFT)
  770. <= WorkingSetList->LastInitializedWsle) ||
  771. ((Wsle[FreeIndex].u1.Long >> MM_FREE_WSLE_SHIFT)
  772. == WSLE_NULL_INDEX));
  773. }
  774. }
  775. *DesiredIndex = WorkingSetIndex;
  776. if (WorkingSetIndex > WorkingSetList->LastEntry) {
  777. WorkingSetList->LastEntry = WorkingSetIndex;
  778. }
  779. }
  780. }
  781. WorkingSetList->NonDirectCount += 1;
  782. if (WorkingSetList->HashTable != NULL) {
  783. //
  784. // Insert the valid WSLE into the working set hash list.
  785. //
  786. MiInsertWsleHash (WorkingSetIndex, WorkingSetList);
  787. }
  788. return;
  789. }
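
When the faulting thread is the one recorded in the PFN, the routine above stores the slot index directly in the overlaid Event/WsIndex field and sets the Direct bit, so later lookups need neither the hash table nor a scan. A simplified sketch of that back-index idea is shown below; the names are illustrative, and the real lookup path is MiLocateWsle against the PFN database with a hash table once the list grows large.

#include <stddef.h>

typedef struct _FRAME {
    size_t WsIndex;         /* back-pointer into the working set table    */
    int    Direct;          /* back-pointer is exact; no search required  */
} FRAME;

typedef struct _WS_ENTRY {
    void *VirtualAddress;
    int   Valid;
} WS_ENTRY;

/*
 * Locate the working set entry mapping VirtualAddress.  With a direct
 * back-index the answer is immediate; otherwise fall back to a search.
 */
size_t LocateEntry (const WS_ENTRY *Table,
                    size_t Count,
                    const FRAME *Frame,
                    void *VirtualAddress)
{
    size_t i;

    if (Frame->Direct) {
        return Frame->WsIndex;
    }

    for (i = 0; i < Count; i += 1) {
        if (Table[i].Valid && Table[i].VirtualAddress == VirtualAddress) {
            return i;
        }
    }

    return (size_t) -1;
}
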
  790. ULONG
  791. MiFreeWsle (
  792. IN WSLE_NUMBER WorkingSetIndex,
  793. IN PMMSUPPORT WsInfo,
  794. IN PMMPTE PointerPte
  795. )
  796. /*++
  797. Routine Description:
  798. This routine frees the specified WSLE and decrements the share
  799. count for the corresponding page, putting the PTE into a transition
  800. state if the share count goes to 0.
  801. Arguments:
  802. WorkingSetIndex - Supplies the index of the working set entry to free.
  803. WsInfo - Supplies a pointer to the working set structure (process or
  804. system cache).
  805. PointerPte - Supplies a pointer to the PTE for the working set entry.
  806. Return Value:
  807. Returns TRUE if the WSLE was removed, FALSE if it was not removed.
  808. Pages with valid PTEs are not removed (i.e. page table pages
  809. that contain valid or transition PTEs).
  810. Environment:
  811. Kernel mode, APCs disabled, working set lock. PFN lock NOT held.
  812. --*/
  813. {
  814. PMMPFN Pfn1;
  815. PMMWSL WorkingSetList;
  816. PMMWSLE Wsle;
  817. KIRQL OldIrql;
  818. WorkingSetList = WsInfo->VmWorkingSetList;
  819. Wsle = WorkingSetList->Wsle;
  820. #if DBG
  821. if (WsInfo == &MmSystemCacheWs) {
  822. MM_SYSTEM_WS_LOCK_ASSERT();
  823. }
  824. #endif //DBG
  825. ASSERT (Wsle[WorkingSetIndex].u1.e1.Valid == 1);
  826. //
  827. // Check to see if the located entry is eligible for removal.
  828. //
  829. ASSERT (PointerPte->u.Hard.Valid == 1);
  830. ASSERT (WorkingSetIndex >= WorkingSetList->FirstDynamic);
  831. //
  832. // Check to see if this is a page table with valid PTEs.
  833. //
  834. // Note, don't clear the access bit for page table pages
  835. // with valid PTEs as this could cause an access trap fault which
  836. // would not be handled (it is only handled for PTEs not PDEs).
  837. //
  838. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  839. LOCK_PFN (OldIrql);
  840. //
  841. // If the PTE is a page table page with non-zero share count or
  842. // within the system cache with its reference count greater
  843. // than 1, don't remove it.
  844. //
  845. if (WsInfo == &MmSystemCacheWs) {
  846. if (Pfn1->u3.e2.ReferenceCount > 1) {
  847. UNLOCK_PFN (OldIrql);
  848. return FALSE;
  849. }
  850. }
  851. else {
  852. if ((Pfn1->u2.ShareCount > 1) &&
  853. (Pfn1->u3.e1.PrototypePte == 0)) {
  854. #if DBG
  855. if (WsInfo->Flags.SessionSpace == 1) {
  856. ASSERT (MI_IS_SESSION_ADDRESS (Wsle[WorkingSetIndex].u1.VirtualAddress));
  857. }
  858. else {
  859. ASSERT32 ((Wsle[WorkingSetIndex].u1.VirtualAddress >= (PVOID)PTE_BASE) &&
  860. (Wsle[WorkingSetIndex].u1.VirtualAddress <= (PVOID)PTE_TOP));
  861. }
  862. #endif
  863. //
  864. // Don't remove page table pages from the working set until
  865. // all transition pages have exited.
  866. //
  867. UNLOCK_PFN (OldIrql);
  868. return FALSE;
  869. }
  870. }
  871. //
  872. // Found a candidate, remove the page from the working set.
  873. //
  874. MiEliminateWorkingSetEntry (WorkingSetIndex,
  875. PointerPte,
  876. Pfn1,
  877. Wsle);
  878. UNLOCK_PFN (OldIrql);
  879. //
  880. // Remove the working set entry from the working set.
  881. //
  882. MiRemoveWsle (WorkingSetIndex, WorkingSetList);
  883. ASSERT (WorkingSetList->FirstFree >= WorkingSetList->FirstDynamic);
  884. ASSERT (WorkingSetIndex >= WorkingSetList->FirstDynamic);
  885. //
  886. // Put the entry on the free list and decrement the current
  887. // size.
  888. //
  889. ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
  890. (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
  891. Wsle[WorkingSetIndex].u1.Long = WorkingSetList->FirstFree << MM_FREE_WSLE_SHIFT;
  892. WorkingSetList->FirstFree = WorkingSetIndex;
  893. ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
  894. (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
  895. if (WsInfo->WorkingSetSize > WsInfo->MinimumWorkingSetSize) {
  896. MmPagesAboveWsMinimum -= 1;
  897. }
  898. WsInfo->WorkingSetSize -= 1;
  899. return TRUE;
  900. }
  901. #define MI_INITIALIZE_WSLE(_VirtualAddress, _WslEntry) { \
  902. PMMPFN _Pfn1; \
  903. _WslEntry->u1.VirtualAddress = (PVOID)(_VirtualAddress); \
  904. _WslEntry->u1.e1.Valid = 1; \
  905. _WslEntry->u1.e1.LockedInWs = 1; \
  906. _WslEntry->u1.e1.Direct = 1; \
  907. _Pfn1 = MI_PFN_ELEMENT (MiGetPteAddress ((PVOID)(_VirtualAddress))->u.Hard.PageFrameNumber); \
  908. ASSERT (_Pfn1->u1.WsIndex == 0); \
  909. _Pfn1->u1.WsIndex = (WSLE_NUMBER)(_WslEntry - MmWsle); \
  910. (_WslEntry) += 1; \
  911. }
  912. PFN_NUMBER
  913. MiInitializeExtraWorkingSetPages (
  914. IN PEPROCESS CurrentProcess,
  915. IN WSLE_NUMBER NumberOfEntriesMapped,
  916. IN PMMWSLE WslEntry
  917. )
  918. /*++
  919. Routine Description:
  920. This is a nonpaged helper routine to obtain extra pages to initialize
  921. large working sets.
  922. Arguments:
  923. CurrentProcess - Supplies a pointer to the process.
  924. NumberOfEntriesMapped - Supplies the number of entries currently mapped.
  925. WslEntry - Supplies a pointer to the current working set list entry.
  926. Return Value:
  927. Pages added.
  928. --*/
  929. {
  930. KIRQL OldIrql;
  931. MMPTE TempPte;
  932. PMMPTE PointerPte;
  933. ULONG_PTR CurrentVa;
  934. PFN_NUMBER WorkingSetPage;
  935. PFN_NUMBER PagesAdded;
  936. CurrentVa = (ULONG_PTR) ROUND_TO_PAGES (MmWsle);
  937. PointerPte = MiGetPteAddress ((PVOID) CurrentVa);
  938. PagesAdded = 0;
  939. do {
  940. if (MiChargeCommitment (1, NULL) == FALSE) {
  941. break;
  942. }
  943. MM_TRACK_COMMIT (MM_DBG_COMMIT_EXTRA_WS_PAGES, 1);
  944. ASSERT (PointerPte->u.Long == 0);
  945. PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  946. LOCK_PFN (OldIrql);
  947. MiEnsureAvailablePageOrWait (NULL, NULL);
  948. WorkingSetPage = MiRemoveZeroPage (
  949. MI_PAGE_COLOR_PTE_PROCESS (PointerPte,
  950. &CurrentProcess->NextPageColor));
  951. MiInitializePfn (WorkingSetPage, PointerPte, 1);
  952. UNLOCK_PFN (OldIrql);
  953. MI_MAKE_VALID_PTE (TempPte, WorkingSetPage, MM_READWRITE, PointerPte);
  954. MI_SET_PTE_DIRTY (TempPte);
  955. MI_SET_PTE_IN_WORKING_SET (&TempPte, WslEntry - MmWsle);
  956. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  957. MI_INITIALIZE_WSLE (CurrentVa, WslEntry);
  958. PagesAdded += 1;
  959. NumberOfEntriesMapped += PAGE_SIZE / sizeof(MMWSLE);
  960. CurrentVa += PAGE_SIZE;
  961. PointerPte += 1;
  962. } while (CurrentProcess->Vm.MaximumWorkingSetSize >= NumberOfEntriesMapped);
  963. return PagesAdded;
  964. }
  965. VOID
  966. MiInitializeWorkingSetList (
  967. IN PEPROCESS CurrentProcess
  968. )
  969. /*++
  970. Routine Description:
  971. This routine initializes a process's working set to the empty
  972. state.
  973. Arguments:
  974. CurrentProcess - Supplies a pointer to the process to initialize.
  975. Return Value:
  976. None.
  977. Environment:
  978. Kernel mode, APCs disabled.
  979. --*/
  980. {
  981. PMMPFN Pfn1;
  982. WSLE_NUMBER i;
  983. PMMWSLE WslEntry;
  984. WSLE_NUMBER CurrentWsIndex;
  985. WSLE_NUMBER NumberOfEntriesMapped;
  986. PFN_NUMBER PagesAdded;
  987. PVOID VirtualAddress;
  988. WslEntry = MmWsle;
  989. //
  990. // Initialize the working set list control cells.
  991. //
  992. MmWorkingSetList->LastEntry = CurrentProcess->Vm.MinimumWorkingSetSize;
  993. MmWorkingSetList->HashTable = NULL;
  994. MmWorkingSetList->HashTableSize = 0;
  995. MmWorkingSetList->NumberOfImageWaiters = 0;
  996. MmWorkingSetList->Wsle = MmWsle;
  997. MmWorkingSetList->VadBitMapHint = 1;
  998. MmWorkingSetList->HashTableStart =
  999. (PVOID)((PCHAR)PAGE_ALIGN (&MmWsle[MM_MAXIMUM_WORKING_SET]) + PAGE_SIZE);
  1000. MmWorkingSetList->HighestPermittedHashAddress = (PVOID)((ULONG_PTR)HYPER_SPACE_END + 1);
  1001. //
  1002. // Fill in the reserved slots.
  1003. // Start with the top level page directory page.
  1004. //
  1005. #if (_MI_PAGING_LEVELS >= 4)
  1006. VirtualAddress = (PVOID) PXE_BASE;
  1007. #elif (_MI_PAGING_LEVELS >= 3)
  1008. VirtualAddress = (PVOID) PDE_TBASE;
  1009. #else
  1010. VirtualAddress = (PVOID) PDE_BASE;
  1011. #endif
  1012. MI_INITIALIZE_WSLE (VirtualAddress, WslEntry);
  1013. #if defined (_X86PAE_)
  1014. //
  1015. // Fill in the additional page directory entries.
  1016. //
  1017. for (i = 1; i < PD_PER_SYSTEM; i += 1) {
  1018. MI_INITIALIZE_WSLE (PDE_BASE + i * PAGE_SIZE, WslEntry);
  1019. }
  1020. VirtualAddress = (PVOID)((ULONG_PTR)VirtualAddress + ((PD_PER_SYSTEM - 1) * PAGE_SIZE));
  1021. #endif
  1022. Pfn1 = MI_PFN_ELEMENT (MiGetPteAddress ((PVOID)(VirtualAddress))->u.Hard.PageFrameNumber);
  1023. ASSERT (Pfn1->u4.PteFrame == (ULONG_PTR)(Pfn1 - MmPfnDatabase));
  1024. Pfn1->u1.Event = (PVOID) CurrentProcess;
  1025. #if (_MI_PAGING_LEVELS >= 4)
  1026. //
  1027. // Fill in the entry for the hyper space page directory parent page.
  1028. //
  1029. MI_INITIALIZE_WSLE (MiGetPpeAddress (HYPER_SPACE), WslEntry);
  1030. #endif
  1031. #if (_MI_PAGING_LEVELS >= 3)
  1032. //
  1033. // Fill in the entry for the hyper space page directory page.
  1034. //
  1035. MI_INITIALIZE_WSLE (MiGetPdeAddress (HYPER_SPACE), WslEntry);
  1036. #endif
  1037. //
  1038. // Fill in the entry for the page table page which maps hyper space.
  1039. //
  1040. MI_INITIALIZE_WSLE (MiGetPteAddress (HYPER_SPACE), WslEntry);
  1041. #if defined (_X86PAE_)
  1042. //
  1043. // Fill in the entry for the second page table page which maps hyper space.
  1044. //
  1045. MI_INITIALIZE_WSLE (MiGetPteAddress (HYPER_SPACE2), WslEntry);
  1046. #endif
  1047. //
  1048. // Fill in the entry for the first VAD bitmap page.
  1049. //
  1050. // Note when booted /3GB, the second VAD bitmap page is automatically
  1051. // inserted as part of the working set list page as the page is shared
  1052. // by both.
  1053. //
  1054. MI_INITIALIZE_WSLE (VAD_BITMAP_SPACE, WslEntry);
  1055. //
  1056. // Fill in the entry for the page which contains the working set list.
  1057. //
  1058. MI_INITIALIZE_WSLE (MmWorkingSetList, WslEntry);
  1059. //
  1060. // Check to see if more pages are required in the working set list
  1061. // to map the current maximum working set size.
  1062. //
  1063. NumberOfEntriesMapped = (PAGE_SIZE - BYTE_OFFSET (MmWsle)) / sizeof (MMWSLE);
  1064. if (CurrentProcess->Vm.MaximumWorkingSetSize >= NumberOfEntriesMapped) {
  1065. //
  1066. // The working set requires more than a single page.
  1067. //
  1068. PagesAdded = MiInitializeExtraWorkingSetPages (CurrentProcess,
  1069. NumberOfEntriesMapped,
  1070. WslEntry);
  1071. WslEntry += PagesAdded;
  1072. NumberOfEntriesMapped += (((WSLE_NUMBER)PagesAdded * PAGE_SIZE) / sizeof(MMWSLE));
  1073. }
  1074. CurrentWsIndex = (WSLE_NUMBER)(WslEntry - MmWsle);
  1075. CurrentProcess->Vm.WorkingSetSize = CurrentWsIndex;
  1076. MmWorkingSetList->FirstFree = CurrentWsIndex;
  1077. MmWorkingSetList->FirstDynamic = CurrentWsIndex;
  1078. MmWorkingSetList->NextSlot = CurrentWsIndex;
  1079. //
  1080. //
  1081. // Build the free list starting at the first dynamic entry.
  1082. //
  1083. i = CurrentWsIndex + 1;
  1084. do {
  1085. WslEntry->u1.Long = i << MM_FREE_WSLE_SHIFT;
  1086. WslEntry += 1;
  1087. i += 1;
  1088. } while (i <= NumberOfEntriesMapped);
  1089. //
  1090. // Mark the end of the list.
  1091. //
  1092. WslEntry -= 1;
  1093. WslEntry->u1.Long = WSLE_NULL_INDEX << MM_FREE_WSLE_SHIFT;
  1094. MmWorkingSetList->LastInitializedWsle = NumberOfEntriesMapped - 1;
  1095. if (CurrentProcess->Vm.MaximumWorkingSetSize > ((1536*1024) >> PAGE_SHIFT)) {
  1096. //
  1097. // The working set list consists of more than a single page.
  1098. //
  1099. MiGrowWsleHash (&CurrentProcess->Vm);
  1100. }
  1101. return;
  1102. }
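
For reference, with 4KB pages (PAGE_SHIFT of 12) the (1536*1024) >> PAGE_SHIFT threshold above works out to 384 pages: a WSLE hash table is grown during initialization only when the process maximum working set exceeds 384 entries, while smaller lists start with HashTable left NULL and have one grown later as needed.
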
  1103. VOID
  1104. MiInitializeSessionWsSupport (
  1105. VOID
  1106. )
  1107. /*++
  1108. Routine Description:
  1109. This routine initializes the session space working set support.
  1110. Arguments:
  1111. None.
  1112. Return Value:
  1113. None.
  1114. Environment:
  1115. Kernel mode, APC_LEVEL or below, no mutexes held.
  1116. --*/
  1117. {
  1118. //
  1119. // This is the list of all session spaces ordered in a working set list.
  1120. //
  1121. InitializeListHead (&MiSessionWsList);
  1122. }
  1123. NTSTATUS
  1124. MiSessionInitializeWorkingSetList (
  1125. VOID
  1126. )
  1127. /*++
  1128. Routine Description:
  1129. This function initializes the working set for the session space and adds
  1130. it to the list of session space working sets.
  1131. Arguments:
  1132. None.
  1133. Return Value:
  1134. STATUS_SUCCESS on success or STATUS_NO_MEMORY on failure.
  1135. Environment:
  1136. Kernel mode, APC_LEVEL or below, no mutexes held.
  1137. --*/
  1138. {
  1139. WSLE_NUMBER i;
  1140. ULONG MaximumEntries;
  1141. ULONG PageTableCost;
  1142. KIRQL OldIrql;
  1143. PMMPTE PointerPte;
  1144. PMMPTE PointerPde;
  1145. MMPTE TempPte;
  1146. PMMWSLE WslEntry;
  1147. PMMPFN Pfn1;
  1148. ULONG PageColor;
  1149. PFN_NUMBER ResidentPages;
  1150. PFN_NUMBER PageFrameIndex;
  1151. WSLE_NUMBER CurrentEntry;
  1152. WSLE_NUMBER NumberOfEntriesMapped;
  1153. ULONG_PTR AdditionalBytes;
  1154. WSLE_NUMBER NumberOfEntriesMappedByFirstPage;
  1155. ULONG WorkingSetMaximum;
  1156. PMM_SESSION_SPACE SessionGlobal;
  1157. LOGICAL AllocatedPageTable;
  1158. PMMWSL WorkingSetList;
  1159. #if (_MI_PAGING_LEVELS < 3)
  1160. ULONG Index;
  1161. #endif
  1162. //
  1163. // Use the global address for pointer references by
  1164. // MmWorkingSetManager before it attaches to the address space.
  1165. //
  1166. SessionGlobal = SESSION_GLOBAL (MmSessionSpace);
  1167. //
  1168. // Set up the working set variables.
  1169. //
  1170. WorkingSetMaximum = MI_SESSION_SPACE_WORKING_SET_MAXIMUM;
  1171. WorkingSetList = (PMMWSL) MiSessionSpaceWs;
  1172. MmSessionSpace->Vm.VmWorkingSetList = WorkingSetList;
  1173. #if (_MI_PAGING_LEVELS >= 3)
  1174. MmSessionSpace->Wsle = (PMMWSLE) (WorkingSetList + 1);
  1175. #else
  1176. MmSessionSpace->Wsle = (PMMWSLE) (&WorkingSetList->UsedPageTableEntries[0]);
  1177. #endif
  1178. ASSERT (MmSessionSpace->WorkingSetLockOwner == NULL);
  1179. //
  1180. // Build the PDE entry for the working set - note that the global bit
  1181. // must be turned off.
  1182. //
  1183. PointerPde = MiGetPdeAddress (WorkingSetList);
  1184. //
  1185. // The page table page for the working set and its first data page
  1186. // are charged against MmResidentAvailablePages and for commitment.
  1187. //
  1188. if (PointerPde->u.Hard.Valid == 1) {
  1189. //
  1190. // The page directory entry for the working set is the same
  1191. // as for another range in the session space. Share the PDE.
  1192. //
  1193. #ifndef _IA64_
  1194. ASSERT (PointerPde->u.Hard.Global == 0);
  1195. #endif
  1196. AllocatedPageTable = FALSE;
  1197. ResidentPages = 1;
  1198. }
  1199. else {
  1200. AllocatedPageTable = TRUE;
  1201. ResidentPages = 2;
  1202. }
  1203. PointerPte = MiGetPteAddress (WorkingSetList);
  1204. //
  1205. // The data pages needed to map up to maximum working set size are also
  1206. // charged against MmResidentAvailablePages and for commitment.
  1207. //
  1208. NumberOfEntriesMappedByFirstPage = (WSLE_NUMBER)(
  1209. ((PMMWSLE)((ULONG_PTR)WorkingSetList + PAGE_SIZE)) -
  1210. MmSessionSpace->Wsle);
  1211. if (WorkingSetMaximum > NumberOfEntriesMappedByFirstPage) {
  1212. AdditionalBytes = (WorkingSetMaximum - NumberOfEntriesMappedByFirstPage) * sizeof (MMWSLE);
  1213. ResidentPages += BYTES_TO_PAGES (AdditionalBytes);
  1214. }
  1215. if (MiChargeCommitment (ResidentPages, NULL) == FALSE) {
  1216. #if DBG
  1217. DbgPrint("MiSessionInitializeWorkingSetList: No commit for %d pages\n",
  1218. ResidentPages);
  1219. #endif
  1220. MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_COMMIT);
  1221. return STATUS_NO_MEMORY;
  1222. }
  1223. //
  1224. // Use the global address for resources since they are linked
  1225. // into the global system wide resource list.
  1226. //
  1227. ExInitializeResourceLite (&SessionGlobal->WsLock);
  1228. MmLockPagableSectionByHandle (ExPageLockHandle);
  1229. LOCK_PFN (OldIrql);
  1230. //
  1231. // Check to make sure the physical pages are available.
  1232. //
  1233. if ((SPFN_NUMBER)ResidentPages > MI_NONPAGABLE_MEMORY_AVAILABLE() - 20) {
  1234. #if DBG
  1235. DbgPrint("MiSessionInitializeWorkingSetList: No Resident Pages %d, Need %d\n",
  1236. MmResidentAvailablePages,
  1237. ResidentPages);
  1238. #endif
  1239. UNLOCK_PFN (OldIrql);
  1240. MmUnlockPagableImageSection (ExPageLockHandle);
  1241. MiReturnCommitment (ResidentPages);
  1242. ExDeleteResourceLite (&SessionGlobal->WsLock);
  1243. MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_RESIDENT);
  1244. return STATUS_NO_MEMORY;
  1245. }
  1246. MM_TRACK_COMMIT (MM_DBG_COMMIT_SESSION_WS_INIT, ResidentPages);
  1247. MmResidentAvailablePages -= ResidentPages;
  1248. MM_BUMP_COUNTER(50, ResidentPages);
  1249. if (AllocatedPageTable == TRUE) {
  1250. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_WS_PAGETABLE_ALLOC, 1);
  1251. MiEnsureAvailablePageOrWait (NULL, NULL);
  1252. PageColor = MI_GET_PAGE_COLOR_FROM_VA (NULL);
  1253. PageFrameIndex = MiRemoveZeroPageMayReleaseLocks (PageColor, OldIrql);
  1254. //
  1255. // The global bit is masked off since we need to make sure the TB entry
  1256. // is flushed when we switch to a process in a different session space.
  1257. //
  1258. TempPte.u.Long = ValidKernelPdeLocal.u.Long;
  1259. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1260. MI_WRITE_VALID_PTE (PointerPde, TempPte);
  1261. #if (_MI_PAGING_LEVELS < 3)
  1262. //
  1263. // Add this to the session structure so other processes can fault it in.
  1264. //
  1265. Index = MiGetPdeSessionIndex (WorkingSetList);
  1266. MmSessionSpace->PageTables[Index] = TempPte;
  1267. #endif
  1268. //
  1269. // This page frame references the session space page table page.
  1270. //
  1271. MiInitializePfnForOtherProcess (PageFrameIndex,
  1272. PointerPde,
  1273. MmSessionSpace->SessionPageDirectoryIndex);
  1274. MiFillMemoryPte (PointerPte, PAGE_SIZE, ZeroKernelPte.u.Long);
  1275. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1276. //
  1277. // This page is never paged, ensure that its WsIndex stays clear so the
  1278. // release of the page will be handled correctly.
  1279. //
  1280. ASSERT (Pfn1->u1.WsIndex == 0);
  1281. KeFillEntryTb ((PHARDWARE_PTE) PointerPde, PointerPte, FALSE);
  1282. }
  1283. MiEnsureAvailablePageOrWait (NULL, NULL);
  1284. PageColor = MI_GET_PAGE_COLOR_FROM_VA (NULL);
  1285. PageFrameIndex = MiRemoveZeroPageIfAny (PageColor);
  1286. if (PageFrameIndex == 0) {
  1287. PageFrameIndex = MiRemoveAnyPage (PageColor);
  1288. UNLOCK_PFN (OldIrql);
  1289. MiZeroPhysicalPage (PageFrameIndex, PageColor);
  1290. LOCK_PFN (OldIrql);
  1291. }
  1292. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_WS_PAGE_ALLOC, (ULONG)(ResidentPages - 1));
  1293. #if DBG
  1294. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1295. ASSERT (Pfn1->u1.WsIndex == 0);
  1296. #endif
  1297. //
  1298. // The global bit is masked off since we need to make sure the TB entry
  1299. // is flushed when we switch to a process in a different session space.
  1300. //
  1301. TempPte.u.Long = ValidKernelPteLocal.u.Long;
  1302. MI_SET_PTE_DIRTY (TempPte);
  1303. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1304. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  1305. MiInitializePfn (PageFrameIndex, PointerPte, 1);
  1306. #if DBG
  1307. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1308. ASSERT (Pfn1->u1.WsIndex == 0);
  1309. #endif
  1310. UNLOCK_PFN (OldIrql);
  1311. KeFillEntryTb ((PHARDWARE_PTE) PointerPte,
  1312. (PMMPTE)WorkingSetList,
  1313. FALSE);
  1314. #define MI_INITIALIZE_SESSION_WSLE(_VirtualAddress, _WslEntry) { \
  1315. PMMPFN _Pfn1; \
  1316. _WslEntry->u1.VirtualAddress = (PVOID)(_VirtualAddress); \
  1317. _WslEntry->u1.e1.Valid = 1; \
  1318. _WslEntry->u1.e1.LockedInWs = 1; \
  1319. _WslEntry->u1.e1.Direct = 1; \
  1320. _Pfn1 = MI_PFN_ELEMENT (MiGetPteAddress ((PVOID)(_VirtualAddress))->u.Hard.PageFrameNumber); \
  1321. ASSERT (_Pfn1->u1.WsIndex == 0); \
  1322. _Pfn1->u1.WsIndex = (WSLE_NUMBER)(_WslEntry - MmSessionSpace->Wsle); \
  1323. (_WslEntry) += 1; \
  1324. }
  1325. //
  1326. // Fill in the reserved slots starting with the 2 session data pages.
  1327. //
  1328. WslEntry = MmSessionSpace->Wsle;
  1329. MI_INITIALIZE_SESSION_WSLE (MmSessionSpace, WslEntry);
  1330. MI_INITIALIZE_SESSION_WSLE ((ULONG_PTR)MmSessionSpace + PAGE_SIZE, WslEntry);
  1331. //
  1332. // The next reserved slot is for the page table page mapping
  1333. // the session data page.
  1334. //
  1335. MI_INITIALIZE_SESSION_WSLE (MiGetPteAddress (MmSessionSpace), WslEntry);
  1336. //
  1337. // The next reserved slot is for the working set page.
  1338. //
  1339. MI_INITIALIZE_SESSION_WSLE (WorkingSetList, WslEntry);
  1340. if (AllocatedPageTable == TRUE) {
  1341. //
  1342. // The next reserved slot is for the page table page
  1343. // mapping the working set page.
  1344. //
  1345. MI_INITIALIZE_SESSION_WSLE (PointerPte, WslEntry);
  1346. }
  1347. //
  1348. // The next reserved slot is for the page table page
  1349. // mapping the first session paged pool page.
  1350. //
  1351. MI_INITIALIZE_SESSION_WSLE (MiGetPteAddress (MmSessionSpace->PagedPoolStart), WslEntry);
  1352. CurrentEntry = (WSLE_NUMBER)(WslEntry - MmSessionSpace->Wsle);
  1353. MmSessionSpace->Vm.Flags.SessionSpace = 1;
  1354. MmSessionSpace->Vm.MinimumWorkingSetSize = MI_SESSION_SPACE_WORKING_SET_MINIMUM;
  1355. MmSessionSpace->Vm.MaximumWorkingSetSize = WorkingSetMaximum;
  1356. //
  1357. // Don't trim from this session till we're finished setting up and
  1358. // it's got some pages in it...
  1359. //
  1360. MmSessionSpace->Vm.Flags.AllowWorkingSetAdjustment = FALSE;
  1361. WorkingSetList->LastEntry = MI_SESSION_SPACE_WORKING_SET_MINIMUM;
  1362. WorkingSetList->HashTable = NULL;
  1363. WorkingSetList->HashTableSize = 0;
  1364. WorkingSetList->Wsle = MmSessionSpace->Wsle;
  1365. //
  1366. // Calculate the maximum number of entries dynamically as the size of
  1367. // session space is registry configurable. Then add in page table and
  1368. // page directory overhead.
  1369. //
  1370. MaximumEntries = (ULONG)((MiSessionSpaceEnd - MmSessionBase) >> PAGE_SHIFT);
  1371. PageTableCost = MaximumEntries / PTE_PER_PAGE + 1;
  1372. MaximumEntries += PageTableCost;
  1373. WorkingSetList->HashTableStart =
  1374. (PVOID)((PCHAR)PAGE_ALIGN (&MmSessionSpace->Wsle[MaximumEntries]) + PAGE_SIZE);
  1375. #if defined (_X86PAE_)
  1376. //
  1377. // One less page table page is needed on PAE systems as the session
  1378. // working set structures easily fit within 2MB.
  1379. //
  1380. WorkingSetList->HighestPermittedHashAddress =
  1381. (PVOID)(MiSessionImageStart - MM_VA_MAPPED_BY_PDE);
  1382. #else
  1383. WorkingSetList->HighestPermittedHashAddress =
  1384. (PVOID)(MiSessionImageStart - MI_SESSION_SPACE_STRUCT_SIZE);
  1385. #endif
  1386. NumberOfEntriesMapped = (WSLE_NUMBER)(((PMMWSLE)((ULONG_PTR)WorkingSetList +
  1387. PAGE_SIZE)) - MmSessionSpace->Wsle);
  1388. while (NumberOfEntriesMapped < WorkingSetMaximum) {
  1389. if (MiChargeCommitment (1, NULL) == FALSE) {
  1390. break;
  1391. }
  1392. MM_TRACK_COMMIT (MM_DBG_COMMIT_EXTRA_INITIAL_SESSION_WS_PAGES, 1);
  1393. PointerPte += 1;
  1394. PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  1395. LOCK_PFN (OldIrql);
  1396. MiEnsureAvailablePageOrWait (NULL, NULL);
  1397. PageFrameIndex = MiRemoveZeroPage(MI_GET_PAGE_COLOR_FROM_VA (NULL));
  1398. MiInitializePfn (PageFrameIndex, PointerPte, 1);
  1399. UNLOCK_PFN (OldIrql);
  1400. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  1401. MI_SET_PTE_IN_WORKING_SET (&TempPte, CurrentEntry);
  1402. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  1403. MI_INITIALIZE_SESSION_WSLE (MiGetVirtualAddressMappedByPte (PointerPte),
  1404. WslEntry);
  1405. CurrentEntry += 1;
  1406. NumberOfEntriesMapped += PAGE_SIZE / sizeof(MMWSLE);
  1407. }
  1408. MmSessionSpace->Vm.WorkingSetSize = CurrentEntry;
  1409. WorkingSetList->FirstFree = CurrentEntry;
  1410. WorkingSetList->FirstDynamic = CurrentEntry;
  1411. WorkingSetList->NextSlot = CurrentEntry;
  1412. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_INIT_WS, (ULONG)ResidentPages);
  1413. MmSessionSpace->NonPagablePages += ResidentPages;
  1414. InterlockedExchangeAddSizeT (&MmSessionSpace->CommittedPages,
  1415. ResidentPages);
  1416. //
  1417. // Initialize the following slots as free.
  1418. //
  1419. WslEntry = MmSessionSpace->Wsle + CurrentEntry;
  1420. for (i = CurrentEntry + 1; i < NumberOfEntriesMapped; i += 1) {
  1421. //
  1422. // Build the free list, note that the first working
  1423. // set entries (CurrentEntry) are not on the free list.
  1424. // These entries are reserved for the pages which
  1425. // map the working set and the page which contains the PDE.
  1426. //
  1427. WslEntry->u1.Long = i << MM_FREE_WSLE_SHIFT;
  1428. WslEntry += 1;
  1429. }
  1430. WslEntry->u1.Long = WSLE_NULL_INDEX << MM_FREE_WSLE_SHIFT; // End of list.
  1431. WorkingSetList->LastInitializedWsle = NumberOfEntriesMapped - 1;
  1432. if (WorkingSetMaximum > ((1536*1024) >> PAGE_SHIFT)) {
  1433. //
  1434. // The working set list consists of more than a single page.
  1435. //
  1436. MiGrowWsleHash (&MmSessionSpace->Vm);
  1437. }
  1438. //
  1439. // Put this session's working set in lists using its global address.
  1440. //
  1441. LOCK_EXPANSION (OldIrql);
  1442. InsertTailList (&MiSessionWsList, &SessionGlobal->WsListEntry);
  1443. MmSessionSpace->u.Flags.HasWsLock = 1;
  1444. MmSessionSpace->u.Flags.SessionListInserted = 1;
  1445. UNLOCK_EXPANSION (OldIrql);
  1446. MmUnlockPagableImageSection (ExPageLockHandle);
  1447. return STATUS_SUCCESS;
  1448. }
  1449. LOGICAL
  1450. MmAssignProcessToJob (
  1451. IN PEPROCESS Process
  1452. )
  1453. /*++
  1454. Routine Description:
  1455. This routine acquires the address space mutex so a consistent snapshot of
  1456. the argument process' commit charges can be used by Ps when adding this
  1457. process to a job.
  1458. Note that the working set mutex is not acquired here so the argument
  1459. process' working set sizes cannot be reliably snapped by Ps, but since Ps
  1460. doesn't look at that anyway, it's not a problem.
  1461. Arguments:
  1462. Process - Supplies a pointer to the process to operate upon.
  1463. Return Value:
  1464. TRUE if the process is allowed to join the job, FALSE otherwise.
  1465. Note that FALSE cannot be returned without changing the code in Ps.
  1466. Environment:
  1467. Kernel mode, IRQL APC_LEVEL or below. The caller provides protection
  1468. from the target process going away.
  1469. --*/
  1470. {
  1471. LOGICAL Attached;
  1472. LOGICAL Status;
  1473. KAPC_STATE ApcState;
  1474. PAGED_CODE ();
  1475. Attached = FALSE;
  1476. if (PsGetCurrentProcess() != Process) {
  1477. KeStackAttachProcess (&Process->Pcb, &ApcState);
  1478. Attached = TRUE;
  1479. }
  1480. LOCK_ADDRESS_SPACE (Process);
  1481. Status = PsChangeJobMemoryUsage (Process->CommitCharge);
  1482. //
  1483. // Join the job unconditionally. If the process is over any limits, it
  1484. // will be caught on its next request.
  1485. //
  1486. Process->JobStatus |= PS_JOB_STATUS_REPORT_COMMIT_CHANGES;
  1487. UNLOCK_ADDRESS_SPACE (Process);
  1488. if (Attached) {
  1489. KeUnstackDetachProcess (&ApcState);
  1490. }
  1491. //
  1492. // Note that FALSE cannot be returned without changing the code in Ps.
  1493. //
  1494. return TRUE;
  1495. }
  1496. NTSTATUS
  1497. MmAdjustWorkingSetSize (
  1498. IN SIZE_T WorkingSetMinimumInBytes,
  1499. IN SIZE_T WorkingSetMaximumInBytes,
  1500. IN ULONG SystemCache,
  1501. IN BOOLEAN IncreaseOkay
  1502. )
  1503. /*++
  1504. Routine Description:
  1505. This routine adjusts the current size of a process's working set
  1506. list. If the new maximum value is below the current working set size, pages
  1507. are removed from the working set list.
  1508. A failure status is returned if the limit cannot be granted. This
  1509. could occur if too many pages were locked in the process's
  1510. working set.
  1511. Note: if the minimum and maximum are both (SIZE_T)-1, the working set
  1512. is purged, but the default sizes are not changed.
  1513. Arguments:
  1514. WorkingSetMinimumInBytes - Supplies the new minimum working set size in
  1515. bytes.
  1516. WorkingSetMaximumInBytes - Supplies the new maximum working set size in
  1517. bytes.
  1518. SystemCache - Supplies TRUE if the system cache working set is being
  1519. adjusted, FALSE for all other working sets.
  1520. Return Value:
  1521. NTSTATUS.
  1522. Environment:
  1523. Kernel mode, IRQL APC_LEVEL or below.
  1524. --*/
  1525. {
  1526. PEPROCESS CurrentProcess;
  1527. WSLE_NUMBER Entry;
  1528. WSLE_NUMBER LastFreed;
  1529. PMMWSLE Wsle;
  1530. KIRQL OldIrql;
  1531. KIRQL OldIrql2;
  1532. SPFN_NUMBER i;
  1533. PMMPTE PointerPte;
  1534. NTSTATUS ReturnStatus;
  1535. LONG PagesAbove;
  1536. LONG NewPagesAbove;
  1537. ULONG FreeTryCount;
  1538. PMMSUPPORT WsInfo;
  1539. PMMWSL WorkingSetList;
  1540. WSLE_NUMBER WorkingSetMinimum;
  1541. WSLE_NUMBER WorkingSetMaximum;
  1542. PERFINFO_PAGE_INFO_DECL();
  1543. FreeTryCount = 0;
  1544. if (SystemCache) {
  1545. //
  1546. // Initializing CurrentProcess is not needed for correctness, but
  1547. // without it the compiler cannot compile this code W4 to check
  1548. // for use of uninitialized variables.
  1549. //
  1550. CurrentProcess = NULL;
  1551. WsInfo = &MmSystemCacheWs;
  1552. }
  1553. else {
  1554. CurrentProcess = PsGetCurrentProcess ();
  1555. WsInfo = &CurrentProcess->Vm;
  1556. }
  1557. if ((WorkingSetMinimumInBytes == (SIZE_T)-1) &&
  1558. (WorkingSetMaximumInBytes == (SIZE_T)-1)) {
  1559. return MiEmptyWorkingSet (WsInfo, TRUE);
  1560. }
  1561. ReturnStatus = STATUS_SUCCESS;
  1562. MmLockPagableSectionByHandle(ExPageLockHandle);
  1563. //
  1564. // Get the working set lock and disable APCs.
  1565. //
  1566. if (SystemCache) {
  1567. LOCK_SYSTEM_WS (OldIrql2, PsGetCurrentThread ());
  1568. }
  1569. else {
  1570. //
  1571. // Initializing OldIrql2 is not needed for correctness, but
  1572. // without it the compiler cannot compile this code W4 to check
  1573. // for use of uninitialized variables.
  1574. //
  1575. OldIrql2 = PASSIVE_LEVEL;
  1576. LOCK_WS (CurrentProcess);
  1577. if (CurrentProcess->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
  1578. ReturnStatus = STATUS_PROCESS_IS_TERMINATING;
  1579. goto Returns;
  1580. }
  1581. }
  1582. if (WorkingSetMinimumInBytes == 0) {
  1583. WorkingSetMinimum = WsInfo->MinimumWorkingSetSize;
  1584. }
  1585. else {
  1586. WorkingSetMinimum = (WSLE_NUMBER)(WorkingSetMinimumInBytes >> PAGE_SHIFT);
  1587. }
  1588. if (WorkingSetMaximumInBytes == 0) {
  1589. WorkingSetMaximum = WsInfo->MaximumWorkingSetSize;
  1590. }
  1591. else {
  1592. WorkingSetMaximum = (WSLE_NUMBER)(WorkingSetMaximumInBytes >> PAGE_SHIFT);
  1593. }
  1594. if (WorkingSetMinimum > WorkingSetMaximum) {
  1595. ReturnStatus = STATUS_BAD_WORKING_SET_LIMIT;
  1596. goto Returns;
  1597. }
  1598. if (WorkingSetMaximum > MmMaximumWorkingSetSize) {
  1599. WorkingSetMaximum = MmMaximumWorkingSetSize;
  1600. ReturnStatus = STATUS_WORKING_SET_LIMIT_RANGE;
  1601. }
  1602. if (WorkingSetMinimum > MmMaximumWorkingSetSize) {
  1603. WorkingSetMinimum = MmMaximumWorkingSetSize;
  1604. ReturnStatus = STATUS_WORKING_SET_LIMIT_RANGE;
  1605. }
  1606. if (WorkingSetMinimum < MmMinimumWorkingSetSize) {
  1607. WorkingSetMinimum = (ULONG)MmMinimumWorkingSetSize;
  1608. ReturnStatus = STATUS_WORKING_SET_LIMIT_RANGE;
  1609. }
  1610. //
  1611. // Make sure that the number of locked pages will not
  1612. // make the working set not fluid.
  1613. //
  1614. if ((WsInfo->VmWorkingSetList->FirstDynamic + MM_FLUID_WORKING_SET) >=
  1615. WorkingSetMaximum) {
  1616. ReturnStatus = STATUS_BAD_WORKING_SET_LIMIT;
  1617. goto Returns;
  1618. }
  1619. WorkingSetList = WsInfo->VmWorkingSetList;
  1620. Wsle = WorkingSetList->Wsle;
  1621. //
  1622. // Check to make sure ample resident physical pages exist for
  1623. // this operation.
  1624. //
  1625. LOCK_PFN (OldIrql);
  1626. i = (SPFN_NUMBER)WorkingSetMinimum - (SPFN_NUMBER)WsInfo->MinimumWorkingSetSize;
  1627. if (i > 0) {
  1628. //
  1629. // New minimum working set is greater than the old one.
  1630. // Ensure that increasing is okay, and that we don't allow
  1631. // this process' working set minimum to increase to a point
  1632. // where subsequent nonpaged pool allocations could cause us
  1633. // to run out of pages. Additionally, leave 100 extra pages
  1634. // around so the user can later bring up tlist and kill
  1635. // processes if necessary.
  1636. //
  1637. if (IncreaseOkay == FALSE) {
  1638. UNLOCK_PFN (OldIrql);
  1639. ReturnStatus = STATUS_PRIVILEGE_NOT_HELD;
  1640. goto Returns;
  1641. }
  1642. if (MmAvailablePages < (20 + (i / (PAGE_SIZE / sizeof (MMWSLE))))) {
  1643. UNLOCK_PFN (OldIrql);
  1644. ReturnStatus = STATUS_INSUFFICIENT_RESOURCES;
  1645. goto Returns;
  1646. }
  1647. if (MI_NONPAGABLE_MEMORY_AVAILABLE() - 100 < i) {
  1648. UNLOCK_PFN (OldIrql);
  1649. ReturnStatus = STATUS_INSUFFICIENT_RESOURCES;
  1650. goto Returns;
  1651. }
  1652. }
  1653. //
  1654. // Adjust the number of resident pages up or down dependent on
  1655. // the size of the new minimum working set size versus the previous
  1656. // minimum size.
  1657. //
  1658. MmResidentAvailablePages -= i;
  1659. MM_BUMP_COUNTER(27, i);
  1660. UNLOCK_PFN (OldIrql);
  1661. if (WsInfo->Flags.AllowWorkingSetAdjustment == FALSE) {
  1662. MmAllowWorkingSetExpansion ();
  1663. }
  1664. if (WorkingSetMaximum > WorkingSetList->LastInitializedWsle) {
  1665. do {
  1666. //
  1667. // The maximum size of the working set is being increased, check
  1668. // to ensure the proper number of pages are mapped to cover
  1669. // the complete working set list.
  1670. //
  1671. if (!MiAddWorkingSetPage (WsInfo)) {
  1672. //
  1673. // Back out the increase to prevent the process from running
  1674. // into WSLE replacement crashes later if the process locks
  1675. // all the new pages into the working set and then cannot
  1676. // replace to make room for a new page and would instead have
  1677. // to add a working set page. Lack of commit at that point
  1678. // may prevent adding a working set page so deal with this
  1679. // now by backing out.
  1680. //
  1681. LOCK_PFN (OldIrql);
  1682. MmResidentAvailablePages += i;
  1683. MM_BUMP_COUNTER(27, 0-i);
  1684. UNLOCK_PFN (OldIrql);
  1685. ReturnStatus = STATUS_INSUFFICIENT_RESOURCES;
  1686. goto Returns;
  1687. }
  1688. } while (WorkingSetMaximum > WorkingSetList->LastInitializedWsle);
  1689. }
  1690. else {
  1691. //
  1692. // The new working set maximum is less than the current working set
  1693. // maximum.
  1694. //
  1695. if (WsInfo->WorkingSetSize > WorkingSetMaximum) {
  1696. //
  1697. // Remove some pages from the working set.
  1698. //
  1699. // Make sure that the number of locked pages will not
  1700. // make the working set not fluid.
  1701. //
  1702. if ((WorkingSetList->FirstDynamic + MM_FLUID_WORKING_SET) >=
  1703. WorkingSetMaximum) {
  1704. ReturnStatus = STATUS_BAD_WORKING_SET_LIMIT;
  1705. LOCK_PFN (OldIrql);
  1706. MmResidentAvailablePages += i;
  1707. MM_BUMP_COUNTER(54, i);
  1708. UNLOCK_PFN (OldIrql);
  1709. goto Returns;
  1710. }
  1711. //
  1712. // Attempt to remove the pages from the Maximum downward.
  1713. //
  1714. LastFreed = WorkingSetList->LastEntry;
  1715. if (WorkingSetList->LastEntry > WorkingSetMaximum) {
  1716. while (LastFreed >= WorkingSetMaximum) {
  1717. PointerPte = MiGetPteAddress(
  1718. Wsle[LastFreed].u1.VirtualAddress);
  1719. PERFINFO_GET_PAGE_INFO(PointerPte);
  1720. if ((Wsle[LastFreed].u1.e1.Valid != 0) &&
  1721. (!MiFreeWsle (LastFreed,
  1722. WsInfo,
  1723. PointerPte))) {
  1724. //
  1725. // This LastFreed could not be removed.
  1726. //
  1727. break;
  1728. }
  1729. PERFINFO_LOG_WS_REMOVAL(PERFINFO_LOG_TYPE_OUTWS_ADJUSTWS, WsInfo);
  1730. LastFreed -= 1;
  1731. }
  1732. WorkingSetList->LastEntry = LastFreed;
  1733. }
  1734. //
  1735. // Remove pages.
  1736. //
  1737. Entry = WorkingSetList->FirstDynamic;
  1738. while (WsInfo->WorkingSetSize > WorkingSetMaximum) {
  1739. if (Wsle[Entry].u1.e1.Valid != 0) {
  1740. PointerPte = MiGetPteAddress (
  1741. Wsle[Entry].u1.VirtualAddress);
  1742. PERFINFO_GET_PAGE_INFO(PointerPte);
  1743. if (MiFreeWsle(Entry, WsInfo, PointerPte)) {
  1744. PERFINFO_LOG_WS_REMOVAL(PERFINFO_LOG_TYPE_OUTWS_ADJUSTWS,
  1745. WsInfo);
  1746. }
  1747. }
  1748. Entry += 1;
  1749. if (Entry > LastFreed) {
  1750. FreeTryCount += 1;
  1751. if (FreeTryCount > MM_RETRY_COUNT) {
  1752. //
  1753. // Page table pages are not becoming free, give up
  1754. // and return an error.
  1755. //
  1756. ReturnStatus = STATUS_BAD_WORKING_SET_LIMIT;
  1757. break;
  1758. }
  1759. Entry = WorkingSetList->FirstDynamic;
  1760. }
  1761. }
  1762. }
  1763. }
  1764. //
  1765. // Adjust the number of pages above the working set minimum.
  1766. //
  1767. PagesAbove = (LONG)WsInfo->WorkingSetSize -
  1768. (LONG)WsInfo->MinimumWorkingSetSize;
  1769. NewPagesAbove = (LONG)WsInfo->WorkingSetSize -
  1770. (LONG)WorkingSetMinimum;
  1771. LOCK_PFN (OldIrql);
  1772. if (PagesAbove > 0) {
  1773. MmPagesAboveWsMinimum -= (ULONG)PagesAbove;
  1774. }
  1775. if (NewPagesAbove > 0) {
  1776. MmPagesAboveWsMinimum += (ULONG)NewPagesAbove;
  1777. }
  1778. if (FreeTryCount <= MM_RETRY_COUNT) {
  1779. UNLOCK_PFN (OldIrql);
  1780. WsInfo->MaximumWorkingSetSize = WorkingSetMaximum;
  1781. WsInfo->MinimumWorkingSetSize = WorkingSetMinimum;
  1782. }
  1783. else {
  1784. MmResidentAvailablePages += i;
  1785. MM_BUMP_COUNTER(55, i);
  1786. UNLOCK_PFN (OldIrql);
  1787. }
  1788. ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
  1789. (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
  1790. if ((WorkingSetList->HashTable == NULL) &&
  1791. (WsInfo->MaximumWorkingSetSize > ((1536*1024) >> PAGE_SHIFT))) {
  1792. //
  1793. // The working set list consists of more than a single page.
  1794. //
  1795. MiGrowWsleHash (WsInfo);
  1796. }
  1797. Returns:
  1798. if (SystemCache) {
  1799. UNLOCK_SYSTEM_WS (OldIrql2);
  1800. }
  1801. else {
  1802. UNLOCK_WS (CurrentProcess);
  1803. }
  1804. MmUnlockPagableImageSection (ExPageLockHandle);
  1805. return ReturnStatus;
  1806. }
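//
// Editorial illustration (not part of the original source): a minimal
// sketch of how a caller might use MmAdjustWorkingSetSize. The 4MB/8MB
// limits are arbitrary example values and MiExampleAdjustCurrentWs is a
// hypothetical helper; SystemCache is FALSE so the current process
// working set is adjusted, and IncreaseOkay is TRUE so the minimum may
// be raised.
//
NTSTATUS
MiExampleAdjustCurrentWs (
    VOID
    )
{
    NTSTATUS Status;

    //
    // Request a 4MB minimum and an 8MB maximum. Sizes are supplied in
    // bytes and converted to pages internally via PAGE_SHIFT.
    //

    Status = MmAdjustWorkingSetSize (4 * 1024 * 1024,
                                     8 * 1024 * 1024,
                                     FALSE,
                                     TRUE);

    if (!NT_SUCCESS (Status)) {
        return Status;
    }

    //
    // Passing (SIZE_T)-1 for both limits purges the working set without
    // changing the default sizes.
    //

    return MmAdjustWorkingSetSize ((SIZE_T)-1, (SIZE_T)-1, FALSE, TRUE);
}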
  1807. #define MI_ALLOCATED_PAGE_TABLE 0x1
  1808. #define MI_ALLOCATED_PAGE_DIRECTORY 0x2
  1809. ULONG
  1810. MiAddWorkingSetPage (
  1811. IN PMMSUPPORT WsInfo
  1812. )
  1813. /*++
  1814. Routine Description:
  1815. This function grows the working set list above working set
  1816. maximum during working set adjustment. At most one page
  1817. can be added at a time.
  1818. Arguments:
  1819. WsInfo - Supplies a pointer to the working set info block for the
  working set whose list is to be grown.
  1820. Return Value:
  1821. Returns TRUE if a working set page was added, FALSE if one could not be.
  1822. Environment:
  1823. Kernel mode, APCs disabled, working set mutex held.
  1824. --*/
  1825. {
  1826. WSLE_NUMBER SwapEntry;
  1827. WSLE_NUMBER CurrentEntry;
  1828. PMMWSLE WslEntry;
  1829. WSLE_NUMBER i;
  1830. PMMPTE PointerPte;
  1831. PMMPTE Va;
  1832. MMPTE TempPte;
  1833. WSLE_NUMBER NumberOfEntriesMapped;
  1834. PFN_NUMBER WorkingSetPage;
  1835. WSLE_NUMBER WorkingSetIndex;
  1836. PMMWSL WorkingSetList;
  1837. PMMWSLE Wsle;
  1838. PMMPFN Pfn1;
  1839. KIRQL OldIrql;
  1840. ULONG PageTablePageAllocated;
  1841. LOGICAL PfnHeld;
  1842. ULONG NumberOfPages;
  1843. #if (_MI_PAGING_LEVELS >= 3)
  1844. PVOID VirtualAddress;
  1845. PMMPTE PointerPde;
  1846. #endif
  1847. #if (_MI_PAGING_LEVELS >= 4)
  1848. PMMPTE PointerPpe;
  1849. #endif
  1850. //
  1851. // Initializing OldIrql is not needed for correctness, but
  1852. // without it the compiler cannot compile this code W4 to check
  1853. // for use of uninitialized variables.
  1854. //
  1855. OldIrql = PASSIVE_LEVEL;
  1856. WorkingSetList = WsInfo->VmWorkingSetList;
  1857. Wsle = WorkingSetList->Wsle;
  1858. #if DBG
  1859. if (WsInfo == &MmSystemCacheWs) {
  1860. MM_SYSTEM_WS_LOCK_ASSERT();
  1861. }
  1862. #endif
  1863. //
  1864. // The maximum size of the working set is being increased, check
  1865. // to ensure the proper number of pages are mapped to cover
  1866. // the complete working set list.
  1867. //
  1868. PointerPte = MiGetPteAddress (&Wsle[WorkingSetList->LastInitializedWsle]);
  1869. ASSERT (PointerPte->u.Hard.Valid == 1);
  1870. PointerPte += 1;
  1871. Va = (PMMPTE)MiGetVirtualAddressMappedByPte (PointerPte);
  1872. if ((PVOID)Va >= WorkingSetList->HashTableStart) {
  1873. //
  1874. // Adding this entry would overrun the hash table. The caller
  1875. // must replace instead.
  1876. //
  1877. return FALSE;
  1878. }
  1879. //
  1880. // Ensure enough commitment is available prior to acquiring pages.
  1881. // Excess is released after the pages are acquired.
  1882. //
  1883. if (MiChargeCommitmentCantExpand (_MI_PAGING_LEVELS - 1, FALSE) == FALSE) {
  1884. return FALSE;
  1885. }
  1886. MM_TRACK_COMMIT (MM_DBG_COMMIT_SESSION_ADDITIONAL_WS_PAGES, _MI_PAGING_LEVELS - 1);
  1887. PageTablePageAllocated = 0;
  1888. PfnHeld = FALSE;
  1889. NumberOfPages = 0;
  1890. //
  1891. // The PPE is guaranteed to always be resident for architectures using
  1892. // 3 level lookup. This is because the hash table PPE immediately
  1893. // follows the working set PPE.
  1894. //
  1895. // For x86 PAE the same paradigm holds in guaranteeing that the PDE is
  1896. // always resident.
  1897. //
  1898. // x86 non-PAE uses the same PDE and hence it also guarantees PDE residency.
  1899. //
  1900. // Architectures employing 4 level lookup use a single PXE for this, but
  1901. // each PPE must be checked.
  1902. //
  1903. // All architectures must check for page table page residency.
  1904. //
  1905. #if (_MI_PAGING_LEVELS >= 4)
  1906. //
  1907. // Allocate a PPE if one is needed.
  1908. //
  1909. PointerPpe = MiGetPdeAddress (PointerPte);
  1910. if (PointerPpe->u.Hard.Valid == 0) {
  1911. ASSERT (WsInfo->Flags.SessionSpace == 0);
  1912. //
  1913. // Map in a new page directory for the working set expansion.
  1914. // Continue holding the PFN lock until the entire hierarchy is
  1915. // allocated. This eliminates error recovery which would be needed
  1916. // if the lock was released and then when reacquired it is discovered
  1917. // that one of the pages cannot be allocated.
  1918. //
  1919. PfnHeld = TRUE;
  1920. LOCK_PFN (OldIrql);
  1921. if (MmAvailablePages < 21) {
  1922. //
  1923. // Not enough pages are available. Return the commitment charged
  1924. // above and fail the expansion.
  1925. //
  1926. UNLOCK_PFN (OldIrql);
  1927. MiReturnCommitment (_MI_PAGING_LEVELS - 1 - NumberOfPages);
  1928. MM_TRACK_COMMIT_REDUCTION (MM_DBG_COMMIT_SESSION_ADDITIONAL_WS_PAGES,
  1929. _MI_PAGING_LEVELS - 1 - NumberOfPages);
  1930. return FALSE;
  1931. }
  1932. PageTablePageAllocated |= MI_ALLOCATED_PAGE_DIRECTORY;
  1933. WorkingSetPage = MiRemoveZeroPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPpe));
  1934. PointerPpe->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  1935. MiInitializePfn (WorkingSetPage, PointerPpe, 1);
  1936. MI_MAKE_VALID_PTE (TempPte,
  1937. WorkingSetPage,
  1938. MM_READWRITE,
  1939. PointerPpe);
  1940. MI_SET_PTE_DIRTY (TempPte);
  1941. MI_WRITE_VALID_PTE (PointerPpe, TempPte);
  1942. NumberOfPages += 1;
  1943. }
  1944. #endif
  1945. #if (_MI_PAGING_LEVELS >= 3)
  1946. //
  1947. // Map in a new page table (if needed) for the working set expansion.
  1948. //
  1949. PointerPde = MiGetPteAddress (PointerPte);
  1950. if (PointerPde->u.Hard.Valid == 0) {
  1951. PageTablePageAllocated |= MI_ALLOCATED_PAGE_TABLE;
  1952. if (PfnHeld == FALSE) {
  1953. PfnHeld = TRUE;
  1954. LOCK_PFN (OldIrql);
  1955. if (MmAvailablePages < 21) {
  1956. //
  1957. // Not enough pages are available. Return the commitment charged
  1958. // above and fail the expansion.
  1959. //
  1960. UNLOCK_PFN (OldIrql);
  1961. MiReturnCommitment (_MI_PAGING_LEVELS - 1 - NumberOfPages);
  1962. MM_TRACK_COMMIT_REDUCTION (MM_DBG_COMMIT_SESSION_ADDITIONAL_WS_PAGES,
  1963. _MI_PAGING_LEVELS - 1 - NumberOfPages);
  1964. return FALSE;
  1965. }
  1966. }
  1967. else {
  1968. ASSERT (MmAvailablePages >= 20);
  1969. }
  1970. WorkingSetPage = MiRemoveZeroPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPde));
  1971. PointerPde->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  1972. MiInitializePfn (WorkingSetPage, PointerPde, 1);
  1973. MI_MAKE_VALID_PTE (TempPte,
  1974. WorkingSetPage,
  1975. MM_READWRITE,
  1976. PointerPde);
  1977. MI_SET_PTE_DIRTY (TempPte);
  1978. MI_WRITE_VALID_PTE (PointerPde, TempPte);
  1979. NumberOfPages += 1;
  1980. }
  1981. #endif
  1982. ASSERT (PointerPte->u.Hard.Valid == 0);
  1983. //
  1984. // Finally allocate and map the actual working set page now. The PFN lock
  1985. // is only held if another page in the hierarchy needed to be allocated.
  1986. //
  1987. // Further down in this routine (once an actual working set page
  1988. // has been allocated) the quota will be increased by 1 to reflect
  1989. // the working set size entry for the new page directory page.
  1990. // The page directory page will be put in a working set entry which will
  1991. // be locked into the working set.
  1992. //
  1993. if (PfnHeld == FALSE) {
  1994. LOCK_PFN (OldIrql);
  1995. if (MmAvailablePages < 21) {
  1996. //
  1997. // Not enough pages are available. Return the commitment charged
  1998. // above and fail the expansion.
  1999. //
  2000. UNLOCK_PFN (OldIrql);
  2001. MiReturnCommitment (_MI_PAGING_LEVELS - 1 - NumberOfPages);
  2002. MM_TRACK_COMMIT_REDUCTION (MM_DBG_COMMIT_SESSION_ADDITIONAL_WS_PAGES,
  2003. _MI_PAGING_LEVELS - 1 - NumberOfPages);
  2004. return FALSE;
  2005. }
  2006. }
  2007. else {
  2008. ASSERT (MmAvailablePages >= 19);
  2009. }
  2010. WorkingSetPage = MiRemoveZeroPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
  2011. PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  2012. MiInitializePfn (WorkingSetPage, PointerPte, 1);
  2013. NumberOfPages += 1;
  2014. //
  2015. // Apply any resident available charges now before releasing the PFN lock.
  2016. //
  2017. if (WsInfo->Flags.SessionSpace == 1) {
  2018. MM_BUMP_COUNTER (48, 1);
  2019. MmResidentAvailablePages -= NumberOfPages;
  2020. }
  2021. UNLOCK_PFN (OldIrql);
  2022. if (_MI_PAGING_LEVELS - 1 - NumberOfPages != 0) {
  2023. MiReturnCommitment (_MI_PAGING_LEVELS - 1 - NumberOfPages);
  2024. MM_TRACK_COMMIT_REDUCTION (MM_DBG_COMMIT_SESSION_ADDITIONAL_WS_PAGES,
  2025. _MI_PAGING_LEVELS - 1 - NumberOfPages);
  2026. }
  2027. MI_MAKE_VALID_PTE (TempPte, WorkingSetPage, MM_READWRITE, PointerPte);
  2028. MI_SET_PTE_DIRTY (TempPte);
  2029. MI_WRITE_VALID_PTE (PointerPte, TempPte);
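//
// Compute how many WSLEs are mapped now that the new page is valid -
// i.e. the number of entries from the start of the WSLE array through
// the end of the page just mapped at Va.
//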
  2030. NumberOfEntriesMapped = (WSLE_NUMBER)(((PMMWSLE)((PCHAR)Va + PAGE_SIZE)) - Wsle);
  2031. if (WsInfo->Flags.SessionSpace == 1) {
  2032. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_WS_GROW, NumberOfPages);
  2033. MmSessionSpace->NonPagablePages += NumberOfPages;
  2034. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_WS_PAGE_ALLOC_GROWTH, NumberOfPages);
  2035. InterlockedExchangeAddSizeT (&MmSessionSpace->CommittedPages,
  2036. NumberOfPages);
  2037. }
  2038. CurrentEntry = WorkingSetList->LastInitializedWsle + 1;
  2039. ASSERT (NumberOfEntriesMapped > CurrentEntry);
  2040. WslEntry = &Wsle[CurrentEntry - 1];
  2041. for (i = CurrentEntry; i < NumberOfEntriesMapped; i += 1) {
  2042. //
  2043. // Build the free list, note that the first working
  2044. // set entries (CurrentEntry) are not on the free list.
  2045. // These entries are reserved for the pages which
  2046. // map the working set and the page which contains the PDE.
  2047. //
  2048. WslEntry += 1;
  2049. WslEntry->u1.Long = (i + 1) << MM_FREE_WSLE_SHIFT;
  2050. }
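//
// Link the tail of the newly built free chain to the previous head of
// the free list.
//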
  2051. WslEntry->u1.Long = WorkingSetList->FirstFree << MM_FREE_WSLE_SHIFT;
  2052. ASSERT (CurrentEntry >= WorkingSetList->FirstDynamic);
  2053. WorkingSetList->FirstFree = CurrentEntry;
  2054. WorkingSetList->LastInitializedWsle = (NumberOfEntriesMapped - 1);
  2055. ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
  2056. (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
  2057. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  2058. Pfn1->u1.Event = (PVOID)PsGetCurrentThread();
  2059. //
  2060. // Get a working set entry.
  2061. //
  2062. WsInfo->WorkingSetSize += 1;
  2063. ASSERT (WorkingSetList->FirstFree != WSLE_NULL_INDEX);
  2064. ASSERT (WorkingSetList->FirstFree >= WorkingSetList->FirstDynamic);
  2065. WorkingSetIndex = WorkingSetList->FirstFree;
  2066. WorkingSetList->FirstFree = (WSLE_NUMBER)(Wsle[WorkingSetIndex].u1.Long >> MM_FREE_WSLE_SHIFT);
  2067. ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
  2068. (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
  2069. if (WsInfo->WorkingSetSize > WsInfo->MinimumWorkingSetSize) {
  2070. MmPagesAboveWsMinimum += 1;
  2071. }
  2072. if (WorkingSetIndex > WorkingSetList->LastEntry) {
  2073. WorkingSetList->LastEntry = WorkingSetIndex;
  2074. }
  2075. MiUpdateWsle (&WorkingSetIndex, Va, WorkingSetList, Pfn1);
  2076. MI_SET_PTE_IN_WORKING_SET (PointerPte, WorkingSetIndex);
  2077. //
  2078. // Lock any created page table pages into the working set.
  2079. //
  2080. if (WorkingSetIndex >= WorkingSetList->FirstDynamic) {
  2081. SwapEntry = WorkingSetList->FirstDynamic;
  2082. if (WorkingSetIndex != WorkingSetList->FirstDynamic) {
  2083. //
  2084. // Swap this entry with the one at first dynamic.
  2085. //
  2086. MiSwapWslEntries (WorkingSetIndex, SwapEntry, WsInfo);
  2087. }
  2088. WorkingSetList->FirstDynamic += 1;
  2089. Wsle[SwapEntry].u1.e1.LockedInWs = 1;
  2090. ASSERT (Wsle[SwapEntry].u1.e1.Valid == 1);
  2091. }
  2092. #if (_MI_PAGING_LEVELS >= 3)
  2093. while (PageTablePageAllocated != 0) {
  2094. if (PageTablePageAllocated & MI_ALLOCATED_PAGE_TABLE) {
  2095. PageTablePageAllocated &= ~MI_ALLOCATED_PAGE_TABLE;
  2096. Pfn1 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
  2097. VirtualAddress = PointerPte;
  2098. }
  2099. #if (_MI_PAGING_LEVELS >= 4)
  2100. else if (PageTablePageAllocated & MI_ALLOCATED_PAGE_DIRECTORY) {
  2101. PageTablePageAllocated &= ~MI_ALLOCATED_PAGE_DIRECTORY;
  2102. Pfn1 = MI_PFN_ELEMENT (PointerPpe->u.Hard.PageFrameNumber);
  2103. VirtualAddress = PointerPde;
  2104. }
  2105. #endif
  2106. else {
  2107. ASSERT (FALSE);
  2108. //
  2109. // Initializing VirtualAddress is not needed for correctness, but
  2110. // without it the compiler cannot compile this code W4 to check
  2111. // for use of uninitialized variables.
  2112. //
  2113. VirtualAddress = NULL;
  2114. }
  2115. Pfn1->u1.Event = (PVOID)PsGetCurrentThread();
  2116. //
  2117. // Get a working set entry.
  2118. //
  2119. WsInfo->WorkingSetSize += 1;
  2120. ASSERT (WorkingSetList->FirstFree != WSLE_NULL_INDEX);
  2121. ASSERT (WorkingSetList->FirstFree >= WorkingSetList->FirstDynamic);
  2122. WorkingSetIndex = WorkingSetList->FirstFree;
  2123. WorkingSetList->FirstFree = (WSLE_NUMBER)(Wsle[WorkingSetIndex].u1.Long >> MM_FREE_WSLE_SHIFT);
  2124. ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
  2125. (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
  2126. if (WsInfo->WorkingSetSize > WsInfo->MinimumWorkingSetSize) {
  2127. MmPagesAboveWsMinimum += 1;
  2128. }
  2129. if (WorkingSetIndex > WorkingSetList->LastEntry) {
  2130. WorkingSetList->LastEntry = WorkingSetIndex;
  2131. }
  2132. MiUpdateWsle (&WorkingSetIndex, VirtualAddress, WorkingSetList, Pfn1);
  2133. MI_SET_PTE_IN_WORKING_SET (MiGetPteAddress (VirtualAddress),
  2134. WorkingSetIndex);
  2135. //
  2136. // Lock the created page table page into the working set.
  2137. //
  2138. if (WorkingSetIndex >= WorkingSetList->FirstDynamic) {
  2139. SwapEntry = WorkingSetList->FirstDynamic;
  2140. if (WorkingSetIndex != WorkingSetList->FirstDynamic) {
  2141. //
  2142. // Swap this entry with the one at first dynamic.
  2143. //
  2144. MiSwapWslEntries (WorkingSetIndex, SwapEntry, WsInfo);
  2145. }
  2146. WorkingSetList->FirstDynamic += 1;
  2147. Wsle[SwapEntry].u1.e1.LockedInWs = 1;
  2148. ASSERT (Wsle[SwapEntry].u1.e1.Valid == 1);
  2149. }
  2150. }
  2151. #endif
  2152. ASSERT ((MiGetPteAddress(&Wsle[WorkingSetList->LastInitializedWsle]))->u.Hard.Valid == 1);
  2153. if ((WorkingSetList->HashTable == NULL) &&
  2154. (MmAvailablePages > 20)) {
  2155. //
  2156. // Add a hash table to support shared pages in the working set to
  2157. // eliminate costly lookups.
  2158. //
  2159. WsInfo->Flags.AllowWorkingSetAdjustment = MM_GROW_WSLE_HASH;
  2160. }
  2161. return TRUE;
  2162. }
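//
// Editorial illustration (not part of the original source): a minimal
// sketch of how the WSLE free list built above is threaded. Each free
// entry holds the index of the next free entry shifted left by
// MM_FREE_WSLE_SHIFT (so the entry's Valid bit stays clear), and the
// chain is terminated with WSLE_NULL_INDEX. MiExampleCountFreeWsles is
// a hypothetical helper that simply walks the chain; it assumes the
// appropriate working set lock is held.
//
WSLE_NUMBER
MiExampleCountFreeWsles (
    IN PMMWSL WorkingSetList
    )
{
    PMMWSLE Wsle;
    WSLE_NUMBER Index;
    WSLE_NUMBER Count;

    Wsle = WorkingSetList->Wsle;
    Count = 0;
    Index = WorkingSetList->FirstFree;

    while (Index != WSLE_NULL_INDEX) {

        Count += 1;

        //
        // The next free index is stored in the free entry itself,
        // shifted by MM_FREE_WSLE_SHIFT.
        //

        Index = (WSLE_NUMBER)(Wsle[Index].u1.Long >> MM_FREE_WSLE_SHIFT);
    }

    return Count;
}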
  2163. LOGICAL
  2164. MiAddWsleHash (
  2165. IN PMMSUPPORT WsInfo,
  2166. IN PMMPTE PointerPte
  2167. )
  2168. /*++
  2169. Routine Description:
  2170. This function adds a page directory, page table or actual mapping page
  2171. for hash table creation (or expansion) for the current process.
  2172. Arguments:
  2173. WsInfo - Supplies a pointer to the working set info block for the
  2174. process (or system cache).
  2175. PointerPte - Supplies a pointer to the PTE to be filled.
  2176. Return Value:
  2177. TRUE if the page was added, FALSE if commitment or free pages were not available.
  2178. Environment:
  2179. Kernel mode, APCs disabled, working set lock held.
  2180. --*/
  2181. {
  2182. KIRQL OldIrql;
  2183. PMMPFN Pfn1;
  2184. WSLE_NUMBER SwapEntry;
  2185. MMPTE TempPte;
  2186. PVOID Va;
  2187. PMMWSLE Wsle;
  2188. PFN_NUMBER WorkingSetPage;
  2189. WSLE_NUMBER WorkingSetIndex;
  2190. PMMWSL WorkingSetList;
  2191. if (MiChargeCommitmentCantExpand (1, FALSE) == FALSE) {
  2192. return FALSE;
  2193. }
  2194. WorkingSetList = WsInfo->VmWorkingSetList;
  2195. Wsle = WorkingSetList->Wsle;
  2196. ASSERT (PointerPte->u.Hard.Valid == 0);
  2197. LOCK_PFN (OldIrql);
  2198. if (MmAvailablePages < 10) {
  2199. UNLOCK_PFN (OldIrql);
  2200. MiReturnCommitment (1);
  2201. return FALSE;
  2202. }
  2203. MM_TRACK_COMMIT (MM_DBG_COMMIT_SESSION_ADDITIONAL_WS_HASHPAGES, 1);
  2204. WorkingSetPage = MiRemoveZeroPage (
  2205. MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
  2206. PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  2207. MiInitializePfn (WorkingSetPage, PointerPte, 1);
  2208. UNLOCK_PFN (OldIrql);
  2209. MI_MAKE_VALID_PTE (TempPte,
  2210. WorkingSetPage,
  2211. MM_READWRITE,
  2212. PointerPte);
  2213. MI_SET_PTE_DIRTY (TempPte);
  2214. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  2215. //
  2216. // As we are growing the working set, we know that quota
  2217. // is above the current working set size. Just take the
  2218. // next free WSLE from the list and use it.
  2219. //
  2220. Pfn1 = MI_PFN_ELEMENT (WorkingSetPage);
  2221. Pfn1->u1.Event = (PVOID)PsGetCurrentThread();
  2222. Va = (PMMPTE)MiGetVirtualAddressMappedByPte (PointerPte);
  2223. WorkingSetIndex = MiLocateAndReserveWsle (WsInfo);
  2224. MiUpdateWsle (&WorkingSetIndex, Va, WorkingSetList, Pfn1);
  2225. MI_SET_PTE_IN_WORKING_SET (PointerPte, WorkingSetIndex);
  2226. //
  2227. // Lock any created page table pages into the working set.
  2228. //
  2229. if (WorkingSetIndex >= WorkingSetList->FirstDynamic) {
  2230. SwapEntry = WorkingSetList->FirstDynamic;
  2231. if (WorkingSetIndex != WorkingSetList->FirstDynamic) {
  2232. //
  2233. // Swap this entry with the one at first dynamic.
  2234. //
  2235. MiSwapWslEntries (WorkingSetIndex, SwapEntry, WsInfo);
  2236. }
  2237. WorkingSetList->FirstDynamic += 1;
  2238. Wsle[SwapEntry].u1.e1.LockedInWs = 1;
  2239. ASSERT (Wsle[SwapEntry].u1.e1.Valid == 1);
  2240. }
  2241. if (WsInfo->Flags.SessionSpace == 1) {
  2242. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_HASH_GROW, 1);
  2243. MmSessionSpace->NonPagablePages += 1;
  2244. MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_WS_HASHPAGE_ALLOC, 1);
  2245. InterlockedExchangeAddSizeT (&MmSessionSpace->CommittedPages, 1);
  2246. }
  2247. return TRUE;
  2248. }
  2249. VOID
  2250. MiGrowWsleHash (
  2251. IN PMMSUPPORT WsInfo
  2252. )
  2253. /*++
  2254. Routine Description:
  2255. This function grows (or adds) a hash table to the working set list
  2256. to allow direct indexing for WSLEs that cannot be located via the
  2257. PFN database WSINDEX field.
  2258. The hash table is located AFTER the WSLE array and the pages are
  2259. locked into the working set just like standard WSLEs.
  2260. Note that the hash table is expanded by setting the hash table
  2261. field in the working set to NULL, but leaving the size as non-zero.
  2262. This indicates that the hash should be expanded and the initial
  2263. portion of the table zeroed.
  2264. Arguments:
  2265. WsInfo - Supplies a pointer to the working set info block for the
  2266. process (or system cache).
  2267. Return Value:
  2268. None.
  2269. Environment:
  2270. Kernel mode, APCs disabled, working set lock held.
  2271. --*/
  2272. {
  2273. ULONG Tries;
  2274. LONG Size;
  2275. PMMWSLE Wsle;
  2276. PMMPTE StartPte;
  2277. PMMPTE EndPte;
  2278. PMMPTE PointerPte;
  2279. ULONG First;
  2280. WSLE_NUMBER Hash;
  2281. ULONG NewSize;
  2282. PMMWSLE_HASH Table;
  2283. PMMWSLE_HASH OriginalTable;
  2284. ULONG j;
  2285. PMMWSL WorkingSetList;
  2286. WSLE_NUMBER Count;
  2287. PVOID EntryHashTableEnd;
  2288. #if (_MI_PAGING_LEVELS >= 3)
  2289. PFN_NUMBER CommittedPages;
  2290. KIRQL OldIrql;
  2291. PVOID TempVa;
  2292. PEPROCESS CurrentProcess;
  2293. LOGICAL LoopStart;
  2294. PMMPTE PointerPde;
  2295. PMMPTE PointerPpe;
  2296. PMMPTE AllocatedPde;
  2297. PMMPTE AllocatedPpe;
  2298. PMMPTE PointerPxe;
  2299. PMMPTE AllocatedPxe;
  2300. #endif
  2301. WorkingSetList = WsInfo->VmWorkingSetList;
  2302. Wsle = WorkingSetList->Wsle;
  2303. Table = WorkingSetList->HashTable;
  2304. OriginalTable = WorkingSetList->HashTable;
  2305. First = WorkingSetList->HashTableSize;
  2306. if (Table == NULL) {
  2307. NewSize = PtrToUlong(PAGE_ALIGN (((1 + WorkingSetList->NonDirectCount) *
  2308. 2 * sizeof(MMWSLE_HASH)) + PAGE_SIZE - 1));
  2309. //
  2310. // Note that the Table may be NULL and the HashTableSize/PTEs nonzero
  2311. // in the case where the hash has been contracted.
  2312. //
  2313. j = First * sizeof(MMWSLE_HASH);
  2314. //
  2315. // Don't try for additional hash pages if we already have
  2316. // the right amount (or too many).
  2317. //
  2318. if ((j + PAGE_SIZE > NewSize) && (j != 0)) {
  2319. return;
  2320. }
  2321. Table = (PMMWSLE_HASH)(WorkingSetList->HashTableStart);
  2322. EntryHashTableEnd = &Table[WorkingSetList->HashTableSize];
  2323. WorkingSetList->HashTableSize = 0;
  2324. }
  2325. else {
  2326. //
  2327. // Attempt to add 4 pages, make sure the working set list has
  2328. // 4 free entries.
  2329. //
  2330. if ((WorkingSetList->LastInitializedWsle + 5) > WsInfo->WorkingSetSize) {
  2331. NewSize = PAGE_SIZE * 4;
  2332. }
  2333. else {
  2334. NewSize = PAGE_SIZE;
  2335. }
  2336. EntryHashTableEnd = &Table[WorkingSetList->HashTableSize];
  2337. }
  2338. if ((PCHAR)EntryHashTableEnd + NewSize > (PCHAR)WorkingSetList->HighestPermittedHashAddress) {
  2339. NewSize =
  2340. (ULONG)((PCHAR)(WorkingSetList->HighestPermittedHashAddress) -
  2341. ((PCHAR)EntryHashTableEnd));
  2342. if (NewSize == 0) {
  2343. if (OriginalTable == NULL) {
  2344. WorkingSetList->HashTableSize = First;
  2345. }
  2346. return;
  2347. }
  2348. }
  2349. #if (_MI_PAGING_LEVELS >= 4)
  2350. ASSERT64 ((MiGetPxeAddress(EntryHashTableEnd)->u.Hard.Valid == 0) ||
  2351. (MiGetPpeAddress(EntryHashTableEnd)->u.Hard.Valid == 0) ||
  2352. (MiGetPdeAddress(EntryHashTableEnd)->u.Hard.Valid == 0) ||
  2353. (MiGetPteAddress(EntryHashTableEnd)->u.Hard.Valid == 0));
  2354. #else
  2355. ASSERT64 ((MiGetPpeAddress(EntryHashTableEnd)->u.Hard.Valid == 0) ||
  2356. (MiGetPdeAddress(EntryHashTableEnd)->u.Hard.Valid == 0) ||
  2357. (MiGetPteAddress(EntryHashTableEnd)->u.Hard.Valid == 0));
  2358. #endif
  2359. //
  2360. // Note PAE virtual address space is packed even more densely than
  2361. // regular x86. The working set list hash table can grow until it
  2362. // is directly beneath the system cache data structures. Hence the
  2363. // assert below factors that in by checking HighestPermittedHashAddress
  2364. // first.
  2365. //
  2366. ASSERT32 ((EntryHashTableEnd == WorkingSetList->HighestPermittedHashAddress) || (MiGetPteAddress(EntryHashTableEnd)->u.Hard.Valid == 0));
  2367. Size = NewSize;
  2368. PointerPte = MiGetPteAddress (EntryHashTableEnd);
  2369. StartPte = PointerPte;
  2370. EndPte = PointerPte + (NewSize >> PAGE_SHIFT);
  2371. #if (_MI_PAGING_LEVELS >= 3)
  2372. LoopStart = TRUE;
  2373. AllocatedPde = NULL;
  2374. AllocatedPpe = NULL;
  2375. AllocatedPxe = NULL;
  2376. #endif
  2377. do {
  2378. #if (_MI_PAGING_LEVELS >= 3)
  2379. if (LoopStart == TRUE || MiIsPteOnPdeBoundary(PointerPte)) {
  2380. PointerPxe = MiGetPpeAddress(PointerPte);
  2381. PointerPpe = MiGetPdeAddress(PointerPte);
  2382. PointerPde = MiGetPteAddress(PointerPte);
  2383. #if (_MI_PAGING_LEVELS >= 4)
  2384. if (PointerPxe->u.Hard.Valid == 0) {
  2385. if (MiAddWsleHash (WsInfo, PointerPxe) == FALSE) {
  2386. break;
  2387. }
  2388. AllocatedPxe = PointerPxe;
  2389. }
  2390. #endif
  2391. if (PointerPpe->u.Hard.Valid == 0) {
  2392. if (MiAddWsleHash (WsInfo, PointerPpe) == FALSE) {
  2393. break;
  2394. }
  2395. AllocatedPpe = PointerPpe;
  2396. }
  2397. if (PointerPde->u.Hard.Valid == 0) {
  2398. if (MiAddWsleHash (WsInfo, PointerPde) == FALSE) {
  2399. break;
  2400. }
  2401. AllocatedPde = PointerPde;
  2402. }
  2403. LoopStart = FALSE;
  2404. }
  2405. else {
  2406. AllocatedPde = NULL;
  2407. AllocatedPpe = NULL;
  2408. AllocatedPxe = NULL;
  2409. }
  2410. #endif
  2411. if (PointerPte->u.Hard.Valid == 0) {
  2412. if (MiAddWsleHash (WsInfo, PointerPte) == FALSE) {
  2413. break;
  2414. }
  2415. }
  2416. PointerPte += 1;
  2417. Size -= PAGE_SIZE;
  2418. } while (Size > 0);
  2419. //
  2420. // If MiAddWsleHash was unable to allocate memory above, then roll back
  2421. // any extra PPEs & PDEs that may have been created. Note NewSize must
  2422. // be recalculated to handle the fact that memory may have run out.
  2423. //
  2424. #if (_MI_PAGING_LEVELS < 3)
  2425. if (PointerPte == StartPte) {
  2426. if (OriginalTable == NULL) {
  2427. WorkingSetList->HashTableSize = First;
  2428. }
  2429. return;
  2430. }
  2431. #else
  2432. if (PointerPte != EndPte) {
  2433. //
  2434. // Clean up the last allocated PPE/PDE as they are not needed.
  2435. // Note that the system cache and the session space working sets
  2436. // have no current process (which MiDeletePte requires) which is
  2437. // needed for WSLE and PrivatePages adjustments.
  2438. //
  2439. if (WsInfo != &MmSystemCacheWs && WsInfo->Flags.SessionSpace == 0) {
  2440. CurrentProcess = PsGetCurrentProcess();
  2441. CommittedPages = 0;
  2442. if (AllocatedPde != NULL) {
  2443. ASSERT (AllocatedPde->u.Hard.Valid == 1);
  2444. TempVa = MiGetVirtualAddressMappedByPte(AllocatedPde);
  2445. LOCK_PFN (OldIrql);
  2446. MiDeletePte (AllocatedPde,
  2447. TempVa,
  2448. FALSE,
  2449. CurrentProcess,
  2450. NULL,
  2451. NULL);
  2452. //
  2453. // Add back in the private page MiDeletePte subtracted.
  2454. //
  2455. CurrentProcess->NumberOfPrivatePages += 1;
  2456. UNLOCK_PFN (OldIrql);
  2457. CommittedPages += 1;
  2458. }
  2459. if (AllocatedPpe != NULL) {
  2460. ASSERT (AllocatedPpe->u.Hard.Valid == 1);
  2461. TempVa = MiGetVirtualAddressMappedByPte(AllocatedPpe);
  2462. LOCK_PFN (OldIrql);
  2463. MiDeletePte (AllocatedPpe,
  2464. TempVa,
  2465. FALSE,
  2466. CurrentProcess,
  2467. NULL,
  2468. NULL);
  2469. //
  2470. // Add back in the private page MiDeletePte subtracted.
  2471. //
  2472. CurrentProcess->NumberOfPrivatePages += 1;
  2473. UNLOCK_PFN (OldIrql);
  2474. CommittedPages += 1;
  2475. }
  2476. if (AllocatedPxe != NULL) {
  2477. ASSERT (AllocatedPxe->u.Hard.Valid == 1);
  2478. TempVa = MiGetVirtualAddressMappedByPte(AllocatedPxe);
  2479. LOCK_PFN (OldIrql);
  2480. MiDeletePte (AllocatedPxe,
  2481. TempVa,
  2482. FALSE,
  2483. CurrentProcess,
  2484. NULL,
  2485. NULL);
  2486. //
  2487. // Add back in the private page MiDeletePte subtracted.
  2488. //
  2489. CurrentProcess->NumberOfPrivatePages += 1;
  2490. UNLOCK_PFN (OldIrql);
  2491. CommittedPages += 1;
  2492. }
  2493. }
  2494. if (PointerPte == StartPte) {
  2495. if (OriginalTable == NULL) {
  2496. WorkingSetList->HashTableSize = First;
  2497. }
  2498. }
  2499. return;
  2500. }
  2501. #endif
  2502. NewSize = (ULONG)((PointerPte - StartPte) << PAGE_SHIFT);
  2503. ASSERT ((MiGetVirtualAddressMappedByPte(PointerPte) == WorkingSetList->HighestPermittedHashAddress) ||
  2504. (PointerPte->u.Hard.Valid == 0));
  2505. WorkingSetList->HashTableSize = First + NewSize / sizeof (MMWSLE_HASH);
  2506. WorkingSetList->HashTable = Table;
  2507. ASSERT ((&Table[WorkingSetList->HashTableSize] == WorkingSetList->HighestPermittedHashAddress) ||
  2508. (MiGetPteAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0));
  2509. if (First != 0) {
  2510. RtlZeroMemory (Table, First * sizeof(MMWSLE_HASH));
  2511. }
  2512. //
  2513. // Fill hash table.
  2514. //
  2515. j = 0;
  2516. Count = WorkingSetList->NonDirectCount;
  2517. Size = WorkingSetList->HashTableSize;
  2518. do {
  2519. if ((Wsle[j].u1.e1.Valid == 1) &&
  2520. (Wsle[j].u1.e1.Direct == 0)) {
  2521. //
  2522. // Hash this.
  2523. //
  2524. Count -= 1;
  2525. Hash = MI_WSLE_HASH(Wsle[j].u1.Long, WorkingSetList);
  2526. Tries = 0;
  2527. while (Table[Hash].Key != 0) {
  2528. Hash += 1;
  2529. if (Hash >= (ULONG)Size) {
  2530. if (Tries != 0) {
  2531. //
  2532. // Not enough space to hash everything but that's ok.
  2533. // Just bail out, we'll do linear walks to lookup this
  2534. // entry until the hash can be further expanded later.
  2535. //
  2536. return;
  2537. }
  2538. Tries = 1;
  2539. Hash = 0;
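//
// Reuse Size as the limit for the wrapped pass so the probe stops if it
// comes back around to this entry's home slot.
//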
  2540. Size = MI_WSLE_HASH(Wsle[j].u1.Long, WorkingSetList);
  2541. }
  2542. }
  2543. Table[Hash].Key = PAGE_ALIGN (Wsle[j].u1.Long);
  2544. Table[Hash].Index = j;
  2545. #if DBG
  2546. PointerPte = MiGetPteAddress(Wsle[j].u1.VirtualAddress);
  2547. ASSERT (PointerPte->u.Hard.Valid);
  2548. #endif
  2549. }
  2550. ASSERT (j <= WorkingSetList->LastEntry);
  2551. j += 1;
  2552. } while (Count);
  2553. #if DBG
  2554. MiCheckWsleHash (WorkingSetList);
  2555. #endif
  2556. return;
  2557. }
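//
// Editorial illustration (not part of the original source): a minimal
// sketch of how an entry inserted by the fill loop above can be found
// again. Keys are page-aligned virtual addresses and the probe is the
// same forward scan with a single wrap to the start of the table; if
// the probe misses, callers fall back to a linear walk of the working
// set list, as noted above. MiExampleLookupWsleHash is a hypothetical
// helper; the real lookup logic lives elsewhere in this module.
//
WSLE_NUMBER
MiExampleLookupWsleHash (
    IN PMMWSL WorkingSetList,
    IN PVOID VirtualAddress
    )
{
    PMMWSLE_HASH Table;
    WSLE_NUMBER Hash;
    WSLE_NUMBER Size;
    ULONG Tries;

    Table = WorkingSetList->HashTable;

    if (Table == NULL) {
        return WSLE_NULL_INDEX;
    }

    Size = WorkingSetList->HashTableSize;
    Hash = MI_WSLE_HASH ((ULONG_PTR)VirtualAddress, WorkingSetList);
    Tries = 0;

    while (Table[Hash].Key != 0) {

        if (Table[Hash].Key == PAGE_ALIGN (VirtualAddress)) {
            return Table[Hash].Index;
        }

        Hash += 1;

        if (Hash >= Size) {
            if (Tries != 0) {
                break;
            }
            Tries = 1;
            Hash = 0;
        }
    }

    return WSLE_NULL_INDEX;
}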
  2558. WSLE_NUMBER
  2559. MiTrimWorkingSet (
  2560. IN WSLE_NUMBER Reduction,
  2561. IN PMMSUPPORT WsInfo,
  2562. IN ULONG TrimAge
  2563. )
  2564. /*++
  2565. Routine Description:
  2566. This function reduces the working set by the specified amount.
  2567. Arguments:
  2568. Reduction - Supplies the number of pages to remove from the working set.
  2569. WsInfo - Supplies a pointer to the working set information for the
  2570. process (or system cache) to trim.
  2571. TrimAge - Supplies the age value to use, i.e., pages of this age or older
  2572. will be removed. A TrimAge of zero removes pages regardless of age.
  2573. Return Value:
  2574. Returns the actual number of pages removed.
  2575. Environment:
  2576. Kernel mode, APCs disabled, working set lock. PFN lock NOT held.
  2577. --*/
  2578. {
  2579. WSLE_NUMBER TryToFree;
  2580. WSLE_NUMBER StartEntry;
  2581. WSLE_NUMBER LastEntry;
  2582. PMMWSL WorkingSetList;
  2583. PMMWSLE Wsle;
  2584. PMMPTE PointerPte;
  2585. WSLE_NUMBER NumberLeftToRemove;
  2586. NumberLeftToRemove = Reduction;
  2587. WorkingSetList = WsInfo->VmWorkingSetList;
  2588. Wsle = WorkingSetList->Wsle;
  2589. #if DBG
  2590. if (WsInfo == &MmSystemCacheWs) {
  2591. MM_SYSTEM_WS_LOCK_ASSERT();
  2592. }
  2593. #endif
  2594. LastEntry = WorkingSetList->LastEntry;
  2595. TryToFree = WorkingSetList->NextSlot;
  2596. if (TryToFree > LastEntry || TryToFree < WorkingSetList->FirstDynamic) {
  2597. TryToFree = WorkingSetList->FirstDynamic;
  2598. }
  2599. StartEntry = TryToFree;
  2600. while (NumberLeftToRemove != 0) {
  2601. if (Wsle[TryToFree].u1.e1.Valid == 1) {
  2602. PointerPte = MiGetPteAddress (Wsle[TryToFree].u1.VirtualAddress);
  2603. if ((TrimAge == 0) ||
  2604. ((MI_GET_ACCESSED_IN_PTE (PointerPte) == 0) &&
  2605. (MI_GET_WSLE_AGE(PointerPte, &Wsle[TryToFree]) >= TrimAge))) {
  2606. PERFINFO_GET_PAGE_INFO_WITH_DECL(PointerPte);
  2607. if (MiFreeWsle (TryToFree, WsInfo, PointerPte)) {
  2608. PERFINFO_LOG_WS_REMOVAL(PERFINFO_LOG_TYPE_OUTWS_VOLUNTRIM, WsInfo);
  2609. NumberLeftToRemove -= 1;
  2610. }
  2611. }
  2612. }
  2613. TryToFree += 1;
  2614. if (TryToFree > LastEntry) {
  2615. TryToFree = WorkingSetList->FirstDynamic;
  2616. }
  2617. if (TryToFree == StartEntry) {
  2618. break;
  2619. }
  2620. }
  2621. WorkingSetList->NextSlot = TryToFree;
  2622. //
  2623. // If this is not the system cache or a session working set, see if the
  2624. // working set list can be contracted.
  2625. //
  2626. if (WsInfo != &MmSystemCacheWs && WsInfo->Flags.SessionSpace == 0) {
  2627. //
  2628. // Make sure we are at least a page above the working set maximum.
  2629. //
  2630. if (WorkingSetList->FirstDynamic == WsInfo->WorkingSetSize) {
  2631. MiRemoveWorkingSetPages (WorkingSetList, WsInfo);
  2632. }
  2633. else {
  2634. if ((WsInfo->WorkingSetSize + 15 + (PAGE_SIZE / sizeof(MMWSLE))) <
  2635. WorkingSetList->LastEntry) {
  2636. if ((WsInfo->MaximumWorkingSetSize + 15 + (PAGE_SIZE / sizeof(MMWSLE))) <
  2637. WorkingSetList->LastEntry ) {
  2638. MiRemoveWorkingSetPages (WorkingSetList, WsInfo);
  2639. }
  2640. }
  2641. }
  2642. }
  2643. return Reduction - NumberLeftToRemove;
  2644. }
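//
// Editorial illustration (not part of the original source): a minimal
// sketch of how a trimmer might drive MiTrimWorkingSet, first removing
// only aged pages and then, if the target was not met, trimming without
// regard to age. MiExampleTrim and the age threshold of 2 are
// hypothetical; the caller is assumed to hold the appropriate working
// set lock, per the Environment section above. The real trimming policy
// lives in the working set manager.
//
VOID
MiExampleTrim (
    IN PMMSUPPORT WsInfo,
    IN WSLE_NUMBER Target
    )
{
    WSLE_NUMBER Trimmed;

    //
    // First pass: only pages whose age is 2 or older are eligible.
    //

    Trimmed = MiTrimWorkingSet (Target, WsInfo, 2);

    //
    // Second pass: a TrimAge of zero removes pages regardless of age.
    //

    if (Trimmed < Target) {
        MiTrimWorkingSet (Target - Trimmed, WsInfo, 0);
    }
}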
  2645. VOID
  2646. MiEliminateWorkingSetEntry (
  2647. IN WSLE_NUMBER WorkingSetIndex,
  2648. IN PMMPTE PointerPte,
  2649. IN PMMPFN Pfn,
  2650. IN PMMWSLE Wsle
  2651. )
  2652. /*++
  2653. Routine Description:
  2654. This routine removes the specified working set list entry
  2655. from the working set, flushes the TB for the page, decrements
  2656. the share count for the physical page, and, if necessary turns
  2657. the PTE into a transition PTE.
  2658. Arguments:
  2659. WorkingSetIndex - Supplies the working set index to remove.
  2660. PointerPte - Supplies a pointer to the PTE corresponding to the virtual
  2661. address in the working set.
  2662. Pfn - Supplies a pointer to the PFN element corresponding to the PTE.
  2663. Wsle - Supplies a pointer to the first working set list entry for this
  2664. working set.
  2665. Return Value:
  2666. None.
  2667. Environment:
  2668. Kernel mode, Working set lock and PFN lock held, APCs disabled.
  2669. --*/
  2670. {
  2671. PMMPTE ContainingPageTablePage;
  2672. MMPTE TempPte;
  2673. MMPTE PreviousPte;
  2674. PFN_NUMBER PageFrameIndex;
  2675. PFN_NUMBER PageTableFrameIndex;
  2676. PEPROCESS Process;
  2677. PVOID VirtualAddress;
  2678. PMMPFN Pfn2;
  2679. //
  2680. // Remove the page from the working set.
  2681. //
  2682. MM_PFN_LOCK_ASSERT ();
  2683. TempPte = *PointerPte;
  2684. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&TempPte);
  2685. ASSERT (Pfn == MI_PFN_ELEMENT(PageFrameIndex));
  2686. #ifdef _X86_
  2687. #if DBG
  2688. #if !defined(NT_UP)
  2689. if (TempPte.u.Hard.Writable == 1) {
  2690. ASSERT (TempPte.u.Hard.Dirty == 1);
  2691. }
  2692. #endif //NTUP
  2693. #endif //DBG
  2694. #endif //X86
  2695. MI_MAKING_VALID_PTE_INVALID (FALSE);
  2696. if (Pfn->u3.e1.PrototypePte) {
  2697. //
  2698. // This is a prototype PTE. The PFN database does not contain
  2699. // the contents of this PTE; it contains the contents of the
  2700. // prototype PTE. This PTE must be reconstructed to contain
  2701. // a pointer to the prototype PTE.
  2702. //
  2703. // The working set list entry contains information about
  2704. // how to reconstruct the PTE.
  2705. //
  2706. if (MI_IS_SESSION_IMAGE_PTE (PointerPte)) {
  2707. TempPte.u.Long = MiProtoAddressForPte (Pfn->PteAddress);
  2708. //
  2709. // If the session address was readonly, keep it so.
  2710. //
  2711. if (Wsle[WorkingSetIndex].u1.e1.SameProtectAsProto == 1) {
  2712. MI_ASSERT_NOT_SESSION_DATA (PointerPte);
  2713. TempPte.u.Proto.ReadOnly = 1;
  2714. }
  2715. }
  2716. else if (Wsle[WorkingSetIndex].u1.e1.SameProtectAsProto == 0) {
  2717. //
  2718. // The protection for the prototype PTE is in the WSLE.
  2719. //
  2720. ASSERT (Wsle[WorkingSetIndex].u1.e1.Protection != 0);
  2721. TempPte.u.Long = 0;
  2722. TempPte.u.Soft.Protection =
  2723. MI_GET_PROTECTION_FROM_WSLE (&Wsle[WorkingSetIndex]);
  2724. TempPte.u.Soft.PageFileHigh = MI_PTE_LOOKUP_NEEDED;
  2725. }
  2726. else {
  2727. //
  2728. // The protection is in the prototype PTE.
  2729. //
  2730. TempPte.u.Long = MiProtoAddressForPte (Pfn->PteAddress);
  2731. }
  2732. TempPte.u.Proto.Prototype = 1;
  2733. //
  2734. // Decrement the share count of the containing page table
  2735. // page as the PTE for the removed page is no longer valid
  2736. // or in transition.
  2737. //
  2738. ContainingPageTablePage = MiGetPteAddress (PointerPte);
  2739. #if (_MI_PAGING_LEVELS >= 3)
  2740. ASSERT (ContainingPageTablePage->u.Hard.Valid == 1);
  2741. #else
  2742. if (ContainingPageTablePage->u.Hard.Valid == 0) {
  2743. if (!NT_SUCCESS(MiCheckPdeForPagedPool (PointerPte))) {
  2744. KeBugCheckEx (MEMORY_MANAGEMENT,
  2745. 0x61940,
  2746. (ULONG_PTR)PointerPte,
  2747. (ULONG_PTR)ContainingPageTablePage->u.Long,
  2748. (ULONG_PTR)MiGetVirtualAddressMappedByPte(PointerPte));
  2749. }
  2750. }
  2751. #endif
  2752. PageTableFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (ContainingPageTablePage);
  2753. Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);
  2754. MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);
  2755. }
  2756. else {
  2757. //
  2758. // This is a private page, make it transition.
  2759. //
  2760. //
  2761. // Assert that the share count is 1 for all user mode pages.
  2762. //
  2763. ASSERT ((Pfn->u2.ShareCount == 1) ||
  2764. (Wsle[WorkingSetIndex].u1.VirtualAddress >
  2765. (PVOID)MM_HIGHEST_USER_ADDRESS));
  2766. //
  2767. // Set the working set index to zero. This allows page table
  2768. // pages to be brought back in with the proper WSINDEX.
  2769. //
  2770. ASSERT (Pfn->u1.WsIndex != 0);
  2771. MI_ZERO_WSINDEX (Pfn);
  2772. MI_MAKE_VALID_PTE_TRANSITION (TempPte,
  2773. Pfn->OriginalPte.u.Soft.Protection);
  2774. }
  2775. if (Wsle == MmWsle) {
  2776. PreviousPte.u.Flush = KeFlushSingleTb (
  2777. Wsle[WorkingSetIndex].u1.VirtualAddress,
  2778. TRUE,
  2779. FALSE,
  2780. (PHARDWARE_PTE)PointerPte,
  2781. TempPte.u.Flush);
  2782. }
  2783. else if (Wsle == MmSystemCacheWsle) {
  2784. //
  2785. // Must be the system cache.
  2786. //
  2787. PreviousPte.u.Flush = KeFlushSingleTb (
  2788. Wsle[WorkingSetIndex].u1.VirtualAddress,
  2789. TRUE,
  2790. TRUE,
  2791. (PHARDWARE_PTE)PointerPte,
  2792. TempPte.u.Flush);
  2793. }
  2794. else {
  2795. //
  2796. // Must be a session space.
  2797. //
  2798. MI_FLUSH_SINGLE_SESSION_TB (Wsle[WorkingSetIndex].u1.VirtualAddress,
  2799. TRUE,
  2800. FALSE,
  2801. (PHARDWARE_PTE)PointerPte,
  2802. TempPte.u.Flush,
  2803. PreviousPte);
  2804. }
  2805. ASSERT (PreviousPte.u.Hard.Valid == 1);
  2806. //
  2807. // A page is being removed from the working set; on certain
  2808. // hardware the dirty bit should be ORed into the modify bit in
  2809. // the PFN element.
  2810. //
  2811. MI_CAPTURE_DIRTY_BIT_TO_PFN (&PreviousPte, Pfn);
  2812. //
  2813. // If the PTE indicates the page has been modified (this is different
  2814. // from the PFN indicating this), then ripple it back to the write watch
  2815. // bitmap now since we are still in the correct process context.
  2816. //
  2817. if (MiActiveWriteWatch != 0) {
  2818. if ((Pfn->u3.e1.PrototypePte == 0) &&
  2819. (MI_IS_PTE_DIRTY(PreviousPte))) {
  2820. Process = PsGetCurrentProcess();
  2821. if (Process->Flags & PS_PROCESS_FLAGS_USING_WRITE_WATCH) {
  2822. //
  2823. // This process has (or had) write watch VADs. Search now
  2824. // for a write watch region encapsulating the PTE being
  2825. // invalidated.
  2826. //
  2827. VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
  2828. MiCaptureWriteWatchDirtyBit (Process, VirtualAddress);
  2829. }
  2830. }
  2831. }
  2832. //
  2833. // Flush the translation buffer and decrement the number of valid
  2834. // PTEs within the containing page table page. Note that for a
  2835. // private page, the page table page is still needed because the
  2836. // page is in transition.
  2837. //
  2838. MiDecrementShareCountInline (Pfn, PageFrameIndex);
  2839. return;
  2840. }
  2841. VOID
  2842. MiRemoveWorkingSetPages (
  2843. IN PMMWSL WorkingSetList,
  2844. IN PMMSUPPORT WsInfo
  2845. )
  2846. /*++
  2847. Routine Description:
  2848. This routine compresses the WSLEs into the front of the working set
  2849. and frees the pages for unneeded working set entries.
  2850. Arguments:
  2851. WorkingSetList - Supplies a pointer to the working set list to compress.
  WsInfo - Supplies a pointer to the working set info block that owns the list.
  2852. Return Value:
  2853. None.
  2854. Environment:
  2855. Kernel mode, Working set lock held, APCs disabled.
  2856. --*/
  2857. {
  2858. PMMWSLE FreeEntry;
  2859. PMMWSLE LastEntry;
  2860. PMMWSLE Wsle;
  2861. WSLE_NUMBER FreeIndex;
  2862. WSLE_NUMBER LastIndex;
  2863. ULONG LastInvalid;
  2864. PMMPTE LastPte;
  2865. PMMPTE PointerPte;
  2866. PMMPFN Pfn1;
  2867. PEPROCESS CurrentProcess;
  2868. ULONG NewSize;
  2869. PMMWSLE_HASH Table;
  2870. ASSERT (WsInfo != &MmSystemCacheWs);
  2871. CurrentProcess = PsGetCurrentProcess();
  2872. #if DBG
  2873. MiCheckNullIndex (WorkingSetList);
  2874. #endif
  2875. //
  2876. // Check to see if the wsle hash table should be contracted.
  2877. //
  2878. if (WorkingSetList->HashTable) {
  2879. Table = WorkingSetList->HashTable;
  2880. #if DBG
  2881. if ((PVOID)(&Table[WorkingSetList->HashTableSize]) < WorkingSetList->HighestPermittedHashAddress) {
  2882. ASSERT (MiGetPteAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0);
  2883. }
  2884. #endif
  2885. if (WsInfo->WorkingSetSize < 200) {
  2886. NewSize = 0;
  2887. }
  2888. else {
  2889. NewSize = PtrToUlong(PAGE_ALIGN ((WorkingSetList->NonDirectCount * 2 *
  2890. sizeof(MMWSLE_HASH)) + PAGE_SIZE - 1));
  2891. NewSize = NewSize / sizeof(MMWSLE_HASH);
  2892. }
  2893. if (NewSize < WorkingSetList->HashTableSize) {
  2894. if (NewSize && WsInfo->Flags.AllowWorkingSetAdjustment) {
  2895. WsInfo->Flags.AllowWorkingSetAdjustment = MM_GROW_WSLE_HASH;
  2896. }
  2897. //
  2898. // Remove pages from hash table.
  2899. //
  2900. ASSERT (((ULONG_PTR)&WorkingSetList->HashTable[NewSize] &
  2901. (PAGE_SIZE - 1)) == 0);
  2902. PointerPte = MiGetPteAddress (&WorkingSetList->HashTable[NewSize]);
  2903. LastPte = MiGetPteAddress (WorkingSetList->HighestPermittedHashAddress);
  2904. //
  2905. // Set the hash table to null indicating that no hashing
  2906. // is going on.
  2907. //
  2908. WorkingSetList->HashTable = NULL;
  2909. WorkingSetList->HashTableSize = NewSize;
  2910. MiDeletePteRange (CurrentProcess, PointerPte, LastPte, FALSE);
  2911. }
  2912. #if (_MI_PAGING_LEVELS >= 4)
  2913. //
  2914. // For NT64, the page tables and page directories are also
  2915. // deleted during contraction.
  2916. //
  2917. ASSERT ((MiGetPxeAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0) ||
  2918. (MiGetPpeAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0) ||
  2919. (MiGetPdeAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0) ||
  2920. (MiGetPteAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0));
  2921. #elif (_MI_PAGING_LEVELS >= 3)
  2922. //
  2923. // For NT64, the page tables and page directories are also
  2924. // deleted during contraction.
  2925. //
  2926. ASSERT ((MiGetPpeAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0) ||
  2927. (MiGetPdeAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0) ||
  2928. (MiGetPteAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0));
  2929. #else
  2930. ASSERT ((&Table[WorkingSetList->HashTableSize] == WorkingSetList->HighestPermittedHashAddress) || (MiGetPteAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0));
  2931. #endif
  2932. }
  2933. //
  2934. // If the only pages in the working set are locked pages (that
  2935. // is, all pages are BEFORE first dynamic), just reorganize the
  2936. // free list.
  2937. //
  2938. Wsle = WorkingSetList->Wsle;
  2939. if (WorkingSetList->FirstDynamic == WsInfo->WorkingSetSize) {
  2940. LastIndex = WorkingSetList->FirstDynamic;
  2941. LastEntry = &Wsle[LastIndex];
  2942. }
  2943. else {
  2944. //
  2945. // Start from the first dynamic and move towards the end looking
  2946. // for free entries. At the same time start from the end and
  2947. // move towards first dynamic looking for valid entries.
  2948. //
  2949. LastInvalid = 0;
  2950. FreeIndex = WorkingSetList->FirstDynamic;
  2951. FreeEntry = &Wsle[FreeIndex];
  2952. LastIndex = WorkingSetList->LastEntry;
  2953. LastEntry = &Wsle[LastIndex];
  2954. while (FreeEntry < LastEntry) {
  2955. if (FreeEntry->u1.e1.Valid == 1) {
  2956. FreeEntry += 1;
  2957. FreeIndex += 1;
  2958. }
  2959. else if (LastEntry->u1.e1.Valid == 0) {
  2960. LastEntry -= 1;
  2961. LastIndex -= 1;
  2962. }
  2963. else {
  2964. //
  2965. // Move the WSLE at LastEntry to the free slot at FreeEntry.
  2966. //
  2967. LastInvalid = 1;
  2968. *FreeEntry = *LastEntry;
  2969. PointerPte = MiGetPteAddress (LastEntry->u1.VirtualAddress);
  2970. if (LastEntry->u1.e1.Direct) {
  2971. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  2972. Pfn1->u1.WsIndex = FreeIndex;
  2973. }
  2974. else {
  2975. //
  2976. // This entry is in the working set. Remove it
  2977. // and then add the entry at the free slot.
  2978. //
  2979. MiRemoveWsle (LastIndex, WorkingSetList);
  2980. WorkingSetList->NonDirectCount += 1;
  2981. MiInsertWsleHash (FreeIndex, WorkingSetList);
  2982. }
  2983. MI_SET_PTE_IN_WORKING_SET (PointerPte, FreeIndex);
  2984. LastEntry->u1.Long = 0;
  2985. LastEntry -= 1;
  2986. LastIndex -= 1;
  2987. FreeEntry += 1;
  2988. FreeIndex += 1;
  2989. }
  2990. }
  2991. //
  2992. // If no entries were freed, just return.
  2993. //
  2994. if (LastInvalid == 0) {
  2995. #if DBG
  2996. MiCheckNullIndex (WorkingSetList);
  2997. #endif
  2998. return;
  2999. }
  3000. }
  3001. //
  3002. // Reorganize the free list. Make last entry the first free.
  3003. //
  3004. ASSERT ((LastEntry - 1)->u1.e1.Valid == 1);
  3005. if (LastEntry->u1.e1.Valid == 1) {
  3006. LastEntry += 1;
  3007. LastIndex += 1;
  3008. }
  3009. WorkingSetList->LastEntry = LastIndex - 1;
  3010. WorkingSetList->FirstFree = LastIndex;
  3011. ASSERT ((LastEntry - 1)->u1.e1.Valid == 1);
  3012. ASSERT ((LastEntry)->u1.e1.Valid == 0);
  3013. //
  3014. // Point free entry to the first invalid page.
  3015. //
  3016. FreeEntry = LastEntry;
  3017. while (LastIndex < WorkingSetList->LastInitializedWsle) {
  3018. //
  3019. // Put the remainder of the WSLEs on the free list.
  3020. //
  3021. ASSERT (LastEntry->u1.e1.Valid == 0);
  3022. LastIndex += 1;
  3023. LastEntry->u1.Long = LastIndex << MM_FREE_WSLE_SHIFT;
  3024. LastEntry += 1;
  3025. }
  3026. //
  3027. // Delete the working set pages at the end.
  3028. //
  3029. LastPte = MiGetPteAddress (&Wsle[WorkingSetList->LastInitializedWsle]) + 1;
  3030. if (&Wsle[WsInfo->MinimumWorkingSetSize] > FreeEntry) {
  3031. FreeEntry = &Wsle[WsInfo->MinimumWorkingSetSize];
  3032. }
  3033. PointerPte = MiGetPteAddress (FreeEntry) + 1;
  3034. ASSERT (WorkingSetList->FirstFree >= WorkingSetList->FirstDynamic);
  3035. MiDeletePteRange (CurrentProcess, PointerPte, LastPte, FALSE);
  3036. ASSERT (WorkingSetList->FirstFree >= WorkingSetList->FirstDynamic);
  3037. //
  3038. // Mark the last WSLE in the retained page as the end of the free list.
  3039. //
  3040. LastEntry = (PMMWSLE)((PCHAR)(PAGE_ALIGN(FreeEntry)) + PAGE_SIZE);
  3041. LastEntry -= 1;
  3042. ASSERT (LastEntry->u1.e1.Valid == 0);
  3043. LastEntry->u1.Long = WSLE_NULL_INDEX << MM_FREE_WSLE_SHIFT; //End of List.
  3044. ASSERT (LastEntry > &Wsle[0]);
  3045. WorkingSetList->LastInitializedWsle = (WSLE_NUMBER)(LastEntry - &Wsle[0]);
  3046. WorkingSetList->NextSlot = WorkingSetList->FirstDynamic;
  3047. ASSERT (WorkingSetList->LastEntry <= WorkingSetList->LastInitializedWsle);
  3048. ASSERT ((MiGetPteAddress(&Wsle[WorkingSetList->LastInitializedWsle]))->u.Hard.Valid == 1);
  3049. ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
  3050. (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
  3051. #if DBG
  3052. MiCheckNullIndex (WorkingSetList);
  3053. #endif
  3054. return;
  3055. }
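//
// Editorial illustration (not part of the original source): the
// two-pointer compaction used above, reduced to just the array walk.
// A front index starting at FirstDynamic skips over valid entries while
// a back index starting at LastEntry skips over free ones; when both
// stop, the valid entry at the back is copied into the free slot at the
// front. MiExampleCompactWsle is a hypothetical helper and omits the
// PFN, hash table and PTE bookkeeping the real routine performs.
//
WSLE_NUMBER
MiExampleCompactWsle (
    IN PMMWSLE Wsle,
    IN WSLE_NUMBER FirstDynamic,
    IN WSLE_NUMBER LastEntry
    )
{
    WSLE_NUMBER FreeIndex;
    WSLE_NUMBER LastIndex;

    FreeIndex = FirstDynamic;
    LastIndex = LastEntry;

    while (FreeIndex < LastIndex) {

        if (Wsle[FreeIndex].u1.e1.Valid == 1) {

            //
            // The entry at the front is already in place.
            //

            FreeIndex += 1;
        }
        else if (Wsle[LastIndex].u1.e1.Valid == 0) {

            //
            // The entry at the back is already free.
            //

            LastIndex -= 1;
        }
        else {

            //
            // Move the valid entry at the back into the free slot at the
            // front and mark its old slot free.
            //

            Wsle[FreeIndex] = Wsle[LastIndex];
            Wsle[LastIndex].u1.Long = 0;
            FreeIndex += 1;
            LastIndex -= 1;
        }
    }

    //
    // On return all valid entries are packed toward the front of the
    // array; LastIndex is the highest slot that may still be valid.
    //

    return LastIndex;
}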

NTSTATUS
MiEmptyWorkingSet (
    IN PMMSUPPORT WsInfo,
    IN LOGICAL NeedLock
    )

/*++

Routine Description:

    This routine frees all pages from the working set.

Arguments:

    WsInfo - Supplies the working set information entry to trim.

    NeedLock - Supplies TRUE if the caller needs us to acquire mutex
               synchronization for the working set.  Supplies FALSE if the
               caller has already acquired synchronization.

Return Value:

    Status of operation.

Environment:

    Kernel mode.  No locks.  For session operations, the caller is responsible
    for attaching into the proper session.

--*/
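
//
// Illustrative only: a caller that holds no working set synchronization
// would invoke this routine roughly as below (hypothetical call site,
// assuming the process' MMSUPPORT is reachable as CurrentProcess->Vm):
//
//     Status = MiEmptyWorkingSet (&CurrentProcess->Vm, TRUE);
//
// whereas a caller that already owns the appropriate working set lock
// passes FALSE for NeedLock.
//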

{
    PETHREAD Thread;
    PEPROCESS Process;
    KIRQL OldIrql;
    PMMPTE PointerPte;
    WSLE_NUMBER Entry;
    WSLE_NUMBER LastFreed;
    PMMWSL WorkingSetList;
    PMMWSLE Wsle;
    PMMPFN Pfn1;
    PFN_NUMBER PageFrameIndex;
    WSLE_NUMBER Last;
    NTSTATUS Status;

    //
    // Initializing OldIrql and Process is not needed for correctness, but
    // without it the compiler cannot compile this code at W4 with its
    // checks for use of uninitialized variables enabled.
    //

    OldIrql = PASSIVE_LEVEL;
    Process = NULL;
    Thread = NULL;
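
    //
    // Three kinds of working sets come through here: the system cache
    // working set, ordinary process working sets, and session space
    // working sets.  Each is protected by its own lock, so acquire (or
    // assert ownership of) whichever one applies.
    //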

    if (WsInfo == &MmSystemCacheWs) {
        if (NeedLock == TRUE) {
            LOCK_SYSTEM_WS (OldIrql, PsGetCurrentThread ());
        }
        else {
            MM_SYSTEM_WS_LOCK_ASSERT ();
        }
    }
    else if (WsInfo->Flags.SessionSpace == 0) {
        Process = PsGetCurrentProcess ();
        if (NeedLock == TRUE) {
            LOCK_WS (Process);
        }
        if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
            Status = STATUS_PROCESS_IS_TERMINATING;
            goto Deleted;
        }
    }
    else {
        if (NeedLock == TRUE) {
            LOCK_SESSION_SPACE_WS (OldIrql, PsGetCurrentThread ());
        }
    }

    WorkingSetList = WsInfo->VmWorkingSetList;
    Wsle = WorkingSetList->Wsle;

    //
    // Attempt to remove the pages starting at the bottom.
    //

    LastFreed = WorkingSetList->LastEntry;
    for (Entry = WorkingSetList->FirstDynamic; Entry <= LastFreed; Entry += 1) {
        if (Wsle[Entry].u1.e1.Valid != 0) {
            PERFINFO_PAGE_INFO_DECL();

            PointerPte = MiGetPteAddress (Wsle[Entry].u1.VirtualAddress);
            PERFINFO_GET_PAGE_INFO(PointerPte);
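
            //
            // When only pages whose physical frames have been marked for
            // removal are to be trimmed, skip this entry unless the page
            // itself, its page table page, or (with three or more paging
            // levels) its page directory page has RemovalRequested set.
            //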

            if (MiTrimRemovalPagesOnly == TRUE) {
                PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
                Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
                if (Pfn1->u3.e1.RemovalRequested == 0) {
                    Pfn1 = MI_PFN_ELEMENT (Pfn1->u4.PteFrame);
                    if (Pfn1->u3.e1.RemovalRequested == 0) {
#if (_MI_PAGING_LEVELS >= 3)
                        Pfn1 = MI_PFN_ELEMENT (Pfn1->u4.PteFrame);
                        if (Pfn1->u3.e1.RemovalRequested == 0) {
                            continue;
                        }
#else
                        continue;
#endif
                    }
                }
            }

            if (MiFreeWsle (Entry, WsInfo, PointerPte)) {
                PERFINFO_LOG_WS_REMOVAL(PERFINFO_LOG_TYPE_OUTWS_EMPTYQ, WsInfo);
            }
        }
    }
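
    //
    // MiFreeWsle returns nonzero only when it actually released the entry,
    // so the perf log above records just the successful removals; any entry
    // that could not be freed simply remains in the working set.
    //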

    if (WsInfo != &MmSystemCacheWs && WsInfo->Flags.SessionSpace == 0) {
        MiRemoveWorkingSetPages (WorkingSetList, WsInfo);
    }
    WorkingSetList->NextSlot = WorkingSetList->FirstDynamic;
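
    //
    // Only ordinary process working sets have their working set list pages
    // compacted and released above; the system cache and session space
    // working set structures are left in place.
    //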

    //
    // Rebuild the free list, linking the free entries in order from the
    // front of the list to the end.
    //

    Last = 0;
    Entry = WorkingSetList->FirstDynamic;
    LastFreed = WorkingSetList->LastInitializedWsle;
    while (Entry <= LastFreed) {
        if (Wsle[Entry].u1.e1.Valid == 0) {
            if (Last == 0) {
                WorkingSetList->FirstFree = Entry;
            }
            else {
                Wsle[Last].u1.Long = Entry << MM_FREE_WSLE_SHIFT;
            }
            Last = Entry;
        }
        Entry += 1;
    }
    if (Last != 0) {
        Wsle[Last].u1.Long = WSLE_NULL_INDEX << MM_FREE_WSLE_SHIFT;  // End of list.
    }
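
    //
    // Note that FirstFree is updated only if at least one free entry was
    // found; the last free entry found terminates the chain with
    // WSLE_NULL_INDEX.
    //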

    Status = STATUS_SUCCESS;

Deleted:

    if (NeedLock == TRUE) {
        if (WsInfo == &MmSystemCacheWs) {
            UNLOCK_SYSTEM_WS (OldIrql);
        }
        else if (WsInfo->Flags.SessionSpace == 0) {
            UNLOCK_WS (Process);
        }
        else {
            UNLOCK_SESSION_SPACE_WS (OldIrql);
        }
    }

    return Status;
}

#if DBG

VOID
MiCheckNullIndex (
    IN PMMWSL WorkingSetList
    )
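
/*++

Routine Description:

    Debug-only sanity check: walks the working set list and counts the
    entries whose free-list link is WSLE_NULL_INDEX.  A well formed list
    is expected to contain exactly one such terminator unless the free
    list is empty (FirstFree == WSLE_NULL_INDEX), which is what the
    commented-out ASSERT below would verify.

--*/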

{
    PMMWSLE Wsle;
    ULONG j;
    ULONG Nulls = 0;

    Wsle = WorkingSetList->Wsle;
    for (j = 0; j <= WorkingSetList->LastInitializedWsle; j += 1) {
        if ((((Wsle[j].u1.Long)) >> MM_FREE_WSLE_SHIFT) == WSLE_NULL_INDEX) {
            Nulls += 1;
        }
    }
    // ASSERT ((Nulls == 1) || (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
    return;
}

#endif //DBG