Leaked source code of windows server 2003
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2348 lines
66 KiB

  1. /*++
  2. Copyright (c) 1999 Microsoft Corporation
  3. Module Name:
  4. dynmem.c
  5. Abstract:
  6. This module contains the routines which implement dynamically adding
  7. and removing physical memory from the system.
  8. Author:
  9. Landy Wang (landyw) 05-Feb-1999
  10. Revision History:
  11. --*/
  12. #include "mi.h"
  13. KGUARDED_MUTEX MmDynamicMemoryMutex;
  14. LOGICAL MiTrimRemovalPagesOnly = FALSE;
  15. #if DBG
  16. ULONG MiShowStuckPages;
  17. ULONG MiDynmemData[9];
  18. #endif
  19. #if defined (_MI_COMPRESSION)
  20. extern PMM_SET_COMPRESSION_THRESHOLD MiSetCompressionThreshold;
  21. #endif
  22. //
  23. // Leave the low 3 bits clear as this will be inserted into the PFN PteAddress.
  24. //
  25. #define PFN_REMOVED ((PMMPTE)(INT_PTR)(int)0x99887768)
  26. PFN_COUNT
  27. MiRemovePhysicalPages (
  28. IN PFN_NUMBER StartPage,
  29. IN PFN_NUMBER EndPage
  30. );
  31. NTSTATUS
  32. MiRemovePhysicalMemory (
  33. IN PPHYSICAL_ADDRESS StartAddress,
  34. IN OUT PLARGE_INTEGER NumberOfBytes,
  35. IN LOGICAL PermanentRemoval,
  36. IN ULONG Flags
  37. );
  38. #ifdef ALLOC_PRAGMA
  39. #pragma alloc_text(PAGE,MmRemovePhysicalMemory)
  40. #pragma alloc_text(PAGE,MmMarkPhysicalMemoryAsBad)
  41. #pragma alloc_text(PAGELK,MmAddPhysicalMemory)
  42. #pragma alloc_text(PAGELK,MmAddPhysicalMemoryEx)
  43. #pragma alloc_text(PAGELK,MiRemovePhysicalMemory)
  44. #pragma alloc_text(PAGELK,MmMarkPhysicalMemoryAsGood)
  45. #pragma alloc_text(PAGELK,MmGetPhysicalMemoryRanges)
  46. #pragma alloc_text(PAGELK,MiRemovePhysicalPages)
  47. #endif
  48. NTSTATUS
  49. MmAddPhysicalMemory (
  50. IN PPHYSICAL_ADDRESS StartAddress,
  51. IN OUT PLARGE_INTEGER NumberOfBytes
  52. )
  53. /*++
  54. Routine Description:
  55. A wrapper for MmAddPhysicalMemoryEx.
  56. Arguments:
  57. StartAddress - Supplies the starting physical address.
  58. NumberOfBytes - Supplies a pointer to the number of bytes being added.
  59. If any bytes were added (ie: STATUS_SUCCESS is being
  60. returned), the actual amount is returned here.
  61. Return Value:
  62. NTSTATUS.
  63. Environment:
  64. Kernel mode. PASSIVE level. No locks held.
  65. --*/
  66. {
  67. return MmAddPhysicalMemoryEx (StartAddress, NumberOfBytes, 0);
  68. }
  69. NTSTATUS
  70. MmAddPhysicalMemoryEx (
  71. IN PPHYSICAL_ADDRESS StartAddress,
  72. IN OUT PLARGE_INTEGER NumberOfBytes,
  73. IN ULONG Flags
  74. )
  75. /*++
  76. Routine Description:
  77. This routine adds the specified physical address range to the system.
  78. This includes initializing PFN database entries and adding it to the
  79. freelists.
  80. Arguments:
  81. StartAddress - Supplies the starting physical address.
  82. NumberOfBytes - Supplies a pointer to the number of bytes being added.
  83. If any bytes were added (ie: STATUS_SUCCESS is being
  84. returned), the actual amount is returned here.
  85. Flags - Supplies relevant flags describing the memory range.
  86. Return Value:
  87. NTSTATUS.
  88. Environment:
  89. Kernel mode. PASSIVE level. No locks held.
  90. --*/
  91. {
  92. ULONG i;
  93. PMMPFN Pfn1;
  94. KIRQL OldIrql;
  95. LOGICAL Inserted;
  96. LOGICAL Updated;
  97. MMPTE TempPte;
  98. PMMPTE PointerPte;
  99. PMMPTE LastPte;
  100. PFN_NUMBER PagesToReturn;
  101. PFN_NUMBER NumberOfPages;
  102. PFN_NUMBER start;
  103. PFN_NUMBER count;
  104. PFN_NUMBER StartPage;
  105. PFN_NUMBER EndPage;
  106. PFN_NUMBER PageFrameIndex;
  107. PFN_NUMBER Page;
  108. PFN_NUMBER LastPage;
  109. PFN_NUMBER TotalPagesAllowed;
  110. PFN_COUNT PagesNeeded;
  111. PPHYSICAL_MEMORY_DESCRIPTOR OldPhysicalMemoryBlock;
  112. PPHYSICAL_MEMORY_DESCRIPTOR NewPhysicalMemoryBlock;
  113. PPHYSICAL_MEMORY_RUN NewRun;
  114. LOGICAL PfnDatabaseIsPhysical;
  115. ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);
  116. if (BYTE_OFFSET(StartAddress->LowPart) != 0) {
  117. return STATUS_INVALID_PARAMETER_1;
  118. }
  119. if (BYTE_OFFSET(NumberOfBytes->LowPart) != 0) {
  120. return STATUS_INVALID_PARAMETER_2;
  121. }
  122. #if defined (_MI_COMPRESSION)
  123. if (Flags & ~MM_PHYSICAL_MEMORY_PRODUCED_VIA_COMPRESSION) {
  124. return STATUS_INVALID_PARAMETER_3;
  125. }
  126. #else
  127. if (Flags != 0) {
  128. return STATUS_INVALID_PARAMETER_3;
  129. }
  130. #endif
  131. //
  132. // The system must be configured for dynamic memory addition. This is
  133. // critical as only then is the database guaranteed to be non-sparse.
  134. //
  135. if (MmDynamicPfn == 0) {
  136. return STATUS_NOT_SUPPORTED;
  137. }
  138. if (MI_IS_PHYSICAL_ADDRESS(MmPfnDatabase)) {
  139. PfnDatabaseIsPhysical = TRUE;
  140. }
  141. else {
  142. PfnDatabaseIsPhysical = FALSE;
  143. }
  144. StartPage = (PFN_NUMBER)(StartAddress->QuadPart >> PAGE_SHIFT);
  145. NumberOfPages = (PFN_NUMBER)(NumberOfBytes->QuadPart >> PAGE_SHIFT);
  146. EndPage = StartPage + NumberOfPages;
  147. if (StartPage >= EndPage) {
  148. return STATUS_INVALID_PARAMETER_1;
  149. }
  150. if (EndPage - 1 > MmHighestPossiblePhysicalPage) {
  151. //
  152. // Truncate the request into something that can be mapped by the PFN
  153. // database.
  154. //
  155. EndPage = MmHighestPossiblePhysicalPage + 1;
  156. NumberOfPages = EndPage - StartPage;
  157. }
  158. if (StartPage >= EndPage) {
  159. return STATUS_INVALID_PARAMETER_1;
  160. }
  161. //
  162. // Ensure that the memory being added does not exceed the license
  163. // restrictions.
  164. //
  165. if (ExVerifySuite(DataCenter) == TRUE) {
  166. TotalPagesAllowed = MI_DTC_MAX_PAGES;
  167. }
  168. else if ((MmProductType != 0x00690057) &&
  169. (ExVerifySuite(Enterprise) == TRUE)) {
  170. TotalPagesAllowed = MI_ADS_MAX_PAGES;
  171. }
  172. else {
  173. TotalPagesAllowed = MI_DEFAULT_MAX_PAGES;
  174. }
  175. if (MmNumberOfPhysicalPages + NumberOfPages > TotalPagesAllowed) {
  176. //
  177. // Truncate the request appropriately.
  178. //
  179. NumberOfPages = TotalPagesAllowed - MmNumberOfPhysicalPages;
  180. EndPage = StartPage + NumberOfPages;
  181. }
  182. //
  183. // The range cannot wrap.
  184. //
  185. if (StartPage >= EndPage) {
  186. return STATUS_INVALID_PARAMETER_1;
  187. }
  188. KeAcquireGuardedMutex (&MmDynamicMemoryMutex);
  189. OldPhysicalMemoryBlock = MmPhysicalMemoryBlock;
  190. i = (sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
  191. (sizeof(PHYSICAL_MEMORY_RUN) * (MmPhysicalMemoryBlock->NumberOfRuns + 1)));
  192. NewPhysicalMemoryBlock = ExAllocatePoolWithTag (NonPagedPool,
  193. i,
  194. ' mM');
  195. if (NewPhysicalMemoryBlock == NULL) {
  196. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  197. return STATUS_INSUFFICIENT_RESOURCES;
  198. }
  199. //
  200. // The range cannot overlap any ranges that are already present.
  201. //
  202. start = 0;
  203. TempPte = ValidKernelPte;
  204. MmLockPagableSectionByHandle (ExPageLockHandle);
  205. LOCK_PFN (OldIrql);
  206. #if defined (_MI_COMPRESSION)
  207. //
  208. // Adding compression-generated ranges can only be done if the hardware
  209. // has already successfully announced itself.
  210. //
  211. if (Flags & MM_PHYSICAL_MEMORY_PRODUCED_VIA_COMPRESSION) {
  212. if (MiSetCompressionThreshold == NULL) {
  213. UNLOCK_PFN (OldIrql);
  214. MmUnlockPagableImageSection(ExPageLockHandle);
  215. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  216. ExFreePool (NewPhysicalMemoryBlock);
  217. return STATUS_NOT_SUPPORTED;
  218. }
  219. }
  220. #endif
  221. do {
  222. count = MmPhysicalMemoryBlock->Run[start].PageCount;
  223. Page = MmPhysicalMemoryBlock->Run[start].BasePage;
  224. if (count != 0) {
  225. LastPage = Page + count;
  226. if ((StartPage < Page) && (EndPage > Page)) {
  227. UNLOCK_PFN (OldIrql);
  228. MmUnlockPagableImageSection(ExPageLockHandle);
  229. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  230. ExFreePool (NewPhysicalMemoryBlock);
  231. return STATUS_CONFLICTING_ADDRESSES;
  232. }
  233. if ((StartPage >= Page) && (StartPage < LastPage)) {
  234. UNLOCK_PFN (OldIrql);
  235. MmUnlockPagableImageSection(ExPageLockHandle);
  236. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  237. ExFreePool (NewPhysicalMemoryBlock);
  238. return STATUS_CONFLICTING_ADDRESSES;
  239. }
  240. }
  241. start += 1;
  242. } while (start != MmPhysicalMemoryBlock->NumberOfRuns);
  243. //
  244. // Fill any gaps in the (sparse) PFN database needed for these pages,
  245. // unless the PFN database was physically allocated and completely
  246. // committed up front.
  247. //
  248. PagesNeeded = 0;
  249. if (PfnDatabaseIsPhysical == FALSE) {
  250. PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(StartPage));
  251. LastPte = MiGetPteAddress ((PCHAR)(MI_PFN_ELEMENT(EndPage)) - 1);
  252. while (PointerPte <= LastPte) {
  253. if (PointerPte->u.Hard.Valid == 0) {
  254. PagesNeeded += 1;
  255. }
  256. PointerPte += 1;
  257. }
  258. if (MmAvailablePages < PagesNeeded) {
  259. UNLOCK_PFN (OldIrql);
  260. MmUnlockPagableImageSection(ExPageLockHandle);
  261. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  262. ExFreePool (NewPhysicalMemoryBlock);
  263. return STATUS_INSUFFICIENT_RESOURCES;
  264. }
  265. PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(StartPage));
  266. while (PointerPte <= LastPte) {
  267. if (PointerPte->u.Hard.Valid == 0) {
  268. PageFrameIndex = MiRemoveZeroPage(MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
  269. MiInitializePfn (PageFrameIndex, PointerPte, 0);
  270. TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
  271. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  272. }
  273. PointerPte += 1;
  274. }
  275. MI_DECREMENT_RESIDENT_AVAILABLE (PagesNeeded, MM_RESAVAIL_ALLOCATE_HOTADD_PFNDB);
  276. }
  277. //
  278. // If the new range is adjacent to an existing range, just merge it into
  279. // the old block. Otherwise use the new block as a new entry will have to
  280. // be used.
  281. //
  282. NewPhysicalMemoryBlock->NumberOfRuns = MmPhysicalMemoryBlock->NumberOfRuns + 1;
  283. NewPhysicalMemoryBlock->NumberOfPages = MmPhysicalMemoryBlock->NumberOfPages + NumberOfPages;
  284. NewRun = &NewPhysicalMemoryBlock->Run[0];
  285. start = 0;
  286. Inserted = FALSE;
  287. Updated = FALSE;
  288. do {
  289. Page = MmPhysicalMemoryBlock->Run[start].BasePage;
  290. count = MmPhysicalMemoryBlock->Run[start].PageCount;
  291. if (Inserted == FALSE) {
  292. //
  293. // Note overlaps into adjacent ranges were already checked above.
  294. //
  295. if (StartPage == Page + count) {
  296. MmPhysicalMemoryBlock->Run[start].PageCount += NumberOfPages;
  297. OldPhysicalMemoryBlock = NewPhysicalMemoryBlock;
  298. MmPhysicalMemoryBlock->NumberOfPages += NumberOfPages;
  299. //
  300. // Coalesce below and above to avoid leaving zero length gaps
  301. // as these gaps would prevent callers from removing ranges
  302. // the span them.
  303. //
  304. if (start + 1 < MmPhysicalMemoryBlock->NumberOfRuns) {
  305. start += 1;
  306. Page = MmPhysicalMemoryBlock->Run[start].BasePage;
  307. count = MmPhysicalMemoryBlock->Run[start].PageCount;
  308. if (StartPage + NumberOfPages == Page) {
  309. MmPhysicalMemoryBlock->Run[start - 1].PageCount +=
  310. count;
  311. MmPhysicalMemoryBlock->NumberOfRuns -= 1;
  312. //
  313. // Copy any remaining entries.
  314. //
  315. if (start != MmPhysicalMemoryBlock->NumberOfRuns) {
  316. RtlMoveMemory (&MmPhysicalMemoryBlock->Run[start],
  317. &MmPhysicalMemoryBlock->Run[start + 1],
  318. (MmPhysicalMemoryBlock->NumberOfRuns - start) * sizeof (PHYSICAL_MEMORY_RUN));
  319. }
  320. }
  321. }
  322. Updated = TRUE;
  323. break;
  324. }
  325. if (StartPage + NumberOfPages == Page) {
  326. MmPhysicalMemoryBlock->Run[start].BasePage = StartPage;
  327. MmPhysicalMemoryBlock->Run[start].PageCount += NumberOfPages;
  328. OldPhysicalMemoryBlock = NewPhysicalMemoryBlock;
  329. MmPhysicalMemoryBlock->NumberOfPages += NumberOfPages;
  330. Updated = TRUE;
  331. break;
  332. }
  333. if (StartPage + NumberOfPages <= Page) {
  334. if (start + 1 < MmPhysicalMemoryBlock->NumberOfRuns) {
  335. if (StartPage + NumberOfPages <= MmPhysicalMemoryBlock->Run[start + 1].BasePage) {
  336. //
  337. // Don't insert here - the new entry really belongs
  338. // (at least) one entry further down.
  339. //
  340. continue;
  341. }
  342. }
  343. NewRun->BasePage = StartPage;
  344. NewRun->PageCount = NumberOfPages;
  345. NewRun += 1;
  346. Inserted = TRUE;
  347. Updated = TRUE;
  348. }
  349. }
  350. *NewRun = MmPhysicalMemoryBlock->Run[start];
  351. NewRun += 1;
  352. start += 1;
  353. } while (start != MmPhysicalMemoryBlock->NumberOfRuns);
  354. //
  355. // If the memory block has not been updated, then the new entry must
  356. // be added at the very end.
  357. //
  358. if (Updated == FALSE) {
  359. ASSERT (Inserted == FALSE);
  360. NewRun->BasePage = StartPage;
  361. NewRun->PageCount = NumberOfPages;
  362. Inserted = TRUE;
  363. }
  364. //
  365. // Repoint the MmPhysicalMemoryBlock at the new chunk, free the old after
  366. // releasing the PFN lock.
  367. //
  368. if (Inserted == TRUE) {
  369. OldPhysicalMemoryBlock = MmPhysicalMemoryBlock;
  370. MmPhysicalMemoryBlock = NewPhysicalMemoryBlock;
  371. }
  372. //
  373. // Note that the page directory (page parent entries on Win64) must be
  374. // filled in at system boot so that already-created processes do not fault
  375. // when referencing the new PFNs.
  376. //
  377. //
  378. // Walk through the memory descriptors and add pages to the
  379. // free list in the PFN database.
  380. //
  381. PageFrameIndex = StartPage;
  382. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  383. if (EndPage - 1 > MmHighestPhysicalPage) {
  384. MmHighestPhysicalPage = EndPage - 1;
  385. }
  386. while (PageFrameIndex < EndPage) {
  387. ASSERT (Pfn1->u2.ShareCount == 0);
  388. ASSERT (Pfn1->u3.e2.ShortFlags == 0);
  389. ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
  390. ASSERT64 (Pfn1->UsedPageTableEntries == 0);
  391. ASSERT (Pfn1->OriginalPte.u.Long == ZeroKernelPte.u.Long);
  392. ASSERT (Pfn1->u4.PteFrame == 0);
  393. ASSERT ((Pfn1->PteAddress == PFN_REMOVED) ||
  394. (Pfn1->PteAddress == (PMMPTE)(UINT_PTR)0));
  395. //
  396. // Initialize the color for NUMA purposes.
  397. //
  398. MiDetermineNode (PageFrameIndex, Pfn1);
  399. //
  400. // Set the PTE address to the physical page for
  401. // virtual address alignment checking.
  402. //
  403. Pfn1->PteAddress = (PMMPTE)(PageFrameIndex << PTE_SHIFT);
  404. MiInsertPageInFreeList (PageFrameIndex);
  405. PageFrameIndex += 1;
  406. Pfn1 += 1;
  407. }
  408. MmNumberOfPhysicalPages += (PFN_COUNT)NumberOfPages;
  409. //
  410. // Only non-compression ranges get to contribute to ResidentAvailable as
  411. // adding compression ranges to this could crash the system.
  412. //
  413. // For the same reason, compression range additions also need to subtract
  414. // from AvailablePages the amount the above MiInsertPageInFreeList added.
  415. //
  416. PagesToReturn = NumberOfPages;
  417. #if defined (_MI_COMPRESSION)
  418. if (Flags & MM_PHYSICAL_MEMORY_PRODUCED_VIA_COMPRESSION) {
  419. MmAvailablePages -= (PFN_COUNT) NumberOfPages;
  420. //
  421. // Signal applications if allocating these pages caused a threshold cross.
  422. //
  423. MiNotifyMemoryEvents ();
  424. MiNumberOfCompressionPages += NumberOfPages;
  425. PagesToReturn = 0;
  426. }
  427. else {
  428. //
  429. // Since real (noncompression-generated) physical memory was added,
  430. // rearm the interrupt to occur at a higher threshold.
  431. //
  432. MiArmCompressionInterrupt ();
  433. }
  434. #endif
  435. RtlSetBits (&MiPfnBitMap, (ULONG) StartPage, (ULONG) (EndPage - StartPage));
  436. UNLOCK_PFN (OldIrql);
  437. MI_INCREMENT_RESIDENT_AVAILABLE (PagesToReturn,
  438. MM_RESAVAIL_FREE_HOTADD_MEMORY);
  439. InterlockedExchangeAdd ((PLONG)&SharedUserData->NumberOfPhysicalPages,
  440. (LONG) NumberOfPages);
  441. //
  442. // Carefully increase all commit limits to reflect the additional memory -
  443. // notice the current usage must be done first so no one else cuts the
  444. // line.
  445. //
  446. InterlockedExchangeAddSizeT (&MmTotalCommittedPages, PagesNeeded);
  447. InterlockedExchangeAddSizeT (&MmTotalCommitLimitMaximum, NumberOfPages);
  448. InterlockedExchangeAddSizeT (&MmTotalCommitLimit, NumberOfPages);
  449. MmUnlockPagableImageSection(ExPageLockHandle);
  450. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  451. ExFreePool (OldPhysicalMemoryBlock);
  452. //
  453. // Indicate number of bytes actually added to our caller.
  454. //
  455. NumberOfBytes->QuadPart = (ULONGLONG)NumberOfPages * PAGE_SIZE;
  456. return STATUS_SUCCESS;
  457. }
  458. NTSTATUS
  459. MiRemovePhysicalMemory (
  460. IN PPHYSICAL_ADDRESS StartAddress,
  461. IN OUT PLARGE_INTEGER NumberOfBytes,
  462. IN LOGICAL PermanentRemoval,
  463. IN ULONG Flags
  464. )
  465. /*++
  466. Routine Description:
  467. This routine attempts to remove the specified physical address range
  468. from the system.
  469. Arguments:
  470. StartAddress - Supplies the starting physical address.
  471. NumberOfBytes - Supplies a pointer to the number of bytes being removed.
  472. PermanentRemoval - Supplies TRUE if the memory is being permanently
  473. (ie: physically) removed. FALSE if not (ie: just a
  474. bad page detected via ECC which is being marked
  475. "don't-use".
  476. Return Value:
  477. NTSTATUS.
  478. Environment:
  479. Kernel mode. PASSIVE level. No locks held.
  480. --*/
  481. {
  482. ULONG i;
  483. ULONG Additional;
  484. PFN_NUMBER Page;
  485. PFN_NUMBER LastPage;
  486. PFN_NUMBER OriginalLastPage;
  487. PFN_NUMBER start;
  488. PFN_NUMBER PagesReleased;
  489. PFN_NUMBER ResAvailPagesReleased;
  490. PMMPFN Pfn1;
  491. PMMPFN StartPfn;
  492. PMMPFN EndPfn;
  493. KIRQL OldIrql;
  494. PFN_NUMBER StartPage;
  495. PFN_NUMBER EndPage;
  496. PFN_COUNT NumberOfPages;
  497. PFN_COUNT ParityPages;
  498. SPFN_NUMBER MaxPages;
  499. PFN_NUMBER PageFrameIndex;
  500. PFN_NUMBER RemovedPages;
  501. PFN_NUMBER RemovedPagesThisPass;
  502. LOGICAL Inserted;
  503. NTSTATUS Status;
  504. PMMPTE PointerPte;
  505. PMMPTE EndPte;
  506. PVOID VirtualAddress;
  507. PPHYSICAL_MEMORY_DESCRIPTOR OldPhysicalMemoryBlock;
  508. PPHYSICAL_MEMORY_DESCRIPTOR NewPhysicalMemoryBlock;
  509. PPHYSICAL_MEMORY_RUN NewRun;
  510. LOGICAL PfnDatabaseIsPhysical;
  511. PFN_NUMBER HighestPossiblePhysicalPage;
  512. PFN_COUNT FluidPages;
  513. MMPTE_FLUSH_LIST PteFlushList;
  514. ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);
  515. ASSERT (BYTE_OFFSET(NumberOfBytes->LowPart) == 0);
  516. ASSERT (BYTE_OFFSET(StartAddress->LowPart) == 0);
  517. if (MI_IS_PHYSICAL_ADDRESS(MmPfnDatabase)) {
  518. if (PermanentRemoval == TRUE) {
  519. //
  520. // The system must be configured for dynamic memory addition. This
  521. // is not strictly required to remove the memory, but it's better
  522. // to check for it now under the assumption that the administrator
  523. // is probably going to want to add this range of memory back in -
  524. // better to give the error now and refuse the removal than to
  525. // refuse the addition later.
  526. //
  527. if (MmDynamicPfn == 0) {
  528. return STATUS_NOT_SUPPORTED;
  529. }
  530. }
  531. PfnDatabaseIsPhysical = TRUE;
  532. }
  533. else {
  534. PfnDatabaseIsPhysical = FALSE;
  535. }
  536. if (PermanentRemoval == TRUE) {
  537. HighestPossiblePhysicalPage = MmHighestPossiblePhysicalPage;
  538. FluidPages = 100;
  539. }
  540. else {
  541. HighestPossiblePhysicalPage = MmHighestPhysicalPage;
  542. FluidPages = 0;
  543. }
  544. StartPage = (PFN_NUMBER)(StartAddress->QuadPart >> PAGE_SHIFT);
  545. NumberOfPages = (PFN_COUNT)(NumberOfBytes->QuadPart >> PAGE_SHIFT);
  546. EndPage = StartPage + NumberOfPages;
  547. if (StartPage >= EndPage) {
  548. return STATUS_INVALID_PARAMETER_1;
  549. }
  550. if (EndPage - 1 > HighestPossiblePhysicalPage) {
  551. //
  552. // Truncate the request into something that can be mapped by the PFN
  553. // database.
  554. //
  555. EndPage = MmHighestPossiblePhysicalPage + 1;
  556. NumberOfPages = (PFN_COUNT)(EndPage - StartPage);
  557. }
  558. if (StartPage >= EndPage) {
  559. return STATUS_INVALID_PARAMETER_1;
  560. }
  561. //
  562. // The range cannot wrap.
  563. //
  564. if (StartPage >= EndPage) {
  565. return STATUS_INVALID_PARAMETER_1;
  566. }
  567. #if !defined (_MI_COMPRESSION)
  568. if (Flags != 0) {
  569. return STATUS_INVALID_PARAMETER_4;
  570. }
  571. #endif
  572. StartPfn = MI_PFN_ELEMENT (StartPage);
  573. EndPfn = MI_PFN_ELEMENT (EndPage);
  574. KeAcquireGuardedMutex (&MmDynamicMemoryMutex);
  575. //
  576. // Make sure the caller is freeing real memory (ie: PFN-backed).
  577. //
  578. if (RtlAreBitsSet (&MiPfnBitMap,
  579. (PFN_COUNT) StartPage,
  580. (PFN_COUNT) (EndPage - StartPage)) == FALSE) {
  581. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  582. return STATUS_INVALID_PARAMETER_1;
  583. }
  584. #if DBG
  585. MiDynmemData[0] += 1;
  586. #endif
  587. //
  588. // Attempt to decrease all commit limits to reflect the removed memory.
  589. //
  590. if (MiChargeTemporaryCommitmentForReduction (NumberOfPages + FluidPages) == FALSE) {
  591. #if DBG
  592. MiDynmemData[1] += 1;
  593. #endif
  594. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  595. return STATUS_INSUFFICIENT_RESOURCES;
  596. }
  597. //
  598. // Reduce the systemwide commit limit - note this is carefully done
  599. // *PRIOR* to returning this commitment so no one else (including a DPC
  600. // in this very thread) can consume past the limit.
  601. //
  602. InterlockedExchangeAddSizeT (&MmTotalCommitLimit, 0 - (PFN_NUMBER)NumberOfPages);
  603. InterlockedExchangeAddSizeT (&MmTotalCommitLimitMaximum, 0 - (PFN_NUMBER)NumberOfPages);
  604. //
  605. // Now that the systemwide commit limit has been lowered, the amount
  606. // we have removed can be safely returned.
  607. //
  608. MiReturnCommitment (NumberOfPages + FluidPages);
  609. MmLockPagableSectionByHandle (ExPageLockHandle);
  610. //
  611. // Check for outstanding promises that cannot be broken.
  612. //
  613. LOCK_PFN (OldIrql);
  614. if (PermanentRemoval == FALSE) {
  615. //
  616. // If it's just the removal of ECC-marked bad pages, then don't
  617. // allow the caller to remove any pages that have already been
  618. // ECC-removed. This is to prevent recursive erroneous charges.
  619. //
  620. for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) {
  621. if (Pfn1->u3.e1.ParityError == 1) {
  622. UNLOCK_PFN (OldIrql);
  623. Status = STATUS_INVALID_PARAMETER_2;
  624. goto giveup2;
  625. }
  626. }
  627. }
  628. MaxPages = MI_NONPAGABLE_MEMORY_AVAILABLE() - FluidPages;
  629. if ((SPFN_NUMBER)NumberOfPages > MaxPages) {
  630. #if DBG
  631. MiDynmemData[2] += 1;
  632. #endif
  633. UNLOCK_PFN (OldIrql);
  634. Status = STATUS_INSUFFICIENT_RESOURCES;
  635. goto giveup2;
  636. }
  637. //
  638. // The range must be contained in a single entry. It is
  639. // permissible for it to be part of a single entry, but it
  640. // must not cross multiple entries.
  641. //
  642. Additional = (ULONG)-2;
  643. start = 0;
  644. do {
  645. Page = MmPhysicalMemoryBlock->Run[start].BasePage;
  646. LastPage = Page + MmPhysicalMemoryBlock->Run[start].PageCount;
  647. if ((StartPage >= Page) && (EndPage <= LastPage)) {
  648. if ((StartPage == Page) && (EndPage == LastPage)) {
  649. Additional = (ULONG)-1;
  650. }
  651. else if ((StartPage == Page) || (EndPage == LastPage)) {
  652. Additional = 0;
  653. }
  654. else {
  655. Additional = 1;
  656. }
  657. break;
  658. }
  659. start += 1;
  660. } while (start != MmPhysicalMemoryBlock->NumberOfRuns);
  661. if (Additional == (ULONG)-2) {
  662. #if DBG
  663. MiDynmemData[3] += 1;
  664. #endif
  665. UNLOCK_PFN (OldIrql);
  666. Status = STATUS_CONFLICTING_ADDRESSES;
  667. goto giveup2;
  668. }
  669. for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) {
  670. Pfn1->u3.e1.RemovalRequested = 1;
  671. }
  672. if (PermanentRemoval == TRUE) {
  673. MmNumberOfPhysicalPages -= NumberOfPages;
  674. InterlockedExchangeAdd ((PLONG)&SharedUserData->NumberOfPhysicalPages,
  675. 0 - NumberOfPages);
  676. }
  677. #if defined (_MI_COMPRESSION)
  678. //
  679. // Only removal of non-compression ranges decrement ResidentAvailable as
  680. // only those ranges actually incremented this when they were added.
  681. //
  682. if ((Flags & MM_PHYSICAL_MEMORY_PRODUCED_VIA_COMPRESSION) == 0) {
  683. MI_DECREMENT_RESIDENT_AVAILABLE (NumberOfPages, MM_RESAVAIL_ALLOCATE_HOTREMOVE_MEMORY);
  684. //
  685. // Since real (noncompression-generated) physical memory is being
  686. // removed, rearm the interrupt to occur at a lower threshold.
  687. //
  688. if (PermanentRemoval == TRUE) {
  689. MiArmCompressionInterrupt ();
  690. }
  691. }
  692. #else
  693. MI_DECREMENT_RESIDENT_AVAILABLE (NumberOfPages, MM_RESAVAIL_ALLOCATE_HOTREMOVE_MEMORY);
  694. #endif
  695. //
  696. // The free and zero lists must be pruned now before releasing the PFN
  697. // lock otherwise if another thread allocates the page from these lists,
  698. // the allocation will clear the RemovalRequested flag forever.
  699. //
  700. RemovedPages = MiRemovePhysicalPages (StartPage, EndPage);
  701. #if defined (_MI_COMPRESSION)
  702. //
  703. // Compression range removals add back into AvailablePages the same
  704. // amount that MiUnlinkPageFromList removes (as the original addition
  705. // of these ranges never bumps this counter).
  706. //
  707. if (Flags & MM_PHYSICAL_MEMORY_PRODUCED_VIA_COMPRESSION) {
  708. MmAvailablePages += (PFN_COUNT) RemovedPages;
  709. //
  710. // Signal applications if allocating these pages caused a threshold cross.
  711. //
  712. MiNotifyMemoryEvents ();
  713. MiNumberOfCompressionPages -= RemovedPages;
  714. }
  715. #endif
  716. if (RemovedPages != NumberOfPages) {
  717. #if DBG
  718. retry:
  719. #endif
  720. Pfn1 = StartPfn;
  721. InterlockedIncrement (&MiDelayPageFaults);
  722. for (i = 0; i < 5; i += 1) {
  723. UNLOCK_PFN (OldIrql);
  724. //
  725. // Attempt to move pages to the standby list. Note that only the
  726. // pages with RemovalRequested set are moved.
  727. //
  728. MiTrimRemovalPagesOnly = TRUE;
  729. MmEmptyAllWorkingSets ();
  730. MiTrimRemovalPagesOnly = FALSE;
  731. MiFlushAllPages ();
  732. KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmHalfSecond);
  733. if (i >= 2) {
  734. //
  735. // Purge the transition list as transition pages keep
  736. // page tables from being taken and we need to try harder.
  737. //
  738. MiPurgeTransitionList ();
  739. }
  740. LOCK_PFN (OldIrql);
  741. RemovedPagesThisPass = MiRemovePhysicalPages (StartPage, EndPage);
  742. RemovedPages += RemovedPagesThisPass;
  743. #if defined (_MI_COMPRESSION)
  744. //
  745. // Compression range removals add back into AvailablePages the same
  746. // amount that MiUnlinkPageFromList removes (as the original
  747. // addition of these ranges never bumps this counter).
  748. //
  749. if (Flags & MM_PHYSICAL_MEMORY_PRODUCED_VIA_COMPRESSION) {
  750. MmAvailablePages += (PFN_COUNT) RemovedPagesThisPass;
  751. //
  752. // Signal applications if allocating these pages
  753. // caused a threshold cross.
  754. //
  755. MiNotifyMemoryEvents ();
  756. MiNumberOfCompressionPages -= RemovedPagesThisPass;
  757. }
  758. #endif
  759. if (RemovedPages == NumberOfPages) {
  760. break;
  761. }
  762. //
  763. // RemovedPages doesn't include pages that were freed directly
  764. // to the bad page list via MiDecrementReferenceCount or by
  765. // ECC marking. So use the above check purely as an optimization -
  766. // and walk here before ever giving up.
  767. //
  768. for ( ; Pfn1 < EndPfn; Pfn1 += 1) {
  769. if (Pfn1->u3.e1.PageLocation != BadPageList) {
  770. break;
  771. }
  772. }
  773. if (Pfn1 == EndPfn) {
  774. RemovedPages = NumberOfPages;
  775. break;
  776. }
  777. }
  778. InterlockedDecrement (&MiDelayPageFaults);
  779. }
  780. if (RemovedPages != NumberOfPages) {
  781. #if DBG
  782. MiDynmemData[4] += 1;
  783. if (MiShowStuckPages != 0) {
  784. RemovedPages = 0;
  785. for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) {
  786. if (Pfn1->u3.e1.PageLocation != BadPageList) {
  787. RemovedPages += 1;
  788. }
  789. }
  790. ASSERT (RemovedPages != 0);
  791. DbgPrint("MiRemovePhysicalMemory : could not get %d of %d pages\n",
  792. RemovedPages, NumberOfPages);
  793. if (MiShowStuckPages & 0x2) {
  794. ULONG PfnsPrinted;
  795. ULONG EnoughShown;
  796. PMMPFN FirstPfn;
  797. PFN_COUNT PfnCount;
  798. PfnCount = 0;
  799. PfnsPrinted = 0;
  800. EnoughShown = 100;
  801. //
  802. // Initializing FirstPfn is not needed for correctness
  803. // but without it the compiler cannot compile this code
  804. // W4 to check for use of uninitialized variables.
  805. //
  806. FirstPfn = NULL;
  807. if (MiShowStuckPages & 0x4) {
  808. EnoughShown = (ULONG)-1;
  809. }
  810. DbgPrint("Stuck PFN list: ");
  811. for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) {
  812. if (Pfn1->u3.e1.PageLocation != BadPageList) {
  813. if (PfnCount == 0) {
  814. FirstPfn = Pfn1;
  815. }
  816. PfnCount += 1;
  817. }
  818. else {
  819. if (PfnCount != 0) {
  820. DbgPrint("%x -> %x ; ", MI_PFN_ELEMENT_TO_INDEX (FirstPfn),
  821. MI_PFN_ELEMENT_TO_INDEX (FirstPfn + PfnCount - 1));
  822. PfnsPrinted += 1;
  823. if (PfnsPrinted == EnoughShown) {
  824. break;
  825. }
  826. PfnCount = 0;
  827. }
  828. }
  829. }
  830. if (PfnCount != 0) {
  831. DbgPrint("%x -> %x ; ", MI_PFN_ELEMENT_TO_INDEX (FirstPfn),
  832. MI_PFN_ELEMENT_TO_INDEX (FirstPfn + PfnCount - 1));
  833. }
  834. DbgPrint("\n");
  835. }
  836. if (MiShowStuckPages & 0x8) {
  837. DbgBreakPoint ();
  838. }
  839. if (MiShowStuckPages & 0x10) {
  840. goto retry;
  841. }
  842. }
  843. #endif
  844. UNLOCK_PFN (OldIrql);
  845. Status = STATUS_NO_MEMORY;
  846. goto giveup;
  847. }
  848. #if DBG
  849. for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) {
  850. ASSERT (Pfn1->u3.e1.PageLocation == BadPageList);
  851. }
  852. #endif
  853. //
  854. // All the pages in the range have been removed.
  855. //
  856. if (PermanentRemoval == FALSE) {
  857. //
  858. // If it's just the removal of ECC-marked bad pages, then no
  859. // adjustment to the physical memory block ranges or PFN database
  860. // trimming is needed. Exit now.
  861. //
  862. for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) {
  863. ASSERT (Pfn1->u3.e1.ParityError == 0);
  864. Pfn1->u3.e1.ParityError = 1;
  865. }
  866. UNLOCK_PFN (OldIrql);
  867. MmUnlockPagableImageSection(ExPageLockHandle);
  868. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  869. NumberOfBytes->QuadPart = (ULONGLONG)NumberOfPages * PAGE_SIZE;
  870. return STATUS_SUCCESS;
  871. }
  872. //
  873. // Update the physical memory blocks and other associated housekeeping.
  874. //
  875. if (Additional == 0) {
  876. //
  877. // The range can be split off from an end of an existing chunk so no
  878. // pool growth or shrinkage is required.
  879. //
  880. NewPhysicalMemoryBlock = MmPhysicalMemoryBlock;
  881. OldPhysicalMemoryBlock = NULL;
  882. }
  883. else {
  884. //
  885. // The range cannot be split off from an end of an existing chunk so
  886. // pool growth or shrinkage is required.
  887. //
  888. UNLOCK_PFN (OldIrql);
  889. i = (sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
  890. (sizeof(PHYSICAL_MEMORY_RUN) * (MmPhysicalMemoryBlock->NumberOfRuns + Additional)));
  891. NewPhysicalMemoryBlock = ExAllocatePoolWithTag (NonPagedPool,
  892. i,
  893. ' mM');
  894. if (NewPhysicalMemoryBlock == NULL) {
  895. Status = STATUS_INSUFFICIENT_RESOURCES;
  896. #if DBG
  897. MiDynmemData[5] += 1;
  898. #endif
  899. goto giveup;
  900. }
  901. OldPhysicalMemoryBlock = MmPhysicalMemoryBlock;
  902. RtlZeroMemory (NewPhysicalMemoryBlock, i);
  903. LOCK_PFN (OldIrql);
  904. }
  905. //
  906. // Remove or split the requested range from the existing memory block.
  907. //
  908. NewPhysicalMemoryBlock->NumberOfRuns = MmPhysicalMemoryBlock->NumberOfRuns + Additional;
  909. NewPhysicalMemoryBlock->NumberOfPages = MmPhysicalMemoryBlock->NumberOfPages - NumberOfPages;
  910. NewRun = &NewPhysicalMemoryBlock->Run[0];
  911. start = 0;
  912. Inserted = FALSE;
  913. do {
  914. Page = MmPhysicalMemoryBlock->Run[start].BasePage;
  915. LastPage = Page + MmPhysicalMemoryBlock->Run[start].PageCount;
  916. if (Inserted == FALSE) {
  917. if ((StartPage >= Page) && (EndPage <= LastPage)) {
  918. if ((StartPage == Page) && (EndPage == LastPage)) {
  919. ASSERT (Additional == -1);
  920. start += 1;
  921. continue;
  922. }
  923. else if ((StartPage == Page) || (EndPage == LastPage)) {
  924. ASSERT (Additional == 0);
  925. if (StartPage == Page) {
  926. MmPhysicalMemoryBlock->Run[start].BasePage += NumberOfPages;
  927. }
  928. MmPhysicalMemoryBlock->Run[start].PageCount -= NumberOfPages;
  929. }
  930. else {
  931. ASSERT (Additional == 1);
  932. OriginalLastPage = LastPage;
  933. MmPhysicalMemoryBlock->Run[start].PageCount =
  934. StartPage - MmPhysicalMemoryBlock->Run[start].BasePage;
  935. *NewRun = MmPhysicalMemoryBlock->Run[start];
  936. NewRun += 1;
  937. NewRun->BasePage = EndPage;
  938. NewRun->PageCount = OriginalLastPage - EndPage;
  939. NewRun += 1;
  940. start += 1;
  941. continue;
  942. }
  943. Inserted = TRUE;
  944. }
  945. }
  946. *NewRun = MmPhysicalMemoryBlock->Run[start];
  947. NewRun += 1;
  948. start += 1;
  949. } while (start != MmPhysicalMemoryBlock->NumberOfRuns);
  950. //
  951. // Repoint the MmPhysicalMemoryBlock at the new chunk.
  952. // Free the old block after releasing the PFN lock.
  953. //
  954. MmPhysicalMemoryBlock = NewPhysicalMemoryBlock;
  955. if (EndPage - 1 == MmHighestPhysicalPage) {
  956. MmHighestPhysicalPage = StartPage - 1;
  957. }
  958. //
  959. // Throw away all the removed pages that are currently enqueued.
  960. //
  961. ParityPages = 0;
  962. for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) {
  963. ASSERT (Pfn1->u3.e1.PageLocation == BadPageList);
  964. ASSERT (Pfn1->u3.e1.RemovalRequested == 1);
  965. //
  966. // Some pages may have already been ECC-removed. For these pages,
  967. // the commit limits and resident available pages have already been
  968. // adjusted - tally them here so we can undo the extraneous charge
  969. // just applied.
  970. //
  971. if (Pfn1->u3.e1.ParityError == 1) {
  972. ParityPages += 1;
  973. }
  974. MiUnlinkPageFromList (Pfn1);
  975. ASSERT (Pfn1->u1.Flink == 0);
  976. ASSERT (Pfn1->u2.Blink == 0);
  977. ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
  978. ASSERT64 (Pfn1->UsedPageTableEntries == 0);
  979. Pfn1->PteAddress = PFN_REMOVED;
  980. //
  981. // Note this clears ParityError among other flags...
  982. //
  983. Pfn1->u3.e2.ShortFlags = 0;
  984. Pfn1->OriginalPte.u.Long = ZeroKernelPte.u.Long;
  985. Pfn1->u4.PteFrame = 0;
  986. }
  987. //
  988. // Now that the removed pages have been discarded, eliminate the PFN
  989. // entries that mapped them. Straddling entries left over from an
  990. // adjacent earlier removal are not collapsed at this point.
  991. //
  992. //
  993. PagesReleased = 0;
  994. PteFlushList.Count = 0;
  995. if (PfnDatabaseIsPhysical == FALSE) {
  996. VirtualAddress = (PVOID)ROUND_TO_PAGES(MI_PFN_ELEMENT(StartPage));
  997. PointerPte = MiGetPteAddress (VirtualAddress);
  998. EndPte = MiGetPteAddress (PAGE_ALIGN(MI_PFN_ELEMENT(EndPage)));
  999. while (PointerPte < EndPte) {
  1000. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  1001. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1002. ASSERT (Pfn1->u2.ShareCount == 1);
  1003. ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
  1004. Pfn1->u2.ShareCount = 0;
  1005. MI_SET_PFN_DELETED (Pfn1);
  1006. #if DBG
  1007. Pfn1->u3.e1.PageLocation = StandbyPageList;
  1008. #endif //DBG
  1009. MiDecrementReferenceCount (Pfn1, PageFrameIndex);
  1010. MI_WRITE_INVALID_PTE (PointerPte, ZeroKernelPte);
  1011. if (PteFlushList.Count < MM_MAXIMUM_FLUSH_COUNT) {
  1012. PteFlushList.FlushVa[PteFlushList.Count] = VirtualAddress;
  1013. PteFlushList.Count += 1;
  1014. }
  1015. PagesReleased += 1;
  1016. PointerPte += 1;
  1017. VirtualAddress = (PVOID)((PCHAR)VirtualAddress + PAGE_SIZE);
  1018. }
  1019. }
  1020. if (PteFlushList.Count != 0) {
  1021. MiFlushPteList (&PteFlushList, TRUE);
  1022. }
  1023. #if DBG
  1024. MiDynmemData[6] += 1;
  1025. #endif
  1026. RtlClearBits (&MiPfnBitMap,
  1027. (ULONG) MI_PFN_ELEMENT_TO_INDEX (StartPfn),
  1028. (ULONG) (EndPfn - StartPfn));
  1029. UNLOCK_PFN (OldIrql);
  1030. //
  1031. // Give back anything that has been double-charged.
  1032. //
  1033. ResAvailPagesReleased = PagesReleased;
  1034. if (ParityPages != 0) {
  1035. ResAvailPagesReleased += ParityPages;
  1036. }
  1037. if (ResAvailPagesReleased != 0) {
  1038. MI_INCREMENT_RESIDENT_AVAILABLE (ResAvailPagesReleased,
  1039. MM_RESAVAIL_FREE_HOTREMOVE_MEMORY1);
  1040. }
  1041. //
  1042. // Give back anything that has been double-charged.
  1043. //
  1044. if (ParityPages != 0) {
  1045. InterlockedExchangeAddSizeT (&MmTotalCommitLimitMaximum, ParityPages);
  1046. InterlockedExchangeAddSizeT (&MmTotalCommitLimit, ParityPages);
  1047. }
  1048. if (PagesReleased != 0) {
  1049. MiReturnCommitment (PagesReleased);
  1050. }
  1051. MmUnlockPagableImageSection(ExPageLockHandle);
  1052. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  1053. if (OldPhysicalMemoryBlock != NULL) {
  1054. ExFreePool (OldPhysicalMemoryBlock);
  1055. }
  1056. NumberOfBytes->QuadPart = (ULONGLONG)NumberOfPages * PAGE_SIZE;
  1057. return STATUS_SUCCESS;
  1058. giveup:
  1059. //
  1060. // All the pages in the range were not obtained. Back everything out.
  1061. //
  1062. PageFrameIndex = StartPage;
  1063. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1064. LOCK_PFN (OldIrql);
  1065. while (PageFrameIndex < EndPage) {
  1066. ASSERT (Pfn1->u3.e1.RemovalRequested == 1);
  1067. Pfn1->u3.e1.RemovalRequested = 0;
  1068. if (Pfn1->u3.e1.PageLocation == BadPageList) {
  1069. MiUnlinkPageFromList (Pfn1);
  1070. MiInsertPageInFreeList (PageFrameIndex);
  1071. }
  1072. Pfn1 += 1;
  1073. PageFrameIndex += 1;
  1074. }
  1075. ResAvailPagesReleased = NumberOfPages;
  1076. #if defined (_MI_COMPRESSION)
  1077. //
  1078. // Only removal of non-compression ranges decrement ResidentAvailable as
  1079. // only those ranges actually incremented this when they were added.
  1080. //
  1081. if (Flags & MM_PHYSICAL_MEMORY_PRODUCED_VIA_COMPRESSION) {
  1082. //
  1083. // Compression range removals add back into AvailablePages the same
  1084. // amount that MiUnlinkPageFromList removes (as the original
  1085. // addition of these ranges never bumps this counter).
  1086. //
  1087. ResAvailPagesReleased = 0;
  1088. MmAvailablePages -= (PFN_COUNT) RemovedPages;
  1089. //
  1090. // Signal applications if allocating these pages caused a threshold cross.
  1091. //
  1092. MiNotifyMemoryEvents ();
  1093. MiNumberOfCompressionPages += RemovedPages;
  1094. }
  1095. #endif
  1096. if (PermanentRemoval == TRUE) {
  1097. MmNumberOfPhysicalPages += NumberOfPages;
  1098. InterlockedExchangeAdd ((PLONG)&SharedUserData->NumberOfPhysicalPages,
  1099. NumberOfPages);
  1100. #if defined (_MI_COMPRESSION)
  1101. //
  1102. // Rearm the interrupt to occur at the original threshold.
  1103. //
  1104. if ((Flags & MM_PHYSICAL_MEMORY_PRODUCED_VIA_COMPRESSION) == 0) {
  1105. MiArmCompressionInterrupt ();
  1106. }
  1107. #endif
  1108. }
  1109. UNLOCK_PFN (OldIrql);
  1110. MI_INCREMENT_RESIDENT_AVAILABLE (ResAvailPagesReleased, MM_RESAVAIL_FREE_HOTREMOVE_FAILED);
  1111. giveup2:
  1112. InterlockedExchangeAddSizeT (&MmTotalCommitLimitMaximum, NumberOfPages);
  1113. InterlockedExchangeAddSizeT (&MmTotalCommitLimit, NumberOfPages);
  1114. MmUnlockPagableImageSection(ExPageLockHandle);
  1115. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  1116. return Status;
  1117. }
  1118. NTSTATUS
  1119. MmRemovePhysicalMemory (
  1120. IN PPHYSICAL_ADDRESS StartAddress,
  1121. IN OUT PLARGE_INTEGER NumberOfBytes
  1122. )
  1123. /*++
  1124. Routine Description:
  1125. A wrapper for MmRemovePhysicalMemoryEx.
  1126. Arguments:
  1127. StartAddress - Supplies the starting physical address.
  1128. NumberOfBytes - Supplies a pointer to the number of bytes being removed.
  1129. Return Value:
  1130. NTSTATUS.
  1131. Environment:
  1132. Kernel mode. PASSIVE level. No locks held.
  1133. --*/
  1134. {
  1135. return MmRemovePhysicalMemoryEx (StartAddress, NumberOfBytes, 0);
  1136. }
NTSTATUS
MmRemovePhysicalMemoryEx (
    IN PPHYSICAL_ADDRESS StartAddress,
    IN OUT PLARGE_INTEGER NumberOfBytes,
    IN ULONG Flags
    )

/*++

Routine Description:

    This routine attempts to remove the specified physical address range
    from the system.  The actual PFN database and physical memory block
    surgery is done by MiRemovePhysicalMemory; this routine wraps it with
    the architecture-specific cache/TB flushing needed so that no stale
    cached data or speculative references target the removed range.

Arguments:

    StartAddress - Supplies the starting physical address.

    NumberOfBytes - Supplies a pointer to the number of bytes being removed.
                    Updated by MiRemovePhysicalMemory with the number of
                    bytes actually removed.

    Flags - Supplies relevant flags describing the memory range.

Return Value:

    NTSTATUS.

Environment:

    Kernel mode.  PASSIVE level.  No locks held.

--*/

{
    NTSTATUS Status;
#if defined (_X86_) || defined (_AMD64_)
    BOOLEAN CachesFlushed;
#endif
#if defined(_IA64_)
    PVOID VirtualAddress;
    PVOID SingleVirtualAddress;
    SIZE_T SizeInBytes;
    SIZE_T MapSizeInBytes;
    PFN_COUNT NumberOfPages;
    PFN_COUNT i;
    PFN_NUMBER StartPage;
#endif

    PAGED_CODE();

#if defined (_MI_COMPRESSION_SUPPORTED_)

    //
    // NOTE(review): when compression support is compiled in, removal of
    // compression-produced ranges is rejected outright - confirm this
    // polarity is intended.
    //

    if (Flags & MM_PHYSICAL_MEMORY_PRODUCED_VIA_COMPRESSION) {
        return STATUS_NOT_SUPPORTED;
    }
#else

    //
    // Without compression support no flag bits are defined.
    //

    if (Flags != 0) {
        return STATUS_INVALID_PARAMETER_3;
    }
#endif

#if defined (_X86_) || defined (_AMD64_)

    //
    // Issue a cache invalidation here just as a test to make sure the
    // machine can support it.  If not, then don't bother trying to remove
    // any memory.
    //

    CachesFlushed = KeInvalidateAllCaches ();
    if (CachesFlushed == FALSE) {
        return STATUS_NOT_SUPPORTED;
    }
#endif

#if defined(_IA64_)

    //
    // Pick up at least a single PTE mapping now as we do not want to fail this
    // call if no PTEs are available after a successful remove.  Resorting to
    // actually using this PTE should be a very rare case indeed.
    //

    SingleVirtualAddress = (PMMPTE)MiMapSinglePage (NULL,
                                                    0,
                                                    MmCached,
                                                    HighPagePriority);

    if (SingleVirtualAddress == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }
#endif

    //
    // Do the actual removal - on success, NumberOfBytes holds the byte
    // count actually removed.
    //

    Status = MiRemovePhysicalMemory (StartAddress, NumberOfBytes, TRUE, Flags);

    if (NT_SUCCESS (Status)) {

#if defined (_X86_) || defined (_AMD64_)

        //
        // This worked at the probe above, so it is expected to work here too.
        //

        CachesFlushed = KeInvalidateAllCaches ();
        ASSERT (CachesFlushed == TRUE);
#endif

#if defined(_IA64_)

        SizeInBytes = (SIZE_T)NumberOfBytes->QuadPart;

        //
        // Flush the entire TB to remove any KSEG translations that may map the
        // pages being removed.  Otherwise hardware or software speculation
        // can reference the memory speculatively which would crash the machine.
        //

        KeFlushEntireTb (TRUE, TRUE);

        //
        // Establish an uncached mapping to the pages being removed.
        // Successively halve the request until a mapping succeeds.
        //

        MapSizeInBytes = SizeInBytes;

        //
        // Initializing VirtualAddress is not needed for correctness
        // but without it the compiler cannot compile this code
        // W4 to check for use of uninitialized variables.
        //

        VirtualAddress = NULL;

        while (MapSizeInBytes > PAGE_SIZE) {
            VirtualAddress = MmMapIoSpace (*StartAddress,
                                           MapSizeInBytes,
                                           MmNonCached);

            if (VirtualAddress != NULL) {
                break;
            }

            MapSizeInBytes = MapSizeInBytes >> 1;
        }

        if (MapSizeInBytes <= PAGE_SIZE) {

            //
            // No multi-page mapping could be obtained - fall back to
            // sweeping the range one page at a time through the PTE
            // reserved at entry.
            //

            StartPage = (PFN_NUMBER)(StartAddress->QuadPart >> PAGE_SHIFT);
            NumberOfPages = (PFN_COUNT)(NumberOfBytes->QuadPart >> PAGE_SHIFT);

            for (i = 0; i < NumberOfPages; i += 1) {

                SingleVirtualAddress = (PMMPTE)MiMapSinglePage (SingleVirtualAddress,
                                                                StartPage,
                                                                MmCached,
                                                                HighPagePriority);

                KeSweepCacheRangeWithDrain (TRUE,
                                            SingleVirtualAddress,
                                            PAGE_SIZE);

                StartPage += 1;
            }
        }
        else {

            //
            // Drain all pending transactions and prefetches and perform cache
            // evictions.  Only drain 4gb max at a time as this API takes a
            // ULONG.
            //
            // NOTE(review): neither VirtualAddress nor the mapped length is
            // advanced/limited across these iterations - the same start
            // address is swept each pass, and the mapping may be shorter
            // than SizeInBytes if MmMapIoSpace succeeded only after the
            // request was halved.  Confirm the intended behavior.
            //

            while (SizeInBytes > _4gb) {
                KeSweepCacheRangeWithDrain (TRUE, VirtualAddress, _4gb - 1);
                SizeInBytes -= (_4gb - 1);
            }

            KeSweepCacheRangeWithDrain (TRUE,
                                        VirtualAddress,
                                        (ULONG)SizeInBytes);

            MmUnmapIoSpace (VirtualAddress, NumberOfBytes->QuadPart);
        }
#endif
    }

#if defined(_IA64_)

    //
    // Release the single-page PTE reserved at entry regardless of outcome.
    //

    MiUnmapSinglePage (SingleVirtualAddress);
#endif

    return Status;
}
  1274. NTSTATUS
  1275. MmMarkPhysicalMemoryAsBad (
  1276. IN PPHYSICAL_ADDRESS StartAddress,
  1277. IN OUT PLARGE_INTEGER NumberOfBytes
  1278. )
  1279. /*++
  1280. Routine Description:
  1281. This routine attempts to mark the specified physical address range
  1282. as bad so the system will not use it. This is generally done for pages
  1283. which contain ECC errors.
  1284. Note that this is different from removing pages permanently (ie: physically
  1285. removing the memory board) which should be done via the
  1286. MmRemovePhysicalMemory API.
  1287. The caller is responsible for maintaining a global table so that subsequent
  1288. boots can examine it and remove the ECC pages before loading the kernel.
  1289. Arguments:
  1290. StartAddress - Supplies the starting physical address.
  1291. NumberOfBytes - Supplies a pointer to the number of bytes being removed.
  1292. Return Value:
  1293. NTSTATUS.
  1294. Environment:
  1295. Kernel mode. PASSIVE level. No locks held.
  1296. --*/
  1297. {
  1298. PAGED_CODE();
  1299. return MiRemovePhysicalMemory (StartAddress, NumberOfBytes, FALSE, 0);
  1300. }
NTSTATUS
MmMarkPhysicalMemoryAsGood (
    IN PPHYSICAL_ADDRESS StartAddress,
    IN OUT PLARGE_INTEGER NumberOfBytes
    )

/*++

Routine Description:

    This routine attempts to mark the specified physical address range
    as good so the system will use it.  This is generally done for pages
    which used to (but presumably no longer do) contain ECC errors.

    Note that this is different from adding pages permanently (ie: physically
    inserting a new memory board) which should be done via the
    MmAddPhysicalMemory API.

    The caller is responsible for removing these entries from a global table
    so that subsequent boots will use the pages.

Arguments:

    StartAddress - Supplies the starting physical address.
                   Must be page aligned.

    NumberOfBytes - Supplies a pointer to the number of bytes being made
                    good.  Must be a page multiple.  On success, updated
                    with the number of bytes actually recovered (only pages
                    previously ECC-marked bad are counted).

Return Value:

    NTSTATUS.

Environment:

    Kernel mode.  PASSIVE level.  No locks held.

--*/

{
    PMMPFN Pfn1;
    KIRQL OldIrql;
    PFN_NUMBER NumberOfPages;
    PFN_NUMBER start;
    PFN_NUMBER count;
    PFN_NUMBER StartPage;
    PFN_NUMBER EndPage;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER Page;
    PFN_NUMBER LastPage;

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);

    //
    // Both the base address and the length must be page aligned.
    //

    ASSERT (BYTE_OFFSET(NumberOfBytes->LowPart) == 0);
    ASSERT (BYTE_OFFSET(StartAddress->LowPart) == 0);

    StartPage = (PFN_NUMBER)(StartAddress->QuadPart >> PAGE_SHIFT);
    NumberOfPages = (PFN_NUMBER)(NumberOfBytes->QuadPart >> PAGE_SHIFT);
    EndPage = StartPage + NumberOfPages;

    //
    // The dynamic memory mutex serializes all memory add/remove operations
    // and protects MmPhysicalMemoryBlock from changing underneath us.
    //

    KeAcquireGuardedMutex (&MmDynamicMemoryMutex);

    if (EndPage - 1 > MmHighestPhysicalPage) {

        //
        // Truncate the request into something that can be mapped by the PFN
        // database.
        //

        EndPage = MmHighestPhysicalPage + 1;
        NumberOfPages = EndPage - StartPage;
    }

    //
    // The range cannot wrap.
    //

    if (StartPage >= EndPage) {
        KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
        return STATUS_INVALID_PARAMETER_1;
    }

    //
    // The request must lie within an already present range.
    //

    start = 0;

    MmLockPagableSectionByHandle (ExPageLockHandle);

    LOCK_PFN (OldIrql);

    do {

        count = MmPhysicalMemoryBlock->Run[start].PageCount;
        Page = MmPhysicalMemoryBlock->Run[start].BasePage;

        if (count != 0) {
            LastPage = Page + count;
            if ((StartPage >= Page) && (EndPage <= LastPage)) {
                //
                // Entirely contained in this run - success.
                //
                break;
            }
        }

        start += 1;

    } while (start != MmPhysicalMemoryBlock->NumberOfRuns);

    if (start == MmPhysicalMemoryBlock->NumberOfRuns) {

        //
        // No run fully contains the requested range.
        //

        UNLOCK_PFN (OldIrql);
        MmUnlockPagableImageSection(ExPageLockHandle);
        KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
        return STATUS_CONFLICTING_ADDRESSES;
    }

    //
    // Walk through the range and add only pages previously removed to the
    // free list in the PFN database.  NumberOfPages is reused from here on
    // as the count of pages actually recovered.
    //

    PageFrameIndex = StartPage;
    Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

    NumberOfPages = 0;

    while (PageFrameIndex < EndPage) {

        //
        // Only pages that were ECC-marked bad (ParityError + removal
        // requested, parked on the bad page list) are eligible; anything
        // else in the range is silently skipped.
        //

        if ((Pfn1->u3.e1.ParityError == 1) &&
            (Pfn1->u3.e1.RemovalRequested == 1) &&
            (Pfn1->u3.e1.PageLocation == BadPageList)) {

            Pfn1->u3.e1.ParityError = 0;
            Pfn1->u3.e1.RemovalRequested = 0;

            MiUnlinkPageFromList (Pfn1);

            MiInsertPageInFreeList (PageFrameIndex);

            NumberOfPages += 1;
        }

        Pfn1 += 1;
        PageFrameIndex += 1;
    }

    UNLOCK_PFN (OldIrql);

    MI_INCREMENT_RESIDENT_AVAILABLE (NumberOfPages, MM_RESAVAIL_FREE_HOTADD_ECC);

    //
    // Increase all commit limits to reflect the additional memory.
    //

    InterlockedExchangeAddSizeT (&MmTotalCommitLimitMaximum, NumberOfPages);
    InterlockedExchangeAddSizeT (&MmTotalCommitLimit, NumberOfPages);

    MmUnlockPagableImageSection(ExPageLockHandle);

    KeReleaseGuardedMutex (&MmDynamicMemoryMutex);

    //
    // Indicate number of bytes actually added to our caller.
    //

    NumberOfBytes->QuadPart = (ULONGLONG)NumberOfPages * PAGE_SIZE;

    return STATUS_SUCCESS;
}
  1415. PPHYSICAL_MEMORY_RANGE
  1416. MmGetPhysicalMemoryRanges (
  1417. VOID
  1418. )
  1419. /*++
  1420. Routine Description:
  1421. This routine returns the virtual address of a nonpaged pool block which
  1422. contains the physical memory ranges in the system.
  1423. The returned block contains physical address and page count pairs.
  1424. The last entry contains zero for both.
  1425. The caller must understand that this block can change at any point before
  1426. or after this snapshot.
  1427. It is the caller's responsibility to free this block.
  1428. Arguments:
  1429. None.
  1430. Return Value:
  1431. NULL on failure.
  1432. Environment:
  1433. Kernel mode. PASSIVE level. No locks held.
  1434. --*/
  1435. {
  1436. ULONG i;
  1437. KIRQL OldIrql;
  1438. PPHYSICAL_MEMORY_RANGE p;
  1439. PPHYSICAL_MEMORY_RANGE PhysicalMemoryBlock;
  1440. ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);
  1441. KeAcquireGuardedMutex (&MmDynamicMemoryMutex);
  1442. i = sizeof(PHYSICAL_MEMORY_RANGE) * (MmPhysicalMemoryBlock->NumberOfRuns + 1);
  1443. PhysicalMemoryBlock = ExAllocatePoolWithTag (NonPagedPool,
  1444. i,
  1445. 'hPmM');
  1446. if (PhysicalMemoryBlock == NULL) {
  1447. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  1448. return NULL;
  1449. }
  1450. p = PhysicalMemoryBlock;
  1451. MmLockPagableSectionByHandle (ExPageLockHandle);
  1452. LOCK_PFN (OldIrql);
  1453. ASSERT (i == (sizeof(PHYSICAL_MEMORY_RANGE) * (MmPhysicalMemoryBlock->NumberOfRuns + 1)));
  1454. for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i += 1) {
  1455. p->BaseAddress.QuadPart = (LONGLONG)MmPhysicalMemoryBlock->Run[i].BasePage * PAGE_SIZE;
  1456. p->NumberOfBytes.QuadPart = (LONGLONG)MmPhysicalMemoryBlock->Run[i].PageCount * PAGE_SIZE;
  1457. p += 1;
  1458. }
  1459. p->BaseAddress.QuadPart = 0;
  1460. p->NumberOfBytes.QuadPart = 0;
  1461. UNLOCK_PFN (OldIrql);
  1462. MmUnlockPagableImageSection(ExPageLockHandle);
  1463. KeReleaseGuardedMutex (&MmDynamicMemoryMutex);
  1464. return PhysicalMemoryBlock;
  1465. }
PFN_COUNT
MiRemovePhysicalPages (
    IN PFN_NUMBER StartPage,
    IN PFN_NUMBER EndPage
    )

/*++

Routine Description:

    This routine searches the PFN database for free, zeroed or standby pages
    that are marked for removal.  Pages found are moved to the bad page list
    so they cannot be reallocated while the removal proceeds.

Arguments:

    StartPage - Supplies the low physical frame number to remove.

    EndPage - Supplies the last physical frame number to remove.

Return Value:

    Returns the number of pages removed from the free, zeroed and standby lists.

Environment:

    Kernel mode, PFN lock held.  Since this routine is PAGELK, the caller is
    responsible for locking it down and unlocking it on return.

--*/

{
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    PMMPFN PfnNextColored;
    PMMPFN PfnNextFlink;
    PMMPFN PfnLastColored;
    PFN_NUMBER Page;
    LOGICAL RemovePage;
    ULONG Color;
    PMMCOLOR_TABLES ColorHead;
    PFN_NUMBER MovedPage;
    MMLISTS MemoryList;
    PFN_NUMBER PageNextColored;
    PFN_NUMBER PageNextFlink;
    PFN_NUMBER PageLastColored;
    PFN_COUNT NumberOfPages;
    PMMPFNLIST ListHead;
    LOGICAL RescanNeeded;

    MM_PFN_LOCK_ASSERT();

    NumberOfPages = 0;

rescan:

    //
    // Grab all zeroed (and then free) pages first directly from the
    // colored lists to avoid multiple walks down these singly linked lists.
    // Handle transition pages last.
    //

    for (MemoryList = ZeroedPageList; MemoryList <= FreePageList; MemoryList += 1) {

        ListHead = MmPageLocationList[MemoryList];

        for (Color = 0; Color < MmSecondaryColors; Color += 1) {
            ColorHead = &MmFreePagesByColor[MemoryList][Color];

            //
            // MovedPage remembers the first page skipped on this colored
            // chain; seeing it again at the chain head means the whole
            // chain has been rotated through once.
            //

            MovedPage = (PFN_NUMBER) MM_EMPTY_LIST;

            while (ColorHead->Flink != MM_EMPTY_LIST) {

                Page = ColorHead->Flink;

                Pfn1 = MI_PFN_ELEMENT(Page);

                ASSERT ((MMLISTS)Pfn1->u3.e1.PageLocation == MemoryList);

                //
                // The Flink and Blink must be nonzero here for the page
                // to be on the listhead.  Only code that scans the
                // MmPhysicalMemoryBlock has to check for the zero case.
                //

                ASSERT (Pfn1->u1.Flink != 0);
                ASSERT (Pfn1->u2.Blink != 0);

                //
                // See if the page is desired by the caller.
                //
                // Systems utilizing memory compression may have more
                // pages on the zero, free and standby lists than we
                // want to give out.  Explicitly check MmAvailablePages
                // instead (and recheck whenever the PFN lock is
                // released and reacquired).
                //

                if ((Pfn1->u3.e1.RemovalRequested == 1) &&
                    (MmAvailablePages != 0)) {

                    ASSERT (Pfn1->u3.e1.ReadInProgress == 0);

                    MiUnlinkFreeOrZeroedPage (Pfn1);

                    MiInsertPageInList (&MmBadPageListHead, Page);

                    NumberOfPages += 1;
                }
                else {

                    //
                    // Unwanted so put the page on the end of list.
                    // If first time, save pfn.
                    //

                    if (MovedPage == MM_EMPTY_LIST) {
                        MovedPage = Page;
                    }
                    else if (Page == MovedPage) {

                        //
                        // No more pages available in this colored chain.
                        //

                        break;
                    }

                    //
                    // If the colored chain has more than one entry then
                    // put this page on the end.
                    //
                    // NOTE: in the free/zeroed colored chains, OriginalPte
                    // is reused as the colored-chain forward link and
                    // u4.PteFrame as the colored-chain back link (see the
                    // relinking below) - presumably by design; confirm
                    // against the PFN list insertion code.
                    //

                    PageNextColored = (PFN_NUMBER)Pfn1->OriginalPte.u.Long;

                    if (PageNextColored == MM_EMPTY_LIST) {

                        //
                        // No more pages available in this colored chain.
                        //

                        break;
                    }

                    ASSERT (Pfn1->u1.Flink != 0);
                    ASSERT (Pfn1->u1.Flink != MM_EMPTY_LIST);
                    ASSERT (Pfn1->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);

                    PfnNextColored = MI_PFN_ELEMENT(PageNextColored);

                    ASSERT ((MMLISTS)PfnNextColored->u3.e1.PageLocation == MemoryList);
                    ASSERT (PfnNextColored->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);

                    //
                    // Adjust the free page list so Page
                    // follows PageNextFlink.
                    //

                    PageNextFlink = Pfn1->u1.Flink;
                    PfnNextFlink = MI_PFN_ELEMENT(PageNextFlink);

                    ASSERT ((MMLISTS)PfnNextFlink->u3.e1.PageLocation == MemoryList);
                    ASSERT (PfnNextFlink->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);

                    PfnLastColored = ColorHead->Blink;
                    ASSERT (PfnLastColored != (PMMPFN)MM_EMPTY_LIST);
                    ASSERT (PfnLastColored->OriginalPte.u.Long == MM_EMPTY_LIST);
                    ASSERT (PfnLastColored->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
                    ASSERT (PfnLastColored->u2.Blink != MM_EMPTY_LIST);

                    ASSERT ((MMLISTS)PfnLastColored->u3.e1.PageLocation == MemoryList);
                    PageLastColored = MI_PFN_ELEMENT_TO_INDEX (PfnLastColored);

                    //
                    // Unlink Page from the doubly-linked per-list chain.
                    //

                    if (ListHead->Flink == Page) {

                        ASSERT (Pfn1->u2.Blink == MM_EMPTY_LIST);
                        ASSERT (ListHead->Blink != Page);

                        ListHead->Flink = PageNextFlink;

                        PfnNextFlink->u2.Blink = MM_EMPTY_LIST;
                    }
                    else {

                        ASSERT (Pfn1->u2.Blink != MM_EMPTY_LIST);
                        ASSERT ((MMLISTS)(MI_PFN_ELEMENT((MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink)))->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
                        ASSERT ((MMLISTS)(MI_PFN_ELEMENT((MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink)))->u3.e1.PageLocation == MemoryList);

                        MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink = PageNextFlink;

                        PfnNextFlink->u2.Blink = Pfn1->u2.Blink;
                    }

#if DBG
                    if (PfnLastColored->u1.Flink == MM_EMPTY_LIST) {
                        ASSERT (ListHead->Blink == PageLastColored);
                    }
#endif

                    //
                    // Relink Page immediately after the last page of this
                    // color, i.e. at the end of the colored chain.
                    //

                    Pfn1->u1.Flink = PfnLastColored->u1.Flink;
                    Pfn1->u2.Blink = PageLastColored;

                    if (ListHead->Blink == PageLastColored) {
                        ListHead->Blink = Page;
                    }

                    //
                    // Adjust the colored chains.
                    //

                    if (PfnLastColored->u1.Flink != MM_EMPTY_LIST) {
                        ASSERT (MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u4.PteFrame != MI_MAGIC_AWE_PTEFRAME);
                        ASSERT ((MMLISTS)(MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u3.e1.PageLocation) == MemoryList);
                        MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u2.Blink = Page;
                    }

                    ASSERT (PfnLastColored->OriginalPte.u.Long == MM_EMPTY_LIST);

                    PfnLastColored->u1.Flink = Page;

                    ColorHead->Flink = PageNextColored;
                    PfnNextColored->u4.PteFrame = MM_EMPTY_LIST;

                    Pfn1->OriginalPte.u.Long = MM_EMPTY_LIST;
                    Pfn1->u4.PteFrame = PageLastColored;

                    PfnLastColored->OriginalPte.u.Long = Page;

                    ColorHead->Blink = Pfn1;
                }
            }
        }
    }

    //
    // Second pass: sweep the PFN range for standby (transition) pages.
    //

    RescanNeeded = FALSE;
    Pfn1 = MI_PFN_ELEMENT (StartPage);

    do {

        //
        // Nonzero Flink/Blink distinguish pages actually enqueued on the
        // standby list from PFN entries merely marked as standby.
        //

        if ((Pfn1->u3.e1.PageLocation == StandbyPageList) &&
            (Pfn1->u1.Flink != 0) &&
            (Pfn1->u2.Blink != 0) &&
            (Pfn1->u3.e2.ReferenceCount == 0) &&
            (MmAvailablePages != 0)) {

            //
            // Systems utilizing memory compression may have more
            // pages on the zero, free and standby lists than we
            // want to give out.  Explicitly check MmAvailablePages
            // above instead (and recheck whenever the PFN lock is
            // released and reacquired).
            //

            ASSERT (Pfn1->u3.e1.ReadInProgress == 0);

            RemovePage = TRUE;

            if (Pfn1->u3.e1.RemovalRequested == 0) {

                //
                // This page is not directly needed for a hot remove - but if
                // it contains a chunk of prototype PTEs (and this chunk is
                // in a page that needs to be removed), then any pages
                // referenced by transition prototype PTEs must also be removed
                // before the desired page can be removed.
                //
                // The same analogy holds for page table, directory, parent
                // and extended parent pages.
                //

                Pfn2 = MI_PFN_ELEMENT (Pfn1->u4.PteFrame);

                if (Pfn2->u3.e1.RemovalRequested == 0) {

#if (_MI_PAGING_LEVELS >= 3)
                    Pfn2 = MI_PFN_ELEMENT (Pfn2->u4.PteFrame);

                    if (Pfn2->u3.e1.RemovalRequested == 0) {
                        RemovePage = FALSE;
                    }
                    else if (Pfn2->u2.ShareCount == 1) {
                        //
                        // Freeing this page would drop the containing
                        // table's last share - a rescan will be needed.
                        //
                        RescanNeeded = TRUE;
                    }

#if (_MI_PAGING_LEVELS >= 4)
                    Pfn2 = MI_PFN_ELEMENT (Pfn2->u4.PteFrame);

                    if (Pfn2->u3.e1.RemovalRequested == 0) {
                        RemovePage = FALSE;
                    }
                    else if (Pfn2->u2.ShareCount == 1) {
                        RescanNeeded = TRUE;
                    }
#endif
#else
                    RemovePage = FALSE;
#endif
                }
                else if (Pfn2->u2.ShareCount == 1) {
                    RescanNeeded = TRUE;
                }
            }

            if (RemovePage == TRUE) {

                //
                // This page is in the desired range - grab it.
                //

                MiUnlinkPageFromList (Pfn1);

                MiRestoreTransitionPte (Pfn1);

                MiInsertPageInList (&MmBadPageListHead, StartPage);

                NumberOfPages += 1;
            }
        }

        StartPage += 1;
        Pfn1 += 1;

    } while (StartPage < EndPage);

    if (RescanNeeded == TRUE) {

        //
        // A page table, directory or parent was freed by removing a transition
        // page from the cache.  Rescan from the top to pick it up.
        //

#if DBG
        MiDynmemData[7] += 1;
#endif

        goto rescan;
    }
#if DBG
    else {
        MiDynmemData[8] += 1;
    }
#endif

    return NumberOfPages;
}