Leaked source code of Windows Server 2003
/*++

Copyright (c) 1989 Microsoft Corporation

Module Name:

    nolowmem.c

Abstract:

    This module contains routines which remove physical memory below 4GB
    to make testing for driver addressing errors easier.

Author:

    Landy Wang (landyw) 30-Nov-1998

Revision History:

--*/

#include "mi.h"

//
// If /NOLOWMEM is used, this is set to the boundary PFN (pages below this
// value are not used whenever possible).
//

PFN_NUMBER MiNoLowMemory;

#if defined (_MI_MORE_THAN_4GB_)

VOID
MiFillRemovedPages (
    IN ULONG StartPage,
    IN ULONG NumberOfPages
    );

ULONG
MiRemoveModuloPages (
    IN ULONG StartPage,
    IN ULONG LastPage
    );

#ifdef ALLOC_PRAGMA
#pragma alloc_text(INIT,MiRemoveLowPages)
#pragma alloc_text(INIT,MiFillRemovedPages)
#pragma alloc_text(INIT,MiRemoveModuloPages)
#endif

PRTL_BITMAP MiLowMemoryBitMap;

LOGICAL MiFillModuloPages = FALSE;

VOID
MiFillRemovedPages (
    IN ULONG StartPage,
    IN ULONG NumberOfPages
    )

/*++

Routine Description:

    This routine fills low pages with a recognizable pattern. Thus, if the
    page is ever mistakenly used by a broken component, it will be easy to
    see exactly which bytes were corrupted.

Arguments:

    StartPage - Supplies the low page to fill.

    NumberOfPages - Supplies the number of pages to fill.

Return Value:

    None.

Environment:

    Phase 0 initialization.

--*/

{
    ULONG Page;
    ULONG LastPage;
    PVOID LastChunkVa;
    ULONG MaxPageChunk;
    ULONG ThisPageChunk;
    PVOID TempVa;
    PVOID BaseVa;
    SIZE_T NumberOfBytes;
    PHYSICAL_ADDRESS PhysicalAddress;

    //
    // Do 256MB at a time when possible (don't want to overflow unit
    // conversions or fail to allocate system PTEs needlessly).
    //

    MaxPageChunk = (256 * 1024 * 1024) / PAGE_SIZE;

    LastPage = StartPage + NumberOfPages;

    PhysicalAddress.QuadPart = StartPage;
    PhysicalAddress.QuadPart = PhysicalAddress.QuadPart << PAGE_SHIFT;

    Page = StartPage;

    while (Page < LastPage) {

        if (NumberOfPages > MaxPageChunk) {
            ThisPageChunk = MaxPageChunk;
        }
        else {
            ThisPageChunk = NumberOfPages;
        }

        NumberOfBytes = ThisPageChunk << PAGE_SHIFT;

        BaseVa = MmMapIoSpace (PhysicalAddress, NumberOfBytes, MmCached);

        if (BaseVa != NULL) {

            //
            // Fill the actual page with a recognizable data pattern. No
            // one should write to these pages unless they are allocated for
            // a contiguous memory request.
            //

            TempVa = BaseVa;
            LastChunkVa = (PVOID)((ULONG_PTR)BaseVa + NumberOfBytes);

            while (TempVa < LastChunkVa) {
                RtlFillMemoryUlong (TempVa,
                                    PAGE_SIZE,
                                    (ULONG)Page | MI_LOWMEM_MAGIC_BIT);
                TempVa = (PVOID)((ULONG_PTR)TempVa + PAGE_SIZE);
                Page += 1;
            }

            MmUnmapIoSpace (BaseVa, NumberOfBytes);
        }
        else {
            MaxPageChunk /= 2;
            if (MaxPageChunk == 0) {
#if DBG
                DbgPrint ("Not even one PTE available for filling lowmem pages\n");
                DbgBreakPoint ();
#endif
                break;
            }
        }
    }
}
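
//
// Illustrative sketch only (not part of the original module): given the
// fill pattern written by MiFillRemovedPages, a corrupted low page can be
// recognized by checking that every ULONG in it still equals the page
// frame number OR'ed with MI_LOWMEM_MAGIC_BIT. The helper below is
// hypothetical and assumes the caller has already mapped the page at
// MappedPage.
//

#if 0
LOGICAL
MiCheckRemovedPagePattern (
    IN PULONG MappedPage,
    IN ULONG Page
    )
{
    ULONG i;
    ULONG Expected;

    Expected = (ULONG)Page | MI_LOWMEM_MAGIC_BIT;

    for (i = 0; i < PAGE_SIZE / sizeof(ULONG); i += 1) {
        if (MappedPage[i] != Expected) {

            //
            // Byte offset i * sizeof(ULONG) within the page was corrupted.
            //

            return FALSE;
        }
    }

    return TRUE;
}
#endif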
ULONG
MiRemoveModuloPages (
    IN ULONG StartPage,
    IN ULONG LastPage
    )

/*++

Routine Description:

    This routine removes pages above 4GB.

    For every page below 4GB that could not be reclaimed, don't use the
    high modulo-4GB equivalent page. The motivation is to prevent
    code bugs that drop the high bits from destroying critical
    system data in the unclaimed pages (like the GDT, IDT, kernel code
    and data, etc).

Arguments:

    StartPage - Supplies the low page to modulo-ize and remove.

    LastPage - Supplies the final low page to modulo-ize and remove.

Return Value:

    Returns the number of high modulo pages that were removed.

Environment:

    Phase 0 initialization.

--*/

{
    PEPROCESS Process;
    ULONG Page;
    ULONG PagesRemoved;
    PVOID TempVa;
    KIRQL OldIrql;
    PFN_NUMBER HighPage;
    PMMPFN Pfn1;

    //
    // Removing modulo pages can take a long time (on the order of 30
    // minutes!) on large memory systems because the various PFN lists
    // generally need to be linearly walked in order to find and
    // cross-remove from the colored chains the requested pages. Since
    // actually putting these pages out of circulation is of dubious
    // benefit, default this behavior to disabled but leave the data
    // variable so a questionable machine can have this enabled without
    // needing a new kernel.
    //

    if (MiFillModuloPages == FALSE) {
        return 0;
    }

    Process = PsGetCurrentProcess ();

    PagesRemoved = 0;

#if DBG
    DbgPrint ("Removing modulo pages %x %x\n", StartPage, LastPage);
#endif

    for (Page = StartPage; Page < LastPage; Page += 1) {

        //
        // Search for any high modulo pages and remove them.
        //

        HighPage = Page + MiNoLowMemory;

        LOCK_PFN (OldIrql);

        while (HighPage <= MmHighestPhysicalPage) {

            Pfn1 = MI_PFN_ELEMENT (HighPage);

            if ((MmIsAddressValid(Pfn1)) &&
                (MmIsAddressValid((PCHAR)Pfn1 + sizeof(MMPFN) - 1)) &&
                ((ULONG)Pfn1->u3.e1.PageLocation <= (ULONG)StandbyPageList) &&
                (Pfn1->u1.Flink != 0) &&
                (Pfn1->u2.Blink != 0) &&
                (Pfn1->u3.e2.ReferenceCount == 0) &&
                (MmAvailablePages > 0)) {

                //
                // Systems utilizing memory compression may have more
                // pages on the zero, free and standby lists than we
                // want to give out. Explicitly check MmAvailablePages
                // above instead (and recheck whenever the PFN lock is
                // released and reacquired).
                //

                //
                // This page can be taken.
                //

                if (Pfn1->u3.e1.PageLocation == StandbyPageList) {
                    MiUnlinkPageFromList (Pfn1);
                    MiRestoreTransitionPte (Pfn1);
                }
                else {
                    MiUnlinkFreeOrZeroedPage (Pfn1);
                }

                Pfn1->u3.e2.ShortFlags = 0;
                Pfn1->u3.e2.ReferenceCount = 1;
                Pfn1->u2.ShareCount = 1;
                Pfn1->PteAddress = (PMMPTE)(ULONG_PTR)0xFFFFFFF8;
                Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
                Pfn1->u4.PteFrame = MI_MAGIC_4GB_RECLAIM;
                Pfn1->u3.e1.PageLocation = ActiveAndValid;
                Pfn1->u3.e1.CacheAttribute = MiNotMapped;
                Pfn1->u4.VerifierAllocation = 0;
                Pfn1->u3.e1.LargeSessionAllocation = 0;
                Pfn1->u3.e1.StartOfAllocation = 1;
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Fill the actual page with a recognizable data
                // pattern. No one else should write to these
                // pages unless they are allocated for
                // a contiguous memory request.
                //

                MmNumberOfPhysicalPages -= 1;

                UNLOCK_PFN (OldIrql);

                TempVa = (PULONG)MiMapPageInHyperSpace (Process,
                                                        HighPage,
                                                        &OldIrql);

                RtlFillMemoryUlong (TempVa,
                                    PAGE_SIZE,
                                    (ULONG)HighPage | MI_LOWMEM_MAGIC_BIT);

                MiUnmapPageInHyperSpace (Process, TempVa, OldIrql);

                PagesRemoved += 1;

                LOCK_PFN (OldIrql);
            }

            HighPage += MiNoLowMemory;
        }

        UNLOCK_PFN (OldIrql);
    }

#if DBG
    DbgPrint ("Done removing modulo pages %x %x\n", StartPage, LastPage);
#endif

    return PagesRemoved;
}
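
//
// Worked example (illustrative, assuming 4KB pages so MiNoLowMemory is
// 0x100000): if low page 0x25 could not be reclaimed and still holds
// system data, then its above-4GB aliases 0x100025, 0x200025, ... up to
// MmHighestPhysicalPage are taken out of circulation by the routine above.
// A driver that is handed one of those high pages and then truncates the
// physical address to 32 bits would otherwise land on page 0x25 and
// corrupt it silently.
//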
VOID
MiRemoveLowPages (
    ULONG RemovePhase
    )

/*++

Routine Description:

    This routine removes all pages below physical 4GB in the system. This
    lets us find problems with device drivers by putting all accesses high.

Arguments:

    RemovePhase - Supplies the current phase of page removal.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    KIRQL OldIrql;
    ULONG i;
    ULONG BitMapIndex;
    ULONG BitMapHint;
    ULONG LengthOfClearRun;
    ULONG LengthOfSetRun;
    ULONG StartingRunIndex;
    ULONG ModuloRemoved;
    ULONG PagesRemoved;
    PFN_COUNT PageCount;
    PMMPFN PfnNextColored;
    PMMPFN PfnNextFlink;
    PMMPFN PfnLastColored;
    PFN_NUMBER PageNextColored;
    PFN_NUMBER PageNextFlink;
    PFN_NUMBER PageLastColored;
    PFN_NUMBER Page;
    PMMPFN Pfn1;
    PMMPFNLIST ListHead;
    ULONG Color;
    PMMCOLOR_TABLES ColorHead;
    PFN_NUMBER MovedPage;

    if (RemovePhase == 0) {
        MiCreateBitMap (&MiLowMemoryBitMap, (ULONG)MiNoLowMemory, NonPagedPool);

        if (MiLowMemoryBitMap != NULL) {
            RtlClearAllBits (MiLowMemoryBitMap);
            MmMakeLowMemory = TRUE;
        }
    }

    if (MiLowMemoryBitMap == NULL) {
        return;
    }

    ListHead = &MmFreePageListHead;

    PageCount = 0;

    LOCK_PFN (OldIrql);

    for (Color = 0; Color < MmSecondaryColors; Color += 1) {

        ColorHead = &MmFreePagesByColor[FreePageList][Color];

        MovedPage = MM_EMPTY_LIST;

        while (ColorHead->Flink != MM_EMPTY_LIST) {

            Page = ColorHead->Flink;

            Pfn1 = MI_PFN_ELEMENT(Page);

            ASSERT ((MMLISTS)Pfn1->u3.e1.PageLocation == FreePageList);

            //
            // The Flink and Blink must be nonzero here for the page
            // to be on the listhead. Only code that scans the
            // MmPhysicalMemoryBlock has to check for the zero case.
            //

            ASSERT (Pfn1->u1.Flink != 0);
            ASSERT (Pfn1->u2.Blink != 0);

            //
            // See if the page is below 4GB - if not, skip it.
            //

            if (Page >= MiNoLowMemory) {

                //
                // Put page on end of list and if first time, save pfn.
                //

                if (MovedPage == MM_EMPTY_LIST) {
                    MovedPage = Page;
                }
                else if (Page == MovedPage) {

                    //
                    // No more pages available in this colored chain.
                    //

                    break;
                }

                //
                // If the colored chain has more than one entry then
                // put this page on the end.
                //

                PageNextColored = (PFN_NUMBER)Pfn1->OriginalPte.u.Long;

                if (PageNextColored == MM_EMPTY_LIST) {

                    //
                    // No more pages available in this colored chain.
                    //

                    break;
                }

                ASSERT (Pfn1->u1.Flink != 0);
                ASSERT (Pfn1->u1.Flink != MM_EMPTY_LIST);
                ASSERT (Pfn1->u4.PteFrame != MI_MAGIC_4GB_RECLAIM);

                PfnNextColored = MI_PFN_ELEMENT(PageNextColored);

                ASSERT ((MMLISTS)PfnNextColored->u3.e1.PageLocation == FreePageList);
                ASSERT (PfnNextColored->u4.PteFrame != MI_MAGIC_4GB_RECLAIM);
                PfnNextColored->u4.PteFrame = MM_EMPTY_LIST;

                //
                // Adjust the free page list so Page follows PageNextFlink.
                //

                PageNextFlink = Pfn1->u1.Flink;
                PfnNextFlink = MI_PFN_ELEMENT(PageNextFlink);

                ASSERT ((MMLISTS)PfnNextFlink->u3.e1.PageLocation == FreePageList);
                ASSERT (PfnNextFlink->u4.PteFrame != MI_MAGIC_4GB_RECLAIM);

                PfnLastColored = ColorHead->Blink;
                ASSERT (PfnLastColored != (PMMPFN)MM_EMPTY_LIST);
                ASSERT (PfnLastColored->OriginalPte.u.Long == MM_EMPTY_LIST);
                ASSERT (PfnLastColored->u4.PteFrame != MI_MAGIC_4GB_RECLAIM);
                ASSERT (PfnLastColored->u2.Blink != MM_EMPTY_LIST);
                ASSERT ((MMLISTS)PfnLastColored->u3.e1.PageLocation == FreePageList);

                PageLastColored = MI_PFN_ELEMENT_TO_INDEX (PfnLastColored);

                if (ListHead->Flink == Page) {

                    ASSERT (Pfn1->u2.Blink == MM_EMPTY_LIST);
                    ASSERT (ListHead->Blink != Page);

                    ListHead->Flink = PageNextFlink;

                    PfnNextFlink->u2.Blink = MM_EMPTY_LIST;
                }
                else {

                    ASSERT (Pfn1->u2.Blink != MM_EMPTY_LIST);
                    ASSERT ((MMLISTS)(MI_PFN_ELEMENT((MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink)))->u4.PteFrame != MI_MAGIC_4GB_RECLAIM);
                    ASSERT ((MMLISTS)(MI_PFN_ELEMENT((MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink)))->u3.e1.PageLocation == FreePageList);

                    MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink = PageNextFlink;
                    PfnNextFlink->u2.Blink = Pfn1->u2.Blink;
                }

#if DBG
                if (PfnLastColored->u1.Flink == MM_EMPTY_LIST) {
                    ASSERT (ListHead->Blink == PageLastColored);
                }
#endif

                Pfn1->u1.Flink = PfnLastColored->u1.Flink;
                Pfn1->u2.Blink = PageLastColored;

                if (ListHead->Blink == PageLastColored) {
                    ListHead->Blink = Page;
                }

                //
                // Adjust the colored chains.
                //

                if (PfnLastColored->u1.Flink != MM_EMPTY_LIST) {
                    ASSERT (MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u4.PteFrame != MI_MAGIC_4GB_RECLAIM);
                    ASSERT ((MMLISTS)(MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u3.e1.PageLocation) == FreePageList);
                    MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u2.Blink = Page;
                }

                PfnLastColored->u1.Flink = Page;

                ColorHead->Flink = PageNextColored;
                Pfn1->OriginalPte.u.Long = MM_EMPTY_LIST;
                Pfn1->u4.PteFrame = PageLastColored;

                ASSERT (PfnLastColored->OriginalPte.u.Long == MM_EMPTY_LIST);
                PfnLastColored->OriginalPte.u.Long = Page;
                ColorHead->Blink = Pfn1;

                continue;
            }

            //
            // Page is below 4GB so reclaim it.
            //

            ASSERT (Pfn1->u3.e1.ReadInProgress == 0);

            MiUnlinkFreeOrZeroedPage (Pfn1);

            Pfn1->u3.e2.ReferenceCount = 1;
            Pfn1->u2.ShareCount = 1;
            MI_SET_PFN_DELETED(Pfn1);
            Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
            Pfn1->u4.PteFrame = MI_MAGIC_4GB_RECLAIM;
            Pfn1->u3.e1.PageLocation = ActiveAndValid;
            Pfn1->u3.e1.CacheAttribute = MiNotMapped;
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
            Pfn1->u4.VerifierAllocation = 0;
            Pfn1->u3.e1.LargeSessionAllocation = 0;

            ASSERT (Page < MiLowMemoryBitMap->SizeOfBitMap);
            ASSERT (RtlCheckBit (MiLowMemoryBitMap, Page) == 0);
            RtlSetBit (MiLowMemoryBitMap, (ULONG)Page);

            PageCount += 1;
        }
    }

    MmNumberOfPhysicalPages -= PageCount;

    UNLOCK_PFN (OldIrql);

#if DBG
    DbgPrint ("Removed 0x%x pages from low memory for LOW MEMORY testing\n", PageCount);
#endif

    ModuloRemoved = 0;

    if (RemovePhase == 1) {

        //
        // For every page below 4GB that could not be reclaimed, don't use the
        // high modulo-4GB equivalent page. The motivation is to prevent
        // code bugs that drop the high bits from destroying critical
        // system data in the unclaimed pages (like the GDT, IDT, kernel code
        // and data, etc).
        //

        BitMapHint = 0;
        PagesRemoved = 0;
        StartingRunIndex = 0;
        LengthOfClearRun = 0;

#if DBG
        DbgPrint ("%x Unclaimable Pages below 4GB are:\n\n",
            MiLowMemoryBitMap->SizeOfBitMap - RtlNumberOfSetBits (MiLowMemoryBitMap));
        DbgPrint ("StartPage EndPage Length\n");
#endif

        do {
            BitMapIndex = RtlFindSetBits (MiLowMemoryBitMap, 1, BitMapHint);

            if (BitMapIndex < BitMapHint) {
                break;
            }

            if (BitMapIndex == NO_BITS_FOUND) {
                break;
            }

            //
            // Print the page run that was clear as we didn't get those pages.
            //

            if (BitMapIndex != 0) {
#if DBG
                DbgPrint ("%08lx %08lx %08lx\n",
                    StartingRunIndex,
                    BitMapIndex - 1,
                    BitMapIndex - StartingRunIndex);
#endif

                //
                // Also remove high modulo pages corresponding to the low ones
                // we couldn't get.
                //

                ModuloRemoved += MiRemoveModuloPages (StartingRunIndex,
                                                      BitMapIndex);
            }

            //
            // Found at least one page to copy - try for a cluster.
            //

            LengthOfClearRun = RtlFindNextForwardRunClear (MiLowMemoryBitMap,
                                                           BitMapIndex,
                                                           &StartingRunIndex);

            if (LengthOfClearRun != 0) {
                LengthOfSetRun = StartingRunIndex - BitMapIndex;
            }
            else {
                LengthOfSetRun = MiLowMemoryBitMap->SizeOfBitMap - BitMapIndex;
            }

            PagesRemoved += LengthOfSetRun;

            //
            // Fill the page run with unique patterns.
            //

            MiFillRemovedPages (BitMapIndex, LengthOfSetRun);

            //
            // Clear the cache attribute bit in each page as MmMapIoSpace
            // will have set it, but no one else has cleared it.
            //

            Pfn1 = MI_PFN_ELEMENT(BitMapIndex);
            i = LengthOfSetRun;

            LOCK_PFN (OldIrql);

            do {
                Pfn1->u3.e1.CacheAttribute = MiNotMapped;
                Pfn1 += 1;
                i -= 1;
            } while (i != 0);

            UNLOCK_PFN (OldIrql);

            BitMapHint = BitMapIndex + LengthOfSetRun + LengthOfClearRun;

        } while (BitMapHint < MiLowMemoryBitMap->SizeOfBitMap);

        if (LengthOfClearRun != 0) {
#if DBG
            DbgPrint ("%08lx %08lx %08lx\n",
                StartingRunIndex,
                StartingRunIndex + LengthOfClearRun - 1,
                LengthOfClearRun);
#endif
            ModuloRemoved += MiRemoveModuloPages (StartingRunIndex,
                                                  StartingRunIndex + LengthOfClearRun);
        }

        ASSERT (RtlNumberOfSetBits(MiLowMemoryBitMap) == PagesRemoved);
    }

#if DBG
    if (ModuloRemoved != 0) {
        DbgPrint ("Total 0x%x Above-4GB Alias Pages also reclaimed\n\n",
            ModuloRemoved);
    }
#endif
}
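
//
// Summary of the MiLowMemoryBitMap convention used throughout this module
// (derived from the routines above and below): a set bit means the
// corresponding low page has been reclaimed by MiRemoveLowPages and is
// available to MiAllocateLowMemory; a clear bit means the page either was
// never reclaimed (it is still in general system use) or is currently
// handed out by MiAllocateLowMemory. MiFreeLowMemory sets the bits again
// when an allocation is returned.
//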
PVOID
MiAllocateLowMemory (
    IN SIZE_T NumberOfBytes,
    IN PFN_NUMBER LowestAcceptablePfn,
    IN PFN_NUMBER HighestAcceptablePfn,
    IN PFN_NUMBER BoundaryPfn,
    IN PVOID CallingAddress,
    IN MEMORY_CACHING_TYPE CacheType,
    IN ULONG Tag
    )

/*++

Routine Description:

    This is a special routine for allocating contiguous physical memory below
    4GB on a system that has been booted in test mode where all this memory
    has been made generally unavailable to all components. This lets us find
    problems with device drivers.

Arguments:

    NumberOfBytes - Supplies the number of bytes to allocate.

    LowestAcceptablePfn - Supplies the lowest page frame number
                          which is valid for the allocation.

    HighestAcceptablePfn - Supplies the highest page frame number
                           which is valid for the allocation.

    BoundaryPfn - Supplies the page frame number multiple the allocation must
                  not cross. 0 indicates it can cross any boundary.

    CallingAddress - Supplies the calling address of the allocator.

    CacheType - Supplies the type of cache mapping that will be used for the
                memory.

    Tag - Supplies the tag to tie to this allocation.

Return Value:

    NULL - a contiguous range could not be found to satisfy the request.

    NON-NULL - Returns a pointer (virtual address in the system PTEs portion
               of the system) to the allocated physically contiguous
               memory.

Environment:

    Kernel mode, IRQL of APC_LEVEL or below.

--*/

{
    PFN_NUMBER Page;
    PFN_NUMBER BoundaryMask;
    PVOID BaseAddress;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    PMMPFN StartPfn;
    ULONG BitMapHint;
    PFN_NUMBER SizeInPages;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER StartPage;
    PFN_NUMBER LastPage;
    PMMPTE PointerPte;
    PMMPTE DummyPte;
    PHYSICAL_ADDRESS PhysicalAddress;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    PAGED_CODE();

    UNREFERENCED_PARAMETER (Tag);
    UNREFERENCED_PARAMETER (CallingAddress);

    //
    // This cast is ok because the callers check the PFNs first.
    //

    ASSERT64 (LowestAcceptablePfn < _4gb);
    BitMapHint = (ULONG)LowestAcceptablePfn;

    SizeInPages = BYTES_TO_PAGES (NumberOfBytes);

    BoundaryMask = ~(BoundaryPfn - 1);

    CacheAttribute = MI_TRANSLATE_CACHETYPE (CacheType, 0);

    LOCK_PFN (OldIrql);

    do {
        Page = RtlFindSetBits (MiLowMemoryBitMap, (ULONG)SizeInPages, BitMapHint);

        if (Page == (ULONG)-1) {
            UNLOCK_PFN (OldIrql);
            return NULL;
        }

        if (BoundaryPfn == 0) {
            break;
        }

        //
        // If a noncachable mapping is requested, none of the pages in the
        // requested MDL can reside in a large page. Otherwise we would be
        // creating an incoherent overlapping TB entry as the same physical
        // page would be mapped by 2 different TB entries with different
        // cache attributes.
        //

        if (CacheAttribute != MiCached) {
            for (PageFrameIndex = Page; PageFrameIndex < Page + SizeInPages; PageFrameIndex += 1) {
                if (MI_PAGE_FRAME_INDEX_MUST_BE_CACHED (PageFrameIndex)) {
                    MiNonCachedCollisions += 1;

                    //
                    // Keep it simple and just march one page at a time.
                    //

                    BitMapHint += 1;
                    goto FindNext;
                }
            }
        }

        if (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask) == 0) {

            //
            // This portion of the range meets the alignment requirements.
            //

            break;
        }

        BitMapHint = (ULONG)((Page & BoundaryMask) + BoundaryPfn);

FindNext:

        if ((BitMapHint >= MiLowMemoryBitMap->SizeOfBitMap) ||
            (BitMapHint + SizeInPages > HighestAcceptablePfn)) {

            UNLOCK_PFN (OldIrql);
            return NULL;
        }

    } while (TRUE);

    if (Page + SizeInPages > HighestAcceptablePfn) {
        UNLOCK_PFN (OldIrql);
        return NULL;
    }

    RtlClearBits (MiLowMemoryBitMap, (ULONG)Page, (ULONG)SizeInPages);

    //
    // No need to update ResidentAvailable or commit as these pages were
    // never added to either.
    //

    Pfn1 = MI_PFN_ELEMENT (Page);
    StartPfn = Pfn1;

    StartPage = Page;
    LastPage = Page + SizeInPages;

    DummyPte = MiGetPteAddress (MmNonPagedPoolExpansionStart);

    do {
        ASSERT (Pfn1->u3.e1.PageLocation == ActiveAndValid);
        ASSERT (Pfn1->u3.e1.CacheAttribute == MiNotMapped);
        ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
        ASSERT (Pfn1->u2.ShareCount == 1);
        ASSERT (Pfn1->OriginalPte.u.Long == MM_DEMAND_ZERO_WRITE_PTE);
        ASSERT (Pfn1->u4.VerifierAllocation == 0);
        ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);

        MiDetermineNode (Page, Pfn1);

        Pfn1->u3.e1.CacheAttribute = CacheAttribute;
        Pfn1->u3.e1.EndOfAllocation = 0;

        //
        // Initialize PteAddress so an MiIdentifyPfn scan
        // won't crash. The real value is put in after the loop.
        //

        Pfn1->PteAddress = DummyPte;

        Pfn1 += 1;
        Page += 1;
    } while (Page < LastPage);

    Pfn1 -= 1;
    Pfn1->u3.e1.EndOfAllocation = 1;
    StartPfn->u3.e1.StartOfAllocation = 1;

    UNLOCK_PFN (OldIrql);

    PhysicalAddress.QuadPart = StartPage;
    PhysicalAddress.QuadPart = PhysicalAddress.QuadPart << PAGE_SHIFT;

    BaseAddress = MmMapIoSpace (PhysicalAddress,
                                SizeInPages << PAGE_SHIFT,
                                CacheType);

    if (BaseAddress == NULL) {

        //
        // Release the actual pages.
        //

        LOCK_PFN (OldIrql);

        ASSERT (Pfn1->u3.e1.EndOfAllocation == 1);
        Pfn1->u3.e1.EndOfAllocation = 0;
        Pfn1->u3.e1.CacheAttribute = MiNotMapped;

        RtlSetBits (MiLowMemoryBitMap, (ULONG)StartPage, (ULONG)SizeInPages);

        UNLOCK_PFN (OldIrql);

        return NULL;
    }

    PointerPte = MiGetPteAddress (BaseAddress);

    do {
        StartPfn->PteAddress = PointerPte;
        StartPfn->u4.PteFrame = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress(PointerPte));
        StartPfn += 1;
        PointerPte += 1;
    } while (StartPfn <= Pfn1);

#if 0
    MiInsertContiguousTag (BaseAddress,
                           SizeInPages << PAGE_SHIFT,
                           CallingAddress);
#endif

    return BaseAddress;
}
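
//
// Worked example (illustrative) of the boundary check used above: with
// BoundaryPfn == 0x10, BoundaryMask is ~0xF. A 4-page candidate starting
// at page 0x1E ends at page 0x21, and (0x1E ^ 0x21) & ~0xF == 0x30, which
// is nonzero, so the run crosses the 0x20 boundary and the search resumes
// at (0x1E & ~0xF) + 0x10 == 0x20. A 4-page candidate starting at 0x20
// ends at 0x23, and (0x20 ^ 0x23) & ~0xF == 0, so that run is acceptable.
//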
LOGICAL
MiFreeLowMemory (
    IN PVOID BaseAddress,
    IN ULONG Tag
    )

/*++

Routine Description:

    This is a special routine which returns allocated contiguous physical
    memory below 4GB on a system that has been booted in test mode where
    all this memory has been made generally unavailable to all components.
    This lets us find problems with device drivers.

Arguments:

    BaseAddress - Supplies the base virtual address where the physical
                  address was previously mapped.

    Tag - Supplies the tag for this address.

Return Value:

    TRUE if the allocation was freed by this routine, FALSE if not.

Environment:

    Kernel mode, IRQL of APC_LEVEL or below.

--*/

{
    PFN_NUMBER Page;
    PFN_NUMBER StartPage;
    KIRQL OldIrql;
    KIRQL OldIrqlHyper;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    PFN_NUMBER SizeInPages;
    PMMPTE PointerPte;
    PMMPTE StartPte;
    PULONG TempVa;
    PEPROCESS Process;

    PAGED_CODE();

    UNREFERENCED_PARAMETER (Tag);

    //
    // If the address is superpage mapped then it must be a regular pool
    // address.
    //

    if (MI_IS_PHYSICAL_ADDRESS(BaseAddress)) {
        return FALSE;
    }

    Process = PsGetCurrentProcess ();

    PointerPte = MiGetPteAddress (BaseAddress);
    StartPte = PointerPte;

    ASSERT (PointerPte->u.Hard.Valid == 1);
    Page = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);

    //
    // Only free allocations here that really were obtained from the low pool.
    //

    if (Page >= MiNoLowMemory) {
        return FALSE;
    }

    StartPage = Page;

    Pfn1 = MI_PFN_ELEMENT (Page);

    ASSERT (Pfn1->u3.e1.StartOfAllocation == 1);

    //
    // The PFNs can be walked without the PFN lock as no one can be changing
    // the allocation bits while this allocation is being freed.
    //

    Pfn2 = Pfn1;
    while (Pfn2->u3.e1.EndOfAllocation == 0) {
        Pfn2 += 1;
    }

    SizeInPages = Pfn2 - Pfn1 + 1;

    MmUnmapIoSpace (BaseAddress, SizeInPages << PAGE_SHIFT);

    LOCK_PFN (OldIrql);

    Pfn1->u3.e1.StartOfAllocation = 0;

    do {
        ASSERT (Pfn1->u3.e1.PageLocation == ActiveAndValid);
        ASSERT (Pfn1->u2.ShareCount == 1);
        ASSERT (Pfn1->OriginalPte.u.Long == MM_DEMAND_ZERO_WRITE_PTE);
        ASSERT (Pfn1->u4.VerifierAllocation == 0);
        ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0);

        while (Pfn1->u3.e2.ReferenceCount != 1) {

            //
            // A driver is still transferring data even though the caller
            // is freeing the memory. Wait a bit before filling this page.
            //

            UNLOCK_PFN (OldIrql);

            //
            // Drain the deferred lists as these pages may be
            // sitting in there right now.
            //

            MiDeferredUnlockPages (0);

            KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmShortTime);

            LOCK_PFN (OldIrql);

            ASSERT (Pfn1->u3.e1.StartOfAllocation == 0);

            continue;
        }

        Pfn1->u4.PteFrame = MI_MAGIC_4GB_RECLAIM;
        Pfn1->u3.e1.CacheAttribute = MiNotMapped;

        //
        // Fill the actual page with a recognizable data
        // pattern. No one else should write to these
        // pages unless they are allocated for
        // a contiguous memory request.
        //

        TempVa = (PULONG)MiMapPageInHyperSpace (Process, Page, &OldIrqlHyper);
        RtlFillMemoryUlong (TempVa, PAGE_SIZE, (ULONG)Page | MI_LOWMEM_MAGIC_BIT);
        MiUnmapPageInHyperSpace (Process, TempVa, OldIrqlHyper);

        if (Pfn1 == Pfn2) {
            break;
        }

        Pfn1 += 1;
        Page += 1;

    } while (TRUE);

    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Note the clearing of the bitmap range cannot be done until all the
    // PFNs above are finished.
    //

    ASSERT (RtlAreBitsClear (MiLowMemoryBitMap, (ULONG)StartPage, (ULONG)SizeInPages) == TRUE);
    RtlSetBits (MiLowMemoryBitMap, (ULONG)StartPage, (ULONG)SizeInPages);

    //
    // No need to update ResidentAvailable or commit as these pages were
    // never added to either.
    //

    UNLOCK_PFN (OldIrql);

    return TRUE;
}
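
//
// Illustrative usage sketch only (not part of the original module, and the
// real callers live elsewhere in Mm): a caller running at or below
// APC_LEVEL might obtain and release a below-4GB range as shown below.
// MiLowMemoryUsageExample and the 'woLF' tag are hypothetical names used
// only for this sketch.
//

#if 0
VOID
MiLowMemoryUsageExample (
    VOID
    )
{
    PVOID Buffer;

    //
    // Ask for one page anywhere below 4GB, cached, with no boundary
    // restriction.
    //

    Buffer = MiAllocateLowMemory (PAGE_SIZE,
                                  0,
                                  MiNoLowMemory,
                                  0,
                                  NULL,
                                  MmCached,
                                  'woLF');

    if (Buffer != NULL) {

        //
        // Use the physically contiguous low memory, then return it so the
        // pages go back into MiLowMemoryBitMap.
        //

        MiFreeLowMemory (Buffer, 'woLF');
    }
}
#endif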
#endif