Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

627 lines
17 KiB

  1. /*++
  2. Copyright (c) 1989 Microsoft Corporation
  3. Copyright (c) 1992 Digital Equipment Corporation
  4. Module Name:
  5. physsect.c
  6. Abstract:
  7. This module contains the routine for mapping physical sections for
  8. ALPHA machines.
  9. Author:
  10. Lou Perazzoli (loup) 22-May-1989
  11. Joe Notarangelo 21-Sep-1992
  12. Revision History:
  13. Landy Wang (landyw) 08-April-1998 : Modifications for 3-level 64-bit NT.
  14. --*/
  15. #include "mi.h"
  16. //#define FIRSTDBG 1
  17. //#define AGGREGATE_DBG FIRSTDBG
  18. static
  19. ULONG
  20. MaximumAlignment( ULONG );
  21. static
  22. ULONG
  23. AggregatePages( PMMPTE, ULONG, ULONG, PULONG );
  24. NTSTATUS
  25. MiMapViewOfPhysicalSection (
  26. IN PCONTROL_AREA ControlArea,
  27. IN PEPROCESS Process,
  28. IN PVOID *CapturedBase,
  29. IN PLARGE_INTEGER SectionOffset,
  30. IN PSIZE_T CapturedViewSize,
  31. IN ULONG ProtectionMask,
  32. IN ULONG_PTR ZeroBits,
  33. IN ULONG AllocationType,
  34. IN BOOLEAN WriteCombined,
  35. OUT PBOOLEAN ReleasedWsMutex
  36. )
  37. /*++
  38. Routine Description:
  39. This routine maps the specified physical section into the
  40. specified process's address space.
  41. Arguments:
  42. see MmMapViewOfSection above...
  43. ControlArea - Supplies the control area for the section.
  44. Process - Supplies the process pointer which is receiving the section.
  45. ProtectionMask - Supplies the initial page protection-mask.
  46. ReleasedWsMutex - Supplies FALSE, receives TRUE if the working set
  47. mutex is released.
  48. Return Value:
  49. Status of the map view operation.
  50. Environment:
  51. Kernel Mode, working set mutex and address creation mutex held.
  52. --*/
  53. {
  54. PMMVAD Vad;
  55. PVOID StartingAddress;
  56. PVOID EndingAddress;
  57. KIRQL OldIrql;
  58. PMMPTE PointerPpe;
  59. PMMPTE PointerPde;
  60. PMMPTE PointerPte;
  61. PMMPTE LastPte;
  62. MMPTE TempPte;
  63. PMMPFN Pfn2;
  64. ULONG PhysicalViewSize;
  65. ULONG Alignment;
  66. ULONG PagesToMap;
  67. ULONG NextPfn;
  68. PVOID UsedPageTableHandle;
  69. PVOID UsedPageDirectoryHandle;
  70. PMI_PHYSICAL_VIEW PhysicalView;
  71. //
  72. // Physical memory section.
  73. //
  74. #ifdef FIRSTDBG
  75. DbgPrint( "MM: Physsect CaptureBase = %x SectionOffset = %x\n",
  76. CapturedBase, SectionOffset->LowPart );
  77. DbgPrint( "MM: Physsect Allocation Type = %x, MEM_LARGE_PAGES = %x\n",
  78. AllocationType, MEM_LARGE_PAGES );
  79. #endif //FIRSTDBG
  80. //
  81. // Compute the alignment we require for the virtual mapping.
  82. // The default is 64K to match protection boundaries.
  83. // Larger page sizes are used if MEM_LARGE_PAGES is requested.
  84. // The Alpha AXP architecture supports granularity hints so that
  85. // larger pages can be defined in the following multiples of
  86. // PAGE_SIZE:
  87. // 8**(GH) * PAGE_SIZE, where GH element of {0,1,2,3}
  88. //
  89. Alignment = X64K;
  90. if( AllocationType & MEM_LARGE_PAGES ){
  91. //
  92. // MaxAlignment is the maximum boundary alignment of the
  93. // SectionOffset (where the maximum boundary is one of the possible
  94. // granularity hints boundaries)
  95. //
  96. ULONG MaxAlignment = MaximumAlignment( SectionOffset->LowPart );
  97. Alignment = (MaxAlignment > Alignment) ? MaxAlignment : Alignment;
  98. #ifdef FIRSTDBG
  99. DbgPrint( "MM: Alignment = %x, SectionOffset = %x\n",
  100. Alignment, SectionOffset->LowPart );
  101. #endif //FIRSTDBG
  102. }
  103. LOCK_WS_UNSAFE (Process);
  104. if (*CapturedBase == NULL) {
  105. //
  106. // Attempt to locate address space. This could raise an
  107. // exception.
  108. //
  109. try {
  110. //
  111. // Find a starting address on an alignment boundary.
  112. //
  113. PhysicalViewSize = (SectionOffset->LowPart + *CapturedViewSize) -
  114. (ULONG)MI_64K_ALIGN(SectionOffset->LowPart);
  115. StartingAddress = MiFindEmptyAddressRange (PhysicalViewSize,
  116. Alignment,
  117. (ULONG)ZeroBits);
  118. } except (EXCEPTION_EXECUTE_HANDLER) {
  119. return GetExceptionCode();
  120. }
  121. EndingAddress = (PVOID)(((ULONG)StartingAddress +
  122. PhysicalViewSize - 1L) | (PAGE_SIZE - 1L));
  123. StartingAddress = (PVOID)((ULONG)StartingAddress +
  124. (SectionOffset->LowPart & (X64K - 1)));
  125. if (ZeroBits > 0) {
  126. if (EndingAddress > (PVOID)((ULONG)0xFFFFFFFF >> ZeroBits)) {
  127. return STATUS_NO_MEMORY;
  128. }
  129. }
  130. } else {
  131. //
  132. // Check to make sure the specified base address to ending address
  133. // is currently unused.
  134. //
  135. PhysicalViewSize = (SectionOffset->LowPart + *CapturedViewSize) -
  136. (ULONG)MI_64K_ALIGN(SectionOffset->LowPart);
  137. StartingAddress = (PVOID)((ULONG)MI_64K_ALIGN(*CapturedBase) +
  138. (SectionOffset->LowPart & (X64K - 1)));
  139. EndingAddress = (PVOID)(((ULONG)StartingAddress +
  140. *CapturedViewSize - 1L) | (PAGE_SIZE - 1L));
  141. Vad = MiCheckForConflictingVad (StartingAddress, EndingAddress);
  142. if (Vad != (PMMVAD)NULL) {
  143. #if 0
  144. MiDumpConflictingVad (StartingAddress, EndingAddress, Vad);
  145. #endif
  146. return STATUS_CONFLICTING_ADDRESSES;
  147. }
  148. }
  149. //
  150. // An unoccuppied address range has been found, build the virtual
  151. // address descriptor to describe this range.
  152. //
  153. //
  154. // Establish an exception handler and attempt to allocate
  155. // the pool and charge quota. Note that the InsertVad routine
  156. // will also charge quota which could raise an exception.
  157. //
  158. try {
  159. PhysicalView = (PMI_PHYSICAL_VIEW)ExAllocatePoolWithTag (NonPagedPool,
  160. sizeof(MI_PHYSICAL_VIEW),
  161. MI_PHYSICAL_VIEW_KEY);
  162. if (PhysicalView == NULL) {
  163. ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
  164. }
  165. Vad = (PMMVAD)ExAllocatePoolWithTag (NonPagedPool, sizeof(MMVAD), ' daV');
  166. if (Vad == NULL) {
  167. ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
  168. }
  169. PhysicalView->Vad = Vad;
  170. PhysicalView->StartVa = StartingAddress;
  171. PhysicalView->EndVa = EndingAddress;
  172. Vad->StartingVpn = MI_VA_TO_VPN (StartingAddress);
  173. Vad->EndingVpn = MI_VA_TO_VPN (EndingAddress);
  174. Vad->ControlArea = ControlArea;
  175. Vad->u.LongFlags = 0;
  176. Vad->u2.VadFlags2.Inherit = ViewUnmap;
  177. Vad->u.VadFlags.PhysicalMapping = 1;
  178. Vad->u4.Banked = NULL;
  179. // Vad->u.VadFlags.ImageMap = 0;
  180. Vad->u.VadFlags.Protection = ProtectionMask;
  181. Vad->u2.VadFlags2.CopyOnWrite = 0;
  182. // Vad->u.VadFlags.LargePages = 0;
  183. Vad->FirstPrototypePte =
  184. (PMMPTE)(MI_CONVERT_PHYSICAL_BUS_TO_PFN(*SectionOffset));
  185. //
  186. // Set the first prototype PTE field in the Vad.
  187. //
  188. Vad->LastContiguousPte =
  189. (PMMPTE)(MI_CONVERT_PHYSICAL_BUS_TO_PFN(*SectionOffset));
  190. //
  191. // Insert the VAD. This could get an exception.
  192. //
  193. MiInsertVad (Vad);
  194. } except (EXCEPTION_EXECUTE_HANDLER) {
  195. if (PhysicalView != NULL) {
  196. ExFreePool (PhysicalView);
  197. }
  198. if (Vad != (PMMVAD)NULL) {
  199. //
  200. // The pool allocation suceeded, but the quota charge
  201. // in InsertVad failed, deallocate the pool and return
  202. // an error.
  203. //
  204. ExFreePool (Vad);
  205. return GetExceptionCode();
  206. }
  207. return STATUS_INSUFFICIENT_RESOURCES;
  208. }
  209. // Increment the count of the number of views for the
  210. // section object. This requires the PFN mutex to be held.
  211. //
  212. LOCK_AWE (Process, OldIrql);
  213. LOCK_PFN_AT_DPC ();
  214. if (PhysicalView->Vad->u.VadFlags.PhysicalMapping == 1) {
  215. Process->HasPhysicalVad = 1;
  216. }
  217. InsertHeadList (&Process->PhysicalVadList, &PhysicalView->ListEntry);
  218. ControlArea->NumberOfMappedViews += 1;
  219. ControlArea->NumberOfUserReferences += 1;
  220. ASSERT (ControlArea->NumberOfSectionReferences != 0);
  221. UNLOCK_PFN_FROM_DPC ();
  222. UNLOCK_AWE (Process, OldIrql);
  223. //
  224. // Build the PTEs in the address space.
  225. //
  226. PointerPpe = MiGetPpeAddress (StartingAddress);
  227. PointerPde = MiGetPdeAddress (StartingAddress);
  228. PointerPte = MiGetPteAddress (StartingAddress);
  229. LastPte = MiGetPteAddress (EndingAddress);
  230. #if defined (_WIN64)
  231. MiMakePpeExistAndMakeValid (PointerPpe, Process, FALSE);
  232. if (PointerPde->u.Long == 0) {
  233. UsedPageDirectoryHandle = MI_GET_USED_PTES_HANDLE (PointerPte);
  234. ASSERT (MI_GET_USED_PTES_FROM_HANDLE (UsedPageDirectoryHandle) == 0);
  235. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageDirectoryHandle);
  236. }
  237. #endif
  238. MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
  239. Pfn2 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber);
  240. PagesToMap = ( ((ULONG)EndingAddress - (ULONG)StartingAddress)
  241. + (PAGE_SIZE-1) ) >> PAGE_SHIFT;
  242. NextPfn = MI_CONVERT_PHYSICAL_BUS_TO_PFN(*SectionOffset);
  243. #ifdef FIRSTDBG
  244. DbgPrint( "MM: Physsect, PagesToMap = %x NextPfn = %x\n",
  245. PagesToMap, NextPfn );
  246. #endif //FIRSTDBG
  247. MI_MAKE_VALID_PTE (TempPte,
  248. NextPfn,
  249. ProtectionMask,
  250. PointerPte);
  251. if (WriteCombined == TRUE) {
  252. MI_SET_PTE_WRITE_COMBINE (TempPte);
  253. }
  254. if (TempPte.u.Hard.Write) {
  255. TempPte.u.Hard.Dirty = 1;
  256. }
  257. while (PointerPte <= LastPte) {
  258. ULONG PagesTogether;
  259. ULONG GranularityHint;
  260. //
  261. // Compute the number of pages that can be mapped together
  262. //
  263. if (AllocationType & MEM_LARGE_PAGES) {
  264. PagesTogether = AggregatePages (PointerPte,
  265. NextPfn,
  266. PagesToMap,
  267. &GranularityHint);
  268. } else {
  269. PagesTogether = 1;
  270. GranularityHint = 0;
  271. }
  272. #ifdef FIRSTDBG
  273. DbgPrint( "MM: Physsect PointerPte = %x, NextPfn = %x\n",
  274. PointerPte, NextPfn );
  275. DbgPrint( "MM: Va = %x TempPte.Pfn = %x\n",
  276. MiGetVirtualAddressMappedByPte( PointerPte ),
  277. TempPte.u.Hard.PageFrameNumber );
  278. DbgPrint( "MM: PagesToMap = %x\n", PagesToMap );
  279. DbgPrint( "MM: PagesTogether = %x, GH = %x\n",
  280. PagesTogether, GranularityHint );
  281. #endif //FIRSTDBG
  282. TempPte.u.Hard.GranularityHint = GranularityHint;
  283. NextPfn += PagesTogether;
  284. PagesToMap -= PagesTogether;
  285. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (MiGetVirtualAddressMappedByPte (PointerPte));
  286. while (PagesTogether--) {
  287. if (MiIsPteOnPdeBoundary (PointerPte)) {
  288. PointerPde = MiGetPteAddress (PointerPte);
  289. if (MiIsPteOnPpeBoundary (PointerPte)) {
  290. PointerPpe = MiGetPteAddress (PointerPde);
  291. MiMakePpeExistAndMakeValid (PointerPpe, Process, FALSE);
  292. if (PointerPde->u.Long == 0) {
  293. UsedPageDirectoryHandle = MI_GET_USED_PTES_HANDLE (PointerPte);
  294. ASSERT (MI_GET_USED_PTES_FROM_HANDLE (UsedPageDirectoryHandle) == 0);
  295. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageDirectoryHandle);
  296. }
  297. }
  298. MiMakePdeExistAndMakeValid (PointerPde, Process, FALSE);
  299. Pfn2 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
  300. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (MiGetVirtualAddressMappedByPte (PointerPte));
  301. }
  302. ASSERT( PointerPte->u.Long == 0 );
  303. *PointerPte = TempPte;
  304. #if PFN_CONSISTENCY
  305. LOCK_PFN (OldIrql);
  306. #endif
  307. Pfn2->u2.ShareCount += 1;
  308. #if PFN_CONSISTENCY
  309. UNLOCK_PFN (OldIrql);
  310. #endif
  311. //
  312. // Increment the count of non-zero page table entries for this
  313. // page table and the number of private pages for the process.
  314. //
  315. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  316. PointerPte += 1;
  317. TempPte.u.Hard.PageFrameNumber += 1;
  318. } // while (PagesTogether-- )
  319. } // while (PointerPte <= LastPte)
  320. UNLOCK_WS_UNSAFE (Process);
  321. *ReleasedWsMutex = TRUE;
  322. //
  323. // Update the current virtual size in the process header.
  324. //
  325. *CapturedViewSize = (ULONG)EndingAddress - (ULONG)StartingAddress + 1L;
  326. Process->VirtualSize += *CapturedViewSize;
  327. if (Process->VirtualSize > Process->PeakVirtualSize) {
  328. Process->PeakVirtualSize = Process->VirtualSize;
  329. }
  330. //
  331. // Translate the virtual address to a quasi-virtual address for
  332. // use by drivers that touch mapped devices. Note: the routine
  333. // HalCreateQva will not translate the StartingAddress if the
  334. // StartingAddress is within system memory address space.
  335. //
  336. // N.B. - It will not work to attempt map addresses that begin in
  337. // system memory and extend through i/o space.
  338. //
  339. *CapturedBase = HalCreateQva( *SectionOffset, StartingAddress );
  340. return STATUS_SUCCESS;
  341. }
  342. ULONG
  343. MaximumAlignment(
  344. IN ULONG Offset
  345. )
  346. /*++
  347. Routine Description:
  348. This routine returns the maximum granularity hint alignment boundary
  349. to which Offset is naturally aligned.
  350. Arguments:
  351. Offset - Supplies the address offset to check for alignment.
  352. Return Value:
  353. The number which represents the largest natural alignment of Offset.
  354. Environment:
  355. --*/
  356. {
  357. if( (Offset & (GH3_PAGE_SIZE - 1)) == 0 ){
  358. return GH3_PAGE_SIZE;
  359. }
  360. if( (Offset & (GH2_PAGE_SIZE - 1)) == 0 ){
  361. return GH2_PAGE_SIZE;
  362. }
  363. if( (Offset & (GH1_PAGE_SIZE - 1)) == 0 ){
  364. return GH1_PAGE_SIZE;
  365. }
  366. if( (Offset & (PAGE_SIZE - 1)) == 0 ){
  367. return PAGE_SIZE;
  368. }
  369. return 0;
  370. }
  371. ULONG
  372. AggregatePages(
  373. IN PMMPTE PointerPte,
  374. IN ULONG Pfn,
  375. IN ULONG Pages,
  376. OUT PULONG GranularityHint
  377. )
  378. /*++
  379. Routine Description:
  380. This routine computes the number of standard size pages that can be
  381. aggregated into a single large page and returns the granularity hint
  382. for that size large page.
  383. Arguments:
  384. PointerPte - Supplies the PTE pointer for the starting virtual address
  385. of the mapping.
  386. Pfn - Supplies the starting page frame number of the memory to be
  387. mapped.
  388. Pages - Supplies the number of pages to map.
  389. GranularityHint - Receives the granularity hint for the large page used
  390. to aggregate the standard pages.
  391. Return Value:
  392. The number of pages that can be aggregated together.
  393. Environment:
  394. --*/
  395. {
  396. ULONG MaxVirtualAlignment;
  397. ULONG MaxPhysicalAlignment;
  398. ULONG MaxPageAlignment;
  399. ULONG MaxAlignment;
  400. //
  401. // Determine the largest page that will map a maximum of Pages.
  402. // The largest page must be both virtually and physically aligned
  403. // to the large page size boundary.
  404. // Determine the largest common alignment for the virtual and
  405. // physical addresses, factor in Pages, and then match to the
  406. // largest page size possible via the granularity hints.
  407. //
  408. MaxVirtualAlignment = MaximumAlignment((ULONG)
  409. MiGetVirtualAddressMappedByPte( PointerPte ) );
  410. MaxPhysicalAlignment = MaximumAlignment( (ULONG)(Pfn << PAGE_SHIFT) );
  411. MaxPageAlignment = (ULONG)(Pages << PAGE_SHIFT);
  412. #ifdef AGGREGATE_DBG
  413. DbgPrint( "MM: Aggregate MaxVirtualAlign = %x\n", MaxVirtualAlignment );
  414. DbgPrint( "MM: Aggregate MaxPhysicalAlign = %x\n", MaxPhysicalAlignment );
  415. DbgPrint( "MM: Aggregate MaxPageAlign = %x\n", MaxPageAlignment );
  416. #endif //AGGREGATE_DBG
  417. //
  418. // Maximum alignment is the minimum of the virtual and physical alignments.
  419. //
  420. MaxAlignment = (MaxVirtualAlignment > MaxPhysicalAlignment) ?
  421. MaxPhysicalAlignment : MaxVirtualAlignment;
  422. MaxAlignment = (MaxAlignment > MaxPageAlignment) ?
  423. MaxPageAlignment : MaxAlignment;
  424. //
  425. // Convert MaxAlignment to granularity hint value
  426. //
  427. if( (MaxAlignment & (GH3_PAGE_SIZE - 1)) == 0 ){
  428. *GranularityHint = GH3;
  429. } else if( (MaxAlignment & (GH2_PAGE_SIZE - 1)) == 0 ){
  430. *GranularityHint = GH2;
  431. } else if( (MaxAlignment & (GH1_PAGE_SIZE - 1)) == 0 ){
  432. *GranularityHint = GH1;
  433. } else if( (MaxAlignment & (PAGE_SIZE - 1)) == 0 ){
  434. *GranularityHint = GH0;
  435. } else {
  436. *GranularityHint = GH0;
  437. #if DBG
  438. DbgPrint( "MM: Aggregate Physical pages - not page aligned\n" );
  439. #endif //DBG
  440. } // end, if then elseif
  441. //
  442. // Return number of pages aggregated.
  443. //
  444. return( MaxAlignment >> PAGE_SHIFT );
  445. }