Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

634 lines
17 KiB

  1. /*++
  2. Copyright (c) 1989 Microsoft Corporation
  3. Copyright (c) 1992 Digital Equipment Corporation
  4. Module Name:
  5. physsect.c
  6. Abstract:
  7. This module contains the routine for mapping physical sections for
  8. ALPHA machines.
  9. Author:
  10. Lou Perazzoli (loup) 22-May-1989
  11. Joe Notarangelo 21-Sep-1992
  12. Revision History:
  13. --*/
  14. #include "mi.h"
  15. //#define FIRSTDBG 1
  16. //#define AGGREGATE_DBG FIRSTDBG
  17. static
  18. ULONG
  19. MaximumAlignment(
  20. ULONG Offset
  21. );
  22. static
  23. ULONG
  24. AggregatePages(
  25. PMMPTE,
  26. PFN_NUMBER,
  27. ULONG,
  28. PULONG
  29. );
  30. NTSTATUS
  31. MiMapViewOfPhysicalSection (
  32. IN PCONTROL_AREA ControlArea,
  33. IN PEPROCESS Process,
  34. IN PVOID *CapturedBase,
  35. IN PLARGE_INTEGER SectionOffset,
  36. IN PSIZE_T CapturedViewSize,
  37. IN ULONG ProtectionMask,
  38. IN ULONG_PTR ZeroBits,
  39. IN ULONG AllocationType,
  40. IN BOOLEAN WriteCombined,
  41. OUT PBOOLEAN ReleasedWsMutex
  42. )
  43. /*++
  44. Routine Description:
  45. This routine maps the specified physical section into the
  46. specified process's address space.
  47. Arguments:
  48. see MmMapViewOfSection above...
  49. ControlArea - Supplies the control area for the section.
  50. Process - Supplies the process pointer which is receiving the section.
  51. ProtectionMask - Supplies the initial page protection-mask.
  52. ReleasedWsMutex - Supplies FALSE, receives TRUE if the working set
  53. mutex is released.
  54. Return Value:
  55. Status of the map view operation.
  56. Environment:
  57. Kernel Mode, working set mutex and address creation mutex held.
  58. --*/
  59. {
  60. PMMVAD Vad;
  61. PVOID StartingAddress;
  62. PVOID EndingAddress;
  63. KIRQL OldIrql;
  64. PMMPTE PointerPpe;
  65. PMMPTE PointerPde;
  66. PMMPTE PointerPte;
  67. PMMPTE LastPte;
  68. MMPTE TempPte;
  69. PMMPFN Pfn2;
  70. SIZE_T PhysicalViewSize;
  71. ULONG Alignment;
  72. ULONG PagesToMap;
  73. PFN_NUMBER NextPfn;
  74. PVOID UsedPageTableHandle;
  75. PVOID UsedPageDirectoryHandle;
  76. PMI_PHYSICAL_VIEW PhysicalView;
  77. //
  78. // Physical memory section.
  79. //
  80. #ifdef FIRSTDBG
  81. DbgPrint( "MM: Physsect CaptureBase = %x SectionOffset = %x\n",
  82. CapturedBase, SectionOffset->LowPart );
  83. DbgPrint( "MM: Physsect Allocation Type = %x, MEM_LARGE_PAGES = %x\n",
  84. AllocationType, MEM_LARGE_PAGES );
  85. #endif //FIRSTDBG
  86. //
  87. // Compute the alignment we require for the virtual mapping.
  88. // The default is 64K to match protection boundaries.
  89. // Larger page sizes are used if MEM_LARGE_PAGES is requested.
  90. // The Alpha AXP architecture supports granularity hints so that
  91. // larger pages can be defined in the following multiples of
  92. // PAGE_SIZE:
  93. // 8**(GH) * PAGE_SIZE, where GH element of {0,1,2,3}
  94. //
  95. Alignment = X64K;
  96. if( AllocationType & MEM_LARGE_PAGES ){
  97. //
  98. // MaxAlignment is the maximum boundary alignment of the
  99. // SectionOffset (where the maximum boundary is one of the possible
  100. // granularity hints boundaries)
  101. //
  102. ULONG MaxAlignment = MaximumAlignment( SectionOffset->LowPart );
  103. Alignment = (MaxAlignment > Alignment) ? MaxAlignment : Alignment;
  104. #ifdef FIRSTDBG
  105. DbgPrint( "MM: Alignment = %x, SectionOffset = %x\n",
  106. Alignment, SectionOffset->LowPart );
  107. #endif //FIRSTDBG
  108. }
  109. LOCK_WS (Process);
  110. if (*CapturedBase == NULL) {
  111. //
  112. // Attempt to locate address space. This could raise an
  113. // exception.
  114. //
  115. try {
  116. //
  117. // Find a starting address on an alignment boundary.
  118. //
  119. PhysicalViewSize = (SectionOffset->LowPart + *CapturedViewSize) -
  120. (ULONG_PTR)MI_64K_ALIGN(SectionOffset->LowPart);
  121. StartingAddress = MiFindEmptyAddressRange (PhysicalViewSize,
  122. Alignment,
  123. (ULONG)ZeroBits);
  124. } except (EXCEPTION_EXECUTE_HANDLER) {
  125. return GetExceptionCode();
  126. }
  127. EndingAddress = (PVOID)(((ULONG_PTR)StartingAddress +
  128. PhysicalViewSize - 1L) | (PAGE_SIZE - 1L));
  129. StartingAddress = (PVOID)((ULONG_PTR)StartingAddress +
  130. (SectionOffset->LowPart & (X64K - 1)));
  131. if (ZeroBits > 0) {
  132. if (EndingAddress > (PVOID)((LONG_PTR)0xFFFFFFFF >> ZeroBits)) {
  133. return STATUS_NO_MEMORY;
  134. }
  135. }
  136. } else {
  137. //
  138. // Check to make sure the specified base address to ending address
  139. // is currently unused.
  140. //
  141. PhysicalViewSize = (SectionOffset->LowPart + *CapturedViewSize) -
  142. (ULONG_PTR)MI_64K_ALIGN(SectionOffset->LowPart);
  143. StartingAddress = (PVOID)((ULONG_PTR)MI_64K_ALIGN(*CapturedBase) +
  144. (SectionOffset->LowPart & (X64K - 1)));
  145. EndingAddress = (PVOID)(((ULONG_PTR)StartingAddress +
  146. *CapturedViewSize - 1L) | (PAGE_SIZE - 1L));
  147. Vad = MiCheckForConflictingVad (StartingAddress, EndingAddress);
  148. if (Vad != (PMMVAD)NULL) {
  149. #if 0
  150. MiDumpConflictingVad (StartingAddress, EndingAddress, Vad);
  151. #endif
  152. return STATUS_CONFLICTING_ADDRESSES;
  153. }
  154. }
  155. //
  156. // An unoccuppied address range has been found, build the virtual
  157. // address descriptor to describe this range.
  158. //
  159. //
  160. // Establish an exception handler and attempt to allocate
  161. // the pool and charge quota. Note that the InsertVad routine
  162. // will also charge quota which could raise an exception.
  163. //
  164. try {
  165. PhysicalView = (PMI_PHYSICAL_VIEW)ExAllocatePoolWithTag (NonPagedPool,
  166. sizeof(MI_PHYSICAL_VIEW),
  167. MI_PHYSICAL_VIEW_KEY);
  168. if (PhysicalView == NULL) {
  169. ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
  170. }
  171. Vad = (PMMVAD)ExAllocatePoolWithTag (NonPagedPool, sizeof(MMVAD), ' daV');
  172. if (Vad == NULL) {
  173. ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
  174. }
  175. PhysicalView->Vad = Vad;
  176. PhysicalView->StartVa = StartingAddress;
  177. PhysicalView->EndVa = EndingAddress;
  178. Vad->StartingVpn = MI_VA_TO_VPN (StartingAddress);
  179. Vad->EndingVpn = MI_VA_TO_VPN (EndingAddress);
  180. Vad->ControlArea = ControlArea;
  181. Vad->u.LongFlags = 0;
  182. Vad->u2.VadFlags2.Inherit = ViewUnmap;
  183. Vad->u.VadFlags.PhysicalMapping = 1;
  184. Vad->u4.Banked = NULL;
  185. // Vad->u.VadFlags.ImageMap = 0;
  186. Vad->u.VadFlags.Protection = ProtectionMask;
  187. Vad->u2.VadFlags2.CopyOnWrite = 0;
  188. // Vad->u.VadFlags.LargePages = 0;
  189. Vad->FirstPrototypePte =
  190. (PMMPTE)(MI_CONVERT_PHYSICAL_BUS_TO_PFN(*SectionOffset));
  191. //
  192. // Set the first prototype PTE field in the Vad.
  193. //
  194. Vad->LastContiguousPte =
  195. (PMMPTE)(MI_CONVERT_PHYSICAL_BUS_TO_PFN(*SectionOffset));
  196. //
  197. // Insert the VAD. This could get an exception.
  198. //
  199. MiInsertVad (Vad);
  200. } except (EXCEPTION_EXECUTE_HANDLER) {
  201. if (PhysicalView != NULL) {
  202. ExFreePool (PhysicalView);
  203. }
  204. if (Vad != (PMMVAD)NULL) {
  205. //
  206. // The pool allocation suceeded, but the quota charge
  207. // in InsertVad failed, deallocate the pool and return
  208. // and error.
  209. //
  210. ExFreePool (Vad);
  211. return GetExceptionCode();
  212. }
  213. return STATUS_INSUFFICIENT_RESOURCES;
  214. }
  215. // Increment the count of the number of views for the
  216. // section object. This requires the PFN mutex to be held.
  217. //
  218. LOCK_AWE (Process, OldIrql);
  219. LOCK_PFN_AT_DPC ();
  220. if (PhysicalView->Vad->u.VadFlags.PhysicalMapping == 1) {
  221. Process->HasPhysicalVad = 1;
  222. }
  223. InsertHeadList (&Process->PhysicalVadList, &PhysicalView->ListEntry);
  224. ControlArea->NumberOfMappedViews += 1;
  225. ControlArea->NumberOfUserReferences += 1;
  226. ASSERT (ControlArea->NumberOfSectionReferences != 0);
  227. UNLOCK_PFN_FROM_DPC ();
  228. UNLOCK_AWE (Process, OldIrql);
  229. //
  230. // Build the PTEs in the address space.
  231. //
  232. PointerPpe = MiGetPpeAddress (StartingAddress);
  233. PointerPde = MiGetPdeAddress (StartingAddress);
  234. PointerPte = MiGetPteAddress (StartingAddress);
  235. LastPte = MiGetPteAddress (EndingAddress);
  236. #if defined (_WIN64)
  237. MiMakePpeExistAndMakeValid (PointerPpe, Process, FALSE);
  238. if (PointerPde->u.Long == 0) {
  239. UsedPageDirectoryHandle = MI_GET_USED_PTES_HANDLE (PointerPte);
  240. ASSERT (MI_GET_USED_PTES_FROM_HANDLE (UsedPageDirectoryHandle) == 0);
  241. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageDirectoryHandle);
  242. }
  243. #endif
  244. MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
  245. Pfn2 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber);
  246. PagesToMap = (ULONG)((((ULONG_PTR)EndingAddress - (ULONG_PTR)StartingAddress))
  247. + (PAGE_SIZE-1) ) >> PAGE_SHIFT;
  248. NextPfn = MI_CONVERT_PHYSICAL_BUS_TO_PFN(*SectionOffset);
  249. #ifdef FIRSTDBG
  250. DbgPrint( "MM: Physsect, PagesToMap = %x NextPfn = %x\n",
  251. PagesToMap, NextPfn );
  252. #endif //FIRSTDBG
  253. MI_MAKE_VALID_PTE (TempPte,
  254. NextPfn,
  255. ProtectionMask,
  256. PointerPte);
  257. if (WriteCombined == TRUE) {
  258. MI_SET_PTE_WRITE_COMBINE (TempPte);
  259. }
  260. if (TempPte.u.Hard.Write) {
  261. TempPte.u.Hard.FaultOnWrite = 1;
  262. }
  263. while (PointerPte <= LastPte) {
  264. ULONG PagesTogether;
  265. ULONG GranularityHint;
  266. //
  267. // Compute the number of pages that can be mapped together
  268. //
  269. if( AllocationType & MEM_LARGE_PAGES ){
  270. PagesTogether = AggregatePages( PointerPte,
  271. NextPfn,
  272. PagesToMap,
  273. &GranularityHint );
  274. } else {
  275. PagesTogether = 1;
  276. GranularityHint = 0;
  277. }
  278. #ifdef FIRSTDBG
  279. DbgPrint( "MM: Physsect PointerPte = %x, NextPfn = %x\n",
  280. PointerPte, NextPfn );
  281. DbgPrint( "MM: Va = %x TempPte.Pfn = %x\n",
  282. MiGetVirtualAddressMappedByPte( PointerPte ),
  283. TempPte.u.Hard.PageFrameNumber );
  284. DbgPrint( "MM: PagesToMap = %x\n", PagesToMap );
  285. DbgPrint( "MM: PagesTogether = %x, GH = %x\n",
  286. PagesTogether, GranularityHint );
  287. #endif //FIRSTDBG
  288. TempPte.u.Hard.GranularityHint = GranularityHint;
  289. NextPfn += PagesTogether;
  290. PagesToMap -= PagesTogether;
  291. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (MiGetVirtualAddressMappedByPte (PointerPte));
  292. while( PagesTogether-- ){
  293. if (MiIsPteOnPdeBoundary (PointerPte)) {
  294. PointerPde = MiGetPteAddress (PointerPte);
  295. if (MiIsPteOnPpeBoundary (PointerPte)) {
  296. PointerPpe = MiGetPteAddress (PointerPde);
  297. MiMakePpeExistAndMakeValid (PointerPpe, Process, FALSE);
  298. if (PointerPde->u.Long == 0) {
  299. UsedPageDirectoryHandle = MI_GET_USED_PTES_HANDLE (PointerPte);
  300. ASSERT (MI_GET_USED_PTES_FROM_HANDLE (UsedPageDirectoryHandle) == 0);
  301. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageDirectoryHandle);
  302. }
  303. }
  304. MiMakePdeExistAndMakeValid (PointerPde, Process, FALSE);
  305. Pfn2 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
  306. UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (MiGetVirtualAddressMappedByPte (PointerPte));
  307. }
  308. ASSERT( PointerPte->u.Long == 0 );
  309. *PointerPte = TempPte;
  310. #if PFN_CONSISTENCY
  311. LOCK_PFN (OldIrql);
  312. #endif
  313. Pfn2->u2.ShareCount += 1;
  314. #if PFN_CONSISTENCY
  315. UNLOCK_PFN (OldIrql);
  316. #endif
  317. //
  318. // Increment the count of non-zero page table entries for this
  319. // page table and the number of private pages for the process.
  320. //
  321. MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);
  322. PointerPte += 1;
  323. TempPte.u.Hard.PageFrameNumber += 1;
  324. } // while (PagesTogether-- )
  325. } // while (PointerPte <= LastPte)
  326. UNLOCK_WS (Process);
  327. *ReleasedWsMutex = TRUE;
  328. //
  329. // Update the current virtual size in the process header.
  330. //
  331. *CapturedViewSize = (ULONG)((ULONG_PTR)EndingAddress - (ULONG_PTR)StartingAddress + 1L);
  332. Process->VirtualSize += *CapturedViewSize;
  333. if (Process->VirtualSize > Process->PeakVirtualSize) {
  334. Process->PeakVirtualSize = Process->VirtualSize;
  335. }
  336. //
  337. // Translate the virtual address to a quasi-virtual address for
  338. // use by drivers that touch mapped devices. Note: the routine
  339. // HalCreateQva will not translate the StartingAddress if the
  340. // StartingAddress is within system memory address space.
  341. //
  342. // N.B. - It will not work to attempt map addresses that begin in
  343. // system memory and extend through i/o space.
  344. //
  345. *CapturedBase = HalCreateQva( *SectionOffset, StartingAddress );
  346. return STATUS_SUCCESS;
  347. }
  348. ULONG
  349. MaximumAlignment(
  350. IN ULONG Offset
  351. )
  352. /*++
  353. Routine Description:
  354. This routine returns the maximum granularity hint alignment boundary
  355. to which Offset is naturally aligned.
  356. Arguments:
  357. Offset - Supplies the address offset to check for alignment.
  358. Return Value:
  359. The number which represents the largest natural alignment of Offset.
  360. Environment:
  361. --*/
  362. {
  363. if( (Offset & (GH3_PAGE_SIZE - 1)) == 0 ){
  364. return GH3_PAGE_SIZE;
  365. }
  366. if( (Offset & (GH2_PAGE_SIZE - 1)) == 0 ){
  367. return GH2_PAGE_SIZE;
  368. }
  369. if( (Offset & (GH1_PAGE_SIZE - 1)) == 0 ){
  370. return GH1_PAGE_SIZE;
  371. }
  372. if( (Offset & (PAGE_SIZE - 1)) == 0 ){
  373. return PAGE_SIZE;
  374. }
  375. return 0;
  376. }
  377. ULONG
  378. AggregatePages(
  379. IN PMMPTE PointerPte,
  380. IN PFN_NUMBER Pfn,
  381. IN ULONG Pages,
  382. OUT PULONG GranularityHint
  383. )
  384. /*++
  385. Routine Description:
  386. This routine computes the number of standard size pages that can be
  387. aggregated into a single large page and returns the granularity hint
  388. for that size large page.
  389. Arguments:
  390. PointerPte - Supplies the PTE pointer for the starting virtual address
  391. of the mapping.
  392. Pfn - Supplies the starting page frame number of the memory to be
  393. mapped.
  394. Pages - Supplies the number of pages to map.
  395. GranularityHint - Receives the granularity hint for the large page used
  396. to aggregate the standard pages.
  397. Return Value:
  398. The number of pages that can be aggregated together.
  399. Environment:
  400. --*/
  401. {
  402. ULONG MaxVirtualAlignment;
  403. ULONG MaxPhysicalAlignment;
  404. ULONG MaxPageAlignment;
  405. ULONG MaxAlignment;
  406. //
  407. // Determine the largest page that will map a maximum of Pages.
  408. // The largest page must be both virtually and physically aligned
  409. // to the large page size boundary.
  410. // Determine the largest common alignment for the virtual and
  411. // physical addresses, factor in Pages, and then match to the
  412. // largest page size possible via the granularity hints.
  413. //
  414. MaxVirtualAlignment =
  415. MaximumAlignment((ULONG)((ULONG_PTR)MiGetVirtualAddressMappedByPte(PointerPte)));
  416. MaxPhysicalAlignment = MaximumAlignment( (ULONG)(Pfn << PAGE_SHIFT) );
  417. MaxPageAlignment = (ULONG)(Pages << PAGE_SHIFT);
  418. #ifdef AGGREGATE_DBG
  419. DbgPrint( "MM: Aggregate MaxVirtualAlign = %x\n", MaxVirtualAlignment );
  420. DbgPrint( "MM: Aggregate MaxPhysicalAlign = %x\n", MaxPhysicalAlignment );
  421. DbgPrint( "MM: Aggregate MaxPageAlign = %x\n", MaxPageAlignment );
  422. #endif //AGGREGATE_DBG
  423. //
  424. // Maximum alignment is the minimum of the virtual and physical alignments.
  425. //
  426. MaxAlignment = (MaxVirtualAlignment > MaxPhysicalAlignment) ?
  427. MaxPhysicalAlignment : MaxVirtualAlignment;
  428. MaxAlignment = (MaxAlignment > MaxPageAlignment) ?
  429. MaxPageAlignment : MaxAlignment;
  430. //
  431. // Convert MaxAlignment to granularity hint value
  432. //
  433. if( (MaxAlignment & (GH3_PAGE_SIZE - 1)) == 0 ){
  434. *GranularityHint = GH3;
  435. } else if( (MaxAlignment & (GH2_PAGE_SIZE - 1)) == 0 ){
  436. *GranularityHint = GH2;
  437. } else if( (MaxAlignment & (GH1_PAGE_SIZE - 1)) == 0 ){
  438. *GranularityHint = GH1;
  439. } else if( (MaxAlignment & (PAGE_SIZE - 1)) == 0 ){
  440. *GranularityHint = GH0;
  441. } else {
  442. *GranularityHint = GH0;
  443. #if DBG
  444. DbgPrint( "MM: Aggregate Physical pages - not page aligned\n" );
  445. #endif //DBG
  446. } // end, if then elseif
  447. //
  448. // Return number of pages aggregated.
  449. //
  450. return( MaxAlignment >> PAGE_SHIFT );
  451. }