Leaked source code of Windows Server 2003


/*++

Copyright (c) 1989 Microsoft Corporation

Module Name:

    hypermap.c

Abstract:

    This module contains the routines which map physical pages into
    reserved PTEs within hyper space.

Author:

    Lou Perazzoli (loup) 5-Apr-1989
    Landy Wang (landyw) 02-June-1997

Revision History:

--*/

#include "mi.h"

PMMPTE MiFirstReservedZeroingPte;

KEVENT MiImageMappingPteEvent;

#pragma alloc_text(PAGE,MiMapImageHeaderInHyperSpace)
#pragma alloc_text(PAGE,MiUnmapImageHeaderInHyperSpace)


PVOID
MiMapPageInHyperSpace (
    IN PEPROCESS Process,
    IN PFN_NUMBER PageFrameIndex,
    IN PKIRQL OldIrql
    )

/*++

Routine Description:

    This procedure maps the specified physical page into hyper space
    and returns the virtual address which maps the page.

    ************************************
    *                                  *
    * Returns with a spin lock held!!! *
    *                                  *
    ************************************

Arguments:

    Process - Supplies the current process.

    PageFrameIndex - Supplies the physical page number to map.

    OldIrql - Supplies a pointer in which to return the entry IRQL.

Return Value:

    Returns the address where the requested page was mapped.

    RETURNS WITH THE HYPERSPACE SPIN LOCK HELD!!!!

    The routine MiUnmapHyperSpaceMap MUST be called to release the lock!!!!

Environment:

    Kernel mode.

--*/

{
    MMPTE TempPte;
    PMMPTE PointerPte;
    PFN_NUMBER offset;

    ASSERT (PageFrameIndex != 0);

    PointerPte = MmFirstReservedMappingPte;

#if defined(NT_UP)
    UNREFERENCED_PARAMETER (Process);
#endif

    LOCK_HYPERSPACE (Process, OldIrql);

    //
    // Get offset to first free PTE.
    //

    offset = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);

    if (offset == 0) {

        //
        // All the reserved PTEs have been used, make them all invalid.
        //

        MI_MAKING_MULTIPLE_PTES_INVALID (FALSE);

#if DBG
        {
            PMMPTE LastPte;

            LastPte = PointerPte + NUMBER_OF_MAPPING_PTES;

            do {
                ASSERT (LastPte->u.Long == 0);
                LastPte -= 1;
            } while (LastPte > PointerPte);
        }
#endif

        //
        // Use the page frame number field of the first PTE as an
        // offset into the available mapping PTEs.
        //

        offset = NUMBER_OF_MAPPING_PTES;

        //
        // Flush entire TB only on processors executing this process.
        //

        KeFlushProcessTb (FALSE);
    }

    //
    // Change offset for next time through.
    //

    PointerPte->u.Hard.PageFrameNumber = offset - 1;

    //
    // Point to free entry and make it valid.
    //

    PointerPte += offset;

    ASSERT (PointerPte->u.Hard.Valid == 0);

    TempPte = ValidPtePte;
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;

    MI_WRITE_VALID_PTE (PointerPte, TempPte);

    //
    // Return the VA that maps the page.
    //

    return MiGetVirtualAddressMappedByPte (PointerPte);
}
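
#if 0

//
// Editorial illustration - not part of the original hypermap.c.  This is a
// minimal, self-contained user-mode model (plain C, no NT types) of the
// allocation scheme used by MiMapPageInHyperSpace above: the first reserved
// PTE is never used as a mapping, and its page frame number field is
// borrowed as a cursor that counts down through the remaining mapping PTEs;
// when the cursor reaches zero the whole window is treated as consumed, the
// cursor is reset to the window size and the TB is flushed.  Every name
// below (WINDOW_SIZE, ptes, model_flush_tb, model_map_page) is invented for
// the model only.
//

#include <assert.h>
#include <stdio.h>

#define WINDOW_SIZE 16                      // stands in for NUMBER_OF_MAPPING_PTES

static unsigned long ptes[WINDOW_SIZE + 1]; // ptes[0] plays the cursor PTE

static void model_flush_tb (void)
{
    int i;

    //
    // Stand-in for KeFlushProcessTb: forget every stale translation so the
    // window can be reused from the top.
    //

    for (i = 1; i <= WINDOW_SIZE; i += 1) {
        ptes[i] = 0;
    }
}

static int model_map_page (unsigned long PageFrameIndex)
{
    unsigned long offset = ptes[0];         // MI_GET_PAGE_FRAME_FROM_PTE

    if (offset == 0) {

        //
        // Window exhausted: reset the cursor to the window size and flush,
        // just as the kernel routine resets offset to NUMBER_OF_MAPPING_PTES.
        //

        offset = WINDOW_SIZE;
        model_flush_tb ();
    }

    ptes[0] = offset - 1;                   // cursor for next time through

    assert (ptes[offset] == 0);             // the slot must currently be invalid
    ptes[offset] = PageFrameIndex;          // "make the PTE valid"

    return (int) offset;                    // the slot index plays the role of the VA
}

int main (void)
{
    unsigned long pfn;

    for (pfn = 1; pfn <= 40; pfn += 1) {
        printf ("pfn %lu -> slot %d\n", pfn, model_map_page (pfn));
    }
    return 0;
}

#endif
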
PVOID
MiMapPageInHyperSpaceAtDpc (
    IN PEPROCESS Process,
    IN PFN_NUMBER PageFrameIndex
    )

/*++

Routine Description:

    This procedure maps the specified physical page into hyper space
    and returns the virtual address which maps the page.

    ************************************
    *                                  *
    * Returns with a spin lock held!!! *
    *                                  *
    ************************************

Arguments:

    Process - Supplies the current process.

    PageFrameIndex - Supplies the physical page number to map.

Return Value:

    Returns the address where the requested page was mapped.

    RETURNS WITH THE HYPERSPACE SPIN LOCK HELD!!!!

    The routine MiUnmapHyperSpaceMap MUST be called to release the lock!!!!

Environment:

    Kernel mode, DISPATCH_LEVEL on entry.

--*/

{
    MMPTE TempPte;
    PMMPTE PointerPte;
    PFN_NUMBER offset;

#if defined(NT_UP)
    UNREFERENCED_PARAMETER (Process);
#endif

    ASSERT (KeGetCurrentIrql() == DISPATCH_LEVEL);
    ASSERT (PageFrameIndex != 0);

    LOCK_HYPERSPACE_AT_DPC (Process);

    //
    // Get offset to first free PTE.
    //

    PointerPte = MmFirstReservedMappingPte;
    offset = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);

    if (offset == 0) {

        //
        // All the reserved PTEs have been used, make them all invalid.
        //

        MI_MAKING_MULTIPLE_PTES_INVALID (FALSE);

#if DBG
        {
            PMMPTE LastPte;

            LastPte = PointerPte + NUMBER_OF_MAPPING_PTES;

            do {
                ASSERT (LastPte->u.Long == 0);
                LastPte -= 1;
            } while (LastPte > PointerPte);
        }
#endif

        //
        // Use the page frame number field of the first PTE as an
        // offset into the available mapping PTEs.
        //

        offset = NUMBER_OF_MAPPING_PTES;

        //
        // Flush entire TB only on processors executing this process.
        //

        KeFlushProcessTb (FALSE);
    }

    //
    // Change offset for next time through.
    //

    PointerPte->u.Hard.PageFrameNumber = offset - 1;

    //
    // Point to free entry and make it valid.
    //

    PointerPte += offset;

    ASSERT (PointerPte->u.Hard.Valid == 0);

    TempPte = ValidPtePte;
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;

    MI_WRITE_VALID_PTE (PointerPte, TempPte);

    //
    // Return the VA that maps the page.
    //

    return MiGetVirtualAddressMappedByPte (PointerPte);
}
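
//
// Editorial note - not in the original source: MiMapPageInHyperSpaceAtDpc
// uses exactly the same reserved mapping window as MiMapPageInHyperSpace
// above; only the locking contract differs.  The general routine acquires
// the hyperspace lock with LOCK_HYPERSPACE and hands the entry IRQL back
// through the OldIrql parameter, while this variant asserts that the caller
// is already at DISPATCH_LEVEL and acquires the lock with
// LOCK_HYPERSPACE_AT_DPC, so no IRQL needs to be returned.  Both routines
// return with the hyperspace lock held.
//
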
PVOID
MiMapImageHeaderInHyperSpace (
    IN PFN_NUMBER PageFrameIndex
    )

/*++

Routine Description:

    This procedure maps the specified physical page into the
    PTE within hyper space reserved explicitly for image page
    header mapping.  By reserving an explicit PTE for mapping
    the PTE, page faults can occur while the PTE is mapped within
    hyperspace and no other hyperspace maps will affect this PTE.

    Note that if another thread attempts to map an image at the
    same time, it will be forced into a wait state until the
    header is "unmapped".

Arguments:

    PageFrameIndex - Supplies the physical page number to map.

Return Value:

    Returns the virtual address where the specified physical page was
    mapped.

Environment:

    Kernel mode.

--*/

{
    MMPTE TempPte;
    MMPTE OriginalPte;
    PMMPTE PointerPte;

    ASSERT (PageFrameIndex != 0);

    TempPte = ValidPtePte;
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;

    //
    // Ensure both modified and accessed bits are set so the hardware doesn't
    // ever write this PTE.
    //

    ASSERT (TempPte.u.Hard.Dirty == 1);
    ASSERT (TempPte.u.Hard.Accessed == 1);

    PointerPte = MiGetPteAddress (IMAGE_MAPPING_PTE);

    do {

        OriginalPte.u.Long = 0;

        OriginalPte.u.Long = InterlockedCompareExchangePte (PointerPte,
                                                            TempPte.u.Long,
                                                            OriginalPte.u.Long);

        if (OriginalPte.u.Long == 0) {
            break;
        }

        //
        // Another thread modified the PTE just before us or the PTE was
        // already in use.  This should be very rare - go the long way.
        //

        InterlockedIncrement ((PLONG)&MmWorkingSetList->NumberOfImageWaiters);

        //
        // Deliberately wait with a timeout since the PTE release runs
        // without lock synchronization so there is the extremely rare
        // race window which the timeout saves us from.
        //

        KeWaitForSingleObject (&MiImageMappingPteEvent,
                               Executive,
                               KernelMode,
                               FALSE,
                               (PLARGE_INTEGER)&MmOneSecond);

        InterlockedDecrement ((PLONG)&MmWorkingSetList->NumberOfImageWaiters);

    } while (TRUE);

    //
    // Flush the specified TB entry without writing the PTE as we
    // always want to do interlocked writes to this PTE and this is
    // being done above.
    //
    // Note the flush must be made across all processors as this thread
    // may migrate.  Also this must be done here instead of in the unmap
    // in order to support lock-free operation.
    //

    KeFlushSingleTb (IMAGE_MAPPING_PTE, TRUE);

    return (PVOID) IMAGE_MAPPING_PTE;
}
VOID
MiUnmapImageHeaderInHyperSpace (
    VOID
    )

/*++

Routine Description:

    This procedure unmaps the PTE reserved for mapping the image
    header, flushes the TB, and, if the WaitingForImageMapping field
    is not NULL, sets the specified event.

Arguments:

    None.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    PMMPTE PointerPte;

    PointerPte = MiGetPteAddress (IMAGE_MAPPING_PTE);

    //
    // Capture the number of waiters.
    //

    ASSERT (PointerPte->u.Long != 0);

    InterlockedExchangePte (PointerPte, ZeroPte.u.Long);

    if (MmWorkingSetList->NumberOfImageWaiters != 0) {

        //
        // If there are any threads waiting, wake them all now.  Note this
        // will wake threads in other processes as well, but it is very
        // rare that there are any waiters in the entire system period.
        //

        KePulseEvent (&MiImageMappingPteEvent, 0, FALSE);
    }

    return;
}
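
#if 0

//
// Editorial illustration - not part of the original hypermap.c.  This is a
// minimal user-mode sketch (C11 atomics) of the claim/release discipline
// used by MiMapImageHeaderInHyperSpace and MiUnmapImageHeaderInHyperSpace
// above: a single shared slot is claimed with a compare-exchange from zero,
// a loser records itself as a waiter and backs off before retrying (the
// kernel backs off with a timed KeWaitForSingleObject on
// MiImageMappingPteEvent; the model simply spins), and the owner releases
// the slot with an interlocked store of zero (after which the kernel pulses
// the event if anyone is waiting).  Every name below is invented for the
// model only.
//

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t image_mapping_pte;  // stands in for the dedicated PTE
static _Atomic long image_waiters;          // stands in for NumberOfImageWaiters

static void model_map_image_header (uint64_t ValidPteContents)
{
    uint64_t Expected;

    for (;;) {

        Expected = 0;

        //
        // InterlockedCompareExchangePte: install our PTE contents only if
        // the slot is currently zero, i.e. unowned.
        //

        if (atomic_compare_exchange_strong (&image_mapping_pte,
                                            &Expected,
                                            ValidPteContents)) {
            break;
        }

        //
        // The slot is already in use - publish ourselves as a waiter and
        // back off until the owner releases it, then retry the claim.
        //

        atomic_fetch_add (&image_waiters, 1);

        while (atomic_load (&image_mapping_pte) != 0) {
            ;                               // placeholder for the timed wait
        }

        atomic_fetch_sub (&image_waiters, 1);
    }
}

static void model_unmap_image_header (void)
{
    //
    // InterlockedExchangePte with ZeroPte: release the slot.  The kernel
    // additionally pulses MiImageMappingPteEvent when there are waiters;
    // in the model the waiters' loads simply observe the zero.
    //

    atomic_store (&image_mapping_pte, 0);
}

int main (void)
{
    model_map_image_header (0x12345u);      // pretend valid PTE contents
    model_unmap_image_header ();
    return 0;
}

#endif
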
PVOID
MiMapPagesToZeroInHyperSpace (
    IN PMMPFN Pfn1,
    IN PFN_COUNT NumberOfPages
    )

/*++

Routine Description:

    This procedure maps the specified physical pages for the zero page thread
    and returns the virtual address which maps them.

    This is ONLY to be used by THE zeroing page thread.

Arguments:

    Pfn1 - Supplies the pointer to the physical page numbers to map.

    NumberOfPages - Supplies the number of pages to map.

Return Value:

    Returns the virtual address where the specified physical pages were
    mapped.

Environment:

    PASSIVE_LEVEL.

--*/

{
    PFN_NUMBER offset;
    MMPTE TempPte;
    PMMPTE PointerPte;
    PFN_NUMBER PageFrameIndex;

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);
    ASSERT (NumberOfPages != 0);
    ASSERT (NumberOfPages <= NUMBER_OF_ZEROING_PTES);

    PointerPte = MiFirstReservedZeroingPte;

    //
    // Get offset to first free PTE.
    //

    offset = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);

    if (NumberOfPages > offset) {

        //
        // Not enough unused PTEs left, make them all invalid.
        //

        MI_MAKING_MULTIPLE_PTES_INVALID (FALSE);

#if DBG
        {
            PMMPTE LastPte;

            LastPte = PointerPte + NUMBER_OF_ZEROING_PTES;

            do {
                ASSERT (LastPte->u.Long == 0);
                LastPte -= 1;
            } while (LastPte > PointerPte);
        }
#endif

        //
        // Use the page frame number field of the first PTE as an
        // offset into the available zeroing PTEs.
        //

        offset = NUMBER_OF_ZEROING_PTES;
        PointerPte->u.Hard.PageFrameNumber = offset;

        //
        // Flush entire TB only on processors executing this process as this
        // thread may migrate there at any time.
        //

        KeFlushProcessTb (FALSE);
    }

    //
    // Change offset for next time through.
    //

    PointerPte->u.Hard.PageFrameNumber = offset - NumberOfPages;

    //
    // Point to free entries and make them valid.  Note that the frames
    // are mapped in reverse order but our caller doesn't care anyway.
    //

    PointerPte += (offset + 1);

    TempPte = ValidPtePte;

    ASSERT (Pfn1 != (PMMPFN) MM_EMPTY_LIST);

    do {
        PageFrameIndex = MI_PFN_ELEMENT_TO_INDEX (Pfn1);

        TempPte.u.Hard.PageFrameNumber = PageFrameIndex;

        PointerPte -= 1;

        ASSERT (PointerPte->u.Hard.Valid == 0);

        MI_WRITE_VALID_PTE (PointerPte, TempPte);

        Pfn1 = (PMMPFN) Pfn1->u1.Flink;

    } while (Pfn1 != (PMMPFN) MM_EMPTY_LIST);

    //
    // Return the VA that maps the page.
    //

    return MiGetVirtualAddressMappedByPte (PointerPte);
}
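
//
// Editorial worked example - not in the original source.  Suppose
// NUMBER_OF_ZEROING_PTES is 32 and the cursor PTE currently holds
// offset = 32, i.e. the whole zeroing window is free.  A request for
// NumberOfPages = 3 takes the "enough room" path:
//
//     the cursor becomes 32 - 3 = 29;
//     PointerPte is advanced to slot 32 + 1 = 33 and then pre-decremented
//     inside the loop, so slots 32, 31 and 30 receive the three frames in
//     reverse order of the PFN chain;
//     the VA of slot 30 (the lowest slot written) is returned.
//
// A later request for 30 pages (assuming the earlier mapping has since been
// torn down by MiUnmapPagesInZeroSpace) finds offset = 29 < 30, so the
// window is recycled: offset is reset to 32, the per-process TB is flushed
// with KeFlushProcessTb, and the request is satisfied from slots 32 down
// to 3, leaving the cursor at 2.
//
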
VOID
MiUnmapPagesInZeroSpace (
    IN PVOID VirtualAddress,
    IN PFN_COUNT NumberOfPages
    )

/*++

Routine Description:

    This procedure unmaps the specified physical pages for the zero page thread.

    This is ONLY to be used by THE zeroing page thread.

Arguments:

    VirtualAddress - Supplies the pointer to the physical page numbers to unmap.

    NumberOfPages - Supplies the number of pages to unmap.

Return Value:

    None.

Environment:

    PASSIVE_LEVEL.

--*/

{
    PMMPTE PointerPte;

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);
    ASSERT (NumberOfPages != 0);
    ASSERT (NumberOfPages <= NUMBER_OF_ZEROING_PTES);

    PointerPte = MiGetPteAddress (VirtualAddress);

    MiZeroMemoryPte (PointerPte, NumberOfPages);

    return;
}
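
//
// Editorial usage sketch - not in the original source.  The zero page
// thread is expected to pair the two routines above roughly as follows,
// with Pfn1 heading a chain of NumberOfPages free pages linked through
// u1.Flink and terminated by MM_EMPTY_LIST (RtlZeroMemory is used here
// purely for illustration and merely stands in for whatever page-zeroing
// primitive the real zero page thread uses):
//
//     VirtualAddress = MiMapPagesToZeroInHyperSpace (Pfn1, NumberOfPages);
//     RtlZeroMemory (VirtualAddress, (SIZE_T) NumberOfPages << PAGE_SHIFT);
//     MiUnmapPagesInZeroSpace (VirtualAddress, NumberOfPages);
//
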