Source code of Windows XP (NT5)

procsup.c: 6595 lines, 185 KiB

  1. /*++
  2. Copyright (c) 1989 Microsoft Corporation
  3. Module Name:
  4. procsup.c
  5. Abstract:
  6. This module contains routines which support the process structure.
  7. Author:
  8. Lou Perazzoli (loup) 25-Apr-1989
  9. Landy Wang (landyw) 02-June-1997
  10. Revision History:
  11. --*/
  12. #include "mi.h"
  13. #if (_MI_PAGING_LEVELS >= 3)
  14. #include "wow64t.h"
  15. #define MI_LARGE_STACK_SIZE KERNEL_LARGE_STACK_SIZE
  16. #if defined(_AMD64_)
  17. #define MM_PROCESS_COMMIT_CHARGE 6
  18. #define MM_PROCESS_CREATE_CHARGE 8
  19. #elif defined(_IA64_)
  20. #define MM_PROCESS_COMMIT_CHARGE 5
  21. #define MM_PROCESS_CREATE_CHARGE 8
  22. #endif
  23. #else
  24. //
  25. // Registry settable but must always be a page multiple and less than
  26. // or equal to KERNEL_LARGE_STACK_SIZE.
  27. //
  28. ULONG MmLargeStackSize = KERNEL_LARGE_STACK_SIZE;
  29. #define MI_LARGE_STACK_SIZE MmLargeStackSize
  30. #if !defined (_X86PAE_)
  31. #define MM_PROCESS_COMMIT_CHARGE 4
  32. #define MM_PROCESS_CREATE_CHARGE 6
  33. #else
  34. #define MM_PROCESS_COMMIT_CHARGE 8
  35. #define MM_PROCESS_CREATE_CHARGE 10
  36. #endif
  37. #endif
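//
// The comment above requires MmLargeStackSize to stay a page multiple and
// never exceed KERNEL_LARGE_STACK_SIZE. A minimal standalone sketch of that
// clamping, with example constants standing in for the real header values:
//

#define EXAMPLE_PAGE_SIZE               0x1000UL
#define EXAMPLE_KERNEL_LARGE_STACK_SIZE (15UL * EXAMPLE_PAGE_SIZE)

//
// Round a registry-supplied byte count down to a page multiple and cap it
// at the large-stack limit; fall back to the limit if the value is bogus.
//
static unsigned long
ExampleClampLargeStackSize (unsigned long RequestedBytes)
{
    unsigned long Size = RequestedBytes & ~(EXAMPLE_PAGE_SIZE - 1);

    if (Size == 0 || Size > EXAMPLE_KERNEL_LARGE_STACK_SIZE) {
        Size = EXAMPLE_KERNEL_LARGE_STACK_SIZE;
    }
    return Size;
}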
  38. #define DONTASSERT(x)
  39. extern ULONG MmProductType;
  40. extern MM_SYSTEMSIZE MmSystemSize;
  41. extern PVOID BBTBuffer;
  42. SIZE_T MmProcessCommit;
  43. ULONG MmKernelStackPages;
  44. PFN_NUMBER MmKernelStackResident;
  45. ULONG MmLargeStacks;
  46. ULONG MmSmallStacks;
  47. MMPTE KernelDemandZeroPte = {MM_KERNEL_DEMAND_ZERO_PTE};
  48. CCHAR MmRotatingUniprocessorNumber;
  49. //
  50. // Enforced minimal commit for user mode stacks
  51. //
  52. ULONG MmMinimumStackCommitInBytes;
  53. PFN_NUMBER
  54. MiMakeOutswappedPageResident (
  55. IN PMMPTE ActualPteAddress,
  56. IN PMMPTE PointerTempPte,
  57. IN ULONG Global,
  58. IN PFN_NUMBER ContainingPage
  59. );
  60. NTSTATUS
  61. MiCreatePebOrTeb (
  62. IN PEPROCESS TargetProcess,
  63. IN ULONG Size,
  64. OUT PVOID *Base
  65. );
  66. VOID
  67. MiDeleteAddressesInWorkingSet (
  68. IN PEPROCESS Process
  69. );
  70. VOID
  71. MiDeleteValidAddress (
  72. IN PVOID Va,
  73. IN PEPROCESS CurrentProcess
  74. );
  75. VOID
  76. MiDeleteFreeVm (
  77. IN PVOID StartingAddress,
  78. IN PVOID EndingAddress
  79. );
  80. VOID
  81. VadTreeWalk (
  82. VOID
  83. );
  84. PMMVAD
  85. MiAllocateVad(
  86. IN ULONG_PTR StartingVirtualAddress,
  87. IN ULONG_PTR EndingVirtualAddress,
  88. IN LOGICAL Deletable
  89. );
  90. #ifdef ALLOC_PRAGMA
  91. #pragma alloc_text(PAGE,MmCreateTeb)
  92. #pragma alloc_text(PAGE,MmCreatePeb)
  93. #pragma alloc_text(PAGE,MiCreatePebOrTeb)
  94. #pragma alloc_text(PAGE,MmDeleteTeb)
  95. #pragma alloc_text(PAGE,MiAllocateVad)
  96. #pragma alloc_text(PAGE,MiDeleteAddressesInWorkingSet)
  97. #pragma alloc_text(PAGE,MmSetMemoryPriorityProcess)
  98. #pragma alloc_text(PAGE,MmInitializeHandBuiltProcess)
  99. #pragma alloc_text(PAGE,MmInitializeHandBuiltProcess2)
  100. #pragma alloc_text(PAGE,MmGetDirectoryFrameFromProcess)
  101. #endif
  102. BOOLEAN
  103. MmCreateProcessAddressSpace (
  104. IN ULONG MinimumWorkingSetSize,
  105. IN PEPROCESS NewProcess,
  106. OUT PULONG_PTR DirectoryTableBase
  107. )
  108. /*++
  109. Routine Description:
  110. This routine creates an address space which maps the system
  111. portion and contains a hyper space entry.
  112. Arguments:
  113. MinimumWorkingSetSize - Supplies the minimum working set size for
  114. this address space. This value is only used
  115. to ensure that ample physical pages exist
  116. to create this process.
  117. NewProcess - Supplies a pointer to the process object being created.
  118. DirectoryTableBase - Returns the value of the newly created
  119. address space's Page Directory (PD) page and
  120. hyper space page.
  121. Return Value:
  122. Returns TRUE if an address space was successfully created, FALSE
  123. if ample physical pages do not exist.
  124. Environment:
  125. Kernel mode. APCs Disabled.
  126. --*/
  127. {
  128. PFN_NUMBER PageDirectoryIndex;
  129. PFN_NUMBER HyperSpaceIndex;
  130. PFN_NUMBER PageContainingWorkingSet;
  131. PFN_NUMBER VadBitMapPage;
  132. MMPTE TempPte;
  133. PEPROCESS CurrentProcess;
  134. KIRQL OldIrql;
  135. PMMPFN Pfn1;
  136. ULONG Color;
  137. PMMPTE PointerPte;
  138. #if (_MI_PAGING_LEVELS >= 4)
  139. PMMPTE PointerPxe;
  140. PFN_NUMBER PageDirectoryParentIndex;
  141. #endif
  142. #if (_MI_PAGING_LEVELS >= 3)
  143. PMMPTE PointerPpe;
  144. PMMPTE PointerPde;
  145. PFN_NUMBER HyperDirectoryIndex;
  146. #endif
  147. #if defined (_X86PAE_)
  148. ULONG MaximumStart;
  149. ULONG TopQuad;
  150. MMPTE TopPte;
  151. PPAE_ENTRY PaeVa;
  152. ULONG i;
  153. ULONG NumberOfPdes;
  154. PFN_NUMBER HyperSpaceIndex2;
  155. PFN_NUMBER PageDirectories[PD_PER_SYSTEM];
  156. #endif
  157. #if !defined (_IA64_)
  158. PMMPTE PointerFillPte;
  159. PMMPTE CurrentAddressSpacePde;
  160. #endif
  161. CurrentProcess = PsGetCurrentProcess ();
  162. //
  163. // Charge commitment for the page directory pages, working set page table
  164. // page, and working set list. If Vad bitmap lookups are enabled, then
  165. // charge for a page or two for that as well.
  166. //
  167. if (MiChargeCommitment (MM_PROCESS_COMMIT_CHARGE, NULL) == FALSE) {
  168. return FALSE;
  169. }
  170. NewProcess->NextPageColor = (USHORT)(RtlRandom(&MmProcessColorSeed));
  171. KeInitializeSpinLock (&NewProcess->HyperSpaceLock);
  172. #if defined (_X86PAE_)
  173. TopQuad = MiPaeAllocate (&PaeVa);
  174. if (TopQuad == 0) {
  175. MiReturnCommitment (MM_PROCESS_COMMIT_CHARGE);
  176. return FALSE;
  177. }
  178. #endif
  179. LOCK_WS (CurrentProcess);
  180. //
  181. // Get the PFN lock to prevent another thread in this
  182. // process from using hyper space and to get physical pages.
  183. //
  184. LOCK_PFN (OldIrql);
  185. //
  186. // Check to make sure the physical pages are available.
  187. //
  188. if (MI_NONPAGABLE_MEMORY_AVAILABLE() <= (SPFN_NUMBER)MinimumWorkingSetSize){
  189. UNLOCK_PFN (OldIrql);
  190. UNLOCK_WS (CurrentProcess);
  191. MiReturnCommitment (MM_PROCESS_COMMIT_CHARGE);
  192. #if defined (_X86PAE_)
  193. MiPaeFree (PaeVa);
  194. #endif
  195. //
  196. // Indicate no directory base was allocated.
  197. //
  198. return FALSE;
  199. }
  200. MM_TRACK_COMMIT (MM_DBG_COMMIT_PROCESS_CREATE, MM_PROCESS_COMMIT_CHARGE);
  201. MmResidentAvailablePages -= MinimumWorkingSetSize;
  202. MM_BUMP_COUNTER(6, MinimumWorkingSetSize);
  203. MmProcessCommit += MM_PROCESS_COMMIT_CHARGE;
  204. ASSERT (NewProcess->AddressSpaceInitialized == 0);
  205. PS_SET_BITS (&NewProcess->Flags, PS_PROCESS_FLAGS_ADDRESS_SPACE1);
  206. ASSERT (NewProcess->AddressSpaceInitialized == 1);
  207. NewProcess->Vm.MinimumWorkingSetSize = MinimumWorkingSetSize;
  208. //
  209. // Allocate a page directory (parent for 64-bit systems) page.
  210. //
  211. MiEnsureAvailablePageOrWait (CurrentProcess, NULL);
  212. Color = MI_PAGE_COLOR_PTE_PROCESS (PDE_BASE,
  213. &CurrentProcess->NextPageColor);
  214. PageDirectoryIndex = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);
  215. #if defined (_X86PAE_)
  216. //
  217. // Allocate the additional page directory pages.
  218. //
  219. for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) {
  220. MiEnsureAvailablePageOrWait (CurrentProcess, NULL);
  221. Color = MI_PAGE_COLOR_PTE_PROCESS (PDE_BASE,
  222. &CurrentProcess->NextPageColor);
  223. PageDirectories[i] = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);
  224. }
  225. PageDirectories[i] = PageDirectoryIndex;
  226. //
  227. // Recursively map each page directory page so it points to itself.
  228. //
  229. TempPte = ValidPdePde;
  230. MI_SET_GLOBAL_STATE (TempPte, 0);
  231. PointerPte = (PMMPTE)MiMapPageInHyperSpaceAtDpc (CurrentProcess, PageDirectoryIndex);
  232. for (i = 0; i < PD_PER_SYSTEM; i += 1) {
  233. TempPte.u.Hard.PageFrameNumber = PageDirectories[i];
  234. PointerPte[i] = TempPte;
  235. }
  236. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPte);
  237. //
  238. // Initialize the parent page directory entries.
  239. //
  240. TopPte.u.Long = TempPte.u.Long & ~MM_PAE_PDPTE_MASK;
  241. for (i = 0; i < PD_PER_SYSTEM; i += 1) {
  242. TopPte.u.Hard.PageFrameNumber = PageDirectories[i];
  243. PaeVa->PteEntry[i].u.Long = TopPte.u.Long;
  244. }
  245. NewProcess->PaeTop = (PVOID)PaeVa;
  246. DirectoryTableBase[0] = TopQuad;
  247. #else
  248. INITIALIZE_DIRECTORY_TABLE_BASE(&DirectoryTableBase[0], PageDirectoryIndex);
  249. #endif
  250. #if (_MI_PAGING_LEVELS >= 3)
  251. PointerPpe = KSEG_ADDRESS (PageDirectoryIndex);
  252. TempPte = ValidPdePde;
  253. //
  254. // Map the top level page directory parent page recursively onto itself.
  255. //
  256. TempPte.u.Hard.PageFrameNumber = PageDirectoryIndex;
  257. //
  258. // Set the PTE address in the PFN for the top level page directory page.
  259. //
  260. Pfn1 = MI_PFN_ELEMENT (PageDirectoryIndex);
  261. #if (_MI_PAGING_LEVELS >= 4)
  262. PageDirectoryParentIndex = PageDirectoryIndex;
  263. PointerPxe = (PMMPTE)MiMapPageInHyperSpaceAtDpc (CurrentProcess,
  264. PageDirectoryIndex);
  265. Pfn1->PteAddress = MiGetPteAddress(PXE_BASE);
  266. PointerPxe[MiGetPxeOffset(PXE_BASE)] = TempPte;
  267. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPxe);
  268. //
  269. // Now that the top level extended page parent page is initialized,
  270. // allocate a page parent page.
  271. //
  272. MiEnsureAvailablePageOrWait (CurrentProcess, NULL);
  273. Color = MI_PAGE_COLOR_PTE_PROCESS (PDE_BASE,
  274. &CurrentProcess->NextPageColor);
  275. PageDirectoryIndex = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);
  276. //
  277. //
  278. // Map this directory parent page into the top level
  279. // extended page directory parent page.
  280. //
  281. TempPte.u.Hard.PageFrameNumber = PageDirectoryIndex;
  282. PointerPxe = (PMMPTE)MiMapPageInHyperSpaceAtDpc (CurrentProcess,
  283. PageDirectoryParentIndex);
  284. PointerPxe[MiGetPxeOffset(HYPER_SPACE)] = TempPte;
  285. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPxe);
  286. #else
  287. Pfn1->PteAddress = MiGetPteAddress((PVOID)PDE_TBASE);
  288. PointerPpe[MiGetPpeOffset(PDE_TBASE)] = TempPte;
  289. #endif
  290. //
  291. // Allocate the page directory for hyper space and map this directory
  292. // page into the page directory parent page.
  293. //
  294. MiEnsureAvailablePageOrWait (CurrentProcess, NULL);
  295. Color = MI_PAGE_COLOR_PTE_PROCESS (MiGetPpeAddress(HYPER_SPACE),
  296. &CurrentProcess->NextPageColor);
  297. HyperDirectoryIndex = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);
  298. TempPte.u.Hard.PageFrameNumber = HyperDirectoryIndex;
  299. #if (_MI_PAGING_LEVELS >= 4)
  300. PointerPpe = (PMMPTE)MiMapPageInHyperSpaceAtDpc (CurrentProcess,
  301. PageDirectoryIndex);
  302. #endif
  303. PointerPpe[MiGetPpeOffset(HYPER_SPACE)] = TempPte;
  304. #if (_MI_PAGING_LEVELS >= 4)
  305. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPpe);
  306. #endif
  307. #if defined (_IA64_)
  308. //
  309. // Initialize the page directory parent for the session (or win32k) space.
  310. // Any new process shares the session (or win32k) address space (and TB)
  311. // of its parent.
  312. //
  313. NewProcess->Pcb.SessionParentBase = CurrentProcess->Pcb.SessionParentBase;
  314. NewProcess->Pcb.SessionMapInfo = CurrentProcess->Pcb.SessionMapInfo;
  315. #endif
  316. #endif
  317. //
  318. // Allocate the hyper space page table page.
  319. //
  320. MiEnsureAvailablePageOrWait (CurrentProcess, NULL);
  321. Color = MI_PAGE_COLOR_PTE_PROCESS (MiGetPdeAddress(HYPER_SPACE),
  322. &CurrentProcess->NextPageColor);
  323. HyperSpaceIndex = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);
  324. #if (_MI_PAGING_LEVELS >= 3)
  325. #if defined (_IA64_)
  326. TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex;
  327. PointerPde = KSEG_ADDRESS (HyperDirectoryIndex);
  328. PointerPde[MiGetPdeOffset(HYPER_SPACE)] = TempPte;
  329. #endif
  330. #if (_AMD64_)
  331. TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex;
  332. PointerPde = (PMMPTE)MiMapPageInHyperSpaceAtDpc (CurrentProcess,
  333. HyperDirectoryIndex);
  334. PointerPde[MiGetPdeOffset(HYPER_SPACE)] = TempPte;
  335. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPde);
  336. #endif
  337. #endif
  338. #if defined (_X86PAE_)
  339. //
  340. // Allocate the second hyper space page table page.
  341. // Save it in the first PTE used by the first hyperspace PDE.
  342. //
  343. MiEnsureAvailablePageOrWait (CurrentProcess, NULL);
  344. Color = MI_PAGE_COLOR_PTE_PROCESS (MiGetPdeAddress(HYPER_SPACE2),
  345. &CurrentProcess->NextPageColor);
  346. HyperSpaceIndex2 = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);
  347. //
  348. // Unlike DirectoryTableBase[0], the HyperSpaceIndex is stored as an
  349. // absolute PFN and does not need to be below 4GB.
  350. //
  351. DirectoryTableBase[1] = HyperSpaceIndex;
  352. #else
  353. INITIALIZE_DIRECTORY_TABLE_BASE(&DirectoryTableBase[1], HyperSpaceIndex);
  354. #endif
  355. //
  356. // Remove page(s) for the VAD bitmap.
  357. //
  358. MiEnsureAvailablePageOrWait (CurrentProcess, NULL);
  359. Color = MI_PAGE_COLOR_VA_PROCESS (MmWorkingSetList,
  360. &CurrentProcess->NextPageColor);
  361. VadBitMapPage = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);
  362. //
  363. // Remove page for the working set list.
  364. //
  365. MiEnsureAvailablePageOrWait (CurrentProcess, NULL);
  366. Color = MI_PAGE_COLOR_VA_PROCESS (MmWorkingSetList,
  367. &CurrentProcess->NextPageColor);
  368. PageContainingWorkingSet = MiRemoveZeroPageIfAny (Color);
  369. if (PageContainingWorkingSet == 0) {
  370. PageContainingWorkingSet = MiRemoveAnyPage (Color);
  371. UNLOCK_PFN (OldIrql);
  372. MiZeroPhysicalPage (PageContainingWorkingSet, Color);
  373. }
  374. else {
  375. //
  376. // Release the PFN lock as the needed pages have been allocated.
  377. //
  378. UNLOCK_PFN (OldIrql);
  379. }
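//
// The allocation just above prefers a page that is already zeroed and only
// falls back to zeroing an arbitrary free page, dropping the PFN lock first
// so the zeroing does not run at elevated IRQL. In outline, with
// illustrative names rather than the real routines:
//
//     Page = TakeZeroedPage (Color);
//     if (Page == NO_PAGE) {
//         Page = TakeAnyFreePage (Color);
//         ReleasePfnLock ();
//         ZeroPhysicalPage (Page);
//     }
//     else {
//         ReleasePfnLock ();
//     }
//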
  380. NewProcess->WorkingSetPage = PageContainingWorkingSet;
  381. //
  382. // Initialize the page reserved for hyper space.
  383. //
  384. MI_INITIALIZE_HYPERSPACE_MAP (HyperSpaceIndex);
  385. #if (_MI_PAGING_LEVELS >= 3)
  386. //
  387. // Set the PTE address in the PFN for the hyper space page directory page.
  388. //
  389. Pfn1 = MI_PFN_ELEMENT (HyperDirectoryIndex);
  390. Pfn1->PteAddress = MiGetPpeAddress(HYPER_SPACE);
  391. #if defined (_AMD64_)
  392. //
  393. // Copy the system mappings including the shared user page & session space.
  394. //
  395. CurrentAddressSpacePde = MiGetPxeAddress(KI_USER_SHARED_DATA);
  396. PointerPxe = (PMMPTE)MiMapPageInHyperSpace (CurrentProcess,
  397. PageDirectoryParentIndex,
  398. &OldIrql);
  399. PointerFillPte = &PointerPxe[MiGetPxeOffset(KI_USER_SHARED_DATA)];
  400. RtlCopyMemory (PointerFillPte,
  401. CurrentAddressSpacePde,
  402. ((1 + (MiGetPxeAddress(MM_SYSTEM_SPACE_END) -
  403. CurrentAddressSpacePde)) * sizeof(MMPTE)));
  404. MiUnmapPageInHyperSpace (CurrentProcess, PointerPxe, OldIrql);
  405. #endif
  406. TempPte = ValidPdePde;
  407. TempPte.u.Hard.PageFrameNumber = VadBitMapPage;
  408. MI_SET_GLOBAL_STATE (TempPte, 0);
  409. #if defined (_AMD64_)
  410. PointerPte = (PMMPTE)MiMapPageInHyperSpace (CurrentProcess,
  411. HyperSpaceIndex,
  412. &OldIrql);
  413. PointerPte[MiGetPteOffset(VAD_BITMAP_SPACE)] = TempPte;
  414. TempPte.u.Hard.PageFrameNumber = PageContainingWorkingSet;
  415. PointerPte[MiGetPteOffset(MmWorkingSetList)] = TempPte;
  416. MiUnmapPageInHyperSpace (CurrentProcess, PointerPte, OldIrql);
  417. #else
  418. PointerPte = KSEG_ADDRESS (HyperSpaceIndex);
  419. PointerPte[MiGetPteOffset(VAD_BITMAP_SPACE)] = TempPte;
  420. TempPte.u.Hard.PageFrameNumber = PageContainingWorkingSet;
  421. PointerPte[MiGetPteOffset(MmWorkingSetList)] = TempPte;
  422. #endif
  423. #else // the following is for (_MI_PAGING_LEVELS < 3) only
  424. #if defined (_X86PAE_)
  425. //
  426. // Stash the second hyperspace PDE in the first PTE for the initial
  427. // hyperspace entry.
  428. //
  429. TempPte = ValidPdePde;
  430. TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex2;
  431. MI_SET_GLOBAL_STATE (TempPte, 0);
  432. PointerPte = (PMMPTE)MiMapPageInHyperSpace (CurrentProcess, HyperSpaceIndex, &OldIrql);
  433. PointerPte[0] = TempPte;
  434. TempPte.u.Hard.PageFrameNumber = VadBitMapPage;
  435. PointerPte[MiGetPteOffset(VAD_BITMAP_SPACE)] = TempPte;
  436. TempPte.u.Hard.PageFrameNumber = PageContainingWorkingSet;
  437. PointerPte[MiGetPteOffset(MmWorkingSetList)] = TempPte;
  438. MiUnmapPageInHyperSpace (CurrentProcess, PointerPte, OldIrql);
  439. #else
  440. TempPte = ValidPdePde;
  441. TempPte.u.Hard.PageFrameNumber = VadBitMapPage;
  442. MI_SET_GLOBAL_STATE (TempPte, 0);
  443. PointerPte = (PMMPTE)MiMapPageInHyperSpace (CurrentProcess, HyperSpaceIndex, &OldIrql);
  444. PointerPte[MiGetPteOffset(VAD_BITMAP_SPACE)] = TempPte;
  445. TempPte.u.Hard.PageFrameNumber = PageContainingWorkingSet;
  446. PointerPte[MiGetPteOffset(MmWorkingSetList)] = TempPte;
  447. MiUnmapPageInHyperSpace (CurrentProcess, PointerPte, OldIrql);
  448. #endif
  449. //
  450. // Set the PTE address in the PFN for the page directory page.
  451. //
  452. Pfn1 = MI_PFN_ELEMENT (PageDirectoryIndex);
  453. Pfn1->PteAddress = (PMMPTE)PDE_BASE;
  454. TempPte = ValidPdePde;
  455. TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex;
  456. MI_SET_GLOBAL_STATE (TempPte, 0);
  457. //
  458. // Map the page directory page in hyperspace.
  459. // Note for PAE, this is the high 1GB virtual only.
  460. //
  461. PointerPte = (PMMPTE)MiMapPageInHyperSpace (CurrentProcess, PageDirectoryIndex, &OldIrql);
  462. PointerPte[MiGetPdeOffset(HYPER_SPACE)] = TempPte;
  463. #if defined (_X86PAE_)
  464. //
  465. // Map in the second hyperspace page directory.
  466. // The page directory page is already recursively mapped.
  467. //
  468. TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex2;
  469. PointerPte[MiGetPdeOffset(HYPER_SPACE2)] = TempPte;
  470. #else
  471. //
  472. // Recursively map the page directory page so it points to itself.
  473. //
  474. TempPte.u.Hard.PageFrameNumber = PageDirectoryIndex;
  475. PointerPte[MiGetPdeOffset(PTE_BASE)] = TempPte;
  476. #endif
  477. //
  478. // Map in the non paged portion of the system.
  479. //
  480. //
  481. // For the PAE case, only the last page directory is currently mapped, so
  482. // only copy the system PDEs for the last 1GB - any that need copying in
  483. // the 2gb->3gb range will be done a little later.
  484. //
  485. if (MmVirtualBias != 0) {
  486. PointerFillPte = &PointerPte[MiGetPdeOffset(CODE_START + MmVirtualBias)];
  487. CurrentAddressSpacePde = MiGetPdeAddress(CODE_START + MmVirtualBias);
  488. RtlCopyMemory (PointerFillPte,
  489. CurrentAddressSpacePde,
  490. (((1 + CODE_END) - CODE_START) / MM_VA_MAPPED_BY_PDE) * sizeof(MMPTE));
  491. }
  492. PointerFillPte = &PointerPte[MiGetPdeOffset(MmNonPagedSystemStart)];
  493. CurrentAddressSpacePde = MiGetPdeAddress(MmNonPagedSystemStart);
  494. RtlCopyMemory (PointerFillPte,
  495. CurrentAddressSpacePde,
  496. ((1 + (MiGetPdeAddress(NON_PAGED_SYSTEM_END) -
  497. CurrentAddressSpacePde))) * sizeof(MMPTE));
  498. //
  499. // Map in the system cache page table pages.
  500. //
  501. PointerFillPte = &PointerPte[MiGetPdeOffset(MM_SYSTEM_CACHE_WORKING_SET)];
  502. CurrentAddressSpacePde = MiGetPdeAddress(MM_SYSTEM_CACHE_WORKING_SET);
  503. RtlCopyMemory (PointerFillPte,
  504. CurrentAddressSpacePde,
  505. ((1 + (MiGetPdeAddress(MmSystemCacheEnd) -
  506. CurrentAddressSpacePde))) * sizeof(MMPTE));
  507. #if !defined (_X86PAE_)
  508. //
  509. // Map all the virtual space in the 2GB->3GB range when it's not user space.
  510. // This includes kernel/HAL code & data, the PFN database, initial nonpaged
  511. // pool, any extra system PTE or system cache areas, system views and
  512. // session space.
  513. //
  514. if (MmVirtualBias == 0) {
  515. PointerFillPte = &PointerPte[MiGetPdeOffset(CODE_START)];
  516. CurrentAddressSpacePde = MiGetPdeAddress(CODE_START);
  517. RtlCopyMemory (PointerFillPte,
  518. CurrentAddressSpacePde,
  519. ((MM_SESSION_SPACE_DEFAULT_END - CODE_START) / MM_VA_MAPPED_BY_PDE) * sizeof(MMPTE));
  520. }
  521. else {
  522. //
  523. // Booted /3GB, so copy the bootstrap entry for session space as it's
  524. // not included in 2GB->3GB copy above.
  525. //
  526. PointerFillPte = &PointerPte[MiGetPdeOffset(MmSessionSpace)];
  527. CurrentAddressSpacePde = MiGetPdeAddress(MmSessionSpace);
  528. if (CurrentAddressSpacePde->u.Hard.Valid == 1) {
  529. MI_WRITE_VALID_PTE (PointerFillPte, *CurrentAddressSpacePde);
  530. }
  531. else {
  532. MI_WRITE_INVALID_PTE (PointerFillPte, *CurrentAddressSpacePde);
  533. }
  534. }
  535. if (MiMaximumSystemExtraSystemPdes) {
  536. PointerFillPte = &PointerPte[MiGetPdeOffset(MiUseMaximumSystemSpace)];
  537. CurrentAddressSpacePde = MiGetPdeAddress(MiUseMaximumSystemSpace);
  538. RtlCopyMemory (PointerFillPte,
  539. CurrentAddressSpacePde,
  540. MiMaximumSystemExtraSystemPdes * sizeof(MMPTE));
  541. }
  542. #endif
  543. MiUnmapPageInHyperSpace (CurrentProcess, PointerPte, OldIrql);
  544. #if defined (_X86PAE_)
  545. //
  546. // Map all the virtual space in the 2GB->3GB range when it's not user space.
  547. // This includes kernel/HAL code & data, the PFN database, initial nonpaged
  548. // pool, any extra system PTE or system cache areas, system views and
  549. // session space.
  550. //
  551. if (MmVirtualBias == 0) {
  552. PageDirectoryIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PaeVa->PteEntry[PD_PER_SYSTEM - 2]);
  553. PointerPte = (PMMPTE)MiMapPageInHyperSpace (CurrentProcess, PageDirectoryIndex, &OldIrql);
  554. PointerFillPte = &PointerPte[MiGetPdeOffset(CODE_START)];
  555. CurrentAddressSpacePde = MiGetPdeAddress(CODE_START);
  556. RtlCopyMemory (PointerFillPte,
  557. CurrentAddressSpacePde,
  558. ((MM_SESSION_SPACE_DEFAULT_END - CODE_START) / MM_VA_MAPPED_BY_PDE) * sizeof(MMPTE));
  559. MiUnmapPageInHyperSpace (CurrentProcess, PointerPte, OldIrql);
  560. }
  561. //
  562. // If portions of the range between 1GB and 2GB are being used for
  563. // additional system PTEs, then copy those too.
  564. //
  565. if (MiMaximumSystemExtraSystemPdes != 0) {
  566. MaximumStart = MiUseMaximumSystemSpace;
  567. while (MaximumStart < MiUseMaximumSystemSpaceEnd) {
  568. i = MiGetPdPteOffset (MiUseMaximumSystemSpace);
  569. PageDirectoryIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PaeVa->PteEntry[i]);
  570. PointerPte = (PMMPTE)MiMapPageInHyperSpace (CurrentProcess,
  571. PageDirectoryIndex,
  572. &OldIrql);
  573. PointerFillPte = &PointerPte[MiGetPdeOffset(MaximumStart)];
  574. CurrentAddressSpacePde = MiGetPdeAddress(MaximumStart);
  575. NumberOfPdes = PDE_PER_PAGE - MiGetPdeOffset(MaximumStart);
  576. RtlCopyMemory (PointerFillPte,
  577. CurrentAddressSpacePde,
  578. NumberOfPdes * sizeof(MMPTE));
  579. MiUnmapPageInHyperSpace (CurrentProcess, PointerPte, OldIrql);
  580. MaximumStart = (ULONG) MiGetVirtualAddressMappedByPde (CurrentAddressSpacePde + NumberOfPdes);
  581. }
  582. }
  583. #endif
  584. #endif // end of (_MI_PAGING_LEVELS < 3) specific else
  585. //
  586. // Up the session space reference count.
  587. //
  588. MiSessionAddProcess (NewProcess);
  589. //
  590. // Release working set mutex and lower IRQL.
  591. //
  592. UNLOCK_WS (CurrentProcess);
  593. return TRUE;
  594. }
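//
// On non-PAE x86 the routine above points one PDE of the new page directory
// back at the page directory itself, which is what makes the PTE_BASE and
// PDE_BASE virtual windows work. A minimal sketch of the resulting address
// arithmetic, assuming the conventional 32-bit non-PAE self-map values:
//

#define EXAMPLE_PTE_BASE 0xC0000000UL   // window created by the self-map PDE
#define EXAMPLE_PDE_BASE 0xC0300000UL   // the PTE formula applied to itself

//
// Virtual address of the PTE that maps Va (4KB pages, 32-bit, non-PAE).
//
static unsigned long
ExampleGetPteAddress (unsigned long Va)
{
    return EXAMPLE_PTE_BASE + ((Va >> 12) << 2);
}

//
// Virtual address of the PDE that maps Va.
//
static unsigned long
ExampleGetPdeAddress (unsigned long Va)
{
    return EXAMPLE_PDE_BASE + ((Va >> 22) << 2);
}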
  595. NTSTATUS
  596. MmInitializeProcessAddressSpace (
  597. IN PEPROCESS ProcessToInitialize,
  598. IN PEPROCESS ProcessToClone OPTIONAL,
  599. IN PVOID SectionToMap OPTIONAL,
  600. OUT POBJECT_NAME_INFORMATION *AuditName OPTIONAL
  601. )
  602. /*++
  603. Routine Description:
  604. This routine initializes the working set and mutexes within a
  605. newly created address space to support paging.
  606. No page faults may occur in a new process until this routine has
  607. completed.
  608. Arguments:
  609. ProcessToInitialize - Supplies a pointer to the process to initialize.
  610. ProcessToClone - Optionally supplies a pointer to the process whose
  611. address space should be copied into the
  612. ProcessToInitialize address space.
  613. SectionToMap - Optionally supplies a section to map into the newly
  614. initialized address space.
  615. Only one of ProcessToClone and SectionToMap may be specified.
  616. AuditName - Supplies an opaque object name information pointer.
  617. Return Value:
  618. NTSTATUS.
  619. Environment:
  620. Kernel mode. APCs disabled.
  621. --*/
  622. {
  623. KIRQL OldIrql;
  624. MMPTE TempPte;
  625. PMMPTE PointerPte;
  626. PVOID BaseAddress;
  627. SIZE_T ViewSize;
  628. NTSTATUS Status;
  629. PFILE_OBJECT FilePointer;
  630. PFN_NUMBER PageContainingWorkingSet;
  631. LARGE_INTEGER SectionOffset;
  632. PSECTION_IMAGE_INFORMATION ImageInfo;
  633. PMMVAD VadShare;
  634. PMMVAD VadReserve;
  635. PLOCK_HEADER LockedPagesHeader;
  636. PFN_NUMBER PdePhysicalPage;
  637. PFN_NUMBER VadBitMapPage;
  638. ULONG i;
  639. ULONG NumberOfPages;
  640. #if defined (_X86PAE_)
  641. PFN_NUMBER PdePhysicalPage2;
  642. #endif
  643. #if (_MI_PAGING_LEVELS >= 3)
  644. PFN_NUMBER PpePhysicalPage;
  645. #if DBG
  646. ULONG j;
  647. PUCHAR p;
  648. #endif
  649. #endif
  650. #if (_MI_PAGING_LEVELS >= 4)
  651. PFN_NUMBER PxePhysicalPage;
  652. #endif
  653. #if defined(_WIN64)
  654. PWOW64_PROCESS Wow64Process;
  655. #endif
  656. //
  657. // Initialize Working Set Mutex in process header.
  658. //
  659. KeAttachProcess (&ProcessToInitialize->Pcb);
  660. ASSERT (ProcessToInitialize->AddressSpaceInitialized <= 1);
  661. PS_CLEAR_BITS (&ProcessToInitialize->Flags, PS_PROCESS_FLAGS_ADDRESS_SPACE1);
  662. ASSERT (ProcessToInitialize->AddressSpaceInitialized == 0);
  663. PS_SET_BITS (&ProcessToInitialize->Flags, PS_PROCESS_FLAGS_ADDRESS_SPACE2);
  664. ASSERT (ProcessToInitialize->AddressSpaceInitialized == 2);
  665. ExInitializeFastMutex (&ProcessToInitialize->AddressCreationLock);
  666. ExInitializeFastMutex (&ProcessToInitialize->WorkingSetLock);
  667. //
  668. // NOTE: The process block has been zeroed when allocated, so
  669. // there is no need to zero fields and set pointers to NULL.
  670. //
  671. ASSERT (ProcessToInitialize->VadRoot == NULL);
  672. KeQuerySystemTime(&ProcessToInitialize->Vm.LastTrimTime);
  673. ProcessToInitialize->Vm.VmWorkingSetList = MmWorkingSetList;
  674. //
  675. // Obtain a page to map the working set and initialize the
  676. // working set. Get the PFN lock to allocate physical pages.
  677. //
  678. LOCK_PFN (OldIrql);
  679. //
  680. // Initialize the PFN database for the Page Directory and the
  681. // PDE which maps hyper space.
  682. //
  683. #if (_MI_PAGING_LEVELS >= 3)
  684. #if (_MI_PAGING_LEVELS >= 4)
  685. PointerPte = MiGetPteAddress (PXE_BASE);
  686. PxePhysicalPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  687. MiInitializePfn (PxePhysicalPage, PointerPte, 1);
  688. PointerPte = MiGetPxeAddress (HYPER_SPACE);
  689. #else
  690. PointerPte = MiGetPteAddress ((PVOID)PDE_TBASE);
  691. #endif
  692. PpePhysicalPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  693. MiInitializePfn (PpePhysicalPage, PointerPte, 1);
  694. PointerPte = MiGetPpeAddress (HYPER_SPACE);
  695. #elif defined (_X86PAE_)
  696. PointerPte = MiGetPdeAddress (PDE_BASE);
  697. #else
  698. PointerPte = MiGetPteAddress (PDE_BASE);
  699. #endif
  700. PdePhysicalPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  701. MiInitializePfn (PdePhysicalPage, PointerPte, 1);
  702. PointerPte = MiGetPdeAddress (HYPER_SPACE);
  703. MiInitializePfn (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte), PointerPte, 1);
  704. #if defined (_X86PAE_)
  705. for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) {
  706. PointerPte = MiGetPteAddress (PDE_BASE + (i << PAGE_SHIFT));
  707. PdePhysicalPage2 = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  708. MiInitializePfn (PdePhysicalPage2, PointerPte, 1);
  709. }
  710. PointerPte = MiGetPdeAddress (HYPER_SPACE2);
  711. MiInitializePfn (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte), PointerPte, 1);
  712. #endif
  713. //
  714. // The VAD bitmap spans one page when booted 2GB and the working set
  715. // page follows it. If booted 3GB, the VAD bitmap spans 1.5 pages and
  716. // the working set list uses the last half of the second page.
  717. //
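//
// The sizes quoted above follow directly if the bitmap keeps one bit per
// 64KB allocation-granularity slot of user address space (an illustrative
// assumption, not stated in this module):
//
//     2GB / 64KB = 32768 bits = 4096 bytes = one 4KB page
//     3GB / 64KB = 49152 bits = 6144 bytes = 1.5 pages
//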
  718. NumberOfPages = 2;
  719. PointerPte = MiGetPteAddress (VAD_BITMAP_SPACE);
  720. for (i = 0; i < NumberOfPages; i += 1) {
  721. ASSERT (PointerPte->u.Long != 0);
  722. VadBitMapPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  723. PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  724. MiInitializePfn (VadBitMapPage, PointerPte, 1);
  725. MI_MAKE_VALID_PTE (TempPte,
  726. VadBitMapPage,
  727. MM_READWRITE,
  728. PointerPte);
  729. MI_SET_PTE_DIRTY (TempPte);
  730. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  731. PointerPte += 1;
  732. }
  733. UNLOCK_PFN (OldIrql);
  734. PageContainingWorkingSet = ProcessToInitialize->WorkingSetPage;
  735. ASSERT (ProcessToInitialize->LockedPagesList == NULL);
  736. if (MmTrackLockedPages == TRUE) {
  737. LockedPagesHeader = ExAllocatePoolWithTag (NonPagedPool,
  738. sizeof(LOCK_HEADER),
  739. 'xTmM');
  740. if (LockedPagesHeader != NULL) {
  741. LockedPagesHeader->Count = 0;
  742. InitializeListHead (&LockedPagesHeader->ListHead);
  743. ProcessToInitialize->LockedPagesList = (PVOID)LockedPagesHeader;
  744. }
  745. }
  746. MiInitializeWorkingSetList (ProcessToInitialize);
  747. InitializeListHead (&ProcessToInitialize->PhysicalVadList);
  748. #if (_MI_PAGING_LEVELS >= 3)
  749. //
  750. // Allocate the commitment tracking bitmaps for page directory and page
  751. // table pages. This must be done before any VAD creations occur.
  752. //
  753. ASSERT (MmWorkingSetList->CommittedPageTables == NULL);
  754. ASSERT (MmWorkingSetList->NumberOfCommittedPageDirectories == 0);
  755. ASSERT ((ULONG_PTR)MM_SYSTEM_RANGE_START % (PTE_PER_PAGE * PAGE_SIZE) == 0);
  756. MmWorkingSetList->CommittedPageTables = (PULONG)
  757. ExAllocatePoolWithTag (MmPagedPoolEnd != NULL ? PagedPool : NonPagedPool,
  758. (MM_USER_PAGE_TABLE_PAGES + 7) / 8,
  759. 'dPmM');
  760. if (MmWorkingSetList->CommittedPageTables == NULL) {
  761. KeDetachProcess ();
  762. return STATUS_NO_MEMORY;
  763. }
  764. #if (_MI_PAGING_LEVELS >= 4)
  765. #if DBG
  766. p = (PUCHAR) MmWorkingSetList->CommittedPageDirectoryParents;
  767. for (j = 0; j < ((MM_USER_PAGE_DIRECTORY_PARENT_PAGES + 7) / 8); j += 1) {
  768. ASSERT (*p == 0);
  769. p += 1;
  770. }
  771. #endif
  772. ASSERT (MmWorkingSetList->CommittedPageDirectories == NULL);
  773. ASSERT (MmWorkingSetList->NumberOfCommittedPageDirectoryParents == 0);
  774. MmWorkingSetList->CommittedPageDirectories = (PULONG)
  775. ExAllocatePoolWithTag (MmPagedPoolEnd != NULL ? PagedPool : NonPagedPool,
  776. (MM_USER_PAGE_DIRECTORY_PAGES + 7) / 8,
  777. 'dPmM');
  778. if (MmWorkingSetList->CommittedPageDirectories == NULL) {
  779. ExFreePool (MmWorkingSetList->CommittedPageTables);
  780. MmWorkingSetList->CommittedPageTables = NULL;
  781. KeDetachProcess ();
  782. return STATUS_NO_MEMORY;
  783. }
  784. RtlZeroMemory (MmWorkingSetList->CommittedPageDirectories,
  785. (MM_USER_PAGE_DIRECTORY_PAGES + 7) / 8);
  786. #endif
  787. RtlZeroMemory (MmWorkingSetList->CommittedPageTables,
  788. (MM_USER_PAGE_TABLE_PAGES + 7) / 8);
  789. #if DBG
  790. p = (PUCHAR) MmWorkingSetList->CommittedPageDirectories;
  791. for (j = 0; j < ((MM_USER_PAGE_DIRECTORY_PAGES + 7) / 8); j += 1) {
  792. ASSERT (*p == 0);
  793. p += 1;
  794. }
  795. #endif
  796. #endif
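//
// CommittedPageTables (and the directory/parent bitmaps above it) are plain
// bit arrays, one bit per user page table page, sized with the usual
// (N + 7) / 8 rounding and zeroed before use. A sketch of the bookkeeping
// such a bitmap supports, with illustrative helpers rather than the real
// kernel routines:
//
//     UCHAR Bitmap[(N + 7) / 8];
//
//     set committed (i):    Bitmap[i >> 3] |=  (1 << (i & 7));
//     clear committed (i):  Bitmap[i >> 3] &= ~(1 << (i & 7));
//     test committed (i):   (Bitmap[i >> 3] >> (i & 7)) & 1;
//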
  797. //
  798. // Page faults may be taken now.
  799. //
  800. // If the system has been biased to an alternate base address to allow
  801. // 3gb of user address space and a process is not being cloned, then
  802. // create a VAD for the shared memory page.
  803. //
  804. #if defined(_X86_) && defined(MM_SHARED_USER_DATA_VA)
  805. if ((MmVirtualBias != 0) && (ProcessToClone == NULL)) {
  806. VadShare = NULL;
  807. //
  808. // Allocate a VAD to map the shared memory page. If a VAD cannot be
  809. // allocated, then detach from the target process and return a failure
  810. // status. This VAD is marked as not deletable.
  811. //
  812. if (MmHighestUserAddress > (PVOID) MM_SHARED_USER_DATA_VA) {
  813. VadShare = MiAllocateVad (MM_SHARED_USER_DATA_VA,
  814. MM_SHARED_USER_DATA_VA,
  815. FALSE);
  816. if (VadShare == NULL) {
  817. KeDetachProcess ();
  818. return STATUS_NO_MEMORY;
  819. }
  820. }
  821. //
  822. // If a section is being mapped and the executable is not large
  823. // address space aware, then create a VAD that reserves the address
  824. // space between 2gb and the highest user address.
  825. //
  826. if (SectionToMap != NULL) {
  827. if (!((PSECTION)SectionToMap)->u.Flags.Image) {
  828. KeDetachProcess ();
  829. if (VadShare != NULL) {
  830. ExFreePool (VadShare);
  831. }
  832. return STATUS_SECTION_NOT_IMAGE;
  833. }
  834. ImageInfo = ((PSECTION)SectionToMap)->Segment->u2.ImageInformation;
  835. if ((ImageInfo->ImageCharacteristics & IMAGE_FILE_LARGE_ADDRESS_AWARE) == 0) {
  836. //
  837. // Allocate a VAD to map the address space between 2gb and
  838. // the highest user address. If a VAD cannot be allocated,
  839. // then deallocate the shared address space VAD, detach from
  840. // the target process, and return a failure status.
  841. // This VAD is marked as not deletable.
  842. //
  843. VadReserve = MiAllocateVad (_2gb,
  844. (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS,
  845. FALSE);
  846. if (VadReserve == NULL) {
  847. KeDetachProcess ();
  848. if (VadShare != NULL) {
  849. ExFreePool (VadShare);
  850. }
  851. return STATUS_NO_MEMORY;
  852. }
  853. //
  854. // Insert the VAD.
  855. //
  856. // N.B. No failure can occur since there is no commit charge.
  857. //
  858. Status = MiInsertVad (VadReserve);
  859. ASSERT (NT_SUCCESS(Status));
  860. }
  861. }
  862. //
  863. // Insert the VAD.
  864. //
  865. // N.B. No failure can occur since there is no commit charge.
  866. //
  867. if (VadShare != NULL) {
  868. Status = MiInsertVad (VadShare);
  869. ASSERT (NT_SUCCESS(Status));
  870. }
  871. }
  872. #endif
  873. #if defined(_WIN64)
  874. if (ProcessToClone == NULL) {
  875. //
  876. // Reserve the address space just below KUSER_SHARED_DATA as the
  877. // compatibility area. This range (and pieces of it) can be
  878. // unreserved by user mode code such as WOW64 or csrss. Hence
  879. // commit must be charged for the page directory and table pages.
  880. //
  881. ASSERT(MiCheckForConflictingVad(ProcessToInitialize, WOW64_COMPATIBILITY_AREA_ADDRESS, MM_SHARED_USER_DATA_VA) == NULL);
  882. VadShare = MiAllocateVad (WOW64_COMPATIBILITY_AREA_ADDRESS,
  883. MM_SHARED_USER_DATA_VA,
  884. TRUE);
  885. if (VadShare == NULL) {
  886. KeDetachProcess ();
  887. return STATUS_NO_MEMORY;
  888. }
  889. //
  890. // Zero the commit charge so inserting the VAD will result in the
  891. // proper charges being applied. This way when it is split later,
  892. // the correct commitment will be returned.
  893. //
  894. // N.B. The system process is not allocated with commit because
  895. // paged pool and quotas don't exist at the point in Phase0
  896. // where this is called.
  897. //
  898. if (MmPagedPoolEnd != NULL) {
  899. VadShare->u.VadFlags.CommitCharge = 0;
  900. }
  901. //
  902. // Reserve the memory above 2GB to prevent 32 bit (WOW64) process
  903. // access.
  904. //
  905. if (SectionToMap != NULL) {
  906. if (!((PSECTION)SectionToMap)->u.Flags.Image) {
  907. KeDetachProcess ();
  908. ExFreePool (VadShare);
  909. return STATUS_SECTION_NOT_IMAGE;
  910. }
  911. ImageInfo = ((PSECTION)SectionToMap)->Segment->u2.ImageInformation;
  912. if (((ProcessToInitialize->Flags & PS_PROCESS_FLAGS_OVERRIDE_ADDRESS_SPACE) == 0) &&
  913. ((ImageInfo->ImageCharacteristics & IMAGE_FILE_LARGE_ADDRESS_AWARE) == 0 ||
  914. ImageInfo->Machine == IMAGE_FILE_MACHINE_I386)) {
  915. //
  916. // Allocate a VAD to reserve the address space between 2gb and
  917. // the highest user address. If a VAD cannot be allocated,
  918. // then deallocate the compatibility VAD, detach from the target
  919. // process and return a failure status.
  920. //
  921. VadReserve = MiAllocateVad (_2gb,
  922. (ULONG_PTR)MM_HIGHEST_USER_ADDRESS,
  923. TRUE);
  924. if (VadReserve == NULL) {
  925. KeDetachProcess ();
  926. ExFreePool (VadShare);
  927. return STATUS_NO_MEMORY;
  928. }
  929. //
  930. // Insert the VAD.
  931. //
  932. // N.B. No failure can occur since there is no commit charge.
  933. //
  934. Status = MiInsertVad (VadReserve);
  935. ASSERT (NT_SUCCESS(Status));
  936. if (ImageInfo->Machine == IMAGE_FILE_MACHINE_I386) {
  937. //
  938. // Initialize the Wow64 process structure.
  939. //
  940. Wow64Process = (PWOW64_PROCESS) ExAllocatePoolWithTag (
  941. NonPagedPool,
  942. sizeof(WOW64_PROCESS),
  943. 'WowM');
  944. if (Wow64Process == NULL) {
  945. KeDetachProcess ();
  946. ExFreePool (VadShare);
  947. return STATUS_NO_MEMORY;
  948. }
  949. RtlZeroMemory (Wow64Process, sizeof(WOW64_PROCESS));
  950. ProcessToInitialize->Wow64Process = Wow64Process;
  951. #if defined(_MIALT4K_)
  952. //
  953. // Initialize the alternate page table for the 4k
  954. // page functionality.
  955. //
  956. Status = MiInitializeAlternateTable (ProcessToInitialize);
  957. if (Status != STATUS_SUCCESS) {
  958. KeDetachProcess ();
  959. ExFreePool (VadShare);
  960. return Status;
  961. }
  962. #endif
  963. }
  964. }
  965. }
  966. //
  967. // Insert the VAD. Since this VAD has a commit charge, the working set
  968. // mutex must be held (as calls inside MiInsertVad to support routines
  969. // to charge commit require it), failures can occur and must be handled.
  970. //
  971. LOCK_WS (ProcessToInitialize);
  972. Status = MiInsertVad (VadShare);
  973. UNLOCK_WS (ProcessToInitialize);
  974. if (!NT_SUCCESS(Status)) {
  975. //
  976. // Note that the VadReserve and Wow64 allocations are automatically
  977. // released on process destruction so there is no need to tear
  978. // them down here.
  979. //
  980. ExFreePool (VadShare);
  981. KeDetachProcess ();
  982. return Status;
  983. }
  984. }
  985. #endif
  986. if (SectionToMap != NULL) {
  987. //
  988. // Map the specified section into the address space of the
  989. // process but only if it is an image section.
  990. //
  991. if (!((PSECTION)SectionToMap)->u.Flags.Image) {
  992. Status = STATUS_SECTION_NOT_IMAGE;
  993. }
  994. else {
  995. UNICODE_STRING UnicodeString;
  996. ULONG n;
  997. PWSTR Src;
  998. PCHAR Dst;
  999. PSECTION_IMAGE_INFORMATION ImageInformation;
  1000. FilePointer = ((PSECTION)SectionToMap)->Segment->ControlArea->FilePointer;
  1001. ImageInformation = ((PSECTION)SectionToMap)->Segment->u2.ImageInformation;
  1002. UnicodeString = FilePointer->FileName;
  1003. Src = (PWSTR)((PCHAR)UnicodeString.Buffer + UnicodeString.Length);
  1004. n = 0;
  1005. if (UnicodeString.Buffer != NULL) {
  1006. while (Src > UnicodeString.Buffer) {
  1007. if (*--Src == OBJ_NAME_PATH_SEPARATOR) {
  1008. Src += 1;
  1009. break;
  1010. }
  1011. else {
  1012. n += 1;
  1013. }
  1014. }
  1015. }
  1016. Dst = (PCHAR)ProcessToInitialize->ImageFileName;
  1017. if (n >= sizeof (ProcessToInitialize->ImageFileName)) {
  1018. n = sizeof (ProcessToInitialize->ImageFileName) - 1;
  1019. }
  1020. while (n--) {
  1021. *Dst++ = (UCHAR)*Src++;
  1022. }
  1023. *Dst = '\0';
  1024. if (AuditName != NULL) {
  1025. Status = SeInitializeProcessAuditName (FilePointer, FALSE, AuditName);
  1026. if (!NT_SUCCESS(Status)) {
  1027. KeDetachProcess ();
  1028. return Status;
  1029. }
  1030. }
  1031. ProcessToInitialize->SubSystemMajorVersion =
  1032. (UCHAR)ImageInformation->SubSystemMajorVersion;
  1033. ProcessToInitialize->SubSystemMinorVersion =
  1034. (UCHAR)ImageInformation->SubSystemMinorVersion;
  1035. BaseAddress = NULL;
  1036. ViewSize = 0;
  1037. ZERO_LARGE (SectionOffset);
  1038. Status = MmMapViewOfSection ((PSECTION)SectionToMap,
  1039. ProcessToInitialize,
  1040. &BaseAddress,
  1041. 0,
  1042. 0,
  1043. &SectionOffset,
  1044. &ViewSize,
  1045. ViewShare,
  1046. 0,
  1047. PAGE_READWRITE);
  1048. ProcessToInitialize->SectionBaseAddress = BaseAddress;
  1049. }
  1050. KeDetachProcess ();
  1051. return Status;
  1052. }
  1053. if (ProcessToClone != NULL) {
  1054. strcpy ((PCHAR)ProcessToInitialize->ImageFileName,
  1055. (PCHAR)ProcessToClone->ImageFileName);
  1056. //
  1057. // Clone the address space of the specified process.
  1058. //
  1059. // As the page directory and page tables are private to each
  1060. // process, the physical pages which map the directory page
  1061. // and the page table usage must be mapped into system space
  1062. // so they can be updated while in the context of the process
  1063. // we are cloning.
  1064. //
  1065. #if defined(_WIN64)
  1066. if (ProcessToClone->Wow64Process != NULL) {
  1067. //
  1068. // Initialize the Wow64 process structure.
  1069. //
  1070. Wow64Process = (PWOW64_PROCESS) ExAllocatePoolWithTag (
  1071. NonPagedPool,
  1072. sizeof(WOW64_PROCESS),
  1073. 'WowM');
  1074. if (Wow64Process == NULL) {
  1075. KeDetachProcess ();
  1076. return STATUS_NO_MEMORY;
  1077. }
  1078. RtlZeroMemory (Wow64Process, sizeof(WOW64_PROCESS));
  1079. ProcessToInitialize->Wow64Process = Wow64Process;
  1080. #if defined(_MIALT4K_)
  1081. //
  1082. // Initialize the alternate page table for the 4k
  1083. // page functionality.
  1084. //
  1085. Status = MiInitializeAlternateTable (ProcessToInitialize);
  1086. if (Status != STATUS_SUCCESS) {
  1087. KeDetachProcess ();
  1088. return Status;
  1089. }
  1090. #endif
  1091. }
  1092. #endif
  1093. KeDetachProcess ();
  1094. return MiCloneProcessAddressSpace (ProcessToClone,
  1095. ProcessToInitialize,
  1096. #if (_MI_PAGING_LEVELS >= 4)
  1097. PxePhysicalPage,
  1098. #elif (_MI_PAGING_LEVELS >= 3)
  1099. PpePhysicalPage,
  1100. #else
  1101. PdePhysicalPage,
  1102. #endif
  1103. PageContainingWorkingSet);
  1104. }
  1105. //
  1106. // System Process.
  1107. //
  1108. KeDetachProcess ();
  1109. return STATUS_SUCCESS;
  1110. }
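//
// The ImageFileName capture above walks the mapped image's file name
// backwards to the last path separator and then narrows the trailing
// component into a small fixed buffer. A standalone sketch of that scan;
// the function name, parameter types, and the assumption that DstSize is
// nonzero are illustrative, not part of this module:
//
static void
ExampleCaptureImageFileName (
    const unsigned short *Name,     // UTF-16 path, as in UNICODE_STRING.Buffer
    unsigned long LengthInBytes,    // as in UNICODE_STRING.Length
    char *Dst,                      // narrow output buffer
    unsigned long DstSize           // e.g. sizeof (ImageFileName)
    )
{
    const unsigned short *Src = Name;
    unsigned long n = 0;

    //
    // Scan backwards from the end of the name, counting the characters in
    // the trailing component and stopping just past the last '\\'.
    //
    if (Name != 0) {
        Src = Name + LengthInBytes / sizeof (*Name);
        while (Src > Name) {
            if (*--Src == '\\') {
                Src += 1;
                break;
            }
            n += 1;
        }
    }

    //
    // Truncate to the buffer size, narrow each character, and terminate.
    //
    if (n >= DstSize) {
        n = DstSize - 1;
    }
    while (n != 0) {
        *Dst++ = (char)*Src++;
        n -= 1;
    }
    *Dst = '\0';
}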
  1111. VOID
  1112. MmInitializeHandBuiltProcess (
  1113. IN PEPROCESS ProcessToInitialize,
  1114. OUT PULONG_PTR DirectoryTableBase
  1115. )
  1116. /*++
  1117. Routine Description:
  1118. This routine initializes the working set mutex and
  1119. address creation mutex for this "hand built" process.
  1120. Normally the call to MmInitializeAddressSpace initializes the
  1121. working set mutex. However, in this case, we have already initialized
  1122. the address space and we are now creating a second process using
  1123. the address space of the idle thread.
  1124. Arguments:
  1125. ProcessToInitialize - Supplies a pointer to the process to initialize.
  1126. DirectoryTableBase - Receives the pair of directory table base pointers.
  1127. Return Value:
  1128. None.
  1129. Environment:
  1130. Kernel mode. APCs disabled, idle process context.
  1131. --*/
  1132. {
  1133. PEPROCESS CurrentProcess;
  1134. #if defined (_X86PAE_)
  1135. ULONG i;
  1136. PMMPTE PdeBase;
  1137. PFN_NUMBER PageFrameIndex;
  1138. #endif
  1139. CurrentProcess = PsGetCurrentProcess();
  1140. DirectoryTableBase[0] = CurrentProcess->Pcb.DirectoryTableBase[0];
  1141. DirectoryTableBase[1] = CurrentProcess->Pcb.DirectoryTableBase[1];
  1142. #if defined(_IA64_)
  1143. ProcessToInitialize->Pcb.SessionMapInfo = CurrentProcess->Pcb.SessionMapInfo;
  1144. ProcessToInitialize->Pcb.SessionParentBase = CurrentProcess->Pcb.SessionParentBase;
  1145. #endif
  1146. ExInitializeFastMutex(&ProcessToInitialize->WorkingSetLock);
  1147. ExInitializeFastMutex(&ProcessToInitialize->AddressCreationLock);
  1148. KeInitializeSpinLock (&ProcessToInitialize->HyperSpaceLock);
  1149. ASSERT (ProcessToInitialize->VadRoot == NULL);
  1150. ProcessToInitialize->Vm.WorkingSetSize = CurrentProcess->Vm.WorkingSetSize;
  1151. ProcessToInitialize->Vm.VmWorkingSetList = MmWorkingSetList;
  1152. KeQuerySystemTime(&ProcessToInitialize->Vm.LastTrimTime);
  1153. #if defined (_X86PAE_)
  1154. if (MiSystemPaeVa.PteEntry[0].u.Long == 0) {
  1155. PageFrameIndex = (((PKPROCESS)CurrentProcess)->DirectoryTableBase[0] >> PAGE_SHIFT);
  1156. PdeBase = (PMMPTE)MiMapSinglePage (NULL,
  1157. PageFrameIndex,
  1158. MmCached,
  1159. HighPagePriority);
  1160. if (PdeBase == NULL) {
  1161. KeBugCheckEx (MEMORY_MANAGEMENT,
  1162. 0x3452,
  1163. (ULONG_PTR)CurrentProcess,
  1164. (ULONG_PTR)ProcessToInitialize,
  1165. 1);
  1166. }
  1167. for (i = 0; i < PD_PER_SYSTEM; i += 1) {
  1168. MiSystemPaeVa.PteEntry[i] = *PdeBase;
  1169. PdeBase += 1;
  1170. }
  1171. PdeBase -= PD_PER_SYSTEM;
  1172. MiUnmapSinglePage (PdeBase);
  1173. }
  1174. #endif
  1175. }
  1176. NTSTATUS
  1177. MmInitializeHandBuiltProcess2 (
  1178. IN PEPROCESS ProcessToInitialize
  1179. )
  1180. /*++
  1181. Routine Description:
  1182. This routine initializes the shared user VAD. This only needs to be done
  1183. for x86 when booted /3GB because on all other systems, the shared user
  1184. address is located above the highest user address. For x86 /3GB, this
  1185. VAD must be allocated so that other random VAD allocations do not overlap
  1186. this area which would cause the mapping to receive the wrong data.
  1187. Arguments:
  1188. ProcessToInitialize - Supplies the process that needs initialization.
  1189. Return Value:
  1190. NTSTATUS.
  1191. Environment:
  1192. Kernel mode. APCs Disabled.
  1193. --*/
  1194. {
  1195. #if defined(_X86_) && defined(MM_SHARED_USER_DATA_VA)
  1196. PMMVAD VadShare;
  1197. NTSTATUS Status;
  1198. Status = STATUS_SUCCESS;
  1199. //
  1200. // Allocate a VAD to map the shared memory page. If a VAD cannot be
  1201. // allocated, then detach from the target process and return a failure
  1202. // status. This VAD is marked as not deletable.
  1203. //
  1204. if ((MmVirtualBias != 0) &&
  1205. (MmHighestUserAddress > (PVOID) MM_SHARED_USER_DATA_VA)) {
  1206. KeAttachProcess (&ProcessToInitialize->Pcb);
  1207. VadShare = MiAllocateVad (MM_SHARED_USER_DATA_VA,
  1208. MM_SHARED_USER_DATA_VA,
  1209. FALSE);
  1210. //
  1211. // Insert the VAD.
  1212. //
  1213. // N.B. No failure can occur since there is no commit charge.
  1214. //
  1215. if (VadShare != NULL) {
  1216. Status = MiInsertVad (VadShare);
  1217. ASSERT (NT_SUCCESS(Status));
  1218. }
  1219. else {
  1220. Status = STATUS_NO_MEMORY;
  1221. }
  1222. KeDetachProcess ();
  1223. }
  1224. return Status;
  1225. #else
  1226. UNREFERENCED_PARAMETER (ProcessToInitialize);
  1227. return STATUS_SUCCESS;
  1228. #endif
  1229. }
  1230. VOID
  1231. MmDeleteProcessAddressSpace (
  1232. IN PEPROCESS Process
  1233. )
  1234. /*++
  1235. Routine Description:
  1236. This routine deletes a process's Page Directory and working set page.
  1237. Arguments:
  1238. Process - Supplies a pointer to the deleted process.
  1239. Return Value:
  1240. None.
  1241. Environment:
  1242. Kernel mode. APCs Disabled.
  1243. --*/
  1244. {
  1245. PEPROCESS CurrentProcess;
  1246. PMMPFN Pfn1;
  1247. KIRQL OldIrql;
  1248. PMMPTE PointerPte;
  1249. PFN_NUMBER PageFrameIndex;
  1250. PFN_NUMBER VadBitMapPage;
  1251. PFN_NUMBER PageFrameIndex2;
  1252. #if (_MI_PAGING_LEVELS >= 4)
  1253. PFN_NUMBER PageFrameIndex3;
  1254. PMMPTE ExtendedPageDirectoryParent;
  1255. PMMPTE PointerPxe;
  1256. #endif
  1257. #if (_MI_PAGING_LEVELS >= 3)
  1258. PMMPTE PageDirectoryParent;
  1259. PMMPTE PointerPpe;
  1260. #endif
  1261. #if defined (_X86PAE_)
  1262. ULONG i;
  1263. PFN_NUMBER HyperPage2;
  1264. PPAE_ENTRY PaeVa;
  1265. PaeVa = (PPAE_ENTRY) Process->PaeTop;
  1266. #endif
  1267. CurrentProcess = PsGetCurrentProcess ();
  1268. //
  1269. // Return commitment.
  1270. //
  1271. MiReturnCommitment (MM_PROCESS_COMMIT_CHARGE);
  1272. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_PROCESS_DELETE, MM_PROCESS_COMMIT_CHARGE);
  1273. ASSERT (Process->CommitCharge == 0);
  1274. //
  1275. // Remove the working set list page from the deleted process.
  1276. //
  1277. Pfn1 = MI_PFN_ELEMENT (Process->WorkingSetPage);
  1278. LOCK_PFN (OldIrql);
  1279. MmProcessCommit -= MM_PROCESS_COMMIT_CHARGE;
  1280. if (Process->AddressSpaceInitialized == 2) {
  1281. MI_SET_PFN_DELETED (Pfn1);
  1282. MiDecrementShareAndValidCount (Pfn1->u4.PteFrame);
  1283. MiDecrementShareCountOnly (Process->WorkingSetPage);
  1284. ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress));
  1285. //
  1286. // Map the hyper space page table page from the deleted process
  1287. // so the vad bit map (and second hyperspace page) can be captured.
  1288. //
  1289. PageFrameIndex = MI_GET_HYPER_PAGE_TABLE_FRAME_FROM_PROCESS (Process);
  1290. PointerPte = (PMMPTE)MiMapPageInHyperSpaceAtDpc (CurrentProcess,
  1291. PageFrameIndex);
  1292. #if defined (_X86PAE_)
  1293. PageFrameIndex2 = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
  1294. #endif
  1295. VadBitMapPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte + MiGetPteOffset(VAD_BITMAP_SPACE));
  1296. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPte);
  1297. //
  1298. // Remove the VAD bitmap page.
  1299. //
  1300. Pfn1 = MI_PFN_ELEMENT (VadBitMapPage);
  1301. MI_SET_PFN_DELETED (Pfn1);
  1302. MiDecrementShareAndValidCount (Pfn1->u4.PteFrame);
  1303. MiDecrementShareCountOnly (VadBitMapPage);
  1304. ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress));
  1305. //
  1306. // Remove the first hyper space page table page.
  1307. //
  1308. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1309. MI_SET_PFN_DELETED (Pfn1);
  1310. MiDecrementShareAndValidCount (Pfn1->u4.PteFrame);
  1311. MiDecrementShareCountOnly (PageFrameIndex);
  1312. ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress));
  1313. #if defined (_X86PAE_)
  1314. //
  1315. // Remove the second hyper space page table page.
  1316. //
  1317. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex2);
  1318. MI_SET_PFN_DELETED (Pfn1);
  1319. MiDecrementShareAndValidCount (Pfn1->u4.PteFrame);
  1320. MiDecrementShareCountOnly (PageFrameIndex2);
  1321. ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress));
  1322. //
  1323. // Remove the page directory pages.
  1324. //
  1325. PointerPte = (PMMPTE) PaeVa;
  1326. for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) {
  1327. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  1328. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1329. MI_SET_PFN_DELETED (Pfn1);
  1330. MiDecrementShareAndValidCount (PageFrameIndex);
  1331. MiDecrementShareAndValidCount (Pfn1->u4.PteFrame);
  1332. ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress));
  1333. PointerPte += 1;
  1334. }
  1335. #endif
  1336. //
  1337. // Remove the top level page directory page.
  1338. //
  1339. PageFrameIndex = MI_GET_DIRECTORY_FRAME_FROM_PROCESS(Process);
  1340. #if (_MI_PAGING_LEVELS >= 3)
  1341. //
  1342. // Get a pointer to the top-level page directory parent page via
  1343. // its KSEG0 address.
  1344. //
  1345. #if (_MI_PAGING_LEVELS >= 4)
  1346. ExtendedPageDirectoryParent = (PMMPTE) MiMapPageInHyperSpaceAtDpc (
  1347. CurrentProcess,
  1348. PageFrameIndex);
  1349. //
  1350. // Remove the hyper space page directory parent page
  1351. // from the deleted process.
  1352. //
  1353. PointerPxe = &ExtendedPageDirectoryParent[MiGetPxeOffset(HYPER_SPACE)];
  1354. PageFrameIndex3 = MI_GET_PAGE_FRAME_FROM_PTE(PointerPxe);
  1355. ASSERT (MI_PFN_ELEMENT(PageFrameIndex3)->u4.PteFrame == PageFrameIndex);
  1356. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, ExtendedPageDirectoryParent);
  1357. PageDirectoryParent = (PMMPTE) MiMapPageInHyperSpaceAtDpc (
  1358. CurrentProcess,
  1359. PageFrameIndex3);
  1360. #else
  1361. PageDirectoryParent = KSEG_ADDRESS (PageFrameIndex);
  1362. #endif
  1363. //
  1364. // Remove the hyper space page directory page from the deleted process.
  1365. //
  1366. PointerPpe = &PageDirectoryParent[MiGetPpeOffset(HYPER_SPACE)];
  1367. PageFrameIndex2 = MI_GET_PAGE_FRAME_FROM_PTE(PointerPpe);
  1368. #if (_MI_PAGING_LEVELS >= 4)
  1369. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryParent);
  1370. #endif
  1371. Pfn1 = MI_PFN_ELEMENT(PageFrameIndex2);
  1372. MI_SET_PFN_DELETED (Pfn1);
  1373. MiDecrementShareAndValidCount (Pfn1->u4.PteFrame);
  1374. MiDecrementShareCountOnly (PageFrameIndex2);
  1375. ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress));
  1376. #if (_MI_PAGING_LEVELS >= 4)
  1377. Pfn1 = MI_PFN_ELEMENT(PageFrameIndex3);
  1378. MI_SET_PFN_DELETED (Pfn1);
  1379. MiDecrementShareAndValidCount (Pfn1->u4.PteFrame);
  1380. MiDecrementShareCountOnly (PageFrameIndex3);
  1381. ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress));
  1382. #endif
  1383. #endif
  1384. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  1385. MI_SET_PFN_DELETED (Pfn1);
  1386. MiDecrementShareAndValidCount (PageFrameIndex);
  1387. MiDecrementShareCountOnly (PageFrameIndex);
  1388. ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress));
  1389. }
  1390. else {
  1391. //
  1392. // Process initialization never completed, just return the pages
  1393. // to the free list.
  1394. //
  1395. MiInsertPageInFreeList (Process->WorkingSetPage);
  1396. PageFrameIndex = MI_GET_DIRECTORY_FRAME_FROM_PROCESS (Process);
  1397. #if (_MI_PAGING_LEVELS >= 3)
  1398. //
  1399. // Get a pointer to the top-level page directory parent page via
  1400. // its KSEG0 address.
  1401. //
  1402. PageDirectoryParent = KSEG_ADDRESS (PageFrameIndex);
  1403. #if (_MI_PAGING_LEVELS >= 4)
  1404. PageDirectoryParent = (PMMPTE) MiMapPageInHyperSpaceAtDpc (
  1405. CurrentProcess,
  1406. PageFrameIndex);
  1407. PageFrameIndex3 = MI_GET_PAGE_FRAME_FROM_PTE (&PageDirectoryParent[MiGetPxeOffset(HYPER_SPACE)]);
  1408. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryParent);
  1409. PageDirectoryParent = (PMMPTE) MiMapPageInHyperSpaceAtDpc (
  1410. CurrentProcess,
  1411. PageFrameIndex3);
  1412. #endif
  1413. PointerPpe = &PageDirectoryParent[MiGetPpeOffset(HYPER_SPACE)];
  1414. PageFrameIndex2 = MI_GET_PAGE_FRAME_FROM_PTE(PointerPpe);
  1415. #if (_MI_PAGING_LEVELS >= 4)
  1416. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryParent);
  1417. #endif
  1418. MiInsertPageInFreeList (PageFrameIndex2);
  1419. #if (_MI_PAGING_LEVELS >= 4)
  1420. MiInsertPageInFreeList (PageFrameIndex3);
  1421. #endif
  1422. #endif
  1423. PageFrameIndex2 = MI_GET_HYPER_PAGE_TABLE_FRAME_FROM_PROCESS (Process);
  1424. PointerPte = (PMMPTE)MiMapPageInHyperSpaceAtDpc (CurrentProcess,
  1425. PageFrameIndex2);
  1426. #if defined (_X86PAE_)
  1427. HyperPage2 = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  1428. #endif
  1429. VadBitMapPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte + MiGetPteOffset(VAD_BITMAP_SPACE));
  1430. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPte);
  1431. //
  1432. // Free the VAD bitmap page.
  1433. //
  1434. MiInsertPageInFreeList (VadBitMapPage);
  1435. //
  1436. // Free the first hyper space page table page.
  1437. //
  1438. MiInsertPageInFreeList (PageFrameIndex2);
  1439. #if defined (_X86PAE_)
  1440. MiInsertPageInFreeList (HyperPage2);
  1441. PointerPte = (PMMPTE) PaeVa;
  1442. for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) {
  1443. PageFrameIndex2 = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  1444. MiInsertPageInFreeList (PageFrameIndex2);
  1445. PointerPte += 1;
  1446. }
  1447. #endif
  1448. //
  1449. // Free the topmost page directory page.
  1450. //
  1451. MiInsertPageInFreeList (PageFrameIndex);
  1452. }
  1453. MmResidentAvailablePages += MM_PROCESS_CREATE_CHARGE;
  1454. MM_BUMP_COUNTER(7, MM_PROCESS_CREATE_CHARGE);
  1455. UNLOCK_PFN (OldIrql);
  1456. #if defined (_X86PAE_)
  1457. //
  1458. // Free the page directory page pointers.
  1459. //
  1460. MiPaeFree (PaeVa);
  1461. #endif
  1462. if (Process->Session != NULL) {
  1463. //
  1464. // The Terminal Server session space data page and mapping PTE can only
  1465. // be freed when the last process in the session is deleted. This is
  1466. // because IA64 maps session space into region 1 and exited processes
  1467. // maintain their session space mapping as attaches may occur even
  1468. // after process exit that reference win32k, etc. Since the region 1
  1469. // mapping is being inserted into region registers during swap context,
  1470. // these mappings cannot be torn down until the very last deletion
  1471. // occurs.
  1472. //
  1473. MiReleaseProcessReferenceToSessionDataPage (Process->Session);
  1474. }
  1475. //
  1476. // Check to see if the paging files should be contracted.
  1477. //
  1478. MiContractPagingFiles ();
  1479. return;
  1480. }
  1481. VOID
  1482. MiDeletePteRange (
  1483. IN PEPROCESS Process,
  1484. IN PMMPTE PointerPte,
  1485. IN PMMPTE LastPte,
  1486. IN LOGICAL AddressSpaceDeletion
  1487. )
  1488. /*++
  1489. Routine Description:
  1490. This routine deletes a range of PTEs and when possible, the PDEs, PPEs and
1491. PXEs as well. Commitment for the deleted page hierarchy pages is returned here as well.
  1492. Arguments:
  1493. Process - Supplies the process whose PTEs are being deleted.
  1494. PointerPte - Supplies the PTE to begin deleting at.
  1495. LastPte - Supplies the PTE to stop deleting at (don't delete this one).
  1496. -1 signifies keep going until a nonvalid PTE is found.
  1497. AddressSpaceDeletion - Supplies TRUE if the address space is in the final
  1498. stages of deletion, FALSE otherwise.
  1499. Return Value:
  1500. None.
  1501. Environment:
  1502. Kernel mode, APCs disabled.
  1503. --*/
  1504. {
  1505. PVOID TempVa;
  1506. KIRQL OldIrql;
  1507. MMPTE_FLUSH_LIST PteFlushList;
  1508. PFN_NUMBER CommittedPages;
  1509. #if (_MI_PAGING_LEVELS >= 3)
  1510. PMMPTE PointerPpe;
  1511. PMMPTE PointerPde;
  1512. LOGICAL Boundary;
  1513. LOGICAL FinalPte;
  1514. PMMPFN Pfn1;
  1515. #endif
  1516. #if (_MI_PAGING_LEVELS >= 4)
  1517. PMMPTE PointerPxe;
  1518. #endif
  1519. if (PointerPte >= LastPte) {
  1520. return;
  1521. }
  1522. CommittedPages = 0;
  1523. PteFlushList.Count = 0;
  1524. //
  1525. // Initializing OldIrql is not needed for correctness
1526. // but without it the compiler cannot compile this code at /W4
1527. // without warning about use of an uninitialized variable.
  1528. //
  1529. OldIrql = PASSIVE_LEVEL;
  1530. //
  1531. // Final address space deletion is done with the PFN lock already held.
  1532. //
  1533. if (AddressSpaceDeletion == FALSE) {
  1534. LOCK_PFN (OldIrql);
  1535. }
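//
// On three- and four-level platforms the loop below deletes each valid
// PTE and, whenever a PDE boundary is crossed (or the last PTE of the
// range is reached), checks whether the containing page table page has
// dropped to a single share and reference; if so the PDE is deleted as
// well, and the same test is then applied to the PPE (and PXE) above it.
// On two-level platforms the loop simply walks forward deleting valid
// PTEs until LastPte or a nonvalid PTE is reached.
//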
  1536. #if (_MI_PAGING_LEVELS >= 3)
  1537. PointerPpe = MiGetPdeAddress (PointerPte);
  1538. PointerPde = MiGetPteAddress (PointerPte);
  1539. #if (_MI_PAGING_LEVELS >= 4)
  1540. PointerPxe = MiGetPpeAddress (PointerPte);
  1541. if ((PointerPxe->u.Hard.Valid == 1) &&
  1542. (PointerPpe->u.Hard.Valid == 1) &&
  1543. (PointerPde->u.Hard.Valid == 1) &&
  1544. (PointerPte->u.Hard.Valid == 1))
  1545. #else
  1546. if ((PointerPpe->u.Hard.Valid == 1) &&
  1547. (PointerPde->u.Hard.Valid == 1) &&
  1548. (PointerPte->u.Hard.Valid == 1))
  1549. #endif
  1550. {
  1551. do {
  1552. ASSERT (PointerPte->u.Hard.Valid == 1);
  1553. TempVa = MiGetVirtualAddressMappedByPte(PointerPte);
  1554. MiDeletePte (PointerPte,
  1555. TempVa,
  1556. AddressSpaceDeletion,
  1557. Process,
  1558. NULL,
  1559. &PteFlushList);
  1560. CommittedPages += 1;
  1561. PointerPte += 1;
  1562. Process->NumberOfPrivatePages += 1;
  1563. //
  1564. // If all the entries have been removed from the previous page
  1565. // table page, delete the page table page itself. Likewise with
  1566. // the page directory page.
  1567. //
  1568. if (MiIsPteOnPdeBoundary(PointerPte)) {
  1569. Boundary = TRUE;
  1570. }
  1571. else {
  1572. Boundary = FALSE;
  1573. }
  1574. if ((PointerPte >= LastPte) ||
  1575. #if (_MI_PAGING_LEVELS >= 4)
  1576. ((MiGetPpeAddress(PointerPte))->u.Hard.Valid == 0) ||
  1577. #endif
  1578. ((MiGetPdeAddress(PointerPte))->u.Hard.Valid == 0) ||
  1579. ((MiGetPteAddress(PointerPte))->u.Hard.Valid == 0) ||
  1580. (PointerPte->u.Hard.Valid == 0)) {
  1581. FinalPte = TRUE;
  1582. }
  1583. else {
  1584. FinalPte = FALSE;
  1585. }
  1586. if ((Boundary == TRUE) || (FinalPte == TRUE)) {
  1587. MiFlushPteList (&PteFlushList, FALSE, ZeroPte);
  1588. PointerPde = MiGetPteAddress (PointerPte - 1);
  1589. ASSERT (PointerPde->u.Hard.Valid == 1);
  1590. Pfn1 = MI_PFN_ELEMENT (MI_GET_PAGE_FRAME_FROM_PTE (PointerPde));
  1591. if (Pfn1->u2.ShareCount == 1 && Pfn1->u3.e2.ReferenceCount == 1)
  1592. {
  1593. MiDeletePte (PointerPde,
  1594. PointerPte - 1,
  1595. AddressSpaceDeletion,
  1596. Process,
  1597. NULL,
  1598. NULL);
  1599. CommittedPages += 1;
  1600. Process->NumberOfPrivatePages += 1;
  1601. if ((FinalPte == TRUE) || (MiIsPteOnPpeBoundary(PointerPte))) {
  1602. PointerPpe = MiGetPteAddress (PointerPde);
  1603. ASSERT (PointerPpe->u.Hard.Valid == 1);
  1604. Pfn1 = MI_PFN_ELEMENT (MI_GET_PAGE_FRAME_FROM_PTE (PointerPpe));
  1605. if (Pfn1->u2.ShareCount == 1 && Pfn1->u3.e2.ReferenceCount == 1)
  1606. {
  1607. MiDeletePte (PointerPpe,
  1608. PointerPde,
  1609. AddressSpaceDeletion,
  1610. Process,
  1611. NULL,
  1612. NULL);
  1613. CommittedPages += 1;
  1614. Process->NumberOfPrivatePages += 1;
  1615. #if (_MI_PAGING_LEVELS >= 4)
  1616. if ((FinalPte == TRUE) || (MiIsPteOnPxeBoundary(PointerPte))) {
  1617. PointerPxe = MiGetPdeAddress (PointerPde);
  1618. ASSERT (PointerPxe->u.Hard.Valid == 1);
  1619. Pfn1 = MI_PFN_ELEMENT (MI_GET_PAGE_FRAME_FROM_PTE (PointerPxe));
  1620. if (Pfn1->u2.ShareCount == 1 && Pfn1->u3.e2.ReferenceCount == 1)
  1621. {
  1622. MiDeletePte (PointerPxe,
  1623. PointerPpe,
  1624. AddressSpaceDeletion,
  1625. Process,
  1626. NULL,
  1627. NULL);
  1628. CommittedPages += 1;
  1629. Process->NumberOfPrivatePages += 1;
  1630. }
  1631. }
  1632. #endif
  1633. }
  1634. }
  1635. }
  1636. if (FinalPte == TRUE) {
  1637. break;
  1638. }
  1639. }
  1640. ASSERT (PointerPte->u.Hard.Valid == 1);
  1641. } while (TRUE);
  1642. }
  1643. #else
  1644. while (PointerPte->u.Hard.Valid) {
  1645. TempVa = MiGetVirtualAddressMappedByPte(PointerPte);
  1646. MiDeletePte (PointerPte,
  1647. TempVa,
  1648. AddressSpaceDeletion,
  1649. Process,
  1650. NULL,
  1651. &PteFlushList);
  1652. CommittedPages += 1;
  1653. Process->NumberOfPrivatePages += 1;
  1654. PointerPte += 1;
  1655. if (PointerPte >= LastPte) {
  1656. break;
  1657. }
  1658. }
  1659. #endif
  1660. if (PteFlushList.Count != 0) {
  1661. MiFlushPteList (&PteFlushList, FALSE, ZeroPte);
  1662. }
  1663. if (AddressSpaceDeletion == FALSE) {
  1664. UNLOCK_PFN (OldIrql);
  1665. }
  1666. if (CommittedPages != 0) {
  1667. MiReturnCommitment (CommittedPages);
  1668. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_PTE_RANGE, CommittedPages);
  1669. }
  1670. return;
  1671. }
  1672. VOID
  1673. MmCleanProcessAddressSpace (
  1674. IN PEPROCESS Process
  1675. )
  1676. /*++
  1677. Routine Description:
  1678. This routine cleans an address space by deleting all the user and
  1679. pagable portions of the address space. At the completion of this
  1680. routine, no page faults may occur within the process.
  1681. Arguments:
1682. Process - Supplies the process whose address space is to be cleaned.
  1683. Return Value:
  1684. None.
  1685. Environment:
  1686. Kernel mode, APCs disabled.
  1687. --*/
  1688. {
  1689. PKTHREAD CurrentThread;
  1690. PMMVAD Vad;
  1691. KEVENT Event;
  1692. KIRQL OldIrql;
  1693. PMMPTE LastPte;
  1694. PMMPTE PointerPte;
  1695. LONG AboveWsMin;
  1696. ULONG NumberOfCommittedPageTables;
  1697. #if defined (_WIN64)
  1698. PWOW64_PROCESS TempWow64;
  1699. #endif
  1700. if ((Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) ||
  1701. (Process->AddressSpaceInitialized == 0)) {
  1702. //
  1703. // This process's address space has already been deleted. However,
  1704. // this process can still have a session space. Get rid of it now.
  1705. //
  1706. MiSessionRemoveProcess ();
  1707. return;
  1708. }
  1709. if (Process->AddressSpaceInitialized == 1) {
  1710. //
  1711. // The process has been created but not fully initialized.
  1712. // Return partial resources now.
  1713. //
  1714. LOCK_PFN (OldIrql);
  1715. MmResidentAvailablePages += (Process->Vm.MinimumWorkingSetSize -
  1716. MM_PROCESS_CREATE_CHARGE);
  1717. MM_BUMP_COUNTER(41, Process->Vm.MinimumWorkingSetSize -
  1718. MM_PROCESS_CREATE_CHARGE);
  1719. UNLOCK_PFN (OldIrql);
  1720. //
  1721. // Clear the AddressSpaceInitialized flag so we don't over-return
  1722. // resident available as this routine can be called more than once
  1723. // for the same process.
  1724. //
  1725. PS_CLEAR_BITS (&Process->Flags, PS_PROCESS_FLAGS_ADDRESS_SPACE1);
  1726. ASSERT (Process->AddressSpaceInitialized == 0);
  1727. //
  1728. // This process's address space has already been deleted. However,
  1729. // this process can still have a session space. Get rid of it now.
  1730. //
  1731. MiSessionRemoveProcess ();
  1732. return;
  1733. }
  1734. //
  1735. // If working set expansion for this process is allowed, disable
  1736. // it and remove the process from expanded process list if it
  1737. // is on it.
  1738. //
  1739. LOCK_EXPANSION (OldIrql);
  1740. if (Process->Vm.Flags.BeingTrimmed) {
  1741. //
  1742. // Initialize an event and put the event address
  1743. // in the blink field. When the trimming is complete,
  1744. // this event will be set.
  1745. //
  1746. KeInitializeEvent(&Event, NotificationEvent, FALSE);
  1747. Process->Vm.WorkingSetExpansionLinks.Blink = (PLIST_ENTRY)&Event;
  1748. //
  1749. // Release the mutex and wait for the event.
  1750. //
  1751. CurrentThread = KeGetCurrentThread ();
  1752. KeEnterCriticalRegionThread (CurrentThread);
  1753. UNLOCK_EXPANSION_AND_THEN_WAIT (OldIrql);
  1754. KeWaitForSingleObject(&Event,
  1755. WrVirtualMemory,
  1756. KernelMode,
  1757. FALSE,
  1758. (PLARGE_INTEGER)NULL);
  1759. KeLeaveCriticalRegionThread (CurrentThread);
  1760. }
  1761. else if (Process->Vm.WorkingSetExpansionLinks.Flink == MM_NO_WS_EXPANSION) {
  1762. //
  1763. // No trimming is in progress and no expansion allowed, so this cannot
  1764. // be on any lists.
  1765. //
  1766. ASSERT (Process->Vm.WorkingSetExpansionLinks.Blink != MM_WS_EXPANSION_IN_PROGRESS);
  1767. UNLOCK_EXPANSION (OldIrql);
  1768. }
  1769. else {
  1770. RemoveEntryList (&Process->Vm.WorkingSetExpansionLinks);
  1771. //
  1772. // Disable expansion.
  1773. //
  1774. Process->Vm.WorkingSetExpansionLinks.Flink = MM_NO_WS_EXPANSION;
  1775. UNLOCK_EXPANSION (OldIrql);
  1776. }
  1777. MiSessionRemoveProcess ();
  1778. PointerPte = MiGetPteAddress (&MmWsle[MM_MAXIMUM_WORKING_SET]) + 1;
  1779. //
  1780. // Delete all the user owned pagable virtual addresses in the process.
  1781. //
  1782. //
  1783. // Both mutexes must be owned to synchronize with the bit setting and
  1784. // clearing of VM_DELETED. This is because various callers acquire
  1785. // only one of them (either one) before checking.
  1786. //
  1787. LOCK_WS_AND_ADDRESS_SPACE (Process);
  1788. PS_SET_BITS (&Process->Flags, PS_PROCESS_FLAGS_VM_DELETED);
  1789. //
  1790. // Delete all the valid user mode addresses from the working set
  1791. // list. At this point NO page faults are allowed on user space
  1792. // addresses. Faults are allowed on page tables for user space, which
  1793. // requires that we keep the working set structure consistent until we
  1794. // finally take it all down.
  1795. //
  1796. MiDeleteAddressesInWorkingSet (Process);
  1797. //
  1798. // Remove hash table pages, if any. This is the first time we do this
  1799. // during the deletion path, but we need to do it again before we finish
  1800. // because we may fault in some page tables during the VAD clearing. We
  1801. // could have maintained the hash table validity during the WorkingSet
  1802. // deletion above in order to avoid freeing the hash table twice, but since
  1803. // we're just deleting it all anyway, it's faster to do it this way. Note
  1804. // that if we don't do this or maintain the validity, we can trap later
  1805. // in MiGrowWsleHash.
  1806. //
  1807. LastPte = MiGetPteAddress (MmWorkingSetList->HighestPermittedHashAddress);
  1808. MiDeletePteRange (Process, PointerPte, LastPte, FALSE);
  1809. //
  1810. // Clear the hash fields as a fault may occur below on the page table
  1811. // pages during VAD clearing and resolution of the fault may result in
  1812. // adding a hash table. Thus these fields must be consistent with the
  1813. // clearing just done above.
  1814. //
  1815. MmWorkingSetList->HashTableSize = 0;
  1816. MmWorkingSetList->HashTable = NULL;
  1817. //
  1818. // Delete the virtual address descriptors and dereference any
  1819. // section objects.
  1820. //
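//
// Each VAD is handled in one of three ways: mapped views and driver-
// mapped physical views are torn down with MiRemoveMappedView; AWE
// (UserPhysicalPages) VADs have their physical pages and page tables
// released; all other VADs (removing the physical view tracker first
// for write-watch ranges) have their virtual addresses deleted with
// the PFN lock held.
//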
  1821. Vad = Process->VadRoot;
  1822. while (Vad != (PMMVAD)NULL) {
  1823. MiRemoveVad (Vad);
  1824. //
  1825. // If the system has been biased to an alternate base address to
  1826. // allow 3gb of user address space, then check if the current VAD
  1827. // describes the shared memory page.
  1828. //
  1829. #if defined(_X86_) && defined(MM_SHARED_USER_DATA_VA)
  1830. if (MmVirtualBias != 0) {
  1831. //
  1832. // If the VAD describes the shared memory page, then free the
  1833. // VAD and continue with the next entry.
  1834. //
  1835. if (Vad->StartingVpn == MI_VA_TO_VPN (MM_SHARED_USER_DATA_VA)) {
  1836. ASSERT (MmHighestUserAddress > (PVOID) MM_SHARED_USER_DATA_VA);
  1837. goto LoopEnd;
  1838. }
  1839. }
  1840. #endif
  1841. if (((Vad->u.VadFlags.PrivateMemory == 0) &&
  1842. (Vad->ControlArea != NULL)) ||
  1843. (Vad->u.VadFlags.PhysicalMapping == 1)) {
  1844. //
  1845. // This VAD represents a mapped view or a driver-mapped physical
  1846. // view - delete the view and perform any section related cleanup
  1847. // operations.
  1848. //
  1849. MiRemoveMappedView (Process, Vad);
  1850. }
  1851. else {
  1852. if (Vad->u.VadFlags.UserPhysicalPages == 1) {
  1853. //
  1854. // Free all the physical pages that this VAD might be mapping.
  1855. // Since only the AWE lock synchronizes the remap API, carefully
  1856. // remove this VAD from the list first.
  1857. //
  1858. MiAweViewRemover (Process, Vad);
  1859. MiRemoveUserPhysicalPagesVad ((PMMVAD_SHORT)Vad);
  1860. MiDeletePageTablesForPhysicalRange (
  1861. MI_VPN_TO_VA (Vad->StartingVpn),
  1862. MI_VPN_TO_VA_ENDING (Vad->EndingVpn));
  1863. }
  1864. else {
  1865. if (Vad->u.VadFlags.WriteWatch == 1) {
  1866. MiPhysicalViewRemover (Process, Vad);
  1867. }
  1868. LOCK_PFN (OldIrql);
  1869. //
  1870. // Don't specify address space deletion as TRUE as
  1871. // the working set must be consistent as page faults may
  1872. // be taken during clone removal, protoPTE lookup, etc.
  1873. //
  1874. MiDeleteVirtualAddresses (MI_VPN_TO_VA (Vad->StartingVpn),
  1875. MI_VPN_TO_VA_ENDING (Vad->EndingVpn),
  1876. FALSE,
  1877. Vad);
  1878. UNLOCK_PFN (OldIrql);
  1879. }
  1880. }
  1881. #if defined(_X86_) && defined(MM_SHARED_USER_DATA_VA)
  1882. LoopEnd:
  1883. #endif
  1884. ExFreePool (Vad);
  1885. Vad = Process->VadRoot;
  1886. }
  1887. ASSERT (Process->NumberOfVads == 0);
  1888. ASSERT (IsListEmpty (&Process->PhysicalVadList) != 0);
  1889. MiCleanPhysicalProcessPages (Process);
  1890. //
  1891. // Delete the shared data page, if any.
  1892. //
  1893. LOCK_PFN (OldIrql);
  1894. #if defined(MM_SHARED_USER_DATA_VA)
  1895. #if defined (_X86_)
  1896. if (MmHighestUserAddress > (PVOID) MM_SHARED_USER_DATA_VA) {
  1897. #endif
  1898. MiDeleteVirtualAddresses ((PVOID) MM_SHARED_USER_DATA_VA,
  1899. (PVOID) MM_SHARED_USER_DATA_VA,
  1900. FALSE,
  1901. NULL);
  1902. #if defined (_X86_)
  1903. }
  1904. #endif
  1905. #endif
  1906. //
  1907. // Delete the system portion of the address space.
  1908. // Only now is it safe to specify TRUE to MiDelete because now that the
  1909. // VADs have been deleted we can no longer fault on user space pages.
  1910. //
  1911. Process->Vm.Flags.AddressSpaceBeingDeleted = 1;
  1912. //
  1913. // Adjust the count of pages above working set maximum. This
  1914. // must be done here because the working set list is not
  1915. // updated during this deletion.
  1916. //
  1917. AboveWsMin = (LONG)Process->Vm.WorkingSetSize - (LONG)Process->Vm.MinimumWorkingSetSize;
  1918. if (AboveWsMin > 0) {
  1919. MmPagesAboveWsMinimum -= AboveWsMin;
  1920. }
  1921. UNLOCK_PFN (OldIrql);
  1922. //
  1923. // Return commitment for page table pages.
  1924. //
  1925. NumberOfCommittedPageTables = MmWorkingSetList->NumberOfCommittedPageTables;
  1926. #if (_MI_PAGING_LEVELS >= 3)
  1927. NumberOfCommittedPageTables += MmWorkingSetList->NumberOfCommittedPageDirectories;
  1928. #endif
  1929. #if (_MI_PAGING_LEVELS >= 4)
  1930. NumberOfCommittedPageTables += MmWorkingSetList->NumberOfCommittedPageDirectoryParents;
  1931. #endif
  1932. MiReturnCommitment (NumberOfCommittedPageTables);
  1933. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_PROCESS_CLEAN_PAGETABLES,
  1934. NumberOfCommittedPageTables);
  1935. if (Process->JobStatus & PS_JOB_STATUS_REPORT_COMMIT_CHANGES) {
  1936. PsChangeJobMemoryUsage(-(SSIZE_T)NumberOfCommittedPageTables);
  1937. }
  1938. Process->CommitCharge -= NumberOfCommittedPageTables;
  1939. PsReturnProcessPageFileQuota (Process, NumberOfCommittedPageTables);
  1940. MI_INCREMENT_TOTAL_PROCESS_COMMIT (0 - NumberOfCommittedPageTables);
  1941. #if (_MI_PAGING_LEVELS >= 3)
  1942. if (MmWorkingSetList->CommittedPageTables != NULL) {
  1943. ExFreePool (MmWorkingSetList->CommittedPageTables);
  1944. MmWorkingSetList->CommittedPageTables = NULL;
  1945. }
  1946. #endif
  1947. #if (_MI_PAGING_LEVELS >= 4)
  1948. if (MmWorkingSetList->CommittedPageDirectories != NULL) {
  1949. ExFreePool (MmWorkingSetList->CommittedPageDirectories);
  1950. MmWorkingSetList->CommittedPageDirectories = NULL;
  1951. }
  1952. #endif
  1953. //
  1954. // Check to make sure all the clone descriptors went away.
  1955. //
  1956. ASSERT (Process->CloneRoot == (PMMCLONE_DESCRIPTOR)NULL);
  1957. if (Process->NumberOfLockedPages != 0) {
  1958. if (Process->LockedPagesList) {
  1959. PLIST_ENTRY NextEntry;
  1960. PLOCK_TRACKER Tracker;
  1961. PLOCK_HEADER LockedPagesHeader;
  1962. LockedPagesHeader = (PLOCK_HEADER)Process->LockedPagesList;
  1963. if ((LockedPagesHeader->Count != 0) && (MiTrackingAborted == FALSE)) {
  1964. ASSERT (IsListEmpty (&LockedPagesHeader->ListHead) == 0);
  1965. NextEntry = LockedPagesHeader->ListHead.Flink;
  1966. Tracker = CONTAINING_RECORD (NextEntry,
  1967. LOCK_TRACKER,
  1968. ListEntry);
  1969. KeBugCheckEx (DRIVER_LEFT_LOCKED_PAGES_IN_PROCESS,
  1970. (ULONG_PTR)Tracker->CallingAddress,
  1971. (ULONG_PTR)Tracker->CallersCaller,
  1972. (ULONG_PTR)Tracker->Mdl,
  1973. Process->NumberOfLockedPages);
  1974. }
  1975. }
  1976. KeBugCheckEx (PROCESS_HAS_LOCKED_PAGES,
  1977. 0,
  1978. (ULONG_PTR)Process,
  1979. Process->NumberOfLockedPages,
  1980. (ULONG_PTR)Process->LockedPagesList);
  1981. return;
  1982. }
  1983. if (Process->LockedPagesList) {
  1984. ASSERT (MmTrackLockedPages == TRUE);
  1985. ExFreePool (Process->LockedPagesList);
  1986. Process->LockedPagesList = NULL;
  1987. }
  1988. #if DBG
  1989. if ((Process->NumberOfPrivatePages != 0) && (MmDebug & MM_DBG_PRIVATE_PAGES)) {
  1990. DbgPrint("MM: Process contains private pages %ld\n",
  1991. Process->NumberOfPrivatePages);
  1992. DbgBreakPoint();
  1993. }
  1994. #endif
  1995. #if defined(_WIN64)
  1996. //
  1997. // Delete the WowProcess structure.
  1998. //
  1999. if (Process->Wow64Process != NULL) {
  2000. #if defined(_MIALT4K_)
  2001. MiDeleteAlternateTable(Process);
  2002. #endif
  2003. TempWow64 = Process->Wow64Process;
  2004. Process->Wow64Process = NULL;
  2005. ExFreePool (TempWow64);
  2006. }
  2007. #endif
  2008. //
  2009. // Remove the working set list pages (except for the first one).
  2010. // These pages are not removed because DPCs could still occur within
2011. // the address space. In a DPC, nonpaged pool could be allocated
  2012. // which could require removing a page from the standby list, requiring
  2013. // hyperspace to map the previous PTE.
  2014. //
  2015. PointerPte = MiGetPteAddress (MmWorkingSetList) + 1;
  2016. LOCK_PFN (OldIrql);
  2017. MiDeletePteRange (Process, PointerPte, (PMMPTE)-1, TRUE);
  2018. //
  2019. // Remove hash table pages, if any. Yes, we've already done this once
  2020. // during the deletion path, but we need to do it again because we may
  2021. // have faulted in some page tables during the VAD clearing.
  2022. //
  2023. PointerPte = MiGetPteAddress (&MmWsle[MM_MAXIMUM_WORKING_SET]) + 1;
  2024. ASSERT (PointerPte < LastPte);
  2025. MiDeletePteRange (Process, PointerPte, LastPte, TRUE);
  2026. //
  2027. // Update the count of available resident pages.
  2028. //
  2029. ASSERT (Process->Vm.MinimumWorkingSetSize >= MM_PROCESS_CREATE_CHARGE);
  2030. MmResidentAvailablePages += Process->Vm.MinimumWorkingSetSize -
  2031. MM_PROCESS_CREATE_CHARGE;
  2032. MM_BUMP_COUNTER(8, Process->Vm.MinimumWorkingSetSize -
  2033. MM_PROCESS_CREATE_CHARGE);
  2034. ASSERT (Process->Vm.WorkingSetExpansionLinks.Flink == MM_NO_WS_EXPANSION);
  2035. UNLOCK_PFN (OldIrql);
  2036. UNLOCK_WS_AND_ADDRESS_SPACE (Process);
  2037. return;
  2038. }
  2039. #if !defined(_IA64_)
  2040. #define KERNEL_BSTORE_SIZE 0
  2041. #define KERNEL_LARGE_BSTORE_SIZE 0
  2042. #define KERNEL_LARGE_BSTORE_COMMIT 0
  2043. #define KERNEL_STACK_GUARD_PAGES 1
  2044. #else
  2045. #define KERNEL_STACK_GUARD_PAGES 2 // One for stack, one for RSE.
  2046. #endif
  2047. PVOID
  2048. MmCreateKernelStack (
  2049. IN BOOLEAN LargeStack,
  2050. IN UCHAR PreferredNode
  2051. )
  2052. /*++
  2053. Routine Description:
  2054. This routine allocates a kernel stack and a no-access page within
  2055. the non-pagable portion of the system address space.
  2056. Arguments:
  2057. LargeStack - Supplies the value TRUE if a large stack should be
  2058. created. FALSE if a small stack is to be created.
  2059. PreferredNode - Supplies the preferred node to use for the physical
  2060. page allocations. MP/NUMA systems only.
  2061. Return Value:
2062. Returns a pointer to the base of the kernel stack. Note that the
  2063. base address points to the guard page, so space must be allocated
  2064. on the stack before accessing the stack.
  2065. If a kernel stack cannot be created, the value NULL is returned.
  2066. Environment:
  2067. Kernel mode. APCs Disabled.
  2068. --*/
  2069. {
  2070. PMMPFN Pfn1;
  2071. PMMPTE PointerPte;
  2072. PMMPTE BasePte;
  2073. MMPTE TempPte;
  2074. PFN_NUMBER NumberOfPages;
  2075. ULONG NumberOfPtes;
  2076. ULONG ChargedPtes;
  2077. ULONG RequestedPtes;
  2078. ULONG NumberOfBackingStorePtes;
  2079. PFN_NUMBER PageFrameIndex;
  2080. ULONG i;
  2081. PVOID StackVa;
  2082. KIRQL OldIrql;
  2083. PSLIST_HEADER DeadStackList;
  2084. if (!LargeStack) {
  2085. //
  2086. // Check to see if any unused stacks are available.
  2087. //
  2088. #if defined(MI_MULTINODE)
  2089. DeadStackList = &KeNodeBlock[PreferredNode]->DeadStackList;
  2090. #else
  2091. UNREFERENCED_PARAMETER (PreferredNode);
  2092. DeadStackList = &MmDeadStackSListHead;
  2093. #endif
  2094. if (ExQueryDepthSList (DeadStackList) != 0) {
  2095. Pfn1 = (PMMPFN) InterlockedPopEntrySList (DeadStackList);
  2096. if (Pfn1 != NULL) {
  2097. PointerPte = Pfn1->PteAddress;
  2098. PointerPte += 1;
  2099. StackVa = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
  2100. return StackVa;
  2101. }
  2102. }
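//
// No dead stack could be reused, so build a new stack: compute how many
// PTEs to reserve and how many physical pages to populate now. The dead
// stack fast path above reuses an entire previously deleted small stack
// (linked through the PFN of its topmost page) without taking the PFN
// lock or recharging commitment.
//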
  2103. NumberOfPtes = BYTES_TO_PAGES (KERNEL_STACK_SIZE);
  2104. NumberOfBackingStorePtes = BYTES_TO_PAGES (KERNEL_BSTORE_SIZE);
  2105. NumberOfPages = NumberOfPtes + NumberOfBackingStorePtes;
  2106. }
  2107. else {
  2108. NumberOfPtes = BYTES_TO_PAGES (MI_LARGE_STACK_SIZE);
  2109. NumberOfBackingStorePtes = BYTES_TO_PAGES (KERNEL_LARGE_BSTORE_SIZE);
  2110. NumberOfPages = BYTES_TO_PAGES (KERNEL_LARGE_STACK_COMMIT
  2111. + KERNEL_LARGE_BSTORE_COMMIT);
  2112. }
  2113. ChargedPtes = NumberOfPtes + NumberOfBackingStorePtes;
  2114. //
  2115. // Charge commitment for the page file space for the kernel stack.
  2116. //
  2117. if (MiChargeCommitment (ChargedPtes, NULL) == FALSE) {
  2118. //
  2119. // Commitment exceeded, return NULL, indicating no kernel
  2120. // stacks are available.
  2121. //
  2122. return NULL;
  2123. }
  2124. //
  2125. // Obtain enough pages to contain the stack plus a guard page from
  2126. // the system PTE pool. The system PTE pool contains nonpaged PTEs
  2127. // which are currently empty.
  2128. //
  2129. // Note for IA64, the PTE allocation is divided between kernel stack
  2130. // and RSE space. The stack grows downward and the RSE grows upward.
  2131. //
  2132. RequestedPtes = ChargedPtes + KERNEL_STACK_GUARD_PAGES;
  2133. BasePte = MiReserveSystemPtes (RequestedPtes, SystemPteSpace);
  2134. if (BasePte == NULL) {
  2135. MiReturnCommitment (ChargedPtes);
  2136. return NULL;
  2137. }
  2138. PointerPte = BasePte;
  2139. StackVa = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte + NumberOfPtes + 1);
  2140. if (LargeStack) {
  2141. PointerPte += BYTES_TO_PAGES (MI_LARGE_STACK_SIZE - KERNEL_LARGE_STACK_COMMIT);
  2142. }
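//
// For a large stack only the top KERNEL_LARGE_STACK_COMMIT bytes (plus
// any backing store commit on IA64) receive physical pages here; the
// rest of the reserved PTEs stay invalid until MmGrowKernelStack (or
// MmGrowKernelBackingStore) commits them on demand.
//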
  2143. LOCK_PFN (OldIrql);
  2144. //
  2145. // Check to make sure the physical pages are available.
  2146. //
  2147. if (MI_NONPAGABLE_MEMORY_AVAILABLE() <= (SPFN_NUMBER)NumberOfPages) {
  2148. UNLOCK_PFN (OldIrql);
  2149. MiReleaseSystemPtes (BasePte, RequestedPtes, SystemPteSpace);
  2150. MiReturnCommitment (ChargedPtes);
  2151. return NULL;
  2152. }
  2153. MM_TRACK_COMMIT (MM_DBG_COMMIT_KERNEL_STACK_CREATE, ChargedPtes);
  2154. MmResidentAvailablePages -= NumberOfPages;
  2155. MM_BUMP_COUNTER(9, NumberOfPages);
  2156. for (i = 0; i < NumberOfPages; i += 1) {
  2157. PointerPte += 1;
  2158. ASSERT (PointerPte->u.Hard.Valid == 0);
  2159. MiEnsureAvailablePageOrWait (NULL, NULL);
  2160. PageFrameIndex = MiRemoveAnyPage (
  2161. MI_GET_PAGE_COLOR_NODE (PreferredNode));
  2162. PointerPte->u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
  2163. PointerPte->u.Soft.Protection = MM_KSTACK_OUTSWAPPED;
  2164. MiInitializePfn (PageFrameIndex, PointerPte, 1);
  2165. MI_MAKE_VALID_PTE (TempPte,
  2166. PageFrameIndex,
  2167. MM_READWRITE,
  2168. PointerPte);
  2169. MI_SET_PTE_DIRTY (TempPte);
  2170. MI_WRITE_VALID_PTE (PointerPte, TempPte);
  2171. }
  2172. MmProcessCommit += ChargedPtes;
  2173. MmKernelStackResident += NumberOfPages;
  2174. MmLargeStacks += LargeStack;
  2175. MmSmallStacks += !LargeStack;
  2176. MmKernelStackPages += RequestedPtes;
  2177. UNLOCK_PFN (OldIrql);
  2178. return StackVa;
  2179. }
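//
// Illustrative sketch only (not from the original sources): a
// hypothetical caller pairs this routine with MmDeleteKernelStack,
// passing the same LargeStack value to both.  Node 0 is an arbitrary
// choice here.
//
//     PVOID Stack;
//
//     Stack = MmCreateKernelStack (FALSE, 0);
//     if (Stack == NULL) {
//         return STATUS_INSUFFICIENT_RESOURCES;   // hypothetical caller policy
//     }
//
//     ... the new thread runs on this stack ...
//
//     MmDeleteKernelStack (Stack, FALSE);
//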
  2180. VOID
  2181. MmDeleteKernelStack (
  2182. IN PVOID PointerKernelStack,
  2183. IN BOOLEAN LargeStack
  2184. )
  2185. /*++
  2186. Routine Description:
  2187. This routine deletes a kernel stack and the no-access page within
  2188. the non-pagable portion of the system address space.
  2189. Arguments:
  2190. PointerKernelStack - Supplies a pointer to the base of the kernel stack.
  2191. LargeStack - Supplies the value TRUE if a large stack is being deleted.
  2192. FALSE if a small stack is to be deleted.
  2193. Return Value:
  2194. None.
  2195. Environment:
  2196. Kernel mode. APCs Disabled.
  2197. --*/
  2198. {
  2199. PMMPTE PointerPte;
  2200. PMMPFN Pfn1;
  2201. PMMPFN Pfn2;
  2202. PFN_NUMBER NumberOfPages;
  2203. ULONG NumberOfPtes;
  2204. ULONG NumberOfStackPtes;
  2205. PFN_NUMBER PageFrameIndex;
  2206. PFN_NUMBER PageTableFrameIndex;
  2207. ULONG i;
  2208. KIRQL OldIrql;
  2209. MMPTE PteContents;
  2210. PSLIST_HEADER DeadStackList;
  2211. PointerPte = MiGetPteAddress (PointerKernelStack);
  2212. //
  2213. // PointerPte points to the guard page, point to the previous
  2214. // page before removing physical pages.
  2215. //
  2216. PointerPte -= 1;
  2217. //
  2218. // Check to see if the stack page should be placed on the dead
  2219. // kernel stack page list. The dead kernel stack list is a
  2220. // singly linked list of kernel stacks from terminated threads.
  2221. // The stacks are saved on a linked list up to a maximum number
  2222. // to avoid the overhead of flushing the entire TB on all processors
2223. // every time a thread terminates. The TB on all processors must
2224. // be flushed as kernel stacks reside in the nonpaged system part
  2225. // of the address space.
  2226. //
  2227. if (!LargeStack) {
  2228. #if defined(MI_MULTINODE)
  2229. //
  2230. // Scan the physical page frames and only place this stack on the
2231. // dead stack list if all the pages are on the same node. Note that
2232. // if this push goes cross-node it may make the interlocked instruction
2233. // slightly more expensive, but it is worth it all things considered.
  2234. //
  2235. ULONG NodeNumber;
  2236. PteContents = *PointerPte;
  2237. ASSERT (PteContents.u.Hard.Valid == 1);
  2238. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
  2239. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  2240. NodeNumber = Pfn1->u3.e1.PageColor;
  2241. DeadStackList = &KeNodeBlock[NodeNumber]->DeadStackList;
  2242. #else
  2243. DeadStackList = &MmDeadStackSListHead;
  2244. #endif
  2245. NumberOfPtes = BYTES_TO_PAGES (KERNEL_STACK_SIZE + KERNEL_BSTORE_SIZE);
  2246. if (ExQueryDepthSList (DeadStackList) < MmMaximumDeadKernelStacks) {
  2247. #if defined(MI_MULTINODE)
  2248. //
  2249. // The node could use some more dead stacks - but first make sure
  2250. // all the physical pages are from the same node in a multinode
  2251. // system.
  2252. //
  2253. if (KeNumberNodes > 1) {
  2254. ULONG CheckPtes;
  2255. //
  2256. // Note IA64 RSE space is not included for checking purposes
  2257. // since it's never trimmed for small stacks.
  2258. //
  2259. CheckPtes = BYTES_TO_PAGES (KERNEL_STACK_SIZE);
  2260. PointerPte -= 1;
  2261. for (i = 1; i < CheckPtes; i += 1) {
  2262. PteContents = *PointerPte;
  2263. if (PteContents.u.Hard.Valid == 0) {
  2264. break;
  2265. }
  2266. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
  2267. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  2268. if (NodeNumber != Pfn1->u3.e1.PageColor) {
  2269. PointerPte += i;
  2270. goto FreeStack;
  2271. }
  2272. PointerPte -= 1;
  2273. }
  2274. PointerPte += CheckPtes;
  2275. }
  2276. #endif
  2277. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  2278. InterlockedPushEntrySList (DeadStackList, &Pfn1->u1.NextStackPfn);
  2279. PERFINFO_DELETE_STACK(PointerPte, NumberOfPtes);
  2280. return;
  2281. }
  2282. }
  2283. else {
  2284. NumberOfPtes = BYTES_TO_PAGES (MI_LARGE_STACK_SIZE + KERNEL_LARGE_BSTORE_SIZE);
  2285. }
  2286. #if defined(MI_MULTINODE)
  2287. FreeStack:
  2288. #endif
  2289. #if defined(_IA64_)
  2290. //
2291. // Note that on IA64, PointerKernelStack points to the center of the stack
2292. // space; the size of the kernel backing store must be added to it to
2293. // reach the top of the stack space.
  2294. //
  2295. if (LargeStack) {
  2296. PointerPte = MiGetPteAddress (
  2297. (PCHAR)PointerKernelStack + KERNEL_LARGE_BSTORE_SIZE);
  2298. }
  2299. else {
  2300. PointerPte = MiGetPteAddress (
  2301. (PCHAR)PointerKernelStack + KERNEL_BSTORE_SIZE);
  2302. }
  2303. //
  2304. // PointerPte points to the guard page, point to the previous
  2305. // page before removing physical pages.
  2306. //
  2307. PointerPte -= 1;
  2308. #endif
  2309. //
  2310. // We have exceeded the limit of dead kernel stacks or this is a large
  2311. // stack, delete this kernel stack.
  2312. //
  2313. NumberOfPages = 0;
  2314. NumberOfStackPtes = NumberOfPtes + KERNEL_STACK_GUARD_PAGES;
  2315. LOCK_PFN (OldIrql);
  2316. for (i = 0; i < NumberOfPtes; i += 1) {
  2317. PteContents = *PointerPte;
  2318. if (PteContents.u.Hard.Valid == 1) {
  2319. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
  2320. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  2321. PageTableFrameIndex = Pfn1->u4.PteFrame;
  2322. Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);
  2323. MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);
  2324. //
  2325. // Mark the page as deleted so it will be freed when the
  2326. // reference count goes to zero.
  2327. //
  2328. MI_SET_PFN_DELETED (Pfn1);
  2329. MiDecrementShareCountOnly (PageFrameIndex);
  2330. NumberOfPages += 1;
  2331. }
  2332. PointerPte -= 1;
  2333. }
  2334. //
  2335. // Now at the stack guard page, ensure it is still a guard page.
  2336. //
  2337. ASSERT (PointerPte->u.Hard.Valid == 0);
  2338. //
  2339. // Update the count of resident available pages.
  2340. //
  2341. MmResidentAvailablePages += NumberOfPages;
  2342. MM_BUMP_COUNTER(10, NumberOfPages);
  2343. MmKernelStackPages -= NumberOfStackPtes;
  2344. MmKernelStackResident -= NumberOfPages;
  2345. MmProcessCommit -= NumberOfPtes;
  2346. MmLargeStacks -= LargeStack;
  2347. MmSmallStacks -= !LargeStack;
  2348. UNLOCK_PFN (OldIrql);
  2349. //
  2350. // Return PTEs and commitment.
  2351. //
  2352. MiReleaseSystemPtes (PointerPte, NumberOfStackPtes, SystemPteSpace);
  2353. MiReturnCommitment (NumberOfPtes);
  2354. MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_KERNEL_STACK_DELETE, NumberOfPtes);
  2355. return;
  2356. }
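//
// MiStackGrowthFailures counts attempts to grow a kernel stack (and, on
// IA64, the register backing store) beyond its reserved region; see the
// STATUS_STACK_OVERFLOW paths in MmGrowKernelStack and
// MmGrowKernelBackingStore below.
//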
  2357. #if defined(_IA64_)
  2358. ULONG MiStackGrowthFailures[2];
  2359. #else
  2360. ULONG MiStackGrowthFailures[1];
  2361. #endif
  2362. NTSTATUS
  2363. MmGrowKernelStack (
  2364. IN PVOID CurrentStack
  2365. )
  2366. /*++
  2367. Routine Description:
2368. This function attempts to grow the current thread's kernel stack
  2369. such that there is always KERNEL_LARGE_STACK_COMMIT bytes below
  2370. the current stack pointer.
  2371. Arguments:
  2372. CurrentStack - Supplies a pointer to the current stack pointer.
  2373. Return Value:
  2374. STATUS_SUCCESS is returned if the stack was grown.
  2375. STATUS_STACK_OVERFLOW is returned if there was not enough space reserved
  2376. for the commitment.
  2377. STATUS_NO_MEMORY is returned if there was not enough physical memory
  2378. in the system.
  2379. --*/
  2380. {
  2381. PMMPTE NewLimit;
  2382. PMMPTE StackLimit;
  2383. PMMPTE EndStack;
  2384. PETHREAD Thread;
  2385. PFN_NUMBER NumberOfPages;
  2386. KIRQL OldIrql;
  2387. PFN_NUMBER PageFrameIndex;
  2388. MMPTE TempPte;
  2389. Thread = PsGetCurrentThread ();
  2390. ASSERT (((PCHAR)Thread->Tcb.StackBase - (PCHAR)Thread->Tcb.StackLimit) <=
  2391. ((LONG)MI_LARGE_STACK_SIZE + PAGE_SIZE));
  2392. StackLimit = MiGetPteAddress (Thread->Tcb.StackLimit);
  2393. ASSERT (StackLimit->u.Hard.Valid == 1);
  2394. NewLimit = MiGetPteAddress ((PVOID)((PUCHAR)CurrentStack -
  2395. KERNEL_LARGE_STACK_COMMIT));
  2396. if (NewLimit == StackLimit) {
  2397. return STATUS_SUCCESS;
  2398. }
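//
// Worked example (page size and commit values vary by platform): with
// 4K pages and a KERNEL_LARGE_STACK_COMMIT of 12K, a stack pointer that
// has dipped to within 12K of the committed limit yields a NewLimit PTE
// below StackLimit, and the loop below commits exactly the intervening
// pages, one PTE at a time.
//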
  2399. //
  2400. // If the new stack limit exceeds the reserved region for the kernel
  2401. // stack, then return an error.
  2402. //
  2403. EndStack = MiGetPteAddress ((PVOID)((PUCHAR)Thread->Tcb.StackBase -
  2404. MI_LARGE_STACK_SIZE));
  2405. if (NewLimit < EndStack) {
  2406. //
  2407. // Don't go into guard page.
  2408. //
  2409. MiStackGrowthFailures[0] += 1;
  2410. #if DBG
  2411. DbgPrint ("MmGrowKernelStack failed: Thread %p %p %p\n",
  2412. Thread, NewLimit, EndStack);
  2413. #endif
  2414. return STATUS_STACK_OVERFLOW;
  2415. }
  2416. //
  2417. // Lock the PFN database and attempt to expand the kernel stack.
  2418. //
  2419. StackLimit -= 1;
  2420. NumberOfPages = (PFN_NUMBER) (StackLimit - NewLimit + 1);
  2421. LOCK_PFN (OldIrql);
  2422. if (MI_NONPAGABLE_MEMORY_AVAILABLE() <= (SPFN_NUMBER)NumberOfPages) {
  2423. UNLOCK_PFN (OldIrql);
  2424. return STATUS_NO_MEMORY;
  2425. }
  2426. //
  2427. // Note MmResidentAvailablePages must be charged before calling
  2428. // MiEnsureAvailablePageOrWait as it may release the PFN lock.
  2429. //
  2430. MmResidentAvailablePages -= NumberOfPages;
  2431. MM_BUMP_COUNTER(11, NumberOfPages);
  2432. while (StackLimit >= NewLimit) {
  2433. ASSERT (StackLimit->u.Hard.Valid == 0);
  2434. MiEnsureAvailablePageOrWait (NULL, NULL);
  2435. PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (StackLimit));
  2436. StackLimit->u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
  2437. StackLimit->u.Soft.Protection = MM_KSTACK_OUTSWAPPED;
  2438. MiInitializePfn (PageFrameIndex, StackLimit, 1);
  2439. MI_MAKE_VALID_PTE (TempPte,
  2440. PageFrameIndex,
  2441. MM_READWRITE,
  2442. StackLimit);
  2443. MI_SET_PTE_DIRTY (TempPte);
  2444. *StackLimit = TempPte;
  2445. StackLimit -= 1;
  2446. }
  2447. MmKernelStackResident += NumberOfPages;
  2448. UNLOCK_PFN (OldIrql);
  2449. #if DBG
  2450. ASSERT (NewLimit->u.Hard.Valid == 1);
  2451. if (NewLimit != EndStack) {
  2452. ASSERT ((NewLimit - 1)->u.Hard.Valid == 0);
  2453. }
  2454. #endif
  2455. Thread->Tcb.StackLimit = MiGetVirtualAddressMappedByPte (NewLimit);
  2456. PERFINFO_GROW_STACK(Thread);
  2457. return STATUS_SUCCESS;
  2458. }
  2459. #if defined(_IA64_)
  2460. NTSTATUS
  2461. MmGrowKernelBackingStore (
  2462. IN PVOID CurrentBackingStorePointer
  2463. )
  2464. /*++
  2465. Routine Description:
2466. This function attempts to grow the backing store for the current thread's
  2467. kernel stack such that there is always KERNEL_LARGE_STACK_COMMIT bytes
  2468. above the current backing store pointer.
  2469. Arguments:
  2470. CurrentBackingStorePointer - Supplies a pointer to the current backing
  2471. store pointer for the active kernel stack.
  2472. Return Value:
  2473. NTSTATUS.
  2474. --*/
  2475. {
  2476. PMMPTE NewLimit;
  2477. PMMPTE BstoreLimit;
  2478. PMMPTE EndStack;
  2479. PETHREAD Thread;
  2480. PFN_NUMBER NumberOfPages;
  2481. KIRQL OldIrql;
  2482. PFN_NUMBER PageFrameIndex;
  2483. MMPTE TempPte;
  2484. Thread = PsGetCurrentThread ();
  2485. ASSERT (((PCHAR)Thread->Tcb.BStoreLimit - (PCHAR)Thread->Tcb.StackBase) <=
  2486. (KERNEL_LARGE_BSTORE_SIZE + PAGE_SIZE));
  2487. BstoreLimit = MiGetPteAddress ((PVOID)((PCHAR)Thread->Tcb.BStoreLimit - 1));
  2488. ASSERT (BstoreLimit->u.Hard.Valid == 1);
  2489. NewLimit = MiGetPteAddress ((PVOID)((PUCHAR)CurrentBackingStorePointer +
  2490. KERNEL_LARGE_BSTORE_COMMIT-1));
  2491. if (NewLimit == BstoreLimit) {
  2492. return STATUS_SUCCESS;
  2493. }
  2494. //
  2495. // If the new stack limit exceeds the reserved region for the kernel
  2496. // stack, then return an error.
  2497. //
  2498. EndStack = MiGetPteAddress ((PVOID)((PUCHAR)Thread->Tcb.StackBase +
  2499. KERNEL_LARGE_BSTORE_SIZE-1));
  2500. if (NewLimit > EndStack) {
  2501. //
  2502. // Don't go into guard page.
  2503. //
  2504. MiStackGrowthFailures[1] += 1;
  2505. #if DBG
  2506. DbgPrint ("MmGrowKernelBackingStore failed: Thread %p %p %p\n",
  2507. Thread, NewLimit, EndStack);
  2508. #endif
  2509. return STATUS_STACK_OVERFLOW;
  2510. }
  2511. //
  2512. // Lock the PFN database and attempt to expand the backing store.
  2513. //
  2514. BstoreLimit += 1;
  2515. NumberOfPages = (PFN_NUMBER)(NewLimit - BstoreLimit + 1);
  2516. LOCK_PFN (OldIrql);
  2517. if (MI_NONPAGABLE_MEMORY_AVAILABLE() <= (SPFN_NUMBER)NumberOfPages) {
  2518. UNLOCK_PFN (OldIrql);
  2519. return STATUS_NO_MEMORY;
  2520. }
  2521. //
  2522. // Note MmResidentAvailablePages must be charged before calling
  2523. // MiEnsureAvailablePageOrWait as it may release the PFN lock.
  2524. //
  2525. MmResidentAvailablePages -= NumberOfPages;
  2526. MM_BUMP_COUNTER(2, NumberOfPages);
  2527. while (BstoreLimit <= NewLimit) {
  2528. ASSERT (BstoreLimit->u.Hard.Valid == 0);
  2529. MiEnsureAvailablePageOrWait (NULL, NULL);
  2530. PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (BstoreLimit));
  2531. BstoreLimit->u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
  2532. BstoreLimit->u.Soft.Protection = MM_KSTACK_OUTSWAPPED;
  2533. MiInitializePfn (PageFrameIndex, BstoreLimit, 1);
  2534. MI_MAKE_VALID_PTE (TempPte,
  2535. PageFrameIndex,
  2536. MM_READWRITE,
  2537. BstoreLimit);
  2538. MI_SET_PTE_DIRTY (TempPte);
  2539. *BstoreLimit = TempPte;
  2540. BstoreLimit += 1;
  2541. }
  2542. MmKernelStackResident += NumberOfPages;
  2543. UNLOCK_PFN (OldIrql);
  2544. #if DBG
  2545. ASSERT (NewLimit->u.Hard.Valid == 1);
  2546. if (NewLimit != EndStack) {
  2547. ASSERT ((NewLimit + 1)->u.Hard.Valid == 0);
  2548. }
  2549. #endif
  2550. Thread->Tcb.BStoreLimit = MiGetVirtualAddressMappedByPte (BstoreLimit);
  2551. return STATUS_SUCCESS;
  2552. }
  2553. #endif // defined(_IA64_)
  2554. VOID
  2555. MmOutPageKernelStack (
  2556. IN PKTHREAD Thread
  2557. )
  2558. /*++
  2559. Routine Description:
  2560. This routine makes the specified kernel stack non-resident and
  2561. puts the pages on the transition list. Note that pages below
  2562. the CurrentStackPointer are not useful and these pages are freed here.
  2563. Arguments:
  2564. Thread - Supplies a pointer to the thread whose stack should be removed.
  2565. Return Value:
  2566. None.
  2567. Environment:
  2568. Kernel mode.
  2569. --*/
  2570. #define MAX_STACK_PAGES ((KERNEL_LARGE_STACK_SIZE + KERNEL_LARGE_BSTORE_SIZE) / PAGE_SIZE)
  2571. {
  2572. PMMPTE PointerPte;
  2573. PMMPTE LastPte;
  2574. PMMPTE EndOfStackPte;
  2575. PMMPFN Pfn1;
  2576. PMMPFN Pfn2;
  2577. PFN_NUMBER PageFrameIndex;
  2578. PFN_NUMBER PageTableFrameIndex;
  2579. KIRQL OldIrql;
  2580. MMPTE TempPte;
  2581. PVOID BaseOfKernelStack;
  2582. PVOID FlushVa[MAX_STACK_PAGES];
  2583. ULONG StackSize;
  2584. ULONG Count;
  2585. PMMPTE LimitPte;
  2586. PMMPTE LowestLivePte;
  2587. ASSERT (KERNEL_LARGE_STACK_SIZE >= MI_LARGE_STACK_SIZE);
  2588. ASSERT (((PCHAR)Thread->StackBase - (PCHAR)Thread->StackLimit) <=
  2589. ((LONG)MI_LARGE_STACK_SIZE + PAGE_SIZE));
  2590. if (NtGlobalFlag & FLG_DISABLE_PAGE_KERNEL_STACKS) {
  2591. return;
  2592. }
  2593. //
  2594. // The first page of the stack is the page before the base
  2595. // of the stack.
  2596. //
  2597. BaseOfKernelStack = ((PCHAR)Thread->StackBase - PAGE_SIZE);
  2598. PointerPte = MiGetPteAddress (BaseOfKernelStack);
  2599. LastPte = MiGetPteAddress ((PULONG)Thread->KernelStack - 1);
  2600. if (Thread->LargeStack) {
  2601. StackSize = MI_LARGE_STACK_SIZE >> PAGE_SHIFT;
  2602. //
  2603. // The stack pagein won't necessarily bring back all the pages.
  2604. // Make sure that we account now for the ones that will disappear.
  2605. //
  2606. LimitPte = MiGetPteAddress (Thread->StackLimit);
  2607. LowestLivePte = MiGetPteAddress ((PVOID)((PUCHAR)Thread->InitialStack -
  2608. KERNEL_LARGE_STACK_COMMIT));
  2609. if (LowestLivePte < LimitPte) {
  2610. LowestLivePte = LimitPte;
  2611. }
  2612. }
  2613. else {
  2614. StackSize = KERNEL_STACK_SIZE >> PAGE_SHIFT;
  2615. LowestLivePte = MiGetPteAddress (Thread->StackLimit);
  2616. }
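//
// LowestLivePte marks the lowest stack page that the inswap path
// (MmInPageKernelStack) will automatically bring back.  Pages freed
// below it have their resident available charge returned in the loop
// further down, since an explicit MmGrowKernelStack call is required
// to re-commit them.
//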
  2617. EndOfStackPte = PointerPte - StackSize;
  2618. ASSERT (LowestLivePte <= LastPte);
  2619. //
  2620. // Put a signature at the current stack location - sizeof(ULONG_PTR).
  2621. //
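//
// The thread address written below acts as a stamp: MmInPageKernelStack
// re-reads this location once the stack is resident again and bugchecks
// with KERNEL_STACK_INPAGE_ERROR if it no longer matches, catching
// corruption of the stack while it was paged out.
//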
  2622. *((PULONG_PTR)Thread->KernelStack - 1) = (ULONG_PTR)Thread;
  2623. Count = 0;
  2624. LOCK_PFN (OldIrql);
  2625. do {
  2626. ASSERT (PointerPte->u.Hard.Valid == 1);
  2627. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  2628. TempPte = *PointerPte;
  2629. MI_MAKE_VALID_PTE_TRANSITION (TempPte, 0);
  2630. TempPte.u.Soft.Protection = MM_KSTACK_OUTSWAPPED;
  2631. Pfn2 = MI_PFN_ELEMENT (PageFrameIndex);
  2632. Pfn2->OriginalPte.u.Soft.Protection = MM_KSTACK_OUTSWAPPED;
  2633. MI_WRITE_INVALID_PTE (PointerPte, TempPte);
  2634. FlushVa[Count] = BaseOfKernelStack;
  2635. MiDecrementShareCount (PageFrameIndex);
  2636. PointerPte -= 1;
  2637. Count += 1;
  2638. BaseOfKernelStack = ((PCHAR)BaseOfKernelStack - PAGE_SIZE);
  2639. } while (PointerPte >= LastPte);
  2640. //
  2641. // Just toss the pages that won't ever come back in.
  2642. //
  2643. while (PointerPte != EndOfStackPte) {
  2644. if (PointerPte->u.Hard.Valid == 0) {
  2645. break;
  2646. }
  2647. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  2648. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  2649. PageTableFrameIndex = Pfn1->u4.PteFrame;
  2650. Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);
  2651. MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);
  2652. MI_SET_PFN_DELETED (Pfn1);
  2653. MiDecrementShareCountOnly (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte));
  2654. TempPte = KernelDemandZeroPte;
  2655. TempPte.u.Soft.Protection = MM_KSTACK_OUTSWAPPED;
  2656. MI_WRITE_INVALID_PTE (PointerPte, TempPte);
  2657. FlushVa[Count] = BaseOfKernelStack;
  2658. Count += 1;
  2659. //
  2660. // Return resident available for pages beyond the guaranteed portion
  2661. // as an explicit call to grow the kernel stack will be needed to get
  2662. // these pages back.
  2663. //
  2664. if (PointerPte < LowestLivePte) {
  2665. ASSERT (Thread->LargeStack);
  2666. MmResidentAvailablePages += 1;
  2667. MM_BUMP_COUNTER(12, 1);
  2668. }
  2669. PointerPte -= 1;
  2670. BaseOfKernelStack = ((PCHAR)BaseOfKernelStack - PAGE_SIZE);
  2671. }
  2672. #if defined(_IA64_)
  2673. //
  2674. // Transition or free RSE stack pages as appropriate.
  2675. //
  2676. BaseOfKernelStack = Thread->StackBase;
  2677. PointerPte = MiGetPteAddress (BaseOfKernelStack);
  2678. LastPte = MiGetPteAddress ((PULONG)Thread->KernelBStore);
  2679. if (Thread->LargeStack) {
  2680. StackSize = KERNEL_LARGE_BSTORE_SIZE >> PAGE_SHIFT;
  2681. LowestLivePte = MiGetPteAddress ((PVOID) ((PUCHAR) Thread->InitialBStore + KERNEL_LARGE_BSTORE_COMMIT - 1));
  2682. }
  2683. else {
  2684. StackSize = KERNEL_BSTORE_SIZE >> PAGE_SHIFT;
  2685. LowestLivePte = PointerPte + StackSize;
  2686. }
  2687. EndOfStackPte = PointerPte + StackSize;
  2688. do {
  2689. ASSERT (PointerPte->u.Hard.Valid == 1);
  2690. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  2691. TempPte = *PointerPte;
  2692. MI_MAKE_VALID_PTE_TRANSITION (TempPte, 0);
  2693. TempPte.u.Soft.Protection = MM_KSTACK_OUTSWAPPED;
  2694. Pfn2 = MI_PFN_ELEMENT(PageFrameIndex);
  2695. Pfn2->OriginalPte.u.Soft.Protection = MM_KSTACK_OUTSWAPPED;
  2696. MI_WRITE_INVALID_PTE (PointerPte, TempPte);
  2697. FlushVa[Count] = BaseOfKernelStack;
  2698. MiDecrementShareCount (PageFrameIndex);
  2699. PointerPte += 1;
  2700. Count += 1;
  2701. BaseOfKernelStack = ((PCHAR)BaseOfKernelStack + PAGE_SIZE);
  2702. } while (PointerPte <= LastPte);
  2703. while (PointerPte != EndOfStackPte) {
  2704. if (PointerPte->u.Hard.Valid == 0) {
  2705. break;
  2706. }
  2707. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  2708. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  2709. PageTableFrameIndex = Pfn1->u4.PteFrame;
  2710. Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);
  2711. MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);
  2712. MI_SET_PFN_DELETED (Pfn1);
  2713. MiDecrementShareCountOnly (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte));
  2714. TempPte = KernelDemandZeroPte;
  2715. TempPte.u.Soft.Protection = MM_KSTACK_OUTSWAPPED;
  2716. MI_WRITE_INVALID_PTE (PointerPte, TempPte);
  2717. FlushVa[Count] = BaseOfKernelStack;
  2718. Count += 1;
  2719. //
  2720. // Return resident available for pages beyond the guaranteed portion
  2721. // as an explicit call to grow the kernel stack will be needed to get
  2722. // these pages back.
  2723. //
  2724. if (PointerPte > LowestLivePte) {
  2725. ASSERT (Thread->LargeStack);
  2726. MmResidentAvailablePages += 1;
  2727. MM_BUMP_COUNTER(4, 1);
  2728. }
  2729. PointerPte += 1;
  2730. BaseOfKernelStack = ((PCHAR)BaseOfKernelStack + PAGE_SIZE);
  2731. }
  2732. #endif // _IA64_
  2733. //
2734. // Reduce the count of resident kernel stack pages by the number of
2735. // pages that were put in transition or freed above.
  2736. //
  2737. MmKernelStackResident -= Count;
  2738. UNLOCK_PFN (OldIrql);
  2739. ASSERT (Count <= MAX_STACK_PAGES);
  2740. if (Count < MM_MAXIMUM_FLUSH_COUNT) {
  2741. KeFlushMultipleTb (Count,
  2742. &FlushVa[0],
  2743. TRUE,
  2744. TRUE,
  2745. NULL,
  2746. *(PHARDWARE_PTE)&ZeroPte.u.Flush);
  2747. }
  2748. else {
  2749. KeFlushEntireTb (TRUE, TRUE);
  2750. }
  2751. return;
  2752. }
  2753. VOID
  2754. MmInPageKernelStack (
  2755. IN PKTHREAD Thread
  2756. )
  2757. /*++
  2758. Routine Description:
  2759. This routine makes the specified kernel stack resident.
  2760. Arguments:
2761. Thread - Supplies a pointer to the thread whose stack should be
2762. made resident.
2763. Return Value:
2764. None.
  2765. Environment:
  2766. Kernel mode.
  2767. --*/
  2768. {
  2769. PVOID BaseOfKernelStack;
  2770. PMMPTE PointerPte;
  2771. PMMPTE EndOfStackPte;
  2772. PMMPTE SignaturePte;
  2773. ULONG DiskRead;
  2774. PFN_NUMBER ContainingPage;
  2775. KIRQL OldIrql;
  2776. ASSERT (((PCHAR)Thread->StackBase - (PCHAR)Thread->StackLimit) <=
  2777. ((LONG)MI_LARGE_STACK_SIZE + PAGE_SIZE));
  2778. if (NtGlobalFlag & FLG_DISABLE_PAGE_KERNEL_STACKS) {
  2779. return;
  2780. }
  2781. //
  2782. // The first page of the stack is the page before the base
  2783. // of the stack.
  2784. //
  2785. if (Thread->LargeStack) {
  2786. PointerPte = MiGetPteAddress ((PVOID)((PUCHAR)Thread->StackLimit));
  2787. EndOfStackPte = MiGetPteAddress ((PVOID)((PUCHAR)Thread->InitialStack -
  2788. KERNEL_LARGE_STACK_COMMIT));
  2789. //
  2790. // Trim back the stack. Make sure that the stack does not grow, i.e.
  2791. // StackLimit remains the limit.
  2792. //
  2793. if (EndOfStackPte < PointerPte) {
  2794. EndOfStackPte = PointerPte;
  2795. }
  2796. Thread->StackLimit = MiGetVirtualAddressMappedByPte (EndOfStackPte);
  2797. }
  2798. else {
  2799. EndOfStackPte = MiGetPteAddress (Thread->StackLimit);
  2800. }
  2801. #if defined(_IA64_)
  2802. if (Thread->LargeStack) {
  2803. PVOID TempAddress = (PVOID)((PUCHAR)Thread->BStoreLimit);
  2804. BaseOfKernelStack = (PVOID)(((ULONG_PTR)Thread->InitialBStore +
  2805. KERNEL_LARGE_BSTORE_COMMIT) &
  2806. ~(ULONG_PTR)(PAGE_SIZE - 1));
  2807. //
  2808. // Make sure the guard page is not set to valid.
  2809. //
  2810. if (BaseOfKernelStack > TempAddress) {
  2811. BaseOfKernelStack = TempAddress;
  2812. }
  2813. Thread->BStoreLimit = BaseOfKernelStack;
  2814. }
  2815. BaseOfKernelStack = ((PCHAR)Thread->BStoreLimit - PAGE_SIZE);
  2816. #else
  2817. BaseOfKernelStack = ((PCHAR)Thread->StackBase - PAGE_SIZE);
  2818. #endif // _IA64_
  2819. PointerPte = MiGetPteAddress (BaseOfKernelStack);
  2820. DiskRead = 0;
  2821. SignaturePte = MiGetPteAddress ((PULONG_PTR)Thread->KernelStack - 1);
  2822. ASSERT (SignaturePte->u.Hard.Valid == 0);
  2823. if ((SignaturePte->u.Long != MM_KERNEL_DEMAND_ZERO_PTE) &&
  2824. (SignaturePte->u.Soft.Transition == 0)) {
  2825. DiskRead = 1;
  2826. }
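//
// DiskRead is set when the signature page is neither demand zero nor in
// transition, i.e. it has to be read back from the paging file.  It is
// passed as the first parameter of the KERNEL_STACK_INPAGE_ERROR
// bugcheck below if the signature check fails.
//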
  2827. LOCK_PFN (OldIrql);
  2828. while (PointerPte >= EndOfStackPte) {
  2829. if (!((PointerPte->u.Long == KernelDemandZeroPte.u.Long) ||
  2830. (PointerPte->u.Soft.Protection == MM_KSTACK_OUTSWAPPED))) {
  2831. KeBugCheckEx (MEMORY_MANAGEMENT,
  2832. 0x3451,
  2833. (ULONG_PTR)PointerPte,
  2834. (ULONG_PTR)Thread,
  2835. 0);
  2836. }
  2837. ASSERT (PointerPte->u.Hard.Valid == 0);
  2838. if (PointerPte->u.Soft.Protection == MM_KSTACK_OUTSWAPPED) {
  2839. PointerPte->u.Soft.Protection = PAGE_READWRITE;
  2840. }
  2841. ContainingPage = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress (PointerPte));
  2842. MiMakeOutswappedPageResident (PointerPte,
  2843. PointerPte,
  2844. 1,
  2845. ContainingPage);
  2846. PointerPte -= 1;
  2847. MmKernelStackResident += 1;
  2848. }
  2849. //
2850. // Check the signature at the current stack location - sizeof(ULONG_PTR).
  2851. //
  2852. if (*((PULONG_PTR)Thread->KernelStack - 1) != (ULONG_PTR)Thread) {
  2853. KeBugCheckEx (KERNEL_STACK_INPAGE_ERROR,
  2854. DiskRead,
  2855. *((PULONG_PTR)Thread->KernelStack - 1),
  2856. 0,
  2857. (ULONG_PTR)Thread->KernelStack);
  2858. }
  2859. UNLOCK_PFN (OldIrql);
  2860. return;
  2861. }
  2862. VOID
  2863. MmOutSwapProcess (
  2864. IN PKPROCESS Process
  2865. )
  2866. /*++
  2867. Routine Description:
  2868. This routine out swaps the specified process.
  2869. Arguments:
  2870. Process - Supplies a pointer to the process that is swapped out of memory.
  2871. Return Value:
  2872. None.
  2873. --*/
  2874. {
  2875. KIRQL OldIrql;
  2876. PEPROCESS OutProcess;
  2877. PMMPTE PointerPte;
  2878. PMMPFN Pfn1;
  2879. PFN_NUMBER HyperSpacePageTable;
  2880. PMMPTE HyperSpacePageTableMap;
  2881. PFN_NUMBER PdePage;
  2882. PFN_NUMBER ProcessPage;
  2883. MMPTE TempPte;
  2884. PMMPTE PageDirectoryMap;
  2885. PFN_NUMBER VadBitMapPage;
  2886. MMPTE TempPte2;
  2887. PEPROCESS CurrentProcess;
  2888. #if defined (_X86PAE_)
  2889. ULONG i;
  2890. PFN_NUMBER PdePage2;
  2891. PFN_NUMBER HyperPage2;
  2892. PPAE_ENTRY PaeVa;
  2893. #endif
  2894. #if (_MI_PAGING_LEVELS >= 3)
  2895. PFN_NUMBER PpePage;
  2896. #endif
  2897. #if (_MI_PAGING_LEVELS >= 4)
  2898. PFN_NUMBER PxePage;
  2899. #endif
  2900. OutProcess = CONTAINING_RECORD (Process, EPROCESS, Pcb);
  2901. PS_SET_BITS (&OutProcess->Flags, PS_PROCESS_FLAGS_OUTSWAP_ENABLED);
  2902. #if DBG
  2903. if ((MmDebug & MM_DBG_SWAP_PROCESS) != 0) {
  2904. return;
  2905. }
  2906. #endif
  2907. if (OutProcess->Flags & PS_PROCESS_FLAGS_IN_SESSION) {
  2908. MiSessionOutSwapProcess (OutProcess);
  2909. }
  2910. CurrentProcess = PsGetCurrentProcess ();
  2911. if ((OutProcess->Vm.WorkingSetSize == MM_PROCESS_COMMIT_CHARGE) &&
  2912. (OutProcess->Vm.Flags.AllowWorkingSetAdjustment)) {
  2913. LOCK_EXPANSION (OldIrql);
  2914. ASSERT (OutProcess->Outswapped == 0);
  2915. if (OutProcess->Vm.Flags.BeingTrimmed == TRUE) {
  2916. //
  2917. // An outswap is not allowed at this point because the process
  2918. // has been attached to and is being trimmed.
  2919. //
  2920. UNLOCK_EXPANSION (OldIrql);
  2921. return;
  2922. }
  2923. //
  2924. // Swap the process working set info and page parent/directory/table
  2925. // pages from memory.
  2926. //
  2927. PS_SET_BITS (&OutProcess->Flags, PS_PROCESS_FLAGS_OUTSWAPPED);
  2928. UNLOCK_EXPANSION (OldIrql);
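//
// With the PFN lock held, the working set list page, the VAD bitmap
// page, the hyper space page table page(s) and then each level of the
// page directory hierarchy are converted from valid to transition PTEs
// and their share counts released, working from the leaves toward the
// top-level directory page.
//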
  2929. LOCK_PFN (OldIrql);
  2930. //
  2931. // Remove the working set list page from the process.
  2932. //
  2933. HyperSpacePageTable = MI_GET_HYPER_PAGE_TABLE_FRAME_FROM_PROCESS (OutProcess);
  2934. HyperSpacePageTableMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess,
  2935. HyperSpacePageTable);
  2936. TempPte = HyperSpacePageTableMap[MiGetPteOffset(MmWorkingSetList)];
  2937. MI_MAKE_VALID_PTE_TRANSITION (TempPte, MM_READWRITE);
  2938. HyperSpacePageTableMap[MiGetPteOffset(MmWorkingSetList)] = TempPte;
  2939. PointerPte = &HyperSpacePageTableMap[MiGetPteOffset (VAD_BITMAP_SPACE)];
  2940. TempPte2 = *PointerPte;
  2941. VadBitMapPage = MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)&TempPte2);
  2942. MI_MAKE_VALID_PTE_TRANSITION (TempPte2, MM_READWRITE);
  2943. *PointerPte = TempPte2;
  2944. #if defined (_X86PAE_)
  2945. TempPte2 = HyperSpacePageTableMap[0];
  2946. HyperPage2 = MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)&TempPte2);
  2947. MI_MAKE_VALID_PTE_TRANSITION (TempPte2, MM_READWRITE);
  2948. HyperSpacePageTableMap[0] = TempPte2;
  2949. #endif
  2950. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, HyperSpacePageTableMap);
  2951. //
  2952. // Remove the VAD bitmap page from the process.
  2953. //
  2954. ASSERT ((MI_PFN_ELEMENT (VadBitMapPage))->u3.e1.Modified == 1);
  2955. MiDecrementShareCount (VadBitMapPage);
  2956. //
  2957. // Remove the hyper space page from the process.
  2958. //
  2959. ASSERT ((MI_PFN_ELEMENT (OutProcess->WorkingSetPage))->u3.e1.Modified == 1);
  2960. MiDecrementShareCount (OutProcess->WorkingSetPage);
  2961. //
  2962. // Remove the hyper space page table from the process.
  2963. //
  2964. Pfn1 = MI_PFN_ELEMENT (HyperSpacePageTable);
  2965. PdePage = Pfn1->u4.PteFrame;
  2966. ASSERT (PdePage);
  2967. PageDirectoryMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PdePage);
  2968. TempPte = PageDirectoryMap[MiGetPdeOffset(MmWorkingSetList)];
  2969. ASSERT (TempPte.u.Hard.Valid == 1);
  2970. ASSERT (TempPte.u.Hard.PageFrameNumber == HyperSpacePageTable);
  2971. MI_MAKE_VALID_PTE_TRANSITION (TempPte, MM_READWRITE);
  2972. PageDirectoryMap[MiGetPdeOffset(MmWorkingSetList)] = TempPte;
  2973. ASSERT (Pfn1->u3.e1.Modified == 1);
  2974. MiDecrementShareCount (HyperSpacePageTable);
  2975. #if defined (_X86PAE_)
  2976. //
  2977. // Remove the second hyper space page from the process.
  2978. //
  2979. Pfn1 = MI_PFN_ELEMENT (HyperPage2);
  2980. ASSERT (Pfn1->u3.e1.Modified == 1);
  2981. PdePage = Pfn1->u4.PteFrame;
  2982. ASSERT (PdePage);
  2983. PageDirectoryMap[MiGetPdeOffset(HYPER_SPACE2)] = TempPte2;
  2984. MiDecrementShareCount (HyperPage2);
  2985. //
  2986. // Remove the additional page directory pages.
  2987. //
  2988. PaeVa = (PPAE_ENTRY)OutProcess->PaeTop;
  2989. for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) {
  2990. TempPte = PageDirectoryMap[i];
  2991. PdePage2 = MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)&TempPte);
  2992. MI_MAKE_VALID_PTE_TRANSITION (TempPte, MM_READWRITE);
  2993. PageDirectoryMap[i] = TempPte;
  2994. Pfn1 = MI_PFN_ELEMENT (PdePage2);
  2995. ASSERT (Pfn1->u3.e1.Modified == 1);
  2996. MiDecrementShareCount (PdePage2);
  2997. PaeVa->PteEntry[i].u.Long = TempPte.u.Long;
  2998. }
  2999. #if DBG
  3000. TempPte = PageDirectoryMap[i];
  3001. PdePage2 = MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)&TempPte);
  3002. Pfn1 = MI_PFN_ELEMENT (PdePage2);
  3003. ASSERT (Pfn1->u3.e1.Modified == 1);
  3004. #endif
  3005. #endif
  3006. #if (_MI_PAGING_LEVELS >= 3)
  3007. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryMap);
  3008. //
  3009. // Remove the page directory page.
  3010. //
  3011. Pfn1 = MI_PFN_ELEMENT (PdePage);
  3012. PpePage = Pfn1->u4.PteFrame;
  3013. ASSERT (PpePage);
  3014. #if (_MI_PAGING_LEVELS==3)
  3015. ASSERT (PpePage == MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)(&(OutProcess->Pcb.DirectoryTableBase[0]))));
  3016. #endif
  3017. PageDirectoryMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PpePage);
  3018. TempPte = PageDirectoryMap[MiGetPpeOffset(MmWorkingSetList)];
  3019. ASSERT (TempPte.u.Hard.Valid == 1);
  3020. ASSERT (TempPte.u.Hard.PageFrameNumber == PdePage);
  3021. MI_MAKE_VALID_PTE_TRANSITION (TempPte, MM_READWRITE);
  3022. PageDirectoryMap[MiGetPpeOffset(MmWorkingSetList)] = TempPte;
  3023. ASSERT (Pfn1->u3.e1.Modified == 1);
  3024. MiDecrementShareCount (PdePage);
  3025. #if (_MI_PAGING_LEVELS >= 4)
  3026. //
  3027. // Remove the page directory parent page. Then remove
  3028. // the top level extended page directory parent page.
  3029. //
  3030. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryMap);
  3031. PxePage = Pfn1->u4.PteFrame;
  3032. ASSERT (PxePage);
  3033. ASSERT (PxePage == MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)(&(OutProcess->Pcb.DirectoryTableBase[0]))));
  3034. PageDirectoryMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PxePage);
  3035. TempPte = PageDirectoryMap[MiGetPxeOffset(MmWorkingSetList)];
  3036. ASSERT (TempPte.u.Hard.Valid == 1);
  3037. ASSERT (TempPte.u.Hard.PageFrameNumber == PpePage);
  3038. MI_MAKE_VALID_PTE_TRANSITION (TempPte, MM_READWRITE);
  3039. PageDirectoryMap[MiGetPxeOffset(MmWorkingSetList)] = TempPte;
  3040. ASSERT (MI_PFN_ELEMENT(PpePage)->u3.e1.Modified == 1);
  3041. MiDecrementShareCount (PpePage);
  3042. TempPte = PageDirectoryMap[MiGetPxeOffset(PXE_BASE)];
  3043. MI_MAKE_VALID_PTE_TRANSITION (TempPte, MM_READWRITE);
  3044. PageDirectoryMap[MiGetPxeOffset(PXE_BASE)] = TempPte;
  3045. Pfn1 = MI_PFN_ELEMENT (PxePage);
  3046. #else
  3047. //
  3048. // Remove the top level page directory parent page.
  3049. //
  3050. TempPte = PageDirectoryMap[MiGetPpeOffset(PDE_TBASE)];
  3051. MI_MAKE_VALID_PTE_TRANSITION (TempPte,
  3052. MM_READWRITE);
  3053. PageDirectoryMap[MiGetPpeOffset(PDE_TBASE)] = TempPte;
  3054. Pfn1 = MI_PFN_ELEMENT (PpePage);
  3055. #endif
  3056. #else
  3057. //
  3058. // Remove the top level page directory page.
  3059. //
  3060. TempPte = PageDirectoryMap[MiGetPdeOffset(PDE_BASE)];
  3061. MI_MAKE_VALID_PTE_TRANSITION (TempPte, MM_READWRITE);
  3062. PageDirectoryMap[MiGetPdeOffset(PDE_BASE)] = TempPte;
  3063. Pfn1 = MI_PFN_ELEMENT (PdePage);
  3064. #endif
  3065. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryMap);
  3066. //
  3067. // Decrement share count so the top level page directory page gets
  3068. // removed. This can cause the PteCount to equal the sharecount as the
  3069. // page directory page no longer contains itself, yet can have
  3070. // itself as a transition page.
  3071. //
  3072. Pfn1->u2.ShareCount -= 2;
  3073. Pfn1->PteAddress = (PMMPTE)&OutProcess->PageDirectoryPte;
  3074. OutProcess->PageDirectoryPte = TempPte.u.Flush;
  3075. #if defined (_X86PAE_)
  3076. PaeVa->PteEntry[i].u.Long = TempPte.u.Long;
  3077. #endif
  3078. if (MI_IS_PHYSICAL_ADDRESS(OutProcess)) {
  3079. ProcessPage = MI_CONVERT_PHYSICAL_TO_PFN (OutProcess);
  3080. }
  3081. else {
  3082. PointerPte = MiGetPteAddress (OutProcess);
  3083. ProcessPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  3084. }
  3085. Pfn1->u4.PteFrame = ProcessPage;
  3086. Pfn1 = MI_PFN_ELEMENT (ProcessPage);
  3087. //
  3088. // Increment the share count for the process page.
  3089. //
  3090. Pfn1->u2.ShareCount += 1;
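//
// The outswapped top-level page is now described as if it were mapped
// by the saved PageDirectoryPte and contained in the EPROCESS page
// (PteAddress/PteFrame set above); MmInSwapProcess relies on exactly
// this bookkeeping to make the page resident again.
//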
  3091. UNLOCK_PFN (OldIrql);
  3092. LOCK_EXPANSION (OldIrql);
  3093. if (OutProcess->Vm.WorkingSetExpansionLinks.Flink >
  3094. MM_IO_IN_PROGRESS) {
  3095. //
  3096. // The entry must be on the list.
  3097. //
  3098. RemoveEntryList (&OutProcess->Vm.WorkingSetExpansionLinks);
  3099. OutProcess->Vm.WorkingSetExpansionLinks.Flink = MM_WS_SWAPPED_OUT;
  3100. }
  3101. UNLOCK_EXPANSION (OldIrql);
  3102. OutProcess->WorkingSetPage = 0;
  3103. OutProcess->Vm.WorkingSetSize = 0;
  3104. #if defined(_IA64_)
  3105. //
  3106. // Force assignment of new PID as we have removed
  3107. // the page directory page.
  3108. // Note that a TB flush would not work here as we
  3109. // are in the wrong process context.
  3110. //
  3111. Process->ProcessRegion.SequenceNumber = 0;
3112. #endif // _IA64_
  3113. }
  3114. return;
  3115. }
  3116. VOID
  3117. MmInSwapProcess (
  3118. IN PKPROCESS Process
  3119. )
  3120. /*++
  3121. Routine Description:
3122. This routine swaps the specified process back into memory.
  3123. Arguments:
  3124. Process - Supplies a pointer to the process that is to be swapped
  3125. into memory.
  3126. Return Value:
  3127. None.
  3128. --*/
  3129. {
  3130. KIRQL OldIrql;
  3131. PEPROCESS OutProcess;
  3132. PEPROCESS CurrentProcess;
  3133. PFN_NUMBER PdePage;
  3134. PMMPTE PageDirectoryMap;
  3135. MMPTE VadBitMapPteContents;
  3136. PFN_NUMBER VadBitMapPage;
  3137. ULONG WorkingSetListPteOffset;
  3138. ULONG VadBitMapPteOffset;
  3139. PMMPTE WorkingSetListPte;
  3140. PMMPTE VadBitMapPte;
  3141. MMPTE TempPte;
  3142. PFN_NUMBER HyperSpacePageTable;
  3143. PMMPTE HyperSpacePageTableMap;
  3144. PFN_NUMBER WorkingSetPage;
  3145. PMMPFN Pfn1;
  3146. PMMPTE PointerPte;
  3147. PFN_NUMBER ProcessPage;
  3148. #if (_MI_PAGING_LEVELS >= 3)
  3149. PFN_NUMBER TopPage;
  3150. PFN_NUMBER PageDirectoryPage;
  3151. PMMPTE PageDirectoryParentMap;
  3152. #endif
  3153. #if defined (_X86PAE_)
  3154. ULONG i;
  3155. PPAE_ENTRY PaeVa;
  3156. MMPTE TempPte2;
  3157. MMPTE PageDirectoryPtes[PD_PER_SYSTEM];
  3158. #endif
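//
// The inswap path below mirrors the outswap above in reverse order:
// the top-level page directory (parent) page is made resident first,
// then each lower paging level, and finally the VAD bitmap and
// working set list pages, after which the hyperspace PTEs and the
// directory table base values are rewritten.
//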
  3159. CurrentProcess = PsGetCurrentProcess ();
  3160. OutProcess = CONTAINING_RECORD (Process, EPROCESS, Pcb);
  3161. if (OutProcess->Flags & PS_PROCESS_FLAGS_OUTSWAPPED) {
  3162. //
3163. // The process has been swapped out of memory - rebuild the
3164. // initial page structures so it can run again.
  3165. //
  3166. if (MI_IS_PHYSICAL_ADDRESS(OutProcess)) {
  3167. ProcessPage = MI_CONVERT_PHYSICAL_TO_PFN (OutProcess);
  3168. }
  3169. else {
  3170. PointerPte = MiGetPteAddress (OutProcess);
  3171. ProcessPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  3172. }
  3173. WorkingSetListPteOffset = MiGetPteOffset (MmWorkingSetList);
  3174. VadBitMapPteOffset = MiGetPteOffset (VAD_BITMAP_SPACE);
  3175. WorkingSetListPte = MiGetPteAddress (MmWorkingSetList);
  3176. VadBitMapPte = MiGetPteAddress (VAD_BITMAP_SPACE);
  3177. LOCK_PFN (OldIrql);
  3178. PdePage = MiMakeOutswappedPageResident (
  3179. #if (_MI_PAGING_LEVELS >= 4)
  3180. MiGetPteAddress (PXE_BASE),
  3181. #elif (_MI_PAGING_LEVELS >= 3)
  3182. MiGetPteAddress ((PVOID)PDE_TBASE),
  3183. #else
  3184. MiGetPteAddress (PDE_BASE),
  3185. #endif
  3186. (PMMPTE)&OutProcess->PageDirectoryPte,
  3187. 0,
  3188. ProcessPage);
  3189. //
  3190. // Adjust the counts for the process page.
  3191. //
  3192. Pfn1 = MI_PFN_ELEMENT (ProcessPage);
  3193. Pfn1->u2.ShareCount -= 1;
  3194. ASSERT ((LONG)Pfn1->u2.ShareCount >= 1);
  3195. #if (_MI_PAGING_LEVELS >= 3)
  3196. TopPage = PdePage;
  3197. #endif
  3198. //
  3199. // Adjust the counts properly for the page directory page.
  3200. //
  3201. Pfn1 = MI_PFN_ELEMENT (PdePage);
  3202. Pfn1->u2.ShareCount += 1;
  3203. Pfn1->u1.Event = (PVOID)OutProcess;
  3204. Pfn1->u4.PteFrame = PdePage;
  3205. #if (_MI_PAGING_LEVELS >= 4)
  3206. Pfn1->PteAddress = MiGetPteAddress (PXE_BASE);
  3207. #elif (_MI_PAGING_LEVELS >= 3)
  3208. Pfn1->PteAddress = MiGetPteAddress ((PVOID)PDE_TBASE);
  3209. #else
  3210. Pfn1->PteAddress = MiGetPteAddress (PDE_BASE);
  3211. #endif
  3212. #if (_MI_PAGING_LEVELS >= 4)
  3213. //
  3214. // Only the extended page directory parent page has really been
  3215. // read in above. Read in the page directory parent page now.
  3216. //
  3217. PageDirectoryParentMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PdePage);
  3218. TempPte = PageDirectoryParentMap[MiGetPxeOffset(MmWorkingSetList)];
  3219. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryParentMap);
  3220. PageDirectoryPage = MiMakeOutswappedPageResident (
  3221. MiGetPxeAddress (MmWorkingSetList),
  3222. &TempPte,
  3223. 0,
  3224. PdePage);
  3225. ASSERT (PageDirectoryPage == TempPte.u.Hard.PageFrameNumber);
  3226. ASSERT (Pfn1->u2.ShareCount >= 3);
  3227. PageDirectoryParentMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PdePage);
  3228. PageDirectoryParentMap[MiGetPxeOffset(PXE_BASE)].u.Flush =
  3229. OutProcess->PageDirectoryPte;
  3230. PageDirectoryParentMap[MiGetPxeOffset(MmWorkingSetList)] = TempPte;
  3231. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryParentMap);
  3232. PdePage = PageDirectoryPage;
  3233. #endif
  3234. #if (_MI_PAGING_LEVELS >= 3)
  3235. //
  3236. // Only the page directory parent page has really been read in above
  3237. // (and the extended page directory parent for 4-level architectures).
  3238. // Read in the page directory page now.
  3239. //
  3240. PageDirectoryParentMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PdePage);
  3241. TempPte = PageDirectoryParentMap[MiGetPpeOffset(MmWorkingSetList)];
  3242. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryParentMap);
  3243. PageDirectoryPage = MiMakeOutswappedPageResident (
  3244. MiGetPpeAddress (MmWorkingSetList),
  3245. &TempPte,
  3246. 0,
  3247. PdePage);
  3248. ASSERT (PageDirectoryPage == TempPte.u.Hard.PageFrameNumber);
  3249. PageDirectoryParentMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PdePage);
  3250. #if (_MI_PAGING_LEVELS==3)
  3251. ASSERT (Pfn1->u2.ShareCount >= 3);
  3252. PageDirectoryParentMap[MiGetPpeOffset(PDE_TBASE)].u.Flush =
  3253. OutProcess->PageDirectoryPte;
  3254. #endif
  3255. PageDirectoryParentMap[MiGetPpeOffset(MmWorkingSetList)] = TempPte;
  3256. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryParentMap);
  3257. PdePage = PageDirectoryPage;
  3258. #endif
  3259. #if defined (_X86PAE_)
  3260. //
  3261. // Locate the additional page directory pages and make them resident.
  3262. //
  3263. PaeVa = (PPAE_ENTRY)OutProcess->PaeTop;
  3264. PageDirectoryMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PdePage);
  3265. for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) {
  3266. PageDirectoryPtes[i] = PageDirectoryMap[i];
  3267. }
  3268. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryMap);
  3269. for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) {
  3270. MiMakeOutswappedPageResident (
  3271. MiGetPteAddress (PDE_BASE + (i << PAGE_SHIFT)),
  3272. &PageDirectoryPtes[i],
  3273. 0,
  3274. PdePage);
  3275. PaeVa->PteEntry[i].u.Long = (PageDirectoryPtes[i].u.Long & ~MM_PAE_PDPTE_MASK);
  3276. }
  3277. PageDirectoryMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PdePage);
  3278. for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) {
  3279. PageDirectoryMap[i] = PageDirectoryPtes[i];
  3280. }
  3281. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryMap);
  3282. TempPte.u.Flush = OutProcess->PageDirectoryPte;
  3283. TempPte.u.Long &= ~MM_PAE_PDPTE_MASK;
  3284. PaeVa->PteEntry[i].u.Flush = TempPte.u.Flush;
  3285. //
  3286. // Locate the second page table page for hyperspace & make it resident.
  3287. //
  3288. PageDirectoryMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PdePage);
  3289. TempPte = PageDirectoryMap[MiGetPdeOffset(HYPER_SPACE2)];
  3290. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryMap);
  3291. HyperSpacePageTable = MiMakeOutswappedPageResident (
  3292. MiGetPdeAddress (HYPER_SPACE2),
  3293. &TempPte,
  3294. 0,
  3295. PdePage);
  3296. PageDirectoryMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PdePage);
  3297. PageDirectoryMap[MiGetPdeOffset(HYPER_SPACE2)] = TempPte;
  3298. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryMap);
  3299. TempPte2 = TempPte;
  3300. #endif
  3301. //
  3302. // Locate the page table page for hyperspace and make it resident.
  3303. //
  3304. PageDirectoryMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PdePage);
  3305. TempPte = PageDirectoryMap[MiGetPdeOffset(MmWorkingSetList)];
  3306. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryMap);
  3307. HyperSpacePageTable = MiMakeOutswappedPageResident (
  3308. MiGetPdeAddress (HYPER_SPACE),
  3309. &TempPte,
  3310. 0,
  3311. PdePage);
  3312. ASSERT (Pfn1->u2.ShareCount >= 3);
  3313. PageDirectoryMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, PdePage);
  3314. #if (_MI_PAGING_LEVELS==2)
  3315. PageDirectoryMap[MiGetPdeOffset(PDE_BASE)].u.Flush =
  3316. OutProcess->PageDirectoryPte;
  3317. #endif
  3318. PageDirectoryMap[MiGetPdeOffset(MmWorkingSetList)] = TempPte;
  3319. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PageDirectoryMap);
  3320. //
  3321. // Map in the hyper space page table page and retrieve the
  3322. // PTEs that map the working set list and VAD bitmap. Note that
  3323. // although both PTEs lie in the same page table page, they must
3324. // be retrieved separately: the Vad PTE may indicate its page
3325. // is in a paging file while the WSL PTE may indicate its page is in
  3326. // transition. The VAD page inswap may take the WSL page from
  3327. // the transition list - CHANGING the WSL PTE ! So the WSL PTE cannot
  3328. // be captured until after the VAD inswap completes.
  3329. //
  3330. HyperSpacePageTableMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, HyperSpacePageTable);
  3331. VadBitMapPteContents = HyperSpacePageTableMap[VadBitMapPteOffset];
  3332. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, HyperSpacePageTableMap);
  3333. Pfn1 = MI_PFN_ELEMENT (HyperSpacePageTable);
  3334. Pfn1->u1.WsIndex = 1;
  3335. //
  3336. // Read in the VAD bitmap page.
  3337. //
  3338. VadBitMapPage = MiMakeOutswappedPageResident (VadBitMapPte,
  3339. &VadBitMapPteContents,
  3340. 0,
  3341. HyperSpacePageTable);
  3342. //
  3343. // Read in the working set list page.
  3344. //
  3345. HyperSpacePageTableMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, HyperSpacePageTable);
  3346. TempPte = HyperSpacePageTableMap[WorkingSetListPteOffset];
  3347. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, HyperSpacePageTableMap);
  3348. WorkingSetPage = MiMakeOutswappedPageResident (WorkingSetListPte,
  3349. &TempPte,
  3350. 0,
  3351. HyperSpacePageTable);
  3352. //
3353. // Update the PTEs; this can be done together for PTEs that lie within
  3354. // the same page table page.
  3355. //
  3356. HyperSpacePageTableMap = MiMapPageInHyperSpaceAtDpc (CurrentProcess, HyperSpacePageTable);
  3357. HyperSpacePageTableMap[WorkingSetListPteOffset] = TempPte;
  3358. #if defined (_X86PAE_)
  3359. HyperSpacePageTableMap[0] = TempPte2;
  3360. #endif
  3361. HyperSpacePageTableMap[VadBitMapPteOffset] = VadBitMapPteContents;
  3362. MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, HyperSpacePageTableMap);
  3363. Pfn1 = MI_PFN_ELEMENT (WorkingSetPage);
  3364. Pfn1->u1.WsIndex = 3;
  3365. Pfn1 = MI_PFN_ELEMENT (VadBitMapPage);
  3366. Pfn1->u1.WsIndex = 2;
  3367. UNLOCK_PFN (OldIrql);
  3368. LOCK_EXPANSION (OldIrql);
  3369. //
  3370. // Allow working set trimming on this process.
  3371. //
  3372. OutProcess->Vm.Flags.AllowWorkingSetAdjustment = TRUE;
  3373. if (OutProcess->Vm.WorkingSetExpansionLinks.Flink == MM_WS_SWAPPED_OUT) {
  3374. InsertTailList (&MmWorkingSetExpansionHead.ListHead,
  3375. &OutProcess->Vm.WorkingSetExpansionLinks);
  3376. }
  3377. UNLOCK_EXPANSION (OldIrql);
  3378. //
  3379. // Set up process structures.
  3380. //
  3381. #if (_MI_PAGING_LEVELS >= 3)
  3382. PdePage = TopPage;
  3383. #endif
  3384. OutProcess->WorkingSetPage = WorkingSetPage;
  3385. OutProcess->Vm.WorkingSetSize = MM_PROCESS_COMMIT_CHARGE;
  3386. #if !defined (_X86PAE_)
  3387. INITIALIZE_DIRECTORY_TABLE_BASE (&Process->DirectoryTableBase[0],
  3388. PdePage);
  3389. INITIALIZE_DIRECTORY_TABLE_BASE (&Process->DirectoryTableBase[1],
  3390. HyperSpacePageTable);
  3391. #else
  3392. //
  3393. // The DirectoryTableBase[0] never changes for PAE processes.
  3394. //
  3395. Process->DirectoryTableBase[1] = HyperSpacePageTable;
  3396. #endif
  3397. PS_CLEAR_BITS (&OutProcess->Flags, PS_PROCESS_FLAGS_OUTSWAPPED);
  3398. }
  3399. if (OutProcess->Flags & PS_PROCESS_FLAGS_IN_SESSION) {
  3400. MiSessionInSwapProcess (OutProcess);
  3401. }
  3402. PS_CLEAR_BITS (&OutProcess->Flags, PS_PROCESS_FLAGS_OUTSWAP_ENABLED);
  3403. if (PERFINFO_IS_GROUP_ON(PERF_MEMORY)) {
  3404. PERFINFO_SWAPPROCESS_INFORMATION PerfInfoSwapProcess;
  3405. PerfInfoSwapProcess.ProcessId = HandleToUlong((OutProcess)->UniqueProcessId);
  3406. PerfInfoSwapProcess.PageDirectoryBase = MmGetDirectoryFrameFromProcess(OutProcess);
  3407. PerfInfoLogBytes (PERFINFO_LOG_TYPE_INSWAPPROCESS,
  3408. &PerfInfoSwapProcess,
  3409. sizeof(PerfInfoSwapProcess));
  3410. }
  3411. return;
  3412. }
  3413. NTSTATUS
  3414. MiCreatePebOrTeb (
  3415. IN PEPROCESS TargetProcess,
  3416. IN ULONG Size,
  3417. OUT PVOID *Base
  3418. )
  3419. /*++
  3420. Routine Description:
  3421. This routine creates a TEB or PEB page within the target process.
  3422. Arguments:
  3423. TargetProcess - Supplies a pointer to the process in which to create
  3424. the structure.
  3425. Size - Supplies the size of the structure to create a VAD for.
  3426. Base - Supplies a pointer to place the PEB/TEB virtual address on success.
  3427. This has no meaning if success is not returned.
  3428. Return Value:
  3429. NTSTATUS.
  3430. Environment:
  3431. Kernel mode, attached to the specified process.
  3432. --*/
  3433. {
  3434. PMMVAD_LONG Vad;
  3435. NTSTATUS Status;
  3436. //
  3437. // Allocate and initialize the Vad before acquiring the address space
  3438. // and working set mutexes so as to minimize mutex hold duration.
  3439. //
  3440. Vad = (PMMVAD_LONG) ExAllocatePoolWithTag (NonPagedPool,
  3441. sizeof(MMVAD_LONG),
  3442. 'ldaV');
  3443. if (Vad == NULL) {
  3444. return STATUS_NO_MEMORY;
  3445. }
  3446. Vad->u.LongFlags = 0;
  3447. Vad->u.VadFlags.CommitCharge = BYTES_TO_PAGES (Size);
  3448. Vad->u.VadFlags.MemCommit = 1;
  3449. Vad->u.VadFlags.PrivateMemory = 1;
  3450. Vad->u.VadFlags.Protection = MM_EXECUTE_READWRITE;
  3451. //
  3452. // Mark VAD as not deletable, no protection change.
  3453. //
  3454. Vad->u.VadFlags.NoChange = 1;
  3455. Vad->u2.LongFlags2 = 0;
  3456. Vad->u2.VadFlags2.OneSecured = 1;
  3457. Vad->u2.VadFlags2.LongVad = 1;
  3458. Vad->u2.VadFlags2.ReadOnly = 0;
  3459. #if defined(_MIALT4K_)
  3460. Vad->AliasInformation = NULL;
  3461. #endif
  3462. //
  3463. // Get the address creation mutex to block multiple threads from
  3464. // creating or deleting address space at the same time and
  3465. // get the working set mutex so virtual address descriptors can
  3466. // be inserted and walked.
  3467. //
  3468. LOCK_ADDRESS_SPACE (TargetProcess);
  3469. //
  3470. // Find a VA for the PEB on a page-size boundary.
  3471. //
  3472. Status = MiFindEmptyAddressRangeDown (TargetProcess->VadRoot,
  3473. ROUND_TO_PAGES (Size),
  3474. ((PCHAR)MM_HIGHEST_VAD_ADDRESS + 1),
  3475. PAGE_SIZE,
  3476. Base);
  3477. if (!NT_SUCCESS(Status)) {
  3478. //
  3479. // No range was available, deallocate the Vad and return the status.
  3480. //
  3481. UNLOCK_ADDRESS_SPACE (TargetProcess);
  3482. ExFreePool (Vad);
  3483. return Status;
  3484. }
  3485. //
  3486. // An unoccupied address range has been found, finish initializing the
  3487. // virtual address descriptor to describe this range.
  3488. //
  3489. Vad->StartingVpn = MI_VA_TO_VPN (*Base);
  3490. Vad->EndingVpn = MI_VA_TO_VPN ((PCHAR)*Base + Size - 1);
  3491. Vad->u3.Secured.StartVpn = (ULONG_PTR)*Base;
  3492. Vad->u3.Secured.EndVpn = (ULONG_PTR)MI_VPN_TO_VA_ENDING (Vad->EndingVpn);
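//
// The NoChange/OneSecured settings above secure the entire PEB/TEB
// range so ordinary user-mode protection changes and deletes on it
// are refused; MmDeleteTeb later examines this secure entry before it
// will tear the TEB down.
//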
  3493. LOCK_WS_UNSAFE (TargetProcess);
  3494. Status = MiInsertVad ((PMMVAD) Vad);
  3495. UNLOCK_WS_UNSAFE (TargetProcess);
  3496. #if defined (_IA64_)
  3497. if ((NT_SUCCESS(Status)) && (TargetProcess->Wow64Process != NULL)) {
  3498. MiProtectFor4kPage (*Base,
  3499. ROUND_TO_PAGES (Size),
  3500. MM_READWRITE ,
  3501. ALT_COMMIT,
  3502. TargetProcess);
  3503. }
  3504. #endif
  3505. UNLOCK_ADDRESS_SPACE (TargetProcess);
  3506. if (!NT_SUCCESS(Status)) {
  3507. //
  3508. // A failure has occurred. Deallocate the Vad and return the status.
  3509. //
  3510. ExFreePool (Vad);
  3511. }
  3512. return Status;
  3513. }
  3514. NTSTATUS
  3515. MmCreateTeb (
  3516. IN PEPROCESS TargetProcess,
  3517. IN PINITIAL_TEB InitialTeb,
  3518. IN PCLIENT_ID ClientId,
  3519. OUT PTEB *Base
  3520. )
  3521. /*++
  3522. Routine Description:
  3523. This routine creates a TEB page within the target process
  3524. and copies the initial TEB values into it.
  3525. Arguments:
  3526. TargetProcess - Supplies a pointer to the process in which to create
  3527. and initialize the TEB.
  3528. InitialTeb - Supplies a pointer to the initial TEB to copy into the
  3529. newly created TEB.
  3530. ClientId - Supplies a client ID.
  3531. Base - Supplies a location to return the base of the newly created
  3532. TEB on success.
  3533. Return Value:
  3534. NTSTATUS.
  3535. Environment:
  3536. Kernel mode.
  3537. --*/
  3538. {
  3539. PTEB TebBase;
  3540. NTSTATUS Status;
  3541. ULONG TebSize;
  3542. #if defined(_WIN64)
  3543. PWOW64_PROCESS Wow64Process;
  3544. PTEB32 Teb32Base = NULL;
  3545. #endif
  3546. //
  3547. // Attach to the specified process.
  3548. //
  3549. KeAttachProcess (&TargetProcess->Pcb);
  3550. TebSize = sizeof (TEB);
  3551. #if defined(_WIN64)
  3552. Wow64Process = TargetProcess->Wow64Process;
  3553. if (Wow64Process != NULL) {
  3554. TebSize = ROUND_TO_PAGES (sizeof (TEB)) + sizeof (TEB32);
  3555. }
  3556. #endif
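//
// For Wow64 processes the allocation is large enough to place a TEB32
// on the page boundary immediately after the native TEB; both live in
// the single VAD created by MiCreatePebOrTeb below.
//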
  3557. Status = MiCreatePebOrTeb (TargetProcess, TebSize, (PVOID) &TebBase);
  3558. if (!NT_SUCCESS(Status)) {
  3559. KeDetachProcess();
  3560. return Status;
  3561. }
  3562. //
  3563. // Initialize the TEB. Note accesses to the TEB can raise exceptions
  3564. // if no address space is available for the TEB or the user has exceeded
  3565. // quota (non-paged, pagefile, commit) or the TEB is paged out and an
  3566. // inpage error occurs when fetching it.
  3567. //
  3568. //
  3569. // Note that since the TEB is populated with demand zero pages, only
  3570. // nonzero fields need to be initialized here.
  3571. //
  3572. try {
  3573. #if !defined(_WIN64)
  3574. TebBase->NtTib.ExceptionList = EXCEPTION_CHAIN_END;
  3575. #endif
  3576. //
  3577. // Although various fields must be zero for the process to launch
  3578. // properly, don't assert them as an ordinary user could provoke these
  3579. // by maliciously writing over random addresses in another thread,
  3580. // hoping to nail a just-being-created TEB.
  3581. //
  3582. DONTASSERT (TebBase->NtTib.SubSystemTib == NULL);
  3583. TebBase->NtTib.Version = OS2_VERSION;
  3584. DONTASSERT (TebBase->NtTib.ArbitraryUserPointer == NULL);
  3585. TebBase->NtTib.Self = (PNT_TIB)TebBase;
  3586. DONTASSERT (TebBase->EnvironmentPointer == NULL);
  3587. TebBase->ProcessEnvironmentBlock = TargetProcess->Peb;
  3588. TebBase->ClientId = *ClientId;
  3589. TebBase->RealClientId = *ClientId;
  3590. DONTASSERT (TebBase->ActivationContextStack.Flags == 0);
  3591. DONTASSERT (TebBase->ActivationContextStack.ActiveFrame == NULL);
  3592. InitializeListHead(&TebBase->ActivationContextStack.FrameListCache);
  3593. TebBase->ActivationContextStack.NextCookieSequenceNumber = 1;
  3594. if ((InitialTeb->OldInitialTeb.OldStackBase == NULL) &&
  3595. (InitialTeb->OldInitialTeb.OldStackLimit == NULL)) {
  3596. TebBase->NtTib.StackBase = InitialTeb->StackBase;
  3597. TebBase->NtTib.StackLimit = InitialTeb->StackLimit;
  3598. TebBase->DeallocationStack = InitialTeb->StackAllocationBase;
  3599. #if defined(_IA64_)
  3600. TebBase->BStoreLimit = InitialTeb->BStoreLimit;
  3601. TebBase->DeallocationBStore = (PCHAR)InitialTeb->StackBase
  3602. + ((ULONG_PTR)InitialTeb->StackBase - (ULONG_PTR)InitialTeb->StackAllocationBase);
  3603. #endif
  3604. }
  3605. else {
  3606. TebBase->NtTib.StackBase = InitialTeb->OldInitialTeb.OldStackBase;
  3607. TebBase->NtTib.StackLimit = InitialTeb->OldInitialTeb.OldStackLimit;
  3608. }
  3609. TebBase->StaticUnicodeString.Buffer = TebBase->StaticUnicodeBuffer;
  3610. TebBase->StaticUnicodeString.MaximumLength = (USHORT) sizeof (TebBase->StaticUnicodeBuffer);
  3611. DONTASSERT (TebBase->StaticUnicodeString.Length == 0);
  3612. //
  3613. // Used for BBT of ntdll and kernel32.dll.
  3614. //
  3615. TebBase->ReservedForPerf = BBTBuffer;
  3616. #if defined(_WIN64)
  3617. if (Wow64Process != NULL) {
  3618. Teb32Base = (PTEB32)((PCHAR)TebBase + ROUND_TO_PAGES (sizeof(TEB)));
  3619. Teb32Base->NtTib.ExceptionList = PtrToUlong (EXCEPTION_CHAIN_END);
  3620. Teb32Base->NtTib.Version = TebBase->NtTib.Version;
  3621. Teb32Base->NtTib.Self = PtrToUlong (Teb32Base);
  3622. Teb32Base->ProcessEnvironmentBlock = PtrToUlong (Wow64Process->Wow64);
  3623. Teb32Base->ClientId.UniqueProcess = PtrToUlong (TebBase->ClientId.UniqueProcess);
  3624. Teb32Base->ClientId.UniqueThread = PtrToUlong (TebBase->ClientId.UniqueThread);
  3625. Teb32Base->RealClientId.UniqueProcess = PtrToUlong (TebBase->RealClientId.UniqueProcess);
  3626. Teb32Base->RealClientId.UniqueThread = PtrToUlong (TebBase->RealClientId.UniqueThread);
  3627. Teb32Base->StaticUnicodeString.Buffer = PtrToUlong (Teb32Base->StaticUnicodeBuffer);
  3628. Teb32Base->StaticUnicodeString.MaximumLength = (USHORT)sizeof (Teb32Base->StaticUnicodeBuffer);
  3629. ASSERT (Teb32Base->StaticUnicodeString.Length == 0);
  3630. Teb32Base->GdiBatchCount = PtrToUlong (TebBase);
  3631. Teb32Base->Vdm = PtrToUlong (TebBase->Vdm);
  3632. ASSERT (Teb32Base->ActivationContextStack.Flags == 0);
  3633. Teb32Base->ActivationContextStack.ActiveFrame = PtrToUlong(TebBase->ActivationContextStack.ActiveFrame);
  3634. InitializeListHead32 (&Teb32Base->ActivationContextStack.FrameListCache);
  3635. Teb32Base->ActivationContextStack.NextCookieSequenceNumber = TebBase->ActivationContextStack.NextCookieSequenceNumber;
  3636. }
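//
// On Win64 the native TIB's ExceptionList field is not used for
// frame-based exception chaining, so it carries the TEB32 pointer
// instead (NULL for native 64-bit threads, since Teb32Base was
// initialized to NULL above).
//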
  3637. TebBase->NtTib.ExceptionList = (PVOID)Teb32Base;
  3638. #endif
  3639. } except (EXCEPTION_EXECUTE_HANDLER) {
  3640. //
  3641. // An exception has occurred, inform our caller.
  3642. //
  3643. Status = GetExceptionCode ();
  3644. }
  3645. KeDetachProcess();
  3646. *Base = TebBase;
  3647. return Status;
  3648. }
  3649. //
  3650. // This code is built twice on the Win64 build - once for PE32+
  3651. // and once for PE32 images.
  3652. //
  3653. #define MI_INIT_PEB_FROM_IMAGE(Hdrs, ImgConfig) { \
  3654. PebBase->ImageSubsystem = (Hdrs)->OptionalHeader.Subsystem; \
  3655. PebBase->ImageSubsystemMajorVersion = \
  3656. (Hdrs)->OptionalHeader.MajorSubsystemVersion; \
  3657. PebBase->ImageSubsystemMinorVersion = \
  3658. (Hdrs)->OptionalHeader.MinorSubsystemVersion; \
  3659. \
  3660. /* */ \
  3661. /* See if this image wants GetVersion to lie about who the system is */ \
  3662. /* If so, capture the lie into the PEB for the process. */ \
  3663. /* */ \
  3664. \
  3665. if ((Hdrs)->OptionalHeader.Win32VersionValue != 0) { \
  3666. PebBase->OSMajorVersion = \
  3667. (Hdrs)->OptionalHeader.Win32VersionValue & 0xFF; \
  3668. PebBase->OSMinorVersion = \
  3669. ((Hdrs)->OptionalHeader.Win32VersionValue >> 8) & 0xFF; \
  3670. PebBase->OSBuildNumber = \
  3671. (USHORT)(((Hdrs)->OptionalHeader.Win32VersionValue >> 16) & 0x3FFF); \
  3672. if ((ImgConfig) != NULL && (ImgConfig)->CSDVersion != 0) { \
  3673. PebBase->OSCSDVersion = (ImgConfig)->CSDVersion; \
  3674. } \
  3675. \
  3676. /* Win32 API GetVersion returns the following bogus bit definitions */ \
  3677. /* in the high two bits: */ \
  3678. /* */ \
  3679. /* 00 - Windows NT */ \
  3680. /* 01 - reserved */ \
  3681. /* 10 - Win32s running on Windows 3.x */ \
  3682. /* 11 - Windows 95 */ \
  3683. /* */ \
  3684. /* */ \
  3685. /* Win32 API GetVersionEx returns a dwPlatformId with the following */ \
  3686. /* values defined in winbase.h */ \
  3687. /* */ \
  3688. /* 00 - VER_PLATFORM_WIN32s */ \
  3689. /* 01 - VER_PLATFORM_WIN32_WINDOWS */ \
  3690. /* 10 - VER_PLATFORM_WIN32_NT */ \
  3691. /* 11 - reserved */ \
  3692. /* */ \
  3693. /* */ \
  3694. /* So convert the former from the Win32VersionValue field into the */ \
  3695. /* OSPlatformId field. This is done by XORing with 0x2. The */ \
  3696. /* translation is symmetric so there is the same code to do the */ \
  3697. /* reverse in windows\base\client\module.c (GetVersion) */ \
  3698. /* */ \
  3699. PebBase->OSPlatformId = \
  3700. ((Hdrs)->OptionalHeader.Win32VersionValue >> 30) ^ 0x2; \
  3701. } \
  3702. }
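//
// Illustrative decode of the Win32VersionValue field handled above
// (hypothetical value, not taken from any particular image):
//
//     Win32VersionValue = 0x0A280105
//
//     OSMajorVersion  =  0x0A280105        & 0xFF   = 5
//     OSMinorVersion  = (0x0A280105 >> 8)  & 0xFF   = 1
//     OSBuildNumber   = (0x0A280105 >> 16) & 0x3FFF = 0x0A28 = 2600
//     OSPlatformId    = (0x0A280105 >> 30) ^ 0x2    = 0 ^ 2  = VER_PLATFORM_WIN32_NT
//
// i.e. GetVersion in such a process would report Windows NT 5.1,
// build 2600.
//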
  3703. #if defined(_WIN64)
  3704. NTSTATUS
  3705. MiInitializeWowPeb (
  3706. IN PIMAGE_NT_HEADERS NtHeaders,
  3707. IN PPEB PebBase,
  3708. IN PEPROCESS TargetProcess
  3709. )
  3710. /*++
  3711. Routine Description:
  3712. This routine creates a PEB32 page within the target process
  3713. and copies the initial PEB32 values into it.
  3714. Arguments:
  3715. NtHeaders - Supplies a pointer to the NT headers for the image.
  3716. PebBase - Supplies a pointer to the initial PEB to derive the PEB32 values
  3717. from.
  3718. TargetProcess - Supplies a pointer to the process in which to create
  3719. and initialize the PEB32.
  3720. Return Value:
  3721. NTSTATUS.
  3722. Environment:
  3723. Kernel mode.
  3724. --*/
  3725. {
  3726. NTSTATUS Status;
  3727. ULONG ReturnedSize;
  3728. PPEB32 PebBase32;
  3729. ULONG ProcessAffinityMask;
  3730. PIMAGE_LOAD_CONFIG_DIRECTORY32 ImageConfigData32;
  3731. ProcessAffinityMask = 0;
  3732. ImageConfigData32 = NULL;
  3733. //
  3734. // All references to the Peb and NtHeaders must be wrapped in try-except
  3735. // in case the user has exceeded quota (non-paged, pagefile, commit)
  3736. // or any inpage errors happen for the user addresses, etc.
  3737. //
  3738. //
  3739. // Image is 32-bit.
  3740. //
  3741. try {
  3742. ImageConfigData32 = RtlImageDirectoryEntryToData (
  3743. PebBase->ImageBaseAddress,
  3744. TRUE,
  3745. IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG,
  3746. &ReturnedSize);
  3747. ProbeForReadSmallStructure ((PVOID)ImageConfigData32,
  3748. sizeof (*ImageConfigData32),
  3749. sizeof (ULONG));
  3750. MI_INIT_PEB_FROM_IMAGE ((PIMAGE_NT_HEADERS32)NtHeaders,
  3751. ImageConfigData32);
  3752. if ((ImageConfigData32 != NULL) && (ImageConfigData32->ProcessAffinityMask != 0)) {
  3753. ProcessAffinityMask = ImageConfigData32->ProcessAffinityMask;
  3754. }
  3755. } except (EXCEPTION_EXECUTE_HANDLER) {
  3756. return STATUS_INVALID_IMAGE_PROTECT;
  3757. }
  3758. //
  3759. // Create a PEB32 for the process.
  3760. //
  3761. Status = MiCreatePebOrTeb (TargetProcess,
  3762. (ULONG)sizeof (PEB32),
  3763. (PVOID)&PebBase32);
  3764. if (!NT_SUCCESS(Status)) {
  3765. return Status;
  3766. }
  3767. //
  3768. // Mark the process as WOW64 by storing the 32-bit PEB pointer
  3769. // in the Wow64 field.
  3770. //
  3771. TargetProcess->Wow64Process->Wow64 = PebBase32;
  3772. //
  3773. // Clone the PEB into the PEB32.
  3774. //
  3775. try {
  3776. PebBase32->InheritedAddressSpace = PebBase->InheritedAddressSpace;
  3777. PebBase32->Mutant = PtrToUlong(PebBase->Mutant);
  3778. PebBase32->ImageBaseAddress = PtrToUlong(PebBase->ImageBaseAddress);
  3779. PebBase32->AnsiCodePageData = PtrToUlong(PebBase->AnsiCodePageData);
  3780. PebBase32->OemCodePageData = PtrToUlong(PebBase->OemCodePageData);
  3781. PebBase32->UnicodeCaseTableData = PtrToUlong(PebBase->UnicodeCaseTableData);
  3782. PebBase32->NumberOfProcessors = PebBase->NumberOfProcessors;
  3783. PebBase32->BeingDebugged = PebBase->BeingDebugged;
  3784. PebBase32->NtGlobalFlag = PebBase->NtGlobalFlag;
  3785. PebBase32->CriticalSectionTimeout = PebBase->CriticalSectionTimeout;
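//
// The native heap reserve/commit defaults may be too large to be
// meaningful in a 32-bit address space, so clamp them (1MB reserve,
// two pages commit) when they exceed what the PEB32 fields can
// sensibly hold.
//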
  3786. if (PebBase->HeapSegmentReserve > 1024*1024*1024) { // 1GB
  3787. PebBase32->HeapSegmentReserve = 1024*1024; // 1MB
  3788. }
  3789. else {
  3790. PebBase32->HeapSegmentReserve = (ULONG)PebBase->HeapSegmentReserve;
  3791. }
  3792. if (PebBase->HeapSegmentCommit > PebBase32->HeapSegmentReserve) {
  3793. PebBase32->HeapSegmentCommit = 2*PAGE_SIZE;
  3794. }
  3795. else {
  3796. PebBase32->HeapSegmentCommit = (ULONG)PebBase->HeapSegmentCommit;
  3797. }
  3798. PebBase32->HeapDeCommitTotalFreeThreshold = (ULONG)PebBase->HeapDeCommitTotalFreeThreshold;
  3799. PebBase32->HeapDeCommitFreeBlockThreshold = (ULONG)PebBase->HeapDeCommitFreeBlockThreshold;
  3800. PebBase32->NumberOfHeaps = PebBase->NumberOfHeaps;
  3801. PebBase32->MaximumNumberOfHeaps = (PAGE_SIZE - sizeof(PEB32)) / sizeof(ULONG);
  3802. PebBase32->ProcessHeaps = PtrToUlong(PebBase32+1);
  3803. PebBase32->OSMajorVersion = PebBase->OSMajorVersion;
  3804. PebBase32->OSMinorVersion = PebBase->OSMinorVersion;
  3805. PebBase32->OSBuildNumber = PebBase->OSBuildNumber;
  3806. PebBase32->OSPlatformId = PebBase->OSPlatformId;
  3807. PebBase32->OSCSDVersion = PebBase->OSCSDVersion;
  3808. PebBase32->ImageSubsystem = PebBase->ImageSubsystem;
  3809. PebBase32->ImageSubsystemMajorVersion = PebBase->ImageSubsystemMajorVersion;
  3810. PebBase32->ImageSubsystemMinorVersion = PebBase->ImageSubsystemMinorVersion;
  3811. PebBase32->SessionId = MmGetSessionId (TargetProcess);
  3812. DONTASSERT (PebBase32->pShimData == 0);
  3813. DONTASSERT (PebBase32->AppCompatFlags.QuadPart == 0);
  3814. //
  3815. // Leave the AffinityMask in the 32bit PEB as zero and let the
  3816. // 64bit NTDLL set the initial mask. This is to allow the
3817. // round robin scheduling of non-MP-safe images in the
  3818. // caller to work correctly.
  3819. //
  3820. // Later code will set the affinity mask in the PEB32 if the
  3821. // image actually specifies one.
  3822. //
  3823. // Note that the AffinityMask in the PEB is simply a mechanism
  3824. // to pass affinity information from the image to the loader.
  3825. //
  3826. // Pass the affinity mask up to the 32 bit NTDLL via
  3827. // the PEB32. The 32 bit NTDLL will determine that the
  3828. // affinity is not zero and try to set the affinity
  3829. // mask from user-mode. This call will be intercepted
  3830. // by the wow64 thunks which will convert it
  3831. // into a 64bit affinity mask and call the kernel.
  3832. //
  3833. PebBase32->ImageProcessAffinityMask = ProcessAffinityMask;
  3834. DONTASSERT (PebBase32->ActivationContextData == 0);
  3835. DONTASSERT (PebBase32->SystemDefaultActivationContextData == 0);
  3836. } except (EXCEPTION_EXECUTE_HANDLER) {
  3837. Status = GetExceptionCode ();
  3838. }
  3839. return Status;
  3840. }
  3841. #endif
  3842. NTSTATUS
  3843. MmCreatePeb (
  3844. IN PEPROCESS TargetProcess,
  3845. IN PINITIAL_PEB InitialPeb,
  3846. OUT PPEB *Base
  3847. )
  3848. /*++
  3849. Routine Description:
  3850. This routine creates a PEB page within the target process
  3851. and copies the initial PEB values into it.
  3852. Arguments:
  3853. TargetProcess - Supplies a pointer to the process in which to create
  3854. and initialize the PEB.
  3855. InitialPeb - Supplies a pointer to the initial PEB to copy into the
  3856. newly created PEB.
  3857. Base - Supplies a location to return the base of the newly created
  3858. PEB on success.
  3859. Return Value:
  3860. NTSTATUS.
  3861. Environment:
  3862. Kernel mode.
  3863. --*/
  3864. {
  3865. PPEB PebBase;
  3866. USHORT Magic;
  3867. USHORT Characteristics;
  3868. NTSTATUS Status;
  3869. PVOID ViewBase;
  3870. LARGE_INTEGER SectionOffset;
  3871. PIMAGE_NT_HEADERS NtHeaders;
  3872. SIZE_T ViewSize;
  3873. ULONG ReturnedSize;
  3874. PIMAGE_LOAD_CONFIG_DIRECTORY ImageConfigData;
  3875. ULONG_PTR ProcessAffinityMask;
  3876. ViewBase = NULL;
  3877. SectionOffset.LowPart = 0;
  3878. SectionOffset.HighPart = 0;
  3879. ViewSize = 0;
  3880. //
  3881. // Attach to the specified process.
  3882. //
  3883. KeAttachProcess (&TargetProcess->Pcb);
  3884. //
  3885. // Map the NLS tables into the application's address space.
  3886. //
  3887. Status = MmMapViewOfSection (InitNlsSectionPointer,
  3888. TargetProcess,
  3889. &ViewBase,
  3890. 0L,
  3891. 0L,
  3892. &SectionOffset,
  3893. &ViewSize,
  3894. ViewShare,
  3895. MEM_TOP_DOWN | SEC_NO_CHANGE,
  3896. PAGE_READONLY);
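//
// The NLS view is mapped read-only at a top-down address and with
// SEC_NO_CHANGE, so the process cannot later change the protection on
// these shared tables.
//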
  3897. if (!NT_SUCCESS(Status)) {
  3898. KeDetachProcess ();
  3899. return Status;
  3900. }
  3901. Status = MiCreatePebOrTeb (TargetProcess, sizeof(PEB), (PVOID)&PebBase);
  3902. if (!NT_SUCCESS(Status)) {
  3903. KeDetachProcess ();
  3904. return Status;
  3905. }
  3906. //
  3907. // Initialize the Peb. Every reference to the Peb
  3908. // must be wrapped in try-except in case the inpage fails. The inpage
  3909. // can fail for any reason including network failures, disk errors,
  3910. // low resources, etc.
  3911. //
  3912. try {
  3913. PebBase->InheritedAddressSpace = InitialPeb->InheritedAddressSpace;
  3914. PebBase->Mutant = InitialPeb->Mutant;
  3915. PebBase->ImageBaseAddress = TargetProcess->SectionBaseAddress;
  3916. PebBase->AnsiCodePageData = (PVOID)((PUCHAR)ViewBase+InitAnsiCodePageDataOffset);
  3917. PebBase->OemCodePageData = (PVOID)((PUCHAR)ViewBase+InitOemCodePageDataOffset);
  3918. PebBase->UnicodeCaseTableData = (PVOID)((PUCHAR)ViewBase+InitUnicodeCaseTableDataOffset);
  3919. PebBase->NumberOfProcessors = KeNumberProcessors;
  3920. PebBase->BeingDebugged = (BOOLEAN)(TargetProcess->DebugPort != NULL ? TRUE : FALSE);
  3921. PebBase->NtGlobalFlag = NtGlobalFlag;
  3922. PebBase->CriticalSectionTimeout = MmCriticalSectionTimeout;
  3923. PebBase->HeapSegmentReserve = MmHeapSegmentReserve;
  3924. PebBase->HeapSegmentCommit = MmHeapSegmentCommit;
  3925. PebBase->HeapDeCommitTotalFreeThreshold = MmHeapDeCommitTotalFreeThreshold;
  3926. PebBase->HeapDeCommitFreeBlockThreshold = MmHeapDeCommitFreeBlockThreshold;
  3927. DONTASSERT (PebBase->NumberOfHeaps == 0);
  3928. PebBase->MaximumNumberOfHeaps = (PAGE_SIZE - sizeof (PEB)) / sizeof( PVOID);
  3929. PebBase->ProcessHeaps = (PVOID *)(PebBase+1);
  3930. PebBase->OSMajorVersion = NtMajorVersion;
  3931. PebBase->OSMinorVersion = NtMinorVersion;
  3932. PebBase->OSBuildNumber = (USHORT)(NtBuildNumber & 0x3FFF);
  3933. PebBase->OSPlatformId = 2; // VER_PLATFORM_WIN32_NT from winbase.h
  3934. PebBase->OSCSDVersion = (USHORT)CmNtCSDVersion;
  3935. DONTASSERT (PebBase->pShimData == 0);
  3936. DONTASSERT (PebBase->AppCompatFlags.QuadPart == 0);
  3937. DONTASSERT (PebBase->ActivationContextData == NULL);
  3938. DONTASSERT (PebBase->SystemDefaultActivationContextData == NULL);
  3939. if (TargetProcess->Session != NULL) {
  3940. PebBase->SessionId = MmGetSessionId (TargetProcess);
  3941. }
  3942. PebBase->MinimumStackCommit = (SIZE_T)MmMinimumStackCommitInBytes;
  3943. } except (EXCEPTION_EXECUTE_HANDLER) {
  3944. KeDetachProcess();
  3945. return GetExceptionCode ();
  3946. }
  3947. //
  3948. // Every reference to NtHeaders (including the call to RtlImageNtHeader)
  3949. // must be wrapped in try-except in case the inpage fails. The inpage
  3950. // can fail for any reason including network failures, disk errors,
  3951. // low resources, etc.
  3952. //
  3953. try {
  3954. NtHeaders = RtlImageNtHeader (PebBase->ImageBaseAddress);
  3955. Magic = NtHeaders->OptionalHeader.Magic;
  3956. Characteristics = NtHeaders->FileHeader.Characteristics;
  3957. } except (EXCEPTION_EXECUTE_HANDLER) {
  3958. KeDetachProcess();
  3959. return STATUS_INVALID_IMAGE_PROTECT;
  3960. }
  3961. if (NtHeaders != NULL) {
  3962. ProcessAffinityMask = 0;
  3963. #if defined(_WIN64)
  3964. if (TargetProcess->Wow64Process) {
  3965. Status = MiInitializeWowPeb (NtHeaders, PebBase, TargetProcess);
  3966. if (!NT_SUCCESS(Status)) {
  3967. KeDetachProcess ();
  3968. return Status;
  3969. }
  3970. }
  3971. else // a PE32+ image
  3972. #endif
  3973. {
  3974. try {
  3975. ImageConfigData = RtlImageDirectoryEntryToData (
  3976. PebBase->ImageBaseAddress,
  3977. TRUE,
  3978. IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG,
  3979. &ReturnedSize);
  3980. ProbeForReadSmallStructure ((PVOID)ImageConfigData,
  3981. sizeof (*ImageConfigData),
  3982. PROBE_ALIGNMENT (IMAGE_LOAD_CONFIG_DIRECTORY));
  3983. MI_INIT_PEB_FROM_IMAGE(NtHeaders, ImageConfigData);
  3984. if (ImageConfigData != NULL && ImageConfigData->ProcessAffinityMask != 0) {
  3985. ProcessAffinityMask = ImageConfigData->ProcessAffinityMask;
  3986. }
  3987. } except (EXCEPTION_EXECUTE_HANDLER) {
  3988. KeDetachProcess();
  3989. return STATUS_INVALID_IMAGE_PROTECT;
  3990. }
  3991. }
  3992. //
  3993. // Note NT4 examined the NtHeaders->FileHeader.Characteristics
  3994. // for the IMAGE_FILE_AGGRESIVE_WS_TRIM bit, but this is not needed
  3995. // or used for NT5 and above.
  3996. //
  3997. //
  3998. // See if image wants to override the default processor affinity mask.
  3999. //
  4000. try {
  4001. if (Characteristics & IMAGE_FILE_UP_SYSTEM_ONLY) {
  4002. //
  4003. // Image is NOT MP safe. Assign it a processor on a rotating
  4004. // basis to spread these processes around on MP systems.
  4005. //
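// The do/while below keeps rotating until the chosen bit falls on a
// processor that is actually present in KeActiveProcessors.
//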
  4006. do {
  4007. PebBase->ImageProcessAffinityMask = (KAFFINITY)(0x1 << MmRotatingUniprocessorNumber);
  4008. if (++MmRotatingUniprocessorNumber >= KeNumberProcessors) {
  4009. MmRotatingUniprocessorNumber = 0;
  4010. }
  4011. } while ((PebBase->ImageProcessAffinityMask & KeActiveProcessors) == 0);
  4012. }
  4013. else {
  4014. if (ProcessAffinityMask != 0) {
  4015. //
  4016. // Pass the affinity mask from the image header
  4017. // to LdrpInitializeProcess via the PEB.
  4018. //
  4019. PebBase->ImageProcessAffinityMask = ProcessAffinityMask;
  4020. }
  4021. }
  4022. } except (EXCEPTION_EXECUTE_HANDLER) {
  4023. KeDetachProcess();
  4024. return STATUS_INVALID_IMAGE_PROTECT;
  4025. }
  4026. }
  4027. KeDetachProcess();
  4028. *Base = PebBase;
  4029. return STATUS_SUCCESS;
  4030. }
  4031. VOID
  4032. MmDeleteTeb (
  4033. IN PEPROCESS TargetProcess,
  4034. IN PVOID TebBase
  4035. )
  4036. /*++
  4037. Routine Description:
  4038. This routine deletes a TEB page within the target process.
  4039. Arguments:
  4040. TargetProcess - Supplies a pointer to the process in which to delete
  4041. the TEB.
  4042. TebBase - Supplies the base address of the TEB to delete.
  4043. Return Value:
  4044. None.
  4045. Environment:
  4046. Kernel mode.
  4047. --*/
  4048. {
  4049. PVOID EndingAddress;
  4050. PMMVAD_LONG Vad;
  4051. NTSTATUS Status;
  4052. PMMSECURE_ENTRY Secure;
  4053. PMMVAD PreviousVad;
  4054. PMMVAD NextVad;
  4055. EndingAddress = ((PCHAR)TebBase +
  4056. ROUND_TO_PAGES (sizeof(TEB)) - 1);
  4057. #if defined(_WIN64)
  4058. if (TargetProcess->Wow64Process) {
  4059. EndingAddress = ((PCHAR)EndingAddress + ROUND_TO_PAGES (sizeof(TEB32)));
  4060. }
  4061. #endif
  4062. //
  4063. // Attach to the specified process.
  4064. //
  4065. KeAttachProcess (&TargetProcess->Pcb);
  4066. //
  4067. // Get the address creation mutex to block multiple threads from
  4068. // creating or deleting address space at the same time and
  4069. // get the working set mutex so virtual address descriptors can
  4070. // be inserted and walked.
  4071. //
  4072. LOCK_ADDRESS_SPACE (TargetProcess);
  4073. Vad = (PMMVAD_LONG) MiLocateAddress (TebBase);
  4074. ASSERT (Vad != NULL);
  4075. ASSERT ((Vad->StartingVpn == MI_VA_TO_VPN (TebBase)) &&
  4076. (Vad->EndingVpn == MI_VA_TO_VPN (EndingAddress)));
  4077. #if defined(_MIALT4K_)
  4078. ASSERT (Vad->AliasInformation == NULL);
  4079. #endif
  4080. //
  4081. // If someone has secured the TEB (in addition to the standard securing
4082. // that was done by memory management on creation), then don't delete it
  4083. // now - just leave it around until the entire process is deleted.
  4084. //
  4085. ASSERT (Vad->u.VadFlags.NoChange == 1);
  4086. if (Vad->u2.VadFlags2.OneSecured) {
  4087. Status = STATUS_SUCCESS;
  4088. }
  4089. else {
  4090. ASSERT (Vad->u2.VadFlags2.MultipleSecured);
  4091. ASSERT (IsListEmpty (&Vad->u3.List) == 0);
  4092. //
  4093. // If there's only one entry, then that's the one we defined when we
  4094. // initially created the TEB. So TEB deletion can take place right
  4095. // now. If there's more than one entry, let the TEB sit around until
  4096. // the process goes away.
  4097. //
  4098. Secure = CONTAINING_RECORD (Vad->u3.List.Flink,
  4099. MMSECURE_ENTRY,
  4100. List);
  4101. if (Secure->List.Flink == &Vad->u3.List) {
  4102. Status = STATUS_SUCCESS;
  4103. }
  4104. else {
  4105. Status = STATUS_NOT_FOUND;
  4106. }
  4107. }
  4108. if (NT_SUCCESS(Status)) {
  4109. PreviousVad = MiGetPreviousVad (Vad);
  4110. NextVad = MiGetNextVad (Vad);
  4111. LOCK_WS_UNSAFE (TargetProcess);
  4112. MiRemoveVad ((PMMVAD)Vad);
  4113. //
  4114. // Return commitment for page table pages and clear VAD bitmaps
  4115. // if possible.
  4116. //
  4117. MiReturnPageTablePageCommitment (TebBase,
  4118. EndingAddress,
  4119. TargetProcess,
  4120. PreviousVad,
  4121. NextVad);
  4122. MiDeleteFreeVm (TebBase, EndingAddress);
  4123. UNLOCK_WS_AND_ADDRESS_SPACE (TargetProcess);
  4124. ExFreePool (Vad);
  4125. }
  4126. else {
  4127. UNLOCK_ADDRESS_SPACE (TargetProcess);
  4128. }
  4129. KeDetachProcess();
  4130. }
  4131. VOID
  4132. MmAllowWorkingSetExpansion (
  4133. VOID
  4134. )
  4135. /*++
  4136. Routine Description:
4137. This routine marks the process as allowing working set adjustment and,
4138. if necessary, places it on the working set expansion list.
  4139. NOTE: This routine may be called more than once per process.
  4140. Arguments:
  4141. None.
  4142. Return Value:
  4143. None.
  4144. Environment:
  4145. Kernel mode.
  4146. --*/
  4147. {
  4148. PEPROCESS CurrentProcess;
  4149. KIRQL OldIrql;
  4150. //
  4151. // Check the current state of the working set adjustment flag
  4152. // in the process header.
  4153. //
  4154. CurrentProcess = PsGetCurrentProcess();
  4155. LOCK_EXPANSION (OldIrql);
  4156. if (!CurrentProcess->Vm.Flags.AllowWorkingSetAdjustment) {
  4157. CurrentProcess->Vm.Flags.AllowWorkingSetAdjustment = TRUE;
  4158. InsertTailList (&MmWorkingSetExpansionHead.ListHead,
  4159. &CurrentProcess->Vm.WorkingSetExpansionLinks);
  4160. }
  4161. UNLOCK_EXPANSION (OldIrql);
  4162. return;
  4163. }
  4164. #if DBG
  4165. ULONG MiDeleteLocked;
  4166. #endif
  4167. VOID
  4168. MiDeleteAddressesInWorkingSet (
  4169. IN PEPROCESS Process
  4170. )
  4171. /*++
  4172. Routine Description:
  4173. This routine deletes all user mode addresses from the working set
  4174. list.
  4175. Arguments:
4176. Process - Supplies a pointer to the current process.
  4177. Return Value:
  4178. None.
  4179. Environment:
  4180. Kernel mode, Working Set Lock held.
  4181. --*/
  4182. {
  4183. PMMWSLE Wsle;
  4184. WSLE_NUMBER index;
  4185. WSLE_NUMBER Entry;
  4186. PVOID Va;
  4187. #if DBG
  4188. PVOID SwapVa;
  4189. PMMPTE PointerPte;
  4190. PMMPFN Pfn1;
  4191. PMMWSLE LastWsle;
  4192. #endif
  4193. //
  4194. // Go through the working set and for any user-accessible page which is
  4195. // in it, rip it out of the working set and free the page.
  4196. //
  4197. index = 2;
  4198. Wsle = &MmWsle[index];
  4199. MmWorkingSetList->HashTable = NULL;
  4200. //
  4201. // Go through the working set list and remove all pages for user
  4202. // space addresses.
  4203. //
  4204. while (index <= MmWorkingSetList->LastEntry) {
  4205. if (Wsle->u1.e1.Valid == 1) {
  4206. #if (_MI_PAGING_LEVELS >= 4)
  4207. ASSERT(MiGetPxeAddress(Wsle->u1.VirtualAddress)->u.Hard.Valid == 1);
  4208. #endif
  4209. #if (_MI_PAGING_LEVELS >= 3)
  4210. ASSERT(MiGetPpeAddress(Wsle->u1.VirtualAddress)->u.Hard.Valid == 1);
  4211. #endif
  4212. ASSERT(MiGetPdeAddress(Wsle->u1.VirtualAddress)->u.Hard.Valid == 1);
  4213. ASSERT(MiGetPteAddress(Wsle->u1.VirtualAddress)->u.Hard.Valid == 1);
  4214. if (Wsle->u1.VirtualAddress < (PVOID)MM_HIGHEST_USER_ADDRESS) {
  4215. //
  4216. // This is a user mode address, for each one we remove we must
  4217. // maintain the NonDirectCount. This is because we may fault
  4218. // later for page tables and need to grow the hash table when
  4219. // updating the working set. NonDirectCount needs to be correct
  4220. // at that point.
  4221. //
  4222. if (Wsle->u1.e1.Direct == 0) {
  4223. Process->Vm.VmWorkingSetList->NonDirectCount -= 1;
  4224. }
  4225. //
  4226. // This entry is in the working set list.
  4227. //
  4228. Va = Wsle->u1.VirtualAddress;
  4229. MiReleaseWsle (index, &Process->Vm);
  4230. MiDeleteValidAddress (Va, Process);
  4231. if (index < MmWorkingSetList->FirstDynamic) {
  4232. //
  4233. // This entry is locked.
  4234. //
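// Shrink the locked region and, if this was not already the last
// locked slot, swap the last locked entry into the freed slot so
// that locked entries stay contiguous below FirstDynamic.
//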
  4235. MmWorkingSetList->FirstDynamic -= 1;
  4236. if (index != MmWorkingSetList->FirstDynamic) {
  4237. Entry = MmWorkingSetList->FirstDynamic;
  4238. #if DBG
  4239. MiDeleteLocked += 1;
  4240. SwapVa = MmWsle[MmWorkingSetList->FirstDynamic].u1.VirtualAddress;
  4241. SwapVa = PAGE_ALIGN (SwapVa);
  4242. PointerPte = MiGetPteAddress (SwapVa);
  4243. Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
  4244. ASSERT (Entry == MiLocateWsle (SwapVa, MmWorkingSetList, Pfn1->u1.WsIndex));
  4245. #endif
  4246. MiSwapWslEntries (Entry, index, &Process->Vm);
  4247. }
  4248. }
  4249. }
  4250. }
  4251. index += 1;
  4252. Wsle += 1;
  4253. }
  4254. #if DBG
  4255. Wsle = &MmWsle[2];
  4256. LastWsle = &MmWsle[MmWorkingSetList->LastInitializedWsle];
  4257. while (Wsle <= LastWsle) {
  4258. if (Wsle->u1.e1.Valid == 1) {
  4259. #if (_MI_PAGING_LEVELS >= 4)
  4260. ASSERT(MiGetPxeAddress(Wsle->u1.VirtualAddress)->u.Hard.Valid == 1);
  4261. #endif
  4262. #if (_MI_PAGING_LEVELS >= 3)
  4263. ASSERT(MiGetPpeAddress(Wsle->u1.VirtualAddress)->u.Hard.Valid == 1);
  4264. #endif
  4265. ASSERT(MiGetPdeAddress(Wsle->u1.VirtualAddress)->u.Hard.Valid == 1);
  4266. ASSERT(MiGetPteAddress(Wsle->u1.VirtualAddress)->u.Hard.Valid == 1);
  4267. }
  4268. Wsle += 1;
  4269. }
  4270. #endif
  4271. }
  4272. VOID
  4273. MiDeleteValidAddress (
  4274. IN PVOID Va,
  4275. IN PEPROCESS CurrentProcess
  4276. )
  4277. /*++
  4278. Routine Description:
  4279. This routine deletes the specified virtual address.
  4280. Arguments:
  4281. Va - Supplies the virtual address to delete.
  4282. CurrentProcess - Supplies the current process.
  4283. Return Value:
  4284. None.
  4285. Environment:
  4286. Kernel mode. PFN LOCK HELD.
  4287. Note since this is only called during process teardown, the write watch
4288. bits are not updated. If this is ever called from other places, code
  4289. will need to be added here to update those bits.
  4290. --*/
  4291. {
  4292. PMMPTE PointerPde;
  4293. PMMPTE PointerPte;
  4294. PMMPFN Pfn1;
  4295. PMMPFN Pfn2;
  4296. KIRQL OldIrql;
  4297. PMMCLONE_BLOCK CloneBlock;
  4298. PMMCLONE_DESCRIPTOR CloneDescriptor;
  4299. PFN_NUMBER PageFrameIndex;
  4300. PFN_NUMBER PageTableFrameIndex;
  4301. CloneDescriptor = NULL;
  4302. //
  4303. // Initializing CloneBlock is not needed for correctness
4304. // but without it the compiler warns at /W4 about a
4305. // potentially uninitialized variable.
  4306. //
  4307. CloneBlock = NULL;
  4308. PointerPte = MiGetPteAddress (Va);
  4309. LOCK_PFN (OldIrql);
  4310. #if (_MI_PAGING_LEVELS >= 4)
  4311. ASSERT(MiGetPxeAddress(Va)->u.Hard.Valid == 1);
  4312. #endif
  4313. #if (_MI_PAGING_LEVELS >= 3)
  4314. ASSERT(MiGetPpeAddress(Va)->u.Hard.Valid == 1);
  4315. #endif
  4316. ASSERT(MiGetPdeAddress(Va)->u.Hard.Valid == 1);
  4317. ASSERT (PointerPte->u.Hard.Valid == 1);
  4318. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
  4319. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  4320. if (Pfn1->u3.e1.PrototypePte == 1) {
  4321. CloneBlock = (PMMCLONE_BLOCK)Pfn1->PteAddress;
  4322. //
  4323. // Capture the state of the modified bit for this PTE.
  4324. //
  4325. MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1);
  4326. //
  4327. // Decrement the share and valid counts of the page table
  4328. // page which maps this PTE.
  4329. //
  4330. PointerPde = MiGetPteAddress (PointerPte);
  4331. PageTableFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPde);
  4332. Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);
  4333. MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);
  4334. //
  4335. // Decrement the share count for the physical page.
  4336. //
  4337. MiDecrementShareCount (PageFrameIndex);
  4338. //
  4339. // Check to see if this is a fork prototype PTE and if so
  4340. // update the clone descriptor address.
  4341. //
  4342. if (Va <= MM_HIGHEST_USER_ADDRESS) {
  4343. //
  4344. // Locate the clone descriptor within the clone tree.
  4345. //
  4346. CloneDescriptor = MiLocateCloneAddress (CurrentProcess, (PVOID)CloneBlock);
  4347. }
  4348. }
  4349. else {
  4350. //
  4351. // This PTE is a NOT a prototype PTE, delete the physical page.
  4352. //
  4353. //
  4354. // Decrement the share and valid counts of the page table
  4355. // page which maps this PTE.
  4356. //
  4357. PageTableFrameIndex = Pfn1->u4.PteFrame;
  4358. Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);
  4359. MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);
  4360. MI_SET_PFN_DELETED (Pfn1);
  4361. //
  4362. // Decrement the share count for the physical page. As the page
  4363. // is private it will be put on the free list.
  4364. //
  4365. MiDecrementShareCountOnly (PageFrameIndex);
  4366. //
  4367. // Decrement the count for the number of private pages.
  4368. //
  4369. CurrentProcess->NumberOfPrivatePages -= 1;
  4370. }
  4371. //
4372. // Set the PTE to a demand zero PTE. This allows
  4373. // the page usage count to be kept properly and handles the case
  4374. // when a page table page has only valid PTEs and needs to be
  4375. // deleted later when the VADs are removed.
  4376. //
  4377. PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
  4378. if (CloneDescriptor != NULL) {
  4379. //
  4380. // Decrement the reference count for the clone block,
  4381. // note that this could release and reacquire
  4382. // the mutexes hence cannot be done until after the
  4383. // working set index has been removed.
  4384. //
  4385. if (MiDecrementCloneBlockReference (CloneDescriptor,
  4386. CloneBlock,
  4387. CurrentProcess)) {
  4388. }
  4389. }
  4390. UNLOCK_PFN (OldIrql);
  4391. }
  4392. PFN_NUMBER
  4393. MiMakeOutswappedPageResident (
  4394. IN PMMPTE ActualPteAddress,
  4395. IN OUT PMMPTE PointerTempPte,
  4396. IN ULONG Global,
  4397. IN PFN_NUMBER ContainingPage
  4398. )
  4399. /*++
  4400. Routine Description:
  4401. This routine makes the specified PTE valid.
  4402. Arguments:
  4403. ActualPteAddress - Supplies the actual address that the PTE will
  4404. reside at. This is used for page coloring.
  4405. PointerTempPte - Supplies the PTE to operate on, returns a valid
  4406. PTE.
  4407. Global - Supplies 1 if the resulting PTE is global.
  4408. ContainingPage - Supplies the physical page number of the page which
  4409. contains the resulting PTE. If this value is 0, no
  4410. operations on the containing page are performed.
  4411. Return Value:
  4412. Returns the physical page number that was allocated for the PTE.
  4413. Environment:
  4414. Kernel mode, PFN LOCK HELD!
  4415. --*/
  4416. {
  4417. MMPTE TempPte;
  4418. KIRQL OldIrql;
  4419. PFN_NUMBER PageFrameIndex;
  4420. PMMPFN Pfn1;
  4421. PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + 1];
  4422. PMDL Mdl;
  4423. LARGE_INTEGER StartingOffset;
  4424. KEVENT Event;
  4425. IO_STATUS_BLOCK IoStatus;
  4426. PFN_NUMBER PageFileNumber;
  4427. NTSTATUS Status;
  4428. PPFN_NUMBER Page;
  4429. ULONG RefaultCount;
  4430. #if DBG
  4431. PVOID HyperVa;
  4432. PEPROCESS CurrentProcess;
  4433. #endif
  4434. MM_PFN_LOCK_ASSERT();
  4435. #if defined (_IA64_)
  4436. UNREFERENCED_PARAMETER (Global);
  4437. #endif
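//
// Three cases are handled below, distinguished by the contents of the
// temporary PTE: a kernel demand zero PTE (any free page will do), a
// transition PTE (the old physical page is still in the PFN database and
// is reclaimed), and a paging file PTE (a new page is allocated and its
// contents are read back from the paging file).
//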
  4438. restart:
  4439. OldIrql = APC_LEVEL;
  4440. ASSERT (PointerTempPte->u.Hard.Valid == 0);
  4441. if (PointerTempPte->u.Long == MM_KERNEL_DEMAND_ZERO_PTE) {
  4442. //
  4443. // Any page will do.
  4444. //
  4445. MiEnsureAvailablePageOrWait (NULL, NULL);
  4446. PageFrameIndex = MiRemoveAnyPage (
  4447. MI_GET_PAGE_COLOR_FROM_PTE (ActualPteAddress));
  4448. MI_MAKE_VALID_PTE (TempPte,
  4449. PageFrameIndex,
  4450. MM_READWRITE,
  4451. ActualPteAddress);
  4452. MI_SET_PTE_DIRTY (TempPte);
  4453. MI_SET_GLOBAL_STATE (TempPte, Global);
  4454. MI_WRITE_VALID_PTE (PointerTempPte, TempPte);
  4455. MiInitializePfnForOtherProcess (PageFrameIndex,
  4456. ActualPteAddress,
  4457. ContainingPage);
  4458. }
  4459. else if (PointerTempPte->u.Soft.Transition == 1) {
  4460. PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (PointerTempPte);
  4461. PointerTempPte->u.Trans.Protection = MM_READWRITE;
  4462. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  4463. ASSERT (Pfn1->u3.e1.CacheAttribute == MiCached);
  4464. if ((MmAvailablePages == 0) ||
  4465. ((Pfn1->u4.InPageError == 1) && (Pfn1->u3.e1.ReadInProgress == 1))) {
  4466. //
  4467. // This can only happen if the system is utilizing a hardware
  4468. // compression cache. This ensures that only a safe amount
  4469. // of the compressed virtual cache is directly mapped so that
  4470. // if the hardware gets into trouble, we can bail it out.
  4471. //
  4472. UNLOCK_PFN (OldIrql);
  4473. KeDelayExecutionThread (KernelMode,
  4474. FALSE,
  4475. (PLARGE_INTEGER)&MmHalfSecond);
  4476. LOCK_PFN (OldIrql);
  4477. goto restart;
  4478. }
  4479. //
  4480. // PTE refers to a transition PTE.
  4481. //
  4482. if (Pfn1->u3.e1.PageLocation != ActiveAndValid) {
  4483. MiUnlinkPageFromList (Pfn1);
  4484. //
  4485. // Even though this routine is only used to bring in special
  4486. // system pages that are separately charged, a modified write
  4487. // may be in progress and if so, will have applied a systemwide
  4488. // charge against the locked pages count. This all works out nicely
  4489. // (with no code needed here) as the write completion will see
  4490. // the nonzero ShareCount and remove the charge.
  4491. //
  4492. ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) ||
  4493. (Pfn1->u3.e1.LockCharged == 1));
  4494. Pfn1->u3.e2.ReferenceCount += 1;
  4495. Pfn1->u3.e1.PageLocation = ActiveAndValid;
  4496. }
  4497. //
  4498. // Update the PFN database, the share count is now 1 and
  4499. // the reference count is incremented as the share count
  4500. // just went from zero to 1.
  4501. //
  4502. Pfn1->u2.ShareCount += 1;
  4503. MI_SET_MODIFIED (Pfn1, 1, 0x12);
  4504. if (Pfn1->u3.e1.WriteInProgress == 0) {
  4505. //
  4506. // Release the page file space for this page.
  4507. //
  4508. MiReleasePageFileSpace (Pfn1->OriginalPte);
  4509. Pfn1->OriginalPte.u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
  4510. }
  4511. MI_MAKE_TRANSITION_PTE_VALID (TempPte, PointerTempPte);
  4512. MI_SET_PTE_DIRTY (TempPte);
  4513. MI_SET_GLOBAL_STATE (TempPte, Global);
  4514. MI_WRITE_VALID_PTE (PointerTempPte, TempPte);
  4515. }
  4516. else {
  4517. //
  4518. // Page resides in a paging file.
  4519. // Any page will do.
  4520. //
  4521. MiEnsureAvailablePageOrWait (NULL, NULL);
  4522. PageFrameIndex = MiRemoveAnyPage (
  4523. MI_GET_PAGE_COLOR_FROM_PTE (ActualPteAddress));
  4524. //
  4525. // Initialize the PFN database element, but don't
  4526. // set read in progress as collided page faults cannot
  4527. // occur here.
  4528. //
  4529. MiInitializePfnForOtherProcess (PageFrameIndex,
  4530. ActualPteAddress,
  4531. ContainingPage);
  4532. UNLOCK_PFN (OldIrql);
  4533. PointerTempPte->u.Soft.Protection = MM_READWRITE;
  4534. KeInitializeEvent (&Event, NotificationEvent, FALSE);
  4535. //
  4536. // Calculate the paging file number and offset for the in-page operation.
  4537. //
  4538. TempPte = *PointerTempPte;
  4539. PageFileNumber = GET_PAGING_FILE_NUMBER (TempPte);
  4540. StartingOffset.QuadPart = (LONGLONG)(GET_PAGING_FILE_OFFSET (TempPte)) <<
  4541. PAGE_SHIFT;
  4542. Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
  4543. //
  4544. // Build MDL for request.
  4545. //
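// The MDL is built in MdlHack, a stack buffer sized for the MDL header
// plus a single page frame number, since exactly one page is transferred.
//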
  4546. Mdl = (PMDL)&MdlHack[0];
  4547. MmInitializeMdl (Mdl,
  4548. MiGetVirtualAddressMappedByPte (ActualPteAddress),
  4549. PAGE_SIZE);
  4550. Mdl->MdlFlags |= MDL_PAGES_LOCKED;
  4551. Page = (PPFN_NUMBER)(Mdl + 1);
  4552. *Page = PageFrameIndex;
  4553. #if DBG
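//
// In checked builds, fill the new page with a recognizable pattern so a
// failed or short paging file read is easy to spot.
//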
  4554. CurrentProcess = PsGetCurrentProcess ();
  4555. HyperVa = MiMapPageInHyperSpace (CurrentProcess, PageFrameIndex, &OldIrql);
  4556. RtlFillMemoryUlong (HyperVa,
  4557. PAGE_SIZE,
  4558. 0x34785690);
  4559. MiUnmapPageInHyperSpace (CurrentProcess, HyperVa, OldIrql);
  4560. #endif
  4561. //
  4562. // Issue the read request.
  4563. //
  4564. RefaultCount = 0;
  4565. Refault:
  4566. Status = IoPageRead (MmPagingFile[PageFileNumber]->File,
  4567. Mdl,
  4568. &StartingOffset,
  4569. &Event,
  4570. &IoStatus);
  4571. if (Status == STATUS_PENDING) {
  4572. KeWaitForSingleObject (&Event,
  4573. WrPageIn,
  4574. KernelMode,
  4575. FALSE,
  4576. (PLARGE_INTEGER)NULL);
  4577. Status = IoStatus.Status;
  4578. }
  4579. if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
  4580. MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
  4581. }
  4582. if (NT_SUCCESS(Status)) {
  4583. if (IoStatus.Information != PAGE_SIZE) {
  4584. KeBugCheckEx (KERNEL_STACK_INPAGE_ERROR,
  4585. 2,
  4586. IoStatus.Status,
  4587. PageFileNumber,
  4588. StartingOffset.LowPart);
  4589. }
  4590. }
  4591. if ((!NT_SUCCESS(Status)) || (!NT_SUCCESS(IoStatus.Status))) {
  4592. if ((MmIsRetryIoStatus (Status)) ||
  4593. (MmIsRetryIoStatus (IoStatus.Status))) {
  4594. RefaultCount -= 1;
  4595. if (RefaultCount & MiFaultRetryMask) {
  4596. //
  4597. // Insufficient resources, delay and reissue
  4598. // the in page operation.
  4599. //
  4600. KeDelayExecutionThread (KernelMode,
  4601. FALSE,
  4602. (PLARGE_INTEGER)&MmHalfSecond);
  4603. KeClearEvent (&Event);
  4604. RefaultCount -= 1;
  4605. goto Refault;
  4606. }
  4607. }
  4608. KeBugCheckEx (KERNEL_STACK_INPAGE_ERROR,
  4609. Status,
  4610. IoStatus.Status,
  4611. PageFileNumber,
  4612. StartingOffset.LowPart);
  4613. }
  4614. LOCK_PFN (OldIrql);
  4615. //
  4616. // Release the page file space.
  4617. //
  4618. MiReleasePageFileSpace (TempPte);
  4619. Pfn1->OriginalPte.u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
  4620. ASSERT (Pfn1->u3.e1.CacheAttribute == MiCached);
  4621. MI_MAKE_VALID_PTE (TempPte,
  4622. PageFrameIndex,
  4623. MM_READWRITE,
  4624. ActualPteAddress);
  4625. MI_SET_PTE_DIRTY (TempPte);
  4626. MI_SET_MODIFIED (Pfn1, 1, 0x13);
  4627. MI_SET_GLOBAL_STATE (TempPte, Global);
  4628. MI_WRITE_VALID_PTE (PointerTempPte, TempPte);
  4629. }
  4630. return PageFrameIndex;
  4631. }
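//
// Illustrative sketch only, not part of the original module: one plausible
// calling pattern (for example, when bringing an outswapped kernel stack
// page back in) captures the outswapped PTE contents in a local, lets this
// routine materialize the page, and then publishes the now-valid PTE.
// TempPte, PointerPte and OldIrql are hypothetical caller locals.
//
//     TempPte = *PointerPte;
//
//     LOCK_PFN (OldIrql);
//
//     PageFrameIndex = MiMakeOutswappedPageResident (PointerPte,
//                                                    &TempPte,
//                                                    0,
//                                                    0);
//
//     MI_WRITE_VALID_PTE (PointerPte, TempPte);
//
//     UNLOCK_PFN (OldIrql);
//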
  4632. UCHAR
  4633. MiSetMemoryPriorityProcess (
  4634. IN PEPROCESS Process,
  4635. IN UCHAR MemoryPriority
  4636. )
  4637. /*++
  4638. Routine Description:
  4639. Nonpaged wrapper to set the memory priority of a process.
  4640. Arguments:
  4641. Process - Supplies the process to update.
  4642. MemoryPriority - Supplies the new memory priority of the process.
  4643. Return Value:
  4644. Old priority.
  4645. --*/
  4646. {
  4647. KIRQL OldIrql;
  4648. UCHAR OldPriority;
  4649. LOCK_EXPANSION (OldIrql);
  4650. OldPriority = (UCHAR) Process->Vm.Flags.MemoryPriority;
  4651. Process->Vm.Flags.MemoryPriority = MemoryPriority;
  4652. UNLOCK_EXPANSION (OldIrql);
  4653. return OldPriority;
  4654. }
  4655. VOID
  4656. MmSetMemoryPriorityProcess (
  4657. IN PEPROCESS Process,
  4658. IN UCHAR MemoryPriority
  4659. )
  4660. /*++
  4661. Routine Description:
  4662. Sets the memory priority of a process.
  4663. Arguments:
  4664. Process - Supplies the process to update
  4665. MemoryPriority - Supplies the new memory priority of the process
  4666. Return Value:
  4667. None.
  4668. --*/
  4669. {
  4670. if (MmSystemSize == MmSmallSystem && MmNumberOfPhysicalPages < ((15*1024*1024)/PAGE_SIZE)) {
  4671. //
  4672. // If this is a small system, make every process BACKGROUND.
  4673. //
  4674. MemoryPriority = MEMORY_PRIORITY_BACKGROUND;
  4675. }
  4676. MiSetMemoryPriorityProcess (Process, MemoryPriority);
  4677. return;
  4678. }
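//
// Illustrative sketch only, not part of the original module: callers are
// assumed to raise or lower a process's memory priority as its foreground
// state changes, for example:
//
//     MmSetMemoryPriorityProcess (Process, MEMORY_PRIORITY_FOREGROUND);
//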
  4679. PMMVAD
  4680. MiAllocateVad (
  4681. IN ULONG_PTR StartingVirtualAddress,
  4682. IN ULONG_PTR EndingVirtualAddress,
  4683. IN LOGICAL Deletable
  4684. )
  4685. /*++
  4686. Routine Description:
  4687. Reserve the specified range of address space.
  4688. Arguments:
  4689. StartingVirtualAddress - Supplies the starting virtual address.
  4690. EndingVirtualAddress - Supplies the ending virtual address.
  4691. Deletable - Supplies TRUE if the VAD is to be marked as deletable, FALSE
  4692. if deletions of this VAD should be disallowed.
  4693. Return Value:
  4694. A VAD pointer on success, NULL on failure.
  4695. --*/
  4696. {
  4697. PMMVAD_LONG Vad;
  4698. ASSERT (StartingVirtualAddress <= EndingVirtualAddress);
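//
// A deletable reservation only needs the short VAD form; a non-deletable
// one is allocated as a long VAD so the secured range and no-change
// information set below can be recorded.
//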
  4699. if (Deletable == TRUE) {
  4700. Vad = (PMMVAD_LONG)ExAllocatePoolWithTag (NonPagedPool, sizeof(MMVAD_SHORT), 'SdaV');
  4701. }
  4702. else {
  4703. Vad = (PMMVAD_LONG)ExAllocatePoolWithTag (NonPagedPool, sizeof(MMVAD_LONG), 'ldaV');
  4704. }
  4705. if (Vad == NULL) {
  4706. return NULL;
  4707. }
  4708. //
  4709. // Set the starting and ending virtual page numbers of the VAD.
  4710. //
  4711. Vad->StartingVpn = MI_VA_TO_VPN (StartingVirtualAddress);
  4712. Vad->EndingVpn = MI_VA_TO_VPN (EndingVirtualAddress);
  4713. //
  4714. // Mark VAD as no commitment, private, and readonly.
  4715. //
  4716. Vad->u.LongFlags = 0;
  4717. Vad->u.VadFlags.CommitCharge = MM_MAX_COMMIT;
  4718. Vad->u.VadFlags.Protection = MM_READONLY;
  4719. Vad->u.VadFlags.PrivateMemory = 1;
  4720. if (Deletable == TRUE) {
  4721. ASSERT (Vad->u.VadFlags.NoChange == 0);
  4722. }
  4723. else {
  4724. Vad->u.VadFlags.NoChange = 1;
  4725. Vad->u2.LongFlags2 = 0;
  4726. Vad->u2.VadFlags2.OneSecured = 1;
  4727. Vad->u2.VadFlags2.LongVad = 1;
  4728. Vad->u2.VadFlags2.ReadOnly = 1;
  4729. Vad->u3.Secured.StartVpn = StartingVirtualAddress;
  4730. Vad->u3.Secured.EndVpn = EndingVirtualAddress;
  4731. #if defined(_MIALT4K_)
  4732. Vad->AliasInformation = NULL;
  4733. #endif
  4734. }
  4735. return (PMMVAD) Vad;
  4736. }
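//
// Illustrative sketch only, not part of the original module: a fixed,
// non-deletable reservation such as the shared user data page could be
// described with a VAD allocated as below; linking it into the process
// VAD tree (not shown) is done separately under the address space lock.
//
//     PMMVAD Vad;
//
//     Vad = MiAllocateVad (MM_SHARED_USER_DATA_VA,
//                          MM_SHARED_USER_DATA_VA + PAGE_SIZE - 1,
//                          FALSE);
//
//     if (Vad == NULL) {
//         return STATUS_NO_MEMORY;    // schematic error handling
//     }
//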
  4737. #if 0
  4738. VOID
  4739. MiVerifyReferenceCounts (
  4740. IN ULONG PdePage
  4741. )
  4742. //
  4743. // Verify the share and valid PTE counts for page directory page.
  4744. //
  4745. {
  4746. PMMPFN Pfn1;
  4747. PMMPFN Pfn3;
  4748. PMMPTE Pte1;
  4749. ULONG Share = 0;
  4750. ULONG Valid = 0;
  4751. ULONG i, ix, iy;
  4752. PMMPTE PageDirectoryMap;
  4753. KIRQL OldIrql;
  4754. PEPROCESS Process;
  4755. Process = PsGetCurrentProcess ();
  4756. PageDirectoryMap = (PMMPTE)MiMapPageInHyperSpace (Process, PdePage, &OldIrql);
  4757. Pfn1 = MI_PFN_ELEMENT (PdePage);
  4758. Pte1 = (PMMPTE)PageDirectoryMap;
  4759. //
  4760. // Map in the non paged portion of the system.
  4761. //
  4762. ix = MiGetPdeOffset(CODE_START);
  4763. for (i = 0;i < ix; i += 1) {
  4764. if (Pte1->u.Hard.Valid == 1) {
  4765. Valid += 1;
  4766. }
  4767. else if ((Pte1->u.Soft.Prototype == 0) &&
  4768. (Pte1->u.Soft.Transition == 1)) {
  4769. Pfn3 = MI_PFN_ELEMENT (Pte1->u.Trans.PageFrameNumber);
  4770. if (Pfn3->u3.e1.PageLocation == ActiveAndValid) {
  4771. ASSERT (Pfn1->u2.ShareCount > 1);
  4772. Valid += 1;
  4773. }
  4774. else {
  4775. Share += 1;
  4776. }
  4777. }
  4778. Pte1 += 1;
  4779. }
  4780. iy = MiGetPdeOffset(PTE_BASE);
  4781. Pte1 = &PageDirectoryMap[iy];
  4782. ix = MiGetPdeOffset(HYPER_SPACE_END) + 1;
  4783. for (i = iy; i < ix; i += 1) {
  4784. if (Pte1->u.Hard.Valid == 1) {
  4785. Valid += 1;
  4786. }
  4787. else if ((Pte1->u.Soft.Prototype == 0) &&
  4788. (Pte1->u.Soft.Transition == 1)) {
  4789. Pfn3 = MI_PFN_ELEMENT (Pte1->u.Trans.PageFrameNumber);
  4790. if (Pfn3->u3.e1.PageLocation == ActiveAndValid) {
  4791. ASSERT (Pfn1->u2.ShareCount > 1);
  4792. Valid += 1;
  4793. }
  4794. else {
  4795. Share += 1;
  4796. }
  4797. }
  4798. Pte1 += 1;
  4799. }
  4800. if (Pfn1->u2.ShareCount != (Share+Valid+1)) {
  4801. DbgPrint ("MMPROCSUP - PDE page %lx ShareCount %lx found %lx\n",
  4802. PdePage, Pfn1->u2.ShareCount, Valid+Share+1);
  4803. }
  4804. MiUnmapPageInHyperSpace (Process, PageDirectoryMap, OldIrql);
  4805. ASSERT (Pfn1->u2.ShareCount == (Share+Valid+1));
  4806. return;
  4807. }
  4808. #endif //0
  4809. PFN_NUMBER
  4810. MmGetDirectoryFrameFromProcess(
  4811. IN PEPROCESS Process
  4812. )
  4813. /*++
  4814. Routine Description:
  4815. This routine retrieves the PFN of the process's top pagetable page. It can
  4816. be used to map physical pages back to a process.
  4817. Arguments:
  4818. Process - Supplies the process to query.
  4819. Return Value:
  4820. Page frame number of the top level page table page.
  4821. Environment:
  4822. Kernel mode. No locks held.
  4823. --*/
  4824. {
  4825. ASSERT (KeGetCurrentIrql () == PASSIVE_LEVEL);
  4826. return MI_GET_DIRECTORY_FRAME_FROM_PROCESS(Process);
  4827. }
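//
// Illustrative sketch only, not part of the original module: a caller
// could convert the returned frame to the physical address of the top
// level page table page, for example to program hardware that needs the
// per-process translation root. DirectoryBase is a hypothetical local.
//
//     PHYSICAL_ADDRESS DirectoryBase;
//
//     DirectoryBase.QuadPart =
//         ((ULONGLONG) MmGetDirectoryFrameFromProcess (Process)) << PAGE_SHIFT;
//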