Source code of Windows XP (NT5)


/*++

Copyright (c) 1989  Microsoft Corporation

Module Name:

    crashdmp.c

Abstract:

    This module contains routines which provide support for writing out
    a crashdump on system failure.

Author:

    Landy Wang (landyw) 04-Oct-2000

Revision History:

--*/

#include "mi.h"
LOGICAL
MiIsAddressRangeValid (
    IN PVOID VirtualAddress,
    IN SIZE_T Length
    )
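
/*++

Routine Description:

    Check that every page spanned by the given virtual range is currently
    mapped, so the range can be safely dereferenced post-bugcheck.

Arguments:

    VirtualAddress - Supplies the base virtual address of the range.

    Length - Supplies the length of the range in bytes.

Return Value:

    TRUE if every page in the range is valid, FALSE otherwise.

--*/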
{
    PUCHAR Va;
    PUCHAR EndVa;
    ULONG Pages;

    Va = PAGE_ALIGN (VirtualAddress);

    Pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (VirtualAddress, Length);

    EndVa = Va + (Pages << PAGE_SHIFT);

    while (Va < EndVa) {

        if (!MmIsAddressValid (Va)) {
            return FALSE;
        }

        Va += PAGE_SIZE;
    }

    return TRUE;
}
VOID
MiRemoveFreePoolMemoryFromDump (
    IN PMM_KERNEL_DUMP_CONTEXT Context
    )

/*++

Routine Description:

    Removes all memory on the nonpaged pool free page lists from a kernel
    memory dump to reduce its size.

    Because the entries in these lists may have been destroyed by errant
    drivers that modify pool after freeing it, each entry is carefully
    validated prior to any dereference.

Arguments:

    Context - Supplies the dump context pointer that must be passed to
              IoFreeDumpRange.

Return Value:

    None.

Environment:

    Kernel-mode, post-bugcheck.

    For use by crashdump routines ONLY.

--*/

{
    PLIST_ENTRY Entry;
    PLIST_ENTRY List;
    PLIST_ENTRY ListEnd;
    PMMFREE_POOL_ENTRY PoolEntry;
    ULONG LargePageMapped;

    List = &MmNonPagedPoolFreeListHead[0];
    ListEnd = List + MI_MAX_FREE_LIST_HEADS;
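
    //
    // Walk every free list. Each entry describes a run of free nonpaged
    // pool pages; the entry's Size field gives the length of the run
    // passed to FreeDumpRange below.
    //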
    for ( ; List < ListEnd; List += 1) {

        for (Entry = List->Flink; Entry != List; Entry = Entry->Flink) {

            PoolEntry = CONTAINING_RECORD (Entry,
                                           MMFREE_POOL_ENTRY,
                                           List);

            //
            // Free pool entries are always page aligned - treat anything
            // else as corruption.
            //

            if (BYTE_OFFSET(PoolEntry) != 0) {
                break;
            }

            //
            // Check that the entry itself is mapped before dereferencing it.
            //

            if (MiIsAddressRangeValid (PoolEntry, sizeof (MMFREE_POOL_ENTRY)) == FALSE) {
                break;
            }

            if (PoolEntry->Size == 0) {
                break;
            }

            //
            // The signature is only maintained in checked builds.
            //

            ASSERT (PoolEntry->Signature == MM_FREE_POOL_SIGNATURE);

            //
            // Verify that the entry's flinks and blinks are valid.
            //

            if ((!MiIsAddressRangeValid (Entry->Flink, sizeof (LIST_ENTRY))) ||
                (!MiIsAddressRangeValid (Entry->Blink, sizeof (LIST_ENTRY))) ||
                (Entry->Blink->Flink != Entry) ||
                (Entry->Flink->Blink != Entry)) {

                break;
            }

            //
            // The list entry is valid, remove it from the dump.
            //

            if (MI_IS_PHYSICAL_ADDRESS (PoolEntry)) {
                LargePageMapped = 1;
            }
            else {
                LargePageMapped = 0;
            }
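
            //
            // The final argument tells the dump driver how the address is
            // mapped: 1 when the VA lies in a physically (large-page)
            // mapped region, 0 otherwise (inferred from the
            // MI_IS_PHYSICAL_ADDRESS test above).
            //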
            Context->FreeDumpRange (Context,
                                    PoolEntry,
                                    PoolEntry->Size,
                                    LargePageMapped);
        }
    }
}
LOGICAL
MiIsPhysicalMemoryAddress (
    IN PFN_NUMBER PageFrameIndex,
    IN OUT PULONG Hint,
    IN LOGICAL PfnLockNeeded
    )

/*++

Routine Description:

    Check whether a given page frame number is backed by RAM or by I/O space.

Arguments:

    PageFrameIndex - Supplies the page frame number to check.

    Hint - Supplies a hint at which physical memory run to start searching
           for this PFN. The hint is updated on success and failure.

    PfnLockNeeded - Supplies TRUE if the caller needs this routine to
                    acquire the PFN lock, FALSE if not (i.e., the caller
                    already holds the PFN lock, or we are crashing the
                    system and so the PFN lock may already be held by
                    someone else).

Return Value:

    TRUE - If the page is backed by RAM.

    FALSE - If the page is I/O mapped memory (or beyond the highest
            physical page).

Environment:

    Kernel-mode, post-bugcheck.

    For use by crash dump and other Mm internal routines.

--*/
{
    ULONG Index;
    KIRQL OldIrql;
    PPHYSICAL_MEMORY_RUN Run;
    PPHYSICAL_MEMORY_DESCRIPTOR PhysicalMemoryBlock;

    //
    // Initializing OldIrql is not needed for correctness, but without it
    // the compiler cannot compile this code W4 to check for use of
    // uninitialized variables.
    //

    OldIrql = PASSIVE_LEVEL;

    if (PfnLockNeeded) {
        LOCK_PFN2 (OldIrql);
    }

    PhysicalMemoryBlock = MmPhysicalMemoryBlock;

    if (PageFrameIndex > MmHighestPhysicalPage) {
        if (PfnLockNeeded) {
            UNLOCK_PFN2 (OldIrql);
        }
        return FALSE;
    }
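
    //
    // Check the hinted run first - callers scanning a linear range of
    // pages will usually hit the same run repeatedly.
    //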
    if (*Hint < PhysicalMemoryBlock->NumberOfRuns) {

        Run = &PhysicalMemoryBlock->Run[*Hint];

        if ((PageFrameIndex >= Run->BasePage) &&
            (PageFrameIndex < Run->BasePage + Run->PageCount)) {

            if (PfnLockNeeded) {
                UNLOCK_PFN2 (OldIrql);
            }
            return TRUE;
        }
    }

    for (Index = 0; Index < PhysicalMemoryBlock->NumberOfRuns; Index += 1) {

        Run = &PhysicalMemoryBlock->Run[Index];

        if ((PageFrameIndex >= Run->BasePage) &&
            (PageFrameIndex < Run->BasePage + Run->PageCount)) {

            *Hint = Index;

            if (PfnLockNeeded) {
                UNLOCK_PFN2 (OldIrql);
            }
            return TRUE;
        }

        //
        // The physical memory runs are sorted by increasing base page, so
        // if this run already ends above the PFN (and the PFN was not in
        // it), no later run can contain the PFN either - bail out.
        //

        if (Run->BasePage + Run->PageCount > PageFrameIndex) {
            *Hint = Index;
            break;
        }
    }

    if (PfnLockNeeded) {
        UNLOCK_PFN2 (OldIrql);
    }

    return FALSE;
}
VOID
MiAddPagesWithNoMappings (
    IN PMM_KERNEL_DUMP_CONTEXT Context
    )

/*++

Routine Description:

    Add pages to a kernel memory crashdump that do not have a virtual
    mapping in this process context.

    This includes entries that are wired directly into the TB.

Arguments:

    Context - Crashdump context pointer.

Return Value:

    None.

Environment:

    Kernel-mode, post-bugcheck.

    For use by crash dump routines ONLY.

--*/

{
#if defined (_X86_)

    ULONG LargePageMapped;
    PVOID Va;
    PHYSICAL_ADDRESS DirBase;

    //
    // Add the current page directory table page - don't use the directory
    // table base of the crashing process because cr3 may have been
    // switched on stack overflow crashes, etc.
    //

    _asm {
        mov eax, cr3
        mov DirBase.LowPart, eax
    }

    //
    // cr3 always lies below 4GB physical.
    //

    DirBase.HighPart = 0;

    Va = MmGetVirtualForPhysical (DirBase);

    if (MI_IS_PHYSICAL_ADDRESS (Va)) {
        LargePageMapped = 1;
    }
    else {
        LargePageMapped = 0;
    }

    Context->SetDumpRange (Context,
                           Va,
                           1,
                           LargePageMapped);

#elif defined(_AMD64_)

    ULONG LargePageMapped;
    PVOID Va;
    PHYSICAL_ADDRESS DirBase;

    //
    // Add the current page directory table page - don't use the directory
    // table base of the crashing process because cr3 may have been
    // switched on stack overflow crashes, etc.
    //

    DirBase.QuadPart = ReadCR3 ();

    Va = MmGetVirtualForPhysical (DirBase);

    if (MI_IS_PHYSICAL_ADDRESS (Va)) {
        LargePageMapped = 1;
    }
    else {
        LargePageMapped = 0;
    }

    Context->SetDumpRange (Context,
                           Va,
                           1,
                           LargePageMapped);

#elif defined(_IA64_)

    if (MiKseg0Mapping == TRUE) {
        Context->SetDumpRange (
            Context,
            MiKseg0Start,
            (((ULONG_PTR)MiKseg0End - (ULONG_PTR)MiKseg0Start) >> PAGE_SHIFT) + 1,
            1);
    }

#endif
}
LOGICAL
MiAddRangeToCrashDump (
    IN PMM_KERNEL_DUMP_CONTEXT Context,
    IN PVOID Va,
    IN SIZE_T NumberOfBytes
    )

/*++

Routine Description:

    Adds the specified range of memory to the crashdump.

Arguments:

    Context - Supplies the crashdump context pointer.

    Va - Supplies the starting virtual address.

    NumberOfBytes - Supplies the number of bytes to dump. Note that for IA64,
                    this must not cause the range to cross a region boundary.

Return Value:

    TRUE if all valid pages were added to the crashdump, FALSE otherwise.

Environment:

    Kernel mode, post-bugcheck.

    For use by crash dump routines ONLY.

--*/
{
    LOGICAL Status;
    LOGICAL AddThisPage;
    NTSTATUS CallStatus;
    ULONG Hint;
    PVOID EndingAddress;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE PointerPpe;
    PMMPTE PointerPxe;
    PFN_NUMBER PageFrameIndex;
#if defined (_X86_) || defined (_AMD64_)
    PFN_NUMBER NumberOfPages;
#endif

    Hint = 0;
    Status = TRUE;

    EndingAddress = (PVOID)((ULONG_PTR)Va + NumberOfBytes - 1);

#if defined(_IA64_)

    //
    // IA64 has a separate page directory parent for each region and
    // unimplemented address bits are ignored by the processor (as
    // long as they are canonical), but we must watch for them
    // here so the incrementing PPE walk doesn't go off the end.
    // This is done by truncating any given region request so it does
    // not go past the end of the specified region. Note this
    // automatically will include the page maps which are sign extended
    // because the PPEs would just wrap anyway.
    //

    if (((ULONG_PTR)EndingAddress & ~VRN_MASK) >= MM_VA_MAPPED_BY_PPE * PDE_PER_PAGE) {
        EndingAddress = (PVOID)(((ULONG_PTR)EndingAddress & VRN_MASK) |
                                ((MM_VA_MAPPED_BY_PPE * PDE_PER_PAGE) - 1));
    }

#endif

    Va = PAGE_ALIGN (Va);

    PointerPxe = MiGetPxeAddress (Va);
    PointerPpe = MiGetPpeAddress (Va);
    PointerPde = MiGetPdeAddress (Va);
    PointerPte = MiGetPteAddress (Va);
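
    //
    // Walk the page table hierarchy top-down for the entire range,
    // skipping invalid upper-level entries in bulk so that large sparse
    // regions are traversed without touching every PTE.
    //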
    do {

#if (_MI_PAGING_LEVELS >= 3)
restart:
#endif

        KdCheckForDebugBreak ();

#if (_MI_PAGING_LEVELS >= 4)
        while (PointerPxe->u.Hard.Valid == 0) {

            //
            // This extended page directory parent entry is empty,
            // go to the next one.
            //

            PointerPxe += 1;
            PointerPpe = MiGetVirtualAddressMappedByPte (PointerPxe);
            PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe);
            PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
            Va = MiGetVirtualAddressMappedByPte (PointerPte);

            if ((Va > EndingAddress) || (Va == NULL)) {

                //
                // All done, return.
                //

                return Status;
            }
        }
#endif

        ASSERT (MiGetPpeAddress(Va) == PointerPpe);

#if (_MI_PAGING_LEVELS >= 3)
        while (PointerPpe->u.Hard.Valid == 0) {

            //
            // This page directory parent entry is empty, go to the next one.
            //

            PointerPpe += 1;
            PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe);
            PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
            Va = MiGetVirtualAddressMappedByPte (PointerPte);

            if ((Va > EndingAddress) || (Va == NULL)) {

                //
                // All done, return.
                //

                return Status;
            }

#if (_MI_PAGING_LEVELS >= 4)
            if (MiIsPteOnPdeBoundary (PointerPpe)) {
                PointerPxe += 1;
                ASSERT (PointerPxe == MiGetPteAddress (PointerPpe));
                goto restart;
            }
#endif
        }
#endif

        while (PointerPde->u.Hard.Valid == 0) {

            //
            // This page directory entry is empty, go to the next one.
            //

            PointerPde += 1;
            PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
            Va = MiGetVirtualAddressMappedByPte (PointerPte);

            if ((Va > EndingAddress) || (Va == NULL)) {

                //
                // All done, return.
                //

                return Status;
            }

#if (_MI_PAGING_LEVELS >= 3)
            if (MiIsPteOnPdeBoundary (PointerPde)) {
                PointerPpe += 1;
                ASSERT (PointerPpe == MiGetPteAddress (PointerPde));
                PointerPxe = MiGetPteAddress (PointerPpe);
                goto restart;
            }
#endif
        }

        //
        // A valid PDE has been located, examine each PTE.
        //

        ASSERT64 (PointerPpe->u.Hard.Valid == 1);
        ASSERT (PointerPde->u.Hard.Valid == 1);
        ASSERT (Va <= EndingAddress);

#if defined (_X86_) || defined (_AMD64_)

        if (PointerPde->u.Hard.LargePage == 1) {

            //
            // Large pages are always backed by RAM, not mapped to
            // I/O space, so always add them to the dump.
            //

            NumberOfPages = (((ULONG_PTR)MiGetVirtualAddressMappedByPde (PointerPde + 1) - (ULONG_PTR)Va) / PAGE_SIZE);

            CallStatus = Context->SetDumpRange (Context,
                                                Va,
                                                NumberOfPages,
                                                1);

            if (!NT_SUCCESS (CallStatus)) {
#if DBG
                DbgPrint ("Adding large VA %p to crashdump failed\n", Va);
                DbgBreakPoint ();
#endif
                Status = FALSE;
            }

            PointerPde += 1;
            Va = MiGetVirtualAddressMappedByPde (PointerPde);

            if ((Va > EndingAddress) || (Va == NULL)) {
                return Status;
            }

            PointerPte = MiGetPteAddress (Va);
            PointerPpe = MiGetPpeAddress (Va);
            PointerPxe = MiGetPxeAddress (Va);

            //
            // March on to the next page directory.
            //

            continue;
        }
#endif

        //
        // Exclude memory that is mapped in the system cache.
        // Note the system cache starts and ends on page directory boundaries
        // and is never mapped with large pages.
        //

        if (MI_IS_SYSTEM_CACHE_ADDRESS (Va)) {

            PointerPde += 1;
            Va = MiGetVirtualAddressMappedByPde (PointerPde);

            if ((Va > EndingAddress) || (Va == NULL)) {
                return Status;
            }

            PointerPte = MiGetPteAddress (Va);
            PointerPpe = MiGetPpeAddress (Va);
            PointerPxe = MiGetPxeAddress (Va);

            //
            // March on to the next page directory.
            //

            continue;
        }
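
        //
        // Examine each PTE mapped by this page directory entry. Both valid
        // PTEs and transition PTEs (whose pages are still resident) name
        // physical pages that belong in the dump.
        //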
        do {

            AddThisPage = FALSE;
            PageFrameIndex = 0;

            if (PointerPte->u.Hard.Valid == 1) {
                PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
                AddThisPage = TRUE;
            }
            else if ((PointerPte->u.Soft.Prototype == 0) &&
                     (PointerPte->u.Soft.Transition == 1)) {

                PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (PointerPte);
                AddThisPage = TRUE;
            }

            if (AddThisPage == TRUE) {

                //
                // Include only addresses that are backed by RAM, not mapped
                // to I/O space.
                //

                if (MiIsPhysicalMemoryAddress (PageFrameIndex, &Hint, FALSE)) {

                    //
                    // Add this page to the dump.
                    //
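                    //
                    // The page is passed by frame number here; the final
                    // argument of 2 appears to tell the dump driver that
                    // the first parameter is a PFN rather than a virtual
                    // address (an inference from usage - elsewhere in this
                    // module the flag is 0 or 1 for virtual mappings).
                    //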
                    CallStatus = Context->SetDumpRange (Context,
                                                        (PVOID) PageFrameIndex,
                                                        1,
                                                        2);

                    if (!NT_SUCCESS (CallStatus)) {
#if DBG
                        DbgPrint ("Adding VA %p to crashdump failed\n", Va);
                        DbgBreakPoint ();
#endif
                        Status = FALSE;
                    }
                }
            }

            Va = (PVOID)((ULONG_PTR)Va + PAGE_SIZE);
            PointerPte += 1;

            ASSERT64 (PointerPpe->u.Hard.Valid == 1);
            ASSERT (PointerPde->u.Hard.Valid == 1);

            if ((Va > EndingAddress) || (Va == NULL)) {
                return Status;
            }

            //
            // If not at the end of a page table and still within the
            // specified range, just march directly on to the next PTE.
            //
            // Otherwise, if the virtual address is on a page directory
            // boundary then attempt to leap forward, skipping over empty
            // mappings where possible.
            //

        } while (!MiIsVirtualAddressOnPdeBoundary(Va));

        ASSERT (PointerPte == MiGetPteAddress (Va));

        PointerPde = MiGetPdeAddress (Va);
        PointerPpe = MiGetPpeAddress (Va);
        PointerPxe = MiGetPxeAddress (Va);

    } while (TRUE);

    // NEVER REACHED
}
VOID
MiAddActivePageDirectories (
    IN PMM_KERNEL_DUMP_CONTEXT Context
    )
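
/*++

Routine Description:

    Add the top level page directory page(s) of the process that is
    current on each processor to the crashdump.

Arguments:

    Context - Crashdump context pointer.

Return Value:

    None.

Environment:

    Kernel-mode, post-bugcheck.

    For use by crash dump routines ONLY.

--*/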
{
    UCHAR i;
    PKPRCB Prcb;
    PKPROCESS Process;
    PFN_NUMBER PageFrameIndex;
#if defined (_X86PAE_)
    PMMPTE PointerPte;
    ULONG j;
#endif

    for (i = 0; i < KeNumberProcessors; i += 1) {

        Prcb = KiProcessorBlock[i];
        Process = Prcb->CurrentThread->ApcState.Process;

#if defined (_X86PAE_)

        //
        // Note that on PAE systems, the idle and system processes have
        // NULL-initialized PaeTop fields, so this field must be explicitly
        // checked before being referenced here.
        //

        //
        // Add the 4 top level page directory pages to the dump.
        //

        PointerPte = (PMMPTE) ((PEPROCESS)Process)->PaeTop;

        if (PointerPte == NULL) {
            PointerPte = &MiSystemPaeVa.PteEntry[0];
        }

        for (j = 0; j < PD_PER_SYSTEM; j += 1) {
            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
            PointerPte += 1;
            Context->SetDumpRange (Context, (PVOID) PageFrameIndex, 1, 2);
        }

        //
        // Add the real cr3 page to the dump - note that the value stored
        // in the directory table base is really a physical address (not a
        // frame number).
        //

        PageFrameIndex = Process->DirectoryTableBase[0];
        PageFrameIndex = (PageFrameIndex >> PAGE_SHIFT);
#else
        PageFrameIndex =
            MI_GET_DIRECTORY_FRAME_FROM_PROCESS ((PEPROCESS)(Process));
#endif

        //
        // Add this physical page to the dump.
        //

        Context->SetDumpRange (Context, (PVOID) PageFrameIndex, 1, 2);
    }

#if defined(_IA64_)

    //
    // The first processor's PCR is mapped in region 4 which is not (and
    // cannot be) scanned later, so explicitly add it to the dump here.
    //

    Prcb = KiProcessorBlock[0];
    Context->SetDumpRange (Context, (PVOID) Prcb->PcrPage, 1, 2);
#endif
}
VOID
MmGetKernelDumpRange (
    IN PMM_KERNEL_DUMP_CONTEXT Context
    )

/*++

Routine Description:

    Add (and subtract) ranges of system memory to the crashdump.

Arguments:

    Context - Crashdump context pointer.

Return Value:

    None.

Environment:

    Kernel mode, post-bugcheck.

    For use by crash dump routines ONLY.

--*/

{
    PVOID Va;
    SIZE_T NumberOfBytes;

    ASSERT ((Context != NULL) &&
            (Context->SetDumpRange != NULL) &&
            (Context->FreeDumpRange != NULL));
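
    //
    // Capture the top level page directory pages for the processes that
    // are currently active on each processor.
    //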
    MiAddActivePageDirectories (Context);

#if defined(_IA64_)

    //
    // Note each IA64 region must be passed separately to MiAddRange...
    //

    Va = (PVOID) ALT4KB_PERMISSION_TABLE_START;
    NumberOfBytes = PDE_UTBASE + PAGE_SIZE - (ULONG_PTR) Va;

    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

    Va = (PVOID) MM_SESSION_SPACE_DEFAULT;
    NumberOfBytes = PDE_STBASE + PAGE_SIZE - (ULONG_PTR) Va;

    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

    Va = (PVOID) KADDRESS_BASE;
    NumberOfBytes = PDE_KTBASE + PAGE_SIZE - (ULONG_PTR) Va;

    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

#elif defined(_AMD64_)

    Va = (PVOID) MM_SYSTEM_RANGE_START;
    NumberOfBytes = MM_KSEG0_BASE - (ULONG_PTR) Va;

    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

    Va = (PVOID) MM_KSEG2_BASE;
    NumberOfBytes = MM_SYSTEM_SPACE_START - (ULONG_PTR) Va;

    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

    Va = (PVOID) MM_PAGED_POOL_START;
    NumberOfBytes = MM_SYSTEM_SPACE_END - (ULONG_PTR) Va + 1;

    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

#else

    Va = MmSystemRangeStart;
    NumberOfBytes = MM_SYSTEM_SPACE_END - (ULONG_PTR) Va + 1;

    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

#endif

    //
    // Add any memory that is a part of the kernel space but does not
    // have a virtual mapping (hence was not collected above).
    //

    MiAddPagesWithNoMappings (Context);

    //
    // Remove nonpaged pool that is not in use.
    //

    MiRemoveFreePoolMemoryFromDump (Context);
}