Leaked source code of Windows Server 2003: crashdmp.c (crashdump support routines)

/*++

Copyright (c) 1989  Microsoft Corporation

Module Name:

    crashdmp.c

Abstract:

    This module contains routines which provide support for writing out
    a crashdump on system failure.

Author:

    Landy Wang (landyw) 04-Oct-2000

Revision History:

--*/
#include "mi.h"

LOGICAL
MiIsAddressRangeValid (
    IN PVOID VirtualAddress,
    IN SIZE_T Length
    )
{
    PUCHAR Va;
    PUCHAR EndVa;
    ULONG Pages;

    Va = PAGE_ALIGN (VirtualAddress);
    Pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (VirtualAddress, Length);
    EndVa = Va + (Pages << PAGE_SHIFT);

    while (Va < EndVa) {
        if (!MiIsAddressValid (Va, TRUE)) {
            return FALSE;
        }
        Va += PAGE_SIZE;
    }

    return TRUE;
}
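
//
// Example (hypothetical addresses): with 4KB pages (PAGE_SHIFT == 12), a
// call such as MiIsAddressRangeValid ((PVOID)0x80001FF0, 0x20) spans two
// pages (0x80001000 and 0x80002000), so both PTEs must be valid for the
// routine to return TRUE.
//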
VOID
MiRemoveFreePoolMemoryFromDump (
    IN PMM_KERNEL_DUMP_CONTEXT Context
    )

/*++

Routine Description:

    Removes all memory on the nonpaged pool free page lists from a kernel
    memory dump to reduce its size.

    Because the entries in these structures may have been destroyed by
    errant drivers that modify pool after freeing it, each entry is
    carefully validated prior to any dereference.

Arguments:

    Context - Supplies the dump context pointer that must be passed to
              IoFreeDumpRange.

Return Value:

    None.

Environment:

    Kernel mode, post-bugcheck.

    For use by crashdump routines ONLY.

--*/

{
    PLIST_ENTRY Entry;
    PLIST_ENTRY List;
    PLIST_ENTRY ListEnd;
    PMMFREE_POOL_ENTRY PoolEntry;
    ULONG LargePageMapped;

    List = &MmNonPagedPoolFreeListHead[0];
    ListEnd = List + MI_MAX_FREE_LIST_HEADS;

    for ( ; List < ListEnd; List += 1) {

        for (Entry = List->Flink; Entry != List; Entry = Entry->Flink) {

            PoolEntry = CONTAINING_RECORD (Entry,
                                           MMFREE_POOL_ENTRY,
                                           List);

            //
            // Free pool entries always begin on a page boundary, so a
            // nonzero byte offset means this entry has been corrupted.
            // Abandon the rest of this list rather than risk following
            // a corrupt link.
            //

            if (BYTE_OFFSET(PoolEntry) != 0) {
                break;
            }

            //
            // Check that the entry itself is addressable before
            // dereferencing any of its fields.
            //

            if (MiIsAddressRangeValid (PoolEntry, sizeof (MMFREE_POOL_ENTRY)) == FALSE) {
                break;
            }

            if (PoolEntry->Size == 0) {
                break;
            }

            //
            // Signature is only maintained in checked builds.
            //

            ASSERT (PoolEntry->Signature == MM_FREE_POOL_SIGNATURE);

            //
            // Verify that the element's flinks and blinks are valid.
            //

            if ((!MiIsAddressRangeValid (Entry->Flink, sizeof (LIST_ENTRY))) ||
                (!MiIsAddressRangeValid (Entry->Blink, sizeof (LIST_ENTRY))) ||
                (Entry->Blink->Flink != Entry) ||
                (Entry->Flink->Blink != Entry)) {

                break;
            }

            //
            // The list entry is valid, remove it from the dump.
            //

            if (MI_IS_PHYSICAL_ADDRESS (PoolEntry)) {
                LargePageMapped = 1;
            }
            else {
                LargePageMapped = 0;
            }

            Context->FreeDumpRange (Context,
                                    PoolEntry,
                                    PoolEntry->Size,
                                    LargePageMapped);
        }
    }
}
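
/*
 * For readers unfamiliar with NT-style intrusive lists: CONTAINING_RECORD
 * recovers the enclosing structure from an embedded LIST_ENTRY, which is
 * how PoolEntry is derived above.  A minimal standalone illustration in
 * plain C (the POOL_BLOCK type is hypothetical, not part of this module):
 *
 *     #include <stddef.h>
 *
 *     typedef struct _LIST_ENTRY {
 *         struct _LIST_ENTRY *Flink;
 *         struct _LIST_ENTRY *Blink;
 *     } LIST_ENTRY;
 *
 *     #define CONTAINING_RECORD(addr, type, field) \
 *         ((type *)((char *)(addr) - offsetof(type, field)))
 *
 *     typedef struct _POOL_BLOCK {
 *         size_t Size;
 *         LIST_ENTRY List;
 *     } POOL_BLOCK;
 *
 *     // Given a LIST_ENTRY pointer obtained from a list walk, recover
 *     // the enclosing block:
 *     //     POOL_BLOCK *Block = CONTAINING_RECORD(Entry, POOL_BLOCK, List);
 */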
VOID
MiAddPagesWithNoMappings (
    IN PMM_KERNEL_DUMP_CONTEXT Context
    )

/*++

Routine Description:

    Adds pages to a kernel memory crashdump that do not have a virtual
    mapping in the current process context.

    This includes entries that are wired directly into the TB.

Arguments:

    Context - Crashdump context pointer.

Return Value:

    None.

Environment:

    Kernel mode, post-bugcheck.

    For use by crash dump routines ONLY.

--*/

{
#if defined (_X86_)

    ULONG LargePageMapped;
    PVOID Va;
    PHYSICAL_ADDRESS DirBase;

    //
    // Add the current page directory table page - don't use the directory
    // table base for the crashing process as we have switched cr3 on
    // stack overflow crashes, etc.
    //
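    //
    // The directory page itself must be in the dump because the debugger
    // uses it to translate virtual addresses when the dump file is loaded.
    //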
    _asm {
        mov eax, cr3
        mov DirBase.LowPart, eax
    }

    //
    // cr3 is always located below 4GB physical.
    //

    DirBase.HighPart = 0;

    Va = MmGetVirtualForPhysical (DirBase);

    if (MI_IS_PHYSICAL_ADDRESS (Va)) {
        LargePageMapped = 1;
    }
    else {
        LargePageMapped = 0;
    }

    Context->SetDumpRange (Context,
                           Va,
                           1,
                           LargePageMapped);

#elif defined(_AMD64_)

    ULONG LargePageMapped;
    PVOID Va;
    PHYSICAL_ADDRESS DirBase;

    //
    // Add the current page directory table page - don't use the directory
    // table base for the crashing process as we have switched cr3 on
    // stack overflow crashes, etc.
    //

    DirBase.QuadPart = ReadCR3 ();

    Va = MmGetVirtualForPhysical (DirBase);

    if (MI_IS_PHYSICAL_ADDRESS (Va)) {
        LargePageMapped = 1;
    }
    else {
        LargePageMapped = 0;
    }

    Context->SetDumpRange (Context,
                           Va,
                           1,
                           LargePageMapped);

#elif defined(_IA64_)

    if (MiKseg0Mapping == TRUE) {
        Context->SetDumpRange (Context,
                               MiKseg0Start,
                               (((ULONG_PTR)MiKseg0End - (ULONG_PTR)MiKseg0Start) >> PAGE_SHIFT) + 1,
                               1);
    }

#endif
}
LOGICAL
MiAddRangeToCrashDump (
    IN PMM_KERNEL_DUMP_CONTEXT Context,
    IN PVOID Va,
    IN SIZE_T NumberOfBytes
    )

/*++

Routine Description:

    Adds the specified range of memory to the crashdump.

Arguments:

    Context - Supplies the crashdump context pointer.

    Va - Supplies the starting virtual address.

    NumberOfBytes - Supplies the number of bytes to dump. Note that for IA64,
                    this must not cause the range to cross a region boundary.

Return Value:

    TRUE if all valid pages were added to the crashdump, FALSE otherwise.

Environment:

    Kernel mode, post-bugcheck.

    For use by crash dump routines ONLY.

--*/

{
    LOGICAL Status;
    LOGICAL AddThisPage;
    ULONG Hint;
    PVOID EndingAddress;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE PointerPpe;
    PMMPTE PointerPxe;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER NumberOfPages;

    Hint = 0;
    Status = TRUE;

    EndingAddress = (PVOID)((ULONG_PTR)Va + NumberOfBytes - 1);

#if defined(_IA64_)

    //
    // IA64 has a separate page directory parent for each region and
    // unimplemented address bits are ignored by the processor (as
    // long as they are canonical), but we must watch for them
    // here so the incrementing PPE walk doesn't go off the end.
    // This is done by truncating any given region request so it does
    // not go past the end of the specified region. Note this
    // automatically will include the page maps which are sign extended
    // because the PPEs would just wrap anyway.
    //

    if (((ULONG_PTR)EndingAddress & ~VRN_MASK) >= MM_VA_MAPPED_BY_PPE * PDE_PER_PAGE) {
        EndingAddress = (PVOID)(((ULONG_PTR)EndingAddress & VRN_MASK) |
                                ((MM_VA_MAPPED_BY_PPE * PDE_PER_PAGE) - 1));
    }

#endif

    Va = PAGE_ALIGN (Va);

    PointerPxe = MiGetPxeAddress (Va);
    PointerPpe = MiGetPpeAddress (Va);
    PointerPde = MiGetPdeAddress (Va);
    PointerPte = MiGetPteAddress (Va);
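
    //
    // The walk below steps through four levels of page tables.  On AMD64,
    // for example, each PXE maps 512GB, each PPE 1GB, each PDE 2MB and
    // each PTE 4KB, so skipping an invalid upper-level entry skips the
    // entire range it maps without touching the levels beneath it.
    //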
    do {

#if (_MI_PAGING_LEVELS >= 3)
restart:
#endif

        KdCheckForDebugBreak ();

#if (_MI_PAGING_LEVELS >= 4)

        while (PointerPxe->u.Hard.Valid == 0) {

            //
            // This extended page directory parent entry is empty,
            // go to the next one.
            //

            PointerPxe += 1;
            PointerPpe = MiGetVirtualAddressMappedByPte (PointerPxe);
            PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe);
            PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
            Va = MiGetVirtualAddressMappedByPte (PointerPte);

            if ((Va > EndingAddress) || (Va == NULL)) {

                //
                // All done, return.
                //

                return Status;
            }
        }
#endif

        ASSERT (MiGetPpeAddress(Va) == PointerPpe);

#if (_MI_PAGING_LEVELS >= 3)

        while (PointerPpe->u.Hard.Valid == 0) {

            //
            // This page directory parent entry is empty, go to the next one.
            //

            PointerPpe += 1;
            PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe);
            PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
            Va = MiGetVirtualAddressMappedByPte (PointerPte);

            if ((Va > EndingAddress) || (Va == NULL)) {

                //
                // All done, return.
                //

                return Status;
            }

#if (_MI_PAGING_LEVELS >= 4)
            if (MiIsPteOnPdeBoundary (PointerPpe)) {
                PointerPxe += 1;
                ASSERT (PointerPxe == MiGetPteAddress (PointerPpe));
                goto restart;
            }
#endif
        }
#endif

        while (PointerPde->u.Hard.Valid == 0) {

            //
            // This page directory entry is empty, go to the next one.
            //

            PointerPde += 1;
            PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
            Va = MiGetVirtualAddressMappedByPte (PointerPte);

            if ((Va > EndingAddress) || (Va == NULL)) {

                //
                // All done, return.
                //

                return Status;
            }

#if (_MI_PAGING_LEVELS >= 3)
            if (MiIsPteOnPdeBoundary (PointerPde)) {
                PointerPpe += 1;
                ASSERT (PointerPpe == MiGetPteAddress (PointerPde));
                PointerPxe = MiGetPteAddress (PointerPpe);
                goto restart;
            }
#endif
        }

        //
        // A valid PDE has been located, examine each PTE.
        //

        ASSERT64 (PointerPpe->u.Hard.Valid == 1);
        ASSERT (PointerPde->u.Hard.Valid == 1);
        ASSERT (Va <= EndingAddress);

        if (MI_PDE_MAPS_LARGE_PAGE (PointerPde)) {

            //
            // This is a large page mapping - if the first page is backed
            // by RAM, then they all must be, so add the entire range
            // to the dump.
            //

            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPde);

            if (MI_IS_PFN (PageFrameIndex)) {

                NumberOfPages = (((ULONG_PTR)MiGetVirtualAddressMappedByPde (PointerPde + 1) - (ULONG_PTR)Va) / PAGE_SIZE);

                Status = Context->SetDumpRange (Context,
                                                Va,
                                                NumberOfPages,
                                                1);

                if (!NT_SUCCESS (Status)) {
#if DBG
                    DbgPrint ("Adding large VA %p to crashdump failed\n", Va);
                    DbgBreakPoint ();
#endif
                    Status = FALSE;
                }
            }

            PointerPde += 1;
            Va = MiGetVirtualAddressMappedByPde (PointerPde);

            if ((Va > EndingAddress) || (Va == NULL)) {
                return Status;
            }

            PointerPte = MiGetPteAddress (Va);
            PointerPpe = MiGetPpeAddress (Va);
            PointerPxe = MiGetPxeAddress (Va);

            //
            // March on to the next page directory.
            //

            continue;
        }

        //
        // Exclude memory that is mapped in the system cache.
        // Note the system cache starts and ends on page directory boundaries
        // and is never mapped with large pages.
        //

        if (MI_IS_SYSTEM_CACHE_ADDRESS (Va)) {

            PointerPde += 1;
            Va = MiGetVirtualAddressMappedByPde (PointerPde);

            if ((Va > EndingAddress) || (Va == NULL)) {
                return Status;
            }

            PointerPte = MiGetPteAddress (Va);
            PointerPpe = MiGetPpeAddress (Va);
            PointerPxe = MiGetPxeAddress (Va);

            //
            // March on to the next page directory.
            //

            continue;
        }

        do {

            AddThisPage = FALSE;
            PageFrameIndex = 0;

            if (PointerPte->u.Hard.Valid == 1) {
                PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
                AddThisPage = TRUE;
            }
            else if ((PointerPte->u.Soft.Prototype == 0) &&
                     (PointerPte->u.Soft.Transition == 1)) {
                PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (PointerPte);
                AddThisPage = TRUE;
            }

            if (AddThisPage == TRUE) {

                //
                // Include only addresses that are backed by RAM, not mapped to
                // I/O space.
                //

                AddThisPage = MI_IS_PFN (PageFrameIndex);

                if (AddThisPage == TRUE) {

                    //
                    // Add this page to the dump.
                    //
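                    //
                    // N.B. The final argument of 2 here, versus 0 or 1 at
                    // the other call sites, appears to tell SetDumpRange
                    // that the first argument is a page frame number
                    // rather than a virtual address - an inference drawn
                    // only from the call sites in this module.
                    //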
                    Status = Context->SetDumpRange (Context,
                                                    (PVOID) PageFrameIndex,
                                                    1,
                                                    2);

                    if (!NT_SUCCESS (Status)) {
#if DBG
                        DbgPrint ("Adding VA %p to crashdump failed\n", Va);
                        DbgBreakPoint ();
#endif
                        Status = FALSE;
                    }
                }
            }

            Va = (PVOID)((ULONG_PTR)Va + PAGE_SIZE);
            PointerPte += 1;

            ASSERT64 (PointerPpe->u.Hard.Valid == 1);
            ASSERT (PointerPde->u.Hard.Valid == 1);

            if ((Va > EndingAddress) || (Va == NULL)) {
                return Status;
            }

            //
            // If not at the end of a page table and still within the specified
            // range, just march directly on to the next PTE.
            //
            // Otherwise, if the virtual address is on a page directory boundary
            // then attempt to leap forward skipping over empty mappings
            // where possible.
            //

        } while (!MiIsVirtualAddressOnPdeBoundary(Va));

        ASSERT (PointerPte == MiGetPteAddress (Va));

        PointerPde = MiGetPdeAddress (Va);
        PointerPpe = MiGetPpeAddress (Va);
        PointerPxe = MiGetPxeAddress (Va);

    } while (TRUE);

    // NEVER REACHED
}
VOID
MiAddActivePageDirectories (
    IN PMM_KERNEL_DUMP_CONTEXT Context
    )
{
    UCHAR i;
    PKPRCB Prcb;
    PKPROCESS Process;
    PFN_NUMBER PageFrameIndex;

#if defined (_X86PAE_)
    PMMPTE PointerPte;
    ULONG j;
#endif

    for (i = 0; i < KeNumberProcessors; i += 1) {

        Prcb = KiProcessorBlock[i];
        Process = Prcb->CurrentThread->ApcState.Process;

#if defined (_X86PAE_)

        //
        // Add the 4 top-level page directory pages to the dump.
        //

        PointerPte = (PMMPTE) ((PEPROCESS)Process)->PaeTop;

        for (j = 0; j < PD_PER_SYSTEM; j += 1) {
            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
            PointerPte += 1;
            Context->SetDumpRange (Context, (PVOID) PageFrameIndex, 1, 2);
        }

        //
        // Add the real cr3 page to the dump. Note that the value stored in
        // the directory table base is really a physical address (not a
        // frame).
        //

        PageFrameIndex = Process->DirectoryTableBase[0];
        PageFrameIndex = (PageFrameIndex >> PAGE_SHIFT);
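        //
        // For example (hypothetical value), a DirectoryTableBase[0] of
        // 0x00039000 yields page frame 0x39 after shifting by the x86
        // PAGE_SHIFT of 12.
        //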
#else

        PageFrameIndex =
            MI_GET_DIRECTORY_FRAME_FROM_PROCESS ((PEPROCESS)(Process));

#endif

        //
        // Add this physical page to the dump.
        //

        Context->SetDumpRange (Context, (PVOID) PageFrameIndex, 1, 2);
    }

#if defined(_IA64_)

    //
    // The first processor's PCR is mapped in region 4 which is not (and
    // cannot be) scanned later, so explicitly add it to the dump here.
    //

    Prcb = KiProcessorBlock[0];
    Context->SetDumpRange (Context, (PVOID) Prcb->PcrPage, 1, 2);

#endif
}
VOID
MmGetKernelDumpRange (
    IN PMM_KERNEL_DUMP_CONTEXT Context
    )

/*++

Routine Description:

    Adds (and subtracts) ranges of system memory to the crashdump.

Arguments:

    Context - Crashdump context pointer.

Return Value:

    None.

Environment:

    Kernel mode, post-bugcheck.

    For use by crash dump routines ONLY.

--*/

{
    PVOID Va;
    SIZE_T NumberOfBytes;

    ASSERT ((Context != NULL) &&
            (Context->SetDumpRange != NULL) &&
            (Context->FreeDumpRange != NULL));

    MiAddActivePageDirectories (Context);

#if defined(_IA64_)

    //
    // Note each IA64 region must be passed separately to MiAddRange...
    //

    Va = (PVOID) ALT4KB_PERMISSION_TABLE_START;
    NumberOfBytes = PDE_UTBASE + PAGE_SIZE - (ULONG_PTR) Va;
    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

    Va = (PVOID) MM_SESSION_SPACE_DEFAULT;
    NumberOfBytes = PDE_STBASE + PAGE_SIZE - (ULONG_PTR) Va;
    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

    Va = (PVOID) KADDRESS_BASE;
    NumberOfBytes = PDE_KTBASE + PAGE_SIZE - (ULONG_PTR) Va;
    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

#elif defined(_AMD64_)

    Va = (PVOID) MM_SYSTEM_RANGE_START;
    NumberOfBytes = MM_KSEG0_BASE - (ULONG_PTR) Va;
    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

    Va = (PVOID) MM_KSEG2_BASE;
    NumberOfBytes = MM_SYSTEM_SPACE_START - (ULONG_PTR) Va;
    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

    Va = (PVOID) MM_PAGED_POOL_START;
    NumberOfBytes = MM_SYSTEM_SPACE_END - (ULONG_PTR) Va + 1;
    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

#else

    Va = MmSystemRangeStart;
    NumberOfBytes = MM_SYSTEM_SPACE_END - (ULONG_PTR) Va + 1;
    MiAddRangeToCrashDump (Context, Va, NumberOfBytes);

#endif

    //
    // Add any memory that is part of the kernel space but does not
    // have a virtual mapping (hence was not collected above).
    //

    MiAddPagesWithNoMappings (Context);

    //
    // Remove nonpaged pool that is not in use.
    //

    MiRemoveFreePoolMemoryFromDump (Context);
}
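
Every decision in this module funnels through the two callbacks on MM_KERNEL_DUMP_CONTEXT: SetDumpRange to include a range in the dump and FreeDumpRange to exclude one. Below is a minimal host-side sketch of that contract as it can be inferred from the call sites above; the structure layout, the NTSTATUS return type, and all names here are assumptions for illustration, not the real crashdump stack.

/* Illustrative sketch only - not part of the leaked module above. */

#include <stdio.h>
#include <stddef.h>

typedef long NTSTATUS;                     /* assumed: callbacks return NTSTATUS */
#define NT_SUCCESS(Status) ((Status) >= 0)

typedef struct _DUMP_CONTEXT DUMP_CONTEXT;

struct _DUMP_CONTEXT {
    size_t PagesKept;                      /* pages added to the dump     */
    size_t PagesDropped;                   /* pages removed from the dump */
    NTSTATUS (*SetDumpRange)(DUMP_CONTEXT *Context, void *Va,
                             size_t PageCount, unsigned long Flags);
    NTSTATUS (*FreeDumpRange)(DUMP_CONTEXT *Context, void *Va,
                              size_t PageCount, unsigned long Flags);
};

static NTSTATUS
SetRange (DUMP_CONTEXT *Context, void *Va, size_t PageCount, unsigned long Flags)
{
    Context->PagesKept += PageCount;
    printf ("keep %zu page(s) at %p (flags %lu)\n", PageCount, Va, Flags);
    return 0;
}

static NTSTATUS
FreeRange (DUMP_CONTEXT *Context, void *Va, size_t PageCount, unsigned long Flags)
{
    Context->PagesDropped += PageCount;
    printf ("drop %zu page(s) at %p (flags %lu)\n", PageCount, Va, Flags);
    return 0;
}

int
main (void)
{
    DUMP_CONTEXT Context = { 0, 0, SetRange, FreeRange };

    /* Exercise the callbacks the way MiAddRangeToCrashDump and
       MiRemoveFreePoolMemoryFromDump drive them, with made-up addresses. */
    Context.SetDumpRange (&Context, (void *)(size_t) 0x80001000, 4, 1);
    Context.FreeDumpRange (&Context, (void *)(size_t) 0x80003000, 2, 0);

    printf ("kept %zu, dropped %zu page(s)\n",
            Context.PagesKept, Context.PagesDropped);
    return 0;
}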