Source code of Windows XP (NT5)

2600 lines
63 KiB

/*++

Copyright (c) 1989  Microsoft Corporation

Module Name:

    specpool.c

Abstract:

    This module contains the routines which allocate and deallocate
    pages from special pool.

Author:

    Lou Perazzoli (loup) 6-Apr-1989
    Landy Wang (landyw) 02-June-1997

Revision History:

--*/
#include "mi.h"

#ifndef NO_POOL_CHECKS
VOID
MiInitializeSpecialPoolCriteria (
    IN VOID
    );

VOID
MiSpecialPoolTimerDispatch (
    IN PKDPC Dpc,
    IN PVOID DeferredContext,
    IN PVOID SystemArgument1,
    IN PVOID SystemArgument2
    );
#endif

LOGICAL
MmSetSpecialPool (
    IN LOGICAL Enable
    );

PVOID
MiAllocateSpecialPool (
    IN SIZE_T NumberOfBytes,
    IN ULONG Tag,
    IN POOL_TYPE PoolType,
    IN ULONG SpecialPoolType
    );

VOID
MmFreeSpecialPool (
    IN PVOID P
    );

LOGICAL
MiProtectSpecialPool (
    IN PVOID VirtualAddress,
    IN ULONG NewProtect
    );

VOID
MiMakeSpecialPoolPagable (
    IN PVOID VirtualAddress,
    IN PMMPTE PointerPte,
    IN POOL_TYPE PoolType
    );

LOGICAL
MiExpandSpecialPool (
    IN POOL_TYPE PoolType,
    IN KIRQL OldIrql
    );

#ifdef ALLOC_PRAGMA
#ifndef NO_POOL_CHECKS
#pragma alloc_text(INIT, MiInitializeSpecialPoolCriteria)
#pragma alloc_text(PAGE, MiEnableRandomSpecialPool)
#endif
#if defined (_WIN64)
#pragma alloc_text(PAGESPEC, MiDeleteSessionSpecialPool)
#pragma alloc_text(PAGE, MiInitializeSpecialPool)
#else
#pragma alloc_text(INIT, MiInitializeSpecialPool)
#endif
#pragma alloc_text(PAGESPEC, MiExpandSpecialPool)
#pragma alloc_text(PAGESPEC, MmFreeSpecialPool)
#pragma alloc_text(PAGESPEC, MiAllocateSpecialPool)
#pragma alloc_text(PAGESPEC, MiMakeSpecialPoolPagable)
#pragma alloc_text(PAGESPEC, MiProtectSpecialPool)
#endif
ULONG MmSpecialPoolTag;
PVOID MmSpecialPoolStart;
PVOID MmSpecialPoolEnd;

#if defined (_WIN64)
PVOID MmSessionSpecialPoolStart;
PVOID MmSessionSpecialPoolEnd;
#else
PMMPTE MiSpecialPoolExtra;
ULONG MiSpecialPoolExtraCount;
#endif

ULONG MmSpecialPoolRejected[6];
LOGICAL MmSpecialPoolCatchOverruns = TRUE;

PMMPTE MiSpecialPoolFirstPte;
PMMPTE MiSpecialPoolLastPte;

ULONG MiSpecialPagesNonPaged;
ULONG MiSpecialPagesPagable;
ULONG MmSpecialPagesInUse;          // Used by the debugger
ULONG MiSpecialPagesNonPagedPeak;
ULONG MiSpecialPagesPagablePeak;
ULONG MiSpecialPagesInUsePeak;

ULONG MiSpecialPagesNonPagedMaximum;
LOGICAL MiSpecialPoolEnabled = TRUE;

extern LOGICAL MmPagedPoolMaximumDesired;
extern ULONG MmPteFailures[MaximumPtePoolTypes];

#if defined (_X86_)
extern ULONG MiExtraPtes1;
KSPIN_LOCK MiSpecialPoolLock;
#endif
#if !defined (_WIN64)

LOGICAL
MiInitializeSpecialPool (
    IN POOL_TYPE PoolType
    )

/*++

Routine Description:

    This routine initializes the special pool used to catch pool corruptors.

Arguments:

    PoolType - Supplies the pool type being initialized (unreferenced on
               32-bit systems, which share a single special pool).

Return Value:

    TRUE if special pool was initialized, FALSE if not.

Environment:

    Kernel mode, no locks held.

--*/

{
    ULONG i;
    PMMPTE PointerPte;
    PMMPTE PointerPteBase;
    ULONG SpecialPoolPtes;

    UNREFERENCED_PARAMETER (PoolType);

    if ((MmVerifyDriverBufferLength == (ULONG)-1) &&
        ((MmSpecialPoolTag == 0) || (MmSpecialPoolTag == (ULONG)-1))) {
        return FALSE;
    }

    //
    // Even though we asked for some number of system PTEs to map special pool,
    // we may not have been given them all.  Large memory systems are
    // autoconfigured so that a large nonpaged pool is the default.
    // x86 systems booted with the 3GB switch don't have enough
    // contiguous virtual address space to support this, so our request may
    // have been trimmed.  Handle that intelligently here so we don't exhaust
    // the system PTE pool and fail to handle thread stacks and I/O.
    //

    if (MmNumberOfSystemPtes < 0x3000) {
        SpecialPoolPtes = MmNumberOfSystemPtes / 6;
    }
    else {
        SpecialPoolPtes = MmNumberOfSystemPtes / 3;
    }

    //
    // 32-bit systems are very cramped on virtual address space.  Apply
    // a cap here to prevent overzealousness.
    //

    if (SpecialPoolPtes > MM_SPECIAL_POOL_PTES) {
        SpecialPoolPtes = MM_SPECIAL_POOL_PTES;
    }

    SpecialPoolPtes = MI_ROUND_TO_SIZE (SpecialPoolPtes, PTE_PER_PAGE);

#if defined (_X86_)

    //
    // For x86, we can actually use an additional range of special PTEs to
    // map memory with and so we can raise the limit from 25000 to approximately
    // 256000.
    //

    if ((MiExtraPtes1 != 0) &&
        (ExpMultiUserTS == FALSE) &&
        (MiRequestedSystemPtes != (ULONG)-1)) {

        if (MmPagedPoolMaximumDesired == TRUE) {

            //
            // The low PTEs between 2 and 3GB virtual must be used
            // for both regular system PTE usage and special pool usage.
            //

            SpecialPoolPtes = (MiNumberOfExtraSystemPdes / 2) * PTE_PER_PAGE;
        }
        else {

            //
            // The low PTEs between 2 and 3GB virtual can be used
            // exclusively for special pool.
            //

            SpecialPoolPtes = MiNumberOfExtraSystemPdes * PTE_PER_PAGE;
        }
    }

    KeInitializeSpinLock (&MiSpecialPoolLock);
#endif

    //
    // A PTE disappears for double mapping the system page directory.
    // When guard paging for system PTEs is enabled, a few more go also.
    // Thus, not being able to get all the PTEs we wanted is not fatal and
    // we just back off a bit and retry.
    //

    //
    // Always request an even number of PTEs so each one can be guard paged.
    //

    ASSERT ((SpecialPoolPtes & (PTE_PER_PAGE - 1)) == 0);

    do {
        PointerPte = MiReserveAlignedSystemPtes (SpecialPoolPtes,
                                                 SystemPteSpace,
                                                 MM_VA_MAPPED_BY_PDE);

        if (PointerPte != NULL) {
            break;
        }

        ASSERT (SpecialPoolPtes >= PTE_PER_PAGE);

        SpecialPoolPtes -= PTE_PER_PAGE;

    } while (SpecialPoolPtes != 0);

    //
    // We deliberately try to get a huge number of system PTEs.  Don't let
    // any of these count as a real failure in our debugging counters.
    //

    MmPteFailures[SystemPteSpace] = 0;

    if (SpecialPoolPtes == 0) {
        return FALSE;
    }

    ASSERT (SpecialPoolPtes >= PTE_PER_PAGE);

    //
    // Build the list of PTE pairs using only the first page table page for
    // now.  Keep the other PTEs in reserve so they can be returned to the
    // PTE pool in case some driver wants a huge amount.
    //

    PointerPteBase = PointerPte;
    MmSpecialPoolStart = MiGetVirtualAddressMappedByPte (PointerPte);
    ASSERT (MiIsVirtualAddressOnPdeBoundary (MmSpecialPoolStart));

    for (i = 0; i < PTE_PER_PAGE; i += 2) {
        PointerPte->u.List.NextEntry = ((PointerPte + 2) - MmSystemPteBase);
        PointerPte += 2;
    }

    MiSpecialPoolExtra = PointerPte;
    MiSpecialPoolExtraCount = SpecialPoolPtes - PTE_PER_PAGE;

    PointerPte -= 2;
    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

    MmSpecialPoolEnd = MiGetVirtualAddressMappedByPte (PointerPte + 1);

    MiSpecialPoolLastPte = PointerPte;
    MiSpecialPoolFirstPte = PointerPteBase;

    //
    // Limit nonpaged special pool based on the memory size.
    //

    MiSpecialPagesNonPagedMaximum = (ULONG)(MmResidentAvailablePages >> 4);

    if (MmNumberOfPhysicalPages > 0x3FFF) {
        MiSpecialPagesNonPagedMaximum = (ULONG)(MmResidentAvailablePages >> 3);
    }

    ExSetPoolFlags (EX_SPECIAL_POOL_ENABLED);

    return TRUE;
}
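
//
// Editorial sketch (not original NT code): each special pool allocation
// consumes a PTE pair - the even PTE maps the data page and the odd PTE
// stays zeroed/no-access as a guard. The loop above chains the even PTEs
// into a free list through u.List.NextEntry:
//
//     PTE[0] -> PTE[2] -> PTE[4] -> ... -> PTE[PTE_PER_PAGE-2] -> MM_EMPTY_PTE_LIST
//
// PTE[1], PTE[3], ... remain guard entries, so a read or write that runs
// off the end of any mapped page faults immediately.
//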
#else

PMMPTE MiSpecialPoolNextPdeForSpecialPoolExpansion;
PMMPTE MiSpecialPoolLastPdeForSpecialPoolExpansion;

LOGICAL
MiInitializeSpecialPool (
    IN POOL_TYPE PoolType
    )

/*++

Routine Description:

    This routine initializes special pool used to catch pool corruptors.
    Only NT64 systems have sufficient virtual address space to make use of this.

Arguments:

    PoolType - Supplies the pool type (system global or session) being
               initialized.

Return Value:

    TRUE if the requested special pool was initialized, FALSE if not.

Environment:

    Kernel mode, no locks held.

--*/

{
    PVOID BaseAddress;
    PVOID EndAddress;
    KIRQL OldIrql;
    MMPTE TempPte;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE PointerPpe;
    PMMPTE EndPpe;
    PMMPTE EndPde;
    LOGICAL SpecialPoolCreated;
    SIZE_T AdditionalCommittedPages;
    PFN_NUMBER PageFrameIndex;

    PAGED_CODE ();

    if (PoolType & SESSION_POOL_MASK) {

        ASSERT (MmSessionSpace->SpecialPoolFirstPte == NULL);

        if (MmSessionSpecialPoolStart == 0) {
            return FALSE;
        }

        BaseAddress = MmSessionSpecialPoolStart;
        ASSERT (((ULONG_PTR)BaseAddress & (MM_VA_MAPPED_BY_PDE - 1)) == 0);
        EndAddress = (PVOID)((ULONG_PTR)MmSessionSpecialPoolEnd - 1);
    }
    else {

        if (MmSpecialPoolStart == 0) {
            return FALSE;
        }

        BaseAddress = MmSpecialPoolStart;
        ASSERT (((ULONG_PTR)BaseAddress & (MM_VA_MAPPED_BY_PDE - 1)) == 0);
        EndAddress = (PVOID)((ULONG_PTR)MmSpecialPoolEnd - 1);

        //
        // Construct empty page directory parent mappings as needed.
        //

        PointerPpe = MiGetPpeAddress (BaseAddress);
        EndPpe = MiGetPpeAddress (EndAddress);
        TempPte = ValidKernelPde;
        AdditionalCommittedPages = 0;

        LOCK_PFN (OldIrql);

        while (PointerPpe <= EndPpe) {
            if (PointerPpe->u.Long == 0) {
                PageFrameIndex = MiRemoveZeroPage (
                                     MI_GET_PAGE_COLOR_FROM_PTE (PointerPpe));
                TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
                MI_WRITE_VALID_PTE (PointerPpe, TempPte);
                MiInitializePfn (PageFrameIndex, PointerPpe, 1);
                MmResidentAvailablePages -= 1;
                AdditionalCommittedPages += 1;
            }
            PointerPpe += 1;
        }

        UNLOCK_PFN (OldIrql);

        InterlockedExchangeAddSizeT (&MmTotalCommittedPages,
                                     AdditionalCommittedPages);
    }

    //
    // Build just one page table page for session special pool - the rest
    // are built on demand.
    //

    ASSERT (MiGetPpeAddress(BaseAddress)->u.Hard.Valid == 1);

    PointerPte = MiGetPteAddress (BaseAddress);
    PointerPde = MiGetPdeAddress (BaseAddress);
    EndPde = MiGetPdeAddress (EndAddress);

#if DBG
    //
    // The special pool address range better be unused.
    //

    while (PointerPde <= EndPde) {
        ASSERT (PointerPde->u.Long == 0);
        PointerPde += 1;
    }
    PointerPde = MiGetPdeAddress (BaseAddress);
#endif

    if (PoolType & SESSION_POOL_MASK) {
        MmSessionSpace->NextPdeForSpecialPoolExpansion = PointerPde;
        MmSessionSpace->LastPdeForSpecialPoolExpansion = EndPde;
    }
    else {
        MiSpecialPoolNextPdeForSpecialPoolExpansion = PointerPde;
        MiSpecialPoolLastPdeForSpecialPoolExpansion = EndPde;

        //
        // Cap nonpaged special pool based on the memory size.
        //

        MiSpecialPagesNonPagedMaximum = (ULONG)(MmResidentAvailablePages >> 4);

        if (MmNumberOfPhysicalPages > 0x3FFF) {
            MiSpecialPagesNonPagedMaximum = (ULONG)(MmResidentAvailablePages >> 3);
        }
    }

    LOCK_PFN (OldIrql);

    SpecialPoolCreated = MiExpandSpecialPool (PoolType, OldIrql);

    UNLOCK_PFN (OldIrql);

    return SpecialPoolCreated;
}
VOID
MiDeleteSessionSpecialPool (
    VOID
    )

/*++

Routine Description:

    This routine deletes the session special pool range used to catch
    pool corruptors.  Only NT64 systems have the extra virtual address
    space in the session to make use of this.

Arguments:

    None.

Return Value:

    None.

Environment:

    Kernel mode, no locks held.

--*/

{
    PVOID BaseAddress;
    PVOID EndAddress;
    KIRQL OldIrql;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE StartPde;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER PageTablePages;
    PMMPTE EndPde;
#if DBG
    PMMPTE StartPte;
    PMMPTE EndPte;
#endif

    PAGED_CODE ();

    //
    // If the initial creation of this session's special pool failed, then
    // there's nothing to delete.
    //

    if (MmSessionSpace->SpecialPoolFirstPte == NULL) {
        return;
    }

    if (MmSessionSpace->SpecialPagesInUse != 0) {
        KeBugCheckEx (SESSION_HAS_VALID_SPECIAL_POOL_ON_EXIT,
                      (ULONG_PTR)MmSessionSpace->SessionId,
                      MmSessionSpace->SpecialPagesInUse,
                      0,
                      0);
    }

    //
    // Special pool page table pages are expanded such that all PDEs after the
    // first blank one must also be blank.
    //

    BaseAddress = MmSessionSpecialPoolStart;
    EndAddress = (PVOID)((ULONG_PTR)MmSessionSpecialPoolEnd - 1);

    ASSERT (((ULONG_PTR)BaseAddress & (MM_VA_MAPPED_BY_PDE - 1)) == 0);
    ASSERT (MiGetPpeAddress(BaseAddress)->u.Hard.Valid == 1);
    ASSERT (MiGetPdeAddress(BaseAddress)->u.Hard.Valid == 1);

    PointerPte = MiGetPteAddress (BaseAddress);
    PointerPde = MiGetPdeAddress (BaseAddress);
    EndPde = MiGetPdeAddress (EndAddress);
    StartPde = PointerPde;

    //
    // No need to flush the TB below as the entire TB will be flushed
    // on return when the rest of the session space is destroyed.
    //

    while (PointerPde <= EndPde) {

        if (PointerPde->u.Long == 0) {
            break;
        }

#if DBG
        PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
        StartPte = PointerPte;
        EndPte = PointerPte + PTE_PER_PAGE;

        while (PointerPte < EndPte) {
            ASSERT ((PointerPte + 1)->u.Long == 0);
            PointerPte += 2;
        }
#endif

        PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPde);
        MiSessionPageTableRelease (PageFrameIndex);
        *PointerPde = ZeroKernelPte;
        PointerPde += 1;
    }

    PageTablePages = PointerPde - StartPde;

#if DBG
    //
    // The remaining session special pool address range better be unused.
    //

    while (PointerPde <= EndPde) {
        ASSERT (PointerPde->u.Long == 0);
        PointerPde += 1;
    }
#endif

    MiReturnCommitment (PageTablePages);
    MM_TRACK_COMMIT (MM_DBG_COMMIT_SESSION_POOL_PAGE_TABLES, 0 - PageTablePages);
    MM_BUMP_COUNTER(42, 0 - PageTablePages);
    MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_ALLOC,
                         (ULONG)(0 - PageTablePages));

    LOCK_SESSION_SPACE_WS (OldIrql, PsGetCurrentThread ());
    MmSessionSpace->NonPagablePages -= PageTablePages;
    UNLOCK_SESSION_SPACE_WS (OldIrql);

    InterlockedExchangeAddSizeT (&MmSessionSpace->CommittedPages, 0 - PageTablePages);

    MmSessionSpace->SpecialPoolFirstPte = NULL;
}
#endif

#if defined (_X86_)

LOGICAL
MiRecoverSpecialPtes (
    IN ULONG NumberOfPtes
    )

/*++

Routine Description:

    This routine returns PTEs held in the special pool extra reserve to the
    general system PTE pool so that callers with large system PTE needs can
    be satisfied.  (This header comment is editorial; the routine was
    undocumented in the original source.)

Arguments:

    NumberOfPtes - Supplies the number of PTEs needed; rounded up here to a
                   full page table multiple.

Return Value:

    TRUE if the PTEs were recovered from the extra reserve, FALSE if not.

--*/

{
    KIRQL OldIrql;
    PMMPTE PointerPte;

    if (MiSpecialPoolExtraCount == 0) {
        return FALSE;
    }

    //
    // Round the requested number of PTEs up to a full page table multiple.
    //

    NumberOfPtes = MI_ROUND_TO_SIZE (NumberOfPtes, PTE_PER_PAGE);

    //
    // If the caller needs more than we have, then do nothing and return FALSE.
    //

    ExAcquireSpinLock (&MiSpecialPoolLock, &OldIrql);

    if (NumberOfPtes > MiSpecialPoolExtraCount) {
        ExReleaseSpinLock (&MiSpecialPoolLock, OldIrql);
        return FALSE;
    }

    //
    // Return the tail end of the extra reserve.
    //

    MiSpecialPoolExtraCount -= NumberOfPtes;
    PointerPte = MiSpecialPoolExtra + MiSpecialPoolExtraCount;

    ExReleaseSpinLock (&MiSpecialPoolLock, OldIrql);

    MiReleaseSplitSystemPtes (PointerPte, NumberOfPtes, SystemPteSpace);

    return TRUE;
}
#endif
LOGICAL
MiExpandSpecialPool (
    IN POOL_TYPE PoolType,
    IN KIRQL OldIrql
    )

/*++

Routine Description:

    This routine attempts to allocate another page table page for the
    requested special pool.

Arguments:

    PoolType - Supplies the special pool type being expanded.

    OldIrql - Supplies the previous irql the PFN lock was acquired at.

Return Value:

    TRUE if expansion occurred, FALSE if not.

Environment:

    Kernel mode, PFN lock held.  The PFN lock may be released and reacquired.

--*/

{
#if defined (_WIN64)

    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PFN_NUMBER PageFrameIndex;
    NTSTATUS Status;
    PMMPTE SpecialPoolFirstPte;
    PMMPTE SpecialPoolLastPte;
    PMMPTE *NextPde;
    PMMPTE *LastPde;
    PMMPTE PteBase;
    PFN_NUMBER ContainingFrame;
    LOGICAL SessionAllocation;
    PMMPTE *SpecialPoolFirstPteGlobal;
    PMMPTE *SpecialPoolLastPteGlobal;

    if (PoolType & SESSION_POOL_MASK) {
        NextPde = &MmSessionSpace->NextPdeForSpecialPoolExpansion;
        LastPde = &MmSessionSpace->LastPdeForSpecialPoolExpansion;
        PteBase = MI_PTE_BASE_FOR_LOWEST_SESSION_ADDRESS;
        ContainingFrame = MmSessionSpace->SessionPageDirectoryIndex;
        SessionAllocation = TRUE;
        SpecialPoolFirstPteGlobal = &MmSessionSpace->SpecialPoolFirstPte;
        SpecialPoolLastPteGlobal = &MmSessionSpace->SpecialPoolLastPte;
    }
    else {
        NextPde = &MiSpecialPoolNextPdeForSpecialPoolExpansion;
        LastPde = &MiSpecialPoolLastPdeForSpecialPoolExpansion;
        PteBase = MmSystemPteBase;
        ContainingFrame = 0;
        SessionAllocation = FALSE;
        SpecialPoolFirstPteGlobal = &MiSpecialPoolFirstPte;
        SpecialPoolLastPteGlobal = &MiSpecialPoolLastPte;
    }

    PointerPde = *NextPde;

    if (PointerPde > *LastPde) {
        return FALSE;
    }

    UNLOCK_PFN2 (OldIrql);

    //
    // Acquire a page and initialize it.  If no one else has done this in
    // the interim, then insert it into the list.
    //
    // Note that CantExpand commitment charging must be used because this
    // path can get called in the idle thread context while processing DPCs
    // and the normal commitment charging may queue a pagefile extension using
    // an event on the local stack which is illegal.
    //

    if (MiChargeCommitmentCantExpand (1, FALSE) == FALSE) {
        if (PoolType & SESSION_POOL_MASK) {
            MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_COMMIT);
        }
        LOCK_PFN2 (OldIrql);
        return FALSE;
    }

    if ((PoolType & SESSION_POOL_MASK) == 0) {
        ContainingFrame = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress(PointerPde));
    }

    Status = MiInitializeAndChargePfn (&PageFrameIndex,
                                       PointerPde,
                                       ContainingFrame,
                                       SessionAllocation);

    if (!NT_SUCCESS(Status)) {
        MiReturnCommitment (1);
        LOCK_PFN2 (OldIrql);

        //
        // Don't retry even if STATUS_RETRY is returned above because if we
        // preempted the thread that allocated the PDE before he gets a
        // chance to update the PTE chain, we can loop forever.
        //

        return FALSE;
    }

    PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);

    KeFillEntryTb ((PHARDWARE_PTE) PointerPde, PointerPte, FALSE);

    MM_BUMP_COUNTER(42, 1);

    if (PoolType & SESSION_POOL_MASK) {
        MM_TRACK_COMMIT (MM_DBG_COMMIT_SESSION_POOL_PAGE_TABLES, 1);
        MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_ALLOC, 1);
        MM_BUMP_SESS_COUNTER (MM_DBG_SESSION_NP_POOL_CREATE, 1);

        LOCK_SESSION_SPACE_WS (OldIrql, PsGetCurrentThread ());
        MmSessionSpace->NonPagablePages += 1;
        UNLOCK_SESSION_SPACE_WS (OldIrql);

        InterlockedExchangeAddSizeT (&MmSessionSpace->CommittedPages, 1);
    }
    else {
        MM_TRACK_COMMIT (MM_DBG_COMMIT_SPECIAL_POOL_MAPPING_PAGES, 1);
    }

    //
    // Build the list of PTE pairs.
    //

    SpecialPoolFirstPte = PointerPte;
    SpecialPoolLastPte = PointerPte + PTE_PER_PAGE;

    while (PointerPte < SpecialPoolLastPte) {
        PointerPte->u.List.NextEntry = (PointerPte + 2 - PteBase);
        (PointerPte + 1)->u.Long = 0;
        PointerPte += 2;
    }

    PointerPte -= 2;
    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

    ASSERT (PointerPde == *NextPde);
    ASSERT (PointerPde <= *LastPde);

    //
    // Insert the new page table page into the head of the current list (if
    // one exists) so it gets used first.
    //

    if (*SpecialPoolFirstPteGlobal == NULL) {

        //
        // This is the initial creation.
        //

        *SpecialPoolFirstPteGlobal = SpecialPoolFirstPte;
        *SpecialPoolLastPteGlobal = PointerPte;
        ExSetPoolFlags (EX_SPECIAL_POOL_ENABLED);
        LOCK_PFN2 (OldIrql);
    }
    else {

        //
        // This is actually an expansion.
        //

        LOCK_PFN2 (OldIrql);

        PointerPte->u.List.NextEntry = *SpecialPoolFirstPteGlobal - PteBase;
        *SpecialPoolFirstPteGlobal = SpecialPoolFirstPte;
    }

    ASSERT ((*SpecialPoolLastPteGlobal)->u.List.NextEntry == MM_EMPTY_PTE_LIST);

    *NextPde = *NextPde + 1;

#else

    ULONG i;
    PMMPTE PointerPte;

    UNREFERENCED_PARAMETER (PoolType);

    if (MiSpecialPoolExtraCount == 0) {
        return FALSE;
    }

    ExAcquireSpinLock (&MiSpecialPoolLock, &OldIrql);

    if (MiSpecialPoolExtraCount == 0) {
        ExReleaseSpinLock (&MiSpecialPoolLock, OldIrql);
        return FALSE;
    }

    ASSERT (MiSpecialPoolExtraCount >= PTE_PER_PAGE);

    PointerPte = MiSpecialPoolExtra;

    for (i = 0; i < PTE_PER_PAGE - 2; i += 2) {
        PointerPte->u.List.NextEntry = ((PointerPte + 2) - MmSystemPteBase);
        PointerPte += 2;
    }

    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

    MmSpecialPoolEnd = MiGetVirtualAddressMappedByPte (PointerPte + 1);

    MiSpecialPoolLastPte = PointerPte;
    MiSpecialPoolFirstPte = MiSpecialPoolExtra;

    MiSpecialPoolExtraCount -= PTE_PER_PAGE;
    MiSpecialPoolExtra += PTE_PER_PAGE;

    ExReleaseSpinLock (&MiSpecialPoolLock, OldIrql);

#endif

    return TRUE;
}
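
//
// Editorial note (not original NT code): expansion differs by platform.
// On NT64 the next reserved PDE receives a freshly charged page table page
// whose PTE pairs are chained onto the head of the free list; on 32-bit
// systems a PTE_PER_PAGE-sized slice is instead carved from the
// MiSpecialPoolExtra reserve that was set aside at initialization.
//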
PVOID
MmAllocateSpecialPool (
    IN SIZE_T NumberOfBytes,
    IN ULONG Tag,
    IN POOL_TYPE PoolType,
    IN ULONG SpecialPoolType
    )

/*++

Routine Description:

    This routine allocates virtual memory from special pool.  This allocation
    is made from the end of a physical page with the next PTE set to no access
    so that any reads or writes will cause an immediate fatal system crash.

    This lets us catch components that corrupt pool.

Arguments:

    NumberOfBytes - Supplies the number of bytes to commit.

    Tag - Supplies the tag of the requested allocation.

    PoolType - Supplies the pool type of the requested allocation.

    SpecialPoolType - Supplies the special pool type of the
                      requested allocation.

                      - 0 indicates overruns.
                      - 1 indicates underruns.
                      - 2 indicates use the systemwide pool policy.

Return Value:

    A non-NULL pointer if the requested allocation was fulfilled from special
    pool.  NULL if the allocation was not made.

Environment:

    Kernel mode, no pool locks held.

    Note this is a nonpagable wrapper so that machines without special pool
    can still support drivers allocating nonpaged pool at DISPATCH_LEVEL
    requesting special pool.

--*/

{
    if (MiSpecialPoolFirstPte == NULL) {

        //
        // The special pool allocation code was never initialized.
        //

        return NULL;
    }

#if defined (_WIN64)
    if (PoolType & SESSION_POOL_MASK) {
        if (MmSessionSpace->SpecialPoolFirstPte == NULL) {

            //
            // The special pool allocation code was never initialized.
            //

            return NULL;
        }
    }
#endif

    return MiAllocateSpecialPool (NumberOfBytes,
                                  Tag,
                                  PoolType,
                                  SpecialPoolType);
}
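
//
// Illustrative usage (editorial sketch, not original NT code): the pool
// manager calls through this wrapper when verifier/special pool criteria
// match a request.  A 24-byte overrun-catching nonpaged request would look
// roughly like this ('xTag' is a hypothetical pool tag):
//
//     PVOID p = MmAllocateSpecialPool (24, 'xTag', NonPagedPool, 0);
//
//     if (p != NULL) {
//         // p sits near the end of its page: writes past p+24 land in
//         // slop bytes that are checked at free time, and any access past
//         // the page end faults immediately on the guard page.
//         MmFreeSpecialPool (p);
//     }
//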
PVOID
MiAllocateSpecialPool (
    IN SIZE_T NumberOfBytes,
    IN ULONG Tag,
    IN POOL_TYPE PoolType,
    IN ULONG SpecialPoolType
    )

/*++

Routine Description:

    This routine allocates virtual memory from special pool.  This allocation
    is made from the end of a physical page with the next PTE set to no access
    so that any reads or writes will cause an immediate fatal system crash.

    This lets us catch components that corrupt pool.

Arguments:

    NumberOfBytes - Supplies the number of bytes to commit.

    Tag - Supplies the tag of the requested allocation.

    PoolType - Supplies the pool type of the requested allocation.

    SpecialPoolType - Supplies the special pool type of the
                      requested allocation.

                      - 0 indicates overruns.
                      - 1 indicates underruns.
                      - 2 indicates use the systemwide pool policy.

Return Value:

    A non-NULL pointer if the requested allocation was fulfilled from special
    pool.  NULL if the allocation was not made.

Environment:

    Kernel mode, no locks (not even pool locks) held.

--*/

{
    MMPTE TempPte;
    PFN_NUMBER PageFrameIndex;
    PMMPTE PointerPte;
    KIRQL OldIrql;
    PVOID Entry;
    PPOOL_HEADER Header;
    LARGE_INTEGER CurrentTime;
    LOGICAL CatchOverruns;
    PMMPTE SpecialPoolFirstPte;

    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
        if (KeGetCurrentIrql() > APC_LEVEL) {
            KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                          KeGetCurrentIrql(),
                          PoolType,
                          NumberOfBytes,
                          0x30);
        }
    }
    else {
        if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
            KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                          KeGetCurrentIrql(),
                          PoolType,
                          NumberOfBytes,
                          0x30);
        }
    }

#if !defined (_WIN64) && !defined (_X86PAE_)

    if ((MiExtraPtes1 != 0) || (MiUseMaximumSystemSpace != 0)) {

        extern const ULONG MMSECT;

        //
        // Prototype PTEs cannot come from lower special pool because
        // their address is encoded into PTEs and the encoding only covers
        // a max of 1GB from the start of paged pool.  Likewise fork
        // prototype PTEs.
        //

        if (Tag == MMSECT || Tag == 'lCmM') {
            return NULL;
        }
    }

    if (Tag == 'bSmM' || Tag == 'iCmM' || Tag == 'aCmM' || Tag == 'dSmM' || Tag == 'cSmM') {

        //
        // Mm subsections cannot come from this special pool because they
        // get encoded into PTEs - they must come from normal nonpaged pool.
        //

        return NULL;
    }

#endif

    if (MiChargeCommitmentCantExpand (1, FALSE) == FALSE) {
        MmSpecialPoolRejected[5] += 1;
        return NULL;
    }

    TempPte = ValidKernelPte;
    MI_SET_PTE_DIRTY (TempPte);

    LOCK_PFN2 (OldIrql);

restart:

    if (MiSpecialPoolEnabled == FALSE) {

        //
        // The special pool allocation code is currently disabled.
        //

        UNLOCK_PFN2 (OldIrql);
        MiReturnCommitment (1);
        return NULL;
    }

    if (MmAvailablePages < 200) {
        UNLOCK_PFN2 (OldIrql);
        MmSpecialPoolRejected[0] += 1;
        MiReturnCommitment (1);
        return NULL;
    }

    //
    // Don't get too aggressive until a paging file gets set up.
    //

    if (MmNumberOfPagingFiles == 0 && MmSpecialPagesInUse > MmAvailablePages / 2) {
        UNLOCK_PFN2 (OldIrql);
        MmSpecialPoolRejected[3] += 1;
        MiReturnCommitment (1);
        return NULL;
    }

    SpecialPoolFirstPte = MiSpecialPoolFirstPte;

#if defined (_WIN64)
    if (PoolType & SESSION_POOL_MASK) {
        SpecialPoolFirstPte = MmSessionSpace->SpecialPoolFirstPte;
    }
#endif

    if (SpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {

        //
        // Add another page table page (virtual address space and resources
        // permitting) and then restart the request.  The PFN lock may be
        // released and reacquired during this call.
        //

        if (MiExpandSpecialPool (PoolType, OldIrql) == TRUE) {
            goto restart;
        }

        UNLOCK_PFN2 (OldIrql);
        MmSpecialPoolRejected[2] += 1;
        MiReturnCommitment (1);
        return NULL;
    }

    if (MI_NONPAGABLE_MEMORY_AVAILABLE() < 100) {
        UNLOCK_PFN2 (OldIrql);
        MmSpecialPoolRejected[4] += 1;
        MiReturnCommitment (1);
        return NULL;
    }

    //
    // Cap nonpaged allocations to prevent runaways.
    //

    if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) {

        if (MiSpecialPagesNonPaged > MiSpecialPagesNonPagedMaximum) {
            UNLOCK_PFN2 (OldIrql);
            MmSpecialPoolRejected[1] += 1;
            MiReturnCommitment (1);
            return NULL;
        }

        MmResidentAvailablePages -= 1;
        MM_BUMP_COUNTER(31, 1);

        MiSpecialPagesNonPaged += 1;
        if (MiSpecialPagesNonPaged > MiSpecialPagesNonPagedPeak) {
            MiSpecialPagesNonPagedPeak = MiSpecialPagesNonPaged;
        }
    }
    else {
        MiSpecialPagesPagable += 1;
        if (MiSpecialPagesPagable > MiSpecialPagesPagablePeak) {
            MiSpecialPagesPagablePeak = MiSpecialPagesPagable;
        }
    }

    MM_TRACK_COMMIT (MM_DBG_COMMIT_SPECIAL_POOL_PAGES, 1);

    PointerPte = SpecialPoolFirstPte;

    ASSERT (PointerPte->u.List.NextEntry != MM_EMPTY_PTE_LIST);

#if defined (_WIN64)
    if (PoolType & SESSION_POOL_MASK) {
        MmSessionSpace->SpecialPoolFirstPte = PointerPte->u.List.NextEntry +
                                              MI_PTE_BASE_FOR_LOWEST_SESSION_ADDRESS;
        MmSessionSpace->SpecialPagesInUse += 1;
    }
    else
#endif
    {
        MiSpecialPoolFirstPte = PointerPte->u.List.NextEntry + MmSystemPteBase;
    }

    PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));

    MmSpecialPagesInUse += 1;
    if (MmSpecialPagesInUse > MiSpecialPagesInUsePeak) {
        MiSpecialPagesInUsePeak = MmSpecialPagesInUse;
    }

    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
    MI_WRITE_VALID_PTE (PointerPte, TempPte);
    MiInitializePfn (PageFrameIndex, PointerPte, 1);
    UNLOCK_PFN2 (OldIrql);

    //
    // Fill the page with a random pattern.
    //

    KeQueryTickCount(&CurrentTime);

    Entry = MiGetVirtualAddressMappedByPte (PointerPte);

    RtlFillMemory (Entry, PAGE_SIZE, (UCHAR) (CurrentTime.LowPart | 0x1));

    if (SpecialPoolType == 0) {
        CatchOverruns = TRUE;
    }
    else if (SpecialPoolType == 1) {
        CatchOverruns = FALSE;
    }
    else if (MmSpecialPoolCatchOverruns == TRUE) {
        CatchOverruns = TRUE;
    }
    else {
        CatchOverruns = FALSE;
    }

    if (CatchOverruns == TRUE) {
        Header = (PPOOL_HEADER) Entry;
        Entry = (PVOID)(((LONG_PTR)(((PCHAR)Entry + (PAGE_SIZE - NumberOfBytes)))) & ~((LONG_PTR)POOL_OVERHEAD - 1));
    }
    else {
        Header = (PPOOL_HEADER) ((PCHAR)Entry + PAGE_SIZE - POOL_OVERHEAD);
    }

    //
    // Zero the header and stash any information needed at release time.
    //

    RtlZeroMemory (Header, POOL_OVERHEAD);

    Header->Ulong1 = (ULONG)NumberOfBytes;

    ASSERT (NumberOfBytes <= PAGE_SIZE - POOL_OVERHEAD && PAGE_SIZE <= 32 * 1024);

    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
        Header->Ulong1 |= MI_SPECIAL_POOL_PAGABLE;
        MiMakeSpecialPoolPagable (Entry, PointerPte, PoolType);
        (PointerPte + 1)->u.Soft.PageFileHigh = MI_SPECIAL_POOL_PTE_PAGABLE;
    }
    else {
        (PointerPte + 1)->u.Soft.PageFileHigh = MI_SPECIAL_POOL_PTE_NONPAGABLE;
    }

#if defined (_WIN64)
    if (PoolType & SESSION_POOL_MASK) {
        Header->Ulong1 |= MI_SPECIAL_POOL_IN_SESSION;
    }
#endif

    if (PoolType & POOL_VERIFIER_MASK) {
        Header->Ulong1 |= MI_SPECIAL_POOL_VERIFIER;
    }

    Header->BlockSize = (UCHAR) (CurrentTime.LowPart | 0x1);
    Header->PoolTag = Tag;

    ASSERT ((Header->PoolType & POOL_QUOTA_MASK) == 0);

    return Entry;
}
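
//
// Editorial sketch (not original NT code) of the resulting page layout in
// the overrun-catching case (CatchOverruns == TRUE):
//
//     page base:  POOL_HEADER (+ optional MI_VERIFIER_POOL_HEADER)
//                 slop bytes filled with the random tick-count pattern
//                 caller's buffer, pushed up against the end of the page
//     next page:  no-access guard PTE
//
// Underrun-catching allocations invert this: the buffer starts at the page
// base and the header occupies the last POOL_OVERHEAD bytes of the page.
//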
#define SPECIAL_POOL_FREE_TRACE_LENGTH 16

typedef struct _SPECIAL_POOL_FREE_TRACE {
    PVOID StackTrace [SPECIAL_POOL_FREE_TRACE_LENGTH];
} SPECIAL_POOL_FREE_TRACE, *PSPECIAL_POOL_FREE_TRACE;
VOID
MmFreeSpecialPool (
    IN PVOID P
    )

/*++

Routine Description:

    This routine frees a special pool allocation.  The backing page is freed
    and the mapping virtual address is made no access (the next virtual
    address is already no access).

    The virtual address PTE pair is then placed into an LRU queue to provide
    maximum no-access (protection) life to catch components that access
    deallocated pool.

Arguments:

    P - Supplies the special pool virtual address to free.

Return Value:

    None.

Environment:

    Kernel mode, no locks (not even pool locks) held.

--*/

{
    MMPTE PteContents;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER PageTableFrameIndex;
    PMMPTE PointerPte;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    KIRQL OldIrql;
    ULONG SlopBytes;
    ULONG NumberOfBytesCalculated;
    ULONG NumberOfBytesRequested;
    POOL_TYPE PoolType;
    MMPTE LocalNoAccessPte;
    PPOOL_HEADER Header;
    PUCHAR Slop;
    ULONG i;
    LOGICAL BufferAtPageEnd;
    PMI_FREED_SPECIAL_POOL AllocationBase;
    LARGE_INTEGER CurrentTime;
#if defined (_X86_)
    PULONG_PTR StackPointer;
#else
    ULONG Hash;
#endif

    PointerPte = MiGetPteAddress (P);
    PteContents = *PointerPte;

    //
    // Check the PTE now so we can give a more friendly bugcheck rather than
    // crashing below on a bad reference.
    //

    if (PteContents.u.Hard.Valid == 0) {
        if ((PteContents.u.Soft.Protection == 0) ||
            (PteContents.u.Soft.Protection == MM_NOACCESS)) {
            KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                          (ULONG_PTR)P,
                          (ULONG_PTR)PteContents.u.Long,
                          0,
                          0x20);
        }
    }

    if (((ULONG_PTR)P & (PAGE_SIZE - 1))) {
        Header = PAGE_ALIGN (P);
        BufferAtPageEnd = TRUE;
    }
    else {
        Header = (PPOOL_HEADER)((PCHAR)PAGE_ALIGN (P) + PAGE_SIZE - POOL_OVERHEAD);
        BufferAtPageEnd = FALSE;
    }

    if (Header->Ulong1 & MI_SPECIAL_POOL_PAGABLE) {
        ASSERT ((PointerPte + 1)->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_PAGABLE);
        if (KeGetCurrentIrql() > APC_LEVEL) {
            KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                          KeGetCurrentIrql(),
                          PagedPool,
                          (ULONG_PTR)P,
                          0x31);
        }
        PoolType = PagedPool;
    }
    else {
        ASSERT ((PointerPte + 1)->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_NONPAGABLE);
        if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
            KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                          KeGetCurrentIrql(),
                          NonPagedPool,
                          (ULONG_PTR)P,
                          0x31);
        }
        PoolType = NonPagedPool;
    }

#if defined (_WIN64)
    if (Header->Ulong1 & MI_SPECIAL_POOL_IN_SESSION) {
        PoolType |= SESSION_POOL_MASK;
    }
#endif

    NumberOfBytesRequested = (ULONG)(USHORT)(Header->Ulong1 & ~(MI_SPECIAL_POOL_PAGABLE | MI_SPECIAL_POOL_VERIFIER | MI_SPECIAL_POOL_IN_SESSION));

    //
    // We gave the caller pool-header aligned data, so account for
    // that when checking here.
    //

    if (BufferAtPageEnd == TRUE) {

        NumberOfBytesCalculated = PAGE_SIZE - BYTE_OFFSET(P);

        if (NumberOfBytesRequested > NumberOfBytesCalculated) {

            //
            // Seems like we didn't give the caller enough - this is an error.
            //

            KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                          (ULONG_PTR)P,
                          NumberOfBytesRequested,
                          NumberOfBytesCalculated,
                          0x21);
        }

        if (NumberOfBytesRequested + POOL_OVERHEAD < NumberOfBytesCalculated) {

            //
            // Seems like we gave the caller too much - also an error.
            //

            KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                          (ULONG_PTR)P,
                          NumberOfBytesRequested,
                          NumberOfBytesCalculated,
                          0x22);
        }

        //
        // Check the memory before the start of the caller's allocation.
        //

        Slop = (PUCHAR)(Header + 1);

        if (Header->Ulong1 & MI_SPECIAL_POOL_VERIFIER) {
            Slop += sizeof(MI_VERIFIER_POOL_HEADER);
        }

        for ( ; Slop < (PUCHAR)P; Slop += 1) {

            if (*Slop != Header->BlockSize) {

                KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                              (ULONG_PTR)P,
                              (ULONG_PTR)Slop,
                              Header->Ulong1,
                              0x23);
            }
        }
    }
    else {
        NumberOfBytesCalculated = 0;
    }

    //
    // Check the memory after the end of the caller's allocation.
    //

    Slop = (PUCHAR)P + NumberOfBytesRequested;

    SlopBytes = (ULONG)((PUCHAR)(PAGE_ALIGN(P)) + PAGE_SIZE - Slop);

    if (BufferAtPageEnd == FALSE) {
        SlopBytes -= POOL_OVERHEAD;
        if (Header->Ulong1 & MI_SPECIAL_POOL_VERIFIER) {
            SlopBytes -= sizeof(MI_VERIFIER_POOL_HEADER);
        }
    }

    for (i = 0; i < SlopBytes; i += 1) {

        if (*Slop != Header->BlockSize) {

            //
            // The caller wrote slop between the free alignment we gave and the
            // end of the page (this is not detectable from page protection).
            //

            KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                          (ULONG_PTR)P,
                          (ULONG_PTR)Slop,
                          Header->Ulong1,
                          0x24);
        }
        Slop += 1;
    }

    //
    // Note session pool is directly tracked by default already so there is
    // no need to notify the verifier for session special pool allocations.
    //

    if ((Header->Ulong1 & (MI_SPECIAL_POOL_VERIFIER | MI_SPECIAL_POOL_IN_SESSION)) == MI_SPECIAL_POOL_VERIFIER) {
        VerifierFreeTrackedPool (P,
                                 NumberOfBytesRequested,
                                 PoolType,
                                 TRUE);
    }

    AllocationBase = (PMI_FREED_SPECIAL_POOL)(PAGE_ALIGN (P));

    AllocationBase->Signature = MI_FREED_SPECIAL_POOL_SIGNATURE;

    KeQueryTickCount(&CurrentTime);
    AllocationBase->TickCount = CurrentTime.LowPart;

    AllocationBase->NumberOfBytesRequested = NumberOfBytesRequested;
    AllocationBase->Pagable = (ULONG)PoolType;
    AllocationBase->VirtualAddress = P;
    AllocationBase->Thread = PsGetCurrentThread ();

#if defined (_X86_)
    _asm {
        mov StackPointer, esp
    }

    AllocationBase->StackPointer = StackPointer;

    //
    // For now, don't get fancy with copying more than what's in the current
    // stack page.  To do so would require checking the thread stack limits,
    // DPC stack limits, etc.
    //

    AllocationBase->StackBytes = PAGE_SIZE - BYTE_OFFSET(StackPointer);

    if (AllocationBase->StackBytes != 0) {

        if (AllocationBase->StackBytes > MI_STACK_BYTES) {
            AllocationBase->StackBytes = MI_STACK_BYTES;
        }

        RtlCopyMemory (AllocationBase->StackData,
                       StackPointer,
                       AllocationBase->StackBytes);
    }
#else
    AllocationBase->StackPointer = NULL;
    AllocationBase->StackBytes = 0;

    RtlZeroMemory (AllocationBase->StackData, sizeof (SPECIAL_POOL_FREE_TRACE));

    RtlCaptureStackBackTrace (0,
                              SPECIAL_POOL_FREE_TRACE_LENGTH,
                              (PVOID *)AllocationBase->StackData,
                              &Hash);
#endif

    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
        LocalNoAccessPte.u.Long = MM_KERNEL_NOACCESS_PTE;
        MiDeleteSystemPagableVm (PointerPte,
                                 1,
                                 LocalNoAccessPte,
                                 (PoolType & SESSION_POOL_MASK) ? TRUE : FALSE,
                                 NULL);
        LOCK_PFN (OldIrql);
        MiSpecialPagesPagable -= 1;
    }
    else {

        PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
        Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
        PageTableFrameIndex = Pfn1->u4.PteFrame;
        Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);

        LOCK_PFN2 (OldIrql);
        MiSpecialPagesNonPaged -= 1;
        MI_SET_PFN_DELETED (Pfn1);
        MiDecrementShareCount (PageFrameIndex);
        MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);
        KeFlushSingleTb (PAGE_ALIGN(P),
                         TRUE,
                         TRUE,
                         (PHARDWARE_PTE)PointerPte,
                         ZeroKernelPte.u.Flush);
        MmResidentAvailablePages += 1;
        MM_BUMP_COUNTER(37, 1);
    }

    //
    // Clear the adjacent PTE to support MmIsSpecialPoolAddressFree().
    //

    (PointerPte + 1)->u.Long = 0;

    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

#if defined (_WIN64)
    if (PoolType & SESSION_POOL_MASK) {
        ASSERT (MmSessionSpace->SpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);
        MmSessionSpace->SpecialPoolLastPte->u.List.NextEntry = PointerPte -
                                              MI_PTE_BASE_FOR_LOWEST_SESSION_ADDRESS;
        MmSessionSpace->SpecialPoolLastPte = PointerPte;
        MmSessionSpace->SpecialPagesInUse -= 1;
    }
    else
#endif
    {
        ASSERT (MiSpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);
        MiSpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase;
        MiSpecialPoolLastPte = PointerPte;
    }

    MmSpecialPagesInUse -= 1;

    UNLOCK_PFN2 (OldIrql);

    MiReturnCommitment (1);
    MM_TRACK_COMMIT_REDUCTION (MM_DBG_COMMIT_SPECIAL_POOL_PAGES, 1);

    return;
}
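
//
// Editorial note (not original NT code): after this free, the page retains
// an MI_FREED_SPECIAL_POOL record at its base (Signature, TickCount,
// Thread, VirtualAddress and a stack snapshot) while the PTE pair sits at
// the tail of the LRU free queue with the mapping no-access - so a stale
// pointer dereference bugchecks immediately, and the debugger can read the
// record from the crash dump to see who freed the block and when.
//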
SIZE_T
MmQuerySpecialPoolBlockSize (
    IN PVOID P
    )

/*++

Routine Description:

    This routine returns the size of a special pool allocation.

Arguments:

    P - Supplies the special pool virtual address to query.

Return Value:

    The size in bytes of the allocation.

Environment:

    Kernel mode, APC_LEVEL or below for pagable addresses, DISPATCH_LEVEL or
    below for nonpaged addresses.

--*/

{
    PPOOL_HEADER Header;

#if defined (_WIN64)
    ASSERT (((P >= MmSessionSpecialPoolStart) && (P < MmSessionSpecialPoolEnd)) ||
            ((P >= MmSpecialPoolStart) && (P < MmSpecialPoolEnd)));
#else
    ASSERT ((P >= MmSpecialPoolStart) && (P < MmSpecialPoolEnd));
#endif

    if (((ULONG_PTR)P & (PAGE_SIZE - 1))) {
        Header = PAGE_ALIGN (P);
    }
    else {
        Header = (PPOOL_HEADER)((PCHAR)PAGE_ALIGN (P) + PAGE_SIZE - POOL_OVERHEAD);
    }

    return (SIZE_T)(Header->Ulong1 & ~(MI_SPECIAL_POOL_PAGABLE | MI_SPECIAL_POOL_VERIFIER | MI_SPECIAL_POOL_IN_SESSION));
}
VOID
MiMakeSpecialPoolPagable (
    IN PVOID VirtualAddress,
    IN PMMPTE PointerPte,
    IN POOL_TYPE PoolType
    )

/*++

Routine Description:

    Make a special pool allocation pagable.

Arguments:

    VirtualAddress - Supplies the faulting address.

    PointerPte - Supplies the PTE for the faulting address.

    PoolType - Supplies the pool type of the allocation.

Return Value:

    None.

Environment:

    Kernel mode, no locks (not even pool locks) held.

--*/

{
    PMMPFN Pfn1;
    MMPTE TempPte;
    KIRQL PreviousIrql;
    PFN_NUMBER PageFrameIndex;
    PMMSUPPORT VmSupport;
    PETHREAD CurrentThread;

    CurrentThread = PsGetCurrentThread ();

#if defined (_WIN64)
    if (PoolType & SESSION_POOL_MASK) {
        VmSupport = &MmSessionSpace->Vm;
        LOCK_SESSION_SPACE_WS (PreviousIrql, CurrentThread);
    }
    else
#endif
    {
        VmSupport = &MmSystemCacheWs;
        PoolType = PoolType;
        LOCK_SYSTEM_WS (PreviousIrql, CurrentThread);
    }

    //
    // As this page is now allocated, add it to the system working set to
    // make it pagable.
    //

    TempPte = *PointerPte;

    PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&TempPte);

    Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

    ASSERT (Pfn1->u1.Event == 0);

    Pfn1->u1.Event = (PVOID) CurrentThread;

    MiAddValidPageToWorkingSet (VirtualAddress,
                                PointerPte,
                                Pfn1,
                                0);

    ASSERT (KeGetCurrentIrql() == APC_LEVEL);

    if (VmSupport->Flags.AllowWorkingSetAdjustment == MM_GROW_WSLE_HASH) {
        MiGrowWsleHash (VmSupport);
        VmSupport->Flags.AllowWorkingSetAdjustment = TRUE;
    }

#if defined (_WIN64)
    if (PoolType & SESSION_POOL_MASK) {
        UNLOCK_SESSION_SPACE_WS (PreviousIrql);
    }
    else
#endif
    {
        UNLOCK_SYSTEM_WS (PreviousIrql);
    }
}
LOGICAL
MmIsSpecialPoolAddress (
    IN PVOID VirtualAddress
    )

/*++

Routine Description:

    This function returns TRUE if the argument address is in special pool,
    FALSE if not.

Arguments:

    VirtualAddress - Supplies the address in question.

Return Value:

    See above.

Environment:

    Kernel mode.

--*/

{
    if ((VirtualAddress >= MmSpecialPoolStart) &&
        (VirtualAddress < MmSpecialPoolEnd)) {
        return TRUE;
    }

#if defined (_WIN64)
    if ((VirtualAddress >= MmSessionSpecialPoolStart) &&
        (VirtualAddress < MmSessionSpecialPoolEnd)) {
        return TRUE;
    }
#endif

    return FALSE;
}
LOGICAL
MmIsSpecialPoolAddressFree (
    IN PVOID VirtualAddress
    )

/*++

Routine Description:

    This function returns TRUE if a special pool address has been freed,
    FALSE if it is in use (i.e., the caller overran).

Arguments:

    VirtualAddress - Supplies the special pool address in question.

Return Value:

    See above.

Environment:

    Kernel mode.

--*/

{
    PMMPTE PointerPte;

    //
    // Caller must check that the address is in special pool.
    //

    ASSERT (MmIsSpecialPoolAddress (VirtualAddress) == TRUE);

    PointerPte = MiGetPteAddress (VirtualAddress);

    //
    // Take advantage of the fact that adjacent PTEs have the paged/nonpaged
    // bits set when in use and these bits are cleared on free.  Note also
    // that freed pages get their PTEs chained together through PageFileHigh.
    //

    if ((PointerPte->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_PAGABLE) ||
        (PointerPte->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_NONPAGABLE)) {
        return FALSE;
    }

    return TRUE;
}
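
//
// Editorial sketch (not original NT code) of the PTE state encoding that
// the predicate above and MiIsSpecialPoolAddressNonPaged below rely on,
// per MiAllocateSpecialPool and MmFreeSpecialPool:
//
//     in use:  the guard PTE's u.Soft.PageFileHigh holds
//              MI_SPECIAL_POOL_PTE_PAGABLE or MI_SPECIAL_POOL_PTE_NONPAGABLE
//     freed:   the guard PTE is zeroed and the data PTE is rechained into
//              the LRU free queue via u.List.NextEntry
//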
LOGICAL
MiIsSpecialPoolAddressNonPaged (
    IN PVOID VirtualAddress
    )

/*++

Routine Description:

    This function returns TRUE if the special pool address is nonpaged,
    FALSE if not.

Arguments:

    VirtualAddress - Supplies the special pool address in question.

Return Value:

    See above.

Environment:

    Kernel mode.

--*/

{
    PMMPTE PointerPte;

    //
    // Caller must check that the address is in special pool.
    //

    ASSERT (MmIsSpecialPoolAddress (VirtualAddress) == TRUE);

    PointerPte = MiGetPteAddress (VirtualAddress);

    //
    // Take advantage of the fact that adjacent PTEs have the paged/nonpaged
    // bits set when in use and these bits are cleared on free.  Note also
    // that freed pages get their PTEs chained together through PageFileHigh.
    //

    if ((PointerPte + 1)->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_NONPAGABLE) {
        return TRUE;
    }

    return FALSE;
}
LOGICAL
MmProtectSpecialPool (
    IN PVOID VirtualAddress,
    IN ULONG NewProtect
    )

/*++

Routine Description:

    This function protects a special pool allocation.

Arguments:

    VirtualAddress - Supplies the special pool address to protect.

    NewProtect - Supplies the protection to set the pages to (PAGE_XX).

Return Value:

    TRUE if the protection was successfully applied, FALSE if not.

Environment:

    Kernel mode, IRQL at APC_LEVEL or below for pagable pool, DISPATCH or
    below for nonpagable pool.

    Note that setting an allocation to NO_ACCESS implies that an accessible
    protection must be applied by the caller prior to this allocation being
    freed.

    Note this is a nonpagable wrapper so that machines without special pool
    can still support code attempting to protect special pool at
    DISPATCH_LEVEL.

--*/

{
    if (MiSpecialPoolFirstPte == NULL) {

        //
        // The special pool allocation code was never initialized.
        //

        return (ULONG)-1;
    }

    return MiProtectSpecialPool (VirtualAddress, NewProtect);
}
LOGICAL
MiProtectSpecialPool (
    IN PVOID VirtualAddress,
    IN ULONG NewProtect
    )

/*++

Routine Description:

    This function protects a special pool allocation.

Arguments:

    VirtualAddress - Supplies the special pool address to protect.

    NewProtect - Supplies the protection to set the pages to (PAGE_XX).

Return Value:

    TRUE if the protection was successfully applied, FALSE if not.

Environment:

    Kernel mode, IRQL at APC_LEVEL or below for pagable pool, DISPATCH or
    below for nonpagable pool.

    Note that setting an allocation to NO_ACCESS implies that an accessible
    protection must be applied by the caller prior to this allocation being
    freed.

--*/

{
    KIRQL OldIrql;
    KIRQL OldIrql2;
    MMPTE PteContents;
    MMPTE NewPteContents;
    MMPTE PreviousPte;
    PMMPTE PointerPte;
    PMMPFN Pfn1;
    ULONG ProtectionMask;
    WSLE_NUMBER WsIndex;
    LOGICAL Pagable;
    LOGICAL SystemWsLocked;
    PMMSUPPORT VmSupport;

#if defined (_WIN64)
    if ((VirtualAddress >= MmSessionSpecialPoolStart) &&
        (VirtualAddress < MmSessionSpecialPoolEnd)) {
        VmSupport = &MmSessionSpace->Vm;
    }
    else
#endif
    if ((VirtualAddress >= MmSpecialPoolStart) &&
        (VirtualAddress < MmSpecialPoolEnd)) {
        VmSupport = &MmSystemCacheWs;
    }
#if defined (_PROTECT_PAGED_POOL)
    else if ((VirtualAddress >= MmPagedPoolStart) &&
             (VirtualAddress < PagedPoolEnd)) {
        VmSupport = &MmSystemCacheWs;
    }
#endif
    else {
        return (ULONG)-1;
    }

    ProtectionMask = MiMakeProtectionMask (NewProtect);

    if (ProtectionMask == MM_INVALID_PROTECTION) {
        return (ULONG)-1;
    }

    SystemWsLocked = FALSE;

    PointerPte = MiGetPteAddress (VirtualAddress);

    //
    // Initializing OldIrql is not needed for correctness, but without it
    // the compiler cannot compile this code at W4 when checking for use of
    // uninitialized variables.
    //

    OldIrql = PASSIVE_LEVEL;

#if defined (_PROTECT_PAGED_POOL)
    if ((VirtualAddress >= MmPagedPoolStart) &&
        (VirtualAddress < PagedPoolEnd)) {
        Pagable = TRUE;
    }
    else
#endif
    if ((PointerPte + 1)->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_PAGABLE) {
        Pagable = TRUE;
    }
    else {
        Pagable = FALSE;
    }

    if (Pagable == TRUE) {
        if (VmSupport == &MmSystemCacheWs) {
            LOCK_SYSTEM_WS (OldIrql, PsGetCurrentThread ());
        }
        else {
            LOCK_SESSION_SPACE_WS (OldIrql, PsGetCurrentThread ());
        }
        SystemWsLocked = TRUE;
    }

    PteContents = *PointerPte;

    if (ProtectionMask == MM_NOACCESS) {

        if (SystemWsLocked == TRUE) {

retry1:

            ASSERT (SystemWsLocked == TRUE);

            if (PteContents.u.Hard.Valid == 1) {

                Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
                WsIndex = Pfn1->u1.WsIndex;
                ASSERT (WsIndex != 0);
                Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask;
                MiRemovePageFromWorkingSet (PointerPte,
                                            Pfn1,
                                            VmSupport);
            }
            else if (PteContents.u.Soft.Transition == 1) {

                LOCK_PFN2 (OldIrql2);

                PteContents = *(volatile MMPTE *)PointerPte;

                if (PteContents.u.Soft.Transition == 0) {
                    UNLOCK_PFN2 (OldIrql2);
                    goto retry1;
                }

                Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
                Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask;
                PointerPte->u.Soft.Protection = ProtectionMask;
                UNLOCK_PFN2(OldIrql2);
            }
            else {

                //
                // Must be page file space or demand zero.
                //

                PointerPte->u.Soft.Protection = ProtectionMask;
            }

            ASSERT (SystemWsLocked == TRUE);

            if (VmSupport == &MmSystemCacheWs) {
                UNLOCK_SYSTEM_WS (OldIrql);
            }
            else {
                UNLOCK_SESSION_SPACE_WS (OldIrql);
            }
        }
        else {

            ASSERT (SystemWsLocked == FALSE);

            //
            // Make it no access regardless of its previous protection state.
            // Note that the page frame number is preserved.
            //

            PteContents.u.Hard.Valid = 0;
            PteContents.u.Soft.Prototype = 0;
            PteContents.u.Soft.Protection = MM_NOACCESS;

            Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);

            LOCK_PFN2 (OldIrql2);

            Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask;

            PreviousPte.u.Flush = KeFlushSingleTb (VirtualAddress,
                                                   TRUE,
                                                   TRUE,
                                                   (PHARDWARE_PTE)PointerPte,
                                                   PteContents.u.Flush);

            MI_CAPTURE_DIRTY_BIT_TO_PFN (&PreviousPte, Pfn1);

            UNLOCK_PFN2(OldIrql2);
        }

        return TRUE;
    }

    //
    // No guard pages, noncached pages or copy-on-write for special pool.
    //

    if ((ProtectionMask >= MM_NOCACHE) || (ProtectionMask == MM_WRITECOPY) || (ProtectionMask == MM_EXECUTE_WRITECOPY)) {

        if (SystemWsLocked == TRUE) {
            if (VmSupport == &MmSystemCacheWs) {
                UNLOCK_SYSTEM_WS (OldIrql);
            }
            else {
                UNLOCK_SESSION_SPACE_WS (OldIrql);
            }
        }

        return FALSE;
    }

    //
    // Set accessible permissions - the page may already be protected or not.
    //

    if (Pagable == FALSE) {

        Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
        Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask;

        MI_MAKE_VALID_PTE (NewPteContents,
                           PteContents.u.Hard.PageFrameNumber,
                           ProtectionMask,
                           PointerPte);

        KeFlushSingleTb (VirtualAddress,
                         TRUE,
                         TRUE,
                         (PHARDWARE_PTE)PointerPte,
                         NewPteContents.u.Flush);

        ASSERT (SystemWsLocked == FALSE);

        return TRUE;
    }

retry2:

    ASSERT (SystemWsLocked == TRUE);

    if (PteContents.u.Hard.Valid == 1) {

        Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
        ASSERT (Pfn1->u1.WsIndex != 0);

        LOCK_PFN2 (OldIrql2);

        Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask;

        MI_MAKE_VALID_PTE (PteContents,
                           PteContents.u.Hard.PageFrameNumber,
                           ProtectionMask,
                           PointerPte);

        PreviousPte.u.Flush = KeFlushSingleTb (VirtualAddress,
                                               TRUE,
                                               TRUE,
                                               (PHARDWARE_PTE)PointerPte,
                                               PteContents.u.Flush);

        MI_CAPTURE_DIRTY_BIT_TO_PFN (&PreviousPte, Pfn1);

        UNLOCK_PFN2 (OldIrql2);
    }
    else if (PteContents.u.Soft.Transition == 1) {

        LOCK_PFN2 (OldIrql2);

        PteContents = *(volatile MMPTE *)PointerPte;

        if (PteContents.u.Soft.Transition == 0) {
            UNLOCK_PFN2 (OldIrql2);
            goto retry2;
        }

        Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
        Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask;
        PointerPte->u.Soft.Protection = ProtectionMask;
        UNLOCK_PFN2(OldIrql2);
    }
    else {

        //
        // Must be page file space or demand zero.
        //

        PointerPte->u.Soft.Protection = ProtectionMask;
    }

    if (VmSupport == &MmSystemCacheWs) {
        UNLOCK_SYSTEM_WS (OldIrql);
    }
    else {
        UNLOCK_SESSION_SPACE_WS (OldIrql);
    }

    return TRUE;
}

LOGICAL
MmSetSpecialPool (
    IN LOGICAL Enable
    )

/*++

Routine Description:

    This routine enables/disables special pool. This allows callers to ensure
    that subsequent allocations do not come from special pool. It is relied
    upon by callers that require KSEG0 addresses.

Arguments:

    Enable - Supplies TRUE to enable special pool, FALSE to disable it.

Return Value:

    Current special pool state (enabled or disabled).

Environment:

    Kernel mode, IRQL of DISPATCH_LEVEL or below.

--*/

{
    KIRQL OldIrql;
    LOGICAL OldEnable;

    LOCK_PFN2 (OldIrql);

    OldEnable = MiSpecialPoolEnabled;

    MiSpecialPoolEnabled = Enable;

    UNLOCK_PFN2 (OldIrql);

    return OldEnable;
}
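
//
// Usage sketch (illustrative only, not part of this module): per the
// routine description above, a caller that requires KSEG0 addresses can
// bracket its allocations by disabling special pool and restoring the
// prior state afterwards.  The surrounding caller code is hypothetical.
//
//     LOGICAL OldEnable;
//
//     OldEnable = MmSetSpecialPool (FALSE);
//
//     //
//     // Allocations made here will not be satisfied from special pool.
//     //
//
//     MmSetSpecialPool (OldEnable);
//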

#ifndef NO_POOL_CHECKS

typedef struct _MI_BAD_TAGS {
    USHORT Enabled;
    UCHAR TargetChar;
    UCHAR AllOthers;
    ULONG Dispatches;
    ULONG Allocations;
    ULONG RandomizerEnabled;
} MI_BAD_TAGS, *PMI_BAD_TAGS;

MI_BAD_TAGS MiBadTags;
KTIMER MiSpecialPoolTimer;
KDPC MiSpecialPoolTimerDpc;
LARGE_INTEGER MiTimerDueTime;

#define MI_THREE_SECONDS 3

VOID
MiSpecialPoolTimerDispatch (
    IN PKDPC Dpc,
    IN PVOID DeferredContext,
    IN PVOID SystemArgument1,
    IN PVOID SystemArgument2
    )

/*++

Routine Description:

    This routine is executed every 3 seconds. Just toggle the enable bit.
    If not many squeezed allocations have been made, then just leave it
    continuously enabled. Switch to a different tag if it looks like this
    one isn't getting any hits.

    No locks needed.

Arguments:

    Dpc - Supplies a pointer to a control object of type DPC.

    DeferredContext - Optional deferred context; not used.

    SystemArgument1 - Optional argument 1; not used.

    SystemArgument2 - Optional argument 2; not used.

Return Value:

    None.

--*/

{
    UCHAR NewChar;

    UNREFERENCED_PARAMETER (Dpc);
    UNREFERENCED_PARAMETER (DeferredContext);
    UNREFERENCED_PARAMETER (SystemArgument1);
    UNREFERENCED_PARAMETER (SystemArgument2);

    MiBadTags.Dispatches += 1;

    if (MiBadTags.Allocations > 500) {
        MiBadTags.Enabled += 1;
    }
    else if ((MiBadTags.Allocations == 0) && (MiBadTags.Dispatches > 100)) {

        //
        // The current target character isn't getting any hits - rotate
        // to the next alphabetic character unless all other (non-alphabetic)
        // tags are being targeted.
        //

        if (MiBadTags.AllOthers == 0) {
            NewChar = (UCHAR)(MiBadTags.TargetChar + 1);
            if (NewChar >= 'a' && NewChar <= 'z') {
                MiBadTags.TargetChar = NewChar;
            }
            else if (NewChar == 'z' + 1) {
                MiBadTags.TargetChar = 'a';
            }
            else if (NewChar >= 'A' && NewChar <= 'Z') {
                MiBadTags.TargetChar = NewChar;
            }
            else {
                MiBadTags.TargetChar = 'A';
            }
        }
    }
}
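
//
// Illustrative rotation (derived from the dispatch routine above): once
// more than 100 dispatches have occurred with no squeezed allocations,
// the target character advances one step per dispatch through two
// independent cycles - 'a' -> 'b' -> ... -> 'z' -> 'a' and
// 'A' -> 'B' -> ... -> 'Z' -> 'A'.
//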

extern ULONG InitializationPhase;

VOID
MiInitializeSpecialPoolCriteria (
    VOID
    )
{
    LARGE_INTEGER SystemTime;
    TIME_FIELDS TimeFields;

    if (InitializationPhase == 0) {
#if defined (_MI_SPECIAL_POOL_BY_DEFAULT)
        if (MmSpecialPoolTag == 0) {
            MmSpecialPoolTag = (ULONG)-2;
        }
#endif
        return;
    }

    if (MmSpecialPoolTag != (ULONG)-2) {
        return;
    }

    KeQuerySystemTime (&SystemTime);

    RtlTimeToTimeFields (&SystemTime, &TimeFields);

    if (TimeFields.Second <= 25) {
        MiBadTags.TargetChar = (UCHAR)('a' + (UCHAR)TimeFields.Second);
    }
    else if (TimeFields.Second <= 51) {
        MiBadTags.TargetChar = (UCHAR)('A' + (UCHAR)(TimeFields.Second - 26));
    }
    else {
        MiBadTags.AllOthers = 1;
    }

    MiBadTags.RandomizerEnabled = 1;

    //
    // Initialize a periodic timer to go off every three seconds.
    //

    KeInitializeDpc (&MiSpecialPoolTimerDpc, MiSpecialPoolTimerDispatch, NULL);

    KeInitializeTimer (&MiSpecialPoolTimer);

    MiTimerDueTime.QuadPart = Int32x32To64 (MI_THREE_SECONDS, -10000000);

    KeSetTimerEx (&MiSpecialPoolTimer,
                  MiTimerDueTime,
                  MI_THREE_SECONDS * 1000,
                  &MiSpecialPoolTimerDpc);

    MiBadTags.Enabled += 1;
}
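
//
// Worked example (derived from the code above): a boot at second 0..25
// seeds TargetChar with 'a'..'z', second 26..51 seeds 'A'..'Z', and
// second 52..59 selects all-others (non-alphabetic) mode.  The due time
// of Int32x32To64 (3, -10000000) is -30000000 100ns units, i.e. a
// relative 3 seconds, and the recurring period is 3 * 1000 == 3000
// milliseconds.
//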

LOGICAL
MmSqueezeBadTags (
    IN ULONG Tag
    )

/*++

Routine Description:

    This routine squeezes bad tags by forcing them into special pool in a
    systematic fashion.

Arguments:

    Tag - Supplies the tag of the requested allocation.

Return Value:

    TRUE if the caller should attempt to satisfy the requested allocation from
    special pool, FALSE if not.

Environment:

    Kernel mode, no locks (not even pool locks) held.

--*/

{
    PUCHAR tc;

    if ((MiBadTags.Enabled % 0x10) == 0) {
        return FALSE;
    }

    if (MiBadTags.RandomizerEnabled == 0) {
        return FALSE;
    }

    tc = (PUCHAR)&Tag;

    if (*tc == MiBadTags.TargetChar) {
        ;
    }
    else if (MiBadTags.AllOthers == 1) {
        if (*tc >= 'a' && *tc <= 'z') {
            return FALSE;
        }
        if (*tc >= 'A' && *tc <= 'Z') {
            return FALSE;
        }
    }
    else {
        return FALSE;
    }

    MiBadTags.Allocations += 1;

    return TRUE;
}
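
//
// Example (hypothetical tags): with TargetChar == 'T', an allocation
// tagged 'Tag1' is squeezed into special pool while 'File' is not.  In
// all-others mode, only tags whose first character is non-alphabetic
// (e.g. '8042') are squeezed.
//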

VOID
MiEnableRandomSpecialPool (
    IN LOGICAL Enable
    )
{
    MiBadTags.RandomizerEnabled = Enable;
}

#endif

LOGICAL
MiCheckSingleFilter (
    ULONG Tag,
    ULONG Filter
    )

/*++

Routine Description:

    This function checks if a pool tag matches a given pattern.

        ? - matches any single character
        * - terminates the match with TRUE

    N.B.: ability inspired by the !poolfind debugger extension.

Arguments:

    Tag - a pool tag.

    Filter - a globish pattern (chars and/or ?,*).

Return Value:

    TRUE if a match exists, FALSE otherwise.

--*/

{
    ULONG i;
    PUCHAR tc;
    PUCHAR fc;

    tc = (PUCHAR) &Tag;
    fc = (PUCHAR) &Filter;

    for (i = 0; i < 4; i += 1, tc += 1, fc += 1) {

        if (*fc == '*') {
            break;
        }

        if (*fc == '?') {
            continue;
        }

        //
        // The last tag character may carry the protected pool marker -
        // strip it before comparing.
        //

        if (i == 3 && ((*tc) & ~(PROTECTED_POOL >> 24)) == *fc) {
            continue;
        }

        if (*tc != *fc) {
            return FALSE;
        }
    }

    return TRUE;
}
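
//
// Examples (hypothetical tags): a Filter of 'Ntf*' matches any tag whose
// first three characters are "Ntf"; '????' matches every tag; and a
// Filter equal to the tag itself still matches when the tag's last
// character has the PROTECTED_POOL bit set by the executive.
//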

LOGICAL
MmUseSpecialPool (
    IN SIZE_T NumberOfBytes,
    IN ULONG Tag
    )

/*++

Routine Description:

    This routine checks whether the specified allocation should be attempted
    from special pool. Both the tag string and the number of bytes are
    matched against; if either causes a hit, then special pool is
    recommended.

Arguments:

    NumberOfBytes - Supplies the number of bytes to commit.

    Tag - Supplies the tag of the requested allocation.

Return Value:

    TRUE if the caller should attempt to satisfy the requested allocation from
    special pool, FALSE if not.

Environment:

    Kernel mode, no locks (not even pool locks) held.

--*/

{
    if ((NumberOfBytes <= POOL_BUDDY_MAX) &&
        (MmSpecialPoolTag != 0) &&
        (NumberOfBytes != 0)) {

#ifndef NO_POOL_CHECKS
        if (MmSqueezeBadTags (Tag) == TRUE) {
            return TRUE;
        }
#endif

        //
        // Check for a special pool tag match by tag string and size ranges.
        //

        if ((MiCheckSingleFilter (Tag, MmSpecialPoolTag)) ||
            ((MmSpecialPoolTag >= (NumberOfBytes + POOL_OVERHEAD)) &&
             (MmSpecialPoolTag < (NumberOfBytes + POOL_OVERHEAD + POOL_SMALLEST_BLOCK)))) {

            return TRUE;
        }
    }

    return FALSE;
}
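
//
// Illustrative size match (POOL_OVERHEAD == 8 and POOL_SMALLEST_BLOCK == 8
// are assumed x86 values here - see the pool definitions for the actual
// ones): setting MmSpecialPoolTag to 24 recommends special pool exactly
// when 24 >= NumberOfBytes + 8 and 24 < NumberOfBytes + 16, i.e. for
// requests of 9 through 16 bytes.
//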