Leaked source code of windows server 2003
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2068 lines
53 KiB

  1. /*++
  2. Copyright (c) 1991-2000 Microsoft Corporation
  3. Module Name:
  4. mtrr.c
  5. Abstract:
  6. This module implements interfaces that support manipulation of
  7. memory type range registers.
  8. These entry points only exist on x86 machines.
  9. Author:
  10. Ken Reneris (kenr) 11-Oct-95
  11. Environment:
  12. Kernel mode only.
  13. Revision History:
  14. --*/
  15. #include "ki.h"
  16. #include "mtrr.h"
  17. #define STATIC
  18. #define IDBG 0
  19. #if DBG
  20. #define DBGMSG(a) DbgPrint(a)
  21. #else
  22. #define DBGMSG(a)
  23. #endif
  24. //
  25. // Define MTTR variable values for 36-bits of physical address support.
  26. //
  27. // N.B. During system initialization these variables may be changed for
  28. // 40-bits of physical address support.
  29. //
  30. LONG64 KiMtrrMaskBase = 0x0000000ffffff000;
  31. LONG64 KiMtrrMaskMask = 0x0000000ffffff000;
  32. LONG64 KiMtrrOverflowMask = (~0x1000000000);
  33. LONG64 KiMtrrResBitMask = 0xfffffffff;
  34. UCHAR KiMtrrMaxRangeShift = 36;
  35. //
  36. // Internal declarations
  37. //
  38. //
  39. // Range in generic terms
  40. //
  41. typedef struct _ONE_RANGE {
  42. ULONGLONG Base;
  43. ULONGLONG Limit;
  44. UCHAR Type;
  45. } ONE_RANGE, *PONE_RANGE;
  46. #define GROW_RANGE_TABLE 4
  47. //
  48. // Range in specific mtrr terms
  49. //
  50. typedef struct _MTRR_RANGE {
  51. MTRR_VARIABLE_BASE Base;
  52. MTRR_VARIABLE_MASK Mask;
  53. } MTRR_RANGE, *PMTRR_RANGE;
  54. //
  55. // System static information concerning cached range types
  56. //
  57. typedef struct _RANGE_INFO {
  58. //
  59. // Global MTRR info
  60. //
  61. MTRR_DEFAULT Default; // h/w mtrr default
  62. MTRR_CAPABILITIES Capabilities; // h/w mtrr Capabilities
  63. UCHAR DefaultCachedType; // default type for MmCached
  64. //
  65. // Variable MTRR information
  66. //
  67. BOOLEAN RangesValid; // Ranges initialized and valid.
  68. BOOLEAN MtrrWorkaround; // Work Around needed/not.
  69. UCHAR NoRange; // No ranges currently in Ranges
  70. UCHAR MaxRange; // Max size of Ranges
  71. PONE_RANGE Ranges; // Current ranges as set into h/w
  72. } RANGE_INFO, *PRANGE_INFO;
  73. //
  74. // Structure used while processing range database
  75. //
  76. typedef struct _NEW_RANGE {
  77. //
  78. // Current Status
  79. //
  80. NTSTATUS Status;
  81. //
  82. // Generic info on new range
  83. //
  84. ULONGLONG Base;
  85. ULONGLONG Limit;
  86. UCHAR Type;
  87. //
  88. // MTRR image to be set into h/w
  89. //
  90. PMTRR_RANGE MTRR;
  91. //
  92. // RangeDatabase before edits were started
  93. //
  94. UCHAR NoRange;
  95. PONE_RANGE Ranges;
  96. //
  97. // IPI context to coordinate concurrent processor update
  98. //
  99. ULONG NoMTRR;
  100. PROCESSOR_LOCKSTEP Synchronize;
  101. ULONG Processor;
  102. } NEW_RANGE, *PNEW_RANGE;
  103. //
  104. // Prototypes
  105. //
  106. VOID
  107. KiInitializeMTRR (
  108. IN BOOLEAN LastProcessor
  109. );
  110. BOOLEAN
  111. KiRemoveRange (
  112. IN PNEW_RANGE NewRange,
  113. IN ULONGLONG Base,
  114. IN ULONGLONG Limit,
  115. IN PBOOLEAN RemoveThisType
  116. );
  117. VOID
  118. KiAddRange (
  119. IN PNEW_RANGE NewRange,
  120. IN ULONGLONG Base,
  121. IN ULONGLONG Limit,
  122. IN UCHAR Type
  123. );
  124. VOID
  125. KiStartEffectiveRangeChange (
  126. IN PNEW_RANGE NewRange
  127. );
  128. VOID
  129. KiCompleteEffectiveRangeChange (
  130. IN PNEW_RANGE NewRange
  131. );
  132. STATIC ULONG
  133. KiRangeWeight (
  134. IN PONE_RANGE Range
  135. );
  136. STATIC ULONG
  137. KiFindFirstSetLeftBit (
  138. IN ULONGLONG Set
  139. );
  140. STATIC ULONG
  141. KiFindFirstSetRightBit (
  142. IN ULONGLONG Set
  143. );
  144. VOID
  145. KiLoadMTRRTarget (
  146. IN PKIPI_CONTEXT SignalDone,
  147. IN PVOID Context,
  148. IN PVOID Parameter2,
  149. IN PVOID Parameter3
  150. );
  151. NTSTATUS
  152. KiLoadMTRR (
  153. IN PNEW_RANGE Context
  154. );
  155. ULONGLONG
  156. KiMaskToLength (
  157. IN ULONGLONG Mask
  158. );
  159. ULONGLONG
  160. KiLengthToMask (
  161. IN ULONGLONG Length
  162. );
  163. #if IDBG
  164. VOID
  165. KiDumpMTRR (
  166. PUCHAR DebugString,
  167. PMTRR_RANGE MTRR
  168. );
  169. #endif
  170. //
  171. // --- AMD - Prototypes for AMD K6 MTRR Support functions. ---
  172. //
  173. NTSTATUS
  174. KiAmdK6MtrrSetMemoryType (
  175. IN ULONG BaseAddress,
  176. IN ULONG NumberOfBytes,
  177. IN MEMORY_CACHING_TYPE CacheType
  178. );
  179. VOID
  180. KiAmdK6MtrrWRMSR (
  181. VOID
  182. );
  183. // --- AMD - End ---
  184. #ifdef ALLOC_PRAGMA
  185. #pragma alloc_text(INIT,KiInitializeMTRR)
  186. #pragma alloc_text(PAGELK,KiRemoveRange)
  187. #pragma alloc_text(PAGELK,KiAddRange)
  188. #pragma alloc_text(PAGELK,KiStartEffectiveRangeChange)
  189. #pragma alloc_text(PAGELK,KiCompleteEffectiveRangeChange)
  190. #pragma alloc_text(PAGELK,KiRangeWeight)
  191. #pragma alloc_text(PAGELK,KiFindFirstSetLeftBit)
  192. #pragma alloc_text(PAGELK,KiFindFirstSetRightBit)
  193. #pragma alloc_text(PAGELK,KiLoadMTRR)
  194. #pragma alloc_text(PAGELK,KiLoadMTRRTarget)
  195. #pragma alloc_text(PAGELK,KiLockStepExecution)
  196. #pragma alloc_text(PAGELK,KiLengthToMask)
  197. #pragma alloc_text(PAGELK,KiMaskToLength)
  198. #if IDBG
  199. #pragma alloc_text(PAGELK,KiDumpMTRR)
  200. #endif
  201. #endif
  202. //
  203. // KiRangeLock - Used to synchronize accesses to KiRangeInfo
  204. //
  205. KSPIN_LOCK KiRangeLock;
  206. //
  207. // KiRangeInfo - Range type mapping information. Details specific h/w support
  208. // and contains the current range database of how physical
  209. // addresses have been set
  210. RANGE_INFO KiRangeInfo;
  211. VOID
  212. KiInitializeMTRR (
  213. IN BOOLEAN LastProcessor
  214. )
  215. /*++
  216. Routine Description:
  217. Called to incrementally initialize the physical range
  218. database feature. First processor's MTRR set is read into the
  219. physical range database.
  220. Arguments:
  221. LastProcessor - If set this is the last processor to execute this routine
  222. such that when this processor finishes, the initialization is complete.
  223. Return Value:
  224. None - if there was a problem the function
  225. KeSetPhysicalCacheTypeRange type is disabled.
  226. --*/
  227. {
  228. BOOLEAN Status;
  229. ULONG Index;
  230. MTRR_DEFAULT Default;
  231. MTRR_CAPABILITIES Capabilities;
  232. NEW_RANGE NewRange;
  233. MTRR_VARIABLE_BASE MtrrBase;
  234. MTRR_VARIABLE_MASK MtrrMask;
  235. ULONGLONG Base, Mask, Length;
  236. PKPRCB Prcb;
  237. Status = TRUE;
  238. RtlZeroMemory (&NewRange, sizeof (NewRange));
  239. NewRange.Status = STATUS_UNSUCCESSFUL;
  240. //
  241. // If this is the first processor, initialize some fields
  242. //
  243. if (KeGetPcr()->Number == 0) {
  244. KeInitializeSpinLock (&KiRangeLock);
  245. KiRangeInfo.Capabilities.u.QuadPart = RDMSR(MTRR_MSR_CAPABILITIES);
  246. KiRangeInfo.Default.u.QuadPart = RDMSR(MTRR_MSR_DEFAULT);
  247. KiRangeInfo.DefaultCachedType = MTRR_TYPE_MAX;
  248. //
  249. // If h/w mtrr support is not enabled, disable OS support
  250. //
  251. if (!KiRangeInfo.Default.u.hw.MtrrEnabled ||
  252. KiRangeInfo.Capabilities.u.hw.VarCnt == 0 ||
  253. KiRangeInfo.Default.u.hw.Type != MTRR_TYPE_UC) {
  254. DBGMSG("MTRR feature disabled.\n");
  255. Status = FALSE;
  256. } else {
  257. //
  258. // If USWC type is supported by hardware, but the MTRR
  259. // feature is not set in KeFeatureBits, it is because
  260. // the HAL indicated USWC should not be used on this
  261. // machine. (Possibly due to shared memory clusters).
  262. //
  263. if (KiRangeInfo.Capabilities.u.hw.UswcSupported &&
  264. ((KeFeatureBits & KF_MTRR) == 0)) {
  265. DBGMSG("KiInitializeMTRR: MTRR use globally disabled on this machine.\n");
  266. KiRangeInfo.Capabilities.u.hw.UswcSupported = 0;
  267. }
  268. //
  269. // Allocate initial range type database
  270. //
  271. KiRangeInfo.NoRange = 0;
  272. KiRangeInfo.MaxRange = (UCHAR) KiRangeInfo.Capabilities.u.hw.VarCnt + GROW_RANGE_TABLE;
  273. //
  274. // Don't allocate a new range on reinitialization from
  275. // hibernate.
  276. //
  277. if (KiRangeInfo.Ranges == NULL) {
  278. KiRangeInfo.Ranges = ExAllocatePoolWithTag (NonPagedPool,
  279. sizeof(ONE_RANGE) * KiRangeInfo.MaxRange,
  280. ' eK');
  281. }
  282. if (KiRangeInfo.Ranges != NULL) {
  283. RtlZeroMemory (KiRangeInfo.Ranges,
  284. sizeof(ONE_RANGE) * KiRangeInfo.MaxRange);
  285. }
  286. }
  287. }
  288. //
  289. // Workaround for cpu signatures 611, 612, 616 and 617
  290. // - if the request for setting a variable MTRR specifies
  291. // an address which is not 4M aligned or length is not
  292. // a multiple of 4M then possible problem for INVLPG inst.
  293. // Detect if workaround is required
  294. //
  295. Prcb = KeGetCurrentPrcb();
  296. if (Prcb->CpuType == 6 &&
  297. (Prcb->CpuStep == 0x0101 || Prcb->CpuStep == 0x0102 ||
  298. Prcb->CpuStep == 0x0106 || Prcb->CpuStep == 0x0107 )) {
  299. if (strcmp((PCHAR)Prcb->VendorString, "GenuineIntel") == 0) {
  300. //
  301. // Only do this if it's an Intel part, other
  302. // manufacturers may have the same stepping
  303. // numbers but no bug.
  304. //
  305. KiRangeInfo.MtrrWorkaround = TRUE;
  306. }
  307. }
  308. //
  309. // If MTRR support disabled on first processor or if
  310. // buffer not allocated then fall through
  311. //
  312. if (!KiRangeInfo.Ranges){
  313. Status = FALSE;
  314. Capabilities.u.QuadPart = 0; // satisfy no_opt compilation
  315. } else {
  316. //
  317. // Verify MTRR support is symmetric
  318. //
  319. Capabilities.u.QuadPart = RDMSR(MTRR_MSR_CAPABILITIES);
  320. if ((Capabilities.u.hw.UswcSupported) &&
  321. ((KeFeatureBits & KF_MTRR) == 0)) {
  322. DBGMSG ("KiInitializeMTRR: setting UswcSupported FALSE\n");
  323. Capabilities.u.hw.UswcSupported = 0;
  324. }
  325. Default.u.QuadPart = RDMSR(MTRR_MSR_DEFAULT);
  326. if (Default.u.QuadPart != KiRangeInfo.Default.u.QuadPart ||
  327. Capabilities.u.QuadPart != KiRangeInfo.Capabilities.u.QuadPart) {
  328. DBGMSG ("KiInitializeMTRR: asymmetric mtrr support\n");
  329. Status = FALSE;
  330. }
  331. }
  332. NewRange.Status = STATUS_SUCCESS;
  333. //
  334. // MTRR registers should be identically set on each processor.
  335. // Ranges should be added to the range database only for one
  336. // processor.
  337. //
  338. if (Status && (KeGetPcr()->Number == 0)) {
  339. #if IDBG
  340. KiDumpMTRR ("Processor MTRR:", NULL);
  341. #endif
  342. //
  343. // Read current MTRR settings for various cached range types
  344. // and add them to the range database
  345. //
  346. for (Index=0; Index < Capabilities.u.hw.VarCnt; Index++) {
  347. MtrrBase.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_BASE+Index*2);
  348. MtrrMask.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_MASK+Index*2);
  349. Mask = MtrrMask.u.QuadPart & KiMtrrMaskMask;
  350. Base = MtrrBase.u.QuadPart & KiMtrrMaskBase;
  351. //
  352. // Note - the variable MTRR Mask does NOT contain the length
  353. // spanned by the variable MTRR. Thus just checking the Valid
  354. // Bit should be sufficient for identifying a valid MTRR.
  355. //
  356. if (MtrrMask.u.hw.Valid) {
  357. Length = KiMaskToLength(Mask);
  358. //
  359. // Check for non-contiguous MTRR mask.
  360. //
  361. if ((Mask + Length) & KiMtrrOverflowMask) {
  362. DBGMSG ("KiInitializeMTRR: Found non-contiguous MTRR mask!\n");
  363. Status = FALSE;
  364. }
  365. //
  366. // Add this MTRR to the range database
  367. //
  368. Base &= Mask;
  369. KiAddRange (
  370. &NewRange,
  371. Base,
  372. Base + Length - 1,
  373. (UCHAR) MtrrBase.u.hw.Type
  374. );
  375. //
  376. // Check for default cache type
  377. //
  378. if (MtrrBase.u.hw.Type == MTRR_TYPE_WB) {
  379. KiRangeInfo.DefaultCachedType = MTRR_TYPE_WB;
  380. }
  381. if (KiRangeInfo.DefaultCachedType == MTRR_TYPE_MAX &&
  382. MtrrBase.u.hw.Type == MTRR_TYPE_WT) {
  383. KiRangeInfo.DefaultCachedType = MTRR_TYPE_WT;
  384. }
  385. }
  386. }
  387. //
  388. // If a default type for "cached" was not found, assume write-back
  389. //
  390. if (KiRangeInfo.DefaultCachedType == MTRR_TYPE_MAX) {
  391. DBGMSG ("KiInitializeMTRR: assume write-back\n");
  392. KiRangeInfo.DefaultCachedType = MTRR_TYPE_WB;
  393. }
  394. }
  395. //
  396. // Done
  397. //
  398. if (!NT_SUCCESS(NewRange.Status)) {
  399. Status = FALSE;
  400. }
  401. if (!Status) {
  402. DBGMSG ("KiInitializeMTRR: OS support for MTRRs disabled\n");
  403. if (KiRangeInfo.Ranges != NULL) {
  404. ExFreePool (KiRangeInfo.Ranges);
  405. KiRangeInfo.Ranges = NULL;
  406. }
  407. } else {
  408. // if last processor indicate initialization complete
  409. if (LastProcessor) {
  410. KiRangeInfo.RangesValid = TRUE;
  411. }
  412. }
  413. }
  414. VOID
  415. KeRestoreMtrr (
  416. VOID
  417. )
  418. /*++
  419. Routine Description:
  420. This function reloads the MTRR registers to be the current
  421. known values. This is used on a system wakeup to ensure the
  422. registers are sane.
  423. N.B. The caller must have the PAGELK code locked
  424. Arguments:
  425. none
  426. Return Value:
  427. none
  428. --*/
  429. {
  430. NEW_RANGE NewRange;
  431. KIRQL OldIrql;
  432. if (KiRangeInfo.RangesValid) {
  433. RtlZeroMemory (&NewRange, sizeof (NewRange));
  434. KeAcquireSpinLock (&KiRangeLock, &OldIrql);
  435. KiStartEffectiveRangeChange (&NewRange);
  436. ASSERT (NT_SUCCESS(NewRange.Status));
  437. KiCompleteEffectiveRangeChange (&NewRange);
  438. KeReleaseSpinLock (&KiRangeLock, OldIrql);
  439. return;
  440. }
  441. //
  442. // If the processor is a AMD K6 with MTRR support then perform
  443. // processor specific implentaiton.
  444. //
  445. if (KeFeatureBits & KF_AMDK6MTRR) {
  446. KeAcquireSpinLock (&KiRangeLock, &OldIrql);
  447. KiLoadMTRR(NULL);
  448. KeReleaseSpinLock (&KiRangeLock, OldIrql);
  449. }
  450. }
  451. NTSTATUS
  452. KeSetPhysicalCacheTypeRange (
  453. IN PHYSICAL_ADDRESS PhysicalAddress,
  454. IN ULONG NumberOfBytes,
  455. IN MEMORY_CACHING_TYPE CacheType
  456. )
  457. /*++
  458. Routine Description:
  459. This function sets a physical range to a particular cache type.
  460. If the system does not support setting cache policies based on
  461. physical ranges, no action is taken.
  462. Arguments:
  463. PhysicalAddress - The starting address of the range being set
  464. NumberOfBytes - The length, in bytes, of the range being set
  465. CacheType - The caching type for which the physical range is
  466. to be set to.
  467. NonCached:
  468. Setting ranges to be NonCached is done for
  469. book keeping reasons. A return of SUCCESS when
  470. setting a range NonCached does not mean it has
  471. been physically set to as NonCached. The caller
  472. must use a cache-disabled virtual pointer for
  473. any NonCached range.
  474. Cached:
  475. A successful return indicates that the physical
  476. range has been set to cached. This mode requires
  477. the caller to be at irql < dispatch_level.
  478. FrameBuffer:
  479. A successful return indicates that the physical
  480. range has been set to be framebuffer cached.
  481. This mode requires the caller to be at irql <
  482. dispatch_level.
  483. USWCCached:
  484. This type is to be satisfied only via PAT and
  485. fails for the MTRR interface.
  486. Return Value:
  487. STATUS_SUCCESS - if success, the cache attributes of the physical range
  488. have been set.
  489. STATUS_NOT_SUPPORTED - either feature not supported or not yet initialized,
  490. or MmWriteCombined type not supported and is
  491. requested, or input range does not match restrictions
  492. imposed by workarounds for current processor stepping
  493. or is below 1M (in the fixed MTRR range), or not yet
  494. initialized.
  495. STATUS_UNSUCCESSFUL - Unable to satisfy request due to
  496. - Unable to map software image into limited # of
  497. hardware MTRRs.
  498. - irql was not < DISPATCH_LEVEL.
  499. - Failure due to other internal error (out of memory).
  500. STATUS_INVALID_PARAMETER - Incorrect input memory type.
  501. --*/
  502. {
  503. KIRQL OldIrql;
  504. NEW_RANGE NewRange;
  505. BOOLEAN RemoveThisType[MTRR_TYPE_MAX];
  506. BOOLEAN EffectRangeChange, AddToRangeDatabase;
  507. //
  508. // If caller has requested the MmUSWCCached memory type then fail
  509. // - MmUSWCCached is supported via PAT and not otherwise
  510. //
  511. if (CacheType == MmUSWCCached) {
  512. return STATUS_NOT_SUPPORTED;
  513. }
  514. //
  515. // Addresses above 4GB, below 1MB or not page aligned and
  516. // page length are not supported.
  517. //
  518. if ((PhysicalAddress.HighPart != 0) ||
  519. (PhysicalAddress.LowPart < (1 * 1024 * 1024)) ||
  520. (PhysicalAddress.LowPart & 0xfff) ||
  521. (NumberOfBytes & 0xfff) ) {
  522. return STATUS_NOT_SUPPORTED;
  523. }
  524. ASSERT (NumberOfBytes != 0);
  525. //
  526. // If the processor is a AMD K6 with MTRR support then perform
  527. // processor specific implentaiton.
  528. //
  529. if (KeFeatureBits & KF_AMDK6MTRR) {
  530. if ((CacheType != MmWriteCombined) && (CacheType != MmNonCached)) {
  531. return STATUS_NOT_SUPPORTED;
  532. }
  533. return KiAmdK6MtrrSetMemoryType(PhysicalAddress.LowPart,
  534. NumberOfBytes,
  535. CacheType);
  536. }
  537. //
  538. // If processor doesn't have the memory type range feature
  539. // return not supported.
  540. //
  541. if (!KiRangeInfo.RangesValid) {
  542. return STATUS_NOT_SUPPORTED;
  543. }
  544. //
  545. // Workaround for cpu signatures 611, 612, 616 and 617
  546. // - if the request for setting a variable MTRR specifies
  547. // an address which is not 4M aligned or length is not
  548. // a multiple of 4M then return status not supported
  549. //
  550. if ((KiRangeInfo.MtrrWorkaround) &&
  551. ((PhysicalAddress.LowPart & 0x3fffff) ||
  552. (NumberOfBytes & 0x3fffff))) {
  553. return STATUS_NOT_SUPPORTED;
  554. }
  555. RtlZeroMemory (&NewRange, sizeof (NewRange));
  556. NewRange.Base = PhysicalAddress.QuadPart;
  557. NewRange.Limit = NewRange.Base + NumberOfBytes - 1;
  558. //
  559. // Determine what the new mtrr range type is. If setting NonCached then
  560. // the database need not be updated to reflect the virtual change. This
  561. // is because non-cached virtual pointers are mapped as cache disabled.
  562. //
  563. EffectRangeChange = TRUE;
  564. AddToRangeDatabase = TRUE;
  565. switch (CacheType) {
  566. case MmNonCached:
  567. NewRange.Type = MTRR_TYPE_UC;
  568. //
  569. // NonCached ranges do not need to be reflected into the h/w state
  570. // as all non-cached ranges are mapped with cache-disabled pointers.
  571. // This also means that cache-disabled ranges do not need to
  572. // be put into mtrrs, or held in the range, regardless of the default
  573. // range type.
  574. //
  575. EffectRangeChange = FALSE;
  576. AddToRangeDatabase = FALSE;
  577. break;
  578. case MmCached:
  579. NewRange.Type = KiRangeInfo.DefaultCachedType;
  580. break;
  581. case MmWriteCombined:
  582. NewRange.Type = MTRR_TYPE_USWC;
  583. //
  584. // If USWC type isn't supported, then request can not be honored
  585. //
  586. if (!KiRangeInfo.Capabilities.u.hw.UswcSupported) {
  587. DBGMSG ("KeSetPhysicalCacheTypeRange: USWC not supported\n");
  588. return STATUS_NOT_SUPPORTED;
  589. }
  590. break;
  591. default:
  592. DBGMSG ("KeSetPhysicalCacheTypeRange: no such cache type\n");
  593. return STATUS_INVALID_PARAMETER;
  594. break;
  595. }
  596. NewRange.Status = STATUS_SUCCESS;
  597. //
  598. // The default type is UC thus the range is still mapped using
  599. // a Cache Disabled VirtualPointer and hence it need not be added.
  600. //
  601. //
  602. // If h/w needs updated, lock down the code required to effect the change
  603. //
  604. if (EffectRangeChange) {
  605. if (KeGetCurrentIrql() >= DISPATCH_LEVEL) {
  606. //
  607. // Code can not be locked down. Supplying a new range type requires
  608. // that the caller calls at irql < dispatch_level.
  609. //
  610. DBGMSG ("KeSetPhysicalCacheTypeRange failed due to calling IRQL == DISPATCH_LEVEL\n");
  611. return STATUS_UNSUCCESSFUL;
  612. }
  613. MmLockPagableSectionByHandle(ExPageLockHandle);
  614. }
  615. //
  616. // Serialize the range type database
  617. //
  618. KeAcquireSpinLock (&KiRangeLock, &OldIrql);
  619. //
  620. // If h/w is going to need updated, then start an effective range change
  621. //
  622. if (EffectRangeChange) {
  623. KiStartEffectiveRangeChange (&NewRange);
  624. }
  625. if (NT_SUCCESS (NewRange.Status)) {
  626. //
  627. // If the new range is NonCached, then don't remove standard memory
  628. // caching types
  629. //
  630. memset (RemoveThisType, TRUE, MTRR_TYPE_MAX);
  631. if (NewRange.Type != MTRR_TYPE_UC) {
  632. //
  633. // If the requested type is uncached then the physical
  634. // memory region is mapped using a cache disabled virtual pointer.
  635. // The effective memory type for that region will be the lowest
  636. // common denominator of the MTRR type and the cache type in the
  637. // PTE. Therefore for a request of type UC, the effective type
  638. // will be UC irrespective of the MTRR settings in that range.
  639. // Hence it is not necessary to remove the existing MTRR settings
  640. // (if any) for that range.
  641. //
  642. //
  643. // Clip/remove any ranges in the target area
  644. //
  645. KiRemoveRange (&NewRange, NewRange.Base, NewRange.Limit, RemoveThisType);
  646. }
  647. //
  648. // If needed, add new range type
  649. //
  650. if (AddToRangeDatabase) {
  651. ASSERT (EffectRangeChange == TRUE);
  652. KiAddRange (&NewRange, NewRange.Base, NewRange.Limit, NewRange.Type);
  653. }
  654. //
  655. // If this is an effect range change, then complete it
  656. //
  657. if (EffectRangeChange) {
  658. KiCompleteEffectiveRangeChange (&NewRange);
  659. }
  660. }
  661. KeReleaseSpinLock (&KiRangeLock, OldIrql);
  662. if (EffectRangeChange) {
  663. MmUnlockPagableImageSection(ExPageLockHandle);
  664. }
  665. return NewRange.Status;
  666. }
  667. BOOLEAN
  668. KiRemoveRange (
  669. IN PNEW_RANGE NewRange,
  670. IN ULONGLONG Base,
  671. IN ULONGLONG Limit,
  672. IN PBOOLEAN RemoveThisType
  673. )
  674. /*++
  675. Routine Description:
  676. This function removes any range overlapping with the passed range, of
  677. type supplied in RemoveThisType from the global range database.
  678. Arguments:
  679. NewRange - Context information
  680. Base - Base & Limit signify the first & last address of a range
  681. Limit - which is to be removed from the range database
  682. RemoveThisType - A TRUE flag for each type which can not overlap the
  683. target range
  684. Return Value:
  685. TRUE - if the range database was altered such that it may no longer
  686. be sorted.
  687. --*/
  688. {
  689. ULONG i;
  690. PONE_RANGE Range;
  691. BOOLEAN DatabaseNeedsSorted;
  692. DatabaseNeedsSorted = FALSE;
  693. //
  694. // Check each range
  695. //
  696. for (i=0, Range=KiRangeInfo.Ranges; i < KiRangeInfo.NoRange; i++, Range++) {
  697. //
  698. // If this range type doesn't need to be altered, skip it
  699. //
  700. if (!RemoveThisType[Range->Type]) {
  701. continue;
  702. }
  703. //
  704. // Check range to see if it overlaps with range being removed
  705. //
  706. if (Range->Base < Base) {
  707. if (Range->Limit >= Base && Range->Limit <= Limit) {
  708. //
  709. // Truncate range to not overlap with area being removed
  710. //
  711. Range->Limit = Base - 1;
  712. }
  713. if (Range->Limit > Limit) {
  714. //
  715. // Target area is contained totally within this area.
  716. // Split into two ranges
  717. //
  718. //
  719. // Add range at end
  720. //
  721. DatabaseNeedsSorted = TRUE;
  722. KiAddRange (
  723. NewRange,
  724. Limit+1,
  725. Range->Limit,
  726. Range->Type
  727. );
  728. //
  729. // Turn current range into range at beginning
  730. //
  731. Range->Limit = Base - 1;
  732. }
  733. } else {
  734. // Range->Base >= Base
  735. if (Range->Base <= Limit) {
  736. if (Range->Limit <= Limit) {
  737. //
  738. // This range is totally within the target area. Remove it.
  739. //
  740. DatabaseNeedsSorted = TRUE;
  741. KiRangeInfo.NoRange -= 1;
  742. Range->Base = KiRangeInfo.Ranges[KiRangeInfo.NoRange].Base;
  743. Range->Limit = KiRangeInfo.Ranges[KiRangeInfo.NoRange].Limit;
  744. Range->Type = KiRangeInfo.Ranges[KiRangeInfo.NoRange].Type;
  745. //
  746. // recheck at current location
  747. //
  748. i -= 1;
  749. Range -= 1;
  750. } else {
  751. //
  752. // Bump beginning past area being removed
  753. //
  754. Range->Base = Limit + 1;
  755. }
  756. }
  757. }
  758. }
  759. if (!NT_SUCCESS (NewRange->Status)) {
  760. DBGMSG ("KiRemoveRange: failure\n");
  761. }
  762. return DatabaseNeedsSorted;
  763. }
  764. VOID
  765. KiAddRange (
  766. IN PNEW_RANGE NewRange,
  767. IN ULONGLONG Base,
  768. IN ULONGLONG Limit,
  769. IN UCHAR Type
  770. )
  771. /*++
  772. Routine Description:
  773. This function adds the passed range to the global range database.
  774. Arguments:
  775. NewRange - Context information
  776. Base - Base & Limit signify the first & last address of a range
  777. Limit - which is to be added to the range database
  778. Type - Type of caching required for this range
  779. Return Value:
  780. None - Context is updated with an error if the table has overflowed
  781. --*/
  782. {
  783. PONE_RANGE Range, OldRange;
  784. ULONG size;
  785. if (KiRangeInfo.NoRange >= KiRangeInfo.MaxRange) {
  786. //
  787. // Table is out of space, get a bigger one
  788. //
  789. OldRange = KiRangeInfo.Ranges;
  790. size = sizeof(ONE_RANGE) * (KiRangeInfo.MaxRange + GROW_RANGE_TABLE);
  791. Range = ExAllocatePoolWithTag (NonPagedPool, size, ' eK');
  792. if (!Range) {
  793. NewRange->Status = STATUS_UNSUCCESSFUL;
  794. return ;
  795. }
  796. //
  797. // Grow table
  798. //
  799. RtlZeroMemory (Range, size);
  800. RtlCopyMemory (Range, OldRange, sizeof(ONE_RANGE) * KiRangeInfo.MaxRange);
  801. KiRangeInfo.Ranges = Range;
  802. KiRangeInfo.MaxRange += GROW_RANGE_TABLE;
  803. ExFreePool (OldRange);
  804. }
  805. //
  806. // Add new entry to table
  807. //
  808. KiRangeInfo.Ranges[KiRangeInfo.NoRange].Base = Base;
  809. KiRangeInfo.Ranges[KiRangeInfo.NoRange].Limit = Limit;
  810. KiRangeInfo.Ranges[KiRangeInfo.NoRange].Type = Type;
  811. KiRangeInfo.NoRange += 1;
  812. }
  813. VOID
  814. KiStartEffectiveRangeChange (
  815. IN PNEW_RANGE NewRange
  816. )
  817. /*++
  818. Routine Description:
  819. This functions sets up the context information required to
  820. track & later effect a range change in hardware
  821. Arguments:
  822. NewRange - Context information
  823. Return Value:
  824. None
  825. --*/
  826. {
  827. ULONG size;
  828. //
  829. // Allocate working space for MTRR image
  830. //
  831. size = sizeof(MTRR_RANGE) * ((ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt + 1);
  832. NewRange->MTRR = ExAllocatePoolWithTag (NonPagedPool, size, ' eK');
  833. if (!NewRange->MTRR) {
  834. NewRange->Status = STATUS_UNSUCCESSFUL;
  835. return ;
  836. }
  837. RtlZeroMemory (NewRange->MTRR, size);
  838. //
  839. // Save current range information in case of an error
  840. //
  841. size = sizeof(ONE_RANGE) * KiRangeInfo.NoRange;
  842. NewRange->NoRange = KiRangeInfo.NoRange;
  843. NewRange->Ranges = ExAllocatePoolWithTag (NonPagedPool, size, ' eK');
  844. if (!NewRange->Ranges) {
  845. NewRange->Status = STATUS_UNSUCCESSFUL;
  846. return ;
  847. }
  848. RtlCopyMemory (NewRange->Ranges, KiRangeInfo.Ranges, size);
  849. }
  850. VOID
  851. KiCompleteEffectiveRangeChange (
  852. IN PNEW_RANGE NewRange
  853. )
  854. /*++
  855. Routine Description:
  856. This functions commits the range database to hardware, or backs
  857. out the current changes to it.
  858. Arguments:
  859. NewRange - Context information
  860. Return Value:
  861. None
  862. --*/
  863. {
  864. BOOLEAN Restart;
  865. ULONG Index, Index2, RemIndex2, NoMTRR;
  866. ULONGLONG BestLength, WhichMtrr;
  867. ULONGLONG CurrLength;
  868. ULONGLONG l, Base, Length, MLength;
  869. PONE_RANGE Range;
  870. ONE_RANGE OneRange;
  871. PMTRR_RANGE MTRR;
  872. BOOLEAN RoundDown;
  873. BOOLEAN RemoveThisType[MTRR_TYPE_MAX];
  874. PKPRCB Prcb;
  875. KIRQL OldIrql;
  876. #if !defined(NT_UP)
  877. KIRQL OldIrql2;
  878. KAFFINITY TargetProcessors;
  879. #endif
  880. ASSERT (KeGetCurrentIrql() == DISPATCH_LEVEL);
  881. Prcb = KeGetCurrentPrcb();
  882. //
  883. // Round all ranges, according to type, to match what h/w can support
  884. //
  885. for (Index=0; Index < KiRangeInfo.NoRange; Index++) {
  886. Range = &KiRangeInfo.Ranges[Index];
  887. //
  888. // Determine rounding for this range type
  889. //
  890. RoundDown = TRUE;
  891. if (Range->Type == MTRR_TYPE_UC) {
  892. RoundDown = FALSE;
  893. }
  894. //
  895. // Apply rounding
  896. //
  897. if (RoundDown) {
  898. Range->Base = (Range->Base + MTRR_PAGE_SIZE - 1) & MTRR_PAGE_MASK;
  899. Range->Limit = ((Range->Limit+1) & MTRR_PAGE_MASK)-1;
  900. } else {
  901. Range->Base = (Range->Base & MTRR_PAGE_MASK);
  902. Range->Limit = ((Range->Limit + MTRR_PAGE_SIZE) & MTRR_PAGE_MASK)-1;
  903. }
  904. }
  905. do {
  906. Restart = FALSE;
  907. //
  908. // Sort the ranges by base address
  909. //
  910. for (Index=0; Index < KiRangeInfo.NoRange; Index++) {
  911. Range = &KiRangeInfo.Ranges[Index];
  912. for (Index2=Index+1; Index2 < KiRangeInfo.NoRange; Index2++) {
  913. if (KiRangeInfo.Ranges[Index2].Base < Range->Base) {
  914. //
  915. // Swap KiRangeInfo.Ranges[Index] with KiRangeInfo.Ranges[Index2]
  916. //
  917. OneRange = *Range;
  918. *Range = KiRangeInfo.Ranges[Index2];
  919. KiRangeInfo.Ranges[Index2] = OneRange;
  920. }
  921. }
  922. }
  923. //
  924. // At this point the range database is sorted on
  925. // base address. Scan range database combining adjacent and
  926. // overlapping ranges of the same type
  927. //
  928. for (Index=0; Index < (ULONG) KiRangeInfo.NoRange-1; Index++) {
  929. Range = &KiRangeInfo.Ranges[Index];
  930. //
  931. // Scan the range database. If ranges are adjacent/overlap and are of
  932. // the same type, combine them.
  933. //
  934. for (Index2 = Index+1; Index2 < (ULONG) KiRangeInfo.NoRange; Index2++) {
  935. l = Range[0].Limit + 1;
  936. if (l < Range[0].Limit) {
  937. l = Range[0].Limit;
  938. }
  939. if (l >= KiRangeInfo.Ranges[Index2].Base &&
  940. Range[0].Type == KiRangeInfo.Ranges[Index2].Type) {
  941. //
  942. // Increase Range[0] limit to cover Range[Index2]
  943. //
  944. if (KiRangeInfo.Ranges[Index2].Limit > Range[0].Limit) {
  945. Range[0].Limit = KiRangeInfo.Ranges[Index2].Limit;
  946. }
  947. //
  948. // Remove KiRangeInfo.Ranges[Index2]
  949. //
  950. if (Index2 < (ULONG) KiRangeInfo.NoRange - 1 ) {
  951. //
  952. // Copy everything from Index2 till end
  953. // of range list. # Entries to copy is
  954. // (KiRangeInfo.NoRange -1) - (Index2+1) + 1
  955. //
  956. RtlCopyMemory(
  957. &(KiRangeInfo.Ranges[Index2]),
  958. &(KiRangeInfo.Ranges[Index2+1]),
  959. sizeof(ONE_RANGE) * (KiRangeInfo.NoRange-Index2-1)
  960. );
  961. }
  962. KiRangeInfo.NoRange -= 1;
  963. //
  964. // Recheck current location
  965. //
  966. Index2 -= 1;
  967. }
  968. }
  969. }
  970. //
  971. // At this point the range database is sorted on base
  972. // address and adjacent/overlapping ranges of the same
  973. // type are combined. Check for overlapping ranges -
  974. // If legal then allow else truncate the less "weighty" range
  975. //
  976. for (Index = 0; Index < (ULONG) KiRangeInfo.NoRange-1 && !Restart; Index++) {
  977. Range = &KiRangeInfo.Ranges[Index];
  978. l = Range[0].Limit + 1;
  979. if (l < Range[0].Limit) {
  980. l = Range[0].Limit;
  981. }
  982. //
  983. // If ranges overlap and are not of same type, and if the
  984. // overlap is not legal then carve them to the best cache type
  985. // available.
  986. //
  987. for (Index2 = Index+1; Index2 < (ULONG) KiRangeInfo.NoRange && !Restart; Index2++) {
  988. if (l > KiRangeInfo.Ranges[Index2].Base) {
  989. if (Range[0].Type == MTRR_TYPE_UC ||
  990. KiRangeInfo.Ranges[Index2].Type == MTRR_TYPE_UC) {
  991. //
  992. // Overlap of a UC type with a range of any other type is
  993. // legal
  994. //
  995. } else if ((Range[0].Type == MTRR_TYPE_WT &&
  996. KiRangeInfo.Ranges[Index2].Type == MTRR_TYPE_WB) ||
  997. (Range[0].Type == MTRR_TYPE_WB &&
  998. KiRangeInfo.Ranges[Index2].Type == MTRR_TYPE_WT) ) {
  999. //
  1000. // Overlap of WT and WB range is legal. The overlap range will
  1001. // be WT.
  1002. //
  1003. } else {
  1004. //
  1005. // This is an illegal overlap and we need to carve the ranges
  1006. // to remove the overlap.
  1007. //
  1008. // Pick range which has the cache type which should be used for
  1009. // the overlapped area
  1010. //
  1011. if (KiRangeWeight(&Range[0]) > KiRangeWeight(&(KiRangeInfo.Ranges[Index2]))){
  1012. RemIndex2 = Index2;
  1013. } else {
  1014. RemIndex2 = Index;
  1015. }
  1016. //
  1017. // Remove ranges of type which do not belong in the overlapped area
  1018. //
  1019. RtlZeroMemory (RemoveThisType, MTRR_TYPE_MAX);
  1020. RemoveThisType[KiRangeInfo.Ranges[RemIndex2].Type] = TRUE;
  1021. //
  1022. // Remove just the overlapped portion of the range.
  1023. //
  1024. Restart = KiRemoveRange (
  1025. NewRange,
  1026. KiRangeInfo.Ranges[Index2].Base,
  1027. (Range[0].Limit < KiRangeInfo.Ranges[Index2].Limit ?
  1028. Range[0].Limit : KiRangeInfo.Ranges[Index2].Limit),
  1029. RemoveThisType
  1030. );
  1031. }
  1032. }
  1033. }
  1034. }
  1035. } while (Restart);
  1036. //
  1037. // The range database is now rounded to fit in the h/w and sorted.
  1038. // Attempt to build MTRR settings which exactly describe the ranges
  1039. //
  1040. MTRR = NewRange->MTRR;
  1041. NoMTRR = 0;
  1042. for (Index=0;NT_SUCCESS(NewRange->Status)&& Index<KiRangeInfo.NoRange;Index++) {
  1043. Range = &KiRangeInfo.Ranges[Index];
  1044. //
  1045. // Build MTRRs to fit this range
  1046. //
  1047. Base = Range->Base;
  1048. Length = Range->Limit - Base + 1;
  1049. while (Length) {
  1050. //
  1051. // Compute MTRR length for current range base & length
  1052. //
  1053. if (Base == 0) {
  1054. MLength = Length;
  1055. } else {
  1056. MLength = (ULONGLONG) 1 << KiFindFirstSetRightBit(Base);
  1057. }
  1058. if (MLength > Length) {
  1059. MLength = Length;
  1060. }
  1061. l = (ULONGLONG) 1 << KiFindFirstSetLeftBit (MLength);
  1062. if (MLength > l) {
  1063. MLength = l;
  1064. }
  1065. //
  1066. // Store it in the next MTRR
  1067. //
  1068. MTRR[NoMTRR].Base.u.QuadPart = Base;
  1069. MTRR[NoMTRR].Base.u.hw.Type = Range->Type;
  1070. MTRR[NoMTRR].Mask.u.QuadPart = KiLengthToMask(MLength);
  1071. MTRR[NoMTRR].Mask.u.hw.Valid = 1;
  1072. NoMTRR += 1;
  1073. //
  1074. // Adjust off amount of data covered by that last MTRR
  1075. //
  1076. Base += MLength;
  1077. Length -= MLength;
  1078. //
  1079. // If there are too many MTRRs, and currently setting a
  1080. // Non-USWC range try to remove a USWC MTRR.
  1081. // (ie, convert some MmWriteCombined to MmNonCached).
  1082. //
  1083. if (NoMTRR > (ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt) {
  1084. if (Range->Type != MTRR_TYPE_USWC) {
  1085. //
  1086. // Find smallest USWC type and drop it
  1087. //
  1088. // This is okay only if the default type is UC.
  1089. // Default type should always be UC unless BIOS changes
  1090. // it. Still ASSERT!
  1091. //
  1092. ASSERT(KiRangeInfo.Default.u.hw.Type == MTRR_TYPE_UC);
  1093. WhichMtrr = 0; // satisfy no_opt compilation
  1094. BestLength = (ULONGLONG) 1 << (KiMtrrMaxRangeShift + 1);
  1095. for (Index2=0; Index2 < KiRangeInfo.Capabilities.u.hw.VarCnt; Index2++) {
  1096. if (MTRR[Index2].Base.u.hw.Type == MTRR_TYPE_USWC) {
  1097. CurrLength = KiMaskToLength(MTRR[Index2].Mask.u.QuadPart &
  1098. KiMtrrMaskMask);
  1099. if (CurrLength < BestLength) {
  1100. WhichMtrr = Index2;
  1101. BestLength = CurrLength;
  1102. }
  1103. }
  1104. }
  1105. if (BestLength == ((ULONGLONG) 1 << (KiMtrrMaxRangeShift + 1))) {
  1106. //
  1107. // Range was not found which could be dropped. Abort process
  1108. //
  1109. NewRange->Status = STATUS_UNSUCCESSFUL;
  1110. Length = 0;
  1111. } else {
  1112. //
  1113. // Remove WhichMtrr
  1114. //
  1115. NoMTRR -= 1;
  1116. MTRR[WhichMtrr] = MTRR[NoMTRR];
  1117. }
  1118. } else {
  1119. NewRange->Status = STATUS_UNSUCCESSFUL;
  1120. Length =0;
  1121. }
  1122. }
  1123. }
  1124. }
  1125. //
  1126. // Done building new MTRRs
  1127. //
  1128. if (NT_SUCCESS(NewRange->Status)) {
  1129. //
  1130. // Update the MTRRs on all processors
  1131. //
  1132. #if IDBG
  1133. KiDumpMTRR ("Loading the following MTRR:", NewRange->MTRR);
  1134. #endif
  1135. NewRange->Synchronize.TargetCount = 0;
  1136. NewRange->Synchronize.TargetPhase = &Prcb->ReverseStall;
  1137. NewRange->Synchronize.Processor = Prcb->Number;
  1138. //
  1139. // Previously enabled MTRRs with index > NoMTRR
  1140. // which could conflict with existing setting should be disabled
  1141. // This is taken care of by setting NewRange->NoMTRR to total
  1142. // number of variable MTRRs.
  1143. //
  1144. NewRange->NoMTRR = (ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt;
  1145. //
  1146. // Synchronize with other IPI functions which may stall
  1147. //
  1148. KeAcquireSpinLock (&KiReverseStallIpiLock, &OldIrql);
  1149. #if !defined(NT_UP)
  1150. //
  1151. // Collect all the (other) processors
  1152. //
  1153. TargetProcessors = KeActiveProcessors & ~Prcb->SetMember;
  1154. if (TargetProcessors != 0) {
  1155. KiIpiSendSynchronousPacket (
  1156. Prcb,
  1157. TargetProcessors,
  1158. KiLoadMTRRTarget,
  1159. (PVOID) NewRange,
  1160. NULL,
  1161. NULL
  1162. );
  1163. //
  1164. // Wait for all processors to be collected
  1165. //
  1166. KiIpiStallOnPacketTargets(TargetProcessors);
  1167. //
  1168. // All processors are now waiting. Raise to high level to
  1169. // ensure this processor doesn't enter the debugger due to
  1170. // some interrupt service routine.
  1171. //
  1172. KeRaiseIrql (HIGH_LEVEL, &OldIrql2);
  1173. //
  1174. // There's no reason for any debug events now, so signal
  1175. // the other processors that they can all disable interrupts
  1176. // and begin the MTRR update
  1177. //
  1178. Prcb->ReverseStall += 1;
  1179. }
  1180. #endif
  1181. //
  1182. // Update MTRRs
  1183. //
  1184. KiLoadMTRR (NewRange);
  1185. //
  1186. // Release lock
  1187. //
  1188. KeReleaseSpinLock (&KiReverseStallIpiLock, OldIrql);
  1189. #if IDBG
  1190. KiDumpMTRR ("Processor MTRR:", NewRange->MTRR);
  1191. #endif
  1192. } else {
  1193. //
  1194. // There was an error, put original range database back
  1195. //
  1196. DBGMSG ("KiCompleteEffectiveRangeChange: mtrr update did not occur\n");
  1197. if (NewRange->Ranges) {
  1198. KiRangeInfo.NoRange = NewRange->NoRange;
  1199. RtlCopyMemory (
  1200. KiRangeInfo.Ranges,
  1201. NewRange->Ranges,
  1202. sizeof (ONE_RANGE) * KiRangeInfo.NoRange
  1203. );
  1204. }
  1205. }
  1206. //
  1207. // Cleanup
  1208. //
  1209. ExFreePool (NewRange->Ranges);
  1210. ExFreePool (NewRange->MTRR);
  1211. }
  1212. STATIC ULONG
  1213. KiRangeWeight (
  1214. IN PONE_RANGE Range
  1215. )
  1216. /*++
  1217. Routine Description:
  1218. This functions returns a weighting of the passed in range's cache
  1219. type. When two or more regions collide within the same h/w region
  1220. the types are weighted and that cache type of the higher weight
  1221. is used for the collision area.
  1222. Arguments:
  1223. Range - Range to obtain weighting for
  1224. Return Value:
  1225. The weight of the particular cache type
  1226. --*/
  1227. {
  1228. ULONG Weight;
  1229. switch (Range->Type) {
  1230. case MTRR_TYPE_UC: Weight = 5; break;
  1231. case MTRR_TYPE_USWC: Weight = 4; break;
  1232. case MTRR_TYPE_WP: Weight = 3; break;
  1233. case MTRR_TYPE_WT: Weight = 2; break;
  1234. case MTRR_TYPE_WB: Weight = 1; break;
  1235. default: Weight = 0; break;
  1236. }
  1237. return Weight;
  1238. }
  1239. STATIC ULONGLONG
  1240. KiMaskToLength (
  1241. IN ULONGLONG Mask
  1242. )
  1243. /*++
  1244. Routine Description:
  1245. This function returns the length specified by a particular
  1246. mtrr variable register mask.
  1247. --*/
  1248. {
  1249. if (Mask == 0) {
  1250. // Zero Mask signifies a length of 2**36 or 2**40
  1251. return(((ULONGLONG) 1 << KiMtrrMaxRangeShift));
  1252. } else {
  1253. return(((ULONGLONG) 1 << KiFindFirstSetRightBit(Mask)));
  1254. }
  1255. }
  1256. STATIC ULONGLONG
  1257. KiLengthToMask (
  1258. IN ULONGLONG Length
  1259. )
  1260. /*++
  1261. Routine Description:
  1262. This function constructs the mask corresponding to the input length
  1263. to be set in a variable MTRR register. The length is assumed to be
  1264. a multiple of 4K.
  1265. --*/
  1266. {
  1267. ULONGLONG FullMask = 0xffffff;
  1268. if (Length == ((ULONGLONG) 1 << KiMtrrMaxRangeShift)) {
  1269. return(0);
  1270. } else {
  1271. return(((FullMask << KiFindFirstSetRightBit(Length)) & KiMtrrResBitMask));
  1272. }
  1273. }
  1274. STATIC ULONG
  1275. KiFindFirstSetRightBit (
  1276. IN ULONGLONG Set
  1277. )
  1278. /*++
  1279. Routine Description:
  1280. This function returns a bit position of the least significant
  1281. bit set in the passed ULONGLONG parameter. Passed parameter
  1282. must be non-zero.
  1283. --*/
  1284. {
  1285. ULONG bitno;
  1286. ASSERT(Set != 0);
  1287. for (bitno=0; !(Set & 0xFF); bitno += 8, Set >>= 8) ;
  1288. return KiFindFirstSetRight[Set & 0xFF] + bitno;
  1289. }
  1290. STATIC ULONG
  1291. KiFindFirstSetLeftBit (
  1292. IN ULONGLONG Set
  1293. )
  1294. /*++
  1295. Routine Description:
  1296. This function returns a bit position of the most significant
  1297. bit set in the passed ULONGLONG parameter. Passed parameter
  1298. must be non-zero.
  1299. --*/
  1300. {
  1301. ULONG bitno;
  1302. ASSERT(Set != 0);
  1303. for (bitno=56;!(Set & 0xFF00000000000000); bitno -= 8, Set <<= 8) ;
  1304. return KiFindFirstSetLeft[Set >> 56] + bitno;
  1305. }
#if IDBG
VOID
KiDumpMTRR (
    PUCHAR DebugString,
    PMTRR_RANGE MTRR
    )

/*++

Routine Description:

    Debug-only helper: dumps a table of variable MTRR settings to the
    debugger, one line per register slot.

Arguments:

    DebugString - Banner text printed before the table.

    MTRR - Table of VarCnt MTRR_RANGE entries to dump.  If NULL, the
        live variable MTRR MSR pairs are read from the current
        processor instead.

Return Value:

    None.

--*/

{
    static PUCHAR Type[] = {
    //   0       1       2       3       4       5       6
        "UC ", "USWC", "????", "????", "WT ", "WP ", "WB " };
    MTRR_VARIABLE_BASE Base;
    MTRR_VARIABLE_MASK Mask;
    ULONG Index;
    ULONG i;        // NOTE(review): appears unused — candidate for removal
    PUCHAR p;

    DbgPrint ("%s\n", DebugString);
    for (Index=0; Index < (ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt; Index++) {

        //
        // Use the caller-supplied table when present; otherwise read
        // the hardware base/mask MSR pair for this slot.
        //
        if (MTRR) {
            Base = MTRR[Index].Base;
            Mask = MTRR[Index].Mask;
        } else {
            Base.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_BASE+2*Index);
            Mask.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_MASK+2*Index);
        }

        DbgPrint (" %d. ", Index);

        //
        // Only slots with the Valid bit set are in effect; invalid
        // slots print as a bare index.
        //
        if (Mask.u.hw.Valid) {
            p = "????";
            if (Base.u.hw.Type < 7) {
                p = Type[Base.u.hw.Type];
            }
            DbgPrint ("%s %08x:%08x %08x:%08x",
                p,
                (ULONG) (Base.u.QuadPart >> 32),
                ((ULONG) (Base.u.QuadPart & KiMtrrMaskBase)),
                (ULONG) (Mask.u.QuadPart >> 32),
                ((ULONG) (Mask.u.QuadPart & KiMtrrMaskMask))
                );
        }
        DbgPrint ("\n");
    }
}
#endif
  1352. VOID
  1353. KiLoadMTRRTarget (
  1354. IN PKIPI_CONTEXT SignalDone,
  1355. IN PVOID NewRange,
  1356. IN PVOID Parameter2,
  1357. IN PVOID Parameter3
  1358. )
  1359. {
  1360. PNEW_RANGE Context;
  1361. UNREFERENCED_PARAMETER (Parameter2);
  1362. UNREFERENCED_PARAMETER (Parameter3);
  1363. Context = (PNEW_RANGE) NewRange;
  1364. //
  1365. // Wait for all processors to be ready
  1366. //
  1367. KiIpiSignalPacketDoneAndStall(SignalDone,
  1368. Context->Synchronize.TargetPhase);
  1369. //
  1370. // Update MTRRs
  1371. //
  1372. KiLoadMTRR (Context);
  1373. }
//
// NOTE(review): These two macros appear to be unused — KiLoadMTRR below
// emits the CR4 move opcode bytes directly with individual _emit
// statements.  As written they are also not well-formed MSVC inline
// assembly (_emit takes a single byte per statement, and "E0h" lacks the
// leading zero MASM requires for hex literals), so they are candidates
// for removal.
//
#define MOV_EAX_CR4 _emit { 0Fh, 20h, E0h }
#define MOV_CR4_EAX _emit { 0Fh, 22h, E0h }
NTSTATUS
KiLoadMTRR (
    IN PNEW_RANGE Context
    )

/*++

Routine Description:

    This function loads the memory type range registers into all processors.

    Follows the Intel reference sequence for a safe MTRR update:
    rendezvous all processors (KiLockStepExecution), disable caching
    (CR0.CD = 1, CR0.NW = 0), flush caches (wbinvd) and TLBs, write the
    MTRR MSRs with MTRRs globally disabled, flush again, re-enable the
    MTRRs, and finally restore CR4/CR0 and rendezvous once more.

    On AMD K6 class processors (KF_AMDK6MTRR) the register write is
    delegated to KiAmdK6MtrrWRMSR and no lock-step rendezvous is
    performed.

    Runs with interrupts disabled; each processor participating in the
    update executes this routine (see KiLoadMTRRTarget).

Arguments:

    Context - Context which include the MTRRs to load

Return Value:

    STATUS_SUCCESS.  All processors are set into the new state.

--*/

{
    MTRR_DEFAULT Default;
    BOOLEAN Enable;
    ULONG HldCr0, HldCr4;       // saved CR0 / CR4 images for restore
    ULONG Index;

    //
    // Disable interrupts
    //
    Enable = KeDisableInterrupts();

    //
    // Synchronize all processors
    //
    if (!(KeFeatureBits & KF_AMDK6MTRR)) {
        KiLockStepExecution (&Context->Synchronize);
    }

    _asm {
        ;
        ; Get current CR0
        ;
        mov     eax, cr0
        mov     HldCr0, eax

        ;
        ; Disable caching & line fill
        ;
        and     eax, not CR0_NW
        or      eax, CR0_CD
        mov     cr0, eax

        ;
        ; Flush caches
        ;
        ;
        ; wbinvd
        ;
        _emit 0Fh
        _emit 09h

        ;
        ; Get current cr4
        ;
        _emit 0Fh
        _emit 20h
        _emit 0E0h              ; mov eax, cr4
        mov     HldCr4, eax

        ;
        ; Disable global page
        ;
        and     eax, not CR4_PGE
        _emit 0Fh
        _emit 22h
        _emit 0E0h              ; mov cr4, eax

        ;
        ; Flush TLB (CR3 reload; global pages already disabled above)
        ;
        mov     eax, cr3
        mov     cr3, eax
    }

    if (KeFeatureBits & KF_AMDK6MTRR) {

        ;
        //
        // Write the MTRRs
        //
        KiAmdK6MtrrWRMSR();

    } else {

        //
        // Disable MTRRs
        //
        Default.u.QuadPart = RDMSR(MTRR_MSR_DEFAULT);
        Default.u.hw.MtrrEnabled = 0;
        WRMSR (MTRR_MSR_DEFAULT, Default.u.QuadPart);

        //
        // Load new MTRRs (each variable MTRR is a base/mask MSR pair)
        //
        for (Index=0; Index < Context->NoMTRR; Index++) {
            WRMSR (MTRR_MSR_VARIABLE_BASE+2*Index, Context->MTRR[Index].Base.u.QuadPart);
            WRMSR (MTRR_MSR_VARIABLE_MASK+2*Index, Context->MTRR[Index].Mask.u.QuadPart);
        }
    }

    _asm {
        ;
        ; Flush caches (this should be a "nop", but it was in the Intel reference algorithm)
        ; This is required because of aggressive prefetch of both instr + data
        ;
        ;
        ; wbinvd
        ;
        _emit 0Fh
        _emit 09h

        ;
        ; Flush TLBs (same comment as above)
        ; Same explanation as above
        ;
        mov     eax, cr3
        mov     cr3, eax
    }

    if (!(KeFeatureBits & KF_AMDK6MTRR)) {

        //
        // Enable MTRRs
        //
        // N.B. Default still holds the value read and cleared above;
        // this path only executes when the non-K6 branch above
        // initialized it.
        //
        Default.u.hw.MtrrEnabled = 1;
        WRMSR (MTRR_MSR_DEFAULT, Default.u.QuadPart);
    }

    _asm {
        ;
        ; Restore CR4 (global page enable)
        ;
        mov     eax, HldCr4
        _emit 0Fh
        _emit 22h
        _emit 0E0h              ; mov cr4, eax

        ;
        ; Restore CR0 (cache enable)
        ;
        mov     eax, HldCr0
        mov     cr0, eax
    }

    //
    // Wait for all processors to reach the same place,
    // restore interrupts and return.
    //
    if (!(KeFeatureBits & KF_AMDK6MTRR)) {
        KiLockStepExecution (&Context->Synchronize);
    }

    KeEnableInterrupts (Enable);
    return STATUS_SUCCESS;
}
VOID
KiLockStepExecution (
    IN PPROCESSOR_LOCKSTEP Context
    )

/*++

Routine Description:

    One rendezvous barrier used to lock-step all processors around the
    MTRR update.  The controlling processor (Context->Processor) spins
    until every other active processor has checked in via TargetCount,
    resets the count for the next barrier, and then releases the
    waiters by advancing *TargetPhase.  Every other processor checks in
    and spins until the phase advances.  KiLoadMTRR invokes this twice:
    once before and once after reprogramming the MTRRs.

    On UP builds this routine is a no-op.

Arguments:

    Context - Shared barrier state: Processor is the controlling
        processor's number, TargetCount counts waiters that have
        checked in, and TargetPhase points at the generation counter
        (the controlling processor's PRCB ReverseStall field).

Return Value:

    None.

--*/

{
#if !defined(NT_UP)

    LONG CurrentPhase;
    volatile PLONG TargetPhase;
    PKPRCB Prcb;

    TargetPhase = (volatile PLONG) Context->TargetPhase;
    Prcb = KeGetCurrentPrcb();

    if (Prcb->Number == (CCHAR) Context->Processor) {

        //
        // Wait for all processors to signal
        //
        while (Context->TargetCount != (ULONG) KeNumberProcessors - 1) {
            KeYieldProcessor ();
        }

        //
        // Reset count for next time
        //
        Context->TargetCount = 0;

        //
        // Let waiting processor go to next synchronization point
        //
        InterlockedIncrement (TargetPhase);

    } else {

        //
        // Get current phase
        //
        // N.B. The phase must be captured BEFORE TargetCount is
        // incremented below; once the controlling processor observes
        // the full count it may advance the phase at any moment.
        //
        CurrentPhase = *TargetPhase;

        //
        // Signal that we have completed the current phase
        //
        InterlockedIncrement ((PLONG)&Context->TargetCount);

        //
        // Wait for new phase to begin
        //
        while (*TargetPhase == CurrentPhase) {
            KeYieldProcessor ();
        }
    }

#else

    UNREFERENCED_PARAMETER (Context);

#endif
}