Leaked source code of Windows Server 2003


/*++

Copyright (c) 2000 Microsoft Corporation

Module Name:

    amd64.h

Abstract:

    This module contains the AMD64 hardware specific header file.

Author:

    David N. Cutler (davec) 3-May-2000

Revision History:

--*/

#ifndef __amd64_
#define __amd64_

#if !(defined(_NTDRIVER_) || defined(_NTDDK_) || defined(_NTIFS_) || defined(_NTHAL_) || defined(_NTOSP_)) && !defined(_BLDR_)

#define ExRaiseException RtlRaiseException
#define ExRaiseStatus RtlRaiseStatus

#endif

// begin_ntddk begin_wdm begin_nthal begin_ntndis begin_ntosp

#if defined(_M_AMD64) && !defined(RC_INVOKED) && !defined(MIDL_PASS)

//
// Define intrinsic functions to do in's and out's.
//

#ifdef __cplusplus
extern "C" {
#endif

UCHAR
__inbyte (
    IN USHORT Port
    );

USHORT
__inword (
    IN USHORT Port
    );

ULONG
__indword (
    IN USHORT Port
    );

VOID
__outbyte (
    IN USHORT Port,
    IN UCHAR Data
    );

VOID
__outword (
    IN USHORT Port,
    IN USHORT Data
    );

VOID
__outdword (
    IN USHORT Port,
    IN ULONG Data
    );

VOID
__inbytestring (
    IN USHORT Port,
    IN PUCHAR Buffer,
    IN ULONG Count
    );

VOID
__inwordstring (
    IN USHORT Port,
    IN PUSHORT Buffer,
    IN ULONG Count
    );

VOID
__indwordstring (
    IN USHORT Port,
    IN PULONG Buffer,
    IN ULONG Count
    );

VOID
__outbytestring (
    IN USHORT Port,
    IN PUCHAR Buffer,
    IN ULONG Count
    );

VOID
__outwordstring (
    IN USHORT Port,
    IN PUSHORT Buffer,
    IN ULONG Count
    );

VOID
__outdwordstring (
    IN USHORT Port,
    IN PULONG Buffer,
    IN ULONG Count
    );

#ifdef __cplusplus
}
#endif

#pragma intrinsic(__inbyte)
#pragma intrinsic(__inword)
#pragma intrinsic(__indword)
#pragma intrinsic(__outbyte)
#pragma intrinsic(__outword)
#pragma intrinsic(__outdword)
#pragma intrinsic(__inbytestring)
#pragma intrinsic(__inwordstring)
#pragma intrinsic(__indwordstring)
#pragma intrinsic(__outbytestring)
#pragma intrinsic(__outwordstring)
#pragma intrinsic(__outdwordstring)
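
//
// Editor's note: an illustrative sketch, not part of the original header.
// It shows the in/out intrinsics above used against the standard PC/AT CMOS
// index/data port pair (0x70/0x71); the routine name is hypothetical.
//

#if 0
UCHAR
ExampleReadCmosRegister (
    IN UCHAR Index
    )
{
    __outbyte(0x70, Index);         // select a CMOS register
    return __inbyte(0x71);          // read the selected register
}
#endif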
//
// Interlocked intrinsic functions.
//

#define InterlockedAnd _InterlockedAnd
#define InterlockedOr _InterlockedOr
#define InterlockedXor _InterlockedXor
#define InterlockedIncrement _InterlockedIncrement
#define InterlockedIncrementAcquire InterlockedIncrement
#define InterlockedIncrementRelease InterlockedIncrement
#define InterlockedDecrement _InterlockedDecrement
#define InterlockedDecrementAcquire InterlockedDecrement
#define InterlockedDecrementRelease InterlockedDecrement
#define InterlockedAdd _InterlockedAdd
#define InterlockedExchange _InterlockedExchange
#define InterlockedExchangeAdd _InterlockedExchangeAdd
#define InterlockedCompareExchange _InterlockedCompareExchange
#define InterlockedCompareExchangeAcquire InterlockedCompareExchange
#define InterlockedCompareExchangeRelease InterlockedCompareExchange
#define InterlockedAnd64 _InterlockedAnd64
#define InterlockedOr64 _InterlockedOr64
#define InterlockedXor64 _InterlockedXor64
#define InterlockedIncrement64 _InterlockedIncrement64
#define InterlockedDecrement64 _InterlockedDecrement64
#define InterlockedAdd64 _InterlockedAdd64
#define InterlockedExchange64 _InterlockedExchange64
#define InterlockedExchangeAcquire64 InterlockedExchange64
#define InterlockedExchangeAdd64 _InterlockedExchangeAdd64
#define InterlockedCompareExchange64 _InterlockedCompareExchange64
#define InterlockedCompareExchangeAcquire64 InterlockedCompareExchange64
#define InterlockedCompareExchangeRelease64 InterlockedCompareExchange64
#define InterlockedExchangePointer _InterlockedExchangePointer
#define InterlockedCompareExchangePointer _InterlockedCompareExchangePointer

#ifdef __cplusplus
extern "C" {
#endif

LONG
InterlockedAnd (
    IN OUT LONG volatile *Destination,
    IN LONG Value
    );

LONG
InterlockedOr (
    IN OUT LONG volatile *Destination,
    IN LONG Value
    );

LONG
InterlockedXor (
    IN OUT LONG volatile *Destination,
    IN LONG Value
    );

LONG64
InterlockedAnd64 (
    IN OUT LONG64 volatile *Destination,
    IN LONG64 Value
    );

LONG64
InterlockedOr64 (
    IN OUT LONG64 volatile *Destination,
    IN LONG64 Value
    );

LONG64
InterlockedXor64 (
    IN OUT LONG64 volatile *Destination,
    IN LONG64 Value
    );

LONG
InterlockedIncrement(
    IN OUT LONG volatile *Addend
    );

LONG
InterlockedDecrement(
    IN OUT LONG volatile *Addend
    );

LONG
InterlockedExchange(
    IN OUT LONG volatile *Target,
    IN LONG Value
    );

LONG
InterlockedExchangeAdd(
    IN OUT LONG volatile *Addend,
    IN LONG Value
    );

#if !defined(_X86AMD64_)

__forceinline
LONG
InterlockedAdd(
    IN OUT LONG volatile *Addend,
    IN LONG Value
    )
{
    return InterlockedExchangeAdd(Addend, Value) + Value;
}

#endif

LONG
InterlockedCompareExchange (
    IN OUT LONG volatile *Destination,
    IN LONG ExChange,
    IN LONG Comperand
    );

LONG64
InterlockedIncrement64(
    IN OUT LONG64 volatile *Addend
    );

LONG64
InterlockedDecrement64(
    IN OUT LONG64 volatile *Addend
    );

LONG64
InterlockedExchange64(
    IN OUT LONG64 volatile *Target,
    IN LONG64 Value
    );

LONG64
InterlockedExchangeAdd64(
    IN OUT LONG64 volatile *Addend,
    IN LONG64 Value
    );

#if !defined(_X86AMD64_)

__forceinline
LONG64
InterlockedAdd64(
    IN OUT LONG64 volatile *Addend,
    IN LONG64 Value
    )
{
    return InterlockedExchangeAdd64(Addend, Value) + Value;
}

#endif

LONG64
InterlockedCompareExchange64 (
    IN OUT LONG64 volatile *Destination,
    IN LONG64 ExChange,
    IN LONG64 Comperand
    );

PVOID
InterlockedCompareExchangePointer (
    IN OUT PVOID volatile *Destination,
    IN PVOID Exchange,
    IN PVOID Comperand
    );

PVOID
InterlockedExchangePointer(
    IN OUT PVOID volatile *Target,
    IN PVOID Value
    );

#pragma intrinsic(_InterlockedAnd)
#pragma intrinsic(_InterlockedOr)
#pragma intrinsic(_InterlockedXor)
#pragma intrinsic(_InterlockedIncrement)
#pragma intrinsic(_InterlockedDecrement)
#pragma intrinsic(_InterlockedExchange)
#pragma intrinsic(_InterlockedExchangeAdd)
#pragma intrinsic(_InterlockedCompareExchange)
#pragma intrinsic(_InterlockedAnd64)
#pragma intrinsic(_InterlockedOr64)
#pragma intrinsic(_InterlockedXor64)
#pragma intrinsic(_InterlockedIncrement64)
#pragma intrinsic(_InterlockedDecrement64)
#pragma intrinsic(_InterlockedExchange64)
#pragma intrinsic(_InterlockedExchangeAdd64)
#pragma intrinsic(_InterlockedCompareExchange64)
#pragma intrinsic(_InterlockedExchangePointer)
#pragma intrinsic(_InterlockedCompareExchangePointer)

#ifdef __cplusplus
}
#endif
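
//
// Editor's note: an illustrative sketch, not part of the original header.
// It shows the conventional compare-exchange retry loop built on the
// interlocked operations declared above; the routine name is hypothetical
// (the effect is what InterlockedOr already provides directly).
//

#if 0
LONG
ExampleAtomicOr (
    IN OUT LONG volatile *Destination,
    IN LONG Mask
    )
{
    LONG OldValue;

    do {
        OldValue = *Destination;
    } while (InterlockedCompareExchange(Destination,
                                        OldValue | Mask,
                                        OldValue) != OldValue);

    return OldValue;
}
#endif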
#endif // defined(_M_AMD64) && !defined(RC_INVOKED) && !defined(MIDL_PASS)

#if defined(_AMD64_)

//
// Types to use to contain PFNs and their counts.
//

typedef ULONG PFN_COUNT;

typedef LONG64 SPFN_NUMBER, *PSPFN_NUMBER;
typedef ULONG64 PFN_NUMBER, *PPFN_NUMBER;

//
// Define maximum size of flush multiple TB request.
//

#define FLUSH_MULTIPLE_MAXIMUM 32

//
// Indicate that the AMD64 compiler supports the allocate pragmas.
//

#define ALLOC_PRAGMA 1
#define ALLOC_DATA_PRAGMA 1

// end_ntddk end_nthal end_ntndis end_wdm end_ntosp

//
// Length of interrupt object dispatch code in longwords.
// (shielint) Reserve 9*4 space for ABIOS stack mapping. If there is no
// ABIOS support, the size of DISPATCH_LENGTH should be 74.
//

// begin_nthal

#define NORMAL_DISPATCH_LENGTH 106 // ntddk wdm
#define DISPATCH_LENGTH NORMAL_DISPATCH_LENGTH // ntddk wdm

// ntddk wdm

// begin_ntosp

//
// Define constants for bits in CR0.
//

#define CR0_PE 0x00000001 // protection enable
#define CR0_MP 0x00000002 // math present
#define CR0_EM 0x00000004 // emulate math coprocessor
#define CR0_TS 0x00000008 // task switched
#define CR0_ET 0x00000010 // extension type (80387)
#define CR0_NE 0x00000020 // numeric error
#define CR0_WP 0x00010000 // write protect
#define CR0_AM 0x00040000 // alignment mask
#define CR0_NW 0x20000000 // not write-through
#define CR0_CD 0x40000000 // cache disable
#define CR0_PG 0x80000000 // paging

//
// Define functions to read and write CR0.
//

#ifdef __cplusplus
extern "C" {
#endif

#define ReadCR0() __readcr0()

ULONG64
__readcr0 (
    VOID
    );

#define WriteCR0(Data) __writecr0(Data)

VOID
__writecr0 (
    IN ULONG64 Data
    );

#pragma intrinsic(__readcr0)
#pragma intrinsic(__writecr0)
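
//
// Editor's note: an illustrative sketch, not part of the original header.
// It reads CR0 through the accessor above and tests one of the bit masks;
// kernel mode only, routine name hypothetical.
//

#if 0
BOOLEAN
ExampleWriteProtectEnabled (
    VOID
    )
{
    return (BOOLEAN)((ReadCR0() & CR0_WP) != 0);
}
#endif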
//
// Define functions to read and write CR3.
//

#define ReadCR3() __readcr3()

ULONG64
__readcr3 (
    VOID
    );

#define WriteCR3(Data) __writecr3(Data)

VOID
__writecr3 (
    IN ULONG64 Data
    );

#pragma intrinsic(__readcr3)
#pragma intrinsic(__writecr3)

//
// Define constants for bits in CR4.
//

#define CR4_VME 0x00000001 // V86 mode extensions
#define CR4_PVI 0x00000002 // Protected mode virtual interrupts
#define CR4_TSD 0x00000004 // Time stamp disable
#define CR4_DE 0x00000008 // Debugging Extensions
#define CR4_PSE 0x00000010 // Page size extensions
#define CR4_PAE 0x00000020 // Physical address extensions
#define CR4_MCE 0x00000040 // Machine check enable
#define CR4_PGE 0x00000080 // Page global enable
#define CR4_FXSR 0x00000200 // FXSR used by OS
#define CR4_XMMEXCPT 0x00000400 // XMMI used by OS

//
// Define functions to read and write CR4.
//

#define ReadCR4() __readcr4()

ULONG64
__readcr4 (
    VOID
    );

#define WriteCR4(Data) __writecr4(Data)

VOID
__writecr4 (
    IN ULONG64 Data
    );

#pragma intrinsic(__readcr4)
#pragma intrinsic(__writecr4)

//
// Define functions to read and write CR8.
//
// CR8 is the APIC TPR register.
//

#define ReadCR8() __readcr8()

ULONG64
__readcr8 (
    VOID
    );

#define WriteCR8(Data) __writecr8(Data)

VOID
__writecr8 (
    IN ULONG64 Data
    );

#pragma intrinsic(__readcr8)
#pragma intrinsic(__writecr8)

#ifdef __cplusplus
}
#endif

// end_nthal end_ntosp

//
// External references to the code labels.
//

extern ULONG KiInterruptTemplate[NORMAL_DISPATCH_LENGTH];

// begin_ntddk begin_wdm begin_nthal begin_ntosp

//
// Interrupt Request Level definitions
//

#define PASSIVE_LEVEL 0 // Passive release level
#define LOW_LEVEL 0 // Lowest interrupt level
#define APC_LEVEL 1 // APC interrupt level
#define DISPATCH_LEVEL 2 // Dispatcher level
#define CLOCK_LEVEL 13 // Interval clock level
#define IPI_LEVEL 14 // Interprocessor interrupt level
#define POWER_LEVEL 14 // Power failure level
#define PROFILE_LEVEL 15 // timer used for profiling.
#define HIGH_LEVEL 15 // Highest interrupt level

// end_ntddk end_wdm end_ntosp

#if defined(NT_UP)

// synchronization level (UP)
#define SYNCH_LEVEL DISPATCH_LEVEL

#else

// synchronization level (MP)
#define SYNCH_LEVEL (IPI_LEVEL-2) // ntddk wdm ntosp

#endif

#define IRQL_VECTOR_OFFSET 2 // offset from IRQL to vector / 16

#define KiSynchIrql SYNCH_LEVEL // enable portable code
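
//
// Editor's note: an illustrative sketch, not part of the original header.
// It shows the conventional use of the IRQL constants above with the
// documented KeRaiseIrql/KeLowerIrql interfaces; the routine name is
// hypothetical.
//

#if 0
VOID
ExampleRunAtDispatchLevel (
    VOID
    )
{
    KIRQL OldIrql;

    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);

    //
    // Work done here cannot be preempted by the thread dispatcher, but it
    // can still be interrupted from levels above DISPATCH_LEVEL.
    //

    KeLowerIrql(OldIrql);
}
#endif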
//
// Machine type definitions
//

#define MACHINE_TYPE_ISA 0
#define MACHINE_TYPE_EISA 1
#define MACHINE_TYPE_MCA 2

// end_nthal

//
// The previous values and the following are or'ed in KeI386MachineType.
//

#define MACHINE_TYPE_PC_AT_COMPATIBLE 0x00000000
#define MACHINE_TYPE_PC_9800_COMPATIBLE 0x00000100
#define MACHINE_TYPE_FMR_COMPATIBLE 0x00000200

extern ULONG KeI386MachineType;

// begin_nthal

//
// Define constants used in selector tests.
//
// N.B. MODE_MASK and MODE_BIT assume that all code runs at either ring-0
//      or ring-3 and are used to test the mode. RPL_MASK is used for merging
//      or extracting RPL values.
//

#define MODE_BIT 0
#define MODE_MASK 1 // ntosp
#define RPL_MASK 3

//
// Startup count value for KeStallExecution. This value is used
// until KiInitializeStallExecution can compute the real one.
// Pick a value long enough for very fast processors.
//

#define INITIAL_STALL_COUNT 100

// end_nthal

//
// begin_nthal
//
// Macro to extract the high word of a long offset
//

#define HIGHWORD(l) \
    ((USHORT)(((ULONG)(l)>>16) & 0xffff))

//
// Macro to extract the low word of a long offset
//

#define LOWWORD(l) \
    ((USHORT)((ULONG)l & 0x0000ffff))

//
// Macro to combine two USHORT offsets into a long offset
//

#if !defined(MAKEULONG)

#define MAKEULONG(x, y) \
    (((((ULONG)(x))<<16) & 0xffff0000) | \
    ((ULONG)(y) & 0xffff))

#endif
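
//
// Editor's note: an illustrative sketch, not part of the original header.
// For any 32-bit value, the three macros above satisfy the round-trip
// identity MAKEULONG(HIGHWORD(v), LOWWORD(v)) == v. The routine name is
// hypothetical.
//

#if 0
VOID
ExampleWordMacros (
    VOID
    )
{
    ULONG Offset = 0x12345678;

    ASSERT(HIGHWORD(Offset) == 0x1234);
    ASSERT(LOWWORD(Offset) == 0x5678);
    ASSERT(MAKEULONG(HIGHWORD(Offset), LOWWORD(Offset)) == Offset);
}
#endif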
// end_nthal

//
// Request a software interrupt.
//

#define KiRequestSoftwareInterrupt(RequestIrql) \
    HalRequestSoftwareInterrupt( RequestIrql )

// begin_ntddk begin_wdm begin_nthal begin_ntndis begin_ntosp

//
// I/O space read and write macros.
//
// The READ/WRITE_REGISTER_* calls manipulate I/O registers in MEMORY space.
//
// The READ/WRITE_PORT_* calls manipulate I/O registers in PORT space.
//

__forceinline
UCHAR
READ_REGISTER_UCHAR (
    volatile UCHAR *Register
    )
{
    return *Register;
}

__forceinline
USHORT
READ_REGISTER_USHORT (
    volatile USHORT *Register
    )
{
    return *Register;
}

__forceinline
ULONG
READ_REGISTER_ULONG (
    volatile ULONG *Register
    )
{
    return *Register;
}

__forceinline
VOID
READ_REGISTER_BUFFER_UCHAR (
    PUCHAR Register,
    PUCHAR Buffer,
    ULONG Count
    )
{
    __movsb(Buffer, Register, Count);
    return;
}

__forceinline
VOID
READ_REGISTER_BUFFER_USHORT (
    PUSHORT Register,
    PUSHORT Buffer,
    ULONG Count
    )
{
    __movsw(Buffer, Register, Count);
    return;
}

__forceinline
VOID
READ_REGISTER_BUFFER_ULONG (
    PULONG Register,
    PULONG Buffer,
    ULONG Count
    )
{
    __movsd(Buffer, Register, Count);
    return;
}

__forceinline
VOID
WRITE_REGISTER_UCHAR (
    PUCHAR Register,
    UCHAR Value
    )
{
    *Register = Value;
    StoreFence();
    return;
}

__forceinline
VOID
WRITE_REGISTER_USHORT (
    PUSHORT Register,
    USHORT Value
    )
{
    *Register = Value;
    StoreFence();
    return;
}

__forceinline
VOID
WRITE_REGISTER_ULONG (
    PULONG Register,
    ULONG Value
    )
{
    *Register = Value;
    StoreFence();
    return;
}

__forceinline
VOID
WRITE_REGISTER_BUFFER_UCHAR (
    PUCHAR Register,
    PUCHAR Buffer,
    ULONG Count
    )
{
    __movsb(Register, Buffer, Count);
    StoreFence();
    return;
}

__forceinline
VOID
WRITE_REGISTER_BUFFER_USHORT (
    PUSHORT Register,
    PUSHORT Buffer,
    ULONG Count
    )
{
    __movsw(Register, Buffer, Count);
    StoreFence();
    return;
}

__forceinline
VOID
WRITE_REGISTER_BUFFER_ULONG (
    PULONG Register,
    PULONG Buffer,
    ULONG Count
    )
{
    __movsd(Register, Buffer, Count);
    StoreFence();
    return;
}

__forceinline
UCHAR
READ_PORT_UCHAR (
    PUCHAR Port
    )
{
    return __inbyte((USHORT)((ULONG64)Port));
}

__forceinline
USHORT
READ_PORT_USHORT (
    PUSHORT Port
    )
{
    return __inword((USHORT)((ULONG64)Port));
}

__forceinline
ULONG
READ_PORT_ULONG (
    PULONG Port
    )
{
    return __indword((USHORT)((ULONG64)Port));
}

__forceinline
VOID
READ_PORT_BUFFER_UCHAR (
    PUCHAR Port,
    PUCHAR Buffer,
    ULONG Count
    )
{
    __inbytestring((USHORT)((ULONG64)Port), Buffer, Count);
    return;
}

__forceinline
VOID
READ_PORT_BUFFER_USHORT (
    PUSHORT Port,
    PUSHORT Buffer,
    ULONG Count
    )
{
    __inwordstring((USHORT)((ULONG64)Port), Buffer, Count);
    return;
}

__forceinline
VOID
READ_PORT_BUFFER_ULONG (
    PULONG Port,
    PULONG Buffer,
    ULONG Count
    )
{
    __indwordstring((USHORT)((ULONG64)Port), Buffer, Count);
    return;
}

__forceinline
VOID
WRITE_PORT_UCHAR (
    PUCHAR Port,
    UCHAR Value
    )
{
    __outbyte((USHORT)((ULONG64)Port), Value);
    return;
}

__forceinline
VOID
WRITE_PORT_USHORT (
    PUSHORT Port,
    USHORT Value
    )
{
    __outword((USHORT)((ULONG64)Port), Value);
    return;
}

__forceinline
VOID
WRITE_PORT_ULONG (
    PULONG Port,
    ULONG Value
    )
{
    __outdword((USHORT)((ULONG64)Port), Value);
    return;
}

__forceinline
VOID
WRITE_PORT_BUFFER_UCHAR (
    PUCHAR Port,
    PUCHAR Buffer,
    ULONG Count
    )
{
    __outbytestring((USHORT)((ULONG64)Port), Buffer, Count);
    return;
}

__forceinline
VOID
WRITE_PORT_BUFFER_USHORT (
    PUSHORT Port,
    PUSHORT Buffer,
    ULONG Count
    )
{
    __outwordstring((USHORT)((ULONG64)Port), Buffer, Count);
    return;
}

__forceinline
VOID
WRITE_PORT_BUFFER_ULONG (
    PULONG Port,
    PULONG Buffer,
    ULONG Count
    )
{
    __outdwordstring((USHORT)((ULONG64)Port), Buffer, Count);
    return;
}
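
//
// Editor's note: an illustrative sketch, not part of the original header.
// The REGISTER macros take a virtual address that maps device memory (for
// example, one returned by MmMapIoSpace), while the PORT macros take an I/O
// port number carried in a pointer-typed argument. The parameter name is
// hypothetical; port 0x80 is the conventional PC POST diagnostic port.
//

#if 0
VOID
ExampleDeviceAccess (
    IN PUCHAR DeviceMemory          // assumed mapped with MmMapIoSpace
    )
{
    UCHAR Status;

    Status = READ_REGISTER_UCHAR(DeviceMemory);           // memory space
    WRITE_PORT_UCHAR((PUCHAR)(ULONG_PTR)0x80, Status);    // port space
}
#endif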
// end_ntndis

//
// Get data cache fill size.
//

#if PRAGMA_DEPRECATED_DDK
#pragma deprecated(KeGetDcacheFillSize) // Use GetDmaAlignment
#endif

#define KeGetDcacheFillSize() 1L

// end_ntddk end_wdm end_nthal end_ntosp

//
// Fill TB entry and flush single TB entry.
//

#define KeFillEntryTb(Virtual) \
    InvalidatePage(Virtual);

#if !defined(_NTHAL_) && !defined(RC_INVOKED) && !defined(MIDL_PASS)

__forceinline
VOID
KeFlushCurrentTb (
    VOID
    )
{
    ULONG64 Cr4;

    Cr4 = ReadCR4();
    WriteCR4(Cr4 & ~CR4_PGE);
    WriteCR4(Cr4);
    return;
}

__forceinline
VOID
KiFlushProcessTb (
    VOID
    )
{
    ULONG64 Cr3;

    Cr3 = ReadCR3();
    WriteCR3(Cr3);
    return;
}

#else

// begin_nthal

NTKERNELAPI
VOID
KeFlushCurrentTb (
    VOID
    );

// end_nthal

#endif

#define KiFlushSingleTb(Virtual) InvalidatePage(Virtual)

//
// Data cache, instruction cache, I/O buffer, and write buffer flush routine
// prototypes.
//
// AMD64 has transparent caches, so these are no-ops.
//

#define KeSweepDcache(AllProcessors)
#define KeSweepCurrentDcache()
#define KeSweepIcache(AllProcessors)
#define KeSweepCurrentIcache()
#define KeSweepIcacheRange(AllProcessors, BaseAddress, Length)

// begin_ntddk begin_wdm begin_nthal begin_ntndis begin_ntosp

#define KeFlushIoBuffers(Mdl, ReadOperation, DmaOperation)

// end_ntddk end_wdm end_ntndis end_ntosp

#define KeYieldProcessor()

// end_nthal

//
// Define executive macros for acquiring and releasing executive spinlocks.
// These macros can ONLY be used by executive components and NOT by drivers.
// Drivers MUST use the kernel interfaces since they must be MP enabled on
// all systems.
//

#if defined(NT_UP) && !DBG && !defined(_NTDDK_) && !defined(_NTIFS_)

#if !defined(_NTDRIVER_)
#define ExAcquireSpinLock(Lock, OldIrql) (*OldIrql) = KeRaiseIrqlToDpcLevel();
#define ExReleaseSpinLock(Lock, OldIrql) KeLowerIrql((OldIrql))
#else
#define ExAcquireSpinLock(Lock, OldIrql) KeAcquireSpinLock((Lock), (OldIrql))
#define ExReleaseSpinLock(Lock, OldIrql) KeReleaseSpinLock((Lock), (OldIrql))
#endif

#define ExAcquireSpinLockAtDpcLevel(Lock)
#define ExReleaseSpinLockFromDpcLevel(Lock)

#else

// begin_wdm begin_ntddk begin_ntosp

#define ExAcquireSpinLock(Lock, OldIrql) KeAcquireSpinLock((Lock), (OldIrql))
#define ExReleaseSpinLock(Lock, OldIrql) KeReleaseSpinLock((Lock), (OldIrql))
#define ExAcquireSpinLockAtDpcLevel(Lock) KeAcquireSpinLockAtDpcLevel(Lock)
#define ExReleaseSpinLockFromDpcLevel(Lock) KeReleaseSpinLockFromDpcLevel(Lock)

// end_wdm end_ntddk end_ntosp

#endif

// begin_nthal

//
// The acquire and release fast lock macros disable and enable interrupts
// on UP nondebug systems. On MP or debug systems, the spinlock routines
// are used.
//
// N.B. Extreme caution should be observed when using these routines.
//

#if defined(_M_AMD64) && !defined(USER_MODE_CODE)

VOID
_disable (
    VOID
    );

VOID
_enable (
    VOID
    );

#pragma warning(push)
#pragma warning(disable:4164)
#pragma intrinsic(_disable)
#pragma intrinsic(_enable)
#pragma warning(pop)

#endif

// end_nthal

#if defined(NT_UP) && !DBG && !defined(USER_MODE_CODE)
#define ExAcquireFastLock(Lock, OldIrql) _disable()
#else
#define ExAcquireFastLock(Lock, OldIrql) \
    ExAcquireSpinLock(Lock, OldIrql)
#endif

#if defined(NT_UP) && !DBG && !defined(USER_MODE_CODE)
#define ExReleaseFastLock(Lock, OldIrql) _enable()
#else
#define ExReleaseFastLock(Lock, OldIrql) \
    ExReleaseSpinLock(Lock, OldIrql)
#endif

//
// The following function prototypes must be in this module so that the
// above macros can call them directly.
//

// begin_nthal

#if defined(NT_UP)

#define KiAcquireSpinLock(SpinLock)
#define KiReleaseSpinLock(SpinLock)

#else

#define KiAcquireSpinLock(SpinLock) KeAcquireSpinLockAtDpcLevel(SpinLock)
#define KiReleaseSpinLock(SpinLock) KeReleaseSpinLockFromDpcLevel(SpinLock)

#endif // defined(NT_UP)

// end_nthal

//
// Define query tick count macro.
//

// begin_ntddk begin_nthal begin_ntosp begin_wdm

#define KI_USER_SHARED_DATA 0xFFFFF78000000000UI64
#define SharedUserData ((KUSER_SHARED_DATA * const)KI_USER_SHARED_DATA)

#define SharedInterruptTime (KI_USER_SHARED_DATA + 0x8)
#define SharedSystemTime (KI_USER_SHARED_DATA + 0x14)
#define SharedTickCount (KI_USER_SHARED_DATA + 0x320)

#define KeQueryInterruptTime() *((volatile ULONG64 *)(SharedInterruptTime))

#define KeQuerySystemTime(CurrentCount) \
    *((PULONG64)(CurrentCount)) = *((volatile ULONG64 *)(SharedSystemTime))

#define KeQueryTickCount(CurrentCount) \
    *((PULONG64)(CurrentCount)) = *((volatile ULONG64 *)(SharedTickCount))

// end_ntddk end_nthal end_ntosp end_wdm

C_ASSERT((FIELD_OFFSET(KUSER_SHARED_DATA, InterruptTime) & 7) == 0);
C_ASSERT(FIELD_OFFSET(KUSER_SHARED_DATA, InterruptTime) == 0x8);
C_ASSERT(FIELD_OFFSET(KUSER_SHARED_DATA, SystemTime) == 0x14);
C_ASSERT((FIELD_OFFSET(KUSER_SHARED_DATA, TickCount) & 7) == 0);
C_ASSERT(FIELD_OFFSET(KUSER_SHARED_DATA, TickCount) == 0x320);
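
//
// Editor's note: an illustrative sketch, not part of the original header.
// The tick count read by the macro above can be converted to time with
// KeQueryTimeIncrement, which returns the number of 100-nanosecond units
// added to the tick count per clock interrupt. The routine name is
// hypothetical.
//

#if 0
VOID
ExampleElapsedTime (
    VOID
    )
{
    ULONG64 TickCount;
    ULONG64 Elapsed100ns;

    KeQueryTickCount(&TickCount);
    Elapsed100ns = TickCount * KeQueryTimeIncrement();
}
#endif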
//
// Define query interrupt time macro.
//

C_ASSERT((FIELD_OFFSET(KUSER_SHARED_DATA, InterruptTime) & 7) == 0);

#define KiQueryInterruptTime(CurrentTime) \
    ((PLARGE_INTEGER)(CurrentTime))->QuadPart = *(PLONG64)(&SharedUserData->InterruptTime)

// begin_nthal begin_ntosp

//
// AMD64 hardware structures
//
// A Page Table Entry on an AMD64 has the following definition.
//

#define _HARDWARE_PTE_WORKING_SET_BITS 11

typedef struct _HARDWARE_PTE {
    ULONG64 Valid : 1;
    ULONG64 Write : 1;                // UP version
    ULONG64 Owner : 1;
    ULONG64 WriteThrough : 1;
    ULONG64 CacheDisable : 1;
    ULONG64 Accessed : 1;
    ULONG64 Dirty : 1;
    ULONG64 LargePage : 1;
    ULONG64 Global : 1;
    ULONG64 CopyOnWrite : 1;          // software field
    ULONG64 Prototype : 1;            // software field
    ULONG64 reserved0 : 1;            // software field
    ULONG64 PageFrameNumber : 28;
    ULONG64 reserved1 : 24 - (_HARDWARE_PTE_WORKING_SET_BITS+1);
    ULONG64 SoftwareWsIndex : _HARDWARE_PTE_WORKING_SET_BITS;
    ULONG64 NoExecute : 1;
} HARDWARE_PTE, *PHARDWARE_PTE;
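
//
// Editor's note: an illustrative sketch, not part of the original header.
// A valid PTE describes a physical page; shifting the page frame number
// left by PAGE_SHIFT (defined later in this header as 12) recovers the
// physical base address. The routine name is hypothetical.
//

#if 0
ULONG64
ExamplePteToPhysicalAddress (
    IN PHARDWARE_PTE Pte
    )
{
    ASSERT(Pte->Valid == 1);
    return ((ULONG64)Pte->PageFrameNumber) << PAGE_SHIFT;
}
#endif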
//
// Define macro to initialize directory table base.
//

#define INITIALIZE_DIRECTORY_TABLE_BASE(dirbase,pfn) \
    *((PULONG64)(dirbase)) = (((ULONG64)(pfn)) << PAGE_SHIFT)

//
// Define Global Descriptor Table (GDT) entry structure and constants.
//
// Define descriptor type codes.
//

#define TYPE_CODE 0x1A // 11010 = code, read only
#define TYPE_DATA 0x12 // 10010 = data, read and write
#define TYPE_TSS64 0x09 // 01001 = task state segment

//
// Define descriptor privilege levels for user and system.
//

#define DPL_USER 3
#define DPL_SYSTEM 0

//
// Define limit granularity.
//

#define GRANULARITY_BYTE 0
#define GRANULARITY_PAGE 1

#define SELECTOR_TABLE_INDEX 0x04

typedef union _KGDTENTRY64 {
    struct {
        USHORT LimitLow;
        USHORT BaseLow;
        union {
            struct {
                UCHAR BaseMiddle;
                UCHAR Flags1;
                UCHAR Flags2;
                UCHAR BaseHigh;
            } Bytes;

            struct {
                ULONG BaseMiddle : 8;
                ULONG Type : 5;
                ULONG Dpl : 2;
                ULONG Present : 1;
                ULONG LimitHigh : 4;
                ULONG System : 1;
                ULONG LongMode : 1;
                ULONG DefaultBig : 1;
                ULONG Granularity : 1;
                ULONG BaseHigh : 8;
            } Bits;
        };

        ULONG BaseUpper;
        ULONG MustBeZero;
    };

    ULONG64 Alignment;
} KGDTENTRY64, *PKGDTENTRY64;

//
// Define Interrupt Descriptor Table (IDT) entry structure and constants.
//

typedef union _KIDTENTRY64 {
    struct {
        USHORT OffsetLow;
        USHORT Selector;
        USHORT IstIndex : 3;
        USHORT Reserved0 : 5;
        USHORT Type : 5;
        USHORT Dpl : 2;
        USHORT Present : 1;
        USHORT OffsetMiddle;
        ULONG OffsetHigh;
        ULONG Reserved1;
    };

    ULONG64 Alignment;
} KIDTENTRY64, *PKIDTENTRY64;

//
// Define two union definitions used for parsing addresses into the
// component fields required by a GDT.
//

typedef union _KGDT_BASE {
    struct {
        USHORT BaseLow;
        UCHAR BaseMiddle;
        UCHAR BaseHigh;
        ULONG BaseUpper;
    };

    ULONG64 Base;
} KGDT_BASE, *PKGDT_BASE;

C_ASSERT(sizeof(KGDT_BASE) == sizeof(ULONG64));

typedef union _KGDT_LIMIT {
    struct {
        USHORT LimitLow;
        USHORT LimitHigh : 4;
        USHORT MustBeZero : 12;
    };

    ULONG Limit;
} KGDT_LIMIT, *PKGDT_LIMIT;

C_ASSERT(sizeof(KGDT_LIMIT) == sizeof(ULONG));
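
//
// Editor's note: an illustrative sketch, not part of the original header.
// It gathers the four scattered base fields of a long mode GDT entry into
// a linear address using the KGDT_BASE union above. The routine name is
// hypothetical.
//

#if 0
ULONG64
ExampleGdtEntryBase (
    IN PKGDTENTRY64 Entry
    )
{
    KGDT_BASE Base;

    Base.BaseLow = Entry->BaseLow;
    Base.BaseMiddle = Entry->Bytes.BaseMiddle;
    Base.BaseHigh = Entry->Bytes.BaseHigh;
    Base.BaseUpper = Entry->BaseUpper;
    return Base.Base;
}
#endif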
//
// Define Task State Segment (TSS) structure and constants.
//
// Task switches are not supported by the AMD64, but a task state segment
// must be present to define the kernel stack pointer and I/O map base.
//
// N.B. This structure is misaligned as per the AMD64 specification.
//
// N.B. The size of TSS must be <= 0xDFFF.
//

#pragma pack(push, 4)
typedef struct _KTSS64 {
    ULONG Reserved0;
    ULONG64 Rsp0;
    ULONG64 Rsp1;
    ULONG64 Rsp2;

    //
    // Element 0 of the Ist is reserved.
    //

    ULONG64 Ist[8];
    ULONG64 Reserved1;
    USHORT IoMapBase;
} KTSS64, *PKTSS64;
#pragma pack(pop)

C_ASSERT((sizeof(KTSS64) % sizeof(PVOID)) == 0);

#define TSS_IST_RESERVED 0
#define TSS_IST_PANIC 1
#define TSS_IST_MCA 2

#define IO_ACCESS_MAP_NONE FALSE

#define KiComputeIopmOffset(Enable) (sizeof(KTSS64))

// begin_windbgkd

#if defined(_AMD64_)

//
// Define pseudo descriptor structures for both 64- and 32-bit mode.
//

typedef struct _KDESCRIPTOR {
    USHORT Pad[3];
    USHORT Limit;
    PVOID Base;
} KDESCRIPTOR, *PKDESCRIPTOR;

typedef struct _KDESCRIPTOR32 {
    USHORT Pad[3];
    USHORT Limit;
    ULONG Base;
} KDESCRIPTOR32, *PKDESCRIPTOR32;

//
// Define special kernel registers and the initial MXCSR value.
//

typedef struct _KSPECIAL_REGISTERS {
    ULONG64 Cr0;
    ULONG64 Cr2;
    ULONG64 Cr3;
    ULONG64 Cr4;
    ULONG64 KernelDr0;
    ULONG64 KernelDr1;
    ULONG64 KernelDr2;
    ULONG64 KernelDr3;
    ULONG64 KernelDr6;
    ULONG64 KernelDr7;
    KDESCRIPTOR Gdtr;
    KDESCRIPTOR Idtr;
    USHORT Tr;
    USHORT Ldtr;
    ULONG MxCsr;
    ULONG64 DebugControl;
    ULONG64 LastBranchToRip;
    ULONG64 LastBranchFromRip;
    ULONG64 LastExceptionToRip;
    ULONG64 LastExceptionFromRip;
    ULONG64 Cr8;
    ULONG64 MsrGsBase;
    ULONG64 MsrGsSwap;
    ULONG64 MsrStar;
    ULONG64 MsrLStar;
    ULONG64 MsrCStar;
    ULONG64 MsrSyscallMask;
} KSPECIAL_REGISTERS, *PKSPECIAL_REGISTERS;

//
// Define processor state structure.
//

typedef struct _KPROCESSOR_STATE {
    KSPECIAL_REGISTERS SpecialRegisters;
    CONTEXT ContextFrame;
} KPROCESSOR_STATE, *PKPROCESSOR_STATE;

#endif // _AMD64_

// end_windbgkd

//
// DPC data structure definition.
//

typedef struct _KDPC_DATA {
    LIST_ENTRY DpcListHead;
    KSPIN_LOCK DpcLock;
    volatile ULONG DpcQueueDepth;
    ULONG DpcCount;
} KDPC_DATA, *PKDPC_DATA;

//
// Processor Control Block (PRCB)
//

#define PRCB_MAJOR_VERSION 1
#define PRCB_MINOR_VERSION 1

#define PRCB_BUILD_DEBUG 0x1
#define PRCB_BUILD_UNIPROCESSOR 0x2

typedef struct _KPRCB {

    //
    // Start of the architecturally defined section of the PRCB. This section
    // may be directly addressed by vendor/platform specific HAL code and will
    // not change from version to version of NT.
    //

    USHORT MinorVersion;
    USHORT MajorVersion;
    CCHAR Number;
    CCHAR Reserved;
    USHORT BuildType;
    struct _KTHREAD *CurrentThread;
    struct _KTHREAD *NextThread;
    struct _KTHREAD *IdleThread;
    KAFFINITY SetMember;
    KAFFINITY NotSetMember;
    KSPIN_LOCK PrcbLock;
    KPROCESSOR_STATE ProcessorState;
    CCHAR CpuType;
    CCHAR CpuID;
    USHORT CpuStep;
    ULONG PrcbPad00;
    ULONG64 HalReserved[8];
    UCHAR PrcbPad0[104];

    //
    // End of the architecturally defined section of the PRCB.
    //

// end_nthal end_ntosp

    //
    // Numbered queued spin locks - 128-byte aligned.
    //

    KSPIN_LOCK_QUEUE LockQueue[16];
    UCHAR PrcbPad1[16];

    //
    // Nonpaged per processor lookaside lists - 128-byte aligned.
    //

    PP_LOOKASIDE_LIST PPLookasideList[16];

    //
    // Nonpaged per processor small pool lookaside lists - 128-byte aligned.
    //

    PP_LOOKASIDE_LIST PPNPagedLookasideList[POOL_SMALL_LISTS];

    //
    // Paged per processor small pool lookaside lists.
    //

    PP_LOOKASIDE_LIST PPPagedLookasideList[POOL_SMALL_LISTS];

    //
    // MP interprocessor request packet barrier - 128-byte aligned.
    //

    volatile KAFFINITY PacketBarrier;
    UCHAR PrcbPad2[120];

    //
    // MP interprocessor request packet and summary - 128-byte aligned.
    //

    volatile PVOID CurrentPacket[3];
    volatile KAFFINITY TargetSet;
    volatile PKIPI_WORKER WorkerRoutine;
    volatile ULONG IpiFrozen;
    UCHAR PrcbPad3[84];

    //
    // MP interprocessor request summary and packet address - 128-byte aligned.
    //
    // N.B. Request summary includes the request summary mask as well as the
    //      request packet. The address occupies the upper 48 bits and the mask
    //      the lower 16 bits.
    //

#define IPI_PACKET_SHIFT 16

    volatile LONG64 RequestSummary;
    UCHAR PrcbPad4[120];

    //
    // DPC listhead, counts, and batching parameters - 128-byte aligned.
    //

    KDPC_DATA DpcData[2];
    PVOID DpcStack;
    PVOID SavedRsp;
    ULONG MaximumDpcQueueDepth;
    ULONG DpcRequestRate;
    ULONG MinimumDpcRate;
    volatile BOOLEAN DpcInterruptRequested;
    volatile BOOLEAN DpcThreadRequested;

    //
    // N.B. The following two fields must be on a word boundary.
    //

    volatile BOOLEAN DpcRoutineActive;
    volatile BOOLEAN DpcThreadActive;
    union {
        volatile ULONG64 TimerHand;
        volatile ULONG64 TimerRequest;
    };

    ULONG64 PrcbPad40;
    ULONG DpcLastCount;
    BOOLEAN ThreadDpcEnable;
    volatile BOOLEAN QuantumEnd;
    UCHAR PrcbPad50;
    volatile BOOLEAN IdleSchedule;
    LONG DpcSetEventRequest;
    UCHAR PrcbPad5[4];

    //
    // DPC thread and generic call DPC - 128-byte aligned.
    //

    PVOID DpcThread;
    KEVENT DpcEvent;
    KDPC CallDpc;
    SINGLE_LIST_ENTRY DeferredReadyListHead;
    ULONG64 PrcbPad7[3];

    //
    // Per-processor ready summary and ready queues - 128-byte aligned.
    //
    // N.B. Ready summary is in the first cache line as the queue for priority
    //      zero is never used.
    //

    LIST_ENTRY WaitListHead;
    ULONG ReadySummary;
    ULONG SelectNextLast;
    LIST_ENTRY DispatcherReadyListHead[MAXIMUM_PRIORITY];

    //
    // Miscellaneous counters.
    //

    ULONG InterruptCount;
    ULONG KernelTime;
    ULONG UserTime;
    ULONG DpcTime;
    ULONG InterruptTime;
    ULONG AdjustDpcThreshold;
    ULONG PageColor;
    BOOLEAN SkipTick;
    KIRQL DebuggerSavedIRQL;
    UCHAR PollSlot;
    UCHAR PrcbPad8[1];
    struct _KNODE * ParentNode;
    KAFFINITY MultiThreadProcessorSet;
    ULONG ThreadStartCount[2];
    ULONG DebugDpcTime;
    UCHAR PrcbPad9[44];

    //
    // Performance counters - 128-byte aligned.
    //
    // Cache manager performance counters.
    //

    ULONG CcFastReadNoWait;
    ULONG CcFastReadWait;
    ULONG CcFastReadNotPossible;
    ULONG CcCopyReadNoWait;
    ULONG CcCopyReadWait;
    ULONG CcCopyReadNoWaitMiss;

    //
    // Kernel performance counters.
    //

    ULONG KeAlignmentFixupCount;
    ULONG SpareCounter0;
    ULONG KeDcacheFlushCount;
    ULONG KeExceptionDispatchCount;
    ULONG KeFirstLevelTbFills;
    ULONG KeFloatingEmulationCount;
    ULONG KeIcacheFlushCount;
    ULONG KeSecondLevelTbFills;
    ULONG KeSystemCalls;
    ULONG SpareCounter1;

    //
    // I/O IRP float.
    //

    LONG LookasideIrpFloat;

    //
    // Processor information.
    //

    UCHAR VendorString[13];
    UCHAR InitialApicId;
    UCHAR LogicalProcessorsPerPhysicalProcessor;
    ULONG MHz;
    ULONG FeatureBits;
    LARGE_INTEGER UpdateSignature;

    //
    // Processor power state.
    //

    PROCESSOR_POWER_STATE PowerState;

// begin_nthal begin_ntosp

} KPRCB, *PKPRCB, *RESTRICTED_POINTER PRKPRCB;

// end_nthal end_ntosp

#if !defined(_X86AMD64_)

C_ASSERT(((FIELD_OFFSET(KPRCB, LockQueue) + 16) & (128 - 1)) == 0);
C_ASSERT((FIELD_OFFSET(KPRCB, PPLookasideList) & (128 - 1)) == 0);
C_ASSERT((FIELD_OFFSET(KPRCB, PPNPagedLookasideList) & (128 - 1)) == 0);
C_ASSERT((FIELD_OFFSET(KPRCB, PacketBarrier) & (128 - 1)) == 0);
C_ASSERT((FIELD_OFFSET(KPRCB, RequestSummary) & (128 - 1)) == 0);
C_ASSERT((FIELD_OFFSET(KPRCB, DpcData) & (128 - 1)) == 0);
C_ASSERT(((FIELD_OFFSET(KPRCB, DpcRoutineActive)) & (1)) == 0);
C_ASSERT((FIELD_OFFSET(KPRCB, DpcThread) & (128 - 1)) == 0);
C_ASSERT((FIELD_OFFSET(KPRCB, WaitListHead) & (128 - 1)) == 0);
C_ASSERT((FIELD_OFFSET(KPRCB, CcFastReadNoWait) & (128 - 1)) == 0);

#endif
// begin_nthal begin_ntosp begin_ntddk

//
// Processor Control Region Structure Definition
//

#define PCR_MINOR_VERSION 1
#define PCR_MAJOR_VERSION 1

typedef struct _KPCR {

    //
    // Start of the architecturally defined section of the PCR. This section
    // may be directly addressed by vendor/platform specific HAL code and will
    // not change from version to version of NT.
    //
    // Certain fields in the TIB are not used in kernel mode. These include the
    // exception list, stack base, stack limit, subsystem TIB, fiber data, and
    // the arbitrary user pointer. Therefore, these fields are overlaid with
    // other data to get better cache locality.
    //

    union {
        NT_TIB NtTib;
        struct {
            union _KGDTENTRY64 *GdtBase;
            struct _KTSS64 *TssBase;
            PVOID PerfGlobalGroupMask;
            struct _KPCR *Self;
            ULONG ContextSwitches;
            ULONG NotUsed;
            KAFFINITY SetMember;
            PVOID Used_Self;
        };
    };

    struct _KPRCB *CurrentPrcb;
    ULONG64 SavedRcx;
    ULONG64 SavedR11;
    KIRQL Irql;
    UCHAR SecondLevelCacheAssociativity;
    UCHAR Number;
    UCHAR Fill0;
    ULONG Irr;
    ULONG IrrActive;
    ULONG Idr;
    USHORT MajorVersion;
    USHORT MinorVersion;
    ULONG StallScaleFactor;
    union _KIDTENTRY64 *IdtBase;
    PVOID Unused1;
    PVOID Unused2;

// end_ntddk end_ntosp

    ULONG KernelReserved[15];
    ULONG SecondLevelCacheSize;
    ULONG HalReserved[16];
    ULONG MxCsr;
    PVOID KdVersionBlock;
    PVOID Unused3;

    //
    // End of the architecturally defined section of the PCR.
    //

// end_nthal

    ULONG PcrAlign1[24];
    KPRCB Prcb;

// begin_nthal begin_ntddk begin_ntosp

} KPCR, *PKPCR;

// end_nthal end_ntddk end_ntosp

#if !defined (_X86AMD64_)

C_ASSERT(FIELD_OFFSET(KPCR, NtTib.ExceptionList) == FIELD_OFFSET(KPCR, GdtBase));
C_ASSERT(FIELD_OFFSET(KPCR, NtTib.StackBase) == FIELD_OFFSET(KPCR, TssBase));
C_ASSERT(FIELD_OFFSET(KPCR, NtTib.StackLimit) == FIELD_OFFSET(KPCR, PerfGlobalGroupMask));
C_ASSERT(FIELD_OFFSET(KPCR, NtTib.SubSystemTib) == FIELD_OFFSET(KPCR, Self));
C_ASSERT(FIELD_OFFSET(KPCR, NtTib.FiberData) == FIELD_OFFSET(KPCR, ContextSwitches));
C_ASSERT(FIELD_OFFSET(KPCR, NtTib.ArbitraryUserPointer) == FIELD_OFFSET(KPCR, SetMember));
C_ASSERT(FIELD_OFFSET(KPCR, NtTib.Self) == FIELD_OFFSET(KPCR, Used_Self));
C_ASSERT((FIELD_OFFSET(KPCR, Prcb) & (128 - 1)) == 0);

//
// The offset of the DebuggerDataBlock must not change.
//

C_ASSERT(FIELD_OFFSET(KPCR, KdVersionBlock) == 0x108);

#endif

__forceinline
ULONG
KeGetContextSwitches (
    PKPRCB Prcb
    )
{
    PKPCR Pcr;

    Pcr = CONTAINING_RECORD(Prcb, KPCR, Prcb);
    return Pcr->ContextSwitches;
}

VOID
KeRestoreLegacyFloatingPointState (
    PLEGACY_SAVE_AREA NpxFrame
    );

VOID
KeSaveLegacyFloatingPointState (
    PLEGACY_SAVE_AREA NpxFrame
    );

// begin_nthal begin_ntosp

//
// Define legacy floating status word bit masks.
//

#define FSW_INVALID_OPERATION 0x1
#define FSW_DENORMAL 0x2
#define FSW_ZERO_DIVIDE 0x4
#define FSW_OVERFLOW 0x8
#define FSW_UNDERFLOW 0x10
#define FSW_PRECISION 0x20
#define FSW_STACK_FAULT 0x40
#define FSW_CONDITION_CODE_0 0x100
#define FSW_CONDITION_CODE_1 0x200
#define FSW_CONDITION_CODE_2 0x400
#define FSW_CONDITION_CODE_3 0x4000

#define FSW_ERROR_MASK (FSW_INVALID_OPERATION | FSW_DENORMAL | \
                        FSW_ZERO_DIVIDE | FSW_OVERFLOW | FSW_UNDERFLOW | \
                        FSW_PRECISION)

//
// Define legacy floating states.
//

#define LEGACY_STATE_UNUSED 0
#define LEGACY_STATE_SCRUB 1
#define LEGACY_STATE_SWITCH 2

//
// Define MxCsr floating control/status word bit masks.
//
// No flush to zero, round to nearest, and all exceptions masked.
//

#define XSW_INVALID_OPERATION 0x1
#define XSW_DENORMAL 0x2
#define XSW_ZERO_DIVIDE 0x4
#define XSW_OVERFLOW 0x8
#define XSW_UNDERFLOW 0x10
#define XSW_PRECISION 0x20

#define XSW_ERROR_MASK (XSW_INVALID_OPERATION | XSW_DENORMAL | \
                        XSW_ZERO_DIVIDE | XSW_OVERFLOW | XSW_UNDERFLOW | \
                        XSW_PRECISION)

#define XSW_ERROR_SHIFT 7

#define XCW_INVALID_OPERATION 0x80
#define XCW_DENORMAL 0x100
#define XCW_ZERO_DIVIDE 0x200
#define XCW_OVERFLOW 0x400
#define XCW_UNDERFLOW 0x800
#define XCW_PRECISION 0x1000
#define XCW_ROUND_CONTROL 0x6000
#define XCW_FLUSH_ZERO 0x8000

//
// Define EFLAGS bit masks and shift offsets.
//

#define EFLAGS_CF_MASK 0x00000001 // carry flag
#define EFLAGS_PF_MASK 0x00000004 // parity flag
#define EFALGS_AF_MASK 0x00000010 // auxiliary carry flag
#define EFLAGS_ZF_MASK 0x00000040 // zero flag
#define EFLAGS_SF_MASK 0x00000080 // sign flag
#define EFLAGS_TF_MASK 0x00000100 // trap flag
#define EFLAGS_IF_MASK 0x00000200 // interrupt flag
#define EFLAGS_DF_MASK 0x00000400 // direction flag
#define EFLAGS_OF_MASK 0x00000800 // overflow flag
#define EFLAGS_IOPL_MASK 0x00003000 // I/O privilege level
#define EFLAGS_NT_MASK 0x00004000 // nested task
#define EFLAGS_RF_MASK 0x00010000 // resume flag
#define EFLAGS_VM_MASK 0x00020000 // virtual 8086 mode
#define EFLAGS_AC_MASK 0x00040000 // alignment check
#define EFLAGS_VIF_MASK 0x00080000 // virtual interrupt flag
#define EFLAGS_VIP_MASK 0x00100000 // virtual interrupt pending
#define EFLAGS_ID_MASK 0x00200000 // identification flag

#define EFLAGS_TF_SHIFT 8 // trap
#define EFLAGS_IF_SHIFT 9 // interrupt enable

// end_nthal

//
// Define sanitize EFLAGS macro.
//
// If kernel mode, then
//      caller can specify Carry, Parity, AuxCarry, Zero, Sign, Trap,
//      Interrupt, Direction, Overflow, and identification.
//
// If user mode, then
//      caller can specify Carry, Parity, AuxCarry, Zero, Sign, Trap,
//      Direction, Overflow, and force Interrupt on.
//

#define EFLAGS_KERNEL_SANITIZE 0x00210fd5L
#define EFLAGS_USER_SANITIZE 0x00010dd5L

#define SANITIZE_EFLAGS(eFlags, mode) ( \
    ((mode) == KernelMode ? \
        ((eFlags) & EFLAGS_KERNEL_SANITIZE) : \
        (((eFlags) & EFLAGS_USER_SANITIZE) | EFLAGS_IF_MASK)))
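
//
// Editor's note: an illustrative sketch, not part of the original header.
// For a user mode caller the macro clears privileged and undefined bits
// (IOPL, NT, VM, VIF, VIP, ID, and the reserved bits) and forces IF on, so
// a forged EFLAGS image cannot disable interrupts or raise its I/O
// privilege. The expected value below was computed from the masks above.
//

#if 0
VOID
ExampleSanitizeEFlags (
    VOID
    )
{
    ULONG Forged = 0x00203002;      // ID | IOPL=3 | reserved bit 1

    ASSERT(SANITIZE_EFLAGS(Forged, UserMode) == 0x00000200);
}
#endif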
//
// Define sanitize debug register macros.
//
// Define control register settable bits and active mask.
//

#define DR7_LEGAL 0xffff0155
#define DR7_ACTIVE 0x00000055

//
// Define macro to sanitize the debug control register.
//

#define SANITIZE_DR7(Dr7, mode) ((Dr7) & DR7_LEGAL)

//
// Define macro to sanitize debug address registers.
//

#define SANITIZE_DRADDR(DrReg, mode) \
    ((mode) == KernelMode ? \
        (DrReg) : \
        (((PVOID)(DrReg) <= MM_HIGHEST_USER_ADDRESS) ? (DrReg) : 0))

//
// Define macro to clear reserved bits from MXCSR.
//

#define SANITIZE_MXCSR(_mxcsr_) ((_mxcsr_) & 0xffbf)

//
// Define macro to clear reserved bits for legacy FP control word.
//

#define SANITIZE_FCW(_fcw_) ((_fcw_) & 0x1f37)
  1512. // begin_nthal begin_ntddk
  1513. //
  1514. // Exception frame
  1515. //
  1516. // This frame is established when handling an exception. It provides a place
  1517. // to save all nonvolatile registers. The volatile registers will already
  1518. // have been saved in a trap frame.
  1519. //
  1520. // N.B. The exception frame has a built in exception record capable of
  1521. // storing information for four parameter values. This exception
  1522. // record is used exclusively within the trap handling code.
  1523. //
  1524. #define EXCEPTION_AREA_SIZE 64
  1525. typedef struct _KEXCEPTION_FRAME {
  1526. //
  1527. // Home address for the parameter registers.
  1528. //
  1529. ULONG64 P1Home;
  1530. ULONG64 P2Home;
  1531. ULONG64 P3Home;
  1532. ULONG64 P4Home;
  1533. ULONG64 P5;
  1534. //
  1535. // Kernel callout initial stack value.
  1536. //
  1537. ULONG64 InitialStack;
  1538. //
  1539. // Saved nonvolatile floating registers.
  1540. //
  1541. M128 Xmm6;
  1542. M128 Xmm7;
  1543. M128 Xmm8;
  1544. M128 Xmm9;
  1545. M128 Xmm10;
  1546. M128 Xmm11;
  1547. M128 Xmm12;
  1548. M128 Xmm13;
  1549. M128 Xmm14;
  1550. M128 Xmm15;
  1551. //
  1552. // Kernel callout frame variables.
  1553. //
  1554. ULONG64 TrapFrame;
  1555. ULONG64 CallbackStack;
  1556. ULONG64 OutputBuffer;
  1557. ULONG64 OutputLength;
  1558. //
  1559. // Exception record for exceptions.
  1560. //
  1561. UCHAR ExceptionRecord[EXCEPTION_AREA_SIZE];
  1562. //
  1563. // Saved nonvolatile register - not always saved.
  1564. //
  1565. ULONG64 Fill1;
  1566. ULONG64 Rbp;
  1567. //
  1568. // Saved nonvolatile registers.
  1569. //
  1570. ULONG64 Rbx;
  1571. ULONG64 Rdi;
  1572. ULONG64 Rsi;
  1573. ULONG64 R12;
  1574. ULONG64 R13;
  1575. ULONG64 R14;
  1576. ULONG64 R15;
  1577. //
  1578. // EFLAGS and return address.
  1579. //
  1580. ULONG64 Return;
  1581. } KEXCEPTION_FRAME, *PKEXCEPTION_FRAME;
  1582. // end_ntddk
  1583. #define KEXCEPTION_FRAME_LENGTH sizeof(KEXCEPTION_FRAME)
  1584. C_ASSERT((sizeof(KEXCEPTION_FRAME) & STACK_ROUND) == 0);
  1585. #define EXCEPTION_RECORD_LENGTH \
  1586. ((sizeof(EXCEPTION_RECORD) + STACK_ROUND) & ~STACK_ROUND)
  1587. #if !defined(_X86AMD64_)
  1588. C_ASSERT(EXCEPTION_AREA_SIZE == (FIELD_OFFSET(EXCEPTION_RECORD, ExceptionInformation) + (4 * sizeof(ULONG_PTR))));
  1589. #endif
  1590. //
  1591. // Machine Frame
  1592. //
  1593. // This frame is established by code that trampolines to user mode (e.g. user
  1594. // APC, user callback, dispatch user exception, etc.). The purpose of this
  1595. // frame is to allow unwinding through these callbacks if an exception occurs.
  1596. //
  1597. // N.B. This frame is identical to the frame that is pushed for a trap without
  1598. // an error code and is identical to the hardware part of a trap frame.
  1599. //
  1600. typedef struct _MACHINE_FRAME {
  1601. ULONG64 Rip;
  1602. USHORT SegCs;
  1603. USHORT Fill1[3];
  1604. ULONG EFlags;
  1605. ULONG Fill2;
  1606. ULONG64 Rsp;
  1607. USHORT SegSs;
  1608. USHORT Fill3[3];
  1609. } MACHINE_FRAME, *PMACHINE_FRAME;
  1610. #define MACHINE_FRAME_LENGTH sizeof(MACHINE_FRAME)
  1611. C_ASSERT((sizeof(MACHINE_FRAME) & STACK_ROUND) == 8);
  1612. //
  1613. // Switch Frame
  1614. //
  1615. // This frame is established by the code that switches context from one
  1616. // thread to the next and is used by the thread initialization code to
  1617. // construct a stack that will start the execution of a thread in the
  1618. // thread start up code.
  1619. //
  1620. typedef struct _KSWITCH_FRAME {
  1621. ULONG64 P1Home;
  1622. ULONG64 P2Home;
  1623. ULONG64 P3Home;
  1624. ULONG64 P4Home;
  1625. ULONG64 P5Home;
  1626. ULONG MxCsr;
  1627. KIRQL ApcBypass;
  1628. UCHAR Fill1[3];
  1629. ULONG64 Rbp;
  1630. ULONG64 Return;
  1631. } KSWITCH_FRAME, *PKSWITCH_FRAME;
  1632. #define KSWITCH_FRAME_LENGTH sizeof(KSWITCH_FRAME)
  1633. C_ASSERT((sizeof(KSWITCH_FRAME) & STACK_ROUND) == 0);
  1634. //
  1635. // Start system thread frame.
  1636. //
  1637. // This frame is established by the AMD64 specific thread initialization
  1638. // code. It is used to store the initial context for starting a system
  1639. // thread.
  1640. //
  1641. typedef struct _KSTART_FRAME {
  1642. ULONG64 P1Home;
  1643. ULONG64 P2Home;
  1644. ULONG64 P3Home;
  1645. ULONG64 Return;
  1646. } KSTART_FRAME, *PKSTART_FRAME;
  1647. #define KSTART_FRAME_LENGTH sizeof(KSTART_FRAME)
  1648. C_ASSERT((sizeof(KSTART_FRAME) & STACK_ROUND) == 0);
  1649. // begin_ntddk
  1650. //
  1651. // Trap frame
  1652. //
  1653. // This frame is established when handling a trap. It provides a place to
  1654. // save all volatile registers. The nonvolatile registers are saved in an
  1655. // exception frame or through the normal C calling conventions for saved
  1656. // registers.
  1657. //
  1658. typedef struct _KTRAP_FRAME {
  1659. //
  1660. // Home address for the parameter registers.
  1661. //
  1662. ULONG64 P1Home;
  1663. ULONG64 P2Home;
  1664. ULONG64 P3Home;
  1665. ULONG64 P4Home;
  1666. ULONG64 P5;
  1667. //
  1668. // Previous processor mode (system services only) and previous IRQL
  1669. // (interrupts only).
  1670. //
  1671. KPROCESSOR_MODE PreviousMode;
  1672. KIRQL PreviousIrql;
  1673. //
  1674. // Page fault load/store indicator.
  1675. //
  1676. UCHAR FaultIndicator;
  1677. UCHAR Fill0;
  1678. //
  1679. // Floating point state.
  1680. //
  1681. ULONG MxCsr;
  1682. //
  1683. // Volatile registers.
  1684. //
  1685. // N.B. These registers are only saved on exceptions and interrupts. They
  1686. // are not saved for system calls.
  1687. //
  1688. ULONG64 Rax;
  1689. ULONG64 Rcx;
  1690. ULONG64 Rdx;
  1691. ULONG64 R8;
  1692. ULONG64 R9;
  1693. ULONG64 R10;
  1694. ULONG64 R11;
  1695. ULONG64 Spare0;
  1696. //
  1697. // Volatile floating registers.
  1698. //
  1699. // N.B. These registers are only saved on exceptions and interrupts. They
  1700. // are not saved for system calls.
  1701. //
  1702. M128 Xmm0;
  1703. M128 Xmm1;
  1704. M128 Xmm2;
  1705. M128 Xmm3;
  1706. M128 Xmm4;
  1707. M128 Xmm5;
  1708. //
  1709. // Page fault address.
  1710. //
  1711. ULONG64 FaultAddress;
  1712. //
  1713. // Debug registers.
  1714. //
  1715. ULONG64 Dr0;
  1716. ULONG64 Dr1;
  1717. ULONG64 Dr2;
  1718. ULONG64 Dr3;
  1719. ULONG64 Dr6;
  1720. ULONG64 Dr7;
  1721. //
  1722. // Special debug registers.
  1723. //
  1724. ULONG64 DebugControl;
  1725. ULONG64 LastBranchToRip;
  1726. ULONG64 LastBranchFromRip;
  1727. ULONG64 LastExceptionToRip;
  1728. ULONG64 LastExceptionFromRip;
  1729. //
  1730. // Segment registers
  1731. //
  1732. USHORT SegDs;
  1733. USHORT SegEs;
  1734. USHORT SegFs;
  1735. USHORT SegGs;
  1736. //
  1737. // Previous trap frame address.
  1738. //
  1739. ULONG64 TrapFrame;
  1740. //
  1741. // Saved nonvolatile registers RBX, RDI and RSI. These registers are only
  1742. // saved in system service trap frames.
  1743. //
  1744. ULONG64 Rbx;
  1745. ULONG64 Rdi;
  1746. ULONG64 Rsi;
  1747. //
  1748. // Saved nonvolatile register RBP. This register is used as a frame
  1749. // pointer during trap processing and is saved in all trap frames.
  1750. //
  1751. ULONG64 Rbp;
  1752. //
  1753. // Information pushed by hardware.
  1754. //
  1755. // N.B. The error code is not always pushed by hardware. For those cases
  1756. // where it is not pushed by hardware a dummy error code is allocated
  1757. // on the stack.
  1758. //
  1759. ULONG64 ErrorCode;
  1760. ULONG64 Rip;
  1761. USHORT SegCs;
  1762. USHORT Fill1[3];
  1763. ULONG EFlags;
  1764. ULONG Fill2;
  1765. ULONG64 Rsp;
  1766. USHORT SegSs;
  1767. USHORT Fill3[3];
  1768. } KTRAP_FRAME, *PKTRAP_FRAME;
  1769. // end_ntddk
  1770. #define KTRAP_FRAME_LENGTH sizeof(KTRAP_FRAME)
  1771. C_ASSERT((sizeof(KTRAP_FRAME) & STACK_ROUND) == 0);
  1772. //
  1773. // IPI, profile, update run time, and update system time interrupt routines.
  1774. //
  1775. NTKERNELAPI
  1776. VOID
  1777. KeIpiInterrupt (
  1778. IN PKTRAP_FRAME TrapFrame
  1779. );
  1780. NTKERNELAPI
  1781. VOID
  1782. KeProfileInterruptWithSource (
  1783. IN PKTRAP_FRAME TrapFrame,
  1784. IN KPROFILE_SOURCE ProfileSource
  1785. );
  1786. NTKERNELAPI
  1787. VOID
  1788. KeUpdateRunTime (
  1789. IN PKTRAP_FRAME TrapFrame
  1790. );
  1791. NTKERNELAPI
  1792. VOID
  1793. KeUpdateSystemTime (
  1794. IN PKTRAP_FRAME TrapFrame,
  1795. IN ULONG64 Increment
  1796. );
  1797. // end_nthal
  1798. //
  1799. // The frame saved by the call out to user mode code is defined here to allow
  1800. // the kernel debugger to trace the entire kernel stack when user mode callouts
  1801. // are active.
  1802. //
  1803. // N.B. The kernel callout frame is the same as an exception frame.
  1804. //
  1805. typedef KEXCEPTION_FRAME KCALLOUT_FRAME;
  1806. typedef PKEXCEPTION_FRAME PKCALLOUT_FRAME;
  1807. typedef struct _UCALLOUT_FRAME {
  1808. ULONG64 P1Home;
  1809. ULONG64 P2Home;
  1810. ULONG64 P3Home;
  1811. ULONG64 P4Home;
  1812. PVOID Buffer;
  1813. ULONG Length;
  1814. ULONG ApiNumber;
  1815. MACHINE_FRAME MachineFrame;
  1816. } UCALLOUT_FRAME, *PUCALLOUT_FRAME;
  1817. #define UCALLOUT_FRAME_LENGTH sizeof(UCALLOUT_FRAME)
  1818. C_ASSERT((sizeof(UCALLOUT_FRAME) & STACK_ROUND) == 8);
  1819. // begin_ntddk begin_wdm
  1820. //
  1821. // The nonvolatile floating state
  1822. //
  1823. typedef struct _KFLOATING_SAVE {
  1824. ULONG MxCsr;
  1825. } KFLOATING_SAVE, *PKFLOATING_SAVE;
  1826. // end_ntddk end_wdm end_ntosp
  1827. //
  1828. // Define profile values.
  1829. //
  1830. #define DEFAULT_PROFILE_INTERVAL 39063
  1831. //
  1832. // The minimum acceptable profiling interval is set to 1221, which is the
  1833. // fastest RTC clock rate we can get. If this
  1834. // value were any smaller, the system would run very slowly.
  1835. //
  1836. #define MINIMUM_PROFILE_INTERVAL 1221
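//
// N.B. (Editorial note) The intervals above are expressed in 100ns units,
// so DEFAULT_PROFILE_INTERVAL (39063) is about 3.9063ms, i.e., roughly 256
// profile interrupts per second, and MINIMUM_PROFILE_INTERVAL (1221) is
// about 122.1us, i.e., roughly 8192 interrupts per second, the fastest
// rate the RTC can be programmed to deliver.
//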
  1837. // begin_ntddk begin_wdm begin_nthal begin_ntndis begin_ntosp
  1838. //
  1839. // AMD64 Specific portions of mm component.
  1840. //
  1841. // Define the page size for the AMD64 as 4096 (0x1000).
  1842. //
  1843. #define PAGE_SIZE 0x1000
  1844. //
  1845. // Define the number of trailing zeroes in a page aligned virtual address.
  1846. // This is used as the shift count when shifting virtual addresses to
  1847. // virtual page numbers.
  1848. //
  1849. #define PAGE_SHIFT 12L
  1850. // end_ntndis end_wdm
  1851. #define PXE_BASE 0xFFFFF6FB7DBED000UI64
  1852. #define PXE_SELFMAP 0xFFFFF6FB7DBEDF68UI64
  1853. #define PPE_BASE 0xFFFFF6FB7DA00000UI64
  1854. #define PDE_BASE 0xFFFFF6FB40000000UI64
  1855. #define PTE_BASE 0xFFFFF68000000000UI64
  1856. #define PXE_TOP 0xFFFFF6FB7DBEDFFFUI64
  1857. #define PPE_TOP 0xFFFFF6FB7DBFFFFFUI64
  1858. #define PDE_TOP 0xFFFFF6FB7FFFFFFFUI64
  1859. #define PTE_TOP 0xFFFFF6FFFFFFFFFFUI64
  1860. #define PDE_KTBASE_AMD64 PPE_BASE
  1861. #define PTI_SHIFT 12
  1862. #define PDI_SHIFT 21
  1863. #define PPI_SHIFT 30
  1864. #define PXI_SHIFT 39
  1865. #define PTE_PER_PAGE 512
  1866. #define PDE_PER_PAGE 512
  1867. #define PPE_PER_PAGE 512
  1868. #define PXE_PER_PAGE 512
  1869. #define PTI_MASK_AMD64 (PTE_PER_PAGE - 1)
  1870. #define PDI_MASK_AMD64 (PDE_PER_PAGE - 1)
  1871. #define PPI_MASK (PPE_PER_PAGE - 1)
  1872. #define PXI_MASK (PXE_PER_PAGE - 1)
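//
// Illustrative sketch (editorial addition, not part of the original
// source): the self-map constants above let software compute the virtual
// address of the PTE that maps an arbitrary virtual address, by converting
// the address to a virtual page number and indexing the 8-byte PTEs at
// PTE_BASE. The helper name MiSketchGetPteAddress is hypothetical; the
// real kernel uses its own MiGetPteAddress macro and MMPTE type, neither
// of which is visible in this header.
//
__forceinline
PVOID
MiSketchGetPteAddress (
    IN PVOID VirtualAddress
    )
{
    ULONG64 VirtualPageNumber;

    //
    // Keep only the low 48 bits of the address, shift out the byte offset
    // within the page, and scale by sizeof(PTE) == 8.
    //
    VirtualPageNumber = ((ULONG64)VirtualAddress >> PTI_SHIFT) & 0xFFFFFFFFFUI64;
    return (PVOID)(PTE_BASE + (VirtualPageNumber << 3));
}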
  1873. //
  1874. // Define the highest user address and user probe address.
  1875. //
  1876. // end_ntddk end_nthal end_ntosp
  1877. #if defined(_NTDRIVER_) || defined(_NTDDK_) || defined(_NTIFS_) || defined(_NTHAL_)
  1878. // begin_ntddk begin_nthal begin_ntosp
  1879. extern PVOID *MmHighestUserAddress;
  1880. extern PVOID *MmSystemRangeStart;
  1881. extern ULONG64 *MmUserProbeAddress;
  1882. #define MM_HIGHEST_USER_ADDRESS *MmHighestUserAddress
  1883. #define MM_SYSTEM_RANGE_START *MmSystemRangeStart
  1884. #define MM_USER_PROBE_ADDRESS *MmUserProbeAddress
  1885. // end_ntddk end_nthal end_ntosp
  1886. #else
  1887. extern PVOID MmHighestUserAddress;
  1888. extern PVOID MmSystemRangeStart;
  1889. extern ULONG64 MmUserProbeAddress;
  1890. #define MM_HIGHEST_USER_ADDRESS MmHighestUserAddress
  1891. #define MM_SYSTEM_RANGE_START MmSystemRangeStart
  1892. #define MM_USER_PROBE_ADDRESS MmUserProbeAddress
  1893. #define MI_HIGHEST_USER_ADDRESS (PVOID) (ULONG_PTR)((0x80000000000 - 0x10000 - 1)) // highest user address
  1894. #define MI_SYSTEM_RANGE_START (PVOID)(0xFFFF080000000000) // start of system space
  1895. #define MI_USER_PROBE_ADDRESS ((ULONG_PTR)(0x80000000000UI64 - 0x10000)) // starting address of guard page
  1896. #endif
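//
// Illustrative sketch (editorial addition): MM_USER_PROBE_ADDRESS is the
// boundary used to validate addresses received from user mode. The helper
// name MmSketchSanitizeUserAddress is hypothetical; it shows one common
// pattern, clamping an out-of-range address to the guard page so that any
// subsequent access faults rather than touching system space.
//
__forceinline
PVOID
MmSketchSanitizeUserAddress (
    IN PVOID Address
    )
{
    //
    // Any address at or above the probe address cannot be a valid user
    // address; substitute the guard page address so a later dereference
    // raises an access violation.
    //
    if ((ULONG_PTR)Address >= (ULONG_PTR)MM_USER_PROBE_ADDRESS) {
        Address = (PVOID)(ULONG_PTR)MM_USER_PROBE_ADDRESS;
    }
    return Address;
}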
  1897. // begin_nthal
  1898. //
  1899. // 4MB at the top of VA space is reserved for the HAL's use.
  1900. //
  1901. #define HAL_VA_START 0xFFFFFFFFFFC00000UI64
  1902. #define HAL_VA_SIZE (4 * 1024 * 1024)
  1903. // end_nthal
  1904. // begin_ntddk begin_nthal begin_ntosp
  1905. //
  1906. // The lowest user address reserves the low 64k.
  1907. //
  1908. #define MM_LOWEST_USER_ADDRESS (PVOID)0x10000
  1909. //
  1910. // The lowest address for system space.
  1911. //
  1912. #define MM_LOWEST_SYSTEM_ADDRESS (PVOID)0xFFFF080000000000
  1913. // begin_wdm
  1914. #define MmGetProcedureAddress(Address) (Address)
  1915. #define MmLockPagableCodeSection(Address) MmLockPagableDataSection(Address)
  1916. // end_ntddk end_wdm end_ntosp
  1917. //
  1918. // Define virtual base and alternate virtual base of kernel.
  1919. //
  1920. #define KSEG0_BASE 0xFFFFF80000000000UI64
  1921. //
  1922. // Generate kernel segment physical address.
  1923. //
  1924. #define KSEG_ADDRESS(PAGE) ((PVOID)(KSEG0_BASE | ((ULONG_PTR)(PAGE) << PAGE_SHIFT)))
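//
// Example (editorial addition): KSEG_ADDRESS converts a page frame number
// into its virtual address inside the KSEG0 mapping; e.g., page frame
// 0x1234 yields (PVOID)0xFFFFF80001234000:
//
// PVOID Va = KSEG_ADDRESS(0x1234);
//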
  1925. // begin_ntddk begin_ntosp
  1926. //
  1927. // Intrinsic functions
  1928. //
  1929. // begin_wdm
  1930. #if defined(_M_AMD64) && !defined(RC_INVOKED) && !defined(MIDL_PASS)
  1931. // end_wdm
  1932. //
  1933. // The following routines are provided for backward compatibility with old
  1934. // code. They are no longer the preferred way to accomplish these functions.
  1935. //
  1936. #if PRAGMA_DEPRECATED_DDK
  1937. #pragma deprecated(ExInterlockedIncrementLong) // Use InterlockedIncrement
  1938. #pragma deprecated(ExInterlockedDecrementLong) // Use InterlockedDecrement
  1939. #pragma deprecated(ExInterlockedExchangeUlong) // Use InterlockedExchange
  1940. #endif
  1941. #define RESULT_ZERO 0
  1942. #define RESULT_NEGATIVE 1
  1943. #define RESULT_POSITIVE 2
  1944. typedef enum _INTERLOCKED_RESULT {
  1945. ResultNegative = RESULT_NEGATIVE,
  1946. ResultZero = RESULT_ZERO,
  1947. ResultPositive = RESULT_POSITIVE
  1948. } INTERLOCKED_RESULT;
  1949. #define ExInterlockedDecrementLong(Addend, Lock) \
  1950. _ExInterlockedDecrementLong(Addend)
  1951. __forceinline
  1952. LONG
  1953. _ExInterlockedDecrementLong (
  1954. IN OUT PLONG Addend
  1955. )
  1956. {
  1957. LONG Result;
  1958. Result = InterlockedDecrement(Addend);
  1959. if (Result < 0) {
  1960. return ResultNegative;
  1961. } else if (Result > 0) {
  1962. return ResultPositive;
  1963. } else {
  1964. return ResultZero;
  1965. }
  1966. }
  1967. #define ExInterlockedIncrementLong(Addend, Lock) \
  1968. _ExInterlockedIncrementLong(Addend)
  1969. __forceinline
  1970. LONG
  1971. _ExInterlockedIncrementLong (
  1972. IN OUT PLONG Addend
  1973. )
  1974. {
  1975. LONG Result;
  1976. Result = InterlockedIncrement(Addend);
  1977. if (Result < 0) {
  1978. return ResultNegative;
  1979. } else if (Result > 0) {
  1980. return ResultPositive;
  1981. } else {
  1982. return ResultZero;
  1983. }
  1984. }
  1985. #define ExInterlockedExchangeUlong(Target, Value, Lock) \
  1986. _ExInterlockedExchangeUlong(Target, Value)
  1987. __forceinline
  ULONG
  1988. _ExInterlockedExchangeUlong (
  1989. IN OUT PULONG Target,
  1990. IN ULONG Value
  1991. )
  1992. {
  1993. return (ULONG)InterlockedExchange((PLONG)Target, (LONG)Value);
  1994. }
  1995. // begin_wdm
  1996. #endif // defined(_M_AMD64) && !defined(RC_INVOKED) && !defined(MIDL_PASS)
  1997. // end_wdm end_ntddk end_nthal end_ntosp
  1998. // begin_ntosp begin_nthal begin_ntddk begin_wdm
  1999. #if !defined(MIDL_PASS) && defined(_M_AMD64)
  2000. //
  2001. // AMD64 function prototype definitions
  2002. //
  2003. // end_wdm
  2004. // end_ntddk end_ntosp
  2005. //
  2006. // Get address of current processor block.
  2007. //
  2008. __forceinline
  2009. PKPCR
  2010. KeGetPcr (
  2011. VOID
  2012. )
  2013. {
  2014. return (PKPCR)__readgsqword(FIELD_OFFSET(KPCR, Self));
  2015. }
  2016. // begin_ntosp
  2017. //
  2018. // Get address of current processor block.
  2019. //
  2020. __forceinline
  2021. PKPRCB
  2022. KeGetCurrentPrcb (
  2023. VOID
  2024. )
  2025. {
  2026. return (PKPRCB)__readgsqword(FIELD_OFFSET(KPCR, CurrentPrcb));
  2027. }
  2028. // begin_ntddk
  2029. //
  2030. // Get the current processor number
  2031. //
  2032. __forceinline
  2033. ULONG
  2034. KeGetCurrentProcessorNumber (
  2035. VOID
  2036. )
  2037. {
  2038. return (ULONG)__readgsbyte(FIELD_OFFSET(KPCR, Number));
  2039. }
  2040. // end_nthal end_ntddk end_ntosp
  2041. //
  2042. // Get address of current kernel thread object.
  2043. //
  2044. // WARNING: This inline macro cannot be used by device drivers or HALs;
  2045. // they must call the kernel function KeGetCurrentThread.
  2046. //
  2047. __forceinline
  2048. struct _KTHREAD *
  2049. KeGetCurrentThread (
  2050. VOID
  2051. )
  2052. {
  2053. return (struct _KTHREAD *)__readgsqword(FIELD_OFFSET(KPCR, Prcb.CurrentThread));
  2054. }
  2055. //
  2056. // Determine whether the processor is executing a DPC.
  2057. //
  2058. // WARNING: This inline macro is always MP enabled because filesystems
  2059. // utilize it.
  2060. //
  2061. __forceinline
  2062. ULONG
  2063. KeIsExecutingDpc (
  2064. VOID
  2065. )
  2066. {
  2067. return (__readgsword(FIELD_OFFSET(KPCR, Prcb.DpcRoutineActive)) != 0);
  2068. }
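//
// Illustrative sketch (editorial addition): combining the gs-relative
// accessors above. The helper name KiSketchProcessorAndDpcState is
// hypothetical.
//
__forceinline
ULONG
KiSketchProcessorAndDpcState (
    VOID
    )
{
    ULONG State;

    //
    // Capture the current processor number and set the high bit if a DPC
    // is active on this processor. Both reads come from the current
    // processor's PCR via the gs segment, so no locking is needed.
    //
    State = KeGetCurrentProcessorNumber();
    if (KeIsExecutingDpc() != 0) {
        State |= 0x80000000;
    }
    return State;
}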
  2069. // begin_nthal begin_ntddk begin_ntosp
  2070. // begin_wdm
  2071. #endif // !defined(MIDL_PASS) && defined(_M_AMD64)
  2072. // end_nthal end_ntddk end_wdm end_ntosp
  2073. // begin_ntddk begin_nthal begin_ntndis begin_wdm begin_ntosp
  2074. //++
  2075. //
  2076. //
  2077. // VOID
  2078. // KeMemoryBarrier (
  2079. // VOID
  2080. // )
  2081. //
  2082. // VOID
  2083. // KeMemoryBarrierWithoutFence (
  2084. // VOID
  2085. // )
  2086. //
  2087. //
  2088. // Routine Description:
  2089. //
  2090. // These functions order memory accesses as seen by other processors.
  2091. //
  2092. // Arguments:
  2093. //
  2094. // None.
  2095. //
  2096. // Return Value:
  2097. //
  2098. // None.
  2099. //
  2100. //--
  2101. #if !defined(_CROSS_PLATFORM_)
  2102. #ifdef __cplusplus
  2103. extern "C" {
  2104. #endif
  2105. VOID
  2106. _ReadWriteBarrier (
  2107. VOID
  2108. );
  2109. #pragma intrinsic(_ReadWriteBarrier)
  2110. #ifdef __cplusplus
  2111. }
  2112. #endif
  2113. #define KeMemoryBarrier() _ReadWriteBarrier()
  2114. #define KeMemoryBarrierWithoutFence() _ReadWriteBarrier()
  2115. #else
  2116. #define KeMemoryBarrier()
  2117. #define KeMemoryBarrierWithoutFence()
  2118. #endif
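//
// Illustrative sketch (editorial addition): a typical use of
// KeMemoryBarrier is publishing a fully initialized structure through a
// shared pointer. On AMD64 the barrier expands to a compiler fence only,
// which suffices here because the architecture does not reorder stores
// with other stores. The names SKETCH_SHARED_DATA and SketchPublish are
// hypothetical.
//
typedef struct _SKETCH_SHARED_DATA {
    ULONG Value;
} SKETCH_SHARED_DATA, *PSKETCH_SHARED_DATA;

__forceinline
VOID
SketchPublish (
    IN PSKETCH_SHARED_DATA volatile *Slot,
    IN PSKETCH_SHARED_DATA Data
    )
{
    Data->Value = 1;

    //
    // Prevent the compiler from sinking the initializing store below the
    // pointer store that makes Data reachable by other processors.
    //
    KeMemoryBarrier();
    *Slot = Data;
}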
  2119. // end_ntddk end_nthal end_ntndis end_wdm end_ntosp
  2120. // begin_nthal
  2121. //
  2122. // Define inline functions to get and set the handler address in an IDT
  2123. // entry.
  2124. //
  2125. typedef union _KIDT_HANDLER_ADDRESS {
  2126. struct {
  2127. USHORT OffsetLow;
  2128. USHORT OffsetMiddle;
  2129. ULONG OffsetHigh;
  2130. };
  2131. ULONG64 Address;
  2132. } KIDT_HANDLER_ADDRESS, *PKIDT_HANDLER_ADDRESS;
  2133. #define KiGetIdtFromVector(Vector) \
  2134. &KeGetPcr()->IdtBase[HalVectorToIDTEntry(Vector)]
  2135. #define KeGetIdtHandlerAddress(Vector,Addr) { \
  2136. KIDT_HANDLER_ADDRESS Handler; \
  2137. PKIDTENTRY64 Idt; \
  2138. \
  2139. Idt = KiGetIdtFromVector(Vector); \
  2140. Handler.OffsetLow = Idt->OffsetLow; \
  2141. Handler.OffsetMiddle = Idt->OffsetMiddle; \
  2142. Handler.OffsetHigh = Idt->OffsetHigh; \
  2143. *(Addr) = (PVOID)(Handler.Address); \
  2144. }
  2145. #define KeSetIdtHandlerAddress(Vector,Addr) { \
  2146. KIDT_HANDLER_ADDRESS Handler; \
  2147. PKIDTENTRY64 Idt; \
  2148. \
  2149. Idt = KiGetIdtFromVector(Vector); \
  2150. Handler.Address = (ULONG64)(Addr); \
  2151. Idt->OffsetLow = Handler.OffsetLow; \
  2152. Idt->OffsetMiddle = Handler.OffsetMiddle; \
  2153. Idt->OffsetHigh = Handler.OffsetHigh; \
  2154. }
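//
// Illustrative sketch (editorial addition): the get/set macros above are
// normally used as a pair to interpose on a vector. The helper name
// SketchSwapIdtHandler is hypothetical, and real code would additionally
// serialize against other processors and interrupts.
//
__forceinline
VOID
SketchSwapIdtHandler (
    IN ULONG Vector,
    IN PVOID NewHandler,
    OUT PVOID *OldHandler
    )
{
    //
    // Capture the current handler address from the IDT entry, then
    // install the replacement.
    //
    KeGetIdtHandlerAddress(Vector, OldHandler);
    KeSetIdtHandlerAddress(Vector, NewHandler);
}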
  2155. // end_nthal
  2156. //++
  2157. //
  2158. // BOOLEAN
  2159. // KiIsThreadNumericStateSaved(
  2160. // IN PKTHREAD Address
  2161. // )
  2162. //
  2163. //--
  2164. #define KiIsThreadNumericStateSaved(a) TRUE
  2165. //++
  2166. //
  2167. // VOID
  2168. // KiRundownThread(
  2169. // IN PKTHREAD Address
  2170. // )
  2171. //
  2172. //--
  2173. #define KiRundownThread(a)
  2174. //
  2175. // Functions specific to this architecture.
  2176. //
  2177. VOID
  2178. KiSetIRR (
  2179. IN ULONG SWInterruptMask
  2180. );
  2181. // begin_ntddk begin_wdm begin_ntosp
  2182. NTKERNELAPI
  2183. NTSTATUS
  2184. KeSaveFloatingPointState (
  2185. OUT PKFLOATING_SAVE SaveArea
  2186. );
  2187. NTKERNELAPI
  2188. NTSTATUS
  2189. KeRestoreFloatingPointState (
  2190. IN PKFLOATING_SAVE SaveArea
  2191. );
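//
// Illustrative sketch (editorial addition): kernel code that touches the
// XMM registers brackets the work between the save and restore routines
// declared above. The function name SketchUseFloatingPoint is
// hypothetical; NT_SUCCESS is the usual ntdef.h macro.
//
__forceinline
NTSTATUS
SketchUseFloatingPoint (
    VOID
    )
{
    KFLOATING_SAVE SaveArea;
    NTSTATUS Status;

    Status = KeSaveFloatingPointState(&SaveArea);
    if (NT_SUCCESS(Status)) {

        //
        // Floating point and XMM computation is safe here.
        //

        Status = KeRestoreFloatingPointState(&SaveArea);
    }
    return Status;
}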
  2192. // end_ntddk end_wdm end_ntosp
  2193. // begin_nthal begin_ntddk begin_wdm begin_ntndis begin_ntosp
  2194. #endif // defined(_AMD64_)
  2195. // end_nthal end_ntddk end_wdm end_ntndis end_ntosp
  2196. //
  2197. // Architecture specific kernel functions.
  2198. //
  2199. // begin_ntosp
  2200. //
  2201. // Platform specific kernel functions to raise and lower IRQL.
  2202. //
  2203. // These functions are imported for ntddk, ntifs, and wdm. They are
  2204. // inlined for nthal, ntosp, and the system.
  2205. //
  2206. #if defined(_NTDRIVER_) || defined(_NTDDK_) || defined(_NTIFS_) || defined(_WDMDDK_)
  2207. // begin_ntddk begin_wdm
  2208. #if defined(_AMD64_)
  2209. NTKERNELAPI
  2210. KIRQL
  2211. KeGetCurrentIrql (
  2212. VOID
  2213. );
  2214. NTKERNELAPI
  2215. VOID
  2216. KeLowerIrql (
  2217. IN KIRQL NewIrql
  2218. );
  2219. #define KeRaiseIrql(a,b) *(b) = KfRaiseIrql(a)
  2220. NTKERNELAPI
  2221. KIRQL
  2222. KfRaiseIrql (
  2223. IN KIRQL NewIrql
  2224. );
  2225. // end_wdm
  2226. NTKERNELAPI
  2227. KIRQL
  2228. KeRaiseIrqlToDpcLevel (
  2229. VOID
  2230. );
  2231. NTKERNELAPI
  2232. KIRQL
  2233. KeRaiseIrqlToSynchLevel (
  2234. VOID
  2235. );
  2236. // begin_wdm
  2237. #endif // defined(_AMD64_)
  2238. // end_ntddk end_wdm
  2239. #else
  2240. // begin_nthal
  2241. #if defined(_AMD64_) && !defined(MIDL_PASS)
  2242. __forceinline
  2243. KIRQL
  2244. KeGetCurrentIrql (
  2245. VOID
  2246. )
  2247. /*++
  2248. Routine Description:
  2249. This function returns the current IRQL.
  2250. Arguments:
  2251. None.
  2252. Return Value:
  2253. The current IRQL is returned as the function value.
  2254. --*/
  2255. {
  2256. return (KIRQL)ReadCR8();
  2257. }
  2258. __forceinline
  2259. VOID
  2260. KeLowerIrql (
  2261. IN KIRQL NewIrql
  2262. )
  2263. /*++
  2264. Routine Description:
  2265. This function lowers the IRQL to the specified value.
  2266. Arguments:
  2267. NewIrql - Supplies the new IRQL value.
  2268. Return Value:
  2269. None.
  2270. --*/
  2271. {
  2272. ASSERT(KeGetCurrentIrql() >= NewIrql);
  2273. WriteCR8(NewIrql);
  2274. return;
  2275. }
  2276. #define KeRaiseIrql(a,b) *(b) = KfRaiseIrql(a)
  2277. __forceinline
  2278. KIRQL
  2279. KfRaiseIrql (
  2280. IN KIRQL NewIrql
  2281. )
  2282. /*++
  2283. Routine Description:
  2284. This function raises the current IRQL to the specified value and returns
  2285. the previous IRQL.
  2286. Arguments:
  2287. NewIrql (cl) - Supplies the new IRQL value.
  2288. Return Value:
  2289. The previous IRQL is returned as the function value.
  2290. --*/
  2291. {
  2292. KIRQL OldIrql;
  2293. OldIrql = KeGetCurrentIrql();
  2294. ASSERT(OldIrql <= NewIrql);
  2295. WriteCR8(NewIrql);
  2296. return OldIrql;
  2297. }
  2298. __forceinline
  2299. KIRQL
  2300. KeRaiseIrqlToDpcLevel (
  2301. VOID
  2302. )
  2303. /*++
  2304. Routine Description:
  2305. This function raises the current IRQL to DPC_LEVEL and returns the
  2306. previous IRQL.
  2307. Arguments:
  2308. None.
  2309. Return Value:
  2310. The previous IRQL is returned as the function value.
  2311. --*/
  2312. {
  2313. KIRQL OldIrql;
  2314. OldIrql = KeGetCurrentIrql();
  2315. ASSERT(OldIrql <= DISPATCH_LEVEL);
  2316. WriteCR8(DISPATCH_LEVEL);
  2317. return OldIrql;
  2318. }
  2319. __forceinline
  2320. KIRQL
  2321. KeRaiseIrqlToSynchLevel (
  2322. VOID
  2323. )
  2324. /*++
  2325. Routine Description:
  2326. This function raises the current IRQL to SYNCH_LEVEL and returns the
  2327. previous IRQL.
  2328. Arguments:
  None.
  2329. Return Value:
  2330. The previous IRQL is returned as the function value.
  2331. --*/
  2332. {
  2333. KIRQL OldIrql;
  2334. OldIrql = KeGetCurrentIrql();
  2335. ASSERT(OldIrql <= SYNCH_LEVEL);
  2336. WriteCR8(SYNCH_LEVEL);
  2337. return OldIrql;
  2338. }
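//
// Illustrative sketch (editorial addition): the canonical raise/lower
// pairing for the inline routines above. The function name
// SketchRunAtDispatchLevel is hypothetical.
//
__forceinline
VOID
SketchRunAtDispatchLevel (
    VOID
    )
{
    KIRQL OldIrql;

    //
    // Raise to DISPATCH_LEVEL, perform work that must not be preempted
    // by the dispatcher, then restore the previous IRQL.
    //
    OldIrql = KeRaiseIrqlToDpcLevel();

    //
    // ... work at DISPATCH_LEVEL ...
    //

    KeLowerIrql(OldIrql);
}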
  2339. #endif // defined(_AMD64_) && !defined(MIDL_PASS)
  2340. // end_nthal
  2341. #endif // defined(_NTDRIVER_) || defined(_NTDDK_) || defined(_NTIFS_) || defined(_WDMDDK_)
  2342. // end_ntosp
  2343. //
  2344. // misc routines
  2345. //
  2346. VOID
  2347. KeOptimizeProcessorControlState (
  2348. VOID
  2349. );
  2350. // begin_nthal
  2351. #if defined(_AMD64_)
  2352. //
  2353. // Structure to aid in booting secondary processors
  2354. //
  2355. #pragma pack(push,2)
  2356. typedef struct _FAR_JMP_16 {
  2357. UCHAR OpCode; // = 0xe9
  2358. USHORT Offset;
  2359. } FAR_JMP_16;
  2360. typedef struct _FAR_TARGET_32 {
  2361. ULONG Offset;
  2362. USHORT Selector;
  2363. } FAR_TARGET_32;
  2364. typedef struct _PSEUDO_DESCRIPTOR_32 {
  2365. USHORT Limit;
  2366. ULONG Base;
  2367. } PSEUDO_DESCRIPTOR_32;
  2368. #pragma pack(pop)
  2369. #define PSB_GDT32_NULL 0 * 16
  2370. #define PSB_GDT32_CODE64 1 * 16
  2371. #define PSB_GDT32_DATA32 2 * 16
  2372. #define PSB_GDT32_CODE32 3 * 16
  2373. #define PSB_GDT32_MAX 3
  2374. typedef struct _PROCESSOR_START_BLOCK *PPROCESSOR_START_BLOCK;
  2375. typedef struct _PROCESSOR_START_BLOCK {
  2376. //
  2377. // The block starts with a jmp instruction to the end of the block
  2378. //
  2379. FAR_JMP_16 Jmp;
  2380. //
  2381. // Completion flag is set to non-zero when the target processor has
  2382. // started
  2383. //
  2384. ULONG CompletionFlag;
  2385. //
  2386. // Pseudo descriptors for GDT and IDT.
  2387. //
  2388. PSEUDO_DESCRIPTOR_32 Gdt32;
  2389. PSEUDO_DESCRIPTOR_32 Idt32;
  2390. //
  2391. // The temporary 32-bit GDT itself resides here.
  2392. //
  2393. KGDTENTRY64 Gdt[PSB_GDT32_MAX + 1];
  2394. //
  2395. // Physical address of the 64-bit top-level identity-mapped page table.
  2396. //
  2397. ULONG64 TiledCr3;
  2398. //
  2399. // Far jump target from Rm to Pm code
  2400. //
  2401. FAR_TARGET_32 PmTarget;
  2402. //
  2403. // Far jump target from Pm to Lm code
  2404. //
  2405. FAR_TARGET_32 LmIdentityTarget;
  2406. //
  2407. // Address of LmTarget
  2408. //
  2409. PVOID LmTarget;
  2410. //
  2411. // Linear address of this structure
  2412. //
  2413. PPROCESSOR_START_BLOCK SelfMap;
  2414. //
  2415. // Contents of the PAT msr
  2416. //
  2417. ULONG64 MsrPat;
  2418. //
  2419. // Initial processor state for the processor to be started
  2420. //
  2421. KPROCESSOR_STATE ProcessorState;
  2422. } PROCESSOR_START_BLOCK;
  2423. //
  2424. // AMD64 functions for special instructions
  2425. //
  2426. typedef struct _CPU_INFO {
  2427. ULONG Eax;
  2428. ULONG Ebx;
  2429. ULONG Ecx;
  2430. ULONG Edx;
  2431. } CPU_INFO, *PCPU_INFO;
  2432. VOID
  2433. KiCpuId (
  2434. ULONG Function,
  2435. PCPU_INFO CpuInfo
  2436. );
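//
// Illustrative sketch (editorial addition): querying the standard CPUID
// feature leaf through KiCpuId. Bit 25 of edx reports SSE support and is
// the same bit the HF_XMMI definition below describes. The helper name
// KiSketchSseSupported is hypothetical.
//
__forceinline
LOGICAL
KiSketchSseSupported (
    VOID
    )
{
    CPU_INFO CpuInfo;

    //
    // CPUID function 1 returns the standard feature flags in edx.
    //
    KiCpuId(1, &CpuInfo);
    return (CpuInfo.Edx & (1 << 25)) != 0;
}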
  2437. //
  2438. // Define read/write MSR functions and register definitions.
  2439. //
  2440. #define MSR_TSC 0x10 // time stamp counter
  2441. #define MSR_PAT 0x277 // page attributes table
  2442. #define MSR_MCG_CAP 0x179 // machine check capabilities
  2443. #define MSR_MCG_STATUS 0x17a // machine check status
  2444. #define MSR_MCG_CTL 0x17b // machine check control
  2445. #define MSR_MC0_CTL 0x400 // machine check control, status,
  2446. #define MSR_MC0_STATUS 0x401 // address, and miscellaneous
  2447. #define MSR_MC0_ADDR 0x402 // registers for machine check
  2448. #define MSR_MC0_MISC 0x403 // sources
  2449. #define MSR_EFER 0xc0000080 // extended function enable register
  2450. #define MSR_STAR 0xc0000081 // system call selectors
  2451. #define MSR_LSTAR 0xc0000082 // system call 64-bit entry
  2452. #define MSR_CSTAR 0xc0000083 // system call 32-bit entry
  2453. #define MSR_SYSCALL_MASK 0xc0000084 // system call flags mask
  2454. #define MSR_FS_BASE 0xc0000100 // fs long mode base address register
  2455. #define MSR_GS_BASE 0xc0000101 // gs long mode base address register
  2456. #define MSR_GS_SWAP 0xc0000102 // gs long mode swap GS base register
  2457. #define MSR_PERF_EVT_SEL0 0xc0010000 // performance event select registers
  2458. #define MSR_PERF_EVT_SEL1 0xc0010001 //
  2459. #define MSR_PERF_EVT_SEL2 0xc0010002 //
  2460. #define MSR_PERF_EVT_SEL3 0xc0010003 //
  2461. #define MSR_PERF_CTR0 0xc0010004 // performance counter registers
  2462. #define MSR_PERF_CTR1 0xc0010005 //
  2463. #define MSR_PERF_CTR2 0xc0010006 //
  2464. #define MSR_PERF_CTR3 0xc0010007 //
  2465. //
  2466. // Flags within MSR_EFER
  2467. //
  2468. #define MSR_SCE 0x00000001 // system call enable
  2469. #define MSR_LME 0x00000100 // long mode enable
  2470. #define MSR_LMA 0x00000400 // long mode active
  2471. #define MSR_NXE 0x00000800 // no execute enable
  2472. //
  2473. // Page attributes table.
  2474. //
  2475. #define PAT_TYPE_STRONG_UC 0 // uncacheable/strongly ordered
  2476. #define PAT_TYPE_USWC 1 // write combining/weakly ordered
  2477. #define PAT_TYPE_WT 4 // write through
  2478. #define PAT_TYPE_WP 5 // write protected
  2479. #define PAT_TYPE_WB 6 // write back
  2480. #define PAT_TYPE_WEAK_UC 7 // uncacheable/weakly ordered
  2481. //
  2482. // Page attributes table structure.
  2483. //
  2484. typedef union _PAT_ATTRIBUTES {
  2485. struct {
  2486. UCHAR Pat[8];
  2487. } hw;
  2488. ULONG64 QuadPart;
  2489. } PAT_ATTRIBUTES, *PPAT_ATTRIBUTES;
  2490. #define ReadMSR(Msr) __readmsr(Msr)
  2491. ULONG64
  2492. __readmsr (
  2493. IN ULONG Msr
  2494. );
  2495. #define WriteMSR(Msr, Data) __writemsr(Msr, Data)
  2496. VOID
  2497. __writemsr (
  2498. IN ULONG Msr,
  2499. IN ULONG64 Value
  2500. );
  2501. #define InvalidatePage(Page) __invlpg(Page)
  2502. VOID
  2503. __invlpg (
  2504. IN PVOID Page
  2505. );
  2506. #define WritebackInvalidate() __wbinvd()
  2507. VOID
  2508. __wbinvd (
  2509. VOID
  2510. );
  2511. #pragma intrinsic(__readmsr)
  2512. #pragma intrinsic(__writemsr)
  2513. #pragma intrinsic(__invlpg)
  2514. #pragma intrinsic(__wbinvd)
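//
// Illustrative sketch (editorial addition): using the wrappers above to
// inspect EFER and the PAT. The helper names SketchNoExecuteEnabled and
// SketchPatType are hypothetical.
//
__forceinline
LOGICAL
SketchNoExecuteEnabled (
    VOID
    )
{
    //
    // Test the no-execute enable flag in the extended feature enable
    // register.
    //
    return (ReadMSR(MSR_EFER) & MSR_NXE) != 0;
}

__forceinline
UCHAR
SketchPatType (
    IN ULONG Index
    )
{
    PAT_ATTRIBUTES Attributes;

    //
    // Each of the eight PAT entries holds one of the PAT_TYPE_* memory
    // type encodings defined above.
    //
    Attributes.QuadPart = ReadMSR(MSR_PAT);
    return Attributes.hw.Pat[Index & 7];
}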
  2515. #endif // _AMD64_
  2516. // end_nthal
  2517. #if !(defined(_NTDRIVER_) || defined(_NTDDK_) || defined(_NTIFS_) || defined(_NTHAL_) || defined(_NTOSP_) || defined(_WDMDDK_))
  2518. __forceinline
  2519. VOID
  2520. KxAcquireSpinLock (
  2521. IN PKSPIN_LOCK SpinLock
  2522. )
  2523. /*++
  2524. Routine Description:
  2525. This function acquires a spin lock at the current IRQL.
  2526. Arguments:
  2527. SpinLock - Supplies a pointer to a spin lock.
  2528. Return Value:
  2529. None.
  2530. --*/
  2531. {
  2532. //
  2533. // Acquire the specified spin lock at the current IRQL.
  2534. //
  2535. #if !defined(NT_UP)
  2536. #if DBG
  2537. LONG64 Thread;
  2538. Thread = (LONG64)KeGetCurrentThread() + 1;
  2539. while (InterlockedCompareExchange64((PLONG64)SpinLock,
  2540. Thread,
  2541. 0) != 0) {
  2542. #else
  2543. while (InterlockedBitTestAndSet64((LONG64 *)SpinLock, 0)) {
  2544. #endif // DBG
  2545. do {
  2546. KeMemoryBarrierWithoutFence();
  2547. } while (BitTest64((LONG64 *)SpinLock, 0));
  2548. }
  2549. #else
  2550. UNREFERENCED_PARAMETER(SpinLock);
  2551. #endif // !defined(NT_UP)
  2552. return;
  2553. }
  2554. __forceinline
  2555. BOOLEAN
  2556. KxTryToAcquireSpinLock (
  2557. IN PKSPIN_LOCK SpinLock
  2558. )
  2559. /*++
  2560. Routine Description:
  2561. This function attempts to acquire a spin lock at the current IRQL. If
  2562. the spinlock is already owned, then FALSE is returned. Otherwise,
  2563. TRUE is returned.
  2564. Arguments:
  2565. SpinLock - Supplies a pointer to a spin lock.
  2566. Return Value:
  2567. If the spin lock is acquired a value TRUE is returned. Otherwise, FALSE
  2568. is returned as the function value.
  2569. --*/
  2570. {
  2571. //
  2572. // Try to acquire the specified spin lock at the current IRQL.
  2573. //
  2574. #if !defined(NT_UP)
  2575. KeMemoryBarrierWithoutFence();
  2576. if (!BitTest64((LONG64 *)SpinLock, 0)) {
  2577. #if DBG
  2578. LONG64 Thread;
  2579. Thread = (LONG64)KeGetCurrentThread() + 1;
  2580. return InterlockedCompareExchange64((PLONG64)SpinLock,
  2581. Thread,
  2582. 0) == 0 ? TRUE : FALSE;
  2583. #else
  2584. return !InterlockedBitTestAndSet64((LONG64 *)SpinLock, 0);
  2585. #endif // DBG
  2586. } else {
  2587. return FALSE;
  2588. }
  2589. #else
  2590. UNREFERENCED_PARAMETER(SpinLock);
  2591. return TRUE;
  2592. #endif // !defined(NT_UP)
  2593. }
  2594. __forceinline
  2595. KIRQL
  2596. KeAcquireSpinLockRaiseToDpc (
  2597. IN PKSPIN_LOCK SpinLock
  2598. )
  2599. /*++
  2600. Routine Description:
  2601. This function raises IRQL to DISPATCH_LEVEL and acquires the specified
  2602. spin lock.
  2603. Arguments:
  2604. SpinLock - Supplies a pointer to a spin lock.
  2605. Return Value:
  2606. The previous IRQL is returned.
  2607. --*/
  2608. {
  2609. KIRQL OldIrql;
  2610. //
  2611. // Raise IRQL to DISPATCH_LEVEL and acquire the specified spin lock.
  2612. //
  2613. OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
  2614. KxAcquireSpinLock(SpinLock);
  2615. return OldIrql;
  2616. }
  2617. __forceinline
  2618. KIRQL
  2619. KeAcquireSpinLockRaiseToSynch (
  2620. IN PKSPIN_LOCK SpinLock
  2621. )
  2622. /*++
  2623. Routine Description:
  2624. This function raises IRQL to SYNCH_LEVEL and acquires the specified
  2625. spin lock.
  2626. Arguments:
  2627. SpinLock - Supplies a pointer to a spin lock.
  2628. Return Value:
  2629. The previous IRQL is returned as the function value.
  2630. --*/
  2631. {
  2632. KIRQL OldIrql;
  2633. //
  2634. // Raise IRQL to SYNCH_LEVEL and acquire the specified spin lock.
  2635. //
  2636. OldIrql = KfRaiseIrql(SYNCH_LEVEL);
  2637. KxAcquireSpinLock(SpinLock);
  2638. return OldIrql;
  2639. }
  2640. __forceinline
  2641. VOID
  2642. KeAcquireSpinLockAtDpcLevel (
  2643. IN PKSPIN_LOCK SpinLock
  2644. )
  2645. /*++
  2646. Routine Description:
  2647. This function acquires a spin lock at the current IRQL.
  2648. Arguments:
  2649. SpinLock - Supplies a pointer to a spin lock.
  2650. Return Value:
  2651. None.
  2652. --*/
  2653. {
  2654. //
  2655. // Acquire the specified spin lock at the current IRQL.
  2656. //
  2657. KxAcquireSpinLock(SpinLock);
  2658. return;
  2659. }
  2660. __forceinline
  2661. VOID
  2662. KxReleaseSpinLock (
  2663. IN PKSPIN_LOCK SpinLock
  2664. )
  2665. /*++
  2666. Routine Description:
  2667. This function releases the specified spin lock at the current IRQL.
  2668. Arguments:
  2669. SpinLock - Supplies a pointer to a spin lock.
  2670. Return Value:
  2671. None.
  2672. --*/
  2673. {
  2674. #if !defined(NT_UP)
  2675. #if DBG
  2676. ASSERT(*(volatile LONG64 *)SpinLock == (LONG64)KeGetCurrentThread() + 1);
  2677. #endif // DBG
  2678. KeMemoryBarrierWithoutFence();
  2679. *(volatile LONG64 *)SpinLock = 0;
  2680. #else
  2681. UNREFERENCED_PARAMETER(SpinLock);
  2682. #endif // !defined(NT_UP)
  2683. return;
  2684. }
  2685. __forceinline
  2686. VOID
  2687. KeReleaseSpinLock (
  2688. IN PKSPIN_LOCK SpinLock,
  2689. IN KIRQL OldIrql
  2690. )
  2691. /*++
  2692. Routine Description:
  2693. This function releases the specified spin lock and lowers IRQL to a
  2694. previous value.
  2695. Arguments:
  2696. SpinLock - Supplies a pointer to a spin lock.
  2697. OldIrql - Supplies the previous IRQL value.
  2698. Return Value:
  2699. None.
  2700. --*/
  2701. {
  2702. KxReleaseSpinLock(SpinLock);
  2703. KeLowerIrql(OldIrql);
  2704. return;
  2705. }
  2706. __forceinline
  2707. VOID
  2708. KeReleaseSpinLockFromDpcLevel (
  2709. IN PKSPIN_LOCK SpinLock
  2710. )
  2711. /*++
  2712. Routine Description:
  2713. This function releases a spin lock at the current IRQL.
  2714. Arguments:
  2715. SpinLock - Supplies a pointer to a spin lock.
  2716. Return Value:
  2717. None.
  2718. --*/
  2719. {
  2720. KxReleaseSpinLock(SpinLock);
  2721. return;
  2722. }
  2723. __forceinline
  2724. BOOLEAN
  2725. KeTestSpinLock (
  2726. IN PKSPIN_LOCK SpinLock
  2727. )
  2728. /*++
  2729. Routine Description:
  2730. This function tests a spin lock to determine if it is currently owned.
  2731. If the spinlock is already owned, then FALSE is returned. Otherwise,
  2732. TRUE is returned.
  2733. Arguments:
  2734. SpinLock - Supplies a pointer to a spin lock.
  2735. Return Value:
  2736. If the spin lock is currently owned, then a value of FALSE is returned.
  2737. Otherwise, a value of TRUE is returned.
  2738. --*/
  2739. {
  2740. KeMemoryBarrierWithoutFence();
  2741. return !BitTest64((LONG64 *)SpinLock, 0);
  2742. }
  2743. __forceinline
  2744. BOOLEAN
  2745. KeTryToAcquireSpinLock (
  2746. IN PKSPIN_LOCK SpinLock,
  2747. OUT PKIRQL OldIrql
  2748. )
  2749. /*++
  2750. Routine Description:
  2751. This function raises IRQL to DISPATCH level and attempts to acquire a
  2752. spin lock. If the spin lock is already owned, then IRQL is restored to
  2753. its previous value and FALSE is returned. Otherwise, the spin lock is
  2754. acquired and TRUE is returned.
  2755. Arguments:
  2756. SpinLock - Supplies a pointer to a spin lock.
  2757. OldIrql - Supplies a pointer to a variable that receives the old IRQL.
  2758. Return Value:
  2759. If the spin lock is acquired a value TRUE is returned. Otherwise, FALSE
  2760. is returned.
  2761. --*/
  2762. {
  2763. //
  2764. // Raise IRQL to DISPATCH level and attempt to acquire the specified
  2765. // spin lock.
  2766. //
  2767. *OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
  2768. if (KxTryToAcquireSpinLock(SpinLock) == FALSE) {
  2769. KeLowerIrql(*OldIrql);
  2770. return FALSE;
  2771. }
  2772. return TRUE;
  2773. }
  2774. __forceinline
  2775. BOOLEAN
  2776. KeTryToAcquireSpinLockAtDpcLevel (
  2777. IN PKSPIN_LOCK SpinLock
  2778. )
  2779. /*++
  2780. Routine Description:
  2781. This function attempts to acquire a spin lock at the current IRQL. If
  2782. the spinlock is already owned, then FALSE is returned. Otherwise,
  2783. TRUE is returned.
  2784. Arguments:
  2785. SpinLock - Supplies a pointer to a spin lock.
  2786. Return Value:
  2787. If the spin lock is acquired a value TRUE is returned. Otherwise, FALSE
  2788. is returned as the function value.
  2789. --*/
  2790. {
  2791. //
  2792. // Try to acquire the specified spin lock at the current IRQL.
  2793. //
  2794. return KxTryToAcquireSpinLock(SpinLock);
  2795. }
  2796. #endif
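//
// Illustrative sketch (editorial addition): the canonical acquire/release
// pairing for the spin lock routines defined above. The helper name
// SketchIncrementUnderLock and its parameters are hypothetical.
//
__forceinline
VOID
SketchIncrementUnderLock (
    IN PKSPIN_LOCK SpinLock,
    IN OUT PULONG Value
    )
{
    KIRQL OldIrql;

    //
    // Raise to DISPATCH_LEVEL and take the lock, mutate the protected
    // datum, then release the lock and restore the previous IRQL.
    //
    OldIrql = KeAcquireSpinLockRaiseToDpc(SpinLock);
    *Value += 1;
    KeReleaseSpinLock(SpinLock, OldIrql);
}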
  2797. //
  2798. // Define software feature bit definitions.
  2799. //
  2800. #define KF_V86_VIS 0x00000001
  2801. #define KF_RDTSC 0x00000002
  2802. #define KF_CR4 0x00000004
  2803. #define KF_CMOV 0x00000008
  2804. #define KF_GLOBAL_PAGE 0x00000010
  2805. #define KF_LARGE_PAGE 0x00000020
  2806. #define KF_MTRR 0x00000040
  2807. #define KF_CMPXCHG8B 0x00000080
  2808. #define KF_MMX 0x00000100
  2809. #define KF_WORKING_PTE 0x00000200
  2810. #define KF_PAT 0x00000400
  2811. #define KF_FXSR 0x00000800
  2812. #define KF_FAST_SYSCALL 0x00001000
  2813. #define KF_XMMI 0x00002000
  2814. #define KF_3DNOW 0x00004000
  2815. #define KF_AMDK6MTRR 0x00008000
  2816. #define KF_XMMI64 0x00010000
  2817. #define KF_DTS 0x00020000
  2818. #define KF_SMT 0x00040000
  2819. //
  2820. // Define required software feature bits.
  2821. //
  2822. #define KF_REQUIRED (KF_RDTSC | KF_CR4 | KF_CMOV | KF_GLOBAL_PAGE | \
  2823. KF_LARGE_PAGE | KF_CMPXCHG8B | KF_MMX | KF_WORKING_PTE | \
  2824. KF_PAT | KF_FXSR | KF_FAST_SYSCALL | KF_XMMI | KF_XMMI64)
  2825. //
  2826. // Define hardware feature bits definitions.
  2827. //
  2828. #define HF_FPU 0x00000001 // FPU is on chip
  2829. #define HF_VME 0x00000002 // virtual 8086 mode enhancement
  2830. #define HF_DE 0x00000004 // debugging extension
  2831. #define HF_PSE 0x00000008 // page size extension
  2832. #define HF_TSC 0x00000010 // time stamp counter
  2833. #define HF_MSR 0x00000020 // rdmsr and wrmsr support
  2834. #define HF_PAE 0x00000040 // physical address extension
  2835. #define HF_MCE 0x00000080 // machine check exception
  2836. #define HF_CXS 0x00000100 // cmpxchg8b instruction supported
  2837. #define HF_APIC 0x00000200 // APIC on chip
  2838. #define HF_UNUSED0 0x00000400 // unused bit
  2839. #define HF_SYSCALL 0x00000800 // fast system call
  2840. #define HF_MTRR 0x00001000 // memory type range registers
  2841. #define HF_PGE 0x00002000 // global page TB support
  2842. #define HF_MCA 0x00004000 // machine check architecture
  2843. #define HF_CMOV 0x00008000 // cmov instruction supported
  2844. #define HF_PAT 0x00010000 // physical attributes table
  2845. #define HF_UNUSED1 0x00020000 // unused bit
  2846. #define HF_UNUSED2 0x00040000 // unused bit
  2847. #define HF_UNUSED3 0x00080000 // unused bit
  2848. #define HF_NOEXECUTE 0x00100000 // no execute protection
  2849. #define HF_UNUSED5 0x00200000 // unused bit
  2850. #define HF_UNUSED6 0x00400000 // unused bit
  2851. #define HF_MMX 0x00800000 // MMX technology supported
  2852. #define HF_FXSR 0x01000000 // fxsr instruction supported
  2853. #define HF_XMMI 0x02000000 // xmm (SSE) registers supported
  2854. #define HF_XMMI64 0x04000000 // xmm (SSE2) registers supported
  2855. //
  2856. // Define required hardware feature bits.
  2857. //
  2858. #define HF_REQUIRED (HF_FPU | HF_DE | HF_PSE | HF_TSC | HF_MSR | \
  2859. HF_PAE | HF_MCE | HF_CXS | HF_APIC | HF_SYSCALL | \
  2860. HF_PGE | HF_MCA | HF_CMOV | HF_PAT | HF_MMX | \
  2861. HF_FXSR | HF_XMMI | HF_XMMI64)
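//
// Illustrative sketch (editorial addition): most HF_* bits mirror the
// CPUID function 1 edx feature flags bit-for-bit (HF_SYSCALL and
// HF_NOEXECUTE are reported via extended function 0x80000001 on AMD64
// parts). A feature check therefore reduces to a mask test; the helper
// name KiSketchHasPat is hypothetical.
//
__forceinline
LOGICAL
KiSketchHasPat (
    VOID
    )
{
    CPU_INFO CpuInfo;

    //
    // HF_PAT (bit 16) matches the PAT feature bit in CPUID.1:edx.
    //
    KiCpuId(1, &CpuInfo);
    return (CpuInfo.Edx & HF_PAT) != 0;
}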
  2862. //
  2863. // Define extended hardware feature bit definitions.
  2864. //
  2865. #define XHF_3DNOW 0x80000000 // 3DNOW supported
  2866. #endif // __amd64_