Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

3186 lines
63 KiB

  1. /*++
  2. Copyright (c) 2000 Microsoft Corporation
  3. Module Name:
  4. amd64.h
  5. Abstract:
  6. This module contains the AMD64 hardware specific header file.
  7. Author:
  8. David N. Cutler (davec) 3-May-2000
  9. Revision History:
  10. --*/
  11. #ifndef __amd64_
  12. #define __amd64_
  13. #if !(defined(_NTDRIVER_) || defined(_NTDDK_) || defined(_NTIFS_) || defined(_NTHAL_) || defined(_NTOSP_)) && !defined(_BLDR_)
  14. #define ExRaiseException RtlRaiseException
  15. #define ExRaiseStatus RtlRaiseStatus
  16. #endif
  17. // begin_ntddk begin_wdm begin_nthal begin_ntndis begin_ntosp
  18. #if defined(_M_AMD64) && !defined(RC_INVOKED) && !defined(MIDL_PASS)
  19. //
  20. // Define intrinsic function to do in's and out's.
  21. //
  22. #ifdef __cplusplus
  23. extern "C" {
  24. #endif
  25. UCHAR
  26. __inbyte (
  27. IN USHORT Port
  28. );
  29. USHORT
  30. __inword (
  31. IN USHORT Port
  32. );
  33. ULONG
  34. __indword (
  35. IN USHORT Port
  36. );
  37. VOID
  38. __outbyte (
  39. IN USHORT Port,
  40. IN UCHAR Data
  41. );
  42. VOID
  43. __outword (
  44. IN USHORT Port,
  45. IN USHORT Data
  46. );
  47. VOID
  48. __outdword (
  49. IN USHORT Port,
  50. IN ULONG Data
  51. );
  52. VOID
  53. __inbytestring (
  54. IN USHORT Port,
  55. IN PUCHAR Buffer,
  56. IN ULONG Count
  57. );
  58. VOID
  59. __inwordstring (
  60. IN USHORT Port,
  61. IN PUSHORT Buffer,
  62. IN ULONG Count
  63. );
  64. VOID
  65. __indwordstring (
  66. IN USHORT Port,
  67. IN PULONG Buffer,
  68. IN ULONG Count
  69. );
  70. VOID
  71. __outbytestring (
  72. IN USHORT Port,
  73. IN PUCHAR Buffer,
  74. IN ULONG Count
  75. );
  76. VOID
  77. __outwordstring (
  78. IN USHORT Port,
  79. IN PUSHORT Buffer,
  80. IN ULONG Count
  81. );
  82. VOID
  83. __outdwordstring (
  84. IN USHORT Port,
  85. IN PULONG Buffer,
  86. IN ULONG Count
  87. );
  88. #ifdef __cplusplus
  89. }
  90. #endif
  91. #pragma intrinsic(__inbyte)
  92. #pragma intrinsic(__inword)
  93. #pragma intrinsic(__indword)
  94. #pragma intrinsic(__outbyte)
  95. #pragma intrinsic(__outword)
  96. #pragma intrinsic(__outdword)
  97. #pragma intrinsic(__inbytestring)
  98. #pragma intrinsic(__inwordstring)
  99. #pragma intrinsic(__indwordstring)
  100. #pragma intrinsic(__outbytestring)
  101. #pragma intrinsic(__outwordstring)
  102. #pragma intrinsic(__outdwordstring)
  103. //
  104. // Interlocked intrinsic functions.
  105. //
  106. #define InterlockedAnd _InterlockedAnd
  107. #define InterlockedOr _InterlockedOr
  108. #define InterlockedXor _InterlockedXor
  109. #define InterlockedIncrement _InterlockedIncrement
  110. #define InterlockedDecrement _InterlockedDecrement
  111. #define InterlockedAdd _InterlockedAdd
  112. #define InterlockedExchange _InterlockedExchange
  113. #define InterlockedExchangeAdd _InterlockedExchangeAdd
  114. #define InterlockedCompareExchange _InterlockedCompareExchange
  115. #define InterlockedAnd64 _InterlockedAnd64
  116. #define InterlockedOr64 _InterlockedOr64
  117. #define InterlockedXor64 _InterlockedXor64
  118. #define InterlockedIncrement64 _InterlockedIncrement64
  119. #define InterlockedDecrement64 _InterlockedDecrement64
  120. #define InterlockedAdd64 _InterlockedAdd64
  121. #define InterlockedExchange64 _InterlockedExchange64
  122. #define InterlockedExchangeAdd64 _InterlockedExchangeAdd64
  123. #define InterlockedCompareExchange64 _InterlockedCompareExchange64
  124. #define InterlockedExchangePointer _InterlockedExchangePointer
  125. #define InterlockedCompareExchangePointer _InterlockedCompareExchangePointer
  126. #ifdef __cplusplus
  127. extern "C" {
  128. #endif
  129. LONG
  130. InterlockedAnd (
  131. IN OUT LONG volatile *Destination,
  132. IN LONG Value
  133. );
  134. LONG
  135. InterlockedOr (
  136. IN OUT LONG volatile *Destination,
  137. IN LONG Value
  138. );
  139. LONG
  140. InterlockedXor (
  141. IN OUT LONG volatile *Destination,
  142. IN LONG Value
  143. );
  144. LONG64
  145. InterlockedAnd64 (
  146. IN OUT LONG64 volatile *Destination,
  147. IN LONG64 Value
  148. );
  149. LONG64
  150. InterlockedOr64 (
  151. IN OUT LONG64 volatile *Destination,
  152. IN LONG64 Value
  153. );
  154. LONG64
  155. InterlockedXor64 (
  156. IN OUT LONG64 volatile *Destination,
  157. IN LONG64 Value
  158. );
  159. LONG
  160. InterlockedIncrement(
  161. IN OUT LONG volatile *Addend
  162. );
  163. LONG
  164. InterlockedDecrement(
  165. IN OUT LONG volatile *Addend
  166. );
  167. LONG
  168. InterlockedExchange(
  169. IN OUT LONG volatile *Target,
  170. IN LONG Value
  171. );
  172. LONG
  173. InterlockedExchangeAdd(
  174. IN OUT LONG volatile *Addend,
  175. IN LONG Value
  176. );
  177. #if !defined(_X86AMD64_)
  178. __forceinline
  179. LONG
  180. InterlockedAdd(
  181. IN OUT LONG volatile *Addend,
  182. IN LONG Value
  183. )
  184. {
  185. return InterlockedExchangeAdd(Addend, Value) + Value;
  186. }
  187. #endif
  188. LONG
  189. InterlockedCompareExchange (
  190. IN OUT LONG volatile *Destination,
  191. IN LONG ExChange,
  192. IN LONG Comperand
  193. );
  194. LONG64
  195. InterlockedIncrement64(
  196. IN OUT LONG64 volatile *Addend
  197. );
  198. LONG64
  199. InterlockedDecrement64(
  200. IN OUT LONG64 volatile *Addend
  201. );
  202. LONG64
  203. InterlockedExchange64(
  204. IN OUT LONG64 volatile *Target,
  205. IN LONG64 Value
  206. );
  207. LONG64
  208. InterlockedExchangeAdd64(
  209. IN OUT LONG64 volatile *Addend,
  210. IN LONG64 Value
  211. );
  212. #if !defined(_X86AMD64_)
  213. __forceinline
  214. LONG64
  215. InterlockedAdd64(
  216. IN OUT LONG64 volatile *Addend,
  217. IN LONG64 Value
  218. )
  219. {
  220. return InterlockedExchangeAdd64(Addend, Value) + Value;
  221. }
  222. #endif
  223. LONG64
  224. InterlockedCompareExchange64 (
  225. IN OUT LONG64 volatile *Destination,
  226. IN LONG64 ExChange,
  227. IN LONG64 Comperand
  228. );
  229. PVOID
  230. InterlockedCompareExchangePointer (
  231. IN OUT PVOID volatile *Destination,
  232. IN PVOID Exchange,
  233. IN PVOID Comperand
  234. );
  235. PVOID
  236. InterlockedExchangePointer(
  237. IN OUT PVOID volatile *Target,
  238. IN PVOID Value
  239. );
  240. #pragma intrinsic(_InterlockedAnd)
  241. #pragma intrinsic(_InterlockedOr)
  242. #pragma intrinsic(_InterlockedXor)
  243. #pragma intrinsic(_InterlockedIncrement)
  244. #pragma intrinsic(_InterlockedDecrement)
  245. #pragma intrinsic(_InterlockedExchange)
  246. #pragma intrinsic(_InterlockedExchangeAdd)
  247. #pragma intrinsic(_InterlockedCompareExchange)
  248. #pragma intrinsic(_InterlockedAnd64)
  249. #pragma intrinsic(_InterlockedOr64)
  250. #pragma intrinsic(_InterlockedXor64)
  251. #pragma intrinsic(_InterlockedIncrement64)
  252. #pragma intrinsic(_InterlockedDecrement64)
  253. #pragma intrinsic(_InterlockedExchange64)
  254. #pragma intrinsic(_InterlockedExchangeAdd64)
  255. #pragma intrinsic(_InterlockedCompareExchange64)
  256. #pragma intrinsic(_InterlockedExchangePointer)
  257. #pragma intrinsic(_InterlockedCompareExchangePointer)
  258. #ifdef __cplusplus
  259. }
  260. #endif
  261. #endif // defined(_M_AMD64) && !defined(RC_INVOKED) && !defined(MIDL_PASS)
  262. #if defined(_AMD64_)
  263. //
  264. // Types to use to contain PFNs and their counts.
  265. //
  266. typedef ULONG PFN_COUNT;
  267. typedef LONG64 SPFN_NUMBER, *PSPFN_NUMBER;
  268. typedef ULONG64 PFN_NUMBER, *PPFN_NUMBER;
  269. //
  270. // Define maximum size of flush multiple TB request.
  271. //
  272. #define FLUSH_MULTIPLE_MAXIMUM 16
  273. //
  274. // Indicate that the AMD64 compiler supports the allocate pragmas.
  275. //
  276. #define ALLOC_PRAGMA 1
  277. #define ALLOC_DATA_PRAGMA 1
  278. // end_ntddk end_nthal end_ntndis end_wdm end_ntosp
  279. //
  280. // Length on interrupt object dispatch code in longwords.
  281. // (shielint) Reserve 9*4 space for ABIOS stack mapping. If NO
  282. // ABIOS support the size of DISPATCH_LENGTH should be 74.
  283. //
  284. // begin_nthal
  285. #define NORMAL_DISPATCH_LENGTH 106 // ntddk wdm
  286. #define DISPATCH_LENGTH NORMAL_DISPATCH_LENGTH // ntddk wdm
  287. // ntddk wdm
  288. // begin_ntosp
  289. //
  290. // Define constants for bits in CR0.
  291. //
  292. #define CR0_PE 0x00000001 // protection enable
  293. #define CR0_MP 0x00000002 // math present
  294. #define CR0_EM 0x00000004 // emulate math coprocessor
  295. #define CR0_TS 0x00000008 // task switched
  296. #define CR0_ET 0x00000010 // extension type (80387)
  297. #define CR0_NE 0x00000020 // numeric error
  298. #define CR0_WP 0x00010000 // write protect
  299. #define CR0_AM 0x00040000 // alignment mask
  300. #define CR0_NW 0x20000000 // not write-through
  301. #define CR0_CD 0x40000000 // cache disable
  302. #define CR0_PG 0x80000000 // paging
  303. //
  304. // Define functions to read and write CR0.
  305. //
  306. #ifdef __cplusplus
  307. extern "C" {
  308. #endif
  309. #define ReadCR0() __readcr0()
  310. ULONG64
  311. __readcr0 (
  312. VOID
  313. );
  314. #define WriteCR0(Data) __writecr0(Data)
  315. VOID
  316. __writecr0 (
  317. IN ULONG64 Data
  318. );
  319. #pragma intrinsic(__readcr0)
  320. #pragma intrinsic(__writecr0)
  321. //
  322. // Define functions to read and write CR3.
  323. //
  324. #define ReadCR3() __readcr3()
  325. ULONG64
  326. __readcr3 (
  327. VOID
  328. );
  329. #define WriteCR3(Data) __writecr3(Data)
  330. VOID
  331. __writecr3 (
  332. IN ULONG64 Data
  333. );
  334. #pragma intrinsic(__readcr3)
  335. #pragma intrinsic(__writecr3)
  336. //
  337. // Define constants for bits in CR4.
  338. //
  339. #define CR4_VME 0x00000001 // V86 mode extensions
  340. #define CR4_PVI 0x00000002 // Protected mode virtual interrupts
  341. #define CR4_TSD 0x00000004 // Time stamp disable
  342. #define CR4_DE 0x00000008 // Debugging Extensions
  343. #define CR4_PSE 0x00000010 // Page size extensions
  344. #define CR4_PAE 0x00000020 // Physical address extensions
  345. #define CR4_MCE 0x00000040 // Machine check enable
  346. #define CR4_PGE 0x00000080 // Page global enable
  347. #define CR4_FXSR 0x00000200 // FXSR used by OS
  348. #define CR4_XMMEXCPT 0x00000400 // XMMI used by OS
  349. //
  350. // Define functions to read and write CR4.
  351. //
  352. #define ReadCR4() __readcr4()
  353. ULONG64
  354. __readcr4 (
  355. VOID
  356. );
  357. #define WriteCR4(Data) __writecr4(Data)
  358. VOID
  359. __writecr4 (
  360. IN ULONG64 Data
  361. );
  362. #pragma intrinsic(__readcr4)
  363. #pragma intrinsic(__writecr4)
  364. //
  365. // Define functions to read and write CR8.
  366. //
  367. // CR8 is the APIC TPR register.
  368. //
  369. #define ReadCR8() __readcr8()
  370. ULONG64
  371. __readcr8 (
  372. VOID
  373. );
  374. #define WriteCR8(Data) __writecr8(Data)
  375. VOID
  376. __writecr8 (
  377. IN ULONG64 Data
  378. );
  379. #pragma intrinsic(__readcr8)
  380. #pragma intrinsic(__writecr8)
  381. #ifdef __cplusplus
  382. }
  383. #endif
  384. // end_nthal end_ntosp
  385. //
  386. // External references to the code labels.
  387. //
  388. extern ULONG KiInterruptTemplate[NORMAL_DISPATCH_LENGTH];
  389. // begin_ntddk begin_wdm begin_nthal begin_ntosp
  390. //
  391. // Interrupt Request Level definitions
  392. //
  393. #define PASSIVE_LEVEL 0 // Passive release level
  394. #define LOW_LEVEL 0 // Lowest interrupt level
  395. #define APC_LEVEL 1 // APC interrupt level
  396. #define DISPATCH_LEVEL 2 // Dispatcher level
  397. #define CLOCK_LEVEL 13 // Interval clock level
  398. #define IPI_LEVEL 14 // Interprocessor interrupt level
  399. #define POWER_LEVEL 14 // Power failure level
  400. #define PROFILE_LEVEL 15 // timer used for profiling.
  401. #define HIGH_LEVEL 15 // Highest interrupt level
  402. #if defined(NT_UP)
  403. #define SYNCH_LEVEL DISPATCH_LEVEL // synchronization level
  404. #else
  405. #define SYNCH_LEVEL (IPI_LEVEL - 1) // synchronization level
  406. #endif
  407. #define IRQL_VECTOR_OFFSET 2 // offset from IRQL to vector / 16
  408. // end_ntddk end_wdm end_ntosp
  409. #define KiSynchIrql SYNCH_LEVEL // enable portable code
  410. //
  411. // Machine type definitions
  412. //
  413. #define MACHINE_TYPE_ISA 0
  414. #define MACHINE_TYPE_EISA 1
  415. #define MACHINE_TYPE_MCA 2
  416. // end_nthal
  417. //
  418. // The previous values and the following are or'ed in KeI386MachineType.
  419. //
  420. #define MACHINE_TYPE_PC_AT_COMPATIBLE 0x00000000
  421. #define MACHINE_TYPE_PC_9800_COMPATIBLE 0x00000100
  422. #define MACHINE_TYPE_FMR_COMPATIBLE 0x00000200
  423. extern ULONG KeI386MachineType;
  424. // begin_nthal
  425. //
  426. // Define constants used in selector tests.
  427. //
  428. // N.B. MODE_MASK and MODE_BIT assumes that all code runs at either ring-0
  429. // or ring-3 and is used to test the mode. RPL_MASK is used for merging
  430. // or extracting RPL values.
  431. //
  432. #define MODE_BIT 0
  433. #define MODE_MASK 1 // ntosp
  434. #define RPL_MASK 3
  435. //
  436. // Startup count value for KeStallExecution. This value is used
  437. // until KiInitializeStallExecution can compute the real one.
  438. // Pick a value long enough for very fast processors.
  439. //
  440. #define INITIAL_STALL_COUNT 100
  441. // end_nthal
  442. //
  443. // begin_nthal
  444. //
  445. // Macro to extract the high word of a long offset
  446. //
  447. #define HIGHWORD(l) \
  448. ((USHORT)(((ULONG)(l)>>16) & 0xffff))
  449. //
  450. // Macro to extract the low word of a long offset
  451. //
  452. #define LOWWORD(l) \
  453. ((USHORT)((ULONG)l & 0x0000ffff))
  454. //
  455. // Macro to combine two USHORT offsets into a long offset
  456. //
  457. #if !defined(MAKEULONG)
  458. #define MAKEULONG(x, y) \
  459. (((((ULONG)(x))<<16) & 0xffff0000) | \
  460. ((ULONG)(y) & 0xffff))
  461. #endif
  462. // end_nthal
  463. //
  464. // Request a software interrupt.
  465. //
  466. #define KiRequestSoftwareInterrupt(RequestIrql) \
  467. HalRequestSoftwareInterrupt( RequestIrql )
  468. // begin_ntddk begin_wdm begin_nthal begin_ntndis begin_ntosp
  469. //
  470. // I/O space read and write macros.
  471. //
  472. // The READ/WRITE_REGISTER_* calls manipulate I/O registers in MEMORY space.
  473. // (Use move instructions, with LOCK prefix to force correct behavior
  474. // w.r.t. caches and write buffers.)
  475. //
  476. // The READ/WRITE_PORT_* calls manipulate I/O registers in PORT space.
  477. // (Use in/out instructions.)
  478. //
  479. __forceinline
  480. UCHAR
  481. READ_REGISTER_UCHAR (
  482. volatile UCHAR *Register
  483. )
  484. {
  485. return *Register;
  486. }
  487. __forceinline
  488. USHORT
  489. READ_REGISTER_USHORT (
  490. volatile USHORT *Register
  491. )
  492. {
  493. return *Register;
  494. }
  495. __forceinline
  496. ULONG
  497. READ_REGISTER_ULONG (
  498. volatile ULONG *Register
  499. )
  500. {
  501. return *Register;
  502. }
  503. __forceinline
  504. VOID
  505. READ_REGISTER_BUFFER_UCHAR (
  506. PUCHAR Register,
  507. PUCHAR Buffer,
  508. ULONG Count
  509. )
  510. {
  511. __movsb(Register, Buffer, Count);
  512. return;
  513. }
  514. __forceinline
  515. VOID
  516. READ_REGISTER_BUFFER_USHORT (
  517. PUSHORT Register,
  518. PUSHORT Buffer,
  519. ULONG Count
  520. )
  521. {
  522. __movsw(Register, Buffer, Count);
  523. return;
  524. }
  525. __forceinline
  526. VOID
  527. READ_REGISTER_BUFFER_ULONG (
  528. PULONG Register,
  529. PULONG Buffer,
  530. ULONG Count
  531. )
  532. {
  533. __movsd(Register, Buffer, Count);
  534. return;
  535. }
  536. __forceinline
  537. VOID
  538. WRITE_REGISTER_UCHAR (
  539. PUCHAR Register,
  540. UCHAR Value
  541. )
  542. {
  543. LONG Synch;
  544. *Register = Value;
  545. InterlockedOr(&Synch, 1);
  546. return;
  547. }
  548. __forceinline
  549. VOID
  550. WRITE_REGISTER_USHORT (
  551. PUSHORT Register,
  552. USHORT Value
  553. )
  554. {
  555. LONG Synch;
  556. *Register = Value;
  557. InterlockedOr(&Synch, 1);
  558. return;
  559. }
  560. __forceinline
  561. VOID
  562. WRITE_REGISTER_ULONG (
  563. PULONG Register,
  564. ULONG Value
  565. )
  566. {
  567. LONG Synch;
  568. *Register = Value;
  569. InterlockedOr(&Synch, 1);
  570. return;
  571. }
  572. __forceinline
  573. VOID
  574. WRITE_REGISTER_BUFFER_UCHAR (
  575. PUCHAR Register,
  576. PUCHAR Buffer,
  577. ULONG Count
  578. )
  579. {
  580. LONG Synch;
  581. __movsb(Register, Buffer, Count);
  582. InterlockedOr(&Synch, 1);
  583. return;
  584. }
  585. __forceinline
  586. VOID
  587. WRITE_REGISTER_BUFFER_USHORT (
  588. PUSHORT Register,
  589. PUSHORT Buffer,
  590. ULONG Count
  591. )
  592. {
  593. LONG Synch;
  594. __movsw(Register, Buffer, Count);
  595. InterlockedOr(&Synch, 1);
  596. return;
  597. }
  598. __forceinline
  599. VOID
  600. WRITE_REGISTER_BUFFER_ULONG (
  601. PULONG Register,
  602. PULONG Buffer,
  603. ULONG Count
  604. )
  605. {
  606. LONG Synch;
  607. __movsd(Register, Buffer, Count);
  608. InterlockedOr(&Synch, 1);
  609. return;
  610. }
  611. __forceinline
  612. UCHAR
  613. READ_PORT_UCHAR (
  614. PUCHAR Port
  615. )
  616. {
  617. return __inbyte((USHORT)((ULONG64)Port));
  618. }
  619. __forceinline
  620. USHORT
  621. READ_PORT_USHORT (
  622. PUSHORT Port
  623. )
  624. {
  625. return __inword((USHORT)((ULONG64)Port));
  626. }
  627. __forceinline
  628. ULONG
  629. READ_PORT_ULONG (
  630. PULONG Port
  631. )
  632. {
  633. return __indword((USHORT)((ULONG64)Port));
  634. }
  635. __forceinline
  636. VOID
  637. READ_PORT_BUFFER_UCHAR (
  638. PUCHAR Port,
  639. PUCHAR Buffer,
  640. ULONG Count
  641. )
  642. {
  643. __inbytestring((USHORT)((ULONG64)Port), Buffer, Count);
  644. return;
  645. }
  646. __forceinline
  647. VOID
  648. READ_PORT_BUFFER_USHORT (
  649. PUSHORT Port,
  650. PUSHORT Buffer,
  651. ULONG Count
  652. )
  653. {
  654. __inwordstring((USHORT)((ULONG64)Port), Buffer, Count);
  655. return;
  656. }
  657. __forceinline
  658. VOID
  659. READ_PORT_BUFFER_ULONG (
  660. PULONG Port,
  661. PULONG Buffer,
  662. ULONG Count
  663. )
  664. {
  665. __indwordstring((USHORT)((ULONG64)Port), Buffer, Count);
  666. return;
  667. }
  668. __forceinline
  669. VOID
  670. WRITE_PORT_UCHAR (
  671. PUCHAR Port,
  672. UCHAR Value
  673. )
  674. {
  675. __outbyte((USHORT)((ULONG64)Port), Value);
  676. return;
  677. }
  678. __forceinline
  679. VOID
  680. WRITE_PORT_USHORT (
  681. PUSHORT Port,
  682. USHORT Value
  683. )
  684. {
  685. __outword((USHORT)((ULONG64)Port), Value);
  686. return;
  687. }
  688. __forceinline
  689. VOID
  690. WRITE_PORT_ULONG (
  691. PULONG Port,
  692. ULONG Value
  693. )
  694. {
  695. __outdword((USHORT)((ULONG64)Port), Value);
  696. return;
  697. }
  698. __forceinline
  699. VOID
  700. WRITE_PORT_BUFFER_UCHAR (
  701. PUCHAR Port,
  702. PUCHAR Buffer,
  703. ULONG Count
  704. )
  705. {
  706. __outbytestring((USHORT)((ULONG64)Port), Buffer, Count);
  707. return;
  708. }
  709. __forceinline
  710. VOID
  711. WRITE_PORT_BUFFER_USHORT (
  712. PUSHORT Port,
  713. PUSHORT Buffer,
  714. ULONG Count
  715. )
  716. {
  717. __outwordstring((USHORT)((ULONG64)Port), Buffer, Count);
  718. return;
  719. }
  720. __forceinline
  721. VOID
  722. WRITE_PORT_BUFFER_ULONG (
  723. PULONG Port,
  724. PULONG Buffer,
  725. ULONG Count
  726. )
  727. {
  728. __outdwordstring((USHORT)((ULONG64)Port), Buffer, Count);
  729. return;
  730. }
  731. // end_ntndis
  732. //
  733. // Get data cache fill size.
  734. //
  735. #if PRAGMA_DEPRECATED_DDK
  736. #pragma deprecated(KeGetDcacheFillSize) // Use GetDmaAlignment
  737. #endif
  738. #define KeGetDcacheFillSize() 1L
  739. // end_ntddk end_wdm end_nthal end_ntosp
  740. //
  741. // Fill TB entry and flush single TB entry.
  742. //
  743. #define KeFillEntryTb(Pte, Virtual, Invalid) \
  744. if (Invalid != FALSE) { \
  745. InvalidatePage(Virtual); \
  746. }
  747. // begin_nthal
  748. #if !defined(_NTHAL_) && !defined(RC_INVOKED) && !defined(MIDL_PASS)
  749. __forceinline
  750. VOID
  751. KeFlushCurrentTb (
  752. VOID
  753. )
  754. {
  755. ULONG64 Cr4;
  756. Cr4 = ReadCR4();
  757. WriteCR4(Cr4 & ~CR4_PGE);
  758. WriteCR4(Cr4 | CR4_PGE);
  759. return;
  760. }
  761. #else
  762. NTKERNELAPI
  763. VOID
  764. KeFlushCurrentTb (
  765. VOID
  766. );
  767. #endif
  768. // end_nthal
  769. #define KiFlushSingleTb(Invalid, Virtual) InvalidatePage(Virtual)
  770. //
  771. // Data cache, instruction cache, I/O buffer, and write buffer flush routine
  772. // prototypes.
  773. //
  774. // AMD64 has transparent caches, so these are noops.
  775. #define KeSweepDcache(AllProcessors)
  776. #define KeSweepCurrentDcache()
  777. #define KeSweepIcache(AllProcessors)
  778. #define KeSweepCurrentIcache()
  779. #define KeSweepIcacheRange(AllProcessors, BaseAddress, Length)
  780. // begin_ntddk begin_wdm begin_nthal begin_ntndis begin_ntosp
  781. #define KeFlushIoBuffers(Mdl, ReadOperation, DmaOperation)
  782. // end_ntddk end_wdm end_ntndis end_ntosp
  783. #define KeYieldProcessor()
  784. // end_nthal
  785. //
  786. // Define executive macros for acquiring and releasing executive spinlocks.
  787. // These macros can ONLY be used by executive components and NOT by drivers.
  788. // Drivers MUST use the kernel interfaces since they must be MP enabled on
  789. // all systems.
  790. //
  791. #if defined(NT_UP) && !DBG && !defined(_NTDDK_) && !defined(_NTIFS_)
  792. #if !defined(_NTDRIVER_)
  793. #define ExAcquireSpinLock(Lock, OldIrql) (*OldIrql) = KeRaiseIrqlToDpcLevel();
  794. #define ExReleaseSpinLock(Lock, OldIrql) KeLowerIrql((OldIrql))
  795. #else
  796. #define ExAcquireSpinLock(Lock, OldIrql) KeAcquireSpinLock((Lock), (OldIrql))
  797. #define ExReleaseSpinLock(Lock, OldIrql) KeReleaseSpinLock((Lock), (OldIrql))
  798. #endif
  799. #define ExAcquireSpinLockAtDpcLevel(Lock)
  800. #define ExReleaseSpinLockFromDpcLevel(Lock)
  801. #else
  802. // begin_wdm begin_ntddk begin_ntosp
  803. #define ExAcquireSpinLock(Lock, OldIrql) KeAcquireSpinLock((Lock), (OldIrql))
  804. #define ExReleaseSpinLock(Lock, OldIrql) KeReleaseSpinLock((Lock), (OldIrql))
  805. #define ExAcquireSpinLockAtDpcLevel(Lock) KeAcquireSpinLockAtDpcLevel(Lock)
  806. #define ExReleaseSpinLockFromDpcLevel(Lock) KeReleaseSpinLockFromDpcLevel(Lock)
  807. // end_wdm end_ntddk end_ntosp
  808. #endif
  809. // begin_nthal
  810. //
  811. // The acquire and release fast lock macros disable and enable interrupts
  812. // on UP nondebug systems. On MP or debug systems, the spinlock routines
  813. // are used.
  814. //
  815. // N.B. Extreme caution should be observed when using these routines.
  816. //
  817. #if defined(_M_AMD64) && !defined(USER_MODE_CODE)
  818. VOID
  819. _disable (
  820. VOID
  821. );
  822. VOID
  823. _enable (
  824. VOID
  825. );
  826. #pragma warning(push)
  827. #pragma warning(disable:4164)
  828. #pragma intrinsic(_disable)
  829. #pragma intrinsic(_enable)
  830. #pragma warning(pop)
  831. #endif
  832. // end_nthal
  833. #if defined(NT_UP) && !DBG && !defined(USER_MODE_CODE)
  834. #define ExAcquireFastLock(Lock, OldIrql) _disable()
  835. #else
  836. #define ExAcquireFastLock(Lock, OldIrql) \
  837. ExAcquireSpinLock(Lock, OldIrql)
  838. #endif
  839. #if defined(NT_UP) && !DBG && !defined(USER_MODE_CODE)
  840. #define ExReleaseFastLock(Lock, OldIrql) _enable()
  841. #else
  842. #define ExReleaseFastLock(Lock, OldIrql) \
  843. ExReleaseSpinLock(Lock, OldIrql)
  844. #endif
  845. //
  846. // The following function prototypes must be in this module so that the
  847. // above macros can call them directly.
  848. //
  849. // begin_nthal
  850. #if defined(NT_UP)
  851. #define KiAcquireSpinLock(SpinLock)
  852. #define KiReleaseSpinLock(SpinLock)
  853. #else
  854. #define KiAcquireSpinLock(SpinLock) KeAcquireSpinLockAtDpcLevel(SpinLock)
  855. #define KiReleaseSpinLock(SpinLock) KeReleaseSpinLockFromDpcLevel(SpinLock)
  856. #endif // defined(NT_UP)
  857. //
  858. // KeTestSpinLock may be used to spin at low IRQL until the lock is
  859. // available. The IRQL must then be raised and the lock acquired with
  860. // KeTryToAcquireSpinLock. If that fails, lower the IRQL and start again.
  861. //
  862. #if defined(NT_UP)
  863. #define KeTestSpinLock(SpinLock) (TRUE)
  864. #else
  865. BOOLEAN
  866. KeTestSpinLock (
  867. IN PKSPIN_LOCK SpinLock
  868. );
  869. #endif
  870. // end_nthal
  871. //
  872. // Define query tick count macro.
  873. //
  874. // begin_ntddk begin_nthal begin_ntosp
  875. #if defined(_NTDRIVER_) || defined(_NTDDK_) || defined(_NTIFS_)
  876. // begin_wdm
  877. #define KeQueryTickCount(CurrentCount ) \
  878. *(PULONG64)(CurrentCount) = **((volatile ULONG64 **)(&KeTickCount));
  879. // end_wdm
  880. #else
  881. // end_ntddk end_nthal end_ntosp
  882. #define KiQueryTickCount(CurrentCount) \
  883. *(PULONG64)(CurrentCount) = KeTickCount.QuadPart;
  884. // begin_ntddk begin_nthal begin_ntosp
  885. VOID
  886. KeQueryTickCount (
  887. OUT PLARGE_INTEGER CurrentCount
  888. );
  889. #endif // defined(_NTDRIVER_) || defined(_NTDDK_) || defined(_NTIFS_)
  890. // end_ntddk end_nthal end_ntosp
  891. BOOLEAN
  892. KiEmulateReference (
  893. IN OUT PEXCEPTION_RECORD ExceptionRecord,
  894. IN OUT struct _KEXCEPTION_FRAME *ExceptionFrame,
  895. IN OUT struct _KTRAP_FRAME *TrapFrame
  896. );
  897. // begin_nthal begin_ntosp
  898. //
  899. // AMD64 hardware structures
  900. //
  901. // A Page Table Entry on an AMD64 has the following definition.
  902. //
  903. #define _HARDWARE_PTE_WORKING_SET_BITS 11
  904. typedef struct _HARDWARE_PTE {
  905. ULONG64 Valid : 1;
  906. ULONG64 Write : 1; // UP version
  907. ULONG64 Owner : 1;
  908. ULONG64 WriteThrough : 1;
  909. ULONG64 CacheDisable : 1;
  910. ULONG64 Accessed : 1;
  911. ULONG64 Dirty : 1;
  912. ULONG64 LargePage : 1;
  913. ULONG64 Global : 1;
  914. ULONG64 CopyOnWrite : 1; // software field
  915. ULONG64 Prototype : 1; // software field
  916. ULONG64 reserved0 : 1; // software field
  917. ULONG64 PageFrameNumber : 28;
  918. ULONG64 reserved1 : 24 - (_HARDWARE_PTE_WORKING_SET_BITS+1);
  919. ULONG64 SoftwareWsIndex : _HARDWARE_PTE_WORKING_SET_BITS;
  920. ULONG64 NoExecute : 1;
  921. } HARDWARE_PTE, *PHARDWARE_PTE;
  922. //
  923. // Define macro to initialize directory table base.
  924. //
  925. #define INITIALIZE_DIRECTORY_TABLE_BASE(dirbase,pfn) \
  926. *((PULONG64)(dirbase)) = (((ULONG64)(pfn)) << PAGE_SHIFT)
  927. //
  928. // Define Global Descriptor Table (GDT) entry structure and constants.
  929. //
  930. // Define descriptor type codes.
  931. //
  932. #define TYPE_CODE 0x1A // 11010 = code, read only
  933. #define TYPE_DATA 0x12 // 10010 = data, read and write
  934. #define TYPE_TSS64 0x09 // 01001 = task state segment
  935. //
  936. // Define descriptor privilege levels for user and system.
  937. //
  938. #define DPL_USER 3
  939. #define DPL_SYSTEM 0
  940. //
  941. // Define limit granularity.
  942. //
  943. #define GRANULARITY_BYTE 0
  944. #define GRANULARITY_PAGE 1
  945. #define SELECTOR_TABLE_INDEX 0x04
  946. typedef union _KGDTENTRY64 {
  947. struct {
  948. USHORT LimitLow;
  949. USHORT BaseLow;
  950. union {
  951. struct {
  952. UCHAR BaseMiddle;
  953. UCHAR Flags1;
  954. UCHAR Flags2;
  955. UCHAR BaseHigh;
  956. } Bytes;
  957. struct {
  958. ULONG BaseMiddle : 8;
  959. ULONG Type : 5;
  960. ULONG Dpl : 2;
  961. ULONG Present : 1;
  962. ULONG LimitHigh : 4;
  963. ULONG System : 1;
  964. ULONG LongMode : 1;
  965. ULONG DefaultBig : 1;
  966. ULONG Granularity : 1;
  967. ULONG BaseHigh : 8;
  968. } Bits;
  969. };
  970. ULONG BaseUpper;
  971. ULONG MustBeZero;
  972. };
  973. ULONG64 Alignment;
  974. } KGDTENTRY64, *PKGDTENTRY64;
  975. //
  976. // Define Interrupt Descriptor Table (IDT) entry structure and constants.
  977. //
  978. typedef union _KIDTENTRY64 {
  979. struct {
  980. USHORT OffsetLow;
  981. USHORT Selector;
  982. USHORT IstIndex : 3;
  983. USHORT Reserved0 : 5;
  984. USHORT Type : 5;
  985. USHORT Dpl : 2;
  986. USHORT Present : 1;
  987. USHORT OffsetMiddle;
  988. ULONG OffsetHigh;
  989. ULONG Reserved1;
  990. };
  991. ULONG64 Alignment;
  992. } KIDTENTRY64, *PKIDTENTRY64;
  993. //
  994. // Define two union definitions used for parsing addresses into the
  995. // component fields required by a GDT.
  996. //
  997. typedef union _KGDT_BASE {
  998. struct {
  999. USHORT BaseLow;
  1000. UCHAR BaseMiddle;
  1001. UCHAR BaseHigh;
  1002. ULONG BaseUpper;
  1003. };
  1004. ULONG64 Base;
  1005. } KGDT_BASE, *PKGDT_BASE;
  1006. C_ASSERT(sizeof(KGDT_BASE) == sizeof(ULONG64));
  1007. typedef union _KGDT_LIMIT {
  1008. struct {
  1009. USHORT LimitLow;
  1010. USHORT LimitHigh : 4;
  1011. USHORT MustBeZero : 12;
  1012. };
  1013. ULONG Limit;
  1014. } KGDT_LIMIT, *PKGDT_LIMIT;
  1015. C_ASSERT(sizeof(KGDT_LIMIT) == sizeof(ULONG));
  1016. //
  1017. // Define Task State Segment (TSS) structure and constants.
  1018. //
  1019. // Task switches are not supported by the AMD64, but a task state segment
  1020. // must be present to define the kernel stack pointer and I/O map base.
  1021. //
  1022. // N.B. This structure is misaligned as per the AMD64 specification.
  1023. //
  1024. // N.B. The size of TSS must be <= 0xDFFF.
  1025. //
  1026. #define IOPM_SIZE 8192
  1027. typedef UCHAR KIO_ACCESS_MAP[IOPM_SIZE];
  1028. typedef KIO_ACCESS_MAP *PKIO_ACCESS_MAP;
  1029. #pragma pack(push, 4)
  1030. typedef struct _KTSS64 {
  1031. ULONG Reserved0;
  1032. ULONG64 Rsp0;
  1033. ULONG64 Rsp1;
  1034. ULONG64 Rsp2;
  1035. //
  1036. // Element 0 of the Ist is reserved
  1037. //
  1038. ULONG64 Ist[8];
  1039. ULONG64 Reserved1;
  1040. USHORT IoMapBase;
  1041. KIO_ACCESS_MAP IoMap;
  1042. ULONG IoMapEnd;
  1043. ULONG Reserved2;
  1044. } KTSS64, *PKTSS64;
  1045. #pragma pack(pop)
  1046. C_ASSERT((sizeof(KTSS64) % sizeof(PVOID)) == 0);
  1047. #define TSS_IST_RESERVED 0
  1048. #define TSS_IST_PANIC 1
  1049. #define TSS_IST_MCA 2
  1050. #define IO_ACCESS_MAP_NONE FALSE
  1051. #define KiComputeIopmOffset(Enable) \
  1052. ((Enable == FALSE) ? \
  1053. (USHORT)(sizeof(KTSS64)) : (USHORT)(FIELD_OFFSET(KTSS64, IoMap[0])))
  1054. // begin_windbgkd
  1055. #if defined(_AMD64_)
  1056. //
  1057. // Define pseudo descriptor structures for both 64- and 32-bit mode.
  1058. //
  1059. typedef struct _KDESCRIPTOR {
  1060. USHORT Pad[3];
  1061. USHORT Limit;
  1062. PVOID Base;
  1063. } KDESCRIPTOR, *PKDESCRIPTOR;
  1064. typedef struct _KDESCRIPTOR32 {
  1065. USHORT Pad[3];
  1066. USHORT Limit;
  1067. ULONG Base;
  1068. } KDESCRIPTOR32, *PKDESCRIPTOR32;
  1069. //
  1070. // Define special kernel registers and the initial MXCSR value.
  1071. //
  1072. typedef struct _KSPECIAL_REGISTERS {
  1073. ULONG64 Cr0;
  1074. ULONG64 Cr2;
  1075. ULONG64 Cr3;
  1076. ULONG64 Cr4;
  1077. ULONG64 KernelDr0;
  1078. ULONG64 KernelDr1;
  1079. ULONG64 KernelDr2;
  1080. ULONG64 KernelDr3;
  1081. ULONG64 KernelDr6;
  1082. ULONG64 KernelDr7;
  1083. KDESCRIPTOR Gdtr;
  1084. KDESCRIPTOR Idtr;
  1085. USHORT Tr;
  1086. USHORT Ldtr;
  1087. ULONG MxCsr;
  1088. } KSPECIAL_REGISTERS, *PKSPECIAL_REGISTERS;
  1089. //
  1090. // Define processor state structure.
  1091. //
  1092. typedef struct _KPROCESSOR_STATE {
  1093. KSPECIAL_REGISTERS SpecialRegisters;
  1094. CONTEXT ContextFrame;
  1095. } KPROCESSOR_STATE, *PKPROCESSOR_STATE;
  1096. #endif // _AMD64_
  1097. // end_windbgkd
  1098. //
  1099. // Processor Control Block (PRCB)
  1100. //
  1101. #define PRCB_MAJOR_VERSION 1
  1102. #define PRCB_MINOR_VERSION 1
  1103. #define PRCB_BUILD_DEBUG 0x1
  1104. #define PRCB_BUILD_UNIPROCESSOR 0x2
typedef struct _KPRCB {
//
// Start of the architecturally defined section of the PRCB. This section
// may be directly addressed by vendor/platform specific HAL code and will
// not change from version to version of NT.
//
USHORT MinorVersion;
USHORT MajorVersion;
CCHAR Number;
CCHAR Reserved;
USHORT BuildType;
struct _KTHREAD *CurrentThread;
struct _KTHREAD *NextThread;
struct _KTHREAD *IdleThread;
KAFFINITY SetMember;
KAFFINITY NotSetMember;
KPROCESSOR_STATE ProcessorState;
CCHAR CpuType;
CCHAR CpuID;
USHORT CpuStep;
ULONG KernelReserved[16];
ULONG HalReserved[16];
UCHAR PrcbPad0[88 + 112];
//
// End of the architecturally defined section of the PRCB.
//
// end_nthal end_ntosp
//
// Numbered queued spin locks - 128-byte aligned.
//
KSPIN_LOCK_QUEUE LockQueue[16];
UCHAR PrcbPad1[16];
//
// Nonpaged per processor lookaside lists - 128-byte aligned.
//
PP_LOOKASIDE_LIST PPLookasideList[16];
//
// Nonpaged per processor small pool lookaside lists - 128-byte aligned.
//
PP_LOOKASIDE_LIST PPNPagedLookasideList[POOL_SMALL_LISTS];
//
// Paged per processor small pool lookaside lists.
//
PP_LOOKASIDE_LIST PPPagedLookasideList[POOL_SMALL_LISTS];
//
// MP interprocessor request packet barrier - 128-byte aligned.
//
volatile ULONG PacketBarrier;
UCHAR PrcbPad2[124];
//
// MP interprocessor request packet and summary - 128-byte aligned.
//
volatile PVOID CurrentPacket[3];
volatile KAFFINITY TargetSet;
volatile PKIPI_WORKER WorkerRoutine;
volatile ULONG IpiFrozen;
UCHAR PrcbPad3[84];
//
// MP interprocessor request summary and packet address - 128-byte aligned.
//
// N.B. Request summary includes the request summary mask as well as the
// request packet. The address occupies the upper 48-bits and the mask
// the lower 16-bits
//
#define IPI_PACKET_SHIFT 16
volatile LONG64 RequestSummary;
UCHAR PrcbPad4[120];
//
// DPC listhead, counts, and batching parameters - 128-byte aligned.
//
LIST_ENTRY DpcListHead;
PVOID DpcStack;
PVOID SavedRsp;
ULONG DpcCount;
volatile ULONG DpcQueueDepth;
volatile LOGICAL DpcRoutineActive;
volatile LOGICAL DpcInterruptRequested;
ULONG DpcLastCount;
ULONG DpcRequestRate;
ULONG MaximumDpcQueueDepth;
ULONG MinimumDpcRate;
ULONG QuantumEnd;
UCHAR PrcbPad5[60];
//
// DPC list lock - 128-byte aligned.
//
KSPIN_LOCK DpcLock;
UCHAR PrcbPad6[120];
//
// Miscellaneous counters - 128-byte aligned.
//
ULONG InterruptCount;
ULONG KernelTime;
ULONG UserTime;
ULONG DpcTime;
ULONG InterruptTime;
ULONG AdjustDpcThreshold;
ULONG PageColor;
LOGICAL SkipTick;
ULONG TimerHand;
struct _KNODE * ParentNode;
KAFFINITY MultiThreadProcessorSet;
ULONG ThreadStartCount[2];
UCHAR PrcbPad7[64];
//
// Performance counters - 128-byte aligned.
//
// Cache manager performance counters.
//
ULONG CcFastReadNoWait;
ULONG CcFastReadWait;
ULONG CcFastReadNotPossible;
ULONG CcCopyReadNoWait;
ULONG CcCopyReadWait;
ULONG CcCopyReadNoWaitMiss;
//
// Kernel performance counters.
//
ULONG KeAlignmentFixupCount;
ULONG KeContextSwitches;
ULONG KeDcacheFlushCount;
ULONG KeExceptionDispatchCount;
ULONG KeFirstLevelTbFills;
ULONG KeFloatingEmulationCount;
ULONG KeIcacheFlushCount;
ULONG KeSecondLevelTbFills;
ULONG KeSystemCalls;
ULONG SpareCounter0[1];
//
// I/O IRP float.
//
LONG LookasideIrpFloat;
//
// Processor information.
//
UCHAR VendorString[13];
UCHAR InitialApicId;
UCHAR LogicalProcessorsPerPhysicalProcessor;
ULONG MHz;
ULONG FeatureBits;
LARGE_INTEGER UpdateSignature;
//
// Processors power state
//
PROCESSOR_POWER_STATE PowerState;
// begin_nthal begin_ntosp
} KPRCB, *PKPRCB, *RESTRICTED_POINTER PRKPRCB;
  1252. // end_nthal end_ntosp
  1253. #if !defined(_X86AMD64_)
  1254. C_ASSERT(((FIELD_OFFSET(KPRCB, LockQueue) + 16) & (128 - 1)) == 0);
  1255. C_ASSERT((FIELD_OFFSET(KPRCB, PPLookasideList) & (128 - 1)) == 0);
  1256. C_ASSERT((FIELD_OFFSET(KPRCB, PPNPagedLookasideList) & (128 - 1)) == 0);
  1257. C_ASSERT((FIELD_OFFSET(KPRCB, PacketBarrier) & (128 - 1)) == 0);
  1258. C_ASSERT((FIELD_OFFSET(KPRCB, RequestSummary) & (128 - 1)) == 0);
  1259. C_ASSERT((FIELD_OFFSET(KPRCB, DpcListHead) & (128 - 1)) == 0);
  1260. C_ASSERT((FIELD_OFFSET(KPRCB, DpcLock) & (128 - 1)) == 0);
  1261. C_ASSERT((FIELD_OFFSET(KPRCB, InterruptCount) & (128 - 1)) == 0);
  1262. #endif
  1263. // begin_nthal begin_ntosp begin_ntddk
  1264. //
  1265. // Processor Control Region Structure Definition
  1266. //
  1267. #define PCR_MINOR_VERSION 1
  1268. #define PCR_MAJOR_VERSION 1
typedef struct _KPCR {
//
// Start of the architecturally defined section of the PCR. This section
// may be directly addressed by vendor/platform specific HAL code and will
// not change from version to version of NT.
//
NT_TIB NtTib;
struct _KPRCB *CurrentPrcb;
ULONG64 SavedRcx;
ULONG64 SavedR11;
KIRQL Irql;
UCHAR SecondLevelCacheAssociativity;
UCHAR Number;
UCHAR Fill0;
ULONG Irr;
ULONG IrrActive;
ULONG Idr;
USHORT MajorVersion;
USHORT MinorVersion;
ULONG StallScaleFactor;
union _KIDTENTRY64 *IdtBase;
union _KGDTENTRY64 *GdtBase;
struct _KTSS64 *TssBase;
// end_ntddk end_ntosp
ULONG KernelReserved[15];
ULONG SecondLevelCacheSize;
ULONG HalReserved[16];
ULONG MxCsr;
//
// N.B. The offset of KdVersionBlock is asserted to be 0x108 below; the
// debugger depends on it.
//
PVOID KdVersionBlock;
//
// Self points back to this KPCR and is read through GS (see KeGetPcr).
//
struct _KPCR *Self;
//
// End of the architecturally defined section of the PCR.
//
// end_nthal
//
ULONG PcrAlign1[24];
KPRCB Prcb;
// begin_nthal begin_ntddk begin_ntosp
} KPCR, *PKPCR;
  1308. // end_nthal end_ntddk end_ntosp
  1309. #if !defined (_X86AMD64_)
  1310. C_ASSERT((FIELD_OFFSET(KPCR, Prcb) & (128 - 1)) == 0);
  1311. //
  1312. // The offset of the DebuggerDataBlock must not change.
  1313. //
  1314. C_ASSERT(FIELD_OFFSET(KPCR, KdVersionBlock) == 0x108);
  1315. #endif
  1316. // begin_nthal begin_ntosp
  1317. //
  1318. // Define legacy floating status word bit masks.
  1319. //
  1320. #define FSW_INVALID_OPERATION 0x1
  1321. #define FSW_DENORMAL 0x2
  1322. #define FSW_ZERO_DIVIDE 0x4
  1323. #define FSW_OVERFLOW 0x8
  1324. #define FSW_UNDERFLOW 0x10
  1325. #define FSW_PRECISION 0x20
  1326. #define FSW_STACK_FAULT 0x40
  1327. #define FSW_CONDITION_CODE_0 0x100
  1328. #define FSW_CONDITION_CODE_1 0x200
  1329. #define FSW_CONDITION_CODE_2 0x400
  1330. #define FSW_CONDITION_CODE_3 0x4000
  1331. #define FSW_ERROR_MASK (FSW_INVALID_OPERATION | FSW_DENORMAL | \
  1332. FSW_ZERO_DIVIDE | FSW_OVERFLOW | FSW_UNDERFLOW | \
  1333. FSW_PRECISION | FSW_STACK_FAULT)
  1334. //
  1335. // Define MxCsr floating control/status word bit masks.
  1336. //
  1337. // No flush to zero, round to nearest, and all exception masked.
  1338. //
  1339. #define INITIAL_MXCSR 0x1f80 // initial MXCSR vlaue
  1340. #define XSW_INVALID_OPERATION 0x1
  1341. #define XSW_DENORMAL 0x2
  1342. #define XSW_ZERO_DIVIDE 0x4
  1343. #define XSW_OVERFLOW 0x8
  1344. #define XSW_UNDERFLOW 0x10
  1345. #define XSW_PRECISION 0x20
  1346. #define XSW_ERROR_MASK (XSW_INVALID_OPERATION | XSW_DENORMAL | \
  1347. XSW_ZERO_DIVIDE | XSW_OVERFLOW | XSW_UNDERFLOW | \
  1348. XSW_PRECISION)
  1349. #define XSW_ERROR_SHIFT 7
  1350. #define XCW_INVALID_OPERATION 0x80
  1351. #define XCW_DENORMAL 0x100
  1352. #define XCW_ZERO_DIVIDE 0x200
  1353. #define XCW_OVERFLOW 0x400
  1354. #define XCW_UNDERFLOW 0x800
  1355. #define XCW_PRECISION 0x1000
  1356. #define XCW_ROUND_CONTROL 0x6000
  1357. #define XCW_FLUSH_ZERO 0x8000
  1358. //
  1359. // Define EFLAG bit masks and shift offsets.
  1360. //
  1361. #define EFLAGS_CF_MASK 0x00000001 // carry flag
  1362. #define EFLAGS_PF_MASK 0x00000004 // parity flag
  1363. #define EFALGS_AF_MASK 0x00000010 // auxiliary carry flag
  1364. #define EFLAGS_ZF_MASK 0x00000040 // zero flag
  1365. #define EFLAGS_SF_MASK 0x00000080 // sign flag
  1366. #define EFLAGS_TF_MASK 0x00000100 // trap flag
  1367. #define EFLAGS_IF_MASK 0x00000200 // interrupt flag
  1368. #define EFLAGS_DF_MASK 0x00000400 // direction flag
  1369. #define EFLAGS_OF_MASK 0x00000800 // overflow flag
  1370. #define EFLAGS_IOPL_MASK 0x00003000 // I/O privilege level
  1371. #define EFLAGS_NT_MASK 0x00004000 // nested task
  1372. #define EFLAGS_RF_MASK 0x00010000 // resume flag
  1373. #define EFLAGS_VM_MASK 0x00020000 // virtual 8086 mode
  1374. #define EFLAGS_AC_MASK 0x00040000 // alignment check
  1375. #define EFLAGS_VIF_MASK 0x00080000 // virtual interrupt flag
  1376. #define EFLAGS_VIP_MASK 0x00100000 // virtual interrupt pending
  1377. #define EFLAGS_ID_MASK 0x00200000 // identification flag
  1378. #define EFLAGS_TF_SHIFT 8 // trap
  1379. #define EFLAGS_IF_SHIFT 9 // interrupt enable
  1380. // end_nthal
  1381. //
  1382. // Define sanitize EFLAGS macro.
  1383. //
  1384. // If kernel mode, then
  1385. // caller can specify Carry, Parity, AuxCarry, Zero, Sign, Trap,
  1386. // Interrupt, Direction, Overflow, Align Check, identification.
  1387. //
  1388. // If user mode, then
  1389. // caller can specify Carry, Parity, AuxCarry, Zero, Sign, Trap,
  1390. // Direction, Overflow, Align Check, and force Interrupt on.
  1391. //
  1392. #define EFLAGS_KERNEL_SANITIZE 0x00240fd5L
  1393. #define EFLAGS_USER_SANITIZE 0x00040dd5L
  1394. #define SANITIZE_EFLAGS(eFlags, mode) ( \
  1395. ((mode) == KernelMode ? \
  1396. ((eFlags) & EFLAGS_KERNEL_SANITIZE) : \
  1397. (((eFlags) & EFLAGS_USER_SANITIZE) | EFLAGS_IF_MASK)))
  1398. //
  1399. // Define sanitize debug register macros.
  1400. //
  1401. // Define control register settable bits and active mask.
  1402. //
  1403. #define DR7_LEGAL 0xffff0155
  1404. #define DR7_ACTIVE 0x00000055
  1405. //
  1406. // Define macro to sanitize the debug control register.
  1407. //
  1408. #define SANITIZE_DR7(Dr7, mode) ((Dr7 & DR7_LEGAL));
  1409. //
  1410. // Define macro to santitize debug address registers.
  1411. //
  1412. #define SANITIZE_DRADDR(DrReg, mode) \
  1413. ((mode) == KernelMode ? \
  1414. (DrReg) : \
  1415. (((PVOID)(DrReg) <= MM_HIGHEST_USER_ADDRESS) ? (DrReg) : 0)) \
  1416. //
  1417. // Define macro to clear reserved bits from MXCSR.
  1418. //
  1419. #define SANITIZE_MXCSR(_mxcsr_) ((_mxcsr_) & 0xffbf)
  1420. //
  1421. // Define macro to clear reserved bits for legacy FP control word.
  1422. //
  1423. #define SANITIZE_FCW(_fcw_) ((_fcw_) & 0x1f37)
  1424. // begin_nthal
  1425. //
  1426. // Exception frame
  1427. //
  1428. // This frame is established when handling an exception. It provides a place
  1429. // to save all nonvolatile registers. The volatile registers will already
  1430. // have been saved in a trap frame.
  1431. //
typedef struct _KEXCEPTION_FRAME {
//
// Home address for the parameter registers.
//
ULONG64 P1Home;
ULONG64 P2Home;
ULONG64 P3Home;
ULONG64 P4Home;
ULONG64 P5;
//
// Kernel callout initial stack value.
//
ULONG64 InitialStack;
//
// Saved nonvolatile floating registers.
//
M128 Xmm6;
M128 Xmm7;
M128 Xmm8;
M128 Xmm9;
M128 Xmm10;
M128 Xmm11;
M128 Xmm12;
M128 Xmm13;
M128 Xmm14;
M128 Xmm15;
//
// Kernel callout frame variables.
//
ULONG64 TrapFrame;
ULONG64 CallbackStack;
ULONG64 OutputBuffer;
ULONG64 OutputLength;
//
// Saved nonvolatile register - not always saved.
//
ULONG64 Fill1;
ULONG64 Rbp;
//
// Saved nonvolatile registers.
//
ULONG64 Rbx;
ULONG64 Rdi;
ULONG64 Rsi;
ULONG64 R12;
ULONG64 R13;
ULONG64 R14;
ULONG64 R15;
//
// EFLAGS and return address.
//
// N.B. Only the return address is stored here; EFLAGS lives in the trap
// frame (see KTRAP_FRAME.EFlags).
//
ULONG64 Return;
} KEXCEPTION_FRAME, *PKEXCEPTION_FRAME;
  1485. #define KEXCEPTION_FRAME_LENGTH sizeof(KEXCEPTION_FRAME)
  1486. C_ASSERT((sizeof(KEXCEPTION_FRAME) & STACK_ROUND) == 0);
  1487. #define EXCEPTION_RECORD_LENGTH \
  1488. ((sizeof(EXCEPTION_RECORD) + STACK_ROUND) & ~STACK_ROUND)
  1489. //
  1490. // Machine Frame
  1491. //
  1492. // This frame is established by code that trampolines to user mode (e.g. user
  1493. // APC, user callback, dispatch user exception, etc.). The purpose of this
  1494. // frame is to allow unwinding through these callbacks if an exception occurs.
  1495. //
  1496. // N.B. This frame is identical to the frame that is pushed for a trap without
  1497. // an error code and is identical to the hardware part of a trap frame.
  1498. //
typedef struct _MACHINE_FRAME {
ULONG64 Rip;
USHORT SegCs;
USHORT Fill1[3];
ULONG EFlags;
ULONG Fill2;
ULONG64 Rsp;
USHORT SegSs;
//
// Fill members pad each field group to its hardware-defined 8-byte slot.
//
USHORT Fill3[3];
} MACHINE_FRAME, *PMACHINE_FRAME;
  1509. #define MACHINE_FRAME_LENGTH sizeof(MACHINE_FRAME)
  1510. C_ASSERT((sizeof(MACHINE_FRAME) & STACK_ROUND) == 8);
  1511. //
  1512. // Switch Frame
  1513. //
  1514. // This frame is established by the code that switches context from one
  1515. // thread to the next and is used by the thread initialization code to
  1516. // construct a stack that will start the execution of a thread in the
  1517. // thread start up code.
  1518. //
typedef struct _KSWITCH_FRAME {
ULONG64 Fill0;
// Saved floating control/status word for the outgoing thread.
ULONG MxCsr;
// IRQL value used to decide whether APC delivery is bypassed on resume.
KIRQL ApcBypass;
// TRUE if the legacy NPX (x87) state was saved for this switch.
BOOLEAN NpxSave;
UCHAR Fill1[2];
ULONG64 Rbp;
ULONG64 Return;
} KSWITCH_FRAME, *PKSWITCH_FRAME;
  1528. #define KSWITCH_FRAME_LENGTH sizeof(KSWITCH_FRAME)
  1529. C_ASSERT((sizeof(KSWITCH_FRAME) & STACK_ROUND) == 0);
  1530. //
  1531. // Trap frame
  1532. //
  1533. // This frame is established when handling a trap. It provides a place to
  1534. // save all volatile registers. The nonvolatile registers are saved in an
  1535. // exception frame or through the normal C calling conventions for saved
  1536. // registers.
  1537. //
typedef struct _KTRAP_FRAME {
//
// Home address for the parameter registers.
//
ULONG64 P1Home;
ULONG64 P2Home;
ULONG64 P3Home;
ULONG64 P4Home;
ULONG64 P5;
//
// Previous processor mode (system services only) and previous IRQL
// (interrupts only).
//
KPROCESSOR_MODE PreviousMode;
KIRQL PreviousIrql;
UCHAR Fill0[2];
//
// Floating point state.
//
ULONG MxCsr;
//
// Volatile registers.
//
// N.B. These registers are only saved on exceptions and interrupts. They
// are not saved for system calls.
//
ULONG64 Rax;
ULONG64 Rcx;
ULONG64 Rdx;
ULONG64 R8;
ULONG64 R9;
ULONG64 R10;
ULONG64 R11;
ULONG64 Spare0;
//
// Volatile floating registers.
//
// N.B. These registers are only saved on exceptions and interrupts. They
// are not saved for system calls.
//
M128 Xmm0;
M128 Xmm1;
M128 Xmm2;
M128 Xmm3;
M128 Xmm4;
M128 Xmm5;
//
// Debug registers.
//
ULONG64 Dr0;
ULONG64 Dr1;
ULONG64 Dr2;
ULONG64 Dr3;
ULONG64 Dr6;
ULONG64 Dr7;
//
// Segment registers
//
USHORT SegDs;
USHORT SegEs;
USHORT SegFs;
USHORT SegGs;
//
// Previous trap frame address.
//
ULONG64 TrapFrame;
//
// Exception record for exceptions.
//
UCHAR ExceptionRecord[(sizeof(EXCEPTION_RECORD) + 15) & (~15)];
//
// Saved nonvolatile registers RBX, RDI and RSI. These registers are only
// saved in system service trap frames.
//
ULONG64 Rbx;
ULONG64 Rdi;
ULONG64 Rsi;
//
// Saved nonvolatile register RBP. This register is used as a frame
// pointer during trap processing and is saved in all trap frames.
//
ULONG64 Rbp;
//
// Information pushed by hardware.
//
// N.B. The error code is not always pushed by hardware. For those cases
// where it is not pushed by hardware a dummy error code is allocated
// on the stack.
//
// N.B. The tail of this structure (Rip through Fill3) matches the
// hardware interrupt frame layout and MACHINE_FRAME above.
//
ULONG64 ErrorCode;
ULONG64 Rip;
USHORT SegCs;
USHORT Fill1[3];
ULONG EFlags;
ULONG Fill2;
ULONG64 Rsp;
USHORT SegSs;
USHORT Fill3[3];
} KTRAP_FRAME, *PKTRAP_FRAME;
  1637. #define KTRAP_FRAME_LENGTH sizeof(KTRAP_FRAME)
  1638. C_ASSERT((sizeof(KTRAP_FRAME) & STACK_ROUND) == 0);
  1639. //
  1640. // IPI, profile, update run time, and update system time interrupt routines.
  1641. //
  1642. NTKERNELAPI
  1643. VOID
  1644. KeIpiInterrupt (
  1645. IN PKTRAP_FRAME TrapFrame
  1646. );
  1647. NTKERNELAPI
  1648. VOID
  1649. KeProfileInterruptWithSource (
  1650. IN PKTRAP_FRAME TrapFrame,
  1651. IN KPROFILE_SOURCE ProfileSource
  1652. );
  1653. NTKERNELAPI
  1654. VOID
  1655. KeUpdateRunTime (
  1656. IN PKTRAP_FRAME TrapFrame
  1657. );
  1658. NTKERNELAPI
  1659. VOID
  1660. KeUpdateSystemTime (
  1661. IN PKTRAP_FRAME TrapFrame,
  1662. IN ULONG64 Increment
  1663. );
  1664. // end_nthal
  1665. //
  1666. // The frame saved by the call out to user mode code is defined here to allow
  1667. // the kernel debugger to trace the entire kernel stack when user mode callouts
  1668. // are active.
  1669. //
  1670. // N.B. The kernel callout frame is the same as an exception frame.
  1671. //
  1672. typedef KEXCEPTION_FRAME KCALLOUT_FRAME;
  1673. typedef PKEXCEPTION_FRAME PKCALLOUT_FRAME;
//
// User callout frame: parameter homes, the callback buffer description,
// and a machine frame that allows unwinding through the user callout.
//
typedef struct _UCALLOUT_FRAME {
ULONG64 P1Home;
ULONG64 P2Home;
ULONG64 P3Home;
ULONG64 P4Home;
PVOID Buffer;
ULONG Length;
ULONG ApiNumber;
MACHINE_FRAME MachineFrame;
} UCALLOUT_FRAME, *PUCALLOUT_FRAME;
  1684. #define UCALLOUT_FRAME_LENGTH sizeof(UCALLOUT_FRAME)
  1685. C_ASSERT((sizeof(UCALLOUT_FRAME) & STACK_ROUND) == 8);
  1686. // begin_ntddk begin_wdm
  1687. //
  1688. // The nonvolatile floating state
  1689. //
//
// Saved floating state for KeSaveFloatingPointState /
// KeRestoreFloatingPointState; on AMD64 only MXCSR needs saving.
//
typedef struct _KFLOATING_SAVE {
ULONG MxCsr;
} KFLOATING_SAVE, *PKFLOATING_SAVE;
  1693. // end_ntddk end_wdm end_ntosp
  1694. //
  1695. // Define profile values.
  1696. //
  1697. #define DEFAULT_PROFILE_INTERVAL 39063
  1698. //
// The minimum acceptable profiling interval is set to 1221, which is the
// fastest RTC clock rate available. If this value is too small, the
// system will run very slowly.
  1702. //
  1703. #define MINIMUM_PROFILE_INTERVAL 1221
  1704. // begin_ntddk begin_wdm begin_nthal begin_ntndis begin_ntosp
  1705. //
  1706. // AMD64 Specific portions of mm component.
  1707. //
  1708. // Define the page size for the AMD64 as 4096 (0x1000).
  1709. //
  1710. #define PAGE_SIZE 0x1000
  1711. //
  1712. // Define the number of trailing zeroes in a page aligned virtual address.
  1713. // This is used as the shift count when shifting virtual addresses to
  1714. // virtual page numbers.
  1715. //
  1716. #define PAGE_SHIFT 12L
  1717. // end_ntndis end_wdm
  1718. #define PXE_BASE 0xFFFFF6FB7DBED000UI64
  1719. #define PXE_SELFMAP 0xFFFFF6FB7DBEDF68UI64
  1720. #define PPE_BASE 0xFFFFF6FB7DA00000UI64
  1721. #define PDE_BASE 0xFFFFF6FB40000000UI64
  1722. #define PTE_BASE 0xFFFFF68000000000UI64
  1723. #define PXE_TOP 0xFFFFF6FB7DBEDFFFUI64
  1724. #define PPE_TOP 0xFFFFF6FB7DBFFFFFUI64
  1725. #define PDE_TOP 0xFFFFF6FB7FFFFFFFUI64
  1726. #define PTE_TOP 0xFFFFF6FFFFFFFFFFUI64
  1727. #define PDE_KTBASE_AMD64 PPE_BASE
  1728. #define PTI_SHIFT 12
  1729. #define PDI_SHIFT 21
  1730. #define PPI_SHIFT 30
  1731. #define PXI_SHIFT 39
  1732. #define PTE_PER_PAGE 512
  1733. #define PDE_PER_PAGE 512
  1734. #define PPE_PER_PAGE 512
  1735. #define PXE_PER_PAGE 512
  1736. #define PTI_MASK_AMD64 (PTE_PER_PAGE - 1)
  1737. #define PDI_MASK_AMD64 (PDE_PER_PAGE - 1)
  1738. #define PPI_MASK (PPE_PER_PAGE - 1)
  1739. #define PXI_MASK (PXE_PER_PAGE - 1)
  1740. //
  1741. // Define the highest user address and user probe address.
  1742. //
  1743. // end_ntddk end_nthal end_ntosp
  1744. #if defined(_NTDRIVER_) || defined(_NTDDK_) || defined(_NTIFS_) || defined(_NTHAL_)
  1745. // begin_ntddk begin_nthal begin_ntosp
  1746. extern PVOID *MmHighestUserAddress;
  1747. extern PVOID *MmSystemRangeStart;
  1748. extern ULONG64 *MmUserProbeAddress;
  1749. #define MM_HIGHEST_USER_ADDRESS *MmHighestUserAddress
  1750. #define MM_SYSTEM_RANGE_START *MmSystemRangeStart
  1751. #define MM_USER_PROBE_ADDRESS *MmUserProbeAddress
  1752. // end_ntddk end_nthal end_ntosp
  1753. #else
  1754. extern PVOID MmHighestUserAddress;
  1755. extern PVOID MmSystemRangeStart;
  1756. extern ULONG64 MmUserProbeAddress;
  1757. #define MM_HIGHEST_USER_ADDRESS MmHighestUserAddress
  1758. #define MM_SYSTEM_RANGE_START MmSystemRangeStart
  1759. #define MM_USER_PROBE_ADDRESS MmUserProbeAddress
  1760. #define MI_HIGHEST_USER_ADDRESS (PVOID) (ULONG_PTR)((0x80000000000 - 0x10000 - 1)) // highest user address
  1761. #define MI_SYSTEM_RANGE_START (PVOID)(0xFFFF080000000000) // start of system space
  1762. #define MI_USER_PROBE_ADDRESS ((ULONG_PTR)(0x80000000000UI64 - 0x10000)) // starting address of guard page
  1763. #endif
  1764. // begin_nthal
  1765. //
  1766. // 4MB at the top of VA space is reserved for the HAL's use.
  1767. //
  1768. #define HAL_VA_START 0xFFFFFFFFFFC00000UI64
  1769. #define HAL_VA_SIZE (4 * 1024 * 1024)
  1770. // end_nthal
  1771. // begin_ntddk begin_nthal begin_ntosp
  1772. //
  1773. // The lowest user address reserves the low 64k.
  1774. //
  1775. #define MM_LOWEST_USER_ADDRESS (PVOID)0x10000
  1776. //
  1777. // The lowest address for system space.
  1778. //
  1779. #define MM_LOWEST_SYSTEM_ADDRESS (PVOID)0xFFFF080000000000
  1780. // begin_wdm
  1781. #define MmGetProcedureAddress(Address) (Address)
  1782. #define MmLockPagableCodeSection(Address) MmLockPagableDataSection(Address)
  1783. // end_ntddk end_wdm end_ntosp
  1784. //
  1785. // Define virtual base and alternate virtual base of kernel.
  1786. //
  1787. #define KSEG0_BASE 0xFFFFF80000000000UI64
  1788. //
  1789. // Generate kernel segment physical address.
  1790. //
  1791. #define KSEG_ADDRESS(PAGE) ((PVOID)(KSEG0_BASE | ((ULONG_PTR)(PAGE) << PAGE_SHIFT)))
  1792. // begin_ntddk begin_ntosp
  1793. #define KI_USER_SHARED_DATA 0xFFFFF78000000000UI64
  1794. #define SharedUserData ((KUSER_SHARED_DATA * const) KI_USER_SHARED_DATA)
  1795. //
  1796. // Intrinsic functions
  1797. //
  1798. // begin_wdm
  1799. #if defined(_M_AMD64) && !defined(RC_INVOKED) && !defined(MIDL_PASS)
  1800. // end_wdm
  1801. //
  1802. // The following routines are provided for backward compatibility with old
  1803. // code. They are no longer the preferred way to accomplish these functions.
  1804. //
  1805. #if PRAGMA_DEPRECATED_DDK
  1806. #pragma deprecated(ExInterlockedIncrementLong) // Use InterlockedIncrement
  1807. #pragma deprecated(ExInterlockedDecrementLong) // Use InterlockedDecrement
  1808. #pragma deprecated(ExInterlockedExchangeUlong) // Use InterlockedExchange
  1809. #endif
  1810. #define RESULT_ZERO 0
  1811. #define RESULT_NEGATIVE 1
  1812. #define RESULT_POSITIVE 2
//
// Tri-state sign classification returned by the legacy interlocked
// increment/decrement compatibility wrappers below.
//
typedef enum _INTERLOCKED_RESULT {
ResultNegative = RESULT_NEGATIVE,
ResultZero = RESULT_ZERO,
ResultPositive = RESULT_POSITIVE
} INTERLOCKED_RESULT;
  1818. #define ExInterlockedDecrementLong(Addend, Lock) \
  1819. _ExInterlockedDecrementLong(Addend)
  1820. __forceinline
  1821. LONG
  1822. _ExInterlockedDecrementLong (
  1823. IN OUT PLONG Addend
  1824. )
  1825. {
  1826. LONG Result;
  1827. Result = InterlockedDecrement(Addend);
  1828. if (Result < 0) {
  1829. return ResultNegative;
  1830. } else if (Result > 0) {
  1831. return ResultPositive;
  1832. } else {
  1833. return ResultZero;
  1834. }
  1835. }
  1836. #define ExInterlockedIncrementLong(Addend, Lock) \
  1837. _ExInterlockedIncrementLong(Addend)
  1838. __forceinline
  1839. LONG
  1840. _ExInterlockedIncrementLong (
  1841. IN OUT PLONG Addend
  1842. )
  1843. {
  1844. LONG Result;
  1845. Result = InterlockedIncrement(Addend);
  1846. if (Result < 0) {
  1847. return ResultNegative;
  1848. } else if (Result > 0) {
  1849. return ResultPositive;
  1850. } else {
  1851. return ResultZero;
  1852. }
  1853. }
  1854. #define ExInterlockedExchangeUlong(Target, Value, Lock) \
  1855. _ExInterlockedExchangeUlong(Target, Value)
  1856. __forceinline
  1857. _ExInterlockedExchangeUlong (
  1858. IN OUT PULONG Target,
  1859. IN ULONG Value
  1860. )
  1861. {
  1862. return (ULONG)InterlockedExchange((PLONG)Target, (LONG)Value);
  1863. }
  1864. // begin_wdm
  1865. #endif // defined(_M_AMD64) && !defined(RC_INVOKED) && !defined(MIDL_PASS)
  1866. // end_wdm end_ntddk end_nthal end_ntosp
  1867. // begin_ntosp begin_nthal begin_ntddk begin_wdm
  1868. #if !defined(MIDL_PASS) && defined(_M_AMD64)
  1869. //
// AMD64 function prototype definitions
  1871. //
  1872. // end_wdm
  1873. // end_ntddk end_ntosp
  1874. //
  1875. // Get address of current processor block.
  1876. //
  1877. __forceinline
  1878. PKPCR
  1879. KeGetPcr (
  1880. VOID
  1881. )
  1882. {
  1883. return (PKPCR)__readgsqword(FIELD_OFFSET(KPCR, Self));
  1884. }
  1885. // begin_ntosp
  1886. //
  1887. // Get address of current processor block.
  1888. //
  1889. __forceinline
  1890. PKPRCB
  1891. KeGetCurrentPrcb (
  1892. VOID
  1893. )
  1894. {
  1895. return (PKPRCB)__readgsqword(FIELD_OFFSET(KPCR, CurrentPrcb));
  1896. }
  1897. // begin_ntddk
  1898. //
  1899. // Get the current processor number
  1900. //
  1901. __forceinline
  1902. ULONG
  1903. KeGetCurrentProcessorNumber (
  1904. VOID
  1905. )
  1906. {
  1907. return (ULONG)__readgsbyte(FIELD_OFFSET(KPCR, Number));
  1908. }
  1909. // end_nthal end_ntddk end_ntosp
  1910. //
  1911. // Get address of current kernel thread object.
  1912. //
  1913. // WARNING: This inline macro can not be used for device drivers or HALs
  1914. // they must call the kernel function KeGetCurrentThread.
  1915. //
  1916. __forceinline
  1917. struct _KTHREAD *
  1918. KeGetCurrentThread (
  1919. VOID
  1920. )
  1921. {
  1922. return (struct _KTHREAD *)__readgsqword(FIELD_OFFSET(KPCR, Prcb.CurrentThread));
  1923. }
  1924. //
  1925. // If processor executing a DPC.
  1926. //
  1927. // WARNING: This inline macro is always MP enabled because filesystems
  1928. // utilize it
  1929. //
  1930. __forceinline
  1931. ULONG
  1932. KeIsExecutingDpc (
  1933. VOID
  1934. )
  1935. {
  1936. return (__readgsdword(FIELD_OFFSET(KPCR, Prcb.DpcRoutineActive)) != 0);
  1937. }
  1938. // begin_nthal begin_ntddk begin_ntosp
  1939. // begin_wdm
  1940. #endif // !defined(MIDL_PASS) && defined(_M_AMD64)
  1941. // end_nthal end_ntddk end_wdm end_ntosp
  1942. //++
  1943. //
  1944. //
  1945. // VOID
  1946. // KeMemoryBarrier (
  1947. // VOID
  1948. // )
  1949. //
  1950. //
  1951. // Routine Description:
  1952. //
// This function causes ordering of memory accesses as seen by other processors.
  1954. // Memory ordering isn't an issue on amd64.
  1955. //
  1956. //
  1957. // Arguments:
  1958. //
  1959. // None.
  1960. //
  1961. // Return Value:
  1962. //
  1963. // None.
  1964. //--
  1965. #define KeMemoryBarrier()
  1966. // begin_nthal
  1967. //
  1968. // Define inline functions to get and set the handler address in and IDT
  1969. // entry.
  1970. //
//
// Overlay that splits a 64-bit handler address into the three offset
// fields of an IDT entry (used by KeGetIdtHandlerAddress and
// KeSetIdtHandlerAddress below).
//
typedef union _KIDT_HANDLER_ADDRESS {
struct {
USHORT OffsetLow;
USHORT OffsetMiddle;
ULONG OffsetHigh;
};
ULONG64 Address;
} KIDT_HANDLER_ADDRESS, *PKIDT_HANDLER_ADDRESS;
  1979. #define KiGetIdtFromVector(Vector) \
  1980. &KeGetPcr()->IdtBase[HalVectorToIDTEntry(Vector)]
  1981. #define KeGetIdtHandlerAddress(Vector,Addr) { \
  1982. KIDT_HANDLER_ADDRESS Handler; \
  1983. PKIDTENTRY64 Idt; \
  1984. \
  1985. Idt = KiGetIdtFromVector(Vector); \
  1986. Handler.OffsetLow = Idt->OffsetLow; \
  1987. Handler.OffsetMiddle = Idt->OffsetMiddle; \
  1988. Handler.OffsetHigh = Idt->OffsetHigh; \
  1989. *(Addr) = (PVOID)(Handler.Address); \
  1990. }
  1991. #define KeSetIdtHandlerAddress(Vector,Addr) { \
  1992. KIDT_HANDLER_ADDRESS Handler; \
  1993. PKIDTENTRY64 Idt; \
  1994. \
  1995. Idt = KiGetIdtFromVector(Vector); \
  1996. Handler.Address = (ULONG64)(Addr); \
  1997. Idt->OffsetLow = Handler.OffsetLow; \
  1998. Idt->OffsetMiddle = Handler.OffsetMiddle; \
  1999. Idt->OffsetHigh = Handler.OffsetHigh; \
  2000. }
  2001. // end_nthal
  2002. //++
  2003. //
  2004. // BOOLEAN
  2005. // KiIsThreadNumericStateSaved(
  2006. // IN PKTHREAD Address
  2007. // )
  2008. //
  2009. //--
  2010. #define KiIsThreadNumericStateSaved(a) TRUE
  2011. //++
  2012. //
  2013. // VOID
  2014. // KiRundownThread(
  2015. // IN PKTHREAD Address
  2016. // )
  2017. //
  2018. //--
  2019. #define KiRundownThread(a)
  2020. //
  2021. // functions specific to structure
  2022. //
  2023. VOID
  2024. KiSetIRR (
  2025. IN ULONG SWInterruptMask
  2026. );
  2027. // begin_ntddk begin_wdm begin_ntosp
  2028. NTKERNELAPI
  2029. NTSTATUS
  2030. KeSaveFloatingPointState (
  2031. OUT PKFLOATING_SAVE SaveArea
  2032. );
  2033. NTKERNELAPI
  2034. NTSTATUS
  2035. KeRestoreFloatingPointState (
  2036. IN PKFLOATING_SAVE SaveArea
  2037. );
  2038. // end_ntddk end_wdm end_ntosp
  2039. // begin_nthal begin_ntddk begin_wdm begin_ntndis begin_ntosp
  2040. #endif // defined(_AMD64_)
  2041. // end_nthal end_ntddk end_wdm end_ntndis end_ntosp
  2042. //
  2043. // Architecture specific kernel functions.
  2044. //
  2045. // begin_ntosp
#ifdef _AMD64_

//
// I/O permission map support. Prototypes only - the names suggest
// set/query of the I/O access map and a per-process enable toggle;
// see the definitions for exact semantics.
//

VOID
KeSetIoAccessMap (
    PKIO_ACCESS_MAP IoAccessMap
    );

VOID
KeQueryIoAccessMap (
    PKIO_ACCESS_MAP IoAccessMap
    );

VOID
KeSetIoAccessProcess (
    struct _KPROCESS *Process,
    BOOLEAN Enable
    );

//
// DPC routine used to edit the I/O permission map; the four-parameter
// shape matches the standard deferred-routine signature.
//

VOID
KiEditIopmDpc (
    IN struct _KDPC *Dpc,
    IN PVOID DeferredContext,
    IN PVOID SystemArgument1,
    IN PVOID SystemArgument2
    );

#endif //_AMD64_
  2068. //
// Platform specific kernel functions to raise and lower IRQL.
  2070. //
  2071. // These functions are imported for ntddk, ntifs, and wdm. They are
  2072. // inlined for nthal, ntosp, and the system.
  2073. //
  2074. #if defined(_NTDRIVER_) || defined(_NTDDK_) || defined(_NTIFS_) || defined(_WDMDDK_)
  2075. // begin_ntddk begin_wdm
  2076. #if defined(_AMD64_)
  2077. NTKERNELAPI
  2078. KIRQL
  2079. KeGetCurrentIrql (
  2080. VOID
  2081. );
  2082. NTKERNELAPI
  2083. VOID
  2084. KeLowerIrql (
  2085. IN KIRQL NewIrql
  2086. );
  2087. #define KeRaiseIrql(a,b) *(b) = KfRaiseIrql(a)
  2088. NTKERNELAPI
  2089. KIRQL
  2090. KfRaiseIrql (
  2091. IN KIRQL NewIrql
  2092. );
  2093. // end_wdm
  2094. NTKERNELAPI
  2095. KIRQL
  2096. KeRaiseIrqlToDpcLevel (
  2097. VOID
  2098. );
  2099. NTKERNELAPI
  2100. KIRQL
  2101. KeRaiseIrqlToSynchLevel (
  2102. VOID
  2103. );
  2104. // begin_wdm
  2105. #endif // defined(_AMD64_)
  2106. // end_ntddk end_wdm
  2107. #else
  2108. // begin_nthal
  2109. #if defined(_AMD64_) && !defined(MIDL_PASS)
__forceinline
KIRQL
KeGetCurrentIrql (
    VOID
    )

/*++

Routine Description:

    This function returns the current IRQL.

    N.B. On AMD64 the current IRQL is held in the CR8 (task priority)
         register, so this is a single register read.

Arguments:

    None.

Return Value:

    The current IRQL is returned as the function value.

--*/

{
    return (KIRQL)ReadCR8();
}
__forceinline
VOID
KeLowerIrql (
    IN KIRQL NewIrql
    )

/*++

Routine Description:

    This function lowers the IRQL to the specified value by writing the
    new value into CR8.

Arguments:

    NewIrql - Supplies the new IRQL value. Must be less than or equal to
        the current IRQL (asserted on checked builds).

Return Value:

    None.

--*/

{

    ASSERT(KeGetCurrentIrql() >= NewIrql);

    WriteCR8(NewIrql);
    return;
}
//
// KeRaiseIrql is expressed in terms of KfRaiseIrql: the old IRQL is
// stored through the second argument.
//

#define KeRaiseIrql(a,b) *(b) = KfRaiseIrql(a)

__forceinline
KIRQL
KfRaiseIrql (
    IN KIRQL NewIrql
    )

/*++

Routine Description:

    This function raises the current IRQL to the specified value and returns
    the previous IRQL.

Arguments:

    NewIrql (cl) - Supplies the new IRQL value. Must be greater than or
        equal to the current IRQL (asserted on checked builds).

Return Value:

    The previous IRQL is returned as the function value.

--*/

{

    KIRQL OldIrql;

    OldIrql = KeGetCurrentIrql();

    ASSERT(OldIrql <= NewIrql);

    WriteCR8(NewIrql);
    return OldIrql;
}
  2166. __forceinline
  2167. KIRQL
  2168. KeRaiseIrqlToDpcLevel (
  2169. VOID
  2170. )
  2171. /*++
  2172. Routine Description:
  2173. This function raises the current IRQL to DPC_LEVEL and returns the
  2174. previous IRQL.
  2175. Arguments:
  2176. None.
  2177. Return Value:
  2178. The previous IRQL is retured as the function value.
  2179. --*/
  2180. {
  2181. KIRQL OldIrql;
  2182. OldIrql = KeGetCurrentIrql();
  2183. ASSERT(OldIrql <= DISPATCH_LEVEL);
  2184. WriteCR8(DISPATCH_LEVEL);
  2185. return OldIrql;
  2186. }
  2187. __forceinline
  2188. KIRQL
  2189. KeRaiseIrqlToSynchLevel (
  2190. VOID
  2191. )
  2192. /*++
  2193. Routine Description:
  2194. This function raises the current IRQL to SYNCH_LEVEL and returns the
  2195. previous IRQL.
  2196. Arguments:
  2197. Return Value:
  2198. The previous IRQL is retured as the function value.
  2199. --*/
  2200. {
  2201. KIRQL OldIrql;
  2202. OldIrql = KeGetCurrentIrql();
  2203. ASSERT(OldIrql <= SYNCH_LEVEL);
  2204. WriteCR8(SYNCH_LEVEL);
  2205. return OldIrql;
  2206. }
  2207. #endif // defined(_AMD64_) && !defined(MIDL_PASS)
  2208. // end_nthal
  2209. #endif // defined(_NTDRIVER_) || defined(_NTDDK_) || defined(_NTIFS_) || defined(_WDMDDK_)
  2210. // end_ntosp
//
// misc routines
//

//
// Prototype only - presumably tunes the captured processor control
// state; see the definition for semantics.
//

VOID
KeOptimizeProcessorControlState (
    VOID
    );
  2218. // begin_nthal
  2219. #if defined(_AMD64_)
//
// Structure to aid in booting secondary processors
//
// These images are packed to byte granularity because they are consumed
// directly by the processor (instruction bytes, far-transfer targets,
// and lgdt/lidt pseudo-descriptors).
//

#pragma pack(push,1)

//
// Relative jmp instruction image placed at the start of the start block.
// NOTE(review): opcode 0xe9 is a *near* relative jmp in 16-bit code
// despite the FAR_JMP_16 name - confirm against the real-mode stub.
//

typedef struct _FAR_JMP_16 {
    UCHAR OpCode;  // = 0xe9
    USHORT Offset;
} FAR_JMP_16;

//
// Selector:offset far-transfer targets; the 32-bit form is used for the
// real-mode to protected-mode switch and the 64-bit form for the
// protected-mode to long-mode switch (see PROCESSOR_START_BLOCK).
//

typedef struct _FAR_TARGET_32 {
    USHORT Selector;
    ULONG Offset;
} FAR_TARGET_32;

typedef struct _FAR_TARGET_64 {
    USHORT Selector;
    ULONG64 Offset;
} FAR_TARGET_64;

//
// Pseudo-descriptor image (16-bit limit + 32-bit base) in the form
// expected by lgdt/lidt while still executing 32-bit code.
//

typedef struct _PSEUDO_DESCRIPTOR_32 {
    USHORT Limit;
    ULONG Base;
} PSEUDO_DESCRIPTOR_32;

#pragma pack(pop)
//
// Byte offsets of the selectors within the temporary 32-bit GDT carried
// in the processor start block; each GDT slot is 16 bytes wide (hence
// the "index * 16" form). PSB_GDT32_MAX is the highest slot index, used
// to size the Gdt[] array below.
//

#define PSB_GDT32_NULL 0 * 16
#define PSB_GDT32_CODE64 1 * 16
#define PSB_GDT32_DATA32 2 * 16
#define PSB_GDT32_CODE32 3 * 16
#define PSB_GDT32_MAX 3

typedef struct _PROCESSOR_START_BLOCK *PPROCESSOR_START_BLOCK;
//
// Bootstrap block used to bring a secondary processor from real mode
// through protected mode into long mode. Layout matters: the block
// begins with an executable jmp image and is addressed both physically
// (TiledCr3 era) and linearly (SelfMap).
//

typedef struct _PROCESSOR_START_BLOCK {

    //
    // The block starts with a jmp instruction to the end of the block
    //

    FAR_JMP_16 Jmp;

    //
    // Completion flag is set to non-zero when the target processor has
    // started
    //

    ULONG CompletionFlag;

    //
    // Pseudo descriptors for GDT and IDT.
    //

    PSEUDO_DESCRIPTOR_32 Gdt32;
    PSEUDO_DESCRIPTOR_32 Idt32;

    //
    // The temporary 32-bit GDT itself resides here.
    // (Indexed via the PSB_GDT32_* byte offsets above.)
    //

    KGDTENTRY64 Gdt[PSB_GDT32_MAX + 1];

    //
    // Physical address of the 64-bit top-level identity-mapped page table.
    //

    ULONG64 TiledCr3;

    //
    // Far jump target from Rm to Pm code
    //

    FAR_TARGET_32 PmTarget;

    //
    // Far jump target from Pm to Lm code
    //

    FAR_TARGET_64 LmTarget;

    //
    // Linear address of this structure
    //

    PPROCESSOR_START_BLOCK SelfMap;

    //
    // Initial processor state for the processor to be started
    //

    KPROCESSOR_STATE ProcessorState;

} PROCESSOR_START_BLOCK;
  2287. //
  2288. // AMD64 functions for special instructions
  2289. //
//
// Register image produced by the cpuid instruction for a given
// function (leaf) number.
//

typedef struct _CPU_INFO {
    ULONG Eax;
    ULONG Ebx;
    ULONG Ecx;
    ULONG Edx;
} CPU_INFO, *PCPU_INFO;

//
// Executes cpuid with the supplied function number and fills CpuInfo
// with the resulting eax/ebx/ecx/edx values (prototype only).
//

VOID
KiCpuId (
    ULONG Function,
    PCPU_INFO CpuInfo
    );
  2301. //
// Define read/write MSR functions and register definitions.
  2303. //
  2304. #define MSR_TSC 0x10 // time stamp counter
  2305. #define MSR_PAT 0x277 // page attributes table
  2306. #define MSR_EFER 0xc0000080 // extended function enable register
  2307. #define MSR_STAR 0xc0000081 // system call selectors
  2308. #define MSR_LSTAR 0xc0000082 // system call 64-bit entry
  2309. #define MSR_CSTAR 0xc0000083 // system call 32-bit entry
  2310. #define MSR_SYSCALL_MASK 0xc0000084 // system call flags mask
  2311. #define MSR_FS_BASE 0xc0000100 // fs long mode base address register
  2312. #define MSR_GS_BASE 0xc0000101 // gs long mode base address register
  2313. #define MSR_GS_SWAP 0xc0000102 // gs long mode swap GS base register
  2314. //
  2315. // Flags within MSR_EFER
  2316. //
  2317. #define MSR_SCE 0x00000001 // system call enable
  2318. #define MSR_LME 0x00000100 // long mode enable
  2319. #define MSR_LMA 0x00000400 // long mode active
  2320. //
  2321. // Page attributes table.
  2322. //
  2323. #define PAT_TYPE_STRONG_UC 0 // uncacheable/strongly ordered
  2324. #define PAT_TYPE_USWC 1 // write combining/weakly ordered
  2325. #define PAT_TYPE_WT 4 // write through
  2326. #define PAT_TYPE_WP 5 // write protected
  2327. #define PAT_TYPE_WB 6 // write back
  2328. #define PAT_TYPE_WEAK_UC 7 // uncacheable/weakly ordered
  2329. //
  2330. // Page attributes table structure.
  2331. //
  2332. typedef union _PAT_ATTRIBUTES {
  2333. struct {
  2334. UCHAR Pat[8];
  2335. } hw;
  2336. ULONG64 QuadPart;
  2337. } PAT_ATTRIBUTES, *PPAT_ATTRIBUTES;
//
// Kernel-style wrapper names over the compiler intrinsics:
//
//   ReadMSR / WriteMSR   - __readmsr / __writemsr (rdmsr / wrmsr)
//   InvalidatePage       - __invlpg (single-page TLB invalidation)
//   WritebackInvalidate  - __wbinvd (write back and invalidate caches)
//

#define ReadMSR(Msr) __readmsr(Msr)

ULONG64
__readmsr (
    IN ULONG Msr
    );

#define WriteMSR(Msr, Data) __writemsr(Msr, Data)

VOID
__writemsr (
    IN ULONG Msr,
    IN ULONG64 Value
    );

#define InvalidatePage(Page) __invlpg(Page)

VOID
__invlpg (
    IN PVOID Page
    );

#define WritebackInvalidate() __wbinvd()

VOID
__wbinvd (
    VOID
    );

#pragma intrinsic(__readmsr)
#pragma intrinsic(__writemsr)
#pragma intrinsic(__invlpg)
#pragma intrinsic(__wbinvd)
  2363. #endif // _AMD64_
  2364. // end_nthal
  2365. //
  2366. // Define software feature bit definitions.
  2367. //
  2368. #define KF_V86_VIS 0x00000001
  2369. #define KF_RDTSC 0x00000002
  2370. #define KF_CR4 0x00000004
  2371. #define KF_CMOV 0x00000008
  2372. #define KF_GLOBAL_PAGE 0x00000010
  2373. #define KF_LARGE_PAGE 0x00000020
  2374. #define KF_MTRR 0x00000040
  2375. #define KF_CMPXCHG8B 0x00000080
  2376. #define KF_MMX 0x00000100
  2377. #define KF_WORKING_PTE 0x00000200
  2378. #define KF_PAT 0x00000400
  2379. #define KF_FXSR 0x00000800
  2380. #define KF_FAST_SYSCALL 0x00001000
  2381. #define KF_XMMI 0x00002000
  2382. #define KF_3DNOW 0x00004000
  2383. #define KF_AMDK6MTRR 0x00008000
  2384. #define KF_XMMI64 0x00010000
  2385. #define KF_DTS 0x00020000
  2386. #define KF_SMT 0x00040000
  2387. //
  2388. // Define required software feature bits.
  2389. //
  2390. #define KF_REQUIRED (KF_RDTSC | KF_CR4 | KF_CMOV | KF_GLOBAL_PAGE | \
  2391. KF_LARGE_PAGE | KF_CMPXCHG8B | KF_MMX | KF_WORKING_PTE | \
  2392. KF_PAT | KF_FXSR | KF_FAST_SYSCALL | KF_XMMI | KF_XMMI64)
  2393. //
  2394. // Define hardware feature bits definitions.
  2395. //
  2396. #define HF_FPU 0x00000001 // FPU is on chip
  2397. #define HF_VME 0x00000002 // virtual 8086 mode enhancement
  2398. #define HF_DE 0x00000004 // debugging extension
  2399. #define HF_PSE 0x00000008 // page size extension
  2400. #define HF_TSC 0x00000010 // time stamp counter
  2401. #define HF_MSR 0x00000020 // rdmsr and wrmsr support
  2402. #define HF_PAE 0x00000040 // physical address extension
  2403. #define HF_MCE 0x00000080 // machine check exception
  2404. #define HF_CXS 0x00000100 // cmpxchg8b instruction supported
  2405. #define HF_APIC 0x00000200 // APIC on chip
  2406. #define HF_UNUSED0 0x00000400 // unused bit
  2407. #define HF_SYSCALL 0x00000800 // fast system call
  2408. #define HF_MTRR 0x00001000 // memory type range registers
  2409. #define HF_PGE 0x00002000 // global page TB support
  2410. #define HF_MCA 0x00004000 // machine check architecture
  2411. #define HF_CMOV 0x00008000 // cmov instruction supported
  2412. #define HF_PAT 0x00010000 // physical attributes table
  2413. #define HF_UNUSED1 0x00020000 // unused bit
  2414. #define HF_UNUSED2 0x00040000 // unused bit
  2415. #define HF_UNUSED3 0x00080000 // unused bit
  2416. #define HF_UNUSED4 0x00100000 // unused bit
  2417. #define HF_UNUSED5 0x00200000 // unused bit
  2418. #define HF_UNUSED6 0x00400000 // unused bit
  2419. #define HF_MMX 0x00800000 // MMX technology supported
  2420. #define HF_FXSR 0x01000000 // fxsr instruction supported
  2421. #define HF_XMMI 0x02000000 // xmm (SSE) registers supported
  2422. #define HF_XMMI64 0x04000000 // xmm (SSE2) registers supported
  2423. //
  2424. // Define required hardware feature bits.
  2425. //
  2426. #define HF_REQUIRED (HF_FPU | HF_DE | HF_PSE | HF_TSC | HF_MSR | \
  2427. HF_PAE | HF_MCE | HF_CXS | HF_APIC | HF_SYSCALL | \
  2428. HF_PGE | HF_MCA | HF_CMOV | HF_PAT | HF_MMX | \
  2429. HF_FXSR | HF_XMMI | HF_XMMI64)
  2430. //
  2431. // Define extended hardware feature bit definitions.
  2432. //
  2433. #define XHF_3DNOW 0x80000000 // 3DNOW supported
  2434. #endif // __amd64_