Counter-Strike: Global Offensive Source Code


  1. //===== Copyright © 1996-2005, Valve Corporation, All rights reserved. ======//
  2. //
  3. // Purpose:
  4. //
  5. // $NoKeywords: $
  6. //
  7. //===========================================================================//
  8. #ifndef K8PERFORMANCECOUNTERS_H
  9. #define K8PERFORMANCECOUNTERS_H
  10. /*
  11. * AMD K8 events.
  12. *
  13. */
  14. #ifdef COMPILER_MSVC64
  15. extern "C"
  16. {
  17. unsigned __int64 __readpmc(unsigned long);
  18. }
  19. #pragma intrinsic(__readpmc)
  20. #endif
  21. typedef union EVENT_MASK(NULL_MASK)
  22. {
  23. // no tests defined
  24. uint16 flat;
  25. } EVENT_MASK(NULL_MASK);
  26. #define MSR_K8_EVNTSEL0 0xC0010000 /* .. 0xC0010003 */
  27. #define MSR_K8_PERFCTR0 0xC0010004 /* .. 0xC0010007 */
  28. # pragma pack(push, 1)
  29. // access to these bits is through the methods
  30. typedef union PerfEvtSel
  31. {
  32. struct
  33. {
  34. uint64 EventMask : 8;
  35. uint64 UnitMask : 8;
  36. uint64 USR : 1;
  37. uint64 OS : 1;
  38. uint64 Edge : 1;
  39. uint64 PC : 1;
  40. uint64 INTAPIC : 1;
  41. uint64 Reserved21 : 1;
  42. uint64 Enable : 1;
  43. uint64 Complement : 1; // aka INV
  44. uint64 Threshold : 8; // aka CounterMask
  45. uint64 Reserver32 : 32;
  46. };
  47. uint64 flat;
  48. } PerfEvtSel;
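// Example (illustrative sketch, not part of the original source): the bitfields
// above mirror the K8 EVNTSEL MSR layout, so programming counter n amounts to
// filling in a PerfEvtSel and writing its 'flat' value to MSR_K8_EVNTSEL0 + n
// through the toolkit's PME wrapper (declared elsewhere):
//
//   PerfEvtSel sel;
//   sel.flat = 0;
//   sel.EventMask = 0xC0;  // 0xC0 = retired instructions (see the event classes below)
//   sel.UnitMask = 0;      // event-specific sub-mask; unused for this event
//   sel.USR = 1;           // count in user mode
//   sel.OS = 1;            // ...and in kernel mode
//   sel.Enable = 1;
//   PME::Instance()->WriteMSR( MSR_K8_EVNTSEL0 + 0, sel.flat );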
  49. enum UnitEncode
  50. {
  51. FP,
  52. LS,
  53. DC,
  54. BU,
  55. IC,
  56. UE_Unknown,
  57. FR,
  58. NB
  59. };
  60. # pragma pack(pop)
  61. // Turn off the no return value warning in ReadCounter.
  62. #pragma warning( disable : 4035 )
  63. #define k8NUM_COUNTERS 4
  64. class k8BaseEvent
  65. {
  66. public:
  67. PME * pme;
  68. PerfEvtSel eventSelect[k8NUM_COUNTERS];
  69. unsigned short m_eventMask;
  70. int event_id;
  71. tchar * name;
  72. tchar revRequired;
  73. int eventSelectNum;
  74. UnitEncode unitEncode;
  75. void SetCounter(int n)
  76. {
  77. if (n < 0)
  78. n = 0;
  79. else if (n > 3)
  80. n = 3;
  81. eventSelectNum = n;
  82. }
  83. k8BaseEvent()
  84. {
  85. pme = PME::Instance();
  86. for(int i = 0; i< k8NUM_COUNTERS; i++)
  87. {
  88. eventSelect[i].flat = 0;
  89. }
  90. eventSelectNum = 0;
  91. m_eventMask = 0;
  92. event_id = 0;
  93. name = 0;
  94. revRequired = 'A';
  95. }
  96. void SetCaptureMode(PrivilegeCapture priv)
  97. {
  98. PerfEvtSel & select = eventSelect[eventSelectNum];
  99. StopCounter();
  100. switch (priv)
  101. {
  102. case OS_Only:
  103. select.USR = 0;
  104. select.OS = 1;
  105. break;
  106. case USR_Only:
  107. select.USR = 1;
  108. select.OS = 0;
  109. break;
  110. case OS_and_USR:
  111. select.USR = 1;
  112. select.OS = 1;
  113. break;
  114. }
  115. select.UnitMask = m_eventMask;
  116. select.EventMask = event_id;
  117. int selectPort = MSR_K8_EVNTSEL0 + eventSelectNum;
  118. pme->WriteMSR(selectPort, select.flat);
  119. }
  120. void SetFiltering(CompareState compareEnable,
  121. CompareMethod compareMethod,
  122. uint8 threshold,
  123. EdgeState edgeEnable)
  124. {
  125. PerfEvtSel & select = eventSelect[eventSelectNum];
  126. StopCounter();
  127. if (compareEnable == CompareDisable)
  128. select.Threshold = 0;
  129. else
  130. select.Threshold = threshold;
  131. select.Complement = compareMethod;
  132. select.Edge = edgeEnable;
  133. int selectPort = MSR_K8_EVNTSEL0 + eventSelectNum;
  134. pme->WriteMSR(selectPort, select.flat);
  135. }
  136. void StartCounter()
  137. {
  138. PerfEvtSel & select = eventSelect[eventSelectNum];
  139. select.Enable = 1;
  140. int selectPort = MSR_K8_EVNTSEL0 + eventSelectNum;
  141. pme->WriteMSR(selectPort, select.flat);
  142. }
  143. void StopCounter()
  144. {
  145. PerfEvtSel & select = eventSelect[eventSelectNum];
  146. select.Enable = 0;
  147. int selectPort = MSR_K8_EVNTSEL0 + eventSelectNum;
  148. pme->WriteMSR(selectPort, select.flat);
  149. }
  150. void ClearCounter()
  151. {
  152. PerfEvtSel & select = eventSelect[eventSelectNum];
  153. int counterPort = MSR_K8_PERFCTR0 + eventSelectNum;
  154. pme->WriteMSR(counterPort, 0ui64 ); // clear
  155. }
  156. void WriteCounter(int64 value)
  157. {
  158. PerfEvtSel & select = eventSelect[eventSelectNum];
  159. int counterPort = MSR_K8_PERFCTR0 + eventSelectNum;
  160. pme->WriteMSR(counterPort, value); // set the counter to the supplied value
  161. }
  162. int64 ReadCounter()
  163. {
  164. #if PME_DEBUG
  165. PerfEvtSel & select = eventSelect[eventSelectNum];
  166. if (select.USR == 0 && select.OS == 0)
  167. return -1; // no area to collect, use SetCaptureMode
  168. if (select.EventMask == 0)
  169. return -2; // no event mask set
  170. if (eventSelectNum < 0 || eventSelectNum > 3)
  171. return -3; // counter not legal
  172. // check revision
  173. #endif
  174. // ReadMSR should work here too, but RDPMC should be faster
  175. //ReadMSR(counterPort, int64);
  176. // copy the member into a local so the inline asm below can refer to it by name; RDPMC leaves the 64-bit count in EDX:EAX (the int64 return convention), which is why warning 4035 is disabled above
  177. #ifdef COMPILER_MSVC64
  178. return __readpmc((unsigned long) eventSelectNum);
  179. #else
  180. int temp = eventSelectNum;
  181. _asm
  182. {
  183. mov ecx, temp
  184. RDPMC
  185. }
  186. #endif
  187. }
  188. };
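// Typical lifecycle of a counter using one of the event classes defined below
// (illustrative sketch only; assumes the PME singleton used throughout this
// file has MSR/RDPMC access, and that PrivilegeCapture's OS_and_USR value is
// declared in the shared performance-counter headers):
//
//   k8Event_RETIRED_INSTRUCTIONS ev;   // event 0xC0, declared further down
//   ev.SetCounter( 0 );                // use EVNTSEL0 / PERFCTR0
//   ev.SetCaptureMode( OS_and_USR );   // program event code, unit mask, privilege bits
//   ev.ClearCounter();
//   ev.StartCounter();
//   // ... code being measured ...
//   ev.StopCounter();
//   int64 retired = ev.ReadCounter();  // RDPMC with ECX = counter index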
  189. #pragma warning( default : 4035 )
  190. typedef union EVENT_MASK(k8_dispatched_fpu_ops)
  191. {
  192. // event 0
  193. struct
  194. {
  195. uint16 AddPipeOps:1; // Add pipe ops excluding junk ops
  196. uint16 MulPipeOps:1; // Multiply pipe ops excluding junk ops
  197. uint16 StoreOps:1; // Store pipe ops excluding junk ops
  198. uint16 AndPipeOpsJunk:1; // Add pipe junk ops
  199. uint16 MulPipeOpsJunk:1; // Multiply pipe junk ops
  200. uint16 StoreOpsJunk:1; // Store pipe junk ops
  201. };
  202. uint16 flat;
  203. } EVENT_MASK(k8_dispatched_fpu_ops);
  204. class k8Event_DISPATCHED_FPU_OPS : public k8BaseEvent
  205. {
  206. public:
  207. k8Event_DISPATCHED_FPU_OPS()
  208. {
  209. eventMask = (EVENT_MASK(k8_dispatched_fpu_ops) *)&m_eventMask;
  210. event_id = 0x00;
  211. unitEncode = FP;
  212. name = _T("Dispatched FPU ops");
  213. revRequired = 'B';
  214. }
  215. EVENT_MASK(k8_dispatched_fpu_ops) * eventMask;
  216. };
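// Note (illustrative, not from the original source): each event class points
// its typed eventMask at the base class's 16-bit m_eventMask, so setting the
// named bits selects the unit mask that SetCaptureMode() copies into
// PerfEvtSel.UnitMask. For example:
//
//   k8Event_DISPATCHED_FPU_OPS fpuOps;
//   fpuOps.eventMask->AddPipeOps = 1;    // count add-pipe ops
//   fpuOps.eventMask->MulPipeOps = 1;    // ...and multiply-pipe ops
//   fpuOps.SetCounter( 1 );
//   fpuOps.SetCaptureMode( OS_and_USR ); // writes m_eventMask into UnitMask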
  217. //////////////////////////////////////////////////////////
  218. class k8Event_NO_FPU_OPS : public k8BaseEvent
  219. {
  220. public:
  221. k8Event_NO_FPU_OPS()
  222. {
  223. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  224. event_id = 0x01;
  225. unitEncode = FP;
  226. name = _T("Cycles with no FPU ops retired");
  227. revRequired = 'B';
  228. }
  229. EVENT_MASK(NULL_MASK) * eventMask;
  230. };
  231. //////////////////////////////////////////////////////////
  232. class k8Event_FAST_FPU_OPS : public k8BaseEvent
  233. {
  234. public:
  235. k8Event_FAST_FPU_OPS()
  236. {
  237. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  238. event_id = 0x02;
  239. unitEncode = FP;
  240. name = _T("Dispatched FPU ops that use the fast flag interface");
  241. revRequired = 'B';
  242. }
  243. EVENT_MASK(NULL_MASK) * eventMask;
  244. };
  245. //////////////////////////////////////////////////////////
  246. typedef union EVENT_MASK(k8_segment_register_load)
  247. {
  248. struct
  249. {
  250. uint16 ES:1;
  251. uint16 CS:1;
  252. uint16 SS:1;
  253. uint16 DS:1;
  254. uint16 FS:1;
  255. uint16 GS:1;
  256. uint16 HS:1;
  257. };
  258. uint16 flat;
  259. } EVENT_MASK(k8_segment_register_load);
  260. class k8Event_SEG_REG_LOAD : public k8BaseEvent
  261. {
  262. public:
  263. k8Event_SEG_REG_LOAD()
  264. {
  265. eventMask = (EVENT_MASK(k8_segment_register_load) *)&m_eventMask;
  266. name = _T("Segment register load");
  267. event_id = 0x20;
  268. unitEncode = LS;
  269. }
  270. EVENT_MASK(k8_segment_register_load) * eventMask;
  271. };
  272. class k8Event_SELF_MODIFY_RESYNC : public k8BaseEvent
  273. {
  274. public:
  275. k8Event_SELF_MODIFY_RESYNC()
  276. {
  277. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  278. name = _T("Microarchitectural resync caused by self modifying code");
  279. event_id = 0x21;
  280. unitEncode = LS;
  281. }
  282. EVENT_MASK(NULL_MASK) * eventMask;
  283. };
  284. class k8Event_LS_RESYNC_BY_SNOOP : public k8BaseEvent
  285. {
  286. public:
  287. k8Event_LS_RESYNC_BY_SNOOP()
  288. {
  289. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  290. event_id = 0x22;
  291. unitEncode = LS;
  292. name = _T("Microarchitectural resync caused by snoop");
  293. }
  294. EVENT_MASK(NULL_MASK) * eventMask;
  295. };
  296. class k8Event_LS_BUFFER_FULL : public k8BaseEvent
  297. {
  298. public:
  299. k8Event_LS_BUFFER_FULL()
  300. {
  301. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  302. name = _T("LS Buffer 2 Full");
  303. event_id = 0x23;
  304. unitEncode = LS;
  305. }
  306. EVENT_MASK(NULL_MASK) * eventMask;
  307. };
  308. typedef union EVENT_MASK(k8_locked_op)
  309. {
  310. struct
  311. {
  312. uint16 NumLockInstr : 1; //Number of lock instructions executed
  313. uint16 NumCyclesInRequestGrant : 1; //Number of cycles spent in the lock request/grant stage
  314. uint16 NumCyclesForLock:1;
  315. /*Number of cycles a lock takes to complete once it is
  316. non-speculative and is the oldest load/store operation
  317. (non-speculative cycles in Ls2 entry 0)*/
  318. };
  319. uint16 flat;
  320. } EVENT_MASK(k8_locked_op);
  321. class k8Event_LOCKED_OP : public k8BaseEvent
  322. {
  323. public:
  324. EVENT_MASK(k8_locked_op) * eventMask;
  325. k8Event_LOCKED_OP()
  326. {
  327. eventMask = (EVENT_MASK(k8_locked_op) *)&m_eventMask;
  328. name = _T("Locked operation");
  329. event_id = 0x24;
  330. unitEncode = LS;
  331. revRequired = 'C';
  332. }
  333. };
  334. class k8Event_OP_LATE_CANCEL : public k8BaseEvent
  335. {
  336. public:
  337. k8Event_OP_LATE_CANCEL()
  338. {
  339. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  340. name = _T("Microarchitectural late cancel of an operation");
  341. event_id = 0x25;
  342. unitEncode = LS;
  343. }
  344. EVENT_MASK(NULL_MASK) * eventMask;
  345. // name = _T("OP_LATE_CANCEL");
  346. };
  347. class k8Event_CFLUSH_RETIRED : public k8BaseEvent
  348. {
  349. public:
  350. k8Event_CFLUSH_RETIRED()
  351. {
  352. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  353. name = _T("Retired CFLUSH instructions");
  354. event_id = 0x26;
  355. unitEncode = LS;
  356. }
  357. EVENT_MASK(NULL_MASK) * eventMask;
  358. // name = _T("CFLUSH_RETIRED");
  359. };
  360. class k8Event_CPUID_RETIRED : public k8BaseEvent
  361. {
  362. public:
  363. k8Event_CPUID_RETIRED()
  364. {
  365. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  366. name = _T("Retired CPUID instructions");
  367. event_id = 0x27;
  368. unitEncode = LS;
  369. }
  370. EVENT_MASK(NULL_MASK) * eventMask;
  371. // name = _T("CPUID_RETIRED");
  372. };
  373. typedef union EVENT_MASK( k8_cache)
  374. {
  375. struct
  376. {
  377. uint16 Invalid:1;
  378. uint16 Exclusive:1;
  379. uint16 Shared:1;
  380. uint16 Owner:1;
  381. uint16 Modified:1;
  382. };
  383. uint16 flat;
  384. }EVENT_MASK( k8_cache);
  385. /* 0x40-0x47: from K7 official event set */
  386. class k8Event_DATA_CACHE_ACCESSES : public k8BaseEvent
  387. {
  public:
  388. k8Event_DATA_CACHE_ACCESSES()
  389. {
  390. event_id = 0x40;
  391. unitEncode = DC;
  392. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  393. //_T("DATA_CACHE_ACCESSES"),
  394. name = _T("Data cache accesses");
  395. }
  396. EVENT_MASK(NULL_MASK) * eventMask;
  397. };
  398. class k8Event_DATA_CACHE_MISSES : public k8BaseEvent
  399. {
  public:
  400. k8Event_DATA_CACHE_MISSES()
  401. {
  402. event_id = 0x41;
  403. unitEncode = DC;
  404. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  405. //_T("DATA_CACHE_MISSES"),
  406. name = _T("Data cache misses");
  407. }
  408. EVENT_MASK(NULL_MASK) * eventMask;
  409. };
  410. class k8Event_DATA_CACHE_REFILLS_FROM_L2 : public k8BaseEvent
  411. {
  public:
  412. k8Event_DATA_CACHE_REFILLS_FROM_L2()
  413. {
  414. event_id = 0x42;
  415. unitEncode = DC;
  416. eventMask = (EVENT_MASK(k8_cache) *)&m_eventMask;
  417. name = _T("Data cache refills from L2");
  418. }
  419. EVENT_MASK(k8_cache) * eventMask;
  420. };
  421. class k8Event_DATA_CACHE_REFILLS_FROM_SYSTEM : public k8BaseEvent
  422. {
  public:
  423. k8Event_DATA_CACHE_REFILLS_FROM_SYSTEM()
  424. {
  425. event_id = 0x43;
  426. unitEncode = DC;
  427. eventMask = (EVENT_MASK(k8_cache) *)&m_eventMask;
  428. //UM(k7_um_moesi),
  429. //_T("DATA_CACHE_REFILLS_FROM_SYSTEM"),
  430. name = _T("Data cache refills from system");
  431. }
  432. EVENT_MASK(k8_cache) * eventMask;
  433. };
  434. class k8Event_DATA_CACHE_WRITEBACKS : public k8BaseEvent
  435. {
  public:
  436. k8Event_DATA_CACHE_WRITEBACKS()
  437. {
  438. event_id = 0x44;
  439. unitEncode = DC;
  440. eventMask = (EVENT_MASK(k8_cache) *)&m_eventMask;
  441. //UM(k7_um_moesi),
  442. //_T("DATA_CACHE_WRITEBACKS"),
  443. name = _T("Data cache writebacks");
  444. }
  445. EVENT_MASK(k8_cache) * eventMask;
  446. };
  447. class k8Event_L1_DTLB_MISSES_AND_L2_DTLB_HITS : public k8BaseEvent
  448. {
  public:
  449. k8Event_L1_DTLB_MISSES_AND_L2_DTLB_HITS()
  450. {
  451. event_id = 0x45;
  452. unitEncode = DC;
  453. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  454. name = _T("L1 DTLB misses and L2 DTLB hits");
  455. }
  456. EVENT_MASK(NULL_MASK) * eventMask;
  457. };
  458. class k8Event_L1_AND_L2_DTLB_MISSES : public k8BaseEvent
  459. {
  public:
  460. k8Event_L1_AND_L2_DTLB_MISSES()
  461. {
  462. event_id = 0x46;
  463. unitEncode = DC;
  464. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  465. name = _T("L1 and L2 DTLB misses") ;
  466. }
  467. EVENT_MASK(NULL_MASK) * eventMask;
  468. };
  469. class k8Event_MISALIGNED_DATA_REFERENCES : public k8BaseEvent
  470. {
  public:
  471. k8Event_MISALIGNED_DATA_REFERENCES()
  472. {
  473. event_id = 0x47;
  474. unitEncode = DC;
  475. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  476. //NULL, _T("MISALIGNED_DATA_REFERENCES"),
  477. name = _T("Misaligned data references");
  478. }
  479. EVENT_MASK(NULL_MASK) * eventMask;
  480. };
  481. class k8Event_ACCESS_CANCEL_LATE : public k8BaseEvent
  482. {
  483. public:
  484. k8Event_ACCESS_CANCEL_LATE()
  485. {
  486. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  487. name = _T("Microarchitectural late cancel of an access");
  488. event_id = 0x48;
  489. unitEncode = DC;
  490. }
  491. EVENT_MASK(NULL_MASK) * eventMask;
  492. // name = _T("ACCESS_CANCEL_LATE");
  493. };
  494. class k8Event_ACCESS_CANCEL_EARLY : public k8BaseEvent
  495. {
  496. public:
  497. k8Event_ACCESS_CANCEL_EARLY()
  498. {
  499. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  500. name = _T("Microarchitectural early cancel of an access");
  501. event_id = 0x49;
  502. unitEncode = DC;
  503. }
  504. EVENT_MASK(NULL_MASK) * eventMask;
  505. // name = _T("ACCESS_CANCEL_EARLY");
  506. };
  507. typedef union EVENT_MASK( k8_ecc)
  508. {
  509. struct
  510. {
  511. uint16 ScrubberError : 1; // Scrubber error
  512. uint16 PiggybackScrubberErrors : 1; // Piggyback scrubber errors
  513. };
  514. uint16 flat;
  515. }EVENT_MASK( k8_ecc);
  516. class k8Event_ECC_BIT_ERR : public k8BaseEvent
  517. {
  518. public:
  519. k8Event_ECC_BIT_ERR()
  520. {
  521. eventMask = (EVENT_MASK(k8_ecc) *)&m_eventMask;
  522. name = _T("One bit ECC error recorded found by scrubber");
  523. event_id = 0x4A;
  524. unitEncode = DC;
  525. }
  526. EVENT_MASK(k8_ecc) * eventMask;
  527. // name = _T("ECC_BIT_ERR");
  528. };
  529. // 4B
  530. typedef union EVENT_MASK( k8_distpatch_prefetch_instructions)
  531. {
  532. struct
  533. {
  534. uint16 Load : 1;
  535. uint16 Store : 1;
  536. uint16 NTA : 1;
  537. };
  538. uint16 flat;
  539. }EVENT_MASK( k8_distpatch_prefetch_instructions);
  540. class k8Event_DISPATCHED_PRE_INSTRS : public k8BaseEvent
  541. {
  542. public:
  543. k8Event_DISPATCHED_PRE_INSTRS()
  544. {
  545. eventMask = (EVENT_MASK(k8_distpatch_prefetch_instructions) *)&m_eventMask;
  546. name = _T("Dispatched prefetch instructions");
  547. event_id = 0x4B;
  548. unitEncode = DC;
  549. }
  550. EVENT_MASK(k8_distpatch_prefetch_instructions) * eventMask;
  551. // name = _T("DISPATCHED_PRE_INSTRS");
  552. /* 0x4C: added in Revision C */
  553. };
  554. typedef union EVENT_MASK( k8_lock_accesses)
  555. {
  556. struct
  557. {
  558. uint16 DcacheAccesses:1; // Number of dcache accesses by lock instructions
  559. uint16 DcacheMisses:1; // Number of dcache misses by lock instructions
  560. };
  561. uint16 flat;
  562. }EVENT_MASK( k8_lock_accesses);
  563. class k8Event_LOCK_ACCESSES : public k8BaseEvent
  564. {
  565. public:
  566. k8Event_LOCK_ACCESSES()
  567. {
  568. eventMask = (EVENT_MASK(k8_lock_accesses) *)&m_eventMask;
  569. name = _T("DCACHE accesses by locks") ;
  570. event_id = 0x4C;
  571. unitEncode = DC;
  572. revRequired = 'C';
  573. }
  574. EVENT_MASK(k8_lock_accesses) * eventMask;
  575. };
  576. class k8Event_CYCLES_PROCESSOR_IS_RUNNING : public k8BaseEvent
  577. {
  578. public:
  579. k8Event_CYCLES_PROCESSOR_IS_RUNNING()
  580. {
  581. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  582. name = _T("Cycles processor is running (not in HLT or STPCLK)");
  583. event_id = 0x76;
  584. unitEncode = BU;
  585. }
  586. EVENT_MASK(NULL_MASK) * eventMask;
  587. // name = _T("CYCLES_PROCESSOR_IS_RUNNING"); /* undocumented */
  588. };
  589. typedef union EVENT_MASK( k8_internal_L2_request)
  590. {
  591. struct
  592. {
  593. uint16 ICFill:1; // IC fill
  594. uint16 DCFill:1; // DC fill
  595. uint16 TLBReload:1; // TLB reload
  596. uint16 TagSnoopRequest:1; // Tag snoop request
  597. uint16 CancelledRequest:1; // Cancelled request
  598. };
  599. uint16 flat;
  600. }EVENT_MASK( k8_internal_L2_request);
  601. class k8Event_BU_INT_L2_REQ : public k8BaseEvent
  602. {
  603. public:
  604. k8Event_BU_INT_L2_REQ()
  605. {
  606. eventMask = (EVENT_MASK(k8_internal_L2_request) *)&m_eventMask;
  607. name = _T("Internal L2 request");
  608. unitEncode = BU;
  609. event_id = 0x7D;
  610. }
  611. EVENT_MASK(k8_internal_L2_request) * eventMask;
  612. } ;
  613. // name = _T("BU_INT_L2_REQ");
  614. // 7E
  615. typedef union EVENT_MASK( k8_fill_request_missed_L2)
  616. {
  617. struct
  618. {
  619. uint16 ICFill:1; // IC fill
  620. uint16 DCFill:1; // DC fill
  621. uint16 TLBReload:1; // TLB reload
  622. };
  623. uint16 flat;
  624. } EVENT_MASK( k8_fill_request_missed_L2);
  625. class k8Event_BU_FILL_REQ : public k8BaseEvent
  626. {
  627. public:
  628. k8Event_BU_FILL_REQ()
  629. {
  630. eventMask = (EVENT_MASK(k8_fill_request_missed_L2) *)&m_eventMask;
  631. name = _T("Fill request that missed in L2");
  632. event_id = 0x7E;
  633. unitEncode = BU;
  634. }
  635. EVENT_MASK(k8_fill_request_missed_L2) * eventMask;
  636. // name = _T("BU_FILL_REQ");
  637. };
  638. // 7F
  639. typedef union EVENT_MASK( k8_fill_into_L2)
  640. {
  641. struct
  642. {
  643. uint16 DirtyL2Victim:1; // Dirty L2 victim
  644. uint16 VictimFromL2:1; // Victim from L2
  645. };
  646. uint16 flat;
  647. }EVENT_MASK( k8_fill_into_L2);
  648. class k8Event_BU_FILL_L2 : public k8BaseEvent
  649. {
  650. public:
  651. k8Event_BU_FILL_L2()
  652. {
  653. eventMask = (EVENT_MASK(k8_fill_into_L2) *)&m_eventMask;
  654. name = _T("Fill into L2");
  655. event_id = 0x7F;
  656. unitEncode = BU;
  657. }
  658. EVENT_MASK(k8_fill_into_L2) * eventMask;
  659. // name = _T("BU_FILL_L2");
  660. };
  661. class k8Event_INSTRUCTION_CACHE_FETCHES : public k8BaseEvent
  662. {
  663. public:
  664. k8Event_INSTRUCTION_CACHE_FETCHES()
  665. {
  666. event_id = 0x80;
  667. unitEncode = IC;
  668. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  669. name = _T("Instruction cache fetches");
  670. }
  671. EVENT_MASK(NULL_MASK) * eventMask;
  672. };
  673. class k8Event_INSTRUCTION_CACHE_MISSES : public k8BaseEvent
  674. {
  675. public:
  676. k8Event_INSTRUCTION_CACHE_MISSES()
  677. {
  678. event_id = 0x81;
  679. unitEncode = IC;
  680. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  681. //0xF, NULL, _T("INSTRUCTION_CACHE_MISSES"),
  682. name = _T("Instruction cache misses");
  683. }
  684. EVENT_MASK(NULL_MASK) * eventMask;
  685. };
  686. class k8Event_IC_REFILL_FROM_L2 : public k8BaseEvent
  687. {
  688. public:
  689. k8Event_IC_REFILL_FROM_L2()
  690. {
  691. eventMask = (EVENT_MASK(k8_cache) *)&m_eventMask;
  692. name = _T("Refill from L2");
  693. event_id = 0x82;
  694. unitEncode = IC;
  695. }
  696. EVENT_MASK(k8_cache) * eventMask;
  697. // name = _T("IC_REFILL_FROM_L2");
  698. };
  699. class k8Event_IC_REFILL_FROM_SYS : public k8BaseEvent
  700. {
  701. public:
  702. k8Event_IC_REFILL_FROM_SYS()
  703. {
  704. eventMask = (EVENT_MASK(k8_cache) *)&m_eventMask;
  705. name = _T("Refill from system");
  706. event_id = 0x83;
  707. unitEncode = IC;
  708. }
  709. EVENT_MASK(k8_cache) * eventMask;
  710. // name = _T("IC_REFILL_FROM_SYS");
  711. };
  712. class k8Event_L1_ITLB_MISSES_AND_L2_ITLB_HITS : public k8BaseEvent
  713. {
  714. public:
  715. k8Event_L1_ITLB_MISSES_AND_L2_ITLB_HITS()
  716. {
  717. event_id = 0x84;
  718. unitEncode = IC;
  719. name = _T("L1 ITLB misses (and L2 ITLB hits)");
  720. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  721. }
  722. EVENT_MASK(NULL_MASK) * eventMask;
  723. };
  724. class k8Event_L1_AND_L2_ITLB_MISSES : public k8BaseEvent
  725. {
  726. public:
  727. k8Event_L1_AND_L2_ITLB_MISSES()
  728. {
  729. event_id = 0x85;
  730. unitEncode = IC;
  731. name = _T("(L1 and) L2 ITLB misses");
  732. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  733. }
  734. EVENT_MASK(NULL_MASK) * eventMask;
  735. };
  736. class k8Event_IC_RESYNC_BY_SNOOP : public k8BaseEvent
  737. {
  738. public:
  739. k8Event_IC_RESYNC_BY_SNOOP()
  740. {
  741. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  742. event_id = 0x86;
  743. unitEncode = IC;
  744. name = _T("Microarchitectural resync caused by snoop");
  745. }
  746. EVENT_MASK(NULL_MASK) * eventMask;
  747. // name = _T("IC_RESYNC_BY_SNOOP");
  748. /* similar to 0x22; but IC unit instead of LS unit */
  749. };
  750. class k8Event_IC_FETCH_STALL : public k8BaseEvent
  751. {
  752. public:
  753. k8Event_IC_FETCH_STALL()
  754. {
  755. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  756. name = _T("Instruction fetch stall");
  757. event_id = 0x87;
  758. unitEncode = IC;
  759. }
  760. EVENT_MASK(NULL_MASK) * eventMask;
  761. // name = _T("IC_FETCH_STALL");
  762. };
  763. class k8Event_IC_STACK_HIT : public k8BaseEvent
  764. {
  765. public:
  766. k8Event_IC_STACK_HIT()
  767. {
  768. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  769. name = _T("Return stack hit");
  770. event_id = 0x88;
  771. unitEncode = IC;
  772. }
  773. EVENT_MASK(NULL_MASK) * eventMask;
  774. // name = _T("IC_STACK_HIT");
  775. };
  776. class k8Event_IC_STACK_OVERFLOW : public k8BaseEvent
  777. {
  778. public:
  779. k8Event_IC_STACK_OVERFLOW()
  780. {
  781. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  782. name = _T("Return stack overflow");
  783. event_id = 0x89;
  784. unitEncode = IC;
  785. }
  786. EVENT_MASK(NULL_MASK) * eventMask;
  787. // name = _T("IC_STACK_OVERFLOW");
  788. };
  789. /* 0xC0-0xC7: from K7 official event set */
  790. class k8Event_RETIRED_INSTRUCTIONS : public k8BaseEvent
  791. {
  792. public:
  793. k8Event_RETIRED_INSTRUCTIONS()
  794. {
  795. event_id = 0xC0;
  796. unitEncode = FR;
  797. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  798. //0xF, NULL, _T("RETIRED_INSTRUCTIONS"),
  799. name = _T("Retired instructions (includes exceptions, interrupts, resyncs)");
  800. }
  801. EVENT_MASK(NULL_MASK) * eventMask;
  802. };
  803. class k8Event_RETIRED_OPS : public k8BaseEvent
  804. {
  805. public:
  806. k8Event_RETIRED_OPS()
  807. {
  808. event_id = 0xC1;
  809. unitEncode = FR;
  810. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  811. //0xF, NULL, _T("RETIRED_OPS"),
  812. name = _T("Retired Ops") ;
  813. }
  814. EVENT_MASK(NULL_MASK) * eventMask;
  815. };
  816. class k8Event_RETIRED_BRANCHES : public k8BaseEvent
  817. {
  818. public:
  819. k8Event_RETIRED_BRANCHES()
  820. {
  821. event_id = 0xC2;
  822. unitEncode = FR;
  823. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  824. //0xF, NULL, _T("RETIRED_BRANCHES"),
  825. name = _T("Retired branches (conditional, unconditional, exceptions, interrupts)") ;
  826. }
  827. EVENT_MASK(NULL_MASK) * eventMask;
  828. };
  829. class k8Event_RETIRED_BRANCHES_MISPREDICTED : public k8BaseEvent
  830. {
  831. public:
  832. k8Event_RETIRED_BRANCHES_MISPREDICTED()
  833. {
  834. event_id = 0xC3;
  835. unitEncode = FR;
  836. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  837. //0xF, NULL, _T("RETIRED_BRANCHES_MISPREDICTED"),
  838. name = _T("Retired branches mispredicted") ;
  839. }
  840. EVENT_MASK(NULL_MASK) * eventMask;
  841. };
  842. class k8Event_RETIRED_TAKEN_BRANCHES : public k8BaseEvent
  843. {
  844. public:
  845. k8Event_RETIRED_TAKEN_BRANCHES()
  846. {
  847. event_id = 0xC4;
  848. unitEncode = FR;
  849. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  850. //0xF, NULL, _T("RETIRED_TAKEN_BRANCHES"),
  851. name = _T("Retired taken branches") ;
  852. }
  853. EVENT_MASK(NULL_MASK) * eventMask;
  854. };
  855. class k8Event_RETIRED_TAKEN_BRANCHES_MISPREDICTED : public k8BaseEvent
  856. {
  857. public:
  858. k8Event_RETIRED_TAKEN_BRANCHES_MISPREDICTED()
  859. {
  860. event_id = 0xC5;
  861. unitEncode = FR;
  862. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  863. //0xF, NULL, _T("RETIRED_TAKEN_BRANCHES_MISPREDICTED"),
  864. name = _T("Retired taken branches mispredicted") ;
  865. }
  866. EVENT_MASK(NULL_MASK) * eventMask;
  867. };
  868. class k8Event_RETIRED_FAR_CONTROL_TRANSFERS : public k8BaseEvent
  869. {
  870. public:
  871. k8Event_RETIRED_FAR_CONTROL_TRANSFERS()
  872. {
  873. event_id = 0xC6;
  874. unitEncode = FR;
  875. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  876. //0xF, NULL, _T("RETIRED_FAR_CONTROL_TRANSFERS"),
  877. name = _T("Retired far control transfers") ;
  878. }
  879. EVENT_MASK(NULL_MASK) * eventMask;
  880. };
  881. class k8Event_RETIRED_RESYNC_BRANCHES : public k8BaseEvent
  882. {
  883. public:
  884. k8Event_RETIRED_RESYNC_BRANCHES()
  885. {
  886. event_id = 0xC7;
  887. unitEncode = FR;
  888. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  889. //0xF, NULL, _T("RETIRED_RESYNC_BRANCHES"),
  890. name = _T("Retired resync branches (only non-control transfer branches counted)") ;
  891. }
  892. EVENT_MASK(NULL_MASK) * eventMask;
  893. };
  894. class k8Event_RETIRED_NEAR_RETURNS : public k8BaseEvent
  895. {
  896. public:
  897. k8Event_RETIRED_NEAR_RETURNS()
  898. {
  899. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  900. name = _T("Retired near returns");
  901. event_id = 0xC8;
  902. unitEncode = FR;
  903. }
  904. EVENT_MASK(NULL_MASK) * eventMask;
  905. };
  906. class k8Event_RETIRED_RETURNS_MISPREDICT : public k8BaseEvent
  907. {
  908. public:
  909. k8Event_RETIRED_RETURNS_MISPREDICT()
  910. {
  911. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  912. name = _T("Retired near returns mispredicted");
  913. event_id = 0xC9;
  914. unitEncode = FR;
  915. }
  916. EVENT_MASK(NULL_MASK) * eventMask;
  917. // name = _T("RETIRED_RETURNS_MISPREDICT");
  918. };
  919. class k8Event_RETIRED_BRANCH_MISCOMPARE : public k8BaseEvent
  920. {
  921. public:
  922. k8Event_RETIRED_BRANCH_MISCOMPARE()
  923. {
  924. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  925. name = _T("Retired taken branches mispredicted due to address miscompare");
  926. event_id = 0xCA;
  927. unitEncode = FR;
  928. }
  929. EVENT_MASK(NULL_MASK) * eventMask;
  930. // name = _T("RETIRED_BRANCH_MISCOMPARE");
  931. };
  932. /* Revision B and later */
  933. typedef union EVENT_MASK( k8_retired_fpu_instr)
  934. {
  935. struct
  936. {
  937. uint16 DirtyL2Victim:1; // x87 instructions
  938. uint16 CombinedMMX_3DNow:1; // Combined MMX & 3DNow! instructions
  939. uint16 CombinedPackedSSE_SSE2:1; // Combined packed SSE and SSE2 instructions
  940. uint16 CombinedScalarSSE_SSE2:1; // Combined scalar SSE and SSE2 instructions
  941. };
  942. uint16 flat;
  943. }EVENT_MASK( k8_retired_fpu_instr);
  944. class k8Event_RETIRED_FPU_INSTRS : public k8BaseEvent
  945. {
  946. public:
  947. k8Event_RETIRED_FPU_INSTRS()
  948. {
  949. eventMask = (EVENT_MASK(k8_retired_fpu_instr) *)&m_eventMask;
  950. event_id = 0xCB;
  951. unitEncode = FR;
  952. name = _T("Retired FPU instructions");
  953. revRequired = 'B';
  954. }
  955. EVENT_MASK(k8_retired_fpu_instr) * eventMask;
  956. /* Revision B and later */
  957. };
  958. // CC
  959. typedef union EVENT_MASK( k8_retired_fastpath_double_op_instr )
  960. {
  961. struct
  962. {
  963. uint16 LowOpPosition0:1; // With low op in position 0
  964. uint16 LowOpPosition1:1; // With low op in position 1
  965. uint16 LowOpPosition2:1; // With low op in position 2
  966. };
  967. uint16 flat;
  968. }EVENT_MASK( k8_retired_fastpath_double_op_instr);
  969. class k8Event_RETIRED_FASTPATH_INSTRS : public k8BaseEvent
  970. {
  971. public:
  972. k8Event_RETIRED_FASTPATH_INSTRS()
  973. {
  974. eventMask = (EVENT_MASK(k8_retired_fastpath_double_op_instr) *)&m_eventMask;
  975. event_id = 0xCC;
  976. unitEncode = FR;
  977. name = _T("Retired fastpath double op instructions");
  978. revRequired = 'B';
  979. }
  980. EVENT_MASK(k8_retired_fastpath_double_op_instr) * eventMask;
  981. };
  982. class k8Event_INTERRUPTS_MASKED_CYCLES : public k8BaseEvent
  983. {
  984. public:
  985. k8Event_INTERRUPTS_MASKED_CYCLES()
  986. {
  987. event_id = 0xCD;
  988. unitEncode = FR;
  989. //0xF, NULL, _T("INTERRUPTS_MASKED_CYCLES"),
  990. name = _T("Interrupts masked cycles (IF=0)") ;
  991. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  992. }
  993. EVENT_MASK(NULL_MASK) * eventMask;
  994. };
  995. class k8Event_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES : public k8BaseEvent
  996. {
  997. public:
  998. k8Event_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES()
  999. {
  1000. event_id = 0xCE;
  1001. unitEncode = FR;
  1002. //0xF, NULL, _T("INTERRUPTS_MASKED_WHILE_PENDING_CYCLES"),
  1003. name = _T("Interrupts masked while pending cycles (INTR while IF=0)") ;
  1004. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1005. }
  1006. EVENT_MASK(NULL_MASK) * eventMask;
  1007. };
  1008. class k8Event_NUMBER_OF_TAKEN_HARDWARE_INTERRUPTS : public k8BaseEvent
  1009. {
  1010. public:
  1011. k8Event_NUMBER_OF_TAKEN_HARDWARE_INTERRUPTS()
  1012. {
  1013. event_id = 0xCF;
  1014. unitEncode = FR;
  1015. //0xF, NULL, _T("NUMBER_OF_TAKEN_HARDWARE_INTERRUPTS"),
  1016. name = _T("Number of taken hardware interrupts") ;
  1017. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1018. }
  1019. EVENT_MASK(NULL_MASK) * eventMask;
  1020. };
  1021. class k8Event_DECODER_EMPTY : public k8BaseEvent
  1022. {
  1023. public:
  1024. k8Event_DECODER_EMPTY()
  1025. {
  1026. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1027. name = _T("Nothing to dispatch (decoder empty)");
  1028. event_id = 0xD0;
  1029. unitEncode = FR;
  1030. }
  1031. EVENT_MASK(NULL_MASK) * eventMask;
  1032. // name = _T("DECODER_EMPTY");
  1033. };
  1034. class k8Event_DISPATCH_STALLS : public k8BaseEvent
  1035. {
  1036. public:
  1037. k8Event_DISPATCH_STALLS()
  1038. {
  1039. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1040. name = _T("Dispatch stalls (events 0xD2-0xDA combined)");
  1041. event_id = 0xD1;
  1042. unitEncode = FR;
  1043. }
  1044. EVENT_MASK(NULL_MASK) * eventMask;
  1045. // name = _T("DISPATCH_STALLS");
  1046. };
  1047. class k8Event_DISPATCH_STALL_FROM_BRANCH_ABORT : public k8BaseEvent
  1048. {
  1049. public:
  1050. k8Event_DISPATCH_STALL_FROM_BRANCH_ABORT()
  1051. {
  1052. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1053. name = _T("Dispatch stall from branch abort to retire");
  1054. event_id = 0xD2;
  1055. unitEncode = FR;
  1056. }
  1057. EVENT_MASK(NULL_MASK) * eventMask;
  1058. // name = _T("DISPATCH_STALL_FROM_BRANCH_ABORT");
  1059. };
  1060. class k8Event_DISPATCH_STALL_SERIALIZATION : public k8BaseEvent
  1061. {
  1062. public:
  1063. k8Event_DISPATCH_STALL_SERIALIZATION()
  1064. {
  1065. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1066. name = _T("Dispatch stall for serialization");
  1067. event_id = 0xD3;
  1068. unitEncode = FR;
  1069. }
  1070. EVENT_MASK(NULL_MASK) * eventMask;
  1071. // name = _T("DISPATCH_STALL_SERIALIZATION");
  1072. };
  1073. class k8Event_DISPATCH_STALL_SEG_LOAD : public k8BaseEvent
  1074. {
  1075. public:
  1076. k8Event_DISPATCH_STALL_SEG_LOAD()
  1077. {
  1078. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1079. name = _T("Dispatch stall for segment load");
  1080. event_id = 0xD4;
  1081. unitEncode = FR;
  1082. }
  1083. EVENT_MASK(NULL_MASK) * eventMask;
  1084. // name = _T("DISPATCH_STALL_SEG_LOAD");
  1085. };
  1086. class k8Event_DISPATCH_STALL_REORDER_BUFFER : public k8BaseEvent
  1087. {
  1088. public:
  1089. k8Event_DISPATCH_STALL_REORDER_BUFFER()
  1090. {
  1091. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1092. name = _T("Dispatch stall when reorder buffer is full");
  1093. event_id = 0xD5;
  1094. unitEncode = FR;
  1095. }
  1096. EVENT_MASK(NULL_MASK) * eventMask;
  1097. // name = _T("DISPATCH_STALL_REORDER_BUFFER");
  1098. };
  1099. class k8Event_DISPATCH_STALL_RESERVE_STATIONS : public k8BaseEvent
  1100. {
  1101. public:
  1102. k8Event_DISPATCH_STALL_RESERVE_STATIONS()
  1103. {
  1104. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1105. name = _T("Dispatch stall when reservation stations are full");
  1106. event_id = 0xD6;
  1107. unitEncode = FR;
  1108. }
  1109. EVENT_MASK(NULL_MASK) * eventMask;
  1110. // name = _T("DISPATCH_STALL_RESERVE_STATIONS");
  1111. };
  1112. class k8Event_DISPATCH_STALL_FPU : public k8BaseEvent
  1113. {
  1114. public:
  1115. k8Event_DISPATCH_STALL_FPU()
  1116. {
  1117. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1118. name = _T("Dispatch stall when FPU is full");
  1119. event_id = 0xD7;
  1120. unitEncode = FR;
  1121. }
  1122. EVENT_MASK(NULL_MASK) * eventMask;
  1123. // name = _T("DISPATCH_STALL_FPU");
  1124. };
  1125. class k8Event_DISPATCH_STALL_LS : public k8BaseEvent
  1126. {
  1127. public:
  1128. k8Event_DISPATCH_STALL_LS()
  1129. {
  1130. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1131. name = _T("Dispatch stall when LS is full");
  1132. event_id = 0xD8;
  1133. unitEncode = FR;
  1134. }
  1135. EVENT_MASK(NULL_MASK) * eventMask;
  1136. // name = _T("DISPATCH_STALL_LS");
  1137. };
  1138. class k8Event_DISPATCH_STALL_QUIET_WAIT : public k8BaseEvent
  1139. {
  1140. public:
  1141. k8Event_DISPATCH_STALL_QUIET_WAIT()
  1142. {
  1143. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1144. name = _T("Dispatch stall when waiting for all to be quiet");
  1145. event_id = 0xD9;
  1146. unitEncode = FR;
  1147. }
  1148. EVENT_MASK(NULL_MASK) * eventMask;
  1149. // name = _T("DISPATCH_STALL_QUIET_WAIT");
  1150. };
  1151. class k8Event_DISPATCH_STALL_PENDING : public k8BaseEvent
  1152. {
  1153. public:
  1154. k8Event_DISPATCH_STALL_PENDING()
  1155. {
  1156. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1157. name = _T("Dispatch stall when far control transfer or resync branch is pending");
  1158. event_id = 0xDA;
  1159. unitEncode = FR;
  1160. }
  1161. EVENT_MASK(NULL_MASK) * eventMask;
  1162. // name = _T("DISPATCH_STALL_PENDING");
  1163. };
  1164. typedef union EVENT_MASK( k8_fpu_exceptions)
  1165. {
  1166. struct
  1167. {
  1168. uint16 x87ReclassMicrofaults:1; // x87 reclass microfaults
  1169. uint16 SSERetypeMicrofaults:1; // SSE retype microfaults
  1170. uint16 SSEReclassMicrofaults:1; // SSE reclass microfaults
  1171. uint16 SSE_x87Microtraps:1; // SSE and x87 microtraps
  1172. };
  1173. uint16 flat;
  1174. }EVENT_MASK( k8_fpu_exceptions);
  1175. class k8Event_FPU_EXCEPTIONS : public k8BaseEvent
  1176. {
  1177. public:
  1178. k8Event_FPU_EXCEPTIONS()
  1179. {
  1180. eventMask = (EVENT_MASK(k8_fpu_exceptions) *)&m_eventMask;
  1181. event_id = 0xDB;
  1182. unitEncode = FR;
  1183. name = _T("FPU exceptions");
  1184. revRequired = 'B';
  1185. }
  1186. EVENT_MASK(k8_fpu_exceptions) * eventMask;
  1187. // name = _T("FPU_EXCEPTIONS");
  1188. /* Revision B and later */
  1189. };
  1190. class k8Event_DR0_BREAKPOINTS : public k8BaseEvent
  1191. {
  1192. public:
  1193. k8Event_DR0_BREAKPOINTS()
  1194. {
  1195. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1196. name = _T("Number of breakpoints for DR0");
  1197. event_id = 0xDC;
  1198. unitEncode = FR;
  1199. }
  1200. EVENT_MASK(NULL_MASK) * eventMask;
  1201. // name = _T("DR0_BREAKPOINTS");
  1202. };
  1203. class k8Event_DR1_BREAKPOINTS : public k8BaseEvent
  1204. {
  1205. public:
  1206. k8Event_DR1_BREAKPOINTS()
  1207. {
  1208. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1209. name = _T("Number of breakpoints for DR1");
  1210. event_id = 0xDD;
  1211. unitEncode = FR;
  1212. }
  1213. EVENT_MASK(NULL_MASK) * eventMask;
  1214. // name = _T("DR1_BREAKPOINTS");
  1215. };
  1216. class k8Event_DR2_BREAKPOINTS : public k8BaseEvent
  1217. {
  1218. public:
  1219. k8Event_DR2_BREAKPOINTS()
  1220. {
  1221. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1222. name = _T("Number of breakpoints for DR2");
  1223. event_id = 0xDE;
  1224. unitEncode = FR;
  1225. }
  1226. EVENT_MASK(NULL_MASK) * eventMask;
  1227. // name = _T("DR2_BREAKPOINTS");
  1228. };
  1229. class k8Event_DR3_BREAKPOINTS : public k8BaseEvent
  1230. {
  1231. public:
  1232. k8Event_DR3_BREAKPOINTS()
  1233. {
  1234. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1235. name = _T("Number of breakpoints for DR3");
  1236. event_id = 0xDF;
  1237. unitEncode = FR;
  1238. }
  1239. EVENT_MASK(NULL_MASK) * eventMask;
  1240. // name = _T("DR3_BREAKPOINTS");
  1241. };
  1242. // E0
  1243. typedef union EVENT_MASK( k8_page_access_event)
  1244. {
  1245. struct
  1246. {
  1247. uint16 PageHit:1; // Page hit
  1248. uint16 PageMiss:1; // Page miss
  1249. uint16 PageConflict:1; // Page conflict
  1250. };
  1251. uint16 flat;
  1252. }EVENT_MASK( k8_page_access_event);
  1253. class k8Event_MEM_PAGE_ACCESS : public k8BaseEvent
  1254. {
  1255. public:
  1256. k8Event_MEM_PAGE_ACCESS()
  1257. {
  1258. eventMask = (EVENT_MASK(k8_page_access_event) *)&m_eventMask;
  1259. name = _T("Memory controller page access");
  1260. event_id = 0xE0;
  1261. unitEncode = NB;
  1262. }
  1263. EVENT_MASK(k8_page_access_event) * eventMask;
  1264. // name = _T("MEM_PAGE_ACCESS");
  1265. };
  1266. class k8Event_MEM_PAGE_TBL_OVERFLOW : public k8BaseEvent
  1267. {
  1268. public:
  1269. k8Event_MEM_PAGE_TBL_OVERFLOW()
  1270. {
  1271. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1272. name = _T("Memory controller page table overflow");
  1273. event_id = 0xE1;
  1274. unitEncode = NB;
  1275. }
  1276. EVENT_MASK(NULL_MASK) * eventMask;
  1277. // name = _T("MEM_PAGE_TBL_OVERFLOW");
  1278. };
  1279. class k8Event_DRAM_SLOTS_MISSED : public k8BaseEvent
  1280. {
  1281. public:
  1282. k8Event_DRAM_SLOTS_MISSED()
  1283. {
  1284. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1285. name = _T("Memory controller DRAM command slots missed (in MemClks)");
  1286. event_id = 0xE2;
  1287. unitEncode = NB;
  1288. }
  1289. EVENT_MASK(NULL_MASK) * eventMask;
  1290. // name = _T("DRAM_SLOTS_MISSED");
  1291. };
  1292. // e3
  1293. typedef union EVENT_MASK( k8_turnaround)
  1294. {
  1295. struct
  1296. {
  1297. uint16 DIMMTurnaround:1; // DIMM turnaround
  1298. uint16 ReadToWriteTurnaround:1; // Read to write turnaround
  1299. uint16 WriteToReadTurnaround:1; // Write to read turnaround
  1300. };
  1301. uint16 flat;
  1302. }EVENT_MASK( k8_turnaround);
  1303. class k8Event_MEM_TURNAROUND : public k8BaseEvent
  1304. {
  1305. public:
  1306. k8Event_MEM_TURNAROUND()
  1307. {
  1308. eventMask = (EVENT_MASK(k8_turnaround) *)&m_eventMask;
  1309. name = _T("Memory controller turnaround");
  1310. event_id = 0xE3;
  1311. unitEncode = NB;
  1312. }
  1313. EVENT_MASK(k8_turnaround) * eventMask;
  1314. // name = _T("MEM_TURNAROUND");
  1315. };
  1316. // E4
  1317. typedef union EVENT_MASK( k8_bypass_counter_saturation)
  1318. {
  1319. struct
  1320. {
  1321. uint16 MEM_HighPriorityBypass:1; // Memory controller high priority bypass
  1322. uint16 MEM_LowPriorityBypass:1; // Memory controller low priority bypass
  1323. uint16 DRAM_InterfaceBypass:1; // DRAM controller interface bypass
  1324. uint16 DRAM_QueueBypass:1; // DRAM controller queue bypass
  1325. };
  1326. uint16 flat;
  1327. }EVENT_MASK( k8_bypass_counter_saturation);
  1328. class k8Event_MEM_BYPASS_SAT : public k8BaseEvent
  1329. {
  1330. public:
  1331. k8Event_MEM_BYPASS_SAT()
  1332. {
  1333. eventMask = (EVENT_MASK(k8_bypass_counter_saturation) *)&m_eventMask;
  1334. name = _T("Memory controller bypass counter saturation");
  1335. event_id = 0xE4;
  1336. unitEncode = NB;
  1337. }
  1338. EVENT_MASK(k8_bypass_counter_saturation) * eventMask;
  1339. // name = _T("MEM_BYPASS_SAT");
  1340. };
  1341. //EB
  1342. typedef union EVENT_MASK( k8_sized_commands)
  1343. {
  1344. struct
  1345. {
  1346. uint16 NonPostWrSzByte:1; // NonPostWrSzByte
  1347. uint16 NonPostWrSzDword:1; // NonPostWrSzDword
  1348. uint16 PostWrSzByte:1; // PostWrSzByte
  1349. uint16 PostWrSzDword:1; // PostWrSzDword
  1350. uint16 RdSzByte:1; // RdSzByte
  1351. uint16 RdSzDword:1; // RdSzDword
  1352. uint16 RdModWr:1; // RdModWr
  1353. };
  1354. uint16 flat;
  1355. }EVENT_MASK( k8_sized_commands);
  1356. class k8Event_SIZED_COMMANDS : public k8BaseEvent
  1357. {
  1358. public:
  1359. k8Event_SIZED_COMMANDS()
  1360. {
  1361. eventMask = (EVENT_MASK(k8_sized_commands) *)&m_eventMask;
  1362. name = _T("Sized commands");
  1363. event_id = 0xEB;
  1364. unitEncode = NB;
  1365. }
  1366. EVENT_MASK(k8_sized_commands) * eventMask;
  1367. // name = _T("SIZED_COMMANDS");
  1368. };
  1369. typedef union EVENT_MASK( k8_probe_result)
  1370. {
  1371. struct
  1372. {
  1373. uint16 ProbeMiss:1; // Probe miss
  1374. uint16 ProbeHit:1; // Probe hit
  1375. uint16 ProbeHitDirtyWithoutMemoryCancel:1; // Probe hit dirty without memory cancel
  1376. uint16 ProbeHitDirtyWithMemoryCancel:1; // Probe hit dirty with memory cancel
  1377. uint16 UpstreamDisplayRefreshReads:1; // Rev D and later
  1378. uint16 UpstreamNonDisplayRefreshReads:1; // Rev D and later
  1379. uint16 UpstreamWrites:1; // Rev D and later
  1380. };
  1381. uint16 flat;
  1382. }EVENT_MASK( k8_probe_result);
  1383. class k8Event_PROBE_RESULT : public k8BaseEvent
  1384. {
  1385. public:
  1386. k8Event_PROBE_RESULT()
  1387. {
  1388. eventMask = (EVENT_MASK(k8_probe_result) *)&m_eventMask;
  1389. name = _T("Probe result");
  1390. event_id = 0xEC;
  1391. unitEncode = NB;
  1392. }
  1393. EVENT_MASK(k8_probe_result) * eventMask;
  1394. // name = _T("PROBE_RESULT");
  1395. };
  1396. typedef union EVENT_MASK( k8_ht)
  1397. {
  1398. struct
  1399. {
  1400. uint16 CommandSent:1; // Command sent
  1401. uint16 DataSent:1; // Data sent
  1402. uint16 BufferReleaseSent:1; // Buffer release sent
  1403. uint16 NopSent:1; // Nop sent
  1404. };
  1405. uint16 flat;
  1406. }EVENT_MASK( k8_ht);
  1407. class k8Event_HYPERTRANSPORT_BUS0_WIDTH : public k8BaseEvent
  1408. {
  1409. public:
  1410. k8Event_HYPERTRANSPORT_BUS0_WIDTH()
  1411. {
  1412. eventMask = (EVENT_MASK(k8_ht) *)&m_eventMask;
  1413. name = _T("Hypertransport (tm) bus 0 bandwidth");
  1414. event_id = 0xF6;
  1415. unitEncode = NB;
  1416. }
  1417. EVENT_MASK(k8_ht) * eventMask;
  1418. // name = _T("HYPERTRANSPORT_BUS0_WIDTH");
  1419. };
  1420. class k8Event_HYPERTRANSPORT_BUS1_WIDTH : public k8BaseEvent
  1421. {
  1422. public:
  1423. k8Event_HYPERTRANSPORT_BUS1_WIDTH()
  1424. {
  1425. eventMask = (EVENT_MASK(k8_ht) *)&m_eventMask;
  1426. name = _T("Hypertransport (tm) bus 1 bandwidth");
  1427. event_id = 0xF7;
  1428. unitEncode = NB;
  1429. }
  1430. EVENT_MASK(k8_ht) * eventMask;
  1431. // name = _T("HYPERTRANSPORT_BUS1_WIDTH");
  1432. };
  1433. class k8Event_HYPERTRANSPORT_BUS2_WIDTH : public k8BaseEvent
  1434. {
  1435. public:
  1436. k8Event_HYPERTRANSPORT_BUS2_WIDTH()
  1437. {
  1438. eventMask = (EVENT_MASK(k8_ht) *)&m_eventMask;
  1439. name = _T("Hypertransport (tm) bus 2 bandwidth");
  1440. event_id = 0xF8;
  1441. unitEncode = NB;
  1442. }
  1443. EVENT_MASK(k8_ht) * eventMask;
  1444. // name = _T("HYPERTRANSPORT_BUS2_WIDTH");
  1445. };
  1446. //
  1447. //typedef union EVENT_MASK( perfctr_event_set k8_common_event_set)
  1448. //{
  1449. //
  1450. // .cpu_type = PERFCTR_X86_AMD_K8,
  1451. // .event_prefix = _T("K8_"),
  1452. // .include = &k7_official_event_set,
  1453. // .nevents = ARRAY_SIZE(k8_common_events),
  1454. // .events = k8_common_events,
  1455. //}EVENT_MASK( perfctr_event_set k8_common_event_set);
  1456. //
  1457. //typedef union EVENT_MASK( perfctr_event k8_events[])
  1458. //{
  1459. //
  1460. // { 0x24, 0xF, UM(NULL), _T("LOCKED_OP"), /* unit mask changed in Rev. C */
  1461. // _T("Locked operation") },
  1462. //}EVENT_MASK( perfctr_event k8_events[]);
  1463. //const struct perfctr_event_set perfctr_k8_event_set)
  1464. //{
  1465. //
  1466. // .cpu_type = PERFCTR_X86_AMD_K8,
  1467. // .event_prefix = _T("K8_"),
  1468. // .include = &k8_common_event_set,
  1469. // .nevents = ARRAY_SIZE(k8_events),
  1470. // .events = k8_events,
  1471. //};
  1472. //
  1473. /*
  1474. * K8 Revision C. Starts at CPUID 0xF58 for Opteron/Athlon64FX and
  1475. * CPUID 0xF48 for Athlon64. (CPUID 0xF51 is Opteron Revision B3.)
  1476. */
  1477. //
  1478. //typedef union EVENT_MASK( k8_lock_accesses)
  1479. //{
  1480. // struct
  1481. // {
  1482. // uint16 DcacheAccesses:1; // Number of dcache accesses by lock instructions" },
  1483. // uint16 DcacheMisses:1; // Number of dcache misses by lock instructions" } }
  1484. // };
  1485. // uint16 flat;
  1486. //
  1487. //}EVENT_MASK( k8_lock_accesses);
  1488. //
  1489. #endif // K8PERFORMANCECOUNTERS_H