Team Fortress 2 Source Code as of 22/4/2020

  1. //========= Copyright Valve Corporation, All rights reserved. ============//
  2. //
  3. // Purpose:
  4. //
  5. // $NoKeywords: $
  6. //
  7. //=============================================================================//
  8. #ifndef K8PERFORMANCECOUNTERS_H
  9. #define K8PERFORMANCECOUNTERS_H
  10. /*
  11. * AMD K8 events.
  12. *
  13. */
  14. typedef union EVENT_MASK(NULL_MASK)
  15. {
  16. // no tests defined
  17. uint16 flat;
  18. } EVENT_MASK(NULL_MASK);
  19. #define MSR_K8_EVNTSEL0 0xC0010000 /* .. 0xC0010003 */
  20. #define MSR_K8_PERFCTR0 0xC0010004 /* .. 0xC0010007 */
  21. # pragma pack(push, 1)
  22. // access to these bits is through the methods
  23. typedef union PerfEvtSel
  24. {
  25. struct
  26. {
  27. uint64 EventMask : 8;
  28. uint64 UnitMask : 8;
  29. uint64 USR : 1;
  30. uint64 OS : 1;
  31. uint64 Edge : 1;
  32. uint64 PC : 1;
  33. uint64 INTAPIC : 1;
  34. uint64 Reserved21 : 1;
  35. uint64 Enable : 1;
  36. uint64 Complement : 1; // aka INV
  37. uint64 Threshold : 8; // aka CounterMask
  38. uint64 Reserved32 : 32;
  39. };
  40. uint64 flat;
  41. } PerfEvtSel;
  42. enum UnitEncode
  43. {
  44. FP,
  45. LS,
  46. DC,
  47. BU,
  48. IC,
  49. UE_Unknown,
  50. FR,
  51. NB
  52. };
  53. # pragma pack(pop)
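// Illustrative sketch: how a PerfEvtSel value might be assembled before being
// written to one of the event-select MSRs defined above. The field values are
// examples only; the actual event encodings appear in the event classes below.
//
//   PerfEvtSel sel;
//   sel.flat = 0;
//   sel.EventMask = 0xC0;  // e.g. retired instructions (see k8Event_RETIRED_INSTRUCTIONS)
//   sel.UnitMask  = 0x00;  // no sub-event filtering
//   sel.USR = 1;           // count in user mode
//   sel.OS  = 1;           // count in kernel mode
//   sel.Enable = 1;
//   // sel.flat is the value written to MSR_K8_EVNTSEL0 + n (n = 0..3);
//   // the matching count is then read from MSR_K8_PERFCTR0 + n or via RDPMC.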
  54. // Turn off the no return value warning in ReadCounter.
  55. #pragma warning( disable : 4035 )
  56. #define k8NUM_COUNTERS 4
  57. class k8BaseEvent
  58. {
  59. public:
  60. PME * pme;
  61. PerfEvtSel eventSelect[k8NUM_COUNTERS];
  62. unsigned short m_eventMask;
  63. int event_id;
  64. tchar * name;
  65. tchar revRequired;
  66. int eventSelectNum;
  67. UnitEncode unitEncode;
  68. void SetCounter(int n)
  69. {
  70. if (n < 0)
  71. n = 0;
  72. else if (n > 3)
  73. n = 3;
  74. eventSelectNum = n;
  75. }
  76. k8BaseEvent()
  77. {
  78. pme = PME::Instance();
  79. for(int i = 0; i< k8NUM_COUNTERS; i++)
  80. {
  81. eventSelect[i].flat = 0;
  82. }
  83. eventSelectNum = 0;
  84. m_eventMask = 0;
  85. event_id = 0;
  86. name = 0;
  87. revRequired = 'A';
  88. }
  89. void SetCaptureMode(PrivilegeCapture priv)
  90. {
  91. PerfEvtSel & select = eventSelect[eventSelectNum];
  92. StopCounter();
  93. switch (priv)
  94. {
  95. case OS_Only:
  96. select.USR = 0;
  97. select.OS = 1;
  98. break;
  99. case USR_Only:
  100. select.USR = 1;
  101. select.OS = 0;
  102. break;
  103. case OS_and_USR:
  104. select.USR = 1;
  105. select.OS = 1;
  106. break;
  107. }
  108. select.UnitMask = m_eventMask;
  109. select.EventMask = event_id;
  110. int selectPort = MSR_K8_EVNTSEL0 + eventSelectNum;
  111. pme->WriteMSR(selectPort, select.flat);
  112. }
  113. void SetFiltering(CompareState compareEnable,
  114. CompareMethod compareMethod,
  115. uint8 threshold,
  116. EdgeState edgeEnable)
  117. {
  118. PerfEvtSel & select = eventSelect[eventSelectNum];
  119. StopCounter();
  120. if (compareEnable == CompareDisable)
  121. select.Threshold = 0;
  122. else
  123. select.Threshold = threshold;
  124. select.Complement = compareMethod;
  125. select.Edge = edgeEnable;
  126. int selectPort = MSR_K8_EVNTSEL0 + eventSelectNum;
  127. pme->WriteMSR(selectPort, select.flat);
  128. }
  129. void StartCounter()
  130. {
  131. PerfEvtSel & select = eventSelect[eventSelectNum];
  132. select.Enable = 1;
  133. int selectPort = MSR_K8_EVNTSEL0 + eventSelectNum;
  134. pme->WriteMSR(selectPort, select.flat);
  135. }
  136. void StopCounter()
  137. {
  138. PerfEvtSel & select = eventSelect[eventSelectNum];
  139. select.Enable = 0;
  140. int selectPort = MSR_K8_EVNTSEL0 + eventSelectNum;
  141. pme->WriteMSR(selectPort, select.flat);
  142. }
  143. void ClearCounter()
  144. {
  145. PerfEvtSel & select = eventSelect[eventSelectNum];
  146. int counterPort = MSR_K8_PERFCTR0 + eventSelectNum;
  147. pme->WriteMSR(counterPort, 0ui64 ); // clear
  148. }
  149. void WriteCounter(int64 value)
  150. {
  151. PerfEvtSel & select = eventSelect[eventSelectNum];
  152. int counterPort = MSR_K8_PERFCTR0 + eventSelectNum;
  153. pme->WriteMSR(counterPort, value); // write the starting value
  154. }
  155. int64 ReadCounter()
  156. {
  157. #if PME_DEBUG
  158. PerfEvtSel & select = eventSelect[eventSelectNum];
  159. if (select.USR == 0 && select.OS == 0)
  160. return -1; // no area to collect, use SetCaptureMode
  161. if (select.EventMask == 0)
  162. return -2; // no event mask set
  163. if (eventSelectNum < 0 || eventSelectNum > 3)
  164. return -3; // counter not legal
  165. // check revision
  166. #endif
  167. // ReadMSR should work here too, but RDPMC should be faster
  168. //ReadMSR(counterPort, int64);
  169. // copy the member into a local so the inline asm below can reference it directly
  170. #ifdef COMPILER_MSVC64
  171. return __readpmc((unsigned long) eventSelectNum);
  172. #else
  173. int temp = eventSelectNum;
  174. _asm
  175. {
  176. mov ecx, temp
  177. RDPMC
  178. }
  179. #endif
  180. }
  181. };
  182. #pragma warning( default : 4035 )
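// Usage sketch (illustrative only): the typical lifecycle of a counter built
// on k8BaseEvent. OS_and_USR is a PrivilegeCapture value presumably declared
// in the companion performance-counter headers alongside PME; any concrete
// event class (here k8Event_RETIRED_INSTRUCTIONS, defined below) can stand in.
//
//   k8Event_RETIRED_INSTRUCTIONS ev;
//   ev.SetCounter(0);                 // use EVNTSEL0 / PERFCTR0
//   ev.SetCaptureMode(OS_and_USR);    // program the event-select MSR
//   ev.ClearCounter();
//   ev.StartCounter();
//   // ... code being measured ...
//   ev.StopCounter();
//   int64 count = ev.ReadCounter();   // RDPMC (or __readpmc on x64)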
  183. typedef union EVENT_MASK(k8_dispatched_fpu_ops)
  184. {
  185. // event 0
  186. struct
  187. {
  188. uint16 AddPipeOps:1; // Add pipe ops excluding junk ops
  189. uint16 MulPipeOps:1; // Multiply pipe ops excluding junk ops
  190. uint16 StoreOps:1; // Store pipe ops excluding junk ops
  191. uint16 AddPipeOpsJunk:1; // Add pipe junk ops
  192. uint16 MulPipeOpsJunk:1; // Multiply pipe junk ops
  193. uint16 StoreOpsJunk:1; // Store pipe junk ops
  194. };
  195. uint16 flat;
  196. } EVENT_MASK(k8_dispatched_fpu_ops);
  197. class k8Event_DISPATCHED_FPU_OPS : public k8BaseEvent
  198. {
  199. public:
  200. k8Event_DISPATCHED_FPU_OPS()
  201. {
  202. eventMask = (EVENT_MASK(k8_dispatched_fpu_ops) *)&m_eventMask;
  203. event_id = 0x00;
  204. unitEncode = FP;
  205. name = _T("Dispatched FPU ops");
  206. revRequired = 'B';
  207. }
  208. EVENT_MASK(k8_dispatched_fpu_ops) * eventMask;
  209. };
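// Every event class in this header follows the pattern shown above: the base
// class's m_eventMask holds the raw unit mask, and the derived class exposes
// it through a typed pointer so individual sub-events can be toggled by name
// before SetCaptureMode() copies the mask into PerfEvtSel.UnitMask. A
// hypothetical example:
//
//   k8Event_DISPATCHED_FPU_OPS fpuOps;
//   fpuOps.eventMask->AddPipeOps = 1;   // count add-pipe ops
//   fpuOps.eventMask->MulPipeOps = 1;   // and multiply-pipe ops
//   fpuOps.SetCaptureMode(OS_and_USR);  // writes m_eventMask to the MSR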
  210. //////////////////////////////////////////////////////////
  211. class k8Event_NO_FPU_OPS : public k8BaseEvent
  212. {
  213. public:
  214. k8Event_NO_FPU_OPS()
  215. {
  216. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  217. event_id = 0x01;
  218. unitEncode = FP;
  219. name = _T("Cycles with no FPU ops retired");
  220. revRequired = 'B';
  221. }
  222. EVENT_MASK(NULL_MASK) * eventMask;
  223. };
  224. //////////////////////////////////////////////////////////
  225. class k8Event_FAST_FPU_OPS : public k8BaseEvent
  226. {
  227. public:
  228. k8Event_FAST_FPU_OPS()
  229. {
  230. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  231. event_id = 0x02;
  232. unitEncode = FP;
  233. name = _T("Dispatched FPU ops that use the fast flag interface");
  234. revRequired = 'B';
  235. }
  236. EVENT_MASK(NULL_MASK) * eventMask;
  237. };
  238. //////////////////////////////////////////////////////////
  239. typedef union EVENT_MASK(k8_segment_register_load)
  240. {
  241. struct
  242. {
  243. uint16 ES:1;
  244. uint16 CS:1;
  245. uint16 SS:1;
  246. uint16 DS:1;
  247. uint16 FS:1;
  248. uint16 GS:1;
  249. uint16 HS:1;
  250. };
  251. uint16 flat;
  252. } EVENT_MASK(k8_segment_register_load);
  253. class k8Event_SEG_REG_LOAD : public k8BaseEvent
  254. {
  255. public:
  256. k8Event_SEG_REG_LOAD()
  257. {
  258. eventMask = (EVENT_MASK(k8_segment_register_load) *)&m_eventMask;
  259. name = _T("Segment register load");
  260. event_id = 0x20;
  261. unitEncode = LS;
  262. }
  263. EVENT_MASK(k8_segment_register_load) * eventMask;
  264. };
  265. class k8Event_SELF_MODIFY_RESYNC : public k8BaseEvent
  266. {
  267. public:
  268. k8Event_SELF_MODIFY_RESYNC()
  269. {
  270. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  271. name = _T("Microarchitectural resync caused by self modifying code");
  272. event_id = 0x21;
  273. unitEncode = LS;
  274. }
  275. EVENT_MASK(NULL_MASK) * eventMask;
  276. };
  277. class k8Event_LS_RESYNC_BY_SNOOP : public k8BaseEvent
  278. {
  279. public:
  280. k8Event_LS_RESYNC_BY_SNOOP()
  281. {
  282. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  283. event_id = 0x22;
  284. unitEncode = LS;
  285. name = _T("Microarchitectural resync caused by snoop");
  286. }
  287. EVENT_MASK(NULL_MASK) * eventMask;
  288. };
  289. class k8Event_LS_BUFFER_FULL : public k8BaseEvent
  290. {
  291. public:
  292. k8Event_LS_BUFFER_FULL()
  293. {
  294. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  295. name = _T("LS Buffer 2 Full");
  296. event_id = 0x23;
  297. unitEncode = LS;
  298. }
  299. EVENT_MASK(NULL_MASK) * eventMask;
  300. };
  301. typedef union EVENT_MASK(k8_locked_op)
  302. {
  303. struct
  304. {
  305. uint16 NumLockInstr : 1; //Number of lock instructions executed
  306. uint16 NumCyclesInRequestGrant : 1; //Number of cycles spent in the lock request/grant stage
  307. uint16 NumCyclesForLock:1;
  308. /*Number of cycles a lock takes to complete once it is
  309. non-speculative and is the oldest load/store operation
  310. (non-speculative cycles in Ls2 entry 0)*/
  311. };
  312. uint16 flat;
  313. } EVENT_MASK(k8_locked_op);
  314. class k8Event_LOCKED_OP : public k8BaseEvent
  315. {
  316. public:
  317. EVENT_MASK(k8_locked_op) * eventMask;
  318. k8Event_LOCKED_OP()
  319. {
  320. eventMask = (EVENT_MASK(k8_locked_op) *)&m_eventMask;
  321. name = _T("Locked operation");
  322. event_id = 0x24;
  323. unitEncode = LS;
  324. revRequired = 'C';
  325. }
  326. };
  327. class k8Event_OP_LATE_CANCEL : public k8BaseEvent
  328. {
  329. public:
  330. k8Event_OP_LATE_CANCEL()
  331. {
  332. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  333. name = _T("Microarchitectural late cancel of an operation");
  334. event_id = 0x25;
  335. unitEncode = LS;
  336. }
  337. EVENT_MASK(NULL_MASK) * eventMask;
  338. // name = _T("OP_LATE_CANCEL");
  339. };
  340. class k8Event_CFLUSH_RETIRED : public k8BaseEvent
  341. {
  342. public:
  343. k8Event_CFLUSH_RETIRED()
  344. {
  345. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  346. name = _T("Retired CFLUSH instructions");
  347. event_id = 0x26;
  348. unitEncode = LS;
  349. }
  350. EVENT_MASK(NULL_MASK) * eventMask;
  351. // name = _T("CFLUSH_RETIRED");
  352. };
  353. class k8Event_CPUID_RETIRED : public k8BaseEvent
  354. {
  355. public:
  356. k8Event_CPUID_RETIRED()
  357. {
  358. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  359. name = _T("Retired CPUID instructions");
  360. event_id = 0x27;
  361. unitEncode = LS;
  362. }
  363. EVENT_MASK(NULL_MASK) * eventMask;
  364. // name = _T("CPUID_RETIRED");
  365. };
  366. typedef union EVENT_MASK( k8_cache)
  367. {
  368. struct
  369. {
  370. uint16 Invalid:1;
  371. uint16 Exclusive:1;
  372. uint16 Shared:1;
  373. uint16 Owner:1;
  374. uint16 Modified:1;
  375. };
  376. uint16 flat;
  377. }EVENT_MASK( k8_cache);
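// The k8_cache mask selects which MOESI cache-line states are counted by the
// refill and writeback events that use it (0x42-0x44 below, and the IC refill
// events 0x82-0x83). A hypothetical example counting only owned and modified
// lines:
//
//   k8Event_DATA_CACHE_WRITEBACKS wb;
//   wb.eventMask->Owner = 1;
//   wb.eventMask->Modified = 1;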
  378. /* 0x40-0x47: from K7 official event set */
  379. class k8Event_DATA_CACHE_ACCESSES : public k8BaseEvent
  380. {
  381. public: k8Event_DATA_CACHE_ACCESSES()
  382. {
  383. event_id = 0x40;
  384. unitEncode = DC;
  385. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  386. //_T("DATA_CACHE_ACCESSES"),
  387. name = _T("Data cache accesses");
  388. }
  389. EVENT_MASK(NULL_MASK) * eventMask;
  390. };
  391. class k8Event_DATA_CACHE_MISSES : public k8BaseEvent
  392. {
  393. public: k8Event_DATA_CACHE_MISSES()
  394. {
  395. event_id = 0x41;
  396. unitEncode = DC;
  397. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  398. //_T("DATA_CACHE_MISSES"),
  399. name = _T("Data cache misses");
  400. }
  401. EVENT_MASK(NULL_MASK) * eventMask;
  402. };
  403. class k8Event_DATA_CACHE_REFILLS_FROM_L2 : public k8BaseEvent
  404. {
  405. public: k8Event_DATA_CACHE_REFILLS_FROM_L2()
  406. {
  407. event_id = 0x42;
  408. unitEncode = DC;
  409. eventMask = (EVENT_MASK(k8_cache) *)&m_eventMask;
  410. name = _T("Data cache refills from L2");
  411. }
  412. EVENT_MASK(k8_cache) * eventMask;
  413. };
  414. class k8Event_DATA_CACHE_REFILLS_FROM_SYSTEM : public k8BaseEvent
  415. {
  416. public: k8Event_DATA_CACHE_REFILLS_FROM_SYSTEM()
  417. {
  418. event_id = 0x43;
  419. unitEncode = DC;
  420. eventMask = (EVENT_MASK(k8_cache) *)&m_eventMask;
  421. //UM(k7_um_moesi),
  422. //_T("DATA_CACHE_REFILLS_FROM_SYSTEM"),
  423. name = _T("Data cache refills from system");
  424. }
  425. EVENT_MASK(k8_cache) * eventMask;
  426. };
  427. class k8Event_DATA_CACHE_WRITEBACKS : public k8BaseEvent
  428. {
  429. public: k8Event_DATA_CACHE_WRITEBACKS()
  430. {
  431. event_id = 0x44;
  432. unitEncode = DC;
  433. eventMask = (EVENT_MASK(k8_cache) *)&m_eventMask;
  434. //UM(k7_um_moesi),
  435. //_T("DATA_CACHE_WRITEBACKS"),
  436. name = _T("Data cache writebacks");
  437. }
  438. EVENT_MASK(k8_cache) * eventMask;
  439. };
  440. class k8Event_L1_DTLB_MISSES_AND_L2_DTLB_HITS : public k8BaseEvent
  441. {
  442. public: k8Event_L1_DTLB_MISSES_AND_L2_DTLB_HITS()
  443. {
  444. event_id = 0x45;
  445. unitEncode = DC;
  446. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  447. name = _T("L1 DTLB misses and L2 DTLB hits");
  448. }
  449. EVENT_MASK(NULL_MASK) * eventMask;
  450. };
  451. class k8Event_L1_AND_L2_DTLB_MISSES : public k8BaseEvent
  452. {
  453. public: k8Event_L1_AND_L2_DTLB_MISSES()
  454. {
  455. event_id = 0x46;
  456. unitEncode = DC;
  457. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  458. name = _T("L1 and L2 DTLB misses") ;
  459. }
  460. EVENT_MASK(NULL_MASK) * eventMask;
  461. };
  462. class k8Event_MISALIGNED_DATA_REFERENCES : public k8BaseEvent
  463. {
  464. public: k8Event_MISALIGNED_DATA_REFERENCES()
  465. {
  466. event_id = 0x47;
  467. unitEncode = DC;
  468. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  469. //NULL, _T("MISALIGNED_DATA_REFERENCES"),
  470. name = _T("Misaligned data references");
  471. }
  472. EVENT_MASK(NULL_MASK) * eventMask;
  473. };
  474. class k8Event_ACCESS_CANCEL_LATE : public k8BaseEvent
  475. {
  476. public:
  477. k8Event_ACCESS_CANCEL_LATE()
  478. {
  479. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  480. name = _T("Microarchitectural late cancel of an access");
  481. event_id = 0x48;
  482. unitEncode = DC;
  483. }
  484. EVENT_MASK(NULL_MASK) * eventMask;
  485. // name = _T("ACCESS_CANCEL_LATE");
  486. };
  487. class k8Event_ACCESS_CANCEL_EARLY : public k8BaseEvent
  488. {
  489. public:
  490. k8Event_ACCESS_CANCEL_EARLY()
  491. {
  492. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  493. name = _T("Microarchitectural early cancel of an access");
  494. event_id = 0x49;
  495. unitEncode = DC;
  496. }
  497. EVENT_MASK(NULL_MASK) * eventMask;
  498. // name = _T("ACCESS_CANCEL_EARLY");
  499. };
  500. typedef union EVENT_MASK( k8_ecc)
  501. {
  502. struct
  503. {
  504. uint16 ScrubberError : 1; // Scrubber error
  505. uint16 PiggybackScrubberErrors : 1; // Piggyback scrubber errors
  506. };
  507. uint16 flat;
  508. }EVENT_MASK( k8_ecc);
  509. class k8Event_ECC_BIT_ERR : public k8BaseEvent
  510. {
  511. public:
  512. k8Event_ECC_BIT_ERR()
  513. {
  514. eventMask = (EVENT_MASK(k8_ecc) *)&m_eventMask;
  515. name = _T("One bit ECC error recorded found by scrubber");
  516. event_id = 0x4A;
  517. unitEncode = DC;
  518. }
  519. EVENT_MASK(k8_ecc) * eventMask;
  520. // name = _T("ECC_BIT_ERR");
  521. };
  522. // 4B
  523. typedef union EVENT_MASK( k8_distpatch_prefetch_instructions)
  524. {
  525. struct
  526. {
  527. uint16 Load : 1;
  528. uint16 Store : 1;
  529. uint16 NTA : 1;
  530. };
  531. uint16 flat;
  532. }EVENT_MASK( k8_distpatch_prefetch_instructions);
  533. class k8Event_DISPATCHED_PRE_INSTRS : public k8BaseEvent
  534. {
  535. public:
  536. k8Event_DISPATCHED_PRE_INSTRS()
  537. {
  538. eventMask = (EVENT_MASK(k8_distpatch_prefetch_instructions) *)&m_eventMask;
  539. name = _T("Dispatched prefetch instructions");
  540. event_id = 0x4B;
  541. unitEncode = DC;
  542. }
  543. EVENT_MASK(k8_distpatch_prefetch_instructions) * eventMask;
  544. // name = _T("DISPATCHED_PRE_INSTRS");
  545. /* 0x4C: added in Revision C */
  546. };
  547. typedef union EVENT_MASK( k8_lock_accesses)
  548. {
  549. struct
  550. {
  551. uint16 DcacheAccesses:1; // Number of dcache accesses by lock instructions
  552. uint16 DcacheMisses:1; // Number of dcache misses by lock instructions
  553. };
  554. uint16 flat;
  555. }EVENT_MASK( k8_lock_accesses);
  556. class k8Event_LOCK_ACCESSES : public k8BaseEvent
  557. {
  558. public:
  559. k8Event_LOCK_ACCESSES()
  560. {
  561. eventMask = (EVENT_MASK(k8_lock_accesses) *)&m_eventMask;
  562. name = _T("DCACHE accesses by locks") ;
  563. event_id = 0x4C;
  564. unitEncode = DC;
  565. revRequired = 'C';
  566. }
  567. EVENT_MASK(k8_lock_accesses) * eventMask;
  568. };
  569. class k8Event_CYCLES_PROCESSOR_IS_RUNNING : public k8BaseEvent
  570. {
  571. public:
  572. k8Event_CYCLES_PROCESSOR_IS_RUNNING()
  573. {
  574. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  575. name = _T("Cycles processor is running (not in HLT or STPCLK)");
  576. event_id = 0x76;
  577. unitEncode = BU;
  578. }
  579. EVENT_MASK(NULL_MASK) * eventMask;
  580. // name = _T("CYCLES_PROCESSOR_IS_RUNNING"); /* undocumented */
  581. };
  582. typedef union EVENT_MASK( k8_internal_L2_request)
  583. {
  584. struct
  585. {
  586. uint16 ICFill:1; // IC fill
  587. uint16 DCFill:1; // DC fill
  588. uint16 TLBReload:1; // TLB reload
  589. uint16 TagSnoopRequest:1; // Tag snoop request
  590. uint16 CancelledRequest:1; // Cancelled request
  591. };
  592. uint16 flat;
  593. }EVENT_MASK( k8_internal_L2_request);
  594. class k8Event_BU_INT_L2_REQ : public k8BaseEvent
  595. {
  596. public:
  597. k8Event_BU_INT_L2_REQ()
  598. {
  599. eventMask = (EVENT_MASK(k8_internal_L2_request) *)&m_eventMask;
  600. name = _T("Internal L2 request");
  601. unitEncode = BU;
  602. event_id = 0x7D;
  603. }
  604. EVENT_MASK(k8_internal_L2_request) * eventMask;
  605. } ;
  606. // name = _T("BU_INT_L2_REQ");
  607. // 7E
  608. typedef union EVENT_MASK( k8_fill_request_missed_L2)
  609. {
  610. struct
  611. {
  612. uint16 ICFill:1; // IC fill
  613. uint16 DCFill:1; // DC fill
  614. uint16 TLBReload:1; // TLB reload
  615. };
  616. uint16 flat;
  617. } EVENT_MASK( k8_fill_request_missed_L2);
  618. class k8Event_BU_FILL_REQ : public k8BaseEvent
  619. {
  620. public:
  621. k8Event_BU_FILL_REQ()
  622. {
  623. eventMask = (EVENT_MASK(k8_fill_request_missed_L2) *)&m_eventMask;
  624. name = _T("Fill request that missed in L2");
  625. event_id = 0x7E;
  626. unitEncode = BU;
  627. }
  628. EVENT_MASK(k8_fill_request_missed_L2) * eventMask;
  629. // name = _T("BU_FILL_REQ");
  630. };
  631. // 7F
  632. typedef union EVENT_MASK( k8_fill_into_L2)
  633. {
  634. struct
  635. {
  636. uint16 DirtyL2Victim:1; // Dirty L2 victim
  637. uint16 VictimFromL2:1; // Victim from L2
  638. };
  639. uint16 flat;
  640. }EVENT_MASK( k8_fill_into_L2);
  641. class k8Event_BU_FILL_L2 : public k8BaseEvent
  642. {
  643. public:
  644. k8Event_BU_FILL_L2()
  645. {
  646. eventMask = (EVENT_MASK(k8_fill_into_L2) *)&m_eventMask;
  647. name = _T("Fill into L2");
  648. event_id = 0x7F;
  649. unitEncode = BU;
  650. }
  651. EVENT_MASK(k8_fill_into_L2) * eventMask;
  652. // name = _T("BU_FILL_L2");
  653. };
  654. class k8Event_INSTRUCTION_CACHE_FETCHES : public k8BaseEvent
  655. {
  656. public:
  657. k8Event_INSTRUCTION_CACHE_FETCHES()
  658. {
  659. event_id = 0x80;
  660. unitEncode = IC;
  661. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  662. name = _T("Instruction cache fetches");
  663. }
  664. EVENT_MASK(NULL_MASK) * eventMask;
  665. };
  666. class k8Event_INSTRUCTION_CACHE_MISSES : public k8BaseEvent
  667. {
  668. public:
  669. k8Event_INSTRUCTION_CACHE_MISSES()
  670. {
  671. event_id = 0x81;
  672. unitEncode = IC;
  673. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  674. //0xF, NULL, _T("INSTRUCTION_CACHE_MISSES"),
  675. name = _T("Instruction cache misses");
  676. }
  677. EVENT_MASK(NULL_MASK) * eventMask;
  678. };
  679. class k8Event_IC_REFILL_FROM_L2 : public k8BaseEvent
  680. {
  681. public:
  682. k8Event_IC_REFILL_FROM_L2()
  683. {
  684. eventMask = (EVENT_MASK(k8_cache) *)&m_eventMask;
  685. name = _T("Refill from L2");
  686. event_id = 0x82;
  687. unitEncode = IC;
  688. }
  689. EVENT_MASK(k8_cache) * eventMask;
  690. // name = _T("IC_REFILL_FROM_L2");
  691. };
  692. class k8Event_IC_REFILL_FROM_SYS : public k8BaseEvent
  693. {
  694. public:
  695. k8Event_IC_REFILL_FROM_SYS()
  696. {
  697. eventMask = (EVENT_MASK(k8_cache) *)&m_eventMask;
  698. name = _T("Refill from system");
  699. event_id = 0x83;
  700. unitEncode = IC;
  701. }
  702. EVENT_MASK(k8_cache) * eventMask;
  703. // name = _T("IC_REFILL_FROM_SYS");
  704. };
  705. class k8Event_L1_ITLB_MISSES_AND_L2_ITLB_HITS : public k8BaseEvent
  706. {
  707. public:
  708. k8Event_L1_ITLB_MISSES_AND_L2_ITLB_HITS()
  709. {
  710. event_id = 0x84;
  711. unitEncode = IC;
  712. name = _T("L1 ITLB misses (and L2 ITLB hits)");
  713. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  714. }
  715. EVENT_MASK(NULL_MASK) * eventMask;
  716. };
  717. class k8Event_L1_AND_L2_ITLB_MISSES : public k8BaseEvent
  718. {
  719. public:
  720. k8Event_L1_AND_L2_ITLB_MISSES()
  721. {
  722. event_id = 0x85;
  723. unitEncode = IC;
  724. name = _T("(L1 and) L2 ITLB misses");
  725. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  726. }
  727. EVENT_MASK(NULL_MASK) * eventMask;
  728. };
  729. class k8Event_IC_RESYNC_BY_SNOOP : public k8BaseEvent
  730. {
  731. public:
  732. k8Event_IC_RESYNC_BY_SNOOP()
  733. {
  734. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  735. event_id = 0x86;
  736. unitEncode = IC;
  737. name = _T("Microarchitectural resync caused by snoop");
  738. }
  739. EVENT_MASK(NULL_MASK) * eventMask;
  740. // name = _T("IC_RESYNC_BY_SNOOP");
  741. /* similar to 0x22; but IC unit instead of LS unit */
  742. };
  743. class k8Event_IC_FETCH_STALL : public k8BaseEvent
  744. {
  745. public:
  746. k8Event_IC_FETCH_STALL()
  747. {
  748. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  749. name = _T("Instruction fetch stall");
  750. event_id = 0x87;
  751. unitEncode = IC;
  752. }
  753. EVENT_MASK(NULL_MASK) * eventMask;
  754. // name = _T("IC_FETCH_STALL");
  755. };
  756. class k8Event_IC_STACK_HIT : public k8BaseEvent
  757. {
  758. public:
  759. k8Event_IC_STACK_HIT()
  760. {
  761. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  762. name = _T("Return stack hit");
  763. event_id = 0x88;
  764. unitEncode = IC;
  765. }
  766. EVENT_MASK(NULL_MASK) * eventMask;
  767. // name = _T("IC_STACK_HIT");
  768. };
  769. class k8Event_IC_STACK_OVERFLOW : public k8BaseEvent
  770. {
  771. public:
  772. k8Event_IC_STACK_OVERFLOW()
  773. {
  774. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  775. name = _T("Return stack overflow");
  776. event_id = 0x89;
  777. unitEncode = IC;
  778. }
  779. EVENT_MASK(NULL_MASK) * eventMask;
  780. // name = _T("IC_STACK_OVERFLOW");
  781. };
  782. /* 0xC0-0xC7: from K7 official event set */
  783. class k8Event_RETIRED_INSTRUCTIONS : public k8BaseEvent
  784. {
  785. public:
  786. k8Event_RETIRED_INSTRUCTIONS()
  787. {
  788. event_id = 0xC0;
  789. unitEncode = FR;
  790. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  791. //0xF, NULL, _T("RETIRED_INSTRUCTIONS"),
  792. name = _T("Retired instructions (includes exceptions, interrupts, resyncs)");
  793. }
  794. EVENT_MASK(NULL_MASK) * eventMask;
  795. };
  796. class k8Event_RETIRED_OPS : public k8BaseEvent
  797. {
  798. public:
  799. k8Event_RETIRED_OPS()
  800. {
  801. event_id = 0xC1;
  802. unitEncode = FR;
  803. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  804. //0xF, NULL, _T("RETIRED_OPS"),
  805. name = _T("Retired Ops") ;
  806. }
  807. EVENT_MASK(NULL_MASK) * eventMask;
  808. };
  809. class k8Event_RETIRED_BRANCHES : public k8BaseEvent
  810. {
  811. public:
  812. k8Event_RETIRED_BRANCHES()
  813. {
  814. event_id = 0xC2;
  815. unitEncode = FR;
  816. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  817. //0xF, NULL, _T("RETIRED_BRANCHES"),
  818. name = _T("Retired branches (conditional, unconditional, exceptions, interrupts)") ;
  819. }
  820. EVENT_MASK(NULL_MASK) * eventMask;
  821. };
  822. class k8Event_RETIRED_BRANCHES_MISPREDICTED : public k8BaseEvent
  823. {
  824. public:
  825. k8Event_RETIRED_BRANCHES_MISPREDICTED()
  826. {
  827. event_id = 0xC3;
  828. unitEncode = FR;
  829. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  830. //0xF, NULL, _T("RETIRED_BRANCHES_MISPREDICTED"),
  831. name = _T("Retired branches mispredicted") ;
  832. }
  833. EVENT_MASK(NULL_MASK) * eventMask;
  834. };
  835. class k8Event_RETIRED_TAKEN_BRANCHES : public k8BaseEvent
  836. {
  837. public:
  838. k8Event_RETIRED_TAKEN_BRANCHES()
  839. {
  840. event_id = 0xC4;
  841. unitEncode = FR;
  842. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  843. //0xF, NULL, _T("RETIRED_TAKEN_BRANCHES"),
  844. name = _T("Retired taken branches") ;
  845. }
  846. EVENT_MASK(NULL_MASK) * eventMask;
  847. };
  848. class k8Event_RETIRED_TAKEN_BRANCHES_MISPREDICTED : public k8BaseEvent
  849. {
  850. public:
  851. k8Event_RETIRED_TAKEN_BRANCHES_MISPREDICTED()
  852. {
  853. event_id = 0xC5;
  854. unitEncode = FR;
  855. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  856. //0xF, NULL, _T("RETIRED_TAKEN_BRANCHES_MISPREDICTED"),
  857. name = _T("Retired taken branches mispredicted") ;
  858. }
  859. EVENT_MASK(NULL_MASK) * eventMask;
  860. };
  861. class k8Event_RETIRED_FAR_CONTROL_TRANSFERS : public k8BaseEvent
  862. {
  863. public:
  864. k8Event_RETIRED_FAR_CONTROL_TRANSFERS()
  865. {
  866. event_id = 0xC6;
  867. unitEncode = FR;
  868. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  869. //0xF, NULL, _T("RETIRED_FAR_CONTROL_TRANSFERS"),
  870. name = _T("Retired far control transfers") ;
  871. }
  872. EVENT_MASK(NULL_MASK) * eventMask;
  873. };
  874. class k8Event_RETIRED_RESYNC_BRANCHES : public k8BaseEvent
  875. {
  876. public:
  877. k8Event_RETIRED_RESYNC_BRANCHES()
  878. {
  879. event_id = 0xC7;
  880. unitEncode = FR;
  881. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  882. //0xF, NULL, _T("RETIRED_RESYNC_BRANCHES"),
  883. name = _T("Retired resync branches (only non-control transfer branches counted)") ;
  884. }
  885. EVENT_MASK(NULL_MASK) * eventMask;
  886. };
  887. class k8Event_RETIRED_NEAR_RETURNS : public k8BaseEvent
  888. {
  889. public:
  890. k8Event_RETIRED_NEAR_RETURNS()
  891. {
  892. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  893. name = _T("Retired near returns");
  894. event_id = 0xC8;
  895. unitEncode = FR;
  896. }
  897. EVENT_MASK(NULL_MASK) * eventMask;
  898. };
  899. class k8Event_RETIRED_RETURNS_MISPREDICT : public k8BaseEvent
  900. {
  901. public:
  902. k8Event_RETIRED_RETURNS_MISPREDICT()
  903. {
  904. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  905. name = _T("Retired near returns mispredicted");
  906. event_id = 0xC9;
  907. unitEncode = FR;
  908. }
  909. EVENT_MASK(NULL_MASK) * eventMask;
  910. // name = _T("RETIRED_RETURNS_MISPREDICT");
  911. };
  912. class k8Event_RETIRED_BRANCH_MISCOMPARE : public k8BaseEvent
  913. {
  914. public:
  915. k8Event_RETIRED_BRANCH_MISCOMPARE()
  916. {
  917. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  918. name = _T("Retired taken branches mispredicted due to address miscompare");
  919. event_id = 0xCA;
  920. unitEncode = FR;
  921. }
  922. EVENT_MASK(NULL_MASK) * eventMask;
  923. // name = _T("RETIRED_BRANCH_MISCOMPARE");
  924. };
  925. /* Revision B and later */
  926. typedef union EVENT_MASK( k8_retired_fpu_instr)
  927. {
  928. struct
  929. {
  930. uint16 x87Instructions:1; // x87 instructions
  931. uint16 CombinedMMX_3DNow:1; // Combined MMX & 3DNow! instructions
  932. uint16 CombinedPackedSSE_SSE2:1; // Combined packed SSE and SSE2 instructions
  933. uint16 CombinedScalarSSE_SSE2:1; // Combined scalar SSE and SSE2 instructions
  934. };
  935. uint16 flat;
  936. }EVENT_MASK( k8_retired_fpu_instr);
  937. class k8Event_RETIRED_FPU_INSTRS : public k8BaseEvent
  938. {
  939. public:
  940. k8Event_RETIRED_FPU_INSTRS()
  941. {
  942. eventMask = (EVENT_MASK(k8_retired_fpu_instr) *)&m_eventMask;
  943. event_id = 0xCB;
  944. unitEncode = FR;
  945. name = _T("Retired FPU instructions");
  946. revRequired = 'B';
  947. }
  948. EVENT_MASK(k8_retired_fpu_instr) * eventMask;
  949. /* Revision B and later */
  950. };
  951. // CC
  952. typedef union EVENT_MASK( k8_retired_fastpath_double_op_instr )
  953. {
  954. struct
  955. {
  956. uint16 LowOpPosition0:1; // With low op in position 0
  957. uint16 LowOpPosition1:1; // With low op in position 1
  958. uint16 LowOpPosition2:1; // With low op in position 2
  959. };
  960. uint16 flat;
  961. }EVENT_MASK( k8_retired_fastpath_double_op_instr);
  962. class k8Event_RETIRED_FASTPATH_INSTRS : public k8BaseEvent
  963. {
  964. public:
  965. k8Event_RETIRED_FASTPATH_INSTRS()
  966. {
  967. eventMask = (EVENT_MASK(k8_retired_fastpath_double_op_instr) *)&m_eventMask;
  968. event_id = 0xCC;
  969. unitEncode = FR;
  970. name = _T("Retired fastpath double op instructions");
  971. revRequired = 'B';
  972. }
  973. EVENT_MASK(k8_retired_fastpath_double_op_instr) * eventMask;
  974. };
  975. class k8Event_INTERRUPTS_MASKED_CYCLES : public k8BaseEvent
  976. {
  977. public:
  978. k8Event_INTERRUPTS_MASKED_CYCLES()
  979. {
  980. event_id = 0xCD;
  981. unitEncode = FR;
  982. //0xF, NULL, _T("INTERRUPTS_MASKED_CYCLES"),
  983. name = _T("Interrupts masked cycles (IF=0)") ;
  984. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  985. }
  986. EVENT_MASK(NULL_MASK) * eventMask;
  987. };
  988. class k8Event_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES : public k8BaseEvent
  989. {
  990. public:
  991. k8Event_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES()
  992. {
  993. event_id = 0xCE;
  994. unitEncode = FR;
  995. //0xF, NULL, _T("INTERRUPTS_MASKED_WHILE_PENDING_CYCLES"),
  996. name = _T("Interrupts masked while pending cycles (INTR while IF=0)") ;
  997. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  998. }
  999. EVENT_MASK(NULL_MASK) * eventMask;
  1000. };
  1001. class k8Event_NUMBER_OF_TAKEN_HARDWARE_INTERRUPTS : public k8BaseEvent
  1002. {
  1003. public:
  1004. k8Event_NUMBER_OF_TAKEN_HARDWARE_INTERRUPTS()
  1005. {
  1006. event_id = 0xCF;
  1007. unitEncode = FR;
  1008. //0xF, NULL, _T("NUMBER_OF_TAKEN_HARDWARE_INTERRUPTS"),
  1009. name = _T("Number of taken hardware interrupts") ;
  1010. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1011. }
  1012. EVENT_MASK(NULL_MASK) * eventMask;
  1013. };
  1014. class k8Event_DECODER_EMPTY : public k8BaseEvent
  1015. {
  1016. public:
  1017. k8Event_DECODER_EMPTY()
  1018. {
  1019. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1020. name = _T("Nothing to dispatch (decoder empty)");
  1021. event_id = 0xD0;
  1022. unitEncode = FR;
  1023. }
  1024. EVENT_MASK(NULL_MASK) * eventMask;
  1025. // name = _T("DECODER_EMPTY");
  1026. };
  1027. class k8Event_DISPATCH_STALLS : public k8BaseEvent
  1028. {
  1029. public:
  1030. k8Event_DISPATCH_STALLS()
  1031. {
  1032. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1033. name = _T("Dispatch stalls (events 0xD2-0xDA combined)");
  1034. event_id = 0xD1;
  1035. unitEncode = FR;
  1036. }
  1037. EVENT_MASK(NULL_MASK) * eventMask;
  1038. // name = _T("DISPATCH_STALLS");
  1039. };
  1040. class k8Event_DISPATCH_STALL_FROM_BRANCH_ABORT : public k8BaseEvent
  1041. {
  1042. public:
  1043. k8Event_DISPATCH_STALL_FROM_BRANCH_ABORT()
  1044. {
  1045. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1046. name = _T("Dispatch stall from branch abort to retire");
  1047. event_id = 0xD2;
  1048. unitEncode = FR;
  1049. }
  1050. EVENT_MASK(NULL_MASK) * eventMask;
  1051. // name = _T("DISPATCH_STALL_FROM_BRANCH_ABORT");
  1052. };
  1053. class k8Event_DISPATCH_STALL_SERIALIZATION : public k8BaseEvent
  1054. {
  1055. public:
  1056. k8Event_DISPATCH_STALL_SERIALIZATION()
  1057. {
  1058. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1059. name = _T("Dispatch stall for serialization");
  1060. event_id = 0xD3;
  1061. unitEncode = FR;
  1062. }
  1063. EVENT_MASK(NULL_MASK) * eventMask;
  1064. // name = _T("DISPATCH_STALL_SERIALIZATION");
  1065. };
  1066. class k8Event_DISPATCH_STALL_SEG_LOAD : public k8BaseEvent
  1067. {
  1068. public:
  1069. k8Event_DISPATCH_STALL_SEG_LOAD()
  1070. {
  1071. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1072. name = _T("Dispatch stall for segment load");
  1073. event_id = 0xD4;
  1074. unitEncode = FR;
  1075. }
  1076. EVENT_MASK(NULL_MASK) * eventMask;
  1077. // name = _T("DISPATCH_STALL_SEG_LOAD");
  1078. };
  1079. class k8Event_DISPATCH_STALL_REORDER_BUFFER : public k8BaseEvent
  1080. {
  1081. public:
  1082. k8Event_DISPATCH_STALL_REORDER_BUFFER()
  1083. {
  1084. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1085. name = _T("Dispatch stall when reorder buffer is full");
  1086. event_id = 0xD5;
  1087. unitEncode = FR;
  1088. }
  1089. EVENT_MASK(NULL_MASK) * eventMask;
  1090. // name = _T("DISPATCH_STALL_REORDER_BUFFER");
  1091. };
  1092. class k8Event_DISPATCH_STALL_RESERVE_STATIONS : public k8BaseEvent
  1093. {
  1094. public:
  1095. k8Event_DISPATCH_STALL_RESERVE_STATIONS()
  1096. {
  1097. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1098. name = _T("Dispatch stall when reservation stations are full");
  1099. event_id = 0xD6;
  1100. unitEncode = FR;
  1101. }
  1102. EVENT_MASK(NULL_MASK) * eventMask;
  1103. // name = _T("DISPATCH_STALL_RESERVE_STATIONS");
  1104. };
  1105. class k8Event_DISPATCH_STALL_FPU : public k8BaseEvent
  1106. {
  1107. public:
  1108. k8Event_DISPATCH_STALL_FPU()
  1109. {
  1110. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1111. name = _T("Dispatch stall when FPU is full");
  1112. event_id = 0xD7;
  1113. unitEncode = FR;
  1114. }
  1115. EVENT_MASK(NULL_MASK) * eventMask;
  1116. // name = _T("DISPATCH_STALL_FPU");
  1117. };
  1118. class k8Event_DISPATCH_STALL_LS : public k8BaseEvent
  1119. {
  1120. public:
  1121. k8Event_DISPATCH_STALL_LS()
  1122. {
  1123. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1124. name = _T("Dispatch stall when LS is full");
  1125. event_id = 0xD8;
  1126. unitEncode = FR;
  1127. }
  1128. EVENT_MASK(NULL_MASK) * eventMask;
  1129. // name = _T("DISPATCH_STALL_LS");
  1130. };
  1131. class k8Event_DISPATCH_STALL_QUIET_WAIT : public k8BaseEvent
  1132. {
  1133. public:
  1134. k8Event_DISPATCH_STALL_QUIET_WAIT()
  1135. {
  1136. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1137. name = _T("Dispatch stall when waiting for all to be quiet");
  1138. event_id = 0xD9;
  1139. unitEncode = FR;
  1140. }
  1141. EVENT_MASK(NULL_MASK) * eventMask;
  1142. // name = _T("DISPATCH_STALL_QUIET_WAIT");
  1143. };
  1144. class k8Event_DISPATCH_STALL_PENDING : public k8BaseEvent
  1145. {
  1146. public:
  1147. k8Event_DISPATCH_STALL_PENDING()
  1148. {
  1149. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1150. name = _T("Dispatch stall when far control transfer or resync branch is pending");
  1151. event_id = 0xDA;
  1152. unitEncode = FR;
  1153. }
  1154. EVENT_MASK(NULL_MASK) * eventMask;
  1155. // name = _T("DISPATCH_STALL_PENDING");
  1156. };
  1157. typedef union EVENT_MASK( k8_fpu_exceptions)
  1158. {
  1159. struct
  1160. {
  1161. uint16 x87ReclassMicrofaults:1; // x87 reclass microfaults
  1162. uint16 SSERetypeMicrofaults:1; // SSE retype microfaults
  1163. uint16 SSEReclassMicrofaults:1; // SSE reclass microfaults
  1164. uint16 SSE_x87Microtraps:1; // SSE and x87 microtraps
  1165. };
  1166. uint16 flat;
  1167. }EVENT_MASK( k8_fpu_exceptions);
  1168. class k8Event_FPU_EXCEPTIONS : public k8BaseEvent
  1169. {
  1170. public:
  1171. k8Event_FPU_EXCEPTIONS()
  1172. {
  1173. eventMask = (EVENT_MASK(k8_fpu_exceptions) *)&m_eventMask;
  1174. event_id = 0xDB;
  1175. unitEncode = FR;
  1176. name = _T("FPU exceptions");
  1177. revRequired = 'B';
  1178. }
  1179. EVENT_MASK(k8_fpu_exceptions) * eventMask;
  1180. // name = _T("FPU_EXCEPTIONS");
  1181. /* Revision B and later */
  1182. };
  1183. class k8Event_DR0_BREAKPOINTS : public k8BaseEvent
  1184. {
  1185. public:
  1186. k8Event_DR0_BREAKPOINTS()
  1187. {
  1188. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1189. name = _T("Number of breakpoints for DR0");
  1190. event_id = 0xDC;
  1191. unitEncode = FR;
  1192. }
  1193. EVENT_MASK(NULL_MASK) * eventMask;
  1194. // name = _T("DR0_BREAKPOINTS");
  1195. };
  1196. class k8Event_DR1_BREAKPOINTS : public k8BaseEvent
  1197. {
  1198. public:
  1199. k8Event_DR1_BREAKPOINTS()
  1200. {
  1201. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1202. name = _T("Number of breakpoints for DR1");
  1203. event_id = 0xDD;
  1204. unitEncode = FR;
  1205. }
  1206. EVENT_MASK(NULL_MASK) * eventMask;
  1207. // name = _T("DR1_BREAKPOINTS");
  1208. };
  1209. class k8Event_DR2_BREAKPOINTS : public k8BaseEvent
  1210. {
  1211. public:
  1212. k8Event_DR2_BREAKPOINTS()
  1213. {
  1214. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1215. name = _T("Number of breakpoints for DR2");
  1216. event_id = 0xDE;
  1217. unitEncode = FR;
  1218. }
  1219. EVENT_MASK(NULL_MASK) * eventMask;
  1220. // name = _T("DR2_BREAKPOINTS");
  1221. };
  1222. class k8Event_DR3_BREAKPOINTS : public k8BaseEvent
  1223. {
  1224. public:
  1225. k8Event_DR3_BREAKPOINTS()
  1226. {
  1227. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1228. name = _T("Number of breakpoints for DR3");
  1229. event_id = 0xDF;
  1230. unitEncode = FR;
  1231. }
  1232. EVENT_MASK(NULL_MASK) * eventMask;
  1233. // name = _T("DR3_BREAKPOINTS");
  1234. };
  1235. // E0
  1236. typedef union EVENT_MASK( k8_page_access_event)
  1237. {
  1238. struct
  1239. {
  1240. uint16 PageHit:1; // Page hit
  1241. uint16 PageMiss:1; // Page miss
  1242. uint16 PageConflict:1; // Page conflict
  1243. };
  1244. uint16 flat;
  1245. }EVENT_MASK( k8_page_access_event);
  1246. class k8Event_MEM_PAGE_ACCESS : public k8BaseEvent
  1247. {
  1248. public:
  1249. k8Event_MEM_PAGE_ACCESS()
  1250. {
  1251. eventMask = (EVENT_MASK(k8_page_access_event) *)&m_eventMask;
  1252. name = _T("Memory controller page access");
  1253. event_id = 0xE0;
  1254. unitEncode = NB;
  1255. }
  1256. EVENT_MASK(k8_page_access_event) * eventMask;
  1257. // name = _T("MEM_PAGE_ACCESS");
  1258. };
  1259. class k8Event_MEM_PAGE_TBL_OVERFLOW : public k8BaseEvent
  1260. {
  1261. public:
  1262. k8Event_MEM_PAGE_TBL_OVERFLOW()
  1263. {
  1264. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1265. name = _T("Memory controller page table overflow");
  1266. event_id = 0xE1;
  1267. unitEncode = NB;
  1268. }
  1269. EVENT_MASK(NULL_MASK) * eventMask;
  1270. // name = _T("MEM_PAGE_TBL_OVERFLOW");
  1271. };
  1272. class k8Event_DRAM_SLOTS_MISSED : public k8BaseEvent
  1273. {
  1274. public:
  1275. k8Event_DRAM_SLOTS_MISSED()
  1276. {
  1277. eventMask = (EVENT_MASK(NULL_MASK) *)&m_eventMask;
  1278. name = _T("Memory controller DRAM command slots missed (in MemClks)");
  1279. event_id = 0xE2;
  1280. unitEncode = NB;
  1281. }
  1282. EVENT_MASK(NULL_MASK) * eventMask;
  1283. // name = _T("DRAM_SLOTS_MISSED");
  1284. };
  1285. // e3
  1286. typedef union EVENT_MASK( k8_turnaround)
  1287. {
  1288. struct
  1289. {
  1290. uint16 DIMMTurnaround:1; // DIMM turnaround
  1291. uint16 ReadToWriteTurnaround:1; // Read to write turnaround
  1292. uint16 WriteToReadTurnaround:1; // Write to read turnaround
  1293. };
  1294. uint16 flat;
  1295. }EVENT_MASK( k8_turnaround);
  1296. class k8Event_MEM_TURNAROUND : public k8BaseEvent
  1297. {
  1298. public:
  1299. k8Event_MEM_TURNAROUND()
  1300. {
  1301. eventMask = (EVENT_MASK(k8_turnaround) *)&m_eventMask;
  1302. name = _T("Memory controller turnaround");
  1303. event_id = 0xE3;
  1304. unitEncode = NB;
  1305. }
  1306. EVENT_MASK(k8_turnaround) * eventMask;
  1307. // name = _T("MEM_TURNAROUND");
  1308. };
  1309. // E4
  1310. typedef union EVENT_MASK( k8_bypass_counter_saturation)
  1311. {
  1312. struct
  1313. {
  1314. uint16 MEM_HighPriorityBypass:1; // Memory controller high priority bypass
  1315. uint16 MEM_LowPriorityBypass:1; // Memory controller low priority bypass
  1316. uint16 DRAM_InterfaceBypass:1; // DRAM controller interface bypass
  1317. uint16 DRAM_QueueBypass:1; // DRAM controller queue bypass
  1318. };
  1319. uint16 flat;
  1320. }EVENT_MASK( k8_bypass_counter_saturation);
  1321. class k8Event_MEM_BYPASS_SAT : public k8BaseEvent
  1322. {
  1323. public:
  1324. k8Event_MEM_BYPASS_SAT()
  1325. {
  1326. eventMask = (EVENT_MASK(k8_bypass_counter_saturation) *)&m_eventMask;
  1327. name = _T("Memory controller bypass counter saturation");
  1328. event_id = 0xE4;
  1329. unitEncode = NB;
  1330. }
  1331. EVENT_MASK(k8_bypass_counter_saturation) * eventMask;
  1332. // name = _T("MEM_BYPASS_SAT");
  1333. };
  1334. //EB
  1335. typedef union EVENT_MASK( k8_sized_commands)
  1336. {
  1337. struct
  1338. {
  1339. uint16 NonPostWrSzByte:1; // NonPostWrSzByte
  1340. uint16 NonPostWrSzDword:1; // NonPostWrSzDword
  1341. uint16 PostWrSzByte:1; // PostWrSzByte
  1342. uint16 PostWrSzDword:1; // PostWrSzDword
  1343. uint16 RdSzByte:1; // RdSzByte
  1344. uint16 RdSzDword:1; // RdSzDword
  1345. uint16 RdModWr:1; // RdModWr
  1346. };
  1347. uint16 flat;
  1348. }EVENT_MASK( k8_sized_commands);
  1349. class k8Event_SIZED_COMMANDS : public k8BaseEvent
  1350. {
  1351. public:
  1352. k8Event_SIZED_COMMANDS()
  1353. {
  1354. eventMask = (EVENT_MASK(k8_sized_commands) *)&m_eventMask;
  1355. name = _T("Sized commands");
  1356. event_id = 0xEB;
  1357. unitEncode = NB;
  1358. }
  1359. EVENT_MASK(k8_sized_commands) * eventMask;
  1360. // name = _T("SIZED_COMMANDS");
  1361. };
  1362. typedef union EVENT_MASK( k8_probe_result)
  1363. {
  1364. struct
  1365. {
  1366. uint16 ProbeMiss:1; // Probe miss
  1367. uint16 ProbeHit:1; // Probe hit
  1368. uint16 ProbeHitDirtyWithoutMemoryCancel:1; // Probe hit dirty without memory cancel
  1369. uint16 ProbeHitDirtyWithMemoryCancel:1; // Probe hit dirty with memory cancel
  1370. uint16 UpstreamDisplayRefreshReads:1; // Rev D and later
  1371. uint16 UpstreamNonDisplayRefreshReads:1; // Rev D and later
  1372. uint16 UpstreamWrites:1; // Rev D and later
  1373. };
  1374. uint16 flat;
  1375. }EVENT_MASK( k8_probe_result);
  1376. class k8Event_PROBE_RESULT : public k8BaseEvent
  1377. {
  1378. public:
  1379. k8Event_PROBE_RESULT()
  1380. {
  1381. eventMask = (EVENT_MASK(k8_probe_result) *)&m_eventMask;
  1382. name = _T("Probe result");
  1383. event_id = 0xEC;
  1384. unitEncode = NB;
  1385. }
  1386. EVENT_MASK(k8_probe_result) * eventMask;
  1387. // name = _T("PROBE_RESULT");
  1388. };
  1389. typedef union EVENT_MASK( k8_ht)
  1390. {
  1391. struct
  1392. {
  1393. uint16 CommandSent:1; // Command sent
  1394. uint16 DataSent:1; // Data sent
  1395. uint16 BufferReleaseSent:1; // Buffer release sent
  1396. uint16 NopSent:1; // Nop sent
  1397. };
  1398. uint16 flat;
  1399. }EVENT_MASK( k8_ht);
  1400. class k8Event_HYPERTRANSPORT_BUS0_WIDTH : public k8BaseEvent
  1401. {
  1402. public:
  1403. k8Event_HYPERTRANSPORT_BUS0_WIDTH()
  1404. {
  1405. eventMask = (EVENT_MASK(k8_ht) *)&m_eventMask;
  1406. name = _T("Hypertransport (tm) bus 0 bandwidth");
  1407. event_id = 0xF6;
  1408. unitEncode = NB;
  1409. }
  1410. EVENT_MASK(k8_ht) * eventMask;
  1411. // name = _T("HYPERTRANSPORT_BUS0_WIDTH");
  1412. };
  1413. class k8Event_HYPERTRANSPORT_BUS1_WIDTH : public k8BaseEvent
  1414. {
  1415. public:
  1416. k8Event_HYPERTRANSPORT_BUS1_WIDTH()
  1417. {
  1418. eventMask = (EVENT_MASK(k8_ht) *)&m_eventMask;
  1419. name = _T("Hypertransport (tm) bus 1 bandwidth");
  1420. event_id = 0xF7;
  1421. unitEncode = NB;
  1422. }
  1423. EVENT_MASK(k8_ht) * eventMask;
  1424. // name = _T("HYPERTRANSPORT_BUS1_WIDTH");
  1425. };
  1426. class k8Event_HYPERTRANSPORT_BUS2_WIDTH : public k8BaseEvent
  1427. {
  1428. public:
  1429. k8Event_HYPERTRANSPORT_BUS2_WIDTH()
  1430. {
  1431. eventMask = (EVENT_MASK(k8_ht) *)&m_eventMask;
  1432. name = _T("Hypertransport (tm) bus 2 bandwidth");
  1433. event_id = 0xF8;
  1434. unitEncode = NB;
  1435. }
  1436. EVENT_MASK(k8_ht) * eventMask;
  1437. // name = _T("HYPERTRANSPORT_BUS2_WIDTH");
  1438. };
  1439. //
  1440. //typedef union EVENT_MASK( perfctr_event_set k8_common_event_set)
  1441. //{
  1442. //
  1443. // .cpu_type = PERFCTR_X86_AMD_K8,
  1444. // .event_prefix = _T("K8_"),
  1445. // .include = &k7_official_event_set,
  1446. // .nevents = ARRAY_SIZE(k8_common_events),
  1447. // .events = k8_common_events,
  1448. //}EVENT_MASK( perfctr_event_set k8_common_event_set);
  1449. //
  1450. //typedef union EVENT_MASK( perfctr_event k8_events[])
  1451. //{
  1452. //
  1453. // { 0x24, 0xF, UM(NULL), _T("LOCKED_OP"), /* unit mask changed in Rev. C */
  1454. // _T("Locked operation") },
  1455. //}EVENT_MASK( perfctr_event k8_events[]);
  1456. //const struct perfctr_event_set perfctr_k8_event_set)
  1457. //{
  1458. //
  1459. // .cpu_type = PERFCTR_X86_AMD_K8,
  1460. // .event_prefix = _T("K8_"),
  1461. // .include = &k8_common_event_set,
  1462. // .nevents = ARRAY_SIZE(k8_events),
  1463. // .events = k8_events,
  1464. //};
  1465. //
  1466. /*
  1467. * K8 Revision C. Starts at CPUID 0xF58 for Opteron/Athlon64FX and
  1468. * CPUID 0xF48 for Athlon64. (CPUID 0xF51 is Opteron Revision B3.)
  1469. */
  1470. //
  1471. //typedef union EVENT_MASK( k8_lock_accesses)
  1472. //{
  1473. // struct
  1474. // {
  1475. // uint16 DcacheAccesses:1; // Number of dcache accesses by lock instructions" },
  1476. // uint16 DcacheMisses:1; // Number of dcache misses by lock instructions" } }
  1477. // };
  1478. // uint16 flat;
  1479. //
  1480. //}EVENT_MASK( k8_lock_accesses);
  1481. //
  1482. #endif // K8PERFORMANCECOUNTERS_H