#ifdef CONFIG_CPU_SUP_AMD

static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses          */
                [ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses            */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system   */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts    */
                [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled   */
        },
 },
 [ C(L1I) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches    */
                [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses     */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load  */
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC  */
                [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses :IC+DC       */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback            */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses          */
                [ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches          */
                [ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.        */
                [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI      */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r   */
                [ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r     */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};
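/*
 * Illustrative note (added, not part of the original table): generic cache
 * events reach this table via the perf cache-event encoding, where
 * attr.config = cache_id | (op_id << 8) | (result_id << 16).  For example,
 * an L1D read-miss event indexes
 * amd_hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)] and is
 * programmed as raw event 0x0141 (Data Cache Misses).  A value of -1 marks
 * an op/result combination that does not apply, while 0 means no suitable
 * hardware event is available.
 */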
/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]                    = 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]                  = 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]              = 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]                  = 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]           = 0x00c2,
  [PERF_COUNT_HW_BRANCH_MISSES]                 = 0x00c3,
  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]       = 0x00d0, /* "Decoder empty" event */
  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]        = 0x00d1, /* "Dispatch stalls" event */
};
static u64 amd_pmu_event_map(int hw_event)
{
        return amd_perfmon_event_map[hw_event];
}
static int amd_pmu_hw_config(struct perf_event *event)
{
        int ret = x86_pmu_hw_config(event);

        if (ret)
                return ret;

        if (event->attr.type != PERF_TYPE_RAW)
                return ret;

        event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

        return 0;
}
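/*
 * Illustrative note (added, describing AMD64_RAW_EVENT_MASK as an
 * assumption about the perf_event header definitions): for a raw event,
 * only the bits covered by the mask -- the event-select field split across
 * bits 7:0 and 35:32, the unit mask, and the edge/invert/count-mask
 * controls -- are copied from attr.config into hw.config, so a
 * user-supplied raw value cannot override the privilege-level bits already
 * chosen by x86_pmu_hw_config().
 */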
/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
        return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
        return (hwc->config & 0xe0) == 0xe0;
}
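/*
 * Worked example (added, illustrative): a config with event-select bits
 * 35:32 set to 0x1 and bits 7:0 set to 0xD6 yields
 * amd_get_event_code() == 0x100 | 0xD6 == 0x1D6.  amd_is_nb_event() only
 * looks at the low byte: codes 0xE0-0xFF (e.g. 0xE9, CPU requests to
 * memory) are treated as NorthBridge events.
 */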
static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
        struct amd_nb *nb = cpuc->amd_nb;

        return nb && nb->nb_id != -1;
}
static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
                                      struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        int i;

        /*
         * only care about NB events
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
                return;

        /*
         * need to scan whole list because event may not have
         * been assigned during scheduling
         *
         * no race condition possible because event can only
         * be removed on one CPU at a time AND PMU is disabled
         * when we come here
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (nb->owners[i] == event) {
                        cmpxchg(nb->owners+i, event, NULL);
                        break;
                }
        }
}
/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12.
 *
 * NB events are events measuring L3 cache and HyperTransport
 * traffic. They are identified by an event code whose low byte
 * is >= 0xe0. They measure events on the NorthBridge, which is
 * shared by all cores on a package. NB events are counted on a
 * shared set of counters. When an NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the
 * same counters to host NB events, which is why we use atomic
 * ops. Some multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non-NB events are not impacted by this restriction.
 */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        struct perf_event *old = NULL;
        int max = x86_pmu.num_counters;
        int i, j, k = -1;

        /*
         * if not NB event or no NB, then no constraints
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
                return &unconstrained;

        /*
         * detect if already present, if so reuse
         *
         * cannot merge with actual allocation
         * because of possible holes
         *
         * event can already be present yet not assigned (in hwc->idx)
         * because of successive calls to x86_schedule_events() from
         * hw_perf_group_sched_in() without hw_perf_enable()
         */
        for (i = 0; i < max; i++) {
                /*
                 * keep track of first free slot
                 */
                if (k == -1 && !nb->owners[i])
                        k = i;

                /* already present, reuse */
                if (nb->owners[i] == event)
                        goto done;
        }
        /*
         * not present, so grab a new slot
         * starting either at:
         */
        if (hwc->idx != -1) {
                /* previous assignment */
                i = hwc->idx;
        } else if (k != -1) {
                /* start from free slot found */
                i = k;
        } else {
                /*
                 * event not found, no slot found in
                 * first pass, try again from the
                 * beginning
                 */
                i = 0;
        }
        j = i;
        do {
                old = cmpxchg(nb->owners+i, NULL, event);
                if (!old)
                        break;
                if (++i == max)
                        i = 0;
        } while (i != j);
done:
        if (!old || old == event)
                return &nb->event_constraints[i];

        return &emptyconstraint;
}
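/*
 * Illustrative walk-through (added, not part of the original source):
 * assume four counters and a shared table nb->owners[] = { A, NULL, NULL,
 * NULL }.  Scheduling an NB event B with hwc->idx == -1 records k == 1 as
 * the first free slot in the scan above, then cmpxchg(nb->owners + 1,
 * NULL, B) claims slot 1 and &nb->event_constraints[1] is returned, i.e.
 * B may only use counter 1.  If every slot is owned by other events, the
 * loop wraps around without a successful cmpxchg and &emptyconstraint is
 * returned, so scheduling of B fails.
 */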
static struct amd_nb *amd_alloc_nb(int cpu)
{
        struct amd_nb *nb;
        int i;

        nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
                          cpu_to_node(cpu));
        if (!nb)
                return NULL;

        nb->nb_id = -1;

        /*
         * initialize all possible NB constraints
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                __set_bit(i, nb->event_constraints[i].idxmsk);
                nb->event_constraints[i].weight = 1;
        }
        return nb;
}
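/*
 * Note (added, descriptive): each nb->event_constraints[i] permits exactly
 * one counter -- bit i in idxmsk with weight 1 -- so an NB event that wins
 * slot i in nb->owners[] is pinned to hardware counter i.
 */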
static int amd_pmu_cpu_prepare(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

        WARN_ON_ONCE(cpuc->amd_nb);

        if (boot_cpu_data.x86_max_cores < 2)
                return NOTIFY_OK;

        cpuc->amd_nb = amd_alloc_nb(cpu);
        if (!cpuc->amd_nb)
                return NOTIFY_BAD;

        return NOTIFY_OK;
}
static void amd_pmu_cpu_starting(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        struct amd_nb *nb;
        int i, nb_id;

        if (boot_cpu_data.x86_max_cores < 2)
                return;

        nb_id = amd_get_nb_id(cpu);
        WARN_ON_ONCE(nb_id == BAD_APICID);

        for_each_online_cpu(i) {
                nb = per_cpu(cpu_hw_events, i).amd_nb;
                if (WARN_ON_ONCE(!nb))
                        continue;

                if (nb->nb_id == nb_id) {
                        cpuc->kfree_on_online = cpuc->amd_nb;
                        cpuc->amd_nb = nb;
                        break;
                }
        }

        cpuc->amd_nb->nb_id = nb_id;
        cpuc->amd_nb->refcnt++;
}
static void amd_pmu_cpu_dead(int cpu)
{
        struct cpu_hw_events *cpuhw;

        if (boot_cpu_data.x86_max_cores < 2)
                return;

        cpuhw = &per_cpu(cpu_hw_events, cpu);

        if (cpuhw->amd_nb) {
                struct amd_nb *nb = cpuhw->amd_nb;

                if (nb->nb_id == -1 || --nb->refcnt == 0)
                        kfree(nb);

                cpuhw->amd_nb = NULL;
        }
}
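/*
 * Note (added, descriptive): the three hotplug callbacks above form one
 * lifecycle.  amd_pmu_cpu_prepare() allocates a candidate amd_nb for the
 * incoming CPU, amd_pmu_cpu_starting() either adopts the amd_nb already
 * used by another online CPU on the same node (bumping its refcnt) or
 * keeps the fresh one, and amd_pmu_cpu_dead() drops the reference and
 * frees the structure once the last CPU of the node is gone.
 */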
static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = 4,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints,
        .put_event_constraints  = amd_put_event_constraints,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
};
#define AMD_EVENT_TYPE_MASK     0x000000F0ULL

#define AMD_EVENT_FP            0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS            0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC            0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU            0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE         0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS         0x000000C0ULL
#define AMD_EVENT_DE            0x000000D0ULL
#define AMD_EVENT_NB            0x000000E0ULL ... 0x000000F0ULL
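/*
 * Note (added, descriptive): the "a ... b" definitions above are meant to
 * be used as GCC case-range labels, e.g. "case AMD_EVENT_FP:" in
 * amd_get_event_constraints_f15h() expands to
 * "case 0x00000000ULL ... 0x00000010ULL:" and matches both FP type codes.
 */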
/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 *      0x000   FP      PERF_CTL[5:3]
 *      0x010   FP      PERF_CTL[5:3]
 *      0x020   LS      PERF_CTL[5:0]
 *      0x030   LS      PERF_CTL[5:0]
 *      0x040   DC      PERF_CTL[5:0]
 *      0x050   DC      PERF_CTL[5:0]
 *      0x060   CU      PERF_CTL[2:0]
 *      0x070   CU      PERF_CTL[2:0]
 *      0x080   IC/DE   PERF_CTL[2:0]
 *      0x090   IC/DE   PERF_CTL[2:0]
 *      0x0A0   ???     PERF_CTL[2:0]
 *      0x0B0   ???     PERF_CTL[2:0]
 *      0x0C0   EX/LS   PERF_CTL[5:0]
 *      0x0D0   DE      PERF_CTL[2:0]
 *      0x0E0   NB      NB_PERF_CTL[3:0]
 *      0x0F0   NB      NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 *      0x000   FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 *      0x003   FP      PERF_CTL[3]
 *      0x004   FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 *      0x00B   FP      PERF_CTL[3]
 *      0x00D   FP      PERF_CTL[3]
 *      0x023   DE      PERF_CTL[2:0]
 *      0x02D   LS      PERF_CTL[3]
 *      0x02E   LS      PERF_CTL[3,0]
 *      0x043   CU      PERF_CTL[2:0]
 *      0x045   CU      PERF_CTL[2:0]
 *      0x046   CU      PERF_CTL[2:0]
 *      0x054   CU      PERF_CTL[2:0]
 *      0x055   CU      PERF_CTL[2:0]
 *      0x08F   IC      PERF_CTL[0]
 *      0x187   DE      PERF_CTL[0]
 *      0x188   DE      PERF_CTL[0]
 *      0x0DB   EX      PERF_CTL[5:0]
 *      0x0DC   LS      PERF_CTL[5:0]
 *      0x0DD   LS      PERF_CTL[5:0]
 *      0x0DE   LS      PERF_CTL[5:0]
 *      0x0DF   LS      PERF_CTL[5:0]
 *      0x1D6   EX      PERF_CTL[5:0]
 *      0x1D8   EX      PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 */
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
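/*
 * Note (added, descriptive): the second EVENT_CONSTRAINT() argument is the
 * counter bitmask, so amd_f15_PMC0 allows only PERF_CTL[0] (0x01),
 * amd_f15_PMC20 allows PERF_CTL[2:0] (0x07), amd_f15_PMC3 allows
 * PERF_CTL[3] (0x08), amd_f15_PMC30 allows PERF_CTL[3] and PERF_CTL[0]
 * (0x09), amd_f15_PMC50 allows PERF_CTL[5:0] (0x3F), and amd_f15_PMC53
 * allows PERF_CTL[5:3] (0x38).
 */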
static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int event_code = amd_get_event_code(hwc);

        switch (event_code & AMD_EVENT_TYPE_MASK) {
        case AMD_EVENT_FP:
                switch (event_code) {
                case 0x000:
                        if (!(hwc->config & 0x0000F000ULL))
                                break;
                        if (!(hwc->config & 0x00000F00ULL))
                                break;
                        return &amd_f15_PMC3;
                case 0x004:
                        if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
                                break;
                        return &amd_f15_PMC3;
                case 0x003:
                case 0x00B:
                case 0x00D:
                        return &amd_f15_PMC3;
                }
                return &amd_f15_PMC53;
        case AMD_EVENT_LS:
        case AMD_EVENT_DC:
        case AMD_EVENT_EX_LS:
                switch (event_code) {
                case 0x023:
                case 0x043:
                case 0x045:
                case 0x046:
                case 0x054:
                case 0x055:
                        return &amd_f15_PMC20;
                case 0x02D:
                        return &amd_f15_PMC3;
                case 0x02E:
                        return &amd_f15_PMC30;
                default:
                        return &amd_f15_PMC50;
                }
        case AMD_EVENT_CU:
        case AMD_EVENT_IC_DE:
        case AMD_EVENT_DE:
                switch (event_code) {
                case 0x08F:
                case 0x187:
                case 0x188:
                        return &amd_f15_PMC0;
                case 0x0DB ... 0x0DF:
                case 0x1D6:
                case 0x1D8:
                        return &amd_f15_PMC50;
                default:
                        return &amd_f15_PMC20;
                }
        case AMD_EVENT_NB:
                /* not yet implemented */
                return &emptyconstraint;
        default:
                return &emptyconstraint;
        }
}
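/*
 * Worked example (added, illustrative): event 0x076 (CPU cycles) has type
 * 0x070 == AMD_EVENT_CU and matches no explicit case, so it gets the
 * default &amd_f15_PMC20 (PERF_CTL[2:0]); event 0x0C0 (retired
 * instructions) has type AMD_EVENT_EX_LS and likewise falls through to
 * the default &amd_f15_PMC50 (PERF_CTL[5:0]).
 */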
static __initconst const struct x86_pmu amd_pmu_f15h = {
        .name                   = "AMD Family 15h",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_F15H_PERF_CTL,
        .perfctr                = MSR_F15H_PERF_CTR,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = 6,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints_f15h,
        /* northbridge counters not yet implemented: */
#if 0
        .put_event_constraints  = amd_put_event_constraints,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
#endif
};
static __init int amd_pmu_init(void)
{
        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
                return -ENODEV;

        /*
         * If core performance counter extensions exist, it must be
         * family 15h, otherwise fail. See x86_pmu_addr_offset().
         */
        switch (boot_cpu_data.x86) {
        case 0x15:
                if (!cpu_has_perfctr_core)
                        return -ENODEV;
                x86_pmu = amd_pmu_f15h;
                break;
        default:
                if (cpu_has_perfctr_core)
                        return -ENODEV;
                x86_pmu = amd_pmu;
                break;
        }

        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        return 0;
}

#else /* CONFIG_CPU_SUP_AMD */

static int amd_pmu_init(void)
{
        return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */