// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt
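
/*
 * The NB/DF and L3 PMC registers are 48 bits wide; COUNTER_SHIFT is used in
 * amd_uncore_read() so that counter wraparound is handled correctly in
 * 64-bit arithmetic. RDPMC_BASE_NB and RDPMC_BASE_LLC are the fixed RDPMC
 * counter index bases of the northbridge and last-level-cache counter banks.
 */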

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);

struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};
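
/*
 * A single amd_uncore instance is shared by every CPU that belongs to the
 * same northbridge/DF or L3 sharing domain: the per-CPU pointers below all
 * alias the shared instance and refcnt tracks how many CPUs reference it.
 */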

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * Since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us.
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
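
	/*
	 * The counters are 48 bits wide, so both values are shifted up by
	 * COUNTER_SHIFT before subtracting; the difference then wraps
	 * correctly in 64-bit arithmetic across a counter rollover, and
	 * shifting back down yields the signed delta.
	 */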
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;
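
	/*
	 * Uncore counters are laid out as interleaved PERF_CTL/PERF_CTR MSR
	 * pairs starting at msr_base: counter idx uses msr_base + 2 * idx as
	 * its control register and msr_base + 2 * idx + 1 as its count
	 * register, plus rdpmc_base + idx as its RDPMC index.
	 */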
	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

/*
 * Return a full thread and slice mask unless the user
 * has provided them.
 */
static u64 l3_thread_slice_mask(u64 config)
{
	if (boot_cpu_data.x86 <= 0x18)
		return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) |
		       ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK);

	/*
	 * If the user doesn't specify a threadmask, they're not trying to
	 * count core 0, so we enable all cores & threads.
	 * We'll also assume that they want to count slice 0 if they specify
	 * a threadmask and leave sliceid and enallslices unpopulated.
	 */
	if (!(config & AMD64_L3_F19H_THREAD_MASK))
		return AMD64_L3_F19H_THREAD_MASK | AMD64_L3_EN_ALL_SLICES |
		       AMD64_L3_EN_ALL_CORES;

	return config & (AMD64_L3_F19H_THREAD_MASK | AMD64_L3_SLICEID_MASK |
			 AMD64_L3_EN_ALL_CORES | AMD64_L3_EN_ALL_SLICES |
			 AMD64_L3_COREID_MASK);
}
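
/*
 * For example, on Family 19h an L3 event specified without a threadmask
 * (via the threadmask/coreid/sliceid/enallslices/enallcores format fields
 * exported further down) is widened to count on all cores, threads and
 * slices, whereas supplying a threadmask restricts the count to the
 * requested threads on the selected slice.
 */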

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. On family 16h and below,
	 * interrupts can be directed to a single target core, however, event
	 * counts generated by processes running on other cores cannot be masked
	 * out. So we do not support sampling and per-thread events via
	 * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
	 */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	/*
	 * SliceMask and ThreadMask need to be set for certain L3 events.
	 * For other events, the two fields do not affect the count.
	 */
	if (l3_mask && is_llc_event(event))
		hwc->config |= l3_thread_slice_mask(event->attr.config);

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * Since requests can come in to any of the shared cores, we remap
	 * them to a single common CPU.
	 */
	event->cpu = uncore->cpu;

	return 0;
}
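
/*
 * The CPU that carries the events for each sharing domain is exported via
 * the "cpumask" sysfs attribute below, so tools such as perf know which
 * CPU to open uncore events on.
 */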

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				      struct kobj_attribute *attr,	\
				      char *page)			\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)

DEFINE_UNCORE_FORMAT_ATTR(event12,	event,		"config:0-7,32-35");
DEFINE_UNCORE_FORMAT_ATTR(event14,	event,		"config:0-7,32-35,59-60"); /* F17h+ DF */
DEFINE_UNCORE_FORMAT_ATTR(event8,	event,		"config:0-7");		 /* F17h+ L3 */
DEFINE_UNCORE_FORMAT_ATTR(umask,	umask,		"config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(coreid,	coreid,		"config:42-44");	 /* F19h L3 */
DEFINE_UNCORE_FORMAT_ATTR(slicemask,	slicemask,	"config:48-51");	 /* F17h L3 */
DEFINE_UNCORE_FORMAT_ATTR(threadmask8,	threadmask,	"config:56-63");	 /* F17h L3 */
DEFINE_UNCORE_FORMAT_ATTR(threadmask2,	threadmask,	"config:56-57");	 /* F19h L3 */
DEFINE_UNCORE_FORMAT_ATTR(enallslices,	enallslices,	"config:46");		 /* F19h L3 */
DEFINE_UNCORE_FORMAT_ATTR(enallcores,	enallcores,	"config:47");		 /* F19h L3 */
DEFINE_UNCORE_FORMAT_ATTR(sliceid,	sliceid,	"config:48-50");	 /* F19h L3 */

static struct attribute *amd_uncore_df_format_attr[] = {
	&format_attr_event12.attr, /* event14 if F17h+ */
	&format_attr_umask.attr,
	NULL,
};

static struct attribute *amd_uncore_l3_format_attr[] = {
	&format_attr_event12.attr, /* event8 if F17h+ */
	&format_attr_umask.attr,
	NULL, /* slicemask if F17h, coreid if F19h */
	NULL, /* threadmask8 if F17h, enallslices if F19h */
	NULL, /* enallcores if F19h */
	NULL, /* sliceid if F19h */
	NULL, /* threadmask2 if F19h */
	NULL,
};

static struct attribute_group amd_uncore_df_format_group = {
	.name = "format",
	.attrs = amd_uncore_df_format_attr,
};

static struct attribute_group amd_uncore_l3_format_group = {
	.name = "format",
	.attrs = amd_uncore_l3_format_attr,
};
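
/*
 * The NULL slots above are placeholders; amd_uncore_init() fills them in
 * with the format attributes that match the detected CPU family.
 */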

static const struct attribute_group *amd_uncore_df_attr_groups[] = {
	&amd_uncore_attr_group,
	&amd_uncore_df_format_group,
	NULL,
};

static const struct attribute_group *amd_uncore_l3_attr_groups[] = {
	&amd_uncore_attr_group,
	&amd_uncore_l3_format_group,
	NULL,
};

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_df_attr_groups,
	.name		= "amd_nb",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_l3_attr_groups,
	.name		= "amd_l2",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}
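
/*
 * CPU hotplug flow: the prepare callback below allocates per-CPU uncore
 * structures, the starting callback discovers each domain id and folds
 * CPUs of the same domain onto one shared instance, the online/down
 * callbacks maintain the active cpumask and migrate the perf context away
 * from a departing CPU, and the dead callback drops the reference.
 */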

static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}
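
/*
 * The NB/DF sharing domain is identified by the node id in CPUID leaf
 * 0x8000001e (ECX[7:0]); the L3 domain uses the cached cpu_llc_id. CPUs
 * reporting the same id end up sharing one amd_uncore instance.
 */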

static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);

	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

static int __init amd_uncore_init(void)
{
	struct attribute **df_attr = amd_uncore_df_format_attr;
	struct attribute **l3_attr = amd_uncore_l3_format_attr;
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	num_counters_nb	 = NUM_COUNTERS_NB;
	num_counters_llc = NUM_COUNTERS_L2;
	if (boot_cpu_data.x86 >= 0x17) {
		/*
		 * For F17h and above, the Northbridge counters are
		 * repurposed as Data Fabric counters. L3 counters are
		 * supported as well. The PMUs are exported based on
		 * family as either L2 or L3 and NB or DF.
		 */
		num_counters_llc = NUM_COUNTERS_L3;
		amd_nb_pmu.name	 = "amd_df";
		amd_llc_pmu.name = "amd_l3";
		l3_mask		 = true;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		if (boot_cpu_data.x86 >= 0x17)
			*df_attr = &format_attr_event14.attr;

		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("%d %s %s counters detected\n", num_counters_nb,
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "HYGON" : "",
			amd_nb_pmu.name);
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
		if (boot_cpu_data.x86 >= 0x19) {
			*l3_attr++ = &format_attr_event8.attr;
			*l3_attr++ = &format_attr_umask.attr;
			*l3_attr++ = &format_attr_coreid.attr;
			*l3_attr++ = &format_attr_enallslices.attr;
			*l3_attr++ = &format_attr_enallcores.attr;
			*l3_attr++ = &format_attr_sliceid.attr;
			*l3_attr++ = &format_attr_threadmask2.attr;
		} else if (boot_cpu_data.x86 >= 0x17) {
			*l3_attr++ = &format_attr_event8.attr;
			*l3_attr++ = &format_attr_umask.attr;
			*l3_attr++ = &format_attr_slicemask.attr;
			*l3_attr++ = &format_attr_threadmask8.attr;
		}

		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("%d %s %s counters detected\n", num_counters_llc,
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "HYGON" : "",
			amd_llc_pmu.name);
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;

	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}

device_initcall(amd_uncore_init);
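
/*
 * Example (illustrative only; the raw event/umask encodings here are
 * placeholders and should be taken from the relevant PPR): on Family 17h and
 * later the PMUs register as amd_df and amd_l3, so a system-wide count could
 * look like
 *
 *   perf stat -a -e amd_df/event=0x07,umask=0x00/ -e amd_l3/event=0x04,umask=0xff/ sleep 1
 *
 * The events are counting-only (PERF_PMU_CAP_NO_INTERRUPT), so sampling is
 * not supported.
 */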