/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>
#define NUM_COUNTERS_NB         4
#define NUM_COUNTERS_L2         4
#define NUM_COUNTERS_L3         6
#define MAX_COUNTERS            6

#define RDPMC_BASE_NB           6
#define RDPMC_BASE_LLC          10

#define COUNTER_SHIFT           16

#undef pr_fmt
#define pr_fmt(fmt)     "amd_uncore: " fmt
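
/*
 * Each uncore counter is a control/counter MSR pair: the event select
 * register sits at msr_base + 2 * idx and the count at msr_base + 2 * idx + 1,
 * while RDPMC reads the same counter via index rdpmc_base + idx
 * (6..9 for the NB/DF counters, 10..15 for the L2/L3 counters).
 */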
static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);
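
/*
 * One amd_uncore instance is shared by all CPUs behind the same
 * northbridge / last level cache: "id" names the sharing domain,
 * "refcnt" counts the CPUs attached to it, and "cpu" is the single
 * CPU that owns the counters and carries the perf events.
 */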
struct amd_uncore {
        int id;
        int refcnt;
        int cpu;
        int num_counters;
        int rdpmc_base;
        u32 msr_base;
        cpumask_t *active_mask;
        struct pmu *pmu;
        struct perf_event *events[MAX_COUNTERS];
        struct hlist_node node;
};
static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;
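
/*
 * amd_uncore_nb and amd_uncore_llc are per-cpu pointers; once the
 * hotplug callbacks have run, every CPU of a domain points at the same
 * shared amd_uncore, which is what event_to_amd_uncore() returns.
 */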
static bool is_nb_event(struct perf_event *event)
{
        return event->pmu->type == amd_nb_pmu.type;
}
static bool is_llc_event(struct perf_event *event)
{
        return event->pmu->type == amd_llc_pmu.type;
}
static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
        if (is_nb_event(event) && amd_uncore_nb)
                return *per_cpu_ptr(amd_uncore_nb, event->cpu);
        else if (is_llc_event(event) && amd_uncore_llc)
                return *per_cpu_ptr(amd_uncore_llc, event->cpu);

        return NULL;
}
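
/*
 * The hardware counters are 48 bits wide, so deltas are formed in the
 * upper bits of a 64-bit value: shifting both readings left by
 * COUNTER_SHIFT (16) before subtracting makes a counter wrap-around
 * come out right, e.g. prev = 0xffffffffffff, new = 0 yields
 * (0 << 16) - (0xffffffffffff << 16) = 0x10000, i.e. a delta of 1
 * after shifting back down.
 */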
static void amd_uncore_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev, new;
        s64 delta;

        /*
         * since we do not enable counter overflow interrupts,
         * we do not have to worry about prev_count changing on us
         */

        prev = local64_read(&hwc->prev_count);
        rdpmcl(hwc->event_base_rdpmc, new);
        local64_set(&hwc->prev_count, new);
        delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
        delta >>= COUNTER_SHIFT;
        local64_add(delta, &event->count);
}
static void amd_uncore_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        if (flags & PERF_EF_RELOAD)
                wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

        hwc->state = 0;
        wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
        perf_event_update_userpage(event);
}
static void amd_uncore_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
        hwc->state |= PERF_HES_STOPPED;

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                amd_uncore_read(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}
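
/*
 * Counter slots in uncore->events[] are shared by every CPU of the
 * domain, so a free slot is claimed with cmpxchg(); whoever wins the
 * exchange owns that hardware counter until amd_uncore_del().
 */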
static int amd_uncore_add(struct perf_event *event, int flags)
{
        int i;
        struct amd_uncore *uncore = event_to_amd_uncore(event);
        struct hw_perf_event *hwc = &event->hw;

        /* are we already assigned? */
        if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
                goto out;

        for (i = 0; i < uncore->num_counters; i++) {
                if (uncore->events[i] == event) {
                        hwc->idx = i;
                        goto out;
                }
        }

        /* if not, take the first available counter */
        hwc->idx = -1;
        for (i = 0; i < uncore->num_counters; i++) {
                if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
                        hwc->idx = i;
                        break;
                }
        }

out:
        if (hwc->idx == -1)
                return -EBUSY;

        hwc->config_base = uncore->msr_base + (2 * hwc->idx);
        hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
        hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                amd_uncore_start(event, PERF_EF_RELOAD);

        return 0;
}
static void amd_uncore_del(struct perf_event *event, int flags)
{
        int i;
        struct amd_uncore *uncore = event_to_amd_uncore(event);
        struct hw_perf_event *hwc = &event->hw;

        amd_uncore_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < uncore->num_counters; i++) {
                if (cmpxchg(&uncore->events[i], event, NULL) == event)
                        break;
        }

        hwc->idx = -1;
}
static int amd_uncore_event_init(struct perf_event *event)
{
        struct amd_uncore *uncore;
        struct hw_perf_event *hwc = &event->hw;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /*
         * NB and Last level cache counters (MSRs) are shared across all cores
         * that share the same NB / Last level cache. Interrupts can be directed
         * to a single target core, however, event counts generated by processes
         * running on other cores cannot be masked out. So we do not support
         * sampling and per-thread events.
         */
        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;

        /* NB and Last level cache counters do not have usr/os/guest/host bits */
        if (event->attr.exclude_user || event->attr.exclude_kernel ||
            event->attr.exclude_host || event->attr.exclude_guest)
                return -EINVAL;

        /* and we do not enable counter overflow interrupts */
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;

        /*
         * SliceMask and ThreadMask need to be set for certain L3 events in
         * Family 17h. For other events, the two fields do not affect the count.
         */
        if (l3_mask && is_llc_event(event))
                hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);

        if (event->cpu < 0)
                return -EINVAL;

        uncore = event_to_amd_uncore(event);
        if (!uncore)
                return -ENODEV;

        /*
         * since request can come in to any of the shared cores, we will remap
         * to a single common cpu.
         */
        event->cpu = uncore->cpu;

        return 0;
}
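
/*
 * The "cpumask" sysfs attribute exposes the one active CPU per domain,
 * so tools such as perf know which CPU to open uncore events on.
 */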
static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        cpumask_t *active_mask;
        struct pmu *pmu = dev_get_drvdata(dev);

        if (pmu->type == amd_nb_pmu.type)
                active_mask = &amd_nb_active_mask;
        else if (pmu->type == amd_llc_pmu.type)
                active_mask = &amd_llc_active_mask;
        else
                return 0;

        return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);
static struct attribute *amd_uncore_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group amd_uncore_attr_group = {
        .attrs = amd_uncore_attrs,
};
/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)                                \
static ssize_t                                                               \
_dev##_show##_name(struct device *dev,                                       \
                   struct device_attribute *attr,                            \
                   char *page)                                               \
{                                                                            \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                          \
        return sprintf(page, _format "\n");                                  \
}                                                                            \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)                                                 \
static struct attribute *amd_uncore_format_attr_##_name[] = {               \
        &format_attr_event_##_name.attr,                                     \
        &format_attr_umask.attr,                                             \
        NULL,                                                                \
};                                                                           \
static struct attribute_group amd_uncore_format_group_##_name = {           \
        .name = "format",                                                    \
        .attrs = amd_uncore_format_attr_##_name,                             \
};                                                                           \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {   \
        &amd_uncore_attr_group,                                              \
        &amd_uncore_format_group_##_name,                                    \
        NULL,                                                                \
};
AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);
static struct pmu amd_nb_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = amd_uncore_event_init,
        .add            = amd_uncore_add,
        .del            = amd_uncore_del,
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
};
static struct pmu amd_llc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = amd_uncore_event_init,
        .add            = amd_uncore_add,
        .del            = amd_uncore_del,
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
};
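
/*
 * CPU hotplug handling, wired up via cpuhp_setup_state() in
 * amd_uncore_init(): "prepare" allocates the per-cpu amd_uncore
 * structures, "starting" works out which NB/LLC domain the CPU belongs
 * to and merges duplicates with an already-online sibling, and
 * "online" publishes the owning CPU in the active cpumask.  Teardown
 * mirrors this with down_prepare (migrate events to a sibling) and
 * dead (drop the reference, free on last CPU).
 */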
static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
        return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
                            cpu_to_node(cpu));
}
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
        struct amd_uncore *uncore_nb = NULL, *uncore_llc;

        if (amd_uncore_nb) {
                uncore_nb = amd_uncore_alloc(cpu);
                if (!uncore_nb)
                        goto fail;
                uncore_nb->cpu = cpu;
                uncore_nb->num_counters = num_counters_nb;
                uncore_nb->rdpmc_base = RDPMC_BASE_NB;
                uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
                uncore_nb->active_mask = &amd_nb_active_mask;
                uncore_nb->pmu = &amd_nb_pmu;
                uncore_nb->id = -1;
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
        }

        if (amd_uncore_llc) {
                uncore_llc = amd_uncore_alloc(cpu);
                if (!uncore_llc)
                        goto fail;
                uncore_llc->cpu = cpu;
                uncore_llc->num_counters = num_counters_llc;
                uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
                uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
                uncore_llc->active_mask = &amd_llc_active_mask;
                uncore_llc->pmu = &amd_llc_pmu;
                uncore_llc->id = -1;
                *per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
        }

        return 0;

fail:
        if (amd_uncore_nb)
                *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
        kfree(uncore_nb);
        return -ENOMEM;
}
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
                               struct amd_uncore * __percpu *uncores)
{
        unsigned int cpu;
        struct amd_uncore *that;

        for_each_online_cpu(cpu) {
                that = *per_cpu_ptr(uncores, cpu);

                if (!that)
                        continue;

                if (this == that)
                        continue;

                if (this->id == that->id) {
                        hlist_add_head(&this->node, &uncore_unused_list);
                        this = that;
                        break;
                }
        }

        this->refcnt++;
        return this;
}
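
/*
 * The uncore "id" identifies the sharing domain: for the NB/DF PMU it
 * is the node id from CPUID leaf 0x8000001e ECX[7:0]; for the LLC PMU
 * it is derived from the APIC id and the number of logical CPUs
 * sharing the cache (CPUID leaf 0x8000001d EAX[25:14] + 1), so all
 * CPUs behind the same cache compute the same value.
 */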
static int amd_uncore_cpu_starting(unsigned int cpu)
{
        unsigned int eax, ebx, ecx, edx;
        struct amd_uncore *uncore;

        if (amd_uncore_nb) {
                uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
                uncore->id = ecx & 0xff;

                uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
        }

        if (amd_uncore_llc) {
                unsigned int apicid = cpu_data(cpu).apicid;
                unsigned int nshared, subleaf, prev_eax = 0;

                uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
                /*
                 * Iterate over Cache Topology Definition leaves until no
                 * more cache descriptions are available.
                 */
                for (subleaf = 0; subleaf < 5; subleaf++) {
                        cpuid_count(0x8000001d, subleaf, &eax, &ebx, &ecx, &edx);

                        /* EAX[0:4] gives type of cache */
                        if (!(eax & 0x1f))
                                break;

                        prev_eax = eax;
                }
                nshared = ((prev_eax >> 14) & 0xfff) + 1;

                uncore->id = apicid - (apicid % nshared);

                uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
                *per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
        }

        return 0;
}
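
/*
 * Duplicates found in the starting callback are only queued on
 * uncore_unused_list there (that callback runs on the hotplugged CPU
 * with interrupts disabled); the actual freeing happens later, from
 * the online callback via uncore_clean_online().
 */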
static void uncore_clean_online(void)
{
        struct amd_uncore *uncore;
        struct hlist_node *n;

        hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
                hlist_del(&uncore->node);
                kfree(uncore);
        }
}
static void uncore_online(unsigned int cpu,
                          struct amd_uncore * __percpu *uncores)
{
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

        uncore_clean_online();

        if (cpu == uncore->cpu)
                cpumask_set_cpu(cpu, uncore->active_mask);
}
static int amd_uncore_cpu_online(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_online(cpu, amd_uncore_nb);

        if (amd_uncore_llc)
                uncore_online(cpu, amd_uncore_llc);

        return 0;
}
static void uncore_down_prepare(unsigned int cpu,
                                struct amd_uncore * __percpu *uncores)
{
        unsigned int i;
        struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

        if (this->cpu != cpu)
                return;

        /* this cpu is going down, migrate to a shared sibling if possible */
        for_each_online_cpu(i) {
                struct amd_uncore *that = *per_cpu_ptr(uncores, i);

                if (cpu == i)
                        continue;

                if (this == that) {
                        perf_pmu_migrate_context(this->pmu, cpu, i);
                        cpumask_clear_cpu(cpu, that->active_mask);
                        cpumask_set_cpu(i, that->active_mask);
                        that->cpu = i;
                        break;
                }
        }
}
static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_down_prepare(cpu, amd_uncore_nb);

        if (amd_uncore_llc)
                uncore_down_prepare(cpu, amd_uncore_llc);

        return 0;
}
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

        if (cpu == uncore->cpu)
                cpumask_clear_cpu(cpu, uncore->active_mask);

        if (!--uncore->refcnt)
                kfree(uncore);
        *per_cpu_ptr(uncores, cpu) = NULL;
}
static int amd_uncore_cpu_dead(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_dead(cpu, amd_uncore_nb);

        if (amd_uncore_llc)
                uncore_dead(cpu, amd_uncore_llc);

        return 0;
}
static int __init amd_uncore_init(void)
{
        int ret = -ENODEV;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return -ENODEV;

        if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
                return -ENODEV;

        if (boot_cpu_data.x86 == 0x17) {
                /*
                 * For F17h, the Northbridge counters are repurposed as Data
                 * Fabric counters. Also, L3 counters are supported too. The PMUs
                 * are exported based on family as either L2 or L3 and NB or DF.
                 */
                num_counters_nb           = NUM_COUNTERS_NB;
                num_counters_llc          = NUM_COUNTERS_L3;
                amd_nb_pmu.name           = "amd_df";
                amd_llc_pmu.name          = "amd_l3";
                format_attr_event_df.show = &event_show_df;
                format_attr_event_l3.show = &event_show_l3;
                l3_mask                   = true;
        } else {
                num_counters_nb           = NUM_COUNTERS_NB;
                num_counters_llc          = NUM_COUNTERS_L2;
                amd_nb_pmu.name           = "amd_nb";
                amd_llc_pmu.name          = "amd_l2";
                format_attr_event_df      = format_attr_event;
                format_attr_event_l3      = format_attr_event;
                l3_mask                   = false;
        }

        amd_nb_pmu.attr_groups  = amd_uncore_attr_groups_df;
        amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
                amd_uncore_nb = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_nb) {
                        ret = -ENOMEM;
                        goto fail_nb;
                }
                ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
                if (ret)
                        goto fail_nb;

                pr_info("AMD NB counters detected\n");
                ret = 0;
        }

        if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
                amd_uncore_llc = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_llc) {
                        ret = -ENOMEM;
                        goto fail_llc;
                }
                ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
                if (ret)
                        goto fail_llc;

                pr_info("AMD LLC counters detected\n");
                ret = 0;
        }

        /*
         * Install callbacks. Core will call them for each online cpu.
         */
        if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
                              "perf/x86/amd/uncore:prepare",
                              amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
                goto fail_llc;

        if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
                              "perf/x86/amd/uncore:starting",
                              amd_uncore_cpu_starting, NULL))
                goto fail_prep;

        if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
                              "perf/x86/amd/uncore:online",
                              amd_uncore_cpu_online,
                              amd_uncore_cpu_down_prepare))
                goto fail_start;

        return 0;

fail_start:
        cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
        cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
                perf_pmu_unregister(&amd_nb_pmu);
        if (amd_uncore_llc)
                free_percpu(amd_uncore_llc);
fail_nb:
        if (amd_uncore_nb)
                free_percpu(amd_uncore_nb);

        return ret;
}
device_initcall(amd_uncore_init);