// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 */
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>
#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt
static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};
static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;
static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}
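
/*
 * Illustrative example (added comment) for the delta arithmetic in
 * amd_uncore_read() below: the counters are 48 bits wide, so a wrap from
 * prev = 0xffffffffffff to new = 0x5 must yield a delta of 6.  Shifting
 * both values left by COUNTER_SHIFT (16) drops bits 48-63, the subtraction
 * wraps modulo 2^64, and the arithmetic right shift of the signed result
 * restores the sign:
 *
 *   (0x5 << 16) - (0xffffffffffff << 16) == 0x60000  (mod 2^64)
 *   (s64)0x60000 >> 16                   == 6
 */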
static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}
static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}
static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
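
/*
 * Illustrative note (added comment): the control and count MSRs for each
 * uncore counter are laid out as interleaved CTL/CTR pairs, which is why
 * amd_uncore_add() below computes config_base as msr_base + 2 * idx and
 * event_base as msr_base + 2 * idx + 1.  Assuming the msr-index.h value
 * MSR_F15H_NB_PERF_CTL == 0xc0010240, counter index 2 of the NB PMU
 * would use:
 *
 *   config_base = 0xc0010240 + 4 = 0xc0010244   (PERF_CTL2)
 *   event_base  = 0xc0010240 + 5 = 0xc0010245   (PERF_CTR2)
 */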
static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}
static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}
/*
 * Convert logical CPU number to L3 PMC Config ThreadMask format
 */
static u64 l3_thread_slice_mask(int cpu)
{
	u64 thread_mask, core = topology_core_id(cpu);
	unsigned int shift, thread = 0;

	if (topology_smt_supported() && !topology_is_primary_thread(cpu))
		thread = 1;

	if (boot_cpu_data.x86 <= 0x18) {
		shift = AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread;
		thread_mask = BIT_ULL(shift);

		return AMD64_L3_SLICE_MASK | thread_mask;
	}

	core = (core << AMD64_L3_COREID_SHIFT) & AMD64_L3_COREID_MASK;
	shift = AMD64_L3_THREAD_SHIFT + thread;
	thread_mask = BIT_ULL(shift);

	return AMD64_L3_EN_ALL_SLICES | core | thread_mask;
}
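
/*
 * Worked example (added comment, assuming the asm/perf_event.h
 * definitions, e.g. AMD64_L3_THREAD_SHIFT == 56): on a family 17h part,
 * the SMT sibling thread of core 5 takes the first branch above with
 * thread == 1, so
 *
 *   shift       = 56 + 2 * (5 % 4) + 1 = 59
 *   thread_mask = BIT_ULL(59)
 *
 * and the function returns AMD64_L3_SLICE_MASK | BIT_ULL(59), i.e. count
 * on all L3 slices but only for that one hardware thread.
 */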
static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. On family 16h and below,
	 * interrupts can be directed to a single target core, however, event
	 * counts generated by processes running on other cores cannot be
	 * masked out. So we do not support sampling and per-thread events via
	 * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
	 */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	/*
	 * SliceMask and ThreadMask need to be set for certain L3 events.
	 * For other events, the two fields do not affect the count.
	 */
	if (l3_mask && is_llc_event(event))
		hwc->config |= l3_thread_slice_mask(event->cpu);

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since requests can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}
static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};
/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned
 * based on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)				\
static ssize_t								\
_dev##_show##_name(struct device *dev,					\
		   struct device_attribute *attr,			\
		   char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)						\
static struct attribute *amd_uncore_format_attr_##_name[] = {		\
	&format_attr_event_##_name.attr,				\
	&format_attr_umask.attr,					\
	NULL,								\
};									\
static struct attribute_group amd_uncore_format_group_##_name = {	\
	.name = "format",						\
	.attrs = amd_uncore_format_attr_##_name,			\
};									\
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = { \
	&amd_uncore_attr_group,						\
	&amd_uncore_format_group_##_name,				\
	NULL,								\
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);
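
/*
 * Illustrative note (added comment): AMD_FORMAT_ATTR(event, _df,
 * "config:0-7,32-35,59-60") expands to an event_show_df() helper and a
 * format_attr_event_df device attribute.  Once the PMU is registered,
 * perf tooling reads the format string through the usual sysfs path,
 * something like:
 *
 *   $ cat /sys/bus/event_source/devices/amd_df/format/event
 *   config:0-7,32-35,59-60
 */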
static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};
static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}
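
/*
 * Descriptive note (added comment): amd_uncore_find_online_sibling()
 * below implements the sharing of one amd_uncore structure among all
 * CPUs behind the same NB/LLC.  If an online CPU already owns a
 * structure with the same id, the freshly allocated duplicate is parked
 * on uncore_unused_list (freed later by uncore_clean_online()) and the
 * shared structure's refcnt is bumped instead.
 */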
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}
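
/*
 * Descriptive note (added comment): the NB id comes from CPUID leaf
 * 0x8000001e, where ECX[7:0] is the node ID of the current core; the
 * LLC id is the cached per-CPU cpu_llc_id.  CPUs that report the same
 * id end up sharing one amd_uncore structure.
 */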
static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}
static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}
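
/*
 * Descriptive note (added comment): if the CPU going down is the one
 * currently collecting events for a shared uncore, uncore_down_prepare()
 * hands the perf context and the "active" role over to another online
 * CPU behind the same NB/LLC via perf_pmu_migrate_context(), so counting
 * continues uninterrupted.
 */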
static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);

	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}
static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	if (boot_cpu_data.x86 >= 0x17) {
		/*
		 * For F17h and above, the Northbridge counters are
		 * repurposed as Data Fabric counters, and L3 counters
		 * are supported as well. The PMUs are exported based
		 * on family as either L2 or L3 and NB or DF.
		 */
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L3;
		amd_nb_pmu.name		  = "amd_df";
		amd_llc_pmu.name	  = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
		l3_mask			  = true;
	} else {
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L2;
		amd_nb_pmu.name		  = "amd_nb";
		amd_llc_pmu.name	  = "amd_l2";
		format_attr_event_df	  = format_attr_event;
		format_attr_event_l3	  = format_attr_event;
		l3_mask			  = false;
	}

	amd_nb_pmu.attr_groups	= amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups	= amd_uncore_attr_groups_l3;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("%s NB counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("%s LLC counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;

	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}
device_initcall(amd_uncore_init);
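
/*
 * Example usage (illustrative, added comment): on a family 17h or newer
 * machine the PMUs registered above appear as "amd_df" and "amd_l3", and
 * a raw event can be counted system-wide with perf, e.g.:
 *
 *   perf stat -e 'amd_l3/event=0x01,umask=0x80/' -a -- sleep 1
 *
 * The event/umask values here are placeholders; consult the relevant AMD
 * Processor Programming Reference for real event encodings.
 */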