Linux 4.18.10
[linux/fpc-iii.git] / arch/x86/events/amd/uncore.c
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>
#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16
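
/*
 * Editorial note: the uncore counters are 48 bits wide; COUNTER_SHIFT is
 * the remaining 16 bits, used by amd_uncore_read() to sign-extend count
 * deltas (see the note after that function). RDPMC_BASE_* are the offsets
 * at which the NB and LLC counters appear in the RDPMC instruction's
 * counter index space.
 */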

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;

static HLIST_HEAD(uncore_unused_list);
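
/*
 * When a CPU comes online and its uncore turns out to be shared with an
 * already-online sibling, the freshly allocated amd_uncore is parked on
 * uncore_unused_list and freed later from the online callback: the
 * starting callback that discovers the duplicate runs in atomic context
 * and cannot kfree() it there.
 */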

struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}
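
/*
 * Editorial note on the COUNTER_SHIFT dance above: the counters are 48
 * bits wide. Shifting both raw values left by 16 bits discards any
 * garbage in the upper bits and puts bit 47 of the counter at bit 63,
 * so the signed subtraction followed by an arithmetic right shift
 * yields a correct delta even if the counter wrapped once between two
 * reads. Worked example: prev = 0xffffffffffff, new = 0x5 gives
 * delta = ((0x50000 - 0xffffffffffff0000) >> 16) = 6.
 */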

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}
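
/*
 * Counter slots are claimed with cmpxchg() because several CPUs sharing
 * one uncore can try to schedule events concurrently. Each slot maps to
 * an interleaved CTL/CTR MSR pair: counter i uses msr_base + 2*i as the
 * event select register and msr_base + 2*i + 1 as the count register,
 * and is read back via RDPMC index rdpmc_base + i.
 */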

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core, however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and Last level cache counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}
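
/*
 * Illustrative note (editorial, not from the original file): an event
 * that reaches this init function comes from a counting, CPU-bound
 * perf_event_open() call: a perf_event_attr with .type set to this
 * PMU's dynamically assigned type, .config carrying the raw event/umask
 * bits, pid == -1 and a valid cpu. Sampling events
 * (.sample_period != 0) and task-bound events are rejected above.
 */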

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);
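
/*
 * The attribute above shows up in sysfs as, e.g.,
 * /sys/bus/event_source/devices/amd_nb/cpumask and lists one
 * representative online CPU per uncore unit; tools such as perf read it
 * to know which CPUs the events must be opened on.
 */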

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)				     \
static ssize_t								     \
_dev##_show##_name(struct device *dev,					     \
		   struct device_attribute *attr,			     \
		   char *page)						     \
{									     \
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			     \
	return sprintf(page, _format "\n");				     \
}									     \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)						     \
static struct attribute *amd_uncore_format_attr_##_name[] = {		     \
	&format_attr_event_##_name.attr,				     \
	&format_attr_umask.attr,					     \
	NULL,								     \
};									     \
static struct attribute_group amd_uncore_format_group_##_name = {	     \
	.name = "format",						     \
	.attrs = amd_uncore_format_attr_##_name,			     \
};									     \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {   \
	&amd_uncore_attr_group,						     \
	&amd_uncore_format_group_##_name,				     \
	NULL,								     \
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);
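
/*
 * Illustrative only (editorial): the format strings above tell perf how
 * to pack the event and umask fields into attr.config, so a raw event
 * can be requested symbolically, e.g.:
 *
 *	perf stat -e 'amd_df/event=0xHH,umask=0xHH/' -a -- sleep 1
 *
 * where 0xHH are placeholders for event codes from the family-specific
 * BKDG/PPR, not real values.
 */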

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}
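
/*
 * Note on the failure path above: if the LLC allocation fails after the
 * NB allocation succeeded, the NB per-cpu slot is cleared and uncore_nb
 * freed; uncore_llc is NULL at that point, so nothing else can leak.
 */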

static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}
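
/*
 * If an online CPU already owns an amd_uncore with the same id, this
 * CPU's private copy is parked on uncore_unused_list and the shared
 * instance is returned instead; refcnt tracks how many CPUs point at
 * each instance so that uncore_dead() knows when to free it.
 */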

static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}
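
/*
 * The NB/DF id comes from CPUID leaf 0x8000001e, whose ECX[7:0] is the
 * node id of this core (the TOPOEXT feature checked at init time
 * guarantees the leaf is available). The LLC id is taken from the
 * kernel's cached cpu_llc_id, which groups cores sharing a last level
 * cache.
 */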

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);

	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}
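
/*
 * Summary of the CPU hotplug lifecycle wired up below (editorial):
 *
 *   prepare  - amd_uncore_cpu_up_prepare():   allocate per-cpu state
 *   starting - amd_uncore_cpu_starting():     read ids, merge with an
 *              online sibling sharing the same uncore
 *   online   - amd_uncore_cpu_online():       free parked duplicates,
 *              publish the owner CPU in the active cpumask
 *   down     - amd_uncore_cpu_down_prepare(): migrate events to a
 *              surviving sibling
 *   dead     - amd_uncore_cpu_dead():         drop the reference, free
 *              on last user
 */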

static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	if (boot_cpu_data.x86 == 0x17) {
		/*
		 * For F17h, the Northbridge counters are repurposed as Data
		 * Fabric counters. Also, L3 counters are supported too. The PMUs
		 * are exported based on family as either L2 or L3 and NB or DF.
		 */
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L3;
		amd_nb_pmu.name		  = "amd_df";
		amd_llc_pmu.name	  = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
	} else {
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L2;
		amd_nb_pmu.name		  = "amd_nb";
		amd_llc_pmu.name	  = "amd_l2";
		format_attr_event_df	  = format_attr_event;
		format_attr_event_l3	  = format_attr_event;
	}

	amd_nb_pmu.attr_groups	= amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("AMD NB counters detected\n");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("AMD LLC counters detected\n");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;

	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}
device_initcall(amd_uncore_init);
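
/*
 * Editorial note: on a supported CPU the boot log shows (with the
 * pr_fmt prefix set at the top of this file):
 *
 *	amd_uncore: AMD NB counters detected
 *	amd_uncore: AMD LLC counters detected
 *
 * and the registered PMUs appear under /sys/bus/event_source/devices/
 * as either amd_nb/amd_l2 (pre-F17h) or amd_df/amd_l3 (F17h).
 */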