arch/x86/events/amd/uncore.c

/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);

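/*
 * One instance describes a shared uncore counter domain (northbridge/DF or
 * last-level cache). CPUs in the same domain share one instance: @cpu is the
 * CPU that owns the perf context and @refcnt counts the CPUs attached to it.
 */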
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

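/*
 * Read the free-running counter with RDPMC and accumulate the delta. The
 * hardware counters are 48 bits wide, so the difference is shifted up by
 * COUNTER_SHIFT and back down again to sign-extend across a wraparound.
 */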
static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

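/*
 * Reprogram the counter, optionally reloading the previously saved count,
 * and set the enable bit in the event-select MSR.
 */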
static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

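/* Clear the enable bit and, on PERF_EF_UPDATE, fold in the final count. */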
static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

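/*
 * ->add(): claim a counter slot in the shared uncore instance. cmpxchg() is
 * used because events from several CPUs can race for the same slots.
 */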
static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

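/* ->del(): stop the event and release its counter slot. */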
static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core, however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and Last level cache counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	/*
	 * SliceMask and ThreadMask need to be set for certain L3 events in
	 * Family 17h. For other events, the two fields do not affect the count.
	 */
	if (l3_mask)
		hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

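/* Report, via the sysfs "cpumask" file, which CPUs carry the uncore events. */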
static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)				\
static ssize_t								\
_dev##_show##_name(struct device *dev,					\
		struct device_attribute *attr,				\
		char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)						\
static struct attribute *amd_uncore_format_attr_##_name[] = {		\
	&format_attr_event_##_name.attr,				\
	&format_attr_umask.attr,					\
	NULL,								\
};									\
static struct attribute_group amd_uncore_format_group_##_name = {	\
	.name = "format",						\
	.attrs = amd_uncore_format_attr_##_name,			\
};									\
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = { \
	&amd_uncore_attr_group,						\
	&amd_uncore_format_group_##_name,				\
	NULL,								\
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

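/*
 * CPU hotplug "prepare" callback: allocate the NB/DF and LLC uncore
 * structures for an incoming CPU and fill in their defaults.
 */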
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

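/*
 * If another online CPU already hosts an instance with the same id, adopt
 * that instance and queue our own copy on uncore_unused_list for freeing.
 */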
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

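/*
 * CPU hotplug "starting" callback: determine this CPU's node id (via CPUID
 * leaf 0x8000001e) and LLC id, then merge with any existing sibling instance.
 */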
static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

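/* Free the instances that lost the sharing race in amd_uncore_cpu_starting(). */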
static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

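/*
 * When the owning CPU goes down, migrate its perf context to another online
 * CPU that shares the same uncore instance.
 */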
static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

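/* Drop the departing CPU's reference; the last user frees the instance. */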
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

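/*
 * Probe for AMD/Hygon uncore counters, name the PMUs by family (amd_nb/amd_l2
 * vs. amd_df/amd_l3), register them with perf and hook up the CPU hotplug
 * callbacks.
 */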
static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
		/*
		 * For F17h or F18h, the Northbridge counters are
		 * repurposed as Data Fabric counters. Also, L3
		 * counters are supported too. The PMUs are exported
		 * based on family as either L2 or L3 and NB or DF.
		 */
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L3;
		amd_nb_pmu.name = "amd_df";
		amd_llc_pmu.name = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
		l3_mask = true;
	} else {
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L2;
		amd_nb_pmu.name = "amd_nb";
		amd_llc_pmu.name = "amd_l2";
		format_attr_event_df = format_attr_event;
		format_attr_event_l3 = format_attr_event;
		l3_mask = false;
	}

	amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("%s NB counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("%s LLC counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}
device_initcall(amd_uncore_init);