/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 * Copyright 2014 Tilera Corporation. All Rights Reserved.
 * Copyright (C) 2018 Andes Technology Corporation
 *
 * Perf_events support for RISC-V platforms.
 *
 * Since the spec (as of now, Priv-Spec 1.10) does not provide enough
 * functionality for perf events to fully work, this file provides
 * the very basic framework only.
 *
 * For platform porting, please check Documentation/riscv/pmu.txt.
 *
 * The Copyright line includes x86 and tile ones.
 */
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <linux/atomic.h>
#include <linux/of.h>

#include <asm/perf_event.h>
static const struct riscv_pmu *riscv_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
/*
 * Hardware & cache maps and their methods
 */
static const int riscv_hw_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES]              = RISCV_PMU_CYCLE,
        [PERF_COUNT_HW_INSTRUCTIONS]            = RISCV_PMU_INSTRET,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = RISCV_OP_UNSUPP,
        [PERF_COUNT_HW_CACHE_MISSES]            = RISCV_OP_UNSUPP,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = RISCV_OP_UNSUPP,
        [PERF_COUNT_HW_BRANCH_MISSES]           = RISCV_OP_UNSUPP,
        [PERF_COUNT_HW_BUS_CYCLES]              = RISCV_OP_UNSUPP,
};
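/*
 * Note on the map above: only cycle and instret resolve to real counters,
 * since Priv-Spec 1.10 defines just those two fixed-function counters
 * (plus time); every other generic hardware event is reported as
 * unsupported by this base framework.
 */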
#define C(x) PERF_COUNT_HW_CACHE_##x
static const int riscv_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
                },
        },
};
static int riscv_map_hw_event(u64 config)
{
        if (config >= riscv_pmu->max_events)
                return -EINVAL;

        return riscv_pmu->hw_events[config];
}
static int riscv_map_cache_decode(u64 config, unsigned int *type,
                                  unsigned int *op, unsigned int *result)
{
        return -ENOENT;
}
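/*
 * A minimal sketch of what a platform-specific decode could do, assuming
 * the generic perf ABI encoding for PERF_TYPE_HW_CACHE events
 * (config = id | (op_id << 8) | (result_id << 16), see
 * include/uapi/linux/perf_event.h):
 *
 *      *type   = config & 0xff;
 *      *op     = (config >> 8) & 0xff;
 *      *result = (config >> 16) & 0xff;
 *      return 0;
 *
 * The base framework deliberately leaves the hook unimplemented and
 * returns -ENOENT above.
 */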
static int riscv_map_cache_event(u64 config)
{
        unsigned int type, op, result;
        int err = -ENOENT;
        int code;

        err = riscv_map_cache_decode(config, &type, &op, &result);
        if (!riscv_pmu->cache_events || err)
                return err;

        if (type >= PERF_COUNT_HW_CACHE_MAX ||
            op >= PERF_COUNT_HW_CACHE_OP_MAX ||
            result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        code = (*riscv_pmu->cache_events)[type][op][result];
        if (code == RISCV_OP_UNSUPP)
                return -EINVAL;

        return code;
}
/*
 * Low-level functions: reading/writing counters
 */
static inline u64 read_counter(int idx)
{
        u64 val = 0;

        switch (idx) {
        case RISCV_PMU_CYCLE:
                val = csr_read(CSR_CYCLE);
                break;
        case RISCV_PMU_INSTRET:
                val = csr_read(CSR_INSTRET);
                break;
        default:
                WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
                return -EINVAL;
        }

        return val;
}
static inline void write_counter(int idx, u64 value)
{
        /* currently not supported */
}
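/*
 * The user-mode cycle and instret CSRs read above are read-only shadows
 * of the machine-mode counters under Priv-Spec 1.10, so there is nothing
 * this function could write to; pmu->start compensates by snapshotting
 * the counter into prev_count instead.
 */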
/*
 * pmu->read: read and update the counter
 *
 * Other architectures' implementations often have a xxx_perf_event_update
 * routine, which can return counter values when called in the IRQ, but
 * return void when called by the pmu->read method.
 */
static void riscv_pmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count;
        u64 oldval;
        int idx = hwc->idx;
        u64 delta;

        do {
                prev_raw_count = local64_read(&hwc->prev_count);
                new_raw_count = read_counter(idx);

                oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                         new_raw_count);
        } while (oldval != prev_raw_count);

        /*
         * delta is the value to update the counter we maintain in the kernel.
         */
        delta = (new_raw_count - prev_raw_count) &
                ((1ULL << riscv_pmu->counter_width) - 1);
        local64_add(delta, &event->count);
        /*
         * Something like local64_sub(delta, &hwc->period_left) here is
         * needed if there is an interrupt for perf.
         */
}
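/*
 * Worked example of the wraparound math above, assuming a counter_width
 * of 63 (the value the base PMU uses): if prev_raw_count is
 * (1ULL << 63) - 1 and the hardware counter wraps so that new_raw_count
 * is 5, then (5 - ((1ULL << 63) - 1)) & ((1ULL << 63) - 1) == 6, i.e.
 * the six increments across the wrap are still accounted correctly.
 */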
/*
 * State transition functions:
 *
 * stop()/start() & add()/del()
 */
/*
 * pmu->stop: stop the counter
 */
static void riscv_pmu_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
        hwc->state |= PERF_HES_STOPPED;

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                riscv_pmu->pmu->read(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}
/*
 * pmu->start: start the event.
 */
static void riscv_pmu_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        if (flags & PERF_EF_RELOAD) {
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

                /*
                 * Set the counter to the period of the next interrupt here,
                 * if you have one.
                 */
        }

        hwc->state = 0;
        perf_event_update_userpage(event);

        /*
         * Since we cannot write to counters, this serves as an initialization
         * to the delta-mechanism in pmu->read(); otherwise, the delta would be
         * wrong when pmu->read is called for the first time.
         */
        local64_set(&hwc->prev_count, read_counter(hwc->idx));
}
/*
 * pmu->add: add the event to the PMU.
 */
static int riscv_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        if (cpuc->n_events == riscv_pmu->num_counters)
                return -ENOSPC;

        /*
         * We don't have general counters, so no binding-event-to-counter
         * process here.
         *
         * Indexing using hwc->config generally does not work, since config
         * may contain extra information, but here the only info we have in
         * hwc->config is the event index.
         */
        hwc->idx = hwc->config;
        cpuc->events[hwc->idx] = event;
        cpuc->n_events++;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                riscv_pmu->pmu->start(event, PERF_EF_RELOAD);

        return 0;
}
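/*
 * Example of the resulting flow for a hardware "cycles" event: event_init
 * maps PERF_COUNT_HW_CPU_CYCLES to RISCV_PMU_CYCLE and stores it in
 * hwc->config, so riscv_pmu_add() above ends up with
 * hwc->idx == RISCV_PMU_CYCLE, and read_counter() then reads CSR_CYCLE.
 */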
/*
 * pmu->del: delete the event from the PMU.
 */
static void riscv_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        cpuc->events[hwc->idx] = NULL;
        cpuc->n_events--;
        riscv_pmu->pmu->stop(event, PERF_EF_UPDATE);
        perf_event_update_userpage(event);
}
/*
 * Interrupt: a skeleton for reference.
 */
static DEFINE_MUTEX(pmc_reserve_mutex);
static irqreturn_t riscv_base_pmu_handle_irq(int irq_num, void *dev)
{
        return IRQ_NONE;
}
static int reserve_pmc_hardware(void)
{
        int err = 0;

        mutex_lock(&pmc_reserve_mutex);
        if (riscv_pmu->irq >= 0 && riscv_pmu->handle_irq) {
                err = request_irq(riscv_pmu->irq, riscv_pmu->handle_irq,
                                  IRQF_PERCPU, "riscv-base-perf", NULL);
        }
        mutex_unlock(&pmc_reserve_mutex);

        return err;
}
static void release_pmc_hardware(void)
{
        mutex_lock(&pmc_reserve_mutex);
        if (riscv_pmu->irq >= 0)
                free_irq(riscv_pmu->irq, NULL);
        mutex_unlock(&pmc_reserve_mutex);
}
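/*
 * With the base PMU's irq set to -1 (see riscv_base_pmu below), both
 * paths above reduce to no-ops; a platform port with a real overflow
 * interrupt would supply irq and handle_irq to activate them.
 */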
/*
 * Event Initialization/Finalization
 */
static atomic_t riscv_active_events = ATOMIC_INIT(0);
static void riscv_event_destroy(struct perf_event *event)
{
        if (atomic_dec_return(&riscv_active_events) == 0)
                release_pmc_hardware();
}
static int riscv_event_init(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        int err;
        int code;

        if (atomic_inc_return(&riscv_active_events) == 1) {
                err = reserve_pmc_hardware();
                if (err) {
                        pr_warn("PMC hardware not available\n");
                        atomic_dec(&riscv_active_events);
                        return -EBUSY;
                }
        }

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                code = riscv_pmu->map_hw_event(attr->config);
                break;
        case PERF_TYPE_HW_CACHE:
                code = riscv_pmu->map_cache_event(attr->config);
                break;
        case PERF_TYPE_RAW:
                return -EOPNOTSUPP;
        default:
                return -ENOENT;
        }

        event->destroy = riscv_event_destroy;
        if (code < 0) {
                event->destroy(event);
                return code;
        }

        /*
         * idx is set to -1 because the index of a general event should not be
         * decided until binding to some counter in pmu->add().
         *
         * But since we don't have such support, later in pmu->add(), we just
         * use hwc->config as the index instead.
         */
        hwc->config = code;
        hwc->idx = -1;

        return 0;
}
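/*
 * For illustration: `perf stat -e cycles,instructions <cmd>` reaches this
 * path with attr.type == PERF_TYPE_HARDWARE and attr.config set to
 * PERF_COUNT_HW_CPU_CYCLES or PERF_COUNT_HW_INSTRUCTIONS, the only two
 * configs the base map supports; cache events fail with -ENOENT from the
 * stubbed decode above.
 */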
/*
 * Initialization
 */

static struct pmu min_pmu = {
        .name           = "riscv-base",
        .event_init     = riscv_event_init,
        .add            = riscv_pmu_add,
        .del            = riscv_pmu_del,
        .start          = riscv_pmu_start,
        .stop           = riscv_pmu_stop,
        .read           = riscv_pmu_read,
};
static const struct riscv_pmu riscv_base_pmu = {
        .pmu = &min_pmu,
        .max_events = ARRAY_SIZE(riscv_hw_event_map),
        .map_hw_event = riscv_map_hw_event,
        .hw_events = riscv_hw_event_map,
        .map_cache_event = riscv_map_cache_event,
        .cache_events = &riscv_cache_event_map,
        .counter_width = 63,
        .num_counters = RISCV_BASE_COUNTERS + 0,
        .handle_irq = &riscv_base_pmu_handle_irq,

        /* This means this PMU has no IRQ. */
        .irq = -1,
};
static const struct of_device_id riscv_pmu_of_ids[] = {
        {.compatible = "riscv,base-pmu", .data = &riscv_base_pmu},
        { /* sentinel value */ }
};
static int __init init_hw_perf_events(void)
{
        struct device_node *node = of_find_node_by_type(NULL, "pmu");
        const struct of_device_id *of_id;

        riscv_pmu = &riscv_base_pmu;

        if (node) {
                of_id = of_match_node(riscv_pmu_of_ids, node);

                if (of_id)
                        riscv_pmu = of_id->data;
                /* drop the reference taken by of_find_node_by_type() */
                of_node_put(node);
        }

        perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW);
        return 0;
}
arch_initcall(init_hw_perf_events);
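/*
 * For reference, a minimal device tree node that would select the base
 * PMU via the match table above (an illustrative sketch; a real platform
 * port would typically provide its own compatible string and riscv_pmu
 * data):
 *
 *      pmu {
 *              device_type = "pmu";
 *              compatible = "riscv,base-pmu";
 *      };
 *
 * of_find_node_by_type() matches on the device_type property, and
 * of_match_node() then matches the compatible string against
 * riscv_pmu_of_ids.
 */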