/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 * Copyright 2014 Tilera Corporation. All Rights Reserved.
 * Copyright (C) 2018 Andes Technology Corporation
 *
 * Perf_events support for RISC-V platforms.
 *
 * Since the spec (as of now, Priv-Spec 1.10) does not provide enough
 * functionality for perf events to work fully, this file provides
 * only a very basic framework.
 *
 * For platform porting, please check Documentation/riscv/pmu.txt.
 *
 * The copyright lines include the x86 and tile ones.
 */
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/atomic.h>
#include <linux/of.h>

#include <asm/perf_event.h>
static const struct riscv_pmu *riscv_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
/*
 * Hardware & cache maps and their methods
 */
static const int riscv_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= RISCV_PMU_CYCLE,
	[PERF_COUNT_HW_INSTRUCTIONS]		= RISCV_PMU_INSTRET,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= RISCV_OP_UNSUPP,
	[PERF_COUNT_HW_CACHE_MISSES]		= RISCV_OP_UNSUPP,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= RISCV_OP_UNSUPP,
	[PERF_COUNT_HW_BRANCH_MISSES]		= RISCV_OP_UNSUPP,
	[PERF_COUNT_HW_BUS_CYCLES]		= RISCV_OP_UNSUPP,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
static const int riscv_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
			[C(RESULT_MISS)] = RISCV_OP_UNSUPP,
		},
	},
};
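
/*
 * All entries above are RISCV_OP_UNSUPP because Priv-Spec 1.10 defines
 * no architectural cache events. A platform port would point
 * cache_events at its own map and fill in real indices; for example
 * (hypothetical platform event ID):
 *
 *	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = 0x03,
 */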
static int riscv_map_hw_event(u64 config)
{
	if (config >= riscv_pmu->max_events)
		return -EINVAL;

	return riscv_pmu->hw_events[config];
}
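
/*
 * For example, PERF_COUNT_HW_CPU_CYCLES maps to RISCV_PMU_CYCLE, while
 * PERF_COUNT_HW_CACHE_REFERENCES maps to RISCV_OP_UNSUPP, a negative
 * value that riscv_event_init() reports back to the perf core as an
 * error.
 */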
int riscv_map_cache_decode(u64 config, unsigned int *type,
			   unsigned int *op, unsigned int *result)
{
	return -ENOENT;
}
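
/*
 * A platform override could use perf's standard HW_CACHE encoding, in
 * which attr.config packs the cache ID, the op and the result into one
 * byte each (a sketch under that assumption, not part of this driver):
 *
 *	*type = config & 0xff;
 *	*op = (config >> 8) & 0xff;
 *	*result = (config >> 16) & 0xff;
 *	return 0;
 */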
static int riscv_map_cache_event(u64 config)
{
	unsigned int type, op, result;
	int err = -ENOENT;
	int code;

	err = riscv_map_cache_decode(config, &type, &op, &result);
	if (!riscv_pmu->cache_events || err)
		return -ENOENT;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	code = (*riscv_pmu->cache_events)[type][op][result];
	if (code == RISCV_OP_UNSUPP)
		return -EINVAL;

	return code;
}
/*
 * Low-level functions: reading/writing counters
 */
static inline u64 read_counter(int idx)
{
	u64 val = 0;

	switch (idx) {
	case RISCV_PMU_CYCLE:
		val = csr_read(cycle);
		break;
	case RISCV_PMU_INSTRET:
		val = csr_read(instret);
		break;
	default:
		WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
		return -EINVAL;
	}

	return val;
}
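
/*
 * cycle and instret are the user-readable counter CSRs (0xC00 and
 * 0xC02 respectively); csr_read() expands to the corresponding csrr
 * instruction, roughly "csrr %0, cycle".
 */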
static inline void write_counter(int idx, u64 value)
{
	/* currently not supported */
}
/*
 * pmu->read: read and update the counter
 *
 * Other architectures' implementations often have an xxx_perf_event_update
 * routine, which can return counter values when called in the IRQ, but
 * return void when called via the pmu->read method.
 */
static void riscv_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	u64 oldval;
	int idx = hwc->idx;
	u64 delta;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = read_counter(idx);

		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					 new_raw_count);
	} while (oldval != prev_raw_count);

	/*
	 * delta is the value to update the counter we maintain in the kernel.
	 */
	delta = (new_raw_count - prev_raw_count) &
		((1ULL << riscv_pmu->counter_width) - 1);
	local64_add(delta, &event->count);
	/*
	 * Something like local64_sub(delta, &hwc->period_left) would be
	 * needed here if there were an interrupt for perf.
	 */
}
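
/*
 * Worked example of the masking above: with counter_width == 48, a
 * counter stepping from 0xFFFFFFFFFFFE past its 48-bit wrap to 0x1
 * gives (0x1 - 0xFFFFFFFFFFFE) & ((1ULL << 48) - 1) == 3, the true
 * number of elapsed events.
 */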
/*
 * State transition functions:
 *
 * stop()/start() & add()/del()
 */
/*
 * pmu->stop: stop the counter
 */
static void riscv_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		riscv_pmu->pmu->read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
/*
 * pmu->start: start the event.
 */
static void riscv_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

		/*
		 * Set the counter to the period for the next interrupt here,
		 * if you have any.
		 */
	}

	hwc->state = 0;
	perf_event_update_userpage(event);

	/*
	 * Since we cannot write to counters, this serves as an initialization
	 * of the delta mechanism in pmu->read(); otherwise, the delta would be
	 * wrong when pmu->read is called for the first time.
	 */
	local64_set(&hwc->prev_count, read_counter(hwc->idx));
}
/*
 * pmu->add: add the event to the PMU.
 */
static int riscv_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (cpuc->n_events == riscv_pmu->num_counters)
		return -ENOSPC;

	/*
	 * We don't have general counters, so no binding-event-to-counter
	 * process here.
	 *
	 * Indexing using hwc->config generally does not work, since config may
	 * contain extra information, but here the only info we have in
	 * hwc->config is the event index.
	 */
	hwc->idx = hwc->config;
	cpuc->events[hwc->idx] = event;
	cpuc->n_events++;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		riscv_pmu->pmu->start(event, PERF_EF_RELOAD);

	return 0;
}
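
/*
 * Concretely: for a cycles event, riscv_event_init() stored
 * RISCV_PMU_CYCLE in hwc->config, so the event lands in
 * cpuc->events[RISCV_PMU_CYCLE].
 */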
/*
 * pmu->del: delete the event from the PMU.
 */
static void riscv_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->events[hwc->idx] = NULL;
	cpuc->n_events--;
	riscv_pmu->pmu->stop(event, PERF_EF_UPDATE);
	perf_event_update_userpage(event);
}
/*
 * Interrupt: a skeleton for reference.
 */
static DEFINE_MUTEX(pmc_reserve_mutex);
irqreturn_t riscv_base_pmu_handle_irq(int irq_num, void *dev)
{
	return IRQ_NONE;
}
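
/*
 * Sketch only: the base PMU never raises this IRQ. A real handler
 * would read the overflow status, fold in the overflowed counters via
 * pmu->read(), rearm the sampling period, call perf_event_overflow()
 * and return IRQ_HANDLED.
 */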
static int reserve_pmc_hardware(void)
{
	int err = 0;

	mutex_lock(&pmc_reserve_mutex);
	if (riscv_pmu->irq >= 0 && riscv_pmu->handle_irq) {
		err = request_irq(riscv_pmu->irq, riscv_pmu->handle_irq,
				  IRQF_PERCPU, "riscv-base-perf", NULL);
	}
	mutex_unlock(&pmc_reserve_mutex);

	return err;
}
void release_pmc_hardware(void)
{
	mutex_lock(&pmc_reserve_mutex);
	if (riscv_pmu->irq >= 0)
		free_irq(riscv_pmu->irq, NULL);
	mutex_unlock(&pmc_reserve_mutex);
}
/*
 * Event Initialization/Finalization
 */
static atomic_t riscv_active_events = ATOMIC_INIT(0);
static void riscv_event_destroy(struct perf_event *event)
{
	if (atomic_dec_return(&riscv_active_events) == 0)
		release_pmc_hardware();
}
static int riscv_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int err;
	int code;

	if (atomic_inc_return(&riscv_active_events) == 1) {
		err = reserve_pmc_hardware();

		if (err) {
			pr_warn("PMC hardware not available\n");
			atomic_dec(&riscv_active_events);
			return -EBUSY;
		}
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		code = riscv_pmu->map_hw_event(attr->config);
		break;
	case PERF_TYPE_HW_CACHE:
		code = riscv_pmu->map_cache_event(attr->config);
		break;
	case PERF_TYPE_RAW:
		return -EOPNOTSUPP;
	default:
		return -ENOENT;
	}

	event->destroy = riscv_event_destroy;
	if (code < 0) {
		event->destroy(event);
		return code;
	}

	/*
	 * idx is set to -1 because the index of a general event should not be
	 * decided until it is bound to some counter in pmu->add().
	 *
	 * Since we don't have such support here, later in pmu->add() we just
	 * use hwc->config as the index instead.
	 */
	hwc->config = code;
	hwc->idx = -1;

	return 0;
}
/*
 * Initialization
 */

static struct pmu min_pmu = {
	.name		= "riscv-base",
	.event_init	= riscv_event_init,
	.add		= riscv_pmu_add,
	.del		= riscv_pmu_del,
	.start		= riscv_pmu_start,
	.stop		= riscv_pmu_stop,
	.read		= riscv_pmu_read,
};
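
/*
 * Callback flow for a counting event, as driven by the perf core:
 * event_init() -> add(PERF_EF_START) -> start() -> read() ... ->
 * del() (which stops the event and folds in the final count) ->
 * destroy().
 */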
static const struct riscv_pmu riscv_base_pmu = {
	.pmu = &min_pmu,
	.max_events = ARRAY_SIZE(riscv_hw_event_map),
	.map_hw_event = riscv_map_hw_event,
	.hw_events = riscv_hw_event_map,
	.map_cache_event = riscv_map_cache_event,
	.cache_events = &riscv_cache_event_map,
	.counter_width = 63,
	.num_counters = RISCV_BASE_COUNTERS + 0,
	.handle_irq = &riscv_base_pmu_handle_irq,

	/* This means this PMU has no IRQ. */
	.irq = -1,
};
static const struct of_device_id riscv_pmu_of_ids[] = {
	{.compatible = "riscv,base-pmu",	.data = &riscv_base_pmu},
	{ /* sentinel value */ }
};
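
/*
 * Illustrative device-tree node matched by the table above (note that
 * of_find_node_by_type() below looks the node up by its device_type
 * property):
 *
 *	pmu {
 *		device_type = "pmu";
 *		compatible = "riscv,base-pmu";
 *	};
 */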
int __init init_hw_perf_events(void)
{
	struct device_node *node = of_find_node_by_type(NULL, "pmu");
	const struct of_device_id *of_id;

	riscv_pmu = &riscv_base_pmu;

	if (node) {
		of_id = of_match_node(riscv_pmu_of_ids, node);

		if (of_id)
			riscv_pmu = of_id->data;
		of_node_put(node);	/* drop the reference taken above */
	}

	perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW);
	return 0;
}
arch_initcall(init_hw_perf_events);
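
/*
 * Usage example: once registered, the two supported events can be
 * counted from userspace with, e.g.,
 *
 *	perf stat -e cycles -e instructions <command>
 *
 * All other hardware and cache events map to RISCV_OP_UNSUPP and are
 * rejected at event_init() time.
 */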