/* arch/xtensa/kernel/perf_event.c */
/*
 * Xtensa Performance Monitor Module driver
 * See Tensilica Debug User's Guide for PMU registers documentation.
 *
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/interrupt.h>
13 #include <linux/irqdomain.h>
14 #include <linux/module.h>
15 #include <linux/of.h>
16 #include <linux/perf_event.h>
17 #include <linux/platform_device.h>
19 #include <asm/processor.h>
20 #include <asm/stacktrace.h>
22 /* Global control/status for all perf counters */
23 #define XTENSA_PMU_PMG 0x1000
24 /* Perf counter values */
25 #define XTENSA_PMU_PM(i) (0x1080 + (i) * 4)
26 /* Perf counter control registers */
27 #define XTENSA_PMU_PMCTRL(i) (0x1100 + (i) * 4)
28 /* Perf counter status registers */
29 #define XTENSA_PMU_PMSTAT(i) (0x1180 + (i) * 4)
31 #define XTENSA_PMU_PMG_PMEN 0x1
33 #define XTENSA_PMU_COUNTER_MASK 0xffffffffULL
34 #define XTENSA_PMU_COUNTER_MAX 0x7fffffff
36 #define XTENSA_PMU_PMCTRL_INTEN 0x00000001
37 #define XTENSA_PMU_PMCTRL_KRNLCNT 0x00000008
38 #define XTENSA_PMU_PMCTRL_TRACELEVEL 0x000000f0
39 #define XTENSA_PMU_PMCTRL_SELECT_SHIFT 8
40 #define XTENSA_PMU_PMCTRL_SELECT 0x00001f00
41 #define XTENSA_PMU_PMCTRL_MASK_SHIFT 16
42 #define XTENSA_PMU_PMCTRL_MASK 0xffff0000
44 #define XTENSA_PMU_MASK(select, mask) \
45 (((select) << XTENSA_PMU_PMCTRL_SELECT_SHIFT) | \
46 ((mask) << XTENSA_PMU_PMCTRL_MASK_SHIFT) | \
47 XTENSA_PMU_PMCTRL_TRACELEVEL | \
48 XTENSA_PMU_PMCTRL_INTEN)
50 #define XTENSA_PMU_PMSTAT_OVFL 0x00000001
51 #define XTENSA_PMU_PMSTAT_INTASRT 0x00000010
53 struct xtensa_pmu_events {
54 /* Array of events currently on this core */
55 struct perf_event *event[XCHAL_NUM_PERF_COUNTERS];
56 /* Bitmap of used hardware counters */
57 unsigned long used_mask[BITS_TO_LONGS(XCHAL_NUM_PERF_COUNTERS)];
59 static DEFINE_PER_CPU(struct xtensa_pmu_events, xtensa_pmu_events);
61 static const u32 xtensa_hw_ctl[] = {
62 [PERF_COUNT_HW_CPU_CYCLES] = XTENSA_PMU_MASK(0, 0x1),
63 [PERF_COUNT_HW_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0xffff),
64 [PERF_COUNT_HW_CACHE_REFERENCES] = XTENSA_PMU_MASK(10, 0x1),
65 [PERF_COUNT_HW_CACHE_MISSES] = XTENSA_PMU_MASK(12, 0x1),
66 /* Taken and non-taken branches + taken loop ends */
67 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0x490),
68 /* Instruction-related + other global stall cycles */
69 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XTENSA_PMU_MASK(4, 0x1ff),
70 /* Data-related global stall cycles */
71 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = XTENSA_PMU_MASK(3, 0x1ff),
74 #define C(_x) PERF_COUNT_HW_CACHE_##_x
76 static const u32 xtensa_cache_ctl[][C(OP_MAX)][C(RESULT_MAX)] = {
77 [C(L1D)] = {
78 [C(OP_READ)] = {
79 [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(10, 0x1),
80 [C(RESULT_MISS)] = XTENSA_PMU_MASK(10, 0x2),
82 [C(OP_WRITE)] = {
83 [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(11, 0x1),
84 [C(RESULT_MISS)] = XTENSA_PMU_MASK(11, 0x2),
87 [C(L1I)] = {
88 [C(OP_READ)] = {
89 [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(8, 0x1),
90 [C(RESULT_MISS)] = XTENSA_PMU_MASK(8, 0x2),
93 [C(DTLB)] = {
94 [C(OP_READ)] = {
95 [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(9, 0x1),
96 [C(RESULT_MISS)] = XTENSA_PMU_MASK(9, 0x8),
99 [C(ITLB)] = {
100 [C(OP_READ)] = {
101 [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(7, 0x1),
102 [C(RESULT_MISS)] = XTENSA_PMU_MASK(7, 0x8),
107 static int xtensa_pmu_cache_event(u64 config)
109 unsigned int cache_type, cache_op, cache_result;
110 int ret;
112 cache_type = (config >> 0) & 0xff;
113 cache_op = (config >> 8) & 0xff;
114 cache_result = (config >> 16) & 0xff;
116 if (cache_type >= ARRAY_SIZE(xtensa_cache_ctl) ||
117 cache_op >= C(OP_MAX) ||
118 cache_result >= C(RESULT_MAX))
119 return -EINVAL;
121 ret = xtensa_cache_ctl[cache_type][cache_op][cache_result];
123 if (ret == 0)
124 return -EINVAL;
126 return ret;
129 static inline uint32_t xtensa_pmu_read_counter(int idx)
131 return get_er(XTENSA_PMU_PM(idx));
134 static inline void xtensa_pmu_write_counter(int idx, uint32_t v)
136 set_er(v, XTENSA_PMU_PM(idx));
139 static void xtensa_perf_event_update(struct perf_event *event,
140 struct hw_perf_event *hwc, int idx)
142 uint64_t prev_raw_count, new_raw_count;
143 int64_t delta;
145 do {
146 prev_raw_count = local64_read(&hwc->prev_count);
147 new_raw_count = xtensa_pmu_read_counter(event->hw.idx);
148 } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
149 new_raw_count) != prev_raw_count);
151 delta = (new_raw_count - prev_raw_count) & XTENSA_PMU_COUNTER_MASK;
153 local64_add(delta, &event->count);
154 local64_sub(delta, &hwc->period_left);
157 static bool xtensa_perf_event_set_period(struct perf_event *event,
158 struct hw_perf_event *hwc, int idx)
160 bool rc = false;
161 s64 left;
163 if (!is_sampling_event(event)) {
164 left = XTENSA_PMU_COUNTER_MAX;
165 } else {
166 s64 period = hwc->sample_period;
168 left = local64_read(&hwc->period_left);
169 if (left <= -period) {
170 left = period;
171 local64_set(&hwc->period_left, left);
172 hwc->last_period = period;
173 rc = true;
174 } else if (left <= 0) {
175 left += period;
176 local64_set(&hwc->period_left, left);
177 hwc->last_period = period;
178 rc = true;
180 if (left > XTENSA_PMU_COUNTER_MAX)
181 left = XTENSA_PMU_COUNTER_MAX;
184 local64_set(&hwc->prev_count, -left);
185 xtensa_pmu_write_counter(idx, -left);
186 perf_event_update_userpage(event);
188 return rc;
191 static void xtensa_pmu_enable(struct pmu *pmu)
193 set_er(get_er(XTENSA_PMU_PMG) | XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
196 static void xtensa_pmu_disable(struct pmu *pmu)
198 set_er(get_er(XTENSA_PMU_PMG) & ~XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
201 static int xtensa_pmu_event_init(struct perf_event *event)
203 int ret;
205 switch (event->attr.type) {
206 case PERF_TYPE_HARDWARE:
207 if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) ||
208 xtensa_hw_ctl[event->attr.config] == 0)
209 return -EINVAL;
210 event->hw.config = xtensa_hw_ctl[event->attr.config];
211 return 0;
213 case PERF_TYPE_HW_CACHE:
214 ret = xtensa_pmu_cache_event(event->attr.config);
215 if (ret < 0)
216 return ret;
217 event->hw.config = ret;
218 return 0;
220 case PERF_TYPE_RAW:
221 /* Not 'previous counter' select */
222 if ((event->attr.config & XTENSA_PMU_PMCTRL_SELECT) ==
223 (1 << XTENSA_PMU_PMCTRL_SELECT_SHIFT))
224 return -EINVAL;
225 event->hw.config = (event->attr.config &
226 (XTENSA_PMU_PMCTRL_KRNLCNT |
227 XTENSA_PMU_PMCTRL_TRACELEVEL |
228 XTENSA_PMU_PMCTRL_SELECT |
229 XTENSA_PMU_PMCTRL_MASK)) |
230 XTENSA_PMU_PMCTRL_INTEN;
231 return 0;
233 default:
234 return -ENOENT;
239 * Starts/Stops a counter present on the PMU. The PMI handler
240 * should stop the counter when perf_event_overflow() returns
241 * !0. ->start() will be used to continue.
243 static void xtensa_pmu_start(struct perf_event *event, int flags)
245 struct hw_perf_event *hwc = &event->hw;
246 int idx = hwc->idx;
248 if (WARN_ON_ONCE(idx == -1))
249 return;
251 if (flags & PERF_EF_RELOAD) {
252 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
253 xtensa_perf_event_set_period(event, hwc, idx);
256 hwc->state = 0;
258 set_er(hwc->config, XTENSA_PMU_PMCTRL(idx));
261 static void xtensa_pmu_stop(struct perf_event *event, int flags)
263 struct hw_perf_event *hwc = &event->hw;
264 int idx = hwc->idx;
266 if (!(hwc->state & PERF_HES_STOPPED)) {
267 set_er(0, XTENSA_PMU_PMCTRL(idx));
268 set_er(get_er(XTENSA_PMU_PMSTAT(idx)),
269 XTENSA_PMU_PMSTAT(idx));
270 hwc->state |= PERF_HES_STOPPED;
273 if ((flags & PERF_EF_UPDATE) &&
274 !(event->hw.state & PERF_HES_UPTODATE)) {
275 xtensa_perf_event_update(event, &event->hw, idx);
276 event->hw.state |= PERF_HES_UPTODATE;
281 * Adds/Removes a counter to/from the PMU, can be done inside
282 * a transaction, see the ->*_txn() methods.
284 static int xtensa_pmu_add(struct perf_event *event, int flags)
286 struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
287 struct hw_perf_event *hwc = &event->hw;
288 int idx = hwc->idx;
290 if (__test_and_set_bit(idx, ev->used_mask)) {
291 idx = find_first_zero_bit(ev->used_mask,
292 XCHAL_NUM_PERF_COUNTERS);
293 if (idx == XCHAL_NUM_PERF_COUNTERS)
294 return -EAGAIN;
296 __set_bit(idx, ev->used_mask);
297 hwc->idx = idx;
299 ev->event[idx] = event;
301 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
303 if (flags & PERF_EF_START)
304 xtensa_pmu_start(event, PERF_EF_RELOAD);
306 perf_event_update_userpage(event);
307 return 0;
310 static void xtensa_pmu_del(struct perf_event *event, int flags)
312 struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
314 xtensa_pmu_stop(event, PERF_EF_UPDATE);
315 __clear_bit(event->hw.idx, ev->used_mask);
316 perf_event_update_userpage(event);
319 static void xtensa_pmu_read(struct perf_event *event)
321 xtensa_perf_event_update(event, &event->hw, event->hw.idx);
324 static int callchain_trace(struct stackframe *frame, void *data)
326 struct perf_callchain_entry *entry = data;
328 perf_callchain_store(entry, frame->pc);
329 return 0;
332 void perf_callchain_kernel(struct perf_callchain_entry *entry,
333 struct pt_regs *regs)
335 xtensa_backtrace_kernel(regs, PERF_MAX_STACK_DEPTH,
336 callchain_trace, NULL, entry);
339 void perf_callchain_user(struct perf_callchain_entry *entry,
340 struct pt_regs *regs)
342 xtensa_backtrace_user(regs, PERF_MAX_STACK_DEPTH,
343 callchain_trace, entry);
346 void perf_event_print_debug(void)
348 unsigned long flags;
349 unsigned i;
351 local_irq_save(flags);
352 pr_info("CPU#%d: PMG: 0x%08lx\n", smp_processor_id(),
353 get_er(XTENSA_PMU_PMG));
354 for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i)
355 pr_info("PM%d: 0x%08lx, PMCTRL%d: 0x%08lx, PMSTAT%d: 0x%08lx\n",
356 i, get_er(XTENSA_PMU_PM(i)),
357 i, get_er(XTENSA_PMU_PMCTRL(i)),
358 i, get_er(XTENSA_PMU_PMSTAT(i)));
359 local_irq_restore(flags);
362 irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
364 irqreturn_t rc = IRQ_NONE;
365 struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
366 unsigned i;
368 for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS);
369 i < XCHAL_NUM_PERF_COUNTERS;
370 i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) {
371 uint32_t v = get_er(XTENSA_PMU_PMSTAT(i));
372 struct perf_event *event = ev->event[i];
373 struct hw_perf_event *hwc = &event->hw;
374 u64 last_period;
376 if (!(v & XTENSA_PMU_PMSTAT_OVFL))
377 continue;
379 set_er(v, XTENSA_PMU_PMSTAT(i));
380 xtensa_perf_event_update(event, hwc, i);
381 last_period = hwc->last_period;
382 if (xtensa_perf_event_set_period(event, hwc, i)) {
383 struct perf_sample_data data;
384 struct pt_regs *regs = get_irq_regs();
386 perf_sample_data_init(&data, 0, last_period);
387 if (perf_event_overflow(event, &data, regs))
388 xtensa_pmu_stop(event, 0);
391 rc = IRQ_HANDLED;
393 return rc;
396 static struct pmu xtensa_pmu = {
397 .pmu_enable = xtensa_pmu_enable,
398 .pmu_disable = xtensa_pmu_disable,
399 .event_init = xtensa_pmu_event_init,
400 .add = xtensa_pmu_add,
401 .del = xtensa_pmu_del,
402 .start = xtensa_pmu_start,
403 .stop = xtensa_pmu_stop,
404 .read = xtensa_pmu_read,
407 static void xtensa_pmu_setup(void)
409 unsigned i;
411 set_er(0, XTENSA_PMU_PMG);
412 for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) {
413 set_er(0, XTENSA_PMU_PMCTRL(i));
414 set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i));
418 static int xtensa_pmu_notifier(struct notifier_block *self,
419 unsigned long action, void *data)
421 switch (action & ~CPU_TASKS_FROZEN) {
422 case CPU_STARTING:
423 xtensa_pmu_setup();
424 break;
426 default:
427 break;
430 return NOTIFY_OK;
433 static int __init xtensa_pmu_init(void)
435 int ret;
436 int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
438 perf_cpu_notifier(xtensa_pmu_notifier);
439 #if XTENSA_FAKE_NMI
440 enable_irq(irq);
441 #else
442 ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU,
443 "pmu", NULL);
444 if (ret < 0)
445 return ret;
446 #endif
448 ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW);
449 if (ret)
450 free_irq(irq, NULL);
452 return ret;
454 early_initcall(xtensa_pmu_init);