/*
 * Linux performance counter support for ARC700 series
 *
 * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
 *
 * This code is inspired by the perf support of various other architectures.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/arcregs.h>
#include <asm/stacktrace.h>
struct arc_pmu {
	struct pmu	pmu;
	int		n_counters;
	u64		max_period;
	int		ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
};

struct arc_pmu_cpu {
	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long	used_mask[BITS_TO_LONGS(ARC_PERF_MAX_COUNTERS)];

	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event *act_counter[ARC_PERF_MAX_COUNTERS];
};
struct arc_callchain_trace {
	int depth;
	void *perf_stuff;
};

static int callchain_trace(unsigned int addr, void *data)
{
	struct arc_callchain_trace *ctrl = data;
	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;

	perf_callchain_store(entry, addr);

	if (ctrl->depth++ < 3)
		return 0;

	return -1;
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct arc_callchain_trace ctrl = {
		.depth = 0,
		.perf_stuff = entry,
	};

	arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
}
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	/*
	 * User stack can't be unwound trivially with kernel dwarf unwinder
	 * So for now just record the user PC
	 */
	perf_callchain_store(entry, instruction_pointer(regs));
}
static struct arc_pmu *arc_pmu;
static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
/* read counter #idx; note that counter# != event# on ARC! */
static uint64_t arc_pmu_read_counter(int idx)
{
	uint32_t tmp;
	uint64_t result;

	/*
	 * ARC supports making 'snapshots' of the counters, so we don't
	 * need to care about counters wrapping to 0 underneath our feet
	 */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);
	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
	result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
	result |= read_aux_reg(ARC_REG_PCT_SNAPL);

	return result;
}
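/*
 * Illustrative note on the read sequence above: writing PCT_INDEX selects
 * the counter and setting the SN bit in PCT_CONTROL latches both halves of
 * its current value into PCT_SNAPL/PCT_SNAPH, so the 64-bit result can be
 * assembled from two 32-bit reads without racing against the counter
 * incrementing in between.
 */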
static void arc_perf_event_update(struct perf_event *event,
				  struct hw_perf_event *hwc, int idx)
{
	uint64_t prev_raw_count = local64_read(&hwc->prev_count);
	uint64_t new_raw_count = arc_pmu_read_counter(idx);
	int64_t delta = new_raw_count - prev_raw_count;

	/*
	 * We aren't afraid of hwc->prev_count changing beneath our feet
	 * because there's no way for us to re-enter this function anytime.
	 */
	local64_set(&hwc->prev_count, new_raw_count);
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}
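/*
 * Worked example of the accounting above (numbers are made up): if
 * prev_count was 100 and the new snapshot reads 150, delta is 50, so
 * event->count grows by 50 and period_left shrinks by 50, keeping the
 * distance to the next sample in step with what the hardware counted.
 */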
static void arc_pmu_read(struct perf_event *event)
{
	arc_perf_event_update(event, &event->hw, event->hw.idx);
}
static int arc_pmu_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	int ret;

	cache_type	= (config >>  0) & 0xff;
	cache_op	= (config >>  8) & 0xff;
	cache_result	= (config >> 16) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d \'%s\'\n",
		 cache_type, cache_op, cache_result, ret,
		 arc_pmu_ev_hw_map[ret]);

	return ret;
}
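/*
 * Example of the config encoding decoded above (standard perf layout, not
 * ARC specific): an L1-D load miss is requested from user space as
 *
 *	attr.config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |
 *		      (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * which arc_pmu_cache_event() maps to a hardware condition index via
 * arc_pmu_cache_map[], assuming that combination is supported there.
 */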
/* initializes hw_perf_event structure if event is supported */
static int arc_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (!is_sampling_event(event)) {
		hwc->sample_period = arc_pmu->max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	hwc->config = 0;

	if (is_isa_arcv2()) {
		/* "exclude user" means "count only kernel" */
		if (event->attr.exclude_user)
			hwc->config |= ARC_REG_PCT_CONFIG_KERN;

		/* "exclude kernel" means "count only user" */
		if (event->attr.exclude_kernel)
			hwc->config |= ARC_REG_PCT_CONFIG_USER;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -ENOENT;
		if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
			return -ENOENT;
		hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
		pr_debug("init event %d with h/w %08x \'%s\'\n",
			 (int)event->attr.config, (int)hwc->config,
			 arc_pmu_ev_hw_map[event->attr.config]);
		return 0;

	case PERF_TYPE_HW_CACHE:
		ret = arc_pmu_cache_event(event->attr.config);
		if (ret < 0)
			return ret;
		hwc->config |= arc_pmu->ev_hw_idx[ret];
		pr_debug("init cache event with h/w %08x \'%s\'\n",
			 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
		return 0;

	default:
		return -ENOENT;
	}
}
/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
	uint32_t tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}

/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
	uint32_t tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}
static int arc_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int idx = hwc->idx;
	int overflow = 0;
	u64 value;

	if (unlikely(left <= -period)) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	} else if (unlikely(left <= 0)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (left > arc_pmu->max_period)
		left = arc_pmu->max_period;

	value = arc_pmu->max_period - left;
	local64_set(&hwc->prev_count, value);

	/* Select counter */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	/* Write value */
	write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value);
	write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32));

	perf_event_update_userpage(event);

	return overflow;
}
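/*
 * Worked example of the reload math above (values are illustrative): with
 * 48-bit counters max_period is 2^47 - 1; if period_left is 1000, the
 * counter is preloaded with max_period - 1000, so it reaches the overflow
 * threshold (programmed to max_period in arc_pmu_add()) after exactly 1000
 * more qualifying events.
 */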
/*
 * Assigns hardware counter to hardware condition.
 * Note that there is no separate start/stop mechanism;
 * stopping is achieved by assigning the 'never' condition
 */
static void arc_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	arc_pmu_event_set_period(event);

	/* Enable interrupt for this counter */
	if (is_sampling_event(event))
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));

	/* enable ARC pmu here */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* counter # */
	write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);	/* condition */
}
static void arc_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/* Disable interrupt for this counter */
	if (is_sampling_event(event)) {
		/*
		 * Reset interrupt flag by writing of 1. This is required
		 * to make sure pending interrupt was not left.
		 */
		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx));
	}

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		/* stop ARC pmu here */
		write_aux_reg(ARC_REG_PCT_INDEX, idx);

		/* condition code #0 is always "never" */
		write_aux_reg(ARC_REG_PCT_CONFIG, 0);

		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) &&
	    !(event->hw.state & PERF_HES_UPTODATE)) {
		arc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}
static void arc_pmu_del(struct perf_event *event, int flags)
{
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);

	arc_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, pmu_cpu->used_mask);

	pmu_cpu->act_counter[event->hw.idx] = 0;

	perf_event_update_userpage(event);
}
/* allocate hardware counter and optionally start counting */
static int arc_pmu_add(struct perf_event *event, int flags)
{
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (__test_and_set_bit(idx, pmu_cpu->used_mask)) {
		idx = find_first_zero_bit(pmu_cpu->used_mask,
					  arc_pmu->n_counters);
		if (idx == arc_pmu->n_counters)
			return -EAGAIN;

		__set_bit(idx, pmu_cpu->used_mask);
		hwc->idx = idx;
	}

	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	pmu_cpu->act_counter[idx] = event;

	if (is_sampling_event(event)) {
		/* Mimic full counter overflow as other arches do */
		write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period);
		write_aux_reg(ARC_REG_PCT_INT_CNTH,
			      (arc_pmu->max_period >> 32));
	}

	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
	write_aux_reg(ARC_REG_PCT_COUNTL, 0);
	write_aux_reg(ARC_REG_PCT_COUNTH, 0);
	local64_set(&hwc->prev_count, 0);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		arc_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}
#ifdef CONFIG_ISA_ARCV2
static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
	struct perf_sample_data data;
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
	struct pt_regs *regs;
	unsigned int active_ints;
	int idx;

	arc_pmu_disable(&arc_pmu->pmu);

	active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);

	regs = get_irq_regs();

	for (idx = 0; idx < arc_pmu->n_counters; idx++) {
		struct perf_event *event = pmu_cpu->act_counter[idx];
		struct hw_perf_event *hwc;

		if (!(active_ints & (1 << idx)))
			continue;

		/* Reset interrupt flag by writing of 1 */
		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);

		/*
		 * On reset of "interrupt active" bit corresponding
		 * "interrupt enable" bit gets automatically reset as well.
		 * Now we need to re-enable interrupt for the counter.
		 */
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));

		hwc = &event->hw;

		WARN_ON_ONCE(hwc->idx != idx);

		arc_perf_event_update(event, &event->hw, event->hw.idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!arc_pmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			arc_pmu_stop(event, 0);
	}

	arc_pmu_enable(&arc_pmu->pmu);

	return IRQ_HANDLED;
}
#else

static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
	return IRQ_NONE;
}

#endif /* CONFIG_ISA_ARCV2 */
static void arc_cpu_pmu_irq_init(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);

	/* Clear all pending interrupt flags */
	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
}
static int arc_pmu_device_probe(struct platform_device *pdev)
{
	struct arc_reg_pct_build pct_bcr;
	struct arc_reg_cc_build cc_bcr;
	int i, j, has_interrupts;
	int counter_size;	/* in bits */

	union cc_name {
		struct {
			uint32_t word0, word1;
			char sentinel;
		} indiv;
		char str[9];
	} cc_name;

	READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
	if (!pct_bcr.v) {
		pr_err("This core does not have performance counters!\n");
		return -ENODEV;
	}
	BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);

	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
	BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */

	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
	if (!arc_pmu)
		return -ENOMEM;

	has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;

	arc_pmu->n_counters = pct_bcr.c;
	counter_size = 32 + (pct_bcr.s << 4);

	arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;
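	/*
	 * Worked example of the sizing above (numbers are illustrative, not
	 * from a specific BCR): pct_bcr.s == 1 gives 32 + 16 = 48-bit
	 * counters, so max_period = 2^48 / 2 - 1 = 2^47 - 1, i.e. half the
	 * counter range is kept as headroom before an overflow is serviced.
	 */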
480 pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
481 arc_pmu
->n_counters
, counter_size
, cc_bcr
.c
,
482 has_interrupts
? ", [overflow IRQ support]":"");
	cc_name.str[8] = 0;
	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
		arc_pmu->ev_hw_idx[i] = -1;

	/* loop thru all available h/w condition indexes */
	for (j = 0; j < cc_bcr.c; j++) {
		write_aux_reg(ARC_REG_CC_INDEX, j);
		cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
		cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));

		/* See if it has been mapped to a perf event_id */
		for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
			if (arc_pmu_ev_hw_map[i] &&
			    !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
			    strlen(arc_pmu_ev_hw_map[i])) {
				pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
					 i, cc_name.str, j);
				arc_pmu->ev_hw_idx[i] = j;
			}
		}
	}
	arc_pmu->pmu = (struct pmu) {
		.pmu_enable	= arc_pmu_enable,
		.pmu_disable	= arc_pmu_disable,
		.event_init	= arc_pmu_event_init,
		.add		= arc_pmu_add,
		.del		= arc_pmu_del,
		.start		= arc_pmu_start,
		.stop		= arc_pmu_stop,
		.read		= arc_pmu_read,
	};
	if (has_interrupts) {
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0) {
			pr_err("Cannot get IRQ number for the platform\n");
			return -ENODEV;
		}

		/* intc map function ensures irq_set_percpu_devid() called */
		request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
				   this_cpu_ptr(&arc_pmu_cpu));

		on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
	} else
		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
}
static const struct of_device_id arc_pmu_match[] = {
	{ .compatible = "snps,arc700-pct" },
	{ .compatible = "snps,archs-pct" },
	{},
};
MODULE_DEVICE_TABLE(of, arc_pmu_match);
static struct platform_driver arc_pmu_driver = {
	.driver	= {
		.name		= "arc-pct",
		.of_match_table = of_match_ptr(arc_pmu_match),
	},
	.probe		= arc_pmu_device_probe,
};

module_platform_driver(arc_pmu_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mischa Jonker <mjonker@synopsys.com>");
MODULE_DESCRIPTION("ARC PMU driver");