1 // SPDX-License-Identifier: GPL-2.0
3 * Performance events support for SH-4A performance counters
5 * Copyright (C) 2009, 2010 Paul Mundt
7 #include <linux/kernel.h>
8 #include <linux/init.h>
10 #include <linux/irq.h>
11 #include <linux/perf_event.h>
12 #include <asm/processor.h>
14 #define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * idx))
15 #define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * idx))
17 #define CCBR_CIT_MASK (0x7ff << 6)
18 #define CCBR_DUC (1 << 3)
19 #define CCBR_CMDS (1 << 1)
20 #define CCBR_PPCE (1 << 0)
22 #ifdef CONFIG_CPU_SHX3
24 * The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR
25 * and PMCTR locations remains tentatively constant. This change remains
26 * wholly undocumented, and was simply found through trial and error.
28 * Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and
29 * it's unclear when this ceased to be the case. For now we always use
30 * the new location (if future parts keep up with this trend then
31 * scanning for them at runtime also remains a viable option.)
33 * The gap in the register space also suggests that there are other
34 * undocumented counters, so this will need to be revisited at a later
37 #define PPC_PMCAT 0xfc100240
39 #define PPC_PMCAT 0xfc100080
42 #define PMCAT_OVF3 (1 << 27)
43 #define PMCAT_CNN3 (1 << 26)
44 #define PMCAT_CLR3 (1 << 25)
45 #define PMCAT_OVF2 (1 << 19)
46 #define PMCAT_CLR2 (1 << 17)
47 #define PMCAT_OVF1 (1 << 11)
48 #define PMCAT_CNN1 (1 << 10)
49 #define PMCAT_CLR1 (1 << 9)
50 #define PMCAT_OVF0 (1 << 3)
51 #define PMCAT_CLR0 (1 << 1)
53 static struct sh_pmu sh4a_pmu
;
56 * Supported raw event codes:
58 * Event Code Description
59 * ---------- -----------
61 * 0x0000 number of elapsed cycles
62 * 0x0200 number of elapsed cycles in privileged mode
63 * 0x0280 number of elapsed cycles while SR.BL is asserted
64 * 0x0202 instruction execution
65 * 0x0203 instruction execution in parallel
66 * 0x0204 number of unconditional branches
67 * 0x0208 number of exceptions
68 * 0x0209 number of interrupts
69 * 0x0220 UTLB miss caused by instruction fetch
70 * 0x0222 UTLB miss caused by operand access
71 * 0x02a0 number of ITLB misses
72 * 0x0028 number of accesses to instruction memories
73 * 0x0029 number of accesses to instruction cache
74 * 0x002a instruction cache miss
75 * 0x022e number of access to instruction X/Y memory
76 * 0x0030 number of reads to operand memories
77 * 0x0038 number of writes to operand memories
78 * 0x0031 number of operand cache read accesses
79 * 0x0039 number of operand cache write accesses
80 * 0x0032 operand cache read miss
81 * 0x003a operand cache write miss
82 * 0x0236 number of reads to operand X/Y memory
83 * 0x023e number of writes to operand X/Y memory
84 * 0x0237 number of reads to operand U memory
85 * 0x023f number of writes to operand U memory
86 * 0x0337 number of U memory read buffer misses
87 * 0x02b4 number of wait cycles due to operand read access
88 * 0x02bc number of wait cycles due to operand write access
89 * 0x0033 number of wait cycles due to operand cache read miss
90 * 0x003b number of wait cycles due to operand cache write miss
94 * Special reserved bits used by hardware emulators, read values will
95 * vary, but writes must always be 0.
97 #define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
99 static const int sh4a_general_events
[] = {
100 [PERF_COUNT_HW_CPU_CYCLES
] = 0x0000,
101 [PERF_COUNT_HW_INSTRUCTIONS
] = 0x0202,
102 [PERF_COUNT_HW_CACHE_REFERENCES
] = 0x0029, /* I-cache */
103 [PERF_COUNT_HW_CACHE_MISSES
] = 0x002a, /* I-cache */
104 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS
] = 0x0204,
105 [PERF_COUNT_HW_BRANCH_MISSES
] = -1,
106 [PERF_COUNT_HW_BUS_CYCLES
] = -1,
109 #define C(x) PERF_COUNT_HW_CACHE_##x
111 static const int sh4a_cache_events
112 [PERF_COUNT_HW_CACHE_MAX
]
113 [PERF_COUNT_HW_CACHE_OP_MAX
]
114 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
118 [ C(RESULT_ACCESS
) ] = 0x0031,
119 [ C(RESULT_MISS
) ] = 0x0032,
122 [ C(RESULT_ACCESS
) ] = 0x0039,
123 [ C(RESULT_MISS
) ] = 0x003a,
125 [ C(OP_PREFETCH
) ] = {
126 [ C(RESULT_ACCESS
) ] = 0,
127 [ C(RESULT_MISS
) ] = 0,
133 [ C(RESULT_ACCESS
) ] = 0x0029,
134 [ C(RESULT_MISS
) ] = 0x002a,
137 [ C(RESULT_ACCESS
) ] = -1,
138 [ C(RESULT_MISS
) ] = -1,
140 [ C(OP_PREFETCH
) ] = {
141 [ C(RESULT_ACCESS
) ] = 0,
142 [ C(RESULT_MISS
) ] = 0,
148 [ C(RESULT_ACCESS
) ] = 0x0030,
149 [ C(RESULT_MISS
) ] = 0,
152 [ C(RESULT_ACCESS
) ] = 0x0038,
153 [ C(RESULT_MISS
) ] = 0,
155 [ C(OP_PREFETCH
) ] = {
156 [ C(RESULT_ACCESS
) ] = 0,
157 [ C(RESULT_MISS
) ] = 0,
163 [ C(RESULT_ACCESS
) ] = 0x0222,
164 [ C(RESULT_MISS
) ] = 0x0220,
167 [ C(RESULT_ACCESS
) ] = 0,
168 [ C(RESULT_MISS
) ] = 0,
170 [ C(OP_PREFETCH
) ] = {
171 [ C(RESULT_ACCESS
) ] = 0,
172 [ C(RESULT_MISS
) ] = 0,
178 [ C(RESULT_ACCESS
) ] = 0,
179 [ C(RESULT_MISS
) ] = 0x02a0,
182 [ C(RESULT_ACCESS
) ] = -1,
183 [ C(RESULT_MISS
) ] = -1,
185 [ C(OP_PREFETCH
) ] = {
186 [ C(RESULT_ACCESS
) ] = -1,
187 [ C(RESULT_MISS
) ] = -1,
193 [ C(RESULT_ACCESS
) ] = -1,
194 [ C(RESULT_MISS
) ] = -1,
197 [ C(RESULT_ACCESS
) ] = -1,
198 [ C(RESULT_MISS
) ] = -1,
200 [ C(OP_PREFETCH
) ] = {
201 [ C(RESULT_ACCESS
) ] = -1,
202 [ C(RESULT_MISS
) ] = -1,
208 [ C(RESULT_ACCESS
) ] = -1,
209 [ C(RESULT_MISS
) ] = -1,
212 [ C(RESULT_ACCESS
) ] = -1,
213 [ C(RESULT_MISS
) ] = -1,
215 [ C(OP_PREFETCH
) ] = {
216 [ C(RESULT_ACCESS
) ] = -1,
217 [ C(RESULT_MISS
) ] = -1,
222 static int sh4a_event_map(int event
)
224 return sh4a_general_events
[event
];
227 static u64
sh4a_pmu_read(int idx
)
229 return __raw_readl(PPC_PMCTR(idx
));
232 static void sh4a_pmu_disable(struct hw_perf_event
*hwc
, int idx
)
236 tmp
= __raw_readl(PPC_CCBR(idx
));
237 tmp
&= ~(CCBR_CIT_MASK
| CCBR_DUC
);
238 __raw_writel(tmp
, PPC_CCBR(idx
));
241 static void sh4a_pmu_enable(struct hw_perf_event
*hwc
, int idx
)
245 tmp
= __raw_readl(PPC_PMCAT
);
246 tmp
&= ~PMCAT_EMU_CLR_MASK
;
247 tmp
|= idx
? PMCAT_CLR1
: PMCAT_CLR0
;
248 __raw_writel(tmp
, PPC_PMCAT
);
250 tmp
= __raw_readl(PPC_CCBR(idx
));
251 tmp
|= (hwc
->config
<< 6) | CCBR_CMDS
| CCBR_PPCE
;
252 __raw_writel(tmp
, PPC_CCBR(idx
));
254 __raw_writel(__raw_readl(PPC_CCBR(idx
)) | CCBR_DUC
, PPC_CCBR(idx
));
257 static void sh4a_pmu_disable_all(void)
261 for (i
= 0; i
< sh4a_pmu
.num_events
; i
++)
262 __raw_writel(__raw_readl(PPC_CCBR(i
)) & ~CCBR_DUC
, PPC_CCBR(i
));
265 static void sh4a_pmu_enable_all(void)
269 for (i
= 0; i
< sh4a_pmu
.num_events
; i
++)
270 __raw_writel(__raw_readl(PPC_CCBR(i
)) | CCBR_DUC
, PPC_CCBR(i
));
273 static struct sh_pmu sh4a_pmu
= {
276 .event_map
= sh4a_event_map
,
277 .max_events
= ARRAY_SIZE(sh4a_general_events
),
278 .raw_event_mask
= 0x3ff,
279 .cache_events
= &sh4a_cache_events
,
280 .read
= sh4a_pmu_read
,
281 .disable
= sh4a_pmu_disable
,
282 .enable
= sh4a_pmu_enable
,
283 .disable_all
= sh4a_pmu_disable_all
,
284 .enable_all
= sh4a_pmu_enable_all
,
287 static int __init
sh4a_pmu_init(void)
290 * Make sure this CPU actually has perf counters.
292 if (!(boot_cpu_data
.flags
& CPU_HAS_PERF_COUNTER
)) {
293 pr_notice("HW perf events unsupported, software events only.\n");
297 return register_sh_pmu(&sh4a_pmu
);
299 early_initcall(sh4a_pmu_init
);