/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Linux performance counter support for ARC
 *
 * Copyright (C) 2014-2015 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H
/* Max number of counters that PCT block may ever have */
#define ARC_PERF_MAX_COUNTERS	32

#define ARC_REG_CC_BUILD	0xF6
#define ARC_REG_CC_INDEX	0x240
#define ARC_REG_CC_NAME0	0x241
#define ARC_REG_CC_NAME1	0x242

#define ARC_REG_PCT_BUILD	0xF5
#define ARC_REG_PCT_COUNTL	0x250
#define ARC_REG_PCT_COUNTH	0x251
#define ARC_REG_PCT_SNAPL	0x252
#define ARC_REG_PCT_SNAPH	0x253
#define ARC_REG_PCT_CONFIG	0x254
#define ARC_REG_PCT_CONTROL	0x255
#define ARC_REG_PCT_INDEX	0x256
#define ARC_REG_PCT_INT_CNTL	0x25C
#define ARC_REG_PCT_INT_CNTH	0x25D
#define ARC_REG_PCT_INT_CTRL	0x25E
#define ARC_REG_PCT_INT_ACT	0x25F

#define ARC_REG_PCT_CONFIG_USER	(1 << 18)	/* count in user mode */
#define ARC_REG_PCT_CONFIG_KERN	(1 << 19)	/* count in kernel mode */

#define ARC_REG_PCT_CONTROL_CC	(1 << 16)	/* clear counts */
#define ARC_REG_PCT_CONTROL_SN	(1 << 17)	/* snapshot */
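
/*
 * Illustrative sketch (not part of this header): one way the snapshot
 * registers above might be used to read a 64-bit count. Writing
 * ARC_REG_PCT_INDEX selects the counter; setting the SN bit in
 * ARC_REG_PCT_CONTROL latches COUNTL/COUNTH into SNAPL/SNAPH so both halves
 * can be read without tearing. read_aux_reg()/write_aux_reg() come from
 * <asm/arcregs.h>; the function name is hypothetical.
 */
#if 0	/* example only */
static u64 arc_pct_read_counter_example(int idx)
{
	u32 ctrl;
	u64 result;

	/* select the counter to operate on */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	/* request a snapshot of the selected counter */
	ctrl = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, ctrl | ARC_REG_PCT_CONTROL_SN);

	/* read the latched 64-bit value, high half first */
	result  = (u64)read_aux_reg(ARC_REG_PCT_SNAPH) << 32;
	result |= read_aux_reg(ARC_REG_PCT_SNAPL);

	return result;
}
#endif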
struct arc_reg_pct_build {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int m:8, c:8, r:5, i:1, s:2, v:8;
#else
	unsigned int v:8, s:2, i:1, r:5, c:8, m:8;
#endif
};

struct arc_reg_cc_build {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int c:16, r:8, v:8;
#else
	unsigned int v:8, r:8, c:16;
#endif
};
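
/*
 * Illustrative sketch (not part of this header): how a driver might probe the
 * build config registers above at init time. READ_BCR() comes from
 * <asm/arcregs.h>; treating 'v' as a version/presence field and 'c' as the
 * number of counters/conditions is an assumption based on the usual ARC BCR
 * layout, and the function name is hypothetical.
 */
#if 0	/* example only */
static int arc_pct_probe_example(void)
{
	struct arc_reg_pct_build pct_bcr;
	struct arc_reg_cc_build cc_bcr;

	READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
	if (!pct_bcr.v)				/* no PCT block in this build */
		return -ENODEV;
	if (pct_bcr.c > ARC_PERF_MAX_COUNTERS)	/* more counters than supported */
		return -EINVAL;

	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
	if (!cc_bcr.v)				/* no countable conditions */
		return -ENODEV;

	return 0;
}
#endif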
#define PERF_COUNT_ARC_DCLM	(PERF_COUNT_HW_MAX + 0)
#define PERF_COUNT_ARC_DCSM	(PERF_COUNT_HW_MAX + 1)
#define PERF_COUNT_ARC_ICM	(PERF_COUNT_HW_MAX + 2)
#define PERF_COUNT_ARC_BPOK	(PERF_COUNT_HW_MAX + 3)
#define PERF_COUNT_ARC_EDTLB	(PERF_COUNT_HW_MAX + 4)
#define PERF_COUNT_ARC_EITLB	(PERF_COUNT_HW_MAX + 5)
#define PERF_COUNT_ARC_LDC	(PERF_COUNT_HW_MAX + 6)
#define PERF_COUNT_ARC_STC	(PERF_COUNT_HW_MAX + 7)

#define PERF_COUNT_ARC_HW_MAX	(PERF_COUNT_HW_MAX + 8)
/*
 * Some ARC pct quirks:
 *
 * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
 * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
 *	The ARC 700 can either measure stalls per pipeline stage, or all stalls
 *	combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
 *	and all pipeline flushes (e.g. caused by mispredicts, etc.) to
 *	STALLED_CYCLES_FRONTEND.
 *
 *	We could start multiple performance counters and combine everything
 *	afterwards, but that makes it complicated.
 *
 * Note that I$ cache misses aren't counted by either of the two!
 */
/*
 * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
 * (based on a specific RTL build).
 * Below is the static map between the perf generic/ARC-specific event_id and
 * the h/w condition names.
 * At probe time, we loop through each index and read its name to complete the
 * mapping of perf event_id to h/w index, as the latter is what is actually
 * needed to program the counter (see the probe-time sketch after the table
 * below).
 */
static const char * const arc_pmu_ev_hw_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = "crun",
	[PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
	[PERF_COUNT_HW_BUS_CYCLES] = "crun",

	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",

	/* counts condition */
	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
	/* All jump instructions that are taken */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
#ifdef CONFIG_ISA_ARCV2
	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
#else
	[PERF_COUNT_ARC_BPOK]         = "bpok",	  /* NP-NT, PT-T, PNT-NT */
	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
#endif
	[PERF_COUNT_ARC_LDC] = "imemrdc",	/* Instr: mem read cached */
	[PERF_COUNT_ARC_STC] = "imemwrc",	/* Instr: mem write cached */

	[PERF_COUNT_ARC_DCLM] = "dclm",		/* D-cache Load Miss */
	[PERF_COUNT_ARC_DCSM] = "dcsm",		/* D-cache Store Miss */
	[PERF_COUNT_ARC_ICM] = "icm",		/* I-cache Miss */
	[PERF_COUNT_ARC_EDTLB] = "edtlb",	/* D-TLB Miss */
	[PERF_COUNT_ARC_EITLB] = "eitlb",	/* I-TLB Miss */

	[PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc",	/* Instr: mem read cached */
	[PERF_COUNT_HW_CACHE_MISSES] = "dclm",		/* D-cache Load Miss */
};
#define C(_x)			PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED	0xffff
static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_LDC,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_DCLM,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_STC,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_DCSM,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_HW_INSTRUCTIONS,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_ICM,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_LDC,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_EDTLB,
		},
			/* DTLB LD/ST Miss not segregated by h/w */
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_EITLB,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
			[C(RESULT_MISS)]	= PERF_COUNT_HW_BRANCH_MISSES,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
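
/*
 * Illustrative sketch (not part of this header): how a PERF_TYPE_HW_CACHE
 * config value is typically decoded against the table above. The byte layout
 * (type, op, result) is the generic perf ABI encoding; the function name is
 * hypothetical.
 */
#if 0	/* example only */
static int arc_pmu_cache_event_example(u64 config)
{
	unsigned int type	= (config >>  0) & 0xff;
	unsigned int op		= (config >>  8) & 0xff;
	unsigned int result	= (config >> 16) & 0xff;
	int ret;

	if (type >= C(MAX) || op >= C(OP_MAX) || result >= C(RESULT_MAX))
		return -EINVAL;

	ret = arc_pmu_cache_map[type][op][result];
	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;	/* generic or ARC-specific event_id to count */
}
#endif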
#endif	/* __ASM_PERF_EVENT_H */