arch/arc/include/asm/perf_event.h
/*
 * Linux performance counter support for ARC
 *
 * Copyright (C) 2014-2015 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H

/* Max number of counters that PCT block may ever have */
#define ARC_PERF_MAX_COUNTERS	32

#define ARC_REG_CC_BUILD	0xF6
#define ARC_REG_CC_INDEX	0x240
#define ARC_REG_CC_NAME0	0x241
#define ARC_REG_CC_NAME1	0x242

#define ARC_REG_PCT_BUILD	0xF5
#define ARC_REG_PCT_COUNTL	0x250
#define ARC_REG_PCT_COUNTH	0x251
#define ARC_REG_PCT_SNAPL	0x252
#define ARC_REG_PCT_SNAPH	0x253
#define ARC_REG_PCT_CONFIG	0x254
#define ARC_REG_PCT_CONTROL	0x255
#define ARC_REG_PCT_INDEX	0x256
#define ARC_REG_PCT_INT_CNTL	0x25C
#define ARC_REG_PCT_INT_CNTH	0x25D
#define ARC_REG_PCT_INT_CTRL	0x25E
#define ARC_REG_PCT_INT_ACT	0x25F

#define ARC_REG_PCT_CONFIG_USER	(1 << 18)	/* count in user mode */
#define ARC_REG_PCT_CONFIG_KERN	(1 << 19)	/* count in kernel mode */

#define ARC_REG_PCT_CONTROL_CC	(1 << 16)	/* clear counts */
#define ARC_REG_PCT_CONTROL_SN	(1 << 17)	/* snapshot */
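
/*
 * Illustrative sketch (not part of the upstream header): how these PCT
 * registers are typically used to read a counter as a 64-bit value, in
 * the spirit of arc_pmu_read_counter() in arch/arc/kernel/perf_event.c.
 * read_aux_reg()/write_aux_reg() come from <asm/arcregs.h> and u32/u64
 * from <linux/types.h>; exact driver code may differ, so treat this as
 * a hedged example only.
 */
#if 0	/* example only, never compiled */
static u64 example_pct_read_counter(int idx)
{
	u32 tmp;
	u64 result;

	/*
	 * Select the counter, then request a snapshot so the 64-bit value
	 * can be read consistently via SNAPH:SNAPL instead of racing with
	 * the live COUNTH:COUNTL pair.
	 */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);
	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);

	result = (u64)read_aux_reg(ARC_REG_PCT_SNAPH) << 32;
	result |= read_aux_reg(ARC_REG_PCT_SNAPL);

	return result;
}
#endif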
struct arc_reg_pct_build {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int m:8, c:8, r:5, i:1, s:2, v:8;
#else
	unsigned int v:8, s:2, i:1, r:5, c:8, m:8;
#endif
};

struct arc_reg_cc_build {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int c:16, r:8, v:8;
#else
	unsigned int v:8, r:8, c:16;
#endif
};
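
/*
 * Illustrative sketch (not part of the upstream header): at probe time the
 * ARC perf driver reads these build configuration registers to discover
 * what the RTL was actually built with, roughly as arc_pmu_device_probe()
 * does in arch/arc/kernel/perf_event.c. READ_BCR() is the BCR accessor
 * from <asm/arcregs.h>; the field semantics noted below are assumptions.
 */
#if 0	/* example only, never compiled */
static int example_pct_probe(void)
{
	struct arc_reg_pct_build pct_bcr;
	struct arc_reg_cc_build cc_bcr;

	READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
	if (!pct_bcr.c)		/* assumed: 'c' = number of counters built in */
		return -ENODEV;	/* this core has no performance counters */

	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
	/* assumed: cc_bcr.c = number of countable h/w conditions present */

	return pct_bcr.c;	/* expected to be <= ARC_PERF_MAX_COUNTERS */
}
#endif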
#define PERF_COUNT_ARC_DCLM	(PERF_COUNT_HW_MAX + 0)
#define PERF_COUNT_ARC_DCSM	(PERF_COUNT_HW_MAX + 1)
#define PERF_COUNT_ARC_ICM	(PERF_COUNT_HW_MAX + 2)
#define PERF_COUNT_ARC_BPOK	(PERF_COUNT_HW_MAX + 3)
#define PERF_COUNT_ARC_EDTLB	(PERF_COUNT_HW_MAX + 4)
#define PERF_COUNT_ARC_EITLB	(PERF_COUNT_HW_MAX + 5)
#define PERF_COUNT_ARC_LDC	(PERF_COUNT_HW_MAX + 6)
#define PERF_COUNT_ARC_STC	(PERF_COUNT_HW_MAX + 7)

#define PERF_COUNT_ARC_HW_MAX	(PERF_COUNT_HW_MAX + 8)

/*
 * Some ARC pct quirks:
 *
 * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
 * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
 *	The ARC 700 can either measure stalls per pipeline stage, or all
 *	stalls combined; for now we assign all stalls to
 *	STALLED_CYCLES_BACKEND and all pipeline flushes (e.g. caused by
 *	mispredicts, etc.) to STALLED_CYCLES_FRONTEND.
 *
 *	We could start multiple performance counters and combine everything
 *	afterwards, but that makes it complicated.
 *
 *	Note that I$ cache misses aren't counted by either of the two!
 */

/*
 * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
 * (based on a specific RTL build).
 * Below is the static map between perf generic/ARC specific event_id and
 * h/w condition names.
 * At probe time, we loop through each index and read back its name to
 * complete the mapping of perf event_id to h/w index, as the latter is
 * needed to actually program the counter.
 * (An illustrative sketch of that probe loop follows the map below.)
 */
static const char * const arc_pmu_ev_hw_map[] = {
	/* count cycles */
	[PERF_COUNT_HW_CPU_CYCLES] = "crun",
	[PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
	[PERF_COUNT_HW_BUS_CYCLES] = "crun",

	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",

	/* counts condition */
	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",	/* Excludes ZOL jumps */
	[PERF_COUNT_ARC_BPOK] = "bpok",			/* NP-NT, PT-T, PNT-NT */
#ifdef CONFIG_ISA_ARCV2
	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
#else
	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail",	/* NP-T, PT-NT, PNT-T */
#endif
	[PERF_COUNT_ARC_LDC] = "imemrdc",	/* Instr: mem read cached */
	[PERF_COUNT_ARC_STC] = "imemwrc",	/* Instr: mem write cached */

	[PERF_COUNT_ARC_DCLM] = "dclm",		/* D-cache Load Miss */
	[PERF_COUNT_ARC_DCSM] = "dcsm",		/* D-cache Store Miss */
	[PERF_COUNT_ARC_ICM] = "icm",		/* I-cache Miss */
	[PERF_COUNT_ARC_EDTLB] = "edtlb",	/* D-TLB Miss */
	[PERF_COUNT_ARC_EITLB] = "eitlb",	/* I-TLB Miss */

	[PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc",	/* Instr: mem read cached */
	[PERF_COUNT_HW_CACHE_MISSES] = "dclm",		/* D-cache Load Miss */
};
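
/*
 * Illustrative sketch (not part of the upstream header): the probe-time
 * loop described above, which resolves each fixed condition *name* to the
 * *index* a particular RTL build assigned it. Loosely modelled on
 * arc_pmu_device_probe(); the union layout, the strcmp() use (needs
 * <linux/string.h>) and the helper name are assumptions for illustration.
 */
#if 0	/* example only, never compiled */
static void example_map_conditions(int num_conditions, int *ev_hw_idx)
{
	union {
		struct { u32 word0, word1; } indiv;
		char str[9];
	} cc_name;
	int i, j;

	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
		ev_hw_idx[i] = -1;	/* default: event not available */

	for (j = 0; j < num_conditions; j++) {
		/* Select condition 'j' and read back its 8-char ASCII name */
		write_aux_reg(ARC_REG_CC_INDEX, j);
		cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
		cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
		cc_name.str[8] = 0;

		/* Match the name against the static map to learn its index */
		for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
			if (arc_pmu_ev_hw_map[i] &&
			    !strcmp(arc_pmu_ev_hw_map[i], cc_name.str))
				ev_hw_idx[i] = j;
	}
}
#endif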
#define C(_x)			PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED	0xffff

static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_LDC,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_DCLM,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_STC,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_DCSM,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_HW_INSTRUCTIONS,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_ICM,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_LDC,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_EDTLB,
		},
		/* DTLB LD/ST Miss not segregated by h/w */
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_EITLB,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
			[C(RESULT_MISS)]	= PERF_COUNT_HW_BRANCH_MISSES,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
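
/*
 * Illustrative sketch (not part of the upstream header): how a generic
 * PERF_TYPE_HW_CACHE config word is decoded and looked up in the map
 * above, in the spirit of arc_pmu_cache_event() in the ARC perf driver.
 * Error codes assume <linux/errno.h>; treat the details as an assumption.
 */
#if 0	/* example only, never compiled */
static int example_cache_event(u64 config)
{
	unsigned int type   = (config >>  0) & 0xff;
	unsigned int op     = (config >>  8) & 0xff;
	unsigned int result = (config >> 16) & 0xff;
	int ret;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = arc_pmu_cache_map[type][op][result];
	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	/* 'ret' is an event_id that arc_pmu_ev_hw_map can translate */
	return ret;
}
#endif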

#endif	/* __ASM_PERF_EVENT_H */