/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				32
#define INTEL_PMC_MAX_FIXED				3
#define INTEL_PMC_IDX_FIXED				32

#define X86_PMC_IDX_MAX					64

#define MSR_ARCH_PERFMON_PERFCTR0			0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
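
/*
 * Illustrative sketch (not part of the upstream header): the EVENTSEL_*
 * fields above pack into one 64-bit value written to an
 * MSR_ARCH_PERFMON_EVENTSEL* register. For example, unhalted core cycles
 * (event 0x3c, umask 0x00) counted in both user and kernel mode:
 *
 *	u64 val = (0x3cULL & ARCH_PERFMON_EVENTSEL_EVENT) |
 *		  ((0x00ULL << 8) & ARCH_PERFMON_EVENTSEL_UMASK) |
 *		  ARCH_PERFMON_EVENTSEL_USR |
 *		  ARCH_PERFMON_EVENTSEL_OS |
 *		  ARCH_PERFMON_EVENTSEL_ENABLE;
 *
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, val);
 */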

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK	\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT				48
#define AMD64_L3_SLICE_MASK			\
	((0xFULL) << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT				56
#define AMD64_L3_THREAD_MASK			\
	((0xFFULL) << AMD64_L3_THREAD_SHIFT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK |		\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT |		\
	 ARCH_PERFMON_EVENTSEL_UMASK)
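
/*
 * Illustrative sketch (not part of the upstream header): the *_RAW_EVENT_MASK
 * values are typically used to reject raw perf configs that touch bits
 * outside the architecturally defined fields, roughly:
 *
 *	if (config & ~(u64)X86_RAW_EVENT_MASK)
 *		return -EINVAL;
 *
 * with AMD64_RAW_EVENT_MASK (or AMD64_RAW_EVENT_MASK_NB for northbridge
 * events) taking that role on AMD CPUs.
 */
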
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7
91 * Intel "Architectural Performance Monitoring" CPUID
92 * detection/enumeration details:
96 unsigned int version_id
:8;
97 unsigned int num_counters
:8;
98 unsigned int bit_width
:8;
99 unsigned int mask_length
:8;
106 unsigned int no_unhalted_core_cycles
:1;
107 unsigned int no_instructions_retired
:1;
108 unsigned int no_unhalted_reference_cycles
:1;
109 unsigned int no_llc_reference
:1;
110 unsigned int no_llc_misses
:1;
111 unsigned int no_branch_instruction_retired
:1;
112 unsigned int no_branch_misses_retired
:1;
119 unsigned int num_counters_fixed
:5;
120 unsigned int bit_width_fixed
:8;
121 unsigned int reserved
:19;
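
/*
 * Illustrative sketch (not part of the upstream header): decoding the
 * architectural PMU enumeration from CPUID leaf 0xa with the unions above
 * (cpuid_eax()/cpuid_ebx() from <asm/processor.h> assumed available).
 * Note that a set bit in EBX means the corresponding event is *not*
 * available:
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *
 *	eax.full = cpuid_eax(0xa);
 *	ebx.full = cpuid_ebx(0xa);
 *	if (eax.split.version_id &&
 *	    !(ebx.full & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
 *		pr_info("%u counters, %u bits, core-cycles event present\n",
 *			eax.split.num_counters, eax.split.bit_width);
 */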

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d
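
/*
 * Illustrative sketch (not part of the upstream header; field layout as
 * documented in the Intel SDM): each fixed counter owns a 4-bit field in
 * FIXED_CTR_CTRL at bit position 4*idx, where bit 0 enables ring-0
 * counting, bit 1 ring-3 counting and bit 3 the PMI on overflow. Counting
 * instructions retired (fixed counter 0) in user and kernel mode with an
 * overflow interrupt would look roughly like:
 *
 *	u64 ctrl;
 *
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 *	ctrl |= 0xbULL << (4 * 0);
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 */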

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
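
/*
 * Illustrative sketch (not part of the upstream header): because
 * INTEL_PMC_IDX_FIXED is 32, a fixed counter's index doubles as its bit
 * position in IA32_PERF_GLOBAL_CTRL/STATUS. Globally enabling fixed
 * counter 1 (unhalted core cycles) is then just:
 *
 *	u64 global_ctrl;
 *
 *	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_ctrl);
 *	global_ctrl |= 1ULL << INTEL_PMC_IDX_FIXED_CPU_CYCLES;
 *	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_ctrl);
 *
 * (MSR_CORE_PERF_GLOBAL_CTRL comes from <asm/msr-index.h>.)
 */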

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS			(INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG				BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF			BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF				BIT_ULL(61)
#define GLOBAL_STATUS_ASIF				BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN			BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN			BIT_ULL(58)
#define GLOBAL_STATUS_TRACE_TOPAPMI			BIT_ULL(55)
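
/*
 * Illustrative sketch (not part of the upstream header): a PMI handler
 * typically reads IA32_PERF_GLOBAL_STATUS, handles the bits it understands
 * and then acknowledges everything it saw, along these lines:
 *
 *	u64 status;
 *
 *	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 *	if (status & GLOBAL_STATUS_BUFFER_OVF)
 *		drain_ds_buffer();		(hypothetical helper)
 *	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, status);
 *
 * (both MSR indices come from <asm/msr-index.h>.)
 */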

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)
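
/*
 * Illustrative sketch (not part of the upstream header): the capability word
 * returned by get_ibs_caps() is essentially CPUID Fn8000_001B EAX, except
 * that bit 0 is reused to report that IBS exists at all, roughly:
 *
 *	u32 caps = 0;
 *
 *	if (boot_cpu_has(X86_FEATURE_IBS)) {
 *		caps = cpuid_eax(IBS_CPUID_FEATURES);
 *		if (!(caps & IBS_CAPS_AVAIL))
 *			caps = IBS_CAPS_DEFAULT;
 *	}
 *
 * so callers can simply test e.g. (get_ibs_caps() & IBS_CAPS_OPCNT).
 */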

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)
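
/*
 * Illustrative sketch (not part of the upstream header): starting IBS op
 * sampling means writing the op control MSR with the enable bit and a max
 * count; the hardware appends four zero bits to the stored count, so the
 * effective period is the MaxCnt field times 16, roughly:
 *
 *	u64 period = 0x10000;			(hypothetical sample period)
 *	u64 val = ((period >> 4) & IBS_OP_MAX_CNT) | IBS_OP_ENABLE;
 *
 *	if (get_ibs_caps() & IBS_CAPS_OPCNT)
 *		val |= IBS_OP_CNT_CTL;		(count ops, not cycles)
 *	wrmsrl(MSR_AMD64_IBSOPCTL, val);
 *
 * (MSR_AMD64_IBSOPCTL comes from <asm/msr-index.h>.)
 */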

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	regs->flags = 0;					\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
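
/*
 * Illustrative sketch (not part of the upstream header): a hypervisor such
 * as KVM queries this list around VM-entry/exit and switches each MSR
 * between its host and guest values, along these lines:
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)
 *		wrmsrl(msrs[i].msr, entering_guest ? msrs[i].guest
 *						   : msrs[i].host);
 *
 * where entering_guest is a hypothetical flag for the switch direction.
 */
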
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */