/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				32
#define INTEL_PMC_MAX_FIXED				 4
#define INTEL_PMC_IDX_FIXED				32

#define X86_PMC_IDX_MAX					64

#define MSR_ARCH_PERFMON_PERFCTR0			0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
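
/*
 * A PERFEVTSELx value is assembled from the fields above.  Minimal sketch
 * (hypothetical helper, assumes wrmsrl() from <asm/msr.h>; not compiled):
 * program counter 0 to count unhalted core cycles (event 0x3c, umask 0)
 * in both user and kernel mode.
 */
#if 0
static void example_program_gp_counter0(void)
{
	u64 evtsel = (0x3c & ARCH_PERFMON_EVENTSEL_EVENT) |
		     ARCH_PERFMON_EVENTSEL_USR |
		     ARCH_PERFMON_EVENTSEL_OS |
		     ARCH_PERFMON_EVENTSEL_ENABLE;

	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, evtsel);
}
#endif
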
#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE				(1ULL << 32)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK	\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT				48
#define AMD64_L3_SLICE_MASK		\
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK		\
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT				56
#define AMD64_L3_THREAD_MASK		\
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK	\
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES				BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES				BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT				42
#define AMD64_L3_COREID_MASK		\
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK |		\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT |		\
	 ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP		BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT	24
110 * Intel "Architectural Performance Monitoring" CPUID
111 * detection/enumeration details:
115 unsigned int version_id
:8;
116 unsigned int num_counters
:8;
117 unsigned int bit_width
:8;
118 unsigned int mask_length
:8;
125 unsigned int no_unhalted_core_cycles
:1;
126 unsigned int no_instructions_retired
:1;
127 unsigned int no_unhalted_reference_cycles
:1;
128 unsigned int no_llc_reference
:1;
129 unsigned int no_llc_misses
:1;
130 unsigned int no_branch_instruction_retired
:1;
131 unsigned int no_branch_misses_retired
:1;
138 unsigned int num_counters_fixed
:5;
139 unsigned int bit_width_fixed
:8;
140 unsigned int reserved
:19;
145 struct x86_pmu_capability
{
148 int num_counters_fixed
;
151 unsigned int events_mask
;
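
/*
 * Sketch of how the unions above are typically filled (hypothetical helper,
 * assumes cpuid() from <asm/processor.h>; not compiled): CPUID leaf 0xa
 * enumerates the architectural PMU.
 */
#if 0
static void example_read_arch_perfmon_leaf(void)
{
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	union cpuid10_edx edx;
	unsigned int unused;

	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);

	/*
	 * eax.split.version_id:		architectural PMU version
	 * eax.split.num_counters:		general-purpose counters
	 * eax.split.bit_width:			width of those counters
	 * edx.split.num_counters_fixed:	fixed-function counters
	 */
}
#endif
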
/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
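
/*
 * Minimal sketch (hypothetical helper, assumes rdmsrl()/wrmsrl() from
 * <asm/msr.h>; not compiled): each fixed counter owns a 4-bit field in
 * FIXED_CTR_CTRL, counter i at bits [4*i+3:4*i]; 0x3 in the low two bits
 * counts both kernel and user mode.  On v2+ PMUs the counter must also be
 * enabled in the global control MSR.
 */
#if 0
static void example_enable_fixed_ctr0(void)
{
	u64 ctrl, count;

	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
	ctrl |= 0x3ULL << (4 * 0);		/* fixed counter 0: Instr_Retired.Any */
	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);

	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0, count);	/* instructions retired so far */
}
#endif
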
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS		(INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG			BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF		BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF			BIT_ULL(61)
#define GLOBAL_STATUS_ASIF			BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN		BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(58)
#define GLOBAL_STATUS_TRACE_TOPAPMI		BIT_ULL(55)

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_size;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	u64 latency;
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};

struct pebs_lbr_entry {
	u64 from, to, info;
};

struct pebs_lbr {
	struct pebs_lbr_entry lbr[0]; /* Variable length */
};
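
/*
 * Minimal sketch (hypothetical helper, not compiled): the PEBS_DATACFG_*
 * bits above select which optional groups follow the always-present basic
 * group in an adaptive PEBS record, in this order.
 */
#if 0
static void *example_skip_pebs_groups(struct pebs_basic *base, u64 data_cfg)
{
	void *at = base + 1;			/* basic group is always present */

	if (data_cfg & PEBS_DATACFG_MEMINFO)
		at += sizeof(struct pebs_meminfo);
	if (data_cfg & PEBS_DATACFG_GP)
		at += sizeof(struct pebs_gprs);
	if (data_cfg & PEBS_DATACFG_XMMS)
		at += sizeof(struct pebs_xmm);

	return at;	/* LBR entries start here if PEBS_DATACFG_LBRS was set */
}
#endif
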
/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)
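
/*
 * Usage sketch (hypothetical helper, not compiled): drivers test these bits
 * against get_ibs_caps(), declared further down in this header; bit 0 only
 * says whether IBS exists at all, the rest mirror Fn8000_001B_EAX.
 */
#if 0
static bool example_ibs_op_sampling_supported(void)
{
	u32 caps = get_ibs_caps();

	return (caps & IBS_CAPS_AVAIL) && (caps & IBS_CAPS_OPSAM);
}
#endif
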
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)
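
/*
 * Minimal sketch (hypothetical helper, not compiled) of how these masks are
 * typically applied when reading back IbsOpCtl: a rolled-over counter
 * reports MaxCnt (specified in units of 16), otherwise CurCnt is used with
 * the 7 random low bits masked off.
 */
#if 0
static u64 example_ibs_op_count(u64 op_ctl)
{
	if (op_ctl & IBS_OP_VAL)			/* counter rolled over */
		return (op_ctl & IBS_OP_MAX_CNT) << 4;	/* MaxCnt is in units of 16 */

	return (op_ctl & IBS_OP_CUR_CNT) >> 32;		/* CurCnt, random bits dropped */
}
#endif
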
#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)
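
/*
 * Usage sketch (fragment, not compiled; PERF_RECORD_MISC_EXACT_IP comes from
 * the perf uapi): a consumer such as perf_misc_flags() can report an exact
 * sample IP by testing the stashed flag.
 */
#if 0
	if (regs->flags & PERF_EFLAGS_EXACT)
		misc |= PERF_RECORD_MISC_EXACT_IP;
#endif
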
struct pt_regs;
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;				\
	regs->flags = 0;					\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{
}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */