/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED					4
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL

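/*
 * Illustrative sketch only (not part of the original header): shows how the
 * ARCH_PERFMON_EVENTSEL_* bits above combine into a raw event-select value.
 * The helper name is made up for this example; kernel code programs the
 * EVENTSEL MSRs through the perf_event core rather than a helper like this.
 */
static inline u64 x86_example_raw_eventsel(u8 event, u8 umask)
{
	return ((u64)event & ARCH_PERFMON_EVENTSEL_EVENT) |
	       (((u64)umask << 8) & ARCH_PERFMON_EVENTSEL_UMASK) |
	       ARCH_PERFMON_EVENTSEL_USR |	/* count in user mode */
	       ARCH_PERFMON_EVENTSEL_OS |	/* count in kernel mode */
	       ARCH_PERFMON_EVENTSEL_ENABLE;	/* enable the counter */
}
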
#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE				(1ULL << 32)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK	\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT				48
#define AMD64_L3_SLICE_MASK				\
	((0xFULL) << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT				56
#define AMD64_L3_THREAD_MASK				\
	((0xFFULL) << AMD64_L3_THREAD_SHIFT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |	\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |	\
	 ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP		BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT	24

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};

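/*
 * Illustrative sketch only (not part of the original header): how the union
 * above might be used to decode the EAX output of CPUID leaf 0xA into the
 * number of general-purpose counters. The helper name is made up for this
 * example and the caller is assumed to have executed CPUID already.
 */
static inline unsigned int cpuid10_example_num_gp_counters(unsigned int eax)
{
	union cpuid10_eax cap;

	cap.full = eax;		/* raw CPUID.0AH:EAX value */
	return cap.split.num_counters;
}
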
struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

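/*
 * Illustrative sketch only (not part of the original header): each fixed
 * counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL. The bit
 * positions assumed here (bit 0 of each field enables ring-0 counting,
 * bit 1 enables ring-3 counting) follow the architectural layout; they are
 * not defined in this header, and the helper name is made up for this
 * example.
 */
static inline u64 fixed_ctr_ctrl_example_enable(unsigned int idx)
{
	/* enable counting at CPL 0 and CPL > 0 for fixed counter 'idx' */
	return 0x3ULL << (idx * 4);
}
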
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS		(INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG				BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF			BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF				BIT_ULL(61)
#define GLOBAL_STATUS_ASIF				BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN			BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN			BIT_ULL(58)
#define GLOBAL_STATUS_TRACE_TOPAPMI			BIT_ULL(55)

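/*
 * Illustrative sketch only (not part of the original header): testing a raw
 * PERF_GLOBAL_STATUS value for one of the non-counter overflow conditions
 * defined above. The helper name is made up for this example.
 */
static inline int global_status_example_buffer_ovf(u64 status)
{
	return !!(status & GLOBAL_STATUS_BUFFER_OVF);
}
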
/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_size;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	u64 latency;
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};

struct pebs_lbr_entry {
	u64 from, to, info;
};

struct pebs_lbr {
	struct pebs_lbr_entry lbr[0]; /* Variable length */
};

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

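/*
 * Illustrative sketch only (not part of the original header): a caller that
 * obtained the IBS capability word (e.g. from get_ibs_caps(), declared
 * further below) might test for op-count sampling support like this. The
 * helper name is made up for this example.
 */
static inline int ibs_example_has_opcnt(u32 caps)
{
	return (caps & IBS_CAPS_AVAIL) && (caps & IBS_CAPS_OPCNT);
}
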
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)

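/*
 * Illustrative sketch only (not part of the original header): extracting the
 * current op count from a raw IBS op control value, with the randomized low
 * 7 bits described above masked off. The helper name is made up for this
 * example.
 */
static inline u64 ibs_example_op_cur_cnt(u64 op_ctl)
{
	return (op_ctl & IBS_OP_CUR_CNT) >> 32;
}
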
#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *	   event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

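/*
 * Illustrative sketch only (not part of the original header): consumers of a
 * sampled flags word could test for a precise IP like this. The helper name
 * is made up for this example.
 */
static inline int perf_example_ip_is_exact(unsigned long eflags)
{
	return !!(eflags & PERF_EFLAGS_EXACT);
}
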
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;				\
	regs->flags = 0;					\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{

}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */