/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/intel_ds.h>

/* To enable MSR tracing please use the generic trace points. */
/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE		= -1, /* not used */

	EXTRA_REG_RSP_0		=  0, /* offcore_response_0 */
	EXTRA_REG_RSP_1		=  1, /* offcore_response_1 */
	EXTRA_REG_LBR		=  2, /* lbr_select */
	EXTRA_REG_LDLAT		=  3, /* ld_lat_threshold */
	EXTRA_REG_FE		=  4, /* fe_* */

	EXTRA_REG_MAX		      /* number of entries needed */
};
struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64		code;
	u64		cmask;
	int		weight;
	int		overlap;
	int		flags;
	unsigned int	size;
};
static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{
	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
}
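
/*
 * Worked example (with illustrative event codes): for a range constraint
 * built below via EVENT_CONSTRAINT_RANGE(0xc0, 0xc4, ...), c->code is 0xc0
 * and c->size is 4, so constraint_match(c, 0xc2) computes
 * (0xc2 & cmask) - 0xc0 == 2 <= 4 and matches, while code 0xc8 yields 8 and
 * is rejected. Codes below c->code are rejected as well, because the
 * unsigned subtraction wraps around to a huge value.
 */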
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0008 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0010 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0020 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0040 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0080 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0100 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0200 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS	0x0400 /* use large PEBS */
struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};
#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 *
 */
#define LARGE_PEBS_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
	PERF_SAMPLE_PERIOD)
#define PEBS_GP_REGS			\
	((1ULL << PERF_REG_X86_AX)    | \
	 (1ULL << PERF_REG_X86_BX)    | \
	 (1ULL << PERF_REG_X86_CX)    | \
	 (1ULL << PERF_REG_X86_DX)    | \
	 (1ULL << PERF_REG_X86_DI)    | \
	 (1ULL << PERF_REG_X86_SI)    | \
	 (1ULL << PERF_REG_X86_SP)    | \
	 (1ULL << PERF_REG_X86_BP)    | \
	 (1ULL << PERF_REG_X86_IP)    | \
	 (1ULL << PERF_REG_X86_FLAGS) | \
	 (1ULL << PERF_REG_X86_R8)    | \
	 (1ULL << PERF_REG_X86_R9)    | \
	 (1ULL << PERF_REG_X86_R10)   | \
	 (1ULL << PERF_REG_X86_R11)   | \
	 (1ULL << PERF_REG_X86_R12)   | \
	 (1ULL << PERF_REG_X86_R13)   | \
	 (1ULL << PERF_REG_X86_R14)   | \
	 (1ULL << PERF_REG_X86_R15))
/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};
/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};
enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};
struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};
struct intel_excl_cntrs {
	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};
struct x86_perf_task_context;

#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,

	X86_PERF_KFREE_MAX
};
struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;

	/* Current super set of events hardware configuration */
	u64			active_pebs_data_cfg;
	int			pebs_record_size;

	/*
	 * Intel LBR bits
	 */
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	struct x86_perf_task_context	*last_task_ctx;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthread
	 */
	struct event_constraint		*constraint_list; /* in enable order */
	struct intel_excl_cntrs		*excl_cntrs;
	int				excl_thread_id; /* 0 or 1 */

	/*
	 * SKL TSX_FORCE_ABORT shadow
	 */
	u64				tfa_shadow;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};
#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.size = (e) - (c),		\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)

#define EVENT_CONSTRAINT(c, n, m) \
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The constraint_match() function only works for 'simple' event codes
 * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
 */
#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)
/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and its counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
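
/*
 * To make the example above concrete: mask 0x09 covers counters 0 and 3,
 * mask 0x07 covers counters 0-2 and mask 0x38 covers counters 3-5. 0x09 is
 * not a subset of either of the other (equal or higher weight) masks, which
 * is exactly the situation the overlap flag is meant to mark.
 */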
/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on a range of Event codes
 */
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
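
/*
 * For example (as conventionally used for the architectural fixed counters):
 * FIXED_EVENT_CONSTRAINT(0x00c0, 0) describes INST_RETIRED.ANY on fixed
 * counter 0, i.e. bit 32 + 0 in the constraint's counter index mask.
 */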
/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
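
/*
 * For illustration (event/umask values as typically used by the per-model
 * PEBS tables for MEM_TRANS_RETIRED.LOAD_LATENCY; shown as an example, not
 * a definition): INTEL_PLD_CONSTRAINT(0x01cd, 0xf) constrains event
 * 0xcd/umask 0x01 to counters 0-3 and tags it with PERF_X86_EVENT_PEBS_LDLAT
 * so that the load latency threshold extra reg gets programmed for it.
 */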
/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
	__EVENT_CONSTRAINT_RANGE(code, end, n,				\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n, \
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
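
/*
 * Minimal usage sketch (mirroring how the per-model code looks up a
 * constraint; the snippet is illustrative, not a new helper):
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if (constraint_match(c, event->hw.config))
 *			return c;
 *	}
 *	return &unconstrained;
 */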
/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
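
/*
 * Usage sketch (the valid_mask value is illustrative; the real, model
 * specific masks live in the per-model extra_regs tables): the offcore
 * response facility is typically described as
 *
 *	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
 *
 * i.e. event 0xb7/umask 0x01 routes its extended config through the
 * OFFCORE_RSP_0 MSR, with EXTRA_REG_RSP_0 as its shared-regs slot.
 */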
union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
		u64	pebs_baseline:1;
	};
	u64	capabilities;
};
struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};
union x86_pmu_config {
	struct {
		u64 event:8, umask:8, usr:1, os:1, edge:1, pc:1, interrupt:1,
		    __reserved1:1, en:1, inv:1, cmask:8, event2:4,
		    __reserved2:4, go:1, ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
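
/*
 * Example (a raw config word as the per-model code would build it; purely
 * illustrative): X86_CONFIG(.event=0xc0, .umask=0x00, .inv=0, .cmask=0)
 * expands to the EVENTSEL encoding of INST_RETIRED.ANY_P without any manual
 * shifting or masking of the individual fields.
 */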
enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};
/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	void		(*read)(struct perf_event *event);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		num_counters_fixed;

	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};

	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	u64		(*limit_period)(struct perf_event *event, u64 l);

	/* PMI handler bits */
	unsigned int	late_ack		:1,
			counter_freezing	:1;
	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	struct attribute **format_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	const struct attribute_group **attr_update;

	unsigned long	attr_freeze_on_smi;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	pebs_no_isolation	:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	unsigned long	large_pebs_flags;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int	flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int		(*check_period) (struct perf_event *event, u64 period);
};
struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int lbr_callstack_users;
};
#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
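
/*
 * Usage sketch (the quirk function name is only an example): a model
 * specific init path can register a fixup with
 *
 *	x86_add_quirk(intel_clovertown_quirk);
 *
 * and all registered quirks are invoked once during early PMU init.
 */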
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
#define PMU_FL_TFA		0x20 /* deal with TSX force abort */
#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}
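
/*
 * For example, the per-model code defines sysfs event strings such as
 * (shown as an illustration of the macro, not something this header adds):
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 */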
struct pmu *x86_get_pmu(void);
extern struct x86_pmu x86_pmu __read_mostly;
static inline bool x86_pmu_has_lbr_callstack(void)
{
	return  x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}
DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);
/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 x86_perf_event_update(struct perf_event *event);
static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}
static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}
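
/*
 * Example with the standard architectural MSR layout (models that install
 * an addr_offset() callback, e.g. recent AMD parts, differ):
 * x86_pmu.eventsel is MSR_ARCH_PERFMON_EVENTSEL0 (0x186) and x86_pmu.perfctr
 * is MSR_ARCH_PERFMON_PERFCTR0 (0xc1), so index 2 maps to config MSR 0x188
 * and counter MSR 0xc3.
 */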
static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}
void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);
static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}
void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;
static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}
/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * tracked.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}
ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */
#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return false;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	return hw_event == bts_event && period == 1;
}
static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return intel_pmu_has_bts_period(event, hwc->sample_period);
}
int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);
extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

extern struct event_constraint intel_icl_pebs_event_constraints[];
struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_auto_reload_read(struct perf_event *event);

void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);

void intel_ds_init(void);
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);
static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}
#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
	return 0;
}

static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */