// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/cpu_entry_area.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>

#include "../perf_event.h"

/* Waste a full page so it can be mapped into the cpu_entry_area */
DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define PEBS_FIXUP_SIZE		PAGE_SIZE
/*
 * pebs_record_32 for p4 and core not supported
 *
struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};
 */

union intel_x86_pebs_dse {
	u64 val;
	struct {
		unsigned int ld_dse:4;
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_reserved:26;
	};
	struct {
		unsigned int st_l1d_hit:1;
		unsigned int st_reserved1:3;
		unsigned int st_stlb_miss:1;
		unsigned int st_locked:1;
		unsigned int st_reserved2:26;
	};
};
/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information.
 */
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define LEVEL(x) P(LVLNUM, x)
#define REM P(REMOTE, REMOTE)
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
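
/*
 * Example composition: OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE)
 * describes a load that hit the L2 cache with no snooping involved; each
 * P() term is a PERF_MEM_S() field and the terms simply OR together into
 * one perf_mem_data_src encoding.
 */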
/* Version for Sandy Bridge and later */
static u64 pebs_data_source[] = {
	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),       /* 0x00: ukn L3 */
	OP_LH | P(LVL, L1)  | LEVEL(L1)  | P(SNOOP, NONE),           /* 0x01: L1 local */
	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE),           /* 0x02: LFB hit */
	OP_LH | P(LVL, L2)  | LEVEL(L2)  | P(SNOOP, NONE),           /* 0x03: L2 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, NONE),           /* 0x04: L3 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, MISS),           /* 0x05: L3 hit, snoop miss */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HIT),            /* 0x06: L3 hit, snoop hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HITM),           /* 0x07: L3 hit, snoop hitm */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x08: L3 miss snoop hit */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, HIT),       /* 0x0a: L3 miss, shared */
	OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x0b: L3 miss, shared */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | SNOOP_NONE_MISS,     /* 0x0c: L3 miss, excl */
	OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE),            /* 0x0e: I/O */
	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE),            /* 0x0f: uncached */
};
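
/*
 * load_latency_data() below indexes this table with the low four dse bits
 * (dse.ld_dse); intel_pmu_pebs_data_source_nhm()/_skl() patch individual
 * entries for those generations.
 */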
/* Patch up minor differences in the bits */
void __init intel_pmu_pebs_data_source_nhm(void)
{
	pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
	pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
	pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
}
void __init intel_pmu_pebs_data_source_skl(bool pmem)
{
	u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);

	pebs_data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
	pebs_data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
	pebs_data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
	pebs_data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
	pebs_data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
}
static u64 precise_store_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);

	dse.val = status;

	/*
	 * bit 4: TLB access
	 * 1 = store missed 2nd level TLB
	 *
	 * so it either hit the walker or the OS,
	 * otherwise it hit the 2nd level TLB
	 */
	if (dse.st_stlb_miss)
		val |= P(TLB, MISS);
	else
		val |= P(TLB, HIT);

	/*
	 * bit 0: hit L1 data cache
	 * if not set, then all we know is that
	 * it missed L1D
	 */
	if (dse.st_l1d_hit)
		val |= P(LVL, HIT);
	else
		val |= P(LVL, MISS);

	/*
	 * bit 5: Locked prefix
	 */
	if (dse.st_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
	union perf_mem_data_src dse;

	dse.val = PERF_MEM_NA;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
		dse.mem_op = PERF_MEM_OP_STORE;
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
		dse.mem_op = PERF_MEM_OP_LOAD;

	/*
	 * L1 info only valid for following events:
	 *
	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
	 * MEM_UOPS_RETIRED.LOCK_STORES
	 * MEM_UOPS_RETIRED.SPLIT_STORES
	 * MEM_UOPS_RETIRED.ALL_STORES
	 */
	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
		if (status & 1)
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		else
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
	}
	return dse.val;
}
static u64 load_latency_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val;

	dse.val = status;

	/*
	 * use the mapping table for bits 0-3
	 */
	val = pebs_data_source[dse.ld_dse];

	/*
	 * Nehalem models do not support TLB, Lock infos
	 */
	if (x86_pmu.pebs_no_tlb) {
		val |= P(TLB, NA) | P(LOCK, NA);
		return val;
	}

	/*
	 * bit 4: TLB access
	 * 0 = did not miss 2nd level TLB
	 * 1 = missed 2nd level TLB
	 */
	if (dse.ld_stlb_miss)
		val |= P(TLB, MISS) | P(TLB, L2);
	else
		val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);

	/*
	 * bit 5: locked prefix
	 */
	if (dse.ld_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
};

union hsw_tsx_tuning {
	struct {
		u32 cycles_last_block     : 32,
		    cycles                : 32,
		    instruction_abort     : 1,
		    non_instruction_abort : 1,
		    retry                 : 1,
		    data_conflict         : 1,
		    capacity_writes       : 1,
		    capacity_reads        : 1;
	};
	u64	    value;
};

#define PEBS_HSW_TSX_FLAGS	0xff00000000ULL

/* Same as HSW, plus TSC */

struct pebs_record_skl {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
	u64 tsc;
};
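
/*
 * The record layouts above track the PEBS format reported in
 * IA32_PERF_CAPABILITIES: fmt0 uses pebs_record_core, fmt1 pebs_record_nhm,
 * fmt2 pebs_record_hsw and fmt3 pebs_record_skl; see the format handling in
 * intel_ds_init() at the end of this file.
 */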
void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
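
/*
 * MSR_IA32_DS_AREA holds the linear address of the debug store area; the
 * wrmsr_on_cpu() calls above program it as two 32-bit halves (low, high).
 */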
static DEFINE_PER_CPU(void *, insn_buffer);

static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long)cea;
	phys_addr_t pa;
	size_t msz = 0;

	pa = virt_to_phys(addr);

	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, pa, prot);

	/*
	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
	 * all TLB entries for it.
	 */
	flush_tlb_kernel_range(start, start + size);
}

static void ds_clear_cea(void *cea, size_t size)
{
	unsigned long start = (unsigned long)cea;
	size_t msz = 0;

	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);

	flush_tlb_kernel_range(start, start + size);
}
static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
{
	unsigned int order = get_order(size);
	int node = cpu_to_node(cpu);
	struct page *page;

	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
	return page ? page_address(page) : NULL;
}

static void dsfree_pages(const void *buffer, size_t size)
{
	if (buffer)
		free_pages((unsigned long)buffer, get_order(size));
}
static int alloc_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	size_t bsiz = x86_pmu.pebs_buffer_size;
	int max, node = cpu_to_node(cpu);
	void *buffer, *insn_buff, *cea;

	if (!x86_pmu.pebs)
		return 0;

	buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
	if (unlikely(!buffer))
		return -ENOMEM;

	/*
	 * HSW+ already provides us the eventing ip; no need to allocate this
	 * buffer then.
	 */
	if (x86_pmu.intel_cap.pebs_format < 2) {
		insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
		if (!insn_buff) {
			dsfree_pages(buffer, bsiz);
			return -ENOMEM;
		}
		per_cpu(insn_buffer, cpu) = insn_buff;
	}
	hwev->ds_pebs_vaddr = buffer;
	/* Update the cpu entry area mapping */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds->pebs_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
	ds->pebs_index = ds->pebs_buffer_base;
	max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
	ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
	return 0;
}
static void release_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.pebs)
		return;

	kfree(per_cpu(insn_buffer, cpu));
	per_cpu(insn_buffer, cpu) = NULL;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
	hwev->ds_pebs_vaddr = NULL;
}
static int alloc_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	void *buffer, *cea;
	int max;

	if (!x86_pmu.bts)
		return 0;

	buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
	if (unlikely(!buffer)) {
		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
		return -ENOMEM;
	}
	hwev->ds_bts_vaddr = buffer;
	/* Update the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds->bts_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
	ds->bts_index = ds->bts_buffer_base;
	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
					max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
					(max / 16) * BTS_RECORD_SIZE;
	return 0;
}
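
/*
 * The interrupt threshold above sits one sixteenth of the buffer below the
 * absolute maximum, so the BTS PMI fires while roughly max/16 record slots
 * are still free.
 */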
static void release_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.bts)
		return;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds_clear_cea(cea, BTS_BUFFER_SIZE);
	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
	hwev->ds_bts_vaddr = NULL;
}
static int alloc_ds_buffer(int cpu)
{
	struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;

	memset(ds, 0, sizeof(*ds));
	per_cpu(cpu_hw_events, cpu).ds = ds;
	return 0;
}

static void release_ds_buffer(int cpu)
{
	per_cpu(cpu_hw_events, cpu).ds = NULL;
}
void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	for_each_possible_cpu(cpu)
		release_ds_buffer(cpu);

	for_each_possible_cpu(cpu) {
		/*
		 * Again, ignore errors from offline CPUs, they will no longer
		 * observe cpu_hw_events.ds and will not program the DS_AREA
		 * when they come up.
		 */
		fini_debug_store_on_cpu(cpu);
	}

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
	}
}
void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_possible_cpu(cpu) {
			/*
			 * Ignores wrmsr_on_cpu() errors for offline CPUs; they
			 * will get this call through intel_pmu_cpu_starting().
			 */
			init_debug_store_on_cpu(cpu);
		}
	}
}
/*
 * BTS
 */

struct bts_record {
	u64 from;
	u64 to;
	u64 flags;
};

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	if (config & ARCH_PERFMON_EVENTSEL_INT)
		debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}
void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}
int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *base, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	unsigned long skip = 0;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top  = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= base)
		return 0;

	memset(&regs, 0, sizeof(regs));

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * BTS leaks kernel addresses in branches across the cpl boundary,
	 * such as traps or system calls, so unless the user is asking for
	 * kernel tracing (and right now it's not possible), we'd need to
	 * filter them out. But first we need to count how many of those we
	 * have in the current batch. This is an extra O(n) pass, however,
	 * it's much faster than the other one especially considering that
	 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the
	 * alloc_bts_buffer()).
	 */
	for (at = base; at < top; at++) {
		/*
		 * Note that right now *this* BTS code only works if
		 * attr::exclude_kernel is set, but let's keep this extra
		 * check here in case that changes.
		 */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			skip++;
	}

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	rcu_read_lock();
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size *
			      (top - base - skip)))
		goto unlock;

	for (at = base; at < top; at++) {
		/* Filter out any records that contain kernel addresses. */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			continue;

		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
unlock:
	rcu_read_unlock();
	return 1;
}
static inline void intel_pmu_drain_pebs_buffer(void)
{
	struct pt_regs regs;

	x86_pmu.drain_pebs(&regs);
}
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_slm_pebs_event_constraints[] = {
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_glm_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),          /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),          /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),          /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),          /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),          /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),          /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),        /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),        /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),        /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),        /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),          /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),          /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),        /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),        /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),        /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),        /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_hsw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),          /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf),  /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_bdw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),          /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_skl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),  /* INST_RETIRED.PREC_DIST */
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),                      /* MEM_TRANS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_icl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL),  /* INST_RETIRED.PREC_DIST */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */

	INTEL_PLD_CONSTRAINT(0x1cd, 0xff),                     /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf),   /* MEM_INST_RETIRED.LOAD */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf),   /* MEM_INST_RETIRED.STORE */

	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */

	INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),               /* MEM_INST_RETIRED.* */

	/*
	 * Everything else is handled by PMU_FL_PEBS_ALL, because we
	 * need the full constraints from the main table.
	 */

	EVENT_CONSTRAINT_END
};
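
/*
 * Reading these tables: a constraint such as
 * INTEL_FLAGS_UEVENT_CONSTRAINT(0x11d0, 0xf) matches event select 0xd0 with
 * umask 0x11 and allows it on the counters in mask 0xf (general purpose
 * counters 0-3); the *_DATALA_* variants additionally tag the event with the
 * data linear address flags consumed by precise_datala_hsw() above.
 */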
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if (constraint_match(c, event->hw.config)) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	/*
	 * Extended PEBS support
	 * Makes the PEBS code search the normal constraints.
	 */
	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		return NULL;

	return &emptyconstraint;
}
/*
 * We need the sched_task callback even for per-cpu events when we use
 * the large interrupt threshold, such that we can provide PID and TID
 * to PEBS samples.
 */
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{
	if (cpuc->n_pebs == cpuc->n_pebs_via_pt)
		return false;

	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}
void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!sched_in && pebs_needs_sched_cb(cpuc))
		intel_pmu_drain_pebs_buffer();
}
static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
	struct debug_store *ds = cpuc->ds;
	int reserved;
	u64 threshold;

	if (cpuc->n_pebs_via_pt)
		return;

	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
	else
		reserved = x86_pmu.max_pebs_events;

	if (cpuc->n_pebs == cpuc->n_large_pebs) {
		threshold = ds->pebs_absolute_maximum -
			reserved * cpuc->pebs_record_size;
	} else {
		threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
	}

	ds->pebs_interrupt_threshold = threshold;
}
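
/*
 * When every PEBS event is a large-PEBS event, the threshold is pushed near
 * the end of the buffer (leaving one record of headroom per possible PEBS
 * counter), so a PMI only fires when the buffer is nearly full; otherwise it
 * sits one record past the base, i.e. a PMI per record.
 */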
static void adaptive_pebs_record_size_update(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 pebs_data_cfg = cpuc->pebs_data_cfg;
	int sz = sizeof(struct pebs_basic);

	if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
		sz += sizeof(struct pebs_meminfo);
	if (pebs_data_cfg & PEBS_DATACFG_GP)
		sz += sizeof(struct pebs_gprs);
	if (pebs_data_cfg & PEBS_DATACFG_XMMS)
		sz += sizeof(struct pebs_xmm);
	if (pebs_data_cfg & PEBS_DATACFG_LBRS)
		sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry);

	cpuc->pebs_record_size = sz;
}
#define PERF_PEBS_MEMINFO_TYPE	(PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC |   \
				PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \
				PERF_SAMPLE_TRANSACTION)
static u64 pebs_update_adaptive_cfg(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u64 sample_type = attr->sample_type;
	u64 pebs_data_cfg = 0;
	bool gprs, tsx_weight;

	if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
	    attr->precise_ip > 1)
		return pebs_data_cfg;

	if (sample_type & PERF_PEBS_MEMINFO_TYPE)
		pebs_data_cfg |= PEBS_DATACFG_MEMINFO;

	/*
	 * We need GPRs when:
	 * + user requested them
	 * + precise_ip < 2 for the non event IP
	 * + For RTM TSX weight we need GPRs for the abort code.
	 */
	gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
	       (attr->sample_regs_intr & PEBS_GP_REGS);

	tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) &&
		     ((attr->config & INTEL_ARCH_EVENT_MASK) ==
		      x86_pmu.rtm_abort_event);

	if (gprs || (attr->precise_ip < 2) || tsx_weight)
		pebs_data_cfg |= PEBS_DATACFG_GP;

	if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
	    (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK))
		pebs_data_cfg |= PEBS_DATACFG_XMMS;

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		/*
		 * For now always log all LBRs. Could configure this
		 * later.
		 */
		pebs_data_cfg |= PEBS_DATACFG_LBRS |
			((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
	}

	return pebs_data_cfg;
}
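
/*
 * Illustration: a request for PERF_SAMPLE_DATA_SRC plus PERF_SAMPLE_REGS_INTR
 * covering the general purpose registers ends up with
 * PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP here, and
 * adaptive_pebs_record_size_update() grows the record size accordingly.
 */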
static void
pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
		  struct perf_event *event, bool add)
{
	struct pmu *pmu = event->ctx->pmu;
	/*
	 * Make sure we get updated with the first PEBS
	 * event. It will trigger also during removal, but
	 * that does not hurt:
	 */
	bool update = cpuc->n_pebs == 1;

	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
		if (!needed_cb)
			perf_sched_cb_inc(pmu);
		else
			perf_sched_cb_dec(pmu);

		update = true;
	}

	/*
	 * The PEBS record doesn't shrink on pmu::del(). Doing so would require
	 * iterating all remaining PEBS events to reconstruct the config.
	 */
	if (x86_pmu.intel_cap.pebs_baseline && add) {
		u64 pebs_data_cfg;

		/* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
		if (cpuc->n_pebs == 1) {
			cpuc->pebs_data_cfg = 0;
			cpuc->pebs_record_size = sizeof(struct pebs_basic);
		}

		pebs_data_cfg = pebs_update_adaptive_cfg(event);

		/* Update pebs_record_size if new event requires more data. */
		if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
			cpuc->pebs_data_cfg |= pebs_data_cfg;
			adaptive_pebs_record_size_update();
			update = true;
		}
	}

	if (update)
		pebs_update_threshold(cpuc);
}
void intel_pmu_pebs_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs++;
	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
		cpuc->n_large_pebs++;
	if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
		cpuc->n_pebs_via_pt++;

	pebs_update_state(needed_cb, cpuc, event, true);
}
static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!is_pebs_pt(event))
		return;

	if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK))
		cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK;
}
static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct debug_store *ds = cpuc->ds;

	if (!is_pebs_pt(event))
		return;

	if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
		cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;

	cpuc->pebs_enabled |= PEBS_OUTPUT_PT;

	wrmsrl(MSR_RELOAD_PMC0 + hwc->idx, ds->pebs_event_reset[hwc->idx]);
}
void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct debug_store *ds = cpuc->ds;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;

	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled |= 1ULL << 63;

	if (x86_pmu.intel_cap.pebs_baseline) {
		hwc->config |= ICL_EVENTSEL_ADAPTIVE;
		if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
			wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
			cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
		}
	}

	/*
	 * Use auto-reload if possible to save a MSR write in the PMI.
	 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
	 */
	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		unsigned int idx = hwc->idx;

		if (idx >= INTEL_PMC_IDX_FIXED)
			idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
		ds->pebs_event_reset[idx] =
			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
	} else {
		ds->pebs_event_reset[hwc->idx] = 0;
	}

	intel_pmu_pebs_via_pt_enable(event);
}
void intel_pmu_pebs_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs--;
	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
		cpuc->n_large_pebs--;
	if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
		cpuc->n_pebs_via_pt--;

	pebs_update_state(needed_cb, cpuc, event, false);
}
void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (cpuc->n_pebs == cpuc->n_large_pebs &&
	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
		intel_pmu_drain_pebs_buffer();

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
	    (x86_pmu.version < 5))
		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled &= ~(1ULL << 63);

	intel_pmu_pebs_via_pt_disable(event);

	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}
void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;
	void *kaddr;
	int size;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PEBS_FIXUP_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		set_linear_ip(regs, from);
		return 1;
	}

	size = ip - to;
	if (!kernel_ip(ip)) {
		int bytes;
		u8 *buf = this_cpu_read(insn_buffer);

		/* 'size' must fit our buffer, see above */
		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
		if (bytes != 0)
			return 0;

		kaddr = buf;
	} else {
		kaddr = (void *)to;
	}

	do {
		struct insn insn;

		old_to = to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, size, is_64bit);
		insn_get_length(&insn);
		/*
		 * Make sure there was not a problem decoding the
		 * instruction and getting the length. This is
		 * doubly important because we have an infinite
		 * loop if insn.length=0.
		 */
		if (!insn.length)
			break;

		to += insn.length;
		kaddr += insn.length;
		size -= insn.length;
	} while (to < ip);

	if (to == ip) {
		set_linear_ip(regs, old_to);
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}
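
/*
 * Note on the fixup above: the LBR 'to' address marks the start of the
 * sampled basic block; decoding forward one instruction at a time finds the
 * instruction just before the off-by-one PEBS IP, i.e. the one that actually
 * triggered the event.
 */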
static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
{
	if (tsx_tuning) {
		union hsw_tsx_tuning tsx = { .value = tsx_tuning };
		return tsx.cycles_last_block;
	}
	return 0;
}

static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
{
	u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;

	/* For RTM XABORTs also log the abort code from AX */
	if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
		txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
	return txn;
}
static inline u64 get_pebs_status(void *n)
{
	if (x86_pmu.intel_cap.pebs_format < 4)
		return ((struct pebs_record_nhm *)n)->status;
	return ((struct pebs_basic *)n)->applicable_counters;
}
#define PERF_X86_EVENT_PEBS_HSW_PREC \
		(PERF_X86_EVENT_PEBS_ST_HSW | \
		 PERF_X86_EVENT_PEBS_LD_HSW | \
		 PERF_X86_EVENT_PEBS_NA_HSW)

static u64 get_data_src(struct perf_event *event, u64 aux)
{
	u64 val = PERF_MEM_NA;
	int fl = event->hw.flags;
	bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);

	if (fl & PERF_X86_EVENT_PEBS_LDLAT)
		val = load_latency_data(aux);
	else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
		val = precise_datala_hsw(event, aux);
	else if (fst)
		val = precise_store_data(aux);
	return val;
}
static void setup_pebs_fixed_sample_data(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	/*
	 * We cast to the biggest pebs_record but are careful not to
	 * unconditionally access the 'extra' entries.
	 */
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_record_skl *pebs = __pebs;
	u64 sample_type;
	int fll;

	if (pebs == NULL)
		return;

	sample_type = event->attr.sample_type;
	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;

	perf_sample_data_init(data, 0, event->hw.last_period);

	data->period = event->hw.last_period;

	/*
	 * Use latency for weight (only avail with PEBS-LL)
	 */
	if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
		data->weight = pebs->lat;

	/*
	 * data.data_src encodes the data source
	 */
	if (sample_type & PERF_SAMPLE_DATA_SRC)
		data->data_src.val = get_data_src(event, pebs->dse);

	/*
	 * We must however always use iregs for the unwinder to stay sane; the
	 * record BP,SP,IP can point into thin air when the record is from a
	 * previous PMI context or an (I)RET happened between the record and
	 * PMI.
	 */
	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		data->callchain = perf_callchain(event, iregs);

	/*
	 * We use the interrupt regs as a base because the PEBS record does not
	 * contain a full regs set, specifically it seems to lack segment
	 * descriptors, which get used by things like user_mode().
	 *
	 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
	 */
	*regs = *iregs;

	/*
	 * Initialize regs->flags from PEBS,
	 * Clear exact bit (which uses x86 EFLAGS Reserved bit 3),
	 * i.e., do not rely on it being zero:
	 */
	regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT;

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		regs->ax = pebs->ax;
		regs->bx = pebs->bx;
		regs->cx = pebs->cx;
		regs->dx = pebs->dx;
		regs->si = pebs->si;
		regs->di = pebs->di;

		regs->bp = pebs->bp;
		regs->sp = pebs->sp;

#ifndef CONFIG_X86_32
		regs->r8 = pebs->r8;
		regs->r9 = pebs->r9;
		regs->r10 = pebs->r10;
		regs->r11 = pebs->r11;
		regs->r12 = pebs->r12;
		regs->r13 = pebs->r13;
		regs->r14 = pebs->r14;
		regs->r15 = pebs->r15;
#endif
	}

	if (event->attr.precise_ip > 1) {
		/*
		 * Haswell and later processors have an 'eventing IP'
		 * (real IP) which fixes the off-by-1 skid in hardware.
		 * Use it when precise_ip >= 2 :
		 */
		if (x86_pmu.intel_cap.pebs_format >= 2) {
			set_linear_ip(regs, pebs->real_ip);
			regs->flags |= PERF_EFLAGS_EXACT;
		} else {
			/* Otherwise, use PEBS off-by-1 IP: */
			set_linear_ip(regs, pebs->ip);

			/*
			 * With precise_ip >= 2, try to fix up the off-by-1 IP
			 * using the LBR. If successful, the fixup function
			 * corrects regs->ip and calls set_linear_ip() on regs:
			 */
			if (intel_pmu_pebs_fixup_ip(regs))
				regs->flags |= PERF_EFLAGS_EXACT;
		}
	} else {
		/*
		 * When precise_ip == 1, return the PEBS off-by-1 IP,
		 * no fixup attempted:
		 */
		set_linear_ip(regs, pebs->ip);
	}

	if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
	    x86_pmu.intel_cap.pebs_format >= 1)
		data->addr = pebs->dla;

	if (x86_pmu.intel_cap.pebs_format >= 2) {
		/* Only set the TSX weight when no memory weight. */
		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
			data->weight = intel_get_tsx_weight(pebs->tsx_tuning);

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
							      pebs->ax);
	}

	/*
	 * v3 supplies an accurate time stamp, so we use that
	 * for the time stamp.
	 *
	 * We can only do this for the default trace clock.
	 */
	if (x86_pmu.intel_cap.pebs_format >= 3 &&
	    event->attr.use_clockid == 0)
		data->time = native_sched_clock_from_tsc(pebs->tsc);

	if (has_branch_stack(event))
		data->br_stack = &cpuc->lbr_stack;
}
static void adaptive_pebs_save_regs(struct pt_regs *regs,
				    struct pebs_gprs *gprs)
{
	regs->ax = gprs->ax;
	regs->bx = gprs->bx;
	regs->cx = gprs->cx;
	regs->dx = gprs->dx;
	regs->si = gprs->si;
	regs->di = gprs->di;
	regs->bp = gprs->bp;
	regs->sp = gprs->sp;
#ifndef CONFIG_X86_32
	regs->r8 = gprs->r8;
	regs->r9 = gprs->r9;
	regs->r10 = gprs->r10;
	regs->r11 = gprs->r11;
	regs->r12 = gprs->r12;
	regs->r13 = gprs->r13;
	regs->r14 = gprs->r14;
	regs->r15 = gprs->r15;
#endif
}
/*
 * With adaptive PEBS the layout depends on what fields are configured.
 */

static void setup_pebs_adaptive_sample_data(struct perf_event *event,
					    struct pt_regs *iregs, void *__pebs,
					    struct perf_sample_data *data,
					    struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_basic *basic = __pebs;
	void *next_record = basic + 1;
	u64 sample_type;
	u64 format_size;
	struct pebs_meminfo *meminfo = NULL;
	struct pebs_gprs *gprs = NULL;
	struct x86_perf_regs *perf_regs;

	if (basic == NULL)
		return;

	perf_regs = container_of(regs, struct x86_perf_regs, regs);
	perf_regs->xmm_regs = NULL;

	sample_type = event->attr.sample_type;
	format_size = basic->format_size;
	perf_sample_data_init(data, 0, event->hw.last_period);
	data->period = event->hw.last_period;

	if (event->attr.use_clockid == 0)
		data->time = native_sched_clock_from_tsc(basic->tsc);

	/*
	 * We must however always use iregs for the unwinder to stay sane; the
	 * record BP,SP,IP can point into thin air when the record is from a
	 * previous PMI context or an (I)RET happened between the record and
	 * PMI.
	 */
	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		data->callchain = perf_callchain(event, iregs);

	*regs = *iregs;
	/* The ip in basic is EventingIP */
	set_linear_ip(regs, basic->ip);
	regs->flags = PERF_EFLAGS_EXACT;

	/*
	 * The record for MEMINFO is in front of GP
	 * But PERF_SAMPLE_TRANSACTION needs gprs->ax.
	 * Save the pointer here but process later.
	 */
	if (format_size & PEBS_DATACFG_MEMINFO) {
		meminfo = next_record;
		next_record = meminfo + 1;
	}

	if (format_size & PEBS_DATACFG_GP) {
		gprs = next_record;
		next_record = gprs + 1;

		if (event->attr.precise_ip < 2) {
			set_linear_ip(regs, gprs->ip);
			regs->flags &= ~PERF_EFLAGS_EXACT;
		}

		if (sample_type & PERF_SAMPLE_REGS_INTR)
			adaptive_pebs_save_regs(regs, gprs);
	}

	if (format_size & PEBS_DATACFG_MEMINFO) {
		if (sample_type & PERF_SAMPLE_WEIGHT)
			data->weight = meminfo->latency ?:
				intel_get_tsx_weight(meminfo->tsx_tuning);

		if (sample_type & PERF_SAMPLE_DATA_SRC)
			data->data_src.val = get_data_src(event, meminfo->aux);

		if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
			data->addr = meminfo->address;

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
							      gprs ? gprs->ax : 0);
	}

	if (format_size & PEBS_DATACFG_XMMS) {
		struct pebs_xmm *xmm = next_record;

		next_record = xmm + 1;
		perf_regs->xmm_regs = xmm->xmm;
	}

	if (format_size & PEBS_DATACFG_LBRS) {
		struct pebs_lbr *lbr = next_record;
		int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
				& 0xff) + 1;
		next_record = next_record + num_lbr * sizeof(struct pebs_lbr_entry);

		if (has_branch_stack(event)) {
			intel_pmu_store_pebs_lbrs(lbr);
			data->br_stack = &cpuc->lbr_stack;
		}
	}

	WARN_ONCE(next_record != __pebs + (format_size >> 48),
			"PEBS record size %llu, expected %llu, config %llx\n",
			format_size >> 48,
			(u64)(next_record - __pebs),
			basic->format_size);
}
static inline void *
get_next_pebs_record_by_bit(void *base, void *top, int bit)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	void *at;
	u64 pebs_status;

	/*
	 * fmt0 does not have a status bitfield (does not use
	 * perf_record_nhm format)
	 */
	if (x86_pmu.intel_cap.pebs_format < 1)
		return base;

	if (base == NULL)
		return NULL;

	for (at = base; at < top; at += cpuc->pebs_record_size) {
		unsigned long status = get_pebs_status(at);

		if (test_bit(bit, (unsigned long *)&status)) {
			/* PEBS v3 has accurate status bits */
			if (x86_pmu.intel_cap.pebs_format >= 3)
				return at;

			if (status == (1 << bit))
				return at;

			/* clear non-PEBS bit and re-check */
			pebs_status = status & cpuc->pebs_enabled;
			pebs_status &= PEBS_COUNTER_MASK;
			if (pebs_status == (1 << bit))
				return at;
		}
	}
	return NULL;
}
void intel_pmu_auto_reload_read(struct perf_event *event)
{
	WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));

	perf_pmu_disable(event->pmu);
	intel_pmu_drain_pebs_buffer();
	perf_pmu_enable(event->pmu);
}
/*
 * Special variant of intel_pmu_save_and_restart() for auto-reload.
 */
static int
intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 period = hwc->sample_period;
	u64 prev_raw_count, new_raw_count;
	s64 new, old;

	WARN_ON(!period);

	/*
	 * drain_pebs() only happens when the PMU is disabled.
	 */
	WARN_ON(this_cpu_read(cpu_hw_events.enabled));

	prev_raw_count = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new_raw_count);
	local64_set(&hwc->prev_count, new_raw_count);

	/*
	 * Since the counter increments a negative counter value and
	 * overflows on the sign switch, giving the interval:
	 *
	 *   [-period, 0]
	 *
	 * the difference between two consecutive reads is:
	 *
	 *   A) value2 - value1;
	 *      when no overflows have happened in between,
	 *
	 *   B) (0 - value1) + (value2 - (-period));
	 *      when one overflow happened in between,
	 *
	 *   C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
	 *      when @n overflows happened in between.
	 *
	 * Here A) is the obvious difference, B) is the extension to the
	 * discrete interval, where the first term is to the top of the
	 * interval and the second term is from the bottom of the next
	 * interval and C) the extension to multiple intervals, where the
	 * middle term is the whole intervals covered.
	 *
	 * An equivalent of C, by reduction, is:
	 *
	 *   value2 - value1 + n * period
	 */
	new = ((s64)(new_raw_count << shift) >> shift);
	old = ((s64)(prev_raw_count << shift) >> shift);
	local64_add(new - old + count * period, &event->count);

	perf_event_update_userpage(event);

	return 0;
}
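
/*
 * Worked example with illustrative numbers: period = 0x1000, so the counter
 * reloads to -0x1000. If value1 == -0x30, value2 == -0xfd0 and one reload
 * happened in between (count == 1), case B) gives
 * (0 - (-0x30)) + ((-0xfd0) - (-0x1000)) = 0x60, which matches
 * value2 - value1 + 1 * period.
 */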
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs,
				   void *base, void *top,
				   int bit, int count,
				   void (*setup_sample)(struct perf_event *,
						struct pt_regs *,
						void *,
						struct perf_sample_data *,
						struct pt_regs *))
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct x86_perf_regs perf_regs;
	struct pt_regs *regs = &perf_regs.regs;
	void *at = get_next_pebs_record_by_bit(base, top, bit);

	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		/*
		 * Now, auto-reload is only enabled in fixed period mode.
		 * The reload value is always hwc->sample_period.
		 * May need to change it, if auto-reload is enabled in
		 * freq mode later.
		 */
		intel_pmu_save_and_restart_reload(event, count);
	} else if (!intel_pmu_save_and_restart(event))
		return;

	while (count > 1) {
		setup_sample(event, iregs, at, &data, regs);
		perf_event_output(event, &data, regs);
		at += cpuc->pebs_record_size;
		at = get_next_pebs_record_by_bit(at, top, bit);
		count--;
	}

	setup_sample(event, iregs, at, &data, regs);

	/*
	 * All but the last records are processed.
	 * The last one is left to be able to call the overflow handler.
	 */
	if (perf_event_overflow(event, &data, regs)) {
		x86_pmu_stop(event, 0);
		return;
	}
}
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0) {
		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
			intel_pmu_save_and_restart_reload(event, 0);
		return;
	}

	__intel_pmu_pebs_event(event, iregs, at, top, 0, n,
			       setup_pebs_fixed_sample_data);
}
static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
{
	struct perf_event *event;
	int bit;

	/*
	 * The drain_pebs() could be called twice in a short period
	 * for auto-reload events in pmu::read(). No overflows have
	 * happened in between.
	 * It needs to call intel_pmu_save_and_restart_reload() to
	 * update the event->count for this case.
	 */
	for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
		event = cpuc->events[bit];
		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
			intel_pmu_save_and_restart_reload(event, 0);
	}
}
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event;
	void *base, *at, *top;
	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	int bit, i, size;
	u64 mask;

	if (!x86_pmu.pebs_active)
		return;

	base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	mask = (1ULL << x86_pmu.max_pebs_events) - 1;
	size = x86_pmu.max_pebs_events;
	if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
		mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
		size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
	}

	if (unlikely(base >= top)) {
		intel_pmu_pebs_event_update_no_drain(cpuc, size);
		return;
	}

	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
		struct pebs_record_nhm *p = at;
		u64 pebs_status;

		pebs_status = p->status & cpuc->pebs_enabled;
		pebs_status &= mask;

		/* PEBS v3 has more accurate status bits */
		if (x86_pmu.intel_cap.pebs_format >= 3) {
			for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
				counts[bit]++;

			continue;
		}

		/*
		 * On some CPUs the PEBS status can be zero when PEBS is
		 * racing with clearing of GLOBAL_STATUS.
		 *
		 * Normally we would drop that record, but in the
		 * case when there is only a single active PEBS event
		 * we can assume it's for that event.
		 */
		if (!pebs_status && cpuc->pebs_enabled &&
		    !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
			pebs_status = cpuc->pebs_enabled;

		bit = find_first_bit((unsigned long *)&pebs_status,
					x86_pmu.max_pebs_events);
		if (bit >= x86_pmu.max_pebs_events)
			continue;

		/*
		 * The PEBS hardware does not deal well with the situation
		 * when events happen near to each other and multiple bits
		 * are set. But it should happen rarely.
		 *
		 * If these events include one PEBS and multiple non-PEBS
		 * events, it doesn't impact the PEBS record. The record will
		 * be handled normally. (slow path)
		 *
		 * If these events include two or more PEBS events, the
		 * records for the events can be collapsed into a single
		 * one, and it's not possible to reconstruct all events
		 * that caused the PEBS record. It's called collision.
		 * If a collision happened, the record will be dropped.
		 */
		if (p->status != (1ULL << bit)) {
			for_each_set_bit(i, (unsigned long *)&pebs_status, size)
				error[i]++;
			continue;
		}

		counts[bit]++;
	}

	for_each_set_bit(bit, (unsigned long *)&mask, size) {
		if ((counts[bit] == 0) && (error[bit] == 0))
			continue;

		event = cpuc->events[bit];
		if (WARN_ON_ONCE(!event))
			continue;

		if (WARN_ON_ONCE(!event->attr.precise_ip))
			continue;

		/* log dropped samples number */
		if (error[bit]) {
			perf_log_lost_samples(event, error[bit]);

			if (perf_event_account_interrupt(event))
				x86_pmu_stop(event, 0);
		}

		if (counts[bit]) {
			__intel_pmu_pebs_event(event, iregs, base,
					       top, bit, counts[bit],
					       setup_pebs_fixed_sample_data);
		}
	}
}
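
/*
 * Note the two-pass structure above: the record walk only accumulates
 * per-counter counts[] and collision drops in error[]; the second loop then
 * reports lost samples and emits the surviving records per event.
 */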
static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
{
	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event;
	void *base, *at, *top;
	int bit, size;
	u64 mask;

	if (!x86_pmu.pebs_active)
		return;

	base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_basic *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
	       (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
	size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;

	if (unlikely(base >= top)) {
		intel_pmu_pebs_event_update_no_drain(cpuc, size);
		return;
	}

	for (at = base; at < top; at += cpuc->pebs_record_size) {
		u64 pebs_status;

		pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
		pebs_status &= mask;

		for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
			counts[bit]++;
	}

	for_each_set_bit(bit, (unsigned long *)&mask, size) {
		if (counts[bit] == 0)
			continue;

		event = cpuc->events[bit];
		if (WARN_ON_ONCE(!event))
			continue;

		if (WARN_ON_ONCE(!event->attr.precise_ip))
			continue;

		__intel_pmu_pebs_event(event, iregs, base,
				       top, bit, counts[bit],
				       setup_pebs_adaptive_sample_data);
	}
}
/*
 * BTS, PEBS probe and setup
 */

void __init intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
	if (x86_pmu.version <= 4)
		x86_pmu.pebs_no_isolation = 1;

	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
		char *pebs_qual = "";
		int format = x86_pmu.intel_cap.pebs_format;

		if (format < 4)
			x86_pmu.intel_cap.pebs_baseline = 0;

		switch (format) {
		case 0:
			pr_cont("PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			/*
			 * Using >PAGE_SIZE buffers makes the WRMSR to
			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
			 * mysteriously hang on Core2.
			 *
			 * As a workaround, we don't do this.
			 */
			x86_pmu.pebs_buffer_size = PAGE_SIZE;
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			pr_cont("PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 2:
			pr_cont("PEBS fmt2%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 3:
			pr_cont("PEBS fmt3%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_skl);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
			break;

		case 4:
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
			x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
			if (x86_pmu.intel_cap.pebs_baseline) {
				x86_pmu.large_pebs_flags |=
					PERF_SAMPLE_BRANCH_STACK |
					PERF_SAMPLE_TIME;
				x86_pmu.flags |= PMU_FL_PEBS_ALL;
				pebs_qual = "-baseline";
				x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
			} else {
				/* Only basic record supported */
				x86_pmu.large_pebs_flags &=
					~(PERF_SAMPLE_ADDR |
					  PERF_SAMPLE_TIME |
					  PERF_SAMPLE_DATA_SRC |
					  PERF_SAMPLE_TRANSACTION |
					  PERF_SAMPLE_REGS_USER |
					  PERF_SAMPLE_REGS_INTR);
			}
			pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);

			if (x86_pmu.intel_cap.pebs_output_pt_available) {
				pr_cont("PEBS-via-PT, ");
				x86_get_pmu()->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
			}

			break;

		default:
			pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}
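
/*
 * Re-program MSR_IA32_DS_AREA from the saved per-CPU debug_store pointer,
 * for paths where the MSR contents were lost, e.g. across suspend/resume;
 * without this, BTS/PEBS would write to a stale address.
 */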
void perf_restore_debug_store(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
}