// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/cpu_entry_area.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>

#include "../perf_event.h"
/* Waste a full page so it can be mapped into the cpu_entry_area */
DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define PEBS_FIXUP_SIZE		PAGE_SIZE
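/*
 * Note: a BTS record (see struct bts_record in intel_pmu_drain_bts_buffer())
 * is three u64 fields (from, to, flags), which is where the 24-byte record
 * size comes from. PEBS_FIXUP_SIZE bounds how many instruction bytes the
 * PEBS fixup path may copy when rewinding to the precise IP.
 */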
/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */
union intel_x86_pebs_dse {
	u64 val;
	struct {
		unsigned int ld_dse:4;
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_reserved:26;
	};
	struct {
		unsigned int st_l1d_hit:1;
		unsigned int st_reserved1:3;
		unsigned int st_stlb_miss:1;
		unsigned int st_locked:1;
		unsigned int st_reserved2:26;
	};
};
/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information
 */
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define LEVEL(x) P(LVLNUM, x)
#define REM P(REMOTE, REMOTE)
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
/* Version for Sandy Bridge and later */
static u64 pebs_data_source[] = {
	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),       /* 0x00:ukn L3 */
	OP_LH | P(LVL, L1)  | LEVEL(L1) | P(SNOOP, NONE),            /* 0x01: L1 local */
	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE),           /* 0x02: LFB hit */
	OP_LH | P(LVL, L2)  | LEVEL(L2) | P(SNOOP, NONE),            /* 0x03: L2 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, NONE),            /* 0x04: L3 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, MISS),            /* 0x05: L3 hit, snoop miss */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HIT),             /* 0x06: L3 hit, snoop hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HITM),            /* 0x07: L3 hit, snoop hitm */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x08: L3 miss snoop hit */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, HIT),       /* 0x0a: L3 miss, shared */
	OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x0b: L3 miss, shared */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | SNOOP_NONE_MISS,     /* 0x0c: L3 miss, excl */
	OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE),            /* 0x0e: I/O */
	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE),            /* 0x0f: uncached */
};
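/*
 * Example: a load-latency record whose low four dse bits are 0x06 maps to
 * OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT) above, i.e. an L3 hit that
 * was serviced by a snoop hit.
 */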
/* Patch up minor differences in the bits */
void __init intel_pmu_pebs_data_source_nhm(void)
{
	pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
	pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
	pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
}
void __init intel_pmu_pebs_data_source_skl(bool pmem)
{
	u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);

	pebs_data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
	pebs_data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
	pebs_data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
	pebs_data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
	pebs_data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
}
static u64 precise_store_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);

	dse.val = status;

	/*
	 * bit 4: TLB access
	 * 1 = stored missed 2nd level TLB
	 *
	 * so it either hit the walker or the OS
	 * otherwise hit 2nd level TLB
	 */
	if (dse.st_stlb_miss)
		val |= P(TLB, MISS);
	else
		val |= P(TLB, HIT);

	/*
	 * bit 0: hit L1 data cache
	 * if not set, then all we know is that
	 * it missed L1D
	 */
	if (dse.st_l1d_hit)
		val |= P(LVL, HIT);
	else
		val |= P(LVL, MISS);

	/*
	 * bit 5: Locked prefix
	 */
	if (dse.st_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
	union perf_mem_data_src dse;

	dse.val = PERF_MEM_NA;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
		dse.mem_op = PERF_MEM_OP_STORE;
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
		dse.mem_op = PERF_MEM_OP_LOAD;

	/*
	 * L1 info only valid for following events:
	 *
	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
	 * MEM_UOPS_RETIRED.LOCK_STORES
	 * MEM_UOPS_RETIRED.SPLIT_STORES
	 * MEM_UOPS_RETIRED.ALL_STORES
	 */
	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
		if (status & 1)
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		else
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
	}
	return dse.val;
}
static u64 load_latency_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val;

	dse.val = status;

	/*
	 * use the mapping table for bit 0-3
	 */
	val = pebs_data_source[dse.ld_dse];

	/*
	 * Nehalem models do not support TLB, Lock infos
	 */
	if (x86_pmu.pebs_no_tlb) {
		val |= P(TLB, NA) | P(LOCK, NA);
		return val;
	}

	/*
	 * bit 4: TLB access
	 * 0 = did not miss 2nd level TLB
	 * 1 = missed 2nd level TLB
	 */
	if (dse.ld_stlb_miss)
		val |= P(TLB, MISS) | P(TLB, L2);
	else
		val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);

	/*
	 * bit 5: locked prefix
	 */
	if (dse.ld_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
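/*
 * Worked example: status = 0x35 decodes as ld_dse = 0x5 (L3 hit, snoop miss),
 * ld_stlb_miss = 1 and ld_locked = 1, so load_latency_data() returns
 * pebs_data_source[5] | P(TLB, MISS) | P(TLB, L2) | P(LOCK, LOCKED).
 */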
struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};
struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};
/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
};
union hsw_tsx_tuning {
	struct {
		u32 cycles_last_block     : 32,
		    hle_abort             : 1,
		    rtm_abort             : 1,
		    instruction_abort     : 1,
		    non_instruction_abort : 1,
		    retry                 : 1,
		    data_conflict         : 1,
		    capacity_writes       : 1,
		    capacity_reads        : 1;
	};
	u64 value;
};

#define PEBS_HSW_TSX_FLAGS	0xff00000000ULL
/* Same as HSW, plus TSC */

struct pebs_record_skl {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
	u64 tsc;
};
void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}
void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
static DEFINE_PER_CPU(void *, insn_buffer);
static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long)cea;
	phys_addr_t pa;
	size_t msz = 0;

	pa = virt_to_phys(addr);

	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, pa, prot);

	/*
	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
	 * all TLB entries for it.
	 */
	flush_tlb_kernel_range(start, start + size);
}
static void ds_clear_cea(void *cea, size_t size)
{
	unsigned long start = (unsigned long)cea;
	size_t msz = 0;

	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);

	flush_tlb_kernel_range(start, start + size);
}
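/*
 * Background note: the DS buffers are not handed to the hardware via their
 * direct-map address but via an alias inside the cpu_entry_area, which stays
 * mapped regardless of which page tables are active (e.g. with page-table
 * isolation), so BTS/PEBS records can always be written. The helpers above
 * install and tear down that alias.
 */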
static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
{
	unsigned int order = get_order(size);
	int node = cpu_to_node(cpu);
	struct page *page;

	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
	return page ? page_address(page) : NULL;
}

static void dsfree_pages(const void *buffer, size_t size)
{
	if (buffer)
		free_pages((unsigned long)buffer, get_order(size));
}
static int alloc_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	size_t bsiz = x86_pmu.pebs_buffer_size;
	int max, node = cpu_to_node(cpu);
	void *buffer, *insn_buff, *cea;

	if (!x86_pmu.pebs)
		return 0;

	buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
	if (unlikely(!buffer))
		return -ENOMEM;

	/*
	 * HSW+ already provides us the eventing ip; no need to allocate this
	 * buffer then.
	 */
	if (x86_pmu.intel_cap.pebs_format < 2) {
		insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
		if (!insn_buff) {
			dsfree_pages(buffer, bsiz);
			return -ENOMEM;
		}
		per_cpu(insn_buffer, cpu) = insn_buff;
	}
	hwev->ds_pebs_vaddr = buffer;
	/* Update the cpu entry area mapping */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds->pebs_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
	ds->pebs_index = ds->pebs_buffer_base;
	max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
	ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
	return 0;
}
static void release_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.pebs)
		return;

	kfree(per_cpu(insn_buffer, cpu));
	per_cpu(insn_buffer, cpu) = NULL;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
	hwev->ds_pebs_vaddr = NULL;
}
static int alloc_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	void *buffer, *cea;
	int max;

	if (!x86_pmu.bts)
		return 0;

	buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
	if (unlikely(!buffer)) {
		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
		return -ENOMEM;
	}
	hwev->ds_bts_vaddr = buffer;
	/* Update the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds->bts_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
	ds->bts_index = ds->bts_buffer_base;
	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
					max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
					(max / 16) * BTS_RECORD_SIZE;
	return 0;
}
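/*
 * Worked numbers, assuming 4K pages and the default 64K BTS buffer:
 * max = 65536 / 24 = 2730 records, and the interrupt threshold is set
 * max/16 = 170 records below the absolute maximum, i.e. a PMI is requested
 * once roughly 15/16 of the buffer has filled.
 */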
static void release_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.bts)
		return;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds_clear_cea(cea, BTS_BUFFER_SIZE);
	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
	hwev->ds_bts_vaddr = NULL;
}
static int alloc_ds_buffer(int cpu)
{
	struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;

	memset(ds, 0, sizeof(*ds));
	per_cpu(cpu_hw_events, cpu).ds = ds;
	return 0;
}

static void release_ds_buffer(int cpu)
{
	per_cpu(cpu_hw_events, cpu).ds = NULL;
}
void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	for_each_possible_cpu(cpu)
		release_ds_buffer(cpu);

	for_each_possible_cpu(cpu) {
		/*
		 * Again, ignore errors from offline CPUs, they will no longer
		 * observe cpu_hw_events.ds and not program the DS_AREA when
		 * they come up.
		 */
		fini_debug_store_on_cpu(cpu);
	}

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
	}
}
void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_possible_cpu(cpu) {
			/*
			 * Ignores wrmsr_on_cpu() errors for offline CPUs they
			 * will get this call through intel_pmu_cpu_starting().
			 */
			init_debug_store_on_cpu(cpu);
		}
	}
}
struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	if (config & ARCH_PERFMON_EVENTSEL_INT)
		debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}
void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}
int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64 from;
		u64 to;
		u64 flags;
	};
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *base, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	unsigned long skip = 0;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top  = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= base)
		return 0;

	memset(&regs, 0, sizeof(regs));

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * BTS leaks kernel addresses in branches across the cpl boundary,
	 * such as traps or system calls, so unless the user is asking for
	 * kernel tracing (and right now it's not possible), we'd need to
	 * filter them out. But first we need to count how many of those we
	 * have in the current batch. This is an extra O(n) pass, however,
	 * it's much faster than the other one especially considering that
	 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the
	 * alloc_bts_buffer()).
	 */
	for (at = base; at < top; at++) {
		/*
		 * Note that right now *this* BTS code only works if
		 * attr::exclude_kernel is set, but let's keep this extra
		 * check here in case that changes.
		 */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			skip++;
	}

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, &data, event,
			      header.size * (top - base - skip)))
		return 1;

	for (at = base; at < top; at++) {
		/* Filter out any records that contain kernel addresses. */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			continue;

		data.ip   = at->from;
		data.addr = at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
	return 1;
}
static inline void intel_pmu_drain_pebs_buffer(void)
{
	struct perf_sample_data data;

	x86_pmu.drain_pebs(NULL, &data);
}
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_slm_pebs_event_constraints[] = {
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_glm_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),            /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),      /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf),   /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),      /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),            /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),      /* BR_INST_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf),   /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),      /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf),   /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),      /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),      /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),            /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),      /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf),   /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),      /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),            /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),      /* BR_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf),      /* BR_MISP_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),      /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf),   /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),      /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),      /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2),   /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),            /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),            /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),          /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),          /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),          /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),          /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2),   /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),            /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),            /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),          /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),          /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),          /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),          /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_hsw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2),   /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),            /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf),  /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_bdw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2),   /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),            /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_skl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),    /* INST_RETIRED.PREC_DIST */
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),             /* MEM_TRANS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_icl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL),  /* INST_RETIRED.PREC_DIST */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */

	INTEL_PLD_CONSTRAINT(0x1cd, 0xff),                     /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf),   /* MEM_INST_RETIRED.LOAD */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf),   /* MEM_INST_RETIRED.STORE */

	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */

	INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),               /* MEM_INST_RETIRED.* */

	/*
	 * Everything else is handled by PMU_FL_PEBS_ALL, because we
	 * need the full constraints from the main table.
	 */

	EVENT_CONSTRAINT_END
};
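/*
 * In the tables above the second constraint argument is the bitmask of
 * counters the event may use as a PEBS event: e.g. 0x1 restricts it to PMC0,
 * 0xf allows PMC0-3, and 0x100000000ULL places INST_RETIRED.PREC_DIST on the
 * first fixed counter on Icelake.
 */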
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if (constraint_match(c, event->hw.config)) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	/*
	 * Extended PEBS support
	 * Makes the PEBS code search the normal constraints.
	 */
	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		return NULL;

	return &emptyconstraint;
}
/*
 * We need the sched_task callback even for per-cpu events when we use
 * the large interrupt threshold, such that we can provide PID and TID
 * to PEBS samples.
 */
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{
	if (cpuc->n_pebs == cpuc->n_pebs_via_pt)
		return false;

	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}
void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!sched_in && pebs_needs_sched_cb(cpuc))
		intel_pmu_drain_pebs_buffer();
}
static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
	struct debug_store *ds = cpuc->ds;
	u64 threshold;
	int reserved;

	if (cpuc->n_pebs_via_pt)
		return;

	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
	else
		reserved = x86_pmu.max_pebs_events;

	if (cpuc->n_pebs == cpuc->n_large_pebs) {
		threshold = ds->pebs_absolute_maximum -
			reserved * cpuc->pebs_record_size;
	} else {
		threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
	}

	ds->pebs_interrupt_threshold = threshold;
}
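/*
 * In other words: for "large PEBS" (every active PEBS event uses the big
 * threshold) the PMI is deferred until the buffer is nearly full, leaving one
 * record slot per possibly-active counter; otherwise the threshold sits one
 * record past the base so every record raises a PMI.
 */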
static void adaptive_pebs_record_size_update(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 pebs_data_cfg = cpuc->pebs_data_cfg;
	int sz = sizeof(struct pebs_basic);

	if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
		sz += sizeof(struct pebs_meminfo);
	if (pebs_data_cfg & PEBS_DATACFG_GP)
		sz += sizeof(struct pebs_gprs);
	if (pebs_data_cfg & PEBS_DATACFG_XMMS)
		sz += sizeof(struct pebs_xmm);
	if (pebs_data_cfg & PEBS_DATACFG_LBRS)
		sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry);

	cpuc->pebs_record_size = sz;
}
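/*
 * Illustrative example (sizes follow the adaptive PEBS structures declared in
 * perf_event.h): a config of PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP yields
 * sizeof(pebs_basic) + sizeof(pebs_meminfo) + sizeof(pebs_gprs)
 * = 32 + 32 + 144 = 208 bytes per record.
 */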
#define PERF_PEBS_MEMINFO_TYPE	(PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC |   \
				PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \
				PERF_SAMPLE_TRANSACTION |		     \
				PERF_SAMPLE_DATA_PAGE_SIZE)
static u64 pebs_update_adaptive_cfg(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u64 sample_type = attr->sample_type;
	u64 pebs_data_cfg = 0;
	bool gprs, tsx_weight;

	if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
	    attr->precise_ip > 1)
		return pebs_data_cfg;

	if (sample_type & PERF_PEBS_MEMINFO_TYPE)
		pebs_data_cfg |= PEBS_DATACFG_MEMINFO;

	/*
	 * We need GPRs when:
	 * + user requested them
	 * + precise_ip < 2 for the non event IP
	 * + For RTM TSX weight we need GPRs for the abort code.
	 */
	gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
	       (attr->sample_regs_intr & PEBS_GP_REGS);

	tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) &&
		     ((attr->config & INTEL_ARCH_EVENT_MASK) ==
		      x86_pmu.rtm_abort_event);

	if (gprs || (attr->precise_ip < 2) || tsx_weight)
		pebs_data_cfg |= PEBS_DATACFG_GP;

	if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
	    (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK))
		pebs_data_cfg |= PEBS_DATACFG_XMMS;

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		/*
		 * For now always log all LBRs. Could configure this
		 * later.
		 */
		pebs_data_cfg |= PEBS_DATACFG_LBRS |
			((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
	}

	return pebs_data_cfg;
}
static void
pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
		  struct perf_event *event, bool add)
{
	struct pmu *pmu = event->ctx->pmu;
	/*
	 * Make sure we get updated with the first PEBS
	 * event. It will trigger also during removal, but
	 * that does not hurt:
	 */
	bool update = cpuc->n_pebs == 1;

	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
		if (!needed_cb)
			perf_sched_cb_inc(pmu);
		else
			perf_sched_cb_dec(pmu);

		update = true;
	}

	/*
	 * The PEBS record doesn't shrink on pmu::del(). Doing so would require
	 * iterating all remaining PEBS events to reconstruct the config.
	 */
	if (x86_pmu.intel_cap.pebs_baseline && add) {
		u64 pebs_data_cfg;

		/* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
		if (cpuc->n_pebs == 1) {
			cpuc->pebs_data_cfg = 0;
			cpuc->pebs_record_size = sizeof(struct pebs_basic);
		}

		pebs_data_cfg = pebs_update_adaptive_cfg(event);

		/* Update pebs_record_size if new event requires more data. */
		if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
			cpuc->pebs_data_cfg |= pebs_data_cfg;
			adaptive_pebs_record_size_update();
			update = true;
		}
	}

	if (update)
		pebs_update_threshold(cpuc);
}
void intel_pmu_pebs_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs++;
	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
		cpuc->n_large_pebs++;
	if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
		cpuc->n_pebs_via_pt++;

	pebs_update_state(needed_cb, cpuc, event, true);
}
static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!is_pebs_pt(event))
		return;

	if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK))
		cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK;
}
static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct debug_store *ds = cpuc->ds;

	if (!is_pebs_pt(event))
		return;

	if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
		cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;

	cpuc->pebs_enabled |= PEBS_OUTPUT_PT;

	wrmsrl(MSR_RELOAD_PMC0 + hwc->idx, ds->pebs_event_reset[hwc->idx]);
}
void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct debug_store *ds = cpuc->ds;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;

	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled |= 1ULL << 63;

	if (x86_pmu.intel_cap.pebs_baseline) {
		hwc->config |= ICL_EVENTSEL_ADAPTIVE;
		if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
			wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
			cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
		}
	}

	/*
	 * Use auto-reload if possible to save a MSR write in the PMI.
	 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
	 */
	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		unsigned int idx = hwc->idx;

		if (idx >= INTEL_PMC_IDX_FIXED)
			idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
		ds->pebs_event_reset[idx] =
			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
	} else {
		ds->pebs_event_reset[hwc->idx] = 0;
	}

	intel_pmu_pebs_via_pt_enable(event);
}
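/*
 * Summary of the cpuc->pebs_enabled layout as used above and mirrored into
 * MSR_IA32_PEBS_ENABLE: bit hwc->idx enables PEBS for that counter, bit
 * (hwc->idx + 32) enables load-latency filtering on pre-v5 PMUs, and bit 63
 * enables precise-store profiling. intel_pmu_pebs_disable() clears the same
 * bits.
 */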
void intel_pmu_pebs_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs--;
	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
		cpuc->n_large_pebs--;
	if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
		cpuc->n_pebs_via_pt--;

	pebs_update_state(needed_cb, cpuc, event, false);
}
void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (cpuc->n_pebs == cpuc->n_large_pebs &&
	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
		intel_pmu_drain_pebs_buffer();

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
	    (x86_pmu.version < 5))
		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled &= ~(1ULL << 63);

	intel_pmu_pebs_via_pt_disable(event);

	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}
void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;
	void *kaddr;
	int size;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PEBS_FIXUP_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		set_linear_ip(regs, from);
		return 1;
	}

	size = ip - to;
	if (!kernel_ip(ip)) {
		int bytes;
		u8 *buf = this_cpu_read(insn_buffer);

		/* 'size' must fit our buffer, see above */
		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
		if (bytes != 0)
			return 0;

		kaddr = buf;
	} else {
		kaddr = (void *)to;
	}

	do {
		struct insn insn;

		old_to = to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || any_64bit_mode(regs);
#endif
		insn_init(&insn, kaddr, size, is_64bit);
		insn_get_length(&insn);
		/*
		 * Make sure there was not a problem decoding the
		 * instruction and getting the length. This is
		 * doubly important because we have an infinite
		 * loop if insn.length=0.
		 */
		if (!insn.length)
			break;

		to += insn.length;
		kaddr += insn.length;
		size -= insn.length;
	} while (to < ip);

	if (to == ip) {
		set_linear_ip(regs, old_to);
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}
static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
{
	if (tsx_tuning) {
		union hsw_tsx_tuning tsx = { .value = tsx_tuning };
		return tsx.cycles_last_block;
	}
	return 0;
}
static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
{
	u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;

	/* For RTM XABORTs also log the abort code from AX */
	if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
		txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
	return txn;
}
static inline u64 get_pebs_status(void *n)
{
	if (x86_pmu.intel_cap.pebs_format < 4)
		return ((struct pebs_record_nhm *)n)->status;
	return ((struct pebs_basic *)n)->applicable_counters;
}
#define PERF_X86_EVENT_PEBS_HSW_PREC \
		(PERF_X86_EVENT_PEBS_ST_HSW | \
		 PERF_X86_EVENT_PEBS_LD_HSW | \
		 PERF_X86_EVENT_PEBS_NA_HSW)
static u64 get_data_src(struct perf_event *event, u64 aux)
{
	u64 val = PERF_MEM_NA;
	int fl = event->hw.flags;
	bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);

	if (fl & PERF_X86_EVENT_PEBS_LDLAT)
		val = load_latency_data(aux);
	else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
		val = precise_datala_hsw(event, aux);
	else if (fst)
		val = precise_store_data(aux);
	return val;
}
#define PERF_SAMPLE_ADDR_TYPE	(PERF_SAMPLE_ADDR |		\
				 PERF_SAMPLE_PHYS_ADDR |	\
				 PERF_SAMPLE_DATA_PAGE_SIZE)
static void setup_pebs_fixed_sample_data(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	/*
	 * We cast to the biggest pebs_record but are careful not to
	 * unconditionally access the 'extra' entries.
	 */
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_record_skl *pebs = __pebs;
	u64 sample_type;
	int fll;

	if (pebs == NULL)
		return;

	sample_type = event->attr.sample_type;
	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;

	perf_sample_data_init(data, 0, event->hw.last_period);

	data->period = event->hw.last_period;

	/*
	 * Use latency for weight (only avail with PEBS-LL)
	 */
	if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
		data->weight = pebs->lat;

	/*
	 * data.data_src encodes the data source
	 */
	if (sample_type & PERF_SAMPLE_DATA_SRC)
		data->data_src.val = get_data_src(event, pebs->dse);

	/*
	 * We must however always use iregs for the unwinder to stay sane; the
	 * record BP,SP,IP can point into thin air when the record is from a
	 * previous PMI context or an (I)RET happened between the record and
	 * the PMI.
	 */
	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		data->callchain = perf_callchain(event, iregs);

	/*
	 * We use the interrupt regs as a base because the PEBS record does not
	 * contain a full regs set, specifically it seems to lack segment
	 * descriptors, which get used by things like user_mode().
	 *
	 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
	 */
	*regs = *iregs;

	/*
	 * Initialize regs->flags from PEBS,
	 * Clear exact bit (which uses x86 EFLAGS Reserved bit 3),
	 * i.e., do not rely on it being zero:
	 */
	regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT;

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		regs->ax = pebs->ax;
		regs->bx = pebs->bx;
		regs->cx = pebs->cx;
		regs->dx = pebs->dx;
		regs->si = pebs->si;
		regs->di = pebs->di;

		regs->bp = pebs->bp;
		regs->sp = pebs->sp;

#ifndef CONFIG_X86_32
		regs->r8 = pebs->r8;
		regs->r9 = pebs->r9;
		regs->r10 = pebs->r10;
		regs->r11 = pebs->r11;
		regs->r12 = pebs->r12;
		regs->r13 = pebs->r13;
		regs->r14 = pebs->r14;
		regs->r15 = pebs->r15;
#endif
	}

	if (event->attr.precise_ip > 1) {
		/*
		 * Haswell and later processors have an 'eventing IP'
		 * (real IP) which fixes the off-by-1 skid in hardware.
		 * Use it when precise_ip >= 2 :
		 */
		if (x86_pmu.intel_cap.pebs_format >= 2) {
			set_linear_ip(regs, pebs->real_ip);
			regs->flags |= PERF_EFLAGS_EXACT;
		} else {
			/* Otherwise, use PEBS off-by-1 IP: */
			set_linear_ip(regs, pebs->ip);

			/*
			 * With precise_ip >= 2, try to fix up the off-by-1 IP
			 * using the LBR. If successful, the fixup function
			 * corrects regs->ip and calls set_linear_ip() on regs:
			 */
			if (intel_pmu_pebs_fixup_ip(regs))
				regs->flags |= PERF_EFLAGS_EXACT;
		}
	} else {
		/*
		 * When precise_ip == 1, return the PEBS off-by-1 IP,
		 * no fixup attempted:
		 */
		set_linear_ip(regs, pebs->ip);
	}

	if ((sample_type & PERF_SAMPLE_ADDR_TYPE) &&
	    x86_pmu.intel_cap.pebs_format >= 1)
		data->addr = pebs->dla;

	if (x86_pmu.intel_cap.pebs_format >= 2) {
		/* Only set the TSX weight when no memory weight. */
		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
			data->weight = intel_get_tsx_weight(pebs->tsx_tuning);

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
							      pebs->ax);
	}

	/*
	 * v3 supplies an accurate time stamp, so we use that
	 * for the time stamp.
	 *
	 * We can only do this for the default trace clock.
	 */
	if (x86_pmu.intel_cap.pebs_format >= 3 &&
	    event->attr.use_clockid == 0)
		data->time = native_sched_clock_from_tsc(pebs->tsc);

	if (has_branch_stack(event))
		data->br_stack = &cpuc->lbr_stack;
}
static void adaptive_pebs_save_regs(struct pt_regs *regs,
				    struct pebs_gprs *gprs)
{
	regs->ax = gprs->ax;
	regs->bx = gprs->bx;
	regs->cx = gprs->cx;
	regs->dx = gprs->dx;
	regs->si = gprs->si;
	regs->di = gprs->di;
	regs->bp = gprs->bp;
	regs->sp = gprs->sp;
#ifndef CONFIG_X86_32
	regs->r8 = gprs->r8;
	regs->r9 = gprs->r9;
	regs->r10 = gprs->r10;
	regs->r11 = gprs->r11;
	regs->r12 = gprs->r12;
	regs->r13 = gprs->r13;
	regs->r14 = gprs->r14;
	regs->r15 = gprs->r15;
#endif
}
/*
 * With adaptive PEBS the layout depends on what fields are configured.
 */

static void setup_pebs_adaptive_sample_data(struct perf_event *event,
					    struct pt_regs *iregs, void *__pebs,
					    struct perf_sample_data *data,
					    struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_basic *basic = __pebs;
	void *next_record = basic + 1;
	u64 sample_type;
	u64 format_size;
	struct pebs_meminfo *meminfo = NULL;
	struct pebs_gprs *gprs = NULL;
	struct x86_perf_regs *perf_regs;

	if (basic == NULL)
		return;

	perf_regs = container_of(regs, struct x86_perf_regs, regs);
	perf_regs->xmm_regs = NULL;

	sample_type = event->attr.sample_type;
	format_size = basic->format_size;
	perf_sample_data_init(data, 0, event->hw.last_period);
	data->period = event->hw.last_period;

	if (event->attr.use_clockid == 0)
		data->time = native_sched_clock_from_tsc(basic->tsc);

	/*
	 * We must however always use iregs for the unwinder to stay sane; the
	 * record BP,SP,IP can point into thin air when the record is from a
	 * previous PMI context or an (I)RET happened between the record and
	 * the PMI.
	 */
	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		data->callchain = perf_callchain(event, iregs);

	*regs = *iregs;
	/* The ip in basic is EventingIP */
	set_linear_ip(regs, basic->ip);
	regs->flags = PERF_EFLAGS_EXACT;

	/*
	 * The record for MEMINFO is in front of GP
	 * But PERF_SAMPLE_TRANSACTION needs gprs->ax.
	 * Save the pointer here but process later.
	 */
	if (format_size & PEBS_DATACFG_MEMINFO) {
		meminfo = next_record;
		next_record = meminfo + 1;
	}

	if (format_size & PEBS_DATACFG_GP) {
		gprs = next_record;
		next_record = gprs + 1;

		if (event->attr.precise_ip < 2) {
			set_linear_ip(regs, gprs->ip);
			regs->flags &= ~PERF_EFLAGS_EXACT;
		}

		if (sample_type & PERF_SAMPLE_REGS_INTR)
			adaptive_pebs_save_regs(regs, gprs);
	}

	if (format_size & PEBS_DATACFG_MEMINFO) {
		if (sample_type & PERF_SAMPLE_WEIGHT)
			data->weight = meminfo->latency ?:
				intel_get_tsx_weight(meminfo->tsx_tuning);

		if (sample_type & PERF_SAMPLE_DATA_SRC)
			data->data_src.val = get_data_src(event, meminfo->aux);

		if (sample_type & PERF_SAMPLE_ADDR_TYPE)
			data->addr = meminfo->address;

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
							      gprs ? gprs->ax : 0);
	}

	if (format_size & PEBS_DATACFG_XMMS) {
		struct pebs_xmm *xmm = next_record;

		next_record = xmm + 1;
		perf_regs->xmm_regs = xmm->xmm;
	}

	if (format_size & PEBS_DATACFG_LBRS) {
		struct lbr_entry *lbr = next_record;
		int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
					& 0xff) + 1;
		next_record = next_record + num_lbr * sizeof(struct lbr_entry);

		if (has_branch_stack(event)) {
			intel_pmu_store_pebs_lbrs(lbr);
			data->br_stack = &cpuc->lbr_stack;
		}
	}

	WARN_ONCE(next_record != __pebs + (format_size >> 48),
			"PEBS record size %llu, expected %llu, config %llx\n",
			format_size >> 48,
			(u64)(next_record - __pebs),
			basic->format_size);
}
static inline void *
get_next_pebs_record_by_bit(void *base, void *top, int bit)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	void *at;
	u64 pebs_status;

	/*
	 * fmt0 does not have a status bitfield (does not use
	 * perf_record_nhm format)
	 */
	if (x86_pmu.intel_cap.pebs_format < 1)
		return base;

	if (base == NULL)
		return NULL;

	for (at = base; at < top; at += cpuc->pebs_record_size) {
		unsigned long status = get_pebs_status(at);

		if (test_bit(bit, (unsigned long *)&status)) {
			/* PEBS v3 has accurate status bits */
			if (x86_pmu.intel_cap.pebs_format >= 3)
				return at;

			if (status == (1 << bit))
				return at;

			/* clear non-PEBS bit and re-check */
			pebs_status = status & cpuc->pebs_enabled;
			pebs_status &= PEBS_COUNTER_MASK;
			if (pebs_status == (1 << bit))
				return at;
		}
	}
	return NULL;
}
void intel_pmu_auto_reload_read(struct perf_event *event)
{
	WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));

	perf_pmu_disable(event->pmu);
	intel_pmu_drain_pebs_buffer();
	perf_pmu_enable(event->pmu);
}
/*
 * Special variant of intel_pmu_save_and_restart() for auto-reload.
 */
static int
intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 period = hwc->sample_period;
	u64 prev_raw_count, new_raw_count;
	s64 new, old;

	/*
	 * drain_pebs() only happens when the PMU is disabled.
	 */
	WARN_ON(this_cpu_read(cpu_hw_events.enabled));

	prev_raw_count = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new_raw_count);
	local64_set(&hwc->prev_count, new_raw_count);

	/*
	 * Since the counter increments a negative counter value and
	 * overflows on the sign switch, giving the interval:
	 *
	 *   [-period, 0]
	 *
	 * the difference between two consecutive reads is:
	 *
	 *   A) value2 - value1;
	 *      when no overflows have happened in between,
	 *
	 *   B) (0 - value1) + (value2 - (-period));
	 *      when one overflow happened in between,
	 *
	 *   C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
	 *      when @n overflows happened in between.
	 *
	 * Here A) is the obvious difference, B) is the extension to the
	 * discrete interval, where the first term is to the top of the
	 * interval and the second term is from the bottom of the next
	 * interval and C) the extension to multiple intervals, where the
	 * middle term is the whole intervals covered.
	 *
	 * An equivalent of C, by reduction, is:
	 *
	 *   value2 - value1 + n * period
	 */
	new = ((s64)(new_raw_count << shift) >> shift);
	old = ((s64)(prev_raw_count << shift) >> shift);
	local64_add(new - old + count * period, &event->count);

	local64_set(&hwc->period_left, -new);

	perf_event_update_userpage(event);

	return 0;
}
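/*
 * Worked example with period = 100000: if the previous read saw -30000, the
 * new read sees -80000 and two reloads happened in between (count = 2), the
 * accumulated delta is value2 - value1 + count * period =
 * -80000 - (-30000) + 2 * 100000 = 150000 events.
 */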
static __always_inline void
__intel_pmu_pebs_event(struct perf_event *event,
		       struct pt_regs *iregs,
		       struct perf_sample_data *data,
		       void *base, void *top,
		       int bit, int count,
		       void (*setup_sample)(struct perf_event *,
					    struct pt_regs *,
					    void *,
					    struct perf_sample_data *,
					    struct pt_regs *))
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct x86_perf_regs perf_regs;
	struct pt_regs *regs = &perf_regs.regs;
	void *at = get_next_pebs_record_by_bit(base, top, bit);
	static struct pt_regs dummy_iregs;

	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		/*
		 * Now, auto-reload is only enabled in fixed period mode.
		 * The reload value is always hwc->sample_period.
		 * May need to change it, if auto-reload is enabled in
		 * freq mode later.
		 */
		intel_pmu_save_and_restart_reload(event, count);
	} else if (!intel_pmu_save_and_restart(event))
		return;

	if (!iregs)
		iregs = &dummy_iregs;

	while (count > 1) {
		setup_sample(event, iregs, at, data, regs);
		perf_event_output(event, data, regs);
		at += cpuc->pebs_record_size;
		at = get_next_pebs_record_by_bit(at, top, bit);
		count--;
	}

	setup_sample(event, iregs, at, data, regs);
	if (iregs == &dummy_iregs) {
		/*
		 * The PEBS records may be drained in the non-overflow context,
		 * e.g., large PEBS + context switch. Perf should treat the
		 * last record the same as other PEBS records, and doesn't
		 * invoke the generic overflow handler.
		 */
		perf_event_output(event, data, regs);
	} else {
		/*
		 * All but the last records are processed.
		 * The last one is left to be able to call the overflow handler.
		 */
		if (perf_event_overflow(event, data, regs))
			x86_pmu_stop(event, 0);
	}
}
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0) {
		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
			intel_pmu_save_and_restart_reload(event, 0);
		return;
	}

	__intel_pmu_pebs_event(event, iregs, data, at, top, 0, n,
			       setup_pebs_fixed_sample_data);
}
static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
{
	struct perf_event *event;
	int bit;

	/*
	 * The drain_pebs() could be called twice in a short period
	 * for auto-reload event in pmu::read(). No overflows have
	 * happened in between.
	 * It needs to call intel_pmu_save_and_restart_reload() to
	 * update the event->count for this case.
	 */
	for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
		event = cpuc->events[bit];
		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
			intel_pmu_save_and_restart_reload(event, 0);
	}
}
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event;
	void *base, *at, *top;
	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	int bit, i, size;
	u64 mask;

	if (!x86_pmu.pebs_active)
		return;

	base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	mask = (1ULL << x86_pmu.max_pebs_events) - 1;
	size = x86_pmu.max_pebs_events;
	if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
		mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
		size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
	}

	if (unlikely(base >= top)) {
		intel_pmu_pebs_event_update_no_drain(cpuc, size);
		return;
	}

	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
		struct pebs_record_nhm *p = at;
		u64 pebs_status;

		pebs_status = p->status & cpuc->pebs_enabled;
		pebs_status &= mask;

		/* PEBS v3 has more accurate status bits */
		if (x86_pmu.intel_cap.pebs_format >= 3) {
			for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
				counts[bit]++;

			continue;
		}

		/*
		 * On some CPUs the PEBS status can be zero when PEBS is
		 * racing with clearing of GLOBAL_STATUS.
		 *
		 * Normally we would drop that record, but in the
		 * case when there is only a single active PEBS event
		 * we can assume it's for that event.
		 */
		if (!pebs_status && cpuc->pebs_enabled &&
		    !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
			pebs_status = cpuc->pebs_enabled;

		bit = find_first_bit((unsigned long *)&pebs_status,
					x86_pmu.max_pebs_events);
		if (bit >= x86_pmu.max_pebs_events)
			continue;

		/*
		 * The PEBS hardware does not deal well with the situation
		 * when events happen near to each other and multiple bits
		 * are set. But it should happen rarely.
		 *
		 * If these events include one PEBS and multiple non-PEBS
		 * events, it doesn't impact PEBS record. The record will
		 * be handled normally. (slow path)
		 *
		 * If these events include two or more PEBS events, the
		 * records for the events can be collapsed into a single
		 * one, and it's not possible to reconstruct all events
		 * that caused the PEBS record. It's called collision.
		 * If collision happened, the record will be dropped.
		 */
		if (pebs_status != (1ULL << bit)) {
			for_each_set_bit(i, (unsigned long *)&pebs_status, size)
				error[i]++;
			continue;
		}

		counts[bit]++;
	}

	for_each_set_bit(bit, (unsigned long *)&mask, size) {
		if ((counts[bit] == 0) && (error[bit] == 0))
			continue;

		event = cpuc->events[bit];
		if (WARN_ON_ONCE(!event))
			continue;

		if (WARN_ON_ONCE(!event->attr.precise_ip))
			continue;

		/* log dropped samples number */
		if (error[bit]) {
			perf_log_lost_samples(event, error[bit]);

			if (iregs && perf_event_account_interrupt(event))
				x86_pmu_stop(event, 0);
		}

		if (counts[bit]) {
			__intel_pmu_pebs_event(event, iregs, data, base,
					       top, bit, counts[bit],
					       setup_pebs_fixed_sample_data);
		}
	}
}
static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
{
	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event;
	void *base, *at, *top;
	int bit, size;
	u64 mask;

	if (!x86_pmu.pebs_active)
		return;

	base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_basic *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
	       (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
	size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;

	if (unlikely(base >= top)) {
		intel_pmu_pebs_event_update_no_drain(cpuc, size);
		return;
	}

	for (at = base; at < top; at += cpuc->pebs_record_size) {
		u64 pebs_status;

		pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
		pebs_status &= mask;

		for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
			counts[bit]++;
	}

	for_each_set_bit(bit, (unsigned long *)&mask, size) {
		if (counts[bit] == 0)
			continue;

		event = cpuc->events[bit];
		if (WARN_ON_ONCE(!event))
			continue;

		if (WARN_ON_ONCE(!event->attr.precise_ip))
			continue;

		__intel_pmu_pebs_event(event, iregs, data, base,
				       top, bit, counts[bit],
				       setup_pebs_adaptive_sample_data);
	}
}
/*
 * BTS, PEBS probe and setup
 */

void __init intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
	if (x86_pmu.version <= 4)
		x86_pmu.pebs_no_isolation = 1;

	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
		char *pebs_qual = "";
		int format = x86_pmu.intel_cap.pebs_format;

		if (format < 4)
			x86_pmu.intel_cap.pebs_baseline = 0;

		switch (format) {
		case 0:
			pr_cont("PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			/*
			 * Using >PAGE_SIZE buffers makes the WRMSR to
			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
			 * mysteriously hang on Core2.
			 *
			 * As a workaround, we don't do this.
			 */
			x86_pmu.pebs_buffer_size = PAGE_SIZE;
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			pr_cont("PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 2:
			pr_cont("PEBS fmt2%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 3:
			pr_cont("PEBS fmt3%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_skl);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
			break;

		case 4:
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
			x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
			if (x86_pmu.intel_cap.pebs_baseline) {
				x86_pmu.large_pebs_flags |=
					PERF_SAMPLE_BRANCH_STACK |
					PERF_SAMPLE_TIME;
				x86_pmu.flags |= PMU_FL_PEBS_ALL;
				pebs_qual = "-baseline";
				x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
			} else {
				/* Only basic record supported */
				x86_pmu.large_pebs_flags &=
					~(PERF_SAMPLE_ADDR |
					  PERF_SAMPLE_TIME |
					  PERF_SAMPLE_DATA_SRC |
					  PERF_SAMPLE_TRANSACTION |
					  PERF_SAMPLE_REGS_USER |
					  PERF_SAMPLE_REGS_INTR);
			}
			pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);

			if (x86_pmu.intel_cap.pebs_output_pt_available) {
				pr_cont("PEBS-via-PT, ");
				x86_get_pmu()->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
			}

			break;

		default:
			pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}
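/*
 * The format numbers handled above correspond to the record layouts declared
 * earlier in this file: fmt0 is the Core2-era pebs_record_core, fmt1 the
 * Nehalem pebs_record_nhm, fmt2 the Haswell pebs_record_hsw, fmt3 the Skylake
 * pebs_record_skl, and fmt4 the adaptive layout built around struct
 * pebs_basic.
 */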
void perf_restore_debug_store(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
}