#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/perf_event.h>

#include "perf_event.h"
/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE
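/*
 * The debug store (DS) area backs both BTS and PEBS: each BTS record is
 * three u64s (from, to, flags), so with 4K pages the BTS buffer above is
 * 64KB and the PEBS buffer is a single page.
 */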
/*
 * pebs_record_32 for p4 and core not supported
 *
 * struct pebs_record_32 {
 *	u32 flags, ip;
 *	u32 ax, bx, cx, dx;
 *	u32 si, di, bp, sp;
 * };
 */
struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};
struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};
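/*
 * init_debug_store_on_cpu() below publishes the per-cpu debug_store to
 * the hardware by writing its address into the IA32_DS_AREA MSR; the
 * fini variant clears it again.
 */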
void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}
void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
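/*
 * The PEBS interrupt threshold is left at a single record, so the PMI
 * fires after every PEBS write and the buffer never has to hold more
 * than one record at a time.
 */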
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}
static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}
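/*
 * For BTS the interrupt threshold is placed 'thresh' records below the
 * absolute maximum, so the buffer still has some headroom once the
 * overflow interrupt is raised.
 */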
static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}
static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}
static int alloc_ds_buffer(int cpu)
{
	int node = cpu_to_node(cpu);
	struct debug_store *ds;

	ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}
static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
	kfree(ds);
}
void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}
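/*
 * reserve_ds_buffers() allocates the DS area plus the BTS and PEBS
 * buffers for every possible CPU. BTS and PEBS can fail independently:
 * whichever of the two could be allocated everywhere is activated, and
 * the per-cpu DS area is only torn down when both failed.
 */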
void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}
struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
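/*
 * BTS events are constrained to the fixed BTS counter index; enabling
 * and disabling them is purely a matter of toggling the branch-trace
 * bits in MSR_IA32_DEBUGCTL below.
 */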
void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}
void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}
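/*
 * intel_pmu_drain_bts_buffer() converts every hardware BTS record that
 * accumulated in the DS area into a perf sample, using the record's
 * branch-from address as the sample IP and the branch-to address as
 * data.addr.
 */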
int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	regs.ip = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at)))
		return 1;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;

	return 1;
}
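/*
 * Per-microarchitecture PEBS event constraint tables: the second
 * argument is the mask of generic counters on which the event may be
 * scheduled when precise sampling is requested.
 */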
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
	INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
	INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
	INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
	EVENT_CONSTRAINT_END
};
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &emptyconstraint;
}
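/*
 * Enabling PEBS for an event clears ARCH_PERFMON_EVENTSEL_INT (the PEBS
 * assist raises the PMI itself) and sets the counter's bit in the
 * pebs_enabled mask that gets written to MSR_IA32_PEBS_ENABLE.
 */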
void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_enable(event);
}
void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_disable(event);
}
void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}
void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
#include <asm/insn.h>

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}
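/*
 * With a trap-like PEBS assist the reported IP points to the instruction
 * after the one that caused the event. intel_pmu_pebs_fixup_ip() rewinds
 * to the preceding instruction by decoding forward from the last LBR
 * branch target until it reaches the sampled IP.
 */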
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PAGE_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		regs->ip = from;
		return 1;
	}

	do {
		struct insn insn;
		u8 buf[MAX_INSN_SIZE];
		void *kaddr;

		old_to = to;
		if (!kernel_ip(ip)) {
			int bytes, size = MAX_INSN_SIZE;

			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
			if (bytes != size)
				return 0;

			kaddr = buf;
		} else {
			kaddr = (void *)to;
		}

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, is_64bit);
		insn_get_length(&insn);
		to += insn.length;
	} while (to < ip);

	if (to == ip) {
		regs->ip = old_to;
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
	/*
	 * We cast to pebs_record_core since that is a subset of
	 * both formats and we don't use the other fields in this
	 * routine.
	 */
	struct pebs_record_core *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!intel_pmu_save_and_restart(event))
		return;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.ip = pebs->ip;
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if (perf_event_overflow(event, &data, &regs))
		x86_pmu_stop(event, 0);
}
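/*
 * On Core-era hardware only one counter (PMC0) can do PEBS, so the drain
 * routine below has exactly one candidate event and, with the threshold
 * programmed at one record, at most one record to consume.
 */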
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > 1);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}
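/*
 * Nehalem-style PEBS records carry a status bitmask identifying which
 * counter produced the record, so the drain routine maps each record
 * back to its event before handing it to __intel_pmu_pebs_event().
 */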
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct pebs_record_nhm *at, *top;
	struct perf_event *event = NULL;
	u64 status = 0;
	int bit, n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);

	for ( ; at < top; at++) {
		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= MAX_PEBS_EVENTS)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}
/*
 * BTS, PEBS probe and setup
 */

void intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}