#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/perf_event.h>
#include <asm/insn.h>

#include "perf_event.h"
/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE
/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */
struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};
struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};
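/*
 * The debug store (DS) save area is advertised to the hardware through
 * MSR_IA32_DS_AREA.  init_debug_store_on_cpu() below splits the 64-bit
 * kernel pointer into the low/high 32-bit halves that wrmsr_on_cpu()
 * expects; fini_debug_store_on_cpu() clears the MSR again.
 */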
void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}
void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
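/*
 * PEBS buffer sizing, roughly: max = PEBS_BUFFER_SIZE / pebs_record_size
 * records fit in the buffer, and the interrupt threshold is placed one
 * record past the base so every PEBS record raises a PMI.  For example,
 * assuming 4K pages and the record layouts above, a 144-byte fmt0 (core)
 * record gives max = 28 and a 176-byte fmt1 (nehalem) record gives
 * max = 23; only the first slot is ever filled before the PMI drains it.
 */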
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}
static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}
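/*
 * BTS buffer sizing: each record is a 24-byte from/to/flags triple
 * (BTS_RECORD_SIZE), and the interrupt threshold is placed a little
 * before bts_absolute_maximum so the PMI fires before the buffer can
 * overflow.
 */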
static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}
static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}
static int alloc_ds_buffer(int cpu)
{
	int node = cpu_to_node(cpu);
	struct debug_store *ds;

	ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}
static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
	kfree(ds);
}
void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}
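/*
 * reserve_ds_buffers() allocates the per-cpu DS area plus the BTS and
 * PEBS buffers.  BTS and PEBS failures are tracked independently: if
 * only one of them fails, its buffers are released and only the other
 * feature is marked active; the DS area itself is freed only when both
 * fail.
 */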
void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}
struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
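/*
 * BTS is driven through IA32_DEBUGCTL: TR enables branch trace messages,
 * BTS redirects them into the DS buffer and BTINT raises an interrupt
 * once the buffer threshold is reached.  If the event does not count OS
 * (or user) mode, the corresponding BTS_OFF_OS/BTS_OFF_USR bit is set to
 * suppress those branches.
 */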
void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}
void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}
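/*
 * Drain the BTS buffer: everything between bts_buffer_base and the
 * current bts_index is unread.  One perf sample is emitted per record
 * (the from/to addresses go into data.ip and data.addr) and the index
 * is rewound to the buffer base.
 */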
int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64 from;
		u64 to;
		u64 flags;
	};
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);
	regs.ip = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at)))
		return 1;

	for (; at < top; at++) {
		data.ip   = at->from;
		data.addr = at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;

	return 1;
}
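/*
 * PEBS only works for a restricted set of events.  The tables below map
 * an event code (or event+umask) to the mask of counters it may use with
 * precise sampling enabled; intel_pebs_constraints() picks the matching
 * entry for a given event.
 */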
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &emptyconstraint;
}
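/*
 * With PEBS the hardware assist raises the PMI, so the regular counter
 * overflow interrupt is masked (EVENTSEL_INT cleared) while the event's
 * bit is set in MSR_IA32_PEBS_ENABLE; disabling reverses both steps.
 */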
void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;
}
void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}
void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}
void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
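/*
 * On cores where the PEBS assist is trap-like, the reported IP points to
 * the instruction *after* the one that caused the event.  The fixup below
 * uses the most recent LBR entry: starting from the branch target it
 * decodes forward, instruction by instruction, until it reaches the
 * instruction just before the reported IP, then rewrites regs->ip to that
 * address so the sample can be flagged PERF_EFLAGS_EXACT.
 */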
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PAGE_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		set_linear_ip(regs, from);
		return 1;
	}

	do {
		struct insn insn;
		u8 buf[MAX_INSN_SIZE];
		void *kaddr;

		old_to = to;
		if (!kernel_ip(ip)) {
			int bytes, size = MAX_INSN_SIZE;

			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
			if (bytes != size)
				return 0;

			kaddr = buf;
		} else
			kaddr = (void *)to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, is_64bit);
		insn_get_length(&insn);
		to += insn.length;
	} while (to < ip);

	if (to == ip) {
		set_linear_ip(regs, old_to);
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
	/*
	 * We cast to pebs_record_core since that is a subset of
	 * both formats and we don't use the other fields in this
	 * routine.
	 */
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct pebs_record_core *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!intel_pmu_save_and_restart(event))
		return;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.flags = pebs->flags;
	set_linear_ip(&regs, pebs->ip);
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if (has_branch_stack(event))
		data.br_stack = &cpuc->lbr_stack;

	if (perf_event_overflow(event, &data, &regs))
		x86_pmu_stop(event, 0);
}
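/*
 * Format 0 (core2/atom) records carry no status bits, so only a single
 * PEBS event can be active at a time and it must live in PMC0.  With the
 * threshold programmed at one record, any leftover entries are stale and
 * only the most recent record is consumed.
 */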
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}
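/*
 * Format 1 (nehalem+) PEBS records carry a status bitmask naming the
 * counters that triggered the record, so several counters can share the
 * single DS buffer.  Each record is matched to the first active precise
 * event whose bit is set and handed to __intel_pmu_pebs_event().
 */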
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct pebs_record_nhm *at, *top;
	struct perf_event *event = NULL;
	u64 status = 0;
	int bit, n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);

	for ( ; at < top; at++) {
		for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= x86_pmu.max_pebs_events)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}
/*
 * BTS, PEBS probe and setup
 */

void intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}
void perf_restore_debug_store(void)
{
	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	init_debug_store_on_cpu(smp_processor_id());
}