#ifdef CONFIG_CPU_SUP_INTEL

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE

/*
 * pebs_record_32 for p4 and core not supported
 *
 * struct pebs_record_32 {
 *	u32 flags, ip;
 *	u32 ax, bx, cx, dx;
 *	u32 si, di, bp, sp;
 * };
 */

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

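/*
 * Program this CPU's debug store address into MSR_IA32_DS_AREA so the
 * hardware knows where to write BTS/PEBS records; the 64bit pointer is
 * passed as two 32bit halves.
 */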
static void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

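/*
 * Allocate a zeroed, node-local PEBS buffer and wire up the base, index,
 * absolute maximum and interrupt threshold fields of the debug store.
 * The threshold stays at a single record, so every PEBS write raises a PMI.
 */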
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}

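/*
 * Allocate the per-cpu BTS buffer. The interrupt threshold is placed some
 * records below the absolute maximum so a PMI is raised before the buffer
 * can overflow.
 */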
static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}

static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}

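/*
 * Allocate the debug store descriptor itself for this CPU and publish it
 * through per_cpu(cpu_hw_events, cpu).ds.
 */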
static int alloc_ds_buffer(int cpu)
{
	int node = cpu_to_node(cpu);
	struct debug_store *ds;

	ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}

static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
	kfree(ds);
}

static void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}

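/*
 * Allocate DS, BTS and PEBS buffers for every possible CPU. If BTS or PEBS
 * allocation fails, that facility is disabled and its buffers are released
 * again; only facilities whose buffers are fully in place are marked active,
 * after which the DS area is programmed on all online CPUs.
 */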
static void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}

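/*
 * BTS
 */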
static struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);

static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

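/*
 * Drain the BTS buffer: convert the raw from/to branch records into perf
 * samples against the fixed BTS event and reset the buffer index.
 */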
static int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs.ip     = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at)))
		return 1;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
	return 1;
}

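/*
 * PEBS
 */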
static struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_pebs_events[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
	INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
	INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
	INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
	INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
	EVENT_CONSTRAINT_END
};

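/*
 * Look up the PEBS constraint for a precise event: walk the model specific
 * constraint table and return the first entry whose code/cmask matches the
 * event's config. Non-precise events get NULL; precise events that no table
 * entry can service fall back to emptyconstraint.
 */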
static struct event_constraint *
intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &emptyconstraint;
}

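/*
 * PEBS signals its samples through the DS interrupt threshold, so the
 * regular counter overflow interrupt is masked off in the event's config
 * while PEBS is enabled for it and restored again on disable.
 */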
static void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;
	WARN_ON_ONCE(cpuc->enabled);

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_enable(event);
}

static void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;

	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		intel_pmu_lbr_disable(event);
}

static void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

static void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

#include <asm/insn.h>

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

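/*
 * A trap-like PEBS assist reports the address of the instruction *after*
 * the one that caused the event. To produce an exact IP, start from the
 * last LBR branch target and decode forward through the basic block until
 * the reported IP is reached; the previous instruction is the one that
 * actually triggered the sample.
 */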
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PAGE_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		regs->ip = from;
		return 1;
	}

	do {
		struct insn insn;
		u8 buf[MAX_INSN_SIZE];
		void *kaddr;

		old_to = to;
		if (!kernel_ip(ip)) {
			int bytes, size = MAX_INSN_SIZE;

			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
			if (bytes != size)
				return 0;

			kaddr = buf;
		} else
			kaddr = (void *)to;

		kernel_insn_init(&insn, kaddr);
		insn_get_length(&insn);
		to += insn.length;
	} while (to < ip);

	if (to == ip) {
		regs->ip = old_to;
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}

static int intel_pmu_save_and_restart(struct perf_event *event);

static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
	/*
	 * We cast to pebs_record_core since that is a subset of
	 * both formats and we don't use the other fields in this
	 * routine.
	 */
	struct pebs_record_core *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!intel_pmu_save_and_restart(event))
		return;

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.ip = pebs->ip;
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if (perf_event_overflow(event, &data, &regs))
		x86_pmu_stop(event, 0);
}

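/*
 * Core2/Atom PEBS works with a single counter (PMC0) only, so at most one
 * event can be pending and only the last record in the buffer is reported.
 */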
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > 1);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}

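/*
 * Nehalem and later can have PEBS active on several counters at once; each
 * record carries a status bitmask saying which counter(s) it belongs to, so
 * every drained record is attributed to the corresponding event.
 */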
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct pebs_record_nhm *at, *top;
	struct perf_event *event = NULL;
	u64 status = 0;
	int bit, n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);

	for ( ; at < top; at++) {
		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= MAX_PEBS_EVENTS)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}

/*
 * BTS, PEBS probe and setup
 */

static void intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}

#else /* CONFIG_CPU_SUP_INTEL */

static void reserve_ds_buffers(void)
{
}

static void release_ds_buffers(void)
{
}

#endif /* CONFIG_CPU_SUP_INTEL */