/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptrace.h>

#include <asm/apic.h>

#include "perf_event.h"

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>

#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
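/*
 * IBS comes in two flavors that are exposed as two separate PMUs:
 * instruction fetch sampling (IbsFetchCtl) and op/execution sampling
 * (IbsOpCtl).  The masks above list the config bits user space may set
 * directly for each flavor.
 */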
enum ibs_states {
	IBS_ENABLED,
	IBS_STARTED,
	IBS_STOPPING,

	IBS_MAX_STATES,
};

struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	struct cpu_perf_ibs __percpu	*pcpu;
	u64				(*get_count)(u64 config);
};

struct perf_ibs_data {
	u32		size;
	union {
		u32	data[0];	/* data buffer starts here */
		u32	caps;
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};
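/*
 * The hardware counter can only be programmed within a limited range of
 * periods (see the min/max arguments below), so the generic perf sampling
 * period is emulated on top of it: compute the next hardware period from
 * hwc->period_left and report whether a software counter overflow
 * occurred.
 */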
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * we might hit the irq handler. This biases the results.
	 * Thus we shorten the next-to-last period and set the last
	 * period to the max period.
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}
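/*
 * Try to fold a new raw count into the generic event.  A return value of
 * 0 means an NMI updated hwc->prev_count underneath us; the caller has to
 * re-read the hardware counter and retry, see perf_ibs_event_update().
 */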
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}
/*
 * Use IBS for precise event sampling:
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 *
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */
static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
	switch (event->attr.precise_ip) {
	case 0:
		return -ENOENT;
	case 1:
	case 2:
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		switch (event->attr.config) {
		case PERF_COUNT_HW_CPU_CYCLES:
			*config = 0;
			return 0;
		}
		break;
	case PERF_TYPE_RAW:
		switch (event->attr.config) {
		case 0x0076:
			*config = 0;
			return 0;
		case 0x00C1:
			*config = IBS_OP_CNT_CTL;
			return 0;
		}
		break;
	default:
		return -ENOENT;
	}

	return -EOPNOTSUPP;
}
static const struct perf_event_attr ibs_notsupp = {
	.exclude_user	= 1,
	.exclude_kernel	= 1,
	.exclude_hv	= 1,
	.exclude_idle	= 1,
	.exclude_host	= 1,
	.exclude_guest	= 1,
};
static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;
	int ret;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (perf_ibs) {
		config = event->attr.config;
	} else {
		perf_ibs = &perf_ibs_op;
		ret = perf_ibs_precise_event(event, &config);
		if (ret)
			return ret;
	}

	if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;

	if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
		return -EINVAL;

	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * The lower 4 bits cannot be set in the ibs max
			 * cnt, but allow it in case we adjust the sample
			 * period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int overflow;

	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return overflow;
}
static u64 get_ibs_fetch_count(u64 config)
{
	return (config & IBS_FETCH_CNT) >> 12;
}
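/*
 * The op count is assembled from two fields of IbsOpCtl: if the valid bit
 * is set the counter rolled over the programmed max count, and on CPUs
 * with IBS_CAPS_RDWROPCNT the current count can be read back on top of
 * that.
 */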
static u64 get_ibs_op_count(u64 config)
{
	u64 count = 0;

	if (config & IBS_OP_VAL)
		count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */

	if (ibs_caps & IBS_CAPS_RDWROPCNT)
		count += (config & IBS_OP_CUR_CNT) >> 32;

	return count;
}
static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 *config)
{
	u64 count = perf_ibs->get_count(*config);

	/*
	 * Set width to 64 since we do not overflow on max width but
	 * instead on max count. In perf_ibs_set_period() we clear
	 * prev count manually on overflow.
	 */
	while (!perf_event_try_update(event, count, 64)) {
		rdmsrl(event->hw.config_base, *config);
		count = perf_ibs->get_count(*config);
	}
}
static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
					 struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
}
/*
 * Erratum #420 Instruction-Based Sampling Engine May Generate
 * Interrupt that Cannot Be Cleared:
 *
 * Must clear counter mask first, then clear the enable bit. See
 * Revision Guide for AMD Family 10h Processors, Publication #41322.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
					  struct hw_perf_event *hwc, u64 config)
{
	config &= ~perf_ibs->cnt_mask;
	wrmsrl(hwc->config_base, config);
	config &= ~perf_ibs->enable_mask;
	wrmsrl(hwc->config_base, config);
}
/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags
 * in perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 period;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &period);
	set_bit(IBS_STARTED, pcpu->state);
	perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);
}
static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;
	int stopping;

	stopping = test_and_clear_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, config);

	if (stopping) {
		set_bit(IBS_STOPPING, pcpu->state);
		perf_ibs_disable_event(perf_ibs, hwc, config);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * Clear the valid bit to not count rollovers on update;
	 * rollovers are only counted in the irq handler.
	 */
	config &= ~perf_ibs->valid_mask;

	perf_ibs_event_update(perf_ibs, event, &config);
	hwc->state |= PERF_HES_UPTODATE;
}
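/*
 * Only one IBS event per flavor can be scheduled on a CPU at a time; the
 * active event is tracked in pcpu->event and the IBS_ENABLED bit rejects
 * a second add.
 */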
static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}
static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}
static void perf_ibs_read(struct perf_event *event) { }
static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,

	.get_count		= get_ibs_fetch_count,
};
static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,

	.get_count		= get_ibs_op_count,
};
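/*
 * NMI handler for one IBS unit: check that the unit raised a valid
 * sample, update the event count, program the next period, read the
 * sample registers into a raw record if requested and pass the sample to
 * perf_event_overflow() before re-enabling the unit.
 */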
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, *config, period;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
		/*
		 * Catch spurious interrupts after stopping IBS: After
		 * disabling IBS there could still be incoming NMIs
		 * with samples that even have the valid bit cleared.
		 * Mark all these NMIs as handled.
		 */
		return test_and_clear_bit(IBS_STOPPING, pcpu->state) ? 1 : 0;
	}

	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		return 0;

	config = &ibs_data.regs[0];
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
		goto out;	/* no sw counter overflow */

	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
	if (event->attr.sample_type & PERF_SAMPLE_RAW)
		offset_max = perf_ibs->offset_max;
	else if (check_rip)
		offset_max = 3;
	else
		offset_max = 1;
	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	ibs_data.size = sizeof(u64) * size;

	regs = *iregs;
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.size = sizeof(u32) + ibs_data.size;
		raw.data = ibs_data.data;
		data.raw = &raw;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	if (throttle)
		perf_ibs_disable_event(perf_ibs, hwc, *config);
	else
		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);

	return 1;
}
static int __kprobes
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}
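/*
 * Allocate the per-cpu state for one IBS flavor and register it as a PMU
 * ("ibs_fetch" or "ibs_op") with the perf core.
 */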
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}
static __init int perf_event_ibs_init(void)
{
	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
	if (ibs_caps & IBS_CAPS_OPCNT)
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void) { return 0; }

#endif
/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}
u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);
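/*
 * APIC setup: IBS delivers its interrupt through an extended interrupt
 * LVT (EILVT) entry.  get_eilvt()/put_eilvt() reserve and release such an
 * entry via setup_APIC_eilvt().
 */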
static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}
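/*
 * Program the IBS LVT offset into the IBSCTL register of each
 * northbridge (one per node) and read it back to verify that the write
 * took effect.
 */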
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x\n", value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This then updates
 * the offset in the per-node IBS_CTL msr. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
 * uses the new offset.
 */
static int force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		printk(KERN_DEBUG "No EILVT entry available\n");
		return -EBUSY;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid()) {
		ret = -EFAULT;
		goto out;
	}

	pr_info("IBS: LVT offset %d assigned\n", offset);

	return 0;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return ret;
}
static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}
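/*
 * Per-cpu APIC setup: point the reserved extended LVT entry at the NMI
 * vector (setup_APIC_ibs) or mask it again (clear_APIC_ibs).  Both run on
 * the target cpu, either via smp_call_function() or from the cpu
 * notifier.
 */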
static void setup_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;
failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
static int
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		setup_APIC_ibs(NULL);
		break;
	case CPU_DYING:
		clear_APIC_ibs(NULL);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
static __init int amd_ibs_init(void)
{
	u32 caps;
	int ret = -EINVAL;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, we fall
	 * back to the BIOS settings and try to set them up.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		goto out;

	get_online_cpus();
	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	perf_cpu_notifier(perf_ibs_cpu_notifier);
	smp_call_function(setup_APIC_ibs, NULL, 1);
	put_online_cpus();

	ret = perf_event_ibs_init();
out:
	if (ret)
		pr_err("Failed to setup IBS, %d\n", ret);
	return ret;
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);