/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptrace.h>

#include <asm/apic.h>

#include "perf_event.h"

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>
#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
enum ibs_states {
	IBS_ENABLED	= 0,
	IBS_STARTED	= 1,
	IBS_STOPPING	= 2,

	IBS_MAX_STATES,
};

struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	struct cpu_perf_ibs __percpu	*pcpu;

	struct attribute		**format_attrs;
	struct attribute_group		format_group;
	const struct attribute_group	*attr_groups[2];

	u64				(*get_count)(u64 config);
};
struct perf_ibs_data {
	u32		size;
	union {
		u32	data[0];	/* data buffer starts here */
		u32	caps;
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * we might hit the irq handler. This biases the results.
	 * Thus we shorten the next-to-last period and set the last
	 * period to the max period.
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
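/*
 * Illustrative sketch (not part of the driver): the shift pair above is
 * the usual way to compute the delta of a 'width'-bit wide counter that
 * may have wrapped, without assuming the hardware sign-extends the raw
 * value. The helper name is made up:
 *
 *	static inline u64 counter_delta(u64 prev, u64 new, int width)
 *	{
 *		int shift = 64 - width;
 *		u64 delta = (new << shift) - (prev << shift);
 *
 *		return delta >> shift;
 *	}
 *
 * For example, counter_delta(0xfff0, 0x0010, 16) yields 0x20 even though
 * the raw 16-bit counter wrapped between the two reads.
 */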
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}
/*
 * Use IBS for precise event sampling:
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 *
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */
static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
	switch (event->attr.precise_ip) {
	case 0:
		return -ENOENT;
	case 1:
	case 2:
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		switch (event->attr.config) {
		case PERF_COUNT_HW_CPU_CYCLES:
			*config = 0;
			return 0;
		}
		break;
	case PERF_TYPE_RAW:
		switch (event->attr.config) {
		case 0x0076:
			*config = 0;
			return 0;
		case 0x00C1:
			*config = IBS_OP_CNT_CTL;
			return 0;
		}
		break;
	default:
		return -ENOENT;
	}

	return -EOPNOTSUPP;
}
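/*
 * Usage sketch (user space, not part of this file): the mapping above is
 * what lets a generic precise request be served by the IBS op PMU. The
 * attribute values are plain perf ABI; pid/cpu choices are only an example:
 *
 *	struct perf_event_attr attr = {
 *		.size		= sizeof(attr),
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.sample_period	= 100000,
 *		.precise_ip	= 2,	// "cpu-cycles:pp"
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *
 * This is the kind of request that perf_ibs_precise_event() recognizes
 * and translates into an IBS op configuration.
 */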
/* perf_event_attr exclude bits that IBS cannot honour: */
static const struct perf_event_attr ibs_notsupp = {
	.exclude_user	= 1,
	.exclude_kernel	= 1,
	.exclude_hv	= 1,
	.exclude_idle	= 1,
	.exclude_host	= 1,
	.exclude_guest	= 1,
};
static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;
	int ret;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (perf_ibs) {
		config = event->attr.config;
	} else {
		perf_ibs = &perf_ibs_op;
		ret = perf_ibs_precise_event(event, &config);
		if (ret)
			return ret;
	}

	if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;

	if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
		return -EINVAL;

	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * lower 4 bits can not be set in ibs max cnt,
			 * but allowing it in case we adjust the
			 * sample period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int overflow;

	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return overflow;
}
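/*
 * Worked example (illustrative only): the IBS max-count fields have a
 * granularity of 16, so the low 4 bits of a period are never programmed.
 * A requested sample_period of 100005 (0x186a5) is trimmed in
 * perf_ibs_init() to 0x186a5 & ~0xfULL = 0x186a0 = 100000, and the value
 * written to the control MSR by perf_ibs_enable_event() is
 * period >> 4 = 0x186a. The smallest usable period is therefore
 * 1 << 4 = 16, which is also the 'min' passed to perf_event_set_period()
 * above.
 */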
static u64 get_ibs_fetch_count(u64 config)
{
	return (config & IBS_FETCH_CNT) >> 12;
}
static u64 get_ibs_op_count(u64 config)
{
	u64 count = 0;

	if (config & IBS_OP_VAL)
		count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */

	if (ibs_caps & IBS_CAPS_RDWROPCNT)
		count += (config & IBS_OP_CUR_CNT) >> 32;

	return count;
}
static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 *config)
{
	u64 count = perf_ibs->get_count(*config);

	/*
	 * Set width to 64 since we do not overflow on max width but
	 * instead on max count. In perf_ibs_set_period() we clear
	 * prev count manually on overflow.
	 */
	while (!perf_event_try_update(event, count, 64)) {
		rdmsrl(event->hw.config_base, *config);
		count = perf_ibs->get_count(*config);
	}
}
static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
					 struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
}
/*
 * Erratum #420 Instruction-Based Sampling Engine May Generate
 * Interrupt that Cannot Be Cleared:
 *
 * Must clear counter mask first, then clear the enable bit. See
 * Revision Guide for AMD Family 10h Processors, Publication #41322.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
					  struct hw_perf_event *hwc, u64 config)
{
	config &= ~perf_ibs->cnt_mask;
	wrmsrl(hwc->config_base, config);
	config &= ~perf_ibs->enable_mask;
	wrmsrl(hwc->config_base, config);
}
/*
 * We cannot restore the IBS PMU state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags
 * in perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 period;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &period);
	set_bit(IBS_STARTED, pcpu->state);
	perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);
}
static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;
	int stopping;

	stopping = test_and_clear_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, config);

	if (stopping) {
		set_bit(IBS_STOPPING, pcpu->state);
		perf_ibs_disable_event(perf_ibs, hwc, config);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * Clear the valid bit so that rollovers are not counted on update;
	 * rollovers are only accounted in the irq handler.
	 */
	config &= ~perf_ibs->valid_mask;

	perf_ibs_event_update(perf_ibs, event, &config);
	hwc->state |= PERF_HES_UPTODATE;
}
static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}
static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}
static void perf_ibs_read(struct perf_event *event) { }
PMU_FORMAT_ATTR(rand_en,	"config:57");
PMU_FORMAT_ATTR(cnt_ctl,	"config:19");

static struct attribute *ibs_fetch_format_attrs[] = {
	&format_attr_rand_en.attr,
	NULL,
};

static struct attribute *ibs_op_format_attrs[] = {
	NULL,	/* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */
	NULL,
};
static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,
	.format_attrs		= ibs_fetch_format_attrs,

	.get_count		= get_ibs_fetch_count,
};
static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,
	.format_attrs		= ibs_op_format_attrs,

	.get_count		= get_ibs_op_count,
};
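/*
 * Usage sketch (user space, not part of this file): once the PMUs above
 * are registered under the names "ibs_fetch" and "ibs_op" (see
 * perf_event_ibs_init() below), the dynamic PMU type can be read from
 * sysfs and passed to perf_event_open(). The path and helper name are
 * assumptions for illustration:
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	static int open_ibs_op(unsigned long long sample_period)
 *	{
 *		struct perf_event_attr attr = { 0 };
 *		int type = -1;
 *		FILE *f = fopen("/sys/bus/event_source/devices/ibs_op/type", "r");
 *
 *		if (!f)
 *			return -1;
 *		if (fscanf(f, "%d", &type) != 1)
 *			type = -1;
 *		fclose(f);
 *		if (type < 0)
 *			return -1;
 *
 *		attr.size		= sizeof(attr);
 *		attr.type		= type;		// dynamic type from sysfs
 *		attr.sample_period	= sample_period;
 *		attr.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_RAW;
 *
 *		// pid = -1, cpu = 0: system-wide on CPU 0, needs privileges
 *		return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	}
 *
 * PERF_SAMPLE_RAW makes perf_ibs_handle_irq() copy the full set of IBS
 * sample MSRs into the raw data of each sample.
 */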
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, *config, period;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
		/*
		 * Catch spurious interrupts after stopping IBS: After
		 * disabling IBS there could still be incoming NMIs
		 * with samples that even have the valid bit cleared.
		 * Mark all these NMIs as handled.
		 */
		return test_and_clear_bit(IBS_STOPPING, pcpu->state) ? 1 : 0;
	}

	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		return 0;

	config = &ibs_data.regs[0];
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
		goto out;	/* no sw counter overflow */

	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
	if (event->attr.sample_type & PERF_SAMPLE_RAW)
		offset_max = perf_ibs->offset_max;
	else if (check_rip)
		offset_max = 2;
	else
		offset_max = 1;
	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	ibs_data.size = sizeof(u64) * size;

	regs = *iregs;
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.size = sizeof(u32) + ibs_data.size;
		raw.data = ibs_data.data;
		data.raw = &raw;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	if (throttle)
		perf_ibs_disable_event(perf_ibs, hwc, *config);
	else
		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);

	return 1;
}
static int __kprobes
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	/* register attributes */
	if (perf_ibs->format_attrs[0]) {
		memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
		perf_ibs->format_group.name	= "format";
		perf_ibs->format_group.attrs	= perf_ibs->format_attrs;

		memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
		perf_ibs->attr_groups[0]	= &perf_ibs->format_group;
		perf_ibs->pmu.attr_groups	= perf_ibs->attr_groups;
	}

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}
static __init int perf_event_ibs_init(void)
{
	struct attribute **attr = ibs_op_format_attrs;

	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

	if (ibs_caps & IBS_CAPS_OPCNT) {
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
		*attr++ = &format_attr_cnt_ctl.attr;
	}
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void) { return 0; }

#endif
/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}

u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);
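/*
 * Illustrative user-space equivalent of the probe above (not part of this
 * file). It is a simplified sketch that only checks the IBS CPUID leaf and
 * the AVAIL bit; the constants mirror the kernel ones:
 *
 *	#include <cpuid.h>
 *
 *	#define IBS_CPUID_FEATURES	0x8000001b
 *	#define IBS_CAPS_AVAIL		(1U << 0)
 *
 *	static unsigned int get_ibs_caps_user(void)
 *	{
 *		unsigned int eax, ebx, ecx, edx;
 *
 *		if (__get_cpuid_max(0x80000000, NULL) < IBS_CPUID_FEATURES)
 *			return 0;
 *		if (!__get_cpuid(IBS_CPUID_FEATURES, &eax, &ebx, &ecx, &edx))
 *			return 0;
 *		return (eax & IBS_CAPS_AVAIL) ? eax : 0;
 *	}
 *
 * Unlike __get_ibs_caps(), this sketch does not fall back to
 * IBS_CAPS_DEFAULT when the feature leaf is missing.
 */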
static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x\n", value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This then updates
 * the offset in the per-node IBS_CTL MSR. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
 * uses the new offset.
 */
static int force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		printk(KERN_DEBUG "No EILVT entry available\n");
		return -EBUSY;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid()) {
		ret = -EFAULT;
		goto out;
	}

	pr_info("IBS: LVT offset %d assigned\n", offset);

	return 0;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return ret;
}
static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}
static void setup_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;
failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
static int __cpuinit
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		setup_APIC_ibs(NULL);
		break;
	case CPU_DYING:
		clear_APIC_ibs(NULL);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
static __init int amd_ibs_init(void)
{
	u32 caps;
	int ret = -EINVAL;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to the BIOS settings and try to set those up.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		goto out;

	get_online_cpus();
	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	perf_cpu_notifier(perf_ibs_cpu_notifier);
	smp_call_function(setup_APIC_ibs, NULL, 1);
	put_online_cpus();

	ret = perf_event_ibs_init();
out:
	if (ret)
		pr_err("Failed to setup IBS, %d\n", ret);
	return ret;
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);