/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>
#include <linux/sched/clock.h>

#include "../perf_event.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>
#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
/*
 * ENABLED; tracks the pmu::add(), pmu::del() state, when set the counter is taken
 * and any further add()s must fail.
 *
 * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
 * complicated by the fact that the IBS hardware can send late NMIs (ie. after
 * we've cleared the EN bit).
 *
 * In order to consume these late NMIs we have the STOPPED state, any NMI that
 * happens after we've cleared the EN state will clear this bit and report the
 * NMI handled (this is fundamentally racy in the face of multiple NMI sources,
 * someone else can consume our bit and our NMI will go unhandled).
 *
 * And since we cannot set/clear this separate bit together with the EN bit,
 * there are races; if we cleared STARTED early, an NMI could land in
 * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
 * could happen if the period is small enough), and consume our STOPPED bit
 * and trigger streams of unhandled NMIs.
 *
 * If, however, we clear STARTED late, an NMI can hit between clearing the
 * EN bit and clearing STARTED, still see STARTED set and process the event.
 * If this event will have the VALID bit clear, we bail properly, but this
 * is not a given. With VALID set we can end up calling pmu::stop() again
 * (the throttle logic) and trigger the WARNs in there.
 *
 * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
 * nesting, and clear STARTED late, so that we have a well defined state over
 * the clearing of the EN bit.
 *
 * XXX: we could probably be using !atomic bitops for all this.
 */
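/*
 * Illustrative sketch only (not part of the driver, never compiled): a
 * stand-alone model of the ordering described above. All MODEL_* names are
 * made up for this sketch, and the hardware EN bit is folded into the same
 * flag word purely for illustration.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

enum { MODEL_EN, MODEL_STARTED, MODEL_STOPPING, MODEL_STOPPED };
static atomic_uint model_state;

static void model_stop(void)
{
        /* STOPPING first, so a nested stop (throttling) bails out early. */
        atomic_fetch_or(&model_state, 1u << MODEL_STOPPING);
        /* STOPPED before clearing EN, so a late NMI can still be consumed. */
        atomic_fetch_or(&model_state, 1u << MODEL_STOPPED);
        atomic_fetch_and(&model_state, ~(1u << MODEL_EN));
        /* STARTED last, giving the NMI path a well defined state. */
        atomic_fetch_and(&model_state, ~(1u << MODEL_STARTED));
}

static bool model_nmi(void)
{
        if (atomic_load(&model_state) & (1u << MODEL_STARTED))
                return true;    /* normal sample path */
        /* Late NMI: claim it exactly once by consuming MODEL_STOPPED. */
        return atomic_fetch_and(&model_state, ~(1u << MODEL_STOPPED)) &
               (1u << MODEL_STOPPED);
}
#endif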
struct perf_event		*event;
unsigned long			state[BITS_TO_LONGS(IBS_MAX_STATES)];
unsigned long			offset_mask[1];
struct cpu_perf_ibs __percpu	*pcpu;

struct attribute		**format_attrs;
struct attribute_group		format_group;
const struct attribute_group	*attr_groups[2];

u64				(*get_count)(u64 config);
struct perf_ibs_data {
        u32	data[0];	/* data buffer starts here */
        u64	regs[MSR_AMD64_IBS_REG_COUNT_MAX];
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
        }

        if (unlikely(left < (s64)min)) {
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
        }

        /*
         * If the hw period that triggers the sw overflow is too short
         * we might hit the irq handler. This biases the results.
         * Thus we shorten the next-to-last period and set the last
         * period to the max period.
         */

        *hw_period = (u64)left;
}
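/*
 * Rough sketch (illustrative only, never compiled): how the clamping described
 * in the comment above could look, assuming the remaining count must end up in
 * [min, max] and anything above max is split so that only the last period runs
 * at the maximum. Names and the exact policy are assumptions of this sketch.
 */
#if 0
#include <stdint.h>

static uint64_t clamp_hw_period(int64_t left, int64_t min, int64_t max)
{
        if (left > max) {
                left -= max;            /* shorten the next-to-last period ... */
                if (left > max)
                        left = max;     /* ... and cap it at the max period */
                else if (left < min)
                        left = min;     /* keep the hw period long enough */
        }
        return (uint64_t)left;
}
#endif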
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - width;

        /*
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
         */
        prev_raw_count = local64_read(&hwc->prev_count);
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
}
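/*
 * Stand-alone sketch of the read/cmpxchg/delta pattern used above (not driver
 * code, never compiled). prev_count, accumulated and try_update are names
 * invented for this sketch; the point is that the delta is only accounted once
 * the compare-and-exchange proves no NMI updated the previous value behind our
 * back.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t prev_count;
static _Atomic int64_t  accumulated;

static bool try_update(uint64_t new_raw, int width)
{
        int shift = 64 - width;
        uint64_t prev = atomic_load(&prev_count);

        /* Someone (e.g. an NMI) raced us: let the caller retry. */
        if (!atomic_compare_exchange_strong(&prev_count, &prev, new_raw))
                return false;

        /* Shift up to drop bits above 'width', arithmetic-shift back down. */
        int64_t delta = (int64_t)((new_raw << shift) - (prev << shift));
        delta >>= shift;

        atomic_fetch_add(&accumulated, delta);
        return true;
}
#endif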
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
        if (perf_ibs_fetch.pmu.type == type)
                return &perf_ibs_fetch;
        if (perf_ibs_op.pmu.type == type)
                return &perf_ibs_op;
/*
 * Use IBS for precise event sampling:
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 *
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */
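/*
 * Stand-alone user-space illustration of the ':p' usage above via the raw
 * perf_event_open() syscall (not driver code, never compiled here). The
 * attribute values mirror 'perf record -e cpu-cycles:p'; open_precise_cycles
 * and the chosen sample period are made up for this sketch, and error
 * handling is intentionally minimal.
 */
#if 0
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_precise_cycles(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size          = sizeof(attr);
        attr.type          = PERF_TYPE_HARDWARE;
        attr.config        = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100000;
        attr.sample_type   = PERF_SAMPLE_IP;
        attr.precise_ip    = 2;		/* request skid-0 samples */

        /* measure the calling thread on any CPU */
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
#endif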
static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
        switch (event->attr.precise_ip) {

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                switch (event->attr.config) {
                case PERF_COUNT_HW_CPU_CYCLES:

                switch (event->attr.config) {

                        *config = IBS_OP_CNT_CTL;
static const struct perf_event_attr ibs_notsupp = {
static int perf_ibs_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs;

        perf_ibs = get_ibs_pmu(event->attr.type);
        config = event->attr.config;

        perf_ibs = &perf_ibs_op;
        ret = perf_ibs_precise_event(event, &config);

        if (event->pmu != &perf_ibs->pmu)

        if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))

        if (config & ~perf_ibs->config_mask)

        if (hwc->sample_period) {
                if (config & perf_ibs->cnt_mask)
                        /* raw max_cnt may not be set */
                if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
                        /*
                         * lower 4 bits cannot be set in ibs max cnt,
                         * but allow it in case we adjust the
                         * sample period to set a frequency.
                         */
                hwc->sample_period &= ~0x0FULL;
                if (!hwc->sample_period)
                        hwc->sample_period = 0x10;
        } else {
                max_cnt = config & perf_ibs->cnt_mask;
                config &= ~perf_ibs->cnt_mask;
                event->attr.sample_period = max_cnt << 4;
                hwc->sample_period = event->attr.sample_period;
        }

        if (!hwc->sample_period)

        /*
         * If we modify hwc->sample_period, we also need to update
         * hwc->last_period and hwc->period_left.
         */
        hwc->last_period = hwc->sample_period;
        local64_set(&hwc->period_left, hwc->sample_period);

        hwc->config_base = perf_ibs->msr;
        hwc->config = config;

        return 0;
}
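/*
 * Worked example of the period handling above (illustrative only, never
 * compiled): the IBS max count field stores the period divided by 16, so the
 * lower four period bits are not programmable. period_example is a made-up
 * name for this sketch.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void period_example(void)
{
        uint64_t sample_period = 0x1234f;       /* user supplied */

        sample_period &= ~0x0FULL;              /* lower 4 bits dropped */
        assert(sample_period == 0x12340);

        uint64_t max_cnt = sample_period >> 4;  /* value programmed in the MSR */
        assert((max_cnt << 4) == sample_period);/* period recovered as max_cnt << 4 */
}
#endif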
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
                               struct hw_perf_event *hwc, u64 *period)
{
        /* ignore lower 4 bits in min count: */
        overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
        local64_set(&hwc->prev_count, 0);
static u64 get_ibs_fetch_count(u64 config)
{
        return (config & IBS_FETCH_CNT) >> 12;
}

static u64 get_ibs_op_count(u64 config)
{
        if (config & IBS_OP_VAL)
                count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */

        if (ibs_caps & IBS_CAPS_RDWROPCNT)
                count += (config & IBS_OP_CUR_CNT) >> 32;

        return count;
}
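/*
 * Illustrative recomputation of the logic above (not driver code, never
 * compiled): with made-up mask values standing in for IBS_OP_MAX_CNT and
 * IBS_OP_CUR_CNT, a rolled-over op counter contributes one full period
 * (max_cnt << 4) plus whatever the current-count field (an upper-dword
 * field, hence the >> 32) already shows.
 */
#if 0
#include <stdint.h>

static uint64_t op_count_example(uint64_t ctl, int valid, int has_rdwropcnt)
{
        uint64_t count   = 0;
        uint64_t max_cnt = ctl & 0x000000000000ffffULL; /* stand-in mask */
        uint64_t cur_cnt = ctl & 0x07ffffff00000000ULL; /* stand-in mask */

        if (valid)
                count += max_cnt << 4;  /* counter rolled over: one full period */
        if (has_rdwropcnt)
                count += cur_cnt >> 32; /* current count field */

        return count;
}
#endif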
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
                      u64 *config)
{
        u64 count = perf_ibs->get_count(*config);

        /*
         * Set width to 64 since we do not overflow on max width but
         * instead on max count. In perf_ibs_set_period() we clear
         * prev count manually on overflow.
         */
        while (!perf_event_try_update(event, count, 64)) {
                rdmsrl(event->hw.config_base, *config);
                count = perf_ibs->get_count(*config);
        }
static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
                                         struct hw_perf_event *hwc, u64 config)
{
        wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
}
/*
 * Erratum #420 Instruction-Based Sampling Engine May Generate
 * Interrupt that Cannot Be Cleared:
 *
 * Must clear counter mask first, then clear the enable bit. See
 * Revision Guide for AMD Family 10h Processors, Publication #41322.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
                                          struct hw_perf_event *hwc, u64 config)
{
        config &= ~perf_ibs->cnt_mask;
        wrmsrl(hwc->config_base, config);
        config &= ~perf_ibs->enable_mask;
        wrmsrl(hwc->config_base, config);
}
/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, we ignore PERF_EF_RELOAD and PERF_EF_UPDATE in
 * perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))

        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        perf_ibs_set_period(perf_ibs, hwc, &period);
        /*
         * Set STARTED before enabling the hardware, such that a subsequent NMI
         * must observe it.
         */
        set_bit(IBS_STARTED, pcpu->state);
        clear_bit(IBS_STOPPING, pcpu->state);
        perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

        perf_event_update_userpage(event);
}
static void perf_ibs_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (test_and_set_bit(IBS_STOPPING, pcpu->state))

        stopping = test_bit(IBS_STARTED, pcpu->state);

        if (!stopping && (hwc->state & PERF_HES_UPTODATE))

        rdmsrl(hwc->config_base, config);

        /*
         * Set STOPPED before disabling the hardware, such that it
         * must be visible to NMIs the moment we clear the EN bit,
         * at which point we can generate an !VALID sample which
         * we need to consume.
         */
        set_bit(IBS_STOPPED, pcpu->state);
        perf_ibs_disable_event(perf_ibs, hwc, config);
        /*
         * Clear STARTED after disabling the hardware; if it were
         * cleared before, an NMI hitting after the clear but before
         * the EN bit is cleared might think it a spurious NMI and
         * not handle it.
         *
         * Clearing it after, however, creates the problem of the NMI
         * handler seeing STARTED but not having a valid sample.
         */
        clear_bit(IBS_STARTED, pcpu->state);
        WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
        hwc->state |= PERF_HES_STOPPED;

        if (hwc->state & PERF_HES_UPTODATE)

        /*
         * Clear valid bit to not count rollovers on update, rollovers
         * are only updated in the irq handler.
         */
        config &= ~perf_ibs->valid_mask;

        perf_ibs_event_update(perf_ibs, event, &config);
        hwc->state |= PERF_HES_UPTODATE;
static int perf_ibs_add(struct perf_event *event, int flags)
{
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (test_and_set_bit(IBS_ENABLED, pcpu->state))

        event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                perf_ibs_start(event, PERF_EF_RELOAD);
static void perf_ibs_del(struct perf_event *event, int flags)
{
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))

        perf_ibs_stop(event, PERF_EF_UPDATE);

        perf_event_update_userpage(event);
}

static void perf_ibs_read(struct perf_event *event) { }
PMU_FORMAT_ATTR(rand_en,	"config:57");
PMU_FORMAT_ATTR(cnt_ctl,	"config:19");

static struct attribute *ibs_fetch_format_attrs[] = {
        &format_attr_rand_en.attr,
        NULL,
};

static struct attribute *ibs_op_format_attrs[] = {
        NULL,	/* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */
        NULL,
};
static struct perf_ibs perf_ibs_fetch = {
        .pmu = {
                .task_ctx_nr	= perf_invalid_context,

                .event_init	= perf_ibs_init,
                .start		= perf_ibs_start,
                .stop		= perf_ibs_stop,
                .read		= perf_ibs_read,
        },
        .msr			= MSR_AMD64_IBSFETCHCTL,
        .config_mask		= IBS_FETCH_CONFIG_MASK,
        .cnt_mask		= IBS_FETCH_MAX_CNT,
        .enable_mask		= IBS_FETCH_ENABLE,
        .valid_mask		= IBS_FETCH_VAL,
        .max_period		= IBS_FETCH_MAX_CNT << 4,
        .offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
        .offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,
        .format_attrs		= ibs_fetch_format_attrs,

        .get_count		= get_ibs_fetch_count,
};
static struct perf_ibs perf_ibs_op = {
        .pmu = {
                .task_ctx_nr	= perf_invalid_context,

                .event_init	= perf_ibs_init,
                .start		= perf_ibs_start,
                .stop		= perf_ibs_stop,
                .read		= perf_ibs_read,
        },
        .msr			= MSR_AMD64_IBSOPCTL,
        .config_mask		= IBS_OP_CONFIG_MASK,
        .cnt_mask		= IBS_OP_MAX_CNT,
        .enable_mask		= IBS_OP_ENABLE,
        .valid_mask		= IBS_OP_VAL,
        .max_period		= IBS_OP_MAX_CNT << 4,
        .offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
        .offset_max		= MSR_AMD64_IBSOP_REG_COUNT,
        .format_attrs		= ibs_op_format_attrs,

        .get_count		= get_ibs_op_count,
};
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        struct perf_event *event = pcpu->event;
        struct hw_perf_event *hwc;
        struct perf_sample_data data;
        struct perf_raw_record raw;
        struct perf_ibs_data ibs_data;
        int offset, size, check_rip, offset_max, throttle = 0;
        u64 *buf, *config, period;

        if (!test_bit(IBS_STARTED, pcpu->state)) {
                /*
                 * Catch spurious interrupts after stopping IBS: After
                 * disabling IBS there could still be incoming NMIs
                 * with samples that even have the valid bit cleared.
                 * Mark all these NMIs as handled.
                 */
                if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
                        return 1;
        }

        if (WARN_ON_ONCE(!event))

        msr = hwc->config_base;
        if (!(*buf++ & perf_ibs->valid_mask))

        config = &ibs_data.regs[0];
        perf_ibs_event_update(perf_ibs, event, config);
        perf_sample_data_init(&data, 0, hwc->last_period);
        if (!perf_ibs_set_period(perf_ibs, hwc, &period))
                goto out;	/* no sw counter overflow */

        ibs_data.caps = ibs_caps;

        check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
        if (event->attr.sample_type & PERF_SAMPLE_RAW)
                offset_max = perf_ibs->offset_max;

        do {
                rdmsrl(msr + offset, *buf++);
                offset = find_next_bit(perf_ibs->offset_mask,
                                       perf_ibs->offset_max,
                                       offset + 1);
        } while (offset < offset_max);

        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                /*
                 * Read IbsBrTarget and IbsOpData4 separately
                 * depending on their availability.
                 * Can't add to offset_max as they are staggered
                 */
                if (ibs_caps & IBS_CAPS_BRNTRGT) {
                        rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
                }
                if (ibs_caps & IBS_CAPS_OPDATA4) {
                        rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
                }
        }
        ibs_data.size = sizeof(u64) * size;

        if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
                regs.flags &= ~PERF_EFLAGS_EXACT;
        } else {
                set_linear_ip(&regs, ibs_data.regs[1]);
                regs.flags |= PERF_EFLAGS_EXACT;
        }

        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                raw = (struct perf_raw_record){
                        .size = sizeof(u32) + ibs_data.size,
                        .data = ibs_data.data,
                };
        }

        throttle = perf_event_overflow(event, &data, &regs);
out:
        if (throttle)
                perf_ibs_stop(event, 0);
        else
                perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

        perf_event_update_userpage(event);
static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        u64 stamp = sched_clock();

        handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
        handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

        inc_irq_stat(apic_perf_irqs);

        perf_sample_event_took(sched_clock() - stamp);
}
NOKPROBE_SYMBOL(perf_ibs_nmi_handler);
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
        struct cpu_perf_ibs __percpu *pcpu;

        pcpu = alloc_percpu(struct cpu_perf_ibs);

        perf_ibs->pcpu = pcpu;

        /* register attributes */
        if (perf_ibs->format_attrs[0]) {
                memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
                perf_ibs->format_group.name	= "format";
                perf_ibs->format_group.attrs	= perf_ibs->format_attrs;

                memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
                perf_ibs->attr_groups[0]	= &perf_ibs->format_group;
                perf_ibs->pmu.attr_groups	= perf_ibs->attr_groups;
        }

        ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
        if (ret)
                perf_ibs->pcpu = NULL;
static __init void perf_event_ibs_init(void)
{
        struct attribute **attr = ibs_op_format_attrs;

        perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

        if (ibs_caps & IBS_CAPS_OPCNT) {
                perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
                *attr++ = &format_attr_cnt_ctl.attr;
        }
        perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

        register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
        pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init void perf_event_ibs_init(void) { }

#endif
/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
        unsigned int max_level;

        if (!boot_cpu_has(X86_FEATURE_IBS))

        /* check IBS cpuid feature flags */
        max_level = cpuid_eax(0x80000000);
        if (max_level < IBS_CPUID_FEATURES)
                return IBS_CAPS_DEFAULT;

        caps = cpuid_eax(IBS_CPUID_FEATURES);
        if (!(caps & IBS_CAPS_AVAIL))
                /* cpuid flags not valid */
                return IBS_CAPS_DEFAULT;
u32 get_ibs_caps(void)
{
        return ibs_caps;
}
EXPORT_SYMBOL(get_ibs_caps);
static inline int get_eilvt(int offset)
{
        return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
        return !setup_APIC_eilvt(offset, 0, 0, 1);
}
/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
        rdmsrl(MSR_AMD64_IBSCTL, val);
        offset = val & IBSCTL_LVT_OFFSET_MASK;

        if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
                pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
                       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
        }

        if (!get_eilvt(offset)) {
                pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
                       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
        }
static int setup_ibs_ctl(int ibs_eilvt_off)
{
        struct pci_dev *cpu_cfg;

        cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
                                 PCI_DEVICE_ID_AMD_10H_NB_MISC,
                                 cpu_cfg);

        pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
                               | IBSCTL_LVT_OFFSET_VALID);
        pci_read_config_dword(cpu_cfg, IBSCTL, &value);
        if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
                pci_dev_put(cpu_cfg);
                pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
                         value);
        }

        pr_debug("No CPU node configured for IBS\n");
/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This then updates
 * the offset in the per-node IBS_CTL MSR. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
 * uses the new offset.
 */
static void force_ibs_eilvt_setup(void)
{
        /* find the next free available EILVT entry, skip offset 0 */
        for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
                if (get_eilvt(offset))
                        break;
        }

        if (offset == APIC_EILVT_NR_MAX) {
                pr_debug("No EILVT entry available\n");
        }

        ret = setup_ibs_ctl(offset);

        if (!ibs_eilvt_valid())

        pr_info("LVT offset %d assigned\n", offset);
static void ibs_eilvt_setup(void)
{
        /*
         * Force LVT offset assignment for family 10h: The offsets are
         * not assigned by the BIOS for this family, so the OS is
         * responsible for doing it. If the OS assignment fails, fall
         * back to the BIOS settings and try to set them up.
         */
        if (boot_cpu_data.x86 == 0x10)
                force_ibs_eilvt_setup();
}
static inline int get_ibs_lvt_offset(void)
{
        rdmsrl(MSR_AMD64_IBSCTL, val);
        if (!(val & IBSCTL_LVT_OFFSET_VALID))

        return val & IBSCTL_LVT_OFFSET_MASK;
}
static void setup_APIC_ibs(void)
{
        offset = get_ibs_lvt_offset();

        if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))

        pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
                smp_processor_id());
}

static void clear_APIC_ibs(void)
{
        offset = get_ibs_lvt_offset();

        setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
static int perf_ibs_suspend(void)

static void perf_ibs_resume(void)

static struct syscore_ops perf_ibs_syscore_ops = {
        .resume		= perf_ibs_resume,
        .suspend	= perf_ibs_suspend,
};

static void perf_ibs_pm_init(void)
{
        register_syscore_ops(&perf_ibs_syscore_ops);
}

static inline void perf_ibs_pm_init(void) { }
static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
static __init int amd_ibs_init(void)
{
        caps = __get_ibs_caps();
        if (!caps)
                return -ENODEV;	/* ibs not supported by the cpu */

        if (!ibs_eilvt_valid())

        /* make ibs_caps visible to other cpus: */

        /*
         * x86_pmu_amd_ibs_starting_cpu will be called from core on
         * all online cpus.
         */
        cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
                          "perf/x86/amd/ibs:starting",
                          x86_pmu_amd_ibs_starting_cpu,
                          x86_pmu_amd_ibs_dying_cpu);

        perf_event_ibs_init();
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);