/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>
#include <linux/sched/clock.h>

#include <asm/apic.h>

#include "../perf_event.h"

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>

#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT

/*
 * IBS states:
 *
 * ENABLED; tracks the pmu::add(), pmu::del() state, when set the counter is taken
 * and any further add()s must fail.
 *
 * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
 * complicated by the fact that the IBS hardware can send late NMIs (ie. after
 * we've cleared the EN bit).
 *
 * In order to consume these late NMIs we have the STOPPED state, any NMI that
 * happens after we've cleared the EN state will clear this bit and report the
 * NMI handled (this is fundamentally racy in the face of multiple NMI sources,
 * someone else can consume our BIT and our NMI will go unhandled).
 *
 * And since we cannot set/clear this separate bit together with the EN bit,
 * there are races; if we cleared STARTED early, an NMI could land in
 * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
 * could happen if the period is small enough), and consume our STOPPED bit
 * and trigger streams of unhandled NMIs.
 *
 * If, however, we clear STARTED late, an NMI can hit between clearing the
 * EN bit and clearing STARTED, still see STARTED set and process the event.
 * If this event has the VALID bit clear, we bail properly, but this
 * is not a given. With VALID set we can end up calling pmu::stop() again
 * (the throttle logic) and trigger the WARNs in there.
 *
 * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
 * nesting, and clear STARTED late, so that we have a well defined state over
 * the clearing of the EN bit.
 *
 * XXX: we could probably be using !atomic bitops for all this.
 */

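/*
 * Per-CPU IBS state bits, tracked in cpu_perf_ibs::state below. This enum is
 * a reconstruction from the bit names used throughout this file; the exact
 * ordinal values are assumed.
 */
enum ibs_states {
        IBS_ENABLED	= 0,
        IBS_STARTED	= 1,
        IBS_STOPPING	= 2,
        IBS_STOPPED	= 3,

        IBS_MAX_STATES,
};
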
struct cpu_perf_ibs {
        struct perf_event	*event;
        unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
        struct pmu			pmu;
        unsigned int			msr;
        u64				config_mask;
        u64				cnt_mask;
        u64				enable_mask;
        u64				valid_mask;
        u64				max_period;
        unsigned long			offset_mask[1];
        int				offset_max;
        struct cpu_perf_ibs __percpu	*pcpu;

        struct attribute		**format_attrs;
        struct attribute_group		format_group;
        const struct attribute_group	*attr_groups[2];

        u64				(*get_count)(u64 config);
};

struct perf_ibs_data {
        u32		size;
        union {
                u32	data[0];	/* data buffer starts here */
                u32	caps;
        };
        u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};

static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int overflow = 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                overflow = 1;
        }

        if (unlikely(left < (s64)min)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                overflow = 1;
        }

        /*
         * If the hw period that triggers the sw overflow is too short
         * we might hit the irq handler. This biases the results.
         * Thus we shorten the next-to-last period and set the last
         * period to the max period.
         */
        if (left > max) {
                left -= max;
                if (left > max)
                        left = max;
                else if (left < min)
                        left = min;
        }

        *hw_period = (u64)left;

        return overflow;
}

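/*
 * Example of the clamping above: a leftover period of 1.5 * max is split
 * into a shortened period of 0.5 * max now and a full max period last,
 * rather than max now and a possibly tiny remainder that would re-trigger
 * the interrupt almost immediately. A leftover below min gets a full
 * sample_period added back and the overflow is reported to the caller.
 */
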
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - width;
        u64 prev_raw_count;
        u64 delta;

        /*
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
         */
        prev_raw_count = local64_read(&hwc->prev_count);
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                return 0;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return 1;
}

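/*
 * IBS calls the helper above with width == 64 (see perf_ibs_event_update()),
 * so shift is 0 and no sign-extension correction is applied; the shift path
 * only matters for counters narrower than 64 bits.
 */
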
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
        if (perf_ibs_fetch.pmu.type == type)
                return &perf_ibs_fetch;
        if (perf_ibs_op.pmu.type == type)
                return &perf_ibs_op;
        return NULL;
}

/*
 * Use IBS for precise event sampling:
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 *
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */

static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
        switch (event->attr.precise_ip) {
        case 0:
                return -ENOENT;
        case 1:
        case 2:
                break;
        default:
                return -EOPNOTSUPP;
        }

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                switch (event->attr.config) {
                case PERF_COUNT_HW_CPU_CYCLES:
                        *config = 0;
                        return 0;
                }
                break;
        case PERF_TYPE_RAW:
                switch (event->attr.config) {
                case 0x0076:
                        *config = 0;
                        return 0;
                case 0x00C1:
                        *config = IBS_OP_CNT_CTL;
                        return 0;
                }
                break;
        default:
                return -ENOENT;
        }

        return -EOPNOTSUPP;
}

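/*
 * Map the event onto the ibs_op or ibs_fetch PMU, validate the config bits
 * against the PMU's config_mask and derive the sample period. The raw codes
 * 0x0076 (CPU cycles not halted) and 0x00C1 (retired uops) above are the two
 * core PMC events that can be emulated precisely with IBS op sampling.
 */
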
static int perf_ibs_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs;
        u64 max_cnt, config;
        int ret;

        perf_ibs = get_ibs_pmu(event->attr.type);
        if (perf_ibs) {
                config = event->attr.config;
        } else {
                perf_ibs = &perf_ibs_op;
                ret = perf_ibs_precise_event(event, &config);
                if (ret)
                        return ret;
        }

        if (event->pmu != &perf_ibs->pmu)
                return -ENOENT;

        if (config & ~perf_ibs->config_mask)
                return -EINVAL;

        if (hwc->sample_period) {
                if (config & perf_ibs->cnt_mask)
                        /* raw max_cnt may not be set */
                        return -EINVAL;
                if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
                        /*
                         * lower 4 bits can not be set in ibs max cnt,
                         * but allowing it in case we adjust the
                         * sample period to set a frequency.
                         */
                        return -EINVAL;
                hwc->sample_period &= ~0x0FULL;
                if (!hwc->sample_period)
                        hwc->sample_period = 0x10;
        } else {
                max_cnt = config & perf_ibs->cnt_mask;
                config &= ~perf_ibs->cnt_mask;
                event->attr.sample_period = max_cnt << 4;
                hwc->sample_period = event->attr.sample_period;
        }

        if (!hwc->sample_period)
                return -EINVAL;

        /*
         * If we modify hwc->sample_period, we also need to update
         * hwc->last_period and hwc->period_left.
         */
        hwc->last_period = hwc->sample_period;
        local64_set(&hwc->period_left, hwc->sample_period);

        hwc->config_base = perf_ibs->msr;
        hwc->config = config;

        return 0;
}

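/*
 * Program a new hardware period: clamp it to [0x10, max_period] and clear
 * prev_count so the next event update starts counting from zero.
 */
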
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
                               struct hw_perf_event *hwc, u64 *period)
{
        int overflow;

        /* ignore lower 4 bits in min count: */
        overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
        local64_set(&hwc->prev_count, 0);

        return overflow;
}

static u64 get_ibs_fetch_count(u64 config)
{
        return (config & IBS_FETCH_CNT) >> 12;
}

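/*
 * The op count is composed of MaxCnt (scaled by 16) when the counter rolled
 * over (IBS_OP_VAL set) plus, on hardware that allows reading the op counter
 * (IBS_CAPS_RDWROPCNT), the current count bits from CurCnt.
 */
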
static u64 get_ibs_op_count(u64 config)
{
        u64 count = 0;

        if (config & IBS_OP_VAL)
                count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */

        if (ibs_caps & IBS_CAPS_RDWROPCNT)
                count += (config & IBS_OP_CUR_CNT) >> 32;

        return count;
}

static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
                      u64 *config)
{
        u64 count = perf_ibs->get_count(*config);

        /*
         * Set width to 64 since we do not overflow on max width but
         * instead on max count. In perf_ibs_set_period() we clear
         * prev count manually on overflow.
         */
        while (!perf_event_try_update(event, count, 64)) {
                rdmsrl(event->hw.config_base, *config);
                count = perf_ibs->get_count(*config);
        }
}

static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
                                         struct hw_perf_event *hwc, u64 config)
{
        wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
}

/*
 * Erratum #420 Instruction-Based Sampling Engine May Generate
 * Interrupt that Cannot Be Cleared:
 *
 * Must clear counter mask first, then clear the enable bit. See
 * Revision Guide for AMD Family 10h Processors, Publication #41322.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
                                          struct hw_perf_event *hwc, u64 config)
{
        config &= ~perf_ibs->cnt_mask;
        if (boot_cpu_data.x86 == 0x10)
                wrmsrl(hwc->config_base, config);
        config &= ~perf_ibs->enable_mask;
        wrmsrl(hwc->config_base, config);
}

/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags
 * in perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        u64 period;

        if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
                return;

        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
        hwc->state = 0;

        perf_ibs_set_period(perf_ibs, hwc, &period);
        /*
         * Set STARTED before enabling the hardware, such that a subsequent NMI
         * must observe it.
         */
        set_bit(IBS_STARTED,    pcpu->state);
        clear_bit(IBS_STOPPING, pcpu->state);
        perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

        perf_event_update_userpage(event);
}

static void perf_ibs_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        u64 config;
        int stopping;

        if (test_and_set_bit(IBS_STOPPING, pcpu->state))
                return;

        stopping = test_bit(IBS_STARTED, pcpu->state);

        if (!stopping && (hwc->state & PERF_HES_UPTODATE))
                return;

        rdmsrl(hwc->config_base, config);

        if (stopping) {
                /*
                 * Set STOPPED before disabling the hardware, such that it
                 * must be visible to NMIs the moment we clear the EN bit,
                 * at which point we can generate an !VALID sample which
                 * we need to consume.
                 */
                set_bit(IBS_STOPPED, pcpu->state);
                perf_ibs_disable_event(perf_ibs, hwc, config);
                /*
                 * Clear STARTED after disabling the hardware; if it were
                 * cleared before, an NMI hitting after the clear but before
                 * clearing the EN bit might think it a spurious NMI and not
                 * handle it.
                 *
                 * Clearing it after, however, creates the problem of the NMI
                 * handler seeing STARTED but not having a valid sample.
                 */
                clear_bit(IBS_STARTED, pcpu->state);
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        if (hwc->state & PERF_HES_UPTODATE)
                return;

        /*
         * Clear valid bit to not count rollovers on update, rollovers
         * are only updated in the irq handler.
         */
        config &= ~perf_ibs->valid_mask;

        perf_ibs_event_update(perf_ibs, event, &config);
        hwc->state |= PERF_HES_UPTODATE;
}

static int perf_ibs_add(struct perf_event *event, int flags)
{
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (test_and_set_bit(IBS_ENABLED, pcpu->state))
                return -ENOSPC;

        event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        pcpu->event = event;

        if (flags & PERF_EF_START)
                perf_ibs_start(event, PERF_EF_RELOAD);

        return 0;
}

static void perf_ibs_del(struct perf_event *event, int flags)
{
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
                return;

        perf_ibs_stop(event, PERF_EF_UPDATE);

        pcpu->event = NULL;

        perf_event_update_userpage(event);
}

static void perf_ibs_read(struct perf_event *event) { }

PMU_FORMAT_ATTR(rand_en,	"config:57");
PMU_FORMAT_ATTR(cnt_ctl,	"config:19");

static struct attribute *ibs_fetch_format_attrs[] = {
        &format_attr_rand_en.attr,
        NULL,
};

static struct attribute *ibs_op_format_attrs[] = {
        NULL,	/* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */
        NULL,
};

static struct perf_ibs perf_ibs_fetch = {
        .pmu = {
                .task_ctx_nr	= perf_invalid_context,

                .event_init	= perf_ibs_init,
                .add		= perf_ibs_add,
                .del		= perf_ibs_del,
                .start		= perf_ibs_start,
                .stop		= perf_ibs_stop,
                .read		= perf_ibs_read,
                .capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
        },
        .msr			= MSR_AMD64_IBSFETCHCTL,
        .config_mask		= IBS_FETCH_CONFIG_MASK,
        .cnt_mask		= IBS_FETCH_MAX_CNT,
        .enable_mask		= IBS_FETCH_ENABLE,
        .valid_mask		= IBS_FETCH_VAL,
        .max_period		= IBS_FETCH_MAX_CNT << 4,
        .offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
        .offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,
        .format_attrs		= ibs_fetch_format_attrs,

        .get_count		= get_ibs_fetch_count,
};

static struct perf_ibs perf_ibs_op = {
        .pmu = {
                .task_ctx_nr	= perf_invalid_context,

                .event_init	= perf_ibs_init,
                .add		= perf_ibs_add,
                .del		= perf_ibs_del,
                .start		= perf_ibs_start,
                .stop		= perf_ibs_stop,
                .read		= perf_ibs_read,
        },
        .msr			= MSR_AMD64_IBSOPCTL,
        .config_mask		= IBS_OP_CONFIG_MASK,
        .cnt_mask		= IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
                                  IBS_OP_CUR_CNT_RAND,
        .enable_mask		= IBS_OP_ENABLE,
        .valid_mask		= IBS_OP_VAL,
        .max_period		= IBS_OP_MAX_CNT << 4,
        .offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
        .offset_max		= MSR_AMD64_IBSOP_REG_COUNT,
        .format_attrs		= ibs_op_format_attrs,

        .get_count		= get_ibs_op_count,
};

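/*
 * NMI handler for one IBS unit: validate that the unit was started and
 * produced a valid sample, read the sample MSRs into ibs_data, push the
 * sample through perf_event_overflow() and re-arm the counter unless the
 * event got throttled.
 */
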
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        struct perf_event *event = pcpu->event;
        struct hw_perf_event *hwc;
        struct perf_sample_data data;
        struct perf_raw_record raw;
        struct pt_regs regs;
        struct perf_ibs_data ibs_data;
        int offset, size, check_rip, offset_max, throttle = 0;
        unsigned int msr;
        u64 *buf, *config, period;

        if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
                /*
                 * Catch spurious interrupts after stopping IBS: After
                 * disabling IBS there could be still incoming NMIs
                 * with samples that even have the valid bit cleared.
                 * Mark all these NMIs as handled.
                 */
                if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
                        return 1;

                return 0;
        }

        if (WARN_ON_ONCE(!event))
                goto fail;

        hwc = &event->hw;
        msr = hwc->config_base;
        buf = ibs_data.regs;
        rdmsrl(msr, *buf);
        if (!(*buf++ & perf_ibs->valid_mask))
                goto fail;

        config = &ibs_data.regs[0];
        perf_ibs_event_update(perf_ibs, event, config);
        perf_sample_data_init(&data, 0, hwc->last_period);
        if (!perf_ibs_set_period(perf_ibs, hwc, &period))
                goto out;	/* no sw counter overflow */

        ibs_data.caps = ibs_caps;
        size = 1;
        offset = 1;
        check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
        if (event->attr.sample_type & PERF_SAMPLE_RAW)
                offset_max = perf_ibs->offset_max;
        else if (check_rip)
                offset_max = 3;
        else
                offset_max = 1;
        do {
                rdmsrl(msr + offset, *buf++);
                size++;
                offset = find_next_bit(perf_ibs->offset_mask,
                                       perf_ibs->offset_max,
                                       offset + 1);
        } while (offset < offset_max);
        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                /*
                 * Read IbsBrTarget and IbsOpData4 separately
                 * depending on their availability.
                 * Can't add to offset_max as they are staggered
                 */
                if (ibs_caps & IBS_CAPS_BRNTRGT) {
                        rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
                        size++;
                }
                if (ibs_caps & IBS_CAPS_OPDATA4) {
                        rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
                        size++;
                }
        }
        ibs_data.size = sizeof(u64) * size;

        regs = *iregs;
        if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
                regs.flags &= ~PERF_EFLAGS_EXACT;
        } else {
                set_linear_ip(&regs, ibs_data.regs[1]);
                regs.flags |= PERF_EFLAGS_EXACT;
        }

        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                raw = (struct perf_raw_record){
                        .frag = {
                                .size = sizeof(u32) + ibs_data.size,
                                .data = ibs_data.data,
                        },
                };
                data.raw = &raw;
        }

        throttle = perf_event_overflow(event, &data, &regs);
out:
        if (throttle) {
                perf_ibs_stop(event, 0);
        } else {
                period >>= 4;

                if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
                    (*config & IBS_OP_CNT_CTL))
                        period |= *config & IBS_OP_CUR_CNT_RAND;

                perf_ibs_enable_event(perf_ibs, hwc, period);
        }

        perf_event_update_userpage(event);

        return 1;
}

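/*
 * A single NMI can carry work for both IBS units; report it as handled if
 * either the fetch or the op PMU consumed it.
 */
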
static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        u64 stamp = sched_clock();
        int handled = 0;

        handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
        handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

        if (handled)
                inc_irq_stat(apic_perf_irqs);

        perf_sample_event_took(sched_clock() - stamp);

        return handled;
}
NOKPROBE_SYMBOL(perf_ibs_nmi_handler);

static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
        struct cpu_perf_ibs __percpu *pcpu;
        int ret;

        pcpu = alloc_percpu(struct cpu_perf_ibs);
        if (!pcpu)
                return -ENOMEM;

        perf_ibs->pcpu = pcpu;

        /* register attributes */
        if (perf_ibs->format_attrs[0]) {
                memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
                perf_ibs->format_group.name	= "format";
                perf_ibs->format_group.attrs	= perf_ibs->format_attrs;

                memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
                perf_ibs->attr_groups[0]	= &perf_ibs->format_group;
                perf_ibs->pmu.attr_groups	= perf_ibs->attr_groups;
        }

        ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
        if (ret) {
                perf_ibs->pcpu = NULL;
                free_percpu(pcpu);
        }

        return ret;
}

static __init void perf_event_ibs_init(void)
{
        struct attribute **attr = ibs_op_format_attrs;

        perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

        if (ibs_caps & IBS_CAPS_OPCNT) {
                perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
                *attr++ = &format_attr_cnt_ctl.attr;
        }
        perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

        register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
        pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
}

#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init void perf_event_ibs_init(void) { }

#endif

/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
        u32 caps;
        unsigned int max_level;

        if (!boot_cpu_has(X86_FEATURE_IBS))
                return 0;

        /* check IBS cpuid feature flags */
        max_level = cpuid_eax(0x80000000);
        if (max_level < IBS_CPUID_FEATURES)
                return IBS_CAPS_DEFAULT;

        caps = cpuid_eax(IBS_CPUID_FEATURES);
        if (!(caps & IBS_CAPS_AVAIL))
                /* cpuid flags not valid */
                return IBS_CAPS_DEFAULT;

        return caps;
}

u32 get_ibs_caps(void)
{
        return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);

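/*
 * Reserve or release an extended interrupt LVT (EILVT) entry: a masked NMI
 * entry claims the offset, a masked zero entry gives it back again;
 * setup_APIC_eilvt() fails if the entry is already in use with different
 * settings.
 */
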
static inline int get_eilvt(int offset)
{
        return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
        return !setup_APIC_eilvt(offset, 0, 0, 1);
}

/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
        int offset;
        u64 val;
        int valid = 0;

        preempt_disable();

        rdmsrl(MSR_AMD64_IBSCTL, val);
        offset = val & IBSCTL_LVT_OFFSET_MASK;

        if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
                pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
                       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
                goto out;
        }

        if (!get_eilvt(offset)) {
                pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
                       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
                goto out;
        }

        valid = 1;
out:
        preempt_enable();

        return valid;
}

static int setup_ibs_ctl(int ibs_eilvt_off)
{
        struct pci_dev *cpu_cfg;
        int nodes;
        u32 value = 0;

        nodes = 0;
        cpu_cfg = NULL;
        do {
                cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
                                         PCI_DEVICE_ID_AMD_10H_NB_MISC,
                                         cpu_cfg);
                if (!cpu_cfg)
                        break;
                ++nodes;
                pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
                                       | IBSCTL_LVT_OFFSET_VALID);
                pci_read_config_dword(cpu_cfg, IBSCTL, &value);
                if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
                        pci_dev_put(cpu_cfg);
                        pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
                                 value);
                        return -EINVAL;
                }
        } while (1);

        if (!nodes) {
                pr_debug("No CPU node configured for IBS\n");
                return -ENODEV;
        }

        return 0;
}

/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This then updates
 * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
 * the IBS interrupt vector is handled by the cpu hotplug callback
 * x86_pmu_amd_ibs_starting_cpu(), which uses the new offset.
 */
static void force_ibs_eilvt_setup(void)
{
        int offset;
        int ret;

        preempt_disable();
        /* find the next free available EILVT entry, skip offset 0 */
        for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
                if (get_eilvt(offset))
                        break;
        }
        preempt_enable();

        if (offset == APIC_EILVT_NR_MAX) {
                pr_debug("No EILVT entry available\n");
                return;
        }

        ret = setup_ibs_ctl(offset);
        if (ret)
                goto out;

        if (!ibs_eilvt_valid())
                goto out;

        pr_info("LVT offset %d assigned\n", offset);

        return;
out:
        preempt_disable();
        put_eilvt(offset);
        preempt_enable();
}

static void ibs_eilvt_setup(void)
{
        /*
         * Force LVT offset assignment for family 10h: The offsets are
         * not assigned by the BIOS for this family, so the OS is
         * responsible for doing it. If the OS assignment fails, fall
         * back to the BIOS settings and try to set it up there.
         */
        if (boot_cpu_data.x86 == 0x10)
                force_ibs_eilvt_setup();
}

static inline int get_ibs_lvt_offset(void)
{
        u64 val;

        rdmsrl(MSR_AMD64_IBSCTL, val);
        if (!(val & IBSCTL_LVT_OFFSET_VALID))
                return -EINVAL;

        return val & IBSCTL_LVT_OFFSET_MASK;
}

static void setup_APIC_ibs(void)
{
        int offset;

        offset = get_ibs_lvt_offset();
        if (offset < 0)
                goto failed;

        if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
                return;

failed:
        pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
                smp_processor_id());
}

static void clear_APIC_ibs(void)
{
        int offset;

        offset = get_ibs_lvt_offset();
        if (offset >= 0)
                setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}

static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
{
        setup_APIC_ibs();
        return 0;
}

#ifdef CONFIG_PM

static int perf_ibs_suspend(void)
{
        clear_APIC_ibs();
        return 0;
}

static void perf_ibs_resume(void)
{
        ibs_eilvt_setup();
        setup_APIC_ibs();
}

static struct syscore_ops perf_ibs_syscore_ops = {
        .resume		= perf_ibs_resume,
        .suspend	= perf_ibs_suspend,
};

static void perf_ibs_pm_init(void)
{
        register_syscore_ops(&perf_ibs_syscore_ops);
}

#else

static inline void perf_ibs_pm_init(void) { }

#endif

static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
{
        clear_APIC_ibs();
        return 0;
}

static __init int amd_ibs_init(void)
{
        u32 caps;

        caps = __get_ibs_caps();
        if (!caps)
                return -ENODEV;	/* ibs not supported by the cpu */

        ibs_eilvt_setup();

        if (!ibs_eilvt_valid())
                return -EINVAL;

        perf_ibs_pm_init();

        ibs_caps = caps;
        /* make ibs_caps visible to other cpus: */
        smp_mb();
        /*
         * x86_pmu_amd_ibs_starting_cpu will be called from core on
         * all online cpus.
         */
        cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
                          "perf/x86/amd/ibs:starting",
                          x86_pmu_amd_ibs_starting_cpu,
                          x86_pmu_amd_ibs_dying_cpu);

        perf_event_ibs_init();

        return 0;
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);