/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>
#include <linux/sched/clock.h>

#include "../perf_event.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
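
/*
 * These masks cover the config bits a user may set through
 * perf_event_attr::config for the fetch and op PMUs; perf_ibs_init()
 * rejects a config with any other bit set.
 */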

/*
 * ENABLED; tracks the pmu::add(), pmu::del() state, when set the counter is taken
 * and any further add()s must fail.
 *
 * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
 * complicated by the fact that the IBS hardware can send late NMIs (ie. after
 * we've cleared the EN bit).
 *
 * In order to consume these late NMIs we have the STOPPED state, any NMI that
 * happens after we've cleared the EN state will clear this bit and report the
 * NMI handled (this is fundamentally racy in the face of multiple NMI sources,
 * someone else can consume our BIT and our NMI will go unhandled).
 *
 * And since we cannot set/clear this separate bit together with the EN bit,
 * there are races; if we cleared STARTED early, an NMI could land in
 * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
 * could happen if the period is small enough), and consume our STOPPED bit
 * and trigger streams of unhandled NMIs.
 *
 * If, however, we clear STARTED late, an NMI can hit between clearing the
 * EN bit and clearing STARTED, still see STARTED set and process the event.
 * If this event will have the VALID bit clear, we bail properly, but this
 * is not a given. With VALID set we can end up calling pmu::stop() again
 * (the throttle logic) and trigger the WARNs in there.
 *
 * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
 * nesting, and clear STARTED late, so that we have a well defined state over
 * the clearing of the EN bit.
 *
 * XXX: we could probably be using !atomic bitops for all this.
 */

	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];

	unsigned long			offset_mask[1];
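	/*
	 * Set on chips that fail to reset the fetch count when it is written
	 * and instead need a 0-1 transition of IbsFetchEn; see
	 * perf_event_ibs_init() and perf_ibs_enable_event().
	 */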
	unsigned int			fetch_count_reset_broken : 1;
	struct cpu_perf_ibs __percpu	*pcpu;

	struct attribute		**format_attrs;
	struct attribute_group		format_group;
	const struct attribute_group	*attr_groups[2];
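
	/*
	 * Extracts the elapsed count from the control MSR value; wired up to
	 * get_ibs_fetch_count() or get_ibs_op_count() below.
	 */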
	u64				(*get_count)(u64 config);

struct perf_ibs_data {
	u32		data[0];	/* data buffer starts here */
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];

perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;

	if (unlikely(left < (s64)min)) {
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * we might hit the irq handler. This biases the results.
	 * Thus we shorten the next-to-last period and set the last
	 * period to the max period.
	 */

	*hw_period = (u64)left;
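	/*
	 * The period handed back here is programmed by the callers as MaxCnt
	 * (period >> 4), so the hardware effectively counts in units of 16;
	 * see perf_ibs_start() and perf_ibs_handle_irq().
	 */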

perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)

/*
 * Use IBS for precise event sampling:
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 *
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */

static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
	switch (event->attr.precise_ip) {

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		switch (event->attr.config) {
		case PERF_COUNT_HW_CPU_CYCLES:

		switch (event->attr.config) {

			*config = IBS_OP_CNT_CTL;

static int perf_ibs_init(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;

	perf_ibs = get_ibs_pmu(event->attr.type);

		config = event->attr.config;

		perf_ibs = &perf_ibs_op;
		ret = perf_ibs_precise_event(event, &config);

	if (event->pmu != &perf_ibs->pmu)

	if (config & ~perf_ibs->config_mask)

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */

		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * lower 4 bits can not be set in ibs max cnt,
			 * but allowing it in case we adjust the
			 * sample period to set a frequency.
			 */

		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
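			/*
			 * 0x10 is the smallest usable period: the low four
			 * bits are not programmable and perf_ibs_set_period()
			 * enforces a minimum of 1 << 4.
			 */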

		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;

	if (!hwc->sample_period)

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);
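	/*
	 * IBS counts up from zero each period, so prev_count is reset here and
	 * the next perf_ibs_event_update() accounts the full hardware count.
	 */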

static u64 get_ibs_fetch_count(u64 config)
	return (config & IBS_FETCH_CNT) >> 12;

static u64 get_ibs_op_count(u64 config)
	/*
	 * If the internal 27-bit counter rolled over, the count is MaxCnt
	 * and the lower 7 bits of CurCnt are randomized.
	 * Otherwise CurCnt has the full 27-bit current counter value.
	 */
	if (config & IBS_OP_VAL) {
		count = (config & IBS_OP_MAX_CNT) << 4;
		if (ibs_caps & IBS_CAPS_OPCNTEXT)
			count += config & IBS_OP_MAX_CNT_EXT_MASK;
	} else if (ibs_caps & IBS_CAPS_RDWROPCNT) {
		count = (config & IBS_OP_CUR_CNT) >> 32;

perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 *config)
	u64 count = perf_ibs->get_count(*config);

	/*
	 * Set width to 64 since we do not overflow on max width but
	 * instead on max count. In perf_ibs_set_period() we clear
	 * prev count manually on overflow.
	 */
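	/*
	 * perf_event_try_update() fails if an NMI raced with us and moved
	 * prev_count; re-read the MSR and retry with a fresh count.
	 */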
	while (!perf_event_try_update(event, count, 64)) {
		rdmsrl(event->hw.config_base, *config);
		count = perf_ibs->get_count(*config);

static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
					 struct hw_perf_event *hwc, u64 config)
	u64 tmp = hwc->config | config;

	if (perf_ibs->fetch_count_reset_broken)
		wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);

	wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
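	/*
	 * On parts with the broken fetch-count reset, the extra write above
	 * with the enable bit cleared provides the 0-1 transition of
	 * IbsFetchEn that re-arms the counter.
	 */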

/*
 * Erratum #420 Instruction-Based Sampling Engine May Generate
 * Interrupt that Cannot Be Cleared:
 *
 * Must clear counter mask first, then clear the enable bit. See
 * Revision Guide for AMD Family 10h Processors, Publication #41322.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
					  struct hw_perf_event *hwc, u64 config)
	config &= ~perf_ibs->cnt_mask;
	if (boot_cpu_data.x86 == 0x10)
		wrmsrl(hwc->config_base, config);
	config &= ~perf_ibs->enable_mask;
	wrmsrl(hwc->config_base, config);

/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, ignoring PERF_EF_RELOAD and PERF_EF_UPDATE flags in
 * perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 period, config = 0;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	perf_ibs_set_period(perf_ibs, hwc, &period);
	if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) {
		config |= period & IBS_OP_MAX_CNT_EXT_MASK;
		period &= ~IBS_OP_MAX_CNT_EXT_MASK;

	config |= period >> 4;

	/*
	 * Set STARTED before enabling the hardware, such that a subsequent NMI
	 * must observe it.
	 */
	set_bit(IBS_STARTED,    pcpu->state);
	clear_bit(IBS_STOPPING, pcpu->state);
	perf_ibs_enable_event(perf_ibs, hwc, config);

	perf_event_update_userpage(event);

static void perf_ibs_stop(struct perf_event *event, int flags)
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_STOPPING, pcpu->state))

	stopping = test_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))

	rdmsrl(hwc->config_base, config);

		/*
		 * Set STOPPED before disabling the hardware, such that it
		 * must be visible to NMIs the moment we clear the EN bit,
		 * at which point we can generate an !VALID sample which
		 * we need to consume.
		 */
		set_bit(IBS_STOPPED, pcpu->state);
		perf_ibs_disable_event(perf_ibs, hwc, config);
		/*
		 * Clear STARTED after disabling the hardware; if it were
		 * cleared before, an NMI hitting after the clear but before
		 * clearing the EN bit might think it a spurious NMI and not
		 * handle it.
		 *
		 * Clearing it after, however, creates the problem of the NMI
		 * handler seeing STARTED but not having a valid sample.
		 */
		clear_bit(IBS_STARTED, pcpu->state);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)

	/*
	 * Clear valid bit to not count rollovers on update, rollovers
	 * are only updated in the irq handler.
	 */
	config &= ~perf_ibs->valid_mask;

	perf_ibs_event_update(perf_ibs, event, &config);
	hwc->state |= PERF_HES_UPTODATE;

static int perf_ibs_add(struct perf_event *event, int flags)
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

static void perf_ibs_del(struct perf_event *event, int flags)
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))

	perf_ibs_stop(event, PERF_EF_UPDATE);

	perf_event_update_userpage(event);

static void perf_ibs_read(struct perf_event *event) { }

PMU_FORMAT_ATTR(rand_en,	"config:57");
PMU_FORMAT_ATTR(cnt_ctl,	"config:19");

static struct attribute *ibs_fetch_format_attrs[] = {
	&format_attr_rand_en.attr,

static struct attribute *ibs_op_format_attrs[] = {
	NULL,	/* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */

static struct perf_ibs perf_ibs_fetch = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,

	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,
	.format_attrs		= ibs_fetch_format_attrs,

	.get_count		= get_ibs_fetch_count,

static struct perf_ibs perf_ibs_op = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,

	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
				  IBS_OP_CUR_CNT_RAND,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,
	.format_attrs		= ibs_op_format_attrs,

	.get_count		= get_ibs_op_count,

static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc;
	struct perf_sample_data data;
	struct perf_raw_record raw;

	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;

	u64 *buf, *config, period, new_config = 0;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
		/*
		 * Catch spurious interrupts after stopping IBS: After
		 * disabling IBS there could still be incoming NMIs
		 * with samples that even have the valid bit cleared.
		 * Mark all these NMIs as handled.
		 */
		if (test_and_clear_bit(IBS_STOPPED, pcpu->state))

	if (WARN_ON_ONCE(!event))

	msr = hwc->config_base;

	if (!(*buf++ & perf_ibs->valid_mask))

	config = &ibs_data.regs[0];
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
		goto out;	/* no sw counter overflow */

	ibs_data.caps = ibs_caps;

	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
	if (event->attr.sample_type & PERF_SAMPLE_RAW)
		offset_max = perf_ibs->offset_max;
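
	/*
	 * Walk the bitmap of implemented IBS registers (offset_mask) and read
	 * each MSR at msr + offset into the raw sample buffer.
	 */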
		rdmsrl(msr + offset, *buf++);
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	/*
	 * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately
	 * depending on their availability.
	 * Can't add to offset_max as they are staggered
	 */
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		if (perf_ibs == &perf_ibs_op) {
			if (ibs_caps & IBS_CAPS_BRNTRGT) {
				rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);

			if (ibs_caps & IBS_CAPS_OPDATA4) {
				rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);

		if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
			rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);

	ibs_data.size = sizeof(u64) * size;

	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;

		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw = (struct perf_raw_record){
				.size = sizeof(u32) + ibs_data.size,
				.data = ibs_data.data,

	throttle = perf_event_overflow(event, &data, &regs);

		perf_ibs_stop(event, 0);

		if (perf_ibs == &perf_ibs_op) {
			if (ibs_caps & IBS_CAPS_OPCNTEXT) {
				new_config = period & IBS_OP_MAX_CNT_EXT_MASK;
				period &= ~IBS_OP_MAX_CNT_EXT_MASK;

			if ((ibs_caps & IBS_CAPS_RDWROPCNT) && (*config & IBS_OP_CNT_CTL))
				new_config |= *config & IBS_OP_CUR_CNT_RAND;

		new_config |= period >> 4;

		perf_ibs_enable_event(perf_ibs, hwc, new_config);

	perf_event_update_userpage(event);

perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
	u64 stamp = sched_clock();

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

		inc_irq_stat(apic_perf_irqs);

	perf_sample_event_took(sched_clock() - stamp);
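	/*
	 * Account how long the NMI took so that perf can throttle the
	 * sampling rate if handlers start consuming too much CPU time.
	 */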

NOKPROBE_SYMBOL(perf_ibs_nmi_handler);

static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
	struct cpu_perf_ibs __percpu *pcpu;

	pcpu = alloc_percpu(struct cpu_perf_ibs);

	perf_ibs->pcpu = pcpu;

	/* register attributes */
	if (perf_ibs->format_attrs[0]) {
		memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
		perf_ibs->format_group.name	= "format";
		perf_ibs->format_group.attrs	= perf_ibs->format_attrs;

		memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
		perf_ibs->attr_groups[0] = &perf_ibs->format_group;
		perf_ibs->pmu.attr_groups = perf_ibs->attr_groups;

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);

		perf_ibs->pcpu = NULL;

static __init void perf_event_ibs_init(void)
	struct attribute **attr = ibs_op_format_attrs;

	/*
	 * Some chips fail to reset the fetch count when it is written; instead
	 * they need a 0-1 transition of IbsFetchEn.
	 */
	if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
		perf_ibs_fetch.fetch_count_reset_broken = 1;

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

	if (ibs_caps & IBS_CAPS_OPCNT) {
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
		*attr++ = &format_attr_cnt_ctl.attr;

	if (ibs_caps & IBS_CAPS_OPCNTEXT) {
		perf_ibs_op.max_period  |= IBS_OP_MAX_CNT_EXT_MASK;
		perf_ibs_op.config_mask	|= IBS_OP_MAX_CNT_EXT_MASK;
		perf_ibs_op.cnt_mask    |= IBS_OP_MAX_CNT_EXT_MASK;

	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);

#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init void perf_event_ibs_init(void) { }

/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

u32 get_ibs_caps(void)

EXPORT_SYMBOL(get_ibs_caps);

static inline int get_eilvt(int offset)
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);

static inline int put_eilvt(int offset)
	return !setup_APIC_eilvt(offset, 0, 0, 1);
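
/*
 * get_eilvt()/put_eilvt() try to reserve and release an extended APIC LVT
 * entry for the IBS NMI; a non-zero return from setup_APIC_eilvt() means the
 * slot could not be claimed, hence the negation.
 */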

/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);

static int setup_ibs_ctl(int ibs_eilvt_off)
	struct pci_dev *cpu_cfg;
= pci_get_device(PCI_VENDOR_ID_AMD
,
880 PCI_DEVICE_ID_AMD_10H_NB_MISC
,
885 pci_write_config_dword(cpu_cfg
, IBSCTL
, ibs_eilvt_off
886 | IBSCTL_LVT_OFFSET_VALID
);
887 pci_read_config_dword(cpu_cfg
, IBSCTL
, &value
);
888 if (value
!= (ibs_eilvt_off
| IBSCTL_LVT_OFFSET_VALID
)) {
889 pci_dev_put(cpu_cfg
);
890 pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
897 pr_debug("No CPU node configured for IBS\n");

/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This then updates
 * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier that
 * is using the new offset.
 */
static void force_ibs_eilvt_setup(void)
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))

	if (offset == APIC_EILVT_NR_MAX) {
		pr_debug("No EILVT entry available\n");

	ret = setup_ibs_ctl(offset);

	if (!ibs_eilvt_valid())

	pr_info("LVT offset %d assigned\n", offset);

static void ibs_eilvt_setup(void)
	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to BIOS settings and try to set this up.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();

static inline int get_ibs_lvt_offset(void)
	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))

	return val & IBSCTL_LVT_OFFSET_MASK;

static void setup_APIC_ibs(void)
	offset = get_ibs_lvt_offset();

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))

	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());

static void clear_APIC_ibs(void)
	offset = get_ibs_lvt_offset();

		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
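		/*
		 * Re-mask the LVT entry (fixed delivery, mask bit set); used
		 * from the CPU-dying hotplug callback below.
		 */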

static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)

static int perf_ibs_suspend(void)

static void perf_ibs_resume(void)

static struct syscore_ops perf_ibs_syscore_ops = {
	.resume		= perf_ibs_resume,
	.suspend	= perf_ibs_suspend,

static void perf_ibs_pm_init(void)
	register_syscore_ops(&perf_ibs_syscore_ops);

static inline void perf_ibs_pm_init(void) { }

static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)

static __init int amd_ibs_init(void)
	caps = __get_ibs_caps();

		return -ENODEV;	/* ibs not supported by the cpu */

	if (!ibs_eilvt_valid())

	/* make ibs_caps visible to other cpus: */

	/*
	 * x86_pmu_amd_ibs_starting_cpu will be called from core on
	 * all online cpus.
	 */
	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
			  "perf/x86/amd/ibs:starting",
			  x86_pmu_amd_ibs_starting_cpu,
			  x86_pmu_amd_ibs_dying_cpu);

	perf_event_ibs_init();

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);