#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "perf_event.h"
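
/*
 * Generic cache event -> AMD event select/unit mask mapping. A value of
 * 0 means the generic event is not supported on AMD and -1 means the
 * op/result combination is invalid.
 */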
static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses :IC+DC      */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses          */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches          */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]			= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]			= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]			= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]		= 0x00c2,
  [PERF_COUNT_HW_BRANCH_MISSES]			= 0x00c3,
  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static struct event_constraint *amd_nb_event_constraint;

/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 *
 * CPUs with north bridge performance counter extensions:
 *   4 additional counters starting at 0xc0010240 each offset by 2
 *   (indexed right above either one of the above core counters)
 */
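/*
 * For example, with the core extensions the third counter (index 2)
 * sits at offset 2 << 1 = 4 from the base MSR, i.e. event select
 * register 0xc0010200 + 4.
 */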
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
	int offset, first, base;

	if (!index)
		return index;

	if (eventsel)
		offset = event_offsets[index];
	else
		offset = count_offsets[index];

	if (offset)
		return offset;

	if (amd_nb_event_constraint &&
	    test_bit(index, amd_nb_event_constraint->idxmsk)) {
		/*
		 * calculate the offset of NB counters with respect to
		 * base eventsel or perfctr
		 */
		first = find_first_bit(amd_nb_event_constraint->idxmsk,
				       X86_PMC_IDX_MAX);

		if (eventsel)
			base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
		else
			base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;

		offset = base + ((index - first) << 1);
	} else if (!cpu_has_perfctr_core)
		offset = index;
	else
		offset = index << 1;

	if (eventsel)
		event_offsets[index] = offset;
	else
		count_offsets[index] = offset;

	return offset;
}

static inline int amd_pmu_rdpmc_index(int index)
{
	int ret, first;

	if (!index)
		return index;

	ret = rdpmc_indexes[index];
	if (ret)
		return ret;

	if (amd_nb_event_constraint &&
	    test_bit(index, amd_nb_event_constraint->idxmsk)) {
		/*
		 * according to the manual, ECX value of the NB counters is
		 * the index of the NB counter (0, 1, 2 or 3) plus 6
		 */
		first = find_first_bit(amd_nb_event_constraint->idxmsk,
				       X86_PMC_IDX_MAX);
		ret = index - first + 6;
	} else
		ret = index;

	rdpmc_indexes[index] = ret;

	return ret;
}

static int amd_core_hw_config(struct perf_event *event)
{
	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

	return 0;
}

/*
 * NB counters do not support the following event select bits:
 *   Host/Guest only
 *   Counter mask
 *   Invert counter mask
 *   Edge detect
 *   OS/User mode
 */
static int amd_nb_hw_config(struct perf_event *event)
{
	/* for NB, we only allow system wide counting mode */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
			      ARCH_PERFMON_EVENTSEL_OS);

	if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
				 ARCH_PERFMON_EVENTSEL_INT))
		return -EINVAL;

	return 0;
}
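
/*
 * Note that the AMD64 event select field is 12 bits wide, split across
 * config bits [7:0] and [35:32] (hence the "config:0-7,32-35" format
 * attribute further down).
 */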
/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
{
	return amd_nb_event_constraint && amd_is_nb_event(hwc);
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return -ENOENT;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	if (amd_is_perfctr_nb_event(&event->hw))
		return amd_nb_hw_config(event);

	return amd_core_hw_config(event);
}

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}

static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
{
	int core_id = cpu_data(smp_processor_id()).cpu_core_id;

	/* deliver interrupts only to this core */
	if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
		hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
		hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
		hwc->config |= (u64)(core_id) <<
			AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
	}
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache and HyperTransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events, this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling __amd_put_nb_event_constraints()
 *
 * Non NB events are not impacted by this restriction.
 */
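/*
 * For example, if core 0 has already claimed NB counter 2 for an NB
 * event, the cmpxchg() below fails for core 1 on that slot, and core 1
 * must take a different free slot (or gets the empty constraint if
 * none is left).
 */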
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			       struct event_constraint *c)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old;
	int idx, new = -1;

	if (!c)
		c = &unconstrained;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
		if (new == -1 || hwc->idx == idx)
			/* assign free slot, prefer hwc->idx */
			old = cmpxchg(nb->owners + idx, NULL, event);
		else if (nb->owners[idx] == event)
			/* event already present */
			old = event;
		else
			continue;

		if (old && old != event)
			continue;

		/* reassign to this slot */
		if (new != -1)
			cmpxchg(nb->owners + new, event, NULL);
		new = idx;

		/* already present, reuse */
		if (old == event)
			break;
	}

	if (new == -1)
		return &emptyconstraint;

	if (amd_is_perfctr_nb_event(hwc))
		amd_nb_interrupt_hw_config(hwc);

	return &nb->event_constraints[new];
}

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			cpuc->kfree_on_online = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
		return &unconstrained;

	return __amd_get_nb_event_constraints(cpuc, event,
					      amd_nb_event_constraint);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
		__amd_put_nb_event_constraints(cpuc, event);
}

PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *amd_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
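
/*
 * These format attributes are exported under
 * /sys/bus/event_source/devices/cpu/format/ so that raw events can be
 * specified symbolically, e.g.: perf stat -e cpu/event=0x076,umask=0x00/
 */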

/* AMD Family 15h */
#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x031	LS	PERF_CTL[2:0] (**)
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1C0	EX	PERF_CTL[5:3]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */
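
/*
 * The counter mask of each constraint below encodes which of the core
 * counters may host the event: e.g. 0x07 means PERF_CTL[2:0], 0x08
 * means PERF_CTL[3] only, and 0x3F means any of PERF_CTL[5:0].
 */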
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		case 0x031:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				return &amd_f15_PMC20;
			return &emptyconstraint;
		case 0x1C0:
			return &amd_f15_PMC53;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		return __amd_get_nb_event_constraints(cpuc, event,
						      amd_nb_event_constraint);
	default:
		return &emptyconstraint;
	}
}

static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
		    (config & AMD64_EVENTSEL_EVENT) >> 24;

	return x86_event_sysfs_show(page, config, event);
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.addr_offset		= amd_pmu_addr_offset,
	.rdpmc_index		= amd_pmu_rdpmc_index,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.format_attrs		= amd_format_attr,
	.events_sysfs_show	= amd_event_sysfs_show,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
};

static int setup_event_constraints(void)
{
	if (boot_cpu_data.x86 == 0x15)
		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
	return 0;
}

static int setup_perfctr_core(void)
{
	if (!cpu_has_perfctr_core) {
		WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h,
		     KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!");
		return -ENODEV;
	}

	WARN(x86_pmu.get_event_constraints == amd_get_event_constraints,
	     KERN_ERR "hw perf events core counters need constraints handler!");

	/*
	 * If core performance counter extensions exist, we must use
	 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
	 * x86_pmu_addr_offset().
	 */
	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;

	printk(KERN_INFO "perf: AMD core performance counters detected\n");

	return 0;
}

static int setup_perfctr_nb(void)
{
	if (!cpu_has_perfctr_nb)
		return -ENODEV;

	x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;

	if (cpu_has_perfctr_core)
		amd_nb_event_constraint = &amd_NBPMC96;
	else
		amd_nb_event_constraint = &amd_NBPMC74;

	printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");

	return 0;
}

__init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	setup_event_constraints();
	setup_perfctr_core();
	setup_perfctr_nb();

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}
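
/*
 * Called when SVM is enabled on this CPU: stop masking out the
 * Host-only bit (see amd_pmu_disable_virt() below) so that the
 * Host-only/Guest-only filtering programmed by amd_core_hw_config()
 * takes effect while guests run.
 */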
void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);