/*
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/oprofile.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/percpu.h>

#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"

#define NUM_COUNTERS 4
#define NUM_CONTROLS 4
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
#define NUM_VIRT_COUNTERS 32
#define NUM_VIRT_CONTROLS 32
#else
#define NUM_VIRT_COUNTERS NUM_COUNTERS
#define NUM_VIRT_CONTROLS NUM_CONTROLS
#endif

#define OP_EVENT_MASK 0x0FFF
#define OP_CTR_OVERFLOW (1ULL<<31)

#define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21))
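
/*
 * Note on the reserved mask: the control-register update paths below
 * do "val &= model->reserved" on the current event-select value before
 * OR-ing in the new event bits, so MSR_AMD_EVENTSEL_RESERVED marks the
 * bits that must be *preserved* across a reprogram rather than
 * rewritten. (0xFFFFFCF0ULL<<32) keeps most of the upper dword while
 * leaving bits 35:32 (the extended event select) and bits 41:40
 * writable; bit 21 is a reserved bit in the lower dword.
 */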

static unsigned long reset_value[NUM_VIRT_COUNTERS];

#ifdef CONFIG_OPROFILE_IBS

/* IbsFetchCtl bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT_MASK	0xFFFF0000ULL

/* IbsOpCtl bits */
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)

#define IBS_FETCH_SIZE		6
#define IBS_OP_SIZE		12
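
/*
 * The *_SIZE values are the number of 32-bit words each IBS sample
 * occupies in the event buffer: op_amd_handle_ibs() records 3 MSRs for
 * a fetch sample and 6 MSRs for an op sample, and each
 * oprofile_add_data64() call emits two 32-bit words, giving 6 and 12
 * words respectively.
 */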

static int has_ibs;	/* AMD Family10h and later */

struct op_ibs_config {
	unsigned long op_enabled;
	unsigned long fetch_enabled;
	unsigned long max_cnt_fetch;
	unsigned long max_cnt_op;
	unsigned long rand_en;
	unsigned long dispatched_ops;
};

static struct op_ibs_config ibs_config;

#endif

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void op_mux_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_VIRT_COUNTERS; i++) {
		int hw_counter = op_x86_virt_to_phys(i);
		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter;
		else
			msrs->multiplex[i].addr = 0;
	}
}
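
/*
 * With multiplexing, up to NUM_VIRT_COUNTERS (32) configured events are
 * rotated across the NUM_COUNTERS (4) hardware counters;
 * op_x86_virt_to_phys() and op_x86_phys_to_virt() translate between a
 * virtual event slot and the hardware counter currently backing it, and
 * op_mux_switch_ctrl() below reprograms the control registers at each
 * rotation.
 */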

static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
			       struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!counter_config[virt].enabled)
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}
}

#else

static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { }

#endif

/* functions for op_amd_spec */

static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
		else
			msrs->counters[i].addr = 0;
	}

	for (i = 0; i < NUM_CONTROLS; i++) {
		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
		else
			msrs->controls[i].addr = 0;
	}

	op_mux_fill_in_addresses(msrs);
}

static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
			      struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* setup reset_value */
	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
		if (counter_config[i].enabled)
			reset_value[i] = counter_config[i].count;
		else
			reset_value[i] = 0;
	}

	/* clear all counters */
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (unlikely(!msrs->controls[i].addr))
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (unlikely(!msrs->counters[i].addr))
			continue;
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!counter_config[virt].enabled)
			continue;
		if (!msrs->counters[i].addr)
			continue;

		/* setup counter registers */
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);

		/* setup control registers */
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}
}
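
/*
 * The counters count up and raise an NMI when they wrap past zero, so a
 * sampling period of N is armed by writing -N: e.g. for
 * reset_value[virt] == 100000 the write is -(u64)100000 ==
 * 0xFFFFFFFFFFFE7960, and the NMI fires after 100000 events. The
 * earlier -1LL writes leave bit 31 set in every counter, which the
 * overflow test in op_amd_check_ctrs() reads as "not wrapped", so stale
 * counter values cannot produce spurious samples.
 */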

#ifdef CONFIG_OPROFILE_IBS

static inline void
op_amd_handle_ibs(struct pt_regs * const regs,
		  struct op_msrs const * const msrs)
{
	u64 val, ctl;
	struct op_entry entry;

	if (!has_ibs)
		return;

	if (ibs_config.fetch_enabled) {
		rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		if (ctl & IBS_FETCH_VAL) {
			rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
			oprofile_add_data64(&entry, val);
			oprofile_add_data64(&entry, ctl);
			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
			ctl |= IBS_FETCH_ENABLE;
			wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		}
	}

	if (ibs_config.op_enabled) {
		rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
		if (ctl & IBS_OP_VAL) {
			rdmsrl(MSR_AMD64_IBSOPRIP, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_OP_CODE, IBS_OP_SIZE);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA2, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA3, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCLINAD, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~IBS_OP_VAL & 0xFFFFFFFF;
			ctl |= IBS_OP_ENABLE;
			wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
		}
	}
}
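
/*
 * Layout of the records delivered to the event buffer above: a fetch
 * sample carries IbsFetchLinAd, IbsFetchCtl and IbsFetchPhysAd; an op
 * sample carries IbsOpRip followed by IbsOpData/2/3 and the data-cache
 * linear/physical addresses. Userspace tells them apart by the
 * IBS_FETCH_CODE/IBS_OP_CODE escape codes passed to
 * oprofile_write_reserve().
 */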

static inline void op_amd_start_ibs(void)
{
	u64 val;

	if (has_ibs && ibs_config.fetch_enabled) {
		val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
		val |= IBS_FETCH_ENABLE;
		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
	}

	if (has_ibs && ibs_config.op_enabled) {
		val = (ibs_config.max_cnt_op >> 4) & 0xFFFF;
		val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
		val |= IBS_OP_ENABLE;
		wrmsrl(MSR_AMD64_IBSOPCTL, val);
	}
}
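
/*
 * The MaxCnt fields of IbsFetchCtl and IbsOpCtl are specified in units
 * of 16 events, hence the ">> 4": the default max_cnt of 250000 is
 * programmed as 250000 >> 4 == 15625 (0x3D09), for an effective period
 * of 15625 * 16 == 250000.
 */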

static void op_amd_stop_ibs(void)
{
	if (has_ibs && ibs_config.fetch_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);

	if (has_ibs && ibs_config.op_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}

#else

static inline void op_amd_handle_ibs(struct pt_regs * const regs,
				     struct op_msrs const * const msrs) { }
static inline void op_amd_start_ibs(void) { }
static inline void op_amd_stop_ibs(void) { }

#endif

static int op_amd_check_ctrs(struct pt_regs * const regs,
			     struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		/* bit is clear if overflowed: */
		if (val & OP_CTR_OVERFLOW)
			continue;
		oprofile_add_sample(regs, virt);
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
	}

	op_amd_handle_ibs(regs, msrs);

	/* See op_model_ppro.c */
	return 1;
}
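
/*
 * Why "bit is clear if overflowed": a counter armed with -N keeps bit
 * 31 (OP_CTR_OVERFLOW) set the whole way up to zero; once it wraps, it
 * sits at a small positive value with bit 31 clear. Reading a set bit
 * 31 therefore means this counter did not cause the NMI, so it is
 * skipped.
 */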

static void op_amd_start(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_start_ibs();
}
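
/*
 * For the performance counters, start/stop only toggle the enable bit
 * (bit 22, "EN") of each event-select register; the event, unit mask
 * and counter value programmed by op_amd_setup_ctrs() stay intact, so
 * profiling can be paused and resumed without reprogramming.
 */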

static void op_amd_stop(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * Subtle: stop on all counters to avoid race with setting our
	 * pm callback
	 */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_stop_ibs();
}

static void op_amd_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (msrs->counters[i].addr)
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
	}
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (msrs->controls[i].addr)
			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
}

#ifdef CONFIG_OPROFILE_IBS

static u8 ibs_eilvt_off;

static inline void apic_init_ibs_nmi_per_cpu(void *arg)
{
	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
}

static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
{
	setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
}

static int init_ibs_nmi(void)
{
#define IBSCTL_LVTOFFSETVAL		(1 << 8)
#define IBSCTL				0x1cc
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	/* per CPU setup */
	on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVTOFFSETVAL);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x", value);
			return 1;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS");
		return 1;
	}

#ifdef CONFIG_NUMA
	/* Sanity check */
	/* Works only for 64bit with proper numa implementation. */
	if (nodes != num_possible_nodes()) {
		printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
		       "found: %d, expected %d",
		       nodes, num_possible_nodes());
		return 1;
	}
#endif
	return 0;
}
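
/*
 * IBS interrupts are delivered through an extended APIC LVT entry.
 * init_ibs_nmi() first points that LVT at NMI delivery on every CPU,
 * then writes the chosen LVT offset (tagged valid via
 * IBSCTL_LVTOFFSETVAL) into the IBSCTL register of each node's
 * northbridge PCI device and reads it back to verify the setup took
 * effect.
 */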

/* uninitialize the APIC for the IBS interrupts if needed */
static void clear_ibs_nmi(void)
{
	if (has_ibs)
		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
}

/* initialize the APIC for the IBS interrupts if available */
static void ibs_init(void)
{
	has_ibs = boot_cpu_has(X86_FEATURE_IBS);

	if (!has_ibs)
		return;

	if (init_ibs_nmi()) {
		has_ibs = 0;
		return;
	}

	printk(KERN_INFO "oprofile: AMD IBS detected\n");
}

static void ibs_exit(void)
{
	if (!has_ibs)
		return;

	clear_ibs_nmi();
}

static int (*create_arch_files)(struct super_block *sb, struct dentry *root);

static int setup_ibs_files(struct super_block *sb, struct dentry *root)
{
	struct dentry *dir;
	int ret = 0;

	/* architecture specific files */
	if (create_arch_files)
		ret = create_arch_files(sb, root);

	if (ret)
		return ret;
	if (!has_ibs)
		return ret;

	/* model specific files */

	/* setup some reasonable defaults */
	ibs_config.max_cnt_fetch = 250000;
	ibs_config.fetch_enabled = 0;
	ibs_config.max_cnt_op = 250000;
	ibs_config.op_enabled = 0;
	ibs_config.dispatched_ops = 1;

	dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.fetch_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_fetch);
	oprofilefs_create_ulong(sb, dir, "rand_enable",
				&ibs_config.rand_en);

	dir = oprofilefs_mkdir(sb, root, "ibs_op");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.op_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_op);
	oprofilefs_create_ulong(sb, dir, "dispatched_ops",
				&ibs_config.dispatched_ops);

	return 0;
}
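
/*
 * The files created above appear under the oprofilefs mount
 * (conventionally /dev/oprofile), e.g.:
 *
 *	echo 1      > /dev/oprofile/ibs_op/enable
 *	echo 250000 > /dev/oprofile/ibs_op/max_count
 *
 * The oprofile daemon normally drives these; writing them by hand is
 * shown only as an illustration of the interface.
 */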

static int op_amd_init(struct oprofile_operations *ops)
{
	ibs_init();
	create_arch_files = ops->create_files;
	ops->create_files = setup_ibs_files;
	return 0;
}

static void op_amd_exit(void)
{
	ibs_exit();
}

#else

/* no IBS support */

static int op_amd_init(struct oprofile_operations *ops)
{
	return 0;
}

static void op_amd_exit(void) {}

#endif /* CONFIG_OPROFILE_IBS */

struct op_x86_model_spec op_amd_spec = {
	.num_counters		= NUM_COUNTERS,
	.num_controls		= NUM_CONTROLS,
	.num_virt_counters	= NUM_VIRT_COUNTERS,
	.reserved		= MSR_AMD_EVENTSEL_RESERVED,
	.event_mask		= OP_EVENT_MASK,
	.init			= op_amd_init,
	.exit			= op_amd_exit,
	.fill_in_addresses	= &op_amd_fill_in_addresses,
	.setup_ctrs		= &op_amd_setup_ctrs,
	.check_ctrs		= &op_amd_check_ctrs,
	.start			= &op_amd_start,
	.stop			= &op_amd_stop,
	.shutdown		= &op_amd_shutdown,
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	.switch_ctrl		= &op_mux_switch_ctrl,
#endif
};