 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"
static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;
static int ctr_running;

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= ARCH_PERFMON_EVENTSEL_INT;
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;
	counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
				  ARCH_PERFMON_EVENTSEL_EDGE |
				  ARCH_PERFMON_EVENTSEL_CMASK);
	val |= counter_config->extra;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;
	val |= (event & 0x0F00) << 24;

	return val;
}
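/*
 * For reference, the architectural EVENTSEL layout the function above
 * fills in (per the Intel SDM and AMD BKDG; the enable bit EN/22 is
 * left to the model-specific code, not set here):
 *
 *	bits  0-7	event select (low byte)
 *	bits  8-15	unit mask
 *	bit  16		USR - count in user mode
 *	bit  17		OS  - count in kernel mode
 *	bit  20		INT - raise the PMI/NMI on counter overflow
 *	bit  18 EDGE, bit 23 INV, bits 24-31 CMASK - taken from ->extra
 *	bits 32-35	event select high nibble (AMD extension)
 *
 * E.g. event 0x3c (unhalted core cycles) with user = kernel = 1 and a
 * zero unit mask yields val = 0x13003c.
 */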
static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
{
	if (ctr_running)
		model->check_ctrs(regs, &__get_cpu_var(cpu_msrs));
	else if (!nmi_enabled)
		return NMI_DONE;
	else
		model->stop(&__get_cpu_var(cpu_msrs));
	return NMI_HANDLED;
}
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			rdmsrl(controls[i].addr, controls[i].saved);
	}
}
static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->start(msrs);
}

static int nmi_start(void)
{
	get_online_cpus();
	ctr_running = 1;
	/* make ctr_running visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_start, NULL, 1);
	put_online_cpus();
	return 0;
}
static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->stop(msrs);
}

static void nmi_stop(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_stop, NULL, 1);
	ctr_running = 0;
	put_online_cpus();
}
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
	return !!model->switch_ctrl;
}

inline int op_x86_phys_to_virt(int phys)
{
	return __this_cpu_read(switch_index) + phys;
}

inline int op_x86_virt_to_phys(int virt)
{
	return virt % model->num_counters;
}
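/*
 * How multiplexing works, roughly: the per-counter configurations in
 * counter_config[] form "virtual" counters 0..num_virt_counters-1,
 * mapped onto the num_counters physical counters in groups. The
 * per-CPU switch_index is the offset of the group currently loaded in
 * the hardware, so virtual = switch_index + physical, and going the
 * other way physical = virtual % num_counters.
 */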
static void nmi_shutdown_mux(void)
{
	int i;

	if (!has_mux())
		return;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).multiplex);
		per_cpu(cpu_msrs, i).multiplex = NULL;
		per_cpu(switch_index, i) = 0;
	}
}
static int nmi_setup_mux(void)
{
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;
	int i;

	if (!has_mux())
		return 1;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =
			kzalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;
	}

	return 1;
}
static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
	int i;
	struct op_msr *multiplex = msrs->multiplex;

	if (!has_mux())
		return;

	for (i = 0; i < model->num_virt_counters; ++i) {
		if (counter_config[i].enabled) {
			multiplex[i].saved = -(u64)counter_config[i].count;
		} else {
			multiplex[i].saved = 0;
		}
	}

	per_cpu(switch_index, cpu) = 0;
}
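/*
 * The -(u64)count seeding above relies on x86 perf counters counting
 * upward: a counter that starts at -count overflows, and thus raises
 * the NMI, after exactly 'count' events.
 */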
static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			rdmsrl(counters[i].addr, multiplex[virt].saved);
	}
}
static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			wrmsrl(counters[i].addr, multiplex[virt].saved);
	}
}
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;
	if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}
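/*
 * Example rotation, assuming 4 physical counters and 8 enabled virtual
 * counters: switch_index alternates 0 -> 4 -> 0 -> ... on each switch
 * event, so each group of 4 event configurations is live in the
 * hardware half of the time.
 */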
/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}
static int nmi_switch_event(void)
{
	if (!has_mux())
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	get_online_cpus();
	if (ctr_running)
		on_each_cpu(nmi_cpu_switch, NULL, 1);
	put_online_cpus();

	return 0;
}
static inline void mux_init(struct oprofile_operations *ops)
{
	if (has_mux())
		ops->switch_events = nmi_switch_event;
}

static void mux_clone(int cpu)
{
	if (!has_mux())
		return;

	memcpy(per_cpu(cpu_msrs, cpu).multiplex,
	       per_cpu(cpu_msrs, 0).multiplex,
	       sizeof(struct op_msr) * model->num_virt_counters);
}
#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif /* CONFIG_OPROFILE_EVENT_MULTIPLEX */
static void free_msrs(void)
{
	int i;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
	nmi_shutdown_mux();
}
static int allocate_msrs(void)
{
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
	int i;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			goto fail;
		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			goto fail;
	}

	if (!nmi_setup_mux())
		goto fail;

	return 1;

fail:
	free_msrs();
	return 0;
}
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	nmi_cpu_save_registers(msrs);
	raw_spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	raw_spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}
static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/* restoring APIC_LVTPC can trigger an apic error because the delivery
	 * mode and vector nr combination can be illegal. That's by design: on
	 * power-on the APIC LVTs contain a zero vector nr, which is legal only
	 * for NMI delivery mode. So inhibit apic errors before restoring lvtpc
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
}
static void nmi_cpu_up(void *dummy)
{
	if (nmi_enabled)
		nmi_cpu_setup(dummy);
	if (ctr_running)
		nmi_cpu_start(dummy);
}

static void nmi_cpu_down(void *dummy)
{
	if (ctr_running)
		nmi_cpu_stop(dummy);
	if (nmi_enabled)
		nmi_cpu_shutdown(dummy);
}
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_virt_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/* quick little hack to _not_ expose a counter if it is not
		 * available for use. This should protect the userspace app.
		 * NOTE: assumes 1:1 mapping here (that counters are organized
		 * sequentially in their struct assignment).
		 */
		if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
		oprofilefs_create_ulong(sb, dir, "extra", &counter_config[i].extra);
	}

	return 0;
}
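/*
 * The loop above yields one directory per virtual counter in
 * oprofilefs (conventionally mounted at /dev/oprofile), e.g.:
 *
 *	/dev/oprofile/0/enabled
 *	/dev/oprofile/0/event
 *	/dev/oprofile/0/count
 *	/dev/oprofile/0/unit_mask
 *	/dev/oprofile/0/kernel
 *	/dev/oprofile/0/user
 *	/dev/oprofile/0/extra
 */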
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;

	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subsets
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
				   0, "oprofile");
	if (err)
		goto fail;

	get_online_cpus();
	register_cpu_notifier(&oprofile_cpu_nb);
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	put_online_cpus();

	return 0;
fail:
	free_msrs();
	return err;
}
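/*
 * Note the ordering in nmi_setup(): the NMI handler is registered
 * while nmi_enabled is still 0, so a stray NMI arriving early is
 * answered with NMI_DONE by profile_exceptions_notify() instead of
 * touching not-yet-initialized counter state.
 */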
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	get_online_cpus();
	unregister_cpu_notifier(&oprofile_cpu_nb);
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	put_online_cpus();

	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
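/*
 * nmi_suspend()/nmi_resume() below run as syscore ops: late in suspend
 * and early in resume, with only the boot CPU online and interrupts
 * disabled, so stopping/starting that single CPU directly is enough.
 */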
#ifdef CONFIG_PM

static int nmi_suspend(void)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static void nmi_resume(void)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
}
static struct syscore_ops oprofile_syscore_ops = {
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static void __init init_suspend_resume(void)
{
	register_syscore_ops(&oprofile_syscore_ops);
}

static void exit_suspend_resume(void)
{
	unregister_syscore_ops(&oprofile_syscore_ops);
}
#else

static inline void init_suspend_resume(void) { }
static inline void exit_suspend_resume(void) { }

#endif /* CONFIG_PM */
static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}
enum __force_cpu_type {
	reserved = 0,		/* do not force */
	timer,
	arch_perfmon,
};

static int force_cpu_type;

static int set_cpu_type(const char *str, struct kernel_param *kp)
{
	if (!strcmp(str, "timer")) {
		force_cpu_type = timer;
		printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
	} else if (!strcmp(str, "arch_perfmon")) {
		force_cpu_type = arch_perfmon;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	} else {
		force_cpu_type = 0;
	}

	return 0;
}
module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
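/*
 * Example usage of the parameter above, assuming oprofile is built as
 * a module:
 *
 *	modprobe oprofile cpu_type=timer
 *	modprobe oprofile cpu_type=arch_perfmon
 *
 * (when built in, oprofile.cpu_type=... on the kernel command line has
 * the same effect)
 */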
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */

	if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
		return 0;

	/*
	 * Documentation on identifying Intel processors by CPU family
	 * and model can be found in the Intel Software Developer's
	 * Manuals (SDM):
	 *
	 *	http://www.intel.com/products/processor/manuals/
	 *
	 * As of May 2010 the documentation for this was in the:
	 * "Intel 64 and IA-32 Architectures Software Developer's
	 * Manual Volume 3B: System Programming Guide", "Table B-1
	 * CPUID Signature Values of DisplayFamily_DisplayModel".
	 */
	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 0x0f:
	case 0x16:
	case 0x17:
	case 0x1d:
		*cpu_type = "i386/core_2";
		break;
	case 0x1a:
	case 0x1e:
	case 0x2e:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 0x1c:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	if (force_cpu_type == timer)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		case 0x12:
			cpu_type = "x86-64/family12h";
			break;
		case 0x14:
			cpu_type = "x86-64/family14h";
			break;
		case 0x15:
			cpu_type = "x86-64/family15h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
			/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

			/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!cpu_has_arch_perfmon)
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	/* default values, can be overwritten by model */
	ops->create_files	= nmi_create_files;
	ops->setup		= nmi_setup;
	ops->shutdown		= nmi_shutdown;
	ops->start		= nmi_start;
	ops->stop		= nmi_stop;
	ops->cpu_type		= cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	if (!model->num_virt_counters)
		model->num_virt_counters = model->num_counters;

	mux_init(ops);

	init_suspend_resume();

	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}
void op_nmi_exit(void)
{
	exit_suspend_resume();
}