/**
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>

#include "op_counter.h"
#include "op_x86_model.h"
static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;
static int ctr_running;

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */
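/*
 * Assemble the event-select (control) MSR value for one counter:
 * interrupt enable, user/kernel mode bits, the unit mask in bits 8-15,
 * the extra filter bits (invert, edge, counter mask), and the event
 * code, whose upper nibble lands in bits 32-35.
 */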
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= ARCH_PERFMON_EVENTSEL_INT;
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;
	counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
				  ARCH_PERFMON_EVENTSEL_EDGE |
				  ARCH_PERFMON_EVENTSEL_CMASK);
	val |= counter_config->extra;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;
	val |= (event & 0x0F00) << 24;

	return val;
}
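/*
 * NMI die-notifier callback: while counters run, forward the NMI to
 * the model's overflow handler; if profiling is enabled but counting
 * is stopped, silence the counters instead; NMIs that arrive when
 * profiling is fully disabled are not ours.
 */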
static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_NMI:
		if (ctr_running)
			model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
		else if (!nmi_enabled)
			break;
		else
			model->stop(&__get_cpu_var(cpu_msrs));
		ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			rdmsrl(controls[i].addr, controls[i].saved);
	}
}
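/*
 * Per-CPU start/stop callbacks run via IPI on every CPU; the global
 * nmi_start()/nmi_stop() entry points fan out to them and flip
 * ctr_running so the NMI handler knows whether samples are expected.
 */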
static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->start(msrs);
}

static int nmi_start(void)
{
	get_online_cpus();
	ctr_running = 1;
	/* make ctr_running visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_start, NULL, 1);
	put_online_cpus();
	return 0;
}

static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->stop(msrs);
}

static void nmi_stop(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_stop, NULL, 1);
	ctr_running = 0;
	put_online_cpus();
}
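/*
 * Event multiplexing: when more events are configured than there are
 * physical counters, the active set is rotated periodically.
 * switch_index tracks the first virtual counter of the set currently
 * loaded onto this CPU's physical counters.
 */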
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
	return !!model->switch_ctrl;
}

inline int op_x86_phys_to_virt(int phys)
{
	return __this_cpu_read(switch_index) + phys;
}

inline int op_x86_virt_to_phys(int virt)
{
	return virt % model->num_counters;
}
static void nmi_shutdown_mux(void)
{
	int i;

	if (!has_mux())
		return;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).multiplex);
		per_cpu(cpu_msrs, i).multiplex = NULL;
		per_cpu(switch_index, i) = 0;
	}
}
static int nmi_setup_mux(void)
{
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;
	int i;

	if (!has_mux())
		return 1;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =
			kzalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;
	}

	return 1;
}
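/*
 * Seed each virtual counter with the negative of its sample count so
 * that the hardware counter overflows (and raises an NMI) after
 * "count" events once its set is switched in.
 */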
static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
	int i;
	struct op_msr *multiplex = msrs->multiplex;

	if (!has_mux())
		return;

	for (i = 0; i < model->num_virt_counters; ++i) {
		if (counter_config[i].enabled) {
			multiplex[i].saved = -(u64)counter_config[i].count;
		} else {
			multiplex[i].saved = 0;
		}
	}

	per_cpu(switch_index, cpu) = 0;
}
static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			rdmsrl(counters[i].addr, multiplex[virt].saved);
	}
}
static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			wrmsrl(counters[i].addr, multiplex[virt].saved);
	}
}
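/*
 * Rotate this CPU's physical counters onto the next set of virtual
 * counters: stop counting, park the current values in the multiplex
 * array, advance switch_index (wrapping at the last configured set),
 * reprogram the controls, reload the new set, and restart.
 */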
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;
	if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}
/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}
static int nmi_switch_event(void)
{
	if (!has_mux())
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	get_online_cpus();
	if (ctr_running)
		on_each_cpu(nmi_cpu_switch, NULL, 1);
	put_online_cpus();

	return 0;
}
static inline void mux_init(struct oprofile_operations *ops)
{
	if (has_mux())
		ops->switch_events = nmi_switch_event;
}
static void mux_clone(int cpu)
{
	if (!has_mux())
		return;

	memcpy(per_cpu(cpu_msrs, cpu).multiplex,
	       per_cpu(cpu_msrs, 0).multiplex,
	       sizeof(struct op_msr) * model->num_virt_counters);
}
#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif
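/*
 * The MSR shadow arrays are allocated for every possible CPU, not just
 * those currently online, so a CPU brought up later already has its
 * buffers; free_msrs() releases them together with the multiplexing
 * buffers.
 */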
static void free_msrs(void)
{
	int i;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
	nmi_shutdown_mux();
}
static int allocate_msrs(void)
{
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
	int i;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			goto fail;
		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			goto fail;
	}

	if (!nmi_setup_mux())
		goto fail;

	return 1;

fail:
	free_msrs();
	return 0;
}
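/*
 * Runs via IPI on each CPU: save the original counter MSRs, let the
 * model program its control registers, and redirect the local APIC
 * performance-counter LVT entry to NMI delivery so overflows arrive
 * as NMIs.
 */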
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_save_registers(msrs);
	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}
static struct notifier_block profile_exceptions_nb = {
	.notifier_call	= profile_exceptions_notify,
	.priority	= NMI_LOCAL_LOW_PRIOR,
};
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}
static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/* Restoring APIC_LVTPC can trigger an APIC error because the
	 * delivery mode and vector number combination can be illegal.
	 * That's by design: on power-on the APIC LVT contains a zero
	 * vector number, which is legal only for NMI delivery mode. So
	 * inhibit APIC errors before restoring the LVTPC.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
}
static void nmi_cpu_up(void *dummy)
{
	if (nmi_enabled)
		nmi_cpu_setup(dummy);
	if (ctr_running)
		nmi_cpu_start(dummy);
}

static void nmi_cpu_down(void *dummy)
{
	if (ctr_running)
		nmi_cpu_stop(dummy);
	if (nmi_enabled)
		nmi_cpu_shutdown(dummy);
}
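/*
 * Populate oprofilefs with one directory per virtual counter, each
 * holding the tunables (enabled, event, count, unit_mask, kernel,
 * user, extra) that user space writes before starting a session.
 */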
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_virt_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/* quick little hack to _not_ expose a counter if it is not
		 * available for use. This should protect userspace apps.
		 * NOTE: assumes 1:1 mapping here (that counters are organized
		 * sequentially in their struct assignment).
		 */
		if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
		oprofilefs_create_ulong(sb, dir, "extra", &counter_config[i].extra);
	}

	return 0;
}
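/*
 * CPU hotplug callback: a CPU coming online (or whose offlining
 * failed) is brought into the current profiling state; one about to
 * go offline is quiesced first.
 */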
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;

	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
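/*
 * Set up profiling: fill in the MSR addresses once on CPU 0, clone
 * them to every other CPU, then register the NMI and hotplug
 * notifiers and program all CPUs.
 */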
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	err = register_die_notifier(&profile_exceptions_nb);
	if (err)
		goto fail;

	get_online_cpus();
	register_cpu_notifier(&oprofile_cpu_nb);
	nmi_enabled = 1;
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	put_online_cpus();

	return 0;
fail:
	free_msrs();
	return err;
}
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	get_online_cpus();
	unregister_cpu_notifier(&oprofile_cpu_nb);
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	put_online_cpus();
	unregister_die_notifier(&profile_exceptions_nb);
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
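/*
 * Suspend/resume via syscore ops: these run with only the boot CPU
 * alive, so stopping or restarting that single CPU's counters is
 * sufficient.
 */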
#ifdef CONFIG_PM

static int nmi_suspend(void)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static void nmi_resume(void)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
}

static struct syscore_ops oprofile_syscore_ops = {
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static void __init init_suspend_resume(void)
{
	register_syscore_ops(&oprofile_syscore_ops);
}

static void exit_suspend_resume(void)
{
	unregister_syscore_ops(&oprofile_syscore_ops);
}

#else

static inline void init_suspend_resume(void) { }
static inline void exit_suspend_resume(void) { }

#endif /* CONFIG_PM */
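/*
 * Pentium 4 detection: choose the P4 model spec, using the HT-aware
 * variant when two hardware threads share the counters.
 */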
static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}
static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
	if (!strcmp(str, "arch_perfmon")) {
		force_arch_perfmon = 1;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	}

	return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */

	if (force_arch_perfmon && cpu_has_arch_perfmon)
		return 0;

	/*
	 * Documentation on identifying Intel processors by CPU family
	 * and model can be found in the Intel Software Developer's
	 * Manuals (SDM):
	 *
	 *  http://www.intel.com/products/processor/manuals/
	 *
	 * As of May 2010 the documentation for this was in the:
	 * "Intel 64 and IA-32 Architectures Software Developer's
	 * Manual Volume 3B: System Programming Guide", "Table B-1
	 * CPUID Signature Values of DisplayFamily_DisplayModel".
	 */
	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 0x0f:
	case 0x16:
	case 0x17:
	case 0x1d:
		*cpu_type = "i386/core_2";
		break;
	case 0x1a:
	case 0x1e:
	case 0x2e:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 0x1c:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		case 0x12:
			cpu_type = "x86-64/family12h";
			break;
		case 0x14:
			cpu_type = "x86-64/family14h";
			break;
		case 0x15:
			cpu_type = "x86-64/family15h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
			/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

			/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!cpu_has_arch_perfmon)
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	/* default values, can be overwritten by model */
	ops->create_files	= nmi_create_files;
	ops->setup		= nmi_setup;
	ops->shutdown		= nmi_shutdown;
	ops->start		= nmi_start;
	ops->stop		= nmi_stop;
	ops->cpu_type		= cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	if (!model->num_virt_counters)
		model->num_virt_counters = model->num_counters;

	mux_init(ops);

	init_suspend_resume();

	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}
void op_nmi_exit(void)
{
	exit_suspend_resume();
}