Adding support for MOXA ART SoC. Testing port of linux-2.6.32.60-moxart.
[linux-3.6.7-moxart.git] / arch / sparc / kernel / pcr.c
blob 0ce0dd2332aac60802a0cce15366e2b0158c2d36
1 /* pcr.c: Generic sparc64 performance counter infrastructure.
3 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
4 */
5 #include <linux/kernel.h>
6 #include <linux/export.h>
7 #include <linux/init.h>
8 #include <linux/irq.h>
10 #include <linux/irq_work.h>
11 #include <linux/ftrace.h>
13 #include <asm/pil.h>
14 #include <asm/pcr.h>
15 #include <asm/nmi.h>
16 #include <asm/spitfire.h>
17 #include <asm/perfctr.h>
/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
 * perf_event support layer.
 */

/* %pcr enable mask for sun4u (Cheetah-class) chips: privileged PIC
 * access plus supervisor and user tracing.
 */
#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
/* %pcr enable mask for Niagara-2 class chips; additionally configures
 * the counter-1 event select/mask fields and overflow trap enable.
 */
#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
				 PCR_N2_TOE_OV1 | \
				 (2 << PCR_N2_SL1_SHIFT) | \
				 (0xff << PCR_N2_MASK1_SHIFT))

u64 pcr_enable;			/* chip-appropriate enable mask, set by pcr_arch_init() */
unsigned int picl_shift;	/* PIC value shift; 2 on hypervisor (Niagara-2 class) chips */
33 /* Performance counter interrupts run unmasked at PIL level 15.
34 * Therefore we can't do things like wakeups and other work
35 * that expects IRQ disabling to be adhered to in locking etc.
37 * Therefore in such situations we defer the work by signalling
38 * a lower level cpu IRQ.
/* Handler for the lower-PIL softint raised by arch_irq_work_raise().
 * Performance counter interrupts run unmasked at PIL 15 where normal
 * IRQ-disable locking assumptions do not hold, so deferred work
 * (irq_work) is executed from this lower-level vector instead.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	/* Ack the softint before running the work so a re-raise during
	 * irq_work_run() is not lost.  NOTE(review): ordering inferred
	 * from the code; confirm against the softint semantics.
	 */
	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}
/* Arch hook for the generic irq_work layer: schedule deferred work by
 * raising the PIL_DEFERRED_PCR_WORK softint, which is serviced by
 * deferred_pcr_work_irq().
 */
void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
/* Chip-specific %pcr accessors, selected by pcr_arch_init().
 * Exported for modular users of the counter infrastructure.
 */
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);
63 static u64 direct_pcr_read(void)
65 u64 val;
67 read_pcr(val);
68 return val;
/* Store @val into %pcr with a direct privileged write. */
static void direct_pcr_write(u64 val)
{
	write_pcr(val);
}
/* Accessors for chips whose %pcr is fully controllable by the kernel. */
static const struct pcr_ops direct_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= direct_pcr_write,
};
81 static void n2_pcr_write(u64 val)
83 unsigned long ret;
85 if (val & PCR_N2_HTRACE) {
86 ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
87 if (ret != HV_EOK)
88 write_pcr(val);
89 } else
90 write_pcr(val);
/* Accessors for Niagara-2 class chips: reads are direct, writes may
 * need to be routed through the hypervisor (see n2_pcr_write()).
 */
static const struct pcr_ops n2_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= n2_pcr_write,
};
/* Hypervisor API group/version negotiated for performance counter
 * access; group is recorded so unregister_perf_hsvc() can release it.
 */
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;
102 static int __init register_perf_hsvc(void)
104 if (tlb_type == hypervisor) {
105 switch (sun4v_chip_type) {
106 case SUN4V_CHIP_NIAGARA1:
107 perf_hsvc_group = HV_GRP_NIAG_PERF;
108 break;
110 case SUN4V_CHIP_NIAGARA2:
111 perf_hsvc_group = HV_GRP_N2_CPU;
112 break;
114 case SUN4V_CHIP_NIAGARA3:
115 perf_hsvc_group = HV_GRP_KT_CPU;
116 break;
118 default:
119 return -ENODEV;
123 perf_hsvc_major = 1;
124 perf_hsvc_minor = 0;
125 if (sun4v_hvapi_register(perf_hsvc_group,
126 perf_hsvc_major,
127 &perf_hsvc_minor)) {
128 printk("perfmon: Could not register hvapi.\n");
129 return -ENODEV;
132 return 0;
135 static void __init unregister_perf_hsvc(void)
137 if (tlb_type != hypervisor)
138 return;
139 sun4v_hvapi_unregister(perf_hsvc_group);
/* Probe the CPU type, install the matching pcr_ops / enable mask /
 * PIC shift, then bring up the NMI watchdog machinery.
 *
 * Returns 0 (via nmi_init()) on success, or a negative errno when the
 * hvapi negotiation fails or this CPU has no usable counter support;
 * on the latter path the hvapi registration is unwound.
 */
int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		pcr_ops = &n2_pcr_ops;
		pcr_enable = PCR_N2_ENABLE;
		picl_shift = 2;
		break;

	case cheetah:
	case cheetah_plus:
		pcr_ops = &direct_pcr_ops;
		pcr_enable = PCR_SUN4U_ENABLE;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt so we can't make use of
		 * their hardware currently.
		 */
		/* fallthrough */
	default:
		err = -ENODEV;
		goto out_unregister;
	}

	return nmi_init();

out_unregister:
	unregister_perf_hsvc();
	return err;
}