/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irq.h>

#include <linux/irq_work.h>
#include <linux/ftrace.h>

#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>
#include <asm/asi.h>
#include <asm/spitfire.h>

/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
 * perf_event support layer.
 */

/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work
 * that expects IRQ disabling to be adhered to in locking etc.
 *
 * Therefore in such situations we defer the work by signalling
 * a lower level cpu IRQ.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}
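
/* Arch hook for the generic irq_work machinery: raise the deferred-work
 * softint so that irq_work_run() is invoked from the lower-PIL handler
 * above, where the normal IRQ-disabling locking rules hold.
 */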
void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}

const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);
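
/* The "direct" ops program %pcr and %pic with plain rd/wr instructions;
 * they are used on the pre-sun4v cheetah and cheetah+ chips (see
 * pcr_arch_init below), where the kernel owns the counters outright.
 */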
static u64 direct_pcr_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pcr, %0" : "=r" (val));
	return val;
}

static void direct_pcr_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (val));
}

static u64 direct_pic_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pic, %0" : "=r" (val));
	return val;
}

static void direct_pic_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);

	/* Blackbird errata workaround.  See commentary in
	 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
	 * for more information.
	 */
	__asm__ __volatile__("ba,pt %%xcc, 99f\n\t"
			     " nop\n\t"
			     ".align 64\n"
			     "99:wr %0, 0x0, %%pic\n\t"
			     "rd %%pic, %%g0" : : "r" (val));
}
static u64 direct_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / nmi_hz;

	return ((u64)((0 - delta) & 0xffffffff)) << 32;
}

static const struct pcr_ops direct_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= direct_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
	.nmi_picl_value		= direct_picl_value,
	.pcr_nmi_enable		= (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE),
	.pcr_nmi_disable	= PCR_PIC_PRIV,
};
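
/* The early sun4v Niagara chips mostly reuse the direct accessors, but
 * setting the hypervisor-trace bit (PCR_N2_HTRACE) in the PCR has to go
 * through the sun4v_niagara2_setperf() hypervisor call, falling back to
 * a direct write if that call fails.
 */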
static void n2_pcr_write(unsigned long reg_num, u64 val)
{
	unsigned long ret;

	WARN_ON_ONCE(reg_num != 0);
	if (val & PCR_N2_HTRACE) {
		ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
		if (ret != HV_EOK)
			direct_pcr_write(reg_num, val);
	} else
		direct_pcr_write(reg_num, val);
}

static u64 n2_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);

	return ((u64)((0 - delta) & 0xffffffff)) << 32;
}

static const struct pcr_ops n2_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= n2_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
	.nmi_picl_value		= n2_picl_value,
	.pcr_nmi_enable		= (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE |
				   PCR_N2_TOE_OV1 |
				   (2 << PCR_N2_SL1_SHIFT) |
				   (0xff << PCR_N2_MASK1_SHIFT)),
	.pcr_nmi_disable	= PCR_PIC_PRIV,
};
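
/* SPARC-T4 ("VT" hypervisor group): the PCRs are reachable only via the
 * sun4v_vt_{get,set}_perfreg() hypervisor calls, while the PICs are read
 * and written directly through ASI_PIC, with reg_num selecting one of
 * the per-counter registers at offset reg_num * 8.
 */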
static u64 n4_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_vt_get_perfreg(reg_num, &val);

	return val;
}

static void n4_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_vt_set_perfreg(reg_num, val);
}

static u64 n4_pic_read(unsigned long reg_num)
{
	unsigned long val;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (val)
			     : "r" (reg_num * 0x8UL), "i" (ASI_PIC));

	return val;
}

static void n4_pic_write(unsigned long reg_num, u64 val)
{
	__asm__ __volatile__("stxa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (val), "r" (reg_num * 0x8UL), "i" (ASI_PIC));
}

static u64 n4_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);

	return ((u64)((0 - delta) & 0xffffffff));
}

static const struct pcr_ops n4_pcr_ops = {
	.read_pcr		= n4_pcr_read,
	.write_pcr		= n4_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};
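
/* SPARC-T5: the PCR hypervisor calls differ (sun4v_t5_*_perfreg()), but
 * PIC access and the NMI watchdog settings are shared with T4.
 */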
static u64 n5_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_t5_get_perfreg(reg_num, &val);

	return val;
}

static void n5_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_t5_set_perfreg(reg_num, val);
}

static const struct pcr_ops n5_pcr_ops = {
	.read_pcr		= n5_pcr_read,
	.write_pcr		= n5_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};
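
/* SPARC-M7: same layout as T4/T5, with its own sun4v_m7_*_perfreg()
 * hypervisor calls for the PCRs.
 */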
static u64 m7_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_m7_get_perfreg(reg_num, &val);

	return val;
}

static void m7_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_m7_set_perfreg(reg_num, val);
}

static const struct pcr_ops m7_pcr_ops = {
	.read_pcr		= m7_pcr_read,
	.write_pcr		= m7_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;
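
/* On sun4v we must negotiate version 1.0 of the chip-specific performance
 * counter hypervisor API group before the counters can be touched; an
 * unknown chip, or a failed registration, disables the whole facility
 * with -ENODEV.
 */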
static int __init register_perf_hsvc(void)
{
	unsigned long hverror;

	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
			perf_hsvc_group = HV_GRP_NIAG_PERF;
			break;

		case SUN4V_CHIP_NIAGARA2:
			perf_hsvc_group = HV_GRP_N2_CPU;
			break;

		case SUN4V_CHIP_NIAGARA3:
			perf_hsvc_group = HV_GRP_KT_CPU;
			break;

		case SUN4V_CHIP_NIAGARA4:
			perf_hsvc_group = HV_GRP_VT_CPU;
			break;

		case SUN4V_CHIP_NIAGARA5:
			perf_hsvc_group = HV_GRP_T5_CPU;
			break;

		case SUN4V_CHIP_SPARC_M7:
			perf_hsvc_group = HV_GRP_M7_PERF;
			break;

		default:
			return -ENODEV;
		}

		perf_hsvc_major = 1;
		perf_hsvc_minor = 0;
		hverror = sun4v_hvapi_register(perf_hsvc_group,
					       perf_hsvc_major,
					       &perf_hsvc_minor);
		if (hverror) {
			pr_err("perfmon: Could not register hvapi(0x%lx).\n",
			       hverror);
			return -ENODEV;
		}
	}

	return 0;
}
static void __init unregister_perf_hsvc(void)
{
	if (tlb_type != hypervisor)
		return;
	sun4v_hvapi_unregister(perf_hsvc_group);
}

static int __init setup_sun4v_pcr_ops(void)
{
	int ret = 0;

	switch (sun4v_chip_type) {
	case SUN4V_CHIP_NIAGARA1:
	case SUN4V_CHIP_NIAGARA2:
	case SUN4V_CHIP_NIAGARA3:
		pcr_ops = &n2_pcr_ops;
		break;

	case SUN4V_CHIP_NIAGARA4:
		pcr_ops = &n4_pcr_ops;
		break;

	case SUN4V_CHIP_NIAGARA5:
		pcr_ops = &n5_pcr_ops;
		break;

	case SUN4V_CHIP_SPARC_M7:
		pcr_ops = &m7_pcr_ops;
		break;

	default:
		ret = -ENODEV;
		break;
	}

	return ret;
}
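
/* Register the hypervisor API where needed, pick the pcr_ops
 * implementation matching the cpu, then initialize the NMI watchdog
 * (nmi_init()), which relies on these ops.
 */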
int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		err = setup_sun4v_pcr_ops();
		if (err)
			goto out_unregister;
		break;

	case cheetah:
	case cheetah_plus:
		pcr_ops = &direct_pcr_ops;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt so we can't make use of
		 * their hardware currently.
		 */
		/* fallthrough */
	default:
		err = -ENODEV;
		goto out_unregister;
	}

	return nmi_init();

out_unregister:
	unregister_perf_hsvc();
	return err;
}