arch/mips/oprofile/op_model_mipsxx.c — Linux kernel oprofile performance-counter model for MIPS32/MIPS64 cores
(git blob 7c04b17f4a488aa8e50186be84dd0fcc50125ba2)
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
6 * Copyright (C) 2004, 05, 06 by Ralf Baechle
7 * Copyright (C) 2005 by MIPS Technologies, Inc.
8 */
9 #include <linux/cpumask.h>
10 #include <linux/oprofile.h>
11 #include <linux/interrupt.h>
12 #include <linux/smp.h>
13 #include <asm/irq_regs.h>
14 #include <asm/time.h>
16 #include "op_impl.h"
/* Pack an event number into the event field of a CP0 perf control register. */
#define M_PERFCTL_EVENT(event)	(((event) << MIPS_PERFCTRL_EVENT_S) & \
				 MIPS_PERFCTRL_EVENT)
/* Select which VPE a counter is bound to (MT ASE). */
#define M_PERFCTL_VPEID(vpe)	((vpe) << MIPS_PERFCTRL_VPEID_S)

/* Counters are 32 bit; bit 31 set means the counter overflowed. */
#define M_COUNTER_OVERFLOW	(1UL << 31)
/* Previous perf_irq handler, saved in mipsxx_init() and restored on exit. */
static int (*save_perf_irq)(void);
/* IRQ number used for counter overflow, or -1 when none could be determined. */
static int perfcount_irq;
/*
 * XLR has only one set of counters per core. Designate the
 * first hardware thread in the core for setup and init.
 * Skip CPUs with non-zero hardware thread id (4 hwt per core).
 */
#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
#define oprofile_skip_cpu(c)	((cpu_logical_map(c) & 0x3) != 0)
#else
#define oprofile_skip_cpu(c)	0
#endif
#ifdef CONFIG_MIPS_MT_SMP
/*
 * Extra control-register bits for VSMP: enable counting for this VPE and
 * bind the counter to it.  vpe_id() is 0 when the CPU has per-TC counters,
 * since each TC then owns its own set.
 */
#define WHAT		(MIPS_PERFCTRL_MT_EN_VPE | \
			 M_PERFCTL_VPEID(cpu_vpe_id(&current_cpu_data)))
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_vpe_id(&current_cpu_data))

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE. There is no reasonable interface atm to obtain the
 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyways so we hardcode a few things here for the moment. The way it's
 * done here will ensure that oprofile VSMP kernel will run right on a lesser
 * core like a 24K also or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

#else

#define WHAT		0
#define vpe_id()	0

static inline unsigned int vpe_shift(void)
{
	return 0;
}

#endif
/* Convert a whole-core counter count into the share seen by each CPU/VPE. */
static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}
/* Convert a per-CPU/VPE counter count back into the whole-core total. */
static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
{
	return counters << vpe_shift();
}
/*
 * Define per-VPE accessors r_c0_<reg><n>() / w_c0_<reg><n>() that read or
 * write counter/control register <n> as seen from the current VPE: VPE 0
 * uses register <n> directly, VPE 1 uses the alternate mapping <np>.
 * Any other vpe_id() value is a hard bug.
 */
#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
/* Forward declaration; the ops table is filled in at the bottom of this file. */
struct op_mips_model op_model_mipsxx_ops;

/* Control/counter start values computed by mipsxx_reg_setup(), programmed
 * into the hardware by the cpu_setup/cpu_start/interrupt paths. */
static struct mipsxx_register_config {
	unsigned int control[4];
	unsigned int counter[4];
} reg;
133 /* Compute all of the registers in preparation for enabling profiling. */
135 static void mipsxx_reg_setup(struct op_counter_config *ctr)
137 unsigned int counters = op_model_mipsxx_ops.num_counters;
138 int i;
140 /* Compute the performance counter control word. */
141 for (i = 0; i < counters; i++) {
142 reg.control[i] = 0;
143 reg.counter[i] = 0;
145 if (!ctr[i].enabled)
146 continue;
148 reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
149 MIPS_PERFCTRL_IE;
150 if (ctr[i].kernel)
151 reg.control[i] |= MIPS_PERFCTRL_K;
152 if (ctr[i].user)
153 reg.control[i] |= MIPS_PERFCTRL_U;
154 if (ctr[i].exl)
155 reg.control[i] |= MIPS_PERFCTRL_EXL;
156 if (boot_cpu_type() == CPU_XLR)
157 reg.control[i] |= XLR_PERFCTRL_ALLTHREADS;
158 reg.counter[i] = 0x80000000 - ctr[i].count;
162 /* Program all of the registers in preparation for enabling profiling. */
164 static void mipsxx_cpu_setup(void *args)
166 unsigned int counters = op_model_mipsxx_ops.num_counters;
168 if (oprofile_skip_cpu(smp_processor_id()))
169 return;
171 switch (counters) {
172 case 4:
173 w_c0_perfctrl3(0);
174 w_c0_perfcntr3(reg.counter[3]);
175 case 3:
176 w_c0_perfctrl2(0);
177 w_c0_perfcntr2(reg.counter[2]);
178 case 2:
179 w_c0_perfctrl1(0);
180 w_c0_perfcntr1(reg.counter[1]);
181 case 1:
182 w_c0_perfctrl0(0);
183 w_c0_perfcntr0(reg.counter[0]);
187 /* Start all counters on current CPU */
188 static void mipsxx_cpu_start(void *args)
190 unsigned int counters = op_model_mipsxx_ops.num_counters;
192 if (oprofile_skip_cpu(smp_processor_id()))
193 return;
195 switch (counters) {
196 case 4:
197 w_c0_perfctrl3(WHAT | reg.control[3]);
198 case 3:
199 w_c0_perfctrl2(WHAT | reg.control[2]);
200 case 2:
201 w_c0_perfctrl1(WHAT | reg.control[1]);
202 case 1:
203 w_c0_perfctrl0(WHAT | reg.control[0]);
207 /* Stop all counters on current CPU */
208 static void mipsxx_cpu_stop(void *args)
210 unsigned int counters = op_model_mipsxx_ops.num_counters;
212 if (oprofile_skip_cpu(smp_processor_id()))
213 return;
215 switch (counters) {
216 case 4:
217 w_c0_perfctrl3(0);
218 case 3:
219 w_c0_perfctrl2(0);
220 case 2:
221 w_c0_perfctrl1(0);
222 case 1:
223 w_c0_perfctrl0(0);
227 static int mipsxx_perfcount_handler(void)
229 unsigned int counters = op_model_mipsxx_ops.num_counters;
230 unsigned int control;
231 unsigned int counter;
232 int handled = IRQ_NONE;
234 if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
235 return handled;
237 switch (counters) {
238 #define HANDLE_COUNTER(n) \
239 case n + 1: \
240 control = r_c0_perfctrl ## n(); \
241 counter = r_c0_perfcntr ## n(); \
242 if ((control & MIPS_PERFCTRL_IE) && \
243 (counter & M_COUNTER_OVERFLOW)) { \
244 oprofile_add_sample(get_irq_regs(), n); \
245 w_c0_perfcntr ## n(reg.counter[n]); \
246 handled = IRQ_HANDLED; \
248 HANDLE_COUNTER(3)
249 HANDLE_COUNTER(2)
250 HANDLE_COUNTER(1)
251 HANDLE_COUNTER(0)
254 return handled;
257 static inline int __n_counters(void)
259 if (!cpu_has_perf)
260 return 0;
261 if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
262 return 1;
263 if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
264 return 2;
265 if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
266 return 3;
268 return 4;
271 static inline int n_counters(void)
273 int counters;
275 switch (current_cpu_type()) {
276 case CPU_R10000:
277 counters = 2;
278 break;
280 case CPU_R12000:
281 case CPU_R14000:
282 case CPU_R16000:
283 counters = 4;
284 break;
286 default:
287 counters = __n_counters();
290 return counters;
/* Zero control and counter registers 0..counters-1 on the calling CPU;
 * the switch intentionally falls through from the highest counter down. */
static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
		/* fall through */
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
		/* fall through */
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
		/* fall through */
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(0);
	}
}
312 static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
314 return mipsxx_perfcount_handler();
317 static int __init mipsxx_init(void)
319 int counters;
321 counters = n_counters();
322 if (counters == 0) {
323 printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
324 return -ENODEV;
327 #ifdef CONFIG_MIPS_MT_SMP
328 if (!cpu_has_mipsmt_pertccounters)
329 counters = counters_total_to_per_cpu(counters);
330 #endif
331 on_each_cpu(reset_counters, (void *)(long)counters, 1);
333 op_model_mipsxx_ops.num_counters = counters;
334 switch (current_cpu_type()) {
335 case CPU_M14KC:
336 op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
337 break;
339 case CPU_M14KEC:
340 op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
341 break;
343 case CPU_20KC:
344 op_model_mipsxx_ops.cpu_type = "mips/20K";
345 break;
347 case CPU_24K:
348 op_model_mipsxx_ops.cpu_type = "mips/24K";
349 break;
351 case CPU_25KF:
352 op_model_mipsxx_ops.cpu_type = "mips/25K";
353 break;
355 case CPU_1004K:
356 case CPU_34K:
357 op_model_mipsxx_ops.cpu_type = "mips/34K";
358 break;
360 case CPU_1074K:
361 case CPU_74K:
362 op_model_mipsxx_ops.cpu_type = "mips/74K";
363 break;
365 case CPU_INTERAPTIV:
366 op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
367 break;
369 case CPU_PROAPTIV:
370 op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
371 break;
373 case CPU_P5600:
374 op_model_mipsxx_ops.cpu_type = "mips/P5600";
375 break;
377 case CPU_I6400:
378 op_model_mipsxx_ops.cpu_type = "mips/I6400";
379 break;
381 case CPU_M5150:
382 op_model_mipsxx_ops.cpu_type = "mips/M5150";
383 break;
385 case CPU_5KC:
386 op_model_mipsxx_ops.cpu_type = "mips/5K";
387 break;
389 case CPU_R10000:
390 if ((current_cpu_data.processor_id & 0xff) == 0x20)
391 op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
392 else
393 op_model_mipsxx_ops.cpu_type = "mips/r10000";
394 break;
396 case CPU_R12000:
397 case CPU_R14000:
398 op_model_mipsxx_ops.cpu_type = "mips/r12000";
399 break;
401 case CPU_R16000:
402 op_model_mipsxx_ops.cpu_type = "mips/r16000";
403 break;
405 case CPU_SB1:
406 case CPU_SB1A:
407 op_model_mipsxx_ops.cpu_type = "mips/sb1";
408 break;
410 case CPU_LOONGSON1:
411 op_model_mipsxx_ops.cpu_type = "mips/loongson1";
412 break;
414 case CPU_XLR:
415 op_model_mipsxx_ops.cpu_type = "mips/xlr";
416 break;
418 default:
419 printk(KERN_ERR "Profiling unsupported for this CPU\n");
421 return -ENODEV;
424 save_perf_irq = perf_irq;
425 perf_irq = mipsxx_perfcount_handler;
427 if (get_c0_perfcount_int)
428 perfcount_irq = get_c0_perfcount_int();
429 else if (cp0_perfcount_irq >= 0)
430 perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
431 else
432 perfcount_irq = -1;
434 if (perfcount_irq >= 0)
435 return request_irq(perfcount_irq, mipsxx_perfcount_int,
436 IRQF_PERCPU | IRQF_NOBALANCING |
437 IRQF_NO_THREAD | IRQF_NO_SUSPEND |
438 IRQF_SHARED,
439 "Perfcounter", save_perf_irq);
441 return 0;
444 static void mipsxx_exit(void)
446 int counters = op_model_mipsxx_ops.num_counters;
448 if (perfcount_irq >= 0)
449 free_irq(perfcount_irq, save_perf_irq);
451 counters = counters_per_cpu_to_total(counters);
452 on_each_cpu(reset_counters, (void *)(long)counters, 1);
454 perf_irq = save_perf_irq;
457 struct op_mips_model op_model_mipsxx_ops = {
458 .reg_setup = mipsxx_reg_setup,
459 .cpu_setup = mipsxx_cpu_setup,
460 .init = mipsxx_init,
461 .exit = mipsxx_exit,
462 .cpu_start = mipsxx_cpu_start,
463 .cpu_stop = mipsxx_cpu_stop,