arch/powerpc/oprofile/op_model_fsl_emb.c

/*
 * Freescale Embedded oprofile support, based on ppc64 oprofile support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/reg_fsl_emb.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>

static unsigned long reset_value[OP_MAX_COUNTER];

static int num_counters;
static int oprofile_running;
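
/*
 * get_pmlca()/set_pmlca() read and write a counter's "local control A"
 * register (PMLCAn) via the mfpmr()/mtpmr() accessors; PMLCAn holds the
 * event select and the freeze/enable bits manipulated below.
 */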
static inline u32 get_pmlca(int ctr)
{
	u32 pmlca;

	switch (ctr) {
	case 0:
		pmlca = mfpmr(PMRN_PMLCA0);
		break;
	case 1:
		pmlca = mfpmr(PMRN_PMLCA1);
		break;
	case 2:
		pmlca = mfpmr(PMRN_PMLCA2);
		break;
	case 3:
		pmlca = mfpmr(PMRN_PMLCA3);
		break;
	default:
		panic("Bad ctr number\n");
	}

	return pmlca;
}

static inline void set_pmlca(int ctr, u32 pmlca)
{
	switch (ctr) {
	case 0:
		mtpmr(PMRN_PMLCA0, pmlca);
		break;
	case 1:
		mtpmr(PMRN_PMLCA1, pmlca);
		break;
	case 2:
		mtpmr(PMRN_PMLCA2, pmlca);
		break;
	case 3:
		mtpmr(PMRN_PMLCA3, pmlca);
		break;
	default:
		panic("Bad ctr number\n");
	}
}
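
/*
 * ctr_read()/ctr_write() access the counter registers themselves
 * (PMC0..PMC3); an out-of-range index reads back as 0 and writes to it
 * are silently ignored.
 */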
static inline unsigned int ctr_read(unsigned int i)
{
	switch (i) {
	case 0:
		return mfpmr(PMRN_PMC0);
	case 1:
		return mfpmr(PMRN_PMC1);
	case 2:
		return mfpmr(PMRN_PMC2);
	case 3:
		return mfpmr(PMRN_PMC3);
	default:
		return 0;
	}
}

static inline void ctr_write(unsigned int i, unsigned int val)
{
	switch (i) {
	case 0:
		mtpmr(PMRN_PMC0, val);
		break;
	case 1:
		mtpmr(PMRN_PMC1, val);
		break;
	case 2:
		mtpmr(PMRN_PMC2, val);
		break;
	case 3:
		mtpmr(PMRN_PMC3, val);
		break;
	default:
		break;
	}
}
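
/*
 * Put one counter into a fully frozen state: freeze it unconditionally
 * (FC) as well as in supervisor (FCS) and user (FCU) state and for both
 * values of the mark bit (FCM1/FCM0), and clear its secondary control
 * register (PMLCBn).
 */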
static void init_pmc_stop(int ctr)
{
	u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
			PMLCA_FCM1 | PMLCA_FCM0);
	u32 pmlcb = 0;

	switch (ctr) {
	case 0:
		mtpmr(PMRN_PMLCA0, pmlca);
		mtpmr(PMRN_PMLCB0, pmlcb);
		break;
	case 1:
		mtpmr(PMRN_PMLCA1, pmlca);
		mtpmr(PMRN_PMLCB1, pmlcb);
		break;
	case 2:
		mtpmr(PMRN_PMLCA2, pmlca);
		mtpmr(PMRN_PMLCB2, pmlcb);
		break;
	case 3:
		mtpmr(PMRN_PMLCA3, pmlca);
		mtpmr(PMRN_PMLCB3, pmlcb);
		break;
	default:
		panic("Bad ctr number!\n");
	}
}
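
/* Select which event counter "ctr" counts, via the PMLCAn event field. */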
static void set_pmc_event(int ctr, int event)
{
	u32 pmlca;

	pmlca = get_pmlca(ctr);

	pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
		((event << PMLCA_EVENT_SHIFT) &
		 PMLCA_EVENT_MASK);

	set_pmlca(ctr, pmlca);
}
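
/*
 * Note the inverted sense here: FCU and FCS are "freeze in user state"
 * and "freeze in supervisor state" bits, so enabling user or kernel
 * profiling means clearing the corresponding freeze bit.
 */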
static void set_pmc_user_kernel(int ctr, int user, int kernel)
{
	u32 pmlca;

	pmlca = get_pmlca(ctr);

	if (user)
		pmlca &= ~PMLCA_FCU;
	else
		pmlca |= PMLCA_FCU;

	if (kernel)
		pmlca &= ~PMLCA_FCS;
	else
		pmlca |= PMLCA_FCS;

	set_pmlca(ctr, pmlca);
}
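
/*
 * Likewise for the mark-bit filters: clearing FCM0/FCM1 lets the
 * counter run while MSR[PMM] is 0 or 1 respectively.
 */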
static void set_pmc_marked(int ctr, int mark0, int mark1)
{
	u32 pmlca = get_pmlca(ctr);

	if (mark0)
		pmlca &= ~PMLCA_FCM0;
	else
		pmlca |= PMLCA_FCM0;

	if (mark1)
		pmlca &= ~PMLCA_FCM1;
	else
		pmlca |= PMLCA_FCM1;

	set_pmlca(ctr, pmlca);
}
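
/* Unfreeze one counter and enable (or disable) its overflow condition. */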
static void pmc_start_ctr(int ctr, int enable)
{
	u32 pmlca = get_pmlca(ctr);

	pmlca &= ~PMLCA_FC;

	if (enable)
		pmlca |= PMLCA_CE;
	else
		pmlca &= ~PMLCA_CE;

	set_pmlca(ctr, pmlca);
}
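
/*
 * Globally unfreeze the counters via PMGC0: clear the freeze-all bit
 * (FAC), refreeze automatically on an enabled condition or event
 * (FCECE), and optionally take the performance monitor interrupt
 * (PMIE).
 */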
static void pmc_start_ctrs(int enable)
{
	u32 pmgc0 = mfpmr(PMRN_PMGC0);

	pmgc0 &= ~PMGC0_FAC;
	pmgc0 |= PMGC0_FCECE;

	if (enable)
		pmgc0 |= PMGC0_PMIE;
	else
		pmgc0 &= ~PMGC0_PMIE;

	mtpmr(PMRN_PMGC0, pmgc0);
}

static void pmc_stop_ctrs(void)
{
	u32 pmgc0 = mfpmr(PMRN_PMGC0);

	pmgc0 |= PMGC0_FAC;

	pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);

	mtpmr(PMRN_PMGC0, pmgc0);
}
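
/*
 * Per-CPU setup: freeze everything, then program each counter (still
 * frozen) with its event and user/kernel filtering from the oprofile
 * configuration.
 */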
static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
{
	int i;

	/* freeze all counters */
	pmc_stop_ctrs();

	for (i = 0; i < num_counters; i++) {
		init_pmc_stop(i);

		set_pmc_event(i, ctr[i].event);

		set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
	}

	return 0;
}

static int fsl_emb_reg_setup(struct op_counter_config *ctr,
			     struct op_system_config *sys,
			     int num_ctrs)
{
	int i;

	num_counters = num_ctrs;

	/* Our counters count up and we interrupt on overflow;
	 * "count" is the number of events to count before the
	 * next interrupt.  So we calculate the starting value
	 * which will give us "count" events until overflow.
	 * The events themselves are set on the enabled counters
	 * in fsl_emb_cpu_setup(). */
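	/*
	 * Illustrative example (hypothetical numbers, not from this
	 * file): with ctr[i].count == 100000, reset_value[i] becomes
	 * 0x80000000 - 100000 = 0x7ffe7960, so after 100000 increments
	 * the counter's sign bit is set and the overflow condition
	 * raises the performance monitor interrupt.
	 */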
	for (i = 0; i < num_counters; ++i)
		reset_value[i] = 0x80000000UL - ctr[i].count;

	return 0;
}

static int fsl_emb_start(struct op_counter_config *ctr)
{
	int i;

	mtmsr(mfmsr() | MSR_PMM);

	for (i = 0; i < num_counters; ++i) {
		if (ctr[i].enabled) {
			ctr_write(i, reset_value[i]);
			/* Set each enabled counter to only
			 * count when the Mark bit is *not* set */
			set_pmc_marked(i, 1, 0);
			pmc_start_ctr(i, 1);
		} else {
			ctr_write(i, 0);

			/* Set the ctr to be stopped */
			pmc_start_ctr(i, 0);
		}
	}

	/* Clear the freeze bit, and enable the interrupt.
	 * The counters won't actually start until the rfi clears
	 * the PMM bit */
	pmc_start_ctrs(1);

	oprofile_running = 1;

	pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
			mfpmr(PMRN_PMGC0));

	return 0;
}

static void fsl_emb_stop(void)
{
	/* freeze counters */
	pmc_stop_ctrs();

	oprofile_running = 0;

	pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
			mfpmr(PMRN_PMGC0));

	mb();
}

static void fsl_emb_handle_interrupt(struct pt_regs *regs,
				     struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;

	pc = regs->nip;
	is_kernel = is_kernel_addr(pc);

	for (i = 0; i < num_counters; ++i) {
		val = ctr_read(i);
		if (val < 0) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				ctr_write(i, reset_value[i]);
			} else {
				ctr_write(i, 0);
			}
		}
	}

	/* The freeze bit was set by the interrupt. */
	/* Clear the freeze bit, and reenable the interrupt.  The
	 * counters won't actually start until the rfi clears the PMM
	 * bit.  The PMM bit should not be set until after the interrupt
	 * is cleared to avoid it getting lost in some hypervisor
	 * environments.
	 */
	mtmsr(mfmsr() | MSR_PMM);
	pmc_start_ctrs(1);
}

struct op_powerpc_model op_model_fsl_emb = {
	.reg_setup		= fsl_emb_reg_setup,
	.cpu_setup		= fsl_emb_cpu_setup,
	.start			= fsl_emb_start,
	.stop			= fsl_emb_stop,
	.handle_interrupt	= fsl_emb_handle_interrupt,
};
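
/*
 * Nothing in this file calls these hooks directly; the common powerpc
 * oprofile layer selects this model for Freescale embedded cores and
 * drives it through the callbacks registered above.
 */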