/*
 * Freescale Embedded oprofile support, based on ppc64 oprofile support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
 *
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/reg_fsl_emb.h>
#include <asm/page.h>	/* is_kernel_addr() */
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>

static unsigned long reset_value[OP_MAX_COUNTER];

static int num_counters;
static int oprofile_running;

static inline u32 get_pmlca(int ctr)
{
	u32 pmlca;

	switch (ctr) {
		case 0:
			pmlca = mfpmr(PMRN_PMLCA0);
			break;
		case 1:
			pmlca = mfpmr(PMRN_PMLCA1);
			break;
		case 2:
			pmlca = mfpmr(PMRN_PMLCA2);
			break;
		case 3:
			pmlca = mfpmr(PMRN_PMLCA3);
			break;
		case 4:
			pmlca = mfpmr(PMRN_PMLCA4);
			break;
		case 5:
			pmlca = mfpmr(PMRN_PMLCA5);
			break;
		default:
			panic("Bad ctr number\n");
	}

	return pmlca;
}

static inline void set_pmlca(int ctr, u32 pmlca)
{
	switch (ctr) {
		case 0:
			mtpmr(PMRN_PMLCA0, pmlca);
			break;
		case 1:
			mtpmr(PMRN_PMLCA1, pmlca);
			break;
		case 2:
			mtpmr(PMRN_PMLCA2, pmlca);
			break;
		case 3:
			mtpmr(PMRN_PMLCA3, pmlca);
			break;
		case 4:
			mtpmr(PMRN_PMLCA4, pmlca);
			break;
		case 5:
			mtpmr(PMRN_PMLCA5, pmlca);
			break;
		default:
			panic("Bad ctr number\n");
	}
}

static inline unsigned int ctr_read(unsigned int i)
{
	switch (i) {
		case 0:
			return mfpmr(PMRN_PMC0);
		case 1:
			return mfpmr(PMRN_PMC1);
		case 2:
			return mfpmr(PMRN_PMC2);
		case 3:
			return mfpmr(PMRN_PMC3);
		case 4:
			return mfpmr(PMRN_PMC4);
		case 5:
			return mfpmr(PMRN_PMC5);
		default:
			return 0;
	}
}

static inline void ctr_write(unsigned int i, unsigned int val)
{
	switch (i) {
		case 0:
			mtpmr(PMRN_PMC0, val);
			break;
		case 1:
			mtpmr(PMRN_PMC1, val);
			break;
		case 2:
			mtpmr(PMRN_PMC2, val);
			break;
		case 3:
			mtpmr(PMRN_PMC3, val);
			break;
		case 4:
			mtpmr(PMRN_PMC4, val);
			break;
		case 5:
			mtpmr(PMRN_PMC5, val);
			break;
		default:
			break;
	}
}

static void init_pmc_stop(int ctr)
{
	/* Freeze the counter in every state: unconditionally (FC), in
	 * supervisor and user mode (FCS/FCU), and for both values of
	 * the mark bit (FCM1/FCM0). */
	u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
			PMLCA_FCM1 | PMLCA_FCM0);
	u32 pmlcb = 0;

	switch (ctr) {
		case 0:
			mtpmr(PMRN_PMLCA0, pmlca);
			mtpmr(PMRN_PMLCB0, pmlcb);
			break;
		case 1:
			mtpmr(PMRN_PMLCA1, pmlca);
			mtpmr(PMRN_PMLCB1, pmlcb);
			break;
		case 2:
			mtpmr(PMRN_PMLCA2, pmlca);
			mtpmr(PMRN_PMLCB2, pmlcb);
			break;
		case 3:
			mtpmr(PMRN_PMLCA3, pmlca);
			mtpmr(PMRN_PMLCB3, pmlcb);
			break;
		case 4:
			mtpmr(PMRN_PMLCA4, pmlca);
			mtpmr(PMRN_PMLCB4, pmlcb);
			break;
		case 5:
			mtpmr(PMRN_PMLCA5, pmlca);
			mtpmr(PMRN_PMLCB5, pmlcb);
			break;
		default:
			panic("Bad ctr number!\n");
	}
}

static void set_pmc_event(int ctr, int event)
{
	u32 pmlca;

	pmlca = get_pmlca(ctr);

	pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
		((event << PMLCA_EVENT_SHIFT) &
		 PMLCA_EVENT_MASK);

	set_pmlca(ctr, pmlca);
}

static void set_pmc_user_kernel(int ctr, int user, int kernel)
{
	u32 pmlca;

	pmlca = get_pmlca(ctr);

	/* FCU/FCS freeze counting in user/supervisor state; clear to count */
	if (user)
		pmlca &= ~PMLCA_FCU;
	else
		pmlca |= PMLCA_FCU;

	if (kernel)
		pmlca &= ~PMLCA_FCS;
	else
		pmlca |= PMLCA_FCS;

	set_pmlca(ctr, pmlca);
}

static void set_pmc_marked(int ctr, int mark0, int mark1)
{
	u32 pmlca = get_pmlca(ctr);

	if (mark0)
		pmlca &= ~PMLCA_FCM0;
	else
		pmlca |= PMLCA_FCM0;

	if (mark1)
		pmlca &= ~PMLCA_FCM1;
	else
		pmlca |= PMLCA_FCM1;

	set_pmlca(ctr, pmlca);
}

static void pmc_start_ctr(int ctr, int enable)
{
	u32 pmlca = get_pmlca(ctr);

	pmlca &= ~PMLCA_FC;

	if (enable)
		pmlca |= PMLCA_CE;
	else
		pmlca &= ~PMLCA_CE;

	set_pmlca(ctr, pmlca);
}

static void pmc_start_ctrs(int enable)
{
	u32 pmgc0 = mfpmr(PMRN_PMGC0);

	pmgc0 &= ~PMGC0_FAC;
	pmgc0 |= PMGC0_FCECE;

	if (enable)
		pmgc0 |= PMGC0_PMIE;
	else
		pmgc0 &= ~PMGC0_PMIE;

	mtpmr(PMRN_PMGC0, pmgc0);
}

static void pmc_stop_ctrs(void)
{
	u32 pmgc0 = mfpmr(PMRN_PMGC0);

	pmgc0 |= PMGC0_FAC;

	pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);

	mtpmr(PMRN_PMGC0, pmgc0);
}

static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
{
	int i;

	/* freeze all counters */
	pmc_stop_ctrs();

	for (i = 0; i < num_counters; i++) {
		init_pmc_stop(i);

		set_pmc_event(i, ctr[i].event);

		set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
	}

	return 0;
}

static int fsl_emb_reg_setup(struct op_counter_config *ctr,
			     struct op_system_config *sys,
			     int num_ctrs)
{
	int i;

	num_counters = num_ctrs;

	/* Our counters count up, and "count" refers to
	 * how much before the next interrupt, and we interrupt
	 * on overflow.  So we calculate the starting value
	 * which will give us "count" until overflow.
	 * Then we set the events on the enabled counters */
	for (i = 0; i < num_counters; ++i)
		reset_value[i] = 0x80000000UL - ctr[i].count;

	return 0;
}

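/*
 * Worked example of the formula above: a counter configured with
 * ctr[i].count == 100000 gets reset_value[i] = 0x80000000UL - 100000
 * = 0x7ffe7960.  Counting up from there, bit 31 becomes set after
 * exactly 100000 events, which is the overflow condition that raises
 * the performance monitor interrupt.
 */
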
static int fsl_emb_start(struct op_counter_config *ctr)
{
	int i;

	mtmsr(mfmsr() | MSR_PMM);

	for (i = 0; i < num_counters; ++i) {
		if (ctr[i].enabled) {
			ctr_write(i, reset_value[i]);
			/* Set each enabled counter to only
			 * count when the Mark bit is *not* set */
			set_pmc_marked(i, 1, 0);
			pmc_start_ctr(i, 1);
		} else {
			ctr_write(i, 0);

			/* Set the ctr to be stopped */
			pmc_start_ctr(i, 0);
		}
	}

	/* Clear the freeze bit, and enable the interrupt.
	 * The counters won't actually start until the rfi clears
	 * the PMM bit */
	pmc_start_ctrs(1);

	oprofile_running = 1;

	pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
			mfpmr(PMRN_PMGC0));

	return 0;
}

static void fsl_emb_stop(void)
{
	/* freeze counters */
	pmc_stop_ctrs();
	oprofile_running = 0;
	pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
			mfpmr(PMRN_PMGC0));
	mb();
}

static void fsl_emb_handle_interrupt(struct pt_regs *regs,
				    struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;

	pc = regs->nip;
	is_kernel = is_kernel_addr(pc);

	for (i = 0; i < num_counters; ++i) {
		val = ctr_read(i);
		/* val < 0 means bit 31 is set: this counter overflowed */
		if (val < 0) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				ctr_write(i, reset_value[i]);
			} else {
				ctr_write(i, 0);
			}
		}
	}

	/* The freeze bit was set by the interrupt. */
	/* Clear the freeze bit, and reenable the interrupt.  The
	 * counters won't actually start until the rfi clears the PMM
	 * bit.  The PMM bit should not be set until after the interrupt
	 * is cleared to avoid it getting lost in some hypervisor
	 * environments. */
	mtmsr(mfmsr() | MSR_PMM);
	pmc_start_ctrs(1);
}

struct op_powerpc_model op_model_fsl_emb = {
	.reg_setup		= fsl_emb_reg_setup,
	.cpu_setup		= fsl_emb_cpu_setup,
	.start			= fsl_emb_start,
	.stop			= fsl_emb_stop,
	.handle_interrupt	= fsl_emb_handle_interrupt,
};