// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale Embedded oprofile support, based on ppc64 oprofile support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 */
#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/reg_fsl_emb.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>
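/* Restart value for each counter, chosen so that a counter loaded with
 * it overflows (goes negative) after counting ctr[i].count events. */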
static unsigned long reset_value[OP_MAX_COUNTER];

static int num_counters;
static int oprofile_running;
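/* The PMR number given to mfpmr()/mtpmr() must be a compile-time
 * constant, so the per-counter registers are switched out by hand
 * rather than indexed. */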
static inline u32 get_pmlca(int ctr)
{
	u32 pmlca;

	switch (ctr) {
	case 0:
		pmlca = mfpmr(PMRN_PMLCA0);
		break;
	case 1:
		pmlca = mfpmr(PMRN_PMLCA1);
		break;
	case 2:
		pmlca = mfpmr(PMRN_PMLCA2);
		break;
	case 3:
		pmlca = mfpmr(PMRN_PMLCA3);
		break;
	case 4:
		pmlca = mfpmr(PMRN_PMLCA4);
		break;
	case 5:
		pmlca = mfpmr(PMRN_PMLCA5);
		break;
	default:
		panic("Bad ctr number\n");
	}

	return pmlca;
}
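/* Write back a counter's local control A (PMLCAn) register. */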
static inline void set_pmlca(int ctr, u32 pmlca)
{
	switch (ctr) {
	case 0:
		mtpmr(PMRN_PMLCA0, pmlca);
		break;
	case 1:
		mtpmr(PMRN_PMLCA1, pmlca);
		break;
	case 2:
		mtpmr(PMRN_PMLCA2, pmlca);
		break;
	case 3:
		mtpmr(PMRN_PMLCA3, pmlca);
		break;
	case 4:
		mtpmr(PMRN_PMLCA4, pmlca);
		break;
	case 5:
		mtpmr(PMRN_PMLCA5, pmlca);
		break;
	default:
		panic("Bad ctr number\n");
	}
}
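/* Read the current value of performance monitor counter i. */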
static inline unsigned int ctr_read(unsigned int i)
{
	switch (i) {
	case 0:
		return mfpmr(PMRN_PMC0);
	case 1:
		return mfpmr(PMRN_PMC1);
	case 2:
		return mfpmr(PMRN_PMC2);
	case 3:
		return mfpmr(PMRN_PMC3);
	case 4:
		return mfpmr(PMRN_PMC4);
	case 5:
		return mfpmr(PMRN_PMC5);
	default:
		return 0;
	}
}
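/* Load performance monitor counter i with val. */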
static inline void ctr_write(unsigned int i, unsigned int val)
{
	switch (i) {
	case 0:
		mtpmr(PMRN_PMC0, val);
		break;
	case 1:
		mtpmr(PMRN_PMC1, val);
		break;
	case 2:
		mtpmr(PMRN_PMC2, val);
		break;
	case 3:
		mtpmr(PMRN_PMC3, val);
		break;
	case 4:
		mtpmr(PMRN_PMC4, val);
		break;
	case 5:
		mtpmr(PMRN_PMC5, val);
		break;
	default:
		break;
	}
}
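/* Put one counter into a fully frozen state: frozen unconditionally
 * (FC) as well as in user, supervisor, and both mark states, with its
 * local control B register cleared. */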
static void init_pmc_stop(int ctr)
{
	u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
			PMLCA_FCM1 | PMLCA_FCM0);
	u32 pmlcb = 0;

	switch (ctr) {
	case 0:
		mtpmr(PMRN_PMLCA0, pmlca);
		mtpmr(PMRN_PMLCB0, pmlcb);
		break;
	case 1:
		mtpmr(PMRN_PMLCA1, pmlca);
		mtpmr(PMRN_PMLCB1, pmlcb);
		break;
	case 2:
		mtpmr(PMRN_PMLCA2, pmlca);
		mtpmr(PMRN_PMLCB2, pmlcb);
		break;
	case 3:
		mtpmr(PMRN_PMLCA3, pmlca);
		mtpmr(PMRN_PMLCB3, pmlcb);
		break;
	case 4:
		mtpmr(PMRN_PMLCA4, pmlca);
		mtpmr(PMRN_PMLCB4, pmlcb);
		break;
	case 5:
		mtpmr(PMRN_PMLCA5, pmlca);
		mtpmr(PMRN_PMLCB5, pmlcb);
		break;
	default:
		panic("Bad ctr number!\n");
	}
}
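/* Program the event selector field in a counter's PMLCA register. */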
static void set_pmc_event(int ctr, int event)
{
	u32 pmlca;

	pmlca = get_pmlca(ctr);

	pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
		((event << PMLCA_EVENT_SHIFT) &
		 PMLCA_EVENT_MASK);

	set_pmlca(ctr, pmlca);
}
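/* Enable or disable counting in user and supervisor (kernel) state by
 * clearing or setting the corresponding freeze bits (FCU/FCS). */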
static void set_pmc_user_kernel(int ctr, int user, int kernel)
{
	u32 pmlca;

	pmlca = get_pmlca(ctr);

	if (user)
		pmlca &= ~PMLCA_FCU;
	else
		pmlca |= PMLCA_FCU;

	if (kernel)
		pmlca &= ~PMLCA_FCS;
	else
		pmlca |= PMLCA_FCS;

	set_pmlca(ctr, pmlca);
}
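/* Select whether the counter counts while the MSR[PMM] mark bit is 0
 * (mark0) or 1 (mark1); clearing a freeze-on-mark bit enables counting
 * in that state. */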
static void set_pmc_marked(int ctr, int mark0, int mark1)
{
	u32 pmlca = get_pmlca(ctr);

	if (mark0)
		pmlca &= ~PMLCA_FCM0;
	else
		pmlca |= PMLCA_FCM0;

	if (mark1)
		pmlca &= ~PMLCA_FCM1;
	else
		pmlca |= PMLCA_FCM1;

	set_pmlca(ctr, pmlca);
}
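/* Unfreeze a single counter; "enable" also sets the counter's overflow
 * condition enable (CE) so it can raise a performance monitor
 * interrupt. */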
static void pmc_start_ctr(int ctr, int enable)
{
	u32 pmlca = get_pmlca(ctr);

	pmlca &= ~PMLCA_FC;

	if (enable)
		pmlca |= PMLCA_CE;
	else
		pmlca &= ~PMLCA_CE;

	set_pmlca(ctr, pmlca);
}
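/* Globally unfreeze the counters.  FCECE refreezes them all as soon as
 * an enabled overflow condition occurs; PMIE controls whether that
 * condition also raises a performance monitor interrupt. */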
static void pmc_start_ctrs(int enable)
{
	u32 pmgc0 = mfpmr(PMRN_PMGC0);

	pmgc0 &= ~PMGC0_FAC;
	pmgc0 |= PMGC0_FCECE;

	if (enable)
		pmgc0 |= PMGC0_PMIE;
	else
		pmgc0 &= ~PMGC0_PMIE;

	mtpmr(PMRN_PMGC0, pmgc0);
}
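/* Freeze all counters and mask the performance monitor interrupt. */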
static void pmc_stop_ctrs(void)
{
	u32 pmgc0 = mfpmr(PMRN_PMGC0);

	pmgc0 |= PMGC0_FAC;

	pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);

	mtpmr(PMRN_PMGC0, pmgc0);
}
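/* Per-CPU setup: freeze everything, then program each counter's event
 * and user/kernel filtering from the oprofile configuration. */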
static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
{
	int i;

	/* freeze all counters */
	pmc_stop_ctrs();

	for (i = 0; i < num_counters; i++) {
		init_pmc_stop(i);

		set_pmc_event(i, ctr[i].event);

		set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
	}

	return 0;
}
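/* One-time setup: record how many counters are in use and precompute
 * each counter's restart value. */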
static int fsl_emb_reg_setup(struct op_counter_config *ctr,
			     struct op_system_config *sys,
			     int num_ctrs)
{
	int i;

	num_counters = num_ctrs;

	/* Our counters count up, and "count" refers to
	 * how much before the next interrupt, and we interrupt
	 * on overflow.  So we calculate the starting value
	 * which will give us "count" until overflow.
	 * Then we set the events on the enabled counters */
	for (i = 0; i < num_counters; ++i)
		reset_value[i] = 0x80000000UL - ctr[i].count;

	return 0;
}
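/* Start profiling on this CPU.  MSR[PMM] is set first so the counters
 * stay frozen (via the freeze-on-mark bits) until the rfi back to the
 * interrupted context clears it. */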
static int fsl_emb_start(struct op_counter_config *ctr)
{
	int i;

	mtmsr(mfmsr() | MSR_PMM);

	for (i = 0; i < num_counters; ++i) {
		if (ctr[i].enabled) {
			ctr_write(i, reset_value[i]);
			/* Set each enabled counter to only
			 * count when the Mark bit is *not* set */
			set_pmc_marked(i, 1, 0);
			pmc_start_ctr(i, 1);
		} else {
			ctr_write(i, 0);

			/* Set the ctr to be stopped */
			pmc_start_ctr(i, 0);
		}
	}

	/* Clear the freeze bit, and enable the interrupt.
	 * The counters won't actually start until the rfi clears
	 * the PMM bit */
	pmc_start_ctrs(1);

	oprofile_running = 1;

	pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
		mfpmr(PMRN_PMGC0));

	return 0;
}
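/* Stop profiling: freeze all counters on this CPU. */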
static void fsl_emb_stop(void)
{
	/* freeze counters */
	pmc_stop_ctrs();

	oprofile_running = 0;

	pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
		mfpmr(PMRN_PMGC0));

	mb();
}
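/* Performance monitor interrupt handler: any counter that has
 * overflowed (its value has gone negative) records a sample and is
 * reloaded with its restart value. */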
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
				     struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;

	pc = regs->nip;
	is_kernel = is_kernel_addr(pc);

	for (i = 0; i < num_counters; ++i) {
		val = ctr_read(i);
		if (val < 0) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				ctr_write(i, reset_value[i]);
			} else {
				ctr_write(i, 0);
			}
		}
	}

	/* The freeze bit was set by the interrupt. */
	/* Clear the freeze bit, and reenable the interrupt.  The
	 * counters won't actually start until the rfi clears the PMM
	 * bit.  The PMM bit should not be set until after the interrupt
	 * is cleared to avoid it getting lost in some hypervisor
	 * environments.
	 */
	mtmsr(mfmsr() | MSR_PMM);
	pmc_start_ctrs(1);
}
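/* Model hooks exported to the common powerpc oprofile layer. */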
struct op_powerpc_model op_model_fsl_emb = {
	.reg_setup		= fsl_emb_reg_setup,
	.cpu_setup		= fsl_emb_cpu_setup,
	.start			= fsl_emb_start,
	.stop			= fsl_emb_stop,
	.handle_interrupt	= fsl_emb_handle_interrupt,
};