/*
 * arch/powerpc/oprofile/op_model_7450.c
 *
 * Freescale 745x/744x oprofile support, based on fsl_booke support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>
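/* reset_value[] holds the value each enabled counter is (re)armed with.
 * mmcr0_val/mmcr1_val/mmcr2_val are the globally chosen MMCR settings,
 * computed once in fsl7450_reg_setup() and copied into each CPU's
 * registers by fsl7450_cpu_setup().
 */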
static unsigned long reset_value[OP_MAX_COUNTER];

static int oprofile_running;
static u32 mmcr0_val, mmcr1_val, mmcr2_val, num_pmcs;
#define MMCR0_PMC1_SHIFT        6
#define MMCR0_PMC2_SHIFT        0
#define MMCR1_PMC3_SHIFT        27
#define MMCR1_PMC4_SHIFT        22
#define MMCR1_PMC5_SHIFT        17
#define MMCR1_PMC6_SHIFT        11
#define mmcr0_event1(event) \
        ((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL)
#define mmcr0_event2(event) \
        ((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL)

#define mmcr1_event3(event) \
        ((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL)
#define mmcr1_event4(event) \
        ((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL)
#define mmcr1_event5(event) \
        ((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL)
#define mmcr1_event6(event) \
        ((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL)
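/* MMCR0_INIT starts with everything frozen: globally (FC), in supervisor
 * and user state (FCS/FCP), and for either state of the mark bit
 * (FCM1/FCM0).  fsl7450_reg_setup() clears FCS/FCP according to the
 * kernel/user profiling settings, and pmc_start_ctrs() clears FC and FCM0
 * when counting actually begins.
 */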
#define MMCR0_INIT (MMCR0_FC | MMCR0_FCS | MMCR0_FCP | MMCR0_FCM1 | MMCR0_FCM0)
/* Unfreezes the counters on this CPU, enables the interrupt,
 * enables the counters to trigger the interrupt, and sets the
 * counters to only count when the mark bit is not set.
 */
static void pmc_start_ctrs(void)
{
        u32 mmcr0 = mfspr(SPRN_MMCR0);

        mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0);
        mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);

        mtspr(SPRN_MMCR0, mmcr0);
}
/* Disables the counters on this CPU, and freezes them */
static void pmc_stop_ctrs(void)
{
        u32 mmcr0 = mfspr(SPRN_MMCR0);

        mmcr0 |= MMCR0_FC;
        mmcr0 &= ~(MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);

        mtspr(SPRN_MMCR0, mmcr0);
}
/* Configures the counters on this CPU based on the global
 * settings */
static int fsl7450_cpu_setup(struct op_counter_config *ctr)
{
        /* freeze all counters */
        pmc_stop_ctrs();

        mtspr(SPRN_MMCR0, mmcr0_val);
        mtspr(SPRN_MMCR1, mmcr1_val);
        if (num_pmcs > 4)
                mtspr(SPRN_MMCR2, mmcr2_val);

        return 0;
}
/* Configures the global settings for the counters on all CPUs. */
static int fsl7450_reg_setup(struct op_counter_config *ctr,
                             struct op_system_config *sys,
                             int num_ctrs)
{
        int i;

        num_pmcs = num_ctrs;
        /* Our counters count up, and "count" refers to
         * how much before the next interrupt, and we interrupt
         * on overflow.  So we calculate the starting value
         * which will give us "count" until overflow.
         * Then we set the events on the enabled counters */
        for (i = 0; i < num_ctrs; ++i)
                reset_value[i] = 0x80000000UL - ctr[i].count;
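        /* For example, a "count" of 100000 arms the counter at
         * 0x80000000 - 100000; after 100000 events it reaches 0x80000000
         * (MSB set, read back as negative) and raises the interrupt. */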

        /* Set events for Counters 1 & 2 */
        mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event)
                | mmcr0_event2(ctr[1].event);

        /* Setup user/kernel bits */
        if (sys->enable_kernel)
                mmcr0_val &= ~(MMCR0_FCS);

        if (sys->enable_user)
                mmcr0_val &= ~(MMCR0_FCP);

        /* Set events for Counters 3-6 */
        mmcr1_val = mmcr1_event3(ctr[2].event)
                | mmcr1_event4(ctr[3].event);
        if (num_ctrs > 4)
                mmcr1_val |= mmcr1_event5(ctr[4].event)
                        | mmcr1_event6(ctr[5].event);

        mmcr2_val = 0;

        return 0;
}
/* Sets the counters on this CPU to the chosen values, and starts them */
static int fsl7450_start(struct op_counter_config *ctr)
{
        int i;

        mtmsr(mfmsr() | MSR_PMM);

        for (i = 0; i < num_pmcs; ++i) {
                if (ctr[i].enabled)
                        classic_ctr_write(i, reset_value[i]);
                else
                        classic_ctr_write(i, 0);
        }

        /* Clear the freeze bit, and enable the interrupt.
         * The counters won't actually start until the rfi clears
         * the PMM bit */
        pmc_start_ctrs();

        oprofile_running = 1;

        return 0;
}
/* Stop the counters on this CPU */
static void fsl7450_stop(void)
{
        /* freeze counters */
        pmc_stop_ctrs();

        oprofile_running = 0;

        mb();
}
/* Handle the interrupt on this CPU, and log a sample for each
 * event that triggered the interrupt */
static void fsl7450_handle_interrupt(struct pt_regs *regs,
                                     struct op_counter_config *ctr)
{
        unsigned long pc;
        int is_kernel;
        int val;
        int i;

        /* set the PMM bit (see comment below) */
        mtmsr(mfmsr() | MSR_PMM);

        pc = mfspr(SPRN_SIAR);
        is_kernel = is_kernel_addr(pc);

        for (i = 0; i < num_pmcs; ++i) {
                val = classic_ctr_read(i);
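                /* A negative value means the counter's MSB is set, i.e.
                 * this counter has overflowed since it was last armed and
                 * is one of those that triggered the interrupt. */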
                if (val < 0) {
                        if (oprofile_running && ctr[i].enabled) {
                                oprofile_add_ext_sample(pc, regs, i, is_kernel);
                                classic_ctr_write(i, reset_value[i]);
                        } else {
                                classic_ctr_write(i, 0);
                        }
                }
        }

        /* The freeze bit was set by the interrupt. */
        /* Clear the freeze bit, and reenable the interrupt.
         * The counters won't actually start until the rfi clears
         * the PMM bit */
        pmc_start_ctrs();
}
struct op_powerpc_model op_model_7450 = {
        .reg_setup              = fsl7450_reg_setup,
        .cpu_setup              = fsl7450_cpu_setup,
        .start                  = fsl7450_start,
        .stop                   = fsl7450_stop,
        .handle_interrupt       = fsl7450_handle_interrupt,
};