/*
 * arch/powerpc/oprofile/op_model_7450.c
 *
 * Freescale 745x/744x oprofile support, based on fsl_booke support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>

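/* Shared state for the callbacks below: reset_value[] holds each counter's
 * restart value and mmcr0/1/2_val cache the control-register images.
 * fsl7450_reg_setup() fills these in once from the user's configuration,
 * and fsl7450_cpu_setup() then programs them into each CPU's registers.
 */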
static unsigned long reset_value[OP_MAX_COUNTER];

static int oprofile_running;
static u32 mmcr0_val, mmcr1_val, mmcr2_val, num_pmcs;

#define MMCR0_PMC1_SHIFT	6
#define MMCR0_PMC2_SHIFT	0
#define MMCR1_PMC3_SHIFT	27
#define MMCR1_PMC4_SHIFT	22
#define MMCR1_PMC5_SHIFT	17
#define MMCR1_PMC6_SHIFT	11

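/* The event-select macros below shift an oprofile event code into the
 * PMCn event-select (PMCnSEL) field of the register that controls that
 * counter (PMC1/PMC2 live in MMCR0, PMC3-PMC6 in MMCR1) and mask the
 * result so an out-of-range event code cannot touch neighbouring bits.
 */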
#define mmcr0_event1(event) \
	((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL)
#define mmcr0_event2(event) \
	((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL)

#define mmcr1_event3(event) \
	((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL)
#define mmcr1_event4(event) \
	((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL)
#define mmcr1_event5(event) \
	((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL)
#define mmcr1_event6(event) \
	((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL)

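/* Baseline MMCR0 image: all counters frozen in every state (supervisor,
 * user, marked and unmarked).  fsl7450_reg_setup() clears FCS/FCP
 * according to the kernel/user profiling settings, and pmc_start_ctrs()
 * clears FC and FCM0 when profiling starts; FCM1 stays set, so the
 * counters only run while MSR[PMM] is clear.
 */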
#define MMCR0_INIT (MMCR0_FC | MMCR0_FCS | MMCR0_FCP | MMCR0_FCM1 | MMCR0_FCM0)

/* Unfreezes the counters on this CPU, enables the interrupt,
 * enables the counters to trigger the interrupt, and sets the
 * counters to only count when the mark bit is not set.
 */
static void pmc_start_ctrs(void)
{
	u32 mmcr0 = mfspr(SPRN_MMCR0);

	mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0);
	mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);

	mtspr(SPRN_MMCR0, mmcr0);
}

/* Disables the counters on this CPU, and freezes them */
static void pmc_stop_ctrs(void)
{
	u32 mmcr0 = mfspr(SPRN_MMCR0);

	mmcr0 |= MMCR0_FC;
	mmcr0 &= ~(MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);

	mtspr(SPRN_MMCR0, mmcr0);
}

/* Configures the counters on this CPU based on the global
 * settings */
static int fsl7450_cpu_setup(struct op_counter_config *ctr)
{
	/* freeze all counters */
	pmc_stop_ctrs();

	mtspr(SPRN_MMCR0, mmcr0_val);
	mtspr(SPRN_MMCR1, mmcr1_val);
	if (num_pmcs > 4)
		mtspr(SPRN_MMCR2, mmcr2_val);

	return 0;
}

/* Configures the global settings for the counters on all CPUs. */
static int fsl7450_reg_setup(struct op_counter_config *ctr,
			     struct op_system_config *sys,
			     int num_ctrs)
{
	int i;

	num_pmcs = num_ctrs;
	/* Our counters count up, and "count" refers to
	 * how much before the next interrupt, and we interrupt
	 * on overflow.  So we calculate the starting value
	 * which will give us "count" until overflow.
	 * Then we set the events on the enabled counters */
	for (i = 0; i < num_ctrs; ++i)
		reset_value[i] = 0x80000000UL - ctr[i].count;
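	/* E.g. a requested count of 100000 gives
	 * reset_value[i] = 0x80000000 - 100000 = 0x7ffe7960, so the
	 * counter's sign bit becomes set, and the interrupt fires,
	 * after exactly 100000 events.
	 */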

	/* Set events for Counters 1 & 2 */
	mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event)
		| mmcr0_event2(ctr[1].event);

	/* Setup user/kernel bits */
	if (sys->enable_kernel)
		mmcr0_val &= ~(MMCR0_FCS);

	if (sys->enable_user)
		mmcr0_val &= ~(MMCR0_FCP);

	/* Set events for Counters 3-6 */
	mmcr1_val = mmcr1_event3(ctr[2].event)
		| mmcr1_event4(ctr[3].event);
	if (num_ctrs > 4)
		mmcr1_val |= mmcr1_event5(ctr[4].event)
			| mmcr1_event6(ctr[5].event);

	mmcr2_val = 0;

	return 0;
}

/* Sets the counters on this CPU to the chosen values, and starts them */
static int fsl7450_start(struct op_counter_config *ctr)
{
	int i;

	mtmsr(mfmsr() | MSR_PMM);

	for (i = 0; i < num_pmcs; ++i) {
		if (ctr[i].enabled)
			classic_ctr_write(i, reset_value[i]);
		else
			classic_ctr_write(i, 0);
	}

	/* Clear the freeze bit, and enable the interrupt.
	 * The counters won't actually start until the rfi clears
	 * the PMM bit */
	pmc_start_ctrs();

	oprofile_running = 1;

	return 0;
}

/* Stop the counters on this CPU */
static void fsl7450_stop(void)
{
	/* freeze counters */
	pmc_stop_ctrs();

	oprofile_running = 0;

	mb();
}

/* Handle the interrupt on this CPU, and log a sample for each
 * event that triggered the interrupt */
static void fsl7450_handle_interrupt(struct pt_regs *regs,
				     struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;

	/* set the PMM bit (see comment below) */
	mtmsr(mfmsr() | MSR_PMM);

	pc = mfspr(SPRN_SIAR);
	is_kernel = is_kernel_addr(pc);

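	/* Each enabled counter was seeded with 0x80000000 - count, so an
	 * overflow shows up as a negative value when the counter is read
	 * back; that is the condition tested below.
	 */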
	for (i = 0; i < num_pmcs; ++i) {
		val = classic_ctr_read(i);
		if (val < 0) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				classic_ctr_write(i, reset_value[i]);
			} else {
				classic_ctr_write(i, 0);
			}
		}
	}

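	/* MMCR0[FCECE] (set in pmc_start_ctrs()) makes a counter overflow
	 * freeze all of the counters as a group, which is why they are
	 * restarted here rather than individually above.
	 */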
	/* The freeze bit was set by the interrupt. */
	/* Clear the freeze bit, and reenable the interrupt.
	 * The counters won't actually start until the rfi clears
	 * the PMM bit */
	pmc_start_ctrs();
}

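/* Callback table handed to the common powerpc oprofile code for the
 * 745x/744x processor family.
 */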
struct op_powerpc_model op_model_7450 = {
	.reg_setup		= fsl7450_reg_setup,
	.cpu_setup		= fsl7450_cpu_setup,
	.start			= fsl7450_start,
	.stop			= fsl7450_stop,
	.handle_interrupt	= fsl7450_handle_interrupt,
};