/*
 * arch/powerpc/oprofile/op_model_7450.c
 *
 * Freescale 745x/744x oprofile support, based on fsl_booke support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>
static unsigned long reset_value[OP_MAX_COUNTER];

static int oprofile_running;
static u32 mmcr0_val, mmcr1_val, mmcr2_val;

#define MMCR0_PMC1_SHIFT 6
#define MMCR0_PMC2_SHIFT 0
#define MMCR1_PMC3_SHIFT 27
#define MMCR1_PMC4_SHIFT 22
#define MMCR1_PMC5_SHIFT 17
#define MMCR1_PMC6_SHIFT 11

#define mmcr0_event1(event) \
        ((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL)
#define mmcr0_event2(event) \
        ((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL)

#define mmcr1_event3(event) \
        ((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL)
#define mmcr1_event4(event) \
        ((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL)
#define mmcr1_event5(event) \
        ((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL)
#define mmcr1_event6(event) \
        ((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL)

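/* Initial MMCR0 value: counting frozen unconditionally (FC), frozen in
 * supervisor (FCS) and user (FCP) state, and frozen for both values of
 * the mark bit (FCM0/FCM1).  fsl7450_reg_setup() clears FCS/FCP according
 * to the user/kernel enable flags, and pmc_start_ctrs() clears FC and FCM0
 * so the counters run only while the mark bit is clear.
 */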
#define MMCR0_INIT (MMCR0_FC | MMCR0_FCS | MMCR0_FCP | MMCR0_FCM1 | MMCR0_FCM0)

/* Unfreezes the counters on this CPU, enables the interrupt,
 * enables the counters to trigger the interrupt, and sets the
 * counters to only count when the mark bit is not set.
 */
static void pmc_start_ctrs(void)
{
        u32 mmcr0 = mfspr(SPRN_MMCR0);

        mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0);
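        /* FCECE makes the hardware refreeze the counters as soon as an
         * enabled overflow condition occurs (the "freeze bit was set by
         * the interrupt" case handled in fsl7450_handle_interrupt()),
         * PMC1CE/PMCnCE enable the overflow conditions themselves, and
         * PMXE enables the performance monitor exception.
         */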
        mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);

        mtspr(SPRN_MMCR0, mmcr0);
}

/* Disables the counters on this CPU, and freezes them */
static void pmc_stop_ctrs(void)
{
        u32 mmcr0 = mfspr(SPRN_MMCR0);

        mmcr0 |= MMCR0_FC;
        mmcr0 &= ~(MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);

        mtspr(SPRN_MMCR0, mmcr0);
}

/* Configures the counters on this CPU based on the global
 * settings */
static int fsl7450_cpu_setup(struct op_counter_config *ctr)
{
        /* freeze all counters */
        pmc_stop_ctrs();

        mtspr(SPRN_MMCR0, mmcr0_val);
        mtspr(SPRN_MMCR1, mmcr1_val);
        mtspr(SPRN_MMCR2, mmcr2_val);

        return 0;
}

#define NUM_CTRS 6

/* Configures the global settings for the counters on all CPUs. */
static int fsl7450_reg_setup(struct op_counter_config *ctr,
                             struct op_system_config *sys,
                             int num_ctrs)
{
        int i;

        /* Our counters count up, and "count" refers to
         * how much before the next interrupt, and we interrupt
         * on overflow.  So we calculate the starting value
         * which will give us "count" until overflow.
         * Then we set the events on the enabled counters */
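        /* For example (illustrative numbers): with ctr[i].count == 100000,
         * reset_value[i] == 0x80000000 - 100000 == 0x7ffe7960; after 100000
         * events the counter reaches 0x80000000, bit 31 is set, and the
         * performance monitor exception is taken.
         */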
        for (i = 0; i < NUM_CTRS; ++i)
                reset_value[i] = 0x80000000UL - ctr[i].count;

        /* Set events for Counters 1 & 2 */
        mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event)
                | mmcr0_event2(ctr[1].event);

        /* Setup user/kernel bits */
        if (sys->enable_kernel)
                mmcr0_val &= ~(MMCR0_FCS);

        if (sys->enable_user)
                mmcr0_val &= ~(MMCR0_FCP);

        /* Set events for Counters 3-6 */
        mmcr1_val = mmcr1_event3(ctr[2].event)
                | mmcr1_event4(ctr[3].event)
                | mmcr1_event5(ctr[4].event)
                | mmcr1_event6(ctr[5].event);

        mmcr2_val = 0;

        return 0;
}

/* Sets the counters on this CPU to the chosen values, and starts them */
static int fsl7450_start(struct op_counter_config *ctr)
{
        int i;

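        /* Set the mark (PMM) bit.  MMCR0 still has FCM1 set, so the
         * counters stay frozen while they are programmed below; as the
         * comment before pmc_start_ctrs() notes, counting only begins
         * once the rfi clears PMM again.
         */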
        mtmsr(mfmsr() | MSR_PMM);

        for (i = 0; i < NUM_CTRS; ++i) {
                if (ctr[i].enabled)
                        classic_ctr_write(i, reset_value[i]);
                else
                        classic_ctr_write(i, 0);
        }

        /* Clear the freeze bit, and enable the interrupt.
         * The counters won't actually start until the rfi clears
         * the PMM bit */
        pmc_start_ctrs();

        oprofile_running = 1;

        return 0;
}

/* Stop the counters on this CPU */
static void fsl7450_stop(void)
{
        /* freeze counters */
        pmc_stop_ctrs();

        oprofile_running = 0;

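        /* Full memory barrier: make sure the store to oprofile_running is
         * ordered before anything that follows (e.g. a racing performance
         * monitor interrupt checking the flag).
         */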
        mb();
}

/* Handle the interrupt on this CPU, and log a sample for each
 * event that triggered the interrupt */
static void fsl7450_handle_interrupt(struct pt_regs *regs,
                                     struct op_counter_config *ctr)
{
        unsigned long pc;
        int is_kernel;
        int val;
        int i;

        /* set the PMM bit (see comment below) */
        mtmsr(mfmsr() | MSR_PMM);

        pc = mfspr(SPRN_SIAR);
        is_kernel = is_kernel_addr(pc);

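        /* The PMCs count up and are read back here as signed 32-bit values:
         * a negative value means bit 31 is set, i.e. that counter has
         * overflowed and is one of those that triggered this exception.
         */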
        for (i = 0; i < NUM_CTRS; ++i) {
                val = classic_ctr_read(i);
                if (val < 0) {
                        if (oprofile_running && ctr[i].enabled) {
                                oprofile_add_ext_sample(pc, regs, i, is_kernel);
                                classic_ctr_write(i, reset_value[i]);
                        } else {
                                classic_ctr_write(i, 0);
                        }
                }
        }

        /* The freeze bit was set by the interrupt. */
        /* Clear the freeze bit, and reenable the interrupt.
         * The counters won't actually start until the rfi clears
         * the PMM bit */
        pmc_start_ctrs();
}

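/* This descriptor is picked up by the common powerpc oprofile code
 * (arch/powerpc/oprofile/common.c), which selects op_model_7450 for CPUs
 * whose cputable entry reports a G4-class performance monitor
 * (PPC_OPROFILE_G4).
 */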
struct op_powerpc_model op_model_7450 = {
        .reg_setup              = fsl7450_reg_setup,
        .cpu_setup              = fsl7450_cpu_setup,
        .start                  = fsl7450_start,
        .stop                   = fsl7450_stop,
        .handle_interrupt       = fsl7450_handle_interrupt,
};