docbook: fix fatal rapidio yet again (and more to come)
[linux/fpc-iii.git] / arch / sh / oprofile / op_model_sh7750.c
blob6b9a98e07004a09f30e34d4335e26aa62e788328
/*
 * arch/sh/oprofile/op_model_sh7750.c
 *
 * OProfile support for SH7750/SH7750S Performance Counters
 *
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
12 #include <linux/kernel.h>
13 #include <linux/oprofile.h>
14 #include <linux/profile.h>
15 #include <linux/init.h>
16 #include <linux/errno.h>
17 #include <linux/interrupt.h>
18 #include <linux/fs.h>
19 #include <asm/uaccess.h>
20 #include <asm/io.h>
/*
 * SH7750/SH7750S performance counter register layout: 16-bit control
 * registers, and counters split across a high/low 32-bit register pair
 * (48 significant bits per counter, per the read path below).
 */
#define PM_CR_BASE	0xff000084	/* 16-bit */
#define PM_CTR_BASE	0xff100004	/* 32-bit */

#define PMCR1		(PM_CR_BASE  + 0x00)
#define PMCR2		(PM_CR_BASE  + 0x04)
#define PMCTR1H		(PM_CTR_BASE + 0x00)
#define PMCTR1L		(PM_CTR_BASE + 0x04)
#define PMCTR2H		(PM_CTR_BASE + 0x08)
#define PMCTR2L		(PM_CTR_BASE + 0x0c)

#define PMCR_PMM_MASK	0x0000003f	/* event select (PMM) field */

#define PMCR_CLKF	0x00000100
#define PMCR_PMCLR	0x00002000	/* writing this bit clears the counter */
#define PMCR_PMST	0x00004000
#define PMCR_PMEN	0x00008000	/* counter enabled when set */

#define PMCR_ENABLE	(PMCR_PMST | PMCR_PMEN)

/*
 * SH7750/SH7750S have 2 perf counters
 */
#define NR_CNTRS	2

/*
 * Per-counter configuration, exposed to userspace through oprofilefs.
 */
struct op_counter_config {
	unsigned long enabled;		/* non-zero: program this counter on start */
	unsigned long event;		/* event code, written into the PMCR PMM field */
	unsigned long count;

	/* Dummy values for userspace tool compliance */
	unsigned long kernel;
	unsigned long user;
	unsigned long unit_mask;
};

static struct op_counter_config ctr[NR_CNTRS];
/*
 * There are a number of events supported by each counter (33 in total).
 * Since we have 2 counters, each counter will take the event code as it
 * corresponds to the PMCR PMM setting. Each counter can be configured
 * independently.
 *
 *	Event Code	Description
 *	----------	-----------
 *
 *	0x01		Operand read access
 *	0x02		Operand write access
 *	0x03		UTLB miss
 *	0x04		Operand cache read miss
 *	0x05		Operand cache write miss
 *	0x06		Instruction fetch (w/ cache)
 *	0x07		Instruction TLB miss
 *	0x08		Instruction cache miss
 *	0x09		All operand accesses
 *	0x0a		All instruction accesses
 *	0x0b		OC RAM operand access
 *	0x0d		On-chip I/O space access
 *	0x0e		Operand access (r/w)
 *	0x0f		Operand cache miss (r/w)
 *	0x10		Branch instruction
 *	0x11		Branch taken
 *	0x12		BSR/BSRF/JSR
 *	0x13		Instruction execution
 *	0x14		Instruction execution in parallel
 *	0x15		FPU Instruction execution
 *	0x16		Interrupt
 *	0x17		NMI
 *	0x18		trapa instruction execution
 *	0x19		UBCA match
 *	0x1a		UBCB match
 *	0x21		Instruction cache fill
 *	0x22		Operand cache fill
 *	0x23		Elapsed time
 *	0x24		Pipeline freeze by I-cache miss
 *	0x25		Pipeline freeze by D-cache miss
 *	0x27		Pipeline freeze by branch instruction
 *	0x28		Pipeline freeze by CPU register
 *	0x29		Pipeline freeze by FPU
 *
 * Unfortunately we don't have a native exception or interrupt for counter
 * overflow (although since these counters can run for 16.3 days without
 * overflowing, it's not really necessary).
 *
 * OProfile on the other hand likes to have samples taken periodically, so
 * for now we just piggyback the timer interrupt to get the expected
 * behavior.
 */
/*
 * Timer hook: fed into register_timer_hook() at start time so oprofile
 * gets a sample on every timer tick (there is no counter-overflow
 * interrupt to drive sampling on this CPU -- see the comment above).
 */
static int sh7750_timer_notify(struct pt_regs *regs)
{
	oprofile_add_sample(regs, 0);
	return 0;
}
117 static u64 sh7750_read_counter(int counter)
119 u32 hi, lo;
121 hi = (counter == 0) ? ctrl_inl(PMCTR1H) : ctrl_inl(PMCTR2H);
122 lo = (counter == 0) ? ctrl_inl(PMCTR1L) : ctrl_inl(PMCTR2L);
124 return (u64)((u64)(hi & 0xffff) << 32) | lo;
/*
 * Files will be in a path like:
 *
 *	/<oprofilefs mount point>/<counter number>/<file>
 *
 * So when dealing with <file>, we look to the parent dentry for the counter
 * number.
 */
135 static inline int to_counter(struct file *file)
137 const unsigned char *name = file->f_path.dentry->d_parent->d_name.name;
139 return (int)simple_strtol(name, NULL, 10);
/*
 * XXX: We have 48-bit counters, so we're probably going to want something
 * more along the lines of oprofilefs_ullong_to_user().. Truncating to
 * unsigned long works fine for now though, as long as we don't attempt to
 * profile for too horribly long.
 */
148 static ssize_t sh7750_read_count(struct file *file, char __user *buf,
149 size_t count, loff_t *ppos)
151 int counter = to_counter(file);
152 u64 val = sh7750_read_counter(counter);
154 return oprofilefs_ulong_to_user((unsigned long)val, buf, count, ppos);
157 static ssize_t sh7750_write_count(struct file *file, const char __user *buf,
158 size_t count, loff_t *ppos)
160 int counter = to_counter(file);
161 unsigned long val;
163 if (oprofilefs_ulong_from_user(&val, buf, count))
164 return -EFAULT;
167 * Any write will clear the counter, although only 0 should be
168 * written for this purpose, as we do not support setting the
169 * counter to an arbitrary value.
171 WARN_ON(val != 0);
173 if (counter == 0) {
174 ctrl_outw(ctrl_inw(PMCR1) | PMCR_PMCLR, PMCR1);
175 } else {
176 ctrl_outw(ctrl_inw(PMCR2) | PMCR_PMCLR, PMCR2);
179 return count;
182 static const struct file_operations count_fops = {
183 .read = sh7750_read_count,
184 .write = sh7750_write_count,
187 static int sh7750_perf_counter_create_files(struct super_block *sb, struct dentry *root)
189 int i;
191 for (i = 0; i < NR_CNTRS; i++) {
192 struct dentry *dir;
193 char buf[4];
195 snprintf(buf, sizeof(buf), "%d", i);
196 dir = oprofilefs_mkdir(sb, root, buf);
198 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
199 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
200 oprofilefs_create_file(sb, dir, "count", &count_fops);
202 /* Dummy entries */
203 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
204 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
205 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
208 return 0;
211 static int sh7750_perf_counter_start(void)
213 u16 pmcr;
215 /* Enable counter 1 */
216 if (ctr[0].enabled) {
217 pmcr = ctrl_inw(PMCR1);
218 WARN_ON(pmcr & PMCR_PMEN);
220 pmcr &= ~PMCR_PMM_MASK;
221 pmcr |= ctr[0].event;
222 ctrl_outw(pmcr | PMCR_ENABLE, PMCR1);
225 /* Enable counter 2 */
226 if (ctr[1].enabled) {
227 pmcr = ctrl_inw(PMCR2);
228 WARN_ON(pmcr & PMCR_PMEN);
230 pmcr &= ~PMCR_PMM_MASK;
231 pmcr |= ctr[1].event;
232 ctrl_outw(pmcr | PMCR_ENABLE, PMCR2);
235 return register_timer_hook(sh7750_timer_notify);
238 static void sh7750_perf_counter_stop(void)
240 ctrl_outw(ctrl_inw(PMCR1) & ~PMCR_PMEN, PMCR1);
241 ctrl_outw(ctrl_inw(PMCR2) & ~PMCR_PMEN, PMCR2);
243 unregister_timer_hook(sh7750_timer_notify);
246 static struct oprofile_operations sh7750_perf_counter_ops = {
247 .create_files = sh7750_perf_counter_create_files,
248 .start = sh7750_perf_counter_start,
249 .stop = sh7750_perf_counter_stop,
252 int __init oprofile_arch_init(struct oprofile_operations *ops)
254 if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER))
255 return -ENODEV;
257 ops = &sh7750_perf_counter_ops;
258 ops->cpu_type = (char *)get_cpu_subtype(&current_cpu_data);
260 printk(KERN_INFO "oprofile: using SH-4 (%s) performance monitoring.\n",
261 sh7750_perf_counter_ops.cpu_type);
263 /* Clear the counters */
264 ctrl_outw(ctrl_inw(PMCR1) | PMCR_PMCLR, PMCR1);
265 ctrl_outw(ctrl_inw(PMCR2) | PMCR_PMCLR, PMCR2);
267 return 0;
270 void oprofile_arch_exit(void)