arch/i386/kernel/msr.c
/* ----------------------------------------------------------------------- *
 *
 *   Copyright 2000 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 *   USA; either version 2 of the License, or (at your option) any later
 *   version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * msr.c
 *
 * x86 MSR access device
 *
 * This device is accessed by lseek() to the appropriate register number
 * and then read/write in chunks of 8 bytes.  A larger size means multiple
 * reads or writes of the same register.
 *
 * This driver uses /dev/cpu/%d/msr where %d is the minor number, and on
 * an SMP box will direct the access to CPU %d.
 */
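/*
 * Illustrative userspace sketch (not part of this driver): reading one MSR
 * through the device node described above.  The register number 0x10
 * (IA32_TIME_STAMP_COUNTER) and the choice of CPU 0 are example values;
 * an existing /dev/cpu/0/msr node and sufficient privileges are assumed.
 *
 *      #include <fcntl.h>
 *      #include <stdint.h>
 *      #include <unistd.h>
 *
 *      int fd = open("/dev/cpu/0/msr", O_RDONLY);
 *      uint64_t tsc;
 *      lseek(fd, 0x10, SEEK_SET);     // file position selects the MSR number
 *      read(fd, &tsc, sizeof(tsc));   // one 8-byte chunk = one rdmsr
 *      close(fd);
 */
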
#include <linux/module.h>
#include <linux/config.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
#include <asm/system.h>

static struct class_simple *msr_class;

/* Note: "err" is handled in a funny way below.  Otherwise one version
   of gcc or another breaks. */
/* Write an MSR; if the access faults (e.g. a non-existent register), the
   exception-table fixup turns the fault into an -EIO return value. */
static inline int wrmsr_eio(u32 reg, u32 eax, u32 edx)
{
        int err;

        asm volatile ("1: wrmsr\n"
                      "2:\n"
                      ".section .fixup,\"ax\"\n"
                      "3: movl %4,%0\n"
                      "   jmp 2b\n"
                      ".previous\n"
                      ".section __ex_table,\"a\"\n"
                      "   .align 4\n"
                      "   .long 1b,3b\n"
                      ".previous" : "=&bDS" (err)
                      : "a"(eax), "d"(edx), "c"(reg), "i"(-EIO), "0"(0));

        return err;
}

/* Read an MSR; a faulting rdmsr likewise returns -EIO via the fixup. */
static inline int rdmsr_eio(u32 reg, u32 *eax, u32 *edx)
{
        int err;

        asm volatile ("1: rdmsr\n"
                      "2:\n"
                      ".section .fixup,\"ax\"\n"
                      "3: movl %4,%0\n"
                      "   jmp 2b\n"
                      ".previous\n"
                      ".section __ex_table,\"a\"\n"
                      "   .align 4\n"
                      "   .long 1b,3b\n"
                      ".previous" : "=&bDS" (err), "=a"(*eax), "=d"(*edx)
                      : "c"(reg), "i"(-EIO), "0"(0));

        return err;
}
#ifdef CONFIG_SMP

struct msr_command {
        int cpu;
        int err;
        u32 reg;
        u32 data[2];
};

/* Run on the other CPUs via smp_call_function(); only the target CPU
   actually performs the MSR access. */
static void msr_smp_wrmsr(void *cmd_block)
{
        struct msr_command *cmd = (struct msr_command *)cmd_block;

        if (cmd->cpu == smp_processor_id())
                cmd->err = wrmsr_eio(cmd->reg, cmd->data[0], cmd->data[1]);
}

static void msr_smp_rdmsr(void *cmd_block)
{
        struct msr_command *cmd = (struct msr_command *)cmd_block;

        if (cmd->cpu == smp_processor_id())
                cmd->err = rdmsr_eio(cmd->reg, &cmd->data[0], &cmd->data[1]);
}

static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
{
        struct msr_command cmd;
        int ret;

        preempt_disable();
        if (cpu == smp_processor_id()) {
                ret = wrmsr_eio(reg, eax, edx);
        } else {
                cmd.cpu = cpu;
                cmd.reg = reg;
                cmd.data[0] = eax;
                cmd.data[1] = edx;

                smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
                ret = cmd.err;
        }
        preempt_enable();
        return ret;
}
static inline int do_rdmsr(int cpu, u32 reg, u32 *eax, u32 *edx)
{
        struct msr_command cmd;
        int ret;

        preempt_disable();
        if (cpu == smp_processor_id()) {
                ret = rdmsr_eio(reg, eax, edx);
        } else {
                cmd.cpu = cpu;
                cmd.reg = reg;

                smp_call_function(msr_smp_rdmsr, &cmd, 1, 1);

                *eax = cmd.data[0];
                *edx = cmd.data[1];

                ret = cmd.err;
        }
        preempt_enable();
        return ret;
}

#else /* ! CONFIG_SMP */

static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
{
        return wrmsr_eio(reg, eax, edx);
}

static inline int do_rdmsr(int cpu, u32 reg, u32 *eax, u32 *edx)
{
        return rdmsr_eio(reg, eax, edx);
}

#endif /* ! CONFIG_SMP */
/* The file position is the MSR register number to access. */
static loff_t msr_seek(struct file *file, loff_t offset, int orig)
{
        loff_t ret = -EINVAL;

        lock_kernel();
        switch (orig) {
        case 0:
                file->f_pos = offset;
                ret = file->f_pos;
                break;
        case 1:
                file->f_pos += offset;
                ret = file->f_pos;
        }
        unlock_kernel();
        return ret;
}
static ssize_t msr_read(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        u32 __user *tmp = (u32 __user *) buf;
        u32 data[2];
        size_t rv;
        u32 reg = *ppos;
        int cpu = iminor(file->f_dentry->d_inode);
        int err;

        if (count % 8)
                return -EINVAL; /* Invalid chunk size */

        for (rv = 0; count; count -= 8) {
                err = do_rdmsr(cpu, reg, &data[0], &data[1]);
                if (err)
                        return err;
                if (copy_to_user(tmp, &data, 8))
                        return -EFAULT;
                tmp += 2;
        }

        return ((char __user *)tmp) - buf;
}
static ssize_t msr_write(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        const u32 __user *tmp = (const u32 __user *)buf;
        u32 data[2];
        size_t rv;
        u32 reg = *ppos;
        int cpu = iminor(file->f_dentry->d_inode);
        int err;

        if (count % 8)
                return -EINVAL; /* Invalid chunk size */

        for (rv = 0; count; count -= 8) {
                if (copy_from_user(&data, tmp, 8))
                        return -EFAULT;
                err = do_wrmsr(cpu, reg, data[0], data[1]);
                if (err)
                        return err;
                tmp += 2;
        }

        return ((char __user *)tmp) - buf;
}
static int msr_open(struct inode *inode, struct file *file)
{
        unsigned int cpu = iminor(file->f_dentry->d_inode);
        struct cpuinfo_x86 *c = &(cpu_data)[cpu];

        if (cpu >= NR_CPUS || !cpu_online(cpu))
                return -ENXIO;  /* No such CPU */
        if (!cpu_has(c, X86_FEATURE_MSR))
                return -EIO;    /* MSR not supported */

        return 0;
}

/*
 * File operations we support
 */
static struct file_operations msr_fops = {
        .owner = THIS_MODULE,
        .llseek = msr_seek,
        .read = msr_read,
        .write = msr_write,
        .open = msr_open,
};
static int msr_class_simple_device_add(int i)
{
        int err = 0;
        struct class_device *class_err;

        class_err = class_simple_device_add(msr_class, MKDEV(MSR_MAJOR, i),
                                            NULL, "msr%d", i);
        if (IS_ERR(class_err))
                err = PTR_ERR(class_err);
        return err;
}

/* CPU hotplug: create or remove the per-CPU msr device node as CPUs
   come online or go away. */
static int __devinit msr_class_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
                msr_class_simple_device_add(cpu);
                break;
        case CPU_DEAD:
                class_simple_device_remove(MKDEV(MSR_MAJOR, cpu));
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block msr_class_cpu_notifier = {
        .notifier_call = msr_class_cpu_callback,
};
static int __init msr_init(void)
{
        int i, err = 0;
        i = 0;

        if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) {
                printk(KERN_ERR "msr: unable to get major %d for msr\n",
                       MSR_MAJOR);
                err = -EBUSY;
                goto out;
        }
        msr_class = class_simple_create(THIS_MODULE, "msr");
        if (IS_ERR(msr_class)) {
                err = PTR_ERR(msr_class);
                goto out_chrdev;
        }
        for_each_online_cpu(i) {
                err = msr_class_simple_device_add(i);
                if (err != 0)
                        goto out_class;
        }
        register_cpu_notifier(&msr_class_cpu_notifier);

        err = 0;
        goto out;

out_class:
        i = 0;
        for_each_online_cpu(i)
                class_simple_device_remove(MKDEV(MSR_MAJOR, i));
        class_simple_destroy(msr_class);
out_chrdev:
        unregister_chrdev(MSR_MAJOR, "cpu/msr");
out:
        return err;
}
static void __exit msr_exit(void)
{
        int cpu = 0;

        for_each_online_cpu(cpu)
                class_simple_device_remove(MKDEV(MSR_MAJOR, cpu));
        class_simple_destroy(msr_class);
        unregister_chrdev(MSR_MAJOR, "cpu/msr");
        unregister_cpu_notifier(&msr_class_cpu_notifier);
}

module_init(msr_init);
module_exit(msr_exit);

MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>");
MODULE_DESCRIPTION("x86 generic MSR driver");
MODULE_LICENSE("GPL");