/*
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
 *
 * Thanks to hpa@transmeta.com for some useful hint.
 * Special thanks to Ingo Molnar for his early experience with
 * a different vsyscall implementation for Linux/IA32 and for the name.
 *
 * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 * at virtual address -10Mbyte+1024bytes etc... There are at max 4
 * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 * jumping out of line if necessary. We cannot add more with this
 * mechanism because older kernels won't return -ENOSYS.
 *
 * Note: the concept clashes with user mode linux. UML users should
 * use the vDSO.
 */

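/*
 * Purely illustrative, not part of this file: legacy, pre-vDSO user code
 * reached these entry points by calling fixed addresses directly.  A
 * minimal sketch, assuming the traditional -10Mbyte layout with
 * 1024-byte slots:
 *
 *	typedef int (*vgettimeofday_t)(struct timeval *, struct timezone *);
 *	// slot 0 of the vsyscall page: gettimeofday
 *	vgettimeofday_t vgtod = (vgettimeofday_t)0xffffffffff600000UL;
 *	struct timeval tv;
 *	vgtod(&tv, NULL);	// now emulated via the int 0xcc trap below
 *
 * Slot 1 (time) sits at 0xffffffffff600400 and slot 2 (getcpu) at
 * 0xffffffffff600800, matching addr_to_vsyscall_nr() below.
 */
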
/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/clocksource.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/compat.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>
#include <asm/traps.h>

DEFINE_VVAR(int, vgetcpu_mode);
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
{
	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
};

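/*
 * For context (an assumption drawn from the surrounding machinery, not
 * stated here): DEFINE_VVAR places these variables in the special
 * __vvar_page section, which map_vsyscall() below maps read-only into
 * every address space at VVAR_ADDRESS, so the vDSO and the vsyscall
 * page can read them without entering the kernel.
 */
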
void update_vsyscall_tz(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
	/* sys_tz has changed */
	vsyscall_gtod_data.sys_tz = sys_tz;
	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}

void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
			struct clocksource *clock, u32 mult)
{
	unsigned long flags;

	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);

	/* copy vsyscall data */
	vsyscall_gtod_data.clock.vclock_mode	= clock->archdata.vclock_mode;
	vsyscall_gtod_data.clock.cycle_last	= clock->cycle_last;
	vsyscall_gtod_data.clock.mask		= clock->mask;
	vsyscall_gtod_data.clock.mult		= mult;
	vsyscall_gtod_data.clock.shift		= clock->shift;
	vsyscall_gtod_data.wall_time_sec	= wall_time->tv_sec;
	vsyscall_gtod_data.wall_time_nsec	= wall_time->tv_nsec;
	vsyscall_gtod_data.wall_to_monotonic	= *wtm;
	vsyscall_gtod_data.wall_time_coarse	= __current_kernel_time();

	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}

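/*
 * The consumers of this data are the time functions in the vDSO and the
 * vsyscall page.  A minimal sketch of the reader side of the seqlock
 * protocol (the real reader lives in the vDSO sources, e.g.
 * vclock_gettime.c; names are from that era):
 *
 *	unsigned seq;
 *	do {
 *		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
 *		// ... copy wall_time_sec/wall_time_nsec, read the
 *		// clocksource, and apply mult/shift ...
 *	} while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
 *
 * Readers retry until they observe a consistent snapshot, which is why
 * each writer above holds the lock around the whole update.
 */
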
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk;

	if (!show_unhandled_signals || !__ratelimit(&rs))
		return;

	tsk = current;

	printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
	       level, tsk->comm, task_pid_nr(tsk),
	       message, regs->ip - 2, regs->cs,
	       regs->sp, regs->ax, regs->si, regs->di);
}

static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_START)
		return -EINVAL;

	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;

	return nr;
}

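/*
 * Worked example for addr_to_vsyscall_nr().  The 1024-byte slots mean
 * bits 10-11 select the slot and every other bit must match
 * VSYSCALL_START (-10Mbyte, i.e. 0xffffffffff600000):
 *
 *	0xffffffffff600000 -> nr 0 (gettimeofday)
 *	0xffffffffff600400 -> nr 1 (time)
 *	0xffffffffff600800 -> nr 2 (getcpu)
 *	0xffffffffff600c00 -> nr 3, rejected by the nr >= 3 check
 */
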
void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;
	unsigned long caller;
	int vsyscall_nr;
	long ret;

	local_irq_enable();

	/*
	 * Real 64-bit user mode code has cs == __USER_CS.  Anything else
	 * is bogus.
	 */
	if (regs->cs != __USER_CS) {
		/*
		 * If we trapped from kernel mode, we might as well OOPS now
		 * instead of returning to some random address and OOPSing
		 * then.
		 */
		BUG_ON(!user_mode(regs));

		/* Compat mode and non-compat 32-bit CS should both segfault. */
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "illegal int 0xcc from 32-bit mode");
		goto sigsegv;
	}

	/*
	 * x86-ism here: regs->ip points to the instruction after the int 0xcc,
	 * and int 0xcc is two bytes long.
	 */
	vsyscall_nr = addr_to_vsyscall_nr(regs->ip - 2);
	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "illegal int 0xcc (exploit attempt?)");
		goto sigsegv;
	}

	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "int 0xcc with bad stack (exploit attempt?)");
		goto sigsegv;
	}

	tsk = current;
	if (seccomp_mode(&tsk->seccomp))
		do_exit(SIGKILL);

	switch (vsyscall_nr) {
	case 0:
		ret = sys_gettimeofday(
			(struct timeval __user *)regs->di,
			(struct timezone __user *)regs->si);
		break;

	case 1:
		ret = sys_time((time_t __user *)regs->di);
		break;

	case 2:
		/* The third (getcpu_cache) argument is unused. */
		ret = sys_getcpu((unsigned __user *)regs->di,
				 (unsigned __user *)regs->si,
				 0);
		break;
	}

	if (ret == -EFAULT) {
		/*
		 * Bad news -- userspace fed a bad pointer to a vsyscall.
		 *
		 * With a real vsyscall, that would have caused SIGSEGV.
		 * To make writing reliable exploits using the emulated
		 * vsyscalls harder, generate SIGSEGV here as well.
		 */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");
		goto sigsegv;
	}

	regs->ax = ret;

	/* Emulate a ret instruction. */
	regs->ip = caller;
	regs->sp += 8;

	local_irq_disable();
	return;

sigsegv:
	regs->ip -= 2; /* The faulting instruction should be the int 0xcc. */
	force_sig(SIGSEGV, current);
	local_irq_disable();
}

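/*
 * For reference, the trap handled above is raised by the vsyscall page
 * itself, which under this emulation scheme contains nothing but int
 * 0xcc instructions, one per 1024-byte slot -- roughly the following
 * (see vsyscall_emu_64.S for the real thing; VSYSCALL_EMU_VECTOR is
 * 0xcc):
 *
 *	.section .vsyscall_0, "a"
 *	ENTRY(vsyscall_0)
 *		int $VSYSCALL_EMU_VECTOR
 *	END(vsyscall_0)
 *
 * and likewise for .vsyscall_1 and .vsyscall_2.
 */
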
/*
 * Assume __initcall executes before all user space. Hopefully kmod
 * doesn't violate that. We'll find out if it does.
 */

static void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded quickly
	 * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
	 */
	d = 0x0f40000000000ULL;
	d |= cpu;
	d |= (node & 0xf) << 12;
	d |= (node >> 4) << 48;

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

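/*
 * User-side decode, for reference: vgetcpu() reads this descriptor's
 * 20-bit segment limit with a single unprivileged lsl instruction (or
 * uses rdtscp where available).  A minimal sketch of the decode,
 * mirroring the layout written above:
 *
 *	unsigned long p;
 *	asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 *	cpu  = p & 0xfff;	// limit bits 0-11
 *	node = p >> 12;		// limit bits 12-19
 */
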
static void __cpuinit cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}

static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);

	return NOTIFY_DONE;
}

void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
	extern char __vvar_page;
	unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);

	/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
	__set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
		     (unsigned long)VVAR_ADDRESS);
}

static int __init vsyscall_init(void)
{
	BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));

	on_each_cpu(cpu_vsyscall_init, NULL, 1);
	/* notifier priority > KVM */
	hotcpu_notifier(cpu_vsyscall_notifier, 30);

	return 0;
}
__initcall(vsyscall_init);