/*
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
 *
 * Thanks to hpa@transmeta.com for some useful hints.
 * Special thanks to Ingo Molnar for his early experience with
 * a different vsyscall implementation for Linux/IA32 and for the name.
 *
 * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 * at virtual address -10Mbyte+1024bytes etc... There are at most 4
 * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 * jumping out of line if necessary. We cannot add more with this
 * mechanism because older kernels won't return -ENOSYS.
 *
 * Note: the concept clashes with user mode linux. UML users should
 * use the vDSO.
 */
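
/*
 * For illustration (a sketch, not relied on by the code below): with
 * VSYSCALL_START at -10MB = 0xffffffffff600000 and 1024-byte slots,
 * the three emulated entry points are
 *
 *	0xffffffffff600000	vsyscall 0: gettimeofday()
 *	0xffffffffff600400	vsyscall 1: time()
 *	0xffffffffff600800	vsyscall 2: getcpu()
 */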

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/topology.h>
#include <linux/clocksource.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/compat.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"

DEFINE_VVAR(int, vgetcpu_mode);
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
{
	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
};

static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;

static int __init vsyscall_setup(char *str)
{
	if (str) {
		if (!strcmp("emulate", str))
			vsyscall_mode = EMULATE;
		else if (!strcmp("native", str))
			vsyscall_mode = NATIVE;
		else if (!strcmp("none", str))
			vsyscall_mode = NONE;
		else
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);
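
/*
 * For reference, the boot parameter parsed above selects:
 *
 *	vsyscall=emulate	trap and emulate each vsyscall (default)
 *	vsyscall=native		map the vsyscall page executable
 *	vsyscall=none		refuse vsyscall attempts entirely
 */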

void update_vsyscall_tz(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
	/* sys_tz has changed */
	vsyscall_gtod_data.sys_tz = sys_tz;
	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}

void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
		     struct clocksource *clock, u32 mult)
{
	unsigned long flags;

	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);

	/* copy vsyscall data */
	vsyscall_gtod_data.clock.vclock_mode	= clock->archdata.vclock_mode;
	vsyscall_gtod_data.clock.cycle_last	= clock->cycle_last;
	vsyscall_gtod_data.clock.mask		= clock->mask;
	vsyscall_gtod_data.clock.mult		= mult;
	vsyscall_gtod_data.clock.shift		= clock->shift;
	vsyscall_gtod_data.wall_time_sec	= wall_time->tv_sec;
	vsyscall_gtod_data.wall_time_nsec	= wall_time->tv_nsec;
	vsyscall_gtod_data.wall_to_monotonic	= *wtm;
	vsyscall_gtod_data.wall_time_coarse	= __current_kernel_time();

	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
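
/*
 * A minimal sketch of the reader side that pairs with the seqlock
 * writes above (an assumption for illustration: the real reader lives
 * in the vDSO/vsyscall gettimeofday code, and this helper is not part
 * of it). Readers retry whenever the sequence count changed mid-read.
 */
static __maybe_unused void example_read_wall_time(struct timespec *ts)
{
	unsigned seq;

	do {
		seq = read_seqbegin(&vsyscall_gtod_data.lock);
		ts->tv_sec = vsyscall_gtod_data.wall_time_sec;
		ts->tv_nsec = vsyscall_gtod_data.wall_time_nsec;
	} while (read_seqretry(&vsyscall_gtod_data.lock, seq));
}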

static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk;

	if (!show_unhandled_signals || !__ratelimit(&rs))
		return;

	tsk = current;

	printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
	       level, tsk->comm, task_pid_nr(tsk),
	       message, regs->ip, regs->cs,
	       regs->sp, regs->ax, regs->si, regs->di);
}

static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_START)
		return -EINVAL;

	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;

	return nr;
}
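
/*
 * Worked example: for address 0xffffffffff600400, (addr & ~0xC00UL)
 * equals VSYSCALL_START and (addr & 0xC00UL) >> 10 equals 1, i.e.
 * vsyscall 1 (time). 0xffffffffff600c00 would decode to 3 and is
 * rejected above.
 */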

bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
	struct task_struct *tsk;
	unsigned long caller;
	int vsyscall_nr;
	long ret;

	/*
	 * No point in checking CS -- the only way to get here is a user mode
	 * trap to a high address, which means that we're in 64-bit user code.
	 */

	WARN_ON_ONCE(address != regs->ip);

	if (vsyscall_mode == NONE) {
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall attempted with vsyscall=none");
		return false;
	}

	vsyscall_nr = addr_to_vsyscall_nr(address);

	trace_emulate_vsyscall(vsyscall_nr);

	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
		goto sigsegv;
	}

	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "vsyscall with bad stack (exploit attempt?)");
		goto sigsegv;
	}

	tsk = current;
	if (seccomp_mode(&tsk->seccomp))
		do_exit(SIGKILL);

	switch (vsyscall_nr) {
	case 0:
		ret = sys_gettimeofday(
			(struct timeval __user *)regs->di,
			(struct timezone __user *)regs->si);
		break;

	case 1:
		ret = sys_time((time_t __user *)regs->di);
		break;

	case 2:
		ret = sys_getcpu((unsigned __user *)regs->di,
				 (unsigned __user *)regs->si,
				 NULL);
		break;
	}

	if (ret == -EFAULT) {
		/*
		 * Bad news -- userspace fed a bad pointer to a vsyscall.
		 *
		 * With a real vsyscall, that would have caused SIGSEGV.
		 * To make writing reliable exploits using the emulated
		 * vsyscalls harder, generate SIGSEGV here as well.
		 */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");
		goto sigsegv;
	}

	regs->ax = ret;

	/* Emulate a ret instruction. */
	regs->ip = caller;
	regs->sp += 8;

	return true;

sigsegv:
	force_sig(SIGSEGV, current);
	return true;
}
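
/*
 * For context (illustration only, not used by this file): a legacy
 * static binary reaches the path above by calling the fixed address
 * directly, roughly
 *
 *	mov	$0xffffffffff600000, %rax
 *	call	*%rax			# gettimeofday()
 *
 * In EMULATE mode the page is mapped non-executable, so the call
 * faults and the page fault code dispatches to emulate_vsyscall().
 */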

/*
 * Assume __initcall executes before all user space. Hopefully kmod
 * doesn't violate that. We'll find out if it does.
 */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded quickly
	 * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
	 */
	d = 0x0f40000000000ULL;
	d |= cpu;
	d |= (node & 0xf) << 12;
	d |= (node >> 4) << 48;

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
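
/*
 * A rough sketch of the consumer side (assumed to mirror the vDSO
 * vgetcpu fast path; this helper itself is illustrative only): read
 * the segment limit back with LSL and unpack the fields stored above.
 */
static __maybe_unused void example_decode_percpu_segment(unsigned *cpu,
							 unsigned *node)
{
	unsigned int p;

	/* Load the per-CPU segment limit written by vsyscall_set_cpu(). */
	asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	*cpu = p & 0xfff;	/* low 12 bits: CPU number */
	*node = p >> 12;	/* next 8 bits: NUMA node */
}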

static void __cpuinit cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}

static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);

	return NOTIFY_DONE;
}

void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
	extern char __vvar_page;
	unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);

	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
		     vsyscall_mode == NATIVE
		     ? PAGE_KERNEL_VSYSCALL
		     : PAGE_KERNEL_VVAR);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
		     (unsigned long)VSYSCALL_START);

	__set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
		     (unsigned long)VVAR_ADDRESS);
}

static int __init vsyscall_init(void)
{
	BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));

	on_each_cpu(cpu_vsyscall_init, NULL, 1);
	/* notifier priority > KVM */
	hotcpu_notifier(cpu_vsyscall_notifier, 30);

	return 0;
}
__initcall(vsyscall_init);