/*
 * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
 *
 * Based on the original implementation which is:
 *   Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *   Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * Parts of the original code have been moved to arch/x86/vdso/vma.c
 *
 * This file implements vsyscall emulation.  vsyscalls are a legacy ABI:
 * Userspace can request certain kernel services by calling fixed
 * addresses.  This concept is problematic:
 *
 * - It interferes with ASLR.
 * - It's awkward to write code that lives in kernel addresses but is
 *   callable by userspace at fixed addresses.
 * - The whole concept is impossible for 32-bit compat userspace.
 * - UML cannot easily virtualize a vsyscall.
 *
 * As of mid-2014, I believe that there is no new userspace code that
 * will use a vsyscall if the vDSO is present.  I hope that soon there
 * will be no new userspace code that uses a vsyscall at all.
 *
 * The code in this file emulates vsyscalls when notified of a page
 * fault to a vsyscall address.
 */
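/*
 * For illustration only -- a sketch of what a legacy caller looks like,
 * not code that belongs in the kernel.  An old binary invokes a
 * vsyscall by calling one of three fixed addresses directly:
 *
 *	struct timeval tv;
 *	long (*vgtod)(struct timeval *, struct timezone *) =
 *		(void *)0xffffffffff600000;	(VSYSCALL_ADDR, nr 0)
 *	vgtod(&tv, NULL);
 */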
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"
static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
	NATIVE;
#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
	NONE;
#else
	EMULATE;
#endif

static int __init vsyscall_setup(char *str)
{
	if (str) {
		if (!strcmp("emulate", str))
			vsyscall_mode = EMULATE;
		else if (!strcmp("native", str))
			vsyscall_mode = NATIVE;
		else if (!strcmp("none", str))
			vsyscall_mode = NONE;
		else
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);
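/*
 * Usage sketch: the mode is chosen on the kernel command line, e.g.
 * "vsyscall=emulate" (the default unless Kconfig overrides it),
 * "vsyscall=native", or "vsyscall=none"; any other value makes
 * vsyscall_setup() fail with -EINVAL.
 */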
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	if (!show_unhandled_signals)
		return;

	printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
			   level, current->comm, task_pid_nr(current),
			   message, regs->ip, regs->cs,
			   regs->sp, regs->ax, regs->si, regs->di);
}
static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
		return -EINVAL;

	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;

	return nr;
}
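/*
 * Worked example, assuming the historical layout with entries spaced
 * 0x400 bytes apart: a call to VSYSCALL_ADDR + 0x400 gives
 * (addr & 0xC00) >> 10 == 1, i.e. the time() vsyscall handled below;
 * nr 0 is gettimeofday() and nr 2 is getcpu().
 */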
static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
	/*
	 * XXX: if access_ok, get_user, and put_user handled
	 * sig_on_uaccess_err, this could go away.
	 */

	if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
		siginfo_t info;
		struct thread_struct *thread = &current->thread;

		thread->error_code	= 6;	/* user fault, no page, write */
		thread->cr2		= ptr;
		thread->trap_nr		= X86_TRAP_PF;

		memset(&info, 0, sizeof(info));
		info.si_signo		= SIGSEGV;
		info.si_errno		= 0;
		info.si_code		= SEGV_MAPERR;
		info.si_addr		= (void __user *)ptr;

		force_sig_info(SIGSEGV, &info, current);
		return false;
	}

	return true;
}
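/*
 * Note on the error_code above: 6 decodes as the x86 #PF error bits
 * P=0 (page not present), W/R=1 (write), U/S=1 (user mode) -- the same
 * error code a real faulting write from userspace would have reported.
 */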
bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
	struct task_struct *tsk;
	unsigned long caller;
	int vsyscall_nr, syscall_nr, tmp;
	int prev_sig_on_uaccess_err;
	long ret;

	/*
	 * No point in checking CS -- the only way to get here is a user mode
	 * trap to a high address, which means that we're in 64-bit user code.
	 */
	WARN_ON_ONCE(address != regs->ip);

	if (vsyscall_mode == NONE) {
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall attempted with vsyscall=none");
		return false;
	}

	vsyscall_nr = addr_to_vsyscall_nr(address);

	trace_emulate_vsyscall(vsyscall_nr);

	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
		goto sigsegv;
	}

	tsk = current;

	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "vsyscall with bad stack (exploit attempt?)");
		goto sigsegv;
	}

	/*
	 * Check for access_ok violations and find the syscall nr.
	 *
	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
	 * 64-bit, so we don't need to special-case it here.  For all the
	 * vsyscalls, NULL means "don't write anything" not "write it at
	 * address 0".
	 */
	switch (vsyscall_nr) {
	case 0:
		if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
		    !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_gettimeofday;
		break;

	case 1:
		if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_time;
		break;

	case 2:
		if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
		    !write_ok_or_segv(regs->si, sizeof(unsigned))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_getcpu;
		break;
	}

	/*
	 * Handle seccomp.  regs->ip must be the original value.
	 * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
	 *
	 * We could optimize the seccomp disabled case, but performance
	 * here doesn't matter.
	 */
	regs->orig_ax = syscall_nr;
	regs->ax = -ENOSYS;
	tmp = secure_computing(NULL);
	if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
		warn_bad_vsyscall(KERN_DEBUG, regs,
				  "seccomp tried to change syscall nr or ip");
		do_exit(SIGSYS);
	}
	regs->orig_ax = -1;
	if (tmp)
		goto do_ret;	/* skip requested */

	/*
	 * With a real vsyscall, page faults cause SIGSEGV.  We want to
	 * preserve that behavior to make writing exploits harder.
	 */
	prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
	current->thread.sig_on_uaccess_err = 1;

	ret = -EFAULT;
	switch (vsyscall_nr) {
	case 0:
		ret = sys_gettimeofday(
			(struct timeval __user *)regs->di,
			(struct timezone __user *)regs->si);
		break;

	case 1:
		ret = sys_time((time_t __user *)regs->di);
		break;

	case 2:
		ret = sys_getcpu((unsigned __user *)regs->di,
				 (unsigned __user *)regs->si,
				 NULL);
		break;
	}

	current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;

check_fault:
	if (ret == -EFAULT) {
		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");

		/*
		 * If we failed to generate a signal for any reason,
		 * generate one here.  (This should be impossible.)
		 */
		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
				 !sigismember(&tsk->pending.signal, SIGSEGV)))
			goto sigsegv;

		return true;	/* Don't emulate the ret. */
	}

	regs->ax = ret;

do_ret:
	/* Emulate a ret instruction: load the saved return address and pop it. */
	regs->ip = caller;
	regs->sp += 8;
	return true;

sigsegv:
	force_sig(SIGSEGV, current);
	return true;
}
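/*
 * Rough flow for the emulated case (a sketch, not a complete trace):
 * in EMULATE mode the vsyscall page is mapped non-executable, so the
 * userspace call faults, the page fault handler recognizes the vsyscall
 * address and calls emulate_vsyscall() above, which performs the call
 * and then fakes the "ret" as shown at the do_ret label.
 */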
/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64-bit vsyscall page now.  32-bit has a real VMA now and
 * does not need special handling anymore:
 */
static const char *gate_vma_name(struct vm_area_struct *vma)
{
	return "[vsyscall]";
}
static const struct vm_operations_struct gate_vma_ops = {
	.name = gate_vma_name,
};
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_ADDR,
	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC,
	.vm_ops		= &gate_vma_ops,
};
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	if (!mm || mm->context.ia32_compat)
		return NULL;

	if (vsyscall_mode == NONE)
		return NULL;

	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}
/*
 * Use this when you have no reliable mm, typically from interrupt
 * context.  It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
}
void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

	if (vsyscall_mode != NONE)
		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
			     vsyscall_mode == NATIVE
			     ? PAGE_KERNEL_VSYSCALL
			     : PAGE_KERNEL_VVAR);

	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
		     (unsigned long)VSYSCALL_ADDR);
}
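/*
 * Quick check from userspace (a usage sketch): "grep vsyscall
 * /proc/self/maps" should show the gate VMA under the name returned by
 * gate_vma_name() above, e.g. something like
 * "ffffffffff600000-ffffffffff601000 r-xp ... [vsyscall]".
 */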