// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
 *
 * Based on the original implementation which is:
 *	Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *	Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *	Parts of the original code have been moved to arch/x86/vdso/vma.c
 *
 * This file implements vsyscall emulation.  vsyscalls are a legacy ABI:
 * Userspace can request certain kernel services by calling fixed
 * addresses.  This concept is problematic:
 *
 * - It interferes with ASLR.
 * - It's awkward to write code that lives in kernel addresses but is
 *   callable by userspace at fixed addresses.
 * - The whole concept is impossible for 32-bit compat userspace.
 * - UML cannot easily virtualize a vsyscall.
 *
 * As of mid-2014, I believe that there is no new userspace code that
 * will use a vsyscall if the vDSO is present.  I hope that there will
 * soon be no new userspace code that will ever use a vsyscall.
 *
 * The code in this file emulates vsyscalls when notified of a page
 * fault to a vsyscall address.
 */
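
/*
 * Illustrative sketch (not part of this file): legacy userspace invoked
 * these services by calling into the fixed vsyscall page directly.
 * Assuming VSYSCALL_ADDR == 0xffffffffff600000, such a binary might do:
 *
 *	struct timeval tv;
 *	typedef int (*vgtod_t)(struct timeval *, struct timezone *);
 *	vgtod_t vgtod = (vgtod_t)0xffffffffff600000UL;
 *	vgtod(&tv, NULL);	// page-faults into emulate_vsyscall()
 *
 * In EMULATE mode the page is mapped non-executable for userspace, so
 * the call traps and is serviced by the code below.
 */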
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/traps.h>
#include <asm/paravirt.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"
static enum { EMULATE, NONE } vsyscall_mode =
#ifdef CONFIG_LEGACY_VSYSCALL_NONE
	NONE;
#else
	EMULATE;
#endif
static int __init vsyscall_setup(char *str)
{
	if (str) {
		if (!strcmp("emulate", str))
			vsyscall_mode = EMULATE;
		else if (!strcmp("none", str))
			vsyscall_mode = NONE;
		else
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);
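
/*
 * Usage sketch: the mode is chosen on the kernel command line, e.g.
 * booting with "vsyscall=none" makes every vsyscall attempt fail with
 * SIGSEGV, while "vsyscall=emulate" keeps the emulation above (the
 * default unless CONFIG_LEGACY_VSYSCALL_NONE is set).
 */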
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	if (!show_unhandled_signals)
		return;

	printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
			   level, current->comm, task_pid_nr(current),
			   message, regs->ip, regs->cs,
			   regs->sp, regs->ax, regs->si, regs->di);
}
static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
		return -EINVAL;

	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;

	return nr;
}
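
/*
 * For illustration, with VSYSCALL_ADDR == 0xffffffffff600000 the
 * function above maps:
 *
 *	0xffffffffff600000 -> 0 (gettimeofday)
 *	0xffffffffff600400 -> 1 (time)
 *	0xffffffffff600800 -> 2 (getcpu)
 *
 * Any other address in the page -- a misaligned entry such as
 * 0xffffffffff600100, or the unused slot at 0xffffffffff600c00 --
 * yields -EINVAL.
 */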
static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
	/*
	 * XXX: if access_ok, get_user, and put_user handled
	 * sig_on_uaccess_err, this could go away.
	 */

	if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
		struct thread_struct *thread = &current->thread;

		thread->error_code	= 6;	/* user fault, no page, write */
		thread->cr2		= ptr;
		thread->trap_nr		= X86_TRAP_PF;

		force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)ptr, current);
		return false;
	} else {
		return true;
	}
}
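
/*
 * Note: the error_code of 6 stored above decodes as a user-mode write
 * to a not-present page (X86_PF_USER | X86_PF_WRITE, with X86_PF_PROT
 * clear), matching what the hardware would have reported had the
 * faulting write really happened.
 */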
bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
	struct task_struct *tsk;
	unsigned long caller;
	int vsyscall_nr, syscall_nr, tmp;
	int prev_sig_on_uaccess_err;
	long ret;
	unsigned long orig_dx;

	/*
	 * No point in checking CS -- the only way to get here is a user mode
	 * trap to a high address, which means that we're in 64-bit user code.
	 */

	WARN_ON_ONCE(address != regs->ip);
	if (vsyscall_mode == NONE) {
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall attempted with vsyscall=none");
		return false;
	}

	vsyscall_nr = addr_to_vsyscall_nr(address);

	trace_emulate_vsyscall(vsyscall_nr);

	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
		goto sigsegv;
	}

	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "vsyscall with bad stack (exploit attempt?)");
		goto sigsegv;
	}
	tsk = current;

	/*
	 * Check for access_ok violations and find the syscall nr.
	 *
	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
	 * 64-bit, so we don't need to special-case it here.  For all the
	 * vsyscalls, NULL means "don't write anything" not "write it at
	 * address 0".
	 */
	switch (vsyscall_nr) {
	case 0:
		if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
		    !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_gettimeofday;
		break;

	case 1:
		if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_time;
		break;

	case 2:
		if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
		    !write_ok_or_segv(regs->si, sizeof(unsigned))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_getcpu;
		break;
	}
	/*
	 * Handle seccomp.  regs->ip must be the original value.
	 * See seccomp_send_sigsys and Documentation/userspace-api/seccomp_filter.rst.
	 *
	 * We could optimize the seccomp disabled case, but performance
	 * here doesn't matter.
	 */
	regs->orig_ax = syscall_nr;
	regs->ax = -ENOSYS;
	tmp = secure_computing(NULL);
	if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
		warn_bad_vsyscall(KERN_DEBUG, regs,
				  "seccomp tried to change syscall nr or ip");
		do_exit(SIGSYS);
	}
	regs->orig_ax = -1;
	if (tmp)
		goto do_ret; /* skip requested */
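
	/*
	 * Consequence of the block above: a seccomp filter observes these
	 * emulated calls as ordinary syscalls whose instruction pointer is
	 * the vsyscall address, so e.g. SECCOMP_RET_ERRNO makes
	 * secure_computing() return nonzero and we jump straight to the
	 * emulated ret below.
	 */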
	/*
	 * With a real vsyscall, page faults cause SIGSEGV.  We want to
	 * preserve that behavior to make writing exploits harder.
	 */
	prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
	current->thread.sig_on_uaccess_err = 1;

	ret = -EFAULT;
	switch (vsyscall_nr) {
	case 0:
		/* this decodes regs->di and regs->si on its own */
		ret = __x64_sys_gettimeofday(regs);
		break;

	case 1:
		/* this decodes regs->di on its own */
		ret = __x64_sys_time(regs);
		break;

	case 2:
		/* while we could clobber regs->dx, we didn't in the past... */
		orig_dx = regs->dx;
		regs->dx = 0;
		/* this decodes regs->di, regs->si and regs->dx on its own */
		ret = __x64_sys_getcpu(regs);
		regs->dx = orig_dx;
		break;
	}

	current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;
check_fault:
	if (ret == -EFAULT) {
		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");

		/*
		 * If we failed to generate a signal for any reason,
		 * generate one here.  (This should be impossible.)
		 */
		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
				 !sigismember(&tsk->pending.signal, SIGSEGV)))
			goto sigsegv;

		return true; /* Don't emulate the ret. */
	}

	regs->ax = ret;

do_ret:
	/* Emulate a ret instruction. */
	regs->ip = caller;
	regs->sp += 8;
	return true;

sigsegv:
	force_sig(SIGSEGV, current);
	return true;
}
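
/*
 * Note on the do_ret path above: a real vsyscall would finish with a
 * "ret" instruction, so the emulation does the same in software -- it
 * loads the return address previously fetched from the user stack into
 * regs->ip and pops it by adding 8 to regs->sp.
 */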
/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64bit vsyscall page now.  32bit has a real VMA now and does
 * not need special handling anymore.
 */
static const char *gate_vma_name(struct vm_area_struct *vma)
{
	return "[vsyscall]";
}

static const struct vm_operations_struct gate_vma_ops = {
	.name = gate_vma_name,
};

static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_ADDR,
	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC,
	.vm_ops		= &gate_vma_ops,
};
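
/*
 * This pseudo VMA is what makes the page visible in /proc/<pid>/maps;
 * given the vm_flags above it shows up roughly as (illustrative):
 *
 *	ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
 */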
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
	if (!mm || mm->context.ia32_compat)
		return NULL;
#endif
	if (vsyscall_mode == NONE)
		return NULL;
	return &gate_vma;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}
/*
 * Use this when you have no reliable mm, typically from interrupt
 * context.  It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
}
/*
 * The VSYSCALL page is the only user-accessible page in the kernel address
 * range.  Normally, the kernel page tables can have _PAGE_USER clear, but
 * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
 * are enabled.
 *
 * Some day we may create a "minimal" vsyscall mode in which we emulate
 * vsyscalls but leave the page not present.  If so, we skip calling
 * this.
 */
void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
	set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
#endif
	pud = pud_offset(p4d, VSYSCALL_ADDR);
	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
	pmd = pmd_offset(pud, VSYSCALL_ADDR);
	set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
}
void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

	if (vsyscall_mode != NONE) {
		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
			     PAGE_KERNEL_VVAR);
		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
	}

	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
		     (unsigned long)VSYSCALL_ADDR);
}
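
/*
 * map_vsyscall() is expected to run once during early boot (on x86-64
 * it is called from setup_arch()), so the fixmap entry and the
 * _PAGE_USER bits are in place before the first userspace task runs.
 */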