Linux 6.13-rc4
[linux.git] / arch / sh / kernel / vsyscall / vsyscall.c
blob add35c51e017811241a944fa8ca131f357d85d73
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * arch/sh/kernel/vsyscall/vsyscall.c
5 * Copyright (C) 2006 Paul Mundt
7 * vDSO randomization
8 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
9 */
10 #include <linux/mm.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/gfp.h>
14 #include <linux/module.h>
15 #include <linux/elf.h>
16 #include <linux/sched.h>
17 #include <linux/err.h>
20 * Should the kernel map a VDSO page into processes and pass its
21 * address down to glibc upon exec()?
23 unsigned int __read_mostly vdso_enabled = 1;
24 EXPORT_SYMBOL_GPL(vdso_enabled);
26 static int __init vdso_setup(char *s)
28 vdso_enabled = simple_strtoul(s, NULL, 0);
29 return 1;
31 __setup("vdso=", vdso_setup);
34 * These symbols are defined by vsyscall.o to mark the bounds
35 * of the ELF DSO images included therein.
37 extern const char vsyscall_trapa_start, vsyscall_trapa_end;
38 static struct page *syscall_pages[1];
39 static struct vm_special_mapping vdso_mapping = {
40 .name = "[vdso]",
41 .pages = syscall_pages,
44 int __init vsyscall_init(void)
46 void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
47 syscall_pages[0] = virt_to_page(syscall_page);
50 * XXX: Map this page to a fixmap entry if we get around
51 * to adding the page to ELF core dumps
54 memcpy(syscall_page,
55 &vsyscall_trapa_start,
56 &vsyscall_trapa_end - &vsyscall_trapa_start);
58 return 0;
61 /* Setup a VMA at program startup for the vsyscall page */
62 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
64 struct mm_struct *mm = current->mm;
65 struct vm_area_struct *vma;
66 unsigned long addr;
67 int ret;
69 if (mmap_write_lock_killable(mm))
70 return -EINTR;
72 addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
73 if (IS_ERR_VALUE(addr)) {
74 ret = addr;
75 goto up_fail;
78 vdso_mapping.pages = syscall_pages;
79 vma = _install_special_mapping(mm, addr, PAGE_SIZE,
80 VM_READ | VM_EXEC |
81 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
82 &vdso_mapping);
83 ret = PTR_ERR(vma);
84 if (IS_ERR(vma))
85 goto up_fail;
87 current->mm->context.vdso = (void *)addr;
88 ret = 0;
90 up_fail:
91 mmap_write_unlock(mm);
92 return ret;
95 const char *arch_vma_name(struct vm_area_struct *vma)
97 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
98 return "[vdso]";
100 return NULL;