// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Helge Deller <deller@gmx.de>
 *
 * based on arch/s390/kernel/vdso.c which is
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/timekeeper_internal.h>
#include <linux/compat.h>
#include <linux/nsproxy.h>
#include <linux/time_namespace.h>
#include <linux/random.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/cacheflush.h>
/* Start/end markers of the vDSO images, provided by the vDSO linker scripts. */
extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;
29 static int vdso_mremap(const struct vm_special_mapping
*sm
,
30 struct vm_area_struct
*vma
)
32 current
->mm
->context
.vdso_base
= vma
->vm_start
;
#ifdef CONFIG_64BIT
/* Special mapping for the 64-bit vDSO; .pages is filled in by vdso_init(). */
static struct vm_special_mapping vdso64_mapping = {
	.name = "vdso64",
	.mremap = vdso_mremap,
};
#endif
43 static struct vm_special_mapping vdso32_mapping
= {
45 .mremap
= vdso_mremap
,
49 * This is called from binfmt_elf, we create the special vma for the
50 * vDSO and insert it into the mm struct tree
52 int arch_setup_additional_pages(struct linux_binprm
*bprm
,
56 unsigned long vdso_text_start
, vdso_text_len
, map_base
;
57 struct vm_special_mapping
*vdso_mapping
;
58 struct mm_struct
*mm
= current
->mm
;
59 struct vm_area_struct
*vma
;
62 if (mmap_write_lock_killable(mm
))
66 if (!is_compat_task()) {
67 vdso_text_len
= &vdso64_end
- &vdso64_start
;
68 vdso_mapping
= &vdso64_mapping
;
72 vdso_text_len
= &vdso32_end
- &vdso32_start
;
73 vdso_mapping
= &vdso32_mapping
;
76 map_base
= mm
->mmap_base
;
77 if (current
->flags
& PF_RANDOMIZE
)
78 map_base
-= get_random_u32_below(0x20) * PAGE_SIZE
;
80 vdso_text_start
= get_unmapped_area(NULL
, map_base
, vdso_text_len
, 0, 0);
82 /* VM_MAYWRITE for COW so gdb can set breakpoints */
83 vma
= _install_special_mapping(mm
, vdso_text_start
, vdso_text_len
,
85 VM_MAYREAD
|VM_MAYWRITE
|VM_MAYEXEC
,
88 do_munmap(mm
, vdso_text_start
, PAGE_SIZE
, NULL
);
91 current
->mm
->context
.vdso_base
= vdso_text_start
;
95 mmap_write_unlock(mm
);
99 static struct page
** __init
vdso_setup_pages(void *start
, void *end
)
101 int pages
= (end
- start
) >> PAGE_SHIFT
;
102 struct page
**pagelist
;
105 pagelist
= kcalloc(pages
+ 1, sizeof(struct page
*), GFP_KERNEL
);
107 panic("%s: Cannot allocate page list for VDSO", __func__
);
108 for (i
= 0; i
< pages
; i
++)
109 pagelist
[i
] = virt_to_page(start
+ i
* PAGE_SIZE
);
113 static int __init
vdso_init(void)
116 vdso64_mapping
.pages
= vdso_setup_pages(&vdso64_start
, &vdso64_end
);
118 if (IS_ENABLED(CONFIG_COMPAT
) || !IS_ENABLED(CONFIG_64BIT
))
119 vdso32_mapping
.pages
= vdso_setup_pages(&vdso32_start
, &vdso32_end
);
/* Register vdso_init() to run at the arch initcall level during boot. */
arch_initcall(vdso_init);