// SPDX-License-Identifier: GPL-2.0-only
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/spitfire.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
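
/*
 * vdso_enabled gates vDSO setup at exec time; it can be cleared with the
 * "vdso=0" boot parameter (see vdso_setup() at the bottom of this file).
 */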
unsigned int __read_mostly vdso_enabled = 1;

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]"
};

#ifdef CONFIG_SPARC64
static struct vm_special_mapping vdso_mapping64 = {
	.name = "[vdso]"
};
#endif

#ifdef CONFIG_COMPAT
static struct vm_special_mapping vdso_mapping32 = {
	.name = "[vdso]"
};
#endif

struct vvar_data *vvar_data;

struct vdso_elfinfo32 {
	Elf32_Ehdr	*hdr;
	Elf32_Sym	*dynsym;
	unsigned long	dynsymsize;
	const char	*dynstr;
};

struct vdso_elfinfo64 {
	Elf64_Ehdr	*hdr;
	Elf64_Sym	*dynsym;
	unsigned long	dynsymsize;
	const char	*dynstr;
};

struct vdso_elfinfo {
	union {
		struct vdso_elfinfo32 elf32;
		struct vdso_elfinfo64 elf64;
	} u;
};
static void *one_section64(struct vdso_elfinfo64 *e, const char *name,
			   unsigned long *size)
{
	const char *snames;
	Elf64_Shdr *shdrs;
	unsigned int i;

	shdrs = (void *)e->hdr + e->hdr->e_shoff;
	snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
	for (i = 1; i < e->hdr->e_shnum; i++) {
		if (!strcmp(snames + shdrs[i].sh_name, name)) {
			if (size)
				*size = shdrs[i].sh_size;
			return (void *)e->hdr + shdrs[i].sh_offset;
		}
	}
	return NULL;
}
static int find_sections64(const struct vdso_image *image, struct vdso_elfinfo *_e)
{
	struct vdso_elfinfo64 *e = &_e->u.elf64;

	e->hdr = image->data;
	e->dynsym = one_section64(e, ".dynsym", &e->dynsymsize);
	e->dynstr = one_section64(e, ".dynstr", NULL);

	if (!e->dynsym || !e->dynstr) {
		pr_err("VDSO64: Missing symbol sections.\n");
		return -ENODEV;
	}
	return 0;
}
static Elf64_Sym *find_sym64(const struct vdso_elfinfo64 *e, const char *name)
{
	unsigned int i;

	for (i = 0; i < (e->dynsymsize / sizeof(Elf64_Sym)); i++) {
		Elf64_Sym *s = &e->dynsym[i];
		if (s->st_name == 0)
			continue;
		if (!strcmp(e->dynstr + s->st_name, name))
			return s;
	}
	return NULL;
}
static int patchsym64(struct vdso_elfinfo *_e, const char *orig,
		      const char *new)
{
	struct vdso_elfinfo64 *e = &_e->u.elf64;
	Elf64_Sym *osym = find_sym64(e, orig);
	Elf64_Sym *nsym = find_sym64(e, new);

	if (!nsym || !osym) {
		pr_err("VDSO64: Missing symbols.\n");
		return -ENODEV;
	}
	osym->st_value = nsym->st_value;
	osym->st_size = nsym->st_size;
	osym->st_info = nsym->st_info;
	osym->st_other = nsym->st_other;
	osym->st_shndx = nsym->st_shndx;

	return 0;
}
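
/*
 * The 32-bit (compat) variants below mirror the Elf64 helpers above,
 * differing only in the ELF types they operate on.
 */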
static void *one_section32(struct vdso_elfinfo32 *e, const char *name,
			   unsigned long *size)
{
	const char *snames;
	Elf32_Shdr *shdrs;
	unsigned int i;

	shdrs = (void *)e->hdr + e->hdr->e_shoff;
	snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
	for (i = 1; i < e->hdr->e_shnum; i++) {
		if (!strcmp(snames + shdrs[i].sh_name, name)) {
			if (size)
				*size = shdrs[i].sh_size;
			return (void *)e->hdr + shdrs[i].sh_offset;
		}
	}
	return NULL;
}
static int find_sections32(const struct vdso_image *image, struct vdso_elfinfo *_e)
{
	struct vdso_elfinfo32 *e = &_e->u.elf32;

	e->hdr = image->data;
	e->dynsym = one_section32(e, ".dynsym", &e->dynsymsize);
	e->dynstr = one_section32(e, ".dynstr", NULL);

	if (!e->dynsym || !e->dynstr) {
		pr_err("VDSO32: Missing symbol sections.\n");
		return -ENODEV;
	}
	return 0;
}
static Elf32_Sym *find_sym32(const struct vdso_elfinfo32 *e, const char *name)
{
	unsigned int i;

	for (i = 0; i < (e->dynsymsize / sizeof(Elf32_Sym)); i++) {
		Elf32_Sym *s = &e->dynsym[i];
		if (s->st_name == 0)
			continue;
		if (!strcmp(e->dynstr + s->st_name, name))
			return s;
	}
	return NULL;
}
static int patchsym32(struct vdso_elfinfo *_e, const char *orig,
		      const char *new)
{
	struct vdso_elfinfo32 *e = &_e->u.elf32;
	Elf32_Sym *osym = find_sym32(e, orig);
	Elf32_Sym *nsym = find_sym32(e, new);

	if (!nsym || !osym) {
		pr_err("VDSO32: Missing symbols.\n");
		return -ENODEV;
	}
	osym->st_value = nsym->st_value;
	osym->st_size = nsym->st_size;
	osym->st_info = nsym->st_info;
	osym->st_other = nsym->st_other;
	osym->st_shndx = nsym->st_shndx;

	return 0;
}
static int find_sections(const struct vdso_image *image, struct vdso_elfinfo *e,
			 bool elf64)
{
	if (elf64)
		return find_sections64(image, e);
	else
		return find_sections32(image, e);
}
static int patch_one_symbol(struct vdso_elfinfo *e, const char *orig,
			    const char *new_target, bool elf64)
{
	if (elf64)
		return patchsym64(e, orig, new_target);
	else
		return patchsym32(e, orig, new_target);
}
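
/*
 * All chips except Spitfire read the cycle counter from the STICK register
 * (init_vdso_image() checks tlb_type for this), so point the user-visible
 * timing symbols at their _stick implementations in the image.
 */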
static int stick_patch(const struct vdso_image *image, struct vdso_elfinfo *e, bool elf64)
{
	int err;

	err = find_sections(image, e, elf64);
	if (err)
		return err;

	err = patch_one_symbol(e,
			       "__vdso_gettimeofday",
			       "__vdso_gettimeofday_stick", elf64);
	if (err)
		return err;

	return patch_one_symbol(e,
			       "__vdso_clock_gettime",
			       "__vdso_clock_gettime_stick", elf64);
}
/*
 * Allocate pages for the vdso and vvar, and copy in the vdso text from the
 * kernel image.
 */
static int __init init_vdso_image(const struct vdso_image *image,
				  struct vm_special_mapping *vdso_mapping,
				  bool elf64)
{
	int cnpages = (image->size) / PAGE_SIZE;
	struct page *dp, **dpp = NULL;
	struct page *cp, **cpp = NULL;
	struct vdso_elfinfo ei;
	int i, dnpages = 0;

	if (tlb_type != spitfire) {
		int err = stick_patch(image, &ei, elf64);

		if (err)
			return err;
	}

	/*
	 * First, the vdso text.  This is initialized data, an integral number
	 * of pages long.
	 */
	if (WARN_ON(image->size % PAGE_SIZE != 0))
		goto oom;

	cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
	vdso_mapping->pages = cpp;

	if (!cpp)
		goto oom;

	for (i = 0; i < cnpages; i++) {
		cp = alloc_page(GFP_KERNEL);
		if (!cp)
			goto oom;
		cpp[i] = cp;
		copy_page(page_address(cp), image->data + i * PAGE_SIZE);
	}

	/*
	 * Now the vvar page.  This is uninitialized data.
	 */
	if (vvar_data == NULL) {
		dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
		if (WARN_ON(dnpages != 1))
			goto oom;
		dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
		vvar_mapping.pages = dpp;

		if (!dpp)
			goto oom;

		dp = alloc_page(GFP_KERNEL);
		if (!dp)
			goto oom;

		dpp[0] = dp;
		vvar_data = page_address(dp);
		memset(vvar_data, 0, PAGE_SIZE);

		vvar_data->seq = 0;
	}

	return 0;
oom:
	if (cpp != NULL) {
		for (i = 0; i < cnpages; i++) {
			if (cpp[i] != NULL)
				__free_page(cpp[i]);
		}
		kfree(cpp);
		vdso_mapping->pages = NULL;
	}

	if (dpp != NULL) {
		for (i = 0; i < dnpages; i++) {
			if (dpp[i] != NULL)
				__free_page(dpp[i]);
		}
		kfree(dpp);
		vvar_mapping.pages = NULL;
	}

	pr_warn("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
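
/*
 * Patch and populate the built-in images once at boot; mapping into a
 * process happens later, from arch_setup_additional_pages() at exec time.
 */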
static int __init init_vdso(void)
{
	int err = 0;
#ifdef CONFIG_SPARC64
	err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64, true);
	if (err)
		return err;
#endif

#ifdef CONFIG_COMPAT
	err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32, false);
#endif

	return err;
}
subsys_initcall(init_vdso);
/* Shuffle the vdso up a bit, randomly. */
static unsigned long vdso_addr(unsigned long start, unsigned int len)
{
	unsigned int offset;

	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_u32_below(PTRS_PER_PTE);
	return start + (offset << PAGE_SHIFT);
}
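
/*
 * sym_vvar_start is negative: the vvar page sits below the vdso text, so
 * the block mapped at addr covers the vvar page first and the text above it.
 */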
static int map_vdso(const struct vdso_image *image,
		    struct vm_special_mapping *vdso_mapping)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start, addr = 0;
	int ret = 0;

	mmap_write_lock(mm);

	/*
	 * First, get an unmapped region: then randomize it, and make sure that
	 * region is free.
	 */
	if (current->flags & PF_RANDOMIZE) {
		addr = get_unmapped_area(NULL, 0,
					 image->size - image->sym_vvar_start,
					 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
		addr = vdso_addr(addr, image->size - image->sym_vvar_start);
	}
	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	mmap_write_unlock(mm);
	return ret;
}
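
/* Entry point from the ELF loader: map the vDSO into the new process. */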
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso_enabled)
		return 0;

#if defined CONFIG_COMPAT
	if (!(is_32bit_task()))
		return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
	else
		return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
#else
	return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
#endif
}
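
/* "vdso=" boot parameter: "vdso=0" disables the vDSO. */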
static __init int vdso_setup(char *s)
{
	int err;
	unsigned long val;

	err = kstrtoul(s, 10, &val);
	if (err)
		return err;
	vdso_enabled = val;
	return 1;
}
__setup("vdso=", vdso_setup);