// SPDX-License-Identifier: GPL-2.0-only
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 */

/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */

#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/spitfire.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
unsigned int __read_mostly vdso_enabled = 1;

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]"
};

#ifdef CONFIG_SPARC64
static struct vm_special_mapping vdso_mapping64 = {
	.name = "[vdso]"
};
#endif

#ifdef CONFIG_COMPAT
static struct vm_special_mapping vdso_mapping32 = {
	.name = "[vdso]"
};
#endif

struct vvar_data *vvar_data;
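
/*
 * vvar_data is the kernel-side pointer to the single vvar page that is
 * mapped (read-only) into every process so the vDSO can read clock data
 * from it.  vdso_enabled can be cleared with the "vdso=" boot parameter
 * handled by vdso_setup() at the bottom of this file.
 */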
struct vdso_elfinfo32 {
	Elf32_Ehdr *hdr;
	Elf32_Sym *dynsym;
	unsigned long dynsymsize;
	const char *dynstr;
};

struct vdso_elfinfo64 {
	Elf64_Ehdr *hdr;
	Elf64_Sym *dynsym;
	unsigned long dynsymsize;
	const char *dynstr;
};

struct vdso_elfinfo {
	union {
		struct vdso_elfinfo32 elf32;
		struct vdso_elfinfo64 elf64;
	} u;
};
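
/*
 * The helpers below parse the built-in vDSO image as an ELF file.
 * stick_patch() uses them to rewrite symbols in the image in place,
 * before init_vdso_image() copies the text into its final pages.
 */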
static void *one_section64(struct vdso_elfinfo64 *e, const char *name,
			   unsigned long *size)
{
	const char *snames;
	Elf64_Shdr *shdrs;
	unsigned int i;

	shdrs = (void *)e->hdr + e->hdr->e_shoff;
	snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
	for (i = 1; i < e->hdr->e_shnum; i++) {
		if (!strcmp(snames + shdrs[i].sh_name, name)) {
			if (size)
				*size = shdrs[i].sh_size;
			return (void *)e->hdr + shdrs[i].sh_offset;
		}
	}
	return NULL;
}
static int find_sections64(const struct vdso_image *image,
			   struct vdso_elfinfo *_e)
{
	struct vdso_elfinfo64 *e = &_e->u.elf64;

	e->hdr = image->data;
	e->dynsym = one_section64(e, ".dynsym", &e->dynsymsize);
	e->dynstr = one_section64(e, ".dynstr", NULL);

	if (!e->dynsym || !e->dynstr) {
		pr_err("VDSO64: Missing symbol sections.\n");
		return -ENODEV;
	}
	return 0;
}
static Elf64_Sym *find_sym64(const struct vdso_elfinfo64 *e, const char *name)
{
	unsigned int i;

	for (i = 0; i < (e->dynsymsize / sizeof(Elf64_Sym)); i++) {
		Elf64_Sym *s = &e->dynsym[i];

		if (s->st_name == 0)
			continue;
		if (!strcmp(e->dynstr + s->st_name, name))
			return s;
	}
	return NULL;
}
static int patchsym64(struct vdso_elfinfo *_e, const char *orig,
		      const char *new)
{
	struct vdso_elfinfo64 *e = &_e->u.elf64;
	Elf64_Sym *osym = find_sym64(e, orig);
	Elf64_Sym *nsym = find_sym64(e, new);

	if (!nsym || !osym) {
		pr_err("VDSO64: Missing symbols.\n");
		return -ENODEV;
	}
	osym->st_value = nsym->st_value;
	osym->st_size = nsym->st_size;
	osym->st_info = nsym->st_info;
	osym->st_other = nsym->st_other;
	osym->st_shndx = nsym->st_shndx;

	return 0;
}
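
/*
 * Patching copies every st_* field of the replacement symbol over the
 * original one, so lookups of the original name resolve to the
 * replacement's value, size, binding and section from then on.
 */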
static void *one_section32(struct vdso_elfinfo32 *e, const char *name,
			   unsigned long *size)
{
	const char *snames;
	Elf32_Shdr *shdrs;
	unsigned int i;

	shdrs = (void *)e->hdr + e->hdr->e_shoff;
	snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
	for (i = 1; i < e->hdr->e_shnum; i++) {
		if (!strcmp(snames + shdrs[i].sh_name, name)) {
			if (size)
				*size = shdrs[i].sh_size;
			return (void *)e->hdr + shdrs[i].sh_offset;
		}
	}
	return NULL;
}
static int find_sections32(const struct vdso_image *image,
			   struct vdso_elfinfo *_e)
{
	struct vdso_elfinfo32 *e = &_e->u.elf32;

	e->hdr = image->data;
	e->dynsym = one_section32(e, ".dynsym", &e->dynsymsize);
	e->dynstr = one_section32(e, ".dynstr", NULL);

	if (!e->dynsym || !e->dynstr) {
		pr_err("VDSO32: Missing symbol sections.\n");
		return -ENODEV;
	}
	return 0;
}
static Elf32_Sym *find_sym32(const struct vdso_elfinfo32 *e, const char *name)
{
	unsigned int i;

	for (i = 0; i < (e->dynsymsize / sizeof(Elf32_Sym)); i++) {
		Elf32_Sym *s = &e->dynsym[i];

		if (s->st_name == 0)
			continue;
		if (!strcmp(e->dynstr + s->st_name, name))
			return s;
	}
	return NULL;
}
static int patchsym32(struct vdso_elfinfo *_e, const char *orig,
		      const char *new)
{
	struct vdso_elfinfo32 *e = &_e->u.elf32;
	Elf32_Sym *osym = find_sym32(e, orig);
	Elf32_Sym *nsym = find_sym32(e, new);

	if (!nsym || !osym) {
		pr_err("VDSO32: Missing symbols.\n");
		return -ENODEV;
	}
	osym->st_value = nsym->st_value;
	osym->st_size = nsym->st_size;
	osym->st_info = nsym->st_info;
	osym->st_other = nsym->st_other;
	osym->st_shndx = nsym->st_shndx;

	return 0;
}
static int find_sections(const struct vdso_image *image, struct vdso_elfinfo *e,
			 bool elf64)
{
	if (elf64)
		return find_sections64(image, e);
	else
		return find_sections32(image, e);
}
static int patch_one_symbol(struct vdso_elfinfo *e, const char *orig,
			    const char *new_target, bool elf64)
{
	if (elf64)
		return patchsym64(e, orig, new_target);
	else
		return patchsym32(e, orig, new_target);
}
static int stick_patch(const struct vdso_image *image, struct vdso_elfinfo *e,
		       bool elf64)
{
	int err;

	err = find_sections(image, e, elf64);
	if (err)
		return err;

	err = patch_one_symbol(e,
			       "__vdso_gettimeofday",
			       "__vdso_gettimeofday_stick", elf64);
	if (err)
		return err;

	return patch_one_symbol(e,
				"__vdso_clock_gettime",
				"__vdso_clock_gettime_stick", elf64);
}
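
/*
 * init_vdso_image() applies this patch whenever tlb_type != spitfire,
 * i.e. on chips whose vDSO time functions should read the system tick
 * (STICK) register: the generic entry points are aliased to their
 * *_stick twins before the image is copied into its pages.
 */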
/*
 * Allocate pages for the vdso and vvar, and copy in the vdso text from the
 * kernel image.
 */
int __init init_vdso_image(const struct vdso_image *image,
			   struct vm_special_mapping *vdso_mapping, bool elf64)
{
	int cnpages = (image->size) / PAGE_SIZE;
	struct page *dp, **dpp = NULL;
	struct page *cp, **cpp = NULL;
	struct vdso_elfinfo ei;
	int i, dnpages = 0;

	if (tlb_type != spitfire) {
		int err = stick_patch(image, &ei, elf64);

		if (err)
			return err;
	}

	/*
	 * First, the vdso text. This is initialized data, an integral number
	 * of pages long.
	 */
	if (WARN_ON(image->size % PAGE_SIZE != 0))
		goto oom;

	cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
	vdso_mapping->pages = cpp;

	if (!cpp)
		goto oom;

	for (i = 0; i < cnpages; i++) {
		cp = alloc_page(GFP_KERNEL);
		if (!cp)
			goto oom;
		cpp[i] = cp;
		copy_page(page_address(cp), image->data + i * PAGE_SIZE);
	}

	/*
	 * Now the vvar page. This is uninitialized data.
	 */
	if (vvar_data == NULL) {
		dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
		if (WARN_ON(dnpages != 1))
			goto oom;
		dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
		vvar_mapping.pages = dpp;

		if (!dpp)
			goto oom;

		dp = alloc_page(GFP_KERNEL);
		if (!dp)
			goto oom;

		dpp[0] = dp;
		vvar_data = page_address(dp);
		memset(vvar_data, 0, PAGE_SIZE);
	}

	return 0;
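
	/*
	 * Error path: free whatever vdso text pages and vvar page were
	 * already allocated.  Note that the single vvar page is shared;
	 * it is allocated only on the first call (vvar_data == NULL
	 * above) and reused by the other image.
	 */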
oom:
	if (cpp != NULL) {
		for (i = 0; i < cnpages; i++) {
			if (cpp[i] != NULL)
				__free_page(cpp[i]);
		}
		kfree(cpp);
		vdso_mapping->pages = NULL;
	}

	if (dpp != NULL) {
		for (i = 0; i < dnpages; i++) {
			if (dpp[i] != NULL)
				__free_page(dpp[i]);
		}
		kfree(dpp);
		vvar_mapping.pages = NULL;
	}

	pr_warn("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
static int __init init_vdso(void)
{
	int err = 0;

#ifdef CONFIG_SPARC64
	err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64, true);
	if (err)
		return err;
#endif

#ifdef CONFIG_COMPAT
	err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32, false);
#endif
	return err;
}
subsys_initcall(init_vdso);
/* Shuffle the vdso up a bit, randomly. */
static unsigned long vdso_addr(unsigned long start, unsigned int len)
{
	unsigned int offset;

	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	return start + (offset << PAGE_SHIFT);
}
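
/*
 * Note that vdso_addr() ignores its len argument: the random offset is
 * simply bounded by PTRS_PER_PTE pages, and the caller below re-runs
 * get_unmapped_area() on the randomized address to make sure the
 * resulting region is actually free.
 */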
static int map_vdso(const struct vdso_image *image,
		    struct vm_special_mapping *vdso_mapping)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start, addr = 0;
	int ret = 0;

	down_write(&mm->mmap_sem);

	/*
	 * First, get an unmapped region: then randomize it, and make sure that
	 * region is free.
	 */
	if (current->flags & PF_RANDOMIZE) {
		addr = get_unmapped_area(NULL, 0,
					 image->size - image->sym_vvar_start,
					 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
		addr = vdso_addr(addr, image->size - image->sym_vvar_start);
	}
	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}
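
/*
 * Layout note: sym_vvar_start is negative, so the vvar page sits at
 * 'addr' with the vdso text just above it at text_start, and a single
 * get_unmapped_area() call reserves room for both mappings.
 */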
struct linux_binprm;

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso_enabled)
		return 0;

#if defined CONFIG_COMPAT
	if (!(is_32bit_task()))
		return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
	else
		return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
#else
	return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
#endif
}
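
/*
 * arch_setup_additional_pages() is called from the ELF loader at exec
 * time; with CONFIG_COMPAT the 64-bit or 32-bit image is chosen to
 * match the task being started.
 */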
static __init int vdso_setup(char *s)
{
	int err;
	unsigned long val;

	err = kstrtoul(s, 10, &val);
	if (err)
		return err;
	vdso_enabled = val;
	return 0;
}
__setup("vdso=", vdso_setup);
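
/* For example, booting with "vdso=0" disables the vDSO entirely. */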