/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */

/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/spitfire.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
23 unsigned int __read_mostly vdso_enabled
= 1;
25 static struct vm_special_mapping vvar_mapping
= {
30 static struct vm_special_mapping vdso_mapping64
= {
36 static struct vm_special_mapping vdso_mapping32
= {
41 struct vvar_data
*vvar_data
;
43 #define SAVE_INSTR_SIZE 4
46 * Allocate pages for the vdso and vvar, and copy in the vdso text from the
49 int __init
init_vdso_image(const struct vdso_image
*image
,
50 struct vm_special_mapping
*vdso_mapping
)
53 struct page
*dp
, **dpp
= NULL
;
55 struct page
*cp
, **cpp
= NULL
;
56 int cnpages
= (image
->size
) / PAGE_SIZE
;
59 * First, the vdso text. This is initialied data, an integral number of
62 if (WARN_ON(image
->size
% PAGE_SIZE
!= 0))
65 cpp
= kcalloc(cnpages
, sizeof(struct page
*), GFP_KERNEL
);
66 vdso_mapping
->pages
= cpp
;
73 * If the system uses %tick instead of %stick, patch the VDSO
74 * with instruction reading %tick instead of %stick.
76 unsigned int j
, k
= SAVE_INSTR_SIZE
;
77 unsigned char *data
= image
->data
;
79 for (j
= image
->sym_vread_tick_patch_start
;
80 j
< image
->sym_vread_tick_patch_end
; j
++) {
82 data
[image
->sym_vread_tick
+ k
] = data
[j
];
87 for (i
= 0; i
< cnpages
; i
++) {
88 cp
= alloc_page(GFP_KERNEL
);
92 copy_page(page_address(cp
), image
->data
+ i
* PAGE_SIZE
);
96 * Now the vvar page. This is uninitialized data.
99 if (vvar_data
== NULL
) {
100 dnpages
= (sizeof(struct vvar_data
) / PAGE_SIZE
) + 1;
101 if (WARN_ON(dnpages
!= 1))
103 dpp
= kcalloc(dnpages
, sizeof(struct page
*), GFP_KERNEL
);
104 vvar_mapping
.pages
= dpp
;
109 dp
= alloc_page(GFP_KERNEL
);
114 vvar_data
= page_address(dp
);
115 memset(vvar_data
, 0, PAGE_SIZE
);
123 for (i
= 0; i
< cnpages
; i
++) {
128 vdso_mapping
->pages
= NULL
;
132 for (i
= 0; i
< dnpages
; i
++) {
137 vvar_mapping
.pages
= NULL
;
140 pr_warn("Cannot allocate vdso\n");
145 static int __init
init_vdso(void)
148 #ifdef CONFIG_SPARC64
149 err
= init_vdso_image(&vdso_image_64_builtin
, &vdso_mapping64
);
155 err
= init_vdso_image(&vdso_image_32_builtin
, &vdso_mapping32
);
160 subsys_initcall(init_vdso
);
164 /* Shuffle the vdso up a bit, randomly. */
165 static unsigned long vdso_addr(unsigned long start
, unsigned int len
)
169 /* This loses some more bits than a modulo, but is cheaper */
170 offset
= get_random_int() & (PTRS_PER_PTE
- 1);
171 return start
+ (offset
<< PAGE_SHIFT
);
174 static int map_vdso(const struct vdso_image
*image
,
175 struct vm_special_mapping
*vdso_mapping
)
177 struct mm_struct
*mm
= current
->mm
;
178 struct vm_area_struct
*vma
;
179 unsigned long text_start
, addr
= 0;
182 down_write(&mm
->mmap_sem
);
185 * First, get an unmapped region: then randomize it, and make sure that
188 if (current
->flags
& PF_RANDOMIZE
) {
189 addr
= get_unmapped_area(NULL
, 0,
190 image
->size
- image
->sym_vvar_start
,
192 if (IS_ERR_VALUE(addr
)) {
196 addr
= vdso_addr(addr
, image
->size
- image
->sym_vvar_start
);
198 addr
= get_unmapped_area(NULL
, addr
,
199 image
->size
- image
->sym_vvar_start
, 0, 0);
200 if (IS_ERR_VALUE(addr
)) {
205 text_start
= addr
- image
->sym_vvar_start
;
206 current
->mm
->context
.vdso
= (void __user
*)text_start
;
209 * MAYWRITE to allow gdb to COW and set breakpoints
211 vma
= _install_special_mapping(mm
,
215 VM_MAYREAD
|VM_MAYWRITE
|VM_MAYEXEC
,
223 vma
= _install_special_mapping(mm
,
225 -image
->sym_vvar_start
,
231 do_munmap(mm
, text_start
, image
->size
, NULL
);
236 current
->mm
->context
.vdso
= NULL
;
238 up_write(&mm
->mmap_sem
);
242 int arch_setup_additional_pages(struct linux_binprm
*bprm
, int uses_interp
)
248 #if defined CONFIG_COMPAT
249 if (!(is_32bit_task()))
250 return map_vdso(&vdso_image_64_builtin
, &vdso_mapping64
);
252 return map_vdso(&vdso_image_32_builtin
, &vdso_mapping32
);
254 return map_vdso(&vdso_image_64_builtin
, &vdso_mapping64
);
259 static __init
int vdso_setup(char *s
)
264 err
= kstrtoul(s
, 10, &val
);
270 __setup("vdso=", vdso_setup
);