// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>
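
/*
 * Re-include asm/vvar.h below with EMIT_VVAR() redefined so that every
 * vvar symbol expands to a byte-offset constant (e.g. _vdso_data_offset)
 * into the vvar page.
 */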
#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset)	\
	const size_t name ## _offset = offset;
#include <asm/vvar.h>

struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif
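
/* Sanity-check a vDSO image and patch in any alternative instructions. */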
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;
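
/*
 * Fault handler for the [vdso] mapping: hand back the vDSO image page
 * that backs the faulting offset.
 */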
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fixing userspace landing - look at do_fast_syscall_32 */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}
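
/*
 * mremap() support keeps context.vdso pointing at the new location
 * (needed, e.g., when a checkpoint/restore tool relocates the vdso) and
 * fixes up a 32-bit task that was stopped on the int80 landing pad.
 */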
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}
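
/* The vvar area may be moved as a whole, but never resized. */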
static int vvar_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = new_vma->vm_mm->context.vdso_image;
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != -image->sym_vvar_start)
		return -EINVAL;

	return 0;
}
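
/*
 * Return the task's time-namespace vvar page; only meaningful for
 * faults on the task's own mm (see the comment in the body for why
 * remote accesses cannot get here).
 */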
#ifdef CONFIG_TIME_NS
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops().
	 * For more details check_vma_flags() and __access_remote_vm()
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}

/*
 * The vvar page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, &vvar_mapping))
			zap_page_range(vma, vma->vm_start, size);
	}

	up_write(&mm->mmap_sem);
	return 0;
}
#else
static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif
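
/*
 * Fault handler for the [vvar] mapping: maps the vvar page itself plus,
 * when the image and the current clocksource provide them, the pvclock,
 * hvclock and timens pages.
 */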
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	unsigned long pfn;
	long sym_offset;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;

		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the sym_vvar_page offset and
		 * the real VVAR page is mapped with the sym_timens_page
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (timens_page) {
			unsigned long addr;
			vm_fault_t err;

			/*
			 * Optimization: inside time namespace pre-fault
			 * VVAR page too. As on timens page there are only
			 * offsets for clocks on VVAR, it'll be faulted
			 * shortly by VDSO code.
			 */
			addr = vmf->address + (image->sym_timens_page - sym_offset);
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;

			pfn = page_to_pfn(timens_page);
		}

		return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

		if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address,
					virt_to_phys(tsc_pg) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_timens_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		if (!timens_page)
			return VM_FAULT_SIGBUS;

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
		return vmf_insert_pfn(vma, vmf->address, pfn);
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
	.mremap = vvar_mremap,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image	- blob to map
 * @addr	- request a specific address (zero to map at free addr)
 */
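/*
 * Note: sym_vvar_start is negative, so the vvar area lands at the low
 * end of the allocation and the vDSO text is mapped directly above it
 * at text_start = addr - image->sym_vvar_start.
 */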
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
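/*
 * Worked example (4 KiB pages, 2 MiB PMDs, hypothetical numbers): with
 * start = 0x7ffd12340000 and len = 3 pages, end becomes the highest
 * page-aligned start that still keeps start + len at or below the next
 * PMD boundary, and the final start page is drawn uniformly from
 * [start, end] in page-sized steps.
 */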
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}
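
/* Pick a randomized address above the stack and map the vdso there. */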
static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				       image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif
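
/*
 * Map a given vdso image exactly once per mm, failing with -EEXIST if a
 * vdso or vvar mapping already exists (used, e.g., by the
 * ARCH_MAP_VDSO_* arch_prctl() paths).
 */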
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if we have already mapped vdso blob - fail to prevent
	 * userspace from abusing install_special_mapping, which may
	 * not do accounting and rlimit right.
	 * We could search vma near context.vdso, but it's a slowpath,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif
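
/* Called by the ELF loader at exec time to map the vdso and vvar areas. */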
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
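
/* Apply alternatives to the vdso images once at boot. */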
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */