/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <asm/mshyperv.h>
#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif
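/*
 * Called once at boot for each vDSO image: sanity-check the blob and
 * patch its alternatives for the running CPU, just as the kernel's own
 * text is patched.
 */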
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;
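/*
 * Fault handler for the vDSO text mapping: pages are not inserted up
 * front, they are looked up on demand in the kernel-side image blob.
 */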
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}
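/*
 * If a 32-bit task mremap()s its vDSO while parked on the syscall
 * landing pad, the saved user IP still points into the old mapping;
 * rewrite it so the return to userspace lands in the moved vDSO.
 */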
static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address - see do_fast_syscall_32(). */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, vmf->address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn_prot(
				vma,
				vmf->address,
				__pa(pvti) >> PAGE_SHIFT,
				pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

		if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
			ret = vm_insert_pfn(vma, vmf->address,
					    vmalloc_to_pfn(tsc_pg));
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}
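/* The names below are what show up in /proc/<pid>/maps. */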
static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};
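/*
 * Note that image->sym_vvar_start is negative: the vvar pages sit
 * immediately below the vdso text, so a single get_unmapped_area()
 * request below covers both mappings.
 */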
/*
 * Add vdso and vvar mappings to current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at a free address)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}
#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}
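/* Used at execve() time: map the vDSO at a randomized address near the stack. */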
static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				       image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif
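/*
 * Map the vDSO at most once per mm. This is the entry point used by
 * the ARCH_MAP_VDSO_* prctls, so a process cannot stack up mappings.
 */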
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if the vdso blob is already mapped and fail if so, to
	 * prevent userspace from abusing install_special_mapping(), which
	 * may not handle accounting and rlimits correctly.
	 * We could search the VMA near context.vdso, but this is a slow
	 * path, so explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}
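/*
 * vdso32_enabled is driven by the "vdso32=" boot parameter and the
 * abi.vsyscall32 sysctl (see vdso32-setup.c); anything but 1 disables
 * the 32-bit vDSO.
 */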
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}
#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
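/*
 * Set up the per-CPU GDT entry (and, when RDTSCP is available, the
 * TSC_AUX MSR) that lets user-space vgetcpu() recover the CPU and
 * node numbers without entering the kernel.
 */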
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit1 = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
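/* CPU-hotplug callback: initialize vgetcpu state on the incoming CPU. */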
static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}
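/* Boot-time setup: patch the vDSO images and register the hotplug callback. */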
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* notifier priority > KVM */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "x86/vdso/vma:online", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */