/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <asm/mshyperv.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif
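
/*
 * Called at boot for each vdso image: sanity-check the blob size and
 * apply instruction alternatives to the kernel-side copy that
 * userspace will later map.
 */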
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;
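
/*
 * Fault handler for the [vdso] mapping: hand back the page of the
 * kernel-side image copy that corresponds to the faulting offset.
 */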
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address - see do_fast_syscall_32 */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}
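
/*
 * Fault handler for the [vvar] mapping: populate the data pages that
 * sit below the vDSO text (the vvar page, plus the pvclock/hvclock
 * pages when the image provides them and the corresponding clocksource
 * has been used).
 */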
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, vmf->address,
				__pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				vmf->address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

		if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
			ret = vm_insert_pfn(vma, vmf->address,
					    vmalloc_to_pfn(tsc_pg));
	}
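
	/*
	 * vm_insert_pfn() returns -EBUSY when the pfn is already mapped,
	 * e.g. because a concurrent fault won the race; treat that as
	 * success since the page table entry is in place either way.
	 */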
	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image	- blob to map
 * @addr	- request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
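
	/*
	 * sym_vvar_start is negative, so the vvar area occupies
	 * [addr, text_start) immediately below the vDSO text, which
	 * starts at text_start and spans image->size bytes.
	 */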

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
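/*
 * (Roughly speaking, the bias arises because the candidate range always
 * ends just below a PMD boundary: addresses near that boundary are
 * reachable from any stack placement, while lower addresses are only
 * reachable when the randomized stack happens to start near them.)
 */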
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				       image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif
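
/*
 * Map a specific vdso blob at a caller-chosen address.  Used by the
 * ARCH_MAP_VDSO_* arch_prctl() path rather than by normal exec setup.
 */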
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if we have already mapped a vdso blob and fail if so,
	 * to keep userspace from abusing install_special_mapping(),
	 * which may not do accounting and rlimit checks correctly.
	 * We could search for a vma near context.vdso, but this is a
	 * slowpath, so explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
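/*
 * "vdso=" on the kernel command line controls the 64-bit vDSO:
 * vdso=0 disables it, vdso=1 (the default) enables it.
 */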
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
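	/*
	 * For reference, the userspace side is roughly (a sketch of the
	 * vDSO getcpu path, not a literal copy of it):
	 *
	 *	unsigned int p;
	 *	asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	 *	cpu  = p & 0xfff;	twelve low bits = CPU number
	 *	node = p >> 12;		remaining bits  = node
	 *
	 * i.e. LSL reads back the segment limit encoded just below.
	 */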
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit1 = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* notifier priority > KVM */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "x86/vdso/vma:online", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */