arch/x86/entry/vdso/vma.c

/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

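/*
 * Fault handler for the "[vdso]" text mapping: pages are served straight
 * from the in-kernel vdso image blob that this mm is using.
 */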
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

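/*
 * If a 32-bit task is stopped inside a fast syscall, its saved IP points at
 * the int80 landing pad inside the old vdso mapping (see do_fast_syscall_32).
 * When the vdso is moved with mremap() - e.g. by checkpoint/restore - that IP
 * has to be redirected into the new mapping, which is what this helper does.
 */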
static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fixing userspace landing - look at do_fast_syscall_32 */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

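/*
 * Fault handler for the "[vvar]" mapping: unlike the vdso text, these pages
 * are not part of the image blob.  The shared vvar page (and, when pvclock is
 * in use, the pvclock time info page) is inserted by PFN into this VM_PFNMAP
 * mapping.
 */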
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				(unsigned long)vmf->virtual_address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

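/*
 * The descriptors below give the two special mappings their "[vdso]" and
 * "[vvar]" names in /proc/<pid>/maps and route faults (and, for the vdso
 * text, mremap) to the handlers above.
 */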
static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image	- blob to map
 * @addr	- request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size);
	}

up_fail:
	if (ret) {
		current->mm->context.vdso = NULL;
		current->mm->context.vdso_image = NULL;
	}

	up_write(&mm->mmap_sem);
	return ret;
}

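/*
 * Note on layout: sym_vvar_start is negative, so the single reservation made
 * in map_vdso() covers both areas: the vvar pages sit at [addr, text_start)
 * immediately below the vdso text at [text_start, text_start + image->size).
 * context.vdso always points at the text.
 */
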
#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

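/*
 * map_vdso_randomized() below maps the whole vvar+text blob
 * (image->size - image->sym_vvar_start bytes) at a vdso_addr()-chosen slot.
 * Illustrative example (assuming 4 KiB pages and 2 MiB PMDs): for a stack top
 * of 0x7ffd1234f000 and a 16 KiB blob, start stays 0x7ffd1234f000, end
 * becomes 0x7ffd12400000 - 16 KiB = 0x7ffd123fc000, and one of the
 * ((end - start) >> PAGE_SHIFT) + 1 page-aligned slots in between is picked
 * at random, keeping the mapping within the PMD region that already backs
 * the stack top.
 */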
static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				       image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

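/*
 * Map the vdso at a caller-chosen address, failing if one is already mapped.
 * This is the entry point used by the ARCH_MAP_VDSO_* arch_prctl() path
 * (e.g. for checkpoint/restore), as opposed to the normal mapping done at
 * execve() time below.
 */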
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * abuse of install_special_mapping() from userspace, which may not
	 * do accounting and rlimits right.
	 * We could search the VMA near context.vdso, but it's a slow path,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

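/*
 * arch_setup_additional_pages() is called from the ELF loader at execve()
 * time; the chosen vdso base is advertised to userspace via the
 * AT_SYSINFO_EHDR auxv entry.  A minimal userspace sketch for locating the
 * mapping (illustrative only, not part of this file):
 *
 *	#include <sys/auxv.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
 *
 *		printf("vdso ELF header at %#lx\n", vdso);
 *		return 0;
 *	}
 */
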
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

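/*
 * The values are also written to TSC_AUX for RDTSCP users above; the vdso's
 * __vdso_getcpu() recovers them from the segment limit programmed here,
 * roughly as follows (illustrative sketch of the LSL-based path):
 *
 *	unsigned int p;
 *
 *	asm volatile("lsl %1, %0" : "=r" (p) : "r" (__PER_CPU_SEG));
 *	cpu  = p & 0xfff;	// low 12 bits
 *	node = p >> 12;		// upper 8 bits
 */
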
static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* notifier priority > KVM */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "AP_X86_VDSO_VMA_ONLINE",
				 vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */