/* arch/x86/vdso/vma.c */
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
17 #include "vextern.h" /* Just for VMAGIC. */
18 #undef VEXTERN

unsigned int __read_mostly vdso_enabled = 1;
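
/*
 * The vDSO is built as a separate shared object and embedded in the
 * kernel image; vdso_start/vdso_end delimit that blob (they come from
 * the assembly wrapper that .incbin's vdso.so).
 */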
extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

static struct page **vdso_pages;
static unsigned vdso_size;
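
/*
 * Sanity-check one vDSO variable slot before it is patched: at build
 * time each slot holds the VMAGIC cookie, so a slot without VMAGIC
 * means the image and the kernel disagree about the layout; in that
 * case disable the vDSO rather than corrupt it.
 */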
static inline void *var_ref(void *p, char *name)
{
	if (*(void **)p != (void *)VMAGIC) {
		printk(KERN_WARNING "VDSO: variable %s broken\n", name);
		vdso_enabled = 0;
	}
	return p;
}
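
/*
 * One-time setup: copy the vDSO image into freshly allocated pages
 * (shared by all processes), verify it, and patch the VEXTERN variable
 * slots so the image can find the kernel-side vsyscall data.
 */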
static int __init init_vdso_vars(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;
	char *vbase;

	vdso_size = npages << PAGE_SHIFT;
	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
	if (!vdso_pages)
		goto oom;
	for (i = 0; i < npages; i++) {
		struct page *p;
		p = alloc_page(GFP_KERNEL);
		if (!p)
			goto oom;
		vdso_pages[i] = p;
		copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
	}
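
	/*
	 * Map the pages contiguously in kernel virtual space so the
	 * image can be inspected and patched through a flat pointer.
	 */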
	vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
	if (!vbase)
		goto oom;

	if (memcmp(vbase, "\177ELF", 4)) {
		printk(KERN_ERR "VDSO: I'm broken; not ELF\n");
		vdso_enabled = 0;
	}
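
	/*
	 * Re-expand VEXTERN from vextern.h: for each exported variable x,
	 * look up its slot in the copied image via VDSO64_SYMBOL() and
	 * replace the VMAGIC cookie with the address of the kernel-side
	 * __x variable.
	 */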
#define VEXTERN(x) \
	*(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
#include "vextern.h"
#undef VEXTERN
	return 0;

 oom:
	printk(KERN_ERR "Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
__initcall(init_vdso_vars);
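
/*
 * Only a pointer to struct linux_binprm is used below, so a forward
 * declaration is enough and avoids pulling in linux/binfmts.h.
 */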
struct linux_binprm;

/* Put the vdso above the (randomized) stack with another randomized
   offset.  This way there is no hole in the middle of the address
   space.  To save memory, keep it within the same page-table page
   (one PMD region) as the stack top; this doesn't give that many
   random bits. */
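/* Example: with 4K pages PTRS_PER_PTE is 512, so the offset below
   covers at most 512 pages (2MB), i.e. about 9 bits of entropy. */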
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;
	return addr;
}

/* Set up a VMA at program startup for the vsyscall page.
   Not called for compat tasks. */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, vdso_size);
	addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	mm->context.vdso = (void *)addr;
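
	/*
	 * The mapping is read+execute, but VM_MAYWRITE is kept so a
	 * debugger can still plant breakpoints in it through ptrace;
	 * VM_ALWAYSDUMP makes it show up in core dumps.
	 */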
	ret = install_special_mapping(mm, addr, vdso_size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
				      vdso_pages);
	if (ret) {
		mm->context.vdso = NULL;
		goto up_fail;
	}
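
	/* Success falls through as well: ret is 0 here. */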
up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}
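
/* "vdso=0" on the kernel command line disables the vDSO entirely;
   any nonzero value (the default is 1) enables it. */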
static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);