arch/x86/vdso/vma.c

/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif
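
/*
 * One-time setup of a vDSO image: populate the page array backing the
 * image's text mapping from its kernel-virtual data, then patch the
 * image's alternative instructions for the features of the running CPU.
 */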
void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->text_mapping.pages[i] =
			virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

#if defined(CONFIG_X86_64)
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized offset.
 * This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack top.
 * This doesn't give that many random bits.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * page-align it here so that get_unmapped_area doesn't
	 * align it wrongfully again to the next page. addr can come in 4K
	 * unaligned here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
}
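
/*
 * A worked example of the calculation above, assuming 4K pages and
 * PTRS_PER_PTE == 512 as on x86-64: the random offset is 0..511 pages,
 * so the candidate address lies at most ~2MB above the stack top, and
 * the clamp to the PMD boundary keeps it covered by the same last-level
 * page table as the stack. PAGE_ALIGN() then drops any sub-page bits
 * that stack start randomization may have left in 'start'.
 */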

static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr;
	int ret = 0;
	static struct page *no_pages[] = {NULL};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.pages = no_pages,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->sym_end_mapping);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void __user *)addr;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       addr,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &image->text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr + image->size,
				       image->sym_end_mapping - image->size,
				       VM_READ,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      addr + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

#ifdef CONFIG_HPET_TIMER
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
			addr + image->sym_hpet_page,
			hpet_address >> PAGE_SHIFT,
			PAGE_SIZE,
			pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}
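
/*
 * The resulting layout, as built above: the vdso text occupies
 * [addr, addr + image->size), followed by the "[vvar]" mapping up to
 * addr + image->sym_end_mapping, into which the shared vvar page (and,
 * when configured, the HPET register page) are remapped read-only.
 */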

#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
#endif
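
/*
 * arch_setup_additional_pages() is the arch hook the ELF loader calls at
 * exec time; it decides which vdso image, if any, to map into the new
 * process.
 */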

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif

	return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif
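
/*
 * A minimal userspace sketch (not part of this file) of how the mapping
 * installed above is consumed: the loader publishes the vdso base to new
 * processes via the AT_SYSINFO_EHDR entry of the auxiliary vector.
 * Assumes a libc that provides getauxval(), e.g. glibc >= 2.16.
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>
 *
 *	int main(void)
 *	{
 *		// Base of the vdso text mapping created by map_vdso()
 *		unsigned long base = getauxval(AT_SYSINFO_EHDR);
 *
 *		printf("vDSO mapped at %#lx\n", base);
 *		return 0;
 *	}
 */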

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif
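
/*
 * Example: booting with "vdso=0" on the kernel command line sets
 * vdso64_enabled to 0, so 64-bit processes get no vdso mapping; the
 * default of 1 maps it as above. (The 32-bit vdso is controlled by a
 * separate vdso32= parameter, handled elsewhere.)
 */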