/* arch/sparc/vdso/vma.c */
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */

/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>

unsigned int __read_mostly vdso_enabled = 1;

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]"
};

#ifdef CONFIG_SPARC64
static struct vm_special_mapping vdso_mapping64 = {
	.name = "[vdso]"
};
#endif

#ifdef CONFIG_COMPAT
static struct vm_special_mapping vdso_mapping32 = {
	.name = "[vdso]"
};
#endif
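/*
 * The .name fields above are what these mappings show up as in
 * /proc/<pid>/maps, i.e. "[vdso]" and "[vvar]".
 */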
struct vvar_data *vvar_data;

#define SAVE_INSTR_SIZE 4
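/*
 * One SPARC instruction is 4 bytes. The %tick patch loop in
 * init_vdso_image() starts writing at sym_vread_tick + SAVE_INSTR_SIZE,
 * presumably to skip over the save instruction at the top of the
 * vread_tick function.
 */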
/*
 * Allocate pages for the vdso and vvar, and copy in the vdso text from the
 * kernel image.
 */
int __init init_vdso_image(const struct vdso_image *image,
			   struct vm_special_mapping *vdso_mapping)
{
	int i;
	struct page *dp, **dpp = NULL;
	int dnpages = 0;
	struct page *cp, **cpp = NULL;
	int cnpages = (image->size) / PAGE_SIZE;

	/*
	 * First, the vdso text. This is initialized data, an integral
	 * number of pages long.
	 */
	if (WARN_ON(image->size % PAGE_SIZE != 0))
		goto oom;

	cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
	vdso_mapping->pages = cpp;

	if (!cpp)
		goto oom;

	if (vdso_fix_stick) {
		/*
		 * If the system uses %tick instead of %stick, patch the
		 * VDSO with instructions reading %tick instead of %stick.
		 */
		unsigned int j, k = SAVE_INSTR_SIZE;
		unsigned char *data = image->data;

		for (j = image->sym_vread_tick_patch_start;
		     j < image->sym_vread_tick_patch_end; j++) {
			data[image->sym_vread_tick + k] = data[j];
			k++;
		}
	}

	for (i = 0; i < cnpages; i++) {
		cp = alloc_page(GFP_KERNEL);
		if (!cp)
			goto oom;
		cpp[i] = cp;
		copy_page(page_address(cp), image->data + i * PAGE_SIZE);
	}
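	/*
	 * Note: the %tick patching above is applied to the in-kernel
	 * image before the pages are copied here, so every process maps
	 * the already-patched text.
	 */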
	/*
	 * Now the vvar page. This is uninitialized data.
	 */
	if (vvar_data == NULL) {
		dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
		if (WARN_ON(dnpages != 1))
			goto oom;
		dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
		vvar_mapping.pages = dpp;

		if (!dpp)
			goto oom;

		dp = alloc_page(GFP_KERNEL);
		if (!dp)
			goto oom;

		dpp[0] = dp;
		vvar_data = page_address(dp);
		memset(vvar_data, 0, PAGE_SIZE);

		vvar_data->seq = 0;
	}
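	/*
	 * vvar_data->seq starts out even (zero); presumably it acts as
	 * the seqcount that userspace readers of the vvar page check
	 * before and after reading, as in other vDSO implementations.
	 */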
	return 0;

oom:
	if (cpp != NULL) {
		for (i = 0; i < cnpages; i++) {
			if (cpp[i] != NULL)
				__free_page(cpp[i]);
		}
		kfree(cpp);
		vdso_mapping->pages = NULL;
	}

	if (dpp != NULL) {
		for (i = 0; i < dnpages; i++) {
			if (dpp[i] != NULL)
				__free_page(dpp[i]);
		}
		kfree(dpp);
		vvar_mapping.pages = NULL;
	}

	pr_warn("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
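/*
 * An allocation failure above is not fatal to boot: vdso_enabled is
 * cleared, so arch_setup_additional_pages() simply maps no vDSO into
 * new processes.
 */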
static int __init init_vdso(void)
{
	int err = 0;

#ifdef CONFIG_SPARC64
	err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64);
	if (err)
		return err;
#endif

#ifdef CONFIG_COMPAT
	err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32);
#endif

	return err;
}
subsys_initcall(init_vdso);
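/*
 * As a subsys_initcall, this runs during boot, before the first user
 * process is exec'd, so the page arrays are populated by the time
 * map_vdso() can be reached.
 */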
struct linux_binprm;

/* Shuffle the vdso up a bit, randomly. */
static unsigned long vdso_addr(unsigned long start, unsigned int len)
{
	unsigned int offset;

	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	return start + (offset << PAGE_SHIFT);
}
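/*
 * Masking with PTRS_PER_PTE - 1 limits the random slide to one page
 * table's worth of pages above the hint; the len argument is unused.
 */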
static int map_vdso(const struct vdso_image *image,
		    struct vm_special_mapping *vdso_mapping)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start, addr = 0;
	int ret = 0;

	down_write(&mm->mmap_sem);

	/*
	 * First, get an unmapped region: then randomize it, and make sure
	 * that region is free.
	 */
	if (current->flags & PF_RANDOMIZE) {
		addr = get_unmapped_area(NULL, 0,
					 image->size - image->sym_vvar_start,
					 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
		addr = vdso_addr(addr, image->size - image->sym_vvar_start);
	}

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
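	/*
	 * sym_vvar_start is the (negative) offset of the vvar area from
	 * the vdso text, so the vvar pages land at addr and the text sits
	 * just above them.
	 */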
	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}
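	/*
	 * Now the vvar pages, at the bottom of the reservation. Unlike the
	 * text, no VM_MAYWRITE: the vvar area can never be made writable
	 * from userspace, not even by a debugger.
	 */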
	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}
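/*
 * Called by the ELF loader on exec to map the vDSO into the new
 * process; 32-bit tasks get the compat image when CONFIG_COMPAT is
 * enabled.
 */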
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso_enabled)
		return 0;

#if defined CONFIG_COMPAT
	if (!(is_32bit_task()))
		return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
	else
		return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
#else
	return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
#endif
}
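/*
 * "vdso=" boot parameter: vdso=0 disables the vDSO, any non-zero value
 * (the default is 1) enables it.
 */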
static __init int vdso_setup(char *s)
{
	int err;
	unsigned long val;

	err = kstrtoul(s, 10, &val);
	if (err)
		return err;
	vdso_enabled = val;
	return 0;
}
__setup("vdso=", vdso_setup);