/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
#endif /* CONFIG_64BIT */

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;
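
/*
 * Parse the "vdso=" kernel command line parameter: "vdso=on",
 * "vdso=off", or any numeric value (a nonzero value enables the
 * vdso mapping, zero disables it).
 */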
static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = kstrtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page.
 */
static void vdso_init_data(struct vdso_data *vd)
{
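	/*
	 * Facility bit 31 is the ECTG (extract CPU time) facility;
	 * when it is available the vdso uses it for the thread
	 * CPU-time clock fast path.
	 */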
	vd->ectg_available = test_facility(31);
}

#ifdef CONFIG_64BIT
/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2

int vdso_alloc_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;
	int i;

	lowcore->vdso_per_cpu_data = __LC_PASTE;

	if (!vdso_enabled)
		return 0;

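	/*
	 * Build a small private address space for the per-cpu vdso
	 * data: a segment table (four pages, SEGMENT_ORDER = 2), a
	 * page table, and the data page itself.
	 */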
	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;

	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
	clear_table((unsigned long *) page_table, _PAGE_INVALID,
		    256*sizeof(unsigned long));

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
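
	/*
	 * Wire the segment table into an ASTE and hook that ASTE into
	 * the PASTE array in the lowcore, so that the vdso can reach
	 * the per-cpu data page via access-register mode.
	 */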
	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
	aste = psal + 32;

	for (i = 4; i < 32; i += 4)
		psal[i] = 0x80000000;

	lowcore->paste[4] = (u32)(addr_t) psal;
	psal[0] = 0x02000000;
	psal[2] = (u32)(addr_t) aste;
	*(unsigned long *) (aste + 2) = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	aste[4] = (u32)(addr_t) psal;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

void vdso_free_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;

	if (!vdso_enabled)
		return;

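	/*
	 * Walk back from the PASTE entry to recover the addresses of
	 * the tables set up in vdso_alloc_per_cpu().
	 */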
	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}
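
/*
 * Point control register 5 at the PASTE array in the lowcore. The
 * lowcore is reachable at absolute address zero via the prefix
 * mechanism, so the field offset is also its address.
 */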
static void vdso_init_cr5(void)
{
	unsigned long cr5;

	if (!vdso_enabled)
		return;
	cr5 = offsetof(struct _lowcore, paste);
	__ctl_load(cr5, 5, 5);
}
#endif /* CONFIG_64BIT */

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

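	/*
	 * Select the vdso image for this task: the 64-bit one by
	 * default, the 32-bit one for compat tasks and on 32-bit
	 * kernels.
	 */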
#ifdef CONFIG_64BIT
	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif
#else
	vdso_pagelist = vdso32_pagelist;
	vdso_pages = vdso32_pages;
#endif
	/*
	 * The vDSO image has a problem and was disabled; just don't
	 * "enable" it for this process.
	 */
	if (vdso_pages == 0)
		return 0;

	current->mm->context.vdso_base = 0;

	/*
	 * Pick a base address for the vDSO in process space. We let
	 * get_unmapped_area() choose a suitable spot, so the mapping
	 * may end up anywhere in the unmapped area of the process.
	 */
	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	current->mm->context.vdso_base = vdso_base;

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write to those pages. gdb can break that with
	 * the ptrace interface and thus trigger COW on those pages, but
	 * it's then your responsibility never to do that on the "data"
	 * page of the vDSO, or you'll stop getting kernel updates and
	 * your nice userland gettimeofday will be totally dead. It's
	 * fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				     vdso_pagelist);
	if (rc)
		current->mm->context.vdso_base = 0;
out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
		return "[vdso]";
	return NULL;
}

static int __init vdso_init(void)
{
	int i;

	if (!vdso_enabled)
		return 0;
	vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
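	/* the final +1 is the slot for the vdso data page, filled below */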

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

#ifdef CONFIG_64BIT
	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
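	/* again, the final +1 is the slot for the vdso data page */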

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();
	vdso_init_cr5();
#endif /* CONFIG_64BIT */
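
	/* take an extra reference on the data page shared by all pagelists */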
	get_page(virt_to_page(vdso_data));

	smp_wmb();

	return 0;
}
early_initcall(vdso_init);