// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
#endif /* CONFIG_COMPAT_VDSO */

/* vdso_lookup arch_index */
enum arch_vdso_type {
	ARM64_VDSO = 0,
#ifdef CONFIG_COMPAT_VDSO
	ARM64_VDSO32 = 1,
#endif /* CONFIG_COMPAT_VDSO */
};
#ifdef CONFIG_COMPAT_VDSO
#define VDSO_TYPES		(ARM64_VDSO32 + 1)
#else
#define VDSO_TYPES		(ARM64_VDSO + 1)
#endif /* CONFIG_COMPAT_VDSO */
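
/*
 * Per-ABI vDSO descriptor: the code image boundaries plus the special
 * mappings used for the data page ("vvar") and the code pages ("vdso").
 */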
struct __vdso_abi {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = {
	{
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	{
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
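
/*
 * mremap() callback for the vDSO code mapping: refuse any change of
 * size and cache the new base address in the mm context.
 */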
static int __vdso_remap(enum arch_vdso_type arch_index,
			const struct vm_special_mapping *sm,
			struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
				  vdso_lookup[arch_index].vdso_code_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}
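
/*
 * Validate the vDSO image and build the pagelist shared by the data
 * and code special mappings of the given ABI.
 */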
static int __vdso_init(enum arch_vdso_type arch_index)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_lookup[arch_index].vdso_pages = (
			vdso_lookup[arch_index].vdso_code_end -
			vdso_lookup[arch_index].vdso_code_start) >>
			PAGE_SHIFT;

	/* Allocate the vDSO pagelist, plus a page for the data. */
	vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO data page. */
	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);

	for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

	vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
	vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];

	return 0;
}
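
/*
 * Map the vDSO data page followed by the vDSO code pages into @mm.
 * The data page is mapped read-only; the code pages are executable.
 */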
static int __setup_additional_pages(enum arch_vdso_type arch_index,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
				       VM_READ|VM_MAYREAD,
				       vdso_lookup[arch_index].dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_lookup[arch_index].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(ARM64_VDSO32, sm, new_vma);
}
#endif /* CONFIG_COMPAT_VDSO */

/*
 * aarch32_vdso_pages:
 * 0 - kuser helpers
 * 1 - sigreturn code
 * or (CONFIG_COMPAT_VDSO):
 * 0 - kuser helpers
 * 1 - vdso data
 * 2 - vdso code
 */
enum aarch32_vdso_pages {
	C_VECTORS = 0,
#ifdef CONFIG_COMPAT_VDSO
	C_VVAR,
	C_VDSO,
#else
	C_SIGPAGE,
#endif /* CONFIG_COMPAT_VDSO */
};
#ifdef CONFIG_COMPAT_VDSO
#define C_PAGES	(C_VDSO + 1)
#else
#define C_PAGES	(C_SIGPAGE + 1)
#endif /* CONFIG_COMPAT_VDSO */
static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
	{
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vdso_pages[C_VECTORS],
	},
#ifdef CONFIG_COMPAT_VDSO
	{
		.name = "[vvar]",
	},
	{
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
#else
	{
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_vdso_pages[C_SIGPAGE],
	},
#endif /* CONFIG_COMPAT_VDSO */
};
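
/*
 * Allocate the [vectors] page and copy the kuser helpers into its last
 * kuser_sz bytes, so that they sit at the offsets the AArch32 kuser
 * helper ABI expects.
 */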
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
	return 0;
}
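
/*
 * Allocate the AArch32 vDSO pages: the full compat vDSO when
 * CONFIG_COMPAT_VDSO is enabled, otherwise just the sigreturn
 * trampoline page, plus the kuser helper page in both cases.
 */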
#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
	int ret;

	vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR];
	vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO];

	ret = __vdso_init(ARM64_VDSO32);
	if (ret)
		return ret;

	ret = aarch32_alloc_kuser_vdso_page();
	if (ret) {
		unsigned long c_vvar =
			(unsigned long)page_to_virt(aarch32_vdso_pages[C_VVAR]);
		unsigned long c_vdso =
			(unsigned long)page_to_virt(aarch32_vdso_pages[C_VDSO]);

		free_page(c_vvar);
		free_page(c_vdso);
	}

	return ret;
}
#else
static int __aarch32_alloc_vdso_pages(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;
	int ret;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
	flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);

	ret = aarch32_alloc_kuser_vdso_page();
	if (ret)
		free_page(sigpage);

	return ret;
}
#endif /* CONFIG_COMPAT_VDSO */

static int __init aarch32_alloc_vdso_pages(void)
{
	return __aarch32_alloc_vdso_pages();
}
arch_initcall(aarch32_alloc_vdso_pages);
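
/* Map the kuser helper page at the AArch32 vectors base, if enabled. */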
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

#ifndef CONFIG_COMPAT_VDSO
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.vdso = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}
#endif /* !CONFIG_COMPAT_VDSO */

int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

#ifdef CONFIG_COMPAT_VDSO
	ret = __setup_additional_pages(ARM64_VDSO32,
				       mm,
				       bprm,
				       uses_interp);
#else
	ret = aarch32_sigreturn_setup(mm);
#endif /* CONFIG_COMPAT_VDSO */

out:
	up_write(&mm->mmap_sem);
	return ret;
}
#endif /* CONFIG_COMPAT */

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(ARM64_VDSO, sm, new_vma);
}

/*
 * aarch64_vdso_pages:
 * 0 - vvar
 * 1 - vdso
 */
enum aarch64_vdso_pages {
	A_VVAR = 0,
	A_VDSO,
};
#define A_PAGES (A_VDSO + 1)
static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
	{
		.name	= "[vvar]",
	},
	{
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int __init vdso_init(void)
{
	vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR];
	vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO];

	return __vdso_init(ARM64_VDSO);
}
arch_initcall(vdso_init);
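
/*
 * Map the native (AArch64) vDSO into a new process's address space;
 * called by the ELF loader when setting up additional pages.
 */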
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = __setup_additional_pages(ARM64_VDSO,
				       mm,
				       bprm,
				       uses_interp);

	up_write(&mm->mmap_sem);

	return ret;
}