// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>
#include <vdso/vsyscall.h>

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

enum rv_vdso_map {
	RV_VDSO_MAP_VVAR,
	RV_VDSO_MAP_VDSO,
};

#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

struct __vdso_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data mapping */
	struct vm_special_mapping *dm;
	/* Code mapping */
	struct vm_special_mapping *cm;
};

static struct __vdso_info vdso_info;
#ifdef CONFIG_COMPAT
static struct __vdso_info compat_vdso_info;
#endif

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

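/*
 * Validate the vDSO image embedded in the kernel and build the page array
 * that backs its code mapping.
 */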
static void __init __vdso_init(struct __vdso_info *vdso_info)
{
	unsigned int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
		panic("vDSO is not a valid ELF object!\n");

	vdso_info->vdso_pages = (
		vdso_info->vdso_code_end -
		vdso_info->vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info->vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		panic("vDSO kcalloc failed!\n");

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info->vdso_code_start);

	for (i = 0; i < vdso_info->vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info->cm->pages = vdso_pagelist;
}

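/*
 * arch_get_vdso_data() lets the generic time-namespace code locate the
 * vdso_data structure within a vvar page.
 */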
#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);

	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, vdso_info.dm))
			zap_vma_pages(vma);
#ifdef CONFIG_COMPAT
		if (vma_is_special_mapping(vma, compat_vdso_info.dm))
			zap_vma_pages(vma);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}
#endif /* CONFIG_TIME_NS */

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

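/*
 * Special-mapping descriptors for the data ([vvar]) and code ([vdso])
 * regions of the native vDSO.
 */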
static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name   = "[vvar]",
		.fault  = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name   = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info vdso_info __ro_after_init = {
	.name = "vdso",
	.vdso_code_start = vdso_start,
	.vdso_code_end = vdso_end,
	.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
};

#ifdef CONFIG_COMPAT
static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name   = "[vvar]",
		.fault  = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name   = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info compat_vdso_info __ro_after_init = {
	.name = "compat_vdso",
	.vdso_code_start = compat_vdso_start,
	.vdso_code_end = compat_vdso_end,
	.dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
};
#endif

static int __init vdso_init(void)
{
	__vdso_init(&vdso_info);
#ifdef CONFIG_COMPAT
	__vdso_init(&compat_vdso_info);
#endif

	return 0;
}
arch_initcall(vdso_init);

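/*
 * Reserve one contiguous area for the whole vDSO: the vvar pages come first,
 * immediately followed by the vDSO text.
 */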
static int __setup_additional_pages(struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp,
				    struct __vdso_info *vdso_info)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
		(VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += VVAR_SIZE;
	mm->context.vdso = (void *)vdso_base;

	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_info->cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

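/* Called from the ELF loader to map the compat vDSO into a new process. */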
#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp,
				       &compat_vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
#endif

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
	mmap_write_unlock(mm);

	return ret;
}