// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>
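
/*
 * The 64-bit vDSO image is linked into the kernel between vdso64_start
 * and vdso64_end; vdso_init() below turns it into a NULL-terminated
 * page list with the shared data page appended as the last entry.
 */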
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
                             struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page **vdso_pagelist;
        unsigned long vdso_pages;

        vdso_pagelist = vdso64_pagelist;
        vdso_pages = vdso64_pages;

        if (vmf->pgoff >= vdso_pages)
                return VM_FAULT_SIGBUS;

        vmf->page = vdso_pagelist[vmf->pgoff];
        get_page(vmf->page);
        return 0;
}
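
/*
 * Userspace may move the vDSO with mremap() (checkpoint/restore
 * tooling does this, for instance); track the new location so the
 * kernel still knows where the mapping lives.
 */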
static int vdso_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *vma)
{
        unsigned long vdso_pages;

        vdso_pages = vdso64_pages;

        if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (WARN_ON_ONCE(current->mm != vma->vm_mm))
                return -EFAULT;

        current->mm->context.vdso_base = vma->vm_start;
        return 0;
}

static const struct vm_special_mapping vdso_mapping = {
        .name = "[vdso]",
        .fault = vdso_fault,
        .mremap = vdso_mremap,
};
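
/*
 * "vdso=" kernel command line option; parsed with kstrtobool, so
 * booting with "vdso=0" (or "n"/"off") disables the vDSO mapping
 * for new processes, and "vdso=1" (the default) keeps it enabled.
 */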
static int __init vdso_setup(char *str)
{
        bool enabled;

        if (!kstrtobool(str, &enabled))
                vdso_enabled = enabled;
        return 1;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
static union {
        struct vdso_data data;
        u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
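
/*
 * The union pads vdso_data_store to exactly one page so that the
 * structure can be mapped into userspace as the final page of the
 * vDSO.
 */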

/*
 * Setup vdso data page.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
        vd->ectg_available = test_facility(31);
}
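
/*
 * Facility 31 is the extract-CPU-time facility (ECTG); the vDSO
 * can use ECTG, when available, for its CPU-time fast path.
 */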

/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER 2

/*
 * The initial vdso_data structure for the boot CPU. Eventually
 * it is replaced with a properly allocated structure in vdso_init.
 * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
 * pointer is required to be able to return from an interrupt or
 * program check. See the exit paths in entry.S.
 */
struct vdso_data boot_vdso_data __initdata;

void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
{
        lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
}

int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        struct vdso_per_cpu_data *vd;

        segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
        page_table = get_zeroed_page(GFP_KERNEL);
        page_frame = get_zeroed_page(GFP_KERNEL);
        if (!segment_table || !page_table || !page_frame)
                goto out;
        arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
        arch_set_page_dat(virt_to_page(page_table), 0);

        /* Initialize per-cpu vdso data page */
        vd = (struct vdso_per_cpu_data *) page_frame;
        vd->cpu_nr = lowcore->cpu_nr;
        vd->node_id = cpu_to_node(vd->cpu_nr);

        /* Set up page table for the vdso address space */
        memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
        memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

        *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
        *(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

        lowcore->vdso_asce = segment_table +
                _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
        lowcore->vdso_per_cpu_data = page_frame;

        return 0;

out:
        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
        return -ENOMEM;
}
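
/*
 * Undo vdso_alloc_per_cpu(): follow the segment table entry to the
 * page table and from there to the data page, then free all three
 * allocations.
 */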
void vdso_free_per_cpu(struct lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;

        segment_table = lowcore->vdso_asce & PAGE_MASK;
        page_table = *(unsigned long *) segment_table;
        page_frame = *(unsigned long *) page_table;

        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long vdso_pages;
        unsigned long vdso_base;
        int rc;

        if (!vdso_enabled)
                return 0;

        if (is_compat_task())
                return 0;

        vdso_pages = vdso64_pages;
        /*
         * vDSO has a problem and was disabled; just don't "enable" it
         * for the process.
         */
        if (vdso_pages == 0)
                return 0;

        /*
         * Pick a base address for the vDSO in process space. We try to
         * put it at vdso_base, which is the "natural" base for it, but
         * we might fail and end up putting it elsewhere.
         */
        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
        vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                rc = vdso_base;
                goto out_up;
        }

        /*
         * Our vma flags don't have VM_WRITE, so by default the process
         * isn't allowed to write to those pages.
         * gdb can break that via the ptrace interface and thus trigger
         * COW on those pages, but it's then your responsibility never
         * to do that on the "data" page of the vDSO, or you'll stop
         * getting kernel updates and your nice userland gettimeofday
         * will be totally dead. It's fine to use that for setting
         * breakpoints in the vDSO code pages though.
         */
        vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &vdso_mapping);
        if (IS_ERR(vma)) {
                rc = PTR_ERR(vma);
                goto out_up;
        }

        current->mm->context.vdso_base = vdso_base;
        rc = 0;

out_up:
        up_write(&mm->mmap_sem);
        return rc;
}
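
/*
 * Boot-time initialization: fill in the shared data page, build the
 * page list for the 64-bit vDSO image linked into the kernel, then
 * allocate the per-cpu area for the boot CPU.
 */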
static int __init vdso_init(void)
{
        int i;

        vdso_init_data(vdso_data);

        /* Calculate the size of the 64 bit vDSO */
        vdso64_pages = ((&vdso64_end - &vdso64_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
                                  GFP_KERNEL);
        BUG_ON(vdso64_pagelist == NULL);
        for (i = 0; i < vdso64_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
                get_page(pg);
                vdso64_pagelist[i] = pg;
        }
        vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
        vdso64_pagelist[vdso64_pages] = NULL;
        if (vdso_alloc_per_cpu(&S390_lowcore))
                BUG();

        get_page(virt_to_page(vdso_data));

        return 0;
}
early_initcall(vdso_init);
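
/*
 * For reference, userspace locates the mapping installed above via the
 * AT_SYSINFO_EHDR auxiliary vector entry rather than via any fixed
 * address. A minimal sketch (plain glibc, no s390 specifics assumed):
 *
 *      #include <stdio.h>
 *      #include <sys/auxv.h>
 *
 *      int main(void)
 *      {
 *              unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
 *
 *              printf("vDSO ELF header mapped at %#lx\n", vdso);
 *              return 0;
 *      }
 */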