/*
 * VDSO implementation for AArch64 and vector page setup for AArch32.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
extern char vdso_start, vdso_end;
static unsigned long vdso_pages __ro_after_init;

/*
 * The vDSO data page.
 */
static union {
        struct vdso_data data;
        u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
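/*
 * vdso_data is the kernel-side view of the vDSO data page. It is mapped
 * read-only into every user address space (the "[vvar]" mapping installed
 * below) and is refreshed from the timekeeping core by update_vsyscall(),
 * which lets the vDSO clock_gettime()/gettimeofday() fast paths run
 * without entering the kernel.
 */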
#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
static struct page *vectors_page[1] __ro_after_init;
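/*
 * alloc_vectors_page() assembles the single page that is later mapped at
 * the AArch32 high vectors address for compat tasks: the kuser helpers go
 * at the top of the page and the 32-bit sigreturn trampolines at
 * AARCH32_KERN_SIGRET_CODE_OFFSET, mirroring the layout used by arch/arm.
 */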
static int __init alloc_vectors_page(void)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];

        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
        int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
        unsigned long vpage;

        vpage = get_zeroed_page(GFP_ATOMIC);
        if (!vpage)
                return -ENOMEM;

        /* kuser helpers */
        memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,
               kuser_sz);

        /* sigreturn code */
        memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
               __aarch32_sigret_code_start, sigret_sz);

        flush_icache_range(vpage, vpage + PAGE_SIZE);
        vectors_page[0] = virt_to_page(vpage);

        return 0;
}
arch_initcall(alloc_vectors_page);
int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr = AARCH32_VECTORS_BASE;
        static const struct vm_special_mapping spec = {
                .name = "[vectors]",
                .pages = vectors_page,
        };
        void *ret;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
        current->mm->context.vdso = (void *)addr;

        /* Map vectors page at the high address. */
        ret = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
                                       &spec);

        up_write(&mm->mmap_sem);

        return PTR_ERR_OR_ZERO(ret);
}
#endif /* CONFIG_COMPAT */
static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
        {
                .name = "[vvar]",
        },
        {
                .name = "[vdso]",
        },
};
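/*
 * vdso_spec[0] ("[vvar]") covers the data page and vdso_spec[1] ("[vdso]")
 * covers the code pages; their .pages arrays are filled in by vdso_init()
 * once the vDSO image has been located and validated.
 */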
static int __init vdso_init(void)
{
        int i;
        struct page **vdso_pagelist;
        unsigned long pfn;

        if (memcmp(&vdso_start, "\177ELF", 4)) {
                pr_err("vDSO is not a valid ELF object!\n");
                return -EINVAL;
        }

        vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
        pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
                vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);

        /* Allocate the vDSO pagelist, plus a page for the data. */
        vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
                                GFP_KERNEL);
        if (vdso_pagelist == NULL)
                return -ENOMEM;

        /* Grab the vDSO data page. */
        vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

        /* Grab the vDSO code pages. */
        pfn = sym_to_pfn(&vdso_start);

        for (i = 0; i < vdso_pages; i++)
                vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

        vdso_spec[0].pages = &vdso_pagelist[0];
        vdso_spec[1].pages = &vdso_pagelist[1];

        return 0;
}
arch_initcall(vdso_init);
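/*
 * arch_setup_additional_pages() is called from the ELF loader at exec time.
 * It reserves one region large enough for the data page plus the vDSO text,
 * maps the data page first and the code pages immediately after it, and
 * records the code address in mm->context.vdso so the kernel can find the
 * vDSO later (e.g. for the AT_SYSINFO_EHDR auxv entry and the signal
 * return trampoline).
 */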
int arch_setup_additional_pages(struct linux_binprm *bprm,
                                int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        void *ret;

        vdso_text_len = vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + PAGE_SIZE;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = ERR_PTR(vdso_base);
                goto up_fail;
        }
        ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
                                       VM_READ|VM_MAYREAD,
                                       &vdso_spec[0]);
        if (IS_ERR(ret))
                goto up_fail;

        vdso_base += PAGE_SIZE;
        mm->context.vdso = (void *)vdso_base;
        ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &vdso_spec[1]);
        if (IS_ERR(ret))
                goto up_fail;

        up_write(&mm->mmap_sem);
        return 0;

up_fail:
        mm->context.vdso = NULL;
        up_write(&mm->mmap_sem);
        return PTR_ERR(ret);
}
/*
 * Update the vDSO data page to keep in sync with kernel timekeeping.
 */
void update_vsyscall(struct timekeeper *tk)
{
        u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct;

        /* Make the sequence count odd while the data is being updated. */
        ++vdso_data->tb_seq_count;
        smp_wmb();

        vdso_data->use_syscall = use_syscall;
        vdso_data->xtime_coarse_sec = tk->xtime_sec;
        vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
                                       tk->tkr_mono.shift;
        vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
        vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;

        /*
         * The clocksource fields are only needed when the counter can be
         * read directly from userspace; otherwise the vDSO falls back to
         * the real syscall.
         */
        if (!use_syscall) {
                /* tkr_mono.cycle_last == tkr_raw.cycle_last */
                vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
                vdso_data->raw_time_sec = tk->raw_time.tv_sec;
                vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec <<
                                            tk->tkr_raw.shift) +
                                           tk->tkr_raw.xtime_nsec;
                vdso_data->xtime_clock_sec = tk->xtime_sec;
                vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
                vdso_data->cs_mono_mult = tk->tkr_mono.mult;
                vdso_data->cs_raw_mult = tk->tkr_raw.mult;
                /* tkr_mono.shift == tkr_raw.shift */
                vdso_data->cs_shift = tk->tkr_mono.shift;
        }

        smp_wmb();
        /* Make the sequence count even again: the update is complete. */
        ++vdso_data->tb_seq_count;
}
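/*
 * Timezone changes are mirrored separately: update_vsyscall_tz() copies the
 * current sys_tz into the data page so the vDSO gettimeofday() can fill in
 * a caller-supplied struct timezone without a syscall.
 */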
void update_vsyscall_tz(void)
{
        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}