/*
 * Copyright (C) 2015 Imagination Technologies
 * Author: Alex Smith <alex.smith@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>

#include <asm/abi.h>
#include <asm/mips-cps.h>
#include <asm/page.h>
#include <asm/vdso.h>

/* Kernel-provided data used by the VDSO. */
static union mips_vdso_data vdso_data __page_aligned_data;

/*
 * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
 * what we map and where within the area they are mapped is determined at
 * runtime.
 */
static struct page *no_pages[] = { NULL };
static struct vm_special_mapping vdso_vvar_mapping = {
	.name = "[vvar]",
	.pages = no_pages,
};
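
/*
 * Record the struct pages backing a VDSO image in its special mapping, so
 * that arch_setup_additional_pages() can later map the image into a
 * process's address space.
 */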
static void __init init_vdso_image(struct mips_vdso_image *image)
{
	unsigned long num_pages, i;
	unsigned long data_pfn;

	BUG_ON(!PAGE_ALIGNED(image->data));
	BUG_ON(!PAGE_ALIGNED(image->size));

	num_pages = image->size / PAGE_SIZE;

	data_pfn = __phys_to_pfn(__pa_symbol(image->data));
	for (i = 0; i < num_pages; i++)
		image->mapping.pages[i] = pfn_to_page(data_pfn + i);
}
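
/*
 * Initialise the page arrays for every VDSO image built into the kernel:
 * the native image plus the O32/N32 compat images when those ABIs are
 * enabled.
 */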
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image);

#ifdef CONFIG_MIPS32_O32
	init_vdso_image(&vdso_image_o32);
#endif

#ifdef CONFIG_MIPS32_N32
	init_vdso_image(&vdso_image_n32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
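
/*
 * Called by the timekeeping core whenever the time is updated. Copies the
 * current timekeeper state into the VDSO data page, bracketed by the
 * write begin/end helpers so userland readers see a consistent snapshot.
 */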
void update_vsyscall(struct timekeeper *tk)
{
	vdso_data_write_begin(&vdso_data);

	vdso_data.xtime_sec = tk->xtime_sec;
	vdso_data.xtime_nsec = tk->tkr_mono.xtime_nsec;
	vdso_data.wall_to_mono_sec = tk->wall_to_monotonic.tv_sec;
	vdso_data.wall_to_mono_nsec = tk->wall_to_monotonic.tv_nsec;
	vdso_data.cs_shift = tk->tkr_mono.shift;

	vdso_data.clock_mode = tk->tkr_mono.clock->archdata.vdso_clock_mode;
	if (vdso_data.clock_mode != VDSO_CLOCK_NONE) {
		vdso_data.cs_mult = tk->tkr_mono.mult;
		vdso_data.cs_cycle_last = tk->tkr_mono.cycle_last;
		vdso_data.cs_mask = tk->tkr_mono.mask;
	}

	vdso_data_write_end(&vdso_data);
}
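
/*
 * Mirror the current timezone into the VDSO data page when a VDSO-capable
 * clocksource is in use, so the VDSO's gettimeofday() can supply it.
 */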
void update_vsyscall_tz(void)
{
	if (vdso_data.clock_mode != VDSO_CLOCK_NONE) {
		vdso_data.tz_minuteswest = sys_tz.tz_minuteswest;
		vdso_data.tz_dsttime = sys_tz.tz_dsttime;
	}
}
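
/*
 * Choose the address at which to search for space for the vvar/VDSO area:
 * just above the delay slot emulation page at STACK_TOP, plus a page-aligned
 * random offset within VDSO_RANDOMIZE_SIZE when address space randomization
 * is enabled for the task.
 */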
static unsigned long vdso_base(void)
{
	unsigned long base;

	/* Skip the delay slot emulation page */
	base = STACK_TOP + PAGE_SIZE;

	if (current->flags & PF_RANDOMIZE) {
		base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
		base = PAGE_ALIGN(base);
	}

	return base;
}
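
/*
 * Set up the per-process VDSO mappings at exec time. The resulting layout is:
 *
 *   STACK_TOP:  delay slot emulation page
 *   base:       GIC user page (if the GIC is present)
 *   data_addr:  VDSO data page
 *   vdso_addr:  VDSO image for the current ABI
 */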
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mips_vdso_image *image = current->thread.abi->vdso;
	struct mm_struct *mm = current->mm;
	unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn;
	struct vm_area_struct *vma;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	/* Map delay slot emulation page */
	base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
			   VM_READ|VM_WRITE|VM_EXEC|
			   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
			   0, NULL);
	if (IS_ERR_VALUE(base)) {
		ret = base;
		goto out;
	}

	/*
	 * Determine total area size. This includes the VDSO data itself, the
	 * data page, and the GIC user page if present. Always create a mapping
	 * for the GIC user area if the GIC is present regardless of whether it
	 * is the current clocksource, in case it comes into use later on. We
	 * only map a page even though the total area is 64K, as we only need
	 * the counter registers at the start.
	 */
	gic_size = mips_gic_present() ? PAGE_SIZE : 0;
	vvar_size = gic_size + PAGE_SIZE;
	size = vvar_size + image->size;

	/*
	 * Find a region that's large enough for us to perform the
	 * colour-matching alignment below.
	 */
	if (cpu_has_dc_aliases)
		size += shm_align_mask + 1;

	base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(base)) {
		ret = base;
		goto out;
	}

	/*
	 * If we suffer from dcache aliasing, ensure that the VDSO data page
	 * mapping is coloured the same as the kernel's mapping of that memory.
	 * This ensures that when the kernel updates the VDSO data userland
	 * will observe it without requiring cache invalidations.
	 */
	if (cpu_has_dc_aliases) {
		base = __ALIGN_MASK(base, shm_align_mask);
		base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
	}

	data_addr = base + gic_size;
	vdso_addr = data_addr + PAGE_SIZE;

	/* Reserve the vvar area (GIC user page + data page) as a special mapping. */
	vma = _install_special_mapping(mm, base, vvar_size,
				       VM_READ | VM_MAYREAD,
				       &vdso_vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* Map GIC user page. */
	if (gic_size) {
		gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT;

		ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
					 pgprot_noncached(PAGE_READONLY));
		if (ret)
			goto out;
	}

	/* Map data page. */
	ret = remap_pfn_range(vma, data_addr,
			      virt_to_phys(&vdso_data) >> PAGE_SHIFT,
			      PAGE_SIZE, PAGE_READONLY);
	if (ret)
		goto out;

	/* Map VDSO image. */
	vma = _install_special_mapping(mm, vdso_addr, image->size,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &image->mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	up_write(&mm->mmap_sem);
	return ret;
}