// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Imagination Technologies
 * Author: Alex Smith <alex.smith@imgtec.com>
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/abi.h>
#include <asm/mips-cps.h>
#include <asm/page.h>
#include <asm/vdso.h>

#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

/* Kernel-provided data used by the VDSO. */
static union vdso_data_store mips_vdso_data __page_aligned_data;
struct vdso_data *vdso_data = mips_vdso_data.data;

/*
 * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
 * what we map and where within the area they are mapped is determined at
 * runtime.
 */
static struct page *no_pages[] = { NULL };
static struct vm_special_mapping vdso_vvar_mapping = {
	.name = "[vvar]",
	.pages = no_pages,
};
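
/*
 * init_vdso_image() below fills in each image's page array from the physical
 * pages backing the VDSO ELF image, which is built into the kernel at a
 * page-aligned address.
 */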
static void __init init_vdso_image(struct mips_vdso_image *image)
{
	unsigned long num_pages, i;
	unsigned long data_pfn;

	BUG_ON(!PAGE_ALIGNED(image->data));
	BUG_ON(!PAGE_ALIGNED(image->size));

	num_pages = image->size / PAGE_SIZE;

	data_pfn = __phys_to_pfn(__pa_symbol(image->data));
	for (i = 0; i < num_pages; i++)
		image->mapping.pages[i] = pfn_to_page(data_pfn + i);
}
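
/*
 * Initialise the page arrays for each ABI's VDSO image at boot. The o32 and
 * n32 images only exist when the corresponding compat ABIs are configured.
 */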
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image);

#ifdef CONFIG_MIPS32_O32
	init_vdso_image(&vdso_image_o32);
#endif

#ifdef CONFIG_MIPS32_N32
	init_vdso_image(&vdso_image_n32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
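
/*
 * Base address at which to map the VDSO. Leave room for the delay slot
 * emulation page above the stack and, when ASLR is enabled for the task
 * (PF_RANDOMIZE), add a page-aligned random offset.
 */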
static unsigned long vdso_base(void)
{
	unsigned long base = STACK_TOP;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* Skip the delay slot emulation page */
		base += PAGE_SIZE;
	}

	if (current->flags & PF_RANDOMIZE) {
		base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
		base = PAGE_ALIGN(base);
	}

	return base;
}
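
/*
 * Map the VDSO and its supporting pages into a new process image. Called
 * from the ELF loader during exec, once the new mm is in place.
 */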
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mips_vdso_image *image = current->thread.abi->vdso;
	struct mm_struct *mm = current->mm;
	unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr,
		      gic_pfn, gic_base;
	struct vm_area_struct *vma;
	int ret;
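
	/*
	 * All of the mappings below are set up under the mmap write lock;
	 * give up if a fatal signal arrives while waiting for it.
	 */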
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		unsigned long unused;

		/* Map delay slot emulation page */
		base = do_mmap(NULL, STACK_TOP, PAGE_SIZE, PROT_READ | PROT_EXEC,
			       MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, 0, 0,
			       &unused, NULL);
		if (IS_ERR_VALUE(base)) {
			ret = base;
			goto out;
		}
	}
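
	/*
	 * The page just reserved at STACK_TOP is used by the FPU emulator to
	 * emulate instructions in branch delay slots; the VDSO itself is
	 * placed above it (see vdso_base()).
	 */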

	/*
	 * Determine total area size. This includes the VDSO image itself, the
	 * data page, and the GIC user page if present. Always create a mapping
	 * for the GIC user area if the GIC is present regardless of whether it
	 * is the current clocksource, in case it comes into use later on. We
	 * only map a page even though the total area is 64K, as we only need
	 * the counter registers at the start.
	 */
	gic_size = mips_gic_present() ? PAGE_SIZE : 0;
	vvar_size = gic_size + PAGE_SIZE;
	size = vvar_size + image->size;
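
	/*
	 * The layout of the area, from the lowest address upwards, is: the
	 * optional GIC user page, then the VDSO data page, then the VDSO
	 * image itself.
	 */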

	/*
	 * Find a region that's large enough for us to perform the
	 * colour-matching alignment below.
	 */
	if (cpu_has_dc_aliases)
		size += shm_align_mask + 1;

	base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(base)) {
		ret = base;
		goto out;
	}

	/*
	 * If we suffer from dcache aliasing, ensure that the VDSO data page
	 * mapping is coloured the same as the kernel's mapping of that memory.
	 * This ensures that when the kernel updates the VDSO data userland
	 * will observe it without requiring cache invalidations.
	 */
	if (cpu_has_dc_aliases) {
		base = __ALIGN_MASK(base, shm_align_mask);
		base += ((unsigned long)vdso_data - gic_size) & shm_align_mask;
	}
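
	/*
	 * After the adjustment above, base + gic_size (the data page address
	 * computed below) has the same cache colour as the kernel's vdso_data,
	 * so both virtual addresses index the same dcache sets.
	 */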
	data_addr = base + gic_size;
	vdso_addr = data_addr + PAGE_SIZE;
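
	/*
	 * Install the vvar area (GIC user page plus data page) as a special
	 * mapping; the individual pages are remapped into it below.
	 */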
	vma = _install_special_mapping(mm, base, vvar_size,
				       VM_READ | VM_MAYREAD,
				       &vdso_vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* Map GIC user page. */
	if (gic_size) {
		gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS;
		gic_pfn = PFN_DOWN(__pa(gic_base));

		ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
					 pgprot_noncached(vma->vm_page_prot));
		if (ret)
			goto out;
	}

	/* Map data page. */
	ret = remap_pfn_range(vma, data_addr,
			      virt_to_phys(vdso_data) >> PAGE_SHIFT,
			      PAGE_SIZE, vma->vm_page_prot);
	if (ret)
		goto out;

	/* Map VDSO image. */
	vma = _install_special_mapping(mm, vdso_addr, image->size,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &image->mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	mmap_write_unlock(mm);
	return ret;
}