// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2012 ARM Limited
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <linux/random.h>

#include <asm/cacheflush.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <asm/vdso_timer_info.h>
#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];
extern char vdso_start[], vdso_end[];
static unsigned long vdso_pages __ro_after_init;
static unsigned long timer_mapping_base;

/*
 * Timer description; expected to be filled in by the platform timer
 * driver at boot.  The EMPTY_* defaults mean "no memory-mapped timer".
 */
struct timer_info_t timer_info = {
        .cycle_count_down = true,
        .mapping_base = EMPTY_TIMER_MAPPING,
        .cycle_count_reg_offset = EMPTY_REG_OFFSET
};

static struct page *no_pages[] = { NULL };

/*
 * The vDSO data page, padded to a full page so it can be mapped to
 * user space as a whole.
 */
static union {
        struct vdso_data data;
        u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
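
/*
 * Two special mappings are installed for each process: vdso_spec[0]
 * ("[vvar]") covers the vdso_data page plus, when the platform
 * provides one, the memory-mapped timer page; its PTEs are written
 * per-process in arch_setup_additional_pages(), so the static
 * pagelist stays empty (no_pages).  vdso_spec[1] ("[vdso]") covers
 * the vDSO code pages, filled in once by vdso_init().
 */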
static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
        {
                .name = "[vvar]",
                .pages = no_pages,
        },
        {
                .name = "[vdso]",
        },
};

static void get_timer_node_info(void)
{
        timer_mapping_base = timer_info.mapping_base;
        vdso_data->cycle_count_offset =
                timer_info.cycle_count_reg_offset;
        vdso_data->cycle_count_down =
                timer_info.cycle_count_down;
}

static int __init vdso_init(void)
{
        int i;
        struct page **vdso_pagelist;

        if (memcmp(vdso_start, "\177ELF", 4)) {
                pr_err("vDSO is not a valid ELF object!\n");
                return -EINVAL;
        }

        /* Create a timer I/O mapping to get the clock cycle counter */
        get_timer_node_info();

        vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
        pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
                vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);

        /* Allocate the vDSO code pagelist */
        vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL);
        if (vdso_pagelist == NULL)
                return -ENOMEM;

        /* Grab the vDSO code pages */
        for (i = 0; i < vdso_pages; i++)
                vdso_pagelist[i] = virt_to_page(vdso_start + i * PAGE_SIZE);
        vdso_spec[1].pages = &vdso_pagelist[0];

        return 0;
}

arch_initcall(vdso_init);
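
/*
 * Pick a randomized, page-aligned base address for the vDSO inside the
 * mmap area.  The candidate window is [mmap_base, end - mapping_len],
 * where 'end' is the window end rounded up to a PMD boundary and
 * clamped to TASK_SIZE; when the window is empty we fall back to
 * mmap_base itself.
 */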
static inline unsigned long vdso_random_addr(unsigned long vdso_mapping_len)
{
        unsigned long start = current->mm->mmap_base, end, offset, addr;

        start = PAGE_ALIGN(start);

        /* Round the lowest possible end address up to a PMD boundary. */
        end = (start + vdso_mapping_len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE)
                end = TASK_SIZE;
        end -= vdso_mapping_len;

        if (end > start) {
                offset = get_random_int() %
                         (((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
        }

        return addr;
}
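
/*
 * Note that the randomized address is only a hint: get_unmapped_area()
 * may place the mapping elsewhere if the hinted range is unavailable.
 */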
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        struct vm_area_struct *vma;
        unsigned long addr = 0;
        pgprot_t prot;
        int ret, vvar_page_num = 2;

        vdso_text_len = vdso_pages << PAGE_SHIFT;

        /* Without a platform timer there is no timer page to map. */
        if (timer_mapping_base == EMPTY_VALUE)
                vvar_page_num = 1;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + vvar_page_num * PAGE_SIZE;
#ifdef CONFIG_CPU_CACHE_ALIASING
        vdso_mapping_len += L1_cache_info[DCACHE].aliasing_num - 1;
#endif
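
        /*
         * Layout of the reserved area (with a platform timer present):
         *
         *   vdso_base                   [vvar]  vdso_data page
         *   vdso_base + PAGE_SIZE       [vvar]  timer registers page
         *   vdso_base + 2 * PAGE_SIZE   [vdso]  vDSO code pages
         *
         * Without a timer, [vvar] is a single data page and the code
         * follows immediately after it.
         */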

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        addr = vdso_random_addr(vdso_mapping_len);
        vdso_base = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = vdso_base;
                goto up_fail;
        }

#ifdef CONFIG_CPU_CACHE_ALIASING
        {
                unsigned int aliasing_mask =
                        L1_cache_info[DCACHE].aliasing_mask;
                unsigned int page_colour_ofs;

                page_colour_ofs = ((unsigned int)vdso_data & aliasing_mask) -
                        (vdso_base & aliasing_mask);
                vdso_base += page_colour_ofs & aliasing_mask;
        }
#endif
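
        /*
         * The colour adjustment above lines the user virtual address up
         * with the kernel virtual address of vdso_data, so that both
         * mappings fall into the same colour of an aliasing D-cache.
         */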

        /* Install the [vvar] area: data page plus optional timer page */
        vma = _install_special_mapping(mm, vdso_base,
                                       vvar_page_num * PAGE_SIZE,
                                       VM_READ | VM_MAYREAD, &vdso_spec[0]);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        /* Map vdso_data to user space */
        ret = io_remap_pfn_range(vma, vdso_base,
                                 virt_to_phys(vdso_data) >> PAGE_SHIFT,
                                 PAGE_SIZE, vma->vm_page_prot);
        if (ret)
                goto up_fail;

        /* Map the timer registers to user space, if present */
        if (vvar_page_num == 2) {
                vdso_base += PAGE_SIZE;
                prot = __pgprot(_PAGE_V | _PAGE_M_UR_KR | _PAGE_D |
                                _PAGE_C_DEV);
                ret = io_remap_pfn_range(vma, vdso_base,
                                         timer_mapping_base >> PAGE_SHIFT,
                                         PAGE_SIZE, prot);
                if (ret)
                        goto up_fail;
        }

        /* Map the vDSO code to user space */
        vdso_base += PAGE_SIZE;
        mm->context.vdso = (void *)vdso_base;
        vma = _install_special_mapping(mm, vdso_base, vdso_text_len,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
                                       &vdso_spec[1]);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        up_write(&mm->mmap_sem);
        return 0;

up_fail:
        mm->context.vdso = NULL;
        up_write(&mm->mmap_sem);
        return ret;
}

static void vdso_write_begin(struct vdso_data *vdata)
{
        ++vdata->seq_count;
        smp_wmb();      /* Pairs with smp_rmb in vdso_read_retry */
}

static void vdso_write_end(struct vdso_data *vdata)
{
        smp_wmb();      /* Pairs with smp_rmb in vdso_read_begin */
        ++vdata->seq_count;
}
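
/*
 * Sketch of the matching user-space read side (the real helpers,
 * vdso_read_begin() and vdso_read_retry(), live in the vDSO code):
 *
 *      do {
 *              seq = vdata->seq_count;   -- odd: update in progress
 *              smp_rmb();
 *              ... snapshot the vdata fields ...
 *              smp_rmb();
 *      } while ((seq & 1) || seq != vdata->seq_count);
 */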

void update_vsyscall(struct timekeeper *tk)
{
        vdso_write_begin(vdso_data);
        vdso_data->cs_mask = tk->tkr_mono.mask;
        vdso_data->cs_mult = tk->tkr_mono.mult;
        vdso_data->cs_shift = tk->tkr_mono.shift;
        vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
        vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
        vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
        vdso_data->xtime_clock_sec = tk->xtime_sec;
        vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
        vdso_data->xtime_coarse_sec = tk->xtime_sec;
        vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
                                       tk->tkr_mono.shift;
        vdso_data->hrtimer_res = hrtimer_resolution;
        vdso_write_end(vdso_data);
}

void update_vsyscall_tz(void)
{
        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}