// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 */
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>
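
/*
 * Walk the page tables for a user destination address and, when the page
 * is present, young, writable and dirty, return 1 with the relevant page
 * table lock held (*ptep points at the pte, or is NULL for a huge
 * mapping) so the caller can write to the page with a plain memcpy().
 * Return 0 when the caller must first fault the page in (e.g. by writing
 * to it with __put_user()) and retry.
 */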
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;
	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check
	 * to see that it's still huge and whether or not we will
	 * need to fault on write.
	 */
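	/*
	 * There is no pte level for a huge mapping: the mm-wide
	 * page_table_lock is taken instead and *ptep is left NULL so
	 * the caller knows to drop that lock rather than a pte lock.
	 */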
	if (unlikely(pmd_thp_or_huge(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_thp_or_huge(*pmd)
			|| pmd_hugewillfault(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}
	if (unlikely(pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}
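
/*
 * Copy to user space by pinning each destination page and writing to it
 * with a plain memcpy(), one page at a time.  Used for larger copies
 * where this beats the standard __copy_to_user_std() routine (see
 * arm_copy_to_user() below for the size threshold).
 */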
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;

	if (uaccess_kernel()) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = faulthandler_disabled();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;
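		/*
		 * pin_page_for_write() fails while the destination page
		 * is not yet present, writable and dirty: drop mmap_sem
		 * (unless we are in an atomic context), fault the page in
		 * by writing a zero byte with __put_user(), and retry.
		 * If that write itself faults, give up and return the
		 * number of bytes left.
		 */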
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}
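		/* copy no further than the end of the currently pinned page */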
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memcpy((void *)to, from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}
unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __copy_to_user_std(to, from, n);
		uaccess_restore(ua_flags);
	} else {
		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
					  from, n);
	}
	return n;
}
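
/*
 * Zero a user-space range using the same page-pinning approach as
 * __copy_to_user_memcpy() above, but with memset() instead of memcpy().
 */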
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	unsigned long ua_flags;

	if (uaccess_kernel()) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;
		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}
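		/* clear no further than the end of the currently pinned page */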
		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memset((void *)addr, 0, tocopy);
		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}
unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See the rationale for this in arm_copy_to_user() above. */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __clear_user_std(addr, n);
		uaccess_restore(ua_flags);
	} else {
		n = __clear_user_memset(addr, n);
	}
	return n;
}
#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated. Some overhead (small but still)
 * would be implied by a runtime determined variable threshold, and
 * so far the measurements on the targets concerned didn't show a
 * worthwhile improvement.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make some sense.
 */
#include <linux/vmalloc.h>

static int __init test_size_treshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;
	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);
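
	/* time the memcpy-based and the standard routines over a range of sizes */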
	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}
	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}
subsys_initcall(test_size_treshold);

#endif