// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>
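
/*
 * These routines back copy_to_user() and clear_user() with plain
 * memcpy()/memset().  Before each destination page is written,
 * pin_page_for_write() walks the page tables and takes the relevant
 * page table lock, so the mapping cannot go away underneath the copy
 * while user access is temporarily enabled.
 */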

static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
        unsigned long addr = (unsigned long)_addr;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        pgd = pgd_offset(current->mm, addr);
        if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
                return 0;

        p4d = p4d_offset(pgd, addr);
        if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
                return 0;

        pud = pud_offset(p4d, addr);
        if (unlikely(pud_none(*pud) || pud_bad(*pud)))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (unlikely(pmd_none(*pmd)))
                return 0;
        /*
         * A pmd can be bad if it refers to a HugeTLB or THP page.
         *
         * Both THP and HugeTLB pages have the same pmd layout
         * and should not be manipulated by the pte functions.
         *
         * Lock the page table for the destination and check
         * to see that it's still huge and whether or not we will
         * need to fault on write.
         */
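        /*
         * Note: in the huge-pmd case below no pte is returned; *ptep is
         * left NULL and *ptlp points at mm->page_table_lock, so callers
         * drop that lock with spin_unlock() rather than
         * pte_unmap_unlock().
         */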
        if (unlikely(pmd_thp_or_huge(*pmd))) {
                ptl = &current->mm->page_table_lock;
                spin_lock(ptl);
                if (unlikely(!pmd_thp_or_huge(*pmd)
                        || pmd_hugewillfault(*pmd))) {
                        spin_unlock(ptl);
                        return 0;
                }

                *ptep = NULL;
                *ptlp = ptl;
                return 1;
        }

        if (unlikely(pmd_bad(*pmd)))
                return 0;

        pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
        if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
            !pte_write(*pte) || !pte_dirty(*pte))) {
                pte_unmap_unlock(pte, ptl);
                return 0;
        }

        *ptep = pte;
        *ptlp = ptl;

        return 1;
}

static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
        unsigned long ua_flags;
        int atomic;

        if (uaccess_kernel()) {
                memcpy((void *)to, from, n);
                return 0;
        }

        /* the mmap semaphore is taken only if not in an atomic context */
        atomic = faulthandler_disabled();

        if (!atomic)
                mmap_read_lock(current->mm);
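        /*
         * Copy one destination page at a time: (re)pin the page tables
         * for the current page, do the memcpy with user access enabled,
         * then drop the lock before moving on to the next page.
         */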
        while (n) {
                pte_t *pte;
                spinlock_t *ptl;
                int tocopy;

                while (!pin_page_for_write(to, &pte, &ptl)) {
                        if (!atomic)
                                mmap_read_unlock(current->mm);
                        if (__put_user(0, (char __user *)to))
                                goto out;
                        if (!atomic)
                                mmap_read_lock(current->mm);
                }
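                /*
                 * Worked example of the page-boundary arithmetic below,
                 * assuming 4 KiB pages (PAGE_MASK == ~0xfffUL): for
                 * to == ...ff4, (~to & ~PAGE_MASK) + 1 == 0xb + 1 == 12,
                 * i.e. exactly the bytes left up to the end of this page.
                 */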
                tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
                if (tocopy > n)
                        tocopy = n;

                ua_flags = uaccess_save_and_enable();
                memcpy((void *)to, from, tocopy);
                uaccess_restore(ua_flags);
                to += tocopy;
                from += tocopy;
                n -= tocopy;

                if (pte)
                        pte_unmap_unlock(pte, ptl);
                else
                        spin_unlock(ptl);
        }
        if (!atomic)
                mmap_read_unlock(current->mm);

out:
        return n;
}

unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        /*
         * This test is stubbed out of the main function above to keep
         * the overhead for small copies low by avoiding a large
         * register dump on the stack just to reload them right away.
         * With frame pointer disabled, tail call optimization kicks in
         * as well making this test almost invisible.
         */
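        /*
         * The 64-byte cutoff below corresponds to the thresholds that
         * the (disabled) benchmark at the bottom of this file is meant
         * to revalidate; copies this small are presumably cheaper to do
         * with the standard assembly routine than to page-pin.
         */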
        if (n < 64) {
                unsigned long ua_flags = uaccess_save_and_enable();
                n = __copy_to_user_std(to, from, n);
                uaccess_restore(ua_flags);
        } else {
                n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
                                          from, n);
        }
        return n;
}

static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
        unsigned long ua_flags;

        if (uaccess_kernel()) {
                memset((void *)addr, 0, n);
                return 0;
        }

        mmap_read_lock(current->mm);
        while (n) {
                pte_t *pte;
                spinlock_t *ptl;
                int tocopy;

                while (!pin_page_for_write(addr, &pte, &ptl)) {
                        mmap_read_unlock(current->mm);
                        if (__put_user(0, (char __user *)addr))
                                goto out;
                        mmap_read_lock(current->mm);
                }

                tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
                if (tocopy > n)
                        tocopy = n;

                ua_flags = uaccess_save_and_enable();
                memset((void *)addr, 0, tocopy);
                uaccess_restore(ua_flags);
                addr += tocopy;
                n -= tocopy;

                if (pte)
                        pte_unmap_unlock(pte, ptl);
                else
                        spin_unlock(ptl);
        }
        mmap_read_unlock(current->mm);

out:
        return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
        /* See the rationale for this in arm_copy_to_user() above. */
        if (n < 64) {
                unsigned long ua_flags = uaccess_save_and_enable();
                n = __clear_user_std(addr, n);
                uaccess_restore(ua_flags);
        } else {
                n = __clear_user_memset(addr, n);
        }
        return n;
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated. Some overhead (small but still)
 * would be implied by a runtime determined variable threshold, and
 * so far the measurement on concerned targets didn't show a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for results to make some sense.
 */
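/*
 * To rerun the measurement, flip the "#if 0" above to "#if 1"; the
 * benchmark is then registered as a subsys_initcall() and prints its
 * timings to the kernel log at boot.
 */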

#include <linux/vmalloc.h>

static int __init test_size_treshold(void)
{
        struct page *src_page, *dst_page;
        void *user_ptr, *kernel_ptr;
        unsigned long long t0, t1, t2;
        int size, ret;

        ret = -ENOMEM;
        src_page = alloc_page(GFP_KERNEL);
        if (!src_page)
                goto no_src;
        dst_page = alloc_page(GFP_KERNEL);
        if (!dst_page)
                goto no_dst;
        kernel_ptr = page_address(src_page);
        user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
        if (!user_ptr)
                goto no_vmap;

        /* warm up the src page dcache */
        ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);
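        /*
         * The warm-up copy above pulls the source page into the data
         * cache so the loops below time the copy paths themselves
         * rather than the initial cache misses.
         */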

        for (size = PAGE_SIZE; size >= 4; size /= 2) {
                t0 = sched_clock();
                ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
                t1 = sched_clock();
                ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
                t2 = sched_clock();
                printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
        }

        for (size = PAGE_SIZE; size >= 4; size /= 2) {
                t0 = sched_clock();
                ret |= __clear_user_memset(user_ptr, size);
                t1 = sched_clock();
                ret |= __clear_user_std(user_ptr, size);
                t2 = sched_clock();
                printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
        }

        if (ret)
                ret = -EFAULT;

        vunmap(user_ptr);
no_vmap:
        put_page(dst_page);
no_dst:
        put_page(src_page);
no_src:
        return ret;
}

subsys_initcall(test_size_treshold);

#endif