/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

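/*
 * Pin down the page table entry that maps a single user page so the
 * destination cannot fault while we memcpy()/memset() to it.  Returns 1
 * with *ptep/*ptlp set and the lock held; *ptep is left NULL for a
 * huge/THP mapping, which is covered by mm->page_table_lock rather than
 * a pte-level lock.  A return of 0 tells the caller to fault the page
 * in with __put_user() and retry.
 */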
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;

	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check
	 * to see that it's still huge and whether or not we will
	 * need to fault on write, or if we have a splitting THP.
	 */
	if (unlikely(pmd_thp_or_huge(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_thp_or_huge(*pmd)
			|| pmd_hugewillfault(*pmd)
			|| pmd_trans_splitting(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}

	if (unlikely(pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

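/*
 * Copy to user space one page at a time: pin the destination page,
 * memcpy() up to the end of that page under the page table lock, and
 * fall back to __put_user() to fault the page in whenever pinning
 * fails.
 */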
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	int atomic;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = in_atomic();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}

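		/*
		 * tocopy is the distance from 'to' to the end of the
		 * current page: with 4K pages, to == 0x1ff0 gives
		 * tocopy == 0x10, so each iteration stays within the
		 * single page pinned above.
		 */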
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memcpy((void *)to, from, tocopy);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}

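/*
 * Small copies go straight to the standard routine; the 64-byte
 * threshold was validated with the (normally disabled) benchmark at
 * the bottom of this file, which times both implementations across a
 * range of sizes.
 */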
unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
	if (n < 64)
		return __copy_to_user_std(to, from, n);
	return __copy_to_user_memcpy(to, from, n);
}

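/*
 * Zero user memory with the same page-pinning scheme as
 * __copy_to_user_memcpy() above, using memset() instead of memcpy();
 * unlike the copy path, it takes the mmap semaphore unconditionally.
 */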
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memset((void *)addr, 0, tocopy);
		addr += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long __clear_user(void __user *addr, unsigned long n)
{
	/* See the rationale for this in __copy_to_user() above. */
	if (n < 64)
		return __clear_user_std(addr, n);
	return __clear_user_memset(addr, n);
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated.  Some overhead (small but still)
 * would be implied by a runtime-determined variable threshold, and so
 * far the measurements on the targets concerned didn't show a
 * worthwhile variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for results to make some sense.
 */

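/*
 * The benchmark below times the memcpy/memset based routines against
 * their standard counterparts back to back with sched_clock(), halving
 * the transfer size each step from PAGE_SIZE down to 4 bytes.
 */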
#include <linux/vmalloc.h>

static int __init test_size_treshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_treshold);

#endif