/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <asm/current.h>
#include <asm/page.h>
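/*
 * Walk the page tables for a user address and, if the page is present,
 * young, writable and dirty, return with the pte mapped and its page
 * table lock held so the caller can write to the page directly.
 * Returns 1 on success, 0 if the page cannot be pinned.
 */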
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}
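/*
 * Copy to user space one page fragment at a time: pin the destination
 * page with pin_page_for_write() and memcpy() into it directly.  If a
 * page cannot be pinned, __put_user() is used to fault it in (or fail)
 * before retrying.
 */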
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	int atomic;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = in_atomic();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}

		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memcpy((void *)to, from, tocopy);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}
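/*
 * Dispatch on size: copies below 64 bytes use the standard assembly
 * routine, larger ones the memcpy based path above.
 */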
unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
	if (n < 64)
		return __copy_to_user_std(to, from, n);
	return __copy_to_user_memcpy(to, from, n);
}
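/*
 * memset() based counterpart of __copy_to_user_memcpy() above, used to
 * zero user space buffers one pinned page fragment at a time.
 */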
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memset((void *)addr, 0, tocopy);
		addr += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}
unsigned long __clear_user(void __user *addr, unsigned long n)
{
	/* See rationale for this in __copy_to_user() above. */
	if (n < 64)
		return __clear_user_std(addr, n);
	return __clear_user_memset(addr, n);
}
#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated. Some overhead (small but still)
 * would be implied by a runtime determined variable threshold, and
 * so far the measurement on concerned targets didn't show a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for results to make some sense.
 */

#include <linux/vmalloc.h>
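/*
 * Benchmark the memcpy/memset based routines against the standard ones
 * for power-of-two sizes from PAGE_SIZE down to 4 bytes, printing the
 * elapsed sched_clock() time for each.
 */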
static int __init test_size_treshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_treshold);

#endif