/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <asm/current.h>
#include <asm/page.h>
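
/*
 * Walk the page tables for a user address and, if the target page is
 * present and safely writable, take its pte lock so the mapping cannot
 * change underneath us.  Returns 1 with *ptep/*ptlp set on success, or
 * 0 if the caller must first fault the page in.
 */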
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
		return 0;
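
	/*
	 * Only pin the page when the pte shows a write will succeed
	 * without faulting: it must be present, young, writable and
	 * already dirty, since the memcpy()-based paths below bypass
	 * the usual fault handling.
	 */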
	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	int atomic;
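
	/*
	 * With KERNEL_DS the "user" pointer is really a kernel address,
	 * so a plain memcpy is safe and nothing needs pinning.
	 */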
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = in_atomic();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;
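
		/*
		 * If the page can't be pinned yet, fault it in by writing
		 * a zero byte with __put_user(), dropping the semaphore
		 * around the fault; a failure here means the address is
		 * genuinely invalid.
		 */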
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}
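
		/* copy at most up to the end of the current page */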
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memcpy((void *)to, from, tocopy);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
	if (n < 64)
		return __copy_to_user_std(to, from, n);
	return __copy_to_user_memcpy(to, from, n);
}

static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset((void *)addr, 0, n);
		return 0;
	}
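
	/*
	 * Unlike the copy path above, no atomic case is handled here:
	 * clear_user() is not expected from atomic context, so the mmap
	 * semaphore is taken unconditionally.
	 */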
	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}
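
		/* as above: clear at most up to the end of the current page */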
		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memset((void *)addr, 0, tocopy);
		addr += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long __clear_user(void __user *addr, unsigned long n)
{
	/* See the rationale for this in __copy_to_user() above. */
	if (n < 64)
		return __clear_user_std(addr, n);
	return __clear_user_memset(addr, n);
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated. A runtime-determined threshold
 * would imply some overhead (small, but still overhead), and so far
 * measurements on the targets concerned didn't show a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for results to make some sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_treshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
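	/*
	 * Map the destination page with user-style write-only
	 * protections (__P010) so the memcpy-based paths are exercised
	 * much as they would be on a real user mapping.
	 */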
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_treshold);

#endif