/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

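/*
 * These routines speed up large copy_to_user()/clear_user() operations by
 * pinning the destination page table entry and then using a plain
 * memcpy()/memset() while the page table lock is held.
 *
 * pin_page_for_write() checks that a write to the user address @_addr can
 * be performed without faulting: the walk must reach a present, young,
 * writable and dirty entry.  On success it returns 1 with the relevant
 * page table lock held and *ptep set to the pte, or to NULL when the
 * address is covered by a huge/THP pmd, in which case only
 * mm->page_table_lock is taken; the caller drops the lock once the copy
 * into that page is done.  On failure it returns 0 and the caller falls
 * back to __put_user() to fault the page in.
 */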
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
        unsigned long addr = (unsigned long)_addr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        pud_t *pud;
        spinlock_t *ptl;

        pgd = pgd_offset(current->mm, addr);
        if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
                return 0;

        pud = pud_offset(pgd, addr);
        if (unlikely(pud_none(*pud) || pud_bad(*pud)))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (unlikely(pmd_none(*pmd)))
                return 0;

        /*
         * A pmd can be bad if it refers to a HugeTLB or THP page.
         *
         * Both THP and HugeTLB pages have the same pmd layout
         * and should not be manipulated by the pte functions.
         *
         * Lock the page table for the destination and check
         * to see that it's still huge and whether or not we will
         * need to fault on write.
         */
        if (unlikely(pmd_thp_or_huge(*pmd))) {
                ptl = &current->mm->page_table_lock;
                spin_lock(ptl);
                if (unlikely(!pmd_thp_or_huge(*pmd)
                        || pmd_hugewillfault(*pmd))) {
                        spin_unlock(ptl);
                        return 0;
                }

                *ptep = NULL;
                *ptlp = ptl;
                return 1;
        }

        if (unlikely(pmd_bad(*pmd)))
                return 0;

        /*
         * The write must not fault: the pte has to be present, young,
         * writable and dirty, otherwise let the caller take the fault
         * through the normal exception-handled path.
         */
        pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
        if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
            !pte_write(*pte) || !pte_dirty(*pte))) {
                pte_unmap_unlock(pte, ptl);
                return 0;
        }

        *ptep = pte;
        *ptlp = ptl;

        return 1;
}

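/*
 * Copy to user space a page at a time: pin the destination pte with
 * pin_page_for_write() so the mapping cannot change under us, then copy
 * the chunk that fits in the current page with a plain memcpy().  When a
 * page cannot be pinned (not present, read-only, clean, ...), mmap_sem is
 * dropped (unless in atomic context) and a zero byte is written with
 * __put_user(), which takes the fault through the normal path and makes
 * the pte present, writable and dirty, before retrying.
 */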
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
        unsigned long ua_flags;
        int atomic;

        if (uaccess_kernel()) {
                memcpy((void *)to, from, n);
                return 0;
        }

        /* the mmap semaphore is taken only if not in an atomic context */
        atomic = faulthandler_disabled();

        if (!atomic)
                down_read(&current->mm->mmap_sem);
        while (n) {
                pte_t *pte;
                spinlock_t *ptl;
                int tocopy;

                while (!pin_page_for_write(to, &pte, &ptl)) {
                        if (!atomic)
                                up_read(&current->mm->mmap_sem);
                        if (__put_user(0, (char __user *)to))
                                goto out;
                        if (!atomic)
                                down_read(&current->mm->mmap_sem);
                }

                /* copy at most up to the end of the current page */
                tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
                if (tocopy > n)
                        tocopy = n;

                ua_flags = uaccess_save_and_enable();
                memcpy((void *)to, from, tocopy);
                uaccess_restore(ua_flags);
                to += tocopy;
                from += tocopy;
                n -= tocopy;

                if (pte)
                        pte_unmap_unlock(pte, ptl);
                else
                        spin_unlock(ptl);
        }
        if (!atomic)
                up_read(&current->mm->mmap_sem);

out:
        return n;
}

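/*
 * For short copies the cost of walking and locking the page tables
 * outweighs the benefit of the memcpy() based path, so anything below
 * 64 bytes goes straight to the original assembler implementation
 * (see also the disabled threshold test at the end of this file).
 */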
unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        /*
         * This test is stubbed out of the main function above to keep
         * the overhead for small copies low by avoiding a large
         * register dump on the stack just to reload them right away.
         * With frame pointer disabled, tail call optimization kicks in
         * as well making this test almost invisible.
         */
        if (n < 64) {
                unsigned long ua_flags = uaccess_save_and_enable();
                n = __copy_to_user_std(to, from, n);
                uaccess_restore(ua_flags);
        } else {
                n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
                                          from, n);
        }
        return n;
}

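/*
 * Clear user space with the same page-at-a-time scheme as
 * __copy_to_user_memcpy() above, using memset() for each pinned chunk.
 * Unlike the copy path there is no atomic-context handling here:
 * mmap_sem is taken unconditionally.
 */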
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
        unsigned long ua_flags;

        if (uaccess_kernel()) {
                memset((void *)addr, 0, n);
                return 0;
        }

        down_read(&current->mm->mmap_sem);
        while (n) {
                pte_t *pte;
                spinlock_t *ptl;
                int tocopy;

                while (!pin_page_for_write(addr, &pte, &ptl)) {
                        up_read(&current->mm->mmap_sem);
                        if (__put_user(0, (char __user *)addr))
                                goto out;
                        down_read(&current->mm->mmap_sem);
                }

                tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
                if (tocopy > n)
                        tocopy = n;

                ua_flags = uaccess_save_and_enable();
                memset((void *)addr, 0, tocopy);
                uaccess_restore(ua_flags);
                addr += tocopy;
                n -= tocopy;

                if (pte)
                        pte_unmap_unlock(pte, ptl);
                else
                        spin_unlock(ptl);
        }
        up_read(&current->mm->mmap_sem);

out:
        return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
        /* See the rationale for this in arm_copy_to_user() above. */
        if (n < 64) {
                unsigned long ua_flags = uaccess_save_and_enable();
                n = __clear_user_std(addr, n);
                uaccess_restore(ua_flags);
        } else {
                n = __clear_user_memset(addr, n);
        }
        return n;
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated. Some overhead (small but still)
 * would be implied by a runtime determined variable threshold, and
 * so far the measurement on concerned targets didn't show a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for results to make some sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_treshold(void)
{
        struct page *src_page, *dst_page;
        void *user_ptr, *kernel_ptr;
        unsigned long long t0, t1, t2;
        int size, ret;

        ret = -ENOMEM;
        src_page = alloc_page(GFP_KERNEL);
        if (!src_page)
                goto no_src;
        dst_page = alloc_page(GFP_KERNEL);
        if (!dst_page)
                goto no_dst;
        kernel_ptr = page_address(src_page);
        user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
        if (!user_ptr)
                goto no_vmap;

        /* warm up the src page dcache */
        ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

        for (size = PAGE_SIZE; size >= 4; size /= 2) {
                t0 = sched_clock();
                ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
                t1 = sched_clock();
                ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
                t2 = sched_clock();
                printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
        }

        for (size = PAGE_SIZE; size >= 4; size /= 2) {
                t0 = sched_clock();
                ret |= __clear_user_memset(user_ptr, size);
                t1 = sched_clock();
                ret |= __clear_user_std(user_ptr, size);
                t2 = sched_clock();
                printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
        }

        if (ret)
                ret = -EFAULT;

        vunmap(user_ptr);
no_vmap:
        put_page(dst_page);
no_dst:
        put_page(src_page);
no_src:
        return ret;
}

subsys_initcall(test_size_treshold);

#endif