/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

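/*
 * Resolve a user address to its pte by walking the page tables of "mm".
 * Return values below 0x1000 are not pte pointers but DAT exception codes
 * which the callers hand on to __handle_fault().
 */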
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return (pte_t *) 0x3a;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
                return (pte_t *) 0x3b;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                return (pte_t *) 0x10;

        return pte_offset_map(pmd, addr);
}

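/*
 * Copy "n" bytes between the user address "uaddr" and the kernel buffer
 * "kptr", one page at a time, under mm->page_table_lock.  write_user
 * selects the copy direction.  On a failed table walk the lock is dropped,
 * __handle_fault() resolves the fault and the copy is retried.
 * Returns the number of bytes that could not be copied.
 */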
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                                             size_t n, int write_user)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, size;
        pte_t *pte;
        void *from, *to;

        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte = follow_table(mm, uaddr);
                if ((unsigned long) pte < 0x1000)
                        goto fault;
                if (!pte_present(*pte)) {
                        pte = (pte_t *) 0x11;
                        goto fault;
                } else if (write_user && !pte_write(*pte)) {
                        pte = (pte_t *) 0x04;
                        goto fault;
                }

                pfn = pte_pfn(*pte);
                offset = uaddr & (PAGE_SIZE - 1);
                size = min(n - done, PAGE_SIZE - offset);
                if (write_user) {
                        to = (void *)((pfn << PAGE_SHIFT) + offset);
                        from = kptr + done;
                } else {
                        from = (void *)((pfn << PAGE_SHIFT) + offset);
                        to = kptr + done;
                }
                memcpy(to, from, size);
                done += size;
                uaddr += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, (unsigned long) pte, write_user))
                return n - done;
        goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
{
        struct mm_struct *mm = current->mm;
        unsigned long pfn;
        pte_t *pte;
        int rc;

retry:
        pte = follow_table(mm, uaddr);
        if ((unsigned long) pte < 0x1000)
                goto fault;
        if (!pte_present(*pte)) {
                pte = (pte_t *) 0x11;
                goto fault;
        }

        pfn = pte_pfn(*pte);
        return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
fault:
        spin_unlock(&mm->page_table_lock);
        rc = __handle_fault(uaddr, (unsigned long) pte, 0);
        spin_lock(&mm->page_table_lock);
        if (!rc)
                goto retry;
        return 0;
}

size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
        size_t rc;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy(to, (void __kernel __force *) from, n);
                return 0;
        }
        rc = __user_copy_pt((unsigned long) from, to, n, 0);
        if (unlikely(rc))
                memset(to + n - rc, 0, rc);
        return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy((void __kernel __force *) to, from, n);
                return 0;
        }
        return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

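/*
 * Clear "n" bytes of user memory by copying from empty_zero_page,
 * one page at a time.
 */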
static size_t clear_user_pt(size_t n, void __user *to)
{
        long done, size, ret;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                memset((void __kernel __force *) to, 0, n);
                return 0;
        }
        done = 0;
        do {
                if (n - done > PAGE_SIZE)
                        size = PAGE_SIZE;
                else
                        size = n - done;
                ret = __user_copy_pt((unsigned long) to + done,
                                     &empty_zero_page, size, 1);
                done += size;
                if (ret)
                        return ret + n - done;
        } while (done < n);
        return 0;
}

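/*
 * Return the length of a user string including the terminating '\0'
 * (limited by "count"), or 0 if the address cannot be accessed.
 */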
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
        char *addr;
        unsigned long uaddr = (unsigned long) src;
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, len;
        pte_t *pte;
        size_t len_str;

        if (segment_eq(get_fs(), KERNEL_DS))
                return strnlen((const char __kernel __force *) src, count) + 1;
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte = follow_table(mm, uaddr);
                if ((unsigned long) pte < 0x1000)
                        goto fault;
                if (!pte_present(*pte)) {
                        pte = (pte_t *) 0x11;
                        goto fault;
                }

                pfn = pte_pfn(*pte);
                offset = uaddr & (PAGE_SIZE-1);
                addr = (char *)(pfn << PAGE_SHIFT) + offset;
                len = min(count - done, PAGE_SIZE - offset);
                len_str = strnlen(addr, len);
                done += len_str;
                uaddr += len_str;
        } while ((len_str == len) && (done < count));
        spin_unlock(&mm->page_table_lock);
        return done + 1;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, (unsigned long) pte, 0))
                return 0;
        goto retry;
}

static size_t strncpy_from_user_pt(size_t count, const char __user *src,
                                   char *dst)
{
        size_t n = strnlen_user_pt(count, src);

        if (!n)
                return -EFAULT;
        if (n > count)
                n = count;
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy(dst, (const char __kernel __force *) src, n);
                if (dst[n-1] == '\0')
                        return n-1;
                else
                        return n;
        }
        if (__user_copy_pt((unsigned long) src, dst, n, 0))
                return -EFAULT;
        if (dst[n-1] == '\0')
                return n-1;
        else
                return n;
}

static size_t copy_in_user_pt(size_t n, void __user *to,
                              const void __user *from)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
                      uaddr, done, size, error_code;
        unsigned long uaddr_from = (unsigned long) from;
        unsigned long uaddr_to = (unsigned long) to;
        pte_t *pte_from, *pte_to;
        int write_user;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy((void __force *) to, (void __force *) from, n);
                return 0;
        }
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                write_user = 0;
                uaddr = uaddr_from;
                pte_from = follow_table(mm, uaddr_from);
                error_code = (unsigned long) pte_from;
                if (error_code < 0x1000)
                        goto fault;
                if (!pte_present(*pte_from)) {
                        error_code = 0x11;
                        goto fault;
                }

                write_user = 1;
                uaddr = uaddr_to;
                pte_to = follow_table(mm, uaddr_to);
                error_code = (unsigned long) pte_to;
                if (error_code < 0x1000)
                        goto fault;
                if (!pte_present(*pte_to)) {
                        error_code = 0x11;
                        goto fault;
                } else if (!pte_write(*pte_to)) {
                        error_code = 0x04;
                        goto fault;
                }

                pfn_from = pte_pfn(*pte_from);
                pfn_to = pte_pfn(*pte_to);
                offset_from = uaddr_from & (PAGE_SIZE-1);
                offset_to = uaddr_to & (PAGE_SIZE-1);
                offset_max = max(offset_from, offset_to);
                size = min(n - done, PAGE_SIZE - offset_max);

                memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
                       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
                done += size;
                uaddr_from += size;
                uaddr_to += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, error_code, write_user))
                return n - done;
        goto retry;
}

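/*
 * Atomically apply "insn" to the futex word at "uaddr": load the old value,
 * compute the new value with "insn" and compare-and-swap until the word is
 * updated without interference.  The EX_TABLE entries make a faulting user
 * access return -EFAULT in "ret".
 */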
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)     \
        asm volatile("0: l   %1,0(%6)\n"                                \
                     "1: " insn                                         \
                     "2: cs  %1,%2,0(%6)\n"                             \
                     "3: jl  1b\n"                                      \
                     "   lhi %0,0\n"                                    \
                     "4:\n"                                             \
                     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)    \
                     : "=d" (ret), "=&d" (oldval), "=&d" (newval),      \
                       "=m" (*uaddr)                                    \
                     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),         \
                       "m" (*uaddr) : "cc" );

static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
        int oldval = 0, newval, ret;

        switch (op) {
        case FUTEX_OP_SET:
                __futex_atomic_op("lr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
                __futex_atomic_op("lr %2,%1\nar %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_OR:
                __futex_atomic_op("lr %2,%1\nor %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ANDN:
                __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_XOR:
                __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        default:
                ret = -ENOSYS;
        }
        if (ret == 0)
                *old = oldval;
        return ret;
}

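/*
 * The futex operations may not sleep, so the user page is translated to a
 * kernel address with __dat_user_addr() and pinned with get_page() before
 * the atomic operation is carried out on it.
 */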
int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
        int ret;

        if (segment_eq(get_fs(), KERNEL_DS))
                return __futex_atomic_op_pt(op, uaddr, oparg, old);
        spin_lock(&current->mm->page_table_lock);
        uaddr = (u32 __force __user *)
                __dat_user_addr((__force unsigned long) uaddr);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
        put_page(virt_to_page(uaddr));
        return ret;
}

static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
                                     u32 oldval, u32 newval)
{
        int ret;

        asm volatile("0: cs   %1,%4,0(%5)\n"
                     "1: la   %0,0\n"
                     "2:\n"
                     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
                     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                     : "cc", "memory" );
        *uval = oldval;
        return ret;
}

int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
                            u32 oldval, u32 newval)
{
        int ret;

        if (segment_eq(get_fs(), KERNEL_DS))
                return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        spin_lock(&current->mm->page_table_lock);
        uaddr = (u32 __force __user *)
                __dat_user_addr((__force unsigned long) uaddr);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        put_page(virt_to_page(uaddr));
        return ret;
}

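/* uaccess operations vector used when no hardware support is available */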
struct uaccess_ops uaccess_pt = {
        .copy_from_user         = copy_from_user_pt,
        .copy_from_user_small   = copy_from_user_pt,
        .copy_to_user           = copy_to_user_pt,
        .copy_to_user_small     = copy_to_user_pt,
        .copy_in_user           = copy_in_user_pt,
        .clear_user             = clear_user_pt,
        .strnlen_user           = strnlen_user_pt,
        .strncpy_from_user      = strncpy_from_user_pt,
        .futex_atomic_op        = futex_atomic_op_pt,
        .futex_atomic_cmpxchg   = futex_atomic_cmpxchg_pt,
};