/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
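/*
 * Fall-back fault handler for the page table walkers below: look up the
 * vma for the faulting address and let handle_mm_fault() resolve the
 * fault, mimicking the regular fault path. Returns 0 on success and a
 * non-zero value if the access is invalid.
 */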
static int __handle_fault(struct mm_struct *mm, unsigned long address,
			  int write_access)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;
	int fault;

	if (in_atomic())
		return ret;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
		if (expand_stack(vma, address))
			goto out;
	}

	if (!write_access) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto out;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
	}

survive:
	fault = handle_mm_fault(mm, vma, address, write_access);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto out_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	ret = 0;
out:
	up_read(&mm->mmap_sem);
	return ret;

out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	return ret;

out_sigbus:
	up_read(&mm->mmap_sem);
	current->thread.prot_addr = address;
	current->thread.trap_no = 0x11;
	force_sig(SIGBUS, current);
	return ret;
}
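/*
 * Copy "n" bytes between a user address and a kernel buffer by walking
 * the page tables of the current process; "write_user" selects whether
 * the user page is the destination. Returns the number of bytes that
 * could not be copied.
 */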
static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
			     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd = pgd_offset(mm, uaddr);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			goto fault;

		pmd = pmd_offset(pgd, uaddr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			goto fault;

		pte = pte_offset_map(pmd, uaddr);
		if (!pte || !pte_present(*pte) ||
		    (write_user && !pte_write(*pte)))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			goto out;

		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}
/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static unsigned long __dat_user_addr(unsigned long uaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long pfn, ret;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int rc;

	ret = 0;
retry:
	pgd = pgd_offset(mm, uaddr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto fault;

	pmd = pmd_offset(pgd, uaddr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto fault;

	pte = pte_offset_map(pmd, uaddr);
	if (!pte || !pte_present(*pte))
		goto fault;

	pfn = pte_pfn(*pte);
	if (!pfn_valid(pfn))
		goto out;

	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
out:
	return ret;
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(mm, uaddr, 0);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	goto out;
}
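/*
 * Exported copy primitives: with KERNEL_DS the "user" pointer is really
 * a kernel address and a plain memcpy() suffices, otherwise the copy
 * goes through __user_copy_pt().
 */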
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
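/* Clear "n" bytes of user memory by copying from empty_zero_page. */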
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				     &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}
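/*
 * Determine the length of a user space string, at most "count" bytes,
 * by mapping it page by page and running strnlen() on the kernel
 * mapping. The result includes the terminating zero byte.
 */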
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	char *addr;
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, len;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd = pgd_offset(mm, uaddr);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			goto fault;

		pmd = pmd_offset(pgd, uaddr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			goto fault;

		pte = pte_offset_map(pmd, uaddr);
		if (!pte || !pte_present(*pte))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn)) {
			done = -1;
			goto out;
		}

		offset = uaddr & (PAGE_SIZE - 1);
		addr = (char *)(pfn << PAGE_SHIFT) + offset;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen(addr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
out:
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, 0))
		return 0;
	goto retry;
}
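/*
 * Copy a string from user space into "dst", bounded by "count" and by
 * the string length reported by strnlen_user_pt().
 */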
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}
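/*
 * Copy "n" bytes from one user address to another. Both the source and
 * the destination mapping are resolved before each chunk is copied; a
 * missing or read-only page is handled via __handle_fault() and the
 * walk is retried.
 */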
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
		      uaddr, done, size;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	pgd_t *pgd_from, *pgd_to;
	pmd_t *pmd_from, *pmd_to;
	pte_t *pte_from, *pte_to;
	int write_user;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd_from = pgd_offset(mm, uaddr_from);
		if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
			uaddr = uaddr_from;
			write_user = 0;
			goto fault;
		}
		pgd_to = pgd_offset(mm, uaddr_to);
		if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
			uaddr = uaddr_to;
			write_user = 1;
			goto fault;
		}

		pmd_from = pmd_offset(pgd_from, uaddr_from);
		if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
			uaddr = uaddr_from;
			write_user = 0;
			goto fault;
		}
		pmd_to = pmd_offset(pgd_to, uaddr_to);
		if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
			uaddr = uaddr_to;
			write_user = 1;
			goto fault;
		}

		pte_from = pte_offset_map(pmd_from, uaddr_from);
		if (!pte_from || !pte_present(*pte_from)) {
			uaddr = uaddr_from;
			write_user = 0;
			goto fault;
		}
		pte_to = pte_offset_map(pmd_to, uaddr_to);
		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
			uaddr = uaddr_to;
			write_user = 1;
			goto fault;
		}

		pfn_from = pte_pfn(*pte_from);
		if (!pfn_valid(pfn_from))
			goto out;
		pfn_to = pte_pfn(*pte_to);
		if (!pfn_valid(pfn_to))
			goto out;

		offset_from = uaddr_from & (PAGE_SIZE - 1);
		offset_to = uaddr_to & (PAGE_SIZE - 1);
		offset_max = max(offset_from, offset_to);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}
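/*
 * Atomic futex operation: load the futex value, compute the new value
 * with "insn" and store it back with compare and swap. Faults are
 * handled through the exception table and leave -EFAULT in "ret".
 */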
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l   %1,0(%6)\n"				\
		     "1: " insn						\
		     "2: cs  %1,%2,0(%6)\n"				\
		     "3: jl  1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc");
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	put_page(virt_to_page(uaddr));
	*old = oldval;
	return ret;
}
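/*
 * Compare and swap a futex word in place: the user address is
 * translated and pinned as above, then a single CS instruction
 * performs the exchange.
 */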
int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
	int ret;

	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	asm volatile("   cs   %1,%4,0(%5)\n"
		     "0: lr   %0,%1\n"
		     "1:\n"
		     EX_TABLE(0b,1b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory");
	put_page(virt_to_page(uaddr));
	return ret;
}
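/*
 * The page-table-walk based uaccess operations, used when no hardware
 * support for user copies is available.
 */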
struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};