arch/s390/mm/maccess.c

/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/system.h>

/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
static long probe_kernel_write_odd(void *dst, void *src, size_t size)
{
	unsigned long count, aligned;
	int offset, mask;
	int rc = -EFAULT;

	aligned = (unsigned long) dst & ~3UL;
	offset = (unsigned long) dst & 3;
	count = min_t(unsigned long, 4 - offset, size);
	mask = (0xf << (4 - count)) & 0xf;
	mask >>= offset;
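	/*
	 * Worked example (illustrative values only): for a dst with
	 * (dst & 3) == 1 and size >= 3, count = min(4 - 1, size) = 3 and
	 * mask = ((0xf << 1) & 0xf) >> 1 = 0x7, so the executed icm below
	 * inserts three bytes from src into bytes 1-3 of the aligned word
	 * before stura stores the whole word back.
	 */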
	asm volatile(
		"	bras	1,0f\n"		/* %r1 = address of the icm below */
		"	icm	0,0,0(%3)\n"	/* ex target: insert src bytes under mask */
		"0:	l	0,0(%1)\n"	/* load the aligned word containing dst */
		"	lra	%1,0(%1)\n"	/* translate dst to its real address */
		"1:	ex	%2,0(1)\n"	/* merge src bytes into %r0 via icm */
		"2:	stura	0,%1\n"	/* store %r0 via real address, bypassing DAT */
		"	la	%0,0\n"		/* success: rc = 0 */
		"3:\n"
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
		: "+d" (rc), "+a" (aligned)
		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
	return rc ? rc : count;
}

long probe_kernel_write(void *dst, void *src, size_t size)
{
	long copied = 0;

	while (size) {
		copied = probe_kernel_write_odd(dst, src, size);
		if (copied < 0)
			break;
		dst += copied;
		src += copied;
		size -= copied;
	}
	return copied < 0 ? -EFAULT : 0;
}
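
/*
 * Usage sketch (illustrative only): a debugger- or kprobes-style caller
 * could patch a four-byte kernel opcode through probe_kernel_write().
 * The helper name patch_text_example() and the u32 opcode parameter are
 * assumptions made for this sketch, not part of the file above.
 */
static int __maybe_unused patch_text_example(void *addr, u32 new_insn)
{
	/* The write bypasses DAT and write protection via probe_kernel_write_odd(). */
	return probe_kernel_write(addr, &new_insn, sizeof(new_insn));
}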