nfsd4: typo logical vs bitwise negate for want_mask
[linux-btrfs-devel.git] / arch / s390 / mm / maccess.c
blob5dbbaa6e594c8192302a23525bfcbfb05685c50e
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 */
10 #include <linux/uaccess.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <asm/system.h>
17 * This function writes to kernel memory bypassing DAT and possible
18 * write protection. It copies one to four bytes from src to dst
19 * using the stura instruction.
20 * Returns the number of bytes copied or -EFAULT.
22 static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
24 unsigned long count, aligned;
25 int offset, mask;
26 int rc = -EFAULT;
28 aligned = (unsigned long) dst & ~3UL;
29 offset = (unsigned long) dst & 3;
30 count = min_t(unsigned long, 4 - offset, size);
31 mask = (0xf << (4 - count)) & 0xf;
32 mask >>= offset;
33 asm volatile(
34 " bras 1,0f\n"
35 " icm 0,0,0(%3)\n"
36 "0: l 0,0(%1)\n"
37 " lra %1,0(%1)\n"
38 "1: ex %2,0(1)\n"
39 "2: stura 0,%1\n"
40 " la %0,0\n"
41 "3:\n"
42 EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
43 : "+d" (rc), "+a" (aligned)
44 : "a" (mask), "a" (src) : "cc", "memory", "0", "1");
45 return rc ? rc : count;
48 long probe_kernel_write(void *dst, const void *src, size_t size)
50 long copied = 0;
52 while (size) {
53 copied = probe_kernel_write_odd(dst, src, size);
54 if (copied < 0)
55 break;
56 dst += copied;
57 src += copied;
58 size -= copied;
60 return copied < 0 ? -EFAULT : 0;
63 int memcpy_real(void *dest, void *src, size_t count)
65 register unsigned long _dest asm("2") = (unsigned long) dest;
66 register unsigned long _len1 asm("3") = (unsigned long) count;
67 register unsigned long _src asm("4") = (unsigned long) src;
68 register unsigned long _len2 asm("5") = (unsigned long) count;
69 unsigned long flags;
70 int rc = -EFAULT;
72 if (!count)
73 return 0;
74 flags = __arch_local_irq_stnsm(0xf8UL);
75 asm volatile (
76 "0: mvcle %1,%2,0x0\n"
77 "1: jo 0b\n"
78 " lhi %0,0x0\n"
79 "2:\n"
80 EX_TABLE(1b,2b)
81 : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
82 "+d" (_len2), "=m" (*((long *) dest))
83 : "m" (*((long *) src))
84 : "cc", "memory");
85 arch_local_irq_restore(flags);
86 return rc;
90 * Copy memory to absolute zero
92 void copy_to_absolute_zero(void *dest, void *src, size_t count)
94 unsigned long cr0;
96 BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
97 preempt_disable();
98 __ctl_store(cr0, 0, 0);
99 __ctl_clear_bit(0, 28); /* disable lowcore protection */
100 memcpy_real(dest + store_prefix(), src, count);
101 __ctl_load(cr0, 0, 0);
102 preempt_enable();