/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long count, aligned;
	int offset, mask;
	long rc = -EFAULT;

	aligned = (unsigned long) dst & ~3UL;
	offset = (unsigned long) dst & 3;
	count = min_t(unsigned long, 4 - offset, size);
	mask = (0xf << (4 - count)) & 0xf;
	mask >>= offset;
	asm volatile(
		"	bras	1,0f\n"
		"	icm	0,0,0(%3)\n"
		"0:	l	0,0(%1)\n"
		"	lra	%1,0(%1)\n"
		"1:	ex	%2,0(1)\n"
		"2:	stura	0,%1\n"
		"	la	%0,0\n"
		"3:\n"
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
		: "+d" (rc), "+a" (aligned)
		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
	return rc ? rc : count;
}
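/*
 * Worked example of the byte-selection arithmetic above (illustration
 * only): for a dst ending in ...2 and size = 3, offset = 2 and count =
 * min(4 - 2, 3) = 2 bytes. mask = (0xf << (4 - 2)) & 0xf = 0xc, shifted
 * right by offset gives 0x3, i.e. the ICM executed under this mask
 * replaces only the two low-order bytes of the loaded word before
 * STURA stores the word back to the real address.
 */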
long probe_kernel_write(void *dst, const void *src, size_t size)
{
	long copied = 0;

	while (size) {
		copied = probe_kernel_write_odd(dst, src, size);
		if (copied < 0)
			break;
		dst += copied;
		src += copied;
		size -= copied;
	}
	return copied < 0 ? -EFAULT : 0;
}
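/*
 * Hedged usage sketch (not part of this file; "some_kernel_word" is a
 * made-up name for illustration): a caller that patches a word of
 * possibly write-protected kernel memory might look like
 *
 *	static unsigned int some_kernel_word;
 *
 *	static int patch_word(unsigned int val)
 *	{
 *		return probe_kernel_write(&some_kernel_word, &val,
 *					  sizeof(val));
 *	}
 *
 * A return value of 0 means all bytes were written; -EFAULT means a
 * write faulted part way through.
 */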
static int __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src  asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"
		"	lhi	%0,0x0\n"
		"2:\n"
		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}
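/*
 * Note on the loop above: MVCLE is interruptible and sets condition
 * code 3 when it stops before the copy is complete, so the "jo 0b"
 * branch simply resumes it until the remaining length reaches zero.
 * The even/odd register pairs 2/3 and 4/5 hold the address and length
 * of destination and source, as MVCLE requires.
 */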
/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	unsigned long flags;
	int rc;

	if (!count)
		return 0;
	local_irq_save(flags);
	__arch_local_irq_stnsm(0xfbUL);
	rc = __memcpy_real(dest, src, count);
	local_irq_restore(flags);
	return rc;
}
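/*
 * Note on the 0xfb mask used above: STNSM ANDs the mask into the first
 * byte of the PSW; 0xfb (1111 1011) clears bit 5 and thereby switches
 * DAT off, so __memcpy_real() operates on real addresses. Interrupts
 * are already disabled by local_irq_save(), and local_irq_restore()
 * brings back the original PSW mask afterwards.
 */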
/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
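/*
 * Design note: with the prefix register temporarily set to zero, real
 * addresses in the lowcore range are no longer redirected to this
 * CPU's prefix area, so the memcpy() above sees absolute memory.
 * Machine checks are disabled for that window because handling one
 * would require a valid lowcore.
 */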
/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
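/*
 * Design note: copy_to_user() may fault and therefore needs DAT, while
 * memcpy_real() must run with DAT off, so the two cannot be combined
 * into a single pass. The intermediate page buffer lets each chunk be
 * fetched in real mode first and handed to user space afterwards.
 */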
/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct _lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}
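/*
 * Example: if some CPU's prefix register points at 0x12000, prefixing
 * swaps the range 0x12000 ... 0x12000 + sizeof(struct _lowcore) - 1
 * with absolute 0 ... sizeof(struct _lowcore) - 1, which is why both
 * the zero page range and every online CPU's lowcore range count as
 * "swapped" here.
 */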
/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is maximum one page large.
 */
void *xlate_dev_mem_ptr(unsigned long addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}
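/*
 * Hedged usage sketch (illustration only; use_mapping() is a made-up
 * consumer): callers are expected to pair the two functions, e.g.
 *
 *	void *p = xlate_dev_mem_ptr(addr);
 *
 *	if (p) {
 *		use_mapping(p);
 *		unxlate_dev_mem_ptr(addr, p);
 *	}
 *
 * unxlate_dev_mem_ptr() frees the bounce page only when one was
 * actually allocated, i.e. when the returned pointer differs from the
 * original address.
 */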
/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}