// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/stacktrace.h>

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		"	bras	1,0f\n"			/* %r1 = address of the mvc template below */
		"	mvc	0(1,%4),0(%5)\n"	/* template: copy from src into tmp[offset] */
		"0:	mvc	0(8,%3),0(%0)\n"	/* read aligned doubleword into tmp */
		"	ex	%1,0(1)\n"		/* run the template with length = count */
		"	lg	%1,0(%3)\n"		/* load the modified doubleword */
		"	lra	%0,0(%0)\n"		/* real address of the destination */
		"	sturg	%1,%0\n"		/* store via real address, bypassing DAT */
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from the destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	if (!(flags & PSW_MASK_DAT)) {
		/* DAT is off: no write protection, a plain memcpy works */
		memcpy(dst, src, size);
	} else {
		while (size) {
			copied = s390_kernel_write_odd(tmp, src, size);
			tmp += copied;
			src += copied;
			size -= copied;
		}
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}
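
/*
 * Example (illustrative): s390_kernel_write() is the backend for code
 * patching paths that must modify otherwise write-protected kernel
 * objects. A hypothetical caller; the names below are made up:
 *
 *	static const u32 new_insn = 0x07000700;
 *
 *	static void patch_site(void *site)
 *	{
 *		s390_kernel_write(site, &new_insn, sizeof(new_insn));
 *	}
 */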

static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src  asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"	/* copy; cc == 3 means not yet done */
		"1:	jo	0b\n"		/* retry until mvcle completes */
		"	lhi	%0,0x0\n"	/* success: rc = 0 */
		"2:\n"
		EX_TABLE(0b,2b)
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}
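
/*
 * In C-like pseudo code the mvcle loop above is (sketch only; mvcle() is
 * a made-up stand-in for the instruction): mvcle copies a CPU-determined
 * chunk, sets condition code 3 if there is more to do, and is retried;
 * a fault takes the EX_TABLE exit at label 2 and leaves rc at its
 * initial -EFAULT.
 *
 *	do {
 *		cc = mvcle(&_dest, &_src, &_len1, &_len2);
 *	} while (cc == 3);
 *	rc = 0;
 */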

static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
							unsigned long src,
							unsigned long count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	flags = arch_local_irq_save();
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	__arch_local_irq_stnsm(0xf8);	// disable DAT
	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
	if (flags & PSW_MASK_DAT)
		__arch_local_irq_stosm(0x04);	// enable DAT
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	int rc;

	if (S390_lowcore.nodat_stack != 0) {
		preempt_disable();
		rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3,
				   dest, src, count);
		preempt_enable();
		return rc;
	}
	/*
	 * This is a really early memcpy_real call, the stacks are
	 * not set up yet. Just call _memcpy_real on the early boot
	 * stack.
	 */
	return _memcpy_real((unsigned long) dest, (unsigned long) src,
			    (unsigned long) count);
}
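
/*
 * Example (illustrative): memcpy_real() is used where memory has to be
 * read regardless of what is currently mapped, e.g. by the s390 dump
 * code fetching data from absolute storage. Hypothetical snippet;
 * "sa" and "real_addr" are made up for the example:
 *
 *	struct save_area sa;
 *
 *	if (memcpy_real(&sa, (void *) real_addr, sizeof(sa)))
 *		return -EFAULT;
 */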

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
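
/*
 * Example (illustrative sketch): with the prefix register temporarily set
 * to zero, low addresses reach the absolute lowcore instead of this CPU's
 * prefixed copy. A hypothetical update of an absolute lowcore field
 * ("some_field" is made up):
 *
 *	unsigned long val = 1;
 *
 *	memcpy_absolute(&S390_lowcore.some_field, &val, sizeof(val));
 */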

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
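
/*
 * Example (illustrative): copy_to_user_real() suits read() handlers that
 * expose real storage to user space, as the zcore dump interface does.
 * Hypothetical handler; the names are made up:
 *
 *	static ssize_t dump_read(struct file *file, char __user *ubuf,
 *				 size_t count, loff_t *ppos)
 *	{
 *		if (copy_to_user_real(ubuf, (void *)(unsigned long) *ppos,
 *				      count))
 *			return -EFAULT;
 *		*ppos += count;
 *		return count;
 *	}
 */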

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is maximum one page large.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
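
/*
 * Example (illustrative): the calling pattern used by drivers/char/mem.c
 * for the two helpers above, with made-up variable names:
 *
 *	void *ptr = xlate_dev_mem_ptr(addr);
 *
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(ubuf, ptr, size))
 *		rc = -EFAULT;
 *	unxlate_dev_mem_ptr(addr, ptr);
 */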