arch/s390/lib/uaccess.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  Standard user space access functions based on mvcp/mvcs and doing
 *  interesting things in the secondary space mode.
 *
 *  Copyright IBM Corp. 2006,2014
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	       Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>

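/*
 * MVCOS (MOVE WITH OPTIONAL SPECIFICATIONS, facility 27) can copy between
 * the kernel and user address spaces in a single instruction.  On machine
 * generations older than z10 its availability is probed at boot and cached
 * in a static key; kernels built for z10 or later assume it is present.
 */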
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
static DEFINE_STATIC_KEY_FALSE(have_mvcos);

static int __init uaccess_init(void)
{
	if (test_facility(27))
		static_branch_enable(&have_mvcos);
	return 0;
}
early_initcall(uaccess_init);

static inline int copy_with_mvcos(void)
{
	if (static_branch_likely(&have_mvcos))
		return 1;
	return 0;
}
#else
static inline int copy_with_mvcos(void)
{
	return 1;
}
#endif

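/*
 * Copy from user space with MVCOS.  Register 0 holds the operand-access
 * control that makes the source operand address the user (secondary)
 * space.  On a fault the copy is retried with the length reduced to the
 * distance to the next source page boundary, so everything up to the
 * faulting page is still copied; the number of uncopied bytes is returned.
 */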
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
						 unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x81UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
		"6: jz    4f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j     0b\n"
		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
		"7: slgr  %0,%4\n"
		"   j     5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

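/*
 * Fallback when MVCOS is not available: mvcp (MOVE TO PRIMARY) copies up
 * to 256 bytes per iteration from the user (secondary) space to the
 * kernel, which is addressed through the primary space set up by
 * load_kernel_asce(), with sacf switching the address-space-control mode
 * around the copy.
 */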
static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
						unsigned long size)
{
	unsigned long tmp1, tmp2;

	load_kernel_asce();
	tmp1 = -256UL;
	asm volatile(
		"   sacf  0\n"
		"0: mvcp  0(%0,%2),0(%1),%3\n"
		"7: jz    5f\n"
		"1: algr  %0,%3\n"
		"   la    %1,256(%1)\n"
		"   la    %2,256(%2)\n"
		"2: mvcp  0(%0,%2),0(%1),%3\n"
		"8: jnz   1b\n"
		"   j     5f\n"
		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
		"   lghi  %3,-4096\n"
		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   6f\n"
		"4: mvcp  0(%4,%2),0(%1),%3\n"
		"9: slgr  %0,%4\n"
		"   j     6f\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}

unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_from_user_mvcos(to, from, n);
	return copy_from_user_mvcp(to, from, n);
}
EXPORT_SYMBOL(raw_copy_from_user);

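/*
 * Copy to user space with MVCOS, the mirror image of
 * copy_from_user_mvcos(): register 0 (0x810000) directs the destination
 * operand to the user address space, and a fault is handled by retrying
 * up to the next destination page boundary.
 */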
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
					       unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x810000UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"6: jz    4f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j     0b\n"
		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
		"7: slgr  %0,%4\n"
		"   j     5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

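/*
 * Fallback for copies to user space: mvcs (MOVE TO SECONDARY) moves up to
 * 256 bytes per iteration from the kernel (primary) space to the user
 * (secondary) space, again bracketed by sacf.
 */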
static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
					      unsigned long size)
{
	unsigned long tmp1, tmp2;

	load_kernel_asce();
	tmp1 = -256UL;
	asm volatile(
		"   sacf  0\n"
		"0: mvcs  0(%0,%1),0(%2),%3\n"
		"7: jz    5f\n"
		"1: algr  %0,%3\n"
		"   la    %1,256(%1)\n"
		"   la    %2,256(%2)\n"
		"2: mvcs  0(%0,%1),0(%2),%3\n"
		"8: jnz   1b\n"
		"   j     5f\n"
		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
		"   lghi  %3,-4096\n"
		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   6f\n"
		"4: mvcs  0(%4,%1),0(%2),%3\n"
		"9: slgr  %0,%4\n"
		"   j     6f\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}

unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_to_user_mvcos(to, from, n);
	return copy_to_user_mvcs(to, from, n);
}
EXPORT_SYMBOL(raw_copy_to_user);

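/*
 * Copy within user space with MVCOS: register 0 (0x810081) selects the
 * user address space for both operands.  Unlike the other MVCOS helpers
 * there is no retry with a reduced length after a fault (see the FIXME
 * below).
 */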
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
					       unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x810081UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	/* FIXME: copy with reduced length. */
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"   jz    2f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j     0b\n"
		"2: slgr  %0,%0\n"
		"3: \n"
		EX_TABLE(0b,3b)
		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

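/*
 * Fallback for copies within user space: switch to secondary-space mode
 * (sacf 256) so that a plain mvc operates on user memory, copy in
 * 256 byte chunks and use ex to execute a length-adjusted mvc for the
 * remainder.
 */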
static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
					     unsigned long size)
{
	unsigned long tmp1;

	load_kernel_asce();
	asm volatile(
		"   sacf  256\n"
		"   aghi  %0,-1\n"
		"   jo    5f\n"
		"   bras  %3,3f\n"
		"0: aghi  %0,257\n"
		"1: mvc   0(1,%1),0(%2)\n"
		"   la    %1,1(%1)\n"
		"   la    %2,1(%2)\n"
		"   aghi  %0,-1\n"
		"   jnz   1b\n"
		"   j     5f\n"
		"2: mvc   0(256,%1),0(%2)\n"
		"   la    %1,256(%1)\n"
		"   la    %2,256(%2)\n"
		"3: aghi  %0,-256\n"
		"   jnm   2b\n"
		"4: ex    %0,1b-0b(%3)\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	return size;
}

unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_in_user_mvcos(to, from, n);
	return copy_in_user_mvc(to, from, n);
}
EXPORT_SYMBOL(raw_copy_in_user);

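/*
 * Zero user memory with MVCOS, using empty_zero_page as the source of
 * zero bytes.  Fault handling follows the same retry-to-page-boundary
 * scheme as the copy routines above.
 */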
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x810000UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
		"   jz    4f\n"
		"1: algr  %0,%2\n"
		"   slgr  %1,%2\n"
		"   j     0b\n"
		"2: la    %3,4095(%1)\n"/* %3 = to + 4095 */
		"   nr    %3,%2\n"	/* %3 = (to + 4095) & -4096 */
		"   slgr  %3,%1\n"
		"   clgr  %0,%3\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
		"   slgr  %0,%3\n"
		"   j     5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
	return size;
}

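/*
 * Fallback: clear user memory with xc (exclusive OR of the destination
 * with itself) in secondary-space mode, 256 bytes at a time, using ex to
 * execute a length-adjusted xc for the remainder.
 */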
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
	unsigned long tmp1, tmp2;

	load_kernel_asce();
	asm volatile(
		"   sacf  256\n"
		"   aghi  %0,-1\n"
		"   jo    5f\n"
		"   bras  %3,3f\n"
		"   xc    0(1,%1),0(%1)\n"
		"0: aghi  %0,257\n"
		"   la    %2,255(%1)\n"	/* %2 = ptr + 255 */
		"   srl   %2,12\n"
		"   sll   %2,12\n"	/* %2 = (ptr + 255) & -4096 */
		"   slgr  %2,%1\n"
		"   clgr  %0,%2\n"	/* clear crosses next page boundary? */
		"   jnh   5f\n"
		"   aghi  %2,-1\n"
		"1: ex    %2,0(%3)\n"
		"   aghi  %2,1\n"
		"   slgr  %0,%2\n"
		"   j     5f\n"
		"2: xc    0(256,%1),0(%1)\n"
		"   la    %1,256(%1)\n"
		"3: aghi  %0,-256\n"
		"   jnm   2b\n"
		"4: ex    %0,0(%3)\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}

unsigned long __clear_user(void __user *to, unsigned long size)
{
	if (copy_with_mvcos())
		return clear_user_mvcos(to, size);
	return clear_user_xc(to, size);
}
EXPORT_SYMBOL(__clear_user);

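/*
 * strnlen in user space: srst (SEARCH STRING) with register 0 set to zero
 * scans for the terminating '\0' while running in secondary-space mode.
 * The returned length includes the terminator; a fault yields 0.
 */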
static inline unsigned long strnlen_user_srst(const char __user *src,
					      unsigned long size)
{
	register unsigned long reg0 asm("0") = 0;
	unsigned long tmp1, tmp2;

	asm volatile(
		"   la    %2,0(%1)\n"
		"   la    %3,0(%0,%1)\n"
		"   slgr  %0,%0\n"
		"   sacf  256\n"
		"0: srst  %3,%2\n"
		"   jo    0b\n"
		"   la    %0,1(%3)\n"	/* strnlen_user result includes the \0 */
		"   slgr  %0,%1\n"
		"1: sacf  768\n"
		EX_TABLE(0b,1b)
		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}

unsigned long __strnlen_user(const char __user *src, unsigned long size)
{
	if (unlikely(!size))
		return 0;
	load_kernel_asce();
	return strnlen_user_srst(src, size);
}
EXPORT_SYMBOL(__strnlen_user);

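/*
 * Copy a NUL terminated string from user space.  The string is fetched
 * with copy_from_user() in L1 cache line sized pieces and strnlen() on
 * the kernel copy finds the terminator; returns the number of bytes
 * copied (not counting the NUL) or -EFAULT.
 */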
long __strncpy_from_user(char *dst, const char __user *src, long size)
{
	size_t done, len, offset, len_str;

	if (unlikely(size <= 0))
		return 0;
	done = 0;
	do {
		offset = (size_t)src & (L1_CACHE_BYTES - 1);
		len = min(size - done, L1_CACHE_BYTES - offset);
		if (copy_from_user(dst, src, len))
			return -EFAULT;
		len_str = strnlen(dst, len);
		done += len_str;
		src += len_str;
		dst += len_str;
	} while ((len_str == len) && (done < size));
	return done;
}
EXPORT_SYMBOL(__strncpy_from_user);