/*
 * arch/s390/include/asm/uaccess.h
 * (source: linux/fpc-iii.git, blob c6707885e7c238962f451c5c6062767a55bacb1f;
 *  unrelated scraper residue removed)
 */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * S390 version
4 * Copyright IBM Corp. 1999, 2000
5 * Author(s): Hartmut Penner (hp@de.ibm.com),
6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * Derived from "include/asm-i386/uaccess.h"
9 */
10 #ifndef __S390_UACCESS_H
11 #define __S390_UACCESS_H
/*
 * User space memory access functions
 */
16 #include <asm/processor.h>
17 #include <asm/ctl_reg.h>
18 #include <asm/extable.h>
19 #include <asm/facility.h>
21 void debug_user_asce(void);
/*
 * On s390 the kernel and user address spaces are separate, so every user
 * pointer is "in range"; bad accesses are caught at run time through the
 * exception tables instead of an address-limit check.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
/*
 * access_ok() always succeeds on s390 (see __range_ok); __chk_user_ptr()
 * only triggers sparse's address-space checking and generates no code.
 */
#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(addr, size) __access_ok(addr, size)
/* Arch back-ends for copy_{from,to}_user(); return number of bytes NOT copied. */
unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);

unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);

/* With KASAN the out-of-line copy helpers are needed for instrumentation. */
#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif

/* Link-time errors for unsupported access sizes in __put_user()/__get_user(). */
int __put_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn));
50 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
52 #define __put_get_user_asm(to, from, size, spec) \
53 ({ \
54 register unsigned long __reg0 asm("0") = spec; \
55 int __rc; \
57 asm volatile( \
58 "0: mvcos %1,%3,%2\n" \
59 "1: xr %0,%0\n" \
60 "2:\n" \
61 ".pushsection .fixup, \"ax\"\n" \
62 "3: lhi %0,%5\n" \
63 " jg 2b\n" \
64 ".popsection\n" \
65 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
66 : "=d" (__rc), "+Q" (*(to)) \
67 : "d" (size), "Q" (*(from)), \
68 "d" (__reg0), "K" (-EFAULT) \
69 : "cc"); \
70 __rc; \
73 static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
75 unsigned long spec = 0x810000UL;
76 int rc;
78 switch (size) {
79 case 1:
80 rc = __put_get_user_asm((unsigned char __user *)ptr,
81 (unsigned char *)x,
82 size, spec);
83 break;
84 case 2:
85 rc = __put_get_user_asm((unsigned short __user *)ptr,
86 (unsigned short *)x,
87 size, spec);
88 break;
89 case 4:
90 rc = __put_get_user_asm((unsigned int __user *)ptr,
91 (unsigned int *)x,
92 size, spec);
93 break;
94 case 8:
95 rc = __put_get_user_asm((unsigned long __user *)ptr,
96 (unsigned long *)x,
97 size, spec);
98 break;
99 default:
100 __put_user_bad();
101 break;
103 return rc;
106 static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
108 unsigned long spec = 0x81UL;
109 int rc;
111 switch (size) {
112 case 1:
113 rc = __put_get_user_asm((unsigned char *)x,
114 (unsigned char __user *)ptr,
115 size, spec);
116 break;
117 case 2:
118 rc = __put_get_user_asm((unsigned short *)x,
119 (unsigned short __user *)ptr,
120 size, spec);
121 break;
122 case 4:
123 rc = __put_get_user_asm((unsigned int *)x,
124 (unsigned int __user *)ptr,
125 size, spec);
126 break;
127 case 8:
128 rc = __put_get_user_asm((unsigned long *)x,
129 (unsigned long __user *)ptr,
130 size, spec);
131 break;
132 default:
133 __get_user_bad();
134 break;
136 return rc;
139 #else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
141 static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
143 size = raw_copy_to_user(ptr, x, size);
144 return size ? -EFAULT : 0;
147 static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
149 size = raw_copy_from_user(x, ptr, size);
150 return size ? -EFAULT : 0;
153 #endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __pu_err = -EFAULT;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		__pu_err = __put_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		break;							\
	default:							\
		__put_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__pu_err, 0);					\
})
/* Like __put_user(), plus the might-sleep/fault annotation for lockdep. */
#define put_user(x, ptr)						\
({									\
	might_fault();							\
	__put_user(x, ptr);						\
})
/*
 * Fetch *ptr into x, dispatching on the pointee size. Each case reads into
 * a zero-initialized local of the matching width so a faulting access
 * leaves (x) with a defined value; the __force cast re-applies the
 * destination type without a sparse address-space warning.
 */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = -EFAULT;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		unsigned char __x = 0;					\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	};								\
	case 2: {							\
		unsigned short __x = 0;					\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	};								\
	case 4: {							\
		unsigned int __x = 0;					\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	};								\
	case 8: {							\
		unsigned long long __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	};								\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__gu_err, 0);					\
})
/* Like __get_user(), plus the might-sleep/fault annotation for lockdep. */
#define get_user(x, ptr)						\
({									\
	might_fault();							\
	__get_user(x, ptr);						\
})
232 unsigned long __must_check
233 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
236 * Copy a null terminated string from userspace.
239 long __strncpy_from_user(char *dst, const char __user *src, long count);
241 static inline long __must_check
242 strncpy_from_user(char *dst, const char __user *src, long count)
244 might_fault();
245 return __strncpy_from_user(dst, src, count);
248 unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);
250 static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
252 might_fault();
253 return __strnlen_user(src, n);
257 * Zero Userspace
259 unsigned long __must_check __clear_user(void __user *to, unsigned long size);
261 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
263 might_fault();
264 return __clear_user(to, n);
/* Copy to a real (physical) user address, bypassing dynamic translation. */
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
/* Write to read-only kernel text/data (e.g. for code patching). */
void *s390_kernel_write(void *dst, const void *src, size_t size);

/* This arch implements __{get,put}_kernel_nofault() below. */
#define HAVE_GET_KERNEL_NOFAULT
272 int __noreturn __put_kernel_bad(void);
274 #define __put_kernel_asm(val, to, insn) \
275 ({ \
276 int __rc; \
278 asm volatile( \
279 "0: " insn " %2,%1\n" \
280 "1: xr %0,%0\n" \
281 "2:\n" \
282 ".pushsection .fixup, \"ax\"\n" \
283 "3: lhi %0,%3\n" \
284 " jg 2b\n" \
285 ".popsection\n" \
286 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
287 : "=d" (__rc), "+Q" (*(to)) \
288 : "d" (val), "K" (-EFAULT) \
289 : "cc"); \
290 __rc; \
/*
 * Non-faulting kernel store: widen the source value to u64, pick the
 * store instruction matching sizeof(type), and jump to err_label on fault.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	u64 __x = (u64)(*((type *)(src)));				\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)
319 int __noreturn __get_kernel_bad(void);
321 #define __get_kernel_asm(val, from, insn) \
322 ({ \
323 int __rc; \
325 asm volatile( \
326 "0: " insn " %1,%2\n" \
327 "1: xr %0,%0\n" \
328 "2:\n" \
329 ".pushsection .fixup, \"ax\"\n" \
330 "3: lhi %0,%3\n" \
331 " jg 2b\n" \
332 ".popsection\n" \
333 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
334 : "=d" (__rc), "+d" (val) \
335 : "Q" (*(from)), "K" (-EFAULT) \
336 : "cc"); \
337 __rc; \
/*
 * Non-faulting kernel load: read into a zero-initialized local of the
 * width matching sizeof(type) using the corresponding load instruction,
 * store it through dst, and jump to err_label on fault.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		u8 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 2: {							\
		u16 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 4: {							\
		u32 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 8: {							\
		u64 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)
381 #endif /* __S390_UACCESS_H */