/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/ctl_reg.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })

#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.mm_segment)
#define set_fs(x)							\
({									\
	unsigned long __pto;						\
	current->thread.mm_segment = (x);				\
	__pto = current->thread.mm_segment.ar4 ?			\
		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
	__ctl_load(__pto, 7, 7);					\
})

#define segment_eq(a,b) ((a).ar4 == (b).ar4)
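
/*
 * Hedged usage sketch (not part of this header): code that needs the
 * user-copy routines to operate on a kernel buffer traditionally widens
 * the segment around the access and restores it afterwards.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... call routines that take __user pointers on a kernel address ...
 *	set_fs(old_fs);
 */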
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(type, addr, size) __access_ok(addr, size)
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

#define ARCH_HAS_RELATIVE_EXTABLE
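
/*
 * Illustrative note (an assumption, mirroring extable_fixup() above): with
 * ARCH_HAS_RELATIVE_EXTABLE both fields hold offsets relative to their own
 * location, so the faulting instruction address is recovered the same way
 * the fixup target is:
 *
 *	insn address = (unsigned long)&x->insn + x->insn;
 *	fixup target = (unsigned long)&x->fixup + x->fixup;
 */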
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
					     unsigned long n);
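
/*
 * Minimal usage sketch (hypothetical caller, names are illustrative): the
 * caller performs the access_ok() check that the checked copy_from_user()
 * would otherwise do.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */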
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
					   unsigned long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

#define __put_get_user_asm(to, from, size, spec)		\
({								\
	register unsigned long __reg0 asm("0") = spec;		\
	int __rc;						\
								\
	asm volatile(						\
		"0: mvcos %1,%3,%2\n"				\
		"1: xr    %0,%0\n"				\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3: lhi   %0,%5\n"				\
		"   jg    2b\n"					\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "=Q" (*(to))			\
		: "d" (size), "Q" (*(from)),			\
		  "d" (__reg0), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})
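
/*
 * Note on "spec" (a hedged reading of the MVCOS interface, not stated in
 * this file): general register 0 carries the operand-access controls for
 * MVCOS.  The 0x81 pattern selects the user address space for one operand;
 * in the upper halfword (0x810000UL, __put_user_fn) it applies to the
 * destination, in the lower halfword (0x81UL, __get_user_fn) to the source.
 */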
static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x810000UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char __user *)ptr,
					(unsigned char *)x,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short __user *)ptr,
					(unsigned short *)x,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int __user *)ptr,
					(unsigned int *)x,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long __user *)ptr,
					(unsigned long *)x,
					size, spec);
		break;
	}
	return rc;
}
static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x81UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char *)x,
					(unsigned char __user *)ptr,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short *)x,
					(unsigned short __user *)ptr,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int *)x,
					(unsigned int __user *)ptr,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long *)x,
					(unsigned long __user *)ptr,
					size, spec);
		break;
	}
	return rc;
}
#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : 0;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : 0;
}

#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__pu_err, 0);				\
})

#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})

int __put_user_bad(void) __attribute__((noreturn));
#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})

#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})

int __get_user_bad(void) __attribute__((noreturn));

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
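
/*
 * Usage sketch (hypothetical caller, for illustration only; uptr is assumed
 * to be an int __user * obtained from user space):
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 * The size dispatch is done on sizeof(*uptr), so the pointer type must
 * match the object being transferred.
 */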
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user(to, from, n);
}
/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	if (unlikely(sz != -1 && sz < n)) {
		if (!__builtin_constant_p(n))
			copy_user_overflow(sz, n);
		else
			__bad_copy_user();
		return n;
	}
	return __copy_from_user(to, from, n);
}
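
/*
 * Usage sketch (hypothetical ioctl-style handler; struct foo and uarg are
 * illustrative, not defined here):
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	... operate on karg ...
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 *
 * A nonzero return value means that many bytes were left uncopied.
 */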
unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user(to, from, n);
}
/*
 * Copy a null terminated string from userspace.
 */
long __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return __strncpy_from_user(dst, src, count);
}

unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return __strnlen_user(src, n);
}
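
/*
 * Usage sketch (hypothetical caller; buffer name and length are
 * illustrative):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 *
 * A negative return reports a fault; a return equal to the buffer size
 * indicates the string was truncated and may not be NUL-terminated.
 */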
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)
/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}

int copy_to_user_real(void __user *dest, void *src, unsigned long count);
void s390_kernel_write(void *dst, const void *src, size_t size);

#endif /* __S390_UACCESS_H */