/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Authors:	Bjorn Wesen (bjornw@axis.com)
 *		Hans-Peter Nilsson (hp@axis.com)
 */
/* Asm:s have been tweaked (within the domain of correctness) to give
   satisfactory results for "gcc version 2.96 20000427 (experimental)".

   Register $r9 is chosen for temporaries, being a call-clobbered register
   first in line to be used (notably for local blocks), not colliding with
   parameter registers.  */
#ifndef _CRIS_UACCESS_H
#define _CRIS_UACCESS_H
#include <asm/processor.h>
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
/* addr_limit is the maximum accessible address for the task. we misuse
 * the KERNEL_DS and USER_DS values to both assign and compare the
 * addr_limit values through the equally misnamed get/set_fs macros.
 */
#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)
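
/*
 * Illustrative sketch, not part of the original header: the usual way a
 * caller temporarily lifts the address limit so a kernel pointer can be fed
 * to a user-access routine, restoring it afterwards.  "kernel_buf" and
 * "do_uaccess_on()" are hypothetical names used only for this example.
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	do_uaccess_on(kernel_buf);
 *	set_fs(old_fs);
 */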
#define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) \
	(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
#include <arch/uaccess.h>
#include <asm/extable.h>
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * CRIS, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
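
/*
 * Illustrative sketch, not part of the original header: get_user() and
 * put_user() return 0 on success and -EFAULT on an inaccessible address,
 * while the "__" variants assume access_ok() has already been done on the
 * range.  "uptr", "val" and "err" are hypothetical names used only for
 * this example.
 *
 *	int val, err;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (access_ok(VERIFY_WRITE, uptr, sizeof(int)))
 *		err = __put_user(val + 1, uptr);
 */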
extern long __put_user_bad(void);
#define __put_user_size(x, ptr, size, retval)				\
	do {								\
		retval = 0;						\
		switch (size) {						\
		case 1: __put_user_asm(x, ptr, retval, "move.b"); break; \
		case 2: __put_user_asm(x, ptr, retval, "move.w"); break; \
		case 4: __put_user_asm(x, ptr, retval, "move.d"); break; \
		case 8: __put_user_asm_64(x, ptr, retval); break;	\
		default: __put_user_bad();				\
		}							\
	} while (0)
#define __get_user_size(x, ptr, size, retval)				\
	do {								\
		retval = 0;						\
		switch (size) {						\
		case 1: __get_user_asm(x, ptr, retval, "move.b"); break; \
		case 2: __get_user_asm(x, ptr, retval, "move.w"); break; \
		case 4: __get_user_asm(x, ptr, retval, "move.d"); break; \
		case 8: __get_user_asm_64(x, ptr, retval); break;	\
		default: (x) = __get_user_bad();			\
		}							\
	} while (0)
#define __put_user_nocheck(x, ptr, size)				\
	({								\
		long __pu_err;						\
		__put_user_size((x), (ptr), (size), __pu_err);		\
		__pu_err;						\
	})

#define __put_user_check(x, ptr, size)					\
	({								\
		long __pu_err = -EFAULT;				\
		__typeof__(*(ptr)) *__pu_addr = (ptr);			\
		if (access_ok(VERIFY_WRITE, __pu_addr, size))		\
			__put_user_size((x), __pu_addr, (size), __pu_err); \
		__pu_err;						\
	})
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
#define __get_user_nocheck(x, ptr, size)				\
	({								\
		long __gu_err, __gu_val;				\
		__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
		(x) = (__force __typeof__(*(ptr)))__gu_val;		\
		__gu_err;						\
	})

#define __get_user_check(x, ptr, size)					\
	({								\
		long __gu_err = -EFAULT, __gu_val = 0;			\
		const __typeof__(*(ptr)) *__gu_addr = (ptr);		\
		if (access_ok(VERIFY_READ, __gu_addr, size))		\
			__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
		(x) = (__force __typeof__(*(ptr)))__gu_val;		\
		__gu_err;						\
	})
extern long __get_user_bad(void);
/* More complex functions.  Most are inline, but some call functions that
   live in lib/usercopy.c  */
extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __copy_user_in(void *to, const void __user *from, unsigned long n);
extern unsigned long __do_clear_user(void __user *to, unsigned long n);
static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = -EFAULT;

	if (access_ok(VERIFY_READ, src, 1))
		res = __do_strncpy_from_user(dst, src, count);
	return res;
}
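
/*
 * Illustrative sketch, not part of the original header: on success,
 * strncpy_from_user() returns the length of the copied string (excluding
 * the trailing NUL); on an inaccessible source it returns -EFAULT.
 * "namebuf" and "uname" are hypothetical names used only for this example.
 *
 *	char namebuf[32];
 *	long len = strncpy_from_user(namebuf, uname, sizeof(namebuf));
 *	if (len < 0)
 *		return len;
 */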
/* Note that these expand awfully if made into switch constructs, so
   don't do that.  */
static inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 0)
		;
	else if (n == 1)
		__asm_copy_from_user_1(to, from, ret);
	else if (n == 2)
		__asm_copy_from_user_2(to, from, ret);
	else if (n == 3)
		__asm_copy_from_user_3(to, from, ret);
	else if (n == 4)
		__asm_copy_from_user_4(to, from, ret);
	else if (n == 5)
		__asm_copy_from_user_5(to, from, ret);
	else if (n == 6)
		__asm_copy_from_user_6(to, from, ret);
	else if (n == 7)
		__asm_copy_from_user_7(to, from, ret);
	else if (n == 8)
		__asm_copy_from_user_8(to, from, ret);
	else if (n == 9)
		__asm_copy_from_user_9(to, from, ret);
	else if (n == 10)
		__asm_copy_from_user_10(to, from, ret);
	else if (n == 11)
		__asm_copy_from_user_11(to, from, ret);
	else if (n == 12)
		__asm_copy_from_user_12(to, from, ret);
	else if (n == 13)
		__asm_copy_from_user_13(to, from, ret);
	else if (n == 14)
		__asm_copy_from_user_14(to, from, ret);
	else if (n == 15)
		__asm_copy_from_user_15(to, from, ret);
	else if (n == 16)
		__asm_copy_from_user_16(to, from, ret);
	else if (n == 20)
		__asm_copy_from_user_20(to, from, ret);
	else if (n == 24)
		__asm_copy_from_user_24(to, from, ret);
	else
		ret = __copy_user_in(to, from, n);

	return ret;
}
/* Ditto, don't make a switch out of this.  */
static inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 0)
		;
	else if (n == 1)
		__asm_copy_to_user_1(to, from, ret);
	else if (n == 2)
		__asm_copy_to_user_2(to, from, ret);
	else if (n == 3)
		__asm_copy_to_user_3(to, from, ret);
	else if (n == 4)
		__asm_copy_to_user_4(to, from, ret);
	else if (n == 5)
		__asm_copy_to_user_5(to, from, ret);
	else if (n == 6)
		__asm_copy_to_user_6(to, from, ret);
	else if (n == 7)
		__asm_copy_to_user_7(to, from, ret);
	else if (n == 8)
		__asm_copy_to_user_8(to, from, ret);
	else if (n == 9)
		__asm_copy_to_user_9(to, from, ret);
	else if (n == 10)
		__asm_copy_to_user_10(to, from, ret);
	else if (n == 11)
		__asm_copy_to_user_11(to, from, ret);
	else if (n == 12)
		__asm_copy_to_user_12(to, from, ret);
	else if (n == 13)
		__asm_copy_to_user_13(to, from, ret);
	else if (n == 14)
		__asm_copy_to_user_14(to, from, ret);
	else if (n == 15)
		__asm_copy_to_user_15(to, from, ret);
	else if (n == 16)
		__asm_copy_to_user_16(to, from, ret);
	else if (n == 20)
		__asm_copy_to_user_20(to, from, ret);
	else if (n == 24)
		__asm_copy_to_user_24(to, from, ret);
	else
		ret = __copy_user(to, from, n);

	return ret;
}
/* No switch, please.  */
static inline unsigned long
__constant_clear_user(void __user *to, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 0)
		;
	else if (n == 1)
		__asm_clear_1(to, ret);
	else if (n == 2)
		__asm_clear_2(to, ret);
	else if (n == 3)
		__asm_clear_3(to, ret);
	else if (n == 4)
		__asm_clear_4(to, ret);
	else if (n == 8)
		__asm_clear_8(to, ret);
	else if (n == 12)
		__asm_clear_12(to, ret);
	else if (n == 16)
		__asm_clear_16(to, ret);
	else if (n == 20)
		__asm_clear_20(to, ret);
	else if (n == 24)
		__asm_clear_24(to, ret);
	else
		ret = __do_clear_user(to, n);

	return ret;
}
static inline size_t clear_user(void __user *to, size_t n)
{
	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
		return n;
	if (__builtin_constant_p(n))
		return __constant_clear_user(to, n);
	else
		return __do_clear_user(to, n);
}
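
/*
 * Illustrative sketch, not part of the original header: clear_user()
 * returns the number of bytes that could NOT be cleared, so 0 means
 * complete success.  "ubuf" and "len" are hypothetical names used only
 * for this example.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */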
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n))
		return __constant_copy_from_user(to, from, n);
	else
		return __copy_user_in(to, from, n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n))
		return __constant_copy_to_user(to, from, n);
	else
		return __copy_user(to, from, n);
}
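
/*
 * Illustrative sketch, not part of the original header: like clear_user()
 * above, raw_copy_from_user() and raw_copy_to_user() return the number of
 * bytes left uncopied; the generic copy_{from,to}_user() wrappers build on
 * them, and callers normally just test for a nonzero result.  "kbuf",
 * "ubuf" and "len" are hypothetical names used only for this example.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */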
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
static inline unsigned long
__clear_user(void __user *to, unsigned long n)
{
	return __do_clear_user(to, n);
}
#endif	/* _CRIS_UACCESS_H */