/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * This file was copied from include/asm-generic/uaccess.h
 */

#ifndef _ASM_RISCV_UACCESS_H
#define _ASM_RISCV_UACCESS_H

#include <asm/pgtable.h>		/* for TASK_SIZE */

/*
 * User space memory access functions
 */
#ifdef CONFIG_MMU
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/byteorder.h>
#include <asm/extable.h>
#include <asm/asm.h>
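
/*
 * The SUM (permit Supervisor User Memory access) bit in the sstatus CSR
 * gates supervisor-mode access to user pages.  The helpers below set and
 * clear it (csrs/csrc set and clear CSR bits, respectively) so that the
 * kernel can only touch user mappings inside an explicit uaccess region.
 */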
#define __enable_user_access()						\
	__asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
#define __disable_user_access()						\
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")

/**
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(addr, size) ({					\
	__chk_user_ptr(addr);						\
	likely(__access_ok((unsigned long __force)(addr), (size)));	\
})
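
/*
 * Usage sketch (illustrative, not part of this header): validate a user
 * range once, after which the unchecked __get_user()/__put_user()
 * helpers below may be used on it.
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */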

/*
 * Ensure that the range [addr, addr+size) is within the process's
 * address space
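 *
 * Note the rearranged comparison: checking "size <= TASK_SIZE" first
 * makes "TASK_SIZE - size" safe to compute, and comparing addr against
 * it avoids the wraparound that a naive "addr + size <= TASK_SIZE"
 * check could hit for large addr.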
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return size <= TASK_SIZE && addr <= TASK_SIZE - size;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */
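
/*
 * RISC-V is little-endian, so when a 64-bit value is split across two
 * 32-bit words in the !CONFIG_64BIT paths below, the least-significant
 * word lives at the lower address.
 */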
#define __LSW	0
#define __MSW	1

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 */
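
/*
 * In __get_user_asm() below, %0 is err ("+r"), %1 is the destination
 * ("=&r" __x), %2 is a scratch register used by the "jump"
 * pseudo-instruction to hold the target address ("=r" __tmp), %3 is the
 * user memory operand and %4 is -EFAULT.  A fault at label 1 is
 * redirected via the __ex_table entry to label 3, which sets err,
 * zeroes the destination and jumps back to label 2.
 */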
#define __get_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(x) __x;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %1, %3\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"3:\n"						\
		"	li %0, %4\n"				\
		"	li %1, 0\n"				\
		"	jump 2b, %2\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 3b\n"			\
		"	.previous"				\
		: "+r" (err), "=&r" (__x), "=r" (__tmp)		\
		: "m" (*(ptr)), "i" (-EFAULT));			\
	(x) = __x;						\
} while (0)

#ifdef CONFIG_64BIT
#define __get_user_8(x, ptr, err) \
	__get_user_asm("ld", x, ptr, err)
#else /* !CONFIG_64BIT */
#define __get_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u32 __lo, __hi;						\
	uintptr_t __tmp;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	lw %1, %4\n"				\
		"2:\n"						\
		"	lw %2, %5\n"				\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"4:\n"						\
		"	li %0, %6\n"				\
		"	li %1, 0\n"				\
		"	li %2, 0\n"				\
		"	jump 3b, %3\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 4b\n"			\
		"	" RISCV_PTR " 2b, 4b\n"			\
		"	.previous"				\
		: "+r" (err), "=&r" (__lo), "=r" (__hi),	\
		  "=r" (__tmp)					\
		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]),	\
		  "i" (-EFAULT));				\
	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
		(((u64)__hi << 32) | __lo)));			\
} while (0)
#endif /* CONFIG_64BIT */

#define __get_user_nocheck(x, __gu_ptr, __gu_err)		\
do {								\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__get_user_asm("lb", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 2:							\
		__get_user_asm("lh", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 4:							\
		__get_user_asm("lw", (x), __gu_ptr, __gu_err);	\
		break;						\
	case 8:							\
		__get_user_8((x), __gu_ptr, __gu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);	\
	long __gu_err = 0;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	__enable_user_access();					\
	__get_user_nocheck(x, __gu_ptr, __gu_err);		\
	__disable_user_access();				\
								\
	__gu_err;						\
})
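
/*
 * Usage sketch (hypothetical helper, not from this file): one
 * access_ok() check amortized over several unchecked fetches.
 *
 *	static int read_pair(const u32 __user *uptr, u32 *a, u32 *b)
 *	{
 *		if (!access_ok(uptr, 2 * sizeof(u32)))
 *			return -EFAULT;
 *		if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */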

/**
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)					\
({								\
	const __typeof__(*(ptr)) __user *__p = (ptr);		\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__get_user((x), __p) :				\
		((x) = 0, -EFAULT);				\
})
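
/*
 * Usage sketch (hypothetical): get_user() folds the access_ok() check
 * in, so a one-off fetch needs no separate validation.
 *
 *	u32 flags;
 *
 *	if (get_user(flags, &uarg->flags))
 *		return -EFAULT;
 */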

#define __put_user_asm(insn, x, ptr, err)			\
do {								\
	uintptr_t __tmp;					\
	__typeof__(*(ptr)) __x = x;				\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	" insn " %z3, %2\n"			\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"3:\n"						\
		"	li %0, %4\n"				\
		"	jump 2b, %1\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 3b\n"			\
		"	.previous"				\
		: "+r" (err), "=r" (__tmp), "=m" (*(ptr))	\
		: "rJ" (__x), "i" (-EFAULT));			\
} while (0)

#ifdef CONFIG_64BIT
#define __put_user_8(x, ptr, err) \
	__put_user_asm("sd", x, ptr, err)
#else /* !CONFIG_64BIT */
#define __put_user_8(x, ptr, err)				\
do {								\
	u32 __user *__ptr = (u32 __user *)(ptr);		\
	u64 __x = (__typeof__((x)-(x)))(x);			\
	uintptr_t __tmp;					\
	__asm__ __volatile__ (					\
		"1:\n"						\
		"	sw %z4, %2\n"				\
		"2:\n"						\
		"	sw %z5, %3\n"				\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.balign 4\n"				\
		"4:\n"						\
		"	li %0, %6\n"				\
		"	jump 3b, %1\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign " RISCV_SZPTR "\n"		\
		"	" RISCV_PTR " 1b, 4b\n"			\
		"	" RISCV_PTR " 2b, 4b\n"			\
		"	.previous"				\
		: "+r" (err), "=r" (__tmp),			\
		  "=m" (__ptr[__LSW]),				\
		  "=m" (__ptr[__MSW])				\
		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT));	\
} while (0)
#endif /* CONFIG_64BIT */

#define __put_user_nocheck(x, __gu_ptr, __pu_err)		\
do {								\
	switch (sizeof(*__gu_ptr)) {				\
	case 1:							\
		__put_user_asm("sb", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 2:							\
		__put_user_asm("sh", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 4:							\
		__put_user_asm("sw", (x), __gu_ptr, __pu_err);	\
		break;						\
	case 8:							\
		__put_user_8((x), __gu_ptr, __pu_err);		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
} while (0)

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
	long __pu_err = 0;					\
								\
	__chk_user_ptr(__gu_ptr);				\
								\
	__enable_user_access();					\
	__put_user_nocheck(x, __gu_ptr, __pu_err);		\
	__disable_user_access();				\
								\
	__pu_err;						\
})
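
/*
 * Like __get_user(), __put_user() assumes a prior access_ok() check on
 * @ptr; a hypothetical sketch:
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__put_user(val, uptr))
 *		return -EFAULT;
 */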

/**
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();						\
	access_ok(__p, sizeof(*__p)) ?				\
		__put_user((x), __p) :				\
		-EFAULT;					\
})
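
/*
 * Usage sketch (hypothetical): report a result back to user space.
 *
 *	static int report_count(u64 count, u64 __user *uptr)
 *	{
 *		return put_user(count, uptr);
 *	}
 */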

unsigned long __must_check __asm_copy_to_user(void __user *to,
	const void *from, unsigned long n);
unsigned long __must_check __asm_copy_from_user(void *to,
	const void __user *from, unsigned long n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __asm_copy_from_user(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __asm_copy_to_user(to, from, n);
}
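
/*
 * Both raw_copy routines return the number of bytes left uncopied, so
 * zero means success.  Callers normally go through copy_from_user()/
 * copy_to_user(), which add the access_ok() check; a hypothetical
 * sketch:
 *
 *	struct req r;
 *
 *	if (copy_from_user(&r, uptr, sizeof(r)))
 *		return -EFAULT;
 */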

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern long __must_check strlen_user(const char __user *str);
extern long __must_check strnlen_user(const char __user *str, long n);

extern
unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline
unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return access_ok(to, n) ?
		__clear_user(to, n) : n;
}
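
/*
 * clear_user() likewise returns the number of bytes that could not be
 * zeroed (0 on success, n if the range fails access_ok()), e.g.
 * (hypothetical):
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */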

/*
 * Atomic compare-and-exchange, but with a fixup for userspace faults. Faults
 * will set "err" to -EFAULT, while successful accesses return the previous
 * value.
 */
#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb)	\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(*(ptr)) __old = (old);			\
	__typeof__(*(ptr)) __new = (new);			\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(err) __err = 0;				\
	register unsigned int __rc;				\
	__enable_user_access();					\
	switch (size) {						\
	case 4:							\
		__asm__ __volatile__ (				\
		"0:\n"						\
		"	lr.w" #scb " %[ret], %[ptr]\n"		\
		"	bne %[ret], %z[old], 1f\n"		\
		"	sc.w" #lrb " %[rc], %z[new], %[ptr]\n"	\
		"	bnez %[rc], 0b\n"			\
		"1:\n"						\
		".section .fixup,\"ax\"\n"			\
		".balign 4\n"					\
		"2:\n"						\
		"	li %[err], %[efault]\n"			\
		"	jump 1b, %[rc]\n"			\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		".balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 2b\n"			\
		".previous\n"					\
			: [ret] "=&r" (__ret),			\
			  [rc] "=&r" (__rc),			\
			  [ptr] "+A" (*__ptr),			\
			  [err] "=&r" (__err)			\
			: [old] "rJ" (__old),			\
			  [new] "rJ" (__new),			\
			  [efault] "i" (-EFAULT));		\
		break;						\
	case 8:							\
		__asm__ __volatile__ (				\
		"0:\n"						\
		"	lr.d" #scb " %[ret], %[ptr]\n"		\
		"	bne %[ret], %z[old], 1f\n"		\
		"	sc.d" #lrb " %[rc], %z[new], %[ptr]\n"	\
		"	bnez %[rc], 0b\n"			\
		"1:\n"						\
		".section .fixup,\"ax\"\n"			\
		".balign 4\n"					\
		"2:\n"						\
		"	li %[err], %[efault]\n"			\
		"	jump 1b, %[rc]\n"			\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		".balign " RISCV_SZPTR "\n"			\
		"	" RISCV_PTR " 1b, 2b\n"			\
		".previous\n"					\
			: [ret] "=&r" (__ret),			\
			  [rc] "=&r" (__rc),			\
			  [ptr] "+A" (*__ptr),			\
			  [err] "=&r" (__err)			\
			: [old] "rJ" (__old),			\
			  [new] "rJ" (__new),			\
			  [efault] "i" (-EFAULT));		\
		break;						\
	default:						\
		BUILD_BUG();					\
	}							\
	__disable_user_access();				\
	(err) = __err;						\
	__ret;							\
})
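
/*
 * Usage sketch (hypothetical; the full-barrier .aqrl suffixes, the names
 * and the caller's prior access_ok() check are illustrative assumptions):
 *
 *	u32 __user *uaddr;
 *	u32 prev;
 *	int err;
 *
 *	prev = __cmpxchg_user(uaddr, expected, desired, err,
 *			      sizeof(u32), .aqrl, .aqrl);
 *	if (err)
 *		return err;
 *	(prev == expected means the exchange happened.)
 */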

#define HAVE_GET_KERNEL_NOFAULT

/*
 * __kr_err must start at 0: the asm only writes err on the fault path
 * ("+r" reads the incoming value), so an uninitialized local could make
 * a successful access look like a fault.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	long __kr_err = 0;						\
									\
	__get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err);	\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	long __kr_err = 0;						\
									\
	__put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err);	\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
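
/*
 * Usage sketch (hypothetical): probe a kernel address that may be
 * unmapped, e.g. from a debugging helper.
 *
 *	long val;
 *
 *	__get_kernel_nofault(&val, kaddr, long, bad);
 *	return val;
 * bad:
 *	return -EFAULT;
 */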

#else /* CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_UACCESS_H */