/*
 *  arch/arm/include/asm/uaccess.h
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (e.g., via list poison values) which could then
 * be exploited for privilege escalation.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
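/*
 * Typical pairing, as used by the copy helpers later in this file
 * (illustrative sketch only):
 *
 *	unsigned int __ua_flags = uaccess_save_and_enable();
 *	...access userspace here...
 *	uaccess_restore(__ua_flags);
 */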
/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000
#define get_ds()	(KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	dsb(nsh);
	isb();

	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a, b)	((a) == (b))

/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })
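/*
 * A hedged C sketch of what the asm above computes: the access passes
 * (flag == 0) when the 33-bit sum addr + size neither wraps past 2^32
 * nor exceeds addr_limit.  KERNEL_DS (0) passes trivially, because flag
 * starts out as addr_limit itself and the asm only ever clears it:
 *
 *	u64 sum = (u64)(u32)(addr) + (u32)(size);
 *	flag = (addr_limit == 0 || sum <= (u64)addr_limit) ? 0 : addr_limit;
 */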
/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size)			\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subhss	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
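/*
 * Rough C equivalent of the asm above (illustrative sketch, ignoring the
 * KERNEL_DS corner case where addr_limit is 0):
 *
 *	if ((unsigned long)ptr >= addr_limit ||
 *	    addr_limit - (unsigned long)ptr < size)
 *		safe_ptr = NULL;
 *
 * Doing this without a conditional branch, followed by csdb(), stops the
 * CPU from speculating past the check with the unmasked pointer.
 */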
/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)				\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
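/*
 * Calling convention of the out-of-line __get_user_* helpers, as pinned
 * down by the register constraints above: the user pointer goes in r0,
 * the address limit in r1, the fetched value comes back in r2 (a 64-bit
 * value occupies the r2/r3 pair), and the error code (0 or -EFAULT)
 * comes back in r0.
 */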
/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif

#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	})
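/*
 * Usage sketch (illustrative; uaddr is a hypothetical __user pointer):
 *
 *	u32 val;
 *	if (get_user(val, (u32 __user *)uaddr))
 *		return -EFAULT;
 *
 * get_user() returns 0 on success and -EFAULT on a faulting or
 * out-of-range access, with val zeroed in the failure case.
 */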
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})
#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a, b)	(1)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : get_fs())

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e., don't return a value as such).
 */
#define __get_user(x, ptr)						\
	({								\
		long __gu_err = 0;					\
		__get_user_err((x), (ptr), __gu_err);			\
		__gu_err;						\
	})

#define __get_user_err(x, ptr, err)					\
	do {								\
		unsigned long __gu_addr = (unsigned long)(ptr);		\
		unsigned long __gu_val;					\
		unsigned int __ua_flags;				\
		__chk_user_ptr(ptr);					\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break; \
		case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break; \
		case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break; \
		default: (__gu_val) = __get_user_bad();			\
		}							\
		uaccess_restore(__ua_flags);				\
		(x) = (__typeof__(*(ptr)))__gu_val;			\
	} while (0)

#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")
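/*
 * How the fixup above works: the ".long 1b, 3b" entry in __ex_table maps
 * the potentially-faulting load at label 1 to the handler at label 3.
 * On a fault, the handler stores -EFAULT in err and zeroes the
 * destination register, so no stale kernel data can leak to the caller,
 * then branches back to the instruction after the load.
 */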
#define __get_user_asm_byte(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrb)

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#define __get_user_asm_word(x, addr, err)			\
	__get_user_asm(x, addr, err, ldr)
#endif /* !CONFIG_CPU_SPECTRE */

#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2: __fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4: __fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8: __fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)

#define put_user(x, ptr)						\
	({								\
		int __pu_err = 0;					\
		__put_user_switch((x), (ptr), __pu_err, __put_user_check); \
		__pu_err;						\
	})
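/*
 * Usage sketch (illustrative; uaddr is a hypothetical __user pointer):
 *
 *	if (put_user(val, (u32 __user *)uaddr))
 *		return -EFAULT;
 *
 * put_user() returns 0 on success and -EFAULT if the store faults or
 * the pointer fails the address-limit check.
 */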
#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
	({								\
		long __pu_err = 0;					\
		__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
		__pu_err;						\
	})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err);	\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")
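/*
 * Note the contrast with __get_user_asm: the store fixup only has to set
 * err to -EFAULT.  Nothing is zeroed, because a failed write to user
 * space cannot leak kernel data back to the caller.
 */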
#define __put_user_asm_byte(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strb)

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

#define __put_user_asm_word(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) " " __reg_oper0 ", [%1]\n"		) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) " " __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#endif /* !CONFIG_CPU_SPECTRE */

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}
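/*
 * As with all raw_copy_* helpers, the return value is the number of
 * bytes that could NOT be copied; 0 therefore means complete success.
 */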
extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else /* !CONFIG_MMU */
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)	(memset((void __force *)addr, 0, n), 0)
#endif /* CONFIG_MMU */
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */