/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)
#define user_addr_max()	(current->thread.addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})
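
/*
 * Worked example of the overflow reasoning above (illustrative numbers,
 * not from the original source): on 32-bit, with addr = 0xfffffff8 and
 * a constant size = 16, "addr + size" would wrap around to 0x8 and
 * falsely pass an "addr + size > limit" test, whereas
 * "addr > limit - size" cannot wrap, because a sizeof()-derived size
 * never exceeds the limit.
 */
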
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
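
/*
 * Typical use (illustrative sketch; "uptr" and "len" are made-up
 * names):
 *
 *	if (!access_ok(uptr, len))
 *		return -EFAULT;
 *
 * after which the __-prefixed accessors below may be used on uptr.
 */
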
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
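
/*
 * For instance (illustrative): on 32-bit kernels __inttype(u64) is
 * unsigned long long, since sizeof(u64) > sizeof(0UL) there, while
 * __inttype(int) is plain unsigned long.
 */
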
/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
		       ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
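
/*
 * Example (illustrative sketch; "arg" is a made-up ioctl-style user
 * pointer):
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 */
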
#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
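
/*
 * Example (illustrative sketch; "result" and "uresult" are made-up
 * names):
 *
 *	if (put_user(result, uresult))
 *		return -EFAULT;
 */
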
#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "k", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)		\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	xor"itype" %"rtype"0,%"rtype"0\n"	\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	__label__ __pu_label;					\
	int __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __pu_val;				\
	__pu_val = x;						\
	__uaccess_begin();					\
	__put_user_size(__pu_val, (ptr), (size), __pu_label);	\
	__pu_err = 0;						\
__pu_label:							\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label)	\
	asm_volatile_goto("\n"					\
		"1:	mov"itype" %"rtype"0,%1\n"		\
		_ASM_EXTABLE_UA(1b, %l2)			\
		: : ltype(x), "m" (__m(addr))			\
		: : label)

#define __put_user_failed(x, addr, itype, rtype, ltype, errret)		\
	({	__label__ __puflab;					\
		int __pufret = errret;					\
		__put_user_goto(x, addr, itype, rtype, ltype, __puflab);	\
		__pufret = 0;						\
	__puflab: __pufret; })

#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)	do {	\
	retval = __put_user_failed(x, addr, itype, rtype, ltype, errret);	\
} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
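
/*
 * Example (illustrative sketch; "uarr", "n", "tmp" and "sum" are
 * made-up names): a single access_ok() check amortized over a loop of
 * __get_user() calls:
 *
 *	if (!access_ok(uarr, n * sizeof(*uarr)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++) {
 *		if (__get_user(tmp, uarr + i))
 *			return -EFAULT;
 *		sum += tmp;
 *	}
 */
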
/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
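
/*
 * Fuller sketch of the try/catch pattern (illustrative; "uc" and its
 * fields are made up), in the style of signal frame setup code:
 *
 *	int err = 0;
 *
 *	get_user_try {
 *		get_user_ex(ax, &uc->ax);
 *		get_user_ex(ip, &uc->ip);
 *	} get_user_catch(err);
 *
 *	return err;
 */
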
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
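
/*
 * Example (illustrative sketch; "uaddr" and "expected" are made-up
 * names): atomically bump a user-space counter, futex-style.  On
 * return, ret is -EFAULT on fault, and cur holds the value actually
 * found at uaddr (equal to expected iff the swap happened):
 *
 *	u32 cur;
 *	int ret = user_atomic_cmpxchg_inatomic(&cur, uaddr,
 *					       expected, expected + 1);
 */
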
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a, b)	user_access_begin(a, b)
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
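
/*
 * Example of the begin/end bracketing (illustrative sketch; "uptr" and
 * "val" are made-up names):
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, Efault);
 *	unsafe_put_user(val + 1, uptr, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */
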
#endif /* _ASM_X86_UACCESS_H */