/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)
#define user_addr_max()		(current->thread.addr_limit.seg)

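/*
 * Illustrative sketch (not part of this header): the historical pattern for
 * temporarily widening the address limit so a kernel buffer can be handed to
 * code that expects a __user pointer. kernel_read_helper(), file, kbuf and
 * len are hypothetical names.
 *
 *	mm_segment_t old_fs = get_fs();
 *	ssize_t ret;
 *
 *	set_fs(KERNEL_DS);
 *	ret = kernel_read_helper(file, (char __user *)kbuf, len);
 *	set_fs(old_fs);
 */
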
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

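/*
 * Worked example of the overflow concern above (illustrative 64-bit numbers):
 * with addr = 0xffffffffffffff00 and size = 0x200, "addr + size" wraps around
 * to 0x100 and would pass a naive "addr + size > limit" comparison; the
 * "addr < size" check after the addition catches exactly that wrap.
 */
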
#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})

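/*
 * Minimal usage sketch (illustrative only; 'ubuf' and 'len' are hypothetical
 * ioctl-style arguments): check the whole range once up front, then rely on
 * the __-prefixed accessors that skip the per-access range check.
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 */
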
/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

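/*
 * Usage sketch (illustrative only; 'uaddr' is a hypothetical 'int __user *'
 * argument, e.g. from a syscall):
 *
 *	int val;
 *
 *	if (get_user(val, uaddr))
 *		return -EFAULT;
 */
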
#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#define __put_user_asm_ex_u64(x, addr)				\
	asm volatile("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     "3:"					\
		     _ASM_EXTABLE_EX(1b, 2b)			\
		     _ASM_EXTABLE_EX(2b, 3b)			\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})

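/*
 * Usage sketch (illustrative only; 'uaddr' is a hypothetical 'int __user *'
 * argument):
 *
 *	if (put_user(42, uaddr))
 *		return -EFAULT;
 */
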
#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "k", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "  jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	__label__ __pu_label;					\
	int __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __pu_val;				\
	__pu_val = x;						\
	__uaccess_begin();					\
	__put_user_size(__pu_val, (ptr), (size), __pu_label);	\
	__pu_err = 0;						\
__pu_label:							\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label)	\
	asm_volatile_goto("\n"					\
		"1:	mov"itype" %"rtype"0,%1\n"		\
		_ASM_EXTABLE_UA(1b, %l2)			\
		: : ltype(x), "m" (__m(addr))			\
		: : label)

#define __put_user_failed(x, addr, itype, rtype, ltype, errret)		\
	({	__label__ __puflab;					\
		int __pufret = errret;					\
		__put_user_goto(x,addr,itype,rtype,ltype,__puflab);	\
		__pufret = 0;						\
	__puflab: __pufret; })

#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)	do {	\
	retval = __put_user_failed(x, addr, itype, rtype, ltype, errret);	\
} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();					\

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

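/*
 * Usage sketch (illustrative only; 'uarr' is a hypothetical 'u32 __user *'
 * argument): when several accesses target the same user buffer, do a single
 * access_ok() check and then use the unchecked variants.
 *
 *	u32 v;
 *
 *	if (!access_ok(uarr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	if (__get_user(v, uarr))
 *		return -EFAULT;
 *	if (__put_user(v + 1, uarr + 1))
 *		return -EFAULT;
 */
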
/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

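/*
 * Usage sketch of the try/catch style described above (illustrative only;
 * 'uframe' and its 'a' and 'b' members are hypothetical): every access
 * between the try and the catch shares a single error slot.
 *
 *	int a, b, err = 0;
 *
 *	get_user_try {
 *		get_user_ex(a, &uframe->a);
 *		get_user_ex(b, &uframe->b);
 *	} get_user_catch(err);
 *
 *	if (err)
 *		return -EFAULT;
 */
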
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})

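/*
 * Usage sketch (illustrative only; 'uaddr', 'oldval' and 'newval' are
 * hypothetical, with 'uaddr' a 'u32 __user *' and pagefaults allowed):
 * attempt the exchange and read back whatever was found at the address.
 *
 *	u32 cur;
 *	int ret;
 *
 *	ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, oldval, newval);
 *	if (ret)
 *		return ret;
 *	if (cur == oldval)
 *		... the exchange happened ...
 */
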
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)

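/*
 * Usage sketch of the unsafe accessors (illustrative only; 'uptr' is a
 * hypothetical 'const u64 __user *' and 'n' an element count): a single
 * user_access_begin() covers the whole run of unsafe_get_user() calls,
 * and the error label must still call user_access_end().
 *
 *	u64 sum = 0, tmp;
 *	unsigned long i;
 *
 *	if (!user_access_begin(uptr, n * sizeof(u64)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++) {
 *		unsafe_get_user(tmp, &uptr[i], efault);
 *		sum += tmp;
 *	}
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */
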
#endif /* _ASM_X86_UACCESS_H */