Linux 5.7.6: arch/x86/include/asm/uaccess_64.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
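
/*
 * Illustrative sketch (not part of this header): the alternative_call_2()
 * above has roughly the effect of the runtime dispatch below, except that
 * the kernel rewrites the call target once during boot-time alternatives
 * patching instead of testing CPU features on every call.
 * copy_user_generic_sketch() is a hypothetical name for comparison only.
 */
#if 0
static __always_inline __must_check unsigned long
copy_user_generic_sketch(void *to, const void *from, unsigned len)
{
	if (static_cpu_has(X86_FEATURE_ERMS))		/* enhanced REP MOVSB */
		return copy_user_enhanced_fast_string(to, from, len);
	if (static_cpu_has(X86_FEATURE_REP_GOOD))	/* fast string ops */
		return copy_user_generic_string(to, from, len);
	return copy_user_generic_unrolled(to, from, len); /* fallback */
}
#endif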
static __always_inline __must_check unsigned long
copy_to_user_mcsafe(void *to, const void *from, unsigned len)
{
	unsigned long ret;

	__uaccess_begin();
	/*
	 * Note, __memcpy_mcsafe() is explicitly used since it can
	 * handle exceptions / faults.  memcpy_mcsafe() may fall back to
	 * memcpy() which lacks this handling.
	 */
	ret = __memcpy_mcsafe(to, from, len);
	__uaccess_end();
	return ret;
}
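
/*
 * Caller sketch (illustrative; example_copyout_mcsafe() is a hypothetical
 * name): copy_to_user_mcsafe() performs no access_ok() check itself, so a
 * caller holding a raw user pointer must validate it first, as the real
 * user in lib/iov_iter.c does. The return value is the number of bytes
 * that could not be copied, matching the other copy_*_user() helpers.
 */
#if 0
static unsigned long
example_copyout_mcsafe(void __user *uptr, const void *kbuf, unsigned len)
{
	if (!access_ok(uptr, len))
		return len;		/* nothing copied */
	return copy_to_user_mcsafe((__force void *)uptr, kbuf, len);
}
#endif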
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}
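
/*
 * Context sketch (illustrative, simplified): these raw_copy_*() helpers
 * are the x86-64 backends behind the generic copy_{to,from}_user() in
 * include/linux/uaccess.h, which layers pointer validation and tail
 * zeroing on top, roughly as below. example_copy_from_user() is a
 * hypothetical name; the real generic code also adds might_fault() and
 * instrumentation hooks.
 */
#if 0
static unsigned long
example_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;		/* assume nothing copied yet */

	if (access_ok(from, n))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))		/* don't leak uninitialized bytes */
		memset(to + (n - res), 0, res);
	return res;			/* bytes left uncopied */
}
#endif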
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
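
/*
 * Usage sketch (illustrative; example_pmem_write() and its buffers are
 * hypothetical): __copy_from_user_flushcache() targets callers copying
 * user data into persistent memory, where the destination cache lines
 * must be written back so the data is durable; the dax/pmem iov_iter
 * paths are the in-tree users.
 */
#if 0
static int example_pmem_write(void *pmem_dst, const void __user *usrc,
			      unsigned size)
{
	if (!access_ok(usrc, size))
		return -EFAULT;
	/* Non-zero means some bytes faulted and were not copied. */
	if (__copy_from_user_flushcache(pmem_dst, usrc, size))
		return -EFAULT;
	return 0;
}
#endif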
unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */