#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
        unsigned ret;

        /*
         * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
         * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
         * Otherwise, use copy_user_generic_unrolled.
         */
        alternative_call_2(copy_user_generic_unrolled,
                           copy_user_generic_string,
                           X86_FEATURE_REP_GOOD,
                           copy_user_enhanced_fast_string,
                           X86_FEATURE_ERMS,
                           ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
                                       "=d" (len)),
                           "1" (to), "2" (from), "3" (len)
                           : "memory", "rcx", "r8", "r9", "r10", "r11");
        return ret;
}
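
/*
 * Conceptual analogue (illustrative sketch only, not the real mechanism):
 * alternative_call_2() patches the call target once at boot via the
 * alternatives framework, which behaves roughly like picking one of the
 * three implementations from CPU features, but with no runtime
 * indirection. copy_impl and pick_copy_impl below are hypothetical names:
 *
 *      static unsigned long (*copy_impl)(void *, const void *, unsigned);
 *
 *      static void pick_copy_impl(void)
 *      {
 *              if (boot_cpu_has(X86_FEATURE_ERMS))
 *                      copy_impl = copy_user_enhanced_fast_string;
 *              else if (boot_cpu_has(X86_FEATURE_REP_GOOD))
 *                      copy_impl = copy_user_generic_string;
 *              else
 *                      copy_impl = copy_user_generic_unrolled;
 *      }
 */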

__must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static inline unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
                                          unsigned long n)
{
        int sz = __compiletime_object_size(to);

        might_fault();
        if (likely(sz == -1 || sz >= n))
                n = _copy_from_user(to, from, n);
#ifdef CONFIG_DEBUG_VM
        else
                WARN(1, "Buffer overflow detected!\n");
#endif
        return n;
}
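
/*
 * Illustrative sketch (hypothetical caller; buf and user_ptr are not part
 * of this header): __compiletime_object_size() lets the compiler see the
 * destination's size when it is a fixed-size object, so an obviously
 * oversized copy is refused and, under CONFIG_DEBUG_VM, warned about:
 *
 *      char buf[16];
 *
 *      copy_from_user(buf, user_ptr, 32);      // sz == 16 < n == 32:
 *                                              // nothing is copied and
 *                                              // 32 is returned
 */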

static __always_inline __must_check
int copy_to_user(void __user *dst, const void *src, unsigned size)
{
        might_fault();

        return _copy_to_user(dst, src, size);
}
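
/*
 * Usage sketch (hypothetical caller): both copy_from_user() and
 * copy_to_user() return the number of bytes that could NOT be copied,
 * which callers conventionally map to -EFAULT:
 *
 *      if (copy_to_user(user_ptr, &result, sizeof(result)))
 *              return -EFAULT;
 */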

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                return ret;
        case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                return ret;
        case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                return ret;
        case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
                return ret;
        case 10:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}
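
/*
 * Illustrative note (hypothetical caller): with a compile-time-constant
 * size, __copy_from_user() reduces to one or two inlined __get_user_asm
 * accesses instead of a call into the generic copy routine. The
 * double-underscore variants skip access_ok(), so the user pointer must
 * have been validated already:
 *
 *      u64 val;
 *
 *      if (__copy_from_user(&val, user_ptr, sizeof(val)))  // size == 8
 *              return -EFAULT;
 */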

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;

        might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
                              ret, "b", "b", "iq", 1);
                return ret;
        case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
                              ret, "w", "w", "ir", 2);
                return ret;
        case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
                              ret, "l", "k", "ir", 4);
                return ret;
        case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
                              ret, "q", "", "er", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 10);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 16);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
                               ret, "q", "", "er", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}
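
/*
 * Usage sketch mirroring __copy_from_user() (hypothetical caller, with
 * access_ok() already done): a constant 4-byte store compiles down to a
 * single inlined __put_user_asm:
 *
 *      u32 flags = 0;
 *
 *      if (__copy_to_user(user_ptr, &flags, sizeof(flags)))
 *              return -EFAULT;
 */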

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }

        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "er", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        }
}
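
/*
 * Usage sketch (hypothetical; dst_uptr and src_uptr are illustrative
 * names): user-to-user copies appear mostly in compat paths that move
 * data between two user buffers. A constant size becomes an inlined
 * __get_user_asm/__put_user_asm pair through a kernel temporary:
 *
 *      if (__copy_in_user(dst_uptr, src_uptr, 8))
 *              return -EFAULT;
 */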

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
        return copy_user_generic(dst, (__force const void *)src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}
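
/*
 * Illustrative sketch: the _inatomic variants omit might_fault() and are
 * intended for contexts that must not sleep; callers typically bracket
 * them with pagefault_disable()/pagefault_enable() and fall back to a
 * sleeping path on failure (slow_path_copy is a hypothetical name):
 *
 *      pagefault_disable();
 *      ret = __copy_from_user_inatomic(dst, user_ptr, len);
 *      pagefault_enable();
 *      if (ret)
 *              ret = slow_path_copy(dst, user_ptr, len);
 */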

extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
        might_fault();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
                                  unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}
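
/*
 * Usage sketch (hypothetical; page_addr and user_ptr are illustrative
 * names): the _nocache variants use non-temporal stores so that bulk
 * data which will not be touched again soon does not evict useful cache
 * lines, e.g. copying a full page that is about to be written back or
 * handed to a device:
 *
 *      if (__copy_from_user_nocache(page_addr, user_ptr, PAGE_SIZE))
 *              return -EFAULT;
 */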

/* Fault-handling tail: copy/zero the remainder after a partial fault. */
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* _ASM_X86_UACCESS_64_H */