1 #ifndef __i386_UACCESS_H
2 #define __i386_UACCESS_H
/*
 * User space memory access functions
 */
7 #include <linux/errno.h>
8 #include <linux/thread_info.h>
9 #include <linux/prefetch.h>
10 #include <linux/string.h>
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment mask consulted by the Intel movsl-based user-copy code.
 * NOTE(review): the "int mask" member is reconstructed from the dropped
 * line -- confirm against the arch usercopy implementation.
 */
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif
/* Referenced (never defined) for bad sizes: forces a link-time error. */
extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax, no clobbers.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
/*
 * Dispatch to the out-of-line __put_user_<size> helper; expects a local
 * "int __ret_pu" in scope (declared by put_user()).  A bad size expands
 * to a call to the undefined __put_user_X -> link-time error.
 */
#define __put_user_x(size, x, ptr)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
/* 8-byte variant: the 64-bit value goes in %eax:%edx ("A" constraint). */
#define __put_user_8(x, ptr)					\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#ifdef CONFIG_X86_WP_WORKS_OK

#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr);			\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr);			\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr);			\
		break;						\
	case 8:							\
		__put_user_8(__pu_val, ptr);			\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr);			\
		break;						\
	}							\
	__ret_pu;						\
})

#else
/* WP bit unreliable: fall back to the generic out-of-line copy. */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pus_tmp = x;			\
	__ret_pu = 0;						\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
				       sizeof(*(ptr))) != 0))	\
		__ret_pu = -EFAULT;				\
	__ret_pu;						\
})

#endif
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/* Fetch *ptr into x without access_ok(); 0 on success, -EFAULT on fault. */
#define __get_user(x, ptr)				\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
/* Store x to *ptr without access_ok(); 0 on success, -EFAULT on fault. */
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
/* Statement expression yielding the error code from __put_user_size(). */
#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__pu_err;						\
})
/*
 * 64-bit store as two 32-bit movl's; either faulting instruction jumps
 * to the fixup at 4:, which sets err = -EFAULT and resumes at 3:.
 */
#define __put_user_u64(x, addr, err)				\
	asm volatile("1:	movl %%eax,0(%2)\n"		\
		     "2:	movl %%edx,4(%2)\n"		\
		     "3:\n"					\
		     ".section .fixup,\"ax\"\n"			\
		     "4:	movl %3,%0\n"			\
		     "	jmp 3b\n"				\
		     ".previous\n"				\
		     _ASM_EXTABLE(1b, 4b)			\
		     _ASM_EXTABLE(2b, 4b)			\
		     : "=r" (err)				\
		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
#ifdef CONFIG_X86_WP_WORKS_OK

/* Size-dispatched inline store; bad sizes hit __put_user_bad() -> link error. */
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_u64((__typeof__(*ptr))(x), ptr, retval);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#else
/* WP bit unreliable: always use the out-of-line copy. */
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	__typeof__(*(ptr)) __pus_tmp = x;				\
	retval = 0;							\
									\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
		retval = errret;					\
} while (0)

#endif
/*
 * Oversized dummy object; user pointers are cast to it (see __m below)
 * so asm "m" operands cover the whole span that may be accessed.
 */
struct __large_struct { unsigned long buf[100]; };
/* View a user pointer as a __large_struct for asm memory operands. */
#define __m(x) (*(struct __large_struct __user *)(x))
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
/* Single store with exception fixup: on fault err = errret, resume at 2:. */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	movl %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err)					\
		     : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
/* Fetch into a scratch unsigned long, then cast to the target type. */
#define __get_user_nocheck(x, ptr, size)				\
({									\
	long __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})
/* Size-dispatched inline load; bad sizes hit __get_user_bad() -> link error. */
#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "", "=r", errret);	\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)
/* Single load with fixup: on fault err = errret, result zeroed, resume at 2:. */
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	movl %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype (x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))
266 unsigned long __must_check __copy_to_user_ll
267 (void __user
*to
, const void *from
, unsigned long n
);
268 unsigned long __must_check __copy_from_user_ll
269 (void *to
, const void __user
*from
, unsigned long n
);
270 unsigned long __must_check __copy_from_user_ll_nozero
271 (void *to
, const void __user
*from
, unsigned long n
);
272 unsigned long __must_check __copy_from_user_ll_nocache
273 (void *to
, const void __user
*from
, unsigned long n
);
274 unsigned long __must_check __copy_from_user_ll_nocache_nozero
275 (void *to
, const void __user
*from
, unsigned long n
);
/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure he pins the user space address
 * so that we don't result in page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */
296 static __always_inline
unsigned long __must_check
297 __copy_to_user_inatomic(void __user
*to
, const void *from
, unsigned long n
)
299 if (__builtin_constant_p(n
)) {
304 __put_user_size(*(u8
*)from
, (u8 __user
*)to
,
308 __put_user_size(*(u16
*)from
, (u16 __user
*)to
,
312 __put_user_size(*(u32
*)from
, (u32 __user
*)to
,
317 return __copy_to_user_ll(to
, from
, n
);
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
334 static __always_inline
unsigned long __must_check
335 __copy_to_user(void __user
*to
, const void *from
, unsigned long n
)
338 return __copy_to_user_inatomic(to
, from
, n
);
341 static __always_inline
unsigned long
342 __copy_from_user_inatomic(void *to
, const void __user
*from
, unsigned long n
)
344 /* Avoid zeroing the tail if the copy fails..
345 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
346 * but as the zeroing behaviour is only significant when n is not
347 * constant, that shouldn't be a problem.
349 if (__builtin_constant_p(n
)) {
354 __get_user_size(*(u8
*)to
, from
, 1, ret
, 1);
357 __get_user_size(*(u16
*)to
, from
, 2, ret
, 2);
360 __get_user_size(*(u32
*)to
, from
, 4, ret
, 4);
364 return __copy_from_user_ll_nozero(to
, from
, n
);
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
389 static __always_inline
unsigned long
390 __copy_from_user(void *to
, const void __user
*from
, unsigned long n
)
393 if (__builtin_constant_p(n
)) {
398 __get_user_size(*(u8
*)to
, from
, 1, ret
, 1);
401 __get_user_size(*(u16
*)to
, from
, 2, ret
, 2);
404 __get_user_size(*(u32
*)to
, from
, 4, ret
, 4);
408 return __copy_from_user_ll(to
, from
, n
);
/* This arch provides the uncached user-copy variants defined below. */
#define ARCH_HAS_NOCACHE_UACCESS
413 static __always_inline
unsigned long __copy_from_user_nocache(void *to
,
414 const void __user
*from
, unsigned long n
)
417 if (__builtin_constant_p(n
)) {
422 __get_user_size(*(u8
*)to
, from
, 1, ret
, 1);
425 __get_user_size(*(u16
*)to
, from
, 2, ret
, 2);
428 __get_user_size(*(u32
*)to
, from
, 4, ret
, 4);
432 return __copy_from_user_ll_nocache(to
, from
, n
);
435 static __always_inline
unsigned long
436 __copy_from_user_inatomic_nocache(void *to
, const void __user
*from
,
439 return __copy_from_user_ll_nocache_nozero(to
, from
, n
);
442 unsigned long __must_check
copy_to_user(void __user
*to
,
443 const void *from
, unsigned long n
);
444 unsigned long __must_check
copy_from_user(void *to
,
445 const void __user
*from
,
447 long __must_check
strncpy_from_user(char *dst
, const char __user
*src
,
449 long __must_check
__strncpy_from_user(char *dst
,
450 const char __user
*src
, long count
);
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
466 #define strlen_user(str) strnlen_user(str, LONG_MAX)
468 long strnlen_user(const char __user
*str
, long n
);
469 unsigned long __must_check
clear_user(void __user
*mem
, unsigned long len
);
470 unsigned long __must_check
__clear_user(void __user
*mem
, unsigned long len
);
472 #endif /* __i386_UACCESS_H */