x86: move __addr_ok to uaccess.h.
[linux-2.6/verdex.git] / include / asm-x86 / uaccess_32.h
blob87b1aede9d4b0049914550db5ef9bee8c0e90ad6
1 #ifndef __i386_UACCESS_H
2 #define __i386_UACCESS_H
4 /*
5 * User space memory access functions
6 */
7 #include <linux/errno.h>
8 #include <linux/thread_info.h>
9 #include <linux/prefetch.h>
10 #include <linux/string.h>
11 #include <asm/asm.h>
12 #include <asm/page.h>
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment mask consulted by the optimized Intel user-copy routines;
 * see the movsl alignment note above.
 */
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif
extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax, no clobbers.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/* Expects an 'int __ret_pu' to be in scope at the expansion site. */
#define __put_user_x(size, x, ptr)					\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

/* 64-bit variant: value in %eax:%edx via the "A" constraint. */
#define __put_user_8(x, ptr)						\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)		\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#ifdef CONFIG_X86_WP_WORKS_OK

#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr);			\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr);			\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr);			\
		break;						\
	case 8:							\
		__put_user_8(__pu_val, ptr);			\
		break;						\
	default:						\
		/* bad size: link-time error via __put_user_X */	\
		__put_user_x(X, __pu_val, ptr);			\
		break;						\
	}							\
	__ret_pu;						\
})

#else
/* CPU does not honor WP in supervisor mode: fall back to a full copy. */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr))__pus_tmp = x;			\
	__ret_pu = 0;						\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
				sizeof(*(ptr))) != 0))		\
		__ret_pu = -EFAULT;				\
	__ret_pu;						\
})

#endif
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)				\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__pu_err;						\
})
/*
 * 64-bit store: two 32-bit movl's (value in %eax:%edx via the "A"
 * constraint).  A fault in either store jumps to the fixup at 4:, which
 * loads -EFAULT (operand %3) into err and skips past the stores.
 */
#define __put_user_u64(x, addr, err)				\
	asm volatile("1: movl %%eax,0(%2)\n"			\
		     "2: movl %%edx,4(%2)\n"			\
		     "3:\n"					\
		     ".section .fixup,\"ax\"\n"			\
		     "4: movl %3,%0\n"				\
		     " jmp 3b\n"				\
		     ".previous\n"				\
		     _ASM_EXTABLE(1b, 4b)			\
		     _ASM_EXTABLE(2b, 4b)			\
		     : "=r" (err)				\
		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
#ifdef CONFIG_X86_WP_WORKS_OK

/* Dispatch on access size to the matching single-instruction store. */
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_u64((__typeof__(*ptr))(x), ptr, retval);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#else

/* No-WP fallback: route all sizes through __copy_to_user_ll(). */
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	__typeof__(*(ptr))__pus_tmp = x;				\
	retval = 0;							\
									\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
		retval = errret;					\
} while (0)

#endif
/*
 * Oversized dummy type: an "m" operand built with __m() then covers
 * enough bytes for any of the access sizes used below.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1: mov"itype" %"rtype"1,%2\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl %3,%0\n"					\
		     " jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
#define __get_user_nocheck(x, ptr, size)				\
({									\
	long __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})
/* Dispatch on access size to the matching single-instruction load. */
#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "", "=r", errret);	\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)
/*
 * Single load with fault handling: on a fault, the fixup at 3: sets err
 * to errret and zeroes the destination register (the xor), matching the
 * "@x is set to zero on error" contract of __get_user().
 */
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1: mov"itype" %2,%"rtype"1\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl %3,%0\n"					\
		     " xor"itype" %"rtype"1,%"rtype"1\n"		\
		     " jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype (x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))
266 unsigned long __must_check __copy_to_user_ll
267 (void __user *to, const void *from, unsigned long n);
268 unsigned long __must_check __copy_from_user_ll
269 (void *to, const void __user *from, unsigned long n);
270 unsigned long __must_check __copy_from_user_ll_nozero
271 (void *to, const void __user *from, unsigned long n);
272 unsigned long __must_check __copy_from_user_ll_nocache
273 (void *to, const void __user *from, unsigned long n);
274 unsigned long __must_check __copy_from_user_ll_nocache_nozero
275 (void *to, const void __user *from, unsigned long n);
278 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
279 * @to: Destination address, in user space.
280 * @from: Source address, in kernel space.
281 * @n: Number of bytes to copy.
283 * Context: User context only.
285 * Copy data from kernel space to user space. Caller must check
286 * the specified block with access_ok() before calling this function.
287 * The caller should also make sure he pins the user space address
288 * so that the we don't result in page fault and sleep.
290 * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
291 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
292 * If a store crosses a page boundary and gets a fault, the x86 will not write
293 * anything, so this is accurate.
296 static __always_inline unsigned long __must_check
297 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
299 if (__builtin_constant_p(n)) {
300 unsigned long ret;
302 switch (n) {
303 case 1:
304 __put_user_size(*(u8 *)from, (u8 __user *)to,
305 1, ret, 1);
306 return ret;
307 case 2:
308 __put_user_size(*(u16 *)from, (u16 __user *)to,
309 2, ret, 2);
310 return ret;
311 case 4:
312 __put_user_size(*(u32 *)from, (u32 __user *)to,
313 4, ret, 4);
314 return ret;
317 return __copy_to_user_ll(to, from, n);
321 * __copy_to_user: - Copy a block of data into user space, with less checking.
322 * @to: Destination address, in user space.
323 * @from: Source address, in kernel space.
324 * @n: Number of bytes to copy.
326 * Context: User context only. This function may sleep.
328 * Copy data from kernel space to user space. Caller must check
329 * the specified block with access_ok() before calling this function.
331 * Returns number of bytes that could not be copied.
332 * On success, this will be zero.
334 static __always_inline unsigned long __must_check
335 __copy_to_user(void __user *to, const void *from, unsigned long n)
337 might_sleep();
338 return __copy_to_user_inatomic(to, from, n);
341 static __always_inline unsigned long
342 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
344 /* Avoid zeroing the tail if the copy fails..
345 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
346 * but as the zeroing behaviour is only significant when n is not
347 * constant, that shouldn't be a problem.
349 if (__builtin_constant_p(n)) {
350 unsigned long ret;
352 switch (n) {
353 case 1:
354 __get_user_size(*(u8 *)to, from, 1, ret, 1);
355 return ret;
356 case 2:
357 __get_user_size(*(u16 *)to, from, 2, ret, 2);
358 return ret;
359 case 4:
360 __get_user_size(*(u32 *)to, from, 4, ret, 4);
361 return ret;
364 return __copy_from_user_ll_nozero(to, from, n);
368 * __copy_from_user: - Copy a block of data from user space, with less checking.
369 * @to: Destination address, in kernel space.
370 * @from: Source address, in user space.
371 * @n: Number of bytes to copy.
373 * Context: User context only. This function may sleep.
375 * Copy data from user space to kernel space. Caller must check
376 * the specified block with access_ok() before calling this function.
378 * Returns number of bytes that could not be copied.
379 * On success, this will be zero.
381 * If some data could not be copied, this function will pad the copied
382 * data to the requested size using zero bytes.
384 * An alternate version - __copy_from_user_inatomic() - may be called from
385 * atomic context and will fail rather than sleep. In this case the
386 * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
387 * for explanation of why this is needed.
389 static __always_inline unsigned long
390 __copy_from_user(void *to, const void __user *from, unsigned long n)
392 might_sleep();
393 if (__builtin_constant_p(n)) {
394 unsigned long ret;
396 switch (n) {
397 case 1:
398 __get_user_size(*(u8 *)to, from, 1, ret, 1);
399 return ret;
400 case 2:
401 __get_user_size(*(u16 *)to, from, 2, ret, 2);
402 return ret;
403 case 4:
404 __get_user_size(*(u32 *)to, from, 4, ret, 4);
405 return ret;
408 return __copy_from_user_ll(to, from, n);
411 #define ARCH_HAS_NOCACHE_UACCESS
413 static __always_inline unsigned long __copy_from_user_nocache(void *to,
414 const void __user *from, unsigned long n)
416 might_sleep();
417 if (__builtin_constant_p(n)) {
418 unsigned long ret;
420 switch (n) {
421 case 1:
422 __get_user_size(*(u8 *)to, from, 1, ret, 1);
423 return ret;
424 case 2:
425 __get_user_size(*(u16 *)to, from, 2, ret, 2);
426 return ret;
427 case 4:
428 __get_user_size(*(u32 *)to, from, 4, ret, 4);
429 return ret;
432 return __copy_from_user_ll_nocache(to, from, n);
435 static __always_inline unsigned long
436 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
437 unsigned long n)
439 return __copy_from_user_ll_nocache_nozero(to, from, n);
442 unsigned long __must_check copy_to_user(void __user *to,
443 const void *from, unsigned long n);
444 unsigned long __must_check copy_from_user(void *to,
445 const void __user *from,
446 unsigned long n);
447 long __must_check strncpy_from_user(char *dst, const char __user *src,
448 long count);
449 long __must_check __strncpy_from_user(char *dst,
450 const char __user *src, long count);
453 * strlen_user: - Get the size of a string in user space.
454 * @str: The string to measure.
456 * Context: User context only. This function may sleep.
458 * Get the size of a NUL-terminated string in user space.
460 * Returns the size of the string INCLUDING the terminating NUL.
461 * On exception, returns 0.
463 * If there is a limit on the length of a valid string, you may wish to
464 * consider using strnlen_user() instead.
466 #define strlen_user(str) strnlen_user(str, LONG_MAX)
468 long strnlen_user(const char __user *str, long n);
469 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
470 unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
472 #endif /* __i386_UACCESS_H */