[mmotm.git] arch/x86/include/asm/uaccess_64.h
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <linux/lockdep.h>
#include <asm/page.h>
/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
static inline unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
                                          unsigned long n)
{
        int sz = __compiletime_object_size(to);
        int ret = -EFAULT;

        if (likely(sz == -1 || sz >= n))
                ret = _copy_from_user(to, from, n);
#ifdef CONFIG_DEBUG_VM
        else
                WARN(1, "Buffer overflow detected!\n");
#endif
        return ret;
}
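/*
 * Usage sketch (illustrative only; "struct foo_args", "arg" and "ubuf"
 * are hypothetical names, not part of this header): callers pass a
 * kernel destination, a userspace source and a length, and treat any
 * non-zero return as -EFAULT.  Because sizeof(arg) is known at compile
 * time, the __compiletime_object_size() check above can flag a copy
 * larger than the destination object.
 *
 *      struct foo_args arg;
 *
 *      if (copy_from_user(&arg, ubuf, sizeof(arg)))
 *              return -EFAULT;
 */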
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                return ret;
        case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                return ret;
        case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                return ret;
        case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
                return ret;
        case 10:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}
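/*
 * Note on __copy_from_user(): when the size is a compile-time constant of
 * 1, 2, 4 or 8 bytes the copy is inlined as a single exception-handled
 * move via __get_user_asm(); the 10- and 16-byte cases are split into an
 * 8-byte access plus a 2- or 8-byte access.  Any other constant size, or
 * a non-constant size, falls back to copy_user_generic().
 */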
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;

        might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
                              ret, "b", "b", "iq", 1);
                return ret;
        case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
                              ret, "w", "w", "ir", 2);
                return ret;
        case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
                              ret, "l", "k", "ir", 4);
                return ret;
        case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
                              ret, "q", "", "er", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 10);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 16);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
                               ret, "q", "", "er", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}
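/*
 * __copy_to_user() mirrors __copy_from_user() using __put_user_asm().  The
 * empty asm("":::"memory") between the two halves of the 10- and 16-byte
 * cases is a compiler barrier: the "memory" clobber keeps the compiler
 * from reordering or combining memory accesses across it.
 */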
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                          (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }
        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "er", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                          (__force void *)src, size);
        }
}
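/*
 * __copy_in_user() copies between two userspace buffers.  Small constant
 * sizes are bounced through a temporary kernel variable: a
 * __get_user_asm() from the source followed, only on success, by a
 * __put_user_asm() to the destination.
 */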
__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
                                             unsigned size);
static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}
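/*
 * The *_inatomic variants skip the might_fault()/might_sleep() checks, so
 * they can be called from contexts that must not sleep (for example with
 * pagefaults disabled); the caller handles any short copy via the
 * returned count of uncopied bytes.
 */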
extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
}
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
                                  unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}
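/*
 * The _nocache variants use the non-temporal copy routine to avoid
 * polluting the CPU caches.  The last argument of __copy_user_nocache()
 * is the "zerorest" flag, which (as the name suggests) selects whether
 * the remainder of the destination is zeroed when a fault cuts the copy
 * short: 1 for the sleeping variant, 0 for the inatomic one.
 */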
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
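/*
 * copy_user_handle_tail() is the out-of-line helper (defined under
 * arch/x86/lib) that the copy routines fall back to when a fault
 * interrupts a copy; it transfers the remaining bytes one at a time and
 * returns the number that could not be copied.
 */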
#endif /* _ASM_X86_UACCESS_64_H */