#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
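
/*
 * Illustrative sketch (not part of this header): the usual set_fs()
 * save/restore pattern, which temporarily widens the address limit so the
 * user-access helpers accept kernel pointers.  some_uaccess_helper() is a
 * hypothetical callee; the old segment must always be restored.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = some_uaccess_helper();
 *	set_fs(old_fs);
 */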
#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))
/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr,size) ({ \
	unsigned long flag,roksum; \
	__chk_user_ptr(addr); \
	asm("# range_ok\n\r" \
		"addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
		:"=&r" (flag), "=r" (roksum) \
		:"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
	flag; })

#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
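
/*
 * What the asm above computes, as a plain-C sketch (illustrative only):
 * addr + size may wrap past 2^64, so the carry out of the addition has to
 * be folded into the comparison against addr_limit.
 *
 *	static inline int range_not_ok_c(unsigned long addr,
 *					 unsigned long size,
 *					 unsigned long limit)
 *	{
 *		unsigned long sum = addr + size;
 *
 *		return (sum < addr) || (sum > limit);	// carry, or past limit
 *	}
 *
 * Callers normally go through access_ok() before any of the __-prefixed
 * (unchecked) helpers:
 *
 *	if (!access_ok(VERIFY_READ, uptr, len))
 *		return -EFAULT;
 */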
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
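
/*
 * Conceptually, the fault handler does something like the sketch below.
 * This is an illustrative pseudo-implementation only; the lookup helper
 * named here is hypothetical and the real code lives in the extable
 * sources, not in this header.
 *
 *	int fixup_exception(struct pt_regs *regs)
 *	{
 *		const struct exception_table_entry *e;
 *
 *		e = lookup_exception_entry(regs->ip);	// hypothetical lookup
 *		if (!e)
 *			return 0;	// no fixup: a genuine kernel fault
 *		regs->ip = e->fixup;	// resume at the out-of-line fixup
 *		return 1;
 *	}
 */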

#define ARCH_HAS_SEARCH_EXTABLE
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size,ret,x,ptr) \
	asm volatile("call __get_user_" #size \
		:"=a" (ret),"=d" (x) \
		:"c" (ptr) \
		:"r8")

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr) \
({	unsigned long __val_gu; \
	int __ret_gu; \
	__chk_user_ptr(ptr); \
	switch(sizeof (*(ptr))) { \
	case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
	case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
	case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
	case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \
	default: __get_user_bad(); break; \
	} \
	(x) = (__force typeof(*(ptr)))__val_gu; \
	__ret_gu; \
})
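
/*
 * Typical get_user() call site (illustrative sketch, not part of this
 * header): fetch a single value and propagate the error on failure.
 *
 *	int read_user_flag(int __user *uptr, int *flag)
 *	{
 *		return get_user(*flag, uptr);	// 0 on success, -EFAULT on fault
 *	}
 */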

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr) \
	asm volatile("call __put_user_" #size \
		:"=a" (ret) \
		:"c" (ptr),"d" (x) \
		:"r8")

#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
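
/*
 * And the matching put_user() call site (again an illustrative sketch):
 *
 *	int write_user_flag(int __user *uptr, int flag)
 *	{
 *		return put_user(flag, uptr);	// 0 on success, -EFAULT on fault
 *	}
 */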

#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

#define __put_user_nocheck(x,ptr,size) \
({ \
	int __pu_err; \
	__put_user_size((x),(ptr),(size),__pu_err); \
	__pu_err; \
})

#define __put_user_check(x,ptr,size) \
({ \
	int __pu_err; \
	typeof(*(ptr)) __user *__pu_addr = (ptr); \
	switch (size) { \
	case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \
	case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \
	case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \
	case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \
	default: __put_user_bad(); \
	} \
	__pu_err; \
})

#define __put_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break; \
	case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break; \
	case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break; \
	case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break; \
	default: __put_user_bad(); \
	} \
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
	asm volatile( \
		"1:	mov"itype" %"rtype"1,%2\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	mov %3,%0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		_ASM_EXTABLE(1b,3b) \
		: "=r"(err) \
		: ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))

#define __get_user_nocheck(x,ptr,size) \
({ \
	int __gu_err; \
	unsigned long __gu_val; \
	__get_user_size(__gu_val,(ptr),(size),__gu_err); \
	(x) = (__force typeof(*(ptr)))__gu_val; \
	__gu_err; \
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break; \
	case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break; \
	case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break; \
	case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break; \
	default: (x) = __get_user_bad(); \
	} \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
	asm volatile( \
		"1:	mov"itype" %2,%"rtype"1\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	mov %3,%0\n" \
		"	xor"itype" %"rtype"1,%"rtype"1\n" \
		"	jmp 2b\n" \
		".previous\n" \
		_ASM_EXTABLE(1b,3b) \
		: "=r"(err), ltype (x) \
		: "m"(__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
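
/*
 * Typical bulk-copy call site (illustrative sketch): copy_from_user()
 * returns the number of bytes that could NOT be copied, so zero means
 * success.  The argument struct here is hypothetical.
 *
 *	struct my_args args;
 *
 *	if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
 *		return -EFAULT;
 */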

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst,(__force void *)src,size);
	switch (size) {
	case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
		return ret;
	case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
		return ret;
	case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
		return ret;
	case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
		return ret;
	case 10:
		__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
		if (unlikely(ret)) return ret;
		__get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
		return ret;
	case 16:
		__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
		if (unlikely(ret)) return ret;
		__get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
		return ret;
	default:
		return copy_user_generic(dst,(__force void *)src,size);
	}
}
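
/*
 * The constant-size fast path above folds away at compile time: when the
 * length is a compile-time constant of a handled size, the copy becomes
 * one or two inline movs instead of a call to copy_user_generic().
 * Sketch of a call that hits the "case 4" path (illustrative only):
 *
 *	u32 val;
 *
 *	if (__copy_from_user(&val, uptr, sizeof(val)))	// one inline movl
 *		return -EFAULT;
 */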

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,src,size);
	switch (size) {
	case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
		return ret;
	case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
		return ret;
	case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
		return ret;
	case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
		return ret;
	case 10:
		__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
		if (unlikely(ret)) return ret;
		asm("":::"memory");	/* compiler barrier between the two stores */
		__put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
		return ret;
	case 16:
		__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
		if (unlikely(ret)) return ret;
		asm("":::"memory");	/* compiler barrier between the two stores */
		__put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst,src,size);
	}
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,(__force void *)src,size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
		if (likely(!ret))
			__put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
		if (likely(!ret))
			__put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
		if (likely(!ret))
			__put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
		if (likely(!ret))
			__put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,(__force void *)src,size);
	}
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
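
/*
 * String-copy call site (illustrative sketch): strncpy_from_user()
 * returns the copied length on success and a negative errno on a
 * faulting access.  The fixed-size buffer here is hypothetical.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *	if (len < 0)
 *		return len;
 *	name[len] = '\0';
 */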

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
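
/*
 * The nocache variants use non-temporal stores so a large one-shot copy
 * does not evict useful cache lines.  Illustrative sketch (the scratch
 * destination buffer here is hypothetical):
 *
 *	if (__copy_from_user_nocache(scratch, ubuf, len))
 *		return -EFAULT;
 */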

#endif /* __X86_64_UACCESS_H */