/* arch/sparc/include/asm/uaccess_64.h */

#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif

#ifndef __ASSEMBLY__

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS	((mm_segment_t) { ASI_P })
#define USER_DS		((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs() ((mm_segment_t) { get_thread_current_ds() })
#define get_ds() (KERNEL_DS)

#define segment_eq(a,b)  ((a).seg == (b).seg)

#define set_fs(val) \
do { \
	set_thread_current_ds((val).seg); \
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while(0)
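
/*
 * Illustrative sketch (added, not part of the original header): set_fs()
 * was historically used to let kernel code pass kernel pointers through
 * the user-access routines by temporarily widening the "segment", e.g.:
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	...call code that uses get_user()/put_user() on a kernel buffer...
 *	set_fs(old_fs);
 */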

static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}
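
/*
 * Descriptive note (added, not in the original): both checks can simply
 * return 1 because, as the comment above explains, user accesses go
 * through the secondary ASI into a completely separate address-space
 * map, so no address-range check against kernel mappings is needed.
 */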

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned int insn, fixup;
};

extern void __ret_efault(void);
extern void __retl_efault(void);
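
/*
 * Added note (not in the original): the ".word 1b, 3b" directives emitted
 * into the __ex_table section by the inline-asm macros below are what
 * populate these insn/fixup pairs; on a fault the handler looks up the
 * faulting PC in this table and resumes at the matching fixup address.
 */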

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x,ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })

#define get_user(x,ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })

#define __put_user(x,ptr) put_user(x,ptr)
#define __get_user(x,ptr) get_user(x,ptr)
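
/*
 * Usage sketch (added for illustration, not part of the original): the
 * "two values" are the error code (the macro's result, 0 or -EFAULT) and
 * the transferred datum (written through the first argument), e.g.
 *
 *	int val;
 *	if (get_user(val, (int __user *)uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int __user *)uptr))
 *		return -EFAULT;
 */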

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data,addr,size) ({ \
	register int __pu_ret; \
	switch (size) { \
	case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
	case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
	case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
	case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
	default: __pu_ret = __put_user_bad(); break; \
	} __pu_ret; })

#define __put_user_asm(x,size,addr,ret) \
__asm__ __volatile__( \
	"/* Put user asm, inline. */\n" \
"1:\t"	"st"#size "a %1, [%2] %%asi\n\t" \
	"clr	%0\n" \
"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align	4\n" \
"3:\n\t" \
	"sethi	%%hi(2b), %0\n\t" \
	"jmpl	%0 + %%lo(2b), %%g0\n\t" \
	" mov	%3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align	4\n\t" \
	".word	1b, 3b\n\t" \
	".previous\n\n\t" \
	: "=r" (ret) : "r" (x), "r" (__m(addr)), \
	  "i" (-EFAULT))

extern int __put_user_bad(void);

#define __get_user_nocheck(data,addr,size,type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	switch (size) { \
	case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
	case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
	case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
	case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
	default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
	} data = (type) __gu_val; __gu_ret; })

#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
	register unsigned long __gu_val __asm__ ("l1"); \
	switch (size) { \
	case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
	case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
	case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
	case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
	default: if (__get_user_bad()) return retval; \
	} data = (type) __gu_val; })

#define __get_user_asm(x,size,addr,ret) \
__asm__ __volatile__( \
	"/* Get user asm, inline. */\n" \
"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t" \
	"clr	%0\n" \
"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align	4\n" \
"3:\n\t" \
	"sethi	%%hi(2b), %0\n\t" \
	"clr	%1\n\t" \
	"jmpl	%0 + %%lo(2b), %%g0\n\t" \
	" mov	%3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align	4\n\t" \
	".word	1b, 3b\n\n\t" \
	".previous\n\t" \
	: "=r" (ret), "=r" (x) : "r" (__m(addr)), \
	  "i" (-EFAULT))

#define __get_user_asm_ret(x,size,addr,retval) \
if (__builtin_constant_p(retval) && retval == -EFAULT) \
	__asm__ __volatile__( \
	"/* Get user asm ret, inline. */\n" \
"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align	4\n\t" \
	".word	1b,__ret_efault\n\n\t" \
	".previous\n\t" \
	: "=r" (x) : "r" (__m(addr))); \
else \
	__asm__ __volatile__( \
	"/* Get user asm ret, inline. */\n" \
"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align	4\n" \
"3:\n\t" \
	"ret\n\t" \
	" restore %%g0, %2, %%o0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align	4\n\t" \
	".word	1b, 3b\n\n\t" \
	".previous\n\t" \
	: "=r" (x) : "r" (__m(addr)), "i" (retval))

extern int __get_user_bad(void);

extern unsigned long __must_check ___copy_from_user(void *to,
						    const void __user *from,
						    unsigned long size);
extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
					   unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_from_user(to, from, size);

	if (unlikely(ret))
		ret = copy_from_user_fixup(to, from, size);

	return ret;
}
#define __copy_from_user copy_from_user
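
/*
 * Added note (not in the original, hedged): ___copy_from_user is the
 * optimized bulk copy; if it reports a fault, copy_from_user_fixup() is
 * expected to redo the work carefully and return how many bytes were
 * actually left uncopied.  Callers use the usual convention that a
 * nonzero return means "bytes not copied", e.g.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */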

extern unsigned long __must_check ___copy_to_user(void __user *to,
						  const void *from,
						  unsigned long size);
extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
					unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret = ___copy_to_user(to, from, size);

	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user

extern unsigned long __must_check ___copy_in_user(void __user *to,
						  const void __user *from,
						  unsigned long size);
extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
					unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user

extern unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);

#define strncpy_from_user __strncpy_from_user

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

#define strlen_user __strlen_user
#define strnlen_user __strnlen_user
#define __copy_to_user_inatomic ___copy_to_user
#define __copy_from_user_inatomic ___copy_from_user

#endif /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */