/* arch/sparc/include/asm/uaccess_64.h */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif
#ifndef __ASSEMBLY__

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS	((mm_segment_t) { ASI_P })
#define USER_DS		((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs() ((mm_segment_t) { get_thread_current_ds() })
#define get_ds() (KERNEL_DS)

#define segment_eq(a,b) ((a).seg == (b).seg)

#define set_fs(val) \
do { \
	set_thread_current_ds((val).seg); \
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while(0)
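/*
 * Because user and kernel accesses go through different ASIs, there is
 * no kernel address range that a user pointer could alias.  Any address
 * is therefore acceptable; bad pointers simply fault and are handled
 * through the exception table below, which is why both checks can
 * unconditionally succeed.
 */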
static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}
/* The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned int insn, fixup;
};
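/*
 * Each insn/fixup pair is emitted by the asm macros below as a
 * ".word 1b, 3b" (or ".word 1b, __ret_efault") directive in the
 * __ex_table section: the address of the faulting load/store and the
 * address of the fixup code that handles it.
 */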
extern void __ret_efault(void);
extern void __retl_efault(void);
/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x,ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })

#define get_user(x,ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
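/*
 * The "unchecked" variants can simply alias the checked ones:
 * access_ok() always succeeds here, so there is no check to skip.
 */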
#define __put_user(x,ptr) put_user(x,ptr)
#define __get_user(x,ptr) get_user(x,ptr)
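/*
 * Casting the target address to a pointer to a notionally large object
 * is the traditional uaccess idiom for asm memory operands.  Since the
 * address is only ever passed as an "r" operand below, the cast here
 * mostly serves as documentation.
 */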
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))
#define __put_user_nocheck(data,addr,size) ({ \
	register int __pu_ret; \
	switch (size) { \
	case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
	case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
	case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
	case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
	default: __pu_ret = __put_user_bad(); break; \
	} __pu_ret; })
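/*
 * The store at label 1 is allowed to fault.  On a fault, the __ex_table
 * entry redirects execution to the fixup at label 3, which loads -EFAULT
 * into the return value and jumps back to label 2, just past the
 * fast-path "clr %0" that signals success.
 */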
#define __put_user_asm(x,size,addr,ret) \
__asm__ __volatile__( \
	"/* Put user asm, inline. */\n" \
"1:\t"	"st"#size "a %1, [%2] %%asi\n\t" \
	"clr %0\n" \
"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\t" \
	".previous\n\n\t" \
	: "=r" (ret) : "r" (x), "r" (__m(addr)), \
	  "i" (-EFAULT))

extern int __put_user_bad(void);
#define __get_user_nocheck(data,addr,size,type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	switch (size) { \
	case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
	case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
	case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
	case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
	default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
	} data = (type) __gu_val; __gu_ret; })
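/*
 * The _ret variant is for callers that want a faulting access to return
 * "retval" directly from the enclosing function instead of propagating
 * an error code through a variable.
 */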
#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
	register unsigned long __gu_val __asm__ ("l1"); \
	switch (size) { \
	case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
	case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
	case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
	case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
	default: if (__get_user_bad()) return retval; \
	} data = (type) __gu_val; })
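/*
 * On a fault the fixup clears the destination register ("clr %1") before
 * jumping back, so a failed get_user() reads as zero rather than leaking
 * whatever happened to be in the register.
 */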
#define __get_user_asm(x,size,addr,ret) \
__asm__ __volatile__( \
	"/* Get user asm, inline. */\n" \
"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t" \
	"clr %0\n" \
"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"clr %1\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\t" \
	: "=r" (ret), "=r" (x) : "r" (__m(addr)), \
	  "i" (-EFAULT))
#define __get_user_asm_ret(x,size,addr,retval) \
if (__builtin_constant_p(retval) && retval == -EFAULT) \
	__asm__ __volatile__( \
	"/* Get user asm ret, inline. */\n" \
"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b,__ret_efault\n\n\t" \
	".previous\n\t" \
	: "=r" (x) : "r" (__m(addr))); \
else \
	__asm__ __volatile__( \
	"/* Get user asm ret, inline. */\n" \
"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
"3:\n\t" \
	"ret\n\t" \
	" restore %%g0, %2, %%o0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\t" \
	: "=r" (x) : "r" (__m(addr)), "i" (retval))

extern int __get_user_bad(void);
extern unsigned long __must_check ___copy_from_user(void *to,
						    const void __user *from,
						    unsigned long size);
extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
					   unsigned long size);
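/*
 * The optimized copy returns nonzero if anything went wrong; the fixup
 * pass then redoes the copy carefully to determine how many bytes were
 * actually left uncopied.  copy_to_user() and copy_in_user() below
 * follow the same pattern.
 */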
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_from_user(to, from, size);

	if (unlikely(ret))
		ret = copy_from_user_fixup(to, from, size);

	return ret;
}

#define __copy_from_user copy_from_user
extern unsigned long __must_check ___copy_to_user(void __user *to,
						  const void *from,
						  unsigned long size);
extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
					unsigned long size);

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret = ___copy_to_user(to, from, size);

	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);

	return ret;
}

#define __copy_to_user copy_to_user
extern unsigned long __must_check ___copy_in_user(void __user *to,
						  const void __user *from,
						  unsigned long size);
extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
					unsigned long size);

static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);

	return ret;
}

#define __copy_in_user copy_in_user
extern unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);

#define strncpy_from_user __strncpy_from_user

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

#define strlen_user __strlen_user
#define strnlen_user __strnlen_user
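/*
 * The _inatomic variants map straight to the raw copy routines with no
 * fault fixup pass, so callers cannot rely on an exact "bytes not
 * copied" count on failure.
 */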
#define __copy_to_user_inatomic ___copy_to_user
#define __copy_from_user_inatomic ___copy_from_user

#endif /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */