#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS ((mm_segment_t) { ASI_P })
#define USER_DS   ((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b) ((a).seg == (b).seg)

#define set_fs(val) \
do { \
	current_thread_info()->current_ds = (val).seg; \
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while(0)
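
/*
 * Illustrative sketch (added commentary, not part of the original
 * header): callers that must feed a kernel buffer through an API that
 * takes a __user pointer use the classic save/override/restore
 * pattern; kernel_read_compat() below is a hypothetical caller:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = kernel_read_compat((void __user *)kbuf, len);
 *	set_fs(old_fs);
 *
 * On sparc64, set_fs() also rewrites %asi, so the ASI-relative
 * loads/stores in the accessors below hit the chosen address space.
 */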

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit) \
({ \
	__chk_user_ptr(addr); \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}
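
/*
 * Note (added commentary): because user accesses go through the
 * secondary ASI into a completely separate address space, there is no
 * kernel/user boundary to range-check here, so access_ok() can succeed
 * unconditionally; a bad user pointer simply faults and is recovered
 * via the exception table below.  A typical (hypothetical) caller:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 */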

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned int insn, fixup;
};

void __ret_efault(void);
void __retl_efault(void);
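
/*
 * Illustrative note (added commentary): the accessors below pair each
 * faultable "1:" load/store with its fixup label by emitting
 *
 *	.section __ex_table,"a"
 *	.word	1b, 3b
 *	.previous
 *
 * so that a fault at 1b resumes at 3b, which sets -EFAULT and jumps
 * back to the mainline code at 2b.
 */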

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
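
/*
 * Usage sketch (added commentary, hypothetical caller): the "two
 * values" are the transferred datum and an error code, e.g.
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 *
 * The access width is picked from sizeof(*uptr) at compile time, via
 * the switch in __get_user_nocheck()/__put_user_nocheck() below.
 */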

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({ \
	register int __pu_ret; \
	switch (size) { \
	case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
	case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
	case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
	case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
	default: __pu_ret = __put_user_bad(); break; \
	} \
	__pu_ret; \
})

#define __put_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
		"/* Put user asm, inline. */\n" \
	"1:\t"	"st"#size "a %1, [%2] %%asi\n\t" \
		"clr	%0\n" \
	"2:\n\n\t" \
		".section .fixup,#alloc,#execinstr\n\t" \
		".align	4\n" \
	"3:\n\t" \
		"sethi	%%hi(2b), %0\n\t" \
		"jmpl	%0 + %%lo(2b), %%g0\n\t" \
		" mov	%3, %0\n\n\t" \
		".previous\n\t" \
		".section __ex_table,\"a\"\n\t" \
		".align	4\n\t" \
		".word	1b, 3b\n\t" \
		".previous\n\n\t" \
	       : "=r" (ret) : "r" (x), "r" (__m(addr)), \
		 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	switch (size) { \
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \
	default: \
		__gu_val = 0; \
		__gu_ret = __get_user_bad(); \
		break; \
	} \
	data = (__force type) __gu_val; \
	__gu_ret; \
})

#define __get_user_nocheck_ret(data, addr, size, type, retval) ({ \
	register unsigned long __gu_val __asm__ ("l1"); \
	switch (size) { \
	case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \
	case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \
	case 4: __get_user_asm_ret(__gu_val, uw, addr, retval); break; \
	case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break; \
	default: \
		if (__get_user_bad()) \
			return retval; \
	} \
	data = (__force type) __gu_val; \
})

#define __get_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
		"/* Get user asm, inline. */\n" \
	"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t" \
		"clr	%0\n" \
	"2:\n\n\t" \
		".section .fixup,#alloc,#execinstr\n\t" \
		".align	4\n" \
	"3:\n\t" \
		"sethi	%%hi(2b), %0\n\t" \
		"clr	%1\n\t" \
		"jmpl	%0 + %%lo(2b), %%g0\n\t" \
		" mov	%3, %0\n\n\t" \
		".previous\n\t" \
		".section __ex_table,\"a\"\n\t" \
		".align	4\n\t" \
		".word	1b, 3b\n\n\t" \
		".previous\n\t" \
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)), \
		 "i" (-EFAULT))

#define __get_user_asm_ret(x, size, addr, retval) \
if (__builtin_constant_p(retval) && retval == -EFAULT) \
	__asm__ __volatile__( \
		"/* Get user asm ret, inline. */\n" \
	"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t" \
		".section __ex_table,\"a\"\n\t" \
		".align	4\n\t" \
		".word	1b,__ret_efault\n\n\t" \
		".previous\n\t" \
	       : "=r" (x) : "r" (__m(addr))); \
else \
	__asm__ __volatile__( \
		"/* Get user asm ret, inline. */\n" \
	"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t" \
		".section .fixup,#alloc,#execinstr\n\t" \
		".align	4\n" \
	"3:\n\t" \
		"ret\n\t" \
		" restore %%g0, %2, %%o0\n\n\t" \
		".previous\n\t" \
		".section __ex_table,\"a\"\n\t" \
		".align	4\n\t" \
		".word	1b, 3b\n\n\t" \
		".previous\n\t" \
	       : "=r" (x) : "r" (__m(addr)), "i" (retval))

int __get_user_bad(void);

unsigned long __must_check ___copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);
unsigned long copy_from_user_fixup(void *to, const void __user *from,
				   unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_from_user(to, from, size);

	if (unlikely(ret))
		ret = copy_from_user_fixup(to, from, size);

	return ret;
}
#define __copy_from_user copy_from_user
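
/*
 * Usage sketch (added commentary, hypothetical caller): the return
 * value is the number of bytes left uncopied, so zero means success:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *
 * On a fault, the optimized ___copy_from_user() bails out and the
 * fixup variant recomputes exactly how many bytes remained.
 */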

unsigned long __must_check ___copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
unsigned long copy_to_user_fixup(void __user *to, const void *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret = ___copy_to_user(to, from, size);

	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user

unsigned long __must_check ___copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);
unsigned long copy_in_user_fixup(void __user *to, void __user *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
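
/*
 * Note (added commentary): per the usual kernel convention, these
 * return the string length *including* the terminating NUL, or 0 on
 * an invalid userspace address.
 */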

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */