#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */
#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>
/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */
#define KERNEL_DS	((mm_segment_t) { ASI_P })
#define USER_DS		((mm_segment_t) { ASI_AIUS })	/* har har har */
#define VERIFY_READ	0
#define VERIFY_WRITE	1
#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b)  ((a).seg == (b).seg)
#define set_fs(val)								\
do {										\
	current_thread_info()->current_ds = (val).seg;				\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));	\
} while(0)
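
/*
 * A minimal usage sketch, not part of this header: the traditional
 * save/override/restore pattern around set_fs().  The helper name
 * and read_routine() are hypothetical, for illustration only.
 */
#if 0
extern long read_routine(char __user *buf, unsigned long len);

static long read_with_kernel_ds(char *kbuf, unsigned long len)
{
	mm_segment_t old_fs = get_fs();	/* remember the caller's segment */
	long err;

	set_fs(KERNEL_DS);		/* %asi now selects the kernel VM
					 * map, so user accessors accept
					 * kernel pointers for the duration */
	err = read_routine((char __user *)kbuf, len);
	set_fs(old_fs);			/* always restore before returning */
	return err;
}
#endif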
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}
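
/*
 * Note on the non-constant path above: "addr += size" may wrap past
 * zero, and "addr < size" catches exactly that case.  For example,
 * addr = ~0UL with size = 2 wraps to 1, which is below size, so the
 * range is rejected even though the wrapped sum is under the limit.
 */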
#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
	__chk_user_ptr(addr);                                           \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}
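
/*
 * A hedged usage sketch, not part of this header: the canonical
 * check-then-transfer pattern.  On sparc64 the check always succeeds,
 * since a bad user pointer simply faults in the secondary ASI and is
 * recovered via the exception table below; the ioctl-style helper is
 * hypothetical.
 */
#if 0
static int example_report_answer(unsigned int __user *up)
{
	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
		return -EFAULT;
	return put_user(42U, up);	/* defined further down */
}
#endif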
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	unsigned int insn, fixup;
};
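
/*
 * Conceptual sketch only: a linear-search version of the lookup the
 * fault handler performs.  The real kernel sorts __ex_table and
 * binary-searches it; the helper name below is hypothetical, and the
 * section-boundary symbols are the generic linker-provided ones.
 */
#if 0
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

static unsigned int example_search_fixup(unsigned int faulting_insn)
{
	struct exception_table_entry *e;

	for (e = __start___ex_table; e < __stop___ex_table; e++)
		if (e->insn == faulting_insn)
			return e->fixup;	/* resume here instead */
	return 0;				/* no fixup: genuine fault */
}
#endif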
void __ret_efault(void);
void __retl_efault(void);
/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
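
/*
 * A minimal sketch, not part of this header: get_user() returns the
 * error code as its value and delivers the datum through its first
 * argument, which is how these macros return two values without an
 * extra pointer.  The helper below is hypothetical.
 */
#if 0
static long example_increment(int __user *up)
{
	int val;

	if (get_user(val, up))		/* nonzero means -EFAULT */
		return -EFAULT;
	return put_user(val + 1, up);	/* 0 on success */
}
#endif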
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))
#define __put_user_nocheck(data, addr, size) ({				\
	register int __pu_ret;						\
	switch (size) {							\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;		\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;		\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;		\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;		\
	default: __pu_ret = __put_user_bad(); break;			\
	}								\
	__pu_ret;							\
})
#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

int __put_user_bad(void);
#define __get_user_nocheck(data, addr, size, type) ({			     \
	register int __gu_ret;						     \
	register unsigned long __gu_val;				     \
	switch (size) {							     \
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	     \
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	     \
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break;	     \
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;	     \
	default:							     \
		__gu_val = 0;						     \
		__gu_ret = __get_user_bad();				     \
		break;							     \
	}								     \
	data = (__force type) __gu_val;					     \
	__gu_ret;							     \
})
#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);
unsigned long __must_check ___copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);
unsigned long copy_from_user_fixup(void *to, const void __user *from,
				   unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret;

	check_object_size(to, size, false);

	ret = ___copy_from_user(to, from, size);
	if (unlikely(ret))
		ret = copy_from_user_fixup(to, from, size);

	return ret;
}
#define __copy_from_user copy_from_user
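
/*
 * A hedged usage sketch, not part of this header: copy_from_user()
 * returns the number of bytes it could NOT copy, so zero means
 * success.  The request structure and consumer are hypothetical.
 */
#if 0
struct example_req {
	unsigned long arg;
};

extern long do_something(unsigned long arg);	/* hypothetical */

static long example_handler(const void __user *uptr)
{
	struct example_req req;

	if (copy_from_user(&req, uptr, sizeof(req)))
		return -EFAULT;		/* partial or no copy */
	return do_something(req.arg);
}
#endif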
unsigned long __must_check ___copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
unsigned long copy_to_user_fixup(void __user *to, const void *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret;

	check_object_size(from, size, true);

	ret = ___copy_to_user(to, from, size);
	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user
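
/*
 * The mirror-image sketch for the write direction (hypothetical
 * helper, reusing struct example_req from the sketch above): a
 * nonzero return again means trailing bytes were left uncopied.
 */
#if 0
static long example_export(void __user *uptr, const struct example_req *req)
{
	if (copy_to_user(uptr, req, sizeof(*req)))
		return -EFAULT;
	return 0;
}
#endif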
unsigned long __must_check ___copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);
unsigned long copy_in_user_fixup(void __user *to, void __user *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user
unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user
__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
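
/*
 * A hedged sketch, not part of this header: strnlen_user() counts the
 * terminating NUL, returns 0 on a faulting access, and returns a value
 * larger than the bound when no NUL is found within it.  The helper
 * and EXAMPLE_MAX bound are hypothetical.
 */
#if 0
#define EXAMPLE_MAX	4096

static long example_user_strlen(const char __user *ustr)
{
	long len = strnlen_user(ustr, EXAMPLE_MAX);

	if (len == 0)			/* faulted before finding a NUL */
		return -EFAULT;
	if (len > EXAMPLE_MAX)		/* unterminated within the bound */
		return -EINVAL;
	return len - 1;			/* length without the terminator */
}
#endif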
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);
#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */