/* include/asm-ppc/uaccess.h -- Linux 2.6.12-rc6 */

#ifdef __KERNEL__
#ifndef _PPC_UACCESS_H
#define _PPC_UACCESS_H

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define KERNEL_DS	((mm_segment_t) { ~0UL })
#define USER_DS		((mm_segment_t) { TASK_SIZE - 1 })

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __access_ok(addr,size) \
	((addr) <= current->thread.fs.seg \
	 && ((size) == 0 || (size) - 1 <= current->thread.fs.seg - (addr)))

#define access_ok(type, addr, size) \
	(__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size)))
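
/*
 * Illustrative note (not part of the original header): __access_ok()
 * accepts [addr, addr + size - 1] only if the whole range lies at or
 * below the segment limit, and the "(size) - 1 <= seg - (addr)" form
 * avoids wrapping when addr + size would overflow.  For example, with a
 * hypothetical seg = 0x7fffffff (USER_DS when TASK_SIZE = 0x80000000):
 *
 *	__access_ok(0x10000000, 0x100)	-> 1  (range entirely below seg)
 *	__access_ok(0x7ffffff0, 0x100)	-> 0  (0xff > 0x7fffffff - 0x7ffffff0)
 *	__access_ok(0xfffffff0, 0x100)	-> 0  (addr alone exceeds seg)
 */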

/* this function will go away soon - use access_ok() instead */
extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}
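
/*
 * Illustrative sketch (not from the original header): the classic way to
 * let these routines act on kernel addresses is to temporarily widen the
 * "segment", e.g.
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	err = copy_from_user(buf, (const void __user *)kptr, len);
 *	set_fs(old_fs);
 *
 * With KERNEL_DS in force, access_ok() always succeeds, so the access is
 * no longer limited to addresses below TASK_SIZE.  "buf", "kptr", "len"
 * and "err" are hypothetical names used only for this example.
 */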

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};
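
/*
 * Illustrative note (not part of the original header): each
 * ".long 1b,3b" emitted into __ex_table by the macros below becomes one
 * exception_table_entry, conceptually
 *
 *	{ .insn = <address of the "1:" load/store>, .fixup = <address of "3:"> }
 *
 * so a fault at .insn makes the page-fault handler resume at .fixup
 * instead of oopsing.
 */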

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * The "user64" versions of the user access functions are versions that
 * allow access of 64-bit data.  The "get_user" functions do not
 * properly handle 64-bit data because the value gets down cast to a long.
 * The "put_user" functions already handle 64-bit data properly but we add
 * "user64" versions for completeness.
 */
#define get_user(x,ptr) \
	__get_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user64(x,ptr) \
	__get_user64_check((x),(ptr),sizeof(*(ptr)))
#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define put_user64(x,ptr)	put_user(x,ptr)

#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user64(x,ptr) \
	__get_user64_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __put_user64(x,ptr)	__put_user(x,ptr)
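
/*
 * Illustrative sketch (not from the original header): typical use from a
 * syscall or ioctl handler, where "uptr" is a hypothetical "int __user *"
 * argument:
 *
 *	int val;
 *
 *	if (get_user(val, uptr))	// nonzero means -EFAULT
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 * The __get_user()/__put_user() forms skip the access_ok() check and are
 * only safe after the caller has validated the range itself, e.g. once
 * before a loop of accesses to the same user buffer.
 */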

extern long __put_user_bad(void);

#define __put_user_nocheck(x,ptr,size) \
({ \
	long __pu_err; \
	__chk_user_ptr(ptr); \
	__put_user_size((x),(ptr),(size),__pu_err); \
	__pu_err; \
})

#define __put_user_check(x,ptr,size) \
({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
		__put_user_size((x),__pu_addr,(size),__pu_err); \
	__pu_err; \
})

#define __put_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: \
		__put_user_asm(x, ptr, retval, "stb"); \
		break; \
	case 2: \
		__put_user_asm(x, ptr, retval, "sth"); \
		break; \
	case 4: \
		__put_user_asm(x, ptr, retval, "stw"); \
		break; \
	case 8: \
		__put_user_asm2(x, ptr, retval); \
		break; \
	default: \
		__put_user_bad(); \
	} \
} while (0)

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		"1:	"op" %1,0(%2)\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	li %0,%3\n" \
		"	b 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 2\n" \
		"	.long 1b,3b\n" \
		".previous" \
		: "=r" (err) \
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#define __put_user_asm2(x, addr, err) \
	__asm__ __volatile__( \
		"1:	stw %1,0(%2)\n" \
		"2:	stw %1+1,4(%2)\n" \
		"3:\n" \
		".section .fixup,\"ax\"\n" \
		"4:	li %0,%3\n" \
		"	b 3b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 2\n" \
		"	.long 1b,4b\n" \
		"	.long 2b,4b\n" \
		".previous" \
		: "=r" (err) \
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_nocheck(x, ptr, size) \
({ \
	long __gu_err; \
	unsigned long __gu_val; \
	__chk_user_ptr(ptr); \
	__get_user_size(__gu_val, (ptr), (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

#define __get_user64_nocheck(x, ptr, size) \
({ \
	long __gu_err; \
	long long __gu_val; \
	__chk_user_ptr(ptr); \
	__get_user_size64(__gu_val, (ptr), (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

#define __get_user_check(x, ptr, size) \
({ \
	long __gu_err = -EFAULT; \
	unsigned long __gu_val = 0; \
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	if (access_ok(VERIFY_READ, __gu_addr, (size))) \
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

#define __get_user64_check(x, ptr, size) \
({ \
	long __gu_err = -EFAULT; \
	long long __gu_val = 0; \
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	if (access_ok(VERIFY_READ, __gu_addr, (size))) \
		__get_user_size64(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: \
		__get_user_asm(x, ptr, retval, "lbz"); \
		break; \
	case 2: \
		__get_user_asm(x, ptr, retval, "lhz"); \
		break; \
	case 4: \
		__get_user_asm(x, ptr, retval, "lwz"); \
		break; \
	default: \
		x = __get_user_bad(); \
	} \
} while (0)

#define __get_user_size64(x, ptr, size, retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: \
		__get_user_asm(x, ptr, retval, "lbz"); \
		break; \
	case 2: \
		__get_user_asm(x, ptr, retval, "lhz"); \
		break; \
	case 4: \
		__get_user_asm(x, ptr, retval, "lwz"); \
		break; \
	case 8: \
		__get_user_asm2(x, ptr, retval); \
		break; \
	default: \
		x = __get_user_bad(); \
	} \
} while (0)

#define __get_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		"1:	"op" %1,0(%2)\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	li %0,%3\n" \
		"	li %1,0\n" \
		"	b 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 2\n" \
		"	.long 1b,3b\n" \
		".previous" \
		: "=r"(err), "=r"(x) \
		: "b"(addr), "i"(-EFAULT), "0"(err))

#define __get_user_asm2(x, addr, err) \
	__asm__ __volatile__( \
		"1:	lwz %1,0(%2)\n" \
		"2:	lwz %1+1,4(%2)\n" \
		"3:\n" \
		".section .fixup,\"ax\"\n" \
		"4:	li %0,%3\n" \
		"	li %1,0\n" \
		"	li %1+1,0\n" \
		"	b 3b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 2\n" \
		"	.long 1b,4b\n" \
		"	.long 2b,4b\n" \
		".previous" \
		: "=r"(err), "=&r"(x) \
		: "b"(addr), "i"(-EFAULT), "0"(err))

/* more complex routines */

extern int __copy_tofrom_user(void __user *to, const void __user *from,
			      unsigned long size);

extern inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_READ, from, n))
		return __copy_tofrom_user((__force void __user *)to, from, n);
	if ((unsigned long)from < TASK_SIZE) {
		over = (unsigned long)from + n - TASK_SIZE;
		return __copy_tofrom_user((__force void __user *)to, from, n - over) + over;
	}
	return n;
}

extern inline unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_tofrom_user(to, (__force void __user *) from, n);
	if ((unsigned long)to < TASK_SIZE) {
		over = (unsigned long)to + n - TASK_SIZE;
		return __copy_tofrom_user(to, (__force void __user *) from, n - over) + over;
	}
	return n;
}

static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long size)
{
	return __copy_tofrom_user((__force void __user *)to, from, size);
}

static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long size)
{
	return __copy_tofrom_user(to, (__force void __user *)from, size);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
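
/*
 * Illustrative sketch (not from the original header): typical use, with a
 * hypothetical kernel buffer "kbuf" and user pointer "ubuf":
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;		// nonzero return = bytes NOT copied
 *
 * When the range straddles TASK_SIZE the code above still copies the part
 * that lies in user space: e.g. with a hypothetical TASK_SIZE of
 * 0x80000000, from = 0x7ffffff0 and n = 0x20 gives over = 0x10, so 0x10
 * bytes are copied and 0x10 is returned as the uncopied remainder.
 */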

extern unsigned long __clear_user(void __user *addr, unsigned long size);

extern inline unsigned long
clear_user(void __user *addr, unsigned long size)
{
	if (access_ok(VERIFY_WRITE, addr, size))
		return __clear_user(addr, size);
	if ((unsigned long)addr < TASK_SIZE) {
		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
		return __clear_user(addr, size - over) + over;
	}
	return size;
}

extern int __strncpy_from_user(char *dst, const char __user *src, long count);

extern inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 for error
 */
extern int __strnlen_user(const char __user *str, long len, unsigned long top);

/*
 * Returns the length of the string at str (including the null byte),
 * or 0 if we hit a page we can't access,
 * or something > len if we didn't find a null byte.
 *
 * The `top' parameter to __strnlen_user is to make sure that
 * we can never overflow from the user area into kernel space.
 */
extern __inline__ int strnlen_user(const char __user *str, long len)
{
	unsigned long top = current->thread.fs.seg;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len, top);
}

#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
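
/*
 * Illustrative sketch (not from the original header): fetching a
 * NUL-terminated string from a hypothetical user pointer "ustr":
 *
 *	char name[32];
 *	long n = strnlen_user(ustr, sizeof(name));
 *
 *	if (n == 0 || n > sizeof(name))
 *		return -EFAULT;		// unreadable, or too long
 *	if (strncpy_from_user(name, ustr, sizeof(name)) < 0)
 *		return -EFAULT;
 *
 * Note the convention differs from strnlen(): the returned count includes
 * the terminating NUL, and 0 (not a negative errno) means the string
 * could not be read.
 */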

#endif /* __ASSEMBLY__ */

#endif /* _PPC_UACCESS_H */
#endif /* __KERNEL__ */