/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
11 #ifndef _ASM_MICROBLAZE_UACCESS_H
12 #define _ASM_MICROBLAZE_UACCESS_H
17 #include <linux/kernel.h>
18 #include <linux/errno.h>
19 #include <linux/sched.h> /* RLIMIT_FSIZE */
24 #include <asm/pgtable.h>
25 #include <asm/segment.h>
26 #include <linux/string.h>
/*
 * access_ok() "type" argument values.  VERIFY_READ is referenced by the
 * MMU get_user()/copy_from_user()/strncpy_from_user() paths below but was
 * missing from this copy of the file; both constants are defined here.
 */
#define VERIFY_READ	0
#define VERIFY_WRITE	1

/* Unchecked clear: zero n bytes at addr and report success (0 not cleared). */
#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)
/*
 * Out-of-line no-MMU range check: returns zero when [addr, addr + size)
 * is an acceptable user range, non-zero otherwise.
 */
extern int ___range_ok(unsigned long addr, unsigned long size);

/* Convenience wrapper that normalizes both arguments to unsigned long. */
#define __range_ok(addr, size) \
		___range_ok((unsigned long)(addr), (unsigned long)(size))
/* Non-zero when the range is acceptable (note: __range_ok() == 0 means OK). */
#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
/*
 * Same check without the "type" argument.  BUGFIX: the parameter was
 * previously misspelled "add" while the expansion used "addr", so the
 * macro silently captured a caller-scope variable named addr (or failed
 * to compile); the parameter name is corrected here.
 */
#define __access_ok(addr, size) (__range_ok((addr), (size)) == 0)
/*
 * Deliberately left undefined: referenced only for unsupported access
 * sizes so that any such use fails at link time instead of silently
 * misbehaving at run time.
 */
extern int bad_user_access_length(void);
/* FIXME this is function for optimalization -> memcpy */
/*
 * __get_user(var, ptr) — no-MMU fetch from user space, no access check.
 * Dispatches on sizeof(*ptr): 1/2/4-byte objects are direct assignments,
 * 8-byte objects go through memcpy(), and any other size triggers a
 * link-time error via __get_user_bad().  Evaluates to 0 on success.
 * NOTE(review): interior macro lines were lost in this copy of the file
 * and have been reconstructed from the upstream microblaze header —
 * verify against the canonical source before relying on them.
 */
#define __get_user(var, ptr)				\
({							\
	int __gu_err = 0;				\
	switch (sizeof(*(ptr))) {			\
	case 1:						\
	case 2:						\
	case 4:						\
		(var) = *(ptr);				\
		break;					\
	case 8:						\
		memcpy((void *) &(var), (ptr), 8);	\
		break;					\
	default:					\
		(var) = 0;				\
		__gu_err = __get_user_bad();		\
		break;					\
	}						\
	__gu_err;					\
})

/* Unsupported access size: force a link failure and yield -EFAULT. */
#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))
/* FIXME is not there defined __pu_val */
/*
 * __put_user(var, ptr) — no-MMU store to user space; mirror image of
 * __get_user() above.  8-byte stores bounce through a local __pu_val so
 * that memcpy() has an addressable source object.
 * NOTE(review): interior macro lines were lost in this copy of the file
 * and have been reconstructed from the upstream microblaze header —
 * verify against the canonical source before relying on them.
 */
#define __put_user(var, ptr)					\
({								\
	int __pu_err = 0;					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
		*(ptr) = (var);					\
		break;						\
	case 8: {						\
		typeof(*(ptr)) __pu_val = (var);		\
		memcpy(ptr, &__pu_val, sizeof(__pu_val));	\
		}						\
		break;						\
	default:						\
		__pu_err = __put_user_bad();			\
		break;						\
	}							\
	__pu_err;						\
})

/* Unsupported access size: force a link failure and yield -EFAULT. */
#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))

/* Without an MMU the checked and unchecked accessors are identical. */
#define put_user(x, ptr)	__put_user((x), (ptr))
#define get_user(x, ptr)	__get_user((x), (ptr))
/*
 * No-MMU copies cannot fault: both directions degenerate to memcpy()
 * and always report success (0 bytes not copied).  The __ and _inatomic
 * variants are simple aliases since no access checking is done anyway.
 */
#define copy_to_user(to, from, n) (memcpy((to), (from), (n)), 0)
#define copy_from_user(to, from, n) (memcpy((to), (from), (n)), 0)
#define __copy_to_user(to, from, n) (copy_to_user((to), (from), (n)))
#define __copy_from_user(to, from, n) (copy_from_user((to), (from), (n)))
#define __copy_to_user_inatomic(to, from, n) \
	(__copy_to_user((to), (from), (n)))
#define __copy_from_user_inatomic(to, from, n) \
	(__copy_from_user((to), (from), (n)))
106 static inline unsigned long clear_user(void *addr
, unsigned long size
)
108 if (access_ok(VERIFY_WRITE
, addr
, size
))
109 size
= __clear_user(addr
, size
);
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);

/* Out-of-line user string helpers (no-MMU variants). */
extern long strncpy_from_user(char *dst, const char *src, long count);
extern long strnlen_user(const char *src, long count);
119 #else /* CONFIG_MMU */
/*
 * Address is valid if:
 *  - "addr", "addr + size" and "size" are all below the limit
 */
/*
 * True when addr, size and addr + size all lie below the current segment
 * limit (get_fs().seg); OR-ing the three values lets one comparison
 * cover every case.  Beware: "addr" and "size" are each evaluated more
 * than once — do not pass expressions with side effects.
 */
#define access_ok(type, addr, size) \
(get_fs().seg > (((unsigned long)(addr)) | \
(size) | ((unsigned long)(addr) + (size))))
129 /* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
130 type?"WRITE":"READ",addr,size,get_fs().seg)) */
/*
 * All the __XXX versions macros/functions below do not perform
 * access checking. It is assumed that the necessary checks have been
 * already performed before the function (macro) is called.
 */
/*
 * Checked fetch from user space: validate the range with access_ok(),
 * then delegate to the unchecked __get_user(); evaluates to -EFAULT when
 * the range is rejected.
 * NOTE(review): the statement-expression "({" / "})" wrapper lines were
 * lost in this copy of the file and have been restored.
 */
#define get_user(x, ptr)					\
({								\
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))		\
		? __get_user((x), (ptr)) : -EFAULT;		\
})
/*
 * Checked store to user space: validate the range with access_ok(),
 * then delegate to the unchecked __put_user(); evaluates to -EFAULT when
 * the range is rejected.
 * NOTE(review): the statement-expression "({" / "})" wrapper lines were
 * lost in this copy of the file and have been restored.
 */
#define put_user(x, ptr)					\
({								\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))		\
		? __put_user((x), (ptr)) : -EFAULT;		\
})
/*
 * __get_user(x, ptr) — unchecked MMU fetch.  Dispatches on sizeof(*ptr)
 * to an exception-guarded lbu/lhu/lw load (__get_user_asm); any other
 * size zeroes the result and sets -EINVAL.
 * NOTE(review): several interior macro lines (statement-expression
 * braces, __gu_err declaration, case labels, break statements) appear
 * truncated in this copy — restore from the upstream header before
 * building.  Only comments are added here; the fragment is untouched.
 */
#define __get_user(x, ptr) \
unsigned long __gu_val; \
/*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
switch (sizeof(*(ptr))) { \
__get_user_asm("lbu", (ptr), __gu_val, __gu_err); \
__get_user_asm("lhu", (ptr), __gu_val, __gu_err); \
__get_user_asm("lw", (ptr), __gu_val, __gu_err); \
__gu_val = 0; __gu_err = -EINVAL; \
x = (__typeof__(*(ptr))) __gu_val; \
/*
 * Emit one faultable user load: label 1 marks the <insn> that may fault,
 * the .fixup section supplies the -EFAULT recovery path, and the
 * __ex_table entry pairs the faulting and fixup addresses for the
 * exception handler.  Outputs: %0 = error code, %1 = loaded value.
 * NOTE(review): parts of the asm template (fixup body, table entry,
 * closing quote/parenthesis) are truncated in this copy — restore from
 * the upstream header.  Only comments are added here.
 */
#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
__asm__ __volatile__ ( \
"1:" insn " %1, %2, r0; \
.section .fixup,\"ax\"; \
.section __ex_table,\"a\"; \
: "=r"(__gu_err), "=r"(__gu_val) \
: "r"(__gu_ptr), "i"(-EFAULT) \
/*
 * __put_user(x, ptr) — unchecked MMU store.  Dispatches on the size of a
 * local copy of x to an exception-guarded sb/sh/sw store
 * (__put_user_asm), with a dedicated two-word path (__put_user_asm_8);
 * any other size sets -EINVAL.
 * NOTE(review): several interior macro lines (statement-expression
 * braces, __gu_err declaration, case labels, break statements, result
 * expression) appear truncated in this copy — restore from the upstream
 * header.  Only comments are added here; the fragment is untouched.
 */
#define __put_user(x, ptr) \
__typeof__(*(ptr)) volatile __gu_val = (x); \
switch (sizeof(__gu_val)) { \
__put_user_asm("sb", (ptr), __gu_val, __gu_err); \
__put_user_asm("sh", (ptr), __gu_val, __gu_err); \
__put_user_asm("sw", (ptr), __gu_val, __gu_err); \
__put_user_asm_8((ptr), __gu_val, __gu_err); \
__gu_err = -EINVAL; \
/*
 * Eight-byte user store: loads the value ("lwi %0, %1, 0") and stores it
 * as two words, each guarded by .fixup/-EFAULT recovery and an
 * __ex_table entry.
 * NOTE(review): most of the asm template (second word, store
 * instructions, fixup body, table entries) is truncated in this copy —
 * restore from the upstream header.  Only comments are added here.
 */
#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
__asm__ __volatile__ (" lwi %0, %1, 0; \
.section .fixup,\"ax\"; \
.section __ex_table,\"a\"; \
"r"(__gu_ptr), "i"(-EFAULT) \
/*
 * Emit one faultable user store: label 1 marks the <insn> that may
 * fault; the .fixup section supplies the -EFAULT recovery path and the
 * __ex_table entry pairs the faulting and fixup addresses.
 * NOTE(review): parts of the asm template (fixup body, table entry,
 * output operand list) are truncated in this copy — restore from the
 * upstream header.  Only comments are added here.
 */
#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
__asm__ __volatile__ ( \
"1:" insn " %1, %2, r0; \
.section .fixup,\"ax\"; \
.section __ex_table,\"a\"; \
: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
/*
 * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
 *
 * MMU clear_user: range-check the destination, then zero it with an
 * exception-guarded byte-store loop in inline asm.
 * NOTE(review): the asm loop body, operand lists and the function's
 * closing brace/return are truncated in this copy — restore from the
 * upstream header.  Only comments are added; code tokens are untouched.
 */
static inline int clear_user(char *to, int size)
if (size && access_ok(VERIFY_WRITE, to, size)) {
__asm__ __volatile__ (" \
.section __ex_table,\"a\"; \
275 extern unsigned long __copy_tofrom_user(void __user
*to
,
276 const void __user
*from
, unsigned long size
);
/*
 * Checked copies: validate the user range with access_ok() first; on a
 * bad range evaluate to -EFAULT, otherwise hand off to
 * __copy_tofrom_user().  The __ and _inatomic variants alias the
 * checked versions.
 * NOTE(review): the ": -EFAULT)" fallback lines of both conditionals
 * were lost in this copy of the file and have been restored (pattern
 * matches the surviving strncpy_from_user() macro below).
 */
#define copy_to_user(to, from, n) \
	(access_ok(VERIFY_WRITE, (to), (n)) ? \
			__copy_tofrom_user((void __user *)(to), \
				(__force const void __user *)(from), (n)) \
		: -EFAULT)

#define __copy_to_user(to, from, n) copy_to_user((to), (from), (n))
#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))

#define copy_from_user(to, from, n) \
	(access_ok(VERIFY_READ, (from), (n)) ? \
			__copy_tofrom_user((__force void __user *)(to), \
				(void __user *)(from), (n)) \
		: -EFAULT)

#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
	copy_from_user((to), (from), (n))
297 extern int __strncpy_user(char *to
, const char __user
*from
, int len
);
298 extern int __strnlen_user(const char __user
*sstr
, int len
);
/*
 * Checked string helpers: only the first byte is probed with
 * access_ok(); the out-of-line helpers handle faults past it via the
 * exception table.  Note the asymmetric failure values:
 * strncpy_from_user() yields -EFAULT, strnlen_user() yields 0.
 */
#define strncpy_from_user(to, from, len) \
	(access_ok(VERIFY_READ, from, 1) ? \
		__strncpy_user(to, from, len) : -EFAULT)
#define strnlen_user(str, len) \
	(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
306 #endif /* CONFIG_MMU */
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */
/*
 * One fault-fixup record: insn is the address of the instruction allowed
 * to fault, fixup the address at which execution resumes.
 * NOTE(review): the closing "};" was lost in this copy and is restored.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};
324 #endif /* __ASSEMBLY__ */
325 #endif /* __KERNEL__ */
327 #endif /* _ASM_MICROBLAZE_UACCESS_H */