/*
 * arch/mn10300/include/asm/uaccess.h
 * (Linux kernel source, via the cris-mirror tree)
 */
1 /* MN10300 userspace access functions
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
11 #ifndef _ASM_UACCESS_H
12 #define _ASM_UACCESS_H
15 * User space memory access functions
17 #include <linux/kernel.h>
18 #include <asm/page.h>
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

/* Address-space limits: kernel accesses may reach higher addresses than
 * user accesses, which are bounded by TASK_SIZE. */
#define KERNEL_XDS	MAKE_MM_SEG(0xBFFFFFFF)
#define KERNEL_DS	MAKE_MM_SEG(0x9FFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

/* The current task's address limit lives in thread_info. */
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

/* True if a single address lies below the current address limit. */
#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
43 * check that a range of addresses falls within the current address limit
45 static inline int ___range_ok(unsigned long addr, unsigned int size)
47 int flag = 1, tmp;
49 asm(" add %3,%1 \n" /* set C-flag if addr + size > 4Gb */
50 " bcs 0f \n"
51 " cmp %4,%1 \n" /* jump if addr+size>limit (error) */
52 " bhi 0f \n"
53 " clr %0 \n" /* mark okay */
54 "0: \n"
55 : "=r"(flag), "=&r"(tmp)
56 : "1"(addr), "ir"(size),
57 "r"(current_thread_info()->addr_limit.seg), "0"(flag)
58 : "cc"
61 return flag;
64 #define __range_ok(addr, size) ___range_ok((unsigned long)(addr), (u32)(size))
66 #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
67 #define __access_ok(addr, size) (__range_ok((addr), (size)) == 0)
69 #include <asm/extable.h>
71 #define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
72 #define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
75 * The "__xxx" versions do not do address space checking, useful when
76 * doing multiple accesses to the same area (the user has to do the
77 * checks by hand with "access_ok()")
79 #define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
80 #define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
82 struct __large_struct { unsigned long buf[100]; };
83 #define __m(x) (*(struct __large_struct *)(x))
/*
 * Fetch a 1/2/4-byte value from userspace without an access_ok() check.
 * Sets __gu_err to 0 on success or -EFAULT on fault (via __get_user_asm's
 * fixup), and evaluates to that error code.  Unsupported sizes trip a
 * link-time error through __get_user_unknown().
 */
#define __get_user_nocheck(x, ptr, size)			\
({								\
	unsigned long __gu_addr;				\
	int __gu_err;						\
	__gu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1: {						\
		unsigned char __gu_val;				\
		__get_user_asm("bu");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	case 2: {						\
		unsigned short __gu_val;			\
		__get_user_asm("hu");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	case 4: {						\
		unsigned int __gu_val;				\
		__get_user_asm("");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	default:						\
		__get_user_unknown();				\
		break;						\
	}							\
	__gu_err;						\
})
/*
 * Checked get_user(): verify the range with __access_ok() first.  On
 * failure, (x) is zeroed so callers never see uninitialised data, and
 * the expression evaluates to -EFAULT.
 */
#define __get_user_check(x, ptr, size)					\
({									\
	const __typeof__(*(ptr))* __guc_ptr = (ptr);			\
	int _e;								\
	if (likely(__access_ok((unsigned long) __guc_ptr, (size))))	\
		_e = __get_user_nocheck((x), __guc_ptr, (size));	\
	else {								\
		_e = -EFAULT;						\
		(x) = (__typeof__(x))0;					\
	}								\
	_e;								\
})
/*
 * One guarded load from userspace.  INSN selects the width suffix
 * ("bu", "hu" or "" for byte/halfword/word).  A fault at label 1 is
 * redirected by the __ex_table entry to the fixup at label 3, which
 * zeroes __gu_val and sets __gu_err = -EFAULT.  Expects __gu_err,
 * __gu_val and __gu_addr in scope (set up by __get_user_nocheck).
 */
#define __get_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%2,%1\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n\t"					\
		"	mov		0,%1\n"			\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 3b\n"		\
		"	.previous"				\
		: "=&r" (__gu_err), "=&r" (__gu_val)		\
		: "m" (__m(__gu_addr)), "i" (-EFAULT));		\
})

/* Deliberately undefined: referencing it is a link-time size error. */
extern int __get_user_unknown(void);
/*
 * Store a 1/2/4/8-byte value to userspace without an access_ok() check.
 * The union view lets the 8-byte case emit two 32-bit stores from
 * bits[0]/bits[1].  Evaluates to 0 or -EFAULT in __pu_err.
 */
#define __put_user_nocheck(x, ptr, size)			\
({								\
	union {							\
		__typeof__(*(ptr)) val;				\
		u32 bits[2];					\
	} __pu_val;						\
	unsigned long __pu_addr;				\
	int __pu_err;						\
	__pu_val.val = (x);					\
	__pu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1:  __put_user_asm("bu"); break;			\
	case 2:  __put_user_asm("hu"); break;			\
	case 4:  __put_user_asm(""  ); break;			\
	case 8:  __put_user_asm8();    break;			\
	default: __pu_err = __put_user_unknown(); break;	\
	}							\
	__pu_err;						\
})
/*
 * Checked put_user(): same as __put_user_nocheck but verifies the
 * destination range with __access_ok() first; yields -EFAULT without
 * touching memory when the range is out of bounds.
 */
#define __put_user_check(x, ptr, size)					\
({									\
	union {								\
		__typeof__(*(ptr)) val;					\
		u32 bits[2];						\
	} __pu_val;							\
	unsigned long __pu_addr;					\
	int __pu_err;							\
	__pu_val.val = (x);						\
	__pu_addr = (unsigned long) (ptr);				\
	if (likely(__access_ok(__pu_addr, size))) {			\
		switch (size) {						\
		case 1:  __put_user_asm("bu"); break;			\
		case 2:  __put_user_asm("hu"); break;			\
		case 4:  __put_user_asm(""  ); break;			\
		case 8:  __put_user_asm8();    break;			\
		default: __pu_err = __put_user_unknown(); break;	\
		}							\
	}								\
	else {								\
		__pu_err = -EFAULT;					\
	}								\
	__pu_err;							\
})
/*
 * One guarded store to userspace; INSN selects the width suffix.  A
 * fault at label 1 jumps (via the __ex_table entry) to the fixup at
 * label 3, which sets __pu_err = -EFAULT.  Expects __pu_err, __pu_val
 * and __pu_addr in scope (set up by the __put_user_* macros).
 */
#define __put_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%1,%2\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n"						\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 3b\n"		\
		"	.previous"				\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.val), "m" (__m(__pu_addr)),	\
		  "i" (-EFAULT)					\
		);						\
})
/*
 * 64-bit store to userspace as two guarded 32-bit stores (low word then
 * high word).  Either faulting store is redirected to the shared fixup
 * at label 4, which sets __pu_err = -EFAULT.
 */
#define __put_user_asm8()					\
({								\
	asm volatile(						\
		"1:	mov		%1,%3		\n"	\
		"2:	mov		%2,%4		\n"	\
		"	mov		0,%0		\n"	\
		"3:					\n"	\
		"	.section	.fixup,\"ax\"	\n"	\
		"4:					\n"	\
		"	mov		%5,%0		\n"	\
		"	jmp		3b		\n"	\
		"	.previous			\n"	\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4		\n"	\
		"	.long		1b, 4b		\n"	\
		"	.long		2b, 4b		\n"	\
		"	.previous			\n"	\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.bits[0]), "r" (__pu_val.bits[1]), \
		  "m" (__m(__pu_addr)), "m" (__m(__pu_addr+4)),	\
		  "i" (-EFAULT)					\
		);						\
})

/* Deliberately undefined: referencing it is a link-time size error. */
extern int __put_user_unknown(void);
/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy.
 *
 * Byte-at-a-time copy loop with exception fixups on both the load and
 * the store.  On a fault the fixup at label 3 just resumes after the
 * loop, leaving the residual byte count in `size` so callers can report
 * how much was NOT copied.
 */
#define __copy_user(to, from, size)				\
do {								\
	if (size) {						\
		void *__to = to;				\
		const void *__from = from;			\
		int w;						\
		asm volatile(					\
			"0:	movbu	(%0),%3;\n"		\
			"1:	movbu	%3,(%1);\n"		\
			"	inc	%0;\n"			\
			"	inc	%1;\n"			\
			"	add	-1,%2;\n"		\
			"	bne	0b;\n"			\
			"2:\n"					\
			"	.section .fixup,\"ax\"\n"	\
			"3:	jmp	2b\n"			\
			"	.previous\n"			\
			"	.section __ex_table,\"a\"\n"	\
			"	.balign	4\n"			\
			"	.long	0b,3b\n"		\
			"	.long	1b,3b\n"		\
			"	.previous\n"			\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)	\
			: "cc", "memory");			\
	}							\
} while (0)
278 static inline unsigned long
279 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
281 __copy_user(to, from, n);
282 return n;
285 static inline unsigned long
286 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
288 __copy_user(to, from, n);
289 return n;
292 extern long strncpy_from_user(char *dst, const char __user *src, long count);
293 extern long strnlen_user(const char __user *str, long n);
294 extern unsigned long clear_user(void __user *mem, unsigned long len);
295 extern unsigned long __clear_user(void __user *mem, unsigned long len);
297 #endif /* _ASM_UACCESS_H */