/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * -__clear_user() called multiple times during elf load was byte loop
 *  converted to do as much word clear as possible.
 *
 * -Hand crafted constant propagation for "constant" copy sizes
 * -stock kernel shrunk by 33K at -O3
 *
 * -Added option to (UN)inline copy_(to|from)_user to reduce code sz
 * -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
 * -Enabled when doing -Os
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_UACCESS_H
#define _ASM_ARC_UACCESS_H

#include <linux/sched.h>
#include <asm/errno.h>
#include <linux/string.h>	/* for generic string functions */

#define __kernel_ok		(segment_eq(get_fs(), KERNEL_DS))

/*
 * Algorithmically, for __user_ok() we want to do:
 *	(start < TASK_SIZE) && (start+len < TASK_SIZE)
 * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
 * emitted directly in code.
 *
 * This can however be rewritten as follows:
 *	(len <= TASK_SIZE) && (start+len < TASK_SIZE)
 *
 * Because it essentially checks if buffer end is within limit and @len is
 * non-negative, which implies that buffer start will be within limit too.
 *
 * The reason for rewriting being, for majority of cases, @len is generally
 * compile time constant, causing first sub-expression to be compile time
 * subsumed.
 *
 * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
 * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem
 * would already have been done at this call site for __kernel_ok()
 */
#define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
				 ((addr) <= (get_fs() - (sz))))
#define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
				 likely(__user_ok((addr), (sz))))
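
/*
 * Worked example (illustrative only, not part of the original comment):
 * assume addr_limit/TASK_SIZE is 0x6000_0000 and a caller asks to access
 * sz = 0x10 bytes at addr = 0x5FFF_FFF8, i.e. a buffer ending past the
 * limit.  The rewritten check evaluates
 *	(0x10 <= 0x6000_0000) && (0x5FFF_FFF8 <= 0x6000_0000 - 0x10)
 * and the second clause fails (0x5FFF_FFF8 > 0x5FFF_FFF0), so the access
 * is rejected without emitting TASK_SIZE as a second literal.
 */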

/*********** Single byte/hword/word copies ******************/

#define __get_user_fn(sz, u, k)					\
({								\
	long __ret = 0;	/* success by default */		\
	switch (sz) {						\
	case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break;	\
	case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break;	\
	case 4: __arc_get_user_one(*(k), u, "ld", __ret);  break;	\
	case 8: __arc_get_user_one_64(*(k), u, __ret); break;	\
	}							\
	__ret;							\
})

/*
 * Returns 0 on success, -EFAULT if not.
 * @ret already contains 0 - given that errors will be less likely
 * (hence +r asm constraint below).
 * In case of error, fixup code will make it -EFAULT.
 */
#define __arc_get_user_one(dst, src, op, ret)	\
	__asm__ __volatile__(			\
	"	.section .fixup, \"ax\"\n"	\
	"	.section __ex_table, \"a\"\n"	\
	: "+r" (ret), "=r" (dst)		\
	: "r" (src), "ir" (-EFAULT))
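
/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * the asm-generic get_user()/__get_user() wrappers funnel into
 * __get_user_fn() above, so a faulting user pointer simply shows up as
 * -EFAULT in the caller:
 *
 *	u32 val;
 *	if (get_user(val, (u32 __user *)ubuf))
 *		return -EFAULT;
 */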

#define __arc_get_user_one_64(dst, src, ret)	\
	__asm__ __volatile__(			\
	"4:	ld   %R1,[%2, 4]\n"		\
	"	.section .fixup, \"ax\"\n"	\
	"	.section __ex_table, \"a\"\n"	\
	: "+r" (ret), "=r" (dst)		\
	: "r" (src), "ir" (-EFAULT))

#define __put_user_fn(sz, u, k)					\
({								\
	long __ret = 0;	/* success by default */		\
	switch (sz) {						\
	case 1: __arc_put_user_one(*(k), u, "stb", __ret); break;	\
	case 2: __arc_put_user_one(*(k), u, "stw", __ret); break;	\
	case 4: __arc_put_user_one(*(k), u, "st", __ret);  break;	\
	case 8: __arc_put_user_one_64(*(k), u, __ret); break;	\
	}							\
	__ret;							\
})

#define __arc_put_user_one(src, dst, op, ret)	\
	__asm__ __volatile__(			\
	"1:	"op"	%1,[%2]\n"		\
	"	.section .fixup, \"ax\"\n"	\
	"	.section __ex_table, \"a\"\n"	\
	: "r" (src), "r" (dst), "ir" (-EFAULT))

#define __arc_put_user_one_64(src, dst, ret)	\
	__asm__ __volatile__(			\
	"4:	st   %R1,[%2, 4]\n"		\
	"	.section .fixup, \"ax\"\n"	\
	"	.section __ex_table, \"a\"\n"	\
	: "r" (src), "r" (dst), "ir" (-EFAULT))

static inline unsigned long
__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
		__asm__ __volatile__ (
		"	mov.f   lp_count, %0		\n"
		"1:	ldb.ab  %1, [%3, 1]		\n"
		"	stb.ab  %1, [%2, 1]		\n"
		"	.section .fixup, \"ax\"		\n"
		"	.section __ex_table, \"a\"	\n"
		/*
		 * Note as an '&' earlyclobber operand to make sure the
		 * temporary register inside the loop is not the same as
		 * FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		: "lp_count", "lp_start", "lp_end", "memory");
	/*
	 * Hand-crafted constant propagation to reduce code sz of the
	 * laddered copy 16x,8,4,2,1
	 */
	if (__builtin_constant_p(orig_n)) {
			orig_n = orig_n % 16;
			__asm__ __volatile__(
			"	lsr   lp_count, %7,4		\n"
			"1:	ld.ab   %3, [%2, 4]		\n"
			"11:	ld.ab   %4, [%2, 4]		\n"
			"12:	ld.ab   %5, [%2, 4]		\n"
			"13:	ld.ab   %6, [%2, 4]		\n"
			"	st.ab   %3, [%1, 4]		\n"
			"	st.ab   %4, [%1, 4]		\n"
			"	st.ab   %5, [%1, 4]		\n"
			"	st.ab   %6, [%1, 4]		\n"
			"	.section .fixup, \"ax\"		\n"
			"	.section __ex_table, \"a\"	\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "lp_count", "memory");
			__asm__ __volatile__(
			"14:	ld.ab   %3, [%2,4]		\n"
			"15:	ld.ab   %4, [%2,4]		\n"
			"	st.ab   %3, [%1,4]		\n"
			"	st.ab   %4, [%1,4]		\n"
			"	.section .fixup, \"ax\"		\n"
			"	.section __ex_table, \"a\"	\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			__asm__ __volatile__(
			"16:	ld.ab   %3, [%2,4]		\n"
			"	st.ab   %3, [%1,4]		\n"
			"	.section .fixup, \"ax\"		\n"
			"	.section __ex_table, \"a\"	\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			__asm__ __volatile__(
			"17:	ldw.ab   %3, [%2,2]		\n"
			"	stw.ab   %3, [%1,2]		\n"
			"	.section .fixup, \"ax\"		\n"
			"	.section __ex_table, \"a\"	\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			__asm__ __volatile__(
			"18:	ldb.ab   %3, [%2,2]		\n"
			"	stb.ab   %3, [%1,2]		\n"
			"	.section .fixup, \"ax\"		\n"
			"	.section __ex_table, \"a\"	\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
	} else {	/* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
		__asm__ __volatile__(
		"	lsr.f   lp_count, %3,4		\n"  /* 16x bytes */
		"1:	ld.ab   %5, [%2, 4]		\n"
		"11:	ld.ab   %6, [%2, 4]		\n"
		"12:	ld.ab   %7, [%2, 4]		\n"
		"13:	ld.ab   %8, [%2, 4]		\n"
		"	st.ab   %5, [%1, 4]		\n"
		"	st.ab   %6, [%1, 4]		\n"
		"	st.ab   %7, [%1, 4]		\n"
		"	st.ab   %8, [%1, 4]		\n"
		"3:	and.f   %3,%3,0xf		\n"  /* stragglers */
		"	bbit0   %3,3,31f		\n"  /* 8 bytes left */
		"14:	ld.ab   %5, [%2,4]		\n"
		"15:	ld.ab   %6, [%2,4]		\n"
		"	st.ab   %5, [%1,4]		\n"
		"	st.ab   %6, [%1,4]		\n"
		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
		"16:	ld.ab   %5, [%2,4]		\n"
		"	st.ab   %5, [%1,4]		\n"
		"32:	bbit0   %3,1,33f		\n"  /* 2 bytes left */
		"17:	ldw.ab   %5, [%2,2]		\n"
		"	stw.ab   %5, [%1,2]		\n"
		"33:	bbit0   %3,0,34f		\n"
		"18:	ldb.ab   %5, [%2,1]		\n"  /* 1 byte left */
		"	stb.ab   %5, [%1,1]		\n"
		"	.section .fixup, \"ax\"		\n"
		"	.section __ex_table, \"a\"	\n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		: "lp_count", "memory");
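
		/*
		 * Plain-C outline of the laddered copy above (illustrative
		 * only; the real path is the inline asm):
		 *
		 *	while (n >= 16) { copy 4 words;    n -= 16; }
		 *	if (n & 8)      { copy 2 words;    n -= 8;  }
		 *	if (n & 4)      { copy 1 word;     n -= 4;  }
		 *	if (n & 2)      { copy 1 halfword; n -= 2;  }
		 *	if (n & 1)      { copy 1 byte;     n -= 1;  }
		 */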

extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
					   unsigned long n);

static inline unsigned long
__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
		__asm__ __volatile__(
		"	mov.f   lp_count, %0		\n"
		"	ldb.ab  %1, [%3, 1]		\n"
		"1:	stb.ab  %1, [%2, 1]		\n"
		"	.section .fixup, \"ax\"		\n"
		"	.section __ex_table, \"a\"	\n"
		/*
		 * Note as an '&' earlyclobber operand to make sure the
		 * temporary register inside the loop is not the same as
		 * FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		: "lp_count", "lp_start", "lp_end", "memory");
	if (__builtin_constant_p(orig_n)) {
			orig_n = orig_n % 16;
			__asm__ __volatile__(
			"	lsr lp_count, %7,4		\n"
			"	ld.ab %3, [%2, 4]		\n"
			"	ld.ab %4, [%2, 4]		\n"
			"	ld.ab %5, [%2, 4]		\n"
			"	ld.ab %6, [%2, 4]		\n"
			"1:	st.ab %3, [%1, 4]		\n"
			"11:	st.ab %4, [%1, 4]		\n"
			"12:	st.ab %5, [%1, 4]		\n"
			"13:	st.ab %6, [%1, 4]		\n"
			"	.section .fixup, \"ax\"		\n"
			"	.section __ex_table, \"a\"	\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "lp_count", "memory");
			__asm__ __volatile__(
			"	ld.ab %3, [%2,4]		\n"
			"	ld.ab %4, [%2,4]		\n"
			"14:	st.ab %3, [%1,4]		\n"
			"15:	st.ab %4, [%1,4]		\n"
			"	.section .fixup, \"ax\"		\n"
			"	.section __ex_table, \"a\"	\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			__asm__ __volatile__(
			"	ld.ab %3, [%2,4]		\n"
			"16:	st.ab %3, [%1,4]		\n"
			"	.section .fixup, \"ax\"		\n"
			"	.section __ex_table, \"a\"	\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			__asm__ __volatile__(
			"	ldw.ab %3, [%2,2]		\n"
			"17:	stw.ab %3, [%1,2]		\n"
			"	.section .fixup, \"ax\"		\n"
			"	.section __ex_table, \"a\"	\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			__asm__ __volatile__(
			"	ldb.ab %3, [%2,1]		\n"
			"18:	stb.ab %3, [%1,1]		\n"
			"	.section .fixup, \"ax\"		\n"
			"	.section __ex_table, \"a\"	\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
	} else {	/* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
		__asm__ __volatile__(
		"	lsr.f   lp_count, %3,4		\n"  /* 16x bytes */
		"	ld.ab   %5, [%2, 4]		\n"
		"	ld.ab   %6, [%2, 4]		\n"
		"	ld.ab   %7, [%2, 4]		\n"
		"	ld.ab   %8, [%2, 4]		\n"
		"1:	st.ab   %5, [%1, 4]		\n"
		"11:	st.ab   %6, [%1, 4]		\n"
		"12:	st.ab   %7, [%1, 4]		\n"
		"13:	st.ab   %8, [%1, 4]		\n"
		"3:	and.f   %3,%3,0xf		\n"  /* stragglers */
		"	bbit0   %3,3,31f		\n"  /* 8 bytes left */
		"	ld.ab   %5, [%2,4]		\n"
		"	ld.ab   %6, [%2,4]		\n"
		"14:	st.ab   %5, [%1,4]		\n"
		"15:	st.ab   %6, [%1,4]		\n"
		"	sub.f   %0, %0, 8		\n"
		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
		"	ld.ab   %5, [%2,4]		\n"
		"16:	st.ab   %5, [%1,4]		\n"
		"	sub.f   %0, %0, 4		\n"
		"32:	bbit0   %3,1,33f		\n"  /* 2 bytes left */
		"	ldw.ab   %5, [%2,2]		\n"
		"17:	stw.ab   %5, [%1,2]		\n"
		"	sub.f   %0, %0, 2		\n"
		"33:	bbit0   %3,0,34f		\n"
		"	ldb.ab   %5, [%2,1]		\n"  /* 1 byte left */
		"18:	stb.ab   %5, [%1,1]		\n"
		"	sub.f   %0, %0, 1		\n"
		"	.section .fixup, \"ax\"		\n"
		"	.section __ex_table, \"a\"	\n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		: "lp_count", "memory");

static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
{
	unsigned char *d_char = to;
	__asm__ __volatile__(
	"	bbit0   %0, 0, 1f		\n"
	"75:	stb.ab  %2, [%0,1]		\n"
	"1:	bbit0   %0, 1, 2f		\n"
	"76:	stw.ab  %2, [%0,2]		\n"
	"2:	asr.f   lp_count, %1, 2		\n"
	"77:	st.ab   %2, [%0,4]		\n"
	"3:	bbit0   %1, 1, 4f		\n"
	"78:	stw.ab  %2, [%0,2]		\n"
	"4:	bbit0   %1, 0, 5f		\n"
	"79:	stb.ab  %2, [%0,1]		\n"
	"	.section .fixup, \"ax\"		\n"
	"	.section __ex_table, \"a\"	\n"
	: "+r"(d_char), "+r"(res)
	: "lp_count", "lp_start", "lp_end", "memory");
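
	/*
	 * Plain-C outline of the word-clear strategy above (illustrative
	 * only; the real path is the inline asm):
	 *
	 *	if (to & 1) store a zero byte;        align to 2
	 *	if (to & 2) store a zero halfword;    align to 4
	 *	store (n >> 2) zero words;            bulk clear
	 *	if (n & 2) store a trailing zero halfword;
	 *	if (n & 1) store a trailing zero byte;
	 */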

static inline long
__arc_strncpy_from_user(char *dst, const char __user *src, long count)
{
	__asm__ __volatile__(
	"1:	ldb.ab  %3, [%2, 1]		\n"
	"	breq.d	%3, 0, 3f		\n"
	"	stb.ab  %3, [%1, 1]		\n"
	"	add	%0, %0, 1	# Num of NON NULL bytes copied	\n"
	"	.section .fixup, \"ax\"		\n"
	"4:	mov %0, %4		# sets @res as -EFAULT	\n"
	"	.section __ex_table, \"a\"	\n"
	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
	: "g"(-EFAULT), "l"(count)
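
	/*
	 * Illustrative usage sketch (hypothetical caller): strncpy_from_user()
	 * built on the routine above returns the number of non-NUL bytes
	 * copied, or -EFAULT if the user pointer faults:
	 *
	 *	char name[32];
	 *	long len = strncpy_from_user(name, uname, sizeof(name));
	 *	if (len < 0)
	 *		return len;
	 */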

static inline long __arc_strnlen_user(const char __user *s, long n)
{
	__asm__ __volatile__(
	"1:	ldb.ab  %3, [%0, 1]		\n"
	"	breq.d  %3, 0, 2f		\n"
	"	sub.f   %2, %2, 1		\n"
	"2:	sub	%0, %1, %2		\n"
	"	.section .fixup, \"ax\"		\n"
	"	.section __ex_table, \"a\"	\n"
	: "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)

#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE

#define __copy_from_user(t, f, n)	__arc_copy_from_user(t, f, n)
#define __copy_to_user(t, f, n)		__arc_copy_to_user(t, f, n)
#define __clear_user(d, n)		__arc_clear_user(d, n)
#define __strncpy_from_user(d, s, n)	__arc_strncpy_from_user(d, s, n)
#define __strnlen_user(s, n)		__arc_strnlen_user(s, n)

#else

extern long arc_copy_from_user_noinline(void *to, const void __user *from,
					unsigned long n);
extern long arc_copy_to_user_noinline(void __user *to, const void *from,
				      unsigned long n);
extern unsigned long arc_clear_user_noinline(void __user *to,
					     unsigned long n);
extern long arc_strncpy_from_user_noinline(char *dst, const char __user *src,
					   long count);
extern long arc_strnlen_user_noinline(const char __user *src, long n);

#define __copy_from_user(t, f, n)	arc_copy_from_user_noinline(t, f, n)
#define __copy_to_user(t, f, n)		arc_copy_to_user_noinline(t, f, n)
#define __clear_user(d, n)		arc_clear_user_noinline(d, n)
#define __strncpy_from_user(d, s, n)	arc_strncpy_from_user_noinline(d, s, n)
#define __strnlen_user(s, n)		arc_strnlen_user_noinline(s, n)
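
/*
 * Illustrative sketch of how the out-of-line wrappers are expected to look
 * (they live in a separate C file, e.g. arch/arc/mm/extable.c, not in this
 * header - shown only to document the intent of the -Os split above):
 *
 *	long arc_copy_from_user_noinline(void *to, const void __user *from,
 *					 unsigned long n)
 *	{
 *		return __arc_copy_from_user(to, from, n);
 *	}
 */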

#endif

#include <asm-generic/uaccess.h>

extern int fixup_exception(struct pt_regs *regs);

#endif