arch/arc/include/asm/uaccess.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: June 2010
 *    -__clear_user( ) called multiple times during elf load was byte loop
 *    converted to do as much word clear as possible.
 *
 * vineetg: Dec 2009
 *    -Hand crafted constant propagation for "constant" copy sizes
 *    -stock kernel shrunk by 33K at -O3
 *
 * vineetg: Sept 2009
 *    -Added option to (UN)inline copy_(to|from)_user to reduce code sz
 *    -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
 *    -Enabled when doing -Os
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_UACCESS_H
#define _ASM_ARC_UACCESS_H

#include <linux/string.h>	/* for generic string functions */
#define __kernel_ok		(uaccess_kernel())

/*
 * Algorithmically, for __user_ok() we want to do:
 *	(start < TASK_SIZE) && (start+len < TASK_SIZE)
 * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
 * emitted directly in code.
 *
 * This can however be rewritten as follows:
 *	(len <= TASK_SIZE) && (start+len < TASK_SIZE)
 *
 * because it essentially checks that the buffer end is within limit and @len
 * is non-negative, which implies that the buffer start is within limit too.
 *
 * The reason for rewriting is that for the majority of cases @len is a
 * compile time constant, causing the first sub-expression to be subsumed at
 * compile time.
 *
 * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
 * so we check against TASK_SIZE using get_fs(), since the addr_limit load
 * from memory would already have been done at this call site for
 * __kernel_ok().
 */
#define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
				 ((addr) <= (get_fs() - (sz))))
#define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
				 likely(__user_ok((addr), (sz))))
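
/*
 * Worked example (illustrative only, assuming TASK_SIZE = 0x6000_0000 and a
 * constant sz = 0x10): the first sub-expression folds away at compile time
 * and the residual check is a single unsigned compare,
 *
 *	addr <= get_fs() - 0x10    i.e.    addr <= 0x5fff_fff0
 *
 * which by itself guarantees addr + sz <= TASK_SIZE, with no separate
 * overflow test on the buffer start.
 */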

/*********** Single byte/hword/word copies ******************/

#define __get_user_fn(sz, u, k)					\
({								\
	long __ret = 0;	/* success by default */		\
	switch (sz) {						\
	case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break;	\
	case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break;	\
	case 4: __arc_get_user_one(*(k), u, "ld", __ret);  break;	\
	case 8: __arc_get_user_one_64(*(k), u, __ret); break;	\
	}							\
	__ret;							\
})
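
/*
 * Usage sketch (illustrative; the real caller is the generic __get_user()
 * machinery, which passes sizeof(*ptr), the user pointer and a kernel
 * destination):
 *
 *	u32 val;
 *	u32 __user *uptr = ...;
 *	long err = __get_user_fn(sizeof(*uptr), uptr, &val);
 *	// err == 0: val holds the user word; err == -EFAULT: val is zeroed
 */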

/*
 * Returns 0 on success, -EFAULT if not.
 * @ret already contains 0 - given that errors will be less likely
 * (hence +r asm constraint below).
 * In case of error, fixup code will make it -EFAULT
 */
#define __arc_get_user_one(dst, src, op, ret)	\
	__asm__ __volatile__(			\
	"1:	"op"    %1,[%2]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	# return -EFAULT\n"		\
	"	mov %0, %3\n"			\
	"	# zero out dst ptr\n"		\
	"	mov %1, 0\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.previous\n"			\
	: "+r" (ret), "=r" (dst)		\
	: "r" (src), "ir" (-EFAULT))
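
/*
 * How the fixup works (sketch): the __ex_table entry pairs the possibly
 * faulting load at 1: with the handler at 3:. On a bad user pointer, the
 * fault handler looks up the faulting PC in __ex_table, redirects execution
 * to 3: which sets @ret to -EFAULT and zeroes @dst, then jumps back to 2:
 * so the caller sees a clean error return instead of an oops.
 */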

#define __arc_get_user_one_64(dst, src, ret)	\
	__asm__ __volatile__(			\
	"1:	ld   %1,[%2]\n"			\
	"4:	ld  %R1,[%2, 4]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	# return -EFAULT\n"		\
	"	mov %0, %3\n"			\
	"	# zero out dst ptr\n"		\
	"	mov %1,  0\n"			\
	"	mov %R1, 0\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.word 4b,3b\n"			\
	"	.previous\n"			\
	: "+r" (ret), "=r" (dst)		\
	: "r" (src), "ir" (-EFAULT))

#define __put_user_fn(sz, u, k)					\
({								\
	long __ret = 0;	/* success by default */		\
	switch (sz) {						\
	case 1: __arc_put_user_one(*(k), u, "stb", __ret); break;	\
	case 2: __arc_put_user_one(*(k), u, "stw", __ret); break;	\
	case 4: __arc_put_user_one(*(k), u, "st", __ret);  break;	\
	case 8: __arc_put_user_one_64(*(k), u, __ret); break;	\
	}							\
	__ret;							\
})
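
/*
 * Usage sketch, mirroring __get_user_fn() (illustrative):
 *
 *	u32 val = 42;
 *	long err = __put_user_fn(sizeof(*uptr), uptr, &val);
 *	// err == 0 on success, -EFAULT if the user store faulted
 */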

#define __arc_put_user_one(src, dst, op, ret)	\
	__asm__ __volatile__(			\
	"1:	"op"    %1,[%2]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	mov %0, %3\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.previous\n"			\
	: "+r" (ret)				\
	: "r" (src), "r" (dst), "ir" (-EFAULT))

#define __arc_put_user_one_64(src, dst, ret)	\
	__asm__ __volatile__(			\
	"1:	st   %1,[%2]\n"			\
	"4:	st  %R1,[%2, 4]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	mov %0, %3\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.word 4b,3b\n"			\
	"	.previous\n"			\
	: "+r" (ret)				\
	: "r" (src), "r" (dst), "ir" (-EFAULT))

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	long res = 0;
	char val;
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (n == 0)
		return 0;

	/* unaligned */
	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {

		unsigned char tmp;

		__asm__ __volatile__ (
		"	mov.f   lp_count, %0		\n"
		"	lpnz 2f				\n"
		"1:	ldb.ab  %1, [%3, 1]		\n"
		"	stb.ab  %1, [%2, 1]		\n"
		"	sub     %0,%0,1			\n"
		"2:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"3:	j   2b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 3b			\n"
		"	.previous			\n"
		: "+r" (n),
		/*
		 * Note: tmp is an '&' earlyclobber operand to make sure the
		 * temporary register used inside the loop is not the same
		 * as FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		:
		: "lp_count", "memory");

		return n;
	}

	/*
	 * Hand-crafted constant propagation to reduce code sz of the
	 * laddered copy 16x,8,4,2,1
	 */
	if (__builtin_constant_p(orig_n)) {
		res = orig_n;

		if (orig_n / 16) {
			orig_n = orig_n % 16;

			__asm__ __volatile__(
			"	lsr   lp_count, %7,4		\n"
			"	lp    3f			\n"
			"1:	ld.ab   %3, [%2, 4]		\n"
			"11:	ld.ab   %4, [%2, 4]		\n"
			"12:	ld.ab   %5, [%2, 4]		\n"
			"13:	ld.ab   %6, [%2, 4]		\n"
			"	st.ab   %3, [%1, 4]		\n"
			"	st.ab   %4, [%1, 4]		\n"
			"	st.ab   %5, [%1, 4]		\n"
			"	st.ab   %6, [%1, 4]		\n"
			"	sub     %0,%0,16		\n"
			"3:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align   4			\n"
			"4:	j   3b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align   4			\n"
			"	.word   1b, 4b			\n"
			"	.word   11b,4b			\n"
			"	.word   12b,4b			\n"
			"	.word   13b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "ir"(n)
			: "lp_count", "memory");
		}
		if (orig_n / 8) {
			orig_n = orig_n % 8;

			__asm__ __volatile__(
			"14:	ld.ab   %3, [%2,4]		\n"
			"15:	ld.ab   %4, [%2,4]		\n"
			"	st.ab   %3, [%1,4]		\n"
			"	st.ab   %4, [%1,4]		\n"
			"	sub     %0,%0,8			\n"
			"31:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align   4			\n"
			"4:	j   31b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align   4			\n"
			"	.word   14b,4b			\n"
			"	.word   15b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			:
			: "memory");
		}
		if (orig_n / 4) {
			orig_n = orig_n % 4;

			__asm__ __volatile__(
			"16:	ld.ab   %3, [%2,4]		\n"
			"	st.ab   %3, [%1,4]		\n"
			"	sub     %0,%0,4			\n"
			"32:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align   4			\n"
			"4:	j   32b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align   4			\n"
			"	.word   16b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n / 2) {
			orig_n = orig_n % 2;

			__asm__ __volatile__(
			"17:	ldw.ab   %3, [%2,2]		\n"
			"	stw.ab   %3, [%1,2]		\n"
			"	sub     %0,%0,2			\n"
			"33:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align   4			\n"
			"4:	j   33b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align   4			\n"
			"	.word   17b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n & 1) {
			__asm__ __volatile__(
			"18:	ldb.ab   %3, [%2,2]		\n"
			"	stb.ab   %3, [%1,2]		\n"
			"	sub     %0,%0,1			\n"
			"34:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align   4			\n"
			"4:	j   34b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align   4			\n"
			"	.word   18b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
	} else {	/* n is NOT constant, so laddered copy of 16x,8,4,2,1 */

		__asm__ __volatile__(
		"	mov   %0,%3			\n"
		"	lsr.f lp_count, %3,4		\n"  /* 16x bytes */
		"	lpnz  3f			\n"
		"1:	ld.ab %5, [%2, 4]		\n"
		"11:	ld.ab %6, [%2, 4]		\n"
		"12:	ld.ab %7, [%2, 4]		\n"
		"13:	ld.ab %8, [%2, 4]		\n"
		"	st.ab %5, [%1, 4]		\n"
		"	st.ab %6, [%1, 4]		\n"
		"	st.ab %7, [%1, 4]		\n"
		"	st.ab %8, [%1, 4]		\n"
		"	sub   %0,%0,16			\n"
		"3:	and.f %3,%3,0xf			\n"  /* stragglers */
		"	bz    34f			\n"
		"	bbit0 %3,3,31f			\n"  /* 8 bytes left */
		"14:	ld.ab %5, [%2,4]		\n"
		"15:	ld.ab %6, [%2,4]		\n"
		"	st.ab %5, [%1,4]		\n"
		"	st.ab %6, [%1,4]		\n"
		"	sub.f %0,%0,8			\n"
		"31:	bbit0 %3,2,32f			\n"  /* 4 bytes left */
		"16:	ld.ab %5, [%2,4]		\n"
		"	st.ab %5, [%1,4]		\n"
		"	sub.f %0,%0,4			\n"
		"32:	bbit0 %3,1,33f			\n"  /* 2 bytes left */
		"17:	ldw.ab %5, [%2,2]		\n"
		"	stw.ab %5, [%1,2]		\n"
		"	sub.f %0,%0,2			\n"
		"33:	bbit0 %3,0,34f			\n"
		"18:	ldb.ab %5, [%2,1]		\n"  /* 1 byte left */
		"	stb.ab %5, [%1,1]		\n"
		"	sub.f %0,%0,1			\n"
		"34:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   34b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.word   11b,4b			\n"
		"	.word   12b,4b			\n"
		"	.word   13b,4b			\n"
		"	.word   14b,4b			\n"
		"	.word   15b,4b			\n"
		"	.word   16b,4b			\n"
		"	.word   17b,4b			\n"
		"	.word   18b,4b			\n"
		"	.previous			\n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		:
		: "lp_count", "memory");
	}

	return res;
}
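
/*
 * Caller-side sketch: like all raw_copy_*() routines, the return value is
 * the number of bytes that could NOT be copied (0 means complete success).
 *
 *	char buf[64];
 *	if (raw_copy_from_user(buf, uptr, sizeof(buf)))
 *		return -EFAULT;		// partial or failed copy
 */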

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	long res = 0;
	char val;
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (n == 0)
		return 0;

	/* unaligned */
	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {

		unsigned char tmp;

		__asm__ __volatile__(
		"	mov.f   lp_count, %0		\n"
		"	lpnz 3f				\n"
		"	ldb.ab  %1, [%3, 1]		\n"
		"1:	stb.ab  %1, [%2, 1]		\n"
		"	sub     %0, %0, 1		\n"
		"3:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   3b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.previous			\n"
		: "+r" (n),
		/*
		 * Note: tmp is an '&' earlyclobber operand to make sure the
		 * temporary register used inside the loop is not the same
		 * as FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		:
		: "lp_count", "memory");

		return n;
	}

	if (__builtin_constant_p(orig_n)) {
		res = orig_n;

		if (orig_n / 16) {
			orig_n = orig_n % 16;

			__asm__ __volatile__(
			"	lsr lp_count, %7,4		\n"
			"	lp  3f				\n"
			"	ld.ab %3, [%2, 4]		\n"
			"	ld.ab %4, [%2, 4]		\n"
			"	ld.ab %5, [%2, 4]		\n"
			"	ld.ab %6, [%2, 4]		\n"
			"1:	st.ab %3, [%1, 4]		\n"
			"11:	st.ab %4, [%1, 4]		\n"
			"12:	st.ab %5, [%1, 4]		\n"
			"13:	st.ab %6, [%1, 4]		\n"
			"	sub   %0, %0, 16		\n"
			"3:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   3b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   1b, 4b			\n"
			"	.word   11b,4b			\n"
			"	.word   12b,4b			\n"
			"	.word   13b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "ir"(n)
			: "lp_count", "memory");
		}
		if (orig_n / 8) {
			orig_n = orig_n % 8;

			__asm__ __volatile__(
			"	ld.ab   %3, [%2,4]		\n"
			"	ld.ab   %4, [%2,4]		\n"
			"14:	st.ab   %3, [%1,4]		\n"
			"15:	st.ab   %4, [%1,4]		\n"
			"	sub     %0, %0, 8		\n"
			"31:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   31b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   14b,4b			\n"
			"	.word   15b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			:
			: "memory");
		}
		if (orig_n / 4) {
			orig_n = orig_n % 4;

			__asm__ __volatile__(
			"	ld.ab   %3, [%2,4]		\n"
			"16:	st.ab   %3, [%1,4]		\n"
			"	sub     %0, %0, 4		\n"
			"32:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   32b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   16b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n / 2) {
			orig_n = orig_n % 2;

			__asm__ __volatile__(
			"	ldw.ab   %3, [%2,2]		\n"
			"17:	stw.ab   %3, [%1,2]		\n"
			"	sub     %0, %0, 2		\n"
			"33:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   33b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   17b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n & 1) {
			__asm__ __volatile__(
			"	ldb.ab  %3, [%2,1]		\n"
			"18:	stb.ab  %3, [%1,1]		\n"
			"	sub     %0, %0, 1		\n"
			"34:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   34b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   18b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
	} else {	/* n is NOT constant, so laddered copy of 16x,8,4,2,1 */

		__asm__ __volatile__(
		"	mov   %0,%3			\n"
		"	lsr.f lp_count, %3,4		\n"  /* 16x bytes */
		"	lpnz  3f			\n"
		"	ld.ab %5, [%2, 4]		\n"
		"	ld.ab %6, [%2, 4]		\n"
		"	ld.ab %7, [%2, 4]		\n"
		"	ld.ab %8, [%2, 4]		\n"
		"1:	st.ab %5, [%1, 4]		\n"
		"11:	st.ab %6, [%1, 4]		\n"
		"12:	st.ab %7, [%1, 4]		\n"
		"13:	st.ab %8, [%1, 4]		\n"
		"	sub   %0, %0, 16		\n"
		"3:	and.f %3,%3,0xf			\n"  /* stragglers */
		"	bz    34f			\n"
		"	bbit0 %3,3,31f			\n"  /* 8 bytes left */
		"	ld.ab %5, [%2,4]		\n"
		"	ld.ab %6, [%2,4]		\n"
		"14:	st.ab %5, [%1,4]		\n"
		"15:	st.ab %6, [%1,4]		\n"
		"	sub.f %0, %0, 8			\n"
		"31:	bbit0 %3,2,32f			\n"  /* 4 bytes left */
		"	ld.ab %5, [%2,4]		\n"
		"16:	st.ab %5, [%1,4]		\n"
		"	sub.f %0, %0, 4			\n"
		"32:	bbit0 %3,1,33f			\n"  /* 2 bytes left */
		"	ldw.ab %5, [%2,2]		\n"
		"17:	stw.ab %5, [%1,2]		\n"
		"	sub.f %0, %0, 2			\n"
		"33:	bbit0 %3,0,34f			\n"
		"	ldb.ab %5, [%2,1]		\n"  /* 1 byte left */
		"18:	stb.ab %5, [%1,1]		\n"
		"	sub.f %0, %0, 1			\n"
		"34:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   34b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.word   11b,4b			\n"
		"	.word   12b,4b			\n"
		"	.word   13b,4b			\n"
		"	.word   14b,4b			\n"
		"	.word   15b,4b			\n"
		"	.word   16b,4b			\n"
		"	.word   17b,4b			\n"
		"	.word   18b,4b			\n"
		"	.previous			\n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		:
		: "lp_count", "memory");
	}

	return res;
}
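
/*
 * Same return convention for the write path (sketch):
 *
 *	if (raw_copy_to_user(uptr, buf, sizeof(buf)))
 *		return -EFAULT;		// bytes were left uncopied
 */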

static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
{
	long res = n;
	unsigned char *d_char = to;

	__asm__ __volatile__(
	"	bbit0   %0, 0, 1f		\n"
	"75:	stb.ab  %2, [%0,1]		\n"
	"	sub %1, %1, 1			\n"
	"1:	bbit0   %0, 1, 2f		\n"
	"76:	stw.ab  %2, [%0,2]		\n"
	"	sub %1, %1, 2			\n"
	"2:	asr.f   lp_count, %1, 2		\n"
	"	lpnz    3f			\n"
	"77:	st.ab   %2, [%0,4]		\n"
	"	sub %1, %1, 4			\n"
	"3:	bbit0   %1, 1, 4f		\n"
	"78:	stw.ab  %2, [%0,2]		\n"
	"	sub %1, %1, 2			\n"
	"4:	bbit0   %1, 0, 5f		\n"
	"79:	stb.ab  %2, [%0,1]		\n"
	"	sub %1, %1, 1			\n"
	"5:					\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"3:	j   5b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word   75b, 3b			\n"
	"	.word   76b, 3b			\n"
	"	.word   77b, 3b			\n"
	"	.word   78b, 3b			\n"
	"	.word   79b, 3b			\n"
	"	.previous			\n"
	: "+r"(d_char), "+r"(res)
	: "i"(0)
	: "lp_count", "memory");

	return res;
}
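
/*
 * Usage sketch: __clear_user() zero-fills a user buffer and, like the
 * copy routines, returns the number of bytes NOT cleared (0 on success).
 *
 *	if (__clear_user(uptr, len))
 *		return -EFAULT;
 */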

static inline long
__arc_strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = 0;
	char val;

	if (count == 0)
		return 0;

	__asm__ __volatile__(
	"	mov	lp_count, %5		\n"
	"	lp	3f			\n"
	"1:	ldb.ab  %3, [%2, 1]		\n"
	"	breq.d	%3, 0, 3f		\n"
	"	stb.ab  %3, [%1, 1]		\n"
	"	add	%0, %0, 1	# Num of NON NULL bytes copied	\n"
	"3:								\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"4:	mov %0, %4		# sets @res as -EFAULT	\n"
	"	j   3b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word   1b, 4b			\n"
	"	.previous			\n"
	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
	: "g"(-EFAULT), "r"(count)
	: "lp_count", "memory");

	return res;
}
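
/*
 * Semantics sketch: @res is the number of non-NUL bytes copied (the
 * terminating NUL, when hit, is still stored by the delay-slot stb.ab
 * above but not counted), or -EFAULT if the user read faulted.
 *
 *	char name[32];
 *	long len = __strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;	// -EFAULT
 */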

static inline long __arc_strnlen_user(const char __user *s, long n)
{
	long res, tmp1, cnt;
	char val;

	__asm__ __volatile__(
	"	mov %2, %1			\n"
	"1:	ldb.ab  %3, [%0, 1]		\n"
	"	breq.d  %3, 0, 2f		\n"
	"	sub.f   %2, %2, 1		\n"
	"	bnz 1b				\n"
	"	sub %2, %2, 1			\n"
	"2:	sub %0, %1, %2			\n"
	"3:	;nop				\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"4:	mov %0, 0			\n"
	"	j   3b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word 1b, 4b			\n"
	"	.previous			\n"
	: "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
	: "0"(s), "1"(n)
	: "memory");

	return res;
}
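
/*
 * Semantics sketch (traced from the asm above): the result includes the
 * terminating NUL, i.e. strlen() + 1; it is n + 1 if no NUL was found
 * within @n bytes, and 0 if the access faulted.
 */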

#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

#define __clear_user(d, n)		__arc_clear_user(d, n)
#define __strncpy_from_user(d, s, n)	__arc_strncpy_from_user(d, s, n)
#define __strnlen_user(s, n)		__arc_strnlen_user(s, n)
#else
extern unsigned long arc_clear_user_noinline(void __user *to,
		unsigned long n);
extern long arc_strncpy_from_user_noinline(char *dst, const char __user *src,
		long count);
extern long arc_strnlen_user_noinline(const char __user *src, long n);

#define __clear_user(d, n)		arc_clear_user_noinline(d, n)
#define __strncpy_from_user(d, s, n)	arc_strncpy_from_user_noinline(d, s, n)
#define __strnlen_user(s, n)		arc_strnlen_user_noinline(s, n)

#endif
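
/*
 * Design note (sketch): at -Os the helpers are called out of line via the
 * arc_*_noinline() wrappers (provided elsewhere in the arch code, e.g.
 * arch/arc/mm/extable.c in mainline), so only one copy of the bulky inline
 * asm is emitted; otherwise the inline versions above let the compiler
 * constant-propagate the copy size into the laddered 16x/8/4/2/1 paths.
 */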

#include <asm/segment.h>
#include <asm-generic/uaccess.h>

#endif /* _ASM_ARC_UACCESS_H */