2 * User address space access functions.
3 * The non-inlined parts of asm-metag/uaccess.h are here.
5 * Copyright (C) 2006, Imagination Technologies.
6 * Copyright (C) 2000, Axis Communications AB.
8 * Written by Hans-Peter Nilsson.
9 * Pieces used from memcpy, originally by Kenny Ranerup long time ago.
10 * Modified for Meta by Will Newton.
13 #include <linux/export.h>
14 #include <linux/uaccess.h>
15 #include <asm/cache.h> /* def of L1_CACHE_BYTES */
/*
 * NOTE(review): this chunk is an extraction-garbled copy of the Meta (metag)
 * usercopy implementation: the original file's line numbers are fused onto
 * the start of each line and many interior lines (asm volatile headers,
 * labels, __ex_table entries, closing parens) are missing.  Comments below
 * document intent without altering the surviving tokens.
 */
/* Buffers at least this large use the RAPF-prefetch fast-copy loops. */
18 #define RAPF_MIN_BUF_SIZE (3*L1_CACHE_BYTES)
21 /* The "double write" in this code is because the Meta will not fault
22 * immediately unless the memory pipe is forced to by e.g. a data stall or
23 * another memory op. The second write should be discarded by the write
24 * combiner so should have virtually no cost.
*/
/*
 * Core copy template: COPY is the copy sequence, FIXUP the .fixup code that
 * adds the uncopied byte count to 'ret', TENTRY the __ex_table entries.
 * On fault the fixup resumes via MOVT/JUMP D1Ar1 at label 1 -- the label
 * itself is on a line missing from this extraction; TODO(review) confirm
 * placement against the full source.
 */
27 #define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
31 " .section .fixup,\"ax\"\n" \
34 " MOVT D1Ar1,#HI(1b)\n" \
35 " JUMP D1Ar1,#LO(1b)\n" \
37 " .section __ex_table,\"a\"\n" \
40 : "=r" (to), "=r" (from), "=r" (ret) \
41 : "0" (to), "1" (from), "2" (ret) \
/*
 * Fixed-size copy-to-user building blocks for 1..16 bytes.  Each *_Nx_cont
 * macro copies N bytes and splices in a caller-supplied COPY/FIXUP/TENTRY
 * continuation; the plain *_N macros pass empty continuations.  Each
 * SETB/SETW/SETD is issued twice (the "double write" described above
 * __asm_copy_user_cont): the unnumbered first write forces the memory pipe
 * to surface a pending fault, the numbered second one carries the
 * __ex_table entry whose fixup adds the chunk size to ret (%2).
 */
45 #define __asm_copy_to_user_1(to, from, ret) \
46 __asm_copy_user_cont(to, from, ret, \
47 " GETB D1Ar1,[%1++]\n" \
48 " SETB [%0],D1Ar1\n" \
49 "2: SETB [%0++],D1Ar1\n", \
50 "3: ADD %2,%2,#1\n", \
53 #define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
54 __asm_copy_user_cont(to, from, ret, \
55 " GETW D1Ar1,[%1++]\n" \
56 " SETW [%0],D1Ar1\n" \
57 "2: SETW [%0++],D1Ar1\n" COPY, \
58 "3: ADD %2,%2,#2\n" FIXUP, \
59 " .long 2b,3b\n" TENTRY)
61 #define __asm_copy_to_user_2(to, from, ret) \
62 __asm_copy_to_user_2x_cont(to, from, ret, "", "", "")
64 #define __asm_copy_to_user_3(to, from, ret) \
65 __asm_copy_to_user_2x_cont(to, from, ret, \
66 " GETB D1Ar1,[%1++]\n" \
67 " SETB [%0],D1Ar1\n" \
68 "4: SETB [%0++],D1Ar1\n", \
69 "5: ADD %2,%2,#1\n", \
72 #define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
73 __asm_copy_user_cont(to, from, ret, \
74 " GETD D1Ar1,[%1++]\n" \
75 " SETD [%0],D1Ar1\n" \
76 "2: SETD [%0++],D1Ar1\n" COPY, \
77 "3: ADD %2,%2,#4\n" FIXUP, \
78 " .long 2b,3b\n" TENTRY)
80 #define __asm_copy_to_user_4(to, from, ret) \
81 __asm_copy_to_user_4x_cont(to, from, ret, "", "", "")
83 #define __asm_copy_to_user_5(to, from, ret) \
84 __asm_copy_to_user_4x_cont(to, from, ret, \
85 " GETB D1Ar1,[%1++]\n" \
86 " SETB [%0],D1Ar1\n" \
87 "4: SETB [%0++],D1Ar1\n", \
88 "5: ADD %2,%2,#1\n", \
91 #define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
92 __asm_copy_to_user_4x_cont(to, from, ret, \
93 " GETW D1Ar1,[%1++]\n" \
94 " SETW [%0],D1Ar1\n" \
95 "4: SETW [%0++],D1Ar1\n" COPY, \
96 "5: ADD %2,%2,#2\n" FIXUP, \
97 " .long 4b,5b\n" TENTRY)
99 #define __asm_copy_to_user_6(to, from, ret) \
100 __asm_copy_to_user_6x_cont(to, from, ret, "", "", "")
102 #define __asm_copy_to_user_7(to, from, ret) \
103 __asm_copy_to_user_6x_cont(to, from, ret, \
104 " GETB D1Ar1,[%1++]\n" \
105 " SETB [%0],D1Ar1\n" \
106 "6: SETB [%0++],D1Ar1\n", \
107 "7: ADD %2,%2,#1\n", \
110 #define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
111 __asm_copy_to_user_4x_cont(to, from, ret, \
112 " GETD D1Ar1,[%1++]\n" \
113 " SETD [%0],D1Ar1\n" \
114 "4: SETD [%0++],D1Ar1\n" COPY, \
115 "5: ADD %2,%2,#4\n" FIXUP, \
116 " .long 4b,5b\n" TENTRY)
118 #define __asm_copy_to_user_8(to, from, ret) \
119 __asm_copy_to_user_8x_cont(to, from, ret, "", "", "")
121 #define __asm_copy_to_user_9(to, from, ret) \
122 __asm_copy_to_user_8x_cont(to, from, ret, \
123 " GETB D1Ar1,[%1++]\n" \
124 " SETB [%0],D1Ar1\n" \
125 "6: SETB [%0++],D1Ar1\n", \
126 "7: ADD %2,%2,#1\n", \
129 #define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
130 __asm_copy_to_user_8x_cont(to, from, ret, \
131 " GETW D1Ar1,[%1++]\n" \
132 " SETW [%0],D1Ar1\n" \
133 "6: SETW [%0++],D1Ar1\n" COPY, \
134 "7: ADD %2,%2,#2\n" FIXUP, \
135 " .long 6b,7b\n" TENTRY)
137 #define __asm_copy_to_user_10(to, from, ret) \
138 __asm_copy_to_user_10x_cont(to, from, ret, "", "", "")
140 #define __asm_copy_to_user_11(to, from, ret) \
141 __asm_copy_to_user_10x_cont(to, from, ret, \
142 " GETB D1Ar1,[%1++]\n" \
143 " SETB [%0],D1Ar1\n" \
144 "8: SETB [%0++],D1Ar1\n", \
145 "9: ADD %2,%2,#1\n", \
148 #define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
149 __asm_copy_to_user_8x_cont(to, from, ret, \
150 " GETD D1Ar1,[%1++]\n" \
151 " SETD [%0],D1Ar1\n" \
152 "6: SETD [%0++],D1Ar1\n" COPY, \
153 "7: ADD %2,%2,#4\n" FIXUP, \
154 " .long 6b,7b\n" TENTRY)
155 #define __asm_copy_to_user_12(to, from, ret) \
156 __asm_copy_to_user_12x_cont(to, from, ret, "", "", "")
158 #define __asm_copy_to_user_13(to, from, ret) \
159 __asm_copy_to_user_12x_cont(to, from, ret, \
160 " GETB D1Ar1,[%1++]\n" \
161 " SETB [%0],D1Ar1\n" \
162 "8: SETB [%0++],D1Ar1\n", \
163 "9: ADD %2,%2,#1\n", \
166 #define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
167 __asm_copy_to_user_12x_cont(to, from, ret, \
168 " GETW D1Ar1,[%1++]\n" \
169 " SETW [%0],D1Ar1\n" \
170 "8: SETW [%0++],D1Ar1\n" COPY, \
171 "9: ADD %2,%2,#2\n" FIXUP, \
172 " .long 8b,9b\n" TENTRY)
174 #define __asm_copy_to_user_14(to, from, ret) \
175 __asm_copy_to_user_14x_cont(to, from, ret, "", "", "")
177 #define __asm_copy_to_user_15(to, from, ret) \
178 __asm_copy_to_user_14x_cont(to, from, ret, \
179 " GETB D1Ar1,[%1++]\n" \
180 " SETB [%0],D1Ar1\n" \
181 "10: SETB [%0++],D1Ar1\n", \
182 "11: ADD %2,%2,#1\n", \
185 #define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
186 __asm_copy_to_user_12x_cont(to, from, ret, \
187 " GETD D1Ar1,[%1++]\n" \
188 " SETD [%0],D1Ar1\n" \
189 "8: SETD [%0++],D1Ar1\n" COPY, \
190 "9: ADD %2,%2,#4\n" FIXUP, \
191 " .long 8b,9b\n" TENTRY)
193 #define __asm_copy_to_user_16(to, from, ret) \
194 __asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
/*
 * 8-byte (64-bit GETL/SETL) copy-to-user step with its own fixup: on a
 * fault the fixup adds 8 to ret (%2) and resumes at label 1 (the label is
 * on a line missing from this extraction).  Clobbers D1Ar1/D0Ar2.
 */
196 #define __asm_copy_to_user_8x64(to, from, ret) \
198 " GETL D0Ar2,D1Ar1,[%1++]\n" \
199 " SETL [%0],D0Ar2,D1Ar1\n" \
200 "2: SETL [%0++],D0Ar2,D1Ar1\n" \
202 " .section .fixup,\"ax\"\n" \
203 "3: ADD %2,%2,#8\n" \
204 " MOVT D0Ar2,#HI(1b)\n" \
205 " JUMP D0Ar2,#LO(1b)\n" \
207 " .section __ex_table,\"a\"\n" \
210 : "=r" (to), "=r" (from), "=r" (ret) \
211 : "0" (to), "1" (from), "2" (ret) \
212 : "D1Ar1", "D0Ar2", "memory")
/*
 * 64-bit-aligned RAPF copy loop template.  Saves D0Ar6/D0FrT/D0.5-7
 * register pairs on the A0 stack, prefetches with RAPF, then streams
 * MGETL/MSETL in 32-byte chunks, decrementing n (%3) as it goes.  The
 * fixup clears LSM_STEP (bits 10:8 -- see the AND with #0xFFFFF8FF on
 * TXSTATUS) so a fault mid-M{G,S}ETL cannot corrupt a later multi-op,
 * then runs the caller's FIXUP with the pre-fault TXSTATUS in D0Ar2.
 * Original rationale (opening delimiter lost in extraction):
 */
/*
215 * optimized copying loop using RAPF when 64 bit aligned
217 * n will be automatically decremented inside the loop
218 * ret will be left intact. if error occurs we will rewind
219 * so that the original non optimized code will fill up
220 * this value correctly.
223 * > n will hold total number of uncopied bytes
225 * > {'to','from'} will be rewind back so that
226 * the non-optimized code will do the proper fix up
228 * DCACHE drops the cacheline which helps in reducing cache
231 * We introduce an extra SETL at the end of the loop to
232 * ensure we don't fall off the loop before we catch all
236 * LSM_STEP in TXSTATUS must be cleared in fix up code.
237 * since we're using M{S,G}ETL, a fault might happen at
238 * any address in the middle of M{S,G}ETL causing
239 * the value of LSM_STEP to be incorrect which can
240 * cause subsequent use of M{S,G}ET{L,D} to go wrong.
241 * ie: if LSM_STEP was 1 when a fault occurs, the
242 * next call to M{S,G}ET{L,D} will skip the first
243 * copy/getting as it think that the first 1 has already
*/
247 #define __asm_copy_user_64bit_rapf_loop( \
248 to, from, ret, n, id, FIXUP) \
252 "MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
254 "LSR D1Ar5, %3, #6\n" \
255 "SUB TXRPT, D1Ar5, #2\n" \
258 "ADD RAPF, %1, #64\n" \
260 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
262 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
263 "SUB %3, %3, #32\n" \
265 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
267 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
268 "SUB %3, %3, #32\n" \
269 "DCACHE [%1+#-64], D0Ar6\n" \
274 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
276 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
277 "SUB %3, %3, #32\n" \
279 "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
281 "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
284 "SETL [%0++], D0.7, D1.7\n" \
285 "SUB %3, %3, #32\n" \
287 "DCACHE [%1+#-64], D0Ar6\n" \
288 "GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
289 "GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
290 "GETL D0.5, D1.5, [A0StP+#-24]\n" \
291 "GETL D0.6, D1.6, [A0StP+#-16]\n" \
292 "GETL D0.7, D1.7, [A0StP+#-8]\n" \
293 "SUB A0StP, A0StP, #40\n" \
294 " .section .fixup,\"ax\"\n" \
296 " ADD %0, %0, #8\n" \
298 " MOV D0Ar2, TXSTATUS\n" \
299 " MOV D1Ar1, TXSTATUS\n" \
300 " AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
301 " MOV TXSTATUS, D1Ar1\n" \
303 " MOVT D0Ar2,#HI(1b)\n" \
304 " JUMP D0Ar2,#LO(1b)\n" \
306 " .section __ex_table,\"a\"\n" \
317 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
318 : "0" (to), "1" (from), "2" (ret), "3" (n) \
319 : "D1Ar1", "D0Ar2", "memory")
/*
 * Rewind 'to' and 'from' pointers when a fault occurs during the 64-bit
 * RAPF copy-to-user loop.
 *
 * Rationale:
 *	A fault always occurs on writing to the user buffer.  A fault is at
 *	a single address, so 'to' only needs rewinding by 8 bytes.
 *	Since we do a complete read from the kernel buffer before writing,
 *	'from' must be rewound as well.  The amount to rewind it by equals
 *	the number of faulty writes in the MSETL, which is
 *	[4 - (LSM_STEP-1)]*8.  LSM_STEP is bits 10:8 in TXSTATUS, which the
 *	fixup code has already read and stored in D0Ar2.
 *
 *	NOTE: if a fault occurs at the last operation in M{G,S}ETL,
 *	LSM_STEP will be 0 rather than 4 (i.e. with 4 writes, a fault at
 *	the 4th write leaves LSM_STEP == 0).  The ADDZ below copes with
 *	that.
 *
 *	n is updated by the number of successful writes, which is:
 *	n = n - (LSM_STEP-1)*8
 */
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,	\
		"LSR	D0Ar2, D0Ar2, #8\n"			\
		/* ANDS, not AND: AND leaves the condition flags */	\
		/* stale, so the conditional ADDZ below would    */	\
		/* test leftover flags instead of LSM_STEP == 0. */	\
		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
		"SUB	D0Ar2, D0Ar2, #1\n"			\
		"MOV	D1Ar1, #4\n"				\
		"SUB	D0Ar2, D1Ar1, D0Ar2\n"			\
		"LSL	D0Ar2, D0Ar2, #3\n"			\
		"LSL	D1Ar1, D1Ar1, #3\n"			\
		"SUB	D1Ar1, D1Ar1, D0Ar2\n"			\
		/* rewind the single faulting SETL (8 bytes) */		\
		"SUB	%0, %0, #8\n"				\
		"SUB	%1, %1, D0Ar2\n"			\
		"SUB	%3, %3, D1Ar1\n")
/*
 * 32-bit-aligned RAPF copy loop template.  Same structure as the 64-bit
 * variant above, but streams MGETD/MSETD in 16-byte chunks (SUB %3,%3,#16)
 * with an extra trailing SETD, and the fixup advances %0 by 4 rather
 * than 8.  The fixup likewise clears LSM_STEP (AND #0xFFFFF8FF on
 * TXSTATUS) before handing the pre-fault TXSTATUS to FIXUP in D0Ar2.
 * Original rationale (opening delimiter lost in extraction):
 */
/*
358 * optimized copying loop using RAPF when 32 bit aligned
360 * n will be automatically decremented inside the loop
361 * ret will be left intact. if error occurs we will rewind
362 * so that the original non optimized code will fill up
363 * this value correctly.
366 * > n will hold total number of uncopied bytes
368 * > {'to','from'} will be rewind back so that
369 * the non-optimized code will do the proper fix up
371 * DCACHE drops the cacheline which helps in reducing cache
374 * We introduce an extra SETD at the end of the loop to
375 * ensure we don't fall off the loop before we catch all
379 * LSM_STEP in TXSTATUS must be cleared in fix up code.
380 * since we're using M{S,G}ETL, a fault might happen at
381 * any address in the middle of M{S,G}ETL causing
382 * the value of LSM_STEP to be incorrect which can
383 * cause subsequent use of M{S,G}ET{L,D} to go wrong.
384 * ie: if LSM_STEP was 1 when a fault occurs, the
385 * next call to M{S,G}ET{L,D} will skip the first
386 * copy/getting as it think that the first 1 has already
*/
390 #define __asm_copy_user_32bit_rapf_loop( \
391 to, from, ret, n, id, FIXUP) \
395 "MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
397 "LSR D1Ar5, %3, #6\n" \
398 "SUB TXRPT, D1Ar5, #2\n" \
401 "ADD RAPF, %1, #64\n" \
403 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
405 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
406 "SUB %3, %3, #16\n" \
408 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
410 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
411 "SUB %3, %3, #16\n" \
413 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
415 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
416 "SUB %3, %3, #16\n" \
418 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
420 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
421 "SUB %3, %3, #16\n" \
422 "DCACHE [%1+#-64], D0Ar6\n" \
427 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
429 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
430 "SUB %3, %3, #16\n" \
432 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
434 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
435 "SUB %3, %3, #16\n" \
437 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
439 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
440 "SUB %3, %3, #16\n" \
442 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
444 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
447 "SETD [%0++], D0.7\n" \
448 "SUB %3, %3, #16\n" \
450 "DCACHE [%1+#-64], D0Ar6\n" \
451 "GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
452 "GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
453 "GETL D0.5, D1.5, [A0StP+#-24]\n" \
454 "GETL D0.6, D1.6, [A0StP+#-16]\n" \
455 "GETL D0.7, D1.7, [A0StP+#-8]\n" \
456 "SUB A0StP, A0StP, #40\n" \
457 " .section .fixup,\"ax\"\n" \
459 " ADD %0, %0, #4\n" \
461 " MOV D0Ar2, TXSTATUS\n" \
462 " MOV D1Ar1, TXSTATUS\n" \
463 " AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
464 " MOV TXSTATUS, D1Ar1\n" \
466 " MOVT D0Ar2,#HI(1b)\n" \
467 " JUMP D0Ar2,#LO(1b)\n" \
469 " .section __ex_table,\"a\"\n" \
488 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
489 : "0" (to), "1" (from), "2" (ret), "3" (n) \
490 : "D1Ar1", "D0Ar2", "memory")
/*
 * Rewind 'to' and 'from' pointers when a fault occurs during the 32-bit
 * RAPF copy-to-user loop.
 *
 * Rationale:
 *	A fault always occurs on writing to the user buffer, at a single
 *	address, so 'to' only needs rewinding by 4 bytes.
 *	Since we do a complete read from the kernel buffer before writing,
 *	'from' must be rewound by the number of faulty writes in the MSETD,
 *	which is [4 - (LSM_STEP-1)]*4.  LSM_STEP is bits 10:8 in TXSTATUS,
 *	which the fixup code has already read and stored in D0Ar2.
 *
 *	NOTE: if a fault occurs at the last operation in M{G,S}ETD,
 *	LSM_STEP will be 0 rather than 4 (i.e. with 4 writes, a fault at
 *	the 4th write leaves LSM_STEP == 0).  The ADDZ below copes with
 *	that.
 *
 *	n is updated by the number of successful writes, which is:
 *	n = n - (LSM_STEP-1)*4
 */
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,	\
		"LSR	D0Ar2, D0Ar2, #8\n"			\
		/* ANDS, not AND: AND leaves the condition flags */	\
		/* stale, so the conditional ADDZ below would    */	\
		/* test leftover flags instead of LSM_STEP == 0. */	\
		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
		"SUB	D0Ar2, D0Ar2, #1\n"			\
		"MOV	D1Ar1, #4\n"				\
		"SUB	D0Ar2, D1Ar1, D0Ar2\n"			\
		"LSL	D0Ar2, D0Ar2, #2\n"			\
		"LSL	D1Ar1, D1Ar1, #2\n"			\
		"SUB	D1Ar1, D1Ar1, D0Ar2\n"			\
		/* rewind the single faulting SETD (4 bytes) */		\
		"SUB	%0, %0, #4\n"				\
		"SUB	%1, %1, D0Ar2\n"			\
		"SUB	%3, %3, D1Ar1\n")
/*
 * __copy_user() - copy n bytes from kernel (psrc) to user space (pdst).
 * Returns the number of bytes NOT copied; retn is accumulated by the fixup
 * code of the __asm_copy_to_user_* helpers and starts at 0.  dst/src are
 * pinned to address-unit registers A0.2/A1.2.  Strategy: align both
 * pointers with byte then word copies, use the 64-bit RAPF loop when both
 * are 8-byte aligned and n >= RAPF_MIN_BUF_SIZE (else the 32-bit RAPF
 * loop), then mop up with 8x64/16/4-byte steps and a 1/2/3-byte tail.
 * NOTE(review): loop/switch framing lines are missing from this
 * extraction; control structure inferred from the surviving fragments --
 * verify against the original file.
 */
528 unsigned long __copy_user(void __user
*pdst
, const void *psrc
,
531 register char __user
*dst
asm ("A0.2") = pdst
;
532 register const char *src
asm ("A1.2") = psrc
;
533 unsigned long retn
= 0;
538 if ((unsigned long) src
& 1) {
539 __asm_copy_to_user_1(dst
, src
, retn
);
542 if ((unsigned long) dst
& 1) {
543 /* Worst case - byte copy */
545 __asm_copy_to_user_1(dst
, src
, retn
);
549 if (((unsigned long) src
& 2) && n
>= 2) {
550 __asm_copy_to_user_2(dst
, src
, retn
);
553 if ((unsigned long) dst
& 2) {
554 /* Second worst case - word copy */
556 __asm_copy_to_user_2(dst
, src
, retn
);
562 /* 64 bit copy loop */
563 if (!(((unsigned long) src
| (__force
unsigned long) dst
) & 7)) {
564 if (n
>= RAPF_MIN_BUF_SIZE
) {
565 /* copy user using 64 bit rapf copy */
566 __asm_copy_to_user_64bit_rapf_loop(dst
, src
, retn
,
570 __asm_copy_to_user_8x64(dst
, src
, retn
);
574 if (n
>= RAPF_MIN_BUF_SIZE
) {
575 /* copy user using 32 bit rapf copy */
576 __asm_copy_to_user_32bit_rapf_loop(dst
, src
, retn
, n
, "32cu");
579 /* 64 bit copy loop */
580 if (!(((unsigned long) src
| (__force
unsigned long) dst
) & 7)) {
582 __asm_copy_to_user_8x64(dst
, src
, retn
);
589 __asm_copy_to_user_16(dst
, src
, retn
);
594 __asm_copy_to_user_4(dst
, src
, retn
);
602 __asm_copy_to_user_1(dst
, src
, retn
);
605 __asm_copy_to_user_2(dst
, src
, retn
);
608 __asm_copy_to_user_3(dst
, src
, retn
);
614 EXPORT_SYMBOL(__copy_user
);
/*
 * Fixed-size copy-from-user building blocks for 1..16 bytes, mirroring the
 * copy-to-user family above.  The __ex_table entry sits on the SETB/SETW/
 * SETD following the user-side GET: per the "double write" note at the top
 * of the file, the Meta only surfaces the read fault at the next memory
 * op.  The fixup adds the chunk size to ret and re-issues the SET so the
 * destination pointer stays in step.
 * NOTE(review): at that fixup SET, D1Ar1 holds stale data from the faulted
 * GET; upstream later reworked copy_from_user's fault handling -- verify
 * against a current tree before relying on the destination contents.
 */
616 #define __asm_copy_from_user_1(to, from, ret) \
617 __asm_copy_user_cont(to, from, ret, \
618 " GETB D1Ar1,[%1++]\n" \
619 "2: SETB [%0++],D1Ar1\n", \
620 "3: ADD %2,%2,#1\n" \
621 " SETB [%0++],D1Ar1\n", \
624 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
625 __asm_copy_user_cont(to, from, ret, \
626 " GETW D1Ar1,[%1++]\n" \
627 "2: SETW [%0++],D1Ar1\n" COPY, \
628 "3: ADD %2,%2,#2\n" \
629 " SETW [%0++],D1Ar1\n" FIXUP, \
630 " .long 2b,3b\n" TENTRY)
632 #define __asm_copy_from_user_2(to, from, ret) \
633 __asm_copy_from_user_2x_cont(to, from, ret, "", "", "")
635 #define __asm_copy_from_user_3(to, from, ret) \
636 __asm_copy_from_user_2x_cont(to, from, ret, \
637 " GETB D1Ar1,[%1++]\n" \
638 "4: SETB [%0++],D1Ar1\n", \
639 "5: ADD %2,%2,#1\n" \
640 " SETB [%0++],D1Ar1\n", \
643 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
644 __asm_copy_user_cont(to, from, ret, \
645 " GETD D1Ar1,[%1++]\n" \
646 "2: SETD [%0++],D1Ar1\n" COPY, \
647 "3: ADD %2,%2,#4\n" \
648 " SETD [%0++],D1Ar1\n" FIXUP, \
649 " .long 2b,3b\n" TENTRY)
651 #define __asm_copy_from_user_4(to, from, ret) \
652 __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
654 #define __asm_copy_from_user_5(to, from, ret) \
655 __asm_copy_from_user_4x_cont(to, from, ret, \
656 " GETB D1Ar1,[%1++]\n" \
657 "4: SETB [%0++],D1Ar1\n", \
658 "5: ADD %2,%2,#1\n" \
659 " SETB [%0++],D1Ar1\n", \
662 #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
663 __asm_copy_from_user_4x_cont(to, from, ret, \
664 " GETW D1Ar1,[%1++]\n" \
665 "4: SETW [%0++],D1Ar1\n" COPY, \
666 "5: ADD %2,%2,#2\n" \
667 " SETW [%0++],D1Ar1\n" FIXUP, \
668 " .long 4b,5b\n" TENTRY)
670 #define __asm_copy_from_user_6(to, from, ret) \
671 __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
673 #define __asm_copy_from_user_7(to, from, ret) \
674 __asm_copy_from_user_6x_cont(to, from, ret, \
675 " GETB D1Ar1,[%1++]\n" \
676 "6: SETB [%0++],D1Ar1\n", \
677 "7: ADD %2,%2,#1\n" \
678 " SETB [%0++],D1Ar1\n", \
681 #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
682 __asm_copy_from_user_4x_cont(to, from, ret, \
683 " GETD D1Ar1,[%1++]\n" \
684 "4: SETD [%0++],D1Ar1\n" COPY, \
685 "5: ADD %2,%2,#4\n" \
686 " SETD [%0++],D1Ar1\n" FIXUP, \
687 " .long 4b,5b\n" TENTRY)
689 #define __asm_copy_from_user_8(to, from, ret) \
690 __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
692 #define __asm_copy_from_user_9(to, from, ret) \
693 __asm_copy_from_user_8x_cont(to, from, ret, \
694 " GETB D1Ar1,[%1++]\n" \
695 "6: SETB [%0++],D1Ar1\n", \
696 "7: ADD %2,%2,#1\n" \
697 " SETB [%0++],D1Ar1\n", \
700 #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
701 __asm_copy_from_user_8x_cont(to, from, ret, \
702 " GETW D1Ar1,[%1++]\n" \
703 "6: SETW [%0++],D1Ar1\n" COPY, \
704 "7: ADD %2,%2,#2\n" \
705 " SETW [%0++],D1Ar1\n" FIXUP, \
706 " .long 6b,7b\n" TENTRY)
708 #define __asm_copy_from_user_10(to, from, ret) \
709 __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
711 #define __asm_copy_from_user_11(to, from, ret) \
712 __asm_copy_from_user_10x_cont(to, from, ret, \
713 " GETB D1Ar1,[%1++]\n" \
714 "8: SETB [%0++],D1Ar1\n", \
715 "9: ADD %2,%2,#1\n" \
716 " SETB [%0++],D1Ar1\n", \
719 #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
720 __asm_copy_from_user_8x_cont(to, from, ret, \
721 " GETD D1Ar1,[%1++]\n" \
722 "6: SETD [%0++],D1Ar1\n" COPY, \
723 "7: ADD %2,%2,#4\n" \
724 " SETD [%0++],D1Ar1\n" FIXUP, \
725 " .long 6b,7b\n" TENTRY)
727 #define __asm_copy_from_user_12(to, from, ret) \
728 __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
730 #define __asm_copy_from_user_13(to, from, ret) \
731 __asm_copy_from_user_12x_cont(to, from, ret, \
732 " GETB D1Ar1,[%1++]\n" \
733 "8: SETB [%0++],D1Ar1\n", \
734 "9: ADD %2,%2,#1\n" \
735 " SETB [%0++],D1Ar1\n", \
738 #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
739 __asm_copy_from_user_12x_cont(to, from, ret, \
740 " GETW D1Ar1,[%1++]\n" \
741 "8: SETW [%0++],D1Ar1\n" COPY, \
742 "9: ADD %2,%2,#2\n" \
743 " SETW [%0++],D1Ar1\n" FIXUP, \
744 " .long 8b,9b\n" TENTRY)
746 #define __asm_copy_from_user_14(to, from, ret) \
747 __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
749 #define __asm_copy_from_user_15(to, from, ret) \
750 __asm_copy_from_user_14x_cont(to, from, ret, \
751 " GETB D1Ar1,[%1++]\n" \
752 "10: SETB [%0++],D1Ar1\n", \
753 "11: ADD %2,%2,#1\n" \
754 " SETB [%0++],D1Ar1\n", \
757 #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
758 __asm_copy_from_user_12x_cont(to, from, ret, \
759 " GETD D1Ar1,[%1++]\n" \
760 "8: SETD [%0++],D1Ar1\n" COPY, \
761 "9: ADD %2,%2,#4\n" \
762 " SETD [%0++],D1Ar1\n" FIXUP, \
763 " .long 8b,9b\n" TENTRY)
765 #define __asm_copy_from_user_16(to, from, ret) \
766 __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
/*
 * 8-byte (GETL/SETL) copy-from-user step.  Note the "=a" constraint on
 * 'to' (address-unit register), unlike the "=r" used by the to-user
 * variant.  The fixup adds 8 to ret and repeats the SETL before resuming
 * at label 1 (the label is on a line missing from this extraction).
 */
768 #define __asm_copy_from_user_8x64(to, from, ret) \
770 " GETL D0Ar2,D1Ar1,[%1++]\n" \
771 "2: SETL [%0++],D0Ar2,D1Ar1\n" \
773 " .section .fixup,\"ax\"\n" \
776 "3: ADD %2,%2,#8\n" \
777 " SETL [%0++],D0Ar2,D1Ar1\n" \
778 " MOVT D0Ar2,#HI(1b)\n" \
779 " JUMP D0Ar2,#LO(1b)\n" \
781 " .section __ex_table,\"a\"\n" \
784 : "=a" (to), "=r" (from), "=r" (ret) \
785 : "0" (to), "1" (from), "2" (ret) \
786 : "D1Ar1", "D0Ar2", "memory")
/*
 * From-user RAPF loop instantiations.  Only 'from' needs rewinding on a
 * fault: the fault is on the user-side read, and the kernel destination is
 * not written until the data has been read.  The FIXUP argument lines of
 * both instantiations are missing from this extraction.
 * NOTE(review): upstream later reworked these src fixups ("metag/usercopy:
 * Fix src fixup in from user rapf loops") -- verify against a current tree.
 */
788 /* rewind 'from' pointer when a fault occurs
791 * A fault occurs while reading from user buffer, which is the
792 * source. Since the fault is at a single address, we only
793 * need to rewind by 8 bytes.
794 * Since we don't write to kernel buffer until we read first,
795 * the kernel buffer is at the right state and needn't be
*/
798 #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
799 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
802 /* rewind 'from' pointer when a fault occurs
805 * A fault occurs while reading from user buffer, which is the
806 * source. Since the fault is at a single address, we only
807 * need to rewind by 4 bytes.
808 * Since we don't write to kernel buffer until we read first,
809 * the kernel buffer is at the right state and needn't be
*/
812 #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
813 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
/*
 * __copy_user_zeroing() - copy n bytes from user (psrc) to kernel (pdst),
 * zeroing the remainder of the kernel buffer when a read fault occurs (see
 * the byte-for-byte loop after copy_exception_bytes).  Returns the number
 * of bytes that could not be copied (retn).  Structure mirrors
 * __copy_user(): align, RAPF fast loops, 8x64/16/4-byte steps, 1/2/3-byte
 * tail; any read fault branches to copy_exception_bytes.
 * NOTE(review): loop/switch framing lines are missing from this
 * extraction; control structure inferred from the surviving fragments.
 */
817 /* Copy from user to kernel, zeroing the bytes that were inaccessible in
818 userland. The return-value is the number of bytes that were
820 unsigned long __copy_user_zeroing(void *pdst
, const void __user
*psrc
,
823 register char *dst
asm ("A0.2") = pdst
;
824 register const char __user
*src
asm ("A1.2") = psrc
;
825 unsigned long retn
= 0;
830 if ((unsigned long) src
& 1) {
831 __asm_copy_from_user_1(dst
, src
, retn
);
834 if ((unsigned long) dst
& 1) {
835 /* Worst case - byte copy */
837 __asm_copy_from_user_1(dst
, src
, retn
);
840 goto copy_exception_bytes
;
843 if (((unsigned long) src
& 2) && n
>= 2) {
844 __asm_copy_from_user_2(dst
, src
, retn
);
847 if ((unsigned long) dst
& 2) {
848 /* Second worst case - word copy */
850 __asm_copy_from_user_2(dst
, src
, retn
);
853 goto copy_exception_bytes
;
857 /* We only need one check after the unalignment-adjustments,
858 because if both adjustments were done, either both or
859 neither reference had an exception. */
861 goto copy_exception_bytes
;
864 /* 64 bit copy loop */
865 if (!(((unsigned long) src
| (unsigned long) dst
) & 7)) {
866 if (n
>= RAPF_MIN_BUF_SIZE
) {
867 /* Copy using fast 64bit rapf */
868 __asm_copy_from_user_64bit_rapf_loop(dst
, src
, retn
,
872 __asm_copy_from_user_8x64(dst
, src
, retn
);
875 goto copy_exception_bytes
;
879 if (n
>= RAPF_MIN_BUF_SIZE
) {
880 /* Copy using fast 32bit rapf */
881 __asm_copy_from_user_32bit_rapf_loop(dst
, src
, retn
,
885 /* 64 bit copy loop */
886 if (!(((unsigned long) src
| (unsigned long) dst
) & 7)) {
888 __asm_copy_from_user_8x64(dst
, src
, retn
);
891 goto copy_exception_bytes
;
897 __asm_copy_from_user_4(dst
, src
, retn
);
901 goto copy_exception_bytes
;
904 /* If we get here, there were no memory read faults. */
906 /* These copies are at least "naturally aligned" (so we don't
907 have to check each byte), due to the src alignment code.
908 The *_3 case *will* get the correct count for retn. */
910 /* This case deliberately left in (if you have doubts check the
911 generated assembly code). */
914 __asm_copy_from_user_1(dst
, src
, retn
);
917 __asm_copy_from_user_2(dst
, src
, retn
);
920 __asm_copy_from_user_3(dst
, src
, retn
);
924 /* If we get here, retn correctly reflects the number of failing
928 copy_exception_bytes
:
929 /* We already have "retn" bytes cleared, and need to clear the
930 remaining "n" bytes. A non-optimized simple byte-for-byte in-line
931 memset is preferred here, since this isn't speed-critical code and
932 we'd rather have this a leaf-function than calling memset. */
935 for (endp
= dst
+ n
; dst
< endp
; dst
++)
941 EXPORT_SYMBOL(__copy_user_zeroing
);
/*
 * 8-byte (64-bit SETL) user-clear step: double write to force the fault,
 * fixup adds 8 to ret (%1) and resumes at label 1 (the label and the
 * D0Ar2/D1Ar1 zeroing are on lines missing from this extraction --
 * presumably a MOV #0 pair; confirm against the full source).
 */
943 #define __asm_clear_8x64(to, ret) \
947 " SETL [%0],D0Ar2,D1Ar1\n" \
948 "2: SETL [%0++],D0Ar2,D1Ar1\n" \
950 " .section .fixup,\"ax\"\n" \
951 "3: ADD %1,%1,#8\n" \
952 " MOVT D0Ar2,#HI(1b)\n" \
953 " JUMP D0Ar2,#LO(1b)\n" \
955 " .section __ex_table,\"a\"\n" \
958 : "=r" (to), "=r" (ret) \
959 : "0" (to), "1" (ret) \
960 : "D1Ar1", "D0Ar2", "memory")
/*
 * __asm_clear template and fixed-size user-clear helpers (1..16 bytes),
 * structured like the copy-to-user family: double write to force the
 * fault, the numbered second op carries the __ex_table entry, and the
 * fixup adds the chunk size to ret (%1).  D1Ar1 supplies the data --
 * presumably zeroed by a MOV on a line missing from this extraction;
 * confirm against the full source.
 */
962 /* Zero userspace. */
964 #define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
969 " .section .fixup,\"ax\"\n" \
971 " MOVT D1Ar1,#HI(1b)\n" \
972 " JUMP D1Ar1,#LO(1b)\n" \
974 " .section __ex_table,\"a\"\n" \
977 : "=r" (to), "=r" (ret) \
978 : "0" (to), "1" (ret) \
981 #define __asm_clear_1(to, ret) \
982 __asm_clear(to, ret, \
983 " SETB [%0],D1Ar1\n" \
984 "2: SETB [%0++],D1Ar1\n", \
985 "3: ADD %1,%1,#1\n", \
988 #define __asm_clear_2(to, ret) \
989 __asm_clear(to, ret, \
990 " SETW [%0],D1Ar1\n" \
991 "2: SETW [%0++],D1Ar1\n", \
992 "3: ADD %1,%1,#2\n", \
995 #define __asm_clear_3(to, ret) \
996 __asm_clear(to, ret, \
997 "2: SETW [%0++],D1Ar1\n" \
998 " SETB [%0],D1Ar1\n" \
999 "3: SETB [%0++],D1Ar1\n", \
1000 "4: ADD %1,%1,#2\n" \
1001 "5: ADD %1,%1,#1\n", \
1005 #define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
1006 __asm_clear(to, ret, \
1007 " SETD [%0],D1Ar1\n" \
1008 "2: SETD [%0++],D1Ar1\n" CLEAR, \
1009 "3: ADD %1,%1,#4\n" FIXUP, \
1010 " .long 2b,3b\n" TENTRY)
1012 #define __asm_clear_4(to, ret) \
1013 __asm_clear_4x_cont(to, ret, "", "", "")
1015 #define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
1016 __asm_clear_4x_cont(to, ret, \
1017 " SETD [%0],D1Ar1\n" \
1018 "4: SETD [%0++],D1Ar1\n" CLEAR, \
1019 "5: ADD %1,%1,#4\n" FIXUP, \
1020 " .long 4b,5b\n" TENTRY)
1022 #define __asm_clear_8(to, ret) \
1023 __asm_clear_8x_cont(to, ret, "", "", "")
1025 #define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
1026 __asm_clear_8x_cont(to, ret, \
1027 " SETD [%0],D1Ar1\n" \
1028 "6: SETD [%0++],D1Ar1\n" CLEAR, \
1029 "7: ADD %1,%1,#4\n" FIXUP, \
1030 " .long 6b,7b\n" TENTRY)
1032 #define __asm_clear_12(to, ret) \
1033 __asm_clear_12x_cont(to, ret, "", "", "")
1035 #define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
1036 __asm_clear_12x_cont(to, ret, \
1037 " SETD [%0],D1Ar1\n" \
1038 "8: SETD [%0++],D1Ar1\n" CLEAR, \
1039 "9: ADD %1,%1,#4\n" FIXUP, \
1040 " .long 8b,9b\n" TENTRY)
1042 #define __asm_clear_16(to, ret) \
1043 __asm_clear_16x_cont(to, ret, "", "", "")
/*
 * __do_clear_user() - zero pn bytes of user memory at pto; returns the
 * number of bytes that could NOT be cleared (retn, accumulated by the
 * __asm_clear_* fixups, starting at 0).  Registers are pinned (D0Re0/
 * D1Re0/D0Ar6) -- presumably to keep the values live across the asm
 * sequences; confirm against the full source.  Strategy: align to 2 then
 * 4 bytes, use 8x64/16/4-byte steps, then a 1/2/3-byte tail.
 * NOTE(review): loop/switch framing lines are missing from this
 * extraction.
 */
1045 unsigned long __do_clear_user(void __user
*pto
, unsigned long pn
)
1047 register char __user
*dst
asm ("D0Re0") = pto
;
1048 register unsigned long n
asm ("D1Re0") = pn
;
1049 register unsigned long retn
asm ("D0Ar6") = 0;
1051 if ((unsigned long) dst
& 1) {
1052 __asm_clear_1(dst
, retn
);
1056 if ((unsigned long) dst
& 2) {
1057 __asm_clear_2(dst
, retn
);
1061 /* 64 bit copy loop */
1062 if (!((__force
unsigned long) dst
& 7)) {
1064 __asm_clear_8x64(dst
, retn
);
1070 __asm_clear_16(dst
, retn
);
1075 __asm_clear_4(dst
, retn
);
1083 __asm_clear_1(dst
, retn
);
1086 __asm_clear_2(dst
, retn
);
1089 __asm_clear_3(dst
, retn
);
1095 EXPORT_SYMBOL(__do_clear_user
);
/*
 * __get_user_asm_{b,w,d}() - read one byte/word/dword from user memory.
 * The result is pinned in D0Re0 and initialised to 0; on a fault the
 * .fixup code stores through [%1] (the err pointer) -- presumably the
 * -EFAULT "P" operand loaded into D0FrT on a line missing from this
 * extraction -- and resumes at label 2.  TODO(review): confirm against
 * the full source.
 */
1097 unsigned char __get_user_asm_b(const void __user
*addr
, long *err
)
1099 register unsigned char x
asm ("D0Re0") = 0;
1105 " .section .fixup,\"ax\"\n"
1107 " SETD [%1],D0FrT\n"
1108 " MOVT D0FrT,#HI(2b)\n"
1109 " JUMP D0FrT,#LO(2b)\n"
1111 " .section __ex_table,\"a\"\n"
1115 : "r" (err
), "r" (addr
), "P" (-EFAULT
)
1119 EXPORT_SYMBOL(__get_user_asm_b
);
1121 unsigned short __get_user_asm_w(const void __user
*addr
, long *err
)
1123 register unsigned short x
asm ("D0Re0") = 0;
1129 " .section .fixup,\"ax\"\n"
1131 " SETD [%1],D0FrT\n"
1132 " MOVT D0FrT,#HI(2b)\n"
1133 " JUMP D0FrT,#LO(2b)\n"
1135 " .section __ex_table,\"a\"\n"
1139 : "r" (err
), "r" (addr
), "P" (-EFAULT
)
1143 EXPORT_SYMBOL(__get_user_asm_w
);
1145 unsigned int __get_user_asm_d(const void __user
*addr
, long *err
)
1147 register unsigned int x
asm ("D0Re0") = 0;
1153 " .section .fixup,\"ax\"\n"
1155 " SETD [%1],D0FrT\n"
1156 " MOVT D0FrT,#HI(2b)\n"
1157 " JUMP D0FrT,#LO(2b)\n"
1159 " .section __ex_table,\"a\"\n"
1163 : "r" (err
), "r" (addr
), "P" (-EFAULT
)
1167 EXPORT_SYMBOL(__get_user_asm_d
);
/*
 * __put_user_asm_{b,w,d,l}() - write 1/2/4/8 bytes to user memory.
 * err (pinned in D0Re0) starts at 0; the .fixup path presumably sets it to
 * the -EFAULT "P" operand before resuming at label 2 -- the MOV is on a
 * line missing from this extraction; confirm against the full source.
 * The _l variant performs the "double write" SETL pair, using %t1 for the
 * high half of the 64-bit datum.
 */
1169 long __put_user_asm_b(unsigned int x
, void __user
*addr
)
1171 register unsigned int err
asm ("D0Re0") = 0;
1178 ".section .fixup,\"ax\"\n"
1180 " MOVT D0FrT,#HI(2b)\n"
1181 " JUMP D0FrT,#LO(2b)\n"
1183 ".section __ex_table,\"a\"\n"
1187 : "d" (x
), "a" (addr
), "P"(-EFAULT
)
1191 EXPORT_SYMBOL(__put_user_asm_b
);
1193 long __put_user_asm_w(unsigned int x
, void __user
*addr
)
1195 register unsigned int err
asm ("D0Re0") = 0;
1202 ".section .fixup,\"ax\"\n"
1204 " MOVT D0FrT,#HI(2b)\n"
1205 " JUMP D0FrT,#LO(2b)\n"
1207 ".section __ex_table,\"a\"\n"
1211 : "d" (x
), "a" (addr
), "P"(-EFAULT
)
1215 EXPORT_SYMBOL(__put_user_asm_w
);
1217 long __put_user_asm_d(unsigned int x
, void __user
*addr
)
1219 register unsigned int err
asm ("D0Re0") = 0;
1226 ".section .fixup,\"ax\"\n"
1228 " MOVT D0FrT,#HI(2b)\n"
1229 " JUMP D0FrT,#LO(2b)\n"
1231 ".section __ex_table,\"a\"\n"
1235 : "d" (x
), "a" (addr
), "P"(-EFAULT
)
1239 EXPORT_SYMBOL(__put_user_asm_d
);
1241 long __put_user_asm_l(unsigned long long x
, void __user
*addr
)
1243 register unsigned int err
asm ("D0Re0") = 0;
1246 " SETL [%2],%1,%t1\n"
1248 " SETL [%2],%1,%t1\n"
1250 ".section .fixup,\"ax\"\n"
1252 " MOVT D0FrT,#HI(2b)\n"
1253 " JUMP D0FrT,#LO(2b)\n"
1255 ".section __ex_table,\"a\"\n"
1259 : "d" (x
), "a" (addr
), "P"(-EFAULT
)
1263 EXPORT_SYMBOL(__put_user_asm_l
);
/*
 * strnlen_user() - return the length (including the NUL) of a user-space
 * string, bounded by count.  Returns 0 if the initial access_ok() check
 * fails; the scan loop GETBs bytes and TSTs for NUL, finally computing
 * count minus the remaining budget (SUB %0, %2, D0Ar6).  On a fault the
 * .fixup presumably zeroes the result before jumping back to label 3 --
 * the assignment and loop labels are on lines missing from this
 * extraction; confirm against the full source.
 */
1265 long strnlen_user(const char __user
*src
, long count
)
1269 if (!access_ok(VERIFY_READ
, src
, 0))
1272 asm volatile (" MOV D0Ar4, %1\n"
1275 " SUBS D0FrT, D0Ar6, #0\n"
1276 " SUB D0Ar6, D0Ar6, #1\n"
1278 " GETB D0FrT, [D0Ar4+#1++]\n"
1280 " TST D0FrT, #255\n"
1283 " SUB %0, %2, D0Ar6\n"
1285 " .section .fixup,\"ax\"\n"
1288 " MOVT D0FrT,#HI(3b)\n"
1289 " JUMP D0FrT,#LO(3b)\n"
1291 " .section __ex_table,\"a\"\n"
1295 : "r" (src
), "r" (count
)
1296 : "D0FrT", "D0Ar4", "D0Ar6", "cc");
1300 EXPORT_SYMBOL(strnlen_user
);
/*
 * __strncpy_from_user() - copy a NUL-terminated string from user space,
 * at most count bytes.  res is initialised to count (MOV %0,%3) and the
 * byte loop GETB/SETBs until NUL or exhaustion; per the derivation comment
 * kept below, the result is the number of bytes copied excluding the NUL.
 * On a fault the .fixup presumably loads the -EFAULT "P" operand as the
 * result before resuming at label 4 -- that line is missing from this
 * extraction; confirm against the full source.
 */
1302 long __strncpy_from_user(char *dst
, const char __user
*src
, long count
)
/*
1310 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
1313 * This code is deduced from:
1318 * while ((*dst++ = (tmp2 = *src++)) != 0
1322 * res = count - tmp1;
*/
1327 asm volatile (" MOV %0,%3\n"
1329 " GETB D0FrT,[%2++]\n"
1332 " SETB [%1++],D0FrT\n"
1339 " .section .fixup,\"ax\"\n"
1342 " MOVT D0FrT,#HI(4b)\n"
1343 " JUMP D0FrT,#LO(4b)\n"
1345 " .section __ex_table,\"a\"\n"
1348 : "=r" (res
), "=r" (dst
), "=r" (src
), "=r" (count
)
1349 : "3" (count
), "1" (dst
), "2" (src
), "P" (-EFAULT
)
1350 : "D0FrT", "memory", "cc");
1354 EXPORT_SYMBOL(__strncpy_from_user
);