// SPDX-License-Identifier: GPL-2.0
/*
 * User address space access functions.
 * The non-inlined parts of asm-metag/uaccess.h are here.
 *
 * Copyright (C) 2006, Imagination Technologies.
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 * Modified for Meta by Will Newton.
 */

#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/cache.h>		/* def of L1_CACHE_BYTES */

#define USE_RAPF
#define RAPF_MIN_BUF_SIZE	(3*L1_CACHE_BYTES)

/* The "double write" in this code is because the Meta will not fault
 * immediately unless the memory pipe is forced to by e.g. a data stall or
 * another memory op. The second write should be discarded by the write
 * combiner so should have virtually no cost.
 */

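/*
 * For example, in the one-byte copy below the first SETB may fault without
 * the fault being taken immediately; the labelled second SETB forces the
 * memory pipe, so it is that second write which appears in the __ex_table
 * entry (".long 2b,3b") and diverts to the fixup code.
 */
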
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	asm volatile (					\
		COPY					\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		FIXUP					\
		"	MOVT D1Ar1,#HI(1b)\n"		\
		"	JUMP D1Ar1,#LO(1b)\n"		\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		TENTRY					\
		"	.previous\n"			\
		: "=r" (to), "=r" (from), "=r" (ret)	\
		: "0" (to), "1" (from), "2" (ret)	\
		: "D1Ar1", "memory")

#define __asm_copy_to_user_1(to, from, ret)	\
	__asm_copy_user_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"	\
		"	SETB [%0],D1Ar1\n"	\
		"2:	SETB [%0++],D1Ar1\n",	\
		"3:	ADD  %2,%2,#1\n",	\
		"	.long 2b,3b\n")

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	GETW D1Ar1,[%1++]\n"		\
		"	SETW [%0],D1Ar1\n"		\
		"2:	SETW [%0++],D1Ar1\n" COPY,	\
		"3:	ADD  %2,%2,#2\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret)		\
	__asm_copy_to_user_2x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"4:	SETB [%0++],D1Ar1\n",		\
		"5:	ADD  %2,%2,#1\n",		\
		"	.long 4b,5b\n")

#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	GETD D1Ar1,[%1++]\n"		\
		"	SETD [%0],D1Ar1\n"		\
		"2:	SETD [%0++],D1Ar1\n" COPY,	\
		"3:	ADD  %2,%2,#4\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret)		\
	__asm_copy_to_user_4x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"4:	SETB [%0++],D1Ar1\n",		\
		"5:	ADD  %2,%2,#1\n",		\
		"	.long 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret,	\
		"	GETW D1Ar1,[%1++]\n"		\
		"	SETW [%0],D1Ar1\n"		\
		"4:	SETW [%0++],D1Ar1\n" COPY,	\
		"5:	ADD  %2,%2,#2\n" FIXUP,		\
		"	.long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret)		\
	__asm_copy_to_user_6x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"6:	SETB [%0++],D1Ar1\n",		\
		"7:	ADD  %2,%2,#1\n",		\
		"	.long 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret,	\
		"	GETD D1Ar1,[%1++]\n"		\
		"	SETD [%0],D1Ar1\n"		\
		"4:	SETD [%0++],D1Ar1\n" COPY,	\
		"5:	ADD  %2,%2,#4\n" FIXUP,		\
		"	.long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret)		\
	__asm_copy_to_user_8x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"6:	SETB [%0++],D1Ar1\n",		\
		"7:	ADD  %2,%2,#1\n",		\
		"	.long 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret,	\
		"	GETW D1Ar1,[%1++]\n"		\
		"	SETW [%0],D1Ar1\n"		\
		"6:	SETW [%0++],D1Ar1\n" COPY,	\
		"7:	ADD  %2,%2,#2\n" FIXUP,		\
		"	.long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret)		\
	__asm_copy_to_user_10x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"8:	SETB [%0++],D1Ar1\n",		\
		"9:	ADD  %2,%2,#1\n",		\
		"	.long 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret,	\
		"	GETD D1Ar1,[%1++]\n"		\
		"	SETD [%0],D1Ar1\n"		\
		"6:	SETD [%0++],D1Ar1\n" COPY,	\
		"7:	ADD  %2,%2,#4\n" FIXUP,		\
		"	.long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret)		\
	__asm_copy_to_user_12x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"8:	SETB [%0++],D1Ar1\n",		\
		"9:	ADD  %2,%2,#1\n",		\
		"	.long 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret,	\
		"	GETW D1Ar1,[%1++]\n"		\
		"	SETW [%0],D1Ar1\n"		\
		"8:	SETW [%0++],D1Ar1\n" COPY,	\
		"9:	ADD  %2,%2,#2\n" FIXUP,		\
		"	.long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret)		\
	__asm_copy_to_user_14x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"	SETB [%0],D1Ar1\n"		\
		"10:	SETB [%0++],D1Ar1\n",		\
		"11:	ADD  %2,%2,#1\n",		\
		"	.long 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret,	\
		"	GETD D1Ar1,[%1++]\n"		\
		"	SETD [%0],D1Ar1\n"		\
		"8:	SETD [%0++],D1Ar1\n" COPY,	\
		"9:	ADD  %2,%2,#4\n" FIXUP,		\
		"	.long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
	__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_8x64(to, from, ret) \
	asm volatile (					\
		"	GETL D0Ar2,D1Ar1,[%1++]\n"	\
		"	SETL [%0],D0Ar2,D1Ar1\n"	\
		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	ADD  %2,%2,#8\n"		\
		"	MOVT D0Ar2,#HI(1b)\n"		\
		"	JUMP D0Ar2,#LO(1b)\n"		\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.long 2b,3b\n"			\
		"	.previous\n"			\
		: "=r" (to), "=r" (from), "=r" (ret)	\
		: "0" (to), "1" (from), "2" (ret)	\
		: "D1Ar1", "D0Ar2", "memory")

/*
 *	optimized copying loop using RAPF when 64 bit aligned
 *
 *	n	will be automatically decremented inside the loop
 *	ret	will be left intact. if an error occurs we will rewind
 *		so that the original non-optimized code will fill up
 *		this value correctly.
 *
 *	on fault:
 *		>	n will hold the total number of uncopied bytes
 *
 *		>	{'to','from'} will be rewound back so that
 *			the non-optimized code will do the proper fix up
 *
 *	DCACHE drops the cacheline, which helps in reducing cache
 *	pollution.
 *
 *	We introduce an extra SETL at the end of the loop to
 *	ensure we don't fall off the loop before we catch all
 *	errors.
 *
 *	NOTICE:
 *		LSM_STEP in TXSTATUS must be cleared in the fix up code
 *		since we're using M{S,G}ETL: a fault might happen at
 *		any address in the middle of M{S,G}ETL, causing
 *		the value of LSM_STEP to be incorrect, which can
 *		cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *		ie: if LSM_STEP was 1 when a fault occurred, the
 *		next call to M{S,G}ET{L,D} will skip the first
 *		copy/getting as it thinks that the first one has already
 *		been done.
 */

#define __asm_copy_user_64bit_rapf_loop(				\
		to, from, ret, n, id, FIXUP)				\
	asm volatile (							\
		".balign 8\n"						\
		"	MOV	RAPF, %1\n"				\
		"	MSETL	[A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
		"	MOV	D0Ar6, #0\n"				\
		"	LSR	D1Ar5, %3, #6\n"			\
		"	SUB	TXRPT, D1Ar5, #2\n"			\
		"	MOV	RAPF, %1\n"				\
		"$Lloop"id":\n"						\
		"	ADD	RAPF, %1, #64\n"			\
		"21:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"22:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"23:	SUB	%3, %3, #32\n"				\
		"24:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"25:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"26:	SUB	%3, %3, #32\n"				\
		"	DCACHE	[%1+#-64], D0Ar6\n"			\
		"	BR	$Lloop"id"\n"				\
									\
		"	MOV	RAPF, %1\n"				\
		"27:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"28:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"29:	SUB	%3, %3, #32\n"				\
		"30:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"31:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"32:	SETL	[%0+#-8], D0.7, D1.7\n"			\
		"	SUB	%3, %3, #32\n"				\
		"1:	DCACHE	[%1+#-64], D0Ar6\n"			\
		"	GETL	D0Ar6, D1Ar5, [A0StP+#-40]\n"		\
		"	GETL	D0FrT, D1RtP, [A0StP+#-32]\n"		\
		"	GETL	D0.5, D1.5, [A0StP+#-24]\n"		\
		"	GETL	D0.6, D1.6, [A0StP+#-16]\n"		\
		"	GETL	D0.7, D1.7, [A0StP+#-8]\n"		\
		"	SUB	A0StP, A0StP, #40\n"			\
		"	.section .fixup,\"ax\"\n"			\
		"3:	MOV	D0Ar2, TXSTATUS\n"			\
		"	MOV	D1Ar1, TXSTATUS\n"			\
		"	AND	D1Ar1, D1Ar1, #0xFFFFF8FF\n"		\
		"	MOV	TXSTATUS, D1Ar1\n"			\
			FIXUP						\
		"	MOVT	D0Ar2, #HI(1b)\n"			\
		"	JUMP	D0Ar2, #LO(1b)\n"			\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.long 21b,3b\n"					\
		"	.long 22b,3b\n"					\
		"	.long 23b,3b\n"					\
		"	.long 24b,3b\n"					\
		"	.long 25b,3b\n"					\
		"	.long 26b,3b\n"					\
		"	.long 27b,3b\n"					\
		"	.long 28b,3b\n"					\
		"	.long 29b,3b\n"					\
		"	.long 30b,3b\n"					\
		"	.long 31b,3b\n"					\
		"	.long 32b,3b\n"					\
		"	.previous\n"					\
		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)		\
		: "0" (to), "1" (from), "2" (ret), "3" (n)		\
		: "D1Ar1", "D0Ar2", "cc", "memory")

/*	rewind 'to' and 'from' pointers when a fault occurs
 *
 *	Rationale:
 *		A fault always occurs on writing to the user buffer. A fault
 *		is at a single address, so we need to rewind by only 4
 *		bytes.
 *		Since we do a complete read from the kernel buffer before
 *		writing, we need to rewind it also. The amount to be
 *		rewound equals the number of faulty writes in MSETD,
 *		which is: [4 - (LSM_STEP-1)]*8
 *		LSM_STEP is bits 10:8 in TXSTATUS, which is already read
 *		and stored in D0Ar2.
 *
 *		NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 *		LSM_STEP will be 0. ie: we do 4 writes in our case; if
 *		a fault happens at the 4th write, LSM_STEP will be 0
 *		instead of 4. The code copes with that.
 *
 *		n is updated by the number of successful writes, which is:
 *		n = n - (LSM_STEP-1)*8
 */

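/*
 * Worked example of the formulas above (illustrative values): if the fault
 * is taken while LSM_STEP == 2, then 2 - 1 = 1 MSETL write of 8 bytes has
 * completed, so n is reduced by (2 - 1) * 8 = 8 and 'from' is rewound by
 * [4 - (2 - 1)] * 8 = 24 bytes, back to the data that was never written out.
 */
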
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,	\
		"LSR	D0Ar2, D0Ar2, #8\n"			\
		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
		"SUB	D0Ar2, D0Ar2, #1\n"			\
		"MOV	D1Ar1, #4\n"				\
		"SUB	D0Ar2, D1Ar1, D0Ar2\n"			\
		"LSL	D0Ar2, D0Ar2, #3\n"			\
		"LSL	D1Ar1, D1Ar1, #3\n"			\
		"SUB	D1Ar1, D1Ar1, D0Ar2\n"			\
		"SUB	%0, %0, #8\n"				\
		"SUB	%1, %1, D0Ar2\n"			\
		"SUB	%3, %3, D1Ar1\n")

/*
 *	optimized copying loop using RAPF when 32 bit aligned
 *
 *	n	will be automatically decremented inside the loop
 *	ret	will be left intact. if an error occurs we will rewind
 *		so that the original non-optimized code will fill up
 *		this value correctly.
 *
 *	on fault:
 *		>	n will hold the total number of uncopied bytes
 *
 *		>	{'to','from'} will be rewound back so that
 *			the non-optimized code will do the proper fix up
 *
 *	DCACHE drops the cacheline, which helps in reducing cache
 *	pollution.
 *
 *	We introduce an extra SETD at the end of the loop to
 *	ensure we don't fall off the loop before we catch all
 *	errors.
 *
 *	NOTICE:
 *		LSM_STEP in TXSTATUS must be cleared in the fix up code
 *		since we're using M{S,G}ETL: a fault might happen at
 *		any address in the middle of M{S,G}ETL, causing
 *		the value of LSM_STEP to be incorrect, which can
 *		cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *		ie: if LSM_STEP was 1 when a fault occurred, the
 *		next call to M{S,G}ET{L,D} will skip the first
 *		copy/getting as it thinks that the first one has already
 *		been done.
 */

#define __asm_copy_user_32bit_rapf_loop(				\
		to, from, ret, n, id, FIXUP)				\
	asm volatile (							\
		".balign 8\n"						\
		"	MOV	RAPF, %1\n"				\
		"	MSETL	[A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
		"	MOV	D0Ar6, #0\n"				\
		"	LSR	D1Ar5, %3, #6\n"			\
		"	SUB	TXRPT, D1Ar5, #2\n"			\
		"	MOV	RAPF, %1\n"				\
		"$Lloop"id":\n"						\
		"	ADD	RAPF, %1, #64\n"			\
		"21:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"22:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"23:	SUB	%3, %3, #16\n"				\
		"24:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"25:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"26:	SUB	%3, %3, #16\n"				\
		"27:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"28:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"29:	SUB	%3, %3, #16\n"				\
		"30:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"31:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"32:	SUB	%3, %3, #16\n"				\
		"	DCACHE	[%1+#-64], D0Ar6\n"			\
		"	BR	$Lloop"id"\n"				\
									\
		"	MOV	RAPF, %1\n"				\
		"33:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"34:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"35:	SUB	%3, %3, #16\n"				\
		"36:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"37:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"38:	SUB	%3, %3, #16\n"				\
		"39:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"40:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"41:	SUB	%3, %3, #16\n"				\
		"42:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"43:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"44:	SETD	[%0+#-4], D0.7\n"			\
		"	SUB	%3, %3, #16\n"				\
		"1:	DCACHE	[%1+#-64], D0Ar6\n"			\
		"	GETL	D0Ar6, D1Ar5, [A0StP+#-40]\n"		\
		"	GETL	D0FrT, D1RtP, [A0StP+#-32]\n"		\
		"	GETL	D0.5, D1.5, [A0StP+#-24]\n"		\
		"	GETL	D0.6, D1.6, [A0StP+#-16]\n"		\
		"	GETL	D0.7, D1.7, [A0StP+#-8]\n"		\
		"	SUB	A0StP, A0StP, #40\n"			\
		"	.section .fixup,\"ax\"\n"			\
		"3:	MOV	D0Ar2, TXSTATUS\n"			\
		"	MOV	D1Ar1, TXSTATUS\n"			\
		"	AND	D1Ar1, D1Ar1, #0xFFFFF8FF\n"		\
		"	MOV	TXSTATUS, D1Ar1\n"			\
			FIXUP						\
		"	MOVT	D0Ar2, #HI(1b)\n"			\
		"	JUMP	D0Ar2, #LO(1b)\n"			\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.long 21b,3b\n"					\
		"	.long 22b,3b\n"					\
		"	.long 23b,3b\n"					\
		"	.long 24b,3b\n"					\
		"	.long 25b,3b\n"					\
		"	.long 26b,3b\n"					\
		"	.long 27b,3b\n"					\
		"	.long 28b,3b\n"					\
		"	.long 29b,3b\n"					\
		"	.long 30b,3b\n"					\
		"	.long 31b,3b\n"					\
		"	.long 32b,3b\n"					\
		"	.long 33b,3b\n"					\
		"	.long 34b,3b\n"					\
		"	.long 35b,3b\n"					\
		"	.long 36b,3b\n"					\
		"	.long 37b,3b\n"					\
		"	.long 38b,3b\n"					\
		"	.long 39b,3b\n"					\
		"	.long 40b,3b\n"					\
		"	.long 41b,3b\n"					\
		"	.long 42b,3b\n"					\
		"	.long 43b,3b\n"					\
		"	.long 44b,3b\n"					\
		"	.previous\n"					\
		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)		\
		: "0" (to), "1" (from), "2" (ret), "3" (n)		\
		: "D1Ar1", "D0Ar2", "cc", "memory")

/*	rewind 'to' and 'from' pointers when a fault occurs
 *
 *	Rationale:
 *		A fault always occurs on writing to the user buffer. A fault
 *		is at a single address, so we need to rewind by only 4
 *		bytes.
 *		Since we do a complete read from the kernel buffer before
 *		writing, we need to rewind it also. The amount to be
 *		rewound equals the number of faulty writes in MSETD,
 *		which is: [4 - (LSM_STEP-1)]*4
 *		LSM_STEP is bits 10:8 in TXSTATUS, which is already read
 *		and stored in D0Ar2.
 *
 *		NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 *		LSM_STEP will be 0. ie: we do 4 writes in our case; if
 *		a fault happens at the 4th write, LSM_STEP will be 0
 *		instead of 4. The code copes with that.
 *
 *		n is updated by the number of successful writes, which is:
 *		n = n - (LSM_STEP-1)*4
 */

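/*
 * Worked example of the formulas above (illustrative values): with
 * LSM_STEP == 3 at the fault, 3 - 1 = 2 MSETD writes of 4 bytes completed,
 * so n is reduced by (3 - 1) * 4 = 8 and 'from' is rewound by
 * [4 - (3 - 1)] * 4 = 8 bytes.
 */
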
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,	\
		"LSR	D0Ar2, D0Ar2, #8\n"			\
		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
		"SUB	D0Ar2, D0Ar2, #1\n"			\
		"MOV	D1Ar1, #4\n"				\
		"SUB	D0Ar2, D1Ar1, D0Ar2\n"			\
		"LSL	D0Ar2, D0Ar2, #2\n"			\
		"LSL	D1Ar1, D1Ar1, #2\n"			\
		"SUB	D1Ar1, D1Ar1, D0Ar2\n"			\
		"SUB	%0, %0, #4\n"				\
		"SUB	%1, %1, D0Ar2\n"			\
		"SUB	%3, %3, D1Ar1\n")

unsigned long raw_copy_to_user(void __user *pdst, const void *psrc,
			       unsigned long n)
{
	register char __user *dst asm ("A0.2") = pdst;
	register const char *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_to_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_to_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_to_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_to_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* copy user using 64 bit rapf copy */
			__asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
							   n, "64cu");
		}
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
	if (n >= RAPF_MIN_BUF_SIZE) {
		/* copy user using 32 bit rapf copy */
		__asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 16) {
		__asm_copy_to_user_16(dst, src, retn);
		n -= 16;
		if (retn)
			return retn + n;
	}

	while (n >= 4) {
		__asm_copy_to_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_copy_to_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_to_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_to_user_3(dst, src, retn);
		break;
	}

	/*
	 * If we get here, retn correctly reflects the number of failing
	 * bytes.
	 */
	return retn;
}
EXPORT_SYMBOL(raw_copy_to_user);

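/*
 * Usage sketch (illustrative only): callers normally reach this routine
 * through copy_to_user(), which returns the number of bytes left uncopied:
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */
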
#define __asm_copy_from_user_1(to, from, ret)	\
	__asm_copy_user_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"	\
		"2:	SETB [%0++],D1Ar1\n",	\
		"3:	ADD  %2,%2,#1\n",	\
		"	.long 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	GETW D1Ar1,[%1++]\n"		\
		"2:	SETW [%0++],D1Ar1\n" COPY,	\
		"3:	ADD  %2,%2,#2\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret)		\
	__asm_copy_from_user_2x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"4:	SETB [%0++],D1Ar1\n",		\
		"5:	ADD  %2,%2,#1\n",		\
		"	.long 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	GETD D1Ar1,[%1++]\n"		\
		"2:	SETD [%0++],D1Ar1\n" COPY,	\
		"3:	ADD  %2,%2,#4\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_8x64(to, from, ret) \
	asm volatile (					\
		"	GETL D0Ar2,D1Ar1,[%1++]\n"	\
		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	ADD  %2,%2,#8\n"		\
		"	MOVT D0Ar2,#HI(1b)\n"		\
		"	JUMP D0Ar2,#LO(1b)\n"		\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.long 2b,3b\n"			\
		"	.previous\n"			\
		: "=a" (to), "=r" (from), "=r" (ret)	\
		: "0" (to), "1" (from), "2" (ret)	\
		: "D1Ar1", "D0Ar2", "memory")

/*	rewind 'from' pointer when a fault occurs
 *
 *	Rationale:
 *		A fault occurs while reading from the user buffer, which is
 *		the source.
 *		Since we don't write to the kernel buffer until we have read
 *		first, the kernel buffer is in the right state and needn't be
 *		corrected, but the source must be rewound to the beginning of
 *		the block, which is LSM_STEP*8 bytes.
 *		LSM_STEP is bits 10:8 in TXSTATUS, which is already read
 *		and stored in D0Ar2.
 *
 *		NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 *		LSM_STEP will be 0. ie: we do 4 writes in our case; if
 *		a fault happens at the 4th write, LSM_STEP will be 0
 *		instead of 4. The code copes with that.
 */

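/*
 * Worked example (illustrative values): if the fault hits while
 * LSM_STEP == 2, 'from' is rewound by 2 * 8 = 16 bytes to the start of the
 * block so the non-optimised code can redo it; when LSM_STEP reads back as
 * 0 (fault on the last transfer), the ADDZ in the fixup below substitutes
 * the full 32 bytes.
 */
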
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)	\
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,		\
		"LSR	D0Ar2, D0Ar2, #5\n"				\
		"ANDS	D0Ar2, D0Ar2, #0x38\n"				\
		"ADDZ	D0Ar2, D0Ar2, #32\n"				\
		"SUB	%1, %1, D0Ar2\n")

/*	rewind 'from' pointer when a fault occurs
 *
 *	Rationale:
 *		A fault occurs while reading from the user buffer, which is
 *		the source.
 *		Since we don't write to the kernel buffer until we have read
 *		first, the kernel buffer is in the right state and needn't be
 *		corrected, but the source must be rewound to the beginning of
 *		the block, which is LSM_STEP*4 bytes.
 *		LSM_STEP is bits 10:8 in TXSTATUS, which is already read
 *		and stored in D0Ar2.
 *
 *		NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 *		LSM_STEP will be 0. ie: we do 4 writes in our case; if
 *		a fault happens at the 4th write, LSM_STEP will be 0
 *		instead of 4. The code copes with that.
 */

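/*
 * Worked example (illustrative values): with LSM_STEP == 3 at the fault,
 * 'from' is rewound by 3 * 4 = 12 bytes; if LSM_STEP reads back as 0
 * (fault on the last transfer), the ADDZ in the fixup below substitutes
 * the full 16 bytes.
 */
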
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)	\
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,		\
		"LSR	D0Ar2, D0Ar2, #6\n"				\
		"ANDS	D0Ar2, D0Ar2, #0x1c\n"				\
		"ADDZ	D0Ar2, D0Ar2, #16\n"				\
		"SUB	%1, %1, D0Ar2\n")

/*
 *	Copy from user to kernel. The return-value is the number of bytes
 *	that were not copied.
 */

unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
				 unsigned long n)
{
	register char *dst asm ("A0.2") = pdst;
	register const char __user *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_from_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_from_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_from_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_from_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* Copy using fast 64bit rapf */
			__asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
							     n, "64cuz");
		}
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}

	if (n >= RAPF_MIN_BUF_SIZE) {
		/* Copy using fast 32bit rapf */
		__asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
						     n, "32cuz");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 4) {
		__asm_copy_from_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;
	}

	/* If we get here, there were no memory read faults. */
	switch (n) {
		/* These copies are at least "naturally aligned" (so we don't
		   have to check each byte), due to the src alignment code.
		   The *_3 case *will* get the correct count for retn. */
	case 0:
		/* This case deliberately left in (if you have doubts check the
		   generated assembly code). */
		break;
	case 1:
		__asm_copy_from_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_from_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_from_user_3(dst, src, retn);
		break;
	}

	/* If we get here, retn correctly reflects the number of failing
	   bytes. */
	return retn;
}
EXPORT_SYMBOL(raw_copy_from_user);

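/*
 * Usage sketch (illustrative only): callers normally go through
 * copy_from_user(), which zeroes the uncopied tail of the kernel buffer and
 * returns the number of bytes that could not be read:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */
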
#define __asm_clear_8x64(to, ret) \
	asm volatile (					\
		"	MOV  D0Ar2,#0\n"		\
		"	MOV  D1Ar1,#0\n"		\
		"	SETL [%0],D0Ar2,D1Ar1\n"	\
		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	ADD  %1,%1,#8\n"		\
		"	MOVT D0Ar2,#HI(1b)\n"		\
		"	JUMP D0Ar2,#LO(1b)\n"		\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.long 2b,3b\n"			\
		"	.previous\n"			\
		: "=r" (to), "=r" (ret)			\
		: "0" (to), "1" (ret)			\
		: "D1Ar1", "D0Ar2", "memory")

/* Zero userspace. */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	asm volatile (					\
		"	MOV D1Ar1,#0\n"			\
		CLEAR					\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		FIXUP					\
		"	MOVT D1Ar1,#HI(1b)\n"		\
		"	JUMP D1Ar1,#LO(1b)\n"		\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		TENTRY					\
		"	.previous\n"			\
		: "=r" (to), "=r" (ret)			\
		: "0" (to), "1" (ret)			\
		: "D1Ar1", "memory")

#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret, \
		"	SETB [%0],D1Ar1\n"	\
		"2:	SETB [%0++],D1Ar1\n",	\
		"3:	ADD  %1,%1,#1\n",	\
		"	.long 2b,3b\n")

#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret, \
		"	SETW [%0],D1Ar1\n"	\
		"2:	SETW [%0++],D1Ar1\n",	\
		"3:	ADD  %1,%1,#2\n",	\
		"	.long 2b,3b\n")

#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret, \
		"2:	SETW [%0++],D1Ar1\n"	\
		"	SETB [%0],D1Ar1\n"	\
		"3:	SETB [%0++],D1Ar1\n",	\
		"4:	ADD  %1,%1,#2\n"	\
		"5:	ADD  %1,%1,#1\n",	\
		"	.long 2b,4b\n"		\
		"	.long 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret, \
		"	SETD [%0],D1Ar1\n"		\
		"2:	SETD [%0++],D1Ar1\n" CLEAR,	\
		"3:	ADD  %1,%1,#4\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret, \
		"	SETD [%0],D1Ar1\n"		\
		"4:	SETD [%0++],D1Ar1\n" CLEAR,	\
		"5:	ADD  %1,%1,#4\n" FIXUP,		\
		"	.long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret, \
		"	SETD [%0],D1Ar1\n"		\
		"6:	SETD [%0++],D1Ar1\n" CLEAR,	\
		"7:	ADD  %1,%1,#4\n" FIXUP,		\
		"	.long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret, \
		"	SETD [%0],D1Ar1\n"		\
		"8:	SETD [%0++],D1Ar1\n" CLEAR,	\
		"9:	ADD  %1,%1,#4\n" FIXUP,		\
		"	.long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")

unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
	register char __user *dst asm ("D0Re0") = pto;
	register unsigned long n asm ("D1Re0") = pn;
	register unsigned long retn asm ("D0Ar6") = 0;

	if ((unsigned long) dst & 1) {
		__asm_clear_1(dst, retn);
		n--;
	}

	if ((unsigned long) dst & 2) {
		__asm_clear_2(dst, retn);
		n -= 2;
	}

	/* 64 bit copy loop */
	if (!((__force unsigned long) dst & 7)) {
		while (n >= 8) {
			__asm_clear_8x64(dst, retn);
			n -= 8;
		}
	}

	while (n >= 16) {
		__asm_clear_16(dst, retn);
		n -= 16;
	}

	while (n >= 4) {
		__asm_clear_4(dst, retn);
		n -= 4;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_clear_1(dst, retn);
		break;
	case 2:
		__asm_clear_2(dst, retn);
		break;
	case 3:
		__asm_clear_3(dst, retn);
		break;
	}

	return retn;
}
EXPORT_SYMBOL(__do_clear_user);

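/*
 * Usage sketch (illustrative only): __do_clear_user() backs clear_user(),
 * which returns the number of bytes that could not be zeroed:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
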
unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
	register unsigned char x asm ("D0Re0") = 0;

	asm volatile (
		"	GETB %0,[%2]\n"
		"1:\n"
		"	GETB %0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV  D0FrT,%3\n"
		"	SETD [%1],D0FrT\n"
		"	MOVT D0FrT,#HI(2b)\n"
		"	JUMP D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_b);

unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
	register unsigned short x asm ("D0Re0") = 0;

	asm volatile (
		"	GETW %0,[%2]\n"
		"1:\n"
		"	GETW %0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV  D0FrT,%3\n"
		"	SETD [%1],D0FrT\n"
		"	MOVT D0FrT,#HI(2b)\n"
		"	JUMP D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_w);

unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
	register unsigned int x asm ("D0Re0") = 0;

	asm volatile (
		"	GETD %0,[%2]\n"
		"1:\n"
		"	GETD %0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV  D0FrT,%3\n"
		"	SETD [%1],D0FrT\n"
		"	MOVT D0FrT,#HI(2b)\n"
		"	JUMP D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_d);

unsigned long long __get_user_asm_l(const void __user *addr, long *err)
{
	register unsigned long long x asm ("D0Re0") = 0;

	asm volatile (
		"	GETL %0,%t0,[%2]\n"
		"1:\n"
		"	GETL %0,%t0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV  D0FrT,%3\n"
		"	SETD [%1],D0FrT\n"
		"	MOVT D0FrT,#HI(2b)\n"
		"	JUMP D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=d" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_l);

long __put_user_asm_b(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;

	asm volatile (
		"	SETB [%2],%1\n"
		"1:\n"
		"	SETB [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV  %0,%3\n"
		"	MOVT D0FrT,#HI(2b)\n"
		"	JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r" (err)
		: "d" (x), "a" (addr), "P" (-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_b);

long __put_user_asm_w(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;

	asm volatile (
		"	SETW [%2],%1\n"
		"1:\n"
		"	SETW [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV  %0,%3\n"
		"	MOVT D0FrT,#HI(2b)\n"
		"	JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r" (err)
		: "d" (x), "a" (addr), "P" (-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_w);

long __put_user_asm_d(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;

	asm volatile (
		"	SETD [%2],%1\n"
		"1:\n"
		"	SETD [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV  %0,%3\n"
		"	MOVT D0FrT,#HI(2b)\n"
		"	JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r" (err)
		: "d" (x), "a" (addr), "P" (-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_d);

long __put_user_asm_l(unsigned long long x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;

	asm volatile (
		"	SETL [%2],%1,%t1\n"
		"1:\n"
		"	SETL [%2],%1,%t1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV  %0,%3\n"
		"	MOVT D0FrT,#HI(2b)\n"
		"	JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r" (err)
		: "d" (x), "a" (addr), "P" (-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_l);

long strnlen_user(const char __user *src, long count)
{
	long res;

	if (!access_ok(VERIFY_READ, src, 0))
		return 0;

	asm volatile ("	MOV	D0Ar4, %1\n"
		      "	MOV	D0Ar6, %2\n"
		      "0:\n"
		      "	SUBS	D0FrT, D0Ar6, #0\n"
		      "	SUB	D0Ar6, D0Ar6, #1\n"
		      "	BLE	2f\n"
		      "	GETB	D0FrT, [D0Ar4+#1++]\n"
		      "1:\n"
		      "	TST	D0FrT, #255\n"
		      "	BNZ	0b\n"
		      "2:\n"
		      "	SUB	%0, %2, D0Ar6\n"
		      "3:\n"
		      "	.section .fixup,\"ax\"\n"
		      "4:\n"
		      "	MOV	%0, #0\n"
		      "	MOVT	D0FrT,#HI(3b)\n"
		      "	JUMP	D0FrT,#LO(3b)\n"
		      "	.previous\n"
		      "	.section __ex_table,\"a\"\n"
		      "	.long 1b,4b\n"
		      "	.previous\n"
		      : "=r" (res)
		      : "r" (src), "r" (count)
		      : "D0FrT", "D0Ar4", "D0Ar6", "cc");

	return res;
}
EXPORT_SYMBOL(strnlen_user);

long __strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;

	if (count == 0)
		return 0;

	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 *  So do we.
	 *
	 *  This code is deduced from:
	 *
	 *	char tmp2;
	 *	long tmp1, tmp3;
	 *	tmp1 = count;
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	       && --tmp1)
	 *	  ;
	 *
	 *	res = count - tmp1;
	 *
	 *  with tweaks.
	 */

	asm volatile ("	MOV  %0,%3\n"
		      "1:\n"
		      "	GETB D0FrT,[%2++]\n"
		      "2:\n"
		      "	CMP  D0FrT,#0\n"
		      "	SETB [%1++],D0FrT\n"
		      "	BEQ  3f\n"
		      "	SUBS %0,%0,#1\n"
		      "	BNZ  1b\n"
		      "3:\n"
		      "	SUB  %0,%3,%0\n"
		      "4:\n"
		      "	.section .fixup,\"ax\"\n"
		      "5:\n"
		      "	MOV  %0,%7\n"
		      "	MOVT D0FrT,#HI(4b)\n"
		      "	JUMP D0FrT,#LO(4b)\n"
		      "	.previous\n"
		      "	.section __ex_table,\"a\"\n"
		      "	.long 2b,5b\n"
		      "	.previous"
		      : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		      : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
		      : "D0FrT", "memory", "cc");

	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);