arch/metag/lib/usercopy.c
// SPDX-License-Identifier: GPL-2.0
/*
 * User address space access functions.
 * The non-inlined parts of asm-metag/uaccess.h are here.
 *
 * Copyright (C) 2006, Imagination Technologies.
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 * Modified for Meta by Will Newton.
 */

#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/cache.h>			/* def of L1_CACHE_BYTES */

#define USE_RAPF
#define RAPF_MIN_BUF_SIZE	(3*L1_CACHE_BYTES)

/* The "double write" in this code is because the Meta will not fault
 * immediately unless the memory pipe is forced to by e.g. a data stall or
 * another memory op. The second write should be discarded by the write
 * combiner, so it should have virtually no cost.
 */
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	asm volatile ( \
		COPY \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		FIXUP \
		" MOVT D1Ar1,#HI(1b)\n" \
		" JUMP D1Ar1,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		TENTRY \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "memory")
#define __asm_copy_to_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"2: SETB [%0++],D1Ar1\n", \
		"3: ADD %2,%2,#1\n", \
		" .long 2b,3b\n")

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		" SETW [%0],D1Ar1\n" \
		"2: SETW [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#2\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"4: SETB [%0++],D1Ar1\n", \
		"5: ADD %2,%2,#1\n", \
		" .long 4b,5b\n")

#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		" SETD [%0],D1Ar1\n" \
		"2: SETD [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#4\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"4: SETB [%0++],D1Ar1\n", \
		"5: ADD %2,%2,#1\n", \
		" .long 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		" SETW [%0],D1Ar1\n" \
		"4: SETW [%0++],D1Ar1\n" COPY, \
		"5: ADD %2,%2,#2\n" FIXUP, \
		" .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"6: SETB [%0++],D1Ar1\n", \
		"7: ADD %2,%2,#1\n", \
		" .long 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		" SETD [%0],D1Ar1\n" \
		"4: SETD [%0++],D1Ar1\n" COPY, \
		"5: ADD %2,%2,#4\n" FIXUP, \
		" .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_9(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"6: SETB [%0++],D1Ar1\n", \
		"7: ADD %2,%2,#1\n", \
		" .long 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		" SETW [%0],D1Ar1\n" \
		"6: SETW [%0++],D1Ar1\n" COPY, \
		"7: ADD %2,%2,#2\n" FIXUP, \
		" .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"8: SETB [%0++],D1Ar1\n", \
		"9: ADD %2,%2,#1\n", \
		" .long 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		" SETD [%0],D1Ar1\n" \
		"6: SETD [%0++],D1Ar1\n" COPY, \
		"7: ADD %2,%2,#4\n" FIXUP, \
		" .long 6b,7b\n" TENTRY)
#define __asm_copy_to_user_12(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"8: SETB [%0++],D1Ar1\n", \
		"9: ADD %2,%2,#1\n", \
		" .long 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		" SETW [%0],D1Ar1\n" \
		"8: SETW [%0++],D1Ar1\n" COPY, \
		"9: ADD %2,%2,#2\n" FIXUP, \
		" .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"10: SETB [%0++],D1Ar1\n", \
		"11: ADD %2,%2,#1\n", \
		" .long 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		" SETD [%0],D1Ar1\n" \
		"8: SETD [%0++],D1Ar1\n" COPY, \
		"9: ADD %2,%2,#4\n" FIXUP, \
		" .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
	__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_8x64(to, from, ret) \
	asm volatile ( \
		" GETL D0Ar2,D1Ar1,[%1++]\n" \
		" SETL [%0],D0Ar2,D1Ar1\n" \
		"2: SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: ADD %2,%2,#8\n" \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 2b,3b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "D0Ar2", "memory")
/*
 * Optimized copying loop using RAPF when 64 bit aligned.
 *
 * n will be automatically decremented inside the loop.
 * ret will be left intact. If an error occurs we rewind
 * so that the original non-optimized code will fill in
 * this value correctly.
 *
 * on fault:
 *  > n will hold the total number of uncopied bytes
 *
 *  > {'to','from'} will be rewound so that
 *    the non-optimized code will do the proper fix up
 *
 * DCACHE drops the cacheline, which helps in reducing cache
 * pollution.
 *
 * We introduce an extra SETL at the end of the loop to
 * ensure we don't fall off the loop before we catch all
 * errors.
 *
 * NOTICE:
 *	LSM_STEP in TXSTATUS must be cleared in the fix up code.
 *	Since we're using M{S,G}ETL, a fault might happen at
 *	any address in the middle of M{S,G}ETL, causing
 *	the value of LSM_STEP to be incorrect, which can
 *	cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *	ie: if LSM_STEP was 1 when a fault occurred, the
 *	next call to M{S,G}ET{L,D} will skip the first
 *	copy/getting as it thinks that the first one has already
 *	been done.
 */
#define __asm_copy_user_64bit_rapf_loop( \
		to, from, ret, n, id, FIXUP) \
	asm volatile ( \
		".balign 8\n" \
		" MOV RAPF, %1\n" \
		" MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
		" MOV D0Ar6, #0\n" \
		" LSR D1Ar5, %3, #6\n" \
		" SUB TXRPT, D1Ar5, #2\n" \
		" MOV RAPF, %1\n" \
		"$Lloop"id":\n" \
		" ADD RAPF, %1, #64\n" \
		"21: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"22: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"23: SUB %3, %3, #32\n" \
		"24: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"25: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"26: SUB %3, %3, #32\n" \
		" DCACHE [%1+#-64], D0Ar6\n" \
		" BR $Lloop"id"\n" \
		"\n" \
		" MOV RAPF, %1\n" \
		"27: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"28: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"29: SUB %3, %3, #32\n" \
		"30: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"31: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"32: SETL [%0+#-8], D0.7, D1.7\n" \
		" SUB %3, %3, #32\n" \
		"1: DCACHE [%1+#-64], D0Ar6\n" \
		" GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
		" GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
		" GETL D0.5, D1.5, [A0StP+#-24]\n" \
		" GETL D0.6, D1.6, [A0StP+#-16]\n" \
		" GETL D0.7, D1.7, [A0StP+#-8]\n" \
		" SUB A0StP, A0StP, #40\n" \
		" .section .fixup,\"ax\"\n" \
		"3: MOV D0Ar2, TXSTATUS\n" \
		" MOV D1Ar1, TXSTATUS\n" \
		" AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
		" MOV TXSTATUS, D1Ar1\n" \
		FIXUP \
		" MOVT D0Ar2, #HI(1b)\n" \
		" JUMP D0Ar2, #LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 21b,3b\n" \
		" .long 22b,3b\n" \
		" .long 23b,3b\n" \
		" .long 24b,3b\n" \
		" .long 25b,3b\n" \
		" .long 26b,3b\n" \
		" .long 27b,3b\n" \
		" .long 28b,3b\n" \
		" .long 29b,3b\n" \
		" .long 30b,3b\n" \
		" .long 31b,3b\n" \
		" .long 32b,3b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
		: "0" (to), "1" (from), "2" (ret), "3" (n) \
		: "D1Ar1", "D0Ar2", "cc", "memory")
/* Rewind the 'to' and 'from' pointers when a fault occurs.
 *
 * Rationale:
 *	A fault always occurs on writing to the user buffer. A fault
 *	is at a single address, so 'to' only needs to be rewound to
 *	the start of the faulting write.
 *	Since we do a complete read from the kernel buffer before
 *	writing, we need to rewind it also. The amount to be
 *	rewound equals the number of faulty writes in MSETD,
 *	which is: [4 - (LSM_STEP-1)]*8
 *	LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *	read and stored in D0Ar2.
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 *	LSM_STEP will be 0. ie: we do 4 writes in our case; if
 *	a fault happens at the 4th write, LSM_STEP will be 0
 *	instead of 4. The code copes with that.
 *
 * n is updated by the number of successful writes, which is:
 *	n = n - (LSM_STEP-1)*8
 */
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
		"LSR D0Ar2, D0Ar2, #8\n" \
		"ANDS D0Ar2, D0Ar2, #0x7\n" \
		"ADDZ D0Ar2, D0Ar2, #4\n" \
		"SUB D0Ar2, D0Ar2, #1\n" \
		"MOV D1Ar1, #4\n" \
		"SUB D0Ar2, D1Ar1, D0Ar2\n" \
		"LSL D0Ar2, D0Ar2, #3\n" \
		"LSL D1Ar1, D1Ar1, #3\n" \
		"SUB D1Ar1, D1Ar1, D0Ar2\n" \
		"SUB %0, %0, #8\n" \
		"SUB %1, %1, D0Ar2\n" \
		"SUB %3, %3, D1Ar1\n")
/*
 * Optimized copying loop using RAPF when 32 bit aligned.
 *
 * n will be automatically decremented inside the loop.
 * ret will be left intact. If an error occurs we rewind
 * so that the original non-optimized code will fill in
 * this value correctly.
 *
 * on fault:
 *  > n will hold the total number of uncopied bytes
 *
 *  > {'to','from'} will be rewound so that
 *    the non-optimized code will do the proper fix up
 *
 * DCACHE drops the cacheline, which helps in reducing cache
 * pollution.
 *
 * We introduce an extra SETD at the end of the loop to
 * ensure we don't fall off the loop before we catch all
 * errors.
 *
 * NOTICE:
 *	LSM_STEP in TXSTATUS must be cleared in the fix up code.
 *	Since we're using M{S,G}ETL, a fault might happen at
 *	any address in the middle of M{S,G}ETL, causing
 *	the value of LSM_STEP to be incorrect, which can
 *	cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *	ie: if LSM_STEP was 1 when a fault occurred, the
 *	next call to M{S,G}ET{L,D} will skip the first
 *	copy/getting as it thinks that the first one has already
 *	been done.
 */
#define __asm_copy_user_32bit_rapf_loop( \
		to, from, ret, n, id, FIXUP) \
	asm volatile ( \
		".balign 8\n" \
		" MOV RAPF, %1\n" \
		" MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
		" MOV D0Ar6, #0\n" \
		" LSR D1Ar5, %3, #6\n" \
		" SUB TXRPT, D1Ar5, #2\n" \
		" MOV RAPF, %1\n" \
		"$Lloop"id":\n" \
		" ADD RAPF, %1, #64\n" \
		"21: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"22: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"23: SUB %3, %3, #16\n" \
		"24: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"25: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"26: SUB %3, %3, #16\n" \
		"27: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"28: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"29: SUB %3, %3, #16\n" \
		"30: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"31: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"32: SUB %3, %3, #16\n" \
		" DCACHE [%1+#-64], D0Ar6\n" \
		" BR $Lloop"id"\n" \
		"\n" \
		" MOV RAPF, %1\n" \
		"33: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"34: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"35: SUB %3, %3, #16\n" \
		"36: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"37: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"38: SUB %3, %3, #16\n" \
		"39: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"40: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"41: SUB %3, %3, #16\n" \
		"42: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"43: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"44: SETD [%0+#-4], D0.7\n" \
		" SUB %3, %3, #16\n" \
		"1: DCACHE [%1+#-64], D0Ar6\n" \
		" GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
		" GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
		" GETL D0.5, D1.5, [A0StP+#-24]\n" \
		" GETL D0.6, D1.6, [A0StP+#-16]\n" \
		" GETL D0.7, D1.7, [A0StP+#-8]\n" \
		" SUB A0StP, A0StP, #40\n" \
		" .section .fixup,\"ax\"\n" \
		"3: MOV D0Ar2, TXSTATUS\n" \
		" MOV D1Ar1, TXSTATUS\n" \
		" AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
		" MOV TXSTATUS, D1Ar1\n" \
		FIXUP \
		" MOVT D0Ar2, #HI(1b)\n" \
		" JUMP D0Ar2, #LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 21b,3b\n" \
		" .long 22b,3b\n" \
		" .long 23b,3b\n" \
		" .long 24b,3b\n" \
		" .long 25b,3b\n" \
		" .long 26b,3b\n" \
		" .long 27b,3b\n" \
		" .long 28b,3b\n" \
		" .long 29b,3b\n" \
		" .long 30b,3b\n" \
		" .long 31b,3b\n" \
		" .long 32b,3b\n" \
		" .long 33b,3b\n" \
		" .long 34b,3b\n" \
		" .long 35b,3b\n" \
		" .long 36b,3b\n" \
		" .long 37b,3b\n" \
		" .long 38b,3b\n" \
		" .long 39b,3b\n" \
		" .long 40b,3b\n" \
		" .long 41b,3b\n" \
		" .long 42b,3b\n" \
		" .long 43b,3b\n" \
		" .long 44b,3b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
		: "0" (to), "1" (from), "2" (ret), "3" (n) \
		: "D1Ar1", "D0Ar2", "cc", "memory")
/* Rewind the 'to' and 'from' pointers when a fault occurs.
 *
 * Rationale:
 *	A fault always occurs on writing to the user buffer. A fault
 *	is at a single address, so we need to rewind 'to' by only 4
 *	bytes.
 *	Since we do a complete read from the kernel buffer before
 *	writing, we need to rewind it also. The amount to be
 *	rewound equals the number of faulty writes in MSETD,
 *	which is: [4 - (LSM_STEP-1)]*4
 *	LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *	read and stored in D0Ar2.
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 *	LSM_STEP will be 0. ie: we do 4 writes in our case; if
 *	a fault happens at the 4th write, LSM_STEP will be 0
 *	instead of 4. The code copes with that.
 *
 * n is updated by the number of successful writes, which is:
 *	n = n - (LSM_STEP-1)*4
 */
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
		"LSR D0Ar2, D0Ar2, #8\n" \
		"ANDS D0Ar2, D0Ar2, #0x7\n" \
		"ADDZ D0Ar2, D0Ar2, #4\n" \
		"SUB D0Ar2, D0Ar2, #1\n" \
		"MOV D1Ar1, #4\n" \
		"SUB D0Ar2, D1Ar1, D0Ar2\n" \
		"LSL D0Ar2, D0Ar2, #2\n" \
		"LSL D1Ar1, D1Ar1, #2\n" \
		"SUB D1Ar1, D1Ar1, D0Ar2\n" \
		"SUB %0, %0, #4\n" \
		"SUB %1, %1, D0Ar2\n" \
		"SUB %3, %3, D1Ar1\n")
unsigned long raw_copy_to_user(void __user *pdst, const void *psrc,
			       unsigned long n)
{
	register char __user *dst asm ("A0.2") = pdst;
	register const char *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_to_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_to_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_to_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_to_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* copy user using 64 bit rapf copy */
			__asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
							   n, "64cu");
		}
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
	if (n >= RAPF_MIN_BUF_SIZE) {
		/* copy user using 32 bit rapf copy */
		__asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 16) {
		__asm_copy_to_user_16(dst, src, retn);
		n -= 16;
		if (retn)
			return retn + n;
	}

	while (n >= 4) {
		__asm_copy_to_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_copy_to_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_to_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_to_user_3(dst, src, retn);
		break;
	}

	/*
	 * If we get here, retn correctly reflects the number of failing
	 * bytes.
	 */
	return retn;
}
EXPORT_SYMBOL(raw_copy_to_user);
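/* Illustrative usage, assuming the generic uaccess wrappers that call
 * raw_copy_to_user(): a non-zero return is the number of bytes that
 * could not be written to user space, and callers typically treat any
 * shortfall as a fault, e.g.
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */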
#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		"2: SETB [%0++],D1Ar1\n", \
		"3: ADD %2,%2,#1\n", \
		" .long 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		"2: SETW [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#2\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		"4: SETB [%0++],D1Ar1\n", \
		"5: ADD %2,%2,#1\n", \
		" .long 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		"2: SETD [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#4\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_8x64(to, from, ret) \
	asm volatile ( \
		" GETL D0Ar2,D1Ar1,[%1++]\n" \
		"2: SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: ADD %2,%2,#8\n" \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 2b,3b\n" \
		" .previous\n" \
		: "=a" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "D0Ar2", "memory")
/* Rewind the 'from' pointer when a fault occurs.
 *
 * Rationale:
 *	A fault occurs while reading from the user buffer, which is the
 *	source.
 *	Since we don't write to the kernel buffer until we've read first,
 *	the kernel buffer is in the right state and needn't be
 *	corrected, but the source must be rewound to the beginning of
 *	the block, which is LSM_STEP*8 bytes.
 *	LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *	read and stored in D0Ar2.
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 *	LSM_STEP will be 0. ie: we do 4 reads in our case; if
 *	a fault happens at the 4th read, LSM_STEP will be 0
 *	instead of 4. The code copes with that.
 */
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
		"LSR D0Ar2, D0Ar2, #5\n" \
		"ANDS D0Ar2, D0Ar2, #0x38\n" \
		"ADDZ D0Ar2, D0Ar2, #32\n" \
		"SUB %1, %1, D0Ar2\n")
/* Rewind the 'from' pointer when a fault occurs.
 *
 * Rationale:
 *	A fault occurs while reading from the user buffer, which is the
 *	source.
 *	Since we don't write to the kernel buffer until we've read first,
 *	the kernel buffer is in the right state and needn't be
 *	corrected, but the source must be rewound to the beginning of
 *	the block, which is LSM_STEP*4 bytes.
 *	LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *	read and stored in D0Ar2.
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 *	LSM_STEP will be 0. ie: we do 4 reads in our case; if
 *	a fault happens at the 4th read, LSM_STEP will be 0
 *	instead of 4. The code copes with that.
 */
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
		"LSR D0Ar2, D0Ar2, #6\n" \
		"ANDS D0Ar2, D0Ar2, #0x1c\n" \
		"ADDZ D0Ar2, D0Ar2, #16\n" \
		"SUB %1, %1, D0Ar2\n")
/*
 * Copy from user to kernel. The return value is the number of bytes that
 * were inaccessible.
 */
unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
				 unsigned long n)
{
	register char *dst asm ("A0.2") = pdst;
	register const char __user *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_from_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_from_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_from_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_from_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* Copy using fast 64bit rapf */
			__asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
							     n, "64cuz");
		}
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}

	if (n >= RAPF_MIN_BUF_SIZE) {
		/* Copy using fast 32bit rapf */
		__asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
						     n, "32cuz");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 4) {
		__asm_copy_from_user_4(dst, src, retn);
		n -= 4;

		if (retn)
			return retn + n;
	}

	/* If we get here, there were no memory read faults. */
	switch (n) {
		/* These copies are at least "naturally aligned" (so we don't
		   have to check each byte), due to the src alignment code.
		   The *_3 case *will* get the correct count for retn. */
	case 0:
		/* This case deliberately left in (if you have doubts check the
		   generated assembly code). */
		break;
	case 1:
		__asm_copy_from_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_from_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_from_user_3(dst, src, retn);
		break;
	}

	/* If we get here, retn correctly reflects the number of failing
	   bytes. */
	return retn;
}
EXPORT_SYMBOL(raw_copy_from_user);
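/* Illustrative usage, assuming the generic copy_from_user() wrapper that
 * sits above raw_copy_from_user(): on a partial copy the wrapper zeroes
 * the uncopied tail of the kernel buffer, and callers typically do
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */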
#define __asm_clear_8x64(to, ret) \
	asm volatile ( \
		" MOV D0Ar2,#0\n" \
		" MOV D1Ar1,#0\n" \
		" SETL [%0],D0Ar2,D1Ar1\n" \
		"2: SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: ADD %1,%1,#8\n" \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 2b,3b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
		: "D1Ar1", "D0Ar2", "memory")
/* Zero userspace. */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	asm volatile ( \
		" MOV D1Ar1,#0\n" \
		CLEAR \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		FIXUP \
		" MOVT D1Ar1,#HI(1b)\n" \
		" JUMP D1Ar1,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		TENTRY \
		" .previous" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
		: "D1Ar1", "memory")

#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret, \
		" SETB [%0],D1Ar1\n" \
		"2: SETB [%0++],D1Ar1\n", \
		"3: ADD %1,%1,#1\n", \
		" .long 2b,3b\n")

#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret, \
		" SETW [%0],D1Ar1\n" \
		"2: SETW [%0++],D1Ar1\n", \
		"3: ADD %1,%1,#2\n", \
		" .long 2b,3b\n")

#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret, \
		"2: SETW [%0++],D1Ar1\n" \
		" SETB [%0],D1Ar1\n" \
		"3: SETB [%0++],D1Ar1\n", \
		"4: ADD %1,%1,#2\n" \
		"5: ADD %1,%1,#1\n", \
		" .long 2b,4b\n" \
		" .long 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"2: SETD [%0++],D1Ar1\n" CLEAR, \
		"3: ADD %1,%1,#4\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"4: SETD [%0++],D1Ar1\n" CLEAR, \
		"5: ADD %1,%1,#4\n" FIXUP, \
		" .long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"6: SETD [%0++],D1Ar1\n" CLEAR, \
		"7: ADD %1,%1,#4\n" FIXUP, \
		" .long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"8: SETD [%0++],D1Ar1\n" CLEAR, \
		"9: ADD %1,%1,#4\n" FIXUP, \
		" .long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")
unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
	register char __user *dst asm ("D0Re0") = pto;
	register unsigned long n asm ("D1Re0") = pn;
	register unsigned long retn asm ("D0Ar6") = 0;

	if ((unsigned long) dst & 1) {
		__asm_clear_1(dst, retn);
		n--;
	}

	if ((unsigned long) dst & 2) {
		__asm_clear_2(dst, retn);
		n -= 2;
	}

	/* 64 bit copy loop */
	if (!((__force unsigned long) dst & 7)) {
		while (n >= 8) {
			__asm_clear_8x64(dst, retn);
			n -= 8;
		}
	}

	while (n >= 16) {
		__asm_clear_16(dst, retn);
		n -= 16;
	}

	while (n >= 4) {
		__asm_clear_4(dst, retn);
		n -= 4;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_clear_1(dst, retn);
		break;
	case 2:
		__asm_clear_2(dst, retn);
		break;
	case 3:
		__asm_clear_3(dst, retn);
		break;
	}

	return retn;
}
EXPORT_SYMBOL(__do_clear_user);
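/* Illustrative usage, assuming the clear_user() wrapper that checks
 * access_ok() before calling __do_clear_user(): the return value is the
 * number of bytes that could not be zeroed, e.g.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */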
unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
	register unsigned char x asm ("D0Re0") = 0;
	asm volatile (
		" GETB %0,[%2]\n"
		"1:\n"
		" GETB %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_b);

unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
	register unsigned short x asm ("D0Re0") = 0;
	asm volatile (
		" GETW %0,[%2]\n"
		"1:\n"
		" GETW %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_w);

unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
	register unsigned int x asm ("D0Re0") = 0;
	asm volatile (
		" GETD %0,[%2]\n"
		"1:\n"
		" GETD %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_d);

unsigned long long __get_user_asm_l(const void __user *addr, long *err)
{
	register unsigned long long x asm ("D0Re0") = 0;
	asm volatile (
		" GETL %0,%t0,[%2]\n"
		"1:\n"
		" GETL %0,%t0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_l);
long __put_user_asm_b(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		" MOV %0,#0\n"
		" SETB [%2],%1\n"
		"1:\n"
		" SETB [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_b);

long __put_user_asm_w(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		" MOV %0,#0\n"
		" SETW [%2],%1\n"
		"1:\n"
		" SETW [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_w);

long __put_user_asm_d(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		" MOV %0,#0\n"
		" SETD [%2],%1\n"
		"1:\n"
		" SETD [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_d);

long __put_user_asm_l(unsigned long long x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		" MOV %0,#0\n"
		" SETL [%2],%1,%t1\n"
		"1:\n"
		" SETL [%2],%1,%t1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_l);
long strnlen_user(const char __user *src, long count)
{
	long res;

	if (!access_ok(VERIFY_READ, src, 0))
		return 0;

	asm volatile (" MOV D0Ar4, %1\n"
		      " MOV D0Ar6, %2\n"
		      "0:\n"
		      " SUBS D0FrT, D0Ar6, #0\n"
		      " SUB D0Ar6, D0Ar6, #1\n"
		      " BLE 2f\n"
		      " GETB D0FrT, [D0Ar4+#1++]\n"
		      "1:\n"
		      " TST D0FrT, #255\n"
		      " BNE 0b\n"
		      "2:\n"
		      " SUB %0, %2, D0Ar6\n"
		      "3:\n"
		      " .section .fixup,\"ax\"\n"
		      "4:\n"
		      " MOV %0, #0\n"
		      " MOVT D0FrT,#HI(3b)\n"
		      " JUMP D0FrT,#LO(3b)\n"
		      " .previous\n"
		      " .section __ex_table,\"a\"\n"
		      " .long 1b,4b\n"
		      " .previous\n"
		      : "=r" (res)
		      : "r" (src), "r" (count)
		      : "D0FrT", "D0Ar4", "D0Ar6", "cc");

	return res;
}
EXPORT_SYMBOL(strnlen_user);
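/* Behaviour sketch of the loop above (illustrative only): for a readable
 * user string "abc" with count == 10, the loop reads 'a', 'b', 'c' and the
 * NUL, leaving D0Ar6 == 6, so the result is 10 - 6 == 4, i.e. the string
 * length including the terminator. A fault while reading takes the fixup
 * path and returns 0.
 */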
long __strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;

	if (count == 0)
		return 0;

	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 *  So do we.
	 *
	 *  This code is deduced from:
	 *
	 *	char tmp2;
	 *	long tmp1, tmp3;
	 *	tmp1 = count;
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	       && --tmp1)
	 *	  ;
	 *
	 *	res = count - tmp1;
	 *
	 *  with tweaks.
	 */

	asm volatile (" MOV %0,%3\n"
		      "1:\n"
		      " GETB D0FrT,[%2++]\n"
		      "2:\n"
		      " CMP D0FrT,#0\n"
		      " SETB [%1++],D0FrT\n"
		      " BEQ 3f\n"
		      " SUBS %0,%0,#1\n"
		      " BNZ 1b\n"
		      "3:\n"
		      " SUB %0,%3,%0\n"
		      "4:\n"
		      " .section .fixup,\"ax\"\n"
		      "5:\n"
		      " MOV %0,%7\n"
		      " MOVT D0FrT,#HI(4b)\n"
		      " JUMP D0FrT,#LO(4b)\n"
		      " .previous\n"
		      " .section __ex_table,\"a\"\n"
		      " .long 2b,5b\n"
		      " .previous"
		      : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		      : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
		      : "D0FrT", "memory", "cc");

	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);
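/* Behaviour sketch of the loop above (illustrative only): copying "abc"
 * with count == 10 stores 'a', 'b', 'c' and the NUL, leaves the counter at
 * 7 and returns 10 - 7 == 3, i.e. the string length without the
 * terminator. A fault on the user-space read returns -EFAULT via the
 * fixup.
 */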