/*	$NetBSD: memmove.S,v 1.3 2008/04/28 20:22:52 martin Exp $	*/

/*
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Neil A. Carson and Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
32 #include <machine/asm.h>
35 /* LINTSTUB: Func: void *memmove(void *, const void *, size_t) */
38 /* bcopy = memcpy/memmove with arguments reversed. */
39 /* LINTSTUB: Func: void bcopy(void *, void *, size_t) */
41 /* switch the source and destination registers */
46 /* Do the buffers overlap? */
48 RETc(eq) /* Bail now if src/dst are the same */
49 subhs r3, r0, r1 /* if (dst > src) r3 = dst - src */
50 sublo r3, r1, r0 /* if (src > dst) r3 = src - dst */
51 cmp r3, r2 /* if (r3 >= len) we have an overlap */
52 bhs PIC_SYM(_C_LABEL(memcpy), PLT)
54 /* Determine copy direction */
56 bcc .Lmemmove_backwards
58 moveq r0, #0 /* Quick abort for len=0 */
61 stmdb sp!, {r0, lr} /* memmove() returns dest addr */
63 blt .Lmemmove_fl4 /* less than 4 bytes */
65 bne .Lmemmove_fdestul /* oh unaligned destination addr */
67 bne .Lmemmove_fsrcul /* oh unaligned source addr */
70 /* We have aligned source and destination */
72 blt .Lmemmove_fl12 /* less than 12 bytes (4 from above) */
74 blt .Lmemmove_fl32 /* less than 32 bytes (12 from above) */
75 stmdb sp!, {r4} /* borrow r4 */
77 /* blat 32 bytes at a time */
78 /* XXX for really big copies perhaps we should use more registers */
80 ldmia r1!, {r3, r4, r12, lr}
81 stmia r0!, {r3, r4, r12, lr}
82 ldmia r1!, {r3, r4, r12, lr}
83 stmia r0!, {r3, r4, r12, lr}
88 ldmgeia r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
89 stmgeia r0!, {r3, r4, r12, lr}
91 ldmia sp!, {r4} /* return r4 */
96 /* blat 12 bytes at a time */
98 ldmgeia r1!, {r3, r12, lr}
99 stmgeia r0!, {r3, r12, lr}
101 bge .Lmemmove_floop12
110 ldmgeia r1!, {r3, r12}
111 stmgeia r0!, {r3, r12}
115 /* less than 4 bytes to go */
117 ldmeqia sp!, {r0, pc} /* done */
119 /* copy the crud byte at a time */
129 /* erg - unaligned destination */
134 /* align destination with byte copies */
142 blt .Lmemmove_fl4 /* less the 4 bytes */
145 beq .Lmemmove_ft8 /* we have an aligned source */
147 /* erg - unaligned source */
148 /* This is where it gets nasty ... */
153 bgt .Lmemmove_fsrcul3
154 beq .Lmemmove_fsrcul2
156 blt .Lmemmove_fsrcul1loop4
160 .Lmemmove_fsrcul1loop16:
166 ldmia r1!, {r4, r5, r12, lr}
168 orr r3, r3, r4, lsr #24
170 orr r4, r4, r5, lsr #24
172 orr r5, r5, r12, lsr #24
174 orr r12, r12, lr, lsr #24
176 orr r3, r3, r4, lsl #24
178 orr r4, r4, r5, lsl #24
180 orr r5, r5, r12, lsl #24
182 orr r12, r12, lr, lsl #24
184 stmia r0!, {r3-r5, r12}
186 bge .Lmemmove_fsrcul1loop16
189 blt .Lmemmove_fsrcul1l4
191 .Lmemmove_fsrcul1loop4:
199 orr r12, r12, lr, lsr #24
201 orr r12, r12, lr, lsl #24
205 bge .Lmemmove_fsrcul1loop4
213 blt .Lmemmove_fsrcul2loop4
217 .Lmemmove_fsrcul2loop16:
223 ldmia r1!, {r4, r5, r12, lr}
225 orr r3, r3, r4, lsr #16
227 orr r4, r4, r5, lsr #16
229 orr r5, r5, r12, lsr #16
230 mov r12, r12, lsl #16
231 orr r12, r12, lr, lsr #16
233 orr r3, r3, r4, lsl #16
235 orr r4, r4, r5, lsl #16
237 orr r5, r5, r12, lsl #16
238 mov r12, r12, lsr #16
239 orr r12, r12, lr, lsl #16
241 stmia r0!, {r3-r5, r12}
243 bge .Lmemmove_fsrcul2loop16
246 blt .Lmemmove_fsrcul2l4
248 .Lmemmove_fsrcul2loop4:
256 orr r12, r12, lr, lsr #16
258 orr r12, r12, lr, lsl #16
262 bge .Lmemmove_fsrcul2loop4
270 blt .Lmemmove_fsrcul3loop4
274 .Lmemmove_fsrcul3loop16:
280 ldmia r1!, {r4, r5, r12, lr}
282 orr r3, r3, r4, lsr #8
284 orr r4, r4, r5, lsr #8
286 orr r5, r5, r12, lsr #8
287 mov r12, r12, lsl #24
288 orr r12, r12, lr, lsr #8
290 orr r3, r3, r4, lsl #8
292 orr r4, r4, r5, lsl #8
294 orr r5, r5, r12, lsl #8
295 mov r12, r12, lsr #24
296 orr r12, r12, lr, lsl #8
298 stmia r0!, {r3-r5, r12}
300 bge .Lmemmove_fsrcul3loop16
303 blt .Lmemmove_fsrcul3l4
305 .Lmemmove_fsrcul3loop4:
313 orr r12, r12, lr, lsr #8
315 orr r12, r12, lr, lsl #8
319 bge .Lmemmove_fsrcul3loop4
329 blt .Lmemmove_bl4 /* less than 4 bytes */
331 bne .Lmemmove_bdestul /* oh unaligned destination addr */
333 bne .Lmemmove_bsrcul /* oh unaligned source addr */
336 /* We have aligned source and destination */
338 blt .Lmemmove_bl12 /* less than 12 bytes (4 from above) */
340 subs r2, r2, #0x14 /* less than 32 bytes (12 from above) */
343 /* blat 32 bytes at a time */
344 /* XXX for really big copies perhaps we should use more registers */
346 ldmdb r1!, {r3, r4, r12, lr}
347 stmdb r0!, {r3, r4, r12, lr}
348 ldmdb r1!, {r3, r4, r12, lr}
349 stmdb r0!, {r3, r4, r12, lr}
351 bge .Lmemmove_bloop32
355 ldmgedb r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
356 stmgedb r0!, {r3, r4, r12, lr}
359 ldmgedb r1!, {r3, r12, lr} /* blat a remaining 12 bytes */
360 stmgedb r0!, {r3, r12, lr}
370 ldmgedb r1!, {r3, r12}
371 stmgedb r0!, {r3, r12}
375 /* less than 4 bytes to go */
379 /* copy the crud byte at a time */
383 ldrgeb r3, [r1, #-1]!
384 strgeb r3, [r0, #-1]!
385 ldrgtb r3, [r1, #-1]!
386 strgtb r3, [r0, #-1]!
389 /* erg - unaligned destination */
393 /* align destination with byte copies */
396 ldrgeb r3, [r1, #-1]!
397 strgeb r3, [r0, #-1]!
398 ldrgtb r3, [r1, #-1]!
399 strgtb r3, [r0, #-1]!
401 blt .Lmemmove_bl4 /* less than 4 bytes to go */
403 beq .Lmemmove_bt8 /* we have an aligned source */
405 /* erg - unaligned source */
406 /* This is where it gets nasty ... */
411 blt .Lmemmove_bsrcul1
412 beq .Lmemmove_bsrcul2
414 blt .Lmemmove_bsrcul3loop4
416 stmdb sp!, {r4, r5, lr}
418 .Lmemmove_bsrcul3loop16:
424 ldmdb r1!, {r3-r5, r12}
426 orr lr, lr, r12, lsl #24
428 orr r12, r12, r5, lsl #24
430 orr r5, r5, r4, lsl #24
432 orr r4, r4, r3, lsl #24
434 orr lr, lr, r12, lsr #24
436 orr r12, r12, r5, lsr #24
438 orr r5, r5, r4, lsr #24
440 orr r4, r4, r3, lsr #24
442 stmdb r0!, {r4, r5, r12, lr}
444 bge .Lmemmove_bsrcul3loop16
445 ldmia sp!, {r4, r5, lr}
447 blt .Lmemmove_bsrcul3l4
449 .Lmemmove_bsrcul3loop4:
457 orr r12, r12, r3, lsl #24
459 orr r12, r12, r3, lsr #24
463 bge .Lmemmove_bsrcul3loop4
471 blt .Lmemmove_bsrcul2loop4
473 stmdb sp!, {r4, r5, lr}
475 .Lmemmove_bsrcul2loop16:
481 ldmdb r1!, {r3-r5, r12}
483 orr lr, lr, r12, lsl #16
484 mov r12, r12, lsr #16
485 orr r12, r12, r5, lsl #16
487 orr r5, r5, r4, lsl #16
489 orr r4, r4, r3, lsl #16
491 orr lr, lr, r12, lsr #16
492 mov r12, r12, lsl #16
493 orr r12, r12, r5, lsr #16
495 orr r5, r5, r4, lsr #16
497 orr r4, r4, r3, lsr #16
499 stmdb r0!, {r4, r5, r12, lr}
501 bge .Lmemmove_bsrcul2loop16
502 ldmia sp!, {r4, r5, lr}
504 blt .Lmemmove_bsrcul2l4
506 .Lmemmove_bsrcul2loop4:
514 orr r12, r12, r3, lsl #16
516 orr r12, r12, r3, lsr #16
520 bge .Lmemmove_bsrcul2loop4
528 blt .Lmemmove_bsrcul1loop4
530 stmdb sp!, {r4, r5, lr}
532 .Lmemmove_bsrcul1loop32:
538 ldmdb r1!, {r3-r5, r12}
540 orr lr, lr, r12, lsl #8
541 mov r12, r12, lsr #24
542 orr r12, r12, r5, lsl #8
544 orr r5, r5, r4, lsl #8
546 orr r4, r4, r3, lsl #8
548 orr lr, lr, r12, lsr #8
549 mov r12, r12, lsl #24
550 orr r12, r12, r5, lsr #8
552 orr r5, r5, r4, lsr #8
554 orr r4, r4, r3, lsr #8
556 stmdb r0!, {r4, r5, r12, lr}
558 bge .Lmemmove_bsrcul1loop32
559 ldmia sp!, {r4, r5, lr}
561 blt .Lmemmove_bsrcul1l4
563 .Lmemmove_bsrcul1loop4:
571 orr r12, r12, r3, lsl #8
573 orr r12, r12, r3, lsr #8
577 bge .Lmemmove_bsrcul1loop4