1 /* $NetBSD: memcpy_xscale.S,v 1.2 2007/06/21 21:37:04 scw Exp $ */
4 * Copyright 2003 Wasabi Systems, Inc.
7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
38 #include <machine/asm.h>
40 /* LINTSTUB: Func: void *memcpy(void *dst, const void *src, size_t len) */
44 ble .Lmemcpy_short /* <= 12 bytes */
45 mov r3, r0 /* We must not clobber r0 */
47 /* Word-align the destination buffer */
48 ands ip, r3, #0x03 /* Already word aligned? */
49 beq .Lmemcpy_wordaligned /* Yup */
54 ldrleb ip, [r1], #0x01
56 strleb ip, [r3], #0x01
57 ldrltb ip, [r1], #0x01
59 strltb ip, [r3], #0x01
61 /* Destination buffer is now word aligned */
63 ands ip, r1, #0x03 /* Is src also word-aligned? */
64 bne .Lmemcpy_bad_align /* Nope. Things just got bad */
66 /* Quad-align the destination buffer */
67 tst r3, #0x07 /* Already quad aligned? */
69 stmfd sp!, {r4-r9} /* Free up some registers */
73 /* Destination buffer quad aligned, source is at least word aligned */
75 blt .Lmemcpy_w_lessthan128
77 /* Copy 128 bytes at a time */
79 ldr r4, [r1], #0x04 /* LD:00-03 */
80 ldr r5, [r1], #0x04 /* LD:04-07 */
81 pld [r1, #0x18] /* Prefetch 0x20 */
82 ldr r6, [r1], #0x04 /* LD:08-0b */
83 ldr r7, [r1], #0x04 /* LD:0c-0f */
84 ldr r8, [r1], #0x04 /* LD:10-13 */
85 ldr r9, [r1], #0x04 /* LD:14-17 */
86 strd r4, [r3], #0x08 /* ST:00-07 */
87 ldr r4, [r1], #0x04 /* LD:18-1b */
88 ldr r5, [r1], #0x04 /* LD:1c-1f */
89 strd r6, [r3], #0x08 /* ST:08-0f */
90 ldr r6, [r1], #0x04 /* LD:20-23 */
91 ldr r7, [r1], #0x04 /* LD:24-27 */
92 pld [r1, #0x18] /* Prefetch 0x40 */
93 strd r8, [r3], #0x08 /* ST:10-17 */
94 ldr r8, [r1], #0x04 /* LD:28-2b */
95 ldr r9, [r1], #0x04 /* LD:2c-2f */
96 strd r4, [r3], #0x08 /* ST:18-1f */
97 ldr r4, [r1], #0x04 /* LD:30-33 */
98 ldr r5, [r1], #0x04 /* LD:34-37 */
99 strd r6, [r3], #0x08 /* ST:20-27 */
100 ldr r6, [r1], #0x04 /* LD:38-3b */
101 ldr r7, [r1], #0x04 /* LD:3c-3f */
102 strd r8, [r3], #0x08 /* ST:28-2f */
103 ldr r8, [r1], #0x04 /* LD:40-43 */
104 ldr r9, [r1], #0x04 /* LD:44-47 */
105 pld [r1, #0x18] /* Prefetch 0x60 */
106 strd r4, [r3], #0x08 /* ST:30-37 */
107 ldr r4, [r1], #0x04 /* LD:48-4b */
108 ldr r5, [r1], #0x04 /* LD:4c-4f */
109 strd r6, [r3], #0x08 /* ST:38-3f */
110 ldr r6, [r1], #0x04 /* LD:50-53 */
111 ldr r7, [r1], #0x04 /* LD:54-57 */
112 strd r8, [r3], #0x08 /* ST:40-47 */
113 ldr r8, [r1], #0x04 /* LD:58-5b */
114 ldr r9, [r1], #0x04 /* LD:5c-5f */
115 strd r4, [r3], #0x08 /* ST:48-4f */
116 ldr r4, [r1], #0x04 /* LD:60-63 */
117 ldr r5, [r1], #0x04 /* LD:64-67 */
118 pld [r1, #0x18] /* Prefetch 0x80 */
119 strd r6, [r3], #0x08 /* ST:50-57 */
120 ldr r6, [r1], #0x04 /* LD:68-6b */
121 ldr r7, [r1], #0x04 /* LD:6c-6f */
122 strd r8, [r3], #0x08 /* ST:58-5f */
123 ldr r8, [r1], #0x04 /* LD:70-73 */
124 ldr r9, [r1], #0x04 /* LD:74-77 */
125 strd r4, [r3], #0x08 /* ST:60-67 */
126 ldr r4, [r1], #0x04 /* LD:78-7b */
127 ldr r5, [r1], #0x04 /* LD:7c-7f */
128 strd r6, [r3], #0x08 /* ST:68-6f */
129 strd r8, [r3], #0x08 /* ST:70-77 */
131 strd r4, [r3], #0x08 /* ST:78-7f */
132 bge .Lmemcpy_w_loop128
134 .Lmemcpy_w_lessthan128:
135 adds r2, r2, #0x80 /* Adjust for extra sub */
137 bxeq lr /* Return now if done */
139 blt .Lmemcpy_w_lessthan32
141 /* Copy 32 bytes at a time */
157 bge .Lmemcpy_w_loop32
159 .Lmemcpy_w_lessthan32:
160 adds r2, r2, #0x20 /* Adjust for extra sub */
162 bxeq lr /* Return now if done */
166 addne pc, pc, r4, lsl #1
169 /* At least 24 bytes remaining */
175 /* At least 16 bytes remaining */
181 /* At least 8 bytes remaining */
187 /* Less than 8 bytes remaining */
189 bxeq lr /* Return now if done */
191 ldrge ip, [r1], #0x04
192 strge ip, [r3], #0x04
193 bxeq lr /* Return now if done */
197 ldrgeb r2, [r1], #0x01
200 strgeb r2, [r3], #0x01
206 * At this point, it has not been possible to word align both buffers.
207 * The destination buffer is word aligned, but the source buffer is not.
218 .Lmemcpy_bad1_loop16:
230 orr r4, r4, r5, lsr #24
232 orr r5, r5, r6, lsr #24
234 orr r6, r6, r7, lsr #24
236 orr r7, r7, ip, lsr #24
238 orr r4, r4, r5, lsl #24
240 orr r5, r5, r6, lsl #24
242 orr r6, r6, r7, lsl #24
244 orr r7, r7, ip, lsl #24
254 bge .Lmemcpy_bad1_loop16
256 blt .Lmemcpy_bad1_loop16_short
258 /* copy last 16 bytes (without preload) */
269 orr r4, r4, r5, lsr #24
271 orr r5, r5, r6, lsr #24
273 orr r6, r6, r7, lsr #24
275 orr r7, r7, ip, lsr #24
277 orr r4, r4, r5, lsl #24
279 orr r5, r5, r6, lsl #24
281 orr r6, r6, r7, lsl #24
283 orr r7, r7, ip, lsl #24
291 bxeq lr /* Return now if done */
293 .Lmemcpy_bad1_loop16_short:
296 blt .Lmemcpy_bad_done
307 orr r4, r4, ip, lsr #24
309 orr r4, r4, ip, lsl #24
312 bge .Lmemcpy_bad1_loop4
316 .Lmemcpy_bad2_loop16:
328 orr r4, r4, r5, lsr #16
330 orr r5, r5, r6, lsr #16
332 orr r6, r6, r7, lsr #16
334 orr r7, r7, ip, lsr #16
336 orr r4, r4, r5, lsl #16
338 orr r5, r5, r6, lsl #16
340 orr r6, r6, r7, lsl #16
342 orr r7, r7, ip, lsl #16
352 bge .Lmemcpy_bad2_loop16
354 blt .Lmemcpy_bad2_loop16_short
356 /* copy last 16 bytes (without preload) */
367 orr r4, r4, r5, lsr #16
369 orr r5, r5, r6, lsr #16
371 orr r6, r6, r7, lsr #16
373 orr r7, r7, ip, lsr #16
375 orr r4, r4, r5, lsl #16
377 orr r5, r5, r6, lsl #16
379 orr r6, r6, r7, lsl #16
381 orr r7, r7, ip, lsl #16
389 bxeq lr /* Return now if done */
391 .Lmemcpy_bad2_loop16_short:
394 blt .Lmemcpy_bad_done
405 orr r4, r4, ip, lsr #16
407 orr r4, r4, ip, lsl #16
410 bge .Lmemcpy_bad2_loop4
414 .Lmemcpy_bad3_loop16:
426 orr r4, r4, r5, lsr #8
428 orr r5, r5, r6, lsr #8
430 orr r6, r6, r7, lsr #8
432 orr r7, r7, ip, lsr #8
434 orr r4, r4, r5, lsl #8
436 orr r5, r5, r6, lsl #8
438 orr r6, r6, r7, lsl #8
440 orr r7, r7, ip, lsl #8
450 bge .Lmemcpy_bad3_loop16
452 blt .Lmemcpy_bad3_loop16_short
454 /* copy last 16 bytes (without preload) */
465 orr r4, r4, r5, lsr #8
467 orr r5, r5, r6, lsr #8
469 orr r6, r6, r7, lsr #8
471 orr r7, r7, ip, lsr #8
473 orr r4, r4, r5, lsl #8
475 orr r5, r5, r6, lsl #8
477 orr r6, r6, r7, lsl #8
479 orr r7, r7, ip, lsl #8
487 bxeq lr /* Return now if done */
489 .Lmemcpy_bad3_loop16_short:
492 blt .Lmemcpy_bad_done
503 orr r4, r4, ip, lsr #8
505 orr r4, r4, ip, lsl #8
508 bge .Lmemcpy_bad3_loop4
517 ldrgeb r2, [r1], #0x01
520 strgeb r2, [r3], #0x01
526 * Handle short copies (less than 16 bytes), possibly misaligned.
527 * Some of these are *very* common, thanks to the network stack,
528 * and so are handled specially.
532 add pc, pc, r2, lsl #2
535 b .Lmemcpy_bytewise /* 0x01 */
536 b .Lmemcpy_bytewise /* 0x02 */
537 b .Lmemcpy_bytewise /* 0x03 */
538 b .Lmemcpy_4 /* 0x04 */
539 b .Lmemcpy_bytewise /* 0x05 */
540 b .Lmemcpy_6 /* 0x06 */
541 b .Lmemcpy_bytewise /* 0x07 */
542 b .Lmemcpy_8 /* 0x08 */
543 b .Lmemcpy_bytewise /* 0x09 */
544 b .Lmemcpy_bytewise /* 0x0a */
545 b .Lmemcpy_bytewise /* 0x0b */
546 b .Lmemcpy_c /* 0x0c */
549 mov r3, r0 /* We must not clobber r0 */
551 1: subs r2, r2, #0x01
553 ldrneb ip, [r1], #0x01
558 /******************************************************************************
559 * Special case for 4 byte copies
561 #define LMEMCPY_4_LOG2 6 /* 64 bytes */
562 #define LMEMCPY_4_PAD .align LMEMCPY_4_LOG2
566 orr r2, r2, r0, lsl #2
569 addne pc, r3, r2, lsl #LMEMCPY_4_LOG2
572 * 0000: dst is 32-bit aligned, src is 32-bit aligned
580 * 0001: dst is 32-bit aligned, src is 8-bit aligned
582 ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
583 ldr r2, [r1, #3] /* BE:r2 = 3xxx LE:r2 = xxx3 */
585 mov r3, r3, lsl #8 /* r3 = 012. */
586 orr r3, r3, r2, lsr #24 /* r3 = 0123 */
588 mov r3, r3, lsr #8 /* r3 = .210 */
589 orr r3, r3, r2, lsl #24 /* r3 = 3210 */
596 * 0010: dst is 32-bit aligned, src is 16-bit aligned
605 orr r3, r2, r3, lsl #16
611 * 0011: dst is 32-bit aligned, src is 8-bit aligned
613 ldr r3, [r1, #-3] /* BE:r3 = xxx0 LE:r3 = 0xxx */
614 ldr r2, [r1, #1] /* BE:r2 = 123x LE:r2 = x321 */
616 mov r3, r3, lsl #24 /* r3 = 0... */
617 orr r3, r3, r2, lsr #8 /* r3 = 0123 */
619 mov r3, r3, lsr #24 /* r3 = ...0 */
620 orr r3, r3, r2, lsl #8 /* r3 = 3210 */
627 * 0100: dst is 8-bit aligned, src is 32-bit aligned
646 * 0101: dst is 8-bit aligned, src is 8-bit aligned
658 * 0110: dst is 8-bit aligned, src is 16-bit aligned
660 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
 661 	ldrh	r3, [r1, #0x02]	/* BE:r3 = ..23  LE:r3 = ..32 */
663 mov r1, r2, lsr #8 /* r1 = ...0 */
665 mov r2, r2, lsl #8 /* r2 = .01. */
666 orr r2, r2, r3, lsr #8 /* r2 = .012 */
669 mov r2, r2, lsr #8 /* r2 = ...1 */
670 orr r2, r2, r3, lsl #8 /* r2 = .321 */
671 mov r3, r3, lsr #8 /* r3 = ...3 */
679 * 0111: dst is 8-bit aligned, src is 8-bit aligned
691 * 1000: dst is 16-bit aligned, src is 32-bit aligned
707 * 1001: dst is 16-bit aligned, src is 8-bit aligned
709 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
710 ldr r3, [r1, #3] /* BE:r3 = 3xxx LE:r3 = xxx3 */
711 mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
714 mov r2, r2, lsl #8 /* r2 = 012. */
715 orr r2, r2, r3, lsr #24 /* r2 = 0123 */
717 mov r2, r2, lsr #24 /* r2 = ...2 */
718 orr r2, r2, r3, lsl #8 /* r2 = xx32 */
725 * 1010: dst is 16-bit aligned, src is 16-bit aligned
735 * 1011: dst is 16-bit aligned, src is 8-bit aligned
737 ldr r3, [r1, #1] /* BE:r3 = 123x LE:r3 = x321 */
738 ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */
739 mov r1, r3, lsr #8 /* BE:r1 = .123 LE:r1 = .x32 */
742 mov r3, r3, lsr #24 /* r3 = ...1 */
743 orr r3, r3, r2, lsl #8 /* r3 = xx01 */
745 mov r3, r3, lsl #8 /* r3 = 321. */
746 orr r3, r3, r2, lsr #24 /* r3 = 3210 */
753 * 1100: dst is 8-bit aligned, src is 32-bit aligned
755 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
773 * 1101: dst is 8-bit aligned, src is 8-bit aligned
785 * 1110: dst is 8-bit aligned, src is 16-bit aligned
788 ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */
789 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
791 mov r3, r3, lsr #8 /* r3 = ...2 */
792 orr r3, r3, r2, lsl #8 /* r3 = ..12 */
794 mov r2, r2, lsr #8 /* r2 = ...0 */
797 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
798 ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */
800 mov r2, r2, lsr #8 /* r2 = ...1 */
801 orr r2, r2, r3, lsl #8 /* r2 = .321 */
803 mov r3, r3, lsr #8 /* r3 = ...3 */
810 * 1111: dst is 8-bit aligned, src is 8-bit aligned
822 /******************************************************************************
823 * Special case for 6 byte copies
825 #define LMEMCPY_6_LOG2 6 /* 64 bytes */
826 #define LMEMCPY_6_PAD .align LMEMCPY_6_LOG2
830 orr r2, r2, r0, lsl #2
833 addne pc, r3, r2, lsl #LMEMCPY_6_LOG2
836 * 0000: dst is 32-bit aligned, src is 32-bit aligned
846 * 0001: dst is 32-bit aligned, src is 8-bit aligned
848 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
849 ldr r3, [r1, #0x03] /* BE:r3 = 345x LE:r3 = x543 */
851 mov r2, r2, lsl #8 /* r2 = 012. */
852 orr r2, r2, r3, lsr #24 /* r2 = 0123 */
854 mov r2, r2, lsr #8 /* r2 = .210 */
855 orr r2, r2, r3, lsl #24 /* r2 = 3210 */
857 mov r3, r3, lsr #8 /* BE:r3 = .345 LE:r3 = .x54 */
864 * 0010: dst is 32-bit aligned, src is 16-bit aligned
866 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
867 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
869 mov r1, r3, lsr #16 /* r1 = ..23 */
870 orr r1, r1, r2, lsl #16 /* r1 = 0123 */
874 mov r1, r3, lsr #16 /* r1 = ..54 */
875 orr r2, r2, r3, lsl #16 /* r2 = 3210 */
883 * 0011: dst is 32-bit aligned, src is 8-bit aligned
885 ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */
886 ldr r3, [r1, #1] /* BE:r3 = 1234 LE:r3 = 4321 */
 887 	ldr	r1, [r1, #5]	/* BE:r1 = 5xxx  LE:r1 = xxx5 */
889 mov r2, r2, lsl #24 /* r2 = 0... */
890 orr r2, r2, r3, lsr #8 /* r2 = 0123 */
891 mov r3, r3, lsl #8 /* r3 = 234. */
892 orr r1, r3, r1, lsr #24 /* r1 = 2345 */
894 mov r2, r2, lsr #24 /* r2 = ...0 */
895 orr r2, r2, r3, lsl #8 /* r2 = 3210 */
896 mov r1, r1, lsl #8 /* r1 = xx5. */
897 orr r1, r1, r3, lsr #24 /* r1 = xx54 */
905 * 0100: dst is 8-bit aligned, src is 32-bit aligned
907 ldr r3, [r1] /* BE:r3 = 0123 LE:r3 = 3210 */
908 ldrh r2, [r1, #0x04] /* BE:r2 = ..45 LE:r2 = ..54 */
909 mov r1, r3, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */
912 mov r1, r3, lsr #24 /* r1 = ...0 */
914 mov r3, r3, lsl #8 /* r3 = 123. */
915 orr r3, r3, r2, lsr #8 /* r3 = 1234 */
918 mov r3, r3, lsr #24 /* r3 = ...3 */
919 orr r3, r3, r2, lsl #8 /* r3 = .543 */
920 mov r2, r2, lsr #8 /* r2 = ...5 */
928 * 0101: dst is 8-bit aligned, src is 8-bit aligned
942 * 0110: dst is 8-bit aligned, src is 16-bit aligned
944 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
945 ldr r1, [r1, #0x02] /* BE:r1 = 2345 LE:r1 = 5432 */
947 mov r3, r2, lsr #8 /* r3 = ...0 */
950 mov r3, r1, lsr #8 /* r3 = .234 */
952 mov r3, r2, lsl #8 /* r3 = .01. */
953 orr r3, r3, r1, lsr #24 /* r3 = .012 */
959 mov r3, r1, lsr #8 /* r3 = .543 */
961 mov r3, r2, lsr #8 /* r3 = ...1 */
962 orr r3, r3, r1, lsl #8 /* r3 = 4321 */
969 * 0111: dst is 8-bit aligned, src is 8-bit aligned
983 * 1000: dst is 16-bit aligned, src is 32-bit aligned
986 ldr r2, [r1] /* r2 = 0123 */
987 ldrh r3, [r1, #0x04] /* r3 = ..45 */
988 mov r1, r2, lsr #16 /* r1 = ..01 */
989 orr r3, r3, r2, lsl#16 /* r3 = 2345 */
993 ldrh r2, [r1, #0x04] /* r2 = ..54 */
994 ldr r3, [r1] /* r3 = 3210 */
995 mov r2, r2, lsl #16 /* r2 = 54.. */
996 orr r2, r2, r3, lsr #16 /* r2 = 5432 */
1004 * 1001: dst is 16-bit aligned, src is 8-bit aligned
1006 ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
1007 ldr r2, [r1, #3] /* BE:r2 = 345x LE:r2 = x543 */
1008 mov r1, r3, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
1010 mov r2, r2, lsr #8 /* r2 = .345 */
1011 orr r2, r2, r3, lsl #24 /* r2 = 2345 */
1013 mov r2, r2, lsl #8 /* r2 = 543. */
1014 orr r2, r2, r3, lsr #24 /* r2 = 5432 */
1022 * 1010: dst is 16-bit aligned, src is 16-bit aligned
1032 * 1011: dst is 16-bit aligned, src is 8-bit aligned
1034 ldrb r3, [r1] /* r3 = ...0 */
1035 ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
1036 ldrb r1, [r1, #0x05] /* r1 = ...5 */
1038 mov r3, r3, lsl #8 /* r3 = ..0. */
1039 orr r3, r3, r2, lsr #24 /* r3 = ..01 */
1040 orr r1, r1, r2, lsl #8 /* r1 = 2345 */
1042 orr r3, r3, r2, lsl #8 /* r3 = 3210 */
1043 mov r1, r1, lsl #24 /* r1 = 5... */
1044 orr r1, r1, r2, lsr #8 /* r1 = 5432 */
1052 * 1100: dst is 8-bit aligned, src is 32-bit aligned
1054 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
1055 ldrh r1, [r1, #0x04] /* BE:r1 = ..45 LE:r1 = ..54 */
1057 mov r3, r2, lsr #24 /* r3 = ...0 */
1059 mov r2, r2, lsl #8 /* r2 = 123. */
1060 orr r2, r2, r1, lsr #8 /* r2 = 1234 */
1063 mov r2, r2, lsr #8 /* r2 = .321 */
1064 orr r2, r2, r1, lsl #24 /* r2 = 4321 */
1065 mov r1, r1, lsr #8 /* r1 = ...5 */
1068 strb r1, [r0, #0x05]
1073 * 1101: dst is 8-bit aligned, src is 8-bit aligned
1076 ldrh r3, [r1, #0x01]
1077 ldrh ip, [r1, #0x03]
1078 ldrb r1, [r1, #0x05]
1080 strh r3, [r0, #0x01]
1081 strh ip, [r0, #0x03]
1082 strb r1, [r0, #0x05]
1087 * 1110: dst is 8-bit aligned, src is 16-bit aligned
1089 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1090 ldr r1, [r1, #0x02] /* BE:r1 = 2345 LE:r1 = 5432 */
1092 mov r3, r2, lsr #8 /* r3 = ...0 */
1094 mov r2, r2, lsl #24 /* r2 = 1... */
1095 orr r2, r2, r1, lsr #8 /* r2 = 1234 */
1098 mov r2, r2, lsr #8 /* r2 = ...1 */
1099 orr r2, r2, r1, lsl #8 /* r2 = 4321 */
1100 mov r1, r1, lsr #24 /* r1 = ...5 */
1103 strb r1, [r0, #0x05]
1108 * 1111: dst is 8-bit aligned, src is 8-bit aligned
1112 ldrb r1, [r1, #0x05]
1115 strb r1, [r0, #0x05]
1120 /******************************************************************************
1121 * Special case for 8 byte copies
1123 #define LMEMCPY_8_LOG2 6 /* 64 bytes */
1124 #define LMEMCPY_8_PAD .align LMEMCPY_8_LOG2
1128 orr r2, r2, r0, lsl #2
1131 addne pc, r3, r2, lsl #LMEMCPY_8_LOG2
1134 * 0000: dst is 32-bit aligned, src is 32-bit aligned
1144 * 0001: dst is 32-bit aligned, src is 8-bit aligned
1146 ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
1147 ldr r2, [r1, #0x03] /* BE:r2 = 3456 LE:r2 = 6543 */
1148 ldrb r1, [r1, #0x07] /* r1 = ...7 */
1150 mov r3, r3, lsl #8 /* r3 = 012. */
1151 orr r3, r3, r2, lsr #24 /* r3 = 0123 */
1152 orr r2, r1, r2, lsl #8 /* r2 = 4567 */
1154 mov r3, r3, lsr #8 /* r3 = .210 */
1155 orr r3, r3, r2, lsl #24 /* r3 = 3210 */
1156 mov r1, r1, lsl #24 /* r1 = 7... */
1157 orr r2, r1, r2, lsr #8 /* r2 = 7654 */
1165 * 0010: dst is 32-bit aligned, src is 16-bit aligned
1167 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1168 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
1169 ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
1171 mov r2, r2, lsl #16 /* r2 = 01.. */
1172 orr r2, r2, r3, lsr #16 /* r2 = 0123 */
1173 orr r3, r1, r3, lsl #16 /* r3 = 4567 */
1175 orr r2, r2, r3, lsl #16 /* r2 = 3210 */
1176 mov r3, r3, lsr #16 /* r3 = ..54 */
1177 orr r3, r3, r1, lsl #16 /* r3 = 7654 */
1185 * 0011: dst is 32-bit aligned, src is 8-bit aligned
1187 ldrb r3, [r1] /* r3 = ...0 */
1188 ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
1189 ldr r1, [r1, #0x05] /* BE:r1 = 567x LE:r1 = x765 */
1191 mov r3, r3, lsl #24 /* r3 = 0... */
1192 orr r3, r3, r2, lsr #8 /* r3 = 0123 */
1193 mov r2, r2, lsl #24 /* r2 = 4... */
1194 orr r2, r2, r1, lsr #8 /* r2 = 4567 */
1196 orr r3, r3, r2, lsl #8 /* r3 = 3210 */
1197 mov r2, r2, lsr #24 /* r2 = ...4 */
1198 orr r2, r2, r1, lsl #8 /* r2 = 7654 */
1206 * 0100: dst is 8-bit aligned, src is 32-bit aligned
1208 ldr r3, [r1] /* BE:r3 = 0123 LE:r3 = 3210 */
1209 ldr r2, [r1, #0x04] /* BE:r2 = 4567 LE:r2 = 7654 */
1211 mov r1, r3, lsr #24 /* r1 = ...0 */
1213 mov r1, r3, lsr #8 /* r1 = .012 */
1214 strb r2, [r0, #0x07]
1215 mov r3, r3, lsl #24 /* r3 = 3... */
1216 orr r3, r3, r2, lsr #8 /* r3 = 3456 */
1219 mov r1, r2, lsr #24 /* r1 = ...7 */
1220 strb r1, [r0, #0x07]
1221 mov r1, r3, lsr #8 /* r1 = .321 */
1222 mov r3, r3, lsr #24 /* r3 = ...3 */
1223 orr r3, r3, r2, lsl #8 /* r3 = 6543 */
1225 strh r1, [r0, #0x01]
1231 * 0101: dst is 8-bit aligned, src is 8-bit aligned
1234 ldrh r3, [r1, #0x01]
1236 ldrb r1, [r1, #0x07]
1238 strh r3, [r0, #0x01]
1240 strb r1, [r0, #0x07]
1245 * 0110: dst is 8-bit aligned, src is 16-bit aligned
1247 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1248 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
1249 ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
1251 mov ip, r2, lsr #8 /* ip = ...0 */
1253 mov ip, r2, lsl #8 /* ip = .01. */
1254 orr ip, ip, r3, lsr #24 /* ip = .012 */
1255 strb r1, [r0, #0x07]
1256 mov r3, r3, lsl #8 /* r3 = 345. */
1257 orr r3, r3, r1, lsr #8 /* r3 = 3456 */
1259 strb r2, [r0] /* 0 */
1260 mov ip, r1, lsr #8 /* ip = ...7 */
1261 strb ip, [r0, #0x07] /* 7 */
1262 mov ip, r2, lsr #8 /* ip = ...1 */
1263 orr ip, ip, r3, lsl #8 /* ip = 4321 */
1264 mov r3, r3, lsr #8 /* r3 = .543 */
1265 orr r3, r3, r1, lsl #24 /* r3 = 6543 */
1267 strh ip, [r0, #0x01]
1273 * 0111: dst is 8-bit aligned, src is 8-bit aligned
1275 ldrb r3, [r1] /* r3 = ...0 */
1276 ldr ip, [r1, #0x01] /* BE:ip = 1234 LE:ip = 4321 */
1277 ldrh r2, [r1, #0x05] /* BE:r2 = ..56 LE:r2 = ..65 */
1278 ldrb r1, [r1, #0x07] /* r1 = ...7 */
1280 mov r3, ip, lsr #16 /* BE:r3 = ..12 LE:r3 = ..43 */
1282 strh r3, [r0, #0x01]
1283 orr r2, r2, ip, lsl #16 /* r2 = 3456 */
1285 strh ip, [r0, #0x01]
1286 orr r2, r3, r2, lsl #16 /* r2 = 6543 */
1289 strb r1, [r0, #0x07]
1294 * 1000: dst is 16-bit aligned, src is 32-bit aligned
1296 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
1297 ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
1298 mov r1, r2, lsr #16 /* BE:r1 = ..01 LE:r1 = ..32 */
1301 mov r1, r3, lsr #16 /* r1 = ..45 */
1302 orr r2, r1 ,r2, lsl #16 /* r2 = 2345 */
1305 orr r2, r1, r3, lsl #16 /* r2 = 5432 */
1306 mov r3, r3, lsr #16 /* r3 = ..76 */
1309 strh r3, [r0, #0x06]
1314 * 1001: dst is 16-bit aligned, src is 8-bit aligned
1316 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
1317 ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
1318 ldrb ip, [r1, #0x07] /* ip = ...7 */
1319 mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
1322 mov r1, r2, lsl #24 /* r1 = 2... */
1323 orr r1, r1, r3, lsr #8 /* r1 = 2345 */
1324 orr r3, ip, r3, lsl #8 /* r3 = 4567 */
1326 mov r1, r2, lsr #24 /* r1 = ...2 */
1327 orr r1, r1, r3, lsl #8 /* r1 = 5432 */
1328 mov r3, r3, lsr #24 /* r3 = ...6 */
1329 orr r3, r3, ip, lsl #8 /* r3 = ..76 */
1332 strh r3, [r0, #0x06]
1337 * 1010: dst is 16-bit aligned, src is 16-bit aligned
1341 ldrh r3, [r1, #0x06]
1344 strh r3, [r0, #0x06]
1349 * 1011: dst is 16-bit aligned, src is 8-bit aligned
1351 ldr r3, [r1, #0x05] /* BE:r3 = 567x LE:r3 = x765 */
1352 ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
1353 ldrb ip, [r1] /* ip = ...0 */
1354 mov r1, r3, lsr #8 /* BE:r1 = .567 LE:r1 = .x76 */
1355 strh r1, [r0, #0x06]
1357 mov r3, r3, lsr #24 /* r3 = ...5 */
1358 orr r3, r3, r2, lsl #8 /* r3 = 2345 */
1359 mov r2, r2, lsr #24 /* r2 = ...1 */
1360 orr r2, r2, ip, lsl #8 /* r2 = ..01 */
1362 mov r3, r3, lsl #24 /* r3 = 5... */
1363 orr r3, r3, r2, lsr #8 /* r3 = 5432 */
1364 orr r2, ip, r2, lsl #8 /* r2 = 3210 */
1372 * 1100: dst is 8-bit aligned, src is 32-bit aligned
1374 ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
1375 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
1376 mov r1, r3, lsr #8 /* BE:r1 = .456 LE:r1 = .765 */
1377 strh r1, [r0, #0x05]
1379 strb r3, [r0, #0x07]
1380 mov r1, r2, lsr #24 /* r1 = ...0 */
1382 mov r2, r2, lsl #8 /* r2 = 123. */
1383 orr r2, r2, r3, lsr #24 /* r2 = 1234 */
1387 mov r1, r3, lsr #24 /* r1 = ...7 */
1388 strb r1, [r0, #0x07]
1389 mov r2, r2, lsr #8 /* r2 = .321 */
1390 orr r2, r2, r3, lsl #24 /* r2 = 4321 */
1397 * 1101: dst is 8-bit aligned, src is 8-bit aligned
1399 ldrb r3, [r1] /* r3 = ...0 */
1400 ldrh r2, [r1, #0x01] /* BE:r2 = ..12 LE:r2 = ..21 */
1401 ldr ip, [r1, #0x03] /* BE:ip = 3456 LE:ip = 6543 */
1402 ldrb r1, [r1, #0x07] /* r1 = ...7 */
1404 mov r3, ip, lsr #16 /* BE:r3 = ..34 LE:r3 = ..65 */
1406 strh ip, [r0, #0x05]
1407 orr r2, r3, r2, lsl #16 /* r2 = 1234 */
1409 strh r3, [r0, #0x05]
1410 orr r2, r2, ip, lsl #16 /* r2 = 4321 */
1413 strb r1, [r0, #0x07]
1418 * 1110: dst is 8-bit aligned, src is 16-bit aligned
1420 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1421 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
1422 ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
1424 mov ip, r2, lsr #8 /* ip = ...0 */
1426 mov ip, r2, lsl #24 /* ip = 1... */
1427 orr ip, ip, r3, lsr #8 /* ip = 1234 */
1428 strb r1, [r0, #0x07]
1429 mov r1, r1, lsr #8 /* r1 = ...6 */
1430 orr r1, r1, r3, lsl #8 /* r1 = 3456 */
1433 mov ip, r2, lsr #8 /* ip = ...1 */
1434 orr ip, ip, r3, lsl #8 /* ip = 4321 */
1435 mov r2, r1, lsr #8 /* r2 = ...7 */
1436 strb r2, [r0, #0x07]
1437 mov r1, r1, lsl #8 /* r1 = .76. */
1438 orr r1, r1, r3, lsr #24 /* r1 = .765 */
1441 strh r1, [r0, #0x05]
1446 * 1111: dst is 8-bit aligned, src is 8-bit aligned
1450 ldrh r3, [r1, #0x05]
1451 ldrb r1, [r1, #0x07]
1454 strh r3, [r0, #0x05]
1455 strb r1, [r0, #0x07]
1459 /******************************************************************************
1460 * Special case for 12 byte copies
1462 #define LMEMCPY_C_LOG2 7 /* 128 bytes */
1463 #define LMEMCPY_C_PAD .align LMEMCPY_C_LOG2
1467 orr r2, r2, r0, lsl #2
1470 addne pc, r3, r2, lsl #LMEMCPY_C_LOG2
1473 * 0000: dst is 32-bit aligned, src is 32-bit aligned
1485 * 0001: dst is 32-bit aligned, src is 8-bit aligned
1487 ldrb r2, [r1, #0xb] /* r2 = ...B */
1488 ldr ip, [r1, #0x07] /* BE:ip = 789A LE:ip = A987 */
1489 ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
1490 ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */
1492 orr r2, r2, ip, lsl #8 /* r2 = 89AB */
1494 mov r2, ip, lsr #24 /* r2 = ...7 */
1495 orr r2, r2, r3, lsl #8 /* r2 = 4567 */
1496 mov r1, r1, lsl #8 /* r1 = 012. */
1497 orr r1, r1, r3, lsr #24 /* r1 = 0123 */
1499 mov r2, r2, lsl #24 /* r2 = B... */
1500 orr r2, r2, ip, lsr #8 /* r2 = BA98 */
1502 mov r2, ip, lsl #24 /* r2 = 7... */
1503 orr r2, r2, r3, lsr #8 /* r2 = 7654 */
1504 mov r1, r1, lsr #8 /* r1 = .210 */
1505 orr r1, r1, r3, lsl #24 /* r1 = 3210 */
1513 * 0010: dst is 32-bit aligned, src is 16-bit aligned
1515 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1516 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
1517 ldr ip, [r1, #0x06] /* BE:ip = 6789 LE:ip = 9876 */
1518 ldrh r1, [r1, #0x0a] /* BE:r1 = ..AB LE:r1 = ..BA */
1520 mov r2, r2, lsl #16 /* r2 = 01.. */
1521 orr r2, r2, r3, lsr #16 /* r2 = 0123 */
1523 mov r3, r3, lsl #16 /* r3 = 45.. */
1524 orr r3, r3, ip, lsr #16 /* r3 = 4567 */
1525 orr r1, r1, ip, lsl #16 /* r1 = 89AB */
1527 orr r2, r2, r3, lsl #16 /* r2 = 3210 */
1529 mov r3, r3, lsr #16 /* r3 = ..54 */
1530 orr r3, r3, ip, lsl #16 /* r3 = 7654 */
1531 mov r1, r1, lsl #16 /* r1 = BA.. */
1532 orr r1, r1, ip, lsr #16 /* r1 = BA98 */
1540 * 0011: dst is 32-bit aligned, src is 8-bit aligned
1542 ldrb r2, [r1] /* r2 = ...0 */
1543 ldr r3, [r1, #0x01] /* BE:r3 = 1234 LE:r3 = 4321 */
1544 ldr ip, [r1, #0x05] /* BE:ip = 5678 LE:ip = 8765 */
1545 ldr r1, [r1, #0x09] /* BE:r1 = 9ABx LE:r1 = xBA9 */
1547 mov r2, r2, lsl #24 /* r2 = 0... */
1548 orr r2, r2, r3, lsr #8 /* r2 = 0123 */
1550 mov r3, r3, lsl #24 /* r3 = 4... */
1551 orr r3, r3, ip, lsr #8 /* r3 = 4567 */
1552 mov r1, r1, lsr #8 /* r1 = .9AB */
1553 orr r1, r1, ip, lsl #24 /* r1 = 89AB */
1555 orr r2, r2, r3, lsl #8 /* r2 = 3210 */
1557 mov r3, r3, lsr #24 /* r3 = ...4 */
1558 orr r3, r3, ip, lsl #8 /* r3 = 7654 */
1559 mov r1, r1, lsl #8 /* r1 = BA9. */
1560 orr r1, r1, ip, lsr #24 /* r1 = BA98 */
1568 * 0100: dst is 8-bit aligned (byte 1), src is 32-bit aligned
1570 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
1571 ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
1572 ldr ip, [r1, #0x08] /* BE:ip = 89AB LE:ip = BA98 */
1573 mov r1, r2, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */
1574 strh r1, [r0, #0x01]
1576 mov r1, r2, lsr #24 /* r1 = ...0 */
1578 mov r1, r2, lsl #24 /* r1 = 3... */
 1579 	orr	r2, r1, r3, lsr #8	/* r2 = 3456 */
1580 mov r1, r3, lsl #24 /* r1 = 7... */
1581 orr r1, r1, ip, lsr #8 /* r1 = 789A */
1584 mov r1, r2, lsr #24 /* r1 = ...3 */
 1585 	orr	r2, r1, r3, lsl #8	/* r2 = 6543 */
1586 mov r1, r3, lsr #24 /* r1 = ...7 */
1587 orr r1, r1, ip, lsl #8 /* r1 = A987 */
1588 mov ip, ip, lsr #24 /* ip = ...B */
1592 strb ip, [r0, #0x0b]
1597 * 0101: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 1)
1600 ldrh r3, [r1, #0x01]
1604 ldrb r1, [r1, #0x0b]
1605 strh r3, [r0, #0x01]
1608 strb r1, [r0, #0x0b]
1613 * 0110: dst is 8-bit aligned (byte 1), src is 16-bit aligned
1615 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1616 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
1617 ldr ip, [r1, #0x06] /* BE:ip = 6789 LE:ip = 9876 */
1618 ldrh r1, [r1, #0x0a] /* BE:r1 = ..AB LE:r1 = ..BA */
1620 mov r2, r2, ror #8 /* r2 = 1..0 */
1622 mov r2, r2, lsr #16 /* r2 = ..1. */
1623 orr r2, r2, r3, lsr #24 /* r2 = ..12 */
1624 strh r2, [r0, #0x01]
1625 mov r2, r3, lsl #8 /* r2 = 345. */
1626 orr r3, r2, ip, lsr #24 /* r3 = 3456 */
1627 mov r2, ip, lsl #8 /* r2 = 789. */
1628 orr r2, r2, r1, lsr #8 /* r2 = 789A */
1631 mov r2, r2, lsr #8 /* r2 = ...1 */
1632 orr r2, r2, r3, lsl #8 /* r2 = 4321 */
1633 strh r2, [r0, #0x01]
1634 mov r2, r3, lsr #8 /* r2 = .543 */
1635 orr r3, r2, ip, lsl #24 /* r3 = 6543 */
1636 mov r2, ip, lsr #8 /* r2 = .987 */
1637 orr r2, r2, r1, lsl #24 /* r2 = A987 */
1638 mov r1, r1, lsr #8 /* r1 = ...B */
1642 strb r1, [r0, #0x0b]
1647 * 0111: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 3)
1650 ldr r3, [r1, #0x01] /* BE:r3 = 1234 LE:r3 = 4321 */
1651 ldr ip, [r1, #0x05] /* BE:ip = 5678 LE:ip = 8765 */
1652 ldr r1, [r1, #0x09] /* BE:r1 = 9ABx LE:r1 = xBA9 */
1655 mov r2, r3, lsr #16 /* r2 = ..12 */
1656 strh r2, [r0, #0x01]
1657 mov r3, r3, lsl #16 /* r3 = 34.. */
1658 orr r3, r3, ip, lsr #16 /* r3 = 3456 */
1659 mov ip, ip, lsl #16 /* ip = 78.. */
1660 orr ip, ip, r1, lsr #16 /* ip = 789A */
1661 mov r1, r1, lsr #8 /* r1 = .9AB */
1663 strh r3, [r0, #0x01]
1664 mov r3, r3, lsr #16 /* r3 = ..43 */
1665 orr r3, r3, ip, lsl #16 /* r3 = 6543 */
1666 mov ip, ip, lsr #16 /* ip = ..87 */
1667 orr ip, ip, r1, lsl #16 /* ip = A987 */
1668 mov r1, r1, lsr #16 /* r1 = ..xB */
1672 strb r1, [r0, #0x0b]
1677 * 1000: dst is 16-bit aligned, src is 32-bit aligned
1679 ldr ip, [r1] /* BE:ip = 0123 LE:ip = 3210 */
1680 ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
1681 ldr r2, [r1, #0x08] /* BE:r2 = 89AB LE:r2 = BA98 */
1682 mov r1, ip, lsr #16 /* BE:r1 = ..01 LE:r1 = ..32 */
1685 mov r1, ip, lsl #16 /* r1 = 23.. */
1686 orr r1, r1, r3, lsr #16 /* r1 = 2345 */
1687 mov r3, r3, lsl #16 /* r3 = 67.. */
1688 orr r3, r3, r2, lsr #16 /* r3 = 6789 */
1691 orr r1, r1, r3, lsl #16 /* r1 = 5432 */
1692 mov r3, r3, lsr #16 /* r3 = ..76 */
1693 orr r3, r3, r2, lsl #16 /* r3 = 9876 */
1694 mov r2, r2, lsr #16 /* r2 = ..BA */
1698 strh r2, [r0, #0x0a]
1703 * 1001: dst is 16-bit aligned, src is 8-bit aligned (byte 1)
1705 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
1706 ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
1707 mov ip, r2, lsr #8 /* BE:ip = .x01 LE:ip = .210 */
1709 ldr ip, [r1, #0x07] /* BE:ip = 789A LE:ip = A987 */
1710 ldrb r1, [r1, #0x0b] /* r1 = ...B */
1712 mov r2, r2, lsl #24 /* r2 = 2... */
1713 orr r2, r2, r3, lsr #8 /* r2 = 2345 */
1714 mov r3, r3, lsl #24 /* r3 = 6... */
1715 orr r3, r3, ip, lsr #8 /* r3 = 6789 */
1716 orr r1, r1, ip, lsl #8 /* r1 = 89AB */
1718 mov r2, r2, lsr #24 /* r2 = ...2 */
1719 orr r2, r2, r3, lsl #8 /* r2 = 5432 */
1720 mov r3, r3, lsr #24 /* r3 = ...6 */
1721 orr r3, r3, ip, lsl #8 /* r3 = 9876 */
1722 mov r1, r1, lsl #8 /* r1 = ..B. */
1723 orr r1, r1, ip, lsr #24 /* r1 = ..BA */
1727 strh r1, [r0, #0x0a]
1732 * 1010: dst is 16-bit aligned, src is 16-bit aligned
1737 ldrh r1, [r1, #0x0a]
1741 strh r1, [r0, #0x0a]
1746 * 1011: dst is 16-bit aligned, src is 8-bit aligned (byte 3)
1748 ldr r2, [r1, #0x09] /* BE:r2 = 9ABx LE:r2 = xBA9 */
1749 ldr r3, [r1, #0x05] /* BE:r3 = 5678 LE:r3 = 8765 */
1750 mov ip, r2, lsr #8 /* BE:ip = .9AB LE:ip = .xBA */
1751 strh ip, [r0, #0x0a]
1752 ldr ip, [r1, #0x01] /* BE:ip = 1234 LE:ip = 4321 */
1753 ldrb r1, [r1] /* r1 = ...0 */
1755 mov r2, r2, lsr #24 /* r2 = ...9 */
1756 orr r2, r2, r3, lsl #8 /* r2 = 6789 */
1757 mov r3, r3, lsr #24 /* r3 = ...5 */
1758 orr r3, r3, ip, lsl #8 /* r3 = 2345 */
1759 mov r1, r1, lsl #8 /* r1 = ..0. */
1760 orr r1, r1, ip, lsr #24 /* r1 = ..01 */
1762 mov r2, r2, lsl #24 /* r2 = 9... */
1763 orr r2, r2, r3, lsr #8 /* r2 = 9876 */
1764 mov r3, r3, lsl #24 /* r3 = 5... */
1765 orr r3, r3, ip, lsr #8 /* r3 = 5432 */
1766 orr r1, r1, ip, lsl #8 /* r1 = 3210 */
1775 * 1100: dst is 8-bit aligned (byte 3), src is 32-bit aligned
1777 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
1778 ldr ip, [r1, #0x04] /* BE:ip = 4567 LE:ip = 7654 */
1779 ldr r1, [r1, #0x08] /* BE:r1 = 89AB LE:r1 = BA98 */
1781 mov r3, r2, lsr #24 /* r3 = ...0 */
1783 mov r2, r2, lsl #8 /* r2 = 123. */
1784 orr r2, r2, ip, lsr #24 /* r2 = 1234 */
1786 mov r2, ip, lsl #8 /* r2 = 567. */
1787 orr r2, r2, r1, lsr #24 /* r2 = 5678 */
1789 mov r2, r1, lsr #8 /* r2 = ..9A */
1790 strh r2, [r0, #0x09]
1791 strb r1, [r0, #0x0b]
1794 mov r3, r2, lsr #8 /* r3 = .321 */
1795 orr r3, r3, ip, lsl #24 /* r3 = 4321 */
1797 mov r3, ip, lsr #8 /* r3 = .765 */
1798 orr r3, r3, r1, lsl #24 /* r3 = 8765 */
1800 mov r1, r1, lsr #8 /* r1 = .BA9 */
1801 strh r1, [r0, #0x09]
1802 mov r1, r1, lsr #16 /* r1 = ...B */
1803 strb r1, [r0, #0x0b]
1809 * 1101: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 1)
1811 ldrb r2, [r1, #0x0b] /* r2 = ...B */
1812 ldr r3, [r1, #0x07] /* BE:r3 = 789A LE:r3 = A987 */
1813 ldr ip, [r1, #0x03] /* BE:ip = 3456 LE:ip = 6543 */
1814 ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */
1815 strb r2, [r0, #0x0b]
1817 strh r3, [r0, #0x09]
1818 mov r3, r3, lsr #16 /* r3 = ..78 */
1819 orr r3, r3, ip, lsl #16 /* r3 = 5678 */
1820 mov ip, ip, lsr #16 /* ip = ..34 */
1821 orr ip, ip, r1, lsl #16 /* ip = 1234 */
1822 mov r1, r1, lsr #16 /* r1 = ..x0 */
1824 mov r2, r3, lsr #16 /* r2 = ..A9 */
1825 strh r2, [r0, #0x09]
1826 mov r3, r3, lsl #16 /* r3 = 87.. */
1827 orr r3, r3, ip, lsr #16 /* r3 = 8765 */
1828 mov ip, ip, lsl #16 /* ip = 43.. */
1829 orr ip, ip, r1, lsr #16 /* ip = 4321 */
1830 mov r1, r1, lsr #8 /* r1 = .210 */
1839 * 1110: dst is 8-bit aligned (byte 3), src is 16-bit aligned
1842 ldrh r2, [r1, #0x0a] /* r2 = ..AB */
1843 ldr ip, [r1, #0x06] /* ip = 6789 */
1844 ldr r3, [r1, #0x02] /* r3 = 2345 */
1845 ldrh r1, [r1] /* r1 = ..01 */
1846 strb r2, [r0, #0x0b]
1847 mov r2, r2, lsr #8 /* r2 = ...A */
1848 orr r2, r2, ip, lsl #8 /* r2 = 789A */
1849 mov ip, ip, lsr #8 /* ip = .678 */
1850 orr ip, ip, r3, lsl #24 /* ip = 5678 */
1851 mov r3, r3, lsr #8 /* r3 = .234 */
1852 orr r3, r3, r1, lsl #24 /* r3 = 1234 */
1853 mov r1, r1, lsr #8 /* r1 = ...0 */
1857 strh r2, [r0, #0x09]
1859 ldrh r2, [r1] /* r2 = ..10 */
1860 ldr r3, [r1, #0x02] /* r3 = 5432 */
1861 ldr ip, [r1, #0x06] /* ip = 9876 */
1862 ldrh r1, [r1, #0x0a] /* r1 = ..BA */
1864 mov r2, r2, lsr #8 /* r2 = ...1 */
1865 orr r2, r2, r3, lsl #8 /* r2 = 4321 */
1866 mov r3, r3, lsr #24 /* r3 = ...5 */
1867 orr r3, r3, ip, lsl #8 /* r3 = 8765 */
1868 mov ip, ip, lsr #24 /* ip = ...9 */
1869 orr ip, ip, r1, lsl #8 /* ip = .BA9 */
1870 mov r1, r1, lsr #8 /* r1 = ...B */
1873 strh ip, [r0, #0x09]
1874 strb r1, [r0, #0x0b]
1880 * 1111: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 3)
1886 ldrh r2, [r1, #0x09]
1887 ldrb r1, [r1, #0x0b]
1890 strh r2, [r0, #0x09]
1891 strb r1, [r0, #0x0b]
1893 #endif /* !_STANDALONE */