/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on linux/arch/arm/lib/memset.S
 *
 * ASM optimised string functions
 */
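/*
 * void *memset(void *s, int c, size_t n)
 *
 * r0 = buffer pointer, r1 = fill byte, r2 = byte count.  The writes
 * go through ip so that r0 survives untouched as the return value.
 */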
	ands	r3, r0, #3		@ 1 unaligned?
	mov	ip, r0			@ preserve r0 as return value
	bne	6f			@ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
1:	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16
	mov	r3, r1
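/*
 * r1 and r3 now hold the fill byte replicated into every byte lane,
 * e.g. 0x41 becomes 0x41414141, ready for word-sized stores.
 */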
/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
	stmfd	sp!, {r8, lr}
	mov	r8, r1
	mov	lr, r3
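/*
 * With r1, r3, r8 and lr all carrying the pattern, each stmia below
 * stores 16 bytes, so one pass of the loop fills 64 bytes.
 */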
2:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	bgt	2b
	ldmfdeq	sp!, {r8, pc}		@ Now <64 bytes to go.
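/*
 * The flags are still those of the subs above: eq means the count
 * hit exactly zero, so restore r8 and return in a single instruction
 * by loading the saved lr straight into pc.
 */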
/*
 * No need to correct the count; we're only testing bits from now on
 */
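/*
 * The final subs left r2 negative, but subtracting 64 only disturbs
 * bit 6 and above, so bits 5 (#32) and 4 (#16) of the remaining
 * count can still be tested directly.
 */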
	tst	r2, #32
	stmiane	ip!, {r1, r3, r8, lr}
	stmiane	ip!, {r1, r3, r8, lr}
	tst	r2, #16
	stmiane	ip!, {r1, r3, r8, lr}
	ldmfd	sp!, {r8, lr}
/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */
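/*
 * The idea: store a few words by hand until ip reaches a 32-byte
 * boundary, after which the main loop can write aligned 64-byte
 * bursts.
 */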
	stmfd	sp!, {r4-r8, lr}
	movs	r8, r8, lsl #(32 - 4)
	stmiacs	ip!, {r4, r5, r6, r7}
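/*
 * r8 holds the distance in bytes to the next 32-byte boundary.
 * Shifting it left by 28 parks bit 4 in the carry flag (and bit 3
 * in the sign flag), letting the head be stored with conditional
 * stmia instructions instead of branches.
 */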
3:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3-r8, lr}
	stmiage	ip!, {r1, r3-r8, lr}
	bgt	3b
	ldmfdeq	sp!, {r4-r8, pc}
	tst	r2, #32
	stmiane	ip!, {r1, r3-r8, lr}
	ldmfd	sp!, {r4-r8, lr}
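/*
 * Callee-saved registers restored; control falls through to the
 * common tail that handles the last few words and bytes.
 */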
/*
 * When we get here, we've got less than 4 bytes to set.  We
 * may have an unaligned pointer as well.
 */
6:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5b			@ 1 bytes to align with?
	cmp	r3, #2			@ 1
	strblt	r1, [ip], #1		@ 1
	strble	r1, [ip], #1		@ 1
	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
	b	1b
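/*
 * Worked example: with r3 = 3 the cmp lets only the final strb
 * execute, storing the single byte needed to reach word alignment,
 * and r2 is left smaller by exactly that one byte before jumping
 * back to the word-filling code at 1b.
 */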