/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/memset.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
	.text
	.align	5

ENTRY(__memset)
WEAK(memset)
UNWIND( .fnstart         )
	ands	r3, r0, #3		@ 1 unaligned?
	mov	ip, r0			@ preserve r0 as return value
	bne	6f			@ 1
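/*
 * Added note (not in the original source): under the AAPCS, memset
 * arrives with r0 = destination, r1 = fill byte, r2 = byte count.
 * The ands above extracts the low two address bits, so a destination
 * that is not word aligned takes the byte-wise fixup at label 6
 * before rejoining the aligned code.
 */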
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
1:	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16
	mov	r3, r1
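/*
 * Illustrative trace (added commentary): if r1 arrives as 0x000000ab,
 * the first orr makes it 0x0000abab and the second 0xabababab, so
 * every byte lane of r1, and of the copy taken in r3, holds the
 * fill byte.
 */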
7:	cmp	r2, #16
	blt	4f
UNWIND( .fnend              )

#if ! CALGN(1)+0

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
	stmfd	sp!, {r8, lr}
UNWIND( .fnstart            )
UNWIND( .save {r8, lr}      )
	mov	r8, r1
	mov	lr, r3

2:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	bgt	2b
	ldmfdeq	sp!, {r8, pc}		@ Now <64 bytes to go.
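/*
 * Added note: subs leaves the flags set, so the four stmiage stores
 * above only execute while at least 64 bytes remained (ge), and
 * ldmfdeq pops r8 and the return address directly into pc when the
 * count hit exactly zero.
 */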
/*
 * No need to correct the count; we're only testing bits from now on
 */
	tst	r2, #32
	stmiane	ip!, {r1, r3, r8, lr}
	stmiane	ip!, {r1, r3, r8, lr}
	tst	r2, #16
	stmiane	ip!, {r1, r3, r8, lr}
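/*
 * Worked example (added): r2 is negative here, but its low six bits
 * still equal the remaining count, which is why no correction is
 * needed.  With 40 bytes left, bit 5 is set and the first two
 * stmiane stores write 32 bytes; bit 4 is clear, so the 16-byte
 * store is skipped and the final 8 bytes fall through to label 4
 * below.
 */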
	ldmfd	sp!, {r8, lr}
UNWIND( .fnend              )

#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */
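/*
 * Added note: CALGN() is defined in <asm/assembler.h> and expands to
 * its argument only on CPUs that benefit from cacheline-aligned
 * bursts, so exactly one of the two loop variants in this file is
 * ever assembled.
 */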
	stmfd	sp!, {r4-r8, lr}
UNWIND( .fnstart               )
UNWIND( .save {r4-r8, lr}      )
	mov	r4, r1
	mov	r5, r3
	mov	r6, r1
	mov	r7, r3
	mov	r8, r1
	mov	lr, r3

	cmp	r2, #96
	tstgt	ip, #31
	ble	3f

	and	r8, ip, #31
	rsb	r8, r8, #32
	sub	r2, r2, r8
	movs	r8, r8, lsl #(32 - 4)
	stmiacs	ip!, {r4, r5, r6, r7}
	stmiami	ip!, {r4, r5}
	tst	r8, #(1 << 30)
	mov	r8, r1
	strne	r1, [ip], #4
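/*
 * Added note: r8 holds the 4 to 28 bytes needed to reach a 32-byte
 * boundary.  Shifting it left by 28 moves bit 4 into the carry flag
 * and bit 3 into the sign flag, so stmiacs stores 16 bytes and
 * stmiami stores 8; bit 2 lands on bit 30, and the tst plus strne
 * pair writes the last alignment word if it is set.
 */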
3:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3-r8, lr}
	stmiage	ip!, {r1, r3-r8, lr}
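/*
 * Added note: each stmiage above writes eight registers, one whole
 * 32-byte cacheline, so a single pass of this loop stores the 64
 * bytes accounted for by the subs.
 */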
	bgt	3b
	ldmfdeq	sp!, {r4-r8, pc}
	tst	r2, #32
	stmiane	ip!, {r1, r3-r8, lr}
	tst	r2, #16
	stmiane	ip!, {r4-r7}
	ldmfd	sp!, {r4-r8, lr}
UNWIND( .fnend                 )

#endif
UNWIND( .fnstart            )
4:	tst	r2, #8
	stmiane	ip!, {r1, r3}
	tst	r2, #4
	strne	r1, [ip], #4
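/*
 * Added note: fewer than 16 bytes remain at label 4, so bit 3 of the
 * count selects an 8-byte store of the r1/r3 pair and bit 2 a single
 * word store.
 */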
/*
 * When we get here, we've got less than 4 bytes to set.  We
 * may have an unaligned pointer as well.
 */
5:	tst	r2, #2
	strbne	r1, [ip], #1
	strbne	r1, [ip], #1
	tst	r2, #1
	strbne	r1, [ip], #1
	ret	lr
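/*
 * Worked example (added): with r2 = 3, bit 1 is set, so the two
 * strbne instructions store two bytes; bit 0 is set as well, so the
 * last strbne stores the third byte, and ret returns with the
 * original destination still in r0.
 */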
6:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5b			@ 1 bytes to align with?
	cmp	r3, #2			@ 1
	strblt	r1, [ip], #1		@ 1
	strble	r1, [ip], #1		@ 1
	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
	b	1b
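/*
 * Worked example (added): for a destination ending in binary 01,
 * r3 = 1, so after cmp r3, #2 both conditional stores and the plain
 * strb execute, writing the three bytes up to the next word
 * boundary; the subs/add pair charges exactly 4 - r3 = 3 bytes to
 * the count before rejoining the aligned path at label 1.
 */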
UNWIND( .fnend   )
ENDPROC(memset)
ENDPROC(__memset)

ENTRY(__memset32)
UNWIND( .fnstart         )
	mov	r3, r1			@ copy r1 to r3 and fall into memset64
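/*
 * Added note: __memset32 fills with a 32-bit pattern, so duplicating
 * r1 into r3 recreates the paired-register state the word loops
 * expect; __memset64 arrives with its value already split across r1
 * and r3, which is why __memset32 can simply fall through into it.
 */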
UNWIND( .fnend           )
ENDPROC(__memset32)
ENTRY(__memset64)
UNWIND( .fnstart         )
	mov	ip, r0			@ preserve r0 as return value
	b	7b			@ jump into the middle of memset
UNWIND( .fnend           )
ENDPROC(__memset64)