/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/memset.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
	ands	r3, r0, #3		@ 1 unaligned?
	mov	ip, r0			@ preserve r0 as return value
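/*
 * r3 now holds dst & 3; non-zero means the destination is not word
 * aligned. All stores go through ip so that r0 reaches the return
 * untouched - memset must hand back its first argument.
 */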
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
1:	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16
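/*
 * These two orrs replicate the fill byte across the whole word,
 * e.g. 0x000000ab -> 0x0000abab -> 0xabababab.
 */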
/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
UNWIND( .save {r8, lr}	)
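/*
 * With the pattern in r1, r3, r8 and lr, each stmia below writes four
 * words (16 bytes); four back-to-back stores make 64 bytes per
 * iteration.
 */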
	stmiage	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	ldmfdeq	sp!, {r8, pc}		@ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on
 */
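/*
 * Subtracting 64 per iteration only disturbs bits 6 and up of r2, so
 * bits 5 (32) and 4 (16) of the now-negative count still indicate
 * whether a 32- or 16-byte block remains - tst on the uncorrected
 * value is sufficient.
 */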
	stmiane	ip!, {r1, r3, r8, lr}
	stmiane	ip!, {r1, r3, r8, lr}
	stmiane	ip!, {r1, r3, r8, lr}
/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */
	stmfd	sp!, {r4-r8, lr}
UNWIND( .save {r4-r8, lr}	)
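/*
 * r4-r8 and lr are likewise loaded with the fill pattern, giving eight
 * pattern registers in all: one stmia of {r1, r3-r8, lr} then covers
 * 32 bytes, a whole cache line on the CPUs this variant targets.
 */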
	movs	r8, r8, lsl #(32 - 4)
	stmiacs	ip!, {r4, r5, r6, r7}
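/*
 * The movs pushes bit 4 (value 16) of the alignment byte count out
 * into the carry flag, so the stmiacs above stores 16 pattern bytes
 * exactly when alignment requires them; the remaining low bits are
 * dispatched the same way from the other condition flags.
 */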
	stmiage	ip!, {r1, r3-r8, lr}
	stmiage	ip!, {r1, r3-r8, lr}
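/*
 * Two 32-byte stores per trip keep this loop at the same 64 bytes per
 * iteration as the non-aligned variant above.
 */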
	ldmfdeq	sp!, {r4-r8, pc}
	stmiane	ip!, {r1, r3-r8, lr}
	ldmfd	sp!, {r4-r8, lr}
	stmiane	ip!, {r1, r3}
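/*
 * Small tail: this conditional two-word store clears 8 bytes when the
 * corresponding bit of the remaining count is set.
 */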
/*
 * When we get here, we've got less than 4 bytes to set.  We
 * may have an unaligned pointer as well.
 */
6:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5b			@ 1 bytes to align with?
	strblt	r1, [ip], #1		@ 1
	strble	r1, [ip], #1		@ 1
	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
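/*
 * r3 = dst & 3, so the conditional strbs above write the 4 - r3 bytes
 * needed to word-align ip. The subs already removed 4 from the count,
 * hence adding r3 back nets r2 - (4 - r3).
 */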
	mov	r3, r1			@ copy r1 to r3 and fall into memset64
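/*
 * For a 32-bit fill the low and high words of the equivalent 64-bit
 * pattern are identical, so duplicating r1 into r3 leaves the
 * registers exactly as the memset64 entry below expects them.
 */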
	mov	ip, r0			@ preserve r0 as return value
	b	7b			@ jump into the middle of memset
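/*
 * The branch re-enters memset past its byte-replication step, which
 * is safe here because r1 and r3 already hold full pattern words.
 */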