/*
 * memcpy - copy memory area
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 * Assumptions:
 *
 * ARMv8-a, AArch64, unaligned accesses.
 *
 */
#include "../asmdefs.h"

/* Register aliases used below: x0-x2 hold the arguments, x3-x5 the derived
   pointers, and x6-x17 the data registers.  G/H and tmp1 reuse registers
   whose earlier values are dead by the time they are needed.  */
#define dstin   x0
#define src     x1
#define count   x2
#define dst     x3
#define srcend  x4
#define dstend  x5
#define A_l     x6
#define A_lw    w6
#define A_h     x7
#define B_l     x8
#define B_lw    w8
#define B_h     x9
#define C_l     x10
#define C_lw    w10
#define C_h     x11
#define D_l     x12
#define D_h     x13
#define E_l     x14
#define E_h     x15
#define F_l     x16
#define F_h     x17
#define G_l     count
#define G_h     dst
#define H_l     src
#define H_h     srcend
#define tmp1    x14

/* This implementation handles overlaps and supports both memcpy and memmove
   from a single entry point.  It uses unaligned accesses and branchless
   sequences to keep the code small and simple, and to improve performance.

   Copies are split into 3 main cases: small copies of up to 32 bytes, medium
   copies of up to 128 bytes, and large copies.  The overhead of the overlap
   check is negligible since it is only required for large copies.

   Large copies use a software pipelined loop processing 64 bytes per
   iteration.  The destination pointer is 16-byte aligned to minimize
   unaligned accesses.  The loop tail is handled by always copying 64 bytes
   from the end.  */
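
/* For orientation only, a rough C model of the direction choice described
   above.  This is an illustrative sketch, not code this routine is generated
   from; it reduces every case to a plain byte loop.

       #include <stddef.h>
       #include <stdint.h>

       static void *copy_model (void *dstin, const void *src, size_t count)
       {
         unsigned char *d = dstin;
         const unsigned char *s = src;

         // Copy backwards only when dst starts inside the source region,
         // i.e. (dst - src) < count as an unsigned comparison.
         if ((uintptr_t) d - (uintptr_t) s < count)
           for (size_t i = count; i-- > 0; )
             d[i] = s[i];                   // cf. L(copy_long_backwards)
         else
           for (size_t i = 0; i < count; i++)
             d[i] = s[i];                   // cf. the forward paths below
         return dstin;
       }
*/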

ENTRY (__memcpy_aarch64)
ENTRY_ALIAS (__memmove_aarch64)
        add srcend, src, count
        add dstend, dstin, count
        cmp count, 128
        b.hi L(copy_long)
        cmp count, 32
        b.hi L(copy32_128)

        /* Small copies: 0..32 bytes. */
        cmp count, 16
        b.lo L(copy16)
        ldp A_l, A_h, [src]
        ldp D_l, D_h, [srcend, -16]
        stp A_l, A_h, [dstin]
        stp D_l, D_h, [dstend, -16]
        ret
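
        /* A hypothetical C equivalent of the 16..32 byte case above, for
           illustration only (not part of this file's build): the head and
           tail 16-byte blocks are both read before either is written, and
           they simply overlap when count < 32.

               #include <string.h>

               static void copy_16_32 (char *dst, const char *src, size_t count)
               {
                 char head[16], tail[16];              // assumes 16 <= count <= 32
                 memcpy (head, src, 16);               // first 16 bytes
                 memcpy (tail, src + count - 16, 16);  // last 16 bytes
                 memcpy (dst, head, 16);
                 memcpy (dst + count - 16, tail, 16);
               }
        */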

        /* Copy 8-15 bytes. */
L(copy16):
        tbz count, 3, L(copy8)
        ldr A_l, [src]
        ldr A_h, [srcend, -8]
        str A_l, [dstin]
        str A_h, [dstend, -8]
        ret

        /* Copy 4-7 bytes. */
L(copy8):
        tbz count, 2, L(copy4)
        ldr A_lw, [src]
        ldr B_lw, [srcend, -4]
        str A_lw, [dstin]
        str B_lw, [dstend, -4]
        ret

        /* Copy 0..3 bytes using a branchless sequence. */
L(copy4):
        cbz count, L(copy0)
        lsr tmp1, count, 1
        ldrb A_lw, [src]
        ldrb C_lw, [srcend, -1]
        ldrb B_lw, [src, tmp1]
        strb A_lw, [dstin]
        strb B_lw, [dstin, tmp1]
        strb C_lw, [dstend, -1]
L(copy0):
        ret
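
        /* Branchless 0..3 byte copy: for count in 1..3 the bytes at offsets
           0, count/2 and count - 1 cover every case (some of them alias), so
           the only branch needed is the cbz for count == 0.  Roughly:
           dst[0] = src[0]; dst[count/2] = src[count/2]; dst[count-1] = src[count-1].  */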

        /* Medium copies: 33..128 bytes. */
L(copy32_128):
        ldp A_l, A_h, [src]
        ldp B_l, B_h, [src, 16]
        ldp C_l, C_h, [srcend, -32]
        ldp D_l, D_h, [srcend, -16]
        cmp count, 64
        b.hi L(copy128)
        stp A_l, A_h, [dstin]
        stp B_l, B_h, [dstin, 16]
        stp C_l, C_h, [dstend, -32]
        stp D_l, D_h, [dstend, -16]
        ret
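
        /* 33..64 bytes: 32 bytes from the start plus 32 bytes from the end;
           the two halves overlap in the middle whenever count < 64.  */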

        /* Copy 65..128 bytes. */
L(copy128):
        ldp E_l, E_h, [src, 32]
        ldp F_l, F_h, [src, 48]
        ldp G_l, G_h, [srcend, -64]
        ldp H_l, H_h, [srcend, -48]
        stp G_l, G_h, [dstend, -64]
        stp H_l, H_h, [dstend, -48]
        stp A_l, A_h, [dstin]
        stp B_l, B_h, [dstin, 16]
        stp E_l, E_h, [dstin, 32]
        stp F_l, F_h, [dstin, 48]
        stp C_l, C_h, [dstend, -32]
        stp D_l, D_h, [dstend, -16]
        ret
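
        /* 65..128 bytes: 64 bytes from the start plus 64 bytes from the end.
           As in the other fixed-size paths, every load is issued before any
           store, so these cases remain correct for overlapping buffers, which
           is why only large copies need the explicit overlap check.  */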

        /* Copy more than 128 bytes. */
L(copy_long):
        /* Use backwards copy if there is an overlap. */
        sub tmp1, dstin, src
        cbz tmp1, L(copy0)
        cmp tmp1, count
        b.lo L(copy_long_backwards)
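
        /* The unsigned comparison (dstin - src) < count is true exactly when
           the destination starts inside the source buffer, where a forward
           copy would overwrite source bytes before they are read.  */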

        /* Copy 16 bytes and then align dst to 16-byte alignment. */
        ldp D_l, D_h, [src]
        and tmp1, dstin, 15
        bic dst, dstin, 15
        sub src, src, tmp1
        add count, count, tmp1 /* Count is now 16 too large. */
        ldp A_l, A_h, [src, 16]
        stp D_l, D_h, [dstin]
        ldp B_l, B_h, [src, 32]
        ldp C_l, C_h, [src, 48]
        ldp D_l, D_h, [src, 64]!
        subs count, count, 128 + 16 /* Test and readjust count. */
        b.ls L(copy64_from_end)
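
        /* The prologue above copies the first 16 bytes unconditionally, rounds
           dst down to a 16-byte boundary and biases src and count by the same
           amount (count is temporarily 16 too large).  The loop below is
           software pipelined: each iteration stores the 64 bytes loaded by the
           previous one while loading the next 64.  */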

L(loop64):
        stp A_l, A_h, [dst, 16]
        ldp A_l, A_h, [src, 16]
        stp B_l, B_h, [dst, 32]
        ldp B_l, B_h, [src, 32]
        stp C_l, C_h, [dst, 48]
        ldp C_l, C_h, [src, 48]
        stp D_l, D_h, [dst, 64]!
        ldp D_l, D_h, [src, 64]!
        subs count, count, 64
        b.hi L(loop64)

        /* Write the last iteration and copy 64 bytes from the end. */
L(copy64_from_end):
        ldp E_l, E_h, [srcend, -64]
        stp A_l, A_h, [dst, 16]
        ldp A_l, A_h, [srcend, -48]
        stp B_l, B_h, [dst, 32]
        ldp B_l, B_h, [srcend, -32]
        stp C_l, C_h, [dst, 48]
        ldp C_l, C_h, [srcend, -16]
        stp D_l, D_h, [dst, 64]
        stp E_l, E_h, [dstend, -64]
        stp A_l, A_h, [dstend, -48]
        stp B_l, B_h, [dstend, -32]
        stp C_l, C_h, [dstend, -16]
        ret
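
        /* No scalar tail loop: the final 64 bytes are always copied from
           srcend/dstend, overlapping whatever the last loop iteration has
           already written.  */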

        /* Large backwards copy for overlapping copies.
           Copy 16 bytes and then align dst to 16-byte alignment. */
L(copy_long_backwards):
        ldp D_l, D_h, [srcend, -16]
        and tmp1, dstend, 15
        sub srcend, srcend, tmp1
        sub count, count, tmp1
        ldp A_l, A_h, [srcend, -16]
        stp D_l, D_h, [dstend, -16]
        ldp B_l, B_h, [srcend, -32]
        ldp C_l, C_h, [srcend, -48]
        ldp D_l, D_h, [srcend, -64]!
        sub dstend, dstend, tmp1
        subs count, count, 128
        b.ls L(copy64_from_start)
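
        /* The backwards prologue mirrors the forward one: the last 16 bytes
           are copied first, then srcend/dstend are both moved down by
           (dstend & 15) so dstend becomes 16-byte aligned, and the loop below
           moves 64 bytes per iteration towards the start.  */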

L(loop64_backwards):
        stp A_l, A_h, [dstend, -16]
        ldp A_l, A_h, [srcend, -16]
        stp B_l, B_h, [dstend, -32]
        ldp B_l, B_h, [srcend, -32]
        stp C_l, C_h, [dstend, -48]
        ldp C_l, C_h, [srcend, -48]
        stp D_l, D_h, [dstend, -64]!
        ldp D_l, D_h, [srcend, -64]!
        subs count, count, 64
        b.hi L(loop64_backwards)

        /* Write the last iteration and copy 64 bytes from the start. */
L(copy64_from_start):
        ldp G_l, G_h, [src, 48]
        stp A_l, A_h, [dstend, -16]
        ldp A_l, A_h, [src, 32]
        stp B_l, B_h, [dstend, -32]
        ldp B_l, B_h, [src, 16]
        stp C_l, C_h, [dstend, -48]
        ldp C_l, C_h, [src]
        stp D_l, D_h, [dstend, -64]
        stp G_l, G_h, [dstin, 48]
        stp A_l, A_h, [dstin, 32]
        stp B_l, B_h, [dstin, 16]
        stp C_l, C_h, [dstin]
        ret
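
        /* Mirror of L(copy64_from_end): the first 64 bytes are reloaded from
           the start of src and written to dstin last, covering whatever the
           final loop iteration left uncopied at the front of the buffer.  */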

END (__memcpy_aarch64)