 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on glibc cortex strings work originally authored by Linaro
 * and re-licensed under GPLv2 for the Linux kernel. The original code can
 * be found at:
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>
 * Copy a buffer from src to dest (alignment handled by the hardware)
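 *
 * Parameters:
 *	x0 - dest
 *	x1 - src
 *	x2 - n
 * Returns:
 *	x0 - dest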
	/* When the copy length is less than 16, the accesses are not aligned. */
	ands	tmp2, tmp2, #15	/* Bytes to reach alignment. */
	sub	count, count, tmp2
 * Copy the leading bytes from src to dst in increasing address
 * order. This eliminates the risk of overwriting source data when
 * the distance between src and dst is less than 16. The memory
 * accesses here are aligned.
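 * For example, when dst lies only a few bytes below src, every head
 * byte is loaded before any store can clobber it, so no source data
 * is lost.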
	tbz	tmp2, #3, .LSrcAligned
 * Deal with small copies quickly by dropping straight into the
 * exit block.
 *
 * Copy up to 48 bytes of data. At this point we only need the
 * bottom 6 bits of count to be accurate.
	ands	tmp1, count, #0x30
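	/*
	 * tmp1 = count & 0x30 is the number of whole 16-byte blocks
	 * (0 to 3) still to copy; they are handled by the ldp/stp
	 * pairs below.
	 */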
	ldp	A_l, A_h, [src], #16
	stp	A_l, A_h, [dst], #16
	ldp	A_l, A_h, [src], #16
	stp	A_l, A_h, [dst], #16
	ldp	A_l, A_h, [src], #16
	stp	A_l, A_h, [dst], #16
 * Prefer to break one ldp/stp into several load/store operations so
 * that memory is accessed in increasing address order, rather than
 * loading/storing 16 bytes from (src-16) to (dst-16) and rewinding
 * src to an aligned address, as the original cortex-strings memcpy
 * does. If the original scheme were kept here, memmove would have to
 * satisfy the precondition that the src address is at least 16 bytes
 * above the dst address; otherwise some source data would be
 * overwritten when memmove calls memcpy directly. To keep memmove
 * simple and to decouple it from memcpy's internals, the original
 * scheme was dropped.
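 * As a result, memmove only needs dst to be below src to reuse this
 * forward copy; it no longer has to guarantee that src - dst >= 16.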
	ldrh	tmp1w, [src], #2
	strh	tmp1w, [dst], #2
	tbz	count, #0, .Lexitfunc
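	/* If bit 0 of count is clear there is no trailing odd byte to copy. */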
	subs	count, count, #128
	b.ge	.Lcpy_body_large
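	/*
	 * count is now biased down by 128: b.ge means at least 128 bytes
	 * remain, enough for the large-copy loop to pre-load one 64-byte
	 * block and still store a full block on its first pass.
	 */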
 * Less than 128 bytes to copy, so handle 64 bytes here and then jump
 * to the tail.
	ldp	A_l, A_h, [src], #16
	stp	A_l, A_h, [dst], #16
	ldp	B_l, B_h, [src], #16
	ldp	C_l, C_h, [src], #16
	stp	B_l, B_h, [dst], #16
	stp	C_l, C_h, [dst], #16
	ldp	D_l, D_h, [src], #16
	stp	D_l, D_h, [dst], #16
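	/*
	 * At most 63 bytes now remain; any leftover is finished by the
	 * tail-copy code above (the "copy up to 48 bytes" path).
	 */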
 * Critical loop. Start at a new cache line boundary. Assuming
 * 64 bytes per line this ensures the entire loop is in one line.
	.p2align	L1_CACHE_SHIFT
	/* Pre-load the first 64 bytes of data. */
	ldp	A_l, A_h, [src], #16
	ldp	B_l, B_h, [src], #16
	ldp	C_l, C_h, [src], #16
	ldp	D_l, D_h, [src], #16
 * Interleave the load of the next 64-byte data block with the store
 * of the 64 bytes loaded previously.
	stp	A_l, A_h, [dst], #16
	ldp	A_l, A_h, [src], #16
	stp	B_l, B_h, [dst], #16
	ldp	B_l, B_h, [src], #16
	stp	C_l, C_h, [dst], #16
	ldp	C_l, C_h, [src], #16
	stp	D_l, D_h, [dst], #16
	ldp	D_l, D_h, [src], #16
	subs	count, count, #64
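	/*
	 * The 64 bytes loaded on the final pass have not been stored yet;
	 * write them out below.
	 */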
	stp	A_l, A_h, [dst], #16
	stp	B_l, B_h, [dst], #16
	stp	C_l, C_h, [dst], #16
	stp	D_l, D_h, [dst], #16