/*
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on glibc cortex strings work originally authored by Linaro
 * and re-licensed under GPLv2 for the Linux kernel. The original code can
 * be found @
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>

/* Fill in the buffer with character c (alignment handled by the hardware). */
	mov	dst, dstin		/* Preserve return value. */
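	/* Broadcast the fill byte in A_lw across all eight bytes of A_l. */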
	orr	A_lw, A_lw, A_lw, lsl #8
	orr	A_lw, A_lw, A_lw, lsl #16
	orr	A_l, A_l, A_l, lsl #32

	/* All of the following stores may be unaligned. */
	/* Is the start address 16-byte aligned? */
	/*
	 * count is at least 16, so we can use stp to store the first 16 bytes,
	 * then advance dst to the next 16-byte boundary so that subsequent
	 * stores are aligned.
	 */
	stp	A_l, A_l, [dst]		/* Unaligned store. */
	/* Make dst 16-byte aligned. */
	sub	count, count, tmp2
	add	dst, dst, tmp2
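	/*
	 * Tail of up to 63 bytes: count & 0x30 gives how many whole 16-byte
	 * blocks are still to be stored before the final sub-16-byte tail.
	 */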
	ands	tmp1, count, #0x30
	stp	A_l, A_l, [dst], #16
	stp	A_l, A_l, [dst], #16
	stp	A_l, A_l, [dst], #16
	/*
	 * Fewer than 16 bytes remain, so write the final 16 bytes with stp.
	 * Some bytes may be written twice and the access may be unaligned.
	 */
	ands	count, count, #15
	stp	A_l, A_l, [dst, #-16]	/* Repeat some/all of last store. */
	/*
	 * Critical loop. Start at a new cache line boundary. Assuming
	 * 64 bytes per line, this ensures the entire loop is in one line.
	 */
	.p2align	L1_CACHE_SHIFT
.Lnot_short:
	sub	dst, dst, #16		/* Pre-bias. */
	sub	count, count, #64
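	/* Main loop: four stp stores fill 64 bytes per iteration. */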
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	stp	A_l, A_l, [dst, #48]
	stp	A_l, A_l, [dst, #64]!
	subs	count, count, #64
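	/* Iterate while at least 64 bytes remain to be stored. */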
	/*
	 * For zeroing memory, check to see if we can use the ZVA feature to
	 * zero entire 'cache' lines.
	 */
	/*
	 * For zeroing small amounts of memory, it's not worth setting up
	 * the line-clear code.
	 */
	cmp	count, #128
	b.lt	.Lnot_short		/* Fall through only if count >= 128. */
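	/*
	 * DCZID_EL0: bit 4 (DZP) set means DC ZVA is prohibited; bits 3:0 give
	 * log2 of the ZVA block size in words.
	 */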
	mrs	tmp1, dczid_el0
	tbnz	tmp1, #4, .Lnot_short
	mov	tmp3w, #4
	and	zva_len, tmp1w, #15	/* Safety: other bits reserved. */
	lsl	zva_len, tmp3w, zva_len	/* zva_len = 4 << BS, in bytes. */
	ands	tmp3w, zva_len, #63
	/*
	 * Ensure zva_len is a multiple of 64, i.e. at least 64 bytes: using
	 * DC ZVA is not worthwhile for smaller block sizes.
	 */
	b.ne	.Lnot_short
	/*
	 * Compute how far we need to go to become suitably aligned. We're
	 * already at quad-word alignment.
	 */
	cmp	count, zva_len_x
	b.lt	.Lnot_short		/* Not enough to reach alignment. */
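	/* zva_bits_x = zva_len_x - 1: alignment mask for one ZVA block. */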
	sub	zva_bits_x, zva_len_x, #1
	neg	tmp2, dst
	ands	tmp2, tmp2, zva_bits_x	/* Bytes to the next ZVA boundary. */
	b.eq	2f			/* Already aligned. */
	/* Not aligned: check that there's enough to zero after alignment. */
	sub	tmp1, count, tmp2
	/* Guarantee that the remaining length is at least 64 bytes and at
	 * least one ZVA block, so the stores below cannot overrun the buffer. */
	cmp	tmp1, #64
	ccmp	tmp1, zva_len_x, #8, ge	/* NZCV=0b1000: force "lt" when tmp1 < 64. */
	b.lt	.Lnot_short
	/*
	 * We know that there's at least 64 bytes to zero and that it's safe
	 * to overrun by 64 bytes.
	 */
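	/* Store zeros up to the next ZVA boundary; any overrun is fixed up below. */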
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	stp	A_l, A_l, [dst, #48]
	/* We've overrun a bit, so adjust dst downwards. */
	add	dst, dst, tmp2
2:
	sub	count, count, zva_len_x
	dc	zva, dst		/* Zero one whole ZVA block. */
	add	dst, dst, zva_len_x
	subs	count, count, zva_len_x
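	/* Any tail shorter than one ZVA block is handled by the normal store path. */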
	ands	count, count, zva_bits_x
	b.ne	.Ltail_maybe_long