/* strlen - calculate the length of a string

   Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
   See https://llvm.org/LICENSE.txt for license information.
   SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

   Assumptions: ARMv8-a, AArch64, unaligned accesses, min page size 4k.  */
#include "../asmdefs.h"
/* To test the page crossing code path more thoroughly, compile with
   -DTEST_PAGE_CROSS - this will force all calls through the slower
   entry path.  This option is not intended for production use.  */
/* Arguments and results.  */
#define srcin		x0
#define len		x0

/* Locals and temporaries.  */
#define src		x1
#define data1		x2
#define data2		x3
#define has_nul1	x4
#define has_nul2	x5
#define tmp1		x4
#define tmp2		x5
#define tmp3		x6
#define tmp4		x7
#define zeroones	x8
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
   (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
   can be done in parallel across the entire word.  A faster check
   (X - 1) & 0x80 is zero for non-NUL ASCII characters, but gives
   false hits for characters 129..255.  */
/* 0x01 / 0x7f / 0x80 replicated into every byte of a 64-bit word,
   used by the parallel NUL-byte checks below.  */
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
#ifdef TEST_PAGE_CROSS
/* A tiny "page size" forces every call through the slow page-cross
   entry path (test builds only).  */
# define MIN_PAGE_SIZE 15
#else
# define MIN_PAGE_SIZE 4096
#endif
/* Since strings are short on average, we check the first 16 bytes
   of the string for a NUL character.  In order to do an unaligned ldp
   safely we have to do a page cross check first.  If there is a NUL
   byte we calculate the length from the 2 8-byte words using
   conditional select to reduce branch mispredictions (it is unlikely
   __strlen_aarch64 will be repeatedly called on strings with the same length).

   If the string is longer than 16 bytes, we align src so don't need
   further page cross checks, and process 32 bytes per iteration
   using the fast NUL check.  If we encounter non-ASCII characters,
   fallback to a second loop using the full NUL check.

   If the page cross check fails, we read 16 bytes from an aligned
   address, remove any characters before the string, and continue
   in the main loop using aligned loads.  Since strings crossing a
   page in the first 16 bytes are rare (probability of
   16/MIN_PAGE_SIZE ~= 0.4%), this case does not need to be optimized.

   AArch64 systems have a minimum page size of 4k.  We don't bother
   checking for larger page sizes - the cost of setting up the correct
   page size is just not worth the extra gain from a small reduction in
   the cases taking the slow path.  Note that we only care about
   whether the first fetch, which may be misaligned, crosses a page
   boundary.  */
/* size_t __strlen_aarch64 (const char *srcin)
   In:   x0 = srcin, pointer to a NUL-terminated string.
   Out:  x0 = length in bytes, excluding the terminator.
   Clobbers: x1-x8 and flags.  Leaf function; no stack use.
   NOTE(review): branches, labels and endian guards restored to match
   the structure implied by the surviving instructions — confirm
   against the upstream ARM optimized-routines strlen.S.  */
ENTRY (__strlen_aarch64)
	and	tmp1, srcin, MIN_PAGE_SIZE - 1
	mov	zeroones, REP8_01
	cmp	tmp1, MIN_PAGE_SIZE - 16
	b.gt	L(page_cross)		/* Unaligned 16-byte load would cross a page.  */
	ldp	data1, data2, [srcin]
#ifdef __AARCH64EB__
	/* For big-endian, carry propagation (if the final byte in the
	   string is 0x01) means we cannot use has_nul1/2 directly.
	   Since we expect strings to be small and early-exit,
	   byte-swap the data now so has_null1/2 will be correct.  */
	rev	data1, data1
	rev	data2, data2
#endif
	/* Accurate NUL check: (X - 1) & ~(X | 0x7f) per 8-byte word.  */
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, REP8_7f
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, REP8_7f
	bics	has_nul1, tmp1, tmp2
	bic	has_nul2, tmp3, tmp4
	ccmp	has_nul2, 0, 0, eq
	beq	L(main_loop_entry)	/* No NUL in the first 16 bytes.  */

	/* Enter with C = has_nul1 == 0.  */
	csel	has_nul1, has_nul1, has_nul2, cc
	mov	len, 8
	rev	has_nul1, has_nul1	/* Put first NUL's marker at the high end.  */
	clz	tmp1, has_nul1		/* 8 * byte-index of the NUL.  */
	csel	len, xzr, len, cc	/* NUL in word 1 -> base 0, word 2 -> base 8.  */
	add	len, len, tmp1, lsr 3
	ret

	/* The inner loop processes 32 bytes per iteration and uses the fast
	   NUL check.  If we encounter non-ASCII characters, use a second
	   loop with the accurate NUL check.  */
	.p2align 4
L(main_loop_entry):
	bic	src, srcin, 15		/* Align down; pre-bias for the 32-byte pre-increment.  */
	sub	src, src, 16
L(main_loop):
	ldp	data1, data2, [src, 32]!
L(page_cross_entry):
	/* Fast check: (X - 1) & 0x80 over both words (false hits on 129..255).  */
	sub	tmp1, data1, zeroones
	sub	tmp3, data2, zeroones
	orr	tmp2, tmp1, tmp3
	tst	tmp2, zeroones, lsl 7
	bne	1f
	ldp	data1, data2, [src, 16]
	sub	tmp1, data1, zeroones
	sub	tmp3, data2, zeroones
	orr	tmp2, tmp1, tmp3
	tst	tmp2, zeroones, lsl 7
	beq	L(main_loop)
	add	src, src, 16
1:
	/* The fast check failed, so do the slower, accurate NUL check.  */
	orr	tmp2, data1, REP8_7f
	orr	tmp4, data2, REP8_7f
	bics	has_nul1, tmp1, tmp2
	bic	has_nul2, tmp3, tmp4
	ccmp	has_nul2, 0, 0, eq
	beq	L(nonascii_loop)	/* Fast-check hit was a non-ASCII false positive.  */

	/* Enter with C = has_nul1 == 0.  */
L(tail):
#ifdef __AARCH64EB__
	/* For big-endian, carry propagation (if the final byte in the
	   string is 0x01) means we cannot use has_nul1/2 directly.  The
	   easiest way to get the correct byte is to byte-swap the data
	   and calculate the syndrome a second time.  */
	csel	data1, data1, data2, cc
	rev	data1, data1
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, REP8_7f
	bic	has_nul1, tmp1, tmp2
#else
	csel	has_nul1, has_nul1, has_nul2, cc
#endif
	sub	len, src, srcin		/* Bytes before the current 16-byte block.  */
	rev	has_nul1, has_nul1
	add	tmp2, len, 8
	clz	tmp1, has_nul1
	csel	len, len, tmp2, cc	/* +8 if the NUL is in the second word.  */
	add	len, len, tmp1, lsr 3
	ret

L(nonascii_loop):
	/* Accurate-check loop, 32 bytes per iteration, for non-ASCII data.  */
	ldp	data1, data2, [src, 16]!
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, REP8_7f
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, REP8_7f
	bics	has_nul1, tmp1, tmp2
	bic	has_nul2, tmp3, tmp4
	ccmp	has_nul2, 0, 0, eq
	bne	L(tail)
	ldp	data1, data2, [src, 16]!
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, REP8_7f
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, REP8_7f
	bics	has_nul1, tmp1, tmp2
	bic	has_nul2, tmp3, tmp4
	ccmp	has_nul2, 0, 0, eq
	beq	L(nonascii_loop)
	b	L(tail)

	/* Load 16 bytes from [srcin & ~15] and force the bytes that precede
	   srcin to 0x7f, so we ignore any NUL bytes before the string.
	   Then continue in the aligned loop.  */
L(page_cross):
	bic	src, srcin, 15
	ldp	data1, data2, [src]
	lsl	tmp1, srcin, 3		/* Bit offset of the string within the block.  */
	mov	tmp4, -1
#ifdef __AARCH64EB__
	/* Big-endian.  Early bytes are at MSB.  */
	lsr	tmp1, tmp4, tmp1	/* Shift (tmp1 & 63).  */
#else
	/* Little-endian.  Early bytes are at LSB.  */
	lsl	tmp1, tmp4, tmp1	/* Shift (tmp1 & 63).  */
#endif
	orr	tmp1, tmp1, REP8_80	/* Mask keeps high bits set so orn yields 0x7f.  */
	orn	data1, data1, tmp1
	orn	tmp2, data2, tmp1
	tst	srcin, 8		/* Does the string start in the second word?  */
	csel	data1, data1, tmp4, eq
	csel	data2, data2, tmp2, eq
	b	L(page_cross_entry)

END (__strlen_aarch64)