; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
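
; These tests check the lowering of llvm.get.active.lane.mask for SVE. As a
; rough, illustrative sketch (not part of the checked output; %i, %n, %p and
; the masked load are hypothetical), the intrinsic is typically consumed by a
; tail-folded loop along the lines of:
;   %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 %i, i64 %n)
;   %v = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %p, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> poison)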
define <vscale x 16 x i1> @lane_mask_nxv16i1_i32(i32 %index, i32 %TC) {
; CHECK-LABEL: lane_mask_nxv16i1_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.b, w0, w1
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 %index, i32 %TC)
  ret <vscale x 16 x i1> %active.lane.mask
}

define <vscale x 8 x i1> @lane_mask_nxv8i1_i32(i32 %index, i32 %TC) {
; CHECK-LABEL: lane_mask_nxv8i1_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.h, w0, w1
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 %index, i32 %TC)
  ret <vscale x 8 x i1> %active.lane.mask
}

define <vscale x 4 x i1> @lane_mask_nxv4i1_i32(i32 %index, i32 %TC) {
; CHECK-LABEL: lane_mask_nxv4i1_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.s, w0, w1
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 %index, i32 %TC)
  ret <vscale x 4 x i1> %active.lane.mask
}

define <vscale x 2 x i1> @lane_mask_nxv2i1_i32(i32 %index, i32 %TC) {
; CHECK-LABEL: lane_mask_nxv2i1_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.d, w0, w1
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32 %index, i32 %TC)
  ret <vscale x 2 x i1> %active.lane.mask
}

define <vscale x 16 x i1> @lane_mask_nxv16i1_i64(i64 %index, i64 %TC) {
; CHECK-LABEL: lane_mask_nxv16i1_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.b, x0, x1
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 %index, i64 %TC)
  ret <vscale x 16 x i1> %active.lane.mask
}

define <vscale x 8 x i1> @lane_mask_nxv8i1_i64(i64 %index, i64 %TC) {
; CHECK-LABEL: lane_mask_nxv8i1_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.h, x0, x1
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 %index, i64 %TC)
  ret <vscale x 8 x i1> %active.lane.mask
}

define <vscale x 4 x i1> @lane_mask_nxv4i1_i64(i64 %index, i64 %TC) {
; CHECK-LABEL: lane_mask_nxv4i1_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.s, x0, x1
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 %index, i64 %TC)
  ret <vscale x 4 x i1> %active.lane.mask
}

define <vscale x 2 x i1> @lane_mask_nxv2i1_i64(i64 %index, i64 %TC) {
; CHECK-LABEL: lane_mask_nxv2i1_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.d, x0, x1
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 %index, i64 %TC)
  ret <vscale x 2 x i1> %active.lane.mask
}
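
; There is no whilelo form that takes i8 operands, so (as the checks below
; suggest) the mask is built manually for an i8 index: a lane-index vector is
; added to the splatted start index with saturation, or zero-extended and
; clamped with umin, then compared against the trip count with cmphi.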
define <vscale x 16 x i1> @lane_mask_nxv16i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_nxv16i1_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.b, #0, #1
; CHECK-NEXT:    mov z1.b, w0
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    uqadd z0.b, z0.b, z1.b
; CHECK-NEXT:    mov z1.b, w1
; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i8(i8 %index, i8 %TC)
  ret <vscale x 16 x i1> %active.lane.mask
}

define <vscale x 8 x i1> @lane_mask_nxv8i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_nxv8i1_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.h, #0, #1
; CHECK-NEXT:    mov z1.h, w0
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    and z1.h, z1.h, #0xff
; CHECK-NEXT:    and z0.h, z0.h, #0xff
; CHECK-NEXT:    add z0.h, z0.h, z1.h
; CHECK-NEXT:    mov z1.h, w1
; CHECK-NEXT:    umin z0.h, z0.h, #255
; CHECK-NEXT:    and z1.h, z1.h, #0xff
; CHECK-NEXT:    cmphi p0.h, p0/z, z1.h, z0.h
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i8(i8 %index, i8 %TC)
  ret <vscale x 8 x i1> %active.lane.mask
}

define <vscale x 4 x i1> @lane_mask_nxv4i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_nxv4i1_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, #0, #1
; CHECK-NEXT:    and w8, w0, #0xff
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    mov z1.s, w8
; CHECK-NEXT:    and w8, w1, #0xff
; CHECK-NEXT:    and z0.s, z0.s, #0xff
; CHECK-NEXT:    add z0.s, z0.s, z1.s
; CHECK-NEXT:    mov z1.s, w8
; CHECK-NEXT:    umin z0.s, z0.s, #255
; CHECK-NEXT:    cmphi p0.s, p0/z, z1.s, z0.s
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i8(i8 %index, i8 %TC)
  ret <vscale x 4 x i1> %active.lane.mask
}

define <vscale x 2 x i1> @lane_mask_nxv2i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_nxv2i1_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.d, #0, #1
; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
; CHECK-NEXT:    and x8, x0, #0xff
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z1.d, x8
; CHECK-NEXT:    and x8, x1, #0xff
; CHECK-NEXT:    and z0.d, z0.d, #0xff
; CHECK-NEXT:    add z0.d, z0.d, z1.d
; CHECK-NEXT:    mov z1.d, x8
; CHECK-NEXT:    umin z0.d, z0.d, #255
; CHECK-NEXT:    cmphi p0.d, p0/z, z1.d, z0.d
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i8(i8 %index, i8 %TC)
  ret <vscale x 2 x i1> %active.lane.mask
}
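
; nxv32i1 is wider than a single predicate register, so the result below is
; returned in two predicates (p0 and p1). For the i32/i64 variants enough
; predicates are live at once that callee-saved p-registers get spilled.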
define <vscale x 32 x i1> @lane_mask_nxv32i1_i32(i32 %index, i32 %TC) {
; CHECK-LABEL: lane_mask_nxv32i1_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    index z0.s, #0, #1
; CHECK-NEXT:    mov z1.s, w0
; CHECK-NEXT:    mov z25.s, w1
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    mov z2.d, z0.d
; CHECK-NEXT:    mov z3.d, z0.d
; CHECK-NEXT:    uqadd z6.s, z0.s, z1.s
; CHECK-NEXT:    incw z0.s, all, mul #4
; CHECK-NEXT:    incw z2.s
; CHECK-NEXT:    incw z3.s, all, mul #2
; CHECK-NEXT:    uqadd z0.s, z0.s, z1.s
; CHECK-NEXT:    cmphi p2.s, p0/z, z25.s, z6.s
; CHECK-NEXT:    mov z4.d, z2.d
; CHECK-NEXT:    uqadd z5.s, z2.s, z1.s
; CHECK-NEXT:    uqadd z7.s, z3.s, z1.s
; CHECK-NEXT:    incw z2.s, all, mul #4
; CHECK-NEXT:    incw z3.s, all, mul #4
; CHECK-NEXT:    cmphi p5.s, p0/z, z25.s, z0.s
; CHECK-NEXT:    incw z4.s, all, mul #2
; CHECK-NEXT:    uqadd z2.s, z2.s, z1.s
; CHECK-NEXT:    uqadd z3.s, z3.s, z1.s
; CHECK-NEXT:    cmphi p1.s, p0/z, z25.s, z5.s
; CHECK-NEXT:    cmphi p3.s, p0/z, z25.s, z7.s
; CHECK-NEXT:    uqadd z24.s, z4.s, z1.s
; CHECK-NEXT:    incw z4.s, all, mul #4
; CHECK-NEXT:    cmphi p6.s, p0/z, z25.s, z2.s
; CHECK-NEXT:    cmphi p7.s, p0/z, z25.s, z3.s
; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
; CHECK-NEXT:    uqadd z1.s, z4.s, z1.s
; CHECK-NEXT:    cmphi p4.s, p0/z, z25.s, z24.s
; CHECK-NEXT:    cmphi p0.s, p0/z, z25.s, z1.s
; CHECK-NEXT:    uzp1 p2.h, p3.h, p4.h
; CHECK-NEXT:    uzp1 p3.h, p5.h, p6.h
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p4.h, p7.h, p0.h
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p0.b, p1.b, p2.b
; CHECK-NEXT:    uzp1 p1.b, p3.b, p4.b
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i32(i32 %index, i32 %TC)
  ret <vscale x 32 x i1> %active.lane.mask
}

define <vscale x 32 x i1> @lane_mask_nxv32i1_i64(i64 %index, i64 %TC) {
; CHECK-LABEL: lane_mask_nxv32i1_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-2
; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    index z5.d, #0, #1
; CHECK-NEXT:    mov z0.d, x0
; CHECK-NEXT:    mov z3.d, x1
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z2.d, z5.d
; CHECK-NEXT:    mov z1.d, z5.d
; CHECK-NEXT:    mov z4.d, z5.d
; CHECK-NEXT:    uqadd z25.d, z5.d, z0.d
; CHECK-NEXT:    incd z5.d, all, mul #8
; CHECK-NEXT:    incd z2.d
; CHECK-NEXT:    incd z1.d, all, mul #2
; CHECK-NEXT:    incd z4.d, all, mul #4
; CHECK-NEXT:    uqadd z5.d, z5.d, z0.d
; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z25.d
; CHECK-NEXT:    mov z6.d, z2.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov z24.d, z1.d
; CHECK-NEXT:    uqadd z26.d, z2.d, z0.d
; CHECK-NEXT:    uqadd z27.d, z1.d, z0.d
; CHECK-NEXT:    uqadd z28.d, z4.d, z0.d
; CHECK-NEXT:    incd z2.d, all, mul #8
; CHECK-NEXT:    incd z1.d, all, mul #8
; CHECK-NEXT:    incd z4.d, all, mul #8
; CHECK-NEXT:    incd z6.d, all, mul #2
; CHECK-NEXT:    incd z7.d, all, mul #4
; CHECK-NEXT:    incd z24.d, all, mul #4
; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z26.d
; CHECK-NEXT:    cmphi p2.d, p0/z, z3.d, z27.d
; CHECK-NEXT:    cmphi p1.d, p0/z, z3.d, z28.d
; CHECK-NEXT:    mov z31.d, z6.d
; CHECK-NEXT:    uqadd z29.d, z6.d, z0.d
; CHECK-NEXT:    uqadd z30.d, z7.d, z0.d
; CHECK-NEXT:    uqadd z8.d, z24.d, z0.d
; CHECK-NEXT:    incd z6.d, all, mul #8
; CHECK-NEXT:    incd z7.d, all, mul #8
; CHECK-NEXT:    incd z24.d, all, mul #8
; CHECK-NEXT:    uqadd z2.d, z2.d, z0.d
; CHECK-NEXT:    uqadd z1.d, z1.d, z0.d
; CHECK-NEXT:    incd z31.d, all, mul #4
; CHECK-NEXT:    uqadd z4.d, z4.d, z0.d
; CHECK-NEXT:    uzp1 p3.s, p3.s, p4.s
; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z29.d
; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z30.d
; CHECK-NEXT:    uqadd z6.d, z6.d, z0.d
; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z8.d
; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    uqadd z7.d, z7.d, z0.d
; CHECK-NEXT:    uqadd z25.d, z31.d, z0.d
; CHECK-NEXT:    incd z31.d, all, mul #8
; CHECK-NEXT:    uqadd z24.d, z24.d, z0.d
; CHECK-NEXT:    cmphi p4.d, p0/z, z3.d, z5.d
; CHECK-NEXT:    uzp1 p2.s, p2.s, p5.s
; CHECK-NEXT:    cmphi p5.d, p0/z, z3.d, z2.d
; CHECK-NEXT:    cmphi p9.d, p0/z, z3.d, z6.d
; CHECK-NEXT:    uqadd z0.d, z31.d, z0.d
; CHECK-NEXT:    uzp1 p1.s, p1.s, p7.s
; CHECK-NEXT:    cmphi p7.d, p0/z, z3.d, z1.d
; CHECK-NEXT:    cmphi p8.d, p0/z, z3.d, z25.d
; CHECK-NEXT:    uzp1 p2.h, p3.h, p2.h
; CHECK-NEXT:    cmphi p3.d, p0/z, z3.d, z7.d
; CHECK-NEXT:    uzp1 p4.s, p4.s, p5.s
; CHECK-NEXT:    uzp1 p5.s, p7.s, p9.s
; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p6.s, p6.s, p8.s
; CHECK-NEXT:    cmphi p8.d, p0/z, z3.d, z4.d
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p4.h, p4.h, p5.h
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.h, p1.h, p6.h
; CHECK-NEXT:    cmphi p6.d, p0/z, z3.d, z24.d
; CHECK-NEXT:    cmphi p0.d, p0/z, z3.d, z0.d
; CHECK-NEXT:    uzp1 p3.s, p8.s, p3.s
; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p0.s, p6.s, p0.s
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p3.h, p3.h, p0.h
; CHECK-NEXT:    uzp1 p0.b, p2.b, p1.b
; CHECK-NEXT:    uzp1 p1.b, p4.b, p3.b
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #2
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 %index, i64 %TC)
  ret <vscale x 32 x i1> %active.lane.mask
}

define <vscale x 32 x i1> @lane_mask_nxv32i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_nxv32i1_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.b, #0, #1
; CHECK-NEXT:    rdvl x8, #1
; CHECK-NEXT:    mov z2.b, w0
; CHECK-NEXT:    mov z1.b, w8
; CHECK-NEXT:    ptrue p1.b
; CHECK-NEXT:    add z1.b, z0.b, z1.b
; CHECK-NEXT:    uqadd z0.b, z0.b, z2.b
; CHECK-NEXT:    uqadd z1.b, z1.b, z2.b
; CHECK-NEXT:    mov z2.b, w1
; CHECK-NEXT:    cmphi p0.b, p1/z, z2.b, z0.b
; CHECK-NEXT:    cmphi p1.b, p1/z, z2.b, z1.b
; CHECK-NEXT:    ret
  %active.lane.mask = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i8(i8 %index, i8 %TC)
  ret <vscale x 32 x i1> %active.lane.mask
}

; UTC_ARGS: --disable
; This test exists to protect against a compiler crash caused by an attempt to
; convert (via changeVectorElementType) an MVT into an EVT, which is impossible.
; The test's output is large and not relevant, so check lines have been disabled.
define <vscale x 64 x i1> @lane_mask_nxv64i1_i64(i64 %index, i64 %TC) {
; CHECK-LABEL: lane_mask_nxv64i1_i64:
  %active.lane.mask = call <vscale x 64 x i1> @llvm.get.active.lane.mask.nxv64i1.i64(i64 %index, i64 %TC)
  ret <vscale x 64 x i1> %active.lane.mask
}
; UTC_ARGS: --enable
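
; Fixed-width results still use SVE whilelo where the index type allows it;
; the predicate is then expanded to all-ones/all-zeros lanes with a predicated
; mov, and only the NEON-sized subregister is kept (hence the "kill" comments
; in the checks below).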
define <16 x i1> @lane_mask_v16i1_i32(i32 %index, i32 %TC) {
; CHECK-LABEL: lane_mask_v16i1_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.b, w0, w1
; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %TC)
  ret <16 x i1> %active.lane.mask
}

define <8 x i1> @lane_mask_v8i1_i32(i32 %index, i32 %TC) {
; CHECK-LABEL: lane_mask_v8i1_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.b, w0, w1
; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT:    ret
  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %TC)
  ret <8 x i1> %active.lane.mask
}

define <4 x i1> @lane_mask_v4i1_i32(i32 %index, i32 %TC) {
; CHECK-LABEL: lane_mask_v4i1_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.h, w0, w1
; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT:    ret
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %TC)
  ret <4 x i1> %active.lane.mask
}

define <2 x i1> @lane_mask_v2i1_i32(i32 %index, i32 %TC) {
; CHECK-LABEL: lane_mask_v2i1_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.s, w0, w1
; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT:    ret
  %active.lane.mask = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 %index, i32 %TC)
  ret <2 x i1> %active.lane.mask
}

define <16 x i1> @lane_mask_v16i1_i64(i64 %index, i64 %TC) {
; CHECK-LABEL: lane_mask_v16i1_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.b, x0, x1
; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 %index, i64 %TC)
  ret <16 x i1> %active.lane.mask
}

define <8 x i1> @lane_mask_v8i1_i64(i64 %index, i64 %TC) {
; CHECK-LABEL: lane_mask_v8i1_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.b, x0, x1
; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT:    ret
  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 %index, i64 %TC)
  ret <8 x i1> %active.lane.mask
}

define <4 x i1> @lane_mask_v4i1_i64(i64 %index, i64 %TC) {
; CHECK-LABEL: lane_mask_v4i1_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.h, x0, x1
; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT:    ret
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %index, i64 %TC)
  ret <4 x i1> %active.lane.mask
}

define <2 x i1> @lane_mask_v2i1_i64(i64 %index, i64 %TC) {
; CHECK-LABEL: lane_mask_v2i1_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.s, x0, x1
; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT:    ret
  %active.lane.mask = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 %index, i64 %TC)
  ret <2 x i1> %active.lane.mask
}
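
; With an i8 index the fixed-width masks stay in NEON: the v16i8/v8i8 cases
; use a saturating uqadd plus cmhi directly, while the widened 16-bit and
; 32-bit element cases mask the operands to eight bits and clamp with umin.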
define <16 x i1> @lane_mask_v16i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_v16i1_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.b, #0, #1
; CHECK-NEXT:    dup v1.16b, w0
; CHECK-NEXT:    uqadd v0.16b, v1.16b, v0.16b
; CHECK-NEXT:    dup v1.16b, w1
; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
; CHECK-NEXT:    ret
  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i8(i8 %index, i8 %TC)
  ret <16 x i1> %active.lane.mask
}

define <8 x i1> @lane_mask_v8i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_v8i1_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.b, #0, #1
; CHECK-NEXT:    dup v1.8b, w0
; CHECK-NEXT:    uqadd v0.8b, v1.8b, v0.8b
; CHECK-NEXT:    dup v1.8b, w1
; CHECK-NEXT:    cmhi v0.8b, v1.8b, v0.8b
; CHECK-NEXT:    ret
  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i8(i8 %index, i8 %TC)
  ret <8 x i1> %active.lane.mask
}

define <4 x i1> @lane_mask_v4i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_v4i1_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    dup v0.4h, w0
; CHECK-NEXT:    index z1.h, #0, #1
; CHECK-NEXT:    movi d2, #0xff00ff00ff00ff
; CHECK-NEXT:    dup v3.4h, w1
; CHECK-NEXT:    bic v0.4h, #255, lsl #8
; CHECK-NEXT:    bic v3.4h, #255, lsl #8
; CHECK-NEXT:    add v0.4h, v0.4h, v1.4h
; CHECK-NEXT:    umin v0.4h, v0.4h, v2.4h
; CHECK-NEXT:    cmhi v0.4h, v3.4h, v0.4h
; CHECK-NEXT:    ret
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i8(i8 %index, i8 %TC)
  ret <4 x i1> %active.lane.mask
}

define <2 x i1> @lane_mask_v2i1_i8(i8 %index, i8 %TC) {
; CHECK-LABEL: lane_mask_v2i1_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi d0, #0x0000ff000000ff
; CHECK-NEXT:    dup v1.2s, w0
; CHECK-NEXT:    index z2.s, #0, #1
; CHECK-NEXT:    dup v3.2s, w1
; CHECK-NEXT:    and v1.8b, v1.8b, v0.8b
; CHECK-NEXT:    add v1.2s, v1.2s, v2.2s
; CHECK-NEXT:    and v2.8b, v3.8b, v0.8b
; CHECK-NEXT:    umin v0.2s, v1.2s, v0.2s
; CHECK-NEXT:    cmhi v0.2s, v2.2s, v0.2s
; CHECK-NEXT:    ret
  %active.lane.mask = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i8(i8 %index, i8 %TC)
  ret <2 x i1> %active.lane.mask
}
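
; With constant operands the lane mask can fold to a ptrue with an immediate
; vector-length pattern (vl3, vl4, vl256 below) when the active range matches
; one; otherwise the trip count is materialised and whilelo is used as before.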
define <vscale x 4 x i1> @lane_mask_nxv4i1_imm3() {
; CHECK-LABEL: lane_mask_nxv4i1_imm3:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s, vl3
; CHECK-NEXT:    ret
entry:
  %active.lane.mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 3)
  ret <vscale x 4 x i1> %active.lane.mask
}

define <vscale x 4 x i1> @lane_mask_nxv4i1_imm5() {
; CHECK-LABEL: lane_mask_nxv4i1_imm5:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #5 // =0x5
; CHECK-NEXT:    whilelo p0.s, xzr, x8
; CHECK-NEXT:    ret
entry:
  %active.lane.mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 5)
  ret <vscale x 4 x i1> %active.lane.mask
}

define <vscale x 4 x i1> @lane_mask_nxv4i1_imm4() {
; CHECK-LABEL: lane_mask_nxv4i1_imm4:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s, vl4
; CHECK-NEXT:    ret
entry:
  %active.lane.mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 10, i64 14)
  ret <vscale x 4 x i1> %active.lane.mask
}

define <vscale x 16 x i1> @lane_mask_nxv16i1_imm10() {
; CHECK-LABEL: lane_mask_nxv16i1_imm10:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #10 // =0xa
; CHECK-NEXT:    whilelo p0.b, xzr, x8
; CHECK-NEXT:    ret
entry:
  %active.lane.mask = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 10)
  ret <vscale x 16 x i1> %active.lane.mask
}

define <vscale x 16 x i1> @lane_mask_nxv16i1_imm256() vscale_range(16, 16) {
; CHECK-LABEL: lane_mask_nxv16i1_imm256:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
entry:
  %active.lane.mask = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 0, i64 256)
  ret <vscale x 16 x i1> %active.lane.mask
}

declare <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32, i32)

declare <vscale x 64 x i1> @llvm.get.active.lane.mask.nxv64i1.i64(i64, i64)
declare <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64, i64)
declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64, i64)

declare <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i8(i8, i8)
declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i8(i8, i8)
declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i8(i8, i8)
declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i8(i8, i8)
declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i8(i8, i8)

declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32, i32)

declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64, i64)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64, i64)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64, i64)
declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64, i64)

declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i8(i8, i8)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i8(i8, i8)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i8(i8, i8)
declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i8(i8, i8)