; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -mattr=+bf16 < %s | FileCheck %s --check-prefixes=CHECK
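
; This file tests lowering of the llvm.vector.insert intrinsic on AArch64 SVE:
; fixed-length and scalable subvectors inserted into scalable vectors,
; including illegal (split or widened) types and predicate (i1) vectors.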

define <vscale x 2 x i64> @insert_v2i64_nxv2i64(<vscale x 2 x i64> %vec, <2 x i64> %subvec) nounwind {
; CHECK-LABEL: insert_v2i64_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d, vl2
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    mov z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %retval = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %vec, <2 x i64> %subvec, i64 0)
  ret <vscale x 2 x i64> %retval
}
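
; With a non-zero index the generic lowering spills the scalable vector to the
; stack, clamps the index so the 128-bit store stays within the stack slot,
; stores the subvector at the clamped offset, and reloads the whole vector.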

define <vscale x 2 x i64> @insert_v2i64_nxv2i64_idx2(<vscale x 2 x i64> %vec, <2 x i64> %subvec) nounwind {
; CHECK-LABEL: insert_v2i64_nxv2i64_idx2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cntd x8
; CHECK-NEXT:    mov w9, #2 // =0x2
; CHECK-NEXT:    sub x8, x8, #2
; CHECK-NEXT:    cmp x8, #2
; CHECK-NEXT:    csel x8, x8, x9, lo
; CHECK-NEXT:    mov x9, sp
; CHECK-NEXT:    lsl x8, x8, #3
; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
; CHECK-NEXT:    str q1, [x9, x8]
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %retval = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %vec, <2 x i64> %subvec, i64 2)
  ret <vscale x 2 x i64> %retval
}

define <vscale x 4 x i32> @insert_v4i32_nxv4i32(<vscale x 4 x i32> %vec, <4 x i32> %subvec) nounwind {
; CHECK-LABEL: insert_v4i32_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl4
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    mov z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %retval = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %vec, <4 x i32> %subvec, i64 0)
  ret <vscale x 4 x i32> %retval
}

define <vscale x 4 x i32> @insert_v4i32_nxv4i32_idx4(<vscale x 4 x i32> %vec, <4 x i32> %subvec) nounwind {
; CHECK-LABEL: insert_v4i32_nxv4i32_idx4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cntw x8
; CHECK-NEXT:    mov w9, #4 // =0x4
; CHECK-NEXT:    sub x8, x8, #4
; CHECK-NEXT:    cmp x8, #4
; CHECK-NEXT:    csel x8, x8, x9, lo
; CHECK-NEXT:    mov x9, sp
; CHECK-NEXT:    lsl x8, x8, #2
; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
; CHECK-NEXT:    str q1, [x9, x8]
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [sp]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %retval = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %vec, <4 x i32> %subvec, i64 4)
  ret <vscale x 4 x i32> %retval
}

define <vscale x 8 x i16> @insert_v8i16_nxv8i16(<vscale x 8 x i16> %vec, <8 x i16> %subvec) nounwind {
; CHECK-LABEL: insert_v8i16_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl8
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    mov z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %retval = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> %vec, <8 x i16> %subvec, i64 0)
  ret <vscale x 8 x i16> %retval
}

define <vscale x 8 x i16> @insert_v8i16_nxv8i16_idx8(<vscale x 8 x i16> %vec, <8 x i16> %subvec) nounwind {
; CHECK-LABEL: insert_v8i16_nxv8i16_idx8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cnth x8
; CHECK-NEXT:    mov w9, #8 // =0x8
; CHECK-NEXT:    sub x8, x8, #8
; CHECK-NEXT:    cmp x8, #8
; CHECK-NEXT:    csel x8, x8, x9, lo
; CHECK-NEXT:    mov x9, sp
; CHECK-NEXT:    lsl x8, x8, #1
; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
; CHECK-NEXT:    str q1, [x9, x8]
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [sp]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %retval = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> %vec, <8 x i16> %subvec, i64 8)
  ret <vscale x 8 x i16> %retval
}

define <vscale x 16 x i8> @insert_v16i8_nxv16i8(<vscale x 16 x i8> %vec, <16 x i8> %subvec) nounwind {
; CHECK-LABEL: insert_v16i8_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl16
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    mov z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %retval = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> %vec, <16 x i8> %subvec, i64 0)
  ret <vscale x 16 x i8> %retval
}

define <vscale x 16 x i8> @insert_v16i8_nxv16i8_idx16(<vscale x 16 x i8> %vec, <16 x i8> %subvec) nounwind {
; CHECK-LABEL: insert_v16i8_nxv16i8_idx16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    mov x8, #-16 // =0xfffffffffffffff0
; CHECK-NEXT:    mov w9, #16 // =0x10
; CHECK-NEXT:    addvl x8, x8, #1
; CHECK-NEXT:    mov x10, sp
; CHECK-NEXT:    cmp x8, #16
; CHECK-NEXT:    csel x8, x8, x9, lo
; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
; CHECK-NEXT:    str q1, [x10, x8]
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [sp]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %retval = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> %vec, <16 x i8> %subvec, i64 16)
  ret <vscale x 16 x i8> %retval
}

; Insert subvectors into illegal vectors
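; <vscale x 16 x i64> is wider than the widest legal SVE type, so it is split
; across multiple Z registers and stored to memory piecewise using
; vector-length-scaled ("mul vl") offsets.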

define void @insert_nxv8i64_nxv16i64(<vscale x 8 x i64> %sv0, <vscale x 8 x i64> %sv1, <vscale x 16 x i64>* %out) {
; CHECK-LABEL: insert_nxv8i64_nxv16i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    st1d { z7.d }, p0, [x0, #7, mul vl]
; CHECK-NEXT:    st1d { z6.d }, p0, [x0, #6, mul vl]
; CHECK-NEXT:    st1d { z5.d }, p0, [x0, #5, mul vl]
; CHECK-NEXT:    st1d { z4.d }, p0, [x0, #4, mul vl]
; CHECK-NEXT:    st1d { z3.d }, p0, [x0, #3, mul vl]
; CHECK-NEXT:    st1d { z2.d }, p0, [x0, #2, mul vl]
; CHECK-NEXT:    st1d { z1.d }, p0, [x0, #1, mul vl]
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %v0 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
  %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %v0, <vscale x 8 x i64> %sv1, i64 8)
  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
  ret void
}

define void @insert_nxv8i64_nxv16i64_lo(<vscale x 8 x i64> %sv0, <vscale x 16 x i64>* %out) {
; CHECK-LABEL: insert_nxv8i64_nxv16i64_lo:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    st1d { z3.d }, p0, [x0, #3, mul vl]
; CHECK-NEXT:    st1d { z2.d }, p0, [x0, #2, mul vl]
; CHECK-NEXT:    st1d { z1.d }, p0, [x0, #1, mul vl]
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
  ret void
}

define void @insert_nxv8i64_nxv16i64_hi(<vscale x 8 x i64> %sv0, <vscale x 16 x i64>* %out) {
; CHECK-LABEL: insert_nxv8i64_nxv16i64_hi:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    st1d { z3.d }, p0, [x0, #7, mul vl]
; CHECK-NEXT:    st1d { z2.d }, p0, [x0, #6, mul vl]
; CHECK-NEXT:    st1d { z1.d }, p0, [x0, #5, mul vl]
; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #4, mul vl]
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 8)
  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
  ret void
}

define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, <vscale x 16 x i64>* %out) uwtable {
; CHECK-LABEL: insert_v2i64_nxv16i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    addvl sp, sp, #-4
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
; CHECK-NEXT:    str q1, [sp, #32]
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [sp, #1, mul vl]
; CHECK-NEXT:    ld1d { z2.d }, p0/z, [sp, #2, mul vl]
; CHECK-NEXT:    ld1d { z3.d }, p0/z, [sp, #3, mul vl]
; CHECK-NEXT:    st1d { z3.d }, p0, [x0, #3, mul vl]
; CHECK-NEXT:    st1d { z2.d }, p0, [x0, #2, mul vl]
; CHECK-NEXT:    st1d { z1.d }, p0, [x0, #1, mul vl]
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    addvl sp, sp, #4
; CHECK-NEXT:    .cfi_def_cfa wsp, 16
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    .cfi_restore w29
; CHECK-NEXT:    ret
  %v0 = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv0, i64 0)
  %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> %v0, <2 x i64> %sv1, i64 4)
  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
  ret void
}

define void @insert_v2i64_nxv16i64_lo0(<2 x i64>* %psv, <vscale x 16 x i64>* %out) {
; CHECK-LABEL: insert_v2i64_nxv16i64_lo0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
; CHECK-NEXT:    ret
  %sv = load <2 x i64>, <2 x i64>* %psv
  %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 0)
  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
  ret void
}

define void @insert_v2i64_nxv16i64_lo2(<2 x i64>* %psv, <vscale x 16 x i64>* %out) uwtable {
; CHECK-LABEL: insert_v2i64_nxv16i64_lo2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    addvl sp, sp, #-2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    str q0, [sp, #16]
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [sp, #1, mul vl]
; CHECK-NEXT:    st1d { z1.d }, p0, [x1, #1, mul vl]
; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
; CHECK-NEXT:    addvl sp, sp, #2
; CHECK-NEXT:    .cfi_def_cfa wsp, 16
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    .cfi_restore w29
; CHECK-NEXT:    ret
  %sv = load <2 x i64>, <2 x i64>* %psv
  %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 2)
  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
  ret void
}

; Insert subvectors that need widening
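; Types such as <vscale x 1 x i32> are not legal SVE types; the subvector is
; widened to a container type before the insert, so inserting a splat into
; undef can fold to a plain splat of the wider type.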

define <vscale x 4 x i32> @insert_nxv1i32_nxv4i32_undef() nounwind {
; CHECK-LABEL: insert_nxv1i32_nxv4i32_undef:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov z0.s, #1 // =0x1
; CHECK-NEXT:    ret
entry:
  %0 = insertelement <vscale x 1 x i32> undef, i32 1, i32 0
  %subvec = shufflevector <vscale x 1 x i32> %0, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %retval = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> %subvec, i64 0)
  ret <vscale x 4 x i32> %retval
}

define <vscale x 6 x i16> @insert_nxv1i16_nxv6i16_undef() nounwind {
; CHECK-LABEL: insert_nxv1i16_nxv6i16_undef:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov z0.h, #1 // =0x1
; CHECK-NEXT:    ret
entry:
  %0 = insertelement <vscale x 1 x i16> undef, i16 1, i32 0
  %subvec = shufflevector <vscale x 1 x i16> %0, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
  %retval = call <vscale x 6 x i16> @llvm.vector.insert.nxv6i16.nxv1i16(<vscale x 6 x i16> undef, <vscale x 1 x i16> %subvec, i64 0)
  ret <vscale x 6 x i16> %retval
}

define <vscale x 4 x float> @insert_nxv1f32_nxv4f32_undef(<vscale x 1 x float> %subvec) nounwind {
; CHECK-LABEL: insert_nxv1f32_nxv4f32_undef:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    ret
entry:
  %retval = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float> undef, <vscale x 1 x float> %subvec, i64 0)
  ret <vscale x 4 x float> %retval
}

; This tests promotion of the input operand to INSERT_SUBVECTOR.
define <vscale x 8 x i16> @insert_nxv8i16_nxv2i16(<vscale x 8 x i16> %vec, <vscale x 2 x i16> %in) nounwind {
; CHECK-LABEL: insert_nxv8i16_nxv2i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpklo z2.s, z0.h
; CHECK-NEXT:    uunpkhi z0.s, z0.h
; CHECK-NEXT:    uunpklo z2.d, z2.s
; CHECK-NEXT:    uzp1 z1.s, z2.s, z1.s
; CHECK-NEXT:    uzp1 z0.h, z1.h, z0.h
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> %vec, <vscale x 2 x i16> %in, i64 2)
  ret <vscale x 8 x i16> %r
}

define <vscale x 4 x half> @insert_nxv4f16_nxv2f16_0(<vscale x 4 x half> %sv0, <vscale x 2 x half> %sv1) nounwind {
; CHECK-LABEL: insert_nxv4f16_nxv2f16_0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    uzp1 z0.s, z1.s, z0.s
; CHECK-NEXT:    ret
  %v0 = call <vscale x 4 x half> @llvm.vector.insert.nxv4f16.nxv2f16(<vscale x 4 x half> %sv0, <vscale x 2 x half> %sv1, i64 0)
  ret <vscale x 4 x half> %v0
}

define <vscale x 4 x half> @insert_nxv4f16_nxv2f16_2(<vscale x 4 x half> %sv0, <vscale x 2 x half> %sv1) nounwind {
; CHECK-LABEL: insert_nxv4f16_nxv2f16_2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpklo z0.d, z0.s
; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %v0 = call <vscale x 4 x half> @llvm.vector.insert.nxv4f16.nxv2f16(<vscale x 4 x half> %sv0, <vscale x 2 x half> %sv1, i64 2)
  ret <vscale x 4 x half> %v0
}

; Test that the index is scaled by vscale if the subvector is scalable.
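; The subvector holds 2 * vscale halves, so index 2 addresses element
; 2 * vscale, i.e. a byte offset of 4 * vscale; that is exactly the
; "#1, mul vl" offset of the unpacked st1h below.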
define <vscale x 8 x half> @insert_nxv8f16_nxv2f16(<vscale x 8 x half> %vec, <vscale x 2 x half> %in) nounwind {
; CHECK-LABEL: insert_nxv8f16_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ptrue p1.d
; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
; CHECK-NEXT:    st1h { z1.d }, p1, [sp, #1, mul vl]
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [sp]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.nxv2f16(<vscale x 8 x half> %vec, <vscale x 2 x half> %in, i64 2)
  ret <vscale x 8 x half> %r
}

define <vscale x 8 x half> @insert_nxv8f16_nxv4f16_0(<vscale x 8 x half> %sv0, <vscale x 4 x half> %sv1) nounwind {
; CHECK-LABEL: insert_nxv8f16_nxv4f16_0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpkhi z0.s, z0.h
; CHECK-NEXT:    uzp1 z0.h, z1.h, z0.h
; CHECK-NEXT:    ret
  %v0 = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half> %sv0, <vscale x 4 x half> %sv1, i64 0)
  ret <vscale x 8 x half> %v0
}

define <vscale x 8 x half> @insert_nxv8f16_nxv4f16_4(<vscale x 8 x half> %sv0, <vscale x 4 x half> %sv1) nounwind {
; CHECK-LABEL: insert_nxv8f16_nxv4f16_4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpklo z0.s, z0.h
; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %v0 = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half> %sv0, <vscale x 4 x half> %sv1, i64 4)
  ret <vscale x 8 x half> %v0
}

; Fixed length clamping
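; These functions carry attribute #0 (vscale_range(2,2)), so the vector length
; is known to be exactly 256 bits; the clamped index folds to a constant and
; the subvector is stored at a fixed stack offset.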

define <vscale x 2 x i64> @insert_fixed_v2i64_nxv2i64(<vscale x 2 x i64> %vec, <2 x i64> %subvec) nounwind #0 {
; CHECK-LABEL: insert_fixed_v2i64_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
; CHECK-NEXT:    str q1, [sp, #16]
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %retval = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> %vec, <2 x i64> %subvec, i64 2)
  ret <vscale x 2 x i64> %retval
}

define <vscale x 2 x i64> @insert_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, <4 x i64>* %ptr) nounwind #0 {
; CHECK-LABEL: insert_fixed_v4i64_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0]
; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
; CHECK-NEXT:    st1d { z1.d }, p0, [sp]
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %subvec = load <4 x i64>, <4 x i64>* %ptr
  %retval = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> %vec, <4 x i64> %subvec, i64 4)
  ret <vscale x 2 x i64> %retval
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Unpacked types that need result widening
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

define <vscale x 3 x i32> @insert_nxv3i32_nxv2i32(<vscale x 2 x i32> %sv0) {
; CHECK-LABEL: insert_nxv3i32_nxv2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    ret
  %v0 = call <vscale x 3 x i32> @llvm.vector.insert.nxv3i32.nxv2i32(<vscale x 3 x i32> undef, <vscale x 2 x i32> %sv0, i64 0)
  ret <vscale x 3 x i32> %v0
}

;; Check that the subvector is not widened, so that this does not crash.
define <vscale x 3 x i32> @insert_nxv3i32_nxv2i32_2(<vscale x 3 x i32> %sv0, <vscale x 2 x i32> %sv1) {
; CHECK-LABEL: insert_nxv3i32_nxv2i32_2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    uzp1 z0.s, z1.s, z0.s
; CHECK-NEXT:    ret
  %v0 = call <vscale x 3 x i32> @llvm.vector.insert.nxv3i32.nxv2i32(<vscale x 3 x i32> %sv0, <vscale x 2 x i32> %sv1, i64 0)
  ret <vscale x 3 x i32> %v0
}

define <vscale x 3 x float> @insert_nxv3f32_nxv2f32(<vscale x 2 x float> %sv0) nounwind {
; CHECK-LABEL: insert_nxv3f32_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    ret
  %v0 = call <vscale x 3 x float> @llvm.vector.insert.nxv3f32.nxv2f32(<vscale x 3 x float> undef, <vscale x 2 x float> %sv0, i64 0)
  ret <vscale x 3 x float> %v0
}

define <vscale x 4 x float> @insert_nxv4f32_nxv2f32_0(<vscale x 4 x float> %sv0, <vscale x 2 x float> %sv1) nounwind {
; CHECK-LABEL: insert_nxv4f32_nxv2f32_0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    uzp1 z0.s, z1.s, z0.s
; CHECK-NEXT:    ret
  %v0 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> %sv0, <vscale x 2 x float> %sv1, i64 0)
  ret <vscale x 4 x float> %v0
}

define <vscale x 4 x float> @insert_nxv4f32_nxv2f32_2(<vscale x 4 x float> %sv0, <vscale x 2 x float> %sv1) nounwind {
; CHECK-LABEL: insert_nxv4f32_nxv2f32_2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpklo z0.d, z0.s
; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %v0 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> %sv0, <vscale x 2 x float> %sv1, i64 2)
  ret <vscale x 4 x float> %v0
}

define <vscale x 6 x i32> @insert_nxv6i32_nxv2i32(<vscale x 2 x i32> %sv0, <vscale x 2 x i32> %sv1) nounwind {
; CHECK-LABEL: insert_nxv6i32_nxv2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-2
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [sp, #1, mul vl]
; CHECK-NEXT:    addvl sp, sp, #2
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %v0 = call <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv2i32(<vscale x 6 x i32> undef, <vscale x 2 x i32> %sv0, i64 0)
  %v1 = call <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv2i32(<vscale x 6 x i32> %v0, <vscale x 2 x i32> %sv1, i64 2)
  ret <vscale x 6 x i32> %v1
}

;; This works only because the input vector is undef and the index is zero.
define <vscale x 6 x i32> @insert_nxv6i32_nxv3i32(<vscale x 3 x i32> %sv0) {
; CHECK-LABEL: insert_nxv6i32_nxv3i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %v0 = call <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv3i32(<vscale x 6 x i32> undef, <vscale x 3 x i32> %sv0, i64 0)
  ret <vscale x 6 x i32> %v0
}

define <vscale x 12 x i32> @insert_nxv12i32_nxv4i32(<vscale x 4 x i32> %sv0, <vscale x 4 x i32> %sv1, <vscale x 4 x i32> %sv2) {
; CHECK-LABEL: insert_nxv12i32_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %v0 = call <vscale x 12 x i32> @llvm.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> undef, <vscale x 4 x i32> %sv0, i64 0)
  %v1 = call <vscale x 12 x i32> @llvm.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> %v0, <vscale x 4 x i32> %sv1, i64 4)
  %v2 = call <vscale x 12 x i32> @llvm.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32> %v1, <vscale x 4 x i32> %sv2, i64 8)
  ret <vscale x 12 x i32> %v2
}

define <vscale x 2 x bfloat> @insert_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %sv0, <vscale x 2 x bfloat> %sv1) nounwind {
; CHECK-LABEL: insert_nxv2bf16_nxv2bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.d, z1.d
; CHECK-NEXT:    ret
  %v0 = call <vscale x 2 x bfloat> @llvm.vector.insert.nxv2bf16.nxv2bf16(<vscale x 2 x bfloat> %sv0, <vscale x 2 x bfloat> %sv1, i64 0)
  ret <vscale x 2 x bfloat> %v0
}

define <vscale x 4 x bfloat> @insert_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %sv0, <vscale x 4 x bfloat> %sv1) nounwind {
; CHECK-LABEL: insert_nxv4bf16_nxv4bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.d, z1.d
; CHECK-NEXT:    ret
  %v0 = call <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.nxv4bf16(<vscale x 4 x bfloat> %sv0, <vscale x 4 x bfloat> %sv1, i64 0)
  ret <vscale x 4 x bfloat> %v0
}

define <vscale x 4 x bfloat> @insert_nxv4bf16_v4bf16(<vscale x 4 x bfloat> %sv0, <4 x bfloat> %v1) nounwind {
; CHECK-LABEL: insert_nxv4bf16_v4bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    addpl x8, sp, #4
; CHECK-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
; CHECK-NEXT:    str d1, [x8]
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %v0 = call <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.v4bf16(<vscale x 4 x bfloat> %sv0, <4 x bfloat> %v1, i64 0)
  ret <vscale x 4 x bfloat> %v0
}

define <vscale x 8 x bfloat> @insert_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %sv0, <vscale x 8 x bfloat> %sv1) nounwind {
; CHECK-LABEL: insert_nxv8bf16_nxv8bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.d, z1.d
; CHECK-NEXT:    ret
  %v0 = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.nxv8bf16(<vscale x 8 x bfloat> %sv0, <vscale x 8 x bfloat> %sv1, i64 0)
  ret <vscale x 8 x bfloat> %v0
}

define <vscale x 8 x bfloat> @insert_nxv8bf16_v8bf16(<vscale x 8 x bfloat> %sv0, <8 x bfloat> %v1) nounwind {
; CHECK-LABEL: insert_nxv8bf16_v8bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl8
; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT:    mov z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %v0 = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> %sv0, <8 x bfloat> %v1, i64 0)
  ret <vscale x 8 x bfloat> %v0
}

define <vscale x 8 x bfloat> @insert_nxv8bf16_nxv4bf16_0(<vscale x 8 x bfloat> %sv0, <vscale x 4 x bfloat> %sv1) nounwind {
; CHECK-LABEL: insert_nxv8bf16_nxv4bf16_0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpkhi z0.s, z0.h
; CHECK-NEXT:    uzp1 z0.h, z1.h, z0.h
; CHECK-NEXT:    ret
  %v0 = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.nxv4bf16(<vscale x 8 x bfloat> %sv0, <vscale x 4 x bfloat> %sv1, i64 0)
  ret <vscale x 8 x bfloat> %v0
}

define <vscale x 8 x bfloat> @insert_nxv8bf16_nxv4bf16_4(<vscale x 8 x bfloat> %sv0, <vscale x 4 x bfloat> %sv1) nounwind {
; CHECK-LABEL: insert_nxv8bf16_nxv4bf16_4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpklo z0.s, z0.h
; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %v0 = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.nxv4bf16(<vscale x 8 x bfloat> %sv0, <vscale x 4 x bfloat> %sv1, i64 4)
  ret <vscale x 8 x bfloat> %v0
}

define <vscale x 4 x bfloat> @insert_nxv4bf16_nxv2bf16_0(<vscale x 4 x bfloat> %sv0, <vscale x 2 x bfloat> %sv1) nounwind {
; CHECK-LABEL: insert_nxv4bf16_nxv2bf16_0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    uzp1 z0.s, z1.s, z0.s
; CHECK-NEXT:    ret
  %v0 = call <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.nxv2bf16(<vscale x 4 x bfloat> %sv0, <vscale x 2 x bfloat> %sv1, i64 0)
  ret <vscale x 4 x bfloat> %v0
}

define <vscale x 4 x bfloat> @insert_nxv4bf16_nxv2bf16_2(<vscale x 4 x bfloat> %sv0, <vscale x 2 x bfloat> %sv1) nounwind {
; CHECK-LABEL: insert_nxv4bf16_nxv2bf16_2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uunpklo z0.d, z0.s
; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %v0 = call <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.nxv2bf16(<vscale x 4 x bfloat> %sv0, <vscale x 2 x bfloat> %sv1, i64 2)
  ret <vscale x 4 x bfloat> %v0
}

; Test predicate inserts of half size.
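; A half-sized predicate insert needs no memory round trip: the kept half of
; %vec is recovered with punpklo/punpkhi and re-interleaved with the
; subvector using uzp1.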
define <vscale x 16 x i1> @insert_nxv16i1_nxv8i1_0(<vscale x 16 x i1> %vec, <vscale x 8 x i1> %sv) {
; CHECK-LABEL: insert_nxv16i1_nxv8i1_0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT:    ret
  %v0 = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv8i1(<vscale x 16 x i1> %vec, <vscale x 8 x i1> %sv, i64 0)
  ret <vscale x 16 x i1> %v0
}

define <vscale x 16 x i1> @insert_nxv16i1_nxv8i1_8(<vscale x 16 x i1> %vec, <vscale x 8 x i1> %sv) {
; CHECK-LABEL: insert_nxv16i1_nxv8i1_8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT:    ret
  %v0 = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv8i1(<vscale x 16 x i1> %vec, <vscale x 8 x i1> %sv, i64 8)
  ret <vscale x 16 x i1> %v0
}

; Test predicate inserts of less than half the size.
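; Quarter-sized inserts unpack twice, so only the quarter of the destination
; that overlaps the subvector is rebuilt before the halves are zipped back
; together.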
define <vscale x 16 x i1> @insert_nxv16i1_nxv4i1_0(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %sv) {
; CHECK-LABEL: insert_nxv16i1_nxv4i1_0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT:    ret
  %v0 = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %sv, i64 0)
  ret <vscale x 16 x i1> %v0
}

define <vscale x 16 x i1> @insert_nxv16i1_nxv4i1_12(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %sv) {
; CHECK-LABEL: insert_nxv16i1_nxv4i1_12:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT:    ret
  %v0 = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %sv, i64 12)
  ret <vscale x 16 x i1> %v0
}

; Test predicate insert into undef/zero
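; Inserting into zeroinitializer needs an explicit pfalse for the upper lanes,
; while inserting into poison can simply reuse the subvector register.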
define <vscale x 16 x i1> @insert_nxv16i1_nxv4i1_into_zero(<vscale x 4 x i1> %sv) {
; CHECK-LABEL: insert_nxv16i1_nxv4i1_into_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    pfalse p1.b
; CHECK-NEXT:    uzp1 p0.h, p0.h, p1.h
; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT:    ret
  %v0 = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> zeroinitializer, <vscale x 4 x i1> %sv, i64 0)
  ret <vscale x 16 x i1> %v0
}

define <vscale x 16 x i1> @insert_nxv16i1_nxv4i1_into_poison(<vscale x 4 x i1> %sv) {
; CHECK-LABEL: insert_nxv16i1_nxv4i1_into_poison:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uzp1 p0.h, p0.h, p0.h
; CHECK-NEXT:    uzp1 p0.b, p0.b, p0.b
; CHECK-NEXT:    ret
  %v0 = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> poison, <vscale x 4 x i1> %sv, i64 0)
  ret <vscale x 16 x i1> %v0
}

; Test constant predicate insert into undef
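; The all-true constant covers the low lanes and the remaining lanes are
; undef, so an unpredicated ptrue of the full element type is a valid
; lowering.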
define <vscale x 2 x i1> @insert_nxv2i1_v8i1_const_true_into_undef() vscale_range(4,8) {
; CHECK-LABEL: insert_nxv2i1_v8i1_const_true_into_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ret
  %v0 = call <vscale x 2 x i1> @llvm.vector.insert.nxv2i1.v8i1 (<vscale x 2 x i1> undef, <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
  ret <vscale x 2 x i1> %v0
}

define <vscale x 4 x i1> @insert_nxv4i1_v16i1_const_true_into_undef() vscale_range(4,8) {
; CHECK-LABEL: insert_nxv4i1_v16i1_const_true_into_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ret
  %v0 = call <vscale x 4 x i1> @llvm.vector.insert.nxv4i1.v16i1 (<vscale x 4 x i1> undef, <16 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
  ret <vscale x 4 x i1> %v0
}

define <vscale x 8 x i1> @insert_nxv8i1_v32i1_const_true_into_undef() vscale_range(4,8) {
; CHECK-LABEL: insert_nxv8i1_v32i1_const_true_into_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ret
  %v0 = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.v32i1 (<vscale x 8 x i1> undef, <32 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
  ret <vscale x 8 x i1> %v0
}

define <vscale x 16 x i1> @insert_nxv16i1_v64i1_const_true_into_undef() vscale_range(4,8) {
; CHECK-LABEL: insert_nxv16i1_v64i1_const_true_into_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ret
  %v0 = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.v64i1 (<vscale x 16 x i1> undef, <64 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, i64 0)
  ret <vscale x 16 x i1> %v0
}

; Insert nxv1i1 type into: nxv2i1
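; <vscale x 1 x i1> subvectors sit at .d granularity, so these inserts unpack
; the destination predicate down to .d level, splice in the subvector with
; uzp1, and zip the pieces back up.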

define <vscale x 2 x i1> @insert_nxv1i1_nxv2i1_0(<vscale x 2 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv2i1_0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    uzp1 p0.d, p1.d, p0.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i1> @llvm.vector.insert.nxv2i1.nxv1i1(<vscale x 2 x i1> %vec, <vscale x 1 x i1> %sv, i64 0)
  ret <vscale x 2 x i1> %res
}

define <vscale x 2 x i1> @insert_nxv1i1_nxv2i1_1(<vscale x 2 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv2i1_1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    uzp1 p0.d, p0.d, p1.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i1> @llvm.vector.insert.nxv2i1.nxv1i1(<vscale x 2 x i1> %vec, <vscale x 1 x i1> %sv, i64 1)
  ret <vscale x 2 x i1> %res
}

; Insert nxv1i1 type into: nxv4i1

define <vscale x 4 x i1> @insert_nxv1i1_nxv4i1_0(<vscale x 4 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv4i1_0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p2.d
; CHECK-NEXT:    uzp1 p0.s, p1.s, p0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i1> @llvm.vector.insert.nxv4i1.nxv1i1(<vscale x 4 x i1> %vec, <vscale x 1 x i1> %sv, i64 0)
  ret <vscale x 4 x i1> %res
}

define <vscale x 4 x i1> @insert_nxv1i1_nxv4i1_1(<vscale x 4 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv4i1_1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    uzp1 p1.d, p2.d, p1.d
; CHECK-NEXT:    uzp1 p0.s, p1.s, p0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i1> @llvm.vector.insert.nxv4i1.nxv1i1(<vscale x 4 x i1> %vec, <vscale x 1 x i1> %sv, i64 1)
  ret <vscale x 4 x i1> %res
}

define <vscale x 4 x i1> @insert_nxv1i1_nxv4i1_2(<vscale x 4 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv4i1_2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p2.d
; CHECK-NEXT:    uzp1 p0.s, p0.s, p1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i1> @llvm.vector.insert.nxv4i1.nxv1i1(<vscale x 4 x i1> %vec, <vscale x 1 x i1> %sv, i64 2)
  ret <vscale x 4 x i1> %res
}

define <vscale x 4 x i1> @insert_nxv1i1_nxv4i1_3(<vscale x 4 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv4i1_3:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    uzp1 p1.d, p2.d, p1.d
; CHECK-NEXT:    uzp1 p0.s, p0.s, p1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i1> @llvm.vector.insert.nxv4i1.nxv1i1(<vscale x 4 x i1> %vec, <vscale x 1 x i1> %sv, i64 3)
  ret <vscale x 4 x i1> %res
}

; Insert nxv1i1 type into: nxv8i1

define <vscale x 8 x i1> @insert_nxv1i1_nxv8i1_0(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv8i1_0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    punpkhi p3.h, p3.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p3.d
; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.nxv1i1(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv, i64 0)
  ret <vscale x 8 x i1> %res
}

define <vscale x 8 x i1> @insert_nxv1i1_nxv8i1_1(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv8i1_1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    punpklo p3.h, p3.b
; CHECK-NEXT:    uzp1 p1.d, p3.d, p1.d
; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.nxv1i1(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv, i64 1)
  ret <vscale x 8 x i1> %res
}

define <vscale x 8 x i1> @insert_nxv1i1_nxv8i1_2(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv8i1_2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpkhi p3.h, p2.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    punpkhi p3.h, p3.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p3.d
; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.nxv1i1(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv, i64 2)
  ret <vscale x 8 x i1> %res
}

define <vscale x 8 x i1> @insert_nxv1i1_nxv8i1_3(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv8i1_3:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpkhi p3.h, p2.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    punpklo p3.h, p3.b
; CHECK-NEXT:    uzp1 p1.d, p3.d, p1.d
; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
; CHECK-NEXT:    uzp1 p0.h, p1.h, p0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.nxv1i1(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv, i64 3)
  ret <vscale x 8 x i1> %res
}

define <vscale x 8 x i1> @insert_nxv1i1_nxv8i1_4(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv8i1_4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    punpkhi p3.h, p3.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p3.d
; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
; CHECK-NEXT:    uzp1 p0.h, p0.h, p1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.nxv1i1(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv, i64 4)
  ret <vscale x 8 x i1> %res
}

define <vscale x 8 x i1> @insert_nxv1i1_nxv8i1_5(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv8i1_5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    punpklo p3.h, p3.b
; CHECK-NEXT:    uzp1 p1.d, p3.d, p1.d
; CHECK-NEXT:    uzp1 p1.s, p1.s, p2.s
; CHECK-NEXT:    uzp1 p0.h, p0.h, p1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.nxv1i1(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv, i64 5)
  ret <vscale x 8 x i1> %res
}

define <vscale x 8 x i1> @insert_nxv1i1_nxv8i1_6(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv8i1_6:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpkhi p3.h, p2.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    punpkhi p3.h, p3.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p3.d
; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
; CHECK-NEXT:    uzp1 p0.h, p0.h, p1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.nxv1i1(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv, i64 6)
  ret <vscale x 8 x i1> %res
}

define <vscale x 8 x i1> @insert_nxv1i1_nxv8i1_7(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv8i1_7:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpkhi p3.h, p2.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    punpklo p3.h, p3.b
; CHECK-NEXT:    uzp1 p1.d, p3.d, p1.d
; CHECK-NEXT:    uzp1 p1.s, p2.s, p1.s
; CHECK-NEXT:    uzp1 p0.h, p0.h, p1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.nxv1i1(<vscale x 8 x i1> %vec, <vscale x 1 x i1> %sv, i64 7)
  ret <vscale x 8 x i1> %res
}

; Insert nxv1i1 type into: nxv16i1
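; The nxv16i1 cases need four levels of unpacking, which requires a fourth
; scratch predicate; the callee-saved p4 is spilled and reloaded around the
; sequence.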

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_0(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    punpklo p4.h, p3.b
; CHECK-NEXT:    punpkhi p3.h, p3.b
; CHECK-NEXT:    punpkhi p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p4.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p1.s, p3.s
; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 0)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    punpklo p4.h, p3.b
; CHECK-NEXT:    punpkhi p3.h, p3.b
; CHECK-NEXT:    punpklo p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p4.d, p1.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p1.s, p3.s
; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 1)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_2(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    punpkhi p4.h, p3.b
; CHECK-NEXT:    punpklo p3.h, p3.b
; CHECK-NEXT:    punpkhi p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p4.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p3.s, p1.s
; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 2)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_3(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_3:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    punpkhi p4.h, p3.b
; CHECK-NEXT:    punpklo p3.h, p3.b
; CHECK-NEXT:    punpklo p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p4.d, p1.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p3.s, p1.s
; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 3)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_4(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpkhi p3.h, p2.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    punpklo p4.h, p3.b
; CHECK-NEXT:    punpkhi p3.h, p3.b
; CHECK-NEXT:    punpkhi p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p4.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p1.s, p3.s
; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 4)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_5(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpkhi p3.h, p2.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    punpklo p4.h, p3.b
; CHECK-NEXT:    punpkhi p3.h, p3.b
; CHECK-NEXT:    punpklo p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p4.d, p1.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p1.s, p3.s
; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 5)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_6(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_6:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpkhi p3.h, p2.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    punpkhi p4.h, p3.b
; CHECK-NEXT:    punpklo p3.h, p3.b
; CHECK-NEXT:    punpkhi p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p4.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p3.s, p1.s
; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 6)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_7(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_7:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpkhi p3.h, p2.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    punpkhi p4.h, p3.b
; CHECK-NEXT:    punpklo p3.h, p3.b
; CHECK-NEXT:    punpklo p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p4.d, p1.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p3.s, p1.s
; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
; CHECK-NEXT:    uzp1 p0.b, p1.b, p0.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 7)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_8(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    punpklo p4.h, p3.b
; CHECK-NEXT:    punpkhi p3.h, p3.b
; CHECK-NEXT:    punpkhi p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p4.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p1.s, p3.s
; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 8)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_9(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_9:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    punpklo p4.h, p3.b
; CHECK-NEXT:    punpkhi p3.h, p3.b
; CHECK-NEXT:    punpklo p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p4.d, p1.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p1.s, p3.s
; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 9)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_10(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_10:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    punpkhi p4.h, p3.b
; CHECK-NEXT:    punpklo p3.h, p3.b
; CHECK-NEXT:    punpkhi p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p4.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p3.s, p1.s
; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 10)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_11(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_11:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p2.b
; CHECK-NEXT:    punpkhi p2.h, p2.b
; CHECK-NEXT:    punpkhi p4.h, p3.b
; CHECK-NEXT:    punpklo p3.h, p3.b
; CHECK-NEXT:    punpklo p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p4.d, p1.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p3.s, p1.s
; CHECK-NEXT:    uzp1 p1.h, p1.h, p2.h
; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 11)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_12(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_12:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpkhi p3.h, p2.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    punpklo p4.h, p3.b
; CHECK-NEXT:    punpkhi p3.h, p3.b
; CHECK-NEXT:    punpkhi p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p4.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p1.s, p3.s
; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 12)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_13(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_13:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpkhi p3.h, p2.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    punpklo p4.h, p3.b
; CHECK-NEXT:    punpkhi p3.h, p3.b
; CHECK-NEXT:    punpklo p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p4.d, p1.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p1.s, p3.s
; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 13)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_14(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_14:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpkhi p3.h, p2.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    punpkhi p4.h, p3.b
; CHECK-NEXT:    punpklo p3.h, p3.b
; CHECK-NEXT:    punpkhi p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p1.d, p4.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p3.s, p1.s
; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 14)
  ret <vscale x 16 x i1> %res
}

define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_15(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv1i1_nxv16i1_15:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    punpkhi p2.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpkhi p3.h, p2.b
; CHECK-NEXT:    punpklo p2.h, p2.b
; CHECK-NEXT:    punpkhi p4.h, p3.b
; CHECK-NEXT:    punpklo p3.h, p3.b
; CHECK-NEXT:    punpklo p4.h, p4.b
; CHECK-NEXT:    uzp1 p1.d, p4.d, p1.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    uzp1 p1.s, p3.s, p1.s
; CHECK-NEXT:    uzp1 p1.h, p2.h, p1.h
; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1> %vec, <vscale x 1 x i1> %sv, i64 15)
  ret <vscale x 16 x i1> %res
}

attributes #0 = { vscale_range(2,2) }

declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, i64)

declare <vscale x 6 x i16> @llvm.vector.insert.nxv6i16.nxv1i16(<vscale x 6 x i16>, <vscale x 1 x i16>, i64)
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16>, <vscale x 2 x i16>, i64)
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)

declare <vscale x 3 x i32> @llvm.vector.insert.nxv3i32.nxv2i32(<vscale x 3 x i32>, <vscale x 2 x i32>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32>, <vscale x 1 x i32>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 12 x i32> @llvm.vector.insert.nxv4i32.nxv12i32(<vscale x 12 x i32>, <vscale x 4 x i32>, i64)
declare <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv2i32(<vscale x 6 x i32>, <vscale x 2 x i32>, i64)
declare <vscale x 6 x i32> @llvm.vector.insert.nxv6i32.nxv3i32(<vscale x 6 x i32>, <vscale x 3 x i32>, i64)

declare <vscale x 2 x bfloat> @llvm.vector.insert.nxv2bf16.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, i64)
declare <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.nxv2bf16(<vscale x 4 x bfloat>, <vscale x 2 x bfloat>, i64)
declare <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, i64)
declare <vscale x 4 x bfloat> @llvm.vector.insert.nxv4bf16.v4bf16(<vscale x 4 x bfloat>, <4 x bfloat>, i64)
declare <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i64)
declare <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.nxv4bf16(<vscale x 8 x bfloat>, <vscale x 4 x bfloat>, i64)
declare <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat>, <8 x bfloat>, i64)

declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64>, <4 x i64>, i64)
declare <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64>, <vscale x 8 x i64>, i64)
declare <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64>, <2 x i64>, i64)

declare <vscale x 4 x half> @llvm.vector.insert.nxv4f16.nxv2f16(<vscale x 4 x half>, <vscale x 2 x half>, i64)
declare <vscale x 8 x half> @llvm.vector.insert.nxv8f16.nxv2f16(<vscale x 8 x half>, <vscale x 2 x half>, i64)
declare <vscale x 8 x half> @llvm.vector.insert.nxv8f16.nxv4f16(<vscale x 8 x half>, <vscale x 4 x half>, i64)

declare <vscale x 3 x float> @llvm.vector.insert.nxv3f32.nxv2f32(<vscale x 3 x float>, <vscale x 2 x float>, i64)
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float>, <vscale x 1 x float>, i64)
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float>, <vscale x 2 x float>, i64)

declare <vscale x 2 x i1> @llvm.vector.insert.nxv2i1.v8i1(<vscale x 2 x i1>, <8 x i1>, i64)
declare <vscale x 4 x i1> @llvm.vector.insert.nxv4i1.v16i1(<vscale x 4 x i1>, <16 x i1>, i64)
declare <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.v32i1(<vscale x 8 x i1>, <32 x i1>, i64)
declare <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv1i1(<vscale x 16 x i1>, <vscale x 1 x i1>, i64)
declare <vscale x 8 x i1> @llvm.vector.insert.nxv8i1.nxv1i1(<vscale x 8 x i1>, <vscale x 1 x i1>, i64)
declare <vscale x 4 x i1> @llvm.vector.insert.nxv4i1.nxv1i1(<vscale x 4 x i1>, <vscale x 1 x i1>, i64)
declare <vscale x 2 x i1> @llvm.vector.insert.nxv2i1.nxv1i1(<vscale x 2 x i1>, <vscale x 1 x i1>, i64)
declare <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1>, <vscale x 4 x i1>, i64)
declare <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.nxv8i1(<vscale x 16 x i1>, <vscale x 8 x i1>, i64)
declare <vscale x 16 x i1> @llvm.vector.insert.nxv16i1.v64i1(<vscale x 16 x i1>, <64 x i1>, i64)