; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA,RV32VLA
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA,RV64VLA

; RUN: llc -mtriple=riscv32 -mattr=+m,+v -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA,RV32VLA
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA,RV64VLA

; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS,RV32VLS %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS,RV64VLS %s
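
; A note on the check prefixes: the VLA ("vector-length agnostic") RUN lines
; compile with VLEN unknown at compile time, while the VLS ("vector-length
; specific") RUN lines pin VLEN to 128 bits via -riscv-v-vector-bits-max=128.
; Under VLS, fixed vectors map exactly onto whole vector registers, so
; whole-register loads/stores (vl1re32.v, vs2r.v, ...) can replace the
; vsetvli/vle/vse sequences seen in the VLA checks.
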
define <vscale x 8 x i32> @insert_nxv8i32_v2i32_0(<vscale x 8 x i32> %vec, ptr %svp) {
; VLA-LABEL: insert_nxv8i32_v2i32_0:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vle32.v v12, (a0)
; VLA-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
; VLA-NEXT:    vmv.v.v v8, v12
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_nxv8i32_v2i32_0:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vle32.v v12, (a0)
; VLS-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; VLS-NEXT:    vmv.v.v v8, v12
; VLS-NEXT:    ret
  %sv = load <2 x i32>, ptr %svp
  %v = call <vscale x 8 x i32> @llvm.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> %vec, <2 x i32> %sv, i64 0)
  ret <vscale x 8 x i32> %v
}

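; In the checks above, the vmv.v.v that merges the subvector into element 0
; runs under a tail-undisturbed (tu) policy, so the elements of %vec past the
; inserted <2 x i32> are preserved.
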
define <vscale x 8 x i32> @insert_nxv8i32_v2i32_2(<vscale x 8 x i32> %vec, ptr %svp) {
; VLA-LABEL: insert_nxv8i32_v2i32_2:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vle32.v v12, (a0)
; VLA-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
; VLA-NEXT:    vslideup.vi v8, v12, 2
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_nxv8i32_v2i32_2:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vle32.v v12, (a0)
; VLS-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT:    vslideup.vi v8, v12, 2
; VLS-NEXT:    ret
  %sv = load <2 x i32>, ptr %svp
  %v = call <vscale x 8 x i32> @llvm.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> %vec, <2 x i32> %sv, i64 2)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @insert_nxv8i32_v2i32_6(<vscale x 8 x i32> %vec, ptr %svp) {
; VLA-LABEL: insert_nxv8i32_v2i32_6:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vle32.v v12, (a0)
; VLA-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
; VLA-NEXT:    vslideup.vi v8, v12, 6
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_nxv8i32_v2i32_6:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vle32.v v12, (a0)
; VLS-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT:    vslideup.vi v9, v12, 2
; VLS-NEXT:    ret
  %sv = load <2 x i32>, ptr %svp
  %v = call <vscale x 8 x i32> @llvm.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> %vec, <2 x i32> %sv, i64 6)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @insert_nxv8i32_v8i32_0(<vscale x 8 x i32> %vec, ptr %svp) {
; VLA-LABEL: insert_nxv8i32_v8i32_0:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vle32.v v12, (a0)
; VLA-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
; VLA-NEXT:    vmv.v.v v8, v12
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_nxv8i32_v8i32_0:
; VLS:       # %bb.0:
; VLS-NEXT:    vl2re32.v v8, (a0)
; VLS-NEXT:    ret
  %sv = load <8 x i32>, ptr %svp
  %v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 0)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @insert_nxv8i32_v8i32_8(<vscale x 8 x i32> %vec, ptr %svp) {
; VLA-LABEL: insert_nxv8i32_v8i32_8:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vle32.v v12, (a0)
; VLA-NEXT:    vsetivli zero, 16, e32, m4, tu, ma
; VLA-NEXT:    vslideup.vi v8, v12, 8
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_nxv8i32_v8i32_8:
; VLS:       # %bb.0:
; VLS-NEXT:    vl2re32.v v10, (a0)
; VLS-NEXT:    ret
  %sv = load <8 x i32>, ptr %svp
  %v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 8)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @insert_nxv8i32_undef_v2i32_0(ptr %svp) {
; CHECK-LABEL: insert_nxv8i32_undef_v2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
  %sv = load <2 x i32>, ptr %svp
  %v = call <vscale x 8 x i32> @llvm.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> undef, <2 x i32> %sv, i64 0)
  ret <vscale x 8 x i32> %v
}

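; With an undef destination there is nothing to merge into, so the insert
; above reduces to the plain load of the subvector.
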
define <vscale x 2 x i32> @insert_nxv8i32_v4i32_0(<vscale x 2 x i32> %vec, <4 x i32> %subvec) {
; VLA-LABEL: insert_nxv8i32_v4i32_0:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; VLA-NEXT:    vmv.v.v v8, v9
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_nxv8i32_v4i32_0:
; VLS:       # %bb.0:
; VLS-NEXT:    vmv1r.v v8, v9
; VLS-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> %vec, <4 x i32> %subvec, i64 0)
  ret <vscale x 2 x i32> %v
}

define <4 x i32> @insert_v4i32_v4i32_0(<4 x i32> %vec, <4 x i32> %subvec) {
; CHECK-LABEL: insert_v4i32_v4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vector.insert.v4i32.v4i32(<4 x i32> %vec, <4 x i32> %subvec, i64 0)
  ret <4 x i32> %v
}

define void @insert_v4i32_v2i32_0(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v4i32_v2i32_0:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vle32.v v8, (a1)
; VLA-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; VLA-NEXT:    vle32.v v9, (a0)
; VLA-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; VLA-NEXT:    vmv.v.v v9, v8
; VLA-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; VLA-NEXT:    vse32.v v9, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_v4i32_v2i32_0:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vle32.v v8, (a1)
; VLS-NEXT:    vl1re32.v v9, (a0)
; VLS-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; VLS-NEXT:    vmv.v.v v9, v8
; VLS-NEXT:    vs1r.v v9, (a0)
; VLS-NEXT:    ret
  %sv = load <2 x i32>, ptr %svp
  %vec = load <4 x i32>, ptr %vp
  %v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> %vec, <2 x i32> %sv, i64 0)
  store <4 x i32> %v, ptr %vp
  ret void
}

define void @insert_v4i32_v2i32_2(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v4i32_v2i32_2:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vle32.v v8, (a1)
; VLA-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; VLA-NEXT:    vle32.v v9, (a0)
; VLA-NEXT:    vslideup.vi v9, v8, 2
; VLA-NEXT:    vse32.v v9, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_v4i32_v2i32_2:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vle32.v v8, (a1)
; VLS-NEXT:    vl1re32.v v9, (a0)
; VLS-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT:    vslideup.vi v9, v8, 2
; VLS-NEXT:    vs1r.v v9, (a0)
; VLS-NEXT:    ret
  %sv = load <2 x i32>, ptr %svp
  %vec = load <4 x i32>, ptr %vp
  %v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> %vec, <2 x i32> %sv, i64 2)
  store <4 x i32> %v, ptr %vp
  ret void
}

define void @insert_v4i32_undef_v2i32_0(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v4i32_undef_v2i32_0:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vle32.v v8, (a1)
; VLA-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; VLA-NEXT:    vse32.v v8, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_v4i32_undef_v2i32_0:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vle32.v v8, (a1)
; VLS-NEXT:    vs1r.v v8, (a0)
; VLS-NEXT:    ret
  %sv = load <2 x i32>, ptr %svp
  %v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> undef, <2 x i32> %sv, i64 0)
  store <4 x i32> %v, ptr %vp
  ret void
}

; This tests the code path in RISCVISelDAGToDAG::Select where we select an
; insert_subvector with a fixed vector and fixed subvector type. The phi here
; is used to prevent the fixed insert_subvector from being combined away into
; a scalable insert_subvector.
define <4 x i32> @insert_v4i32_undef_v2i32_0_phi(<2 x i32> %subvec, i1 %cond) {
; CHECK-LABEL: insert_v4i32_undef_v2i32_0_phi:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    bnez a0, .LBB11_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:  .LBB11_2: # %bar
; CHECK-NEXT:    ret
entry:
  br i1 %cond, label %foo, label %bar
foo:
  %v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> undef, <2 x i32> %subvec, i64 0)
  br label %bar
bar:
  %w = phi <4 x i32> [%v, %foo], [zeroinitializer, %entry]
  ret <4 x i32> %w
}

define void @insert_v8i32_v2i32_0(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v8i32_v2i32_0:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vle32.v v8, (a1)
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vle32.v v10, (a0)
; VLA-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
; VLA-NEXT:    vmv.v.v v10, v8
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vse32.v v10, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_v8i32_v2i32_0:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vle32.v v8, (a1)
; VLS-NEXT:    vl2re32.v v10, (a0)
; VLS-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; VLS-NEXT:    vmv.v.v v10, v8
; VLS-NEXT:    vs2r.v v10, (a0)
; VLS-NEXT:    ret
  %sv = load <2 x i32>, ptr %svp
  %vec = load <8 x i32>, ptr %vp
  %v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 0)
  store <8 x i32> %v, ptr %vp
  ret void
}

define void @insert_v8i32_v2i32_2(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v8i32_v2i32_2:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vle32.v v8, (a0)
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vle32.v v10, (a1)
; VLA-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
; VLA-NEXT:    vslideup.vi v8, v10, 2
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vse32.v v8, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_v8i32_v2i32_2:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vle32.v v8, (a1)
; VLS-NEXT:    vl2re32.v v10, (a0)
; VLS-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT:    vslideup.vi v10, v8, 2
; VLS-NEXT:    vs2r.v v10, (a0)
; VLS-NEXT:    ret
  %sv = load <2 x i32>, ptr %svp
  %vec = load <8 x i32>, ptr %vp
  %v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 2)
  store <8 x i32> %v, ptr %vp
  ret void
}

define void @insert_v8i32_v2i32_6(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v8i32_v2i32_6:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vle32.v v8, (a0)
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vle32.v v10, (a1)
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vslideup.vi v8, v10, 6
; VLA-NEXT:    vse32.v v8, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_v8i32_v2i32_6:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vle32.v v8, (a1)
; VLS-NEXT:    vl2re32.v v10, (a0)
; VLS-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT:    vslideup.vi v11, v8, 2
; VLS-NEXT:    vs2r.v v10, (a0)
; VLS-NEXT:    ret
  %sv = load <2 x i32>, ptr %svp
  %vec = load <8 x i32>, ptr %vp
  %v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 6)
  store <8 x i32> %v, ptr %vp
  ret void
}

define void @insert_v8i32_undef_v2i32_6(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v8i32_undef_v2i32_6:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vle32.v v8, (a1)
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vslideup.vi v10, v8, 6
; VLA-NEXT:    vse32.v v10, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_v8i32_undef_v2i32_6:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vle32.v v8, (a1)
; VLS-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT:    vslideup.vi v9, v8, 2
; VLS-NEXT:    vs2r.v v8, (a0)
; VLS-NEXT:    ret
  %sv = load <2 x i32>, ptr %svp
  %v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> undef, <2 x i32> %sv, i64 6)
  store <8 x i32> %v, ptr %vp
  ret void
}

define void @insert_v4i16_v2i16_0(ptr %vp, ptr %svp) {
; CHECK-LABEL: insert_v4i16_v2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v9, (a1)
; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <4 x i16>, ptr %vp
  %sv = load <2 x i16>, ptr %svp
  %c = call <4 x i16> @llvm.vector.insert.v2i16.v4i16(<4 x i16> %v, <2 x i16> %sv, i64 0)
  store <4 x i16> %c, ptr %vp
  ret void
}

define void @insert_v4i16_v2i16_2(ptr %vp, ptr %svp) {
; CHECK-LABEL: insert_v4i16_v2i16_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v9, (a1)
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 2
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <4 x i16>, ptr %vp
  %sv = load <2 x i16>, ptr %svp
  %c = call <4 x i16> @llvm.vector.insert.v2i16.v4i16(<4 x i16> %v, <2 x i16> %sv, i64 2)
  store <4 x i16> %c, ptr %vp
  ret void
}

define void @insert_v32i1_v8i1_0(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v32i1_v8i1_0:
; VLA:       # %bb.0:
; VLA-NEXT:    li a2, 32
; VLA-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; VLA-NEXT:    vlm.v v8, (a0)
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vlm.v v9, (a1)
; VLA-NEXT:    vsetivli zero, 1, e8, mf4, tu, ma
; VLA-NEXT:    vmv.v.v v8, v9
; VLA-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; VLA-NEXT:    vsm.v v8, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_v32i1_v8i1_0:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetvli a2, zero, e8, m2, ta, ma
; VLS-NEXT:    vlm.v v8, (a0)
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vlm.v v9, (a1)
; VLS-NEXT:    vsetivli zero, 1, e8, mf4, tu, ma
; VLS-NEXT:    vmv.v.v v8, v9
; VLS-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; VLS-NEXT:    vsm.v v8, (a0)
; VLS-NEXT:    ret
  %v = load <32 x i1>, ptr %vp
  %sv = load <8 x i1>, ptr %svp
  %c = call <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 0)
  store <32 x i1> %c, ptr %vp
  ret void
}

define void @insert_v32i1_v8i1_16(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v32i1_v8i1_16:
; VLA:       # %bb.0:
; VLA-NEXT:    li a2, 32
; VLA-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; VLA-NEXT:    vlm.v v8, (a0)
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vlm.v v9, (a1)
; VLA-NEXT:    vsetivli zero, 3, e8, mf4, tu, ma
; VLA-NEXT:    vslideup.vi v8, v9, 2
; VLA-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; VLA-NEXT:    vsm.v v8, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_v32i1_v8i1_16:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetvli a2, zero, e8, m2, ta, ma
; VLS-NEXT:    vlm.v v8, (a0)
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vlm.v v9, (a1)
; VLS-NEXT:    vsetivli zero, 3, e8, mf4, tu, ma
; VLS-NEXT:    vslideup.vi v8, v9, 2
; VLS-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; VLS-NEXT:    vsm.v v8, (a0)
; VLS-NEXT:    ret
  %v = load <32 x i1>, ptr %vp
  %sv = load <8 x i1>, ptr %svp
  %c = call <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 16)
  store <32 x i1> %c, ptr %vp
  ret void
}

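; Mask vectors are bit-packed, so inserting <8 x i1> at element 16 above is a
; byte-granularity operation: the subvector lands in byte 2 of the mask
; register, hence the vslideup.vi by 2 with VL=3 and a tail-undisturbed
; policy.
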
define void @insert_v8i1_v4i1_0(ptr %vp, ptr %svp) {
; CHECK-LABEL: insert_v8i1_v4i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vlm.v v8, (a1)
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <8 x i1>, ptr %vp
  %sv = load <4 x i1>, ptr %svp
  %c = call <8 x i1> @llvm.vector.insert.v4i1.v8i1(<8 x i1> %v, <4 x i1> %sv, i64 0)
  store <8 x i1> %c, ptr %vp
  ret void
}

define void @insert_v8i1_v4i1_4(ptr %vp, ptr %svp) {
; CHECK-LABEL: insert_v8i1_v4i1_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vlm.v v8, (a1)
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vslideup.vi v9, v8, 4
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <8 x i1>, ptr %vp
  %sv = load <4 x i1>, ptr %svp
  %c = call <8 x i1> @llvm.vector.insert.v4i1.v8i1(<8 x i1> %v, <4 x i1> %sv, i64 4)
  store <8 x i1> %c, ptr %vp
  ret void
}

define <vscale x 2 x i16> @insert_nxv2i16_v2i16_0(<vscale x 2 x i16> %v, ptr %svp) {
; CHECK-LABEL: insert_nxv2i16_v2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %sv = load <2 x i16>, ptr %svp
  %c = call <vscale x 2 x i16> @llvm.vector.insert.v2i16.nxv2i16(<vscale x 2 x i16> %v, <2 x i16> %sv, i64 0)
  ret <vscale x 2 x i16> %c
}

define <vscale x 2 x i16> @insert_nxv2i16_v2i16_2(<vscale x 2 x i16> %v, ptr %svp) {
; CHECK-LABEL: insert_nxv2i16_v2i16_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vsetivli zero, 6, e16, mf2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 4
; CHECK-NEXT:    ret
  %sv = load <2 x i16>, ptr %svp
  %c = call <vscale x 2 x i16> @llvm.vector.insert.v2i16.nxv2i16(<vscale x 2 x i16> %v, <2 x i16> %sv, i64 4)
  ret <vscale x 2 x i16> %c
}

define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, ptr %svp) {
; VLA-LABEL: insert_nxv2i1_v4i1_0:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; VLA-NEXT:    vlm.v v8, (a0)
; VLA-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; VLA-NEXT:    vmv.v.i v9, 0
; VLA-NEXT:    vmerge.vim v9, v9, 1, v0
; VLA-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; VLA-NEXT:    vmv.v.i v10, 0
; VLA-NEXT:    vmv1r.v v0, v8
; VLA-NEXT:    vmerge.vim v8, v10, 1, v0
; VLA-NEXT:    vsetvli zero, zero, e8, mf4, tu, ma
; VLA-NEXT:    vmv.v.v v9, v8
; VLA-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; VLA-NEXT:    vmsne.vi v0, v9, 0
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_nxv2i1_v4i1_0:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; VLS-NEXT:    vlm.v v8, (a0)
; VLS-NEXT:    vmv.v.i v9, 0
; VLS-NEXT:    vmerge.vim v10, v9, 1, v0
; VLS-NEXT:    vmv1r.v v0, v8
; VLS-NEXT:    vmerge.vim v8, v9, 1, v0
; VLS-NEXT:    vsetvli zero, zero, e8, mf4, tu, ma
; VLS-NEXT:    vmv.v.v v10, v8
; VLS-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; VLS-NEXT:    vmsne.vi v0, v10, 0
; VLS-NEXT:    ret
  %sv = load <4 x i1>, ptr %svp
  %c = call <vscale x 2 x i1> @llvm.vector.insert.v4i1.nxv2i1(<vscale x 2 x i1> %v, <4 x i1> %sv, i64 0)
  ret <vscale x 2 x i1> %c
}

define <vscale x 8 x i1> @insert_nxv8i1_v8i1_0(<vscale x 8 x i1> %v, ptr %svp) {
; CHECK-LABEL: insert_nxv8i1_v8i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, tu, ma
; CHECK-NEXT:    vmv.v.v v0, v8
; CHECK-NEXT:    ret
  %sv = load <8 x i1>, ptr %svp
  %c = call <vscale x 8 x i1> @llvm.vector.insert.v8i1.nxv8i1(<vscale x 8 x i1> %v, <8 x i1> %sv, i64 0)
  ret <vscale x 8 x i1> %c
}

define <vscale x 8 x i1> @insert_nxv8i1_v8i1_16(<vscale x 8 x i1> %v, ptr %svp) {
; CHECK-LABEL: insert_nxv8i1_v8i1_16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 3, e8, mf8, tu, ma
; CHECK-NEXT:    vslideup.vi v0, v8, 2
; CHECK-NEXT:    ret
  %sv = load <8 x i1>, ptr %svp
  %c = call <vscale x 8 x i1> @llvm.vector.insert.v8i1.nxv8i1(<vscale x 8 x i1> %v, <8 x i1> %sv, i64 16)
  ret <vscale x 8 x i1> %c
}

declare <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64>, <2 x i64>, i64)

define void @insert_v2i64_nxv16i64(ptr %psv0, ptr %psv1, ptr %out) {
; VLA-LABEL: insert_v2i64_nxv16i64:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; VLA-NEXT:    vle64.v v8, (a0)
; VLA-NEXT:    vle64.v v16, (a1)
; VLA-NEXT:    vsetivli zero, 6, e64, m8, tu, ma
; VLA-NEXT:    vslideup.vi v8, v16, 4
; VLA-NEXT:    vs8r.v v8, (a2)
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_v2i64_nxv16i64:
; VLS:       # %bb.0:
; VLS-NEXT:    vl1re64.v v8, (a0)
; VLS-NEXT:    vl1re64.v v10, (a1)
; VLS-NEXT:    vs8r.v v8, (a2)
; VLS-NEXT:    ret
  %sv0 = load <2 x i64>, ptr %psv0
  %sv1 = load <2 x i64>, ptr %psv1
  %v0 = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv0, i64 0)
  %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> %v0, <2 x i64> %sv1, i64 4)
  store <vscale x 16 x i64> %v, ptr %out
  ret void
}

define void @insert_v2i64_nxv16i64_lo0(ptr %psv, ptr %out) {
; VLA-LABEL: insert_v2i64_nxv16i64_lo0:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; VLA-NEXT:    vle64.v v8, (a0)
; VLA-NEXT:    vs8r.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_v2i64_nxv16i64_lo0:
; VLS:       # %bb.0:
; VLS-NEXT:    vl1re64.v v8, (a0)
; VLS-NEXT:    vs8r.v v8, (a1)
; VLS-NEXT:    ret
  %sv = load <2 x i64>, ptr %psv
  %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 0)
  store <vscale x 16 x i64> %v, ptr %out
  ret void
}

define void @insert_v2i64_nxv16i64_lo2(ptr %psv, ptr %out) {
; VLA-LABEL: insert_v2i64_nxv16i64_lo2:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; VLA-NEXT:    vle64.v v8, (a0)
; VLA-NEXT:    vsetivli zero, 4, e64, m8, ta, ma
; VLA-NEXT:    vslideup.vi v16, v8, 2
; VLA-NEXT:    vs8r.v v16, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: insert_v2i64_nxv16i64_lo2:
; VLS:       # %bb.0:
; VLS-NEXT:    vl1re64.v v9, (a0)
; VLS-NEXT:    vs8r.v v8, (a1)
; VLS-NEXT:    ret
  %sv = load <2 x i64>, ptr %psv
  %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 2)
  store <vscale x 16 x i64> %v, ptr %out
  ret void
}

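; Under VLS the subvector is simply loaded into v9, the register holding
; elements 2..3 of the LMUL=8 group starting at v8, so no vslideup is needed.
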
; Check we don't mistakenly optimize this: we don't know whether this is
; inserted into the low or high split vector.
define void @insert_v2i64_nxv16i64_hi(ptr %psv, ptr %out) {
; RV32VLA-LABEL: insert_v2i64_nxv16i64_hi:
; RV32VLA:       # %bb.0:
; RV32VLA-NEXT:    addi sp, sp, -80
; RV32VLA-NEXT:    .cfi_def_cfa_offset 80
; RV32VLA-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
; RV32VLA-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
; RV32VLA-NEXT:    .cfi_offset ra, -4
; RV32VLA-NEXT:    .cfi_offset s0, -8
; RV32VLA-NEXT:    addi s0, sp, 80
; RV32VLA-NEXT:    .cfi_def_cfa s0, 0
; RV32VLA-NEXT:    csrr a2, vlenb
; RV32VLA-NEXT:    slli a2, a2, 4
; RV32VLA-NEXT:    sub sp, sp, a2
; RV32VLA-NEXT:    andi sp, sp, -64
; RV32VLA-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32VLA-NEXT:    vle64.v v8, (a0)
; RV32VLA-NEXT:    addi a0, sp, 128
; RV32VLA-NEXT:    vse64.v v8, (a0)
; RV32VLA-NEXT:    csrr a0, vlenb
; RV32VLA-NEXT:    slli a0, a0, 3
; RV32VLA-NEXT:    addi a2, sp, 64
; RV32VLA-NEXT:    add a3, a2, a0
; RV32VLA-NEXT:    vl8re64.v v8, (a3)
; RV32VLA-NEXT:    vl8re64.v v16, (a2)
; RV32VLA-NEXT:    add a0, a1, a0
; RV32VLA-NEXT:    vs8r.v v8, (a0)
; RV32VLA-NEXT:    vs8r.v v16, (a1)
; RV32VLA-NEXT:    addi sp, s0, -80
; RV32VLA-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
; RV32VLA-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
; RV32VLA-NEXT:    addi sp, sp, 80
; RV32VLA-NEXT:    ret
;
; RV64VLA-LABEL: insert_v2i64_nxv16i64_hi:
; RV64VLA:       # %bb.0:
; RV64VLA-NEXT:    addi sp, sp, -80
; RV64VLA-NEXT:    .cfi_def_cfa_offset 80
; RV64VLA-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; RV64VLA-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; RV64VLA-NEXT:    .cfi_offset ra, -8
; RV64VLA-NEXT:    .cfi_offset s0, -16
; RV64VLA-NEXT:    addi s0, sp, 80
; RV64VLA-NEXT:    .cfi_def_cfa s0, 0
; RV64VLA-NEXT:    csrr a2, vlenb
; RV64VLA-NEXT:    slli a2, a2, 4
; RV64VLA-NEXT:    sub sp, sp, a2
; RV64VLA-NEXT:    andi sp, sp, -64
; RV64VLA-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64VLA-NEXT:    vle64.v v8, (a0)
; RV64VLA-NEXT:    addi a0, sp, 128
; RV64VLA-NEXT:    vse64.v v8, (a0)
; RV64VLA-NEXT:    csrr a0, vlenb
; RV64VLA-NEXT:    slli a0, a0, 3
; RV64VLA-NEXT:    addi a2, sp, 64
; RV64VLA-NEXT:    add a3, a2, a0
; RV64VLA-NEXT:    vl8re64.v v8, (a3)
; RV64VLA-NEXT:    vl8re64.v v16, (a2)
; RV64VLA-NEXT:    add a0, a1, a0
; RV64VLA-NEXT:    vs8r.v v8, (a0)
; RV64VLA-NEXT:    vs8r.v v16, (a1)
; RV64VLA-NEXT:    addi sp, s0, -80
; RV64VLA-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; RV64VLA-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
; RV64VLA-NEXT:    addi sp, sp, 80
; RV64VLA-NEXT:    ret
;
; RV32VLS-LABEL: insert_v2i64_nxv16i64_hi:
; RV32VLS:       # %bb.0:
; RV32VLS-NEXT:    addi sp, sp, -80
; RV32VLS-NEXT:    .cfi_def_cfa_offset 80
; RV32VLS-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
; RV32VLS-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
; RV32VLS-NEXT:    .cfi_offset ra, -4
; RV32VLS-NEXT:    .cfi_offset s0, -8
; RV32VLS-NEXT:    addi s0, sp, 80
; RV32VLS-NEXT:    .cfi_def_cfa s0, 0
; RV32VLS-NEXT:    addi sp, sp, -256
; RV32VLS-NEXT:    andi sp, sp, -64
; RV32VLS-NEXT:    vl1re64.v v8, (a0)
; RV32VLS-NEXT:    addi a0, sp, 128
; RV32VLS-NEXT:    vs1r.v v8, (a0)
; RV32VLS-NEXT:    addi a0, sp, 64
; RV32VLS-NEXT:    addi a2, sp, 192
; RV32VLS-NEXT:    vl8re64.v v8, (a2)
; RV32VLS-NEXT:    vl8re64.v v16, (a0)
; RV32VLS-NEXT:    addi a0, a1, 128
; RV32VLS-NEXT:    vs8r.v v8, (a0)
; RV32VLS-NEXT:    vs8r.v v16, (a1)
; RV32VLS-NEXT:    addi sp, s0, -80
; RV32VLS-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
; RV32VLS-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
; RV32VLS-NEXT:    addi sp, sp, 80
; RV32VLS-NEXT:    ret
;
; RV64VLS-LABEL: insert_v2i64_nxv16i64_hi:
; RV64VLS:       # %bb.0:
; RV64VLS-NEXT:    addi sp, sp, -80
; RV64VLS-NEXT:    .cfi_def_cfa_offset 80
; RV64VLS-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; RV64VLS-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; RV64VLS-NEXT:    .cfi_offset ra, -8
; RV64VLS-NEXT:    .cfi_offset s0, -16
; RV64VLS-NEXT:    addi s0, sp, 80
; RV64VLS-NEXT:    .cfi_def_cfa s0, 0
; RV64VLS-NEXT:    addi sp, sp, -256
; RV64VLS-NEXT:    andi sp, sp, -64
; RV64VLS-NEXT:    vl1re64.v v8, (a0)
; RV64VLS-NEXT:    addi a0, sp, 128
; RV64VLS-NEXT:    vs1r.v v8, (a0)
; RV64VLS-NEXT:    addi a0, sp, 64
; RV64VLS-NEXT:    addi a2, sp, 192
; RV64VLS-NEXT:    vl8re64.v v8, (a2)
; RV64VLS-NEXT:    vl8re64.v v16, (a0)
; RV64VLS-NEXT:    addi a0, a1, 128
; RV64VLS-NEXT:    vs8r.v v8, (a0)
; RV64VLS-NEXT:    vs8r.v v16, (a1)
; RV64VLS-NEXT:    addi sp, s0, -80
; RV64VLS-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; RV64VLS-NEXT:    ld s0, 72 - 8(sp) # 8-byte Folded Reload
; RV64VLS-NEXT:    addi sp, sp, 80
; RV64VLS-NEXT:    ret
  %sv = load <2 x i64>, ptr %psv
  %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 8)
  store <vscale x 16 x i64> %v, ptr %out
  ret void
}

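; Because the insert at element 8 may land in either half once the illegal
; nxv16i64 value is split, it is materialized through the stack above: space
; for two full LMUL=8 groups is reserved (vlenb << 4 bytes in the VLA case,
; 256 bytes under VLS), the subvector is stored at its byte offset, and both
; halves are reloaded and stored to the result.
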
declare <8 x i1> @llvm.vector.insert.v4i1.v8i1(<8 x i1>, <4 x i1>, i64)
declare <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1>, <8 x i1>, i64)

declare <4 x i16> @llvm.vector.insert.v2i16.v4i16(<4 x i16>, <2 x i16>, i64)

declare <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32>, <2 x i32>, i64)
declare <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32>, <2 x i32>, i64)

declare <vscale x 2 x i1> @llvm.vector.insert.v4i1.nxv2i1(<vscale x 2 x i1>, <4 x i1>, i64)
declare <vscale x 8 x i1> @llvm.vector.insert.v8i1.nxv8i1(<vscale x 8 x i1>, <8 x i1>, i64)

declare <vscale x 2 x i16> @llvm.vector.insert.v2i16.nxv2i16(<vscale x 2 x i16>, <2 x i16>, i64)

declare <vscale x 8 x i32> @llvm.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32>, <2 x i32>, i64)
declare <vscale x 8 x i32> @llvm.vector.insert.v4i32.nxv8i32(<vscale x 8 x i32>, <4 x i32>, i64)
declare <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32>, <8 x i32>, i64)

; We emit insert_subvectors of fixed vectors at index 0 into undefs as a
; copy_to_regclass or insert_subreg, depending on the register classes of the
; vector types. Make sure that we use the correct type and not the shrunken
; LMUL=1 type, otherwise we will end up with an invalid extract_subvector when
; converting it from scalable->fixed, e.g. we get this for VLEN=128:
;
; t14: nxv2i32 = insert_subvector undef:nxv2i32, t4, Constant:i64<0>
; t15: v8i32 = extract_subvector t14, Constant:i64<0>
declare <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32>, i64)
define <4 x i32> @insert_extract_v8i32_v2i32_0(<2 x i32> %v) {
; CHECK-LABEL: insert_extract_v8i32_v2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %1 = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> poison, <2 x i32> %v, i64 0)
  %2 = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %1, i64 0)
  ret <4 x i32> %2
}