; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
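
; This file checks insertelement lowering for scalable integer vectors:
; inserting at index 0 lowers to vmv.s.x with a tail-undisturbed (tu) policy,
; a small constant index inserts the scalar into a temporary and slides it up
; with vslideup.vi, and a variable index uses vslideup.vx with VL = idx + 1.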
define <vscale x 1 x i8> @insertelt_nxv1i8_0(<vscale x 1 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 0
  ret <vscale x 1 x i8> %r
}

define <vscale x 1 x i8> @insertelt_nxv1i8_imm(<vscale x 1 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf8, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 3
  ret <vscale x 1 x i8> %r
}

define <vscale x 1 x i8> @insertelt_nxv1i8_idx(<vscale x 1 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv1i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 1 x i8> %r
}

define <vscale x 2 x i8> @insertelt_nxv2i8_0(<vscale x 2 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 0
  ret <vscale x 2 x i8> %r
}

define <vscale x 2 x i8> @insertelt_nxv2i8_imm(<vscale x 2 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 3
  ret <vscale x 2 x i8> %r
}

define <vscale x 2 x i8> @insertelt_nxv2i8_idx(<vscale x 2 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv2i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 2 x i8> %r
}

define <vscale x 4 x i8> @insertelt_nxv4i8_0(<vscale x 4 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 0
  ret <vscale x 4 x i8> %r
}

define <vscale x 4 x i8> @insertelt_nxv4i8_imm(<vscale x 4 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 3
  ret <vscale x 4 x i8> %r
}

define <vscale x 4 x i8> @insertelt_nxv4i8_idx(<vscale x 4 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv4i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 4 x i8> %r
}

define <vscale x 8 x i8> @insertelt_nxv8i8_0(<vscale x 8 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 0
  ret <vscale x 8 x i8> %r
}

define <vscale x 8 x i8> @insertelt_nxv8i8_imm(<vscale x 8 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 3
  ret <vscale x 8 x i8> %r
}

define <vscale x 8 x i8> @insertelt_nxv8i8_idx(<vscale x 8 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv8i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 8 x i8> %r
}

define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 0
  ret <vscale x 16 x i8> %r
}

define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 3
  ret <vscale x 16 x i8> %r
}

define <vscale x 16 x i8> @insertelt_nxv16i8_idx(<vscale x 16 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv16i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 16 x i8> %r
}

define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 0
  ret <vscale x 32 x i8> %r
}

define <vscale x 32 x i8> @insertelt_nxv32i8_imm(<vscale x 32 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 3
  ret <vscale x 32 x i8> %r
}

define <vscale x 32 x i8> @insertelt_nxv32i8_idx(<vscale x 32 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv32i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 32 x i8> %r
}

define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 0
  ret <vscale x 64 x i8> %r
}

define <vscale x 64 x i8> @insertelt_nxv64i8_imm(<vscale x 64 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv64i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 3
  ret <vscale x 64 x i8> %r
}

define <vscale x 64 x i8> @insertelt_nxv64i8_idx(<vscale x 64 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv64i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 64 x i8> %r
}

define <vscale x 1 x i16> @insertelt_nxv1i16_0(<vscale x 1 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 0
  ret <vscale x 1 x i16> %r
}

define <vscale x 1 x i16> @insertelt_nxv1i16_imm(<vscale x 1 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 3
  ret <vscale x 1 x i16> %r
}

define <vscale x 1 x i16> @insertelt_nxv1i16_idx(<vscale x 1 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv1i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 1 x i16> %r
}

define <vscale x 2 x i16> @insertelt_nxv2i16_0(<vscale x 2 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 0
  ret <vscale x 2 x i16> %r
}

define <vscale x 2 x i16> @insertelt_nxv2i16_imm(<vscale x 2 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 3
  ret <vscale x 2 x i16> %r
}

define <vscale x 2 x i16> @insertelt_nxv2i16_idx(<vscale x 2 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv2i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 2 x i16> %r
}

define <vscale x 4 x i16> @insertelt_nxv4i16_0(<vscale x 4 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 0
  ret <vscale x 4 x i16> %r
}

define <vscale x 4 x i16> @insertelt_nxv4i16_imm(<vscale x 4 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 3
  ret <vscale x 4 x i16> %r
}

define <vscale x 4 x i16> @insertelt_nxv4i16_idx(<vscale x 4 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv4i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 4 x i16> %r
}

define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 0
  ret <vscale x 8 x i16> %r
}

define <vscale x 8 x i16> @insertelt_nxv8i16_imm(<vscale x 8 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 3
  ret <vscale x 8 x i16> %r
}

define <vscale x 8 x i16> @insertelt_nxv8i16_idx(<vscale x 8 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv8i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 8 x i16> %r
}

define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 0
  ret <vscale x 16 x i16> %r
}

define <vscale x 16 x i16> @insertelt_nxv16i16_imm(<vscale x 16 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 3
  ret <vscale x 16 x i16> %r
}

define <vscale x 16 x i16> @insertelt_nxv16i16_idx(<vscale x 16 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv16i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 16 x i16> %r
}

define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 0
  ret <vscale x 32 x i16> %r
}

define <vscale x 32 x i16> @insertelt_nxv32i16_imm(<vscale x 32 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 3
  ret <vscale x 32 x i16> %r
}

define <vscale x 32 x i16> @insertelt_nxv32i16_idx(<vscale x 32 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv32i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 32 x i16> %r
}

define <vscale x 1 x i32> @insertelt_nxv1i32_0(<vscale x 1 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 0
  ret <vscale x 1 x i32> %r
}

define <vscale x 1 x i32> @insertelt_nxv1i32_imm(<vscale x 1 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 3
  ret <vscale x 1 x i32> %r
}

define <vscale x 1 x i32> @insertelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv1i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 1 x i32> %r
}

define <vscale x 2 x i32> @insertelt_nxv2i32_0(<vscale x 2 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 0
  ret <vscale x 2 x i32> %r
}

define <vscale x 2 x i32> @insertelt_nxv2i32_imm(<vscale x 2 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 3
  ret <vscale x 2 x i32> %r
}

define <vscale x 2 x i32> @insertelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv2i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 2 x i32> %r
}

define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 0
  ret <vscale x 4 x i32> %r
}

define <vscale x 4 x i32> @insertelt_nxv4i32_imm(<vscale x 4 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 3
  ret <vscale x 4 x i32> %r
}

define <vscale x 4 x i32> @insertelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv4i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 4 x i32> %r
}

define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 0
  ret <vscale x 8 x i32> %r
}

define <vscale x 8 x i32> @insertelt_nxv8i32_imm(<vscale x 8 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 3
  ret <vscale x 8 x i32> %r
}

define <vscale x 8 x i32> @insertelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv8i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 8 x i32> %r
}

define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 0
  ret <vscale x 16 x i32> %r
}

define <vscale x 16 x i32> @insertelt_nxv16i32_imm(<vscale x 16 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 3
  ret <vscale x 16 x i32> %r
}

define <vscale x 16 x i32> @insertelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv16i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 16 x i32> %r
}

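; For the i64 tests below, the index parameter is a plain i32 (no zeroext
; attribute), so the variable-index cases first zero-extend it with a
; slli/srli pair before it feeds the vslideup.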
define <vscale x 1 x i64> @insertelt_nxv1i64_0(<vscale x 1 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv1i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 0
  ret <vscale x 1 x i64> %r
}

define <vscale x 1 x i64> @insertelt_nxv1i64_imm(<vscale x 1 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv1i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 3
  ret <vscale x 1 x i64> %r
}

define <vscale x 1 x i64> @insertelt_nxv1i64_idx(<vscale x 1 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv1i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    slli a1, a1, 32
; CHECK-NEXT:    srli a1, a1, 32
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 1 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_0(<vscale x 2 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 0
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_imm(<vscale x 2 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv2i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 3
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    slli a1, a1, 32
; CHECK-NEXT:    srli a1, a1, 32
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 2 x i64> %r
}

define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 0
  ret <vscale x 4 x i64> %r
}

define <vscale x 4 x i64> @insertelt_nxv4i64_imm(<vscale x 4 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv4i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 3
  ret <vscale x 4 x i64> %r
}

define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv4i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    slli a1, a1, 32
; CHECK-NEXT:    srli a1, a1, 32
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 4 x i64> %r
}

define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 0
  ret <vscale x 8 x i64> %r
}

define <vscale x 8 x i64> @insertelt_nxv8i64_imm(<vscale x 8 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv8i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 3
  ret <vscale x 8 x i64> %r
}

define <vscale x 8 x i64> @insertelt_nxv8i64_idx(<vscale x 8 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv8i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    slli a1, a1, 32
; CHECK-NEXT:    srli a1, a1, 32
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 8 x i64> %r
}