; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
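
; These tests exercise insertelement lowering for scalable integer vectors on
; RV32: element 0 is written with vmv.s.x (a vslide1down pair for i64), while
; other indices materialize the scalar in a temporary register and vslideup it
; into place.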

define <vscale x 1 x i8> @insertelt_nxv1i8_0(<vscale x 1 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 0
  ret <vscale x 1 x i8> %r
}

define <vscale x 1 x i8> @insertelt_nxv1i8_imm(<vscale x 1 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf8, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 3
  ret <vscale x 1 x i8> %r
}

define <vscale x 1 x i8> @insertelt_nxv1i8_idx(<vscale x 1 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv1i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 1 x i8> %r
}

define <vscale x 2 x i8> @insertelt_nxv2i8_0(<vscale x 2 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 0
  ret <vscale x 2 x i8> %r
}

define <vscale x 2 x i8> @insertelt_nxv2i8_imm(<vscale x 2 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 3
  ret <vscale x 2 x i8> %r
}

define <vscale x 2 x i8> @insertelt_nxv2i8_idx(<vscale x 2 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv2i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 2 x i8> %r
}

define <vscale x 4 x i8> @insertelt_nxv4i8_0(<vscale x 4 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 0
  ret <vscale x 4 x i8> %r
}

define <vscale x 4 x i8> @insertelt_nxv4i8_imm(<vscale x 4 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 3
  ret <vscale x 4 x i8> %r
}

define <vscale x 4 x i8> @insertelt_nxv4i8_idx(<vscale x 4 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv4i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 4 x i8> %r
}

define <vscale x 8 x i8> @insertelt_nxv8i8_0(<vscale x 8 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 0
  ret <vscale x 8 x i8> %r
}

define <vscale x 8 x i8> @insertelt_nxv8i8_imm(<vscale x 8 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 3
  ret <vscale x 8 x i8> %r
}

define <vscale x 8 x i8> @insertelt_nxv8i8_idx(<vscale x 8 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv8i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 8 x i8> %r
}

define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 0
  ret <vscale x 16 x i8> %r
}

define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 3
  ret <vscale x 16 x i8> %r
}

define <vscale x 16 x i8> @insertelt_nxv16i8_idx(<vscale x 16 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv16i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 16 x i8> %r
}

define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 0
  ret <vscale x 32 x i8> %r
}

define <vscale x 32 x i8> @insertelt_nxv32i8_imm(<vscale x 32 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 3
  ret <vscale x 32 x i8> %r
}

define <vscale x 32 x i8> @insertelt_nxv32i8_idx(<vscale x 32 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv32i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 32 x i8> %r
}

define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 0
  ret <vscale x 64 x i8> %r
}

define <vscale x 64 x i8> @insertelt_nxv64i8_imm(<vscale x 64 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv64i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 3
  ret <vscale x 64 x i8> %r
}

define <vscale x 64 x i8> @insertelt_nxv64i8_idx(<vscale x 64 x i8> %v, i8 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv64i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 64 x i8> %r
}

define <vscale x 1 x i16> @insertelt_nxv1i16_0(<vscale x 1 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 0
  ret <vscale x 1 x i16> %r
}

define <vscale x 1 x i16> @insertelt_nxv1i16_imm(<vscale x 1 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 3
  ret <vscale x 1 x i16> %r
}

define <vscale x 1 x i16> @insertelt_nxv1i16_idx(<vscale x 1 x i16> %v, i16 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv1i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 1 x i16> %r
}

define <vscale x 2 x i16> @insertelt_nxv2i16_0(<vscale x 2 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 0
  ret <vscale x 2 x i16> %r
}

define <vscale x 2 x i16> @insertelt_nxv2i16_imm(<vscale x 2 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 3
  ret <vscale x 2 x i16> %r
}

define <vscale x 2 x i16> @insertelt_nxv2i16_idx(<vscale x 2 x i16> %v, i16 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv2i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 2 x i16> %r
}

define <vscale x 4 x i16> @insertelt_nxv4i16_0(<vscale x 4 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 0
  ret <vscale x 4 x i16> %r
}

define <vscale x 4 x i16> @insertelt_nxv4i16_imm(<vscale x 4 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 3
  ret <vscale x 4 x i16> %r
}

define <vscale x 4 x i16> @insertelt_nxv4i16_idx(<vscale x 4 x i16> %v, i16 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv4i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 4 x i16> %r
}

define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 0
  ret <vscale x 8 x i16> %r
}

define <vscale x 8 x i16> @insertelt_nxv8i16_imm(<vscale x 8 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 3
  ret <vscale x 8 x i16> %r
}

define <vscale x 8 x i16> @insertelt_nxv8i16_idx(<vscale x 8 x i16> %v, i16 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv8i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 8 x i16> %r
}

define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 0
  ret <vscale x 16 x i16> %r
}

define <vscale x 16 x i16> @insertelt_nxv16i16_imm(<vscale x 16 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 3
  ret <vscale x 16 x i16> %r
}

define <vscale x 16 x i16> @insertelt_nxv16i16_idx(<vscale x 16 x i16> %v, i16 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv16i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 16 x i16> %r
}

define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 0
  ret <vscale x 32 x i16> %r
}

define <vscale x 32 x i16> @insertelt_nxv32i16_imm(<vscale x 32 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 3
  ret <vscale x 32 x i16> %r
}

define <vscale x 32 x i16> @insertelt_nxv32i16_idx(<vscale x 32 x i16> %v, i16 signext %elt, i32 signext %idx) {
; CHECK-LABEL: insertelt_nxv32i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 32 x i16> %r
}

define <vscale x 1 x i32> @insertelt_nxv1i32_0(<vscale x 1 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 0
  ret <vscale x 1 x i32> %r
}

define <vscale x 1 x i32> @insertelt_nxv1i32_imm(<vscale x 1 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv1i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 3
  ret <vscale x 1 x i32> %r
}

define <vscale x 1 x i32> @insertelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv1i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 1 x i32> %r
}

define <vscale x 2 x i32> @insertelt_nxv2i32_0(<vscale x 2 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 0
  ret <vscale x 2 x i32> %r
}

define <vscale x 2 x i32> @insertelt_nxv2i32_imm(<vscale x 2 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv2i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 3
  ret <vscale x 2 x i32> %r
}

define <vscale x 2 x i32> @insertelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 2 x i32> %r
}

define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 0
  ret <vscale x 4 x i32> %r
}

define <vscale x 4 x i32> @insertelt_nxv4i32_imm(<vscale x 4 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv4i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 3
  ret <vscale x 4 x i32> %r
}

define <vscale x 4 x i32> @insertelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv4i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 4 x i32> %r
}

define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 0
  ret <vscale x 8 x i32> %r
}

define <vscale x 8 x i32> @insertelt_nxv8i32_imm(<vscale x 8 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv8i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 3
  ret <vscale x 8 x i32> %r
}

define <vscale x 8 x i32> @insertelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv8i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 8 x i32> %r
}

define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 0
  ret <vscale x 16 x i32> %r
}

define <vscale x 16 x i32> @insertelt_nxv16i32_imm(<vscale x 16 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv16i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 3
  ret <vscale x 16 x i32> %r
}

define <vscale x 16 x i32> @insertelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv16i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 16 x i32> %r
}

define <vscale x 1 x i64> @insertelt_nxv1i64_0(<vscale x 1 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv1i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 0
  ret <vscale x 1 x i64> %r
}

define <vscale x 1 x i64> @insertelt_nxv1i64_imm(<vscale x 1 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv1i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v9, v8, a0
; CHECK-NEXT:    vslide1down.vx v9, v9, a1
; CHECK-NEXT:    vsetivli zero, 4, e64, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 3
  ret <vscale x 1 x i64> %r
}

define <vscale x 1 x i64> @insertelt_nxv1i64_idx(<vscale x 1 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv1i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v9, v8, a0
; CHECK-NEXT:    vslide1down.vx v9, v9, a1
; CHECK-NEXT:    addi a0, a2, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a2
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 1 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_0(<vscale x 2 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 0
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_imm(<vscale x 2 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv2i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; CHECK-NEXT:    vslide1down.vx v10, v8, a0
; CHECK-NEXT:    vslide1down.vx v10, v10, a1
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 3
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; CHECK-NEXT:    vslide1down.vx v10, v8, a0
; CHECK-NEXT:    vslide1down.vx v10, v10, a1
; CHECK-NEXT:    addi a0, a2, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a2
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 2 x i64> %r
}

define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 0
  ret <vscale x 4 x i64> %r
}

define <vscale x 4 x i64> @insertelt_nxv4i64_imm(<vscale x 4 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv4i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; CHECK-NEXT:    vslide1down.vx v12, v8, a0
; CHECK-NEXT:    vslide1down.vx v12, v12, a1
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 3
  ret <vscale x 4 x i64> %r
}

define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv4i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
; CHECK-NEXT:    vslide1down.vx v12, v8, a0
; CHECK-NEXT:    vslide1down.vx v12, v12, a1
; CHECK-NEXT:    addi a0, a2, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a2
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 4 x i64> %r
}

define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 0
  ret <vscale x 8 x i64> %r
}

define <vscale x 8 x i64> @insertelt_nxv8i64_imm(<vscale x 8 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv8i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; CHECK-NEXT:    vslide1down.vx v16, v8, a0
; CHECK-NEXT:    vslide1down.vx v16, v16, a1
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 3
  ret <vscale x 8 x i64> %r
}

define <vscale x 8 x i64> @insertelt_nxv8i64_idx(<vscale x 8 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv8i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m8, ta, ma
; CHECK-NEXT:    vslide1down.vx v16, v8, a0
; CHECK-NEXT:    vslide1down.vx v16, v16, a1
; CHECK-NEXT:    addi a0, a2, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a2
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 8 x i64> %r
}

; Extra tests to check lowering of constant values
define <vscale x 2 x i64> @insertelt_nxv2i64_0_c10(<vscale x 2 x i64> %v) {
; CHECK-LABEL: insertelt_nxv2i64_0_c10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 10
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 10, i32 0
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_imm_c10(<vscale x 2 x i64> %v) {
; CHECK-LABEL: insertelt_nxv2i64_imm_c10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 10
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 10, i32 3
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_idx_c10(<vscale x 2 x i64> %v, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2i64_idx_c10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 10
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 10, i32 %idx
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_0_cn1(<vscale x 2 x i64> %v) {
; CHECK-LABEL: insertelt_nxv2i64_0_cn1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 0
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_imm_cn1(<vscale x 2 x i64> %v) {
; CHECK-LABEL: insertelt_nxv2i64_imm_cn1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v10, -1
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 3
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_idx_cn1(<vscale x 2 x i64> %v, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2i64_idx_cn1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v10, -1
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 %idx
  ret <vscale x 2 x i64> %r
}