; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
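
; Insertion at element 0 is expected to lower to vfmv.s.f at the narrowest
; sufficient LMUL; constant indices lower to vslideup.vi and variable indices
; to vslideup.vx, in both cases with VL = idx + 1 and tail-undisturbed policy.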
define <vscale x 1 x half> @insertelt_nxv1f16_0(<vscale x 1 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv1f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x half> %v, half %elt, i32 0
  ret <vscale x 1 x half> %r
}

define <vscale x 1 x half> @insertelt_nxv1f16_imm(<vscale x 1 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv1f16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, tu, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x half> %v, half %elt, i32 3
  ret <vscale x 1 x half> %r
}

define <vscale x 1 x half> @insertelt_nxv1f16_idx(<vscale x 1 x half> %v, half %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv1f16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli a2, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x half> %v, half %elt, i32 %idx
  ret <vscale x 1 x half> %r
}

define <vscale x 2 x half> @insertelt_nxv2f16_0(<vscale x 2 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv2f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x half> %v, half %elt, i32 0
  ret <vscale x 2 x half> %r
}

define <vscale x 2 x half> @insertelt_nxv2f16_imm(<vscale x 2 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv2f16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x half> %v, half %elt, i32 3
  ret <vscale x 2 x half> %r
}

define <vscale x 2 x half> @insertelt_nxv2f16_idx(<vscale x 2 x half> %v, half %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv2f16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x half> %v, half %elt, i32 %idx
  ret <vscale x 2 x half> %r
}

define <vscale x 4 x half> @insertelt_nxv4f16_0(<vscale x 4 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv4f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x half> %v, half %elt, i32 0
  ret <vscale x 4 x half> %r
}

define <vscale x 4 x half> @insertelt_nxv4f16_imm(<vscale x 4 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv4f16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x half> %v, half %elt, i32 3
  ret <vscale x 4 x half> %r
}

define <vscale x 4 x half> @insertelt_nxv4f16_idx(<vscale x 4 x half> %v, half %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv4f16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x half> %v, half %elt, i32 %idx
  ret <vscale x 4 x half> %r
}

define <vscale x 8 x half> @insertelt_nxv8f16_0(<vscale x 8 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv8f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x half> %v, half %elt, i32 0
  ret <vscale x 8 x half> %r
}

define <vscale x 8 x half> @insertelt_nxv8f16_imm(<vscale x 8 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv8f16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x half> %v, half %elt, i32 3
  ret <vscale x 8 x half> %r
}

define <vscale x 8 x half> @insertelt_nxv8f16_idx(<vscale x 8 x half> %v, half %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv8f16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x half> %v, half %elt, i32 %idx
  ret <vscale x 8 x half> %r
}

define <vscale x 16 x half> @insertelt_nxv16f16_0(<vscale x 16 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv16f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x half> %v, half %elt, i32 0
  ret <vscale x 16 x half> %r
}

define <vscale x 16 x half> @insertelt_nxv16f16_imm(<vscale x 16 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv16f16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x half> %v, half %elt, i32 3
  ret <vscale x 16 x half> %r
}

define <vscale x 16 x half> @insertelt_nxv16f16_idx(<vscale x 16 x half> %v, half %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv16f16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x half> %v, half %elt, i32 %idx
  ret <vscale x 16 x half> %r
}

define <vscale x 32 x half> @insertelt_nxv32f16_0(<vscale x 32 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv32f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x half> %v, half %elt, i32 0
  ret <vscale x 32 x half> %r
}

define <vscale x 32 x half> @insertelt_nxv32f16_imm(<vscale x 32 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv32f16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x half> %v, half %elt, i32 3
  ret <vscale x 32 x half> %r
}

define <vscale x 32 x half> @insertelt_nxv32f16_idx(<vscale x 32 x half> %v, half %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv32f16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x half> %v, half %elt, i32 %idx
  ret <vscale x 32 x half> %r
}

define <vscale x 1 x float> @insertelt_nxv1f32_0(<vscale x 1 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv1f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x float> %v, float %elt, i32 0
  ret <vscale x 1 x float> %r
}

define <vscale x 1 x float> @insertelt_nxv1f32_imm(<vscale x 1 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv1f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, tu, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x float> %v, float %elt, i32 3
  ret <vscale x 1 x float> %r
}

define <vscale x 1 x float> @insertelt_nxv1f32_idx(<vscale x 1 x float> %v, float %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv1f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli a2, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x float> %v, float %elt, i32 %idx
  ret <vscale x 1 x float> %r
}

define <vscale x 2 x float> @insertelt_nxv2f32_0(<vscale x 2 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv2f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x float> %v, float %elt, i32 0
  ret <vscale x 2 x float> %r
}

define <vscale x 2 x float> @insertelt_nxv2f32_imm(<vscale x 2 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv2f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x float> %v, float %elt, i32 3
  ret <vscale x 2 x float> %r
}

define <vscale x 2 x float> @insertelt_nxv2f32_idx(<vscale x 2 x float> %v, float %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv2f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x float> %v, float %elt, i32 %idx
  ret <vscale x 2 x float> %r
}

define <vscale x 4 x float> @insertelt_nxv4f32_0(<vscale x 4 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv4f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x float> %v, float %elt, i32 0
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @insertelt_nxv4f32_imm(<vscale x 4 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv4f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x float> %v, float %elt, i32 3
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @insertelt_nxv4f32_idx(<vscale x 4 x float> %v, float %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv4f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x float> %v, float %elt, i32 %idx
  ret <vscale x 4 x float> %r
}

define <vscale x 8 x float> @insertelt_nxv8f32_0(<vscale x 8 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv8f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x float> %v, float %elt, i32 0
  ret <vscale x 8 x float> %r
}

define <vscale x 8 x float> @insertelt_nxv8f32_imm(<vscale x 8 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv8f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x float> %v, float %elt, i32 3
  ret <vscale x 8 x float> %r
}

define <vscale x 8 x float> @insertelt_nxv8f32_idx(<vscale x 8 x float> %v, float %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv8f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x float> %v, float %elt, i32 %idx
  ret <vscale x 8 x float> %r
}

define <vscale x 16 x float> @insertelt_nxv16f32_0(<vscale x 16 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv16f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x float> %v, float %elt, i32 0
  ret <vscale x 16 x float> %r
}

define <vscale x 16 x float> @insertelt_nxv16f32_imm(<vscale x 16 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv16f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x float> %v, float %elt, i32 3
  ret <vscale x 16 x float> %r
}

define <vscale x 16 x float> @insertelt_nxv16f32_idx(<vscale x 16 x float> %v, float %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv16f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x float> %v, float %elt, i32 %idx
  ret <vscale x 16 x float> %r
}

define <vscale x 1 x double> @insertelt_nxv1f64_0(<vscale x 1 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv1f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x double> %v, double %elt, i32 0
  ret <vscale x 1 x double> %r
}

define <vscale x 1 x double> @insertelt_nxv1f64_imm(<vscale x 1 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv1f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x double> %v, double %elt, i32 3
  ret <vscale x 1 x double> %r
}

define <vscale x 1 x double> @insertelt_nxv1f64_idx(<vscale x 1 x double> %v, double %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv1f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x double> %v, double %elt, i32 %idx
  ret <vscale x 1 x double> %r
}

define <vscale x 2 x double> @insertelt_nxv2f64_0(<vscale x 2 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv2f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x double> %v, double %elt, i32 0
  ret <vscale x 2 x double> %r
}

define <vscale x 2 x double> @insertelt_nxv2f64_imm(<vscale x 2 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv2f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x double> %v, double %elt, i32 3
  ret <vscale x 2 x double> %r
}

define <vscale x 2 x double> @insertelt_nxv2f64_idx(<vscale x 2 x double> %v, double %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv2f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x double> %v, double %elt, i32 %idx
  ret <vscale x 2 x double> %r
}

define <vscale x 4 x double> @insertelt_nxv4f64_0(<vscale x 4 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv4f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x double> %v, double %elt, i32 0
  ret <vscale x 4 x double> %r
}

define <vscale x 4 x double> @insertelt_nxv4f64_imm(<vscale x 4 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv4f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x double> %v, double %elt, i32 3
  ret <vscale x 4 x double> %r
}

define <vscale x 4 x double> @insertelt_nxv4f64_idx(<vscale x 4 x double> %v, double %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv4f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x double> %v, double %elt, i32 %idx
  ret <vscale x 4 x double> %r
}

define <vscale x 8 x double> @insertelt_nxv8f64_0(<vscale x 8 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv8f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x double> %v, double %elt, i32 0
  ret <vscale x 8 x double> %r
}

define <vscale x 8 x double> @insertelt_nxv8f64_imm(<vscale x 8 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv8f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x double> %v, double %elt, i32 3
  ret <vscale x 8 x double> %r
}

define <vscale x 8 x double> @insertelt_nxv8f64_idx(<vscale x 8 x double> %v, double %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv8f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x double> %v, double %elt, i32 %idx
  ret <vscale x 8 x double> %r
}