1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,NOZFMIN,ZVFH
3 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,NOZFMIN,ZVFH
4 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,NOZFMIN,ZVFHMIN
5 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,NOZFMIN,ZVFHMIN
6 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfhmin,+zfbfmin,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,ZFMIN
7 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zfbfmin,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,ZFMIN
; Scalar extraction of a bfloat element from scalable vectors nxv1bf16..nxv32bf16.
; Three variants per type: `_0` reads lane 0 directly with vmv.x.s, `_imm` first
; slides down by an immediate (vslidedown.vi), `_idx` slides by a register
; (vslidedown.vx).  Without scalar bf16 support (NOZFMIN) the 16-bit value is
; moved to a GPR and its upper bits are set via `lui a1, 1048560` (0xffff0000)
; before fmv.w.x — NaN-boxing the bf16 payload into an f32 register; with
; zfbfmin (ZFMIN) a direct fmv.h.x suffices.
; NOTE(review): each function's trailing `ret` and closing `}` are not visible
; in this excerpt — lines appear to have been elided by the extraction.
9 define bfloat @extractelt_nxv1bf16_0(<vscale x 1 x bfloat> %v) {
10 ; NOZFMIN-LABEL: extractelt_nxv1bf16_0:
12 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
13 ; NOZFMIN-NEXT: vmv.x.s a0, v8
14 ; NOZFMIN-NEXT: lui a1, 1048560
15 ; NOZFMIN-NEXT: or a0, a0, a1
16 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
19 ; ZFMIN-LABEL: extractelt_nxv1bf16_0:
21 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
22 ; ZFMIN-NEXT: vmv.x.s a0, v8
23 ; ZFMIN-NEXT: fmv.h.x fa0, a0
25 %r = extractelement <vscale x 1 x bfloat> %v, i32 0
29 define bfloat @extractelt_nxv1bf16_imm(<vscale x 1 x bfloat> %v) {
30 ; NOZFMIN-LABEL: extractelt_nxv1bf16_imm:
32 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
33 ; NOZFMIN-NEXT: vslidedown.vi v8, v8, 2
34 ; NOZFMIN-NEXT: vmv.x.s a0, v8
35 ; NOZFMIN-NEXT: lui a1, 1048560
36 ; NOZFMIN-NEXT: or a0, a0, a1
37 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
40 ; ZFMIN-LABEL: extractelt_nxv1bf16_imm:
42 ; ZFMIN-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
43 ; ZFMIN-NEXT: vslidedown.vi v8, v8, 2
44 ; ZFMIN-NEXT: vmv.x.s a0, v8
45 ; ZFMIN-NEXT: fmv.h.x fa0, a0
47 %r = extractelement <vscale x 1 x bfloat> %v, i32 2
51 define bfloat @extractelt_nxv1bf16_idx(<vscale x 1 x bfloat> %v, i32 zeroext %idx) {
52 ; NOZFMIN-LABEL: extractelt_nxv1bf16_idx:
54 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
55 ; NOZFMIN-NEXT: vslidedown.vx v8, v8, a0
56 ; NOZFMIN-NEXT: vmv.x.s a0, v8
57 ; NOZFMIN-NEXT: lui a1, 1048560
58 ; NOZFMIN-NEXT: or a0, a0, a1
59 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
62 ; ZFMIN-LABEL: extractelt_nxv1bf16_idx:
64 ; ZFMIN-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
65 ; ZFMIN-NEXT: vslidedown.vx v8, v8, a0
66 ; ZFMIN-NEXT: vmv.x.s a0, v8
67 ; ZFMIN-NEXT: fmv.h.x fa0, a0
69 %r = extractelement <vscale x 1 x bfloat> %v, i32 %idx
73 define bfloat @extractelt_nxv2bf16_0(<vscale x 2 x bfloat> %v) {
74 ; NOZFMIN-LABEL: extractelt_nxv2bf16_0:
76 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
77 ; NOZFMIN-NEXT: vmv.x.s a0, v8
78 ; NOZFMIN-NEXT: lui a1, 1048560
79 ; NOZFMIN-NEXT: or a0, a0, a1
80 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
83 ; ZFMIN-LABEL: extractelt_nxv2bf16_0:
85 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
86 ; ZFMIN-NEXT: vmv.x.s a0, v8
87 ; ZFMIN-NEXT: fmv.h.x fa0, a0
89 %r = extractelement <vscale x 2 x bfloat> %v, i32 0
93 define bfloat @extractelt_nxv2bf16_imm(<vscale x 2 x bfloat> %v) {
94 ; NOZFMIN-LABEL: extractelt_nxv2bf16_imm:
96 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
97 ; NOZFMIN-NEXT: vslidedown.vi v8, v8, 2
98 ; NOZFMIN-NEXT: vmv.x.s a0, v8
99 ; NOZFMIN-NEXT: lui a1, 1048560
100 ; NOZFMIN-NEXT: or a0, a0, a1
101 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
104 ; ZFMIN-LABEL: extractelt_nxv2bf16_imm:
106 ; ZFMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
107 ; ZFMIN-NEXT: vslidedown.vi v8, v8, 2
108 ; ZFMIN-NEXT: vmv.x.s a0, v8
109 ; ZFMIN-NEXT: fmv.h.x fa0, a0
111 %r = extractelement <vscale x 2 x bfloat> %v, i32 2
115 define bfloat @extractelt_nxv2bf16_idx(<vscale x 2 x bfloat> %v, i32 zeroext %idx) {
116 ; NOZFMIN-LABEL: extractelt_nxv2bf16_idx:
118 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
119 ; NOZFMIN-NEXT: vslidedown.vx v8, v8, a0
120 ; NOZFMIN-NEXT: vmv.x.s a0, v8
121 ; NOZFMIN-NEXT: lui a1, 1048560
122 ; NOZFMIN-NEXT: or a0, a0, a1
123 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
126 ; ZFMIN-LABEL: extractelt_nxv2bf16_idx:
128 ; ZFMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
129 ; ZFMIN-NEXT: vslidedown.vx v8, v8, a0
130 ; ZFMIN-NEXT: vmv.x.s a0, v8
131 ; ZFMIN-NEXT: fmv.h.x fa0, a0
133 %r = extractelement <vscale x 2 x bfloat> %v, i32 %idx
137 define bfloat @extractelt_nxv4bf16_0(<vscale x 4 x bfloat> %v) {
138 ; NOZFMIN-LABEL: extractelt_nxv4bf16_0:
140 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
141 ; NOZFMIN-NEXT: vmv.x.s a0, v8
142 ; NOZFMIN-NEXT: lui a1, 1048560
143 ; NOZFMIN-NEXT: or a0, a0, a1
144 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
147 ; ZFMIN-LABEL: extractelt_nxv4bf16_0:
149 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
150 ; ZFMIN-NEXT: vmv.x.s a0, v8
151 ; ZFMIN-NEXT: fmv.h.x fa0, a0
153 %r = extractelement <vscale x 4 x bfloat> %v, i32 0
157 define bfloat @extractelt_nxv4bf16_imm(<vscale x 4 x bfloat> %v) {
158 ; NOZFMIN-LABEL: extractelt_nxv4bf16_imm:
160 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
161 ; NOZFMIN-NEXT: vslidedown.vi v8, v8, 2
162 ; NOZFMIN-NEXT: vmv.x.s a0, v8
163 ; NOZFMIN-NEXT: lui a1, 1048560
164 ; NOZFMIN-NEXT: or a0, a0, a1
165 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
168 ; ZFMIN-LABEL: extractelt_nxv4bf16_imm:
170 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
171 ; ZFMIN-NEXT: vslidedown.vi v8, v8, 2
172 ; ZFMIN-NEXT: vmv.x.s a0, v8
173 ; ZFMIN-NEXT: fmv.h.x fa0, a0
175 %r = extractelement <vscale x 4 x bfloat> %v, i32 2
179 define bfloat @extractelt_nxv4bf16_idx(<vscale x 4 x bfloat> %v, i32 zeroext %idx) {
180 ; NOZFMIN-LABEL: extractelt_nxv4bf16_idx:
182 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
183 ; NOZFMIN-NEXT: vslidedown.vx v8, v8, a0
184 ; NOZFMIN-NEXT: vmv.x.s a0, v8
185 ; NOZFMIN-NEXT: lui a1, 1048560
186 ; NOZFMIN-NEXT: or a0, a0, a1
187 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
190 ; ZFMIN-LABEL: extractelt_nxv4bf16_idx:
192 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
193 ; ZFMIN-NEXT: vslidedown.vx v8, v8, a0
194 ; ZFMIN-NEXT: vmv.x.s a0, v8
195 ; ZFMIN-NEXT: fmv.h.x fa0, a0
197 %r = extractelement <vscale x 4 x bfloat> %v, i32 %idx
201 define bfloat @extractelt_nxv8bf16_0(<vscale x 8 x bfloat> %v) {
202 ; NOZFMIN-LABEL: extractelt_nxv8bf16_0:
204 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
205 ; NOZFMIN-NEXT: vmv.x.s a0, v8
206 ; NOZFMIN-NEXT: lui a1, 1048560
207 ; NOZFMIN-NEXT: or a0, a0, a1
208 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
211 ; ZFMIN-LABEL: extractelt_nxv8bf16_0:
213 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
214 ; ZFMIN-NEXT: vmv.x.s a0, v8
215 ; ZFMIN-NEXT: fmv.h.x fa0, a0
217 %r = extractelement <vscale x 8 x bfloat> %v, i32 0
221 define bfloat @extractelt_nxv8bf16_imm(<vscale x 8 x bfloat> %v) {
222 ; NOZFMIN-LABEL: extractelt_nxv8bf16_imm:
224 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
225 ; NOZFMIN-NEXT: vslidedown.vi v8, v8, 2
226 ; NOZFMIN-NEXT: vmv.x.s a0, v8
227 ; NOZFMIN-NEXT: lui a1, 1048560
228 ; NOZFMIN-NEXT: or a0, a0, a1
229 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
232 ; ZFMIN-LABEL: extractelt_nxv8bf16_imm:
234 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
235 ; ZFMIN-NEXT: vslidedown.vi v8, v8, 2
236 ; ZFMIN-NEXT: vmv.x.s a0, v8
237 ; ZFMIN-NEXT: fmv.h.x fa0, a0
239 %r = extractelement <vscale x 8 x bfloat> %v, i32 2
243 define bfloat @extractelt_nxv8bf16_idx(<vscale x 8 x bfloat> %v, i32 zeroext %idx) {
244 ; NOZFMIN-LABEL: extractelt_nxv8bf16_idx:
246 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma
247 ; NOZFMIN-NEXT: vslidedown.vx v8, v8, a0
248 ; NOZFMIN-NEXT: vmv.x.s a0, v8
249 ; NOZFMIN-NEXT: lui a1, 1048560
250 ; NOZFMIN-NEXT: or a0, a0, a1
251 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
254 ; ZFMIN-LABEL: extractelt_nxv8bf16_idx:
256 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma
257 ; ZFMIN-NEXT: vslidedown.vx v8, v8, a0
258 ; ZFMIN-NEXT: vmv.x.s a0, v8
259 ; ZFMIN-NEXT: fmv.h.x fa0, a0
261 %r = extractelement <vscale x 8 x bfloat> %v, i32 %idx
265 define bfloat @extractelt_nxv16bf16_0(<vscale x 16 x bfloat> %v) {
266 ; NOZFMIN-LABEL: extractelt_nxv16bf16_0:
268 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
269 ; NOZFMIN-NEXT: vmv.x.s a0, v8
270 ; NOZFMIN-NEXT: lui a1, 1048560
271 ; NOZFMIN-NEXT: or a0, a0, a1
272 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
275 ; ZFMIN-LABEL: extractelt_nxv16bf16_0:
277 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
278 ; ZFMIN-NEXT: vmv.x.s a0, v8
279 ; ZFMIN-NEXT: fmv.h.x fa0, a0
281 %r = extractelement <vscale x 16 x bfloat> %v, i32 0
285 define bfloat @extractelt_nxv16bf16_imm(<vscale x 16 x bfloat> %v) {
286 ; NOZFMIN-LABEL: extractelt_nxv16bf16_imm:
288 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
289 ; NOZFMIN-NEXT: vslidedown.vi v8, v8, 2
290 ; NOZFMIN-NEXT: vmv.x.s a0, v8
291 ; NOZFMIN-NEXT: lui a1, 1048560
292 ; NOZFMIN-NEXT: or a0, a0, a1
293 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
296 ; ZFMIN-LABEL: extractelt_nxv16bf16_imm:
298 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
299 ; ZFMIN-NEXT: vslidedown.vi v8, v8, 2
300 ; ZFMIN-NEXT: vmv.x.s a0, v8
301 ; ZFMIN-NEXT: fmv.h.x fa0, a0
303 %r = extractelement <vscale x 16 x bfloat> %v, i32 2
307 define bfloat @extractelt_nxv16bf16_idx(<vscale x 16 x bfloat> %v, i32 zeroext %idx) {
308 ; NOZFMIN-LABEL: extractelt_nxv16bf16_idx:
310 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m4, ta, ma
311 ; NOZFMIN-NEXT: vslidedown.vx v8, v8, a0
312 ; NOZFMIN-NEXT: vmv.x.s a0, v8
313 ; NOZFMIN-NEXT: lui a1, 1048560
314 ; NOZFMIN-NEXT: or a0, a0, a1
315 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
318 ; ZFMIN-LABEL: extractelt_nxv16bf16_idx:
320 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m4, ta, ma
321 ; ZFMIN-NEXT: vslidedown.vx v8, v8, a0
322 ; ZFMIN-NEXT: vmv.x.s a0, v8
323 ; ZFMIN-NEXT: fmv.h.x fa0, a0
325 %r = extractelement <vscale x 16 x bfloat> %v, i32 %idx
329 define bfloat @extractelt_nxv32bf16_0(<vscale x 32 x bfloat> %v) {
330 ; NOZFMIN-LABEL: extractelt_nxv32bf16_0:
332 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
333 ; NOZFMIN-NEXT: vmv.x.s a0, v8
334 ; NOZFMIN-NEXT: lui a1, 1048560
335 ; NOZFMIN-NEXT: or a0, a0, a1
336 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
339 ; ZFMIN-LABEL: extractelt_nxv32bf16_0:
341 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
342 ; ZFMIN-NEXT: vmv.x.s a0, v8
343 ; ZFMIN-NEXT: fmv.h.x fa0, a0
345 %r = extractelement <vscale x 32 x bfloat> %v, i32 0
349 define bfloat @extractelt_nxv32bf16_imm(<vscale x 32 x bfloat> %v) {
350 ; NOZFMIN-LABEL: extractelt_nxv32bf16_imm:
352 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
353 ; NOZFMIN-NEXT: vslidedown.vi v8, v8, 2
354 ; NOZFMIN-NEXT: vmv.x.s a0, v8
355 ; NOZFMIN-NEXT: lui a1, 1048560
356 ; NOZFMIN-NEXT: or a0, a0, a1
357 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
360 ; ZFMIN-LABEL: extractelt_nxv32bf16_imm:
362 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
363 ; ZFMIN-NEXT: vslidedown.vi v8, v8, 2
364 ; ZFMIN-NEXT: vmv.x.s a0, v8
365 ; ZFMIN-NEXT: fmv.h.x fa0, a0
367 %r = extractelement <vscale x 32 x bfloat> %v, i32 2
371 define bfloat @extractelt_nxv32bf16_idx(<vscale x 32 x bfloat> %v, i32 zeroext %idx) {
372 ; NOZFMIN-LABEL: extractelt_nxv32bf16_idx:
374 ; NOZFMIN-NEXT: vsetivli zero, 1, e16, m8, ta, ma
375 ; NOZFMIN-NEXT: vslidedown.vx v8, v8, a0
376 ; NOZFMIN-NEXT: vmv.x.s a0, v8
377 ; NOZFMIN-NEXT: lui a1, 1048560
378 ; NOZFMIN-NEXT: or a0, a0, a1
379 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
382 ; ZFMIN-LABEL: extractelt_nxv32bf16_idx:
384 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m8, ta, ma
385 ; ZFMIN-NEXT: vslidedown.vx v8, v8, a0
386 ; ZFMIN-NEXT: vmv.x.s a0, v8
387 ; ZFMIN-NEXT: fmv.h.x fa0, a0
389 %r = extractelement <vscale x 32 x bfloat> %v, i32 %idx
; Scalar extraction of a half element from scalable vectors nxv1f16..nxv32f16,
; same `_0` / `_imm` (vslidedown.vi) / `_idx` (vslidedown.vx) pattern as the
; bfloat tests.  With zvfh (ZVFH) a single vfmv.f.s suffices; with only
; zvfhmin (ZVFHMIN) the element is NaN-boxed through a GPR
; (`lui a1, 1048560` = 0xffff0000, then or + fmv.w.x); with scalar zfhmin
; (ZFMIN) fmv.h.x moves it directly.
; NOTE(review): `ret` lines and closing braces are elided in this excerpt.
393 define half @extractelt_nxv1f16_0(<vscale x 1 x half> %v) {
394 ; ZVFH-LABEL: extractelt_nxv1f16_0:
396 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
397 ; ZVFH-NEXT: vfmv.f.s fa0, v8
400 ; ZVFHMIN-LABEL: extractelt_nxv1f16_0:
402 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
403 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
404 ; ZVFHMIN-NEXT: lui a1, 1048560
405 ; ZVFHMIN-NEXT: or a0, a0, a1
406 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
409 ; ZFMIN-LABEL: extractelt_nxv1f16_0:
411 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
412 ; ZFMIN-NEXT: vmv.x.s a0, v8
413 ; ZFMIN-NEXT: fmv.h.x fa0, a0
415 %r = extractelement <vscale x 1 x half> %v, i32 0
419 define half @extractelt_nxv1f16_imm(<vscale x 1 x half> %v) {
420 ; ZVFH-LABEL: extractelt_nxv1f16_imm:
422 ; ZVFH-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
423 ; ZVFH-NEXT: vslidedown.vi v8, v8, 2
424 ; ZVFH-NEXT: vfmv.f.s fa0, v8
427 ; ZVFHMIN-LABEL: extractelt_nxv1f16_imm:
429 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
430 ; ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2
431 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
432 ; ZVFHMIN-NEXT: lui a1, 1048560
433 ; ZVFHMIN-NEXT: or a0, a0, a1
434 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
437 ; ZFMIN-LABEL: extractelt_nxv1f16_imm:
439 ; ZFMIN-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
440 ; ZFMIN-NEXT: vslidedown.vi v8, v8, 2
441 ; ZFMIN-NEXT: vmv.x.s a0, v8
442 ; ZFMIN-NEXT: fmv.h.x fa0, a0
444 %r = extractelement <vscale x 1 x half> %v, i32 2
448 define half @extractelt_nxv1f16_idx(<vscale x 1 x half> %v, i32 zeroext %idx) {
449 ; ZVFH-LABEL: extractelt_nxv1f16_idx:
451 ; ZVFH-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
452 ; ZVFH-NEXT: vslidedown.vx v8, v8, a0
453 ; ZVFH-NEXT: vfmv.f.s fa0, v8
456 ; ZVFHMIN-LABEL: extractelt_nxv1f16_idx:
458 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
459 ; ZVFHMIN-NEXT: vslidedown.vx v8, v8, a0
460 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
461 ; ZVFHMIN-NEXT: lui a1, 1048560
462 ; ZVFHMIN-NEXT: or a0, a0, a1
463 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
466 ; ZFMIN-LABEL: extractelt_nxv1f16_idx:
468 ; ZFMIN-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
469 ; ZFMIN-NEXT: vslidedown.vx v8, v8, a0
470 ; ZFMIN-NEXT: vmv.x.s a0, v8
471 ; ZFMIN-NEXT: fmv.h.x fa0, a0
473 %r = extractelement <vscale x 1 x half> %v, i32 %idx
477 define half @extractelt_nxv2f16_0(<vscale x 2 x half> %v) {
478 ; ZVFH-LABEL: extractelt_nxv2f16_0:
480 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
481 ; ZVFH-NEXT: vfmv.f.s fa0, v8
484 ; ZVFHMIN-LABEL: extractelt_nxv2f16_0:
486 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
487 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
488 ; ZVFHMIN-NEXT: lui a1, 1048560
489 ; ZVFHMIN-NEXT: or a0, a0, a1
490 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
493 ; ZFMIN-LABEL: extractelt_nxv2f16_0:
495 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
496 ; ZFMIN-NEXT: vmv.x.s a0, v8
497 ; ZFMIN-NEXT: fmv.h.x fa0, a0
499 %r = extractelement <vscale x 2 x half> %v, i32 0
503 define half @extractelt_nxv2f16_imm(<vscale x 2 x half> %v) {
504 ; ZVFH-LABEL: extractelt_nxv2f16_imm:
506 ; ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
507 ; ZVFH-NEXT: vslidedown.vi v8, v8, 2
508 ; ZVFH-NEXT: vfmv.f.s fa0, v8
511 ; ZVFHMIN-LABEL: extractelt_nxv2f16_imm:
513 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
514 ; ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2
515 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
516 ; ZVFHMIN-NEXT: lui a1, 1048560
517 ; ZVFHMIN-NEXT: or a0, a0, a1
518 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
521 ; ZFMIN-LABEL: extractelt_nxv2f16_imm:
523 ; ZFMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
524 ; ZFMIN-NEXT: vslidedown.vi v8, v8, 2
525 ; ZFMIN-NEXT: vmv.x.s a0, v8
526 ; ZFMIN-NEXT: fmv.h.x fa0, a0
528 %r = extractelement <vscale x 2 x half> %v, i32 2
532 define half @extractelt_nxv2f16_idx(<vscale x 2 x half> %v, i32 zeroext %idx) {
533 ; ZVFH-LABEL: extractelt_nxv2f16_idx:
535 ; ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
536 ; ZVFH-NEXT: vslidedown.vx v8, v8, a0
537 ; ZVFH-NEXT: vfmv.f.s fa0, v8
540 ; ZVFHMIN-LABEL: extractelt_nxv2f16_idx:
542 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
543 ; ZVFHMIN-NEXT: vslidedown.vx v8, v8, a0
544 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
545 ; ZVFHMIN-NEXT: lui a1, 1048560
546 ; ZVFHMIN-NEXT: or a0, a0, a1
547 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
550 ; ZFMIN-LABEL: extractelt_nxv2f16_idx:
552 ; ZFMIN-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
553 ; ZFMIN-NEXT: vslidedown.vx v8, v8, a0
554 ; ZFMIN-NEXT: vmv.x.s a0, v8
555 ; ZFMIN-NEXT: fmv.h.x fa0, a0
557 %r = extractelement <vscale x 2 x half> %v, i32 %idx
561 define half @extractelt_nxv4f16_0(<vscale x 4 x half> %v) {
562 ; ZVFH-LABEL: extractelt_nxv4f16_0:
564 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
565 ; ZVFH-NEXT: vfmv.f.s fa0, v8
568 ; ZVFHMIN-LABEL: extractelt_nxv4f16_0:
570 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
571 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
572 ; ZVFHMIN-NEXT: lui a1, 1048560
573 ; ZVFHMIN-NEXT: or a0, a0, a1
574 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
577 ; ZFMIN-LABEL: extractelt_nxv4f16_0:
579 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
580 ; ZFMIN-NEXT: vmv.x.s a0, v8
581 ; ZFMIN-NEXT: fmv.h.x fa0, a0
583 %r = extractelement <vscale x 4 x half> %v, i32 0
587 define half @extractelt_nxv4f16_imm(<vscale x 4 x half> %v) {
588 ; ZVFH-LABEL: extractelt_nxv4f16_imm:
590 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
591 ; ZVFH-NEXT: vslidedown.vi v8, v8, 2
592 ; ZVFH-NEXT: vfmv.f.s fa0, v8
595 ; ZVFHMIN-LABEL: extractelt_nxv4f16_imm:
597 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
598 ; ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2
599 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
600 ; ZVFHMIN-NEXT: lui a1, 1048560
601 ; ZVFHMIN-NEXT: or a0, a0, a1
602 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
605 ; ZFMIN-LABEL: extractelt_nxv4f16_imm:
607 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
608 ; ZFMIN-NEXT: vslidedown.vi v8, v8, 2
609 ; ZFMIN-NEXT: vmv.x.s a0, v8
610 ; ZFMIN-NEXT: fmv.h.x fa0, a0
612 %r = extractelement <vscale x 4 x half> %v, i32 2
616 define half @extractelt_nxv4f16_idx(<vscale x 4 x half> %v, i32 zeroext %idx) {
617 ; ZVFH-LABEL: extractelt_nxv4f16_idx:
619 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
620 ; ZVFH-NEXT: vslidedown.vx v8, v8, a0
621 ; ZVFH-NEXT: vfmv.f.s fa0, v8
624 ; ZVFHMIN-LABEL: extractelt_nxv4f16_idx:
626 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
627 ; ZVFHMIN-NEXT: vslidedown.vx v8, v8, a0
628 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
629 ; ZVFHMIN-NEXT: lui a1, 1048560
630 ; ZVFHMIN-NEXT: or a0, a0, a1
631 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
634 ; ZFMIN-LABEL: extractelt_nxv4f16_idx:
636 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
637 ; ZFMIN-NEXT: vslidedown.vx v8, v8, a0
638 ; ZFMIN-NEXT: vmv.x.s a0, v8
639 ; ZFMIN-NEXT: fmv.h.x fa0, a0
641 %r = extractelement <vscale x 4 x half> %v, i32 %idx
645 define half @extractelt_nxv8f16_0(<vscale x 8 x half> %v) {
646 ; ZVFH-LABEL: extractelt_nxv8f16_0:
648 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
649 ; ZVFH-NEXT: vfmv.f.s fa0, v8
652 ; ZVFHMIN-LABEL: extractelt_nxv8f16_0:
654 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
655 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
656 ; ZVFHMIN-NEXT: lui a1, 1048560
657 ; ZVFHMIN-NEXT: or a0, a0, a1
658 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
661 ; ZFMIN-LABEL: extractelt_nxv8f16_0:
663 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
664 ; ZFMIN-NEXT: vmv.x.s a0, v8
665 ; ZFMIN-NEXT: fmv.h.x fa0, a0
667 %r = extractelement <vscale x 8 x half> %v, i32 0
671 define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
672 ; ZVFH-LABEL: extractelt_nxv8f16_imm:
674 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
675 ; ZVFH-NEXT: vslidedown.vi v8, v8, 2
676 ; ZVFH-NEXT: vfmv.f.s fa0, v8
679 ; ZVFHMIN-LABEL: extractelt_nxv8f16_imm:
681 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
682 ; ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2
683 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
684 ; ZVFHMIN-NEXT: lui a1, 1048560
685 ; ZVFHMIN-NEXT: or a0, a0, a1
686 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
689 ; ZFMIN-LABEL: extractelt_nxv8f16_imm:
691 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
692 ; ZFMIN-NEXT: vslidedown.vi v8, v8, 2
693 ; ZFMIN-NEXT: vmv.x.s a0, v8
694 ; ZFMIN-NEXT: fmv.h.x fa0, a0
696 %r = extractelement <vscale x 8 x half> %v, i32 2
700 define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 zeroext %idx) {
701 ; ZVFH-LABEL: extractelt_nxv8f16_idx:
703 ; ZVFH-NEXT: vsetivli zero, 1, e16, m2, ta, ma
704 ; ZVFH-NEXT: vslidedown.vx v8, v8, a0
705 ; ZVFH-NEXT: vfmv.f.s fa0, v8
708 ; ZVFHMIN-LABEL: extractelt_nxv8f16_idx:
710 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma
711 ; ZVFHMIN-NEXT: vslidedown.vx v8, v8, a0
712 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
713 ; ZVFHMIN-NEXT: lui a1, 1048560
714 ; ZVFHMIN-NEXT: or a0, a0, a1
715 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
718 ; ZFMIN-LABEL: extractelt_nxv8f16_idx:
720 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma
721 ; ZFMIN-NEXT: vslidedown.vx v8, v8, a0
722 ; ZFMIN-NEXT: vmv.x.s a0, v8
723 ; ZFMIN-NEXT: fmv.h.x fa0, a0
725 %r = extractelement <vscale x 8 x half> %v, i32 %idx
729 define half @extractelt_nxv16f16_0(<vscale x 16 x half> %v) {
730 ; ZVFH-LABEL: extractelt_nxv16f16_0:
732 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
733 ; ZVFH-NEXT: vfmv.f.s fa0, v8
736 ; ZVFHMIN-LABEL: extractelt_nxv16f16_0:
738 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
739 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
740 ; ZVFHMIN-NEXT: lui a1, 1048560
741 ; ZVFHMIN-NEXT: or a0, a0, a1
742 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
745 ; ZFMIN-LABEL: extractelt_nxv16f16_0:
747 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
748 ; ZFMIN-NEXT: vmv.x.s a0, v8
749 ; ZFMIN-NEXT: fmv.h.x fa0, a0
751 %r = extractelement <vscale x 16 x half> %v, i32 0
755 define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
756 ; ZVFH-LABEL: extractelt_nxv16f16_imm:
758 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
759 ; ZVFH-NEXT: vslidedown.vi v8, v8, 2
760 ; ZVFH-NEXT: vfmv.f.s fa0, v8
763 ; ZVFHMIN-LABEL: extractelt_nxv16f16_imm:
765 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
766 ; ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2
767 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
768 ; ZVFHMIN-NEXT: lui a1, 1048560
769 ; ZVFHMIN-NEXT: or a0, a0, a1
770 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
773 ; ZFMIN-LABEL: extractelt_nxv16f16_imm:
775 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
776 ; ZFMIN-NEXT: vslidedown.vi v8, v8, 2
777 ; ZFMIN-NEXT: vmv.x.s a0, v8
778 ; ZFMIN-NEXT: fmv.h.x fa0, a0
780 %r = extractelement <vscale x 16 x half> %v, i32 2
784 define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 zeroext %idx) {
785 ; ZVFH-LABEL: extractelt_nxv16f16_idx:
787 ; ZVFH-NEXT: vsetivli zero, 1, e16, m4, ta, ma
788 ; ZVFH-NEXT: vslidedown.vx v8, v8, a0
789 ; ZVFH-NEXT: vfmv.f.s fa0, v8
792 ; ZVFHMIN-LABEL: extractelt_nxv16f16_idx:
794 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m4, ta, ma
795 ; ZVFHMIN-NEXT: vslidedown.vx v8, v8, a0
796 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
797 ; ZVFHMIN-NEXT: lui a1, 1048560
798 ; ZVFHMIN-NEXT: or a0, a0, a1
799 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
802 ; ZFMIN-LABEL: extractelt_nxv16f16_idx:
804 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m4, ta, ma
805 ; ZFMIN-NEXT: vslidedown.vx v8, v8, a0
806 ; ZFMIN-NEXT: vmv.x.s a0, v8
807 ; ZFMIN-NEXT: fmv.h.x fa0, a0
809 %r = extractelement <vscale x 16 x half> %v, i32 %idx
813 define half @extractelt_nxv32f16_0(<vscale x 32 x half> %v) {
814 ; ZVFH-LABEL: extractelt_nxv32f16_0:
816 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
817 ; ZVFH-NEXT: vfmv.f.s fa0, v8
820 ; ZVFHMIN-LABEL: extractelt_nxv32f16_0:
822 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
823 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
824 ; ZVFHMIN-NEXT: lui a1, 1048560
825 ; ZVFHMIN-NEXT: or a0, a0, a1
826 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
829 ; ZFMIN-LABEL: extractelt_nxv32f16_0:
831 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
832 ; ZFMIN-NEXT: vmv.x.s a0, v8
833 ; ZFMIN-NEXT: fmv.h.x fa0, a0
835 %r = extractelement <vscale x 32 x half> %v, i32 0
839 define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
840 ; ZVFH-LABEL: extractelt_nxv32f16_imm:
842 ; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
843 ; ZVFH-NEXT: vslidedown.vi v8, v8, 2
844 ; ZVFH-NEXT: vfmv.f.s fa0, v8
847 ; ZVFHMIN-LABEL: extractelt_nxv32f16_imm:
849 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
850 ; ZVFHMIN-NEXT: vslidedown.vi v8, v8, 2
851 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
852 ; ZVFHMIN-NEXT: lui a1, 1048560
853 ; ZVFHMIN-NEXT: or a0, a0, a1
854 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
857 ; ZFMIN-LABEL: extractelt_nxv32f16_imm:
859 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
860 ; ZFMIN-NEXT: vslidedown.vi v8, v8, 2
861 ; ZFMIN-NEXT: vmv.x.s a0, v8
862 ; ZFMIN-NEXT: fmv.h.x fa0, a0
864 %r = extractelement <vscale x 32 x half> %v, i32 2
868 define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 zeroext %idx) {
869 ; ZVFH-LABEL: extractelt_nxv32f16_idx:
871 ; ZVFH-NEXT: vsetivli zero, 1, e16, m8, ta, ma
872 ; ZVFH-NEXT: vslidedown.vx v8, v8, a0
873 ; ZVFH-NEXT: vfmv.f.s fa0, v8
876 ; ZVFHMIN-LABEL: extractelt_nxv32f16_idx:
878 ; ZVFHMIN-NEXT: vsetivli zero, 1, e16, m8, ta, ma
879 ; ZVFHMIN-NEXT: vslidedown.vx v8, v8, a0
880 ; ZVFHMIN-NEXT: vmv.x.s a0, v8
881 ; ZVFHMIN-NEXT: lui a1, 1048560
882 ; ZVFHMIN-NEXT: or a0, a0, a1
883 ; ZVFHMIN-NEXT: fmv.w.x fa0, a0
886 ; ZFMIN-LABEL: extractelt_nxv32f16_idx:
888 ; ZFMIN-NEXT: vsetivli zero, 1, e16, m8, ta, ma
889 ; ZFMIN-NEXT: vslidedown.vx v8, v8, a0
890 ; ZFMIN-NEXT: vmv.x.s a0, v8
891 ; ZFMIN-NEXT: fmv.h.x fa0, a0
893 %r = extractelement <vscale x 32 x half> %v, i32 %idx
; Scalar extraction of a float element from scalable vectors nxv1f32..nxv16f32.
; All RUN configurations include the F extension, so a single CHECK prefix
; applies: vfmv.f.s reads lane 0 directly, preceded by vslidedown.vi/.vx for
; the `_imm`/`_idx` variants.
; NOTE(review): `ret` lines and closing braces are elided in this excerpt.
897 define float @extractelt_nxv1f32_0(<vscale x 1 x float> %v) {
898 ; CHECK-LABEL: extractelt_nxv1f32_0:
900 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
901 ; CHECK-NEXT: vfmv.f.s fa0, v8
903 %r = extractelement <vscale x 1 x float> %v, i32 0
907 define float @extractelt_nxv1f32_imm(<vscale x 1 x float> %v) {
908 ; CHECK-LABEL: extractelt_nxv1f32_imm:
910 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
911 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
912 ; CHECK-NEXT: vfmv.f.s fa0, v8
914 %r = extractelement <vscale x 1 x float> %v, i32 2
918 define float @extractelt_nxv1f32_idx(<vscale x 1 x float> %v, i32 zeroext %idx) {
919 ; CHECK-LABEL: extractelt_nxv1f32_idx:
921 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
922 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
923 ; CHECK-NEXT: vfmv.f.s fa0, v8
925 %r = extractelement <vscale x 1 x float> %v, i32 %idx
929 define float @extractelt_nxv2f32_0(<vscale x 2 x float> %v) {
930 ; CHECK-LABEL: extractelt_nxv2f32_0:
932 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
933 ; CHECK-NEXT: vfmv.f.s fa0, v8
935 %r = extractelement <vscale x 2 x float> %v, i32 0
939 define float @extractelt_nxv2f32_imm(<vscale x 2 x float> %v) {
940 ; CHECK-LABEL: extractelt_nxv2f32_imm:
942 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
943 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
944 ; CHECK-NEXT: vfmv.f.s fa0, v8
946 %r = extractelement <vscale x 2 x float> %v, i32 2
950 define float @extractelt_nxv2f32_idx(<vscale x 2 x float> %v, i32 zeroext %idx) {
951 ; CHECK-LABEL: extractelt_nxv2f32_idx:
953 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
954 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
955 ; CHECK-NEXT: vfmv.f.s fa0, v8
957 %r = extractelement <vscale x 2 x float> %v, i32 %idx
961 define float @extractelt_nxv4f32_0(<vscale x 4 x float> %v) {
962 ; CHECK-LABEL: extractelt_nxv4f32_0:
964 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
965 ; CHECK-NEXT: vfmv.f.s fa0, v8
967 %r = extractelement <vscale x 4 x float> %v, i32 0
971 define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
972 ; CHECK-LABEL: extractelt_nxv4f32_imm:
974 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
975 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
976 ; CHECK-NEXT: vfmv.f.s fa0, v8
978 %r = extractelement <vscale x 4 x float> %v, i32 2
982 define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 zeroext %idx) {
983 ; CHECK-LABEL: extractelt_nxv4f32_idx:
985 ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
986 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
987 ; CHECK-NEXT: vfmv.f.s fa0, v8
989 %r = extractelement <vscale x 4 x float> %v, i32 %idx
993 define float @extractelt_nxv8f32_0(<vscale x 8 x float> %v) {
994 ; CHECK-LABEL: extractelt_nxv8f32_0:
996 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
997 ; CHECK-NEXT: vfmv.f.s fa0, v8
999 %r = extractelement <vscale x 8 x float> %v, i32 0
1003 define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
1004 ; CHECK-LABEL: extractelt_nxv8f32_imm:
1006 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
1007 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
1008 ; CHECK-NEXT: vfmv.f.s fa0, v8
1010 %r = extractelement <vscale x 8 x float> %v, i32 2
1014 define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 zeroext %idx) {
1015 ; CHECK-LABEL: extractelt_nxv8f32_idx:
1017 ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma
1018 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1019 ; CHECK-NEXT: vfmv.f.s fa0, v8
1021 %r = extractelement <vscale x 8 x float> %v, i32 %idx
1025 define float @extractelt_nxv16f32_0(<vscale x 16 x float> %v) {
1026 ; CHECK-LABEL: extractelt_nxv16f32_0:
1028 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
1029 ; CHECK-NEXT: vfmv.f.s fa0, v8
1031 %r = extractelement <vscale x 16 x float> %v, i32 0
1035 define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
1036 ; CHECK-LABEL: extractelt_nxv16f32_imm:
1038 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
1039 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
1040 ; CHECK-NEXT: vfmv.f.s fa0, v8
1042 %r = extractelement <vscale x 16 x float> %v, i32 2
1046 define float @extractelt_nxv16f32_idx(<vscale x 16 x float> %v, i32 zeroext %idx) {
1047 ; CHECK-LABEL: extractelt_nxv16f32_idx:
1049 ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
1050 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1051 ; CHECK-NEXT: vfmv.f.s fa0, v8
1053 %r = extractelement <vscale x 16 x float> %v, i32 %idx
1057 define double @extractelt_nxv1f64_0(<vscale x 1 x double> %v) {
1058 ; CHECK-LABEL: extractelt_nxv1f64_0:
1060 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1061 ; CHECK-NEXT: vfmv.f.s fa0, v8
1063 %r = extractelement <vscale x 1 x double> %v, i32 0
1067 define double @extractelt_nxv1f64_imm(<vscale x 1 x double> %v) {
1068 ; CHECK-LABEL: extractelt_nxv1f64_imm:
1070 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1071 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
1072 ; CHECK-NEXT: vfmv.f.s fa0, v8
1074 %r = extractelement <vscale x 1 x double> %v, i32 2
1078 define double @extractelt_nxv1f64_idx(<vscale x 1 x double> %v, i32 zeroext %idx) {
1079 ; CHECK-LABEL: extractelt_nxv1f64_idx:
1081 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1082 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1083 ; CHECK-NEXT: vfmv.f.s fa0, v8
1085 %r = extractelement <vscale x 1 x double> %v, i32 %idx
1089 define double @extractelt_nxv2f64_0(<vscale x 2 x double> %v) {
1090 ; CHECK-LABEL: extractelt_nxv2f64_0:
1092 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1093 ; CHECK-NEXT: vfmv.f.s fa0, v8
1095 %r = extractelement <vscale x 2 x double> %v, i32 0
1099 define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
1100 ; CHECK-LABEL: extractelt_nxv2f64_imm:
1102 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
1103 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
1104 ; CHECK-NEXT: vfmv.f.s fa0, v8
1106 %r = extractelement <vscale x 2 x double> %v, i32 2
1110 define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 zeroext %idx) {
1111 ; CHECK-LABEL: extractelt_nxv2f64_idx:
1113 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
1114 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1115 ; CHECK-NEXT: vfmv.f.s fa0, v8
1117 %r = extractelement <vscale x 2 x double> %v, i32 %idx
1121 define double @extractelt_nxv4f64_0(<vscale x 4 x double> %v) {
1122 ; CHECK-LABEL: extractelt_nxv4f64_0:
1124 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1125 ; CHECK-NEXT: vfmv.f.s fa0, v8
1127 %r = extractelement <vscale x 4 x double> %v, i32 0
1131 define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
1132 ; CHECK-LABEL: extractelt_nxv4f64_imm:
1134 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
1135 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
1136 ; CHECK-NEXT: vfmv.f.s fa0, v8
1138 %r = extractelement <vscale x 4 x double> %v, i32 2
1142 define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 zeroext %idx) {
1143 ; CHECK-LABEL: extractelt_nxv4f64_idx:
1145 ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma
1146 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1147 ; CHECK-NEXT: vfmv.f.s fa0, v8
1149 %r = extractelement <vscale x 4 x double> %v, i32 %idx
; Element 0 of nxv8f64: direct vfmv.f.s at m1 — no slide.
1153 define double @extractelt_nxv8f64_0(<vscale x 8 x double> %v) {
1154 ; CHECK-LABEL: extractelt_nxv8f64_0:
1156 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1157 ; CHECK-NEXT: vfmv.f.s fa0, v8
1159 %r = extractelement <vscale x 8 x double> %v, i32 0
; Constant index 2 into nxv8f64: like the nxv4f64 case, the slide is shrunk
; to m2 because index 2 always lies within the first two registers.
1163 define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
1164 ; CHECK-LABEL: extractelt_nxv8f64_imm:
1166 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
1167 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
1168 ; CHECK-NEXT: vfmv.f.s fa0, v8
1170 %r = extractelement <vscale x 8 x double> %v, i32 2
; Variable index into nxv8f64: slide the entire m8 group by the register
; index, then read element 0.
1174 define double @extractelt_nxv8f64_idx(<vscale x 8 x double> %v, i32 zeroext %idx) {
1175 ; CHECK-LABEL: extractelt_nxv8f64_idx:
1177 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
1178 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
1179 ; CHECK-NEXT: vfmv.f.s fa0, v8
1181 %r = extractelement <vscale x 8 x double> %v, i32 %idx
; extractelement feeding a store: the scalar never round-trips through an FP
; register — slide the loaded vector down by 1 and store element 0 directly
; with a unit vse64.v.
1185 define void @store_extractelt_nxv8f64(ptr %x, ptr %p) {
1186 ; CHECK-LABEL: store_extractelt_nxv8f64:
1188 ; CHECK-NEXT: vl8re64.v v8, (a0)
1189 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1190 ; CHECK-NEXT: vslidedown.vi v8, v8, 1
1191 ; CHECK-NEXT: vse64.v v8, (a1)
1193 %a = load <vscale x 8 x double>, ptr %x
1194 %b = extractelement <vscale x 8 x double> %a, i64 1
1195 store double %b, ptr %p
; The riscv.vfmv.f.s intrinsic result stored to memory is folded into a
; direct vse64.v of element 0 — no vfmv.f.s/fsd pair is emitted.
1199 define void @store_vfmv_f_s_nxv8f64(ptr %x, ptr %p) {
1200 ; CHECK-LABEL: store_vfmv_f_s_nxv8f64:
1202 ; CHECK-NEXT: vl8re64.v v8, (a0)
1203 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1204 ; CHECK-NEXT: vse64.v v8, (a1)
1206 %a = load <vscale x 8 x double>, ptr %x
1207 %b = call double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double> %a)
1208 store double %b, ptr %p
1212 declare double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double>)
; extractelement of (vector fadd with a splat constant) is scalarized:
; extract the lane first, then do one scalar fadd.s against 3.0
; (lui 263168 = 0x40400 -> fmv.w.x gives 0x40400000 = 3.0f).
1214 define float @extractelt_fadd_nxv4f32_splat(<vscale x 4 x float> %x) {
1215 ; CHECK-LABEL: extractelt_fadd_nxv4f32_splat:
1217 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
1218 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
1219 ; CHECK-NEXT: lui a0, 263168
1220 ; CHECK-NEXT: vfmv.f.s fa5, v8
1221 ; CHECK-NEXT: fmv.w.x fa4, a0
1222 ; CHECK-NEXT: fadd.s fa0, fa5, fa4
1224 %bo = fadd <vscale x 4 x float> %x, splat (float 3.0)
1225 %ext = extractelement <vscale x 4 x float> %bo, i32 2
; Same scalarization for fsub, with the splat on the LHS: note the scalar op
; keeps the operand order (fsub.s fa0, fa4, fa5 = 3.0 - x[1]).
1229 define float @extractelt_fsub_nxv4f32_splat(<vscale x 4 x float> %x) {
1230 ; CHECK-LABEL: extractelt_fsub_nxv4f32_splat:
1232 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
1233 ; CHECK-NEXT: vslidedown.vi v8, v8, 1
1234 ; CHECK-NEXT: lui a0, 263168
1235 ; CHECK-NEXT: vfmv.f.s fa5, v8
1236 ; CHECK-NEXT: fmv.w.x fa4, a0
1237 ; CHECK-NEXT: fsub.s fa0, fa4, fa5
1239 %bo = fsub <vscale x 4 x float> splat (float 3.0), %x
1240 %ext = extractelement <vscale x 4 x float> %bo, i32 1
; Scalarized fmul-with-splat: extract lane 3, then one scalar fmul.s by 3.0.
1244 define float @extractelt_fmul_nxv4f32_splat(<vscale x 4 x float> %x) {
1245 ; CHECK-LABEL: extractelt_fmul_nxv4f32_splat:
1247 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
1248 ; CHECK-NEXT: vslidedown.vi v8, v8, 3
1249 ; CHECK-NEXT: lui a0, 263168
1250 ; CHECK-NEXT: vfmv.f.s fa5, v8
1251 ; CHECK-NEXT: fmv.w.x fa4, a0
1252 ; CHECK-NEXT: fmul.s fa0, fa5, fa4
1254 %bo = fmul <vscale x 4 x float> %x, splat (float 3.0)
1255 %ext = extractelement <vscale x 4 x float> %bo, i32 3
; Scalarized fdiv-with-splat at index 0: no slide needed, just vfmv.f.s and
; one scalar fdiv.s by 3.0.
1259 define float @extractelt_fdiv_nxv4f32_splat(<vscale x 4 x float> %x) {
1260 ; CHECK-LABEL: extractelt_fdiv_nxv4f32_splat:
1262 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
1263 ; CHECK-NEXT: vfmv.f.s fa5, v8
1264 ; CHECK-NEXT: lui a0, 263168
1265 ; CHECK-NEXT: fmv.w.x fa4, a0
1266 ; CHECK-NEXT: fdiv.s fa0, fa5, fa4
1268 %bo = fdiv <vscale x 4 x float> %x, splat (float 3.0)
1269 %ext = extractelement <vscale x 4 x float> %bo, i32 0
; Element 0 of a 16x double vector (spans two m8 register groups, v8 and
; v16): still just a direct vfmv.f.s from v8.
1273 define double @extractelt_nxv16f64_0(<vscale x 16 x double> %v) {
1274 ; CHECK-LABEL: extractelt_nxv16f64_0:
1276 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
1277 ; CHECK-NEXT: vfmv.f.s fa0, v8
1279 %r = extractelement <vscale x 16 x double> %v, i32 0
; Out-of-range-style index -1 on a vector wider than one m8 group: lowered
; via the stack. Both m8 halves (v8, v16) are spilled to a 64-byte-aligned
; scratch area (frame pointer s0 kept because sp is realigned with
; andi sp, sp, -64), and the element is reloaded with fld.
; RV32 loads directly from (base + 16*vlenb) - 8, i.e. the last element.
; RV64 first materializes the index: -1 as i32 zero-extends to 0xffffffff
; (srli of -1 by 32), which is clamped against the last valid index
; 2*vlenb - 1 before being scaled by 8 — hence the bltu/mv sequence.
1283 define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
1284 ; RV32-LABEL: extractelt_nxv16f64_neg1:
1286 ; RV32-NEXT: addi sp, sp, -80
1287 ; RV32-NEXT: .cfi_def_cfa_offset 80
1288 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
1289 ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
1290 ; RV32-NEXT: .cfi_offset ra, -4
1291 ; RV32-NEXT: .cfi_offset s0, -8
1292 ; RV32-NEXT: addi s0, sp, 80
1293 ; RV32-NEXT: .cfi_def_cfa s0, 0
1294 ; RV32-NEXT: csrr a0, vlenb
1295 ; RV32-NEXT: slli a0, a0, 4
1296 ; RV32-NEXT: sub sp, sp, a0
1297 ; RV32-NEXT: andi sp, sp, -64
1298 ; RV32-NEXT: addi a0, sp, 64
1299 ; RV32-NEXT: csrr a1, vlenb
1300 ; RV32-NEXT: vs8r.v v8, (a0)
1301 ; RV32-NEXT: slli a2, a1, 3
1302 ; RV32-NEXT: slli a1, a1, 4
1303 ; RV32-NEXT: add a2, a0, a2
1304 ; RV32-NEXT: vs8r.v v16, (a2)
1305 ; RV32-NEXT: add a0, a1, a0
1306 ; RV32-NEXT: fld fa0, -8(a0)
1307 ; RV32-NEXT: addi sp, s0, -80
1308 ; RV32-NEXT: .cfi_def_cfa sp, 80
1309 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
1310 ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
1311 ; RV32-NEXT: .cfi_restore ra
1312 ; RV32-NEXT: .cfi_restore s0
1313 ; RV32-NEXT: addi sp, sp, 80
1314 ; RV32-NEXT: .cfi_def_cfa_offset 0
1317 ; RV64-LABEL: extractelt_nxv16f64_neg1:
1319 ; RV64-NEXT: addi sp, sp, -80
1320 ; RV64-NEXT: .cfi_def_cfa_offset 80
1321 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
1322 ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
1323 ; RV64-NEXT: .cfi_offset ra, -8
1324 ; RV64-NEXT: .cfi_offset s0, -16
1325 ; RV64-NEXT: addi s0, sp, 80
1326 ; RV64-NEXT: .cfi_def_cfa s0, 0
1327 ; RV64-NEXT: csrr a0, vlenb
1328 ; RV64-NEXT: slli a0, a0, 4
1329 ; RV64-NEXT: sub sp, sp, a0
1330 ; RV64-NEXT: andi sp, sp, -64
1331 ; RV64-NEXT: addi a0, sp, 64
1332 ; RV64-NEXT: csrr a2, vlenb
1333 ; RV64-NEXT: li a1, -1
1334 ; RV64-NEXT: vs8r.v v8, (a0)
1335 ; RV64-NEXT: slli a3, a2, 3
1336 ; RV64-NEXT: srli a1, a1, 32
1337 ; RV64-NEXT: slli a2, a2, 1
1338 ; RV64-NEXT: add a3, a0, a3
1339 ; RV64-NEXT: addi a2, a2, -1
1340 ; RV64-NEXT: vs8r.v v16, (a3)
1341 ; RV64-NEXT: bltu a2, a1, .LBB70_2
1342 ; RV64-NEXT: # %bb.1:
1343 ; RV64-NEXT: mv a2, a1
1344 ; RV64-NEXT: .LBB70_2:
1345 ; RV64-NEXT: slli a2, a2, 3
1346 ; RV64-NEXT: add a0, a0, a2
1347 ; RV64-NEXT: fld fa0, 0(a0)
1348 ; RV64-NEXT: addi sp, s0, -80
1349 ; RV64-NEXT: .cfi_def_cfa sp, 80
1350 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
1351 ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
1352 ; RV64-NEXT: .cfi_restore ra
1353 ; RV64-NEXT: .cfi_restore s0
1354 ; RV64-NEXT: addi sp, sp, 80
1355 ; RV64-NEXT: .cfi_def_cfa_offset 0
1357 %r = extractelement <vscale x 16 x double> %v, i32 -1
; Constant index 2 into the 16x double vector: stays in registers — the
; slide is shrunk to m2 within the first group, no stack spill required.
1361 define double @extractelt_nxv16f64_imm(<vscale x 16 x double> %v) {
1362 ; CHECK-LABEL: extractelt_nxv16f64_imm:
1364 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
1365 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
1366 ; CHECK-NEXT: vfmv.f.s fa0, v8
1368 %r = extractelement <vscale x 16 x double> %v, i32 2
1372 define double @extractelt_nxv16f64_idx(<vscale x 16 x double> %v, i32 zeroext %idx) {
1373 ; RV32-LABEL: extractelt_nxv16f64_idx:
1375 ; RV32-NEXT: csrr a1, vlenb
1376 ; RV32-NEXT: slli a2, a1, 1
1377 ; RV32-NEXT: addi a2, a2, -1
1378 ; RV32-NEXT: bltu a0, a2, .LBB72_2
1379 ; RV32-NEXT: # %bb.1:
1380 ; RV32-NEXT: mv a0, a2
1381 ; RV32-NEXT: .LBB72_2:
1382 ; RV32-NEXT: addi sp, sp, -80
1383 ; RV32-NEXT: .cfi_def_cfa_offset 80
1384 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
1385 ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
1386 ; RV32-NEXT: .cfi_offset ra, -4
1387 ; RV32-NEXT: .cfi_offset s0, -8
1388 ; RV32-NEXT: addi s0, sp, 80
1389 ; RV32-NEXT: .cfi_def_cfa s0, 0
1390 ; RV32-NEXT: csrr a2, vlenb
1391 ; RV32-NEXT: slli a2, a2, 4
1392 ; RV32-NEXT: sub sp, sp, a2
1393 ; RV32-NEXT: andi sp, sp, -64
1394 ; RV32-NEXT: slli a0, a0, 3
1395 ; RV32-NEXT: addi a2, sp, 64
1396 ; RV32-NEXT: slli a1, a1, 3
1397 ; RV32-NEXT: add a0, a2, a0
1398 ; RV32-NEXT: vs8r.v v8, (a2)
1399 ; RV32-NEXT: add a1, a2, a1
1400 ; RV32-NEXT: vs8r.v v16, (a1)
1401 ; RV32-NEXT: fld fa0, 0(a0)
1402 ; RV32-NEXT: addi sp, s0, -80
1403 ; RV32-NEXT: .cfi_def_cfa sp, 80
1404 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
1405 ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
1406 ; RV32-NEXT: .cfi_restore ra
1407 ; RV32-NEXT: .cfi_restore s0
1408 ; RV32-NEXT: addi sp, sp, 80
1409 ; RV32-NEXT: .cfi_def_cfa_offset 0
1412 ; RV64-LABEL: extractelt_nxv16f64_idx:
1414 ; RV64-NEXT: csrr a1, vlenb
1415 ; RV64-NEXT: slli a2, a1, 1
1416 ; RV64-NEXT: addi a2, a2, -1
1417 ; RV64-NEXT: bltu a0, a2, .LBB72_2
1418 ; RV64-NEXT: # %bb.1:
1419 ; RV64-NEXT: mv a0, a2
1420 ; RV64-NEXT: .LBB72_2:
1421 ; RV64-NEXT: addi sp, sp, -80
1422 ; RV64-NEXT: .cfi_def_cfa_offset 80
1423 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
1424 ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
1425 ; RV64-NEXT: .cfi_offset ra, -8
1426 ; RV64-NEXT: .cfi_offset s0, -16
1427 ; RV64-NEXT: addi s0, sp, 80
1428 ; RV64-NEXT: .cfi_def_cfa s0, 0
1429 ; RV64-NEXT: csrr a2, vlenb
1430 ; RV64-NEXT: slli a2, a2, 4
1431 ; RV64-NEXT: sub sp, sp, a2
1432 ; RV64-NEXT: andi sp, sp, -64
1433 ; RV64-NEXT: slli a0, a0, 3
1434 ; RV64-NEXT: addi a2, sp, 64
1435 ; RV64-NEXT: slli a1, a1, 3
1436 ; RV64-NEXT: add a0, a2, a0
1437 ; RV64-NEXT: vs8r.v v8, (a2)
1438 ; RV64-NEXT: add a1, a2, a1
1439 ; RV64-NEXT: vs8r.v v16, (a1)
1440 ; RV64-NEXT: fld fa0, 0(a0)
1441 ; RV64-NEXT: addi sp, s0, -80
1442 ; RV64-NEXT: .cfi_def_cfa sp, 80
1443 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
1444 ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
1445 ; RV64-NEXT: .cfi_restore ra
1446 ; RV64-NEXT: .cfi_restore s0
1447 ; RV64-NEXT: addi sp, sp, 80
1448 ; RV64-NEXT: .cfi_def_cfa_offset 0
1450 %r = extractelement <vscale x 16 x double> %v, i32 %idx