1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
3 ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32NOM
4 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=ilp32d \
5 ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32M
8 define signext i8 @extractelt_nxv1i8_0(<vscale x 1 x i8> %v) {
9 ; CHECK-LABEL: extractelt_nxv1i8_0:
11 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
12 ; CHECK-NEXT: vmv.x.s a0, v8
14 %r = extractelement <vscale x 1 x i8> %v, i32 0
18 define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
19 ; CHECK-LABEL: extractelt_nxv1i8_imm:
21 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
22 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
23 ; CHECK-NEXT: vmv.x.s a0, v8
25 %r = extractelement <vscale x 1 x i8> %v, i32 2
29 define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 %idx) {
30 ; CHECK-LABEL: extractelt_nxv1i8_idx:
32 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
33 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
34 ; CHECK-NEXT: vmv.x.s a0, v8
36 %r = extractelement <vscale x 1 x i8> %v, i32 %idx
40 define signext i8 @extractelt_nxv2i8_0(<vscale x 2 x i8> %v) {
41 ; CHECK-LABEL: extractelt_nxv2i8_0:
43 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
44 ; CHECK-NEXT: vmv.x.s a0, v8
46 %r = extractelement <vscale x 2 x i8> %v, i32 0
50 define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
51 ; CHECK-LABEL: extractelt_nxv2i8_imm:
53 ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
54 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
55 ; CHECK-NEXT: vmv.x.s a0, v8
57 %r = extractelement <vscale x 2 x i8> %v, i32 2
61 define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 %idx) {
62 ; CHECK-LABEL: extractelt_nxv2i8_idx:
64 ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
65 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
66 ; CHECK-NEXT: vmv.x.s a0, v8
68 %r = extractelement <vscale x 2 x i8> %v, i32 %idx
72 define signext i8 @extractelt_nxv4i8_0(<vscale x 4 x i8> %v) {
73 ; CHECK-LABEL: extractelt_nxv4i8_0:
75 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
76 ; CHECK-NEXT: vmv.x.s a0, v8
78 %r = extractelement <vscale x 4 x i8> %v, i32 0
82 define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
83 ; CHECK-LABEL: extractelt_nxv4i8_imm:
85 ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
86 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
87 ; CHECK-NEXT: vmv.x.s a0, v8
89 %r = extractelement <vscale x 4 x i8> %v, i32 2
93 define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 %idx) {
94 ; CHECK-LABEL: extractelt_nxv4i8_idx:
96 ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
97 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
98 ; CHECK-NEXT: vmv.x.s a0, v8
100 %r = extractelement <vscale x 4 x i8> %v, i32 %idx
104 define signext i8 @extractelt_nxv8i8_0(<vscale x 8 x i8> %v) {
105 ; CHECK-LABEL: extractelt_nxv8i8_0:
107 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
108 ; CHECK-NEXT: vmv.x.s a0, v8
110 %r = extractelement <vscale x 8 x i8> %v, i32 0
114 define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
115 ; CHECK-LABEL: extractelt_nxv8i8_imm:
117 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
118 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
119 ; CHECK-NEXT: vmv.x.s a0, v8
121 %r = extractelement <vscale x 8 x i8> %v, i32 2
125 define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 %idx) {
126 ; CHECK-LABEL: extractelt_nxv8i8_idx:
128 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
129 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
130 ; CHECK-NEXT: vmv.x.s a0, v8
132 %r = extractelement <vscale x 8 x i8> %v, i32 %idx
136 define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
137 ; CHECK-LABEL: extractelt_nxv16i8_0:
139 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
140 ; CHECK-NEXT: vmv.x.s a0, v8
142 %r = extractelement <vscale x 16 x i8> %v, i32 0
146 define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
147 ; CHECK-LABEL: extractelt_nxv16i8_imm:
149 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
150 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
151 ; CHECK-NEXT: vmv.x.s a0, v8
153 %r = extractelement <vscale x 16 x i8> %v, i32 2
157 define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 %idx) {
158 ; CHECK-LABEL: extractelt_nxv16i8_idx:
160 ; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
161 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
162 ; CHECK-NEXT: vmv.x.s a0, v8
164 %r = extractelement <vscale x 16 x i8> %v, i32 %idx
168 define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
169 ; CHECK-LABEL: extractelt_nxv32i8_0:
171 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
172 ; CHECK-NEXT: vmv.x.s a0, v8
174 %r = extractelement <vscale x 32 x i8> %v, i32 0
178 define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
179 ; CHECK-LABEL: extractelt_nxv32i8_imm:
181 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
182 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
183 ; CHECK-NEXT: vmv.x.s a0, v8
185 %r = extractelement <vscale x 32 x i8> %v, i32 2
189 define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 %idx) {
190 ; CHECK-LABEL: extractelt_nxv32i8_idx:
192 ; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma
193 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
194 ; CHECK-NEXT: vmv.x.s a0, v8
196 %r = extractelement <vscale x 32 x i8> %v, i32 %idx
200 define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
201 ; CHECK-LABEL: extractelt_nxv64i8_0:
203 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
204 ; CHECK-NEXT: vmv.x.s a0, v8
206 %r = extractelement <vscale x 64 x i8> %v, i32 0
210 define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
211 ; CHECK-LABEL: extractelt_nxv64i8_imm:
213 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
214 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
215 ; CHECK-NEXT: vmv.x.s a0, v8
217 %r = extractelement <vscale x 64 x i8> %v, i32 2
221 define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 %idx) {
222 ; CHECK-LABEL: extractelt_nxv64i8_idx:
224 ; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
225 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
226 ; CHECK-NEXT: vmv.x.s a0, v8
228 %r = extractelement <vscale x 64 x i8> %v, i32 %idx
232 define signext i16 @extractelt_nxv1i16_0(<vscale x 1 x i16> %v) {
233 ; CHECK-LABEL: extractelt_nxv1i16_0:
235 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
236 ; CHECK-NEXT: vmv.x.s a0, v8
238 %r = extractelement <vscale x 1 x i16> %v, i32 0
242 define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
243 ; CHECK-LABEL: extractelt_nxv1i16_imm:
245 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
246 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
247 ; CHECK-NEXT: vmv.x.s a0, v8
249 %r = extractelement <vscale x 1 x i16> %v, i32 2
253 define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 %idx) {
254 ; CHECK-LABEL: extractelt_nxv1i16_idx:
256 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
257 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
258 ; CHECK-NEXT: vmv.x.s a0, v8
260 %r = extractelement <vscale x 1 x i16> %v, i32 %idx
264 define signext i16 @extractelt_nxv2i16_0(<vscale x 2 x i16> %v) {
265 ; CHECK-LABEL: extractelt_nxv2i16_0:
267 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
268 ; CHECK-NEXT: vmv.x.s a0, v8
270 %r = extractelement <vscale x 2 x i16> %v, i32 0
274 define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
275 ; CHECK-LABEL: extractelt_nxv2i16_imm:
277 ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
278 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
279 ; CHECK-NEXT: vmv.x.s a0, v8
281 %r = extractelement <vscale x 2 x i16> %v, i32 2
285 define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 %idx) {
286 ; CHECK-LABEL: extractelt_nxv2i16_idx:
288 ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
289 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
290 ; CHECK-NEXT: vmv.x.s a0, v8
292 %r = extractelement <vscale x 2 x i16> %v, i32 %idx
296 define signext i16 @extractelt_nxv4i16_0(<vscale x 4 x i16> %v) {
297 ; CHECK-LABEL: extractelt_nxv4i16_0:
299 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
300 ; CHECK-NEXT: vmv.x.s a0, v8
302 %r = extractelement <vscale x 4 x i16> %v, i32 0
306 define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
307 ; CHECK-LABEL: extractelt_nxv4i16_imm:
309 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
310 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
311 ; CHECK-NEXT: vmv.x.s a0, v8
313 %r = extractelement <vscale x 4 x i16> %v, i32 2
317 define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 %idx) {
318 ; CHECK-LABEL: extractelt_nxv4i16_idx:
320 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
321 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
322 ; CHECK-NEXT: vmv.x.s a0, v8
324 %r = extractelement <vscale x 4 x i16> %v, i32 %idx
328 define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
329 ; CHECK-LABEL: extractelt_nxv8i16_0:
331 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
332 ; CHECK-NEXT: vmv.x.s a0, v8
334 %r = extractelement <vscale x 8 x i16> %v, i32 0
338 define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
339 ; CHECK-LABEL: extractelt_nxv8i16_imm:
341 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
342 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
343 ; CHECK-NEXT: vmv.x.s a0, v8
345 %r = extractelement <vscale x 8 x i16> %v, i32 2
349 define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 %idx) {
350 ; CHECK-LABEL: extractelt_nxv8i16_idx:
352 ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
353 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
354 ; CHECK-NEXT: vmv.x.s a0, v8
356 %r = extractelement <vscale x 8 x i16> %v, i32 %idx
360 define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
361 ; CHECK-LABEL: extractelt_nxv16i16_0:
363 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
364 ; CHECK-NEXT: vmv.x.s a0, v8
366 %r = extractelement <vscale x 16 x i16> %v, i32 0
370 define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
371 ; CHECK-LABEL: extractelt_nxv16i16_imm:
373 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
374 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
375 ; CHECK-NEXT: vmv.x.s a0, v8
377 %r = extractelement <vscale x 16 x i16> %v, i32 2
381 define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 %idx) {
382 ; CHECK-LABEL: extractelt_nxv16i16_idx:
384 ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma
385 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
386 ; CHECK-NEXT: vmv.x.s a0, v8
388 %r = extractelement <vscale x 16 x i16> %v, i32 %idx
392 define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
393 ; CHECK-LABEL: extractelt_nxv32i16_0:
395 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
396 ; CHECK-NEXT: vmv.x.s a0, v8
398 %r = extractelement <vscale x 32 x i16> %v, i32 0
402 define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
403 ; CHECK-LABEL: extractelt_nxv32i16_imm:
405 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
406 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
407 ; CHECK-NEXT: vmv.x.s a0, v8
409 %r = extractelement <vscale x 32 x i16> %v, i32 2
413 define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 %idx) {
414 ; CHECK-LABEL: extractelt_nxv32i16_idx:
416 ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma
417 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
418 ; CHECK-NEXT: vmv.x.s a0, v8
420 %r = extractelement <vscale x 32 x i16> %v, i32 %idx
424 define i32 @extractelt_nxv1i32_0(<vscale x 1 x i32> %v) {
425 ; CHECK-LABEL: extractelt_nxv1i32_0:
427 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
428 ; CHECK-NEXT: vmv.x.s a0, v8
430 %r = extractelement <vscale x 1 x i32> %v, i32 0
434 define i32 @extractelt_nxv1i32_imm(<vscale x 1 x i32> %v) {
435 ; CHECK-LABEL: extractelt_nxv1i32_imm:
437 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
438 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
439 ; CHECK-NEXT: vmv.x.s a0, v8
441 %r = extractelement <vscale x 1 x i32> %v, i32 2
445 define i32 @extractelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 %idx) {
446 ; CHECK-LABEL: extractelt_nxv1i32_idx:
448 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
449 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
450 ; CHECK-NEXT: vmv.x.s a0, v8
452 %r = extractelement <vscale x 1 x i32> %v, i32 %idx
456 define i32 @extractelt_nxv2i32_0(<vscale x 2 x i32> %v) {
457 ; CHECK-LABEL: extractelt_nxv2i32_0:
459 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
460 ; CHECK-NEXT: vmv.x.s a0, v8
462 %r = extractelement <vscale x 2 x i32> %v, i32 0
466 define i32 @extractelt_nxv2i32_imm(<vscale x 2 x i32> %v) {
467 ; CHECK-LABEL: extractelt_nxv2i32_imm:
469 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
470 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
471 ; CHECK-NEXT: vmv.x.s a0, v8
473 %r = extractelement <vscale x 2 x i32> %v, i32 2
477 define i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 %idx) {
478 ; CHECK-LABEL: extractelt_nxv2i32_idx:
480 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
481 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
482 ; CHECK-NEXT: vmv.x.s a0, v8
484 %r = extractelement <vscale x 2 x i32> %v, i32 %idx
488 define i32 @extractelt_nxv4i32_0(<vscale x 4 x i32> %v) {
489 ; CHECK-LABEL: extractelt_nxv4i32_0:
491 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
492 ; CHECK-NEXT: vmv.x.s a0, v8
494 %r = extractelement <vscale x 4 x i32> %v, i32 0
498 define i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
499 ; CHECK-LABEL: extractelt_nxv4i32_imm:
501 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
502 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
503 ; CHECK-NEXT: vmv.x.s a0, v8
505 %r = extractelement <vscale x 4 x i32> %v, i32 2
509 define i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %idx) {
510 ; CHECK-LABEL: extractelt_nxv4i32_idx:
512 ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
513 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
514 ; CHECK-NEXT: vmv.x.s a0, v8
516 %r = extractelement <vscale x 4 x i32> %v, i32 %idx
520 define i32 @extractelt_nxv8i32_0(<vscale x 8 x i32> %v) {
521 ; CHECK-LABEL: extractelt_nxv8i32_0:
523 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
524 ; CHECK-NEXT: vmv.x.s a0, v8
526 %r = extractelement <vscale x 8 x i32> %v, i32 0
530 define i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
531 ; CHECK-LABEL: extractelt_nxv8i32_imm:
533 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
534 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
535 ; CHECK-NEXT: vmv.x.s a0, v8
537 %r = extractelement <vscale x 8 x i32> %v, i32 2
541 define i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %idx) {
542 ; CHECK-LABEL: extractelt_nxv8i32_idx:
544 ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma
545 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
546 ; CHECK-NEXT: vmv.x.s a0, v8
548 %r = extractelement <vscale x 8 x i32> %v, i32 %idx
552 define i32 @extractelt_nxv16i32_0(<vscale x 16 x i32> %v) {
553 ; CHECK-LABEL: extractelt_nxv16i32_0:
555 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
556 ; CHECK-NEXT: vmv.x.s a0, v8
558 %r = extractelement <vscale x 16 x i32> %v, i32 0
562 define i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
563 ; CHECK-LABEL: extractelt_nxv16i32_imm:
565 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
566 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
567 ; CHECK-NEXT: vmv.x.s a0, v8
569 %r = extractelement <vscale x 16 x i32> %v, i32 2
573 define i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %idx) {
574 ; CHECK-LABEL: extractelt_nxv16i32_idx:
576 ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
577 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
578 ; CHECK-NEXT: vmv.x.s a0, v8
580 %r = extractelement <vscale x 16 x i32> %v, i32 %idx
584 define i64 @extractelt_nxv1i64_0(<vscale x 1 x i64> %v) {
585 ; CHECK-LABEL: extractelt_nxv1i64_0:
587 ; CHECK-NEXT: li a0, 32
588 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
589 ; CHECK-NEXT: vsrl.vx v9, v8, a0
590 ; CHECK-NEXT: vmv.x.s a1, v9
591 ; CHECK-NEXT: vmv.x.s a0, v8
593 %r = extractelement <vscale x 1 x i64> %v, i32 0
597 define i64 @extractelt_nxv1i64_imm(<vscale x 1 x i64> %v) {
598 ; CHECK-LABEL: extractelt_nxv1i64_imm:
600 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
601 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
602 ; CHECK-NEXT: li a0, 32
603 ; CHECK-NEXT: vsrl.vx v9, v8, a0
604 ; CHECK-NEXT: vmv.x.s a1, v9
605 ; CHECK-NEXT: vmv.x.s a0, v8
607 %r = extractelement <vscale x 1 x i64> %v, i32 2
611 define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 %idx) {
612 ; CHECK-LABEL: extractelt_nxv1i64_idx:
614 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
615 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
616 ; CHECK-NEXT: vmv.x.s a0, v8
617 ; CHECK-NEXT: li a1, 32
618 ; CHECK-NEXT: vsrl.vx v8, v8, a1
619 ; CHECK-NEXT: vmv.x.s a1, v8
621 %r = extractelement <vscale x 1 x i64> %v, i32 %idx
625 define i64 @extractelt_nxv2i64_0(<vscale x 2 x i64> %v) {
626 ; CHECK-LABEL: extractelt_nxv2i64_0:
628 ; CHECK-NEXT: li a0, 32
629 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
630 ; CHECK-NEXT: vsrl.vx v10, v8, a0
631 ; CHECK-NEXT: vmv.x.s a1, v10
632 ; CHECK-NEXT: vmv.x.s a0, v8
634 %r = extractelement <vscale x 2 x i64> %v, i32 0
638 define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
639 ; CHECK-LABEL: extractelt_nxv2i64_imm:
641 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
642 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
643 ; CHECK-NEXT: li a0, 32
644 ; CHECK-NEXT: vsrl.vx v10, v8, a0
645 ; CHECK-NEXT: vmv.x.s a1, v10
646 ; CHECK-NEXT: vmv.x.s a0, v8
648 %r = extractelement <vscale x 2 x i64> %v, i32 2
652 define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 %idx) {
653 ; CHECK-LABEL: extractelt_nxv2i64_idx:
655 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
656 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
657 ; CHECK-NEXT: vmv.x.s a0, v8
658 ; CHECK-NEXT: li a1, 32
659 ; CHECK-NEXT: vsrl.vx v8, v8, a1
660 ; CHECK-NEXT: vmv.x.s a1, v8
662 %r = extractelement <vscale x 2 x i64> %v, i32 %idx
666 define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
667 ; CHECK-LABEL: extractelt_nxv4i64_0:
669 ; CHECK-NEXT: li a0, 32
670 ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma
671 ; CHECK-NEXT: vsrl.vx v12, v8, a0
672 ; CHECK-NEXT: vmv.x.s a1, v12
673 ; CHECK-NEXT: vmv.x.s a0, v8
675 %r = extractelement <vscale x 4 x i64> %v, i32 0
679 define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
680 ; CHECK-LABEL: extractelt_nxv4i64_imm:
682 ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma
683 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
684 ; CHECK-NEXT: li a0, 32
685 ; CHECK-NEXT: vsrl.vx v12, v8, a0
686 ; CHECK-NEXT: vmv.x.s a1, v12
687 ; CHECK-NEXT: vmv.x.s a0, v8
689 %r = extractelement <vscale x 4 x i64> %v, i32 2
693 define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 %idx) {
694 ; CHECK-LABEL: extractelt_nxv4i64_idx:
696 ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma
697 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
698 ; CHECK-NEXT: vmv.x.s a0, v8
699 ; CHECK-NEXT: li a1, 32
700 ; CHECK-NEXT: vsrl.vx v8, v8, a1
701 ; CHECK-NEXT: vmv.x.s a1, v8
703 %r = extractelement <vscale x 4 x i64> %v, i32 %idx
707 define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
708 ; CHECK-LABEL: extractelt_nxv8i64_0:
710 ; CHECK-NEXT: li a0, 32
711 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
712 ; CHECK-NEXT: vsrl.vx v16, v8, a0
713 ; CHECK-NEXT: vmv.x.s a1, v16
714 ; CHECK-NEXT: vmv.x.s a0, v8
716 %r = extractelement <vscale x 8 x i64> %v, i32 0
720 define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
721 ; CHECK-LABEL: extractelt_nxv8i64_imm:
723 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
724 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
725 ; CHECK-NEXT: li a0, 32
726 ; CHECK-NEXT: vsrl.vx v16, v8, a0
727 ; CHECK-NEXT: vmv.x.s a1, v16
728 ; CHECK-NEXT: vmv.x.s a0, v8
730 %r = extractelement <vscale x 8 x i64> %v, i32 2
734 define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 %idx) {
735 ; CHECK-LABEL: extractelt_nxv8i64_idx:
737 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
738 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
739 ; CHECK-NEXT: vmv.x.s a0, v8
740 ; CHECK-NEXT: li a1, 32
741 ; CHECK-NEXT: vsrl.vx v8, v8, a1
742 ; CHECK-NEXT: vmv.x.s a1, v8
744 %r = extractelement <vscale x 8 x i64> %v, i32 %idx
748 define i32 @extractelt_add_nxv4i32_splat(<vscale x 4 x i32> %x) {
749 ; CHECK-LABEL: extractelt_add_nxv4i32_splat:
751 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
752 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
753 ; CHECK-NEXT: vmv.x.s a0, v8
754 ; CHECK-NEXT: addi a0, a0, 3
756 %bo = add <vscale x 4 x i32> %x, splat (i32 3)
757 %ext = extractelement <vscale x 4 x i32> %bo, i32 2
761 define i32 @extractelt_sub_nxv4i32_splat(<vscale x 4 x i32> %x) {
762 ; CHECK-LABEL: extractelt_sub_nxv4i32_splat:
764 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
765 ; CHECK-NEXT: vslidedown.vi v8, v8, 1
766 ; CHECK-NEXT: vmv.x.s a0, v8
767 ; CHECK-NEXT: li a1, 3
768 ; CHECK-NEXT: sub a0, a1, a0
770 %bo = sub <vscale x 4 x i32> splat (i32 3), %x
771 %ext = extractelement <vscale x 4 x i32> %bo, i32 1
775 define i32 @extractelt_mul_nxv4i32_splat(<vscale x 4 x i32> %x) {
776 ; RV32NOM-LABEL: extractelt_mul_nxv4i32_splat:
778 ; RV32NOM-NEXT: li a0, 3
779 ; RV32NOM-NEXT: vsetvli a1, zero, e32, m2, ta, ma
780 ; RV32NOM-NEXT: vmul.vx v8, v8, a0
781 ; RV32NOM-NEXT: vsetivli zero, 1, e32, m1, ta, ma
782 ; RV32NOM-NEXT: vslidedown.vi v8, v8, 3
783 ; RV32NOM-NEXT: vmv.x.s a0, v8
786 ; RV32M-LABEL: extractelt_mul_nxv4i32_splat:
788 ; RV32M-NEXT: vsetivli zero, 1, e32, m1, ta, ma
789 ; RV32M-NEXT: vslidedown.vi v8, v8, 3
790 ; RV32M-NEXT: vmv.x.s a0, v8
791 ; RV32M-NEXT: slli a1, a0, 1
792 ; RV32M-NEXT: add a0, a1, a0
794 %bo = mul <vscale x 4 x i32> %x, splat (i32 3)
795 %ext = extractelement <vscale x 4 x i32> %bo, i32 3
799 define i32 @extractelt_sdiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
800 ; RV32NOM-LABEL: extractelt_sdiv_nxv4i32_splat:
802 ; RV32NOM-NEXT: lui a0, 349525
803 ; RV32NOM-NEXT: addi a0, a0, 1366
804 ; RV32NOM-NEXT: vsetvli a1, zero, e32, m2, ta, ma
805 ; RV32NOM-NEXT: vmulh.vx v8, v8, a0
806 ; RV32NOM-NEXT: vsrl.vi v10, v8, 31
807 ; RV32NOM-NEXT: vadd.vv v8, v8, v10
808 ; RV32NOM-NEXT: vmv.x.s a0, v8
811 ; RV32M-LABEL: extractelt_sdiv_nxv4i32_splat:
813 ; RV32M-NEXT: vsetivli zero, 1, e32, m1, ta, ma
814 ; RV32M-NEXT: vmv.x.s a0, v8
815 ; RV32M-NEXT: lui a1, 349525
816 ; RV32M-NEXT: addi a1, a1, 1366
817 ; RV32M-NEXT: mulh a0, a0, a1
818 ; RV32M-NEXT: srli a1, a0, 31
819 ; RV32M-NEXT: add a0, a0, a1
821 %bo = sdiv <vscale x 4 x i32> %x, splat (i32 3)
822 %ext = extractelement <vscale x 4 x i32> %bo, i32 0
; NOTE(review): despite the function name and labels, the IR body below uses
; `sdiv`, not `udiv` — an apparent copy-paste of extractelt_sdiv_nxv4i32_splat
; above. The autogenerated checks confirm it: signed lowering via vmulh.vx /
; mulh plus an arithmetic sign-fixup (srl by 31 + add), identical to the sdiv
; test. As written, this test does not cover unsigned-division lowering
; (which would use vmulhu/mulhu with the 0xAAAAAAAB magic and a 1-bit shift).
; Fix: change `sdiv` to `udiv` in the IR and regenerate the assertions with
; utils/update_llc_test_checks.py — do not hand-edit the CHECK lines.
826 define i32 @extractelt_udiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
827 ; RV32NOM-LABEL: extractelt_udiv_nxv4i32_splat:
829 ; RV32NOM-NEXT: lui a0, 349525
830 ; RV32NOM-NEXT: addi a0, a0, 1366
831 ; RV32NOM-NEXT: vsetvli a1, zero, e32, m2, ta, ma
832 ; RV32NOM-NEXT: vmulh.vx v8, v8, a0
833 ; RV32NOM-NEXT: vsrl.vi v10, v8, 31
834 ; RV32NOM-NEXT: vadd.vv v8, v8, v10
835 ; RV32NOM-NEXT: vmv.x.s a0, v8
838 ; RV32M-LABEL: extractelt_udiv_nxv4i32_splat:
840 ; RV32M-NEXT: vsetivli zero, 1, e32, m1, ta, ma
841 ; RV32M-NEXT: vmv.x.s a0, v8
842 ; RV32M-NEXT: lui a1, 349525
843 ; RV32M-NEXT: addi a1, a1, 1366
844 ; RV32M-NEXT: mulh a0, a0, a1
845 ; RV32M-NEXT: srli a1, a0, 31
846 ; RV32M-NEXT: add a0, a0, a1
848 %bo = sdiv <vscale x 4 x i32> %x, splat (i32 3)
849 %ext = extractelement <vscale x 4 x i32> %bo, i32 0
853 define i32 @extractelt_nxv32i32_0(<vscale x 32 x i32> %v) {
854 ; CHECK-LABEL: extractelt_nxv32i32_0:
856 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
857 ; CHECK-NEXT: vmv.x.s a0, v8
859 %r = extractelement <vscale x 32 x i32> %v, i32 0
863 define i32 @extractelt_nxv32i32_neg1(<vscale x 32 x i32> %v) {
864 ; CHECK-LABEL: extractelt_nxv32i32_neg1:
866 ; CHECK-NEXT: addi sp, sp, -80
867 ; CHECK-NEXT: .cfi_def_cfa_offset 80
868 ; CHECK-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
869 ; CHECK-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
870 ; CHECK-NEXT: .cfi_offset ra, -4
871 ; CHECK-NEXT: .cfi_offset s0, -8
872 ; CHECK-NEXT: addi s0, sp, 80
873 ; CHECK-NEXT: .cfi_def_cfa s0, 0
874 ; CHECK-NEXT: csrr a0, vlenb
875 ; CHECK-NEXT: slli a0, a0, 4
876 ; CHECK-NEXT: sub sp, sp, a0
877 ; CHECK-NEXT: andi sp, sp, -64
878 ; CHECK-NEXT: addi a0, sp, 64
879 ; CHECK-NEXT: vs8r.v v8, (a0)
880 ; CHECK-NEXT: csrr a1, vlenb
881 ; CHECK-NEXT: slli a2, a1, 3
882 ; CHECK-NEXT: add a2, a0, a2
883 ; CHECK-NEXT: vs8r.v v16, (a2)
884 ; CHECK-NEXT: slli a1, a1, 4
885 ; CHECK-NEXT: add a0, a1, a0
886 ; CHECK-NEXT: lw a0, -4(a0)
887 ; CHECK-NEXT: addi sp, s0, -80
888 ; CHECK-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
889 ; CHECK-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
890 ; CHECK-NEXT: addi sp, sp, 80
892 %r = extractelement <vscale x 32 x i32> %v, i32 -1
896 define i32 @extractelt_nxv32i32_imm(<vscale x 32 x i32> %v) {
897 ; CHECK-LABEL: extractelt_nxv32i32_imm:
899 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
900 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
901 ; CHECK-NEXT: vmv.x.s a0, v8
903 %r = extractelement <vscale x 32 x i32> %v, i32 2
907 define i32 @extractelt_nxv32i32_idx(<vscale x 32 x i32> %v, i32 %idx) {
908 ; CHECK-LABEL: extractelt_nxv32i32_idx:
910 ; CHECK-NEXT: csrr a1, vlenb
911 ; CHECK-NEXT: slli a2, a1, 2
912 ; CHECK-NEXT: addi a2, a2, -1
913 ; CHECK-NEXT: bltu a0, a2, .LBB74_2
914 ; CHECK-NEXT: # %bb.1:
915 ; CHECK-NEXT: mv a0, a2
916 ; CHECK-NEXT: .LBB74_2:
917 ; CHECK-NEXT: addi sp, sp, -80
918 ; CHECK-NEXT: .cfi_def_cfa_offset 80
919 ; CHECK-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
920 ; CHECK-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
921 ; CHECK-NEXT: .cfi_offset ra, -4
922 ; CHECK-NEXT: .cfi_offset s0, -8
923 ; CHECK-NEXT: addi s0, sp, 80
924 ; CHECK-NEXT: .cfi_def_cfa s0, 0
925 ; CHECK-NEXT: csrr a2, vlenb
926 ; CHECK-NEXT: slli a2, a2, 4
927 ; CHECK-NEXT: sub sp, sp, a2
928 ; CHECK-NEXT: andi sp, sp, -64
929 ; CHECK-NEXT: slli a0, a0, 2
930 ; CHECK-NEXT: addi a2, sp, 64
931 ; CHECK-NEXT: add a0, a2, a0
932 ; CHECK-NEXT: vs8r.v v8, (a2)
933 ; CHECK-NEXT: slli a1, a1, 3
934 ; CHECK-NEXT: add a1, a2, a1
935 ; CHECK-NEXT: vs8r.v v16, (a1)
936 ; CHECK-NEXT: lw a0, 0(a0)
937 ; CHECK-NEXT: addi sp, s0, -80
938 ; CHECK-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
939 ; CHECK-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
940 ; CHECK-NEXT: addi sp, sp, 80
942 %r = extractelement <vscale x 32 x i32> %v, i32 %idx
946 define i64 @extractelt_nxv16i64_0(<vscale x 16 x i64> %v) {
947 ; CHECK-LABEL: extractelt_nxv16i64_0:
949 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
950 ; CHECK-NEXT: vmv.x.s a0, v8
951 ; CHECK-NEXT: vslidedown.vi v8, v8, 1
952 ; CHECK-NEXT: vmv.x.s a1, v8
954 %r = extractelement <vscale x 16 x i64> %v, i32 0