; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA

; RUN: llc -mtriple=riscv32 -mattr=+m,+v -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,VLA

; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v -riscv-v-vector-bits-max=128 -verify-machineinstrs | FileCheck -check-prefixes=CHECK,VLS %s

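; CHECK lines cover output common to all runs; VLA covers the
; vector-length-agnostic runs, and VLS covers the runs that fix VLEN at
; 128 bits via -riscv-v-vector-bits-max=128.
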
define void @extract_v2i8_v4i8_0(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_v4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <4 x i8>, ptr %x
  %c = call <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 0)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v2i8_v4i8_2(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_v4i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <4 x i8>, ptr %x
  %c = call <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 2)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v2i8_v8i8_0(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_v8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %c = call <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 0)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v2i8_v8i8_6(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_v8i8_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 6
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %c = call <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 6)
  store <2 x i8> %c, ptr %y
  ret void
}

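; For the <8 x i32> source below, the VLS runs know VLEN=128, so the vector
; occupies the register pair v8-v9 after the whole-register vl2re32.v load;
; an extract at element 4 starts exactly at v9 and needs no vslidedown.
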
define void @extract_v1i32_v8i32_4(ptr %x, ptr %y) {
; VLA-LABEL: extract_v1i32_v8i32_4:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vle32.v v8, (a0)
; VLA-NEXT:    vslidedown.vi v8, v8, 4
; VLA-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; VLA-NEXT:    vse32.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v1i32_v8i32_4:
; VLS:       # %bb.0:
; VLS-NEXT:    vl2re32.v v8, (a0)
; VLS-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; VLS-NEXT:    vse32.v v9, (a1)
; VLS-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %c = call <1 x i32> @llvm.vector.extract.v1i32.v8i32(<8 x i32> %a, i64 4)
  store <1 x i32> %c, ptr %y
  ret void
}

define void @extract_v1i32_v8i32_5(ptr %x, ptr %y) {
; VLA-LABEL: extract_v1i32_v8i32_5:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vle32.v v8, (a0)
; VLA-NEXT:    vslidedown.vi v8, v8, 5
; VLA-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; VLA-NEXT:    vse32.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v1i32_v8i32_5:
; VLS:       # %bb.0:
; VLS-NEXT:    vl2re32.v v8, (a0)
; VLS-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; VLS-NEXT:    vslidedown.vi v8, v9, 1
; VLS-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; VLS-NEXT:    vse32.v v8, (a1)
; VLS-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %c = call <1 x i32> @llvm.vector.extract.v1i32.v8i32(<8 x i32> %a, i64 5)
  store <1 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_v8i32_0(ptr %x, ptr %y) {
; VLA-LABEL: extract_v2i32_v8i32_0:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vle32.v v8, (a0)
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vse32.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i32_v8i32_0:
; VLS:       # %bb.0:
; VLS-NEXT:    vl2re32.v v8, (a0)
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vse32.v v8, (a1)
; VLS-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 0)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_v8i32_2(ptr %x, ptr %y) {
; VLA-LABEL: extract_v2i32_v8i32_2:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vle32.v v8, (a0)
; VLA-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; VLA-NEXT:    vslidedown.vi v8, v8, 2
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vse32.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i32_v8i32_2:
; VLS:       # %bb.0:
; VLS-NEXT:    vl2re32.v v8, (a0)
; VLS-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; VLS-NEXT:    vslidedown.vi v8, v8, 2
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vse32.v v8, (a1)
; VLS-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 2)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_v8i32_4(ptr %x, ptr %y) {
; VLA-LABEL: extract_v2i32_v8i32_4:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vle32.v v8, (a0)
; VLA-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; VLA-NEXT:    vslidedown.vi v8, v8, 4
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vse32.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i32_v8i32_4:
; VLS:       # %bb.0:
; VLS-NEXT:    vl2re32.v v8, (a0)
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vse32.v v9, (a1)
; VLS-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 4)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_v8i32_6(ptr %x, ptr %y) {
; VLA-LABEL: extract_v2i32_v8i32_6:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vle32.v v8, (a0)
; VLA-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; VLA-NEXT:    vslidedown.vi v8, v8, 6
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vse32.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i32_v8i32_6:
; VLS:       # %bb.0:
; VLS-NEXT:    vl2re32.v v8, (a0)
; VLS-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; VLS-NEXT:    vslidedown.vi v8, v9, 2
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vse32.v v8, (a1)
; VLS-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 6)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_nxv16i32_0(<vscale x 16 x i32> %x, ptr %y) {
; CHECK-LABEL: extract_v2i32_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 0)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_nxv16i32_2(<vscale x 16 x i32> %x, ptr %y) {
; CHECK-LABEL: extract_v2i32_nxv16i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 2)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_nxv16i32_4(<vscale x 16 x i32> %x, ptr %y) {
; VLA-LABEL: extract_v2i32_nxv16i32_4:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; VLA-NEXT:    vslidedown.vi v8, v8, 4
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vse32.v v8, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i32_nxv16i32_4:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vse32.v v9, (a0)
; VLS-NEXT:    ret
  %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 4)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_nxv16i32_6(<vscale x 16 x i32> %x, ptr %y) {
; VLA-LABEL: extract_v2i32_nxv16i32_6:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; VLA-NEXT:    vslidedown.vi v8, v8, 6
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vse32.v v8, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i32_nxv16i32_6:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; VLS-NEXT:    vslidedown.vi v8, v9, 2
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vse32.v v8, (a0)
; VLS-NEXT:    ret
  %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 6)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_nxv16i32_8(<vscale x 16 x i32> %x, ptr %y) {
; VLA-LABEL: extract_v2i32_nxv16i32_8:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
; VLA-NEXT:    vslidedown.vi v8, v8, 8
; VLA-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLA-NEXT:    vse32.v v8, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i32_nxv16i32_8:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; VLS-NEXT:    vse32.v v10, (a0)
; VLS-NEXT:    ret
  %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 8)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i8_nxv2i8_0(<vscale x 2 x i8> %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 0)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v2i8_nxv2i8_2(<vscale x 2 x i8> %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_nxv2i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 2)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v2i8_nxv2i8_4(<vscale x 2 x i8> %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_nxv2i8_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 4
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 4)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v2i8_nxv2i8_6(<vscale x 2 x i8> %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_nxv2i8_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 6
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 6)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v8i32_nxv16i32_8(<vscale x 16 x i32> %x, ptr %y) {
; VLA-LABEL: extract_v8i32_nxv16i32_8:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetivli zero, 8, e32, m4, ta, ma
; VLA-NEXT:    vslidedown.vi v8, v8, 8
; VLA-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT:    vse32.v v8, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v8i32_nxv16i32_8:
; VLS:       # %bb.0:
; VLS-NEXT:    vs2r.v v10, (a0)
; VLS-NEXT:    ret
  %c = call <8 x i32> @llvm.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %x, i64 8)
  store <8 x i32> %c, ptr %y
  ret void
}

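; Mask (i1) extracts at offsets that are multiples of 8 can operate on the
; mask register itself: masks are loaded and stored as bytes, so a
; byte-granularity vslidedown at e8 selects the right group of 8 bits.
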
define void @extract_v8i1_v64i1_0(ptr %x, ptr %y) {
; VLA-LABEL: extract_v8i1_v64i1_0:
; VLA:       # %bb.0:
; VLA-NEXT:    li a2, 64
; VLA-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; VLA-NEXT:    vlm.v v8, (a0)
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vsm.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v8i1_v64i1_0:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetvli a2, zero, e8, m4, ta, ma
; VLS-NEXT:    vlm.v v8, (a0)
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vsm.v v8, (a1)
; VLS-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 0)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_v64i1_8(ptr %x, ptr %y) {
; VLA-LABEL: extract_v8i1_v64i1_8:
; VLA:       # %bb.0:
; VLA-NEXT:    li a2, 64
; VLA-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; VLA-NEXT:    vlm.v v8, (a0)
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vslidedown.vi v8, v8, 1
; VLA-NEXT:    vsm.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v8i1_v64i1_8:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetvli a2, zero, e8, m4, ta, ma
; VLS-NEXT:    vlm.v v8, (a0)
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vslidedown.vi v8, v8, 1
; VLS-NEXT:    vsm.v v8, (a1)
; VLS-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 8)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_v64i1_48(ptr %x, ptr %y) {
; VLA-LABEL: extract_v8i1_v64i1_48:
; VLA:       # %bb.0:
; VLA-NEXT:    li a2, 64
; VLA-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; VLA-NEXT:    vlm.v v8, (a0)
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vslidedown.vi v8, v8, 6
; VLA-NEXT:    vsm.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v8i1_v64i1_48:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetvli a2, zero, e8, m4, ta, ma
; VLS-NEXT:    vlm.v v8, (a0)
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vslidedown.vi v8, v8, 6
; VLS-NEXT:    vsm.v v8, (a1)
; VLS-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 48)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv2i1_0(<vscale x 2 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv2i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v0, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv2i1(<vscale x 2 x i1> %x, i64 0)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv64i1_0(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv64i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v0, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 0)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv64i1_8(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv64i1_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v0, 1
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 8)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv64i1_48(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv64i1_48:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v0, 6
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 48)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv64i1_128(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv64i1_128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v0, 16
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 128)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv64i1_192(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv64i1_192:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v0, 24
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 192)
  store <8 x i1> %c, ptr %y
  ret void
}

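; A <2 x i1> result is narrower than a byte, so the extracts below first
; expand the source mask to bytes (vmv.v.i + vmerge.vim), vslidedown to the
; target element pair, and then recreate a mask with vmsne.vi before storing.
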
define void @extract_v2i1_v64i1_0(ptr %x, ptr %y) {
; VLA-LABEL: extract_v2i1_v64i1_0:
; VLA:       # %bb.0:
; VLA-NEXT:    li a2, 64
; VLA-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; VLA-NEXT:    vlm.v v0, (a0)
; VLA-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; VLA-NEXT:    vmv.v.i v8, 0
; VLA-NEXT:    vmerge.vim v8, v8, 1, v0
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vmv.v.i v9, 0
; VLA-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; VLA-NEXT:    vmv.v.v v9, v8
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vmsne.vi v8, v9, 0
; VLA-NEXT:    vsm.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i1_v64i1_0:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetvli a2, zero, e8, m4, ta, ma
; VLS-NEXT:    vlm.v v0, (a0)
; VLS-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; VLS-NEXT:    vmv.v.i v8, 0
; VLS-NEXT:    vmerge.vim v8, v8, 1, v0
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vmv.v.i v9, 0
; VLS-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; VLS-NEXT:    vmv.v.v v9, v8
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vmsne.vi v8, v9, 0
; VLS-NEXT:    vsm.v v8, (a1)
; VLS-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 0)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_v64i1_2(ptr %x, ptr %y) {
; VLA-LABEL: extract_v2i1_v64i1_2:
; VLA:       # %bb.0:
; VLA-NEXT:    li a2, 64
; VLA-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; VLA-NEXT:    vlm.v v0, (a0)
; VLA-NEXT:    vmv.v.i v8, 0
; VLA-NEXT:    vmerge.vim v8, v8, 1, v0
; VLA-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
; VLA-NEXT:    vslidedown.vi v8, v8, 2
; VLA-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; VLA-NEXT:    vmsne.vi v0, v8, 0
; VLA-NEXT:    vmv.v.i v8, 0
; VLA-NEXT:    vmerge.vim v8, v8, 1, v0
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vmv.v.i v9, 0
; VLA-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; VLA-NEXT:    vmv.v.v v9, v8
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vmsne.vi v8, v9, 0
; VLA-NEXT:    vsm.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i1_v64i1_2:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetvli a2, zero, e8, m4, ta, ma
; VLS-NEXT:    vlm.v v0, (a0)
; VLS-NEXT:    vmv.v.i v8, 0
; VLS-NEXT:    vmerge.vim v8, v8, 1, v0
; VLS-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
; VLS-NEXT:    vslidedown.vi v8, v8, 2
; VLS-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; VLS-NEXT:    vmsne.vi v0, v8, 0
; VLS-NEXT:    vmv.v.i v8, 0
; VLS-NEXT:    vmerge.vim v8, v8, 1, v0
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vmv.v.i v9, 0
; VLS-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; VLS-NEXT:    vmv.v.v v9, v8
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vmsne.vi v8, v9, 0
; VLS-NEXT:    vsm.v v8, (a1)
; VLS-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 2)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_v64i1_42(ptr %x, ptr %y) {
; VLA-LABEL: extract_v2i1_v64i1_42:
; VLA:       # %bb.0:
; VLA-NEXT:    li a2, 64
; VLA-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; VLA-NEXT:    vlm.v v0, (a0)
; VLA-NEXT:    vmv.v.i v8, 0
; VLA-NEXT:    vmerge.vim v8, v8, 1, v0
; VLA-NEXT:    li a0, 42
; VLA-NEXT:    vsetivli zero, 2, e8, m4, ta, ma
; VLA-NEXT:    vslidedown.vx v8, v8, a0
; VLA-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; VLA-NEXT:    vmsne.vi v0, v8, 0
; VLA-NEXT:    vmv.v.i v8, 0
; VLA-NEXT:    vmerge.vim v8, v8, 1, v0
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vmv.v.i v9, 0
; VLA-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; VLA-NEXT:    vmv.v.v v9, v8
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vmsne.vi v8, v9, 0
; VLA-NEXT:    vsm.v v8, (a1)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i1_v64i1_42:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetvli a2, zero, e8, m4, ta, ma
; VLS-NEXT:    vlm.v v0, (a0)
; VLS-NEXT:    vmv.v.i v8, 0
; VLS-NEXT:    vmerge.vim v8, v8, 1, v0
; VLS-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
; VLS-NEXT:    vslidedown.vi v8, v10, 10
; VLS-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; VLS-NEXT:    vmsne.vi v0, v8, 0
; VLS-NEXT:    vmv.v.i v8, 0
; VLS-NEXT:    vmerge.vim v8, v8, 1, v0
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vmv.v.i v9, 0
; VLS-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; VLS-NEXT:    vmv.v.v v9, v8
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vmsne.vi v8, v9, 0
; VLS-NEXT:    vsm.v v8, (a1)
; VLS-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 42)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_nxv2i1_0(<vscale x 2 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_nxv2i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %x, i64 0)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_nxv2i1_2(<vscale x 2 x i1> %x, ptr %y) {
; VLA-LABEL: extract_v2i1_nxv2i1_2:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; VLA-NEXT:    vmv.v.i v8, 0
; VLA-NEXT:    vmerge.vim v8, v8, 1, v0
; VLA-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; VLA-NEXT:    vslidedown.vi v8, v8, 2
; VLA-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; VLA-NEXT:    vmsne.vi v0, v8, 0
; VLA-NEXT:    vmv.v.i v8, 0
; VLA-NEXT:    vmerge.vim v8, v8, 1, v0
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vmv.v.i v9, 0
; VLA-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; VLA-NEXT:    vmv.v.v v9, v8
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vmsne.vi v8, v9, 0
; VLA-NEXT:    vsm.v v8, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i1_nxv2i1_2:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; VLS-NEXT:    vmv.v.i v8, 0
; VLS-NEXT:    vmerge.vim v8, v8, 1, v0
; VLS-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; VLS-NEXT:    vslidedown.vi v8, v8, 2
; VLS-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; VLS-NEXT:    vmsne.vi v0, v8, 0
; VLS-NEXT:    vmv.v.i v8, 0
; VLS-NEXT:    vmerge.vim v8, v8, 1, v0
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vmv.v.i v9, 0
; VLS-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; VLS-NEXT:    vmv.v.v v9, v8
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vmsne.vi v8, v9, 0
; VLS-NEXT:    vsm.v v8, (a0)
; VLS-NEXT:    ret
  %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %x, i64 2)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_nxv64i1_0(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_nxv64i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 0)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_nxv64i1_2(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_nxv64i1_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 2)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_nxv64i1_42(<vscale x 64 x i1> %x, ptr %y) {
; VLA-LABEL: extract_v2i1_nxv64i1_42:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; VLA-NEXT:    vmv.v.i v8, 0
; VLA-NEXT:    vmerge.vim v8, v8, 1, v0
; VLA-NEXT:    li a1, 42
; VLA-NEXT:    vsetivli zero, 2, e8, m4, ta, ma
; VLA-NEXT:    vslidedown.vx v8, v8, a1
; VLA-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; VLA-NEXT:    vmsne.vi v0, v8, 0
; VLA-NEXT:    vmv.v.i v8, 0
; VLA-NEXT:    vmerge.vim v8, v8, 1, v0
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vmv.v.i v9, 0
; VLA-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; VLA-NEXT:    vmv.v.v v9, v8
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vmsne.vi v8, v9, 0
; VLA-NEXT:    vsm.v v8, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i1_nxv64i1_42:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; VLS-NEXT:    vmv.v.i v8, 0
; VLS-NEXT:    vmerge.vim v8, v8, 1, v0
; VLS-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
; VLS-NEXT:    vslidedown.vi v8, v10, 10
; VLS-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; VLS-NEXT:    vmsne.vi v0, v8, 0
; VLS-NEXT:    vmv.v.i v8, 0
; VLS-NEXT:    vmerge.vim v8, v8, 1, v0
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vmv.v.i v9, 0
; VLS-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; VLS-NEXT:    vmv.v.v v9, v8
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vmsne.vi v8, v9, 0
; VLS-NEXT:    vsm.v v8, (a0)
; VLS-NEXT:    ret
  %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 42)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_nxv32i1_26(<vscale x 32 x i1> %x, ptr %y) {
; VLA-LABEL: extract_v2i1_nxv32i1_26:
; VLA:       # %bb.0:
; VLA-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; VLA-NEXT:    vmv.v.i v8, 0
; VLA-NEXT:    vmerge.vim v8, v8, 1, v0
; VLA-NEXT:    vsetivli zero, 2, e8, m2, ta, ma
; VLA-NEXT:    vslidedown.vi v8, v8, 26
; VLA-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; VLA-NEXT:    vmsne.vi v0, v8, 0
; VLA-NEXT:    vmv.v.i v8, 0
; VLA-NEXT:    vmerge.vim v8, v8, 1, v0
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vmv.v.i v9, 0
; VLA-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; VLA-NEXT:    vmv.v.v v9, v8
; VLA-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLA-NEXT:    vmsne.vi v8, v9, 0
; VLA-NEXT:    vsm.v v8, (a0)
; VLA-NEXT:    ret
;
; VLS-LABEL: extract_v2i1_nxv32i1_26:
; VLS:       # %bb.0:
; VLS-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; VLS-NEXT:    vmv.v.i v8, 0
; VLS-NEXT:    vmerge.vim v8, v8, 1, v0
; VLS-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
; VLS-NEXT:    vslidedown.vi v8, v9, 10
; VLS-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; VLS-NEXT:    vmsne.vi v0, v8, 0
; VLS-NEXT:    vmv.v.i v8, 0
; VLS-NEXT:    vmerge.vim v8, v8, 1, v0
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vmv.v.i v9, 0
; VLS-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; VLS-NEXT:    vmv.v.v v9, v8
; VLS-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; VLS-NEXT:    vmsne.vi v8, v9, 0
; VLS-NEXT:    vsm.v v8, (a0)
; VLS-NEXT:    ret
  %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv32i1(<vscale x 32 x i1> %x, i64 26)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv32i1_16(<vscale x 32 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv32i1_16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v0, 2
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv32i1(<vscale x 32 x i1> %x, i64 16)
  store <8 x i1> %c, ptr %y
  ret void
}

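; Unlike the store-based tests above, this one returns the extracted
; subvector directly in v8.
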
define <1 x i64> @extract_v1i64_v2i64_1(<2 x i64> %x) {
; CHECK-LABEL: extract_v1i64_v2i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 1
; CHECK-NEXT:    ret
  %v = call <1 x i64> @llvm.vector.extract.v1i64.v2i64(<2 x i64> %x, i64 1)
  ret <1 x i64> %v
}

declare <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %vec, i64 %idx)

declare <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.nxv2i1(<vscale x 2 x i1> %vec, i64 %idx)

declare <2 x i1> @llvm.vector.extract.v2i1.nxv32i1(<vscale x 32 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.nxv32i1(<vscale x 32 x i1> %vec, i64 %idx)

declare <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %vec, i64 %idx)

declare <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %vec, i64 %idx)
declare <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %vec, i64 %idx)

declare <1 x i32> @llvm.vector.extract.v1i32.v8i32(<8 x i32> %vec, i64 %idx)
declare <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %vec, i64 %idx)

declare <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %vec, i64 %idx)

declare <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <8 x i32> @llvm.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)