; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-vector-bits-max=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-KNOWNVLEN128
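
; Tests lowering of llvm.vector.extract that produces small fixed-length
; vectors from fixed-length and scalable source vectors.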
define void @extract_v2i8_v4i8_0(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_v4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <4 x i8>, ptr %x
  %c = call <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 0)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v2i8_v4i8_2(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_v4i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <4 x i8>, ptr %x
  %c = call <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 2)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v2i8_v8i8_0(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_v8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %c = call <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 0)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v2i8_v8i8_6(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_v8i8_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 6
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %c = call <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 6)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v1i32_v8i32_4(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v1i32_v8i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 4
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %c = call <1 x i32> @llvm.vector.extract.v1i32.v8i32(<8 x i32> %a, i64 4)
  store <1 x i32> %c, ptr %y
  ret void
}

define void @extract_v1i32_v8i32_5(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v1i32_v8i32_5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %c = call <1 x i32> @llvm.vector.extract.v1i32.v8i32(<8 x i32> %a, i64 5)
  store <1 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_v8i32_0(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i32_v8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 0)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_v8i32_2(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i32_v8i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 2)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_v8i32_4(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i32_v8i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 4
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 4)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_v8i32_6(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i32_v8i32_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 6
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 6)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_nxv16i32_0(<vscale x 16 x i32> %x, ptr %y) {
; CHECK-LABEL: extract_v2i32_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 0)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_nxv16i32_2(<vscale x 16 x i32> %x, ptr %y) {
; CHECK-LABEL: extract_v2i32_nxv16i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 2)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_nxv16i32_4(<vscale x 16 x i32> %x, ptr %y) {
; CHECK-LABEL: extract_v2i32_nxv16i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 4
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 4)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_nxv16i32_6(<vscale x 16 x i32> %x, ptr %y) {
; CHECK-LABEL: extract_v2i32_nxv16i32_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 6
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 6)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i32_nxv16i32_8(<vscale x 16 x i32> %x, ptr %y) {
; CHECK-LABEL: extract_v2i32_nxv16i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 8
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %x, i64 8)
  store <2 x i32> %c, ptr %y
  ret void
}

define void @extract_v2i8_nxv2i8_0(<vscale x 2 x i8> %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 0)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v2i8_nxv2i8_2(<vscale x 2 x i8> %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_nxv2i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 2)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v2i8_nxv2i8_4(<vscale x 2 x i8> %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_nxv2i8_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 4
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 4)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v2i8_nxv2i8_6(<vscale x 2 x i8> %x, ptr %y) {
; CHECK-LABEL: extract_v2i8_nxv2i8_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 6
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %x, i64 6)
  store <2 x i8> %c, ptr %y
  ret void
}

define void @extract_v8i32_nxv16i32_8(<vscale x 16 x i32> %x, ptr %y) {
; CHECK-LABEL: extract_v8i32_nxv16i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 8
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i32> @llvm.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %x, i64 8)
  store <8 x i32> %c, ptr %y
  ret void
}

define void @extract_v8i1_v64i1_0(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_v64i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 0)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_v64i1_8(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_v64i1_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 1
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 8)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_v64i1_48(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_v64i1_48:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 6
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 48)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv2i1_0(<vscale x 2 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv2i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v0, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv2i1(<vscale x 2 x i1> %x, i64 0)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv64i1_0(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv64i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v0, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 0)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv64i1_8(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv64i1_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v0, 1
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 8)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv64i1_48(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv64i1_48:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v0, 6
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 48)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv64i1_128(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv64i1_128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v0, 16
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 128)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv64i1_192(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv64i1_192:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v0, 24
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %x, i64 192)
  store <8 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_v64i1_0(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_v64i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 0)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_v64i1_2(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_v64i1_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 2)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_v64i1_42(ptr %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_v64i1_42:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    li a0, 42
; CHECK-NEXT:    vsetivli zero, 2, e8, m4, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <64 x i1>, ptr %x
  %c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 42)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_nxv2i1_0(<vscale x 2 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_nxv2i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %x, i64 0)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_nxv2i1_2(<vscale x 2 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_nxv2i1_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %x, i64 2)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_nxv64i1_0(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_nxv64i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 0)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_nxv64i1_2(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_nxv64i1_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 2)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_nxv64i1_42(<vscale x 64 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_nxv64i1_42:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    li a1, 42
; CHECK-NEXT:    vsetivli zero, 2, e8, m4, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a1
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %x, i64 42)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v2i1_nxv32i1_26(<vscale x 32 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v2i1_nxv32i1_26:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 2, e8, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 26
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv32i1(<vscale x 32 x i1> %x, i64 26)
  store <2 x i1> %c, ptr %y
  ret void
}

define void @extract_v8i1_nxv32i1_16(<vscale x 32 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv32i1_16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v0, 2
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv32i1(<vscale x 32 x i1> %x, i64 16)
  store <8 x i1> %c, ptr %y
  ret void
}

declare <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %vec, i64 %idx)

declare <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.nxv2i1(<vscale x 2 x i1> %vec, i64 %idx)

declare <2 x i1> @llvm.vector.extract.v2i1.nxv32i1(<vscale x 32 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.nxv32i1(<vscale x 32 x i1> %vec, i64 %idx)

declare <2 x i1> @llvm.vector.extract.v2i1.nxv64i1(<vscale x 64 x i1> %vec, i64 %idx)
declare <8 x i1> @llvm.vector.extract.v8i1.nxv64i1(<vscale x 64 x i1> %vec, i64 %idx)

declare <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %vec, i64 %idx)
declare <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %vec, i64 %idx)

declare <1 x i32> @llvm.vector.extract.v1i32.v8i32(<8 x i32> %vec, i64 %idx)
declare <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %vec, i64 %idx)

declare <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> %vec, i64 %idx)

declare <2 x i32> @llvm.vector.extract.v2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <8 x i32> @llvm.vector.extract.v8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-KNOWNVLEN128: {{.*}}