; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh | FileCheck %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck %s
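
; Check lowering of fixed-length llvm.experimental.vector.interleave2 results
; that are immediately stored. Naturally aligned stores of interleaved vectors
; should become a single segmented store (vsseg2). The <32 x i1> mask case
; below has no direct segmented form: the operands are expanded to i8 vectors,
; interleaved with the vwaddu.vv/vwmaccu.vx(-1) idiom, and narrowed back to a
; mask with vmsne.vi before being stored with vsm.v.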
define void @vector_interleave_store_v32i1_v16i1(<16 x i1> %a, <16 x i1> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_v32i1_v16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vslideup.vi v0, v8, 2
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 16, e8, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v10, v8, 16
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a2, -1
; CHECK-NEXT:    vwmaccu.vx v12, a2, v10
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v12, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %res = call <32 x i1> @llvm.experimental.vector.interleave2.v32i1(<16 x i1> %a, <16 x i1> %b)
  store <32 x i1> %res, ptr %p
  ret void
}

; Shouldn't be lowered to vsseg because it's unaligned.
define void @vector_interleave_store_v16i16_v8i16_align1(<8 x i16> %a, <8 x i16> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_v16i16_v8i16_align1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a1, -1
; CHECK-NEXT:    vwmaccu.vx v10, a1, v9
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vse8.v v10, (a0)
; CHECK-NEXT:    ret
  %res = call <16 x i16> @llvm.experimental.vector.interleave2.v16i16(<8 x i16> %a, <8 x i16> %b)
  store <16 x i16> %res, ptr %p, align 1
  ret void
}
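
; With natural alignment, the same interleave and store pair folds into a
; single segmented store.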
define void @vector_interleave_store_v16i16_v8i16(<8 x i16> %a, <8 x i16> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_v16i16_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
  %res = call <16 x i16> @llvm.experimental.vector.interleave2.v16i16(<8 x i16> %a, <8 x i16> %b)
  store <16 x i16> %res, ptr %p
  ret void
}

define void @vector_interleave_store_v8i32_v4i32(<4 x i32> %a, <4 x i32> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_v8i32_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vsseg2e32.v v8, (a0)
; CHECK-NEXT:    ret
  %res = call <8 x i32> @llvm.experimental.vector.interleave2.v8i32(<4 x i32> %a, <4 x i32> %b)
  store <8 x i32> %res, ptr %p
  ret void
}

define void @vector_interleave_store_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_v4i64_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vsseg2e64.v v8, (a0)
; CHECK-NEXT:    ret
  %res = call <4 x i64> @llvm.experimental.vector.interleave2.v4i64(<2 x i64> %a, <2 x i64> %b)
  store <4 x i64> %res, ptr %p
  ret void
}

declare <32 x i1> @llvm.experimental.vector.interleave2.v32i1(<16 x i1>, <16 x i1>)
declare <16 x i16> @llvm.experimental.vector.interleave2.v16i16(<8 x i16>, <8 x i16>)
declare <8 x i32> @llvm.experimental.vector.interleave2.v8i32(<4 x i32>, <4 x i32>)
declare <4 x i64> @llvm.experimental.vector.interleave2.v4i64(<2 x i64>, <2 x i64>)
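
; Floats: the same single-vsseg2 lowering applies to floating-point element
; types.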
define void @vector_interleave_store_v4f16_v2f16(<2 x half> %a, <2 x half> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_v4f16_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
  %res = call <4 x half> @llvm.experimental.vector.interleave2.v4f16(<2 x half> %a, <2 x half> %b)
  store <4 x half> %res, ptr %p
  ret void
}

define void @vector_interleave_store_v8f16_v4f16(<4 x half> %a, <4 x half> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_v8f16_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
  %res = call <8 x half> @llvm.experimental.vector.interleave2.v8f16(<4 x half> %a, <4 x half> %b)
  store <8 x half> %res, ptr %p
  ret void
}

define void @vector_interleave_store_v4f32_v2f32(<2 x float> %a, <2 x float> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_v4f32_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg2e32.v v8, (a0)
; CHECK-NEXT:    ret
  %res = call <4 x float> @llvm.experimental.vector.interleave2.v4f32(<2 x float> %a, <2 x float> %b)
  store <4 x float> %res, ptr %p
  ret void
}

define void @vector_interleave_store_v16f16_v8f16(<8 x half> %a, <8 x half> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_v16f16_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
  %res = call <16 x half> @llvm.experimental.vector.interleave2.v16f16(<8 x half> %a, <8 x half> %b)
  store <16 x half> %res, ptr %p
  ret void
}

define void @vector_interleave_store_v8f32_v4f32(<4 x float> %a, <4 x float> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_v8f32_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vsseg2e32.v v8, (a0)
; CHECK-NEXT:    ret
  %res = call <8 x float> @llvm.experimental.vector.interleave2.v8f32(<4 x float> %a, <4 x float> %b)
  store <8 x float> %res, ptr %p
  ret void
}

define void @vector_interleave_store_v4f64_v2f64(<2 x double> %a, <2 x double> %b, ptr %p) {
; CHECK-LABEL: vector_interleave_store_v4f64_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vsseg2e64.v v8, (a0)
; CHECK-NEXT:    ret
  %res = call <4 x double> @llvm.experimental.vector.interleave2.v4f64(<2 x double> %a, <2 x double> %b)
  store <4 x double> %res, ptr %p
  ret void
}

declare <4 x half> @llvm.experimental.vector.interleave2.v4f16(<2 x half>, <2 x half>)
declare <8 x half> @llvm.experimental.vector.interleave2.v8f16(<4 x half>, <4 x half>)
declare <4 x float> @llvm.experimental.vector.interleave2.v4f32(<2 x float>, <2 x float>)
declare <16 x half> @llvm.experimental.vector.interleave2.v16f16(<8 x half>, <8 x half>)
declare <8 x float> @llvm.experimental.vector.interleave2.v8f32(<4 x float>, <4 x float>)
declare <4 x double> @llvm.experimental.vector.interleave2.v4f64(<2 x double>, <2 x double>)