; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s

declare <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32)
declare <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
declare <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>, <vscale x 2 x float>, i32)
declare <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, i32)

; Test binary operator with vp.merge and vp.add.
declare <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @vpmerge_vpadd(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpadd:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test that the glued node of the merge is not deleted.
declare <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, metadata, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @vpmerge_vpadd2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpadd2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmseq.vv v0, v9, v10
; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test vp.merge with an all-ones mask.
define <vscale x 2 x i32> @vpmerge_vpadd3(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpadd3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: vadd.vv v8, v9, v10
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test float binary operator with vp.merge and vp.fadd.
declare <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32)
define <vscale x 2 x float> @vpmerge_vpfadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpfadd:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test binary operator with a specific EEW by riscv.vrgatherei16.
declare <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i16>, i64)
define <vscale x 2 x i32> @vpmerge_vrgatherei16(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vrgatherei16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vrgatherei16.vv v8, v9, v10
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %2 = tail call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> %x, <vscale x 2 x i16> %y, i64 %1)
  %3 = tail call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %2, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %2
}

; Test conversion by fptosi.
declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i16> @vpmerge_vpfptosi(<vscale x 2 x i16> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpfptosi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %a, <vscale x 2 x i16> %passthru, i32 %vl)
  ret <vscale x 2 x i16> %b
}

; Test conversion by sitofp.
declare <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
define <vscale x 2 x float> @vpmerge_vpsitofp(<vscale x 2 x float> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpsitofp:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test integer extension by vp.zext.
declare <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @vpmerge_vpzext(<vscale x 2 x i32> %passthru, <vscale x 2 x i8> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpzext:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vzext.vf4 v8, v9, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test integer truncation by vp.trunc.
declare <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @vpmerge_vptrunc(<vscale x 2 x i32> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vptrunc:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test float extension by vp.fpext.
declare <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
define <vscale x 2 x double> @vpmerge_vpfpext(<vscale x 2 x double> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpfpext:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
  ret <vscale x 2 x double> %b
}

; Test float truncation by vp.fptrunc.
declare <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
define <vscale x 2 x float> @vpmerge_vpfptrunc(<vscale x 2 x float> %passthru, <vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpfptrunc:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test load operation by vp.load.
declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> *, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @vpmerge_vpload(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpload:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test a result with both a chain and a glued node.
define <vscale x 2 x i32> @vpmerge_vpload2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpload2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmseq.vv v0, v9, v10
; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test that the chain output of the true operand of merge.vvm is preserved.
define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpload_store:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: vs1r.v v8, (a0)
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  store <vscale x 2 x i32> %b, <vscale x 2 x i32> * %p
  ret void
}

; FIXME: Merge vmerge.vvm and vleNff.v.
declare { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i64)
define <vscale x 2 x i32> @vpmerge_vleff(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vleff:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32ff.v v9, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %1)
  %b = extractvalue { <vscale x 2 x i32>, i64 } %a, 0
  %c = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %b, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %c
}

; Test strided load by riscv.vlse.
declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i64, i64)
define <vscale x 2 x i32> @vpmerge_vlse(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vlse:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, m1, tu, mu
; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %s, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test indexed load by riscv.vluxei.
declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i64>, i64)
define <vscale x 2 x i32> @vpmerge_vluxei(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i64> %idx, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vluxei:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, m1, tu, mu
; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, <vscale x 2 x i64> %idx, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test vector index by riscv.vid.
declare <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(<vscale x 2 x i32>, i64)
define <vscale x 2 x i32> @vpmerge_vid(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vid:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vid.v v8, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(<vscale x 2 x i32> undef, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

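; Test viota.m by riscv.viota.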
declare <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i64)
define <vscale x 2 x i32> @vpmerge_viota(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, <vscale x 2 x i1> %vm, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_viota:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: viota.m v8, v9, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i1> %vm, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

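; Test floating-point classify by riscv.vfclass.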
declare <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x float>, i64)
define <vscale x 2 x i32> @vpmerge_vfclass(<vscale x 2 x i32> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vfclass:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfclass.v v8, v9, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x float> %vf, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

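; Test floating-point square root by riscv.vfsqrt.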
declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i64, i64)
define <vscale x 2 x float> @vpmerge_vfsqrt(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vfsqrt:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test reciprocal operation by riscv.vfrec7.
declare <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i64, i64)
define <vscale x 2 x float> @vpmerge_vfrec7(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vfrec7:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfrec7.v v8, v9, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test vector operations with VLMAX vector length.

; Test binary operator with vp.merge and add.
define <vscale x 2 x i32> @vpmerge_add(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_add:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %a = add <vscale x 2 x i32> %x, %y
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test binary operator with vp.merge and fadd.
define <vscale x 2 x float> @vpmerge_fadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_fadd:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %a = fadd <vscale x 2 x float> %x, %y
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; This shouldn't be folded because we need to preserve exceptions with
; "fpexcept.strict" exception behaviour, and masking may hide them.
define <vscale x 2 x float> @vpmerge_constrained_fadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 %vl) strictfp {
; CHECK-LABEL: vpmerge_constrained_fadd:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vfadd.vv v9, v9, v10
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
  %a = call <vscale x 2 x float> @llvm.experimental.constrained.fadd(<vscale x 2 x float> %x, <vscale x 2 x float> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %m, i64 %vl) strictfp
  ret <vscale x 2 x float> %b
}
declare <vscale x 2 x float> @llvm.experimental.constrained.fadd(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
declare <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i64)

; This shouldn't be folded because we need to preserve exceptions with
; "fpexcept.strict" exception behaviour, and masking may hide them.
define <vscale x 2 x float> @vpmerge_constrained_fadd_vlmax(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m) strictfp {
; CHECK-LABEL: vpmerge_constrained_fadd_vlmax:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vfadd.vv v9, v9, v10
; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
  %a = call <vscale x 2 x float> @llvm.experimental.constrained.fadd(<vscale x 2 x float> %x, <vscale x 2 x float> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %m, i64 -1) strictfp
  ret <vscale x 2 x float> %b
}

; Test conversion by fptosi.
define <vscale x 2 x i16> @vpmerge_fptosi(<vscale x 2 x i16> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_fptosi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT: ret
  %a = fptosi <vscale x 2 x float> %x to <vscale x 2 x i16>
  %b = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %a, <vscale x 2 x i16> %passthru, i32 %vl)
  ret <vscale x 2 x i16> %b
}

; Test conversion by sitofp.
define <vscale x 2 x float> @vpmerge_sitofp(<vscale x 2 x float> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_sitofp:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t
; CHECK-NEXT: ret
  %a = sitofp <vscale x 2 x i64> %x to <vscale x 2 x float>
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test float extension by fpext.
define <vscale x 2 x double> @vpmerge_fpext(<vscale x 2 x double> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_fpext:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t
; CHECK-NEXT: ret
  %a = fpext <vscale x 2 x float> %x to <vscale x 2 x double>
  %b = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
  ret <vscale x 2 x double> %b
}

; Test float truncation by fptrunc.
define <vscale x 2 x float> @vpmerge_fptrunc(<vscale x 2 x float> %passthru, <vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_fptrunc:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t
; CHECK-NEXT: ret
  %a = fptrunc <vscale x 2 x double> %x to <vscale x 2 x float>
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test integer extension by zext.
define <vscale x 2 x i32> @vpmerge_zext(<vscale x 2 x i32> %passthru, <vscale x 2 x i8> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_zext:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vzext.vf4 v8, v9, v0.t
; CHECK-NEXT: ret
  %a = zext <vscale x 2 x i8> %x to <vscale x 2 x i32>
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test integer truncation by trunc.
define <vscale x 2 x i32> @vpmerge_trunc(<vscale x 2 x i32> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_trunc:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: ret
  %a = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

declare <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32)
declare <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
declare <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>, <vscale x 2 x float>, i32)
declare <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, i32)

; Test binary operator with vp.select and vp.add.
define <vscale x 2 x i32> @vpselect_vpadd(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpadd:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test that the glued node of the select is not deleted.
define <vscale x 2 x i32> @vpselect_vpadd2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpadd2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmseq.vv v0, v9, v10
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test vp.select with an all-ones mask.
define <vscale x 2 x i32> @vpselect_vpadd3(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpadd3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v9, v10
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test float binary operator with vp.select and vp.fadd.
define <vscale x 2 x float> @vpselect_vpfadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpfadd:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test binary operator with a specific EEW by riscv.vrgatherei16.
define <vscale x 2 x i32> @vpselect_vrgatherei16(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vrgatherei16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vrgatherei16.vv v8, v9, v10
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %2 = tail call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> %x, <vscale x 2 x i16> %y, i64 %1)
  %3 = tail call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %2, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %2
}

; Test conversion by fptosi.
define <vscale x 2 x i16> @vpselect_vpfptosi(<vscale x 2 x i16> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpfptosi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %a, <vscale x 2 x i16> %passthru, i32 %vl)
  ret <vscale x 2 x i16> %b
}

; Test conversion by sitofp.
define <vscale x 2 x float> @vpselect_vpsitofp(<vscale x 2 x float> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpsitofp:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test integer extension by vp.zext.
define <vscale x 2 x i32> @vpselect_vpzext(<vscale x 2 x i32> %passthru, <vscale x 2 x i8> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpzext:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vzext.vf4 v8, v9, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test integer truncation by vp.trunc.
define <vscale x 2 x i32> @vpselect_vptrunc(<vscale x 2 x i32> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vptrunc:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test float extension by vp.fpext.
define <vscale x 2 x double> @vpselect_vpfpext(<vscale x 2 x double> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpfpext:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
  ret <vscale x 2 x double> %b
}

; Test float truncation by vp.fptrunc.
define <vscale x 2 x float> @vpselect_vpfptrunc(<vscale x 2 x float> %passthru, <vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpfptrunc:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test load operation by vp.load.
define <vscale x 2 x i32> @vpselect_vpload(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpload:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test a result with both a chain and a glued node.
define <vscale x 2 x i32> @vpselect_vpload2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpload2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmseq.vv v0, v9, v10
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test that the chain output of the true operand of select.vvm is preserved.
define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpload_store:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: vs1r.v v8, (a0)
; CHECK-NEXT: ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  store <vscale x 2 x i32> %b, <vscale x 2 x i32> * %p
  ret void
}

; FIXME: Merge vselect.vvm and vleNff.v.
define <vscale x 2 x i32> @vpselect_vleff(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vleff:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32ff.v v9, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %1)
  %b = extractvalue { <vscale x 2 x i32>, i64 } %a, 0
  %c = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %b, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %c
}

; Test strided load by riscv.vlse.
define <vscale x 2 x i32> @vpselect_vlse(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vlse:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %s, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test indexed load by riscv.vluxei.
define <vscale x 2 x i32> @vpselect_vluxei(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i64> %idx, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vluxei:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, <vscale x 2 x i64> %idx, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test vector index by riscv.vid.
define <vscale x 2 x i32> @vpselect_vid(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vid:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vid.v v8, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(<vscale x 2 x i32> undef, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

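; Test viota.m by riscv.viota.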
define <vscale x 2 x i32> @vpselect_viota(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, <vscale x 2 x i1> %vm, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_viota:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: viota.m v8, v9, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i1> %vm, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

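; Test floating-point classify by riscv.vfclass.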
define <vscale x 2 x i32> @vpselect_vfclass(<vscale x 2 x i32> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vfclass:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfclass.v v8, v9, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x float> %vf, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

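; Test floating-point square root by riscv.vfsqrt.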
define <vscale x 2 x float> @vpselect_vfsqrt(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vfsqrt:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test reciprocal operation by riscv.vfrec7.
define <vscale x 2 x float> @vpselect_vfrec7(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vfrec7:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfrec7.v v8, v9, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

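; Test vslideup.vx by riscv.vslideup.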
declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, i64, i64)
define <vscale x 2 x i32> @vpselect_vslideup(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vslideup:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %v, i64 %x, i64 %1, i64 0)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

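; Test vslidedown.vx by riscv.vslidedown.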
declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, i64, i64)
define <vscale x 2 x i32> @vpselect_vslidedown(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vslidedown:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %v, i64 %x, i64 %1, i64 0)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

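; Test vslide1up.vx by riscv.vslide1up.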
declare <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, i64)
define <vscale x 2 x i32> @vpselect_vslide1up(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i32 %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vslide1up:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %v, i32 %x, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

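; Test vslide1down.vx by riscv.vslide1down.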
declare <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, i64)
define <vscale x 2 x i32> @vpselect_vslide1down(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i32 %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vslide1down:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %v, i32 %x, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test vector operations with VLMAX vector length.

; Test binary operator with vp.select and add.
define <vscale x 2 x i32> @vpselect_add(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_add:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %a = add <vscale x 2 x i32> %x, %y
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test binary operator with vp.select and fadd.
define <vscale x 2 x float> @vpselect_fadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_fadd:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %a = fadd <vscale x 2 x float> %x, %y
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test conversion by fptosi.
define <vscale x 2 x i16> @vpselect_fptosi(<vscale x 2 x i16> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_fptosi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT: ret
  %a = fptosi <vscale x 2 x float> %x to <vscale x 2 x i16>
  %b = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %a, <vscale x 2 x i16> %passthru, i32 %vl)
  ret <vscale x 2 x i16> %b
}

; Test conversion by sitofp.
define <vscale x 2 x float> @vpselect_sitofp(<vscale x 2 x float> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_sitofp:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t
; CHECK-NEXT: ret
  %a = sitofp <vscale x 2 x i64> %x to <vscale x 2 x float>
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test float extension by fpext.
define <vscale x 2 x double> @vpselect_fpext(<vscale x 2 x double> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_fpext:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t
; CHECK-NEXT: ret
  %a = fpext <vscale x 2 x float> %x to <vscale x 2 x double>
  %b = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
  ret <vscale x 2 x double> %b
}

; Test float truncation by fptrunc.
define <vscale x 2 x float> @vpselect_fptrunc(<vscale x 2 x float> %passthru, <vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_fptrunc:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t
; CHECK-NEXT: ret
  %a = fptrunc <vscale x 2 x double> %x to <vscale x 2 x float>
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test integer extension by zext.
define <vscale x 2 x i32> @vpselect_zext(<vscale x 2 x i32> %passthru, <vscale x 2 x i8> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_zext:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vzext.vf4 v8, v9, v0.t
; CHECK-NEXT: ret
  %a = zext <vscale x 2 x i8> %x to <vscale x 2 x i32>
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test integer truncation by trunc.
define <vscale x 2 x i32> @vpselect_trunc(<vscale x 2 x i32> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_trunc:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: ret
  %a = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Folding this would create a loop in the DAG because the chain from the VLE is
; used by the vssubu.
define void @test_dag_loop() {
; CHECK-LABEL: test_dag_loop:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v8, (zero)
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vmclr.m v0
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetivli zero, 0, e8, m4, tu, mu
; CHECK-NEXT: vmv4r.v v20, v16
; CHECK-NEXT: vssubu.vx v20, v16, zero, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma
; CHECK-NEXT: vmseq.vv v0, v20, v16
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetivli zero, 1, e16, m8, tu, ma
; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0
; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, ma
; CHECK-NEXT: vse16.v v16, (zero)
; CHECK-NEXT: ret
entry:
  %0 = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16>* null, i64 1)
  %1 = tail call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> zeroinitializer, <vscale x 32 x i8> zeroinitializer, i8 0, <vscale x 32 x i1> zeroinitializer, i64 0, i64 0)
  %2 = tail call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> %1, <vscale x 32 x i8> zeroinitializer, i64 0)
  %3 = tail call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> %0, <vscale x 32 x i1> %2, i64 1)
  call void @llvm.riscv.vse.nxv32i16.i64(<vscale x 32 x i16> %3, <vscale x 32 x i16>* null, i64 0)
  ret void
}

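; Test folding vmerge into a masked riscv.vaaddu.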
define <vscale x 1 x i16> @test_vaaddu(<vscale x 1 x i16> %var_11, i16 zeroext %var_9, <vscale x 1 x i1> %var_5, <vscale x 1 x i16> %var_0) {
; CHECK-LABEL: test_vaaddu:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 3, e16, mf4, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vaaddu.vx v9, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %var_11, i16 %var_9, i64 0, i64 3)
  %1 = tail call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %var_0, <vscale x 1 x i16> %0, <vscale x 1 x i1> %var_5, i64 3)
  ret <vscale x 1 x i16> %1
}

declare <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>* nocapture, i64)
declare <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i8, <vscale x 32 x i1>, i64, i64 immarg)
declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i64)
declare <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i64)
declare void @llvm.riscv.vse.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>* nocapture, i64)
declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, i16, i64 immarg, i64)
declare <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)

; Tests for folding vmerge into its ops when their VLs differ.

declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64)
declare <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)

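; Can fold with VL=2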
define <vscale x 2 x i32> @vmerge_smaller_vl_same_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_smaller_vl_same_passthru:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 4)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 2)
  ret <vscale x 2 x i32> %b
}

; Can fold with VL=2
define <vscale x 2 x i32> @vmerge_larger_vl_same_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_larger_vl_same_passthru:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 2)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 3)
  ret <vscale x 2 x i32> %b
}

; Can't fold this because we need to take elements from both %pt1 and %pt2
define <vscale x 2 x i32> @vmerge_smaller_vl_different_passthru(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_smaller_vl_different_passthru:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
; CHECK-NEXT: vadd.vv v8, v10, v11
; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 3)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt2, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 2)
  ret <vscale x 2 x i32> %b
}

; Can't fold this because we need to take elements from both %pt1 and %pt2
define <vscale x 2 x i32> @vmerge_larger_vl_different_passthru(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_larger_vl_different_passthru:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT: vadd.vv v8, v10, v11
; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 2)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt2, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 3)
  ret <vscale x 2 x i32> %b
}

; Can fold with VL=2
define <vscale x 2 x i32> @vmerge_smaller_vl_poison_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_smaller_vl_poison_passthru:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 3)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 2)
  ret <vscale x 2 x i32> %b
}

; Can fold with VL=2
define <vscale x 2 x i32> @vmerge_larger_vl_poison_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_larger_vl_poison_passthru:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 2)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 3)
  ret <vscale x 2 x i32> %b
}

; The vadd's new policy should be tail undisturbed since the false op of the
; vmerge moves from the body to the tail, and we need to preserve it.
define <vscale x 2 x i32> @vmerge_larger_vl_false_becomes_tail(<vscale x 2 x i32> %false, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_larger_vl_false_becomes_tail:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 2)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %false, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 3)
  ret <vscale x 2 x i32> %b
}