; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s

declare <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32)
declare <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
declare <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>, <vscale x 2 x float>, i32)
declare <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, i32)

; Test binary operator with vp.merge and vp.add.
declare <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @vpmerge_vpadd(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpadd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test glued node of merge should not be deleted.
declare <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, metadata, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @vpmerge_vpadd2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpadd2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmseq.vv v0, v9, v10
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test vp.merge with an all-ones mask.
define <vscale x 2 x i32> @vpmerge_vpadd3(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpadd3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vadd.vv v8, v9, v10
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test float binary operator with vp.merge and vp.fadd.
declare <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32)
define <vscale x 2 x float> @vpmerge_vpfadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpfadd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test for binary operator with specific EEW by riscv.vrgatherei16.
declare <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i16>, i64)
define <vscale x 2 x i32> @vpmerge_vrgatherei16(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vrgatherei16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %2 = tail call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> %x, <vscale x 2 x i16> %y, i64 %1)
  %3 = tail call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %2, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %2
}

; Test conversion by fptosi.
declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i16> @vpmerge_vpfptosi(<vscale x 2 x i16> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpfptosi:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %a, <vscale x 2 x i16> %passthru, i32 %vl)
  ret <vscale x 2 x i16> %b
}

; Test conversion by sitofp.
declare <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
define <vscale x 2 x float> @vpmerge_vpsitofp(<vscale x 2 x float> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpsitofp:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test integer extension by vp.zext.
declare <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @vpmerge_vpzext(<vscale x 2 x i32> %passthru, <vscale x 2 x i8> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpzext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test integer truncation by vp.trunc.
declare <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @vpmerge_vptrunc(<vscale x 2 x i32> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vptrunc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test float extension by vp.fpext.
declare <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
define <vscale x 2 x double> @vpmerge_vpfpext(<vscale x 2 x double> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpfpext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
  ret <vscale x 2 x double> %b
}

; Test float truncation by vp.fptrunc.
declare <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
define <vscale x 2 x float> @vpmerge_vpfptrunc(<vscale x 2 x float> %passthru, <vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpfptrunc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test load operation by vp.load.
declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> *, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @vpmerge_vpload(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpload:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test result has chain and glued node.
define <vscale x 2 x i32> @vpmerge_vpload2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpload2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmseq.vv v0, v9, v10
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test result has chain output of true operand of merge.vvm.
define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpload_store:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  store <vscale x 2 x i32> %b, <vscale x 2 x i32> * %p
  ret void
}

; FIXME: Merge vmerge.vvm and vleffN.v
declare { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i64)
define <vscale x 2 x i32> @vpmerge_vleff(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vleff:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32ff.v v9, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %1)
  %b = extractvalue { <vscale x 2 x i32>, i64 } %a, 0
  %c = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %b, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %c
}

; Test strided load by riscv.vlse
declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i64, i64)
define <vscale x 2 x i32> @vpmerge_vlse(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vlse:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, tu, mu
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %s, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test indexed load by riscv.vluxei
declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i64>, i64)
define <vscale x 2 x i32> @vpmerge_vluxei(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i64> %idx, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vluxei:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, tu, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, <vscale x 2 x i64> %idx, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test vector index by riscv.vid
declare <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(<vscale x 2 x i32>, i64)
define <vscale x 2 x i32> @vpmerge_vid(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vid:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(<vscale x 2 x i32> undef, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test not combine VIOTA_M and VMERGE_VVM without true mask.
declare <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i64)
define <vscale x 2 x i32> @vpmerge_viota(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, <vscale x 2 x i1> %vm, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_viota:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    viota.m v10, v9
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i1> %vm, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test combine VIOTA_M and VMERGE_VVM with true mask.
define <vscale x 2 x i32> @vpmerge_viota2(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %vm, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_viota2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i1> %vm, i64 %1)
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %true = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %true, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

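; Test floating-point classify operation by riscv.vfclass.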
declare <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x float>, i64)
define <vscale x 2 x i32> @vpmerge_vfclass(<vscale x 2 x i32> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vfclass:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x float> %vf, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

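; Test floating-point square root by riscv.vfsqrt.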
declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i64, i64)
define <vscale x 2 x float> @vpmerge_vfsqrt(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vfsqrt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test reciprocal operation by riscv.vfrec7
declare <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i64, i64)
define <vscale x 2 x float> @vpmerge_vfrec7(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vfrec7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test vector operations with VLMAX vector length.

; Test binary operator with vp.merge and add.
define <vscale x 2 x i32> @vpmerge_add(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_add:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %a = add <vscale x 2 x i32> %x, %y
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test binary operator with vp.merge and fadd.
define <vscale x 2 x float> @vpmerge_fadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_fadd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %a = fadd <vscale x 2 x float> %x, %y
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; This shouldn't be folded because we need to preserve exceptions with
; "fpexcept.strict" exception behaviour, and masking may hide them.
define <vscale x 2 x float> @vpmerge_constrained_fadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 %vl) strictfp {
; CHECK-LABEL: vpmerge_constrained_fadd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfadd.vv v9, v9, v10
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x float> @llvm.experimental.constrained.fadd(<vscale x 2 x float> %x, <vscale x 2 x float> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %m, i64 %vl) strictfp
  ret <vscale x 2 x float> %b
}
declare <vscale x 2 x float> @llvm.experimental.constrained.fadd(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
declare <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i64)

; This shouldn't be folded because we need to preserve exceptions with
; "fpexcept.strict" exception behaviour, and masking may hide them.
define <vscale x 2 x float> @vpmerge_constrained_fadd_vlmax(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m) strictfp {
; CHECK-LABEL: vpmerge_constrained_fadd_vlmax:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfadd.vv v9, v9, v10
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x float> @llvm.experimental.constrained.fadd(<vscale x 2 x float> %x, <vscale x 2 x float> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %m, i64 -1) strictfp
  ret <vscale x 2 x float> %b
}

; Test conversion by fptosi.
define <vscale x 2 x i16> @vpmerge_fptosi(<vscale x 2 x i16> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_fptosi:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
  %a = fptosi <vscale x 2 x float> %x to <vscale x 2 x i16>
  %b = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %a, <vscale x 2 x i16> %passthru, i32 %vl)
  ret <vscale x 2 x i16> %b
}

; Test conversion by sitofp.
define <vscale x 2 x float> @vpmerge_sitofp(<vscale x 2 x float> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_sitofp:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
; CHECK-NEXT:    ret
  %a = sitofp <vscale x 2 x i64> %x to <vscale x 2 x float>
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test float extension by fpext.
define <vscale x 2 x double> @vpmerge_fpext(<vscale x 2 x double> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_fpext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
; CHECK-NEXT:    ret
  %a = fpext <vscale x 2 x float> %x to <vscale x 2 x double>
  %b = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
  ret <vscale x 2 x double> %b
}

; Test float truncation by fptrunc.
define <vscale x 2 x float> @vpmerge_fptrunc(<vscale x 2 x float> %passthru, <vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_fptrunc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
; CHECK-NEXT:    ret
  %a = fptrunc <vscale x 2 x double> %x to <vscale x 2 x float>
  %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test integer extension by zext.
define <vscale x 2 x i32> @vpmerge_zext(<vscale x 2 x i32> %passthru, <vscale x 2 x i8> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
  %a = zext <vscale x 2 x i8> %x to <vscale x 2 x i32>
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test integer truncation by trunc.
define <vscale x 2 x i32> @vpmerge_trunc(<vscale x 2 x i32> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_trunc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT:    ret
  %a = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

declare <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32)
declare <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
declare <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>, <vscale x 2 x float>, i32)
declare <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, i32)

; Test binary operator with vp.select and vp.add.
define <vscale x 2 x i32> @vpselect_vpadd(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpadd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test glued node of select should not be deleted.
define <vscale x 2 x i32> @vpselect_vpadd2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpadd2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmseq.vv v0, v9, v10
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test vp.select with an all-ones mask.
define <vscale x 2 x i32> @vpselect_vpadd3(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpadd3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v9, v10
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test float binary operator with vp.select and vp.fadd.
define <vscale x 2 x float> @vpselect_vpfadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpfadd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test for binary operator with specific EEW by riscv.vrgatherei16.
define <vscale x 2 x i32> @vpselect_vrgatherei16(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vrgatherei16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %2 = tail call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> %x, <vscale x 2 x i16> %y, i64 %1)
  %3 = tail call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %2, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %2
}

; Test conversion by fptosi.
define <vscale x 2 x i16> @vpselect_vpfptosi(<vscale x 2 x i16> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpfptosi:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %a, <vscale x 2 x i16> %passthru, i32 %vl)
  ret <vscale x 2 x i16> %b
}

; Test conversion by sitofp.
define <vscale x 2 x float> @vpselect_vpsitofp(<vscale x 2 x float> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpsitofp:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test integer extension by vp.zext.
define <vscale x 2 x i32> @vpselect_vpzext(<vscale x 2 x i32> %passthru, <vscale x 2 x i8> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpzext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test integer truncation by vp.trunc.
define <vscale x 2 x i32> @vpselect_vptrunc(<vscale x 2 x i32> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vptrunc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test float extension by vp.fpext.
define <vscale x 2 x double> @vpselect_vpfpext(<vscale x 2 x double> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpfpext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
  ret <vscale x 2 x double> %b
}

; Test float truncation by vp.fptrunc.
define <vscale x 2 x float> @vpselect_vpfptrunc(<vscale x 2 x float> %passthru, <vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpfptrunc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test load operation by vp.load.
define <vscale x 2 x i32> @vpselect_vpload(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpload:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test result has chain and glued node.
define <vscale x 2 x i32> @vpselect_vpload2(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpload2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmseq.vv v0, v9, v10
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test result has chain output of true operand of select.vvm.
define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpload_store:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(<vscale x 2 x i32> * %p, <vscale x 2 x i1> %mask, i32 %vl)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  store <vscale x 2 x i32> %b, <vscale x 2 x i32> * %p
  ret void
}

; FIXME: Merge vselect.vvm and vleffN.v
define <vscale x 2 x i32> @vpselect_vleff(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vleff:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32ff.v v9, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %1)
  %b = extractvalue { <vscale x 2 x i32>, i64 } %a, 0
  %c = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %b, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %c
}

; Test strided load by riscv.vlse
define <vscale x 2 x i32> @vpselect_vlse(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vlse:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %s, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test indexed load by riscv.vluxei
define <vscale x 2 x i32> @vpselect_vluxei(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> * %p, <vscale x 2 x i64> %idx, <vscale x 2 x i1> %m, i64 %s, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vluxei:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, <vscale x 2 x i64> %idx, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test vector index by riscv.vid
define <vscale x 2 x i32> @vpselect_vid(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vid:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(<vscale x 2 x i32> undef, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

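; Test not combine VIOTA_M and VMERGE_VVM without true mask.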
define <vscale x 2 x i32> @vpselect_viota(<vscale x 2 x i32> %passthru, <vscale x 2 x i1> %m, <vscale x 2 x i1> %vm, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_viota:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    viota.m v10, v9
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i1> %vm, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

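; Test floating-point classify operation by riscv.vfclass.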
define <vscale x 2 x i32> @vpselect_vfclass(<vscale x 2 x i32> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vfclass:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x float> %vf, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

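; Test floating-point square root by riscv.vfsqrt.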
define <vscale x 2 x float> @vpselect_vfsqrt(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vfsqrt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test reciprocal operation by riscv.vfrec7
define <vscale x 2 x float> @vpselect_vfrec7(<vscale x 2 x float> %passthru, <vscale x 2 x float> %vf, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vfrec7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %vf, i64 7, i64 %1)
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

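; Test slide operations by riscv.vslideup, riscv.vslidedown, riscv.vslide1up
; and riscv.vslide1down.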
declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, i64, i64)
define <vscale x 2 x i32> @vpselect_vslideup(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vslideup:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %v, i64 %x, i64 %1, i64 0)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, i64, i64)
define <vscale x 2 x i32> @vpselect_vslidedown(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vslidedown:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %v, i64 %x, i64 %1, i64 0)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, i64)
define <vscale x 2 x i32> @vpselect_vslide1up(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i32 %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vslide1up:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %v, i32 %x, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, i64)
define <vscale x 2 x i32> @vpselect_vslide1down(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i32 %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vslide1down:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
  %1 = zext i32 %vl to i64
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> %v, i32 %x, i64 %1)
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test vector operations with VLMAX vector length.

; Test binary operator with vp.select and add.
define <vscale x 2 x i32> @vpselect_add(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_add:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %a = add <vscale x 2 x i32> %x, %y
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test binary operator with vp.select and fadd.
define <vscale x 2 x float> @vpselect_fadd(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_fadd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %a = fadd <vscale x 2 x float> %x, %y
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test conversion by fptosi.
define <vscale x 2 x i16> @vpselect_fptosi(<vscale x 2 x i16> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_fptosi:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
  %a = fptosi <vscale x 2 x float> %x to <vscale x 2 x i16>
  %b = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %a, <vscale x 2 x i16> %passthru, i32 %vl)
  ret <vscale x 2 x i16> %b
}

; Test conversion by sitofp.
define <vscale x 2 x float> @vpselect_sitofp(<vscale x 2 x float> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_sitofp:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
; CHECK-NEXT:    ret
  %a = sitofp <vscale x 2 x i64> %x to <vscale x 2 x float>
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test float extension by fpext.
define <vscale x 2 x double> @vpselect_fpext(<vscale x 2 x double> %passthru, <vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_fpext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
; CHECK-NEXT:    ret
  %a = fpext <vscale x 2 x float> %x to <vscale x 2 x double>
  %b = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
  ret <vscale x 2 x double> %b
}

; Test float truncation by fptrunc.
define <vscale x 2 x float> @vpselect_fptrunc(<vscale x 2 x float> %passthru, <vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_fptrunc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
; CHECK-NEXT:    ret
  %a = fptrunc <vscale x 2 x double> %x to <vscale x 2 x float>
  %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
  ret <vscale x 2 x float> %b
}

; Test integer extension by zext.
define <vscale x 2 x i32> @vpselect_zext(<vscale x 2 x i32> %passthru, <vscale x 2 x i8> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
  %a = zext <vscale x 2 x i8> %x to <vscale x 2 x i32>
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Test integer truncation by trunc.
define <vscale x 2 x i32> @vpselect_trunc(<vscale x 2 x i32> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_trunc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT:    ret
  %a = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
  ret <vscale x 2 x i32> %b
}

; Folding this would create a loop in the DAG because the chain from the VLE is
; used by the vssubu.
define void @test_dag_loop() {
; CHECK-LABEL: test_dag_loop:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (zero)
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmclr.m v0
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vsetivli zero, 0, e8, m4, tu, mu
; CHECK-NEXT:    vmv4r.v v20, v16
; CHECK-NEXT:    vssubu.vx v20, v16, zero, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmseq.vv v0, v20, v16
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vsetivli zero, 1, e16, m8, tu, ma
; CHECK-NEXT:    vmerge.vvm v16, v16, v8, v0
; CHECK-NEXT:    vsetivli zero, 0, e16, m8, ta, ma
; CHECK-NEXT:    vse16.v v16, (zero)
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16>* null, i64 1)
  %1 = tail call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> zeroinitializer, <vscale x 32 x i8> zeroinitializer, i8 0, <vscale x 32 x i1> zeroinitializer, i64 0, i64 0)
  %2 = tail call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> %1, <vscale x 32 x i8> zeroinitializer, i64 0)
  %3 = tail call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> zeroinitializer, <vscale x 32 x i16> %0, <vscale x 32 x i1> %2, i64 1)
  call void @llvm.riscv.vse.nxv32i16.i64(<vscale x 32 x i16> %3, <vscale x 32 x i16>* null, i64 0)
  ret void
}

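; Test folding riscv.vmerge with a poison passthru into a masked riscv.vaaddu.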
define <vscale x 1 x i16> @test_vaaddu(<vscale x 1 x i16> %var_11, i16 zeroext %var_9, <vscale x 1 x i1> %var_5, <vscale x 1 x i16> %var_0) {
; CHECK-LABEL: test_vaaddu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 3, e16, mf4, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vaaddu.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %var_11, i16 %var_9, i64 0, i64 3)
  %1 = tail call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %var_0, <vscale x 1 x i16> %0, <vscale x 1 x i1> %var_5, i64 3)
  ret <vscale x 1 x i16> %1
}

; Test reductions don't have a vmerge folded into them, since the mask affects
; the result.

declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i64)

define <vscale x 2 x i32> @vredsum(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 %vl) {
; CHECK-LABEL: vredsum:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vredsum.vs v11, v9, v10
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v11, v0
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %passthru,
    <vscale x 2 x i32> %x,
    <vscale x 2 x i32> %y,
    i64 %vl)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 %vl)
  ret <vscale x 2 x i32> %b
}

declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i64, i64)

define <vscale x 2 x float> @vfredusum(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 %vl) {
; CHECK-LABEL: vfredusum:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vfredusum.vs v11, v9, v10
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v11, v0
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
    <vscale x 2 x float> %passthru,
    <vscale x 2 x float> %x,
    <vscale x 2 x float> %y,
    i64 0, i64 %vl)
  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %m, i64 %vl)
  ret <vscale x 2 x float> %b
}

1048 ; However we can fold it in if the mask is all ones.
define <vscale x 2 x i32> @vredsum_allones_mask(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 %vl) {
; CHECK-LABEL: vredsum_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vredsum.vs v8, v9, v10
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer

  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %passthru,
    <vscale x 2 x i32> %x,
    <vscale x 2 x i32> %y,
    i64 %vl)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 %vl)
  ret <vscale x 2 x i32> %b
}
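
; Likewise for the unordered floating-point reduction; only the frm setup
; remains around the folded vfredusum.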
define <vscale x 2 x float> @vfredusum_allones_mask(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, i64 %vl) {
; CHECK-LABEL: vfredusum_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfredusum.vs v8, v9, v10
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer

  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
    <vscale x 2 x float> %passthru,
    <vscale x 2 x float> %x,
    <vscale x 2 x float> %y,
    i64 0, i64 %vl)
  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %mask, i64 %vl)
  ret <vscale x 2 x float> %b
}

declare <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>* nocapture, i64)
declare <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i8, <vscale x 32 x i1>, i64, i64 immarg)
declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i64)
declare <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i64)
declare void @llvm.riscv.vse.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>* nocapture, i64)
declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, i16, i64 immarg, i64)
declare <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)

; Tests for folding vmerge into its ops when their VLs differ
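; The fold generally has to use the smaller of the two VLs, and it is only
; legal when the elements between the smaller and the larger VL are the same
; either way, e.g. because both sides share a passthru or the operand's
; passthru is poison.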

declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64)
declare <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)

; Can fold with VL=2
define <vscale x 2 x i32> @vmerge_smaller_vl_same_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_smaller_vl_same_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 4)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 2)
  ret <vscale x 2 x i32> %b
}

; Can fold with VL=2
define <vscale x 2 x i32> @vmerge_larger_vl_same_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_larger_vl_same_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 2)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 3)
  ret <vscale x 2 x i32> %b
}

; Can legally fold with VL=2, but we currently don't because the vadd and the
; vmerge have different passthrus.
define <vscale x 2 x i32> @vmerge_smaller_vl_different_passthru(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_smaller_vl_different_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v9, v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 3)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt2, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 2)
  ret <vscale x 2 x i32> %b
}

; Can't fold this because we need to take elements from both %pt1 and %pt2
define <vscale x 2 x i32> @vmerge_larger_vl_different_passthru(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_larger_vl_different_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v9, v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 2)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt2, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 3)
  ret <vscale x 2 x i32> %b
}
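
; When the operand's passthru is poison, the elements past its VL impose no
; constraint, so the fold is legal regardless of which VL is smaller.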

; Can fold with VL=2
define <vscale x 2 x i32> @vmerge_smaller_vl_poison_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_smaller_vl_poison_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 3)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 2)
  ret <vscale x 2 x i32> %b
}

; Can fold with VL=2
define <vscale x 2 x i32> @vmerge_larger_vl_poison_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_larger_vl_poison_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 2)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 3)
  ret <vscale x 2 x i32> %b
}

; The vadd's new policy should be tail undisturbed since the false op of the
; vmerge moves from the body to the tail, and we need to preserve it.
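; Below, the vadd has VL=2 and the vmerge VL=3: element 2 sits in the vmerge's
; body (taking %false where the mask is off), but after folding at VL=2 it
; lands in the vadd's tail, so the folded vadd needs a tail-undisturbed policy
; with %false as its passthru to keep that element.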
define <vscale x 2 x i32> @vmerge_larger_vl_false_becomes_tail(<vscale x 2 x i32> %false, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m) {
; CHECK-LABEL: vmerge_larger_vl_false_becomes_tail:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i64 2)
  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %false, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 3)
  ret <vscale x 2 x i32> %b
}