1 //===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 /// This file contains the required infrastructure and VL patterns to
10 /// support code generation for the standard 'V' (Vector) extension, version
11 /// 1.0.
13 /// This file is included from and depends upon RISCVInstrInfoVPseudos.td
15 /// Note: the patterns for RVV intrinsics are found in
16 /// RISCVInstrInfoVPseudos.td.
18 //===----------------------------------------------------------------------===//
20 //===----------------------------------------------------------------------===//
21 // Helpers to define the VL patterns.
22 //===----------------------------------------------------------------------===//
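// Operand convention shared by the *_VL nodes below: besides the data
// operands, most nodes carry a merge/passthru vector, a mask whose element
// type is i1 (with the same element count as the result), and an explicit VL
// operand of type XLenVT.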
24 def SDT_RISCVIntUnOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
25 SDTCisSameAs<0, 2>,
26 SDTCisVec<0>, SDTCisInt<0>,
27 SDTCVecEltisVT<3, i1>,
28 SDTCisSameNumEltsAs<0, 3>,
29 SDTCisVT<4, XLenVT>]>;
31 def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
32 SDTCisSameAs<0, 2>,
33 SDTCisVec<0>, SDTCisInt<0>,
34 SDTCisSameAs<0, 3>,
35 SDTCVecEltisVT<4, i1>,
36 SDTCisSameNumEltsAs<0, 4>,
37 SDTCisVT<5, XLenVT>]>;
39 def SDT_RISCVFPUnOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
40 SDTCisVec<0>, SDTCisFP<0>,
41 SDTCVecEltisVT<2, i1>,
42 SDTCisSameNumEltsAs<0, 2>,
43 SDTCisVT<3, XLenVT>]>;
44 def SDT_RISCVFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
45 SDTCisSameAs<0, 2>,
46 SDTCisVec<0>, SDTCisFP<0>,
47 SDTCisSameAs<0, 3>,
48 SDTCVecEltisVT<4, i1>,
49 SDTCisSameNumEltsAs<0, 4>,
50 SDTCisVT<5, XLenVT>]>;
52 def SDT_RISCVCopySign_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
53 SDTCisSameAs<0, 2>,
54 SDTCisVec<0>, SDTCisFP<0>,
55 SDTCisSameAs<0, 3>,
56 SDTCVecEltisVT<4, i1>,
57 SDTCisSameNumEltsAs<0, 4>,
58 SDTCisVT<5, XLenVT>]>;
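// Vector move/splat nodes. VMV_V_V_VL copies a vector, VMV_V_X_VL and
// VFMV_V_F_VL splat a scalar across the result, and VMV_S_X_VL/VFMV_S_F_VL
// write a scalar into element 0 of the passthru; all take an explicit VL.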
60 def riscv_vmv_v_v_vl : SDNode<"RISCVISD::VMV_V_V_VL",
61 SDTypeProfile<1, 3, [SDTCisVec<0>,
62 SDTCisSameAs<0, 1>,
63 SDTCisSameAs<0, 2>,
64 SDTCisVT<3, XLenVT>]>>;
65 def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL",
66 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>,
67 SDTCisSameAs<0, 1>,
68 SDTCisVT<2, XLenVT>,
69 SDTCisVT<3, XLenVT>]>>;
70 def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL",
71 SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>,
72 SDTCisSameAs<0, 1>,
73 SDTCisEltOfVec<2, 0>,
74 SDTCisVT<3, XLenVT>]>>;
75 def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL",
76 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
77 SDTCisInt<0>,
78 SDTCisVT<2, XLenVT>,
79 SDTCisVT<3, XLenVT>]>>;
80 def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL",
81 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
82 SDTCisFP<0>,
83 SDTCisEltOfVec<2, 0>,
84 SDTCisVT<3, XLenVT>]>>;
86 def riscv_add_vl : SDNode<"RISCVISD::ADD_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
87 def riscv_sub_vl : SDNode<"RISCVISD::SUB_VL", SDT_RISCVIntBinOp_VL>;
88 def riscv_mul_vl : SDNode<"RISCVISD::MUL_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
89 def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
90 def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
91 def riscv_and_vl : SDNode<"RISCVISD::AND_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
92 def riscv_or_vl : SDNode<"RISCVISD::OR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
93 def riscv_xor_vl : SDNode<"RISCVISD::XOR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
94 def riscv_sdiv_vl : SDNode<"RISCVISD::SDIV_VL", SDT_RISCVIntBinOp_VL>;
95 def riscv_srem_vl : SDNode<"RISCVISD::SREM_VL", SDT_RISCVIntBinOp_VL>;
96 def riscv_udiv_vl : SDNode<"RISCVISD::UDIV_VL", SDT_RISCVIntBinOp_VL>;
97 def riscv_urem_vl : SDNode<"RISCVISD::UREM_VL", SDT_RISCVIntBinOp_VL>;
98 def riscv_shl_vl : SDNode<"RISCVISD::SHL_VL", SDT_RISCVIntBinOp_VL>;
99 def riscv_sra_vl : SDNode<"RISCVISD::SRA_VL", SDT_RISCVIntBinOp_VL>;
100 def riscv_srl_vl : SDNode<"RISCVISD::SRL_VL", SDT_RISCVIntBinOp_VL>;
101 def riscv_rotl_vl : SDNode<"RISCVISD::ROTL_VL", SDT_RISCVIntBinOp_VL>;
102 def riscv_rotr_vl : SDNode<"RISCVISD::ROTR_VL", SDT_RISCVIntBinOp_VL>;
103 def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
104 def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
105 def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
106 def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
108 def riscv_bitreverse_vl : SDNode<"RISCVISD::BITREVERSE_VL", SDT_RISCVIntUnOp_VL>;
109 def riscv_bswap_vl : SDNode<"RISCVISD::BSWAP_VL", SDT_RISCVIntUnOp_VL>;
110 def riscv_ctlz_vl : SDNode<"RISCVISD::CTLZ_VL", SDT_RISCVIntUnOp_VL>;
111 def riscv_cttz_vl : SDNode<"RISCVISD::CTTZ_VL", SDT_RISCVIntUnOp_VL>;
112 def riscv_ctpop_vl : SDNode<"RISCVISD::CTPOP_VL", SDT_RISCVIntUnOp_VL>;
114 def riscv_avgflooru_vl : SDNode<"RISCVISD::AVGFLOORU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
115 def riscv_avgceilu_vl : SDNode<"RISCVISD::AVGCEILU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
116 def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
117 def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
118 def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>;
119 def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>;
121 def riscv_fadd_vl : SDNode<"RISCVISD::FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
122 def riscv_fsub_vl : SDNode<"RISCVISD::FSUB_VL", SDT_RISCVFPBinOp_VL>;
123 def riscv_fmul_vl : SDNode<"RISCVISD::FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
124 def riscv_fdiv_vl : SDNode<"RISCVISD::FDIV_VL", SDT_RISCVFPBinOp_VL>;
125 def riscv_fneg_vl : SDNode<"RISCVISD::FNEG_VL", SDT_RISCVFPUnOp_VL>;
126 def riscv_fabs_vl : SDNode<"RISCVISD::FABS_VL", SDT_RISCVFPUnOp_VL>;
127 def riscv_fsqrt_vl : SDNode<"RISCVISD::FSQRT_VL", SDT_RISCVFPUnOp_VL>;
128 def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVCopySign_VL>;
129 def riscv_vfmin_vl : SDNode<"RISCVISD::VFMIN_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
130 def riscv_vfmax_vl : SDNode<"RISCVISD::VFMAX_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
132 def riscv_strict_fadd_vl : SDNode<"RISCVISD::STRICT_FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
133 def riscv_strict_fsub_vl : SDNode<"RISCVISD::STRICT_FSUB_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
134 def riscv_strict_fmul_vl : SDNode<"RISCVISD::STRICT_FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
135 def riscv_strict_fdiv_vl : SDNode<"RISCVISD::STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
136 def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;
138 def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
139 [(riscv_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
140 (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
141 def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
142 [(riscv_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
143 (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
144 def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
145 [(riscv_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
146 (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
147 def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
148 [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
149 (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
150 def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
151 [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl),
152 (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>;
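// The any_* PatFrags match both the normal and the strict (constrained FP)
// form of a node, so one set of instruction patterns covers both.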
154 def riscv_fclass_vl : SDNode<"RISCVISD::FCLASS_VL",
155 SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>,
156 SDTCisFP<1>, SDTCisVec<1>,
157 SDTCisSameSizeAs<0, 1>,
158 SDTCisSameNumEltsAs<0, 1>,
159 SDTCVecEltisVT<2, i1>,
160 SDTCisSameNumEltsAs<0, 2>,
161 SDTCisVT<3, XLenVT>]>>;
163 def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
164 SDTCisSameAs<0, 2>,
165 SDTCisSameAs<0, 3>,
166 SDTCisVec<0>, SDTCisFP<0>,
167 SDTCVecEltisVT<4, i1>,
168 SDTCisSameNumEltsAs<0, 4>,
169 SDTCisVT<5, XLenVT>]>;
170 def riscv_vfmadd_vl : SDNode<"RISCVISD::VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
171 def riscv_vfnmadd_vl : SDNode<"RISCVISD::VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
172 def riscv_vfmsub_vl : SDNode<"RISCVISD::VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
173 def riscv_vfnmsub_vl : SDNode<"RISCVISD::VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
175 def SDT_RISCVWVecFMA_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>,
176 SDTCisVec<1>, SDTCisFP<1>,
177 SDTCisOpSmallerThanOp<1, 0>,
178 SDTCisSameNumEltsAs<0, 1>,
179 SDTCisSameAs<1, 2>,
180 SDTCisSameAs<0, 3>,
181 SDTCVecEltisVT<4, i1>,
182 SDTCisSameNumEltsAs<0, 4>,
183 SDTCisVT<5, XLenVT>]>;
184 def riscv_vfwmadd_vl : SDNode<"RISCVISD::VFWMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
185 def riscv_vfwnmadd_vl : SDNode<"RISCVISD::VFWNMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
186 def riscv_vfwmsub_vl : SDNode<"RISCVISD::VFWMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
187 def riscv_vfwnmsub_vl : SDNode<"RISCVISD::VFWNMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
189 def riscv_strict_vfmadd_vl : SDNode<"RISCVISD::STRICT_VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
190 def riscv_strict_vfnmadd_vl : SDNode<"RISCVISD::STRICT_VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
191 def riscv_strict_vfmsub_vl : SDNode<"RISCVISD::STRICT_VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
192 def riscv_strict_vfnmsub_vl : SDNode<"RISCVISD::STRICT_VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
194 def any_riscv_vfmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
195 [(riscv_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
196 (riscv_strict_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
197 def any_riscv_vfnmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
198 [(riscv_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
199 (riscv_strict_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
200 def any_riscv_vfmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
201 [(riscv_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
202 (riscv_strict_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
203 def any_riscv_vfnmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
204 [(riscv_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
205 (riscv_strict_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
207 def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [
208 SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>,
209 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
210 ]>;
211 def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [
212 SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>,
213 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
214 ]>;
216 def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>;
217 def riscv_strict_fpround_vl : SDNode<"RISCVISD::STRICT_FP_ROUND_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>;
218 def riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>;
219 def riscv_strict_fpextend_vl : SDNode<"RISCVISD::STRICT_FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL, [SDNPHasChain]>;
220 def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>;
221 def riscv_strict_fncvt_rod_vl : SDNode<"RISCVISD::STRICT_VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>;
223 def any_riscv_fpround_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
224 [(riscv_fpround_vl node:$src, node:$mask, node:$vl),
225 (riscv_strict_fpround_vl node:$src, node:$mask, node:$vl)]>;
226 def any_riscv_fpextend_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
227 [(riscv_fpextend_vl node:$src, node:$mask, node:$vl),
228 (riscv_strict_fpextend_vl node:$src, node:$mask, node:$vl)]>;
229 def any_riscv_fncvt_rod_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
230 [(riscv_fncvt_rod_vl node:$src, node:$mask, node:$vl),
231 (riscv_strict_fncvt_rod_vl node:$src, node:$mask, node:$vl)]>;
233 def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [
234 SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
235 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
236 ]>;
237 def SDT_RISCVFP2IOp_RM_VL : SDTypeProfile<1, 4, [
238 SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
239 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>,
240 SDTCisVT<4, XLenVT> // Rounding mode
241 ]>;
243 def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [
244 SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
245 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
246 ]>;
247 def SDT_RISCVI2FPOp_RM_VL : SDTypeProfile<1, 4, [
248 SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
249 SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>,
250 SDTCisVT<4, XLenVT> // Rounding mode
251 ]>;
253 def SDT_RISCVSETCCOP_VL : SDTypeProfile<1, 6, [
254 SDTCVecEltisVT<0, i1>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>,
255 SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>, SDTCisSameAs<0, 4>,
256 SDTCisSameAs<0, 5>, SDTCisVT<6, XLenVT>]>;
259 def riscv_vfcvt_xu_f_vl : SDNode<"RISCVISD::VFCVT_XU_F_VL", SDT_RISCVFP2IOp_VL>;
260 def riscv_vfcvt_x_f_vl : SDNode<"RISCVISD::VFCVT_X_F_VL", SDT_RISCVFP2IOp_VL>;
261 def riscv_vfcvt_rm_xu_f_vl : SDNode<"RISCVISD::VFCVT_RM_XU_F_VL", SDT_RISCVFP2IOp_RM_VL>;
262 def riscv_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL>;
264 def riscv_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>;
265 def riscv_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL>;
267 def riscv_strict_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL, [SDNPHasChain]>;
268 def riscv_strict_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>;
269 def riscv_strict_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>;
271 def any_riscv_vfcvt_rm_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl, node:$rm),
272 [(riscv_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm),
273 (riscv_strict_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm)]>;
274 def any_riscv_vfcvt_rtz_xu_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
275 [(riscv_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl),
276 (riscv_strict_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl)]>;
277 def any_riscv_vfcvt_rtz_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
278 [(riscv_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl),
279 (riscv_strict_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl)]>;
282 def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;
283 def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;
284 def riscv_vfcvt_rm_f_xu_vl : SDNode<"RISCVISD::VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>;
285 def riscv_vfcvt_rm_f_x_vl : SDNode<"RISCVISD::VFCVT_RM_F_X_VL", SDT_RISCVI2FPOp_RM_VL>;
287 def riscv_strict_sint_to_fp_vl : SDNode<"RISCVISD::STRICT_SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>;
288 def riscv_strict_uint_to_fp_vl : SDNode<"RISCVISD::STRICT_UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>;
290 def any_riscv_sint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
291 [(riscv_sint_to_fp_vl node:$src, node:$mask, node:$vl),
292 (riscv_strict_sint_to_fp_vl node:$src, node:$mask, node:$vl)]>;
293 def any_riscv_uint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
294 [(riscv_uint_to_fp_vl node:$src, node:$mask, node:$vl),
295 (riscv_strict_uint_to_fp_vl node:$src, node:$mask, node:$vl)]>;
297 def riscv_vfround_noexcept_vl: SDNode<"RISCVISD::VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>;
298 def riscv_strict_vfround_noexcept_vl: SDNode<"RISCVISD::STRICT_VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;
300 def any_riscv_vfround_noexcept_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
301 [(riscv_vfround_noexcept_vl node:$src, node:$mask, node:$vl),
302 (riscv_strict_vfround_noexcept_vl node:$src, node:$mask, node:$vl)]>;
304 def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", SDT_RISCVSETCCOP_VL>;
305 def riscv_strict_fsetcc_vl : SDNode<"RISCVISD::STRICT_FSETCC_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
306 def riscv_strict_fsetccs_vl : SDNode<"RISCVISD::STRICT_FSETCCS_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
307 def any_riscv_fsetcc_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
308 [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
309 (riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>;
310 def any_riscv_fsetccs_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
311 [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
312 (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>;
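// SETCC_VL compares two vectors lane-wise under a mask and VL; the condition
// code is carried as an OtherVT operand and the merge operand supplies the
// inactive lanes of the i1 result. The strict forms are the constrained
// quiet (FSETCC) and signaling (FSETCCS) compares.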
314 def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL",
315 SDTypeProfile<1, 5, [SDTCisVec<0>,
319 SDTCVecEltisVT<4, i1>,
320 SDTCisSameNumEltsAs<0, 4>,
321 SDTCisVT<5, XLenVT>]>>;
322 def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL",
323 SDTypeProfile<1, 5, [SDTCisVec<0>,
326 SDTCisSameNumEltsAs<0, 2>,
327 SDTCisSameSizeAs<0, 2>,
329 SDTCVecEltisVT<4, i1>,
330 SDTCisSameNumEltsAs<0, 4>,
331 SDTCisVT<5, XLenVT>]>>;
332 def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL",
333 SDTypeProfile<1, 5, [SDTCisVec<0>,
336 SDTCVecEltisVT<2, i16>,
337 SDTCisSameNumEltsAs<0, 2>,
339 SDTCVecEltisVT<4, i1>,
340 SDTCisSameNumEltsAs<0, 4>,
341 SDTCisVT<5, XLenVT>]>>;
343 def SDT_RISCVVMERGE_VL : SDTypeProfile<1, 5, [
344 SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>,
345 SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisSameAs<0, 4>,
346 SDTCisVT<5, XLenVT>]>;
349 def riscv_vmerge_vl : SDNode<"RISCVISD::VMERGE_VL", SDT_RISCVVMERGE_VL>;
351 def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>,
352 SDTCisVT<1, XLenVT>]>;
353 def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>;
354 def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>;
356 def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
357 SDTCisSameAs<0, 2>,
358 SDTCVecEltisVT<0, i1>,
359 SDTCisVT<3, XLenVT>]>;
360 def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
361 def riscv_vmor_vl : SDNode<"RISCVISD::VMOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
362 def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
364 def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>;
366 def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl),
367 (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>;
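// true_mask matches a VMSET_VL with any VL, i.e. an all-ones mask; mask
// negation is expressed as an XOR with that all-ones mask.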
369 def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL",
370 SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
371 SDTCisVec<1>, SDTCisInt<1>,
372 SDTCVecEltisVT<2, i1>,
373 SDTCisSameNumEltsAs<1, 2>,
374 SDTCisVT<3, XLenVT>]>>;
376 def riscv_vfirst_vl : SDNode<"RISCVISD::VFIRST_VL",
377 SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
378 SDTCisVec<1>, SDTCisInt<1>,
379 SDTCVecEltisVT<2, i1>,
380 SDTCisSameNumEltsAs<1, 2>,
381 SDTCisVT<3, XLenVT>]>>;
383 def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>,
384 SDTCisSameNumEltsAs<0, 1>,
385 SDTCisSameNumEltsAs<1, 2>,
386 SDTCVecEltisVT<2, i1>,
387 SDTCisVT<3, XLenVT>]>;
388 def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>;
389 def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>;
390 def riscv_ext_vl : PatFrags<(ops node:$A, node:$B, node:$C),
391 [(riscv_sext_vl node:$A, node:$B, node:$C),
392 (riscv_zext_vl node:$A, node:$B, node:$C)]>;
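// riscv_ext_vl matches either a sign or a zero extension when the choice of
// extension does not matter to the pattern using it.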
394 def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL",
395 SDTypeProfile<1, 3, [SDTCisVec<0>,
396 SDTCisSameNumEltsAs<0, 1>,
397 SDTCisSameNumEltsAs<0, 2>,
398 SDTCVecEltisVT<2, i1>,
399 SDTCisVT<3, XLenVT>]>>;
401 def SDT_RISCVVWIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
403 SDTCisSameNumEltsAs<0, 1>,
404 SDTCisOpSmallerThanOp<1, 0>,
407 SDTCisSameNumEltsAs<1, 4>,
408 SDTCVecEltisVT<4, i1>,
409 SDTCisVT<5, XLenVT>]>;
410 def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
411 def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
412 def riscv_vwmulsu_vl : SDNode<"RISCVISD::VWMULSU_VL", SDT_RISCVVWIntBinOp_VL>;
413 def riscv_vwadd_vl : SDNode<"RISCVISD::VWADD_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
414 def riscv_vwaddu_vl : SDNode<"RISCVISD::VWADDU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
415 def riscv_vwsub_vl : SDNode<"RISCVISD::VWSUB_VL", SDT_RISCVVWIntBinOp_VL, []>;
416 def riscv_vwsubu_vl : SDNode<"RISCVISD::VWSUBU_VL", SDT_RISCVVWIntBinOp_VL, []>;
417 def riscv_vwsll_vl : SDNode<"RISCVISD::VWSLL_VL", SDT_RISCVVWIntBinOp_VL, []>;
419 def SDT_RISCVVWIntTernOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
421 SDTCisSameNumEltsAs<0, 1>,
422 SDTCisOpSmallerThanOp<1, 0>,
425 SDTCisSameNumEltsAs<1, 4>,
426 SDTCVecEltisVT<4, i1>,
427 SDTCisVT<5, XLenVT>]>;
428 def riscv_vwmacc_vl : SDNode<"RISCVISD::VWMACC_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
429 def riscv_vwmaccu_vl : SDNode<"RISCVISD::VWMACCU_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
430 def riscv_vwmaccsu_vl : SDNode<"RISCVISD::VWMACCSU_VL", SDT_RISCVVWIntTernOp_VL, []>;
432 def SDT_RISCVVWFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>,
434 SDTCisSameNumEltsAs<0, 1>,
435 SDTCisOpSmallerThanOp<1, 0>,
438 SDTCisSameNumEltsAs<1, 4>,
439 SDTCVecEltisVT<4, i1>,
440 SDTCisVT<5, XLenVT>]>;
441 def riscv_vfwmul_vl : SDNode<"RISCVISD::VFWMUL_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>;
442 def riscv_vfwadd_vl : SDNode<"RISCVISD::VFWADD_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>;
443 def riscv_vfwsub_vl : SDNode<"RISCVISD::VFWSUB_VL", SDT_RISCVVWFPBinOp_VL, []>;
445 def SDT_RISCVVNIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
447 SDTCisSameNumEltsAs<0, 1>,
448 SDTCisOpSmallerThanOp<0, 1>,
451 SDTCisSameNumEltsAs<0, 4>,
452 SDTCVecEltisVT<4, i1>,
453 SDTCisVT<5, XLenVT>]>;
454 def riscv_vnsrl_vl : SDNode<"RISCVISD::VNSRL_VL", SDT_RISCVVNIntBinOp_VL>;
456 def SDT_RISCVVWIntBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
459 SDTCisSameNumEltsAs<1, 2>,
460 SDTCisOpSmallerThanOp<2, 1>,
462 SDTCisSameNumEltsAs<1, 4>,
463 SDTCVecEltisVT<4, i1>,
464 SDTCisVT<5, XLenVT>]>;
465 def riscv_vwadd_w_vl : SDNode<"RISCVISD::VWADD_W_VL", SDT_RISCVVWIntBinOpW_VL>;
466 def riscv_vwaddu_w_vl : SDNode<"RISCVISD::VWADDU_W_VL", SDT_RISCVVWIntBinOpW_VL>;
467 def riscv_vwsub_w_vl : SDNode<"RISCVISD::VWSUB_W_VL", SDT_RISCVVWIntBinOpW_VL>;
468 def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWIntBinOpW_VL>;
470 def SDT_RISCVVWFPBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>,
473 SDTCisSameNumEltsAs<1, 2>,
474 SDTCisOpSmallerThanOp<2, 1>,
476 SDTCisSameNumEltsAs<1, 4>,
477 SDTCVecEltisVT<4, i1>,
478 SDTCisVT<5, XLenVT>]>;
480 def riscv_vfwadd_w_vl : SDNode<"RISCVISD::VFWADD_W_VL", SDT_RISCVVWFPBinOpW_VL>;
481 def riscv_vfwsub_w_vl : SDNode<"RISCVISD::VFWSUB_W_VL", SDT_RISCVVWFPBinOpW_VL>;
483 def SDTRVVVecReduce : SDTypeProfile<1, 6, [
484 SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>,
485 SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>,
486 SDTCisVT<6, XLenVT>]>;
489 def riscv_add_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
490 node:$E),
491 (riscv_add_vl node:$A, node:$B, node:$C,
492 node:$D, node:$E), [{
493 return N->hasOneUse();
494 }]>;
496 def riscv_sub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
497 node:$E),
498 (riscv_sub_vl node:$A, node:$B, node:$C,
499 node:$D, node:$E), [{
500 return N->hasOneUse();
501 }]>;
503 def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
504 node:$E),
505 (riscv_mul_vl node:$A, node:$B, node:$C,
506 node:$D, node:$E), [{
507 return N->hasOneUse();
508 }]>;
510 def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
511 node:$E),
512 (riscv_vwmul_vl node:$A, node:$B, node:$C,
513 node:$D, node:$E), [{
514 return N->hasOneUse();
515 }]>;
517 def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
518 node:$E),
519 (riscv_vwmulu_vl node:$A, node:$B, node:$C,
520 node:$D, node:$E), [{
521 return N->hasOneUse();
522 }]>;
524 def riscv_vwmulsu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
525 node:$E),
526 (riscv_vwmulsu_vl node:$A, node:$B, node:$C,
527 node:$D, node:$E), [{
528 return N->hasOneUse();
529 }]>;
531 def riscv_sext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
532 (riscv_sext_vl node:$A, node:$B, node:$C), [{
533 return N->hasOneUse();
534 }]>;
536 def riscv_zext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
537 (riscv_zext_vl node:$A, node:$B, node:$C), [{
538 return N->hasOneUse();
539 }]>;
541 def riscv_ext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
542 (riscv_ext_vl node:$A, node:$B, node:$C), [{
543 return N->hasOneUse();
544 }]>;
546 def riscv_fpextend_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
547 (riscv_fpextend_vl node:$A, node:$B, node:$C), [{
548 return N->hasOneUse();
549 }]>;
551 def riscv_vfmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
552 node:$E),
553 (riscv_vfmadd_vl node:$A, node:$B,
554 node:$C, node:$D, node:$E), [{
555 return N->hasOneUse();
556 }]>;
558 def riscv_vfnmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
559 node:$E),
560 (riscv_vfnmadd_vl node:$A, node:$B,
561 node:$C, node:$D, node:$E), [{
562 return N->hasOneUse();
563 }]>;
565 def riscv_vfmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
566 node:$E),
567 (riscv_vfmsub_vl node:$A, node:$B,
568 node:$C, node:$D, node:$E), [{
569 return N->hasOneUse();
570 }]>;
572 def riscv_vfnmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
573 node:$E),
574 (riscv_vfnmsub_vl node:$A, node:$B,
575 node:$C, node:$D, node:$E), [{
576 return N->hasOneUse();
577 }]>;
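// The *_oneuse fragments only match nodes with a single use, so folding them
// into a widening or fused instruction never duplicates the computation.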
579 foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
580 "FADD", "SEQ_FADD", "FMIN", "FMAX"] in
581 def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>;
583 // Give explicit Complexity to prefer simm5/uimm5.
584 def SplatPat : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>;
585 def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 3>;
586 def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<5>", [], [], 3>;
587 def SplatPat_uimm6 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<6>", [], [], 3>;
588 def SplatPat_simm5_plus1
589 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 3>;
590 def SplatPat_simm5_plus1_nonzero
591 : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 3>;
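// The *_plus1 patterns match a splat whose value minus one fits in simm5; the
// emitted instruction then uses DecImm to encode (imm - 1), e.g. turning a
// strict "less than" compare into the immediate form of "less than or equal".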
593 // Selects extends or truncates of splats where we only care about the lowest 8
594 // bits of each element.
595 def Low8BitsSplatPat
596 : ComplexPattern<vAny, 1, "selectLow8BitsVSplat", [], [], 2>;
598 // Ignore the vl operand on vfmv_v_f and vfmv_s_f.
599 def SplatFPOp : PatFrags<(ops node:$op),
600 [(riscv_vfmv_v_f_vl undef, node:$op, srcvalue),
601 (riscv_vfmv_s_f_vl undef, node:$op, srcvalue)]>;
603 def sew8simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<8>", []>;
604 def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>;
605 def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>;
606 def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>;
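// The sewNsimm5 patterns (selectRVVSimm5<N>) match a simm5 immediate that is
// valid when the element width is N bits.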
608 class VPatBinaryVL_V<SDPatternOperator vop,
609 string instruction_name,
611 ValueType result_type,
617 VReg result_reg_class,
621 : Pat<(result_type (vop
622 (op1_type op1_reg_class:$rs1),
623 (op2_type op2_reg_class:$rs2),
624 (result_type result_reg_class:$merge),
629 instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
630 instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
631 result_reg_class:$merge,
634 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
636 class VPatBinaryVL_V_RM<SDPatternOperator vop,
637 string instruction_name,
639 ValueType result_type,
645 VReg result_reg_class,
649 : Pat<(result_type (vop
650 (op1_type op1_reg_class:$rs1),
651 (op2_type op2_reg_class:$rs2),
652 (result_type result_reg_class:$merge),
657 instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
658 instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
659 result_reg_class:$merge,
663 // Value to indicate no rounding mode change in
664 // RISCVInsertReadWriteCSR
665 FRM_DYN,
666 GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
668 multiclass VPatTiedBinaryNoMaskVL_V<SDNode vop,
669 string instruction_name,
671 ValueType result_type,
675 VReg result_reg_class,
676 VReg op2_reg_class> {
677 def : Pat<(result_type (vop
678 (result_type result_reg_class:$rs1),
679 (op2_type op2_reg_class:$rs2),
683 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED")
684 result_reg_class:$rs1,
686 GPR:$vl, sew, TAIL_AGNOSTIC)>;
688 def : Pat<(riscv_vmerge_vl true_mask,
690 result_reg_class:$rs1,
691 (op2_type op2_reg_class:$rs2),
695 result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag),
696 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED")
697 result_reg_class:$rs1,
699 GPR:$vl, sew, TU_MU)>;
702 class VPatTiedBinaryMaskVL_V<SDNode vop,
703 string instruction_name,
705 ValueType result_type,
710 VReg result_reg_class,
711 VReg op2_reg_class> :
712 Pat<(result_type (vop
713 (result_type result_reg_class:$rs1),
714 (op2_type op2_reg_class:$rs2),
715 (result_type result_reg_class:$rs1),
718 (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_MASK_TIED")
719 result_reg_class:$rs1,
721 (mask_type V0), GPR:$vl, sew, TU_MU)>;
723 multiclass VPatTiedBinaryNoMaskVL_V_RM<SDNode vop,
724 string instruction_name,
726 ValueType result_type,
730 VReg result_reg_class,
732 bit isSEWAware = 0> {
733 defvar name = !if(isSEWAware,
734 instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_TIED",
735 instruction_name#"_"#suffix#"_"#vlmul.MX#"_TIED");
736 def : Pat<(result_type (vop
737 (result_type result_reg_class:$rs1),
738 (op2_type op2_reg_class:$rs2),
742 (!cast<Instruction>(name)
743 result_reg_class:$rs1,
745 // Value to indicate no rounding mode change in
746 // RISCVInsertReadWriteCSR
747 FRM_DYN,
748 GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
750 def : Pat<(riscv_vmerge_vl true_mask,
752 result_reg_class:$rs1,
753 (op2_type op2_reg_class:$rs2),
757 result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag),
758 (!cast<Instruction>(name)
759 result_reg_class:$rs1,
761 // Value to indicate no rounding mode change in
762 // RISCVInsertReadWriteCSR
763 FRM_DYN,
764 GPR:$vl, log2sew, TU_MU)>;
767 class VPatBinaryVL_XI<SDPatternOperator vop,
768 string instruction_name,
770 ValueType result_type,
776 VReg result_reg_class,
778 ComplexPattern SplatPatKind,
781 : Pat<(result_type (vop
782 (vop1_type vop_reg_class:$rs1),
783 (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))),
784 (result_type result_reg_class:$merge),
789 instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
790 instruction_name#_#suffix#_#vlmul.MX#"_MASK"))
791 result_reg_class:$merge,
794 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
796 multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name,
797 list<VTypeInfo> vtilist = AllIntegerVectors,
798 bit isSEWAware = 0> {
799 foreach vti = vtilist in {
800 let Predicates = GetVTypePredicates<vti>.Predicates in {
801 def : VPatBinaryVL_V<vop, instruction_name, "VV",
802 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
803 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
804 vti.RegClass, isSEWAware>;
805 def : VPatBinaryVL_XI<vop, instruction_name, "VX",
806 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
807 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
808 SplatPat, GPR, isSEWAware>;
813 multiclass VPatBinaryVL_VV_VX_VI<SDPatternOperator vop, string instruction_name,
814 Operand ImmType = simm5>
815 : VPatBinaryVL_VV_VX<vop, instruction_name> {
816 foreach vti = AllIntegerVectors in {
817 let Predicates = GetVTypePredicates<vti>.Predicates in
818 def : VPatBinaryVL_XI<vop, instruction_name, "VI",
819 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
820 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
821 !cast<ComplexPattern>(SplatPat#_#ImmType),
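// Usage sketch (assuming the PseudoVADD pseudo-instruction names defined in
// RISCVInstrInfoVPseudos.td): instantiating
//   defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">;
// expands to VV, VX and VI patterns for every integer vector type.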
826 multiclass VPatBinaryWVL_VV_VX<SDPatternOperator vop, string instruction_name> {
827 foreach VtiToWti = AllWidenableIntVectors in {
828 defvar vti = VtiToWti.Vti;
829 defvar wti = VtiToWti.Wti;
830 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
831 GetVTypePredicates<wti>.Predicates) in {
832 def : VPatBinaryVL_V<vop, instruction_name, "VV",
833 wti.Vector, vti.Vector, vti.Vector, vti.Mask,
834 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
836 def : VPatBinaryVL_XI<vop, instruction_name, "VX",
837 wti.Vector, vti.Vector, vti.Vector, vti.Mask,
838 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
844 multiclass VPatBinaryWVL_VV_VX_WV_WX<SDPatternOperator vop, SDNode vop_w,
845 string instruction_name>
846 : VPatBinaryWVL_VV_VX<vop, instruction_name> {
847 foreach VtiToWti = AllWidenableIntVectors in {
848 defvar vti = VtiToWti.Vti;
849 defvar wti = VtiToWti.Wti;
850 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
851 GetVTypePredicates<wti>.Predicates) in {
852 defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV",
853 wti.Vector, vti.Vector, vti.Log2SEW,
854 vti.LMul, wti.RegClass, vti.RegClass>;
855 def : VPatTiedBinaryMaskVL_V<vop_w, instruction_name, "WV",
856 wti.Vector, vti.Vector, wti.Mask,
857 vti.Log2SEW, vti.LMul, wti.RegClass,
859 def : VPatBinaryVL_V<vop_w, instruction_name, "WV",
860 wti.Vector, wti.Vector, vti.Vector, vti.Mask,
861 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
863 def : VPatBinaryVL_XI<vop_w, instruction_name, "WX",
864 wti.Vector, wti.Vector, vti.Vector, vti.Mask,
865 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
871 multiclass VPatBinaryNVL_WV_WX_WI<SDPatternOperator vop, string instruction_name> {
872 foreach VtiToWti = AllWidenableIntVectors in {
873 defvar vti = VtiToWti.Vti;
874 defvar wti = VtiToWti.Wti;
875 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
876 GetVTypePredicates<wti>.Predicates) in {
877 def : VPatBinaryVL_V<vop, instruction_name, "WV",
878 vti.Vector, wti.Vector, vti.Vector, vti.Mask,
879 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass,
881 def : VPatBinaryVL_XI<vop, instruction_name, "WX",
882 vti.Vector, wti.Vector, vti.Vector, vti.Mask,
883 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass,
885 def : VPatBinaryVL_XI<vop, instruction_name, "WI",
886 vti.Vector, wti.Vector, vti.Vector, vti.Mask,
887 vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass,
888 !cast<ComplexPattern>(SplatPat#_#uimm5),
894 class VPatBinaryVL_VF<SDPatternOperator vop,
895 string instruction_name,
896 ValueType result_type,
902 VReg result_reg_class,
904 RegisterClass scalar_reg_class,
906 : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1),
907 (vop2_type (SplatFPOp scalar_reg_class:$rs2)),
908 (result_type result_reg_class:$merge),
913 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
914 instruction_name#"_"#vlmul.MX#"_MASK"))
915 result_reg_class:$merge,
917 scalar_reg_class:$rs2,
918 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
920 class VPatBinaryVL_VF_RM<SDPatternOperator vop,
921 string instruction_name,
922 ValueType result_type,
928 VReg result_reg_class,
930 RegisterClass scalar_reg_class,
932 : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1),
933 (vop2_type (SplatFPOp scalar_reg_class:$rs2)),
934 (result_type result_reg_class:$merge),
939 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
940 instruction_name#"_"#vlmul.MX#"_MASK"))
941 result_reg_class:$merge,
943 scalar_reg_class:$rs2,
945 // Value to indicate no rounding mode change in
946 // RISCVInsertReadWriteCSR
947 FRM_DYN,
948 GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
950 multiclass VPatBinaryFPVL_VV_VF<SDPatternOperator vop, string instruction_name,
951 bit isSEWAware = 0> {
952 foreach vti = AllFloatVectors in {
953 let Predicates = GetVTypePredicates<vti>.Predicates in {
954 def : VPatBinaryVL_V<vop, instruction_name, "VV",
955 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
956 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
957 vti.RegClass, isSEWAware>;
958 def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
959 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
960 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
961 vti.ScalarRegClass, isSEWAware>;
966 multiclass VPatBinaryFPVL_VV_VF_RM<SDPatternOperator vop, string instruction_name,
967 bit isSEWAware = 0> {
968 foreach vti = AllFloatVectors in {
969 let Predicates = GetVTypePredicates<vti>.Predicates in {
970 def : VPatBinaryVL_V_RM<vop, instruction_name, "VV",
971 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
972 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
973 vti.RegClass, isSEWAware>;
974 def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix,
975 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
976 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
977 vti.ScalarRegClass, isSEWAware>;
982 multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name,
983 bit isSEWAware = 0> {
984 foreach fvti = AllFloatVectors in {
985 let Predicates = GetVTypePredicates<fvti>.Predicates in
986 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
988 (fvti.Vector fvti.RegClass:$merge),
993 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK",
994 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
995 fvti.RegClass:$merge,
996 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
997 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
1001 multiclass VPatBinaryFPVL_R_VF_RM<SDPatternOperator vop, string instruction_name,
1002 bit isSEWAware = 0> {
1003 foreach fvti = AllFloatVectors in {
1004 let Predicates = GetVTypePredicates<fvti>.Predicates in
1005 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
1007 (fvti.Vector fvti.RegClass:$merge),
1010 (!cast<Instruction>(
1012 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK",
1013 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
1014 fvti.RegClass:$merge,
1015 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
1017 // Value to indicate no rounding mode change in
1018 // RISCVInsertReadWriteCSR
1019 FRM_DYN,
1020 GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
1024 multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
1026 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
1027 vti.RegClass:$rs2, cc,
1031 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
1035 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
1038 // Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped.
1039 multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_name,
1040 CondCode cc, CondCode invcc>
1041 : VPatIntegerSetCCVL_VV<vti, instruction_name, cc> {
1042 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2),
1043 vti.RegClass:$rs1, invcc,
1047 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
1048 VR:$merge, vti.RegClass:$rs1,
1049 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
1052 multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name,
1053 CondCode cc, CondCode invcc> {
1054 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK");
1055 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
1056 (SplatPat (XLenVT GPR:$rs2)), cc,
1060 (instruction_masked VR:$merge, vti.RegClass:$rs1,
1061 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
1062 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
1063 (vti.Vector vti.RegClass:$rs1), invcc,
1067 (instruction_masked VR:$merge, vti.RegClass:$rs1,
1068 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
1071 multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name,
1072 CondCode cc, CondCode invcc> {
1073 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK");
1074 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
1075 (SplatPat_simm5 simm5:$rs2), cc,
1079 (instruction_masked VR:$merge, vti.RegClass:$rs1,
1080 XLenVT:$rs2, (vti.Mask V0), GPR:$vl,
1083 // FIXME: Can do some canonicalization to remove these patterns.
1084 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
1085 (vti.Vector vti.RegClass:$rs1), invcc,
1089 (instruction_masked VR:$merge, vti.RegClass:$rs1,
1090 simm5:$rs2, (vti.Mask V0), GPR:$vl,
1094 multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti,
1095 string instruction_name,
1096 CondCode cc, CondCode invcc,
1097 ComplexPattern splatpat_kind> {
1098 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK");
1099 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
1100 (splatpat_kind simm5:$rs2), cc,
1104 (instruction_masked VR:$merge, vti.RegClass:$rs1,
1105 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl,
1108 // FIXME: Can do some canonicalization to remove these patterns.
1109 def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2),
1110 (vti.Vector vti.RegClass:$rs1), invcc,
1114 (instruction_masked VR:$merge, vti.RegClass:$rs1,
1115 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl,
1119 multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc,
1121 string swapped_op_inst_name> {
1122 foreach fvti = AllFloatVectors in {
1123 let Predicates = GetVTypePredicates<fvti>.Predicates in {
1124 def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1),
1130 (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK")
1131 VR:$merge, fvti.RegClass:$rs1,
1132 fvti.RegClass:$rs2, (fvti.Mask V0),
1133 GPR:$vl, fvti.Log2SEW)>;
1134 def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1),
1135 (SplatFPOp fvti.ScalarRegClass:$rs2),
1140 (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
1141 VR:$merge, fvti.RegClass:$rs1,
1142 fvti.ScalarRegClass:$rs2, (fvti.Mask V0),
1143 GPR:$vl, fvti.Log2SEW)>;
1144 def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
1145 (fvti.Vector fvti.RegClass:$rs1),
1150 (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
1151 VR:$merge, fvti.RegClass:$rs1,
1152 fvti.ScalarRegClass:$rs2, (fvti.Mask V0),
1153 GPR:$vl, fvti.Log2SEW)>;
1158 multiclass VPatExtendVL_V<SDNode vop, string inst_name, string suffix,
1159 list <VTypeInfoToFraction> fraction_list> {
1160 foreach vtiTofti = fraction_list in {
1161 defvar vti = vtiTofti.Vti;
1162 defvar fti = vtiTofti.Fti;
1163 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1164 GetVTypePredicates<fti>.Predicates) in
1165 def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2),
1166 (fti.Mask V0), VLOpFrag)),
1167 (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK")
1168 (vti.Vector (IMPLICIT_DEF)),
1170 (fti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
1174 // Single width converting
1176 multiclass VPatConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> {
1177 foreach fvti = AllFloatVectors in {
1178 defvar ivti = GetIntVTypeInfo<fvti>.Vti;
1179 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
1180 GetVTypePredicates<ivti>.Predicates) in
1181 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
1184 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
1185 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
1186 (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TA_MA)>;
1190 multiclass VPatConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name> {
1191 foreach fvti = AllFloatVectors in {
1192 defvar ivti = GetIntVTypeInfo<fvti>.Vti;
1193 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
1194 GetVTypePredicates<ivti>.Predicates) in
1195 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
1198 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
1199 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
1201 // Value to indicate no rounding mode change in
1202 // RISCVInsertReadWriteCSR
1203 FRM_DYN,
1204 GPR:$vl, ivti.Log2SEW, TA_MA)>;
1209 multiclass VPatConvertFP2I_RM_VL_V<SDPatternOperator vop, string instruction_name> {
1210 foreach fvti = AllFloatVectors in {
1211 defvar ivti = GetIntVTypeInfo<fvti>.Vti;
1212 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
1213 GetVTypePredicates<ivti>.Predicates) in
1214 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
1215 (fvti.Mask V0), (XLenVT timm:$frm),
1217 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
1218 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
1219 (fvti.Mask V0), timm:$frm, GPR:$vl, ivti.Log2SEW,
1224 multiclass VPatConvertI2FPVL_V_RM<SDPatternOperator vop, string instruction_name> {
1225 foreach fvti = AllFloatVectors in {
1226 defvar ivti = GetIntVTypeInfo<fvti>.Vti;
1227 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
1228 GetVTypePredicates<ivti>.Predicates) in
1229 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
1232 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
1233 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
1235 // Value to indicate no rounding mode change in
1236 // RISCVInsertReadWriteCSR
1237 FRM_DYN,
1238 GPR:$vl, fvti.Log2SEW, TA_MA)>;
1242 multiclass VPatConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> {
1243 foreach fvti = AllFloatVectors in {
1244 defvar ivti = GetIntVTypeInfo<fvti>.Vti;
1245 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
1246 GetVTypePredicates<ivti>.Predicates) in
1247 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
1248 (ivti.Mask V0), (XLenVT timm:$frm),
1250 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
1251 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
1252 (ivti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
1256 // Widening converting
1258 multiclass VPatWConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> {
1259 foreach fvtiToFWti = AllWidenableFloatVectors in {
1260 defvar fvti = fvtiToFWti.Vti;
1261 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
1262 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
1263 GetVTypePredicates<iwti>.Predicates) in
1264 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
1267 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
1268 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
1269 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
1273 multiclass VPatWConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name> {
1274 foreach fvtiToFWti = AllWidenableFloatVectors in {
1275 defvar fvti = fvtiToFWti.Vti;
1276 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
1277 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
1278 GetVTypePredicates<iwti>.Predicates) in
1279 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
1282 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
1283 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
1285 // Value to indicate no rounding mode change in
1286 // RISCVInsertReadWriteCSR
1287 FRM_DYN,
1288 GPR:$vl, fvti.Log2SEW, TA_MA)>;
1293 multiclass VPatWConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> {
1294 foreach fvtiToFWti = AllWidenableFloatVectors in {
1295 defvar fvti = fvtiToFWti.Vti;
1296 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
1297 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
1298 GetVTypePredicates<iwti>.Predicates) in
1299 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
1300 (fvti.Mask V0), (XLenVT timm:$frm),
1302 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
1303 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
1304 (fvti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
1308 multiclass VPatWConvertI2FPVL_V<SDPatternOperator vop,
1309 string instruction_name> {
1310 foreach vtiToWti = AllWidenableIntToFloatVectors in {
1311 defvar ivti = vtiToWti.Vti;
1312 defvar fwti = vtiToWti.Wti;
1313 let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates,
1314 GetVTypePredicates<fwti>.Predicates) in
1315 def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
1318 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_E"#ivti.SEW#"_MASK")
1319 (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
1321 GPR:$vl, ivti.Log2SEW, TA_MA)>;
1325 // Narrowing converting
1327 multiclass VPatNConvertFP2IVL_W<SDPatternOperator vop,
1328 string instruction_name> {
1329 // Reuse the same list of types used in the widening nodes, but just swap the
1330 // direction of types around so we're converting from Wti -> Vti
1331 foreach vtiToWti = AllWidenableIntToFloatVectors in {
1332 defvar vti = vtiToWti.Vti;
1333 defvar fwti = vtiToWti.Wti;
1334 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1335 GetVTypePredicates<fwti>.Predicates) in
1336 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
1339 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
1340 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
1341 (fwti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
1345 multiclass VPatNConvertFP2IVL_W_RM<SDPatternOperator vop,
1346 string instruction_name> {
1347 // Reuse the same list of types used in the widening nodes, but just swap the
1348 // direction of types around so we're converting from Wti -> Vti
1349 foreach vtiToWti = AllWidenableIntToFloatVectors in {
1350 defvar vti = vtiToWti.Vti;
1351 defvar fwti = vtiToWti.Wti;
1352 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1353 GetVTypePredicates<fwti>.Predicates) in
1354 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
1357 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
1358 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
1360 // Value to indicate no rounding mode change in
1361 // RISCVInsertReadWriteCSR
1362 FRM_DYN,
1363 GPR:$vl, vti.Log2SEW, TA_MA)>;
1367 multiclass VPatNConvertFP2I_RM_VL_W<SDNode vop, string instruction_name> {
1368 foreach vtiToWti = AllWidenableIntToFloatVectors in {
1369 defvar vti = vtiToWti.Vti;
1370 defvar fwti = vtiToWti.Wti;
1371 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1372 GetVTypePredicates<fwti>.Predicates) in
1373 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
1374 (fwti.Mask V0), (XLenVT timm:$frm),
1376 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
1377 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
1378 (fwti.Mask V0), timm:$frm, GPR:$vl, vti.Log2SEW, TA_MA)>;
1382 multiclass VPatNConvertI2FPVL_W_RM<SDPatternOperator vop,
1383 string instruction_name> {
1384 foreach fvtiToFWti = AllWidenableFloatVectors in {
1385 defvar fvti = fvtiToFWti.Vti;
1386 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
1387 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
1388 GetVTypePredicates<iwti>.Predicates) in
1389 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
1392 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
1393 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1,
1395 // Value to indicate no rounding mode change in
1396 // RISCVInsertReadWriteCSR
1397 FRM_DYN,
1398 GPR:$vl, fvti.Log2SEW, TA_MA)>;
1402 multiclass VPatNConvertI2FP_RM_VL_W<SDNode vop, string instruction_name> {
1403 foreach fvtiToFWti = AllWidenableFloatVectors in {
1404 defvar fvti = fvtiToFWti.Vti;
1405 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
1406 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
1407 GetVTypePredicates<iwti>.Predicates) in
1408 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
1409 (iwti.Mask V0), (XLenVT timm:$frm),
1411 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
1412 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1,
1413 (iwti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
1417 multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
1418 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
1419 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
1420 let Predicates = GetVTypePredicates<vti>.Predicates in {
1421 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
1422 (vti.Vector vti.RegClass:$rs1), VR:$rs2,
1423 (vti.Mask V0), VLOpFrag,
1424 (XLenVT timm:$policy))),
1425 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
1426 (vti_m1.Vector VR:$merge),
1427 (vti.Vector vti.RegClass:$rs1),
1428 (vti_m1.Vector VR:$rs2),
1429 (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
1434 multiclass VPatReductionVL_RM<SDNode vop, string instruction_name, bit is_float> {
1435 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
1436 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
1437 let Predicates = GetVTypePredicates<vti>.Predicates in {
1438 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
1439 (vti.Vector vti.RegClass:$rs1), VR:$rs2,
1440 (vti.Mask V0), VLOpFrag,
1441 (XLenVT timm:$policy))),
1442 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
1443 (vti_m1.Vector VR:$merge),
1444 (vti.Vector vti.RegClass:$rs1),
1445 (vti_m1.Vector VR:$rs2),
1447 // Value to indicate no rounding mode change in
1448 // RISCVInsertReadWriteCSR
1449 FRM_DYN,
1450 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
1455 multiclass VPatBinaryVL_WV_WX_WI<SDNode op, string instruction_name> {
1456 foreach vtiToWti = AllWidenableIntVectors in {
1457 defvar vti = vtiToWti.Vti;
1458 defvar wti = vtiToWti.Wti;
1459 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1460 GetVTypePredicates<wti>.Predicates) in {
1463 (riscv_trunc_vector_vl
1464 (op (wti.Vector wti.RegClass:$rs2),
1465 (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))),
1466 (vti.Mask true_mask),
1468 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX)
1469 (vti.Vector (IMPLICIT_DEF)),
1470 wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
1474 (riscv_trunc_vector_vl
1475 (op (wti.Vector wti.RegClass:$rs2),
1476 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))),
1477 (vti.Mask true_mask),
1479 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
1480 (vti.Vector (IMPLICIT_DEF)),
1481 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
1485 (riscv_trunc_vector_vl
1486 (op (wti.Vector wti.RegClass:$rs2),
1487 (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), (vti.Mask true_mask),
1489 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX)
1490 (vti.Vector (IMPLICIT_DEF)),
1491 wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
1496 multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> {
1497 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in {
1498 defvar vti = vtiToWti.Vti;
1499 defvar wti = vtiToWti.Wti;
1500 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
1501 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1502 GetVTypePredicates<wti>.Predicates) in {
1503 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
1504 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
1505 VR:$rs2, (vti.Mask V0), VLOpFrag,
1506 (XLenVT timm:$policy))),
1507 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
1508 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
1509 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
1510 (XLenVT timm:$policy))>;
1515 multiclass VPatWidenReductionVL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> {
1516 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in {
1517 defvar vti = vtiToWti.Vti;
1518 defvar wti = vtiToWti.Wti;
1519 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
1520 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1521 GetVTypePredicates<wti>.Predicates) in {
1522 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
1523 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
1524 VR:$rs2, (vti.Mask V0), VLOpFrag,
1525 (XLenVT timm:$policy))),
1526 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
1527 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
1528 (wti_m1.Vector VR:$rs2), (vti.Mask V0),
1529 // Value to indicate no rounding mode change in
1530 // RISCVInsertReadWriteCSR
1531 FRM_DYN,
1532 GPR:$vl, vti.Log2SEW,
1533 (XLenVT timm:$policy))>;
1538 multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> {
1539 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in {
1540 defvar vti = vtiToWti.Vti;
1541 defvar wti = vtiToWti.Wti;
1542 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
1543 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1544 GetVTypePredicates<wti>.Predicates) in {
1545 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
1546 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
1547 VR:$rs2, (vti.Mask V0), VLOpFrag,
1548 (XLenVT timm:$policy))),
1549 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
1550 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
1551 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
1552 (XLenVT timm:$policy))>;
1557 multiclass VPatWidenReductionVL_Ext_VL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> {
1558 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in {
1559 defvar vti = vtiToWti.Vti;
1560 defvar wti = vtiToWti.Wti;
1561 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
1562 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1563 GetVTypePredicates<wti>.Predicates) in {
1564 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
1565 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
1566 VR:$rs2, (vti.Mask V0), VLOpFrag,
1567 (XLenVT timm:$policy))),
1568 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
1569 (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
1570 (wti_m1.Vector VR:$rs2), (vti.Mask V0),
1571 // Value to indicate no rounding mode change in
1572 // RISCVInsertReadWriteCSR
1573 FRM_DYN,
1574 GPR:$vl, vti.Log2SEW,
1575 (XLenVT timm:$policy))>;
1580 multiclass VPatBinaryFPWVL_VV_VF<SDNode vop, string instruction_name> {
1581 foreach fvtiToFWti = AllWidenableFloatVectors in {
1582 defvar vti = fvtiToFWti.Vti;
1583 defvar wti = fvtiToFWti.Wti;
1584 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1585 GetVTypePredicates<wti>.Predicates) in {
1586 def : VPatBinaryVL_V<vop, instruction_name, "VV",
1587 wti.Vector, vti.Vector, vti.Vector, vti.Mask,
1588 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
1590 def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
1591 wti.Vector, vti.Vector, vti.Vector, vti.Mask,
1592 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
1593 vti.ScalarRegClass>;
1598 multiclass VPatBinaryFPWVL_VV_VF_RM<SDNode vop, string instruction_name,
1599 bit isSEWAware = 0> {
1600 foreach fvtiToFWti = AllWidenableFloatVectors in {
1601 defvar vti = fvtiToFWti.Vti;
1602 defvar wti = fvtiToFWti.Wti;
1603 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1604 GetVTypePredicates<wti>.Predicates) in {
1605 def : VPatBinaryVL_V_RM<vop, instruction_name, "VV",
1606 wti.Vector, vti.Vector, vti.Vector, vti.Mask,
1607 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
1608 vti.RegClass, isSEWAware>;
1609 def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix,
1610 wti.Vector, vti.Vector, vti.Vector, vti.Mask,
1611 vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
1612 vti.ScalarRegClass, isSEWAware>;
1617 multiclass VPatBinaryFPWVL_VV_VF_WV_WF<SDNode vop, SDNode vop_w, string instruction_name>
1618 : VPatBinaryFPWVL_VV_VF<vop, instruction_name> {
1619 foreach fvtiToFWti = AllWidenableFloatVectors in {
1620 defvar vti = fvtiToFWti.Vti;
1621 defvar wti = fvtiToFWti.Wti;
1622 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1623 GetVTypePredicates<wti>.Predicates) in {
1624 defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV",
1625 wti.Vector, vti.Vector, vti.Log2SEW,
1626 vti.LMul, wti.RegClass, vti.RegClass>;
1627 def : VPatBinaryVL_V<vop_w, instruction_name, "WV",
1628 wti.Vector, wti.Vector, vti.Vector, vti.Mask,
1629 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
1631 def : VPatBinaryVL_VF<vop_w, instruction_name#"_W"#vti.ScalarSuffix,
1632 wti.Vector, wti.Vector, vti.Vector, vti.Mask,
1633 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
1634 vti.ScalarRegClass>;
1639 multiclass VPatBinaryFPWVL_VV_VF_WV_WF_RM<
1640 SDNode vop, SDNode vop_w, string instruction_name, bit isSEWAware = 0>
1641 : VPatBinaryFPWVL_VV_VF_RM<vop, instruction_name, isSEWAware> {
1642 foreach fvtiToFWti = AllWidenableFloatVectors in {
1643 defvar vti = fvtiToFWti.Vti;
1644 defvar wti = fvtiToFWti.Wti;
1645 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1646 GetVTypePredicates<wti>.Predicates) in {
1647 defm : VPatTiedBinaryNoMaskVL_V_RM<vop_w, instruction_name, "WV",
1648 wti.Vector, vti.Vector, vti.Log2SEW,
1649 vti.LMul, wti.RegClass, vti.RegClass,
1651 def : VPatBinaryVL_V_RM<vop_w, instruction_name, "WV",
1652 wti.Vector, wti.Vector, vti.Vector, vti.Mask,
1653 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
1654 vti.RegClass, isSEWAware>;
1655 def : VPatBinaryVL_VF_RM<vop_w, instruction_name#"_W"#vti.ScalarSuffix,
1656 wti.Vector, wti.Vector, vti.Vector, vti.Mask,
1657 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
1658 vti.ScalarRegClass, isSEWAware>;
1663 multiclass VPatNarrowShiftSplatExt_WX<SDNode op, PatFrags extop, string instruction_name> {
1664 foreach vtiToWti = AllWidenableIntVectors in {
1665 defvar vti = vtiToWti.Vti;
1666 defvar wti = vtiToWti.Wti;
1667 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1668 GetVTypePredicates<wti>.Predicates) in
1671 (riscv_trunc_vector_vl
1672 (op (wti.Vector wti.RegClass:$rs2),
1673 (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1))),
1674 (vti.Mask true_mask), VLOpFrag)),
1675 srcvalue, (wti.Mask true_mask), VLOpFrag),
1676 (vti.Mask true_mask), VLOpFrag)),
1677 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
1678 (vti.Vector (IMPLICIT_DEF)),
1679 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
1683 multiclass VPatNarrowShiftExtVL_WV<SDNode op, PatFrags extop, string instruction_name> {
1684 foreach vtiToWti = AllWidenableIntVectors in {
1685 defvar vti = vtiToWti.Vti;
1686 defvar wti = vtiToWti.Wti;
1687 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1688 GetVTypePredicates<wti>.Predicates) in
1691 (riscv_trunc_vector_vl
1692 (op (wti.Vector wti.RegClass:$rs2),
1693 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1),
1694 (vti.Mask true_mask), VLOpFrag)),
1695 srcvalue, (vti.Mask true_mask), VLOpFrag),
1696 (vti.Mask V0), VLOpFrag)),
1697 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_MASK")
1698 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, vti.RegClass:$rs1,
1699 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
1703 multiclass VPatNarrowShiftVL_WV<SDNode op, string instruction_name> {
1704 defm : VPatNarrowShiftExtVL_WV<op, riscv_sext_vl_oneuse, instruction_name>;
1705 defm : VPatNarrowShiftExtVL_WV<op, riscv_zext_vl_oneuse, instruction_name>;
1708 multiclass VPatMultiplyAddVL_VV_VX<SDNode op, string instruction_name> {
1709 foreach vti = AllIntegerVectors in {
1710 defvar suffix = vti.LMul.MX;
1711 let Predicates = GetVTypePredicates<vti>.Predicates in {
1712 // NOTE: We choose VMADD because it has the most commuting freedom. So it
1713 // works best with how TwoAddressInstructionPass tries commuting.
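// vmadd.vv vd, vs1, vs2 computes vd = (vs1 * vd) + vs2, i.e. the destination
// register doubles as one multiplicand.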
1714 def : Pat<(vti.Vector
1715 (op vti.RegClass:$rs2,
1716 (riscv_mul_vl_oneuse vti.RegClass:$rs1,
1718 srcvalue, (vti.Mask true_mask), VLOpFrag),
1719 srcvalue, (vti.Mask true_mask), VLOpFrag)),
1720 (!cast<Instruction>(instruction_name#"_VV_"# suffix)
1721 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
1722 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1723 // The choice of VMADD here is arbitrary; vmadd.vx and vmacc.vx are equally
1724 // commutable.
1725 def : Pat<(vti.Vector
1726 (op vti.RegClass:$rs2,
1727 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1),
1729 srcvalue, (vti.Mask true_mask), VLOpFrag),
1730 srcvalue, (vti.Mask true_mask), VLOpFrag)),
1731 (!cast<Instruction>(instruction_name#"_VX_" # suffix)
1732 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1733 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1738 multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> {
1739 foreach vti = AllIntegerVectors in {
1740 defvar suffix = vti.LMul.MX;
1741 let Predicates = GetVTypePredicates<vti>.Predicates in {
1742 def : Pat<(riscv_vmerge_vl (vti.Mask V0),
1743 (vti.Vector (op vti.RegClass:$rd,
1744 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
1745 srcvalue, (vti.Mask true_mask), VLOpFrag),
1746 srcvalue, (vti.Mask true_mask), VLOpFrag)),
1747 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
1748 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
1749 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
1750 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
1751 def : Pat<(riscv_vmerge_vl (vti.Mask V0),
1752 (vti.Vector (op vti.RegClass:$rd,
1753 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
1754 srcvalue, (vti.Mask true_mask), VLOpFrag),
1755 srcvalue, (vti.Mask true_mask), VLOpFrag)),
1756 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
1757 (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK")
1758 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1759 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
1760 def : Pat<(riscv_vmerge_vl (vti.Mask V0),
1761 (vti.Vector (op vti.RegClass:$rd,
1762 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
1763 srcvalue, (vti.Mask true_mask), VLOpFrag),
1764 srcvalue, (vti.Mask true_mask), VLOpFrag)),
1765 vti.RegClass:$rd, undef, VLOpFrag),
1766 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
1767 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
1768 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1769 def : Pat<(riscv_vmerge_vl (vti.Mask V0),
1770 (vti.Vector (op vti.RegClass:$rd,
1771 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
1772 srcvalue, (vti.Mask true_mask), VLOpFrag),
1773 srcvalue, (vti.Mask true_mask), VLOpFrag)),
1774 vti.RegClass:$rd, undef, VLOpFrag),
1775 (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK")
1776 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1777 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1782 multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name> {
1783 foreach vtiTowti = AllWidenableIntVectors in {
1784 defvar vti = vtiTowti.Vti;
1785 defvar wti = vtiTowti.Wti;
1786 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1787 GetVTypePredicates<wti>.Predicates) in {
1788 def : Pat<(vwmacc_op (vti.Vector vti.RegClass:$rs1),
1789 (vti.Vector vti.RegClass:$rs2),
1790 (wti.Vector wti.RegClass:$rd),
1791 (vti.Mask V0), VLOpFrag),
1792 (!cast<Instruction>(instr_name#"_VV_"#vti.LMul.MX#"_MASK")
1793 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
1794 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1795 def : Pat<(vwmacc_op (SplatPat XLenVT:$rs1),
1796 (vti.Vector vti.RegClass:$rs2),
1797 (wti.Vector wti.RegClass:$rd),
1798 (vti.Mask V0), VLOpFrag),
1799 (!cast<Instruction>(instr_name#"_VX_"#vti.LMul.MX#"_MASK")
1800 wti.RegClass:$rd, vti.ScalarRegClass:$rs1,
1801 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
1807 multiclass VPatNarrowShiftSplat_WX_WI<SDNode op, string instruction_name> {
1808 foreach vtiTowti = AllWidenableIntVectors in {
1809 defvar vti = vtiTowti.Vti;
1810 defvar wti = vtiTowti.Wti;
1811 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1812 GetVTypePredicates<wti>.Predicates) in {
1813 def : Pat<(vti.Vector (riscv_trunc_vector_vl
1814 (wti.Vector (op wti.RegClass:$rs1, (SplatPat XLenVT:$rs2),
1815 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)),
1816 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
1817 (vti.Vector (IMPLICIT_DEF)),
1818 wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
1819 def : Pat<(vti.Vector (riscv_trunc_vector_vl
1820 (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2),
1821 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)),
1822 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX)
1823 (vti.Vector (IMPLICIT_DEF)),
1824 wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
1829 multiclass VPatFPMulAddVL_VV_VF<SDPatternOperator vop, string instruction_name> {
1830 foreach vti = AllFloatVectors in {
1831 defvar suffix = vti.LMul.MX;
1832 let Predicates = GetVTypePredicates<vti>.Predicates in {
1833 def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd,
1834 vti.RegClass:$rs2, (vti.Mask V0),
1836 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
1837 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
1838 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
1840 def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
1841 vti.RegClass:$rd, vti.RegClass:$rs2,
1844 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
1845 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1846 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
1851 multiclass VPatFPMulAddVL_VV_VF_RM<SDPatternOperator vop, string instruction_name> {
1852 foreach vti = AllFloatVectors in {
1853 defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
1854 let Predicates = GetVTypePredicates<vti>.Predicates in {
1855 def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd,
1856 vti.RegClass:$rs2, (vti.Mask V0),
1858 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
1859 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
1860 (vti.Mask V0),
1861 // Value to indicate no rounding mode change in
1862 // RISCVInsertReadWriteCSR
1863 FRM_DYN,
1864 GPR:$vl, vti.Log2SEW, TA_MA)>;
1866 def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
1867 vti.RegClass:$rd, vti.RegClass:$rs2,
1870 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
1871 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1872 (vti.Mask V0),
1873 // Value to indicate no rounding mode change in
1874 // RISCVInsertReadWriteCSR
1875 FRM_DYN,
1876 GPR:$vl, vti.Log2SEW, TA_MA)>;
1881 multiclass VPatFPMulAccVL_VV_VF<PatFrag vop, string instruction_name> {
1882 foreach vti = AllFloatVectors in {
1883 defvar suffix = vti.LMul.MX;
1884 let Predicates = GetVTypePredicates<vti>.Predicates in {
1885 def : Pat<(riscv_vmerge_vl (vti.Mask V0),
1886 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
1887 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
1888 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
1889 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
1890 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
1891 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
1892 def : Pat<(riscv_vmerge_vl (vti.Mask V0),
1893 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
1894 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
1895 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
1896 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
1897 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1898 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
1899 def : Pat<(riscv_vmerge_vl (vti.Mask V0),
1900 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
1901 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
1902 vti.RegClass:$rd, undef, VLOpFrag),
1903 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
1904 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
1905 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1906 def : Pat<(riscv_vmerge_vl (vti.Mask V0),
1907 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
1908 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
1909 vti.RegClass:$rd, undef, VLOpFrag),
1910 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
1911 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1912 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1917 multiclass VPatFPMulAccVL_VV_VF_RM<PatFrag vop, string instruction_name> {
1918 foreach vti = AllFloatVectors in {
1919 defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
1920 let Predicates = GetVTypePredicates<vti>.Predicates in {
1921 def : Pat<(riscv_vmerge_vl (vti.Mask V0),
1922 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
1923 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
1924 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
1925 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
1926 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
1927 (vti.Mask V0),
1928 // Value to indicate no rounding mode change in
1929 // RISCVInsertReadWriteCSR
1930 FRM_DYN,
1931 GPR:$vl, vti.Log2SEW, TU_MU)>;
1932 def : Pat<(riscv_vmerge_vl (vti.Mask V0),
1933 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
1934 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
1935 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
1936 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
1937 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1938 (vti.Mask V0),
1939 // Value to indicate no rounding mode change in
1940 // RISCVInsertReadWriteCSR
1941 FRM_DYN,
1942 GPR:$vl, vti.Log2SEW, TU_MU)>;
1943 def : Pat<(riscv_vmerge_vl (vti.Mask V0),
1944 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
1945 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
1946 vti.RegClass:$rd, undef, VLOpFrag),
1947 (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
1948 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
1949 (vti.Mask V0),
1950 // Value to indicate no rounding mode change in
1951 // RISCVInsertReadWriteCSR
1952 FRM_DYN,
1953 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1954 def : Pat<(riscv_vmerge_vl (vti.Mask V0),
1955 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
1956 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
1957 vti.RegClass:$rd, undef, VLOpFrag),
1958 (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
1959 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1960 (vti.Mask V0),
1961 // Value to indicate no rounding mode change in
1962 // RISCVInsertReadWriteCSR
1963 FRM_DYN,
1964 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1969 multiclass VPatWidenFPMulAccVL_VV_VF<SDNode vop, string instruction_name> {
1970 foreach vtiToWti = AllWidenableFloatVectors in {
1971 defvar vti = vtiToWti.Vti;
1972 defvar wti = vtiToWti.Wti;
1973 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1974 GetVTypePredicates<wti>.Predicates) in {
1975 def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
1976 (vti.Vector vti.RegClass:$rs2),
1977 (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
1979 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK")
1980 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
1981 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
1982 def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
1983 (vti.Vector vti.RegClass:$rs2),
1984 (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
1986 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK")
1987 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1988 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
1993 multiclass VPatWidenFPMulAccVL_VV_VF_RM<SDNode vop, string instruction_name> {
1994 foreach vtiToWti = AllWidenableFloatVectors in {
1995 defvar vti = vtiToWti.Vti;
1996 defvar wti = vtiToWti.Wti;
1997 defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
1998 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
1999 GetVTypePredicates<wti>.Predicates) in {
2000 def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
2001 (vti.Vector vti.RegClass:$rs2),
2002 (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
2004 (!cast<Instruction>(instruction_name#"_VV_"#suffix#"_MASK")
2005 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
2006 (vti.Mask V0),
2007 // Value to indicate no rounding mode change in
2008 // RISCVInsertReadWriteCSR
2009 FRM_DYN,
2010 GPR:$vl, vti.Log2SEW, TA_MA)>;
2011 def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
2012 (vti.Vector vti.RegClass:$rs2),
2013 (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
2015 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix#"_MASK")
2016 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
2017 (vti.Mask V0),
2018 // Value to indicate no rounding mode change in
2019 // RISCVInsertReadWriteCSR
2020 FRM_DYN,
2021 GPR:$vl, vti.Log2SEW, TA_MA)>;
2026 multiclass VPatSlideVL_VX_VI<SDNode vop, string instruction_name> {
2027 foreach vti = AllVectors in {
2028 let Predicates = GetVTypePredicates<vti>.Predicates in {
2029 def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd),
2030 (vti.Vector vti.RegClass:$rs1),
2031 uimm5:$rs2, (vti.Mask V0),
2032 VLOpFrag, (XLenVT timm:$policy))),
2033 (!cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK")
2034 vti.RegClass:$rd, vti.RegClass:$rs1, uimm5:$rs2,
2035 (vti.Mask V0), GPR:$vl, vti.Log2SEW,
2036 (XLenVT timm:$policy))>;
2038 def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd),
2039 (vti.Vector vti.RegClass:$rs1),
2040 GPR:$rs2, (vti.Mask V0),
2041 VLOpFrag, (XLenVT timm:$policy))),
2042 (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK")
2043 vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2,
2044 (vti.Mask V0), GPR:$vl, vti.Log2SEW,
2045 (XLenVT timm:$policy))>;
2050 multiclass VPatSlide1VL_VX<SDNode vop, string instruction_name> {
2051 foreach vti = AllIntegerVectors in {
2052 let Predicates = GetVTypePredicates<vti>.Predicates in {
2053 def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3),
2054 (vti.Vector vti.RegClass:$rs1),
2055 GPR:$rs2, (vti.Mask V0), VLOpFrag)),
2056 (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK")
2057 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
2058 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
2063 multiclass VPatSlide1VL_VF<SDNode vop, string instruction_name> {
2064 foreach vti = AllFloatVectors in {
2065 let Predicates = GetVTypePredicates<vti>.Predicates in {
2066 def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3),
2067 (vti.Vector vti.RegClass:$rs1),
2068 vti.Scalar:$rs2, (vti.Mask V0), VLOpFrag)),
2069 (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_MASK")
2070 vti.RegClass:$rs3, vti.RegClass:$rs1, vti.Scalar:$rs2,
2071 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
2076 multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm> {
2077 foreach vti = AllIntegerVectors in {
2078 let Predicates = GetVTypePredicates<vti>.Predicates in {
2079 def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
2080 (vti.Vector vti.RegClass:$rs2),
2081 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
2082 (!cast<Instruction>("PseudoVAADDU_VV_"#vti.LMul.MX#"_MASK")
2083 vti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs2,
2084 (vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
2085 def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
2086 (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
2087 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
2088 (!cast<Instruction>("PseudoVAADDU_VX_"#vti.LMul.MX#"_MASK")
2089 vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2,
2090 (vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
2095 //===----------------------------------------------------------------------===//
2097 //===----------------------------------------------------------------------===//
2099 // 11. Vector Integer Arithmetic Instructions
2101 // 11.1. Vector Single-Width Integer Add and Subtract
2102 defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">;
2103 defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">;
2104 // Handle VRSUB specially since it's the only integer binary op with reversed
2105 // pattern operands.
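// vrsub.vx/vrsub.vi compute (scalar - vs2), so the splatted LHS of the sub
// below becomes the scalar/immediate operand.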
2106 foreach vti = AllIntegerVectors in {
2107 let Predicates = GetVTypePredicates<vti>.Predicates in {
2108 def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
2109 (vti.Vector vti.RegClass:$rs1),
2110 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
2111 (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK")
2112 vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2,
2113 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
2114 def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
2115 (vti.Vector vti.RegClass:$rs1),
2116 vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
2117 (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK")
2118 vti.RegClass:$merge, vti.RegClass:$rs1, simm5:$rs2,
2119 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
2123 // 11.2. Vector Widening Integer Add/Subtract
2124 defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">;
2125 defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">;
2126 defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsub_vl, riscv_vwsub_w_vl, "PseudoVWSUB">;
2127 defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsubu_vl, riscv_vwsubu_w_vl, "PseudoVWSUBU">;
2129 // shl_vl (ext_vl v, splat 1) is a special case of widening add.
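// sext/zext(v) + sext/zext(v) == (sext/zext(v)) << 1, so the masked patterns
// below pass $rs1 as both source operands of vwadd(u).vv.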
2130 foreach vtiToWti = AllWidenableIntVectors in {
2131 defvar vti = vtiToWti.Vti;
2132 defvar wti = vtiToWti.Wti;
2133 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
2134 GetVTypePredicates<wti>.Predicates) in {
2135 def : Pat<(riscv_shl_vl (wti.Vector (riscv_sext_vl_oneuse
2136 (vti.Vector vti.RegClass:$rs1),
2137 (vti.Mask V0), VLOpFrag)),
2138 (wti.Vector (riscv_vmv_v_x_vl
2139 (wti.Vector undef), 1, VLOpFrag)),
2140 wti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
2141 (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK")
2142 wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1,
2143 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
2144 def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse
2145 (vti.Vector vti.RegClass:$rs1),
2146 (vti.Mask V0), VLOpFrag)),
2147 (wti.Vector (riscv_vmv_v_x_vl
2148 (wti.Vector undef), 1, VLOpFrag)),
2149 wti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
2150 (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK")
2151 wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1,
2152 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
2156 // 11.3. Vector Integer Extension
2157 defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF2",
2158 AllFractionableVF2IntVectors>;
2159 defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF2",
2160 AllFractionableVF2IntVectors>;
2161 defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF4",
2162 AllFractionableVF4IntVectors>;
2163 defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF4",
2164 AllFractionableVF4IntVectors>;
2165 defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF8",
2166 AllFractionableVF8IntVectors>;
2167 defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF8",
2168 AllFractionableVF8IntVectors>;
2170 // 11.5. Vector Bitwise Logical Instructions
2171 defm : VPatBinaryVL_VV_VX_VI<riscv_and_vl, "PseudoVAND">;
2172 defm : VPatBinaryVL_VV_VX_VI<riscv_or_vl, "PseudoVOR">;
2173 defm : VPatBinaryVL_VV_VX_VI<riscv_xor_vl, "PseudoVXOR">;
2175 // 11.6. Vector Single-Width Bit Shift Instructions
2176 defm : VPatBinaryVL_VV_VX_VI<riscv_shl_vl, "PseudoVSLL", uimm5>;
2177 defm : VPatBinaryVL_VV_VX_VI<riscv_srl_vl, "PseudoVSRL", uimm5>;
2178 defm : VPatBinaryVL_VV_VX_VI<riscv_sra_vl, "PseudoVSRA", uimm5>;
2180 foreach vti = AllIntegerVectors in {
2181 // Emit shift by 1 as an add since it might be faster.
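// (v << 1) == (v + v), so vadd.vv with the same register for both sources is
// equivalent.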
2182 let Predicates = GetVTypePredicates<vti>.Predicates in
2183 def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1),
2184 (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)),
2185 srcvalue, (vti.Mask true_mask), VLOpFrag),
2186 (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
2187 (vti.Vector (IMPLICIT_DEF)),
2188 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
2191 // 11.7. Vector Narrowing Integer Right Shift Instructions
2192 defm : VPatBinaryVL_WV_WX_WI<srl, "PseudoVNSRL">;
2193 defm : VPatBinaryVL_WV_WX_WI<sra, "PseudoVNSRA">;
2195 defm : VPatNarrowShiftSplat_WX_WI<riscv_sra_vl, "PseudoVNSRA">;
2196 defm : VPatNarrowShiftSplat_WX_WI<riscv_srl_vl, "PseudoVNSRL">;
2197 defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_sext_vl_oneuse, "PseudoVNSRA">;
2198 defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_zext_vl_oneuse, "PseudoVNSRA">;
2199 defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_sext_vl_oneuse, "PseudoVNSRL">;
2200 defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_zext_vl_oneuse, "PseudoVNSRL">;
2202 defm : VPatNarrowShiftVL_WV<riscv_srl_vl, "PseudoVNSRL">;
2203 defm : VPatNarrowShiftVL_WV<riscv_sra_vl, "PseudoVNSRA">;
2205 defm : VPatBinaryNVL_WV_WX_WI<riscv_vnsrl_vl, "PseudoVNSRL">;
2207 foreach vtiTowti = AllWidenableIntVectors in {
2208 defvar vti = vtiTowti.Vti;
2209 defvar wti = vtiTowti.Wti;
2210 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
2211 GetVTypePredicates<wti>.Predicates) in
2212 def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
2215 (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX#"_MASK")
2216 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
2217 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
2220 // 11.8. Vector Integer Comparison Instructions
2221 foreach vti = AllIntegerVectors in {
2222 let Predicates = GetVTypePredicates<vti>.Predicates in {
2223 defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSEQ", SETEQ>;
2224 defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSNE", SETNE>;
2226 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
2227 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
2228 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
2229 defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
2231 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
2232 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
2233 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
2234 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
2235 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
2236 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
2237 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
2238 defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;
2239 // There is no VMSGE(U)_VX instruction
2241 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
2242 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
2243 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
2244 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
2245 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
2246 defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;
2248 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLE", SETLT, SETGT,
2249 SplatPat_simm5_plus1>;
2250 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLEU", SETULT, SETUGT,
2251 SplatPat_simm5_plus1_nonzero>;
2252 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGT", SETGE, SETLE,
2253 SplatPat_simm5_plus1>;
2254 defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGTU", SETUGE, SETULE,
2255 SplatPat_simm5_plus1_nonzero>;
2257 } // foreach vti = AllIntegerVectors
2259 // 11.9. Vector Integer Min/Max Instructions
2260 defm : VPatBinaryVL_VV_VX<riscv_umin_vl, "PseudoVMINU">;
2261 defm : VPatBinaryVL_VV_VX<riscv_smin_vl, "PseudoVMIN">;
2262 defm : VPatBinaryVL_VV_VX<riscv_umax_vl, "PseudoVMAXU">;
2263 defm : VPatBinaryVL_VV_VX<riscv_smax_vl, "PseudoVMAX">;
2265 // 11.10. Vector Single-Width Integer Multiply Instructions
2266 defm : VPatBinaryVL_VV_VX<riscv_mul_vl, "PseudoVMUL">;
2267 defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", IntegerVectorsExceptI64>;
2268 defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", IntegerVectorsExceptI64>;
2269 // vmulh.{vv,vx} and vmulhu.{vv,vx} (like vsmul) are not included for EEW=64 in Zve64*.
2270 let Predicates = [HasVInstructionsFullMultiply] in {
2271 defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", I64IntegerVectors>;
2272 defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", I64IntegerVectors>;
2275 // 11.11. Vector Integer Divide Instructions
2276 defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU", isSEWAware=1>;
2277 defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV", isSEWAware=1>;
2278 defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU", isSEWAware=1>;
2279 defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM", isSEWAware=1>;
2281 // 11.12. Vector Widening Integer Multiply Instructions
2282 defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">;
2283 defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">;
2284 defm : VPatBinaryWVL_VV_VX<riscv_vwmulsu_vl, "PseudoVWMULSU">;
2286 // 11.13 Vector Single-Width Integer Multiply-Add Instructions
2287 defm : VPatMultiplyAddVL_VV_VX<riscv_add_vl, "PseudoVMADD">;
2288 defm : VPatMultiplyAddVL_VV_VX<riscv_sub_vl, "PseudoVNMSUB">;
2289 defm : VPatMultiplyAccVL_VV_VX<riscv_add_vl_oneuse, "PseudoVMACC">;
2290 defm : VPatMultiplyAccVL_VV_VX<riscv_sub_vl_oneuse, "PseudoVNMSAC">;
2292 // 11.14. Vector Widening Integer Multiply-Add Instructions
2293 defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmacc_vl, "PseudoVWMACC">;
2294 defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccu_vl, "PseudoVWMACCU">;
2295 defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccsu_vl, "PseudoVWMACCSU">;
2296 foreach vtiTowti = AllWidenableIntVectors in {
2297 defvar vti = vtiTowti.Vti;
2298 defvar wti = vtiTowti.Wti;
2299 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
2300 GetVTypePredicates<wti>.Predicates) in
2301 def : Pat<(riscv_vwmaccsu_vl (vti.Vector vti.RegClass:$rs1),
2302 (SplatPat XLenVT:$rs2),
2303 (wti.Vector wti.RegClass:$rd),
2304 (vti.Mask V0), VLOpFrag),
2305 (!cast<Instruction>("PseudoVWMACCUS_VX_"#vti.LMul.MX#"_MASK")
2306 wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1,
2307 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
2310 // 11.15. Vector Integer Merge Instructions
2311 foreach vti = AllIntegerVectors in {
2312 let Predicates = GetVTypePredicates<vti>.Predicates in {
2313 def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
2316 vti.RegClass:$merge,
2318 (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
2319 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
2320 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
2322 def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
2323 (SplatPat XLenVT:$rs1),
2325 vti.RegClass:$merge,
2327 (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
2328 vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
2329 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
2331 def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
2332 (SplatPat_simm5 simm5:$rs1),
2334 vti.RegClass:$merge,
2336 (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
2337 vti.RegClass:$merge, vti.RegClass:$rs2, simm5:$rs1,
2338 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
2342 // 11.16. Vector Integer Move Instructions
2343 foreach vti = AllVectors in {
2344 let Predicates = GetVTypePredicates<vti>.Predicates in {
2345 def : Pat<(vti.Vector (riscv_vmv_v_v_vl vti.RegClass:$passthru,
2346 vti.RegClass:$rs2, VLOpFrag)),
2347 (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
2348 vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
2351 foreach vti = AllIntegerVectors in {
2352 def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, GPR:$rs2, VLOpFrag)),
2353 (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
2354 vti.RegClass:$passthru, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
2355 defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5");
2356 def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, (ImmPat simm5:$imm5),
2358 (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
2359 vti.RegClass:$passthru, simm5:$imm5, GPR:$vl, vti.Log2SEW, TU_MU)>;
2363 // 12. Vector Fixed-Point Arithmetic Instructions
2365 // 12.1. Vector Single-Width Saturating Add and Subtract
2366 defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">;
2367 defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">;
2368 defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">;
2369 defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;
2371 // 12.2. Vector Single-Width Averaging Add and Subtract
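// vaaddu rounds per vxrm: 0b10 (rdn) truncates, giving floor((a + b) / 2) for
// avgflooru; 0b00 (rnu) adds the shifted-out bit back, giving ceil((a + b) / 2)
// for avgceilu.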
2372 defm : VPatAVGADDVL_VV_VX_RM<riscv_avgflooru_vl, 0b10>;
2373 defm : VPatAVGADDVL_VV_VX_RM<riscv_avgceilu_vl, 0b00>;
2375 // 12.5. Vector Narrowing Fixed-Point Clip Instructions
2376 multiclass VPatTruncSatClipVL<VTypeInfo vti, VTypeInfo wti> {
2377 defvar sew = vti.SEW;
2378 defvar uminval = !sub(!shl(1, sew), 1);
2379 defvar sminval = !sub(!shl(1, !sub(sew, 1)), 1);
2380 defvar smaxval = !sub(0, !shl(1, !sub(sew, 1)));
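// For example, with SEW=8: uminval = 255, sminval = 127, smaxval = -128.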
2382 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
2383 GetVTypePredicates<wti>.Predicates) in {
2384 def : Pat<(vti.Vector (riscv_trunc_vector_vl
2385 (wti.Vector (riscv_smin_vl
2386 (wti.Vector (riscv_smax_vl
2387 (wti.Vector wti.RegClass:$rs1),
2388 (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), smaxval, (XLenVT srcvalue))),
2389 (wti.Vector undef),(wti.Mask V0), VLOpFrag)),
2390 (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), sminval, (XLenVT srcvalue))),
2391 (wti.Vector undef), (wti.Mask V0), VLOpFrag)),
2392 (vti.Mask V0), VLOpFrag)),
2393 (!cast<Instruction>("PseudoVNCLIP_WI_"#vti.LMul.MX#"_MASK")
2394 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
2395 (vti.Mask V0), 0, GPR:$vl, vti.Log2SEW, TA_MA)>;
2397 def : Pat<(vti.Vector (riscv_trunc_vector_vl
2398 (wti.Vector (riscv_smax_vl
2399 (wti.Vector (riscv_smin_vl
2400 (wti.Vector wti.RegClass:$rs1),
2401 (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), sminval, (XLenVT srcvalue))),
2402 (wti.Vector undef),(wti.Mask V0), VLOpFrag)),
2403 (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), smaxval, (XLenVT srcvalue))),
2404 (wti.Vector undef), (wti.Mask V0), VLOpFrag)),
2405 (vti.Mask V0), VLOpFrag)),
2406 (!cast<Instruction>("PseudoVNCLIP_WI_"#vti.LMul.MX#"_MASK")
2407 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
2408 (vti.Mask V0), 0, GPR:$vl, vti.Log2SEW, TA_MA)>;
2410 def : Pat<(vti.Vector (riscv_trunc_vector_vl
2411 (wti.Vector (riscv_umin_vl
2412 (wti.Vector wti.RegClass:$rs1),
2413 (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), uminval, (XLenVT srcvalue))),
2414 (wti.Vector undef), (wti.Mask V0), VLOpFrag)),
2415 (vti.Mask V0), VLOpFrag)),
2416 (!cast<Instruction>("PseudoVNCLIPU_WI_"#vti.LMul.MX#"_MASK")
2417 (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
2418 (vti.Mask V0), 0, GPR:$vl, vti.Log2SEW, TA_MA)>;
2422 foreach vtiToWti = AllWidenableIntVectors in
2423 defm : VPatTruncSatClipVL<vtiToWti.Vti, vtiToWti.Wti>;
2425 // 13. Vector Floating-Point Instructions
2427 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
2428 defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fadd_vl, "PseudoVFADD", isSEWAware=1>;
2429 defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fsub_vl, "PseudoVFSUB", isSEWAware=1>;
2430 defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fsub_vl, "PseudoVFRSUB", isSEWAware=1>;
2432 // 13.3. Vector Widening Floating-Point Add/Subtract Instructions
2433 defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwadd_vl, riscv_vfwadd_w_vl,
2434 "PseudoVFWADD", isSEWAware=1>;
2435 defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwsub_vl, riscv_vfwsub_w_vl,
2436 "PseudoVFWSUB", isSEWAware=1>;
2438 // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
2439 defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fmul_vl, "PseudoVFMUL", isSEWAware=1>;
2440 defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fdiv_vl, "PseudoVFDIV", isSEWAware=1>;
2441 defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fdiv_vl, "PseudoVFRDIV", isSEWAware=1>;
2443 // 13.5. Vector Widening Floating-Point Multiply Instructions
2444 defm : VPatBinaryFPWVL_VV_VF_RM<riscv_vfwmul_vl, "PseudoVFWMUL", isSEWAware=1>;
2446 // 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
2447 defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmadd_vl, "PseudoVFMADD">;
2448 defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmsub_vl, "PseudoVFMSUB">;
2449 defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmadd_vl, "PseudoVFNMADD">;
2450 defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmsub_vl, "PseudoVFNMSUB">;
2451 defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmadd_vl_oneuse, "PseudoVFMACC">;
2452 defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmsub_vl_oneuse, "PseudoVFMSAC">;
2453 defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmadd_vl_oneuse, "PseudoVFNMACC">;
2454 defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmsub_vl_oneuse, "PseudoVFNMSAC">;
2456 // 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
2457 defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmadd_vl, "PseudoVFWMACC">;
2458 defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmadd_vl, "PseudoVFWNMACC">;
2459 defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmsub_vl, "PseudoVFWMSAC">;
2460 defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmsub_vl, "PseudoVFWNMSAC">;
2462 // 13.11. Vector Floating-Point MIN/MAX Instructions
2463 defm : VPatBinaryFPVL_VV_VF<riscv_vfmin_vl, "PseudoVFMIN", isSEWAware=1>;
2464 defm : VPatBinaryFPVL_VV_VF<riscv_vfmax_vl, "PseudoVFMAX", isSEWAware=1>;
2466 // 13.13. Vector Floating-Point Compare Instructions
2467 defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETEQ,
2468 "PseudoVMFEQ", "PseudoVMFEQ">;
2469 defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETOEQ,
2470 "PseudoVMFEQ", "PseudoVMFEQ">;
2471 defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETNE,
2472 "PseudoVMFNE", "PseudoVMFNE">;
2473 defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETUNE,
2474 "PseudoVMFNE", "PseudoVMFNE">;
2475 defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLT,
2476 "PseudoVMFLT", "PseudoVMFGT">;
2477 defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLT,
2478 "PseudoVMFLT", "PseudoVMFGT">;
2479 defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLE,
2480 "PseudoVMFLE", "PseudoVMFGE">;
2481 defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLE,
2482 "PseudoVMFLE", "PseudoVMFGE">;
2484 foreach vti = AllFloatVectors in {
2485 let Predicates = GetVTypePredicates<vti>.Predicates in {
2486 // 13.8. Vector Floating-Point Square-Root Instruction
2487 def : Pat<(any_riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0),
2489 (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX # "_E" # vti.SEW # "_MASK")
2490 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
2491 (vti.Mask V0),
2492 // Value to indicate no rounding mode change in
2493 // RISCVInsertReadWriteCSR
2494 FRM_DYN,
2495 GPR:$vl, vti.Log2SEW, TA_MA)>;
2497 // 13.12. Vector Floating-Point Sign-Injection Instructions
2498 def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
2500 (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_E"#vti.SEW#"_MASK")
2501 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
2502 vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
2504 // Handle fneg with VFSGNJN using the same input for both operands.
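// vfsgnjn.vv vd, vs, vs copies the inverted sign of vs onto its own magnitude,
// i.e. vd = -vs.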
2505 def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
2507 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW #"_MASK")
2508 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
2509 vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
2512 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
2513 (vti.Vector vti.RegClass:$rs2),
2514 vti.RegClass:$merge,
2517 (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK")
2518 vti.RegClass:$merge, vti.RegClass:$rs1,
2519 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
2522 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
2523 (riscv_fneg_vl vti.RegClass:$rs2,
2524 (vti.Mask true_mask),
2527 (vti.Mask true_mask),
2529 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW)
2530 (vti.Vector (IMPLICIT_DEF)),
2531 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
2533 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
2534 (SplatFPOp vti.ScalarRegClass:$rs2),
2535 vti.RegClass:$merge,
2538 (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK")
2539 vti.RegClass:$merge, vti.RegClass:$rs1,
2540 vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
2543 // Rounding without exception to implement nearbyint.
2544 def : Pat<(any_riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1),
2545 (vti.Mask V0), VLOpFrag),
2546 (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK")
2547 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
2548 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
2550 // 13.14. Vector Floating-Point Classify Instruction
2551 def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
2552 (vti.Mask V0), VLOpFrag),
2553 (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX #"_MASK")
2554 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
2555 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
2559 foreach fvti = AllFloatVectors in {
2560 // Floating-point vselects:
2561 // 11.15. Vector Integer Merge Instructions
2562 // 13.15. Vector Floating-Point Merge Instruction
2563 defvar ivti = GetIntVTypeInfo<fvti>.Vti;
2564 let Predicates = GetVTypePredicates<ivti>.Predicates in {
2565 def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
2568 fvti.RegClass:$merge,
2570 (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
2571 fvti.RegClass:$merge, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
2572 GPR:$vl, fvti.Log2SEW)>;
2574 def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
2575 (SplatFPOp (SelectFPImm (XLenVT GPR:$imm))),
2577 fvti.RegClass:$merge,
2579 (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
2580 fvti.RegClass:$merge, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask V0),
2581 GPR:$vl, fvti.Log2SEW)>;
2584 def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
2585 (SplatFPOp (fvti.Scalar fpimm0)),
2587 fvti.RegClass:$merge,
2589 (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
2590 fvti.RegClass:$merge, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
2591 GPR:$vl, fvti.Log2SEW)>;
2594 let Predicates = GetVTypePredicates<fvti>.Predicates in {
2595 def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
2596 (SplatFPOp fvti.ScalarRegClass:$rs1),
2598 fvti.RegClass:$merge,
2600 (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
2601 fvti.RegClass:$merge, fvti.RegClass:$rs2,
2602 (fvti.Scalar fvti.ScalarRegClass:$rs1),
2603 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
2607 foreach fvti = !listconcat(AllFloatVectors, AllBFloatVectors) in {
2608 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
2609 GetVTypeScalarPredicates<fvti>.Predicates) in {
2610 // 13.16. Vector Floating-Point Move Instruction
2611 // If we're splatting fpimm0, use vmv.v.x vd, x0.
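// A splat of +0.0 is all zero bits in any supported FP format, so an integer
// splat of zero produces the same value.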
2612 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
2613 fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)),
2614 (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
2615 $passthru, 0, GPR:$vl, fvti.Log2SEW, TU_MU)>;
2616 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
2617 fvti.Vector:$passthru, (fvti.Scalar (SelectFPImm (XLenVT GPR:$imm))), VLOpFrag)),
2618 (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX)
2619 $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW, TU_MU)>;
2621 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
2622 fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
2623 (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
2625 $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2),
2626 GPR:$vl, fvti.Log2SEW, TU_MU)>;
2630 // 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
2631 defm : VPatConvertFP2IVL_V_RM<riscv_vfcvt_xu_f_vl, "PseudoVFCVT_XU_F_V">;
2632 defm : VPatConvertFP2IVL_V_RM<riscv_vfcvt_x_f_vl, "PseudoVFCVT_X_F_V">;
2633 defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFCVT_RM_XU_F_V">;
2634 defm : VPatConvertFP2I_RM_VL_V<any_riscv_vfcvt_rm_x_f_vl, "PseudoVFCVT_RM_X_F_V">;
2636 defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFCVT_RTZ_XU_F_V">;
2637 defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFCVT_RTZ_X_F_V">;
2639 defm : VPatConvertI2FPVL_V_RM<any_riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">;
2640 defm : VPatConvertI2FPVL_V_RM<any_riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">;
2642 defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFCVT_RM_F_XU_V">;
2643 defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFCVT_RM_F_X_V">;
2645 // 13.18. Widening Floating-Point/Integer Type-Convert Instructions
2646 defm : VPatWConvertFP2IVL_V_RM<riscv_vfcvt_xu_f_vl, "PseudoVFWCVT_XU_F_V">;
2647 defm : VPatWConvertFP2IVL_V_RM<riscv_vfcvt_x_f_vl, "PseudoVFWCVT_X_F_V">;
2648 defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFWCVT_RM_XU_F_V">;
2649 defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFWCVT_RM_X_F_V">;
2651 defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFWCVT_RTZ_XU_F_V">;
2652 defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFWCVT_RTZ_X_F_V">;
2654 defm : VPatWConvertI2FPVL_V<any_riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">;
2655 defm : VPatWConvertI2FPVL_V<any_riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">;
2657 foreach fvtiToFWti = AllWidenableFloatVectors in {
2658 defvar fvti = fvtiToFWti.Vti;
2659 defvar fwti = fvtiToFWti.Wti;
2660 let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
2661 !listconcat(GetVTypePredicates<fvti>.Predicates,
2662 GetVTypePredicates<fwti>.Predicates)) in
2663 def : Pat<(fwti.Vector (any_riscv_fpextend_vl
2664 (fvti.Vector fvti.RegClass:$rs1),
2667 (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
2668 (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
2670 GPR:$vl, fvti.Log2SEW, TA_MA)>;
2673 foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
2674 defvar fvti = fvtiToFWti.Vti;
2675 defvar fwti = fvtiToFWti.Wti;
2676 let Predicates = [HasVInstructionsBF16] in
2677 def : Pat<(fwti.Vector (any_riscv_fpextend_vl
2678 (fvti.Vector fvti.RegClass:$rs1),
2681 (!cast<Instruction>("PseudoVFWCVTBF16_F_F_V_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
2682 (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
2684 GPR:$vl, fvti.Log2SEW, TA_MA)>;
2687 // 13.19 Narrowing Floating-Point/Integer Type-Convert Instructions
2688 defm : VPatNConvertFP2IVL_W_RM<riscv_vfcvt_xu_f_vl, "PseudoVFNCVT_XU_F_W">;
2689 defm : VPatNConvertFP2IVL_W_RM<riscv_vfcvt_x_f_vl, "PseudoVFNCVT_X_F_W">;
2690 defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_xu_f_vl, "PseudoVFNCVT_RM_XU_F_W">;
2691 defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_x_f_vl, "PseudoVFNCVT_RM_X_F_W">;
2693 defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFNCVT_RTZ_XU_F_W">;
2694 defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFNCVT_RTZ_X_F_W">;
2696 defm : VPatNConvertI2FPVL_W_RM<any_riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">;
2697 defm : VPatNConvertI2FPVL_W_RM<any_riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">;
2699 defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_xu_vl, "PseudoVFNCVT_RM_F_XU_W">;
2700 defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_x_vl, "PseudoVFNCVT_RM_F_X_W">;
2702 foreach fvtiToFWti = AllWidenableFloatVectors in {
2703 defvar fvti = fvtiToFWti.Vti;
2704 defvar fwti = fvtiToFWti.Wti;
2705 // Define vfncvt.f.f.w for f16 when Zvfhmin is enabled.
2706 let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
2707 !listconcat(GetVTypePredicates<fvti>.Predicates,
2708 GetVTypePredicates<fwti>.Predicates)) in {
2709 def : Pat<(fvti.Vector (any_riscv_fpround_vl
2710 (fwti.Vector fwti.RegClass:$rs1),
2711 (fwti.Mask V0), VLOpFrag)),
2712 (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
2713 (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
2714 (fwti.Mask V0),
2715 // Value to indicate no rounding mode change in
2716 // RISCVInsertReadWriteCSR
2717 FRM_DYN,
2718 GPR:$vl, fvti.Log2SEW, TA_MA)>;
2720 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
2721 GetVTypePredicates<fwti>.Predicates) in
2722 def : Pat<(fvti.Vector (any_riscv_fncvt_rod_vl
2723 (fwti.Vector fwti.RegClass:$rs1),
2724 (fwti.Mask V0), VLOpFrag)),
2725 (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
2726 (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
2727 (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
2731 foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
2732 defvar fvti = fvtiToFWti.Vti;
2733 defvar fwti = fvtiToFWti.Wti;
2734 let Predicates = [HasVInstructionsBF16] in
2735 def : Pat<(fvti.Vector (any_riscv_fpround_vl
2736 (fwti.Vector fwti.RegClass:$rs1),
2737 (fwti.Mask V0), VLOpFrag)),
2738 (!cast<Instruction>("PseudoVFNCVTBF16_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
2739 (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
2740 (fwti.Mask V0),
2741 // Value to indicate no rounding mode change in
2742 // RISCVInsertReadWriteCSR
2743 FRM_DYN,
2744 GPR:$vl, fvti.Log2SEW, TA_MA)>;
2747 // 14. Vector Reduction Operations
2749 // 14.1. Vector Single-Width Integer Reduction Instructions
2750 defm : VPatReductionVL<rvv_vecreduce_ADD_vl, "PseudoVREDSUM", is_float=0>;
2751 defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", is_float=0>;
2752 defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", is_float=0>;
2753 defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", is_float=0>;
2754 defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", is_float=0>;
2755 defm : VPatReductionVL<rvv_vecreduce_AND_vl, "PseudoVREDAND", is_float=0>;
2756 defm : VPatReductionVL<rvv_vecreduce_OR_vl, "PseudoVREDOR", is_float=0>;
2757 defm : VPatReductionVL<rvv_vecreduce_XOR_vl, "PseudoVREDXOR", is_float=0>;
2759 // 14.2. Vector Widening Integer Reduction Instructions
2760 defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, anyext_oneuse, "PseudoVWREDSUMU", is_float=0>;
2761 defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, zext_oneuse, "PseudoVWREDSUMU", is_float=0>;
2762 defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_zext_vl_oneuse, "PseudoVWREDSUMU", is_float=0>;
2763 defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, sext_oneuse, "PseudoVWREDSUM", is_float=0>;
2764 defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_sext_vl_oneuse, "PseudoVWREDSUM", is_float=0>;
2766 // 14.3. Vector Single-Width Floating-Point Reduction Instructions
2767 defm : VPatReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", is_float=1>;
2768 defm : VPatReductionVL_RM<rvv_vecreduce_FADD_vl, "PseudoVFREDUSUM", is_float=1>;
2769 defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", is_float=1>;
2770 defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", is_float=1>;
2772 // 14.4. Vector Widening Floating-Point Reduction Instructions
2773 defm : VPatWidenReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, fpext_oneuse,
2774 "PseudoVFWREDOSUM", is_float=1>;
2775 defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_SEQ_FADD_vl,
2776 riscv_fpextend_vl_oneuse,
2777 "PseudoVFWREDOSUM", is_float=1>;
2778 defm : VPatWidenReductionVL_RM<rvv_vecreduce_FADD_vl, fpext_oneuse,
2779 "PseudoVFWREDUSUM", is_float=1>;
2780 defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_FADD_vl,
2781 riscv_fpextend_vl_oneuse,
2782 "PseudoVFWREDUSUM", is_float=1>;
2784 // 15. Vector Mask Instructions
2786 foreach mti = AllMasks in {
2787 let Predicates = [HasVInstructions] in {
2788 // 15.1 Vector Mask-Register Logical Instructions
2789 def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)),
2790 (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
2791 def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)),
2792 (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
2794 def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
2795 (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
2796 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
2797 def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
2798 (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
2799 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
2800 def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
2801 (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
2802 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
2804 def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
2805 (riscv_vmnot_vl VR:$rs2, VLOpFrag),
2807 (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX)
2808 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
2809 def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
2810 (riscv_vmnot_vl VR:$rs2, VLOpFrag),
2812 (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX)
2813 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
2814 // XOR is associative so we need 2 patterns for VMXNOR.
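// not(a ^ b) == (a ^ ~0) ^ b == not(a) ^ b, so both forms below map to vmxnor.mm.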
    def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
                                                        VLOpFrag),
                                        VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
                                                        VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
                                                       VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
                                                        VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    // Match the not idiom to the vmnot.m pseudo.
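    // Per the V spec, vmnot.m vd, vs is an alias for vmnand.mm vd, vs, vs,
    // which is why the same source register is passed to both operands of
    // PseudoVMNAND below.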
    def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
                   VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;

    // 15.2 Vector count population in mask vcpop.m
    def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                      VLOpFrag)),
              (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX)
                   VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                      VLOpFrag)),
              (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK")
                   VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;

    // 15.3 vfirst find-first-set mask bit
    def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX)
                   VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX # "_MASK")
                   VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
  }
}

// 16. Vector Permutation Instructions

// 16.1. Integer Scalar Move Instructions
foreach vti = NoGroupIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge),
                                            vti.ScalarRegClass:$rs1,
                                            VLOpFrag)),
              (PseudoVMV_S_X $merge, vti.ScalarRegClass:$rs1, GPR:$vl,
                             vti.Log2SEW)>;
  }
}

// 16.4. Vector Register Gather Instruction
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                                vti.RegClass:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                                uimm5:$imm,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }

  // emul = lmul * 16 / sew
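  // LMUL is kept in eighths here (octuple_lmul == LMUL * 8), so multiplying by
  // 16 and shifting right by Log2SEW yields EMUL * 8 for the 16-bit index
  // operand of vrgatherei16. For example, SEW=64 at LMUL=1 gives
  // octuple_emul = (8 * 16) >> 6 == 2, i.e. EMUL=1/4. The 1..64 guard below
  // keeps the index EMUL within the legal MF8..M8 range.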
  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Vector
               (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                         (ivti.Vector ivti.RegClass:$rs1),
                                         vti.RegClass:$merge,
                                         (vti.Mask V0),
                                         VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 16.2. Floating-Point Scalar Move Instructions
foreach vti = NoGroupFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             (vti.Scalar (fpimm0)),
                                             VLOpFrag)),
              (PseudoVMV_S_X $merge, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             (vti.Scalar (SelectFPImm (XLenVT GPR:$imm))),
                                             VLOpFrag)),
              (PseudoVMV_S_X $merge, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
  }
}

foreach vti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             vti.ScalarRegClass:$rs1,
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   vti.RegClass:$merge,
                   (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
  }
  defvar ivti = GetIntVTypeInfo<vti>.Vti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<ivti>.Predicates) in {
    def : Pat<(vti.Vector
               (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                     (ivti.Vector vti.RegClass:$rs1),
                                     vti.RegClass:$merge,
                                     (vti.Mask V0),
                                     VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                                vti.RegClass:$merge,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector
               (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                     uimm5:$imm,
                                     vti.RegClass:$merge,
                                     (vti.Mask V0),
                                     VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
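
  // emul = lmul * 16 / sew, computed in eighths exactly as for the integer
  // vrgatherei16 patterns above.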
  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(vti.Vector
               (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                         (ivti.Vector ivti.RegClass:$rs1),
                                         vti.RegClass:$merge,
                                         (vti.Mask V0),
                                         VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

//===----------------------------------------------------------------------===//
// Miscellaneous RISCVISD SDNodes
//===----------------------------------------------------------------------===//

def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2,
                          [SDTCisVec<0>, SDTCVecEltisVT<1, i1>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>;

def SDTRVVSlide : SDTypeProfile<1, 6, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>,
  SDTCisVT<6, XLenVT>
]>;
def SDTRVVSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>,
  SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;
def SDTRVVFSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisFP<0>,
  SDTCisEltOfVec<3, 0>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;

def riscv_slideup_vl     : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>;
def riscv_slide1up_vl    : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>;
def riscv_slidedown_vl   : SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>;
def riscv_slide1down_vl  : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>;
def riscv_fslide1up_vl   : SDNode<"RISCVISD::VFSLIDE1UP_VL", SDTRVVFSlide1, []>;
def riscv_fslide1down_vl : SDNode<"RISCVISD::VFSLIDE1DOWN_VL", SDTRVVFSlide1, []>;

foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask V0),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX#"_MASK")
                  (vti.Vector (IMPLICIT_DEF)), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                  TAIL_AGNOSTIC)>;
  }
}

defm : VPatSlideVL_VX_VI<riscv_slideup_vl, "PseudoVSLIDEUP">;
defm : VPatSlideVL_VX_VI<riscv_slidedown_vl, "PseudoVSLIDEDOWN">;
defm : VPatSlide1VL_VX<riscv_slide1up_vl, "PseudoVSLIDE1UP">;
defm : VPatSlide1VL_VF<riscv_fslide1up_vl, "PseudoVFSLIDE1UP">;
defm : VPatSlide1VL_VX<riscv_slide1down_vl, "PseudoVSLIDE1DOWN">;
defm : VPatSlide1VL_VF<riscv_fslide1down_vl, "PseudoVFSLIDE1DOWN">;