1 //===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 /// This file contains the required infrastructure and VL patterns to
10 /// support code generation for the standard 'V' (Vector) extension, version
11 /// 0.10. This version is still experimental as the 'V' extension hasn't been
14 /// This file is included from and depends upon RISCVInstrInfoVPseudos.td
16 /// Note: the patterns for RVV intrinsics are found in
17 /// RISCVInstrInfoVPseudos.td.
19 //===----------------------------------------------------------------------===//
21 //===----------------------------------------------------------------------===//
22 // Helpers to define the VL patterns.
23 //===----------------------------------------------------------------------===//
// Type profiles for the VL-predicated unit-stride memory nodes:
//   load:  1 result (vector), operands (base_ptr, vl)
//   store: 0 results, operands (stored_vec, base_ptr, vl)
def SDT_RISCVVLE_VL : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisPtrTy<1>,
                                           SDTCisVT<2, XLenVT>]>;
def SDT_RISCVVSE_VL : SDTypeProfile<0, 3, [SDTCisVec<0>, SDTCisPtrTy<1>,
                                           SDTCisVT<2, XLenVT>]>;
// Integer binary op profile: (result, lhs, rhs, mask, vl). Result and both
// value operands share the same integer vector type; the mask is an i1 vector
// with matching element count; vl is XLenVT.
// NOTE(review): restored the SDTCisSameAs<0, 2> constraint lines that were
// dropped in a bad merge — verify against upstream.
def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
                                                SDTCisSameAs<0, 2>,
                                                SDTCisVec<0>, SDTCisInt<0>,
                                                SDTCVecEltisVT<3, i1>,
                                                SDTCisSameNumEltsAs<0, 3>,
                                                SDTCisVT<4, XLenVT>]>;

// FP unary op profile: (result, src, mask, vl).
def SDT_RISCVFPUnOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                              SDTCisVec<0>, SDTCisFP<0>,
                                              SDTCVecEltisVT<2, i1>,
                                              SDTCisSameNumEltsAs<0, 2>,
                                              SDTCisVT<3, XLenVT>]>;

// FP binary op profile: (result, lhs, rhs, mask, vl).
def SDT_RISCVFPBinOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisVec<0>, SDTCisFP<0>,
                                               SDTCVecEltisVT<3, i1>,
                                               SDTCisSameNumEltsAs<0, 3>,
                                               SDTCisVT<4, XLenVT>]>;
// Splat and scalar-insert nodes. VMV_V_X/VFMV_V_F splat a scalar into every
// element; VMV_S_X/VFMV_S_F insert a scalar into element 0 of the passthru
// (operand 1). The trailing operand is always the XLenVT VL.
// NOTE(review): restored the scalar-operand constraint lines lost in the
// merge — verify against upstream.
def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL",
                              SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<0>,
                                                   SDTCisVT<1, XLenVT>,
                                                   SDTCisVT<2, XLenVT>]>>;
def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL",
                               SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisFP<0>,
                                                    SDTCisEltOfVec<1, 0>,
                                                    SDTCisVT<2, XLenVT>]>>;
def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL",
                              SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                   SDTCisInt<0>,
                                                   SDTCisVT<2, XLenVT>,
                                                   SDTCisVT<3, XLenVT>]>>;
def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL",
                               SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                    SDTCisFP<0>,
                                                    SDTCisEltOfVec<2, 0>,
                                                    SDTCisVT<3, XLenVT>]>>;
// VL-predicated unit-stride load/store nodes; both are chained and carry a
// MachineMemOperand.
def riscv_vle_vl : SDNode<"RISCVISD::VLE_VL", SDT_RISCVVLE_VL,
                          [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def riscv_vse_vl : SDNode<"RISCVISD::VSE_VL", SDT_RISCVVSE_VL,
                          [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
// VL-predicated integer binary ops; operands per SDT_RISCVIntBinOp_VL are
// (lhs, rhs, mask, vl).
def riscv_add_vl : SDNode<"RISCVISD::ADD_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_sub_vl : SDNode<"RISCVISD::SUB_VL", SDT_RISCVIntBinOp_VL>;
def riscv_mul_vl : SDNode<"RISCVISD::MUL_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_and_vl : SDNode<"RISCVISD::AND_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_or_vl : SDNode<"RISCVISD::OR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_xor_vl : SDNode<"RISCVISD::XOR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_sdiv_vl : SDNode<"RISCVISD::SDIV_VL", SDT_RISCVIntBinOp_VL>;
def riscv_srem_vl : SDNode<"RISCVISD::SREM_VL", SDT_RISCVIntBinOp_VL>;
def riscv_udiv_vl : SDNode<"RISCVISD::UDIV_VL", SDT_RISCVIntBinOp_VL>;
def riscv_urem_vl : SDNode<"RISCVISD::UREM_VL", SDT_RISCVIntBinOp_VL>;
def riscv_shl_vl : SDNode<"RISCVISD::SHL_VL", SDT_RISCVIntBinOp_VL>;
def riscv_sra_vl : SDNode<"RISCVISD::SRA_VL", SDT_RISCVIntBinOp_VL>;
def riscv_srl_vl : SDNode<"RISCVISD::SRL_VL", SDT_RISCVIntBinOp_VL>;
def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL>;
def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL>;
def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL>;
def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL>;

// Saturating add/subtract.
def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>;
// VL-predicated floating-point ops; binary ops take (lhs, rhs, mask, vl),
// unary ops take (src, mask, vl).
def riscv_fadd_vl : SDNode<"RISCVISD::FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_fsub_vl : SDNode<"RISCVISD::FSUB_VL", SDT_RISCVFPBinOp_VL>;
def riscv_fmul_vl : SDNode<"RISCVISD::FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_fdiv_vl : SDNode<"RISCVISD::FDIV_VL", SDT_RISCVFPBinOp_VL>;
def riscv_fneg_vl : SDNode<"RISCVISD::FNEG_VL", SDT_RISCVFPUnOp_VL>;
def riscv_fabs_vl : SDNode<"RISCVISD::FABS_VL", SDT_RISCVFPUnOp_VL>;
def riscv_fsqrt_vl : SDNode<"RISCVISD::FSQRT_VL", SDT_RISCVFPUnOp_VL>;
def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVFPBinOp_VL>;
def riscv_fminnum_vl : SDNode<"RISCVISD::FMINNUM_VL", SDT_RISCVFPBinOp_VL>;
def riscv_fmaxnum_vl : SDNode<"RISCVISD::FMAXNUM_VL", SDT_RISCVFPBinOp_VL>;
// FMA profile: (result, op0, op1, op2, mask, vl); the three value operands
// all share the result's FP vector type.
// NOTE(review): restored SDTCisSameAs<0, 2>/<0, 3> lines lost in the merge.
def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                              SDTCisSameAs<0, 2>,
                                              SDTCisSameAs<0, 3>,
                                              SDTCisVec<0>, SDTCisFP<0>,
                                              SDTCVecEltisVT<4, i1>,
                                              SDTCisSameNumEltsAs<0, 4>,
                                              SDTCisVT<5, XLenVT>]>;
def riscv_fma_vl : SDNode<"RISCVISD::FMA_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
// FP narrowing convert: result elements are smaller than source elements.
def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
// FP widening convert: result elements are larger than source elements.
def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;

def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>;
def riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>;
// Narrowing convert with round-toward-odd (used for two-step f64->f16).
def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>;
// FP <-> integer convert profiles: (result, src, mask, vl) with matching
// element counts.
def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [
  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;

def riscv_fp_to_sint_vl : SDNode<"RISCVISD::FP_TO_SINT_VL", SDT_RISCVFP2IOp_VL>;
def riscv_fp_to_uint_vl : SDNode<"RISCVISD::FP_TO_UINT_VL", SDT_RISCVFP2IOp_VL>;
def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;
def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;
// Vector compare producing a mask: (mask_result, lhs, rhs, cc, mask, vl).
// NOTE(review): the SDTCisVec<1>/SDTCisSameAs<1, 2>/SDTCisSameAs<0, 4>
// constraint lines were reconstructed — verify against upstream.
def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL",
                            SDTypeProfile<1, 5, [SDTCVecEltisVT<0, i1>,
                                                 SDTCisVec<1>,
                                                 SDTCisSameNumEltsAs<0, 1>,
                                                 SDTCisSameAs<1, 2>,
                                                 SDTCisVT<3, OtherVT>,
                                                 SDTCisSameAs<0, 4>,
                                                 SDTCisVT<5, XLenVT>]>>;
// Gather nodes: (result, src, index, mask, vl).
// VX takes a scalar XLenVT index; VV an integer index vector of the same
// geometry; EI16 always uses an i16 index vector.
// NOTE(review): restored the SDTCisSameAs<0, 1>/index-type constraint lines
// lost in the merge.
def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL",
                                  SDTypeProfile<1, 4, [SDTCisVec<0>,
                                                       SDTCisSameAs<0, 1>,
                                                       SDTCisVT<2, XLenVT>,
                                                       SDTCVecEltisVT<3, i1>,
                                                       SDTCisSameNumEltsAs<0, 3>,
                                                       SDTCisVT<4, XLenVT>]>>;
def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL",
                                  SDTypeProfile<1, 4, [SDTCisVec<0>,
                                                       SDTCisSameAs<0, 1>,
                                                       SDTCisInt<2>,
                                                       SDTCisSameNumEltsAs<0, 2>,
                                                       SDTCisSameSizeAs<0, 2>,
                                                       SDTCVecEltisVT<3, i1>,
                                                       SDTCisSameNumEltsAs<0, 3>,
                                                       SDTCisVT<4, XLenVT>]>>;
def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL",
                                      SDTypeProfile<1, 4, [SDTCisVec<0>,
                                                           SDTCisSameAs<0, 1>,
                                                           SDTCisInt<2>,
                                                           SDTCVecEltisVT<2, i16>,
                                                           SDTCisSameNumEltsAs<0, 2>,
                                                           SDTCVecEltisVT<3, i1>,
                                                           SDTCisSameNumEltsAs<0, 3>,
                                                           SDTCisVT<4, XLenVT>]>>;
// Masked select: (result, cond_mask, true_val, false_val, vl).
// NOTE(review): restored the operand-equality constraint lines lost in the
// merge.
def riscv_vselect_vl : SDNode<"RISCVISD::VSELECT_VL",
                              SDTypeProfile<1, 4, [SDTCisVec<0>,
                                                   SDTCisVec<1>,
                                                   SDTCisSameNumEltsAs<0, 1>,
                                                   SDTCVecEltisVT<1, i1>,
                                                   SDTCisSameAs<0, 2>,
                                                   SDTCisSameAs<2, 3>,
                                                   SDTCisVT<4, XLenVT>]>>;
// Mask-register logical ops: (mask_result, lhs, rhs, vl); all value types are
// i1 vectors of the same type.
// NOTE(review): restored the SDTCisSameAs<0, 2> line lost in the merge.
def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                 SDTCisSameAs<0, 2>,
                                                 SDTCVecEltisVT<0, i1>,
                                                 SDTCisVT<3, XLenVT>]>;
def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
def riscv_vmor_vl : SDNode<"RISCVISD::VMOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
// Matches a VMSET (all-ones) mask with any VL, i.e. an unmasked operation.
// (riscv_vmset_vl is presumably defined elsewhere in the includes — not
// visible in this file.)
def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>;

// Mask complement expressed as xor with the all-ones mask.
def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl),
                             (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>;
// Mask population count producing an XLenVT scalar: (result, src, mask, vl).
def riscv_vpopc_vl : SDNode<"RISCVISD::VPOPC_VL",
                            SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
                                                 SDTCisVec<1>, SDTCisInt<1>,
                                                 SDTCVecEltisVT<2, i1>,
                                                 SDTCisSameNumEltsAs<1, 2>,
                                                 SDTCisVT<3, XLenVT>]>>;
// Sign/zero extension profile: (result, src, mask, vl); all three vectors
// have the same element count.
def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>,
                                               SDTCisSameNumEltsAs<0, 1>,
                                               SDTCisSameNumEltsAs<1, 2>,
                                               SDTCVecEltisVT<2, i1>,
                                               SDTCisVT<3, XLenVT>]>;
def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>;
def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>;
// Vector truncate: (result, src, mask, vl).
// NOTE(review): restored the SDTCisSameNumEltsAs<0, 1> line lost in the merge.
def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL",
                                   SDTypeProfile<1, 3, [SDTCisVec<0>,
                                                        SDTCisSameNumEltsAs<0, 1>,
                                                        SDTCisSameNumEltsAs<0, 2>,
                                                        SDTCVecEltisVT<2, i1>,
                                                        SDTCisVT<3, XLenVT>]>>;

// Widening multiply: (wide_result, narrow_lhs, narrow_rhs, mask, vl).
// NOTE(review): restored the SDTCisSameAs<1, 2> line lost in the merge.
def SDT_RISCVVWMUL_VL : SDTypeProfile<1, 4, [SDTCisVec<0>,
                                             SDTCisSameNumEltsAs<0, 1>,
                                             SDTCisSameAs<1, 2>,
                                             SDTCisSameNumEltsAs<1, 3>,
                                             SDTCVecEltisVT<3, i1>,
                                             SDTCisVT<4, XLenVT>]>;
def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWMUL_VL, [SDNPCommutative]>;
def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWMUL_VL, [SDNPCommutative]>;
// Reduction profile: (m1_result, src_vec, m1_scalar_vec, mask, vl).
def SDTRVVVecReduce : SDTypeProfile<1, 4, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<0, 2>, SDTCVecEltisVT<3, i1>,
  SDTCisSameNumEltsAs<1, 3>, SDTCisVT<4, XLenVT>
]>;

// Single-use wrappers around the multiply nodes so multiply-add fusion only
// fires when the product has no other consumers.
def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
                                  (riscv_mul_vl node:$A, node:$B, node:$C,
                                                node:$D), [{
  return N->hasOneUse();
}]>;

def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
                                    (riscv_vwmul_vl node:$A, node:$B, node:$C,
                                                    node:$D), [{
  return N->hasOneUse();
}]>;

def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
                                     (riscv_vwmulu_vl node:$A, node:$B, node:$C,
                                                      node:$D), [{
  return N->hasOneUse();
}]>;
// Instantiate one VL-predicated reduction node per supported reduction kind.
foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
                "FADD", "SEQ_FADD", "FMIN", "FMAX"] in
  def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>;
// Ignore the vl operand.
// Matches an FP splat regardless of the VL it was created with, so the splat
// can be folded into a .vf instruction form.
def SplatFPOp : PatFrag<(ops node:$op),
                        (riscv_vfmv_v_f_vl node:$op, srcvalue)>;

// Per-SEW simm5 selectors (see selectRVVSimm5 in the ISel C++ code).
def sew8simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<8>", []>;
def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>;
def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>;
def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>;
// Unmasked and masked vector-vector patterns for a VL binary op.
// NOTE(review): the parameter list and result-operand lines were
// reconstructed from the instantiations below — verify against upstream.
multiclass VPatBinaryVL_VV<SDNode vop,
                           string instruction_name,
                           ValueType result_type,
                           ValueType op_type,
                           ValueType mask_type,
                           int sew,
                           LMULInfo vlmul,
                           VReg result_reg_class,
                           VReg op_reg_class> {
  def : Pat<(result_type (vop
                          (op_type op_reg_class:$rs1),
                          (op_type op_reg_class:$rs2),
                          (mask_type true_mask),
                          VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
                 op_reg_class:$rs1,
                 op_reg_class:$rs2,
                 GPR:$vl, sew)>;
  def : Pat<(result_type (vop
                          (op_type op_reg_class:$rs1),
                          (op_type op_reg_class:$rs2),
                          (mask_type VMV0:$vm),
                          VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX#"_MASK")
                 (result_type (IMPLICIT_DEF)),
                 op_reg_class:$rs1,
                 op_reg_class:$rs2,
                 VMV0:$vm, GPR:$vl, sew)>;
}
// Unmasked and masked vector-scalar/vector-immediate patterns; the splatted
// operand is matched by SplatPatKind and emitted as xop_kind.
// NOTE(review): the parameter list and result-operand lines were
// reconstructed from the instantiations below — verify against upstream.
multiclass VPatBinaryVL_XI<SDNode vop,
                           string instruction_name,
                           string suffix,
                           ValueType result_type,
                           ValueType vop_type,
                           ValueType mask_type,
                           int sew,
                           LMULInfo vlmul,
                           VReg result_reg_class,
                           VReg vop_reg_class,
                           ComplexPattern SplatPatKind,
                           DAGOperand xop_kind> {
  def : Pat<(result_type (vop
                          (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))),
                          (mask_type true_mask),
                          VLOpFrag)),
            (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX)
                 vop_reg_class:$rs1,
                 xop_kind:$rs2,
                 GPR:$vl, sew)>;
  def : Pat<(result_type (vop
                          (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))),
                          (mask_type VMV0:$vm),
                          VLOpFrag)),
            (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX#"_MASK")
                 (result_type (IMPLICIT_DEF)),
                 vop_reg_class:$rs1,
                 xop_kind:$rs2,
                 VMV0:$vm, GPR:$vl, sew)>;
}
// .vv + .vx patterns over all integer vector types.
multiclass VPatBinaryVL_VV_VX<SDNode vop, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defm : VPatBinaryVL_VV<vop, instruction_name,
                           vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, vti.RegClass, vti.RegClass>;
    defm : VPatBinaryVL_XI<vop, instruction_name, "VX",
                           vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, vti.RegClass, vti.RegClass,
                           SplatPat, GPR>;
  }
}

// Adds .vi patterns on top of VPatBinaryVL_VV_VX; ImmType defaults to simm5
// (shifts override it with uimm5).
multiclass VPatBinaryVL_VV_VX_VI<SDNode vop, string instruction_name,
                                 Operand ImmType = simm5>
    : VPatBinaryVL_VV_VX<vop, instruction_name> {
  foreach vti = AllIntegerVectors in {
    defm : VPatBinaryVL_XI<vop, instruction_name, "VI",
                           vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, vti.RegClass, vti.RegClass,
                           !cast<ComplexPattern>(SplatPat#_#ImmType),
                           ImmType>;
  }
}

// Widening .vv + .vx patterns: narrow sources, wide result.
multiclass VPatBinaryWVL_VV_VX<SDNode vop, string instruction_name> {
  foreach VtiToWti = AllWidenableIntVectors in {
    defvar vti = VtiToWti.Vti;
    defvar wti = VtiToWti.Wti;
    defm : VPatBinaryVL_VV<vop, instruction_name,
                           wti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, wti.RegClass, vti.RegClass>;
    defm : VPatBinaryVL_XI<vop, instruction_name, "VX",
                           wti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, wti.RegClass, vti.RegClass,
                           SplatPat, GPR>;
  }
}
// Vector-scalar FP pattern: the splatted FP scalar becomes the .vf operand.
// NOTE(review): the parameter list lines were reconstructed from the
// instantiations below — verify against upstream.
class VPatBinaryVL_VF<SDNode vop,
                      string instruction_name,
                      ValueType result_type,
                      ValueType vop_type,
                      ValueType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      VReg vop_reg_class,
                      RegisterClass scalar_reg_class> :
    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                          (vop_type (SplatFPOp scalar_reg_class:$rs2)),
                          (mask_type true_mask),
                          VLOpFrag)),
        (!cast<Instruction>(instruction_name#"_"#vlmul.MX)
             vop_reg_class:$rs1,
             scalar_reg_class:$rs2,
             GPR:$vl, sew)>;

// .vv + .vf patterns over all FP vector types.
multiclass VPatBinaryFPVL_VV_VF<SDNode vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defm : VPatBinaryVL_VV<vop, instruction_name,
                           vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, vti.RegClass, vti.RegClass>;
    def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
                          vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                          vti.LMul, vti.RegClass, vti.RegClass,
                          vti.ScalarRegClass>;
  }
}

// Reversed-operand form: the splatted scalar is the LHS of the SDNode.
multiclass VPatBinaryFPVL_R_VF<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in
    def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
                                fvti.RegClass:$rs1,
                                (fvti.Mask true_mask),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                   fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                   GPR:$vl, fvti.Log2SEW)>;
}
// Vector-vector integer compare pattern for one (vti, pseudo, cc) triple.
multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
                                 CondCode cc> {
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                      vti.RegClass:$rs2, cc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl,
                 vti.Log2SEW)>;
}

// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped.
multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_name,
                                           CondCode cc, CondCode invcc> :
  VPatIntegerSetCCVL_VV<vti, instruction_name, cc> {
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2),
                                      vti.RegClass:$rs1, invcc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl,
                 vti.Log2SEW)>;
}

// Vector-scalar compares; the invcc form matches the splat on the LHS.
multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name,
                                           CondCode cc, CondCode invcc> {
  defvar instruction = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX);
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                      (SplatPat (XLenVT GPR:$rs2)), cc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
                                      (vti.Vector vti.RegClass:$rs1), invcc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
}

// Vector-immediate compares, likewise swappable.
multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name,
                                           CondCode cc, CondCode invcc> {
  defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                      (SplatPat_simm5 simm5:$rs2), cc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (instruction vti.RegClass:$rs1, XLenVT:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
                                      (vti.Vector vti.RegClass:$rs1), invcc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (instruction vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.Log2SEW)>;
}

// Matches splat(imm+1) under cc and emits the VI form with the immediate
// decremented (e.g. x < (c+1)  ->  x <= c).
multiclass VPatIntegerSetCCVL_VIPlus1<VTypeInfo vti, string instruction_name,
                                      CondCode cc, ComplexPattern splatpat_kind> {
  defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                      (splatpat_kind simm5:$rs2), cc,
                                      (vti.Mask true_mask),
                                      VLOpFrag)),
            (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
                         GPR:$vl, vti.Log2SEW)>;
}
// FP compares: .vv, .vf, and the scalar-on-the-left form which selects the
// swapped-operand instruction.
multiclass VPatFPSetCCVL_VV_VF_FV<CondCode cc,
                                  string inst_name,
                                  string swapped_op_inst_name> {
  foreach fvti = AllFloatVectors in {
    def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1),
                                         fvti.RegClass:$rs2,
                                         cc,
                                         (fvti.Mask true_mask),
                                         VLOpFrag)),
              (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
                  fvti.RegClass:$rs1, fvti.RegClass:$rs2, GPR:$vl, fvti.Log2SEW)>;
    def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1),
                                         (SplatFPOp fvti.ScalarRegClass:$rs2),
                                         cc,
                                         (fvti.Mask true_mask),
                                         VLOpFrag)),
              (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                  fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                  GPR:$vl, fvti.Log2SEW)>;
    def : Pat<(fvti.Mask (riscv_setcc_vl (SplatFPOp fvti.ScalarRegClass:$rs2),
                                         (fvti.Vector fvti.RegClass:$rs1),
                                         cc,
                                         (fvti.Mask true_mask),
                                         VLOpFrag)),
              (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                  fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                  GPR:$vl, fvti.Log2SEW)>;
  }
}
// Unmasked extension patterns from a fractional-LMUL source.
multiclass VPatExtendSDNode_V_VL<SDNode vop, string inst_name, string suffix,
                                 list <VTypeInfoToFraction> fraction_list> {
  foreach vtiTofti = fraction_list in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2),
                               true_mask, VLOpFrag)),
              (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                  fti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
  }
}

// Same-width FP -> int conversion patterns.
multiclass VPatConvertFP2ISDNode_V_VL<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Mask true_mask),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                  fvti.RegClass:$rs1, GPR:$vl, ivti.Log2SEW)>;
  }
}

// Same-width int -> FP conversion patterns.
multiclass VPatConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
                                (ivti.Mask true_mask),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  ivti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
  }
}

// Widening FP -> int conversion patterns.
multiclass VPatWConvertFP2ISDNode_V_VL<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Mask true_mask),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
  }
}

// Widening int -> FP conversion patterns.
multiclass VPatWConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar ivti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
                                (ivti.Mask true_mask),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                  ivti.RegClass:$rs1, GPR:$vl, ivti.Log2SEW)>;
  }
}

// Narrowing FP -> int conversion patterns.
multiclass VPatNConvertFP2ISDNode_V_VL<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask true_mask),
                               VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                  fwti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
  }
}

// Narrowing int -> FP conversion patterns.
multiclass VPatNConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
                                (iwti.Mask true_mask),
                                VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                  iwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
  }
}
// Reduction patterns: source vector + M1 scalar vector in, M1 result out.
multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
  foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
    defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
    def: Pat<(vti_m1.Vector (vop (vti.Vector vti.RegClass:$rs1), VR:$rs2,
                                 (vti.Mask true_mask),
                                 VLOpFrag)),
             (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
                 (vti_m1.Vector (IMPLICIT_DEF)),
                 (vti.Vector vti.RegClass:$rs1),
                 (vti_m1.Vector VR:$rs2),
                 GPR:$vl, vti.Log2SEW)>;
  }
}
603 //===----------------------------------------------------------------------===//
605 //===----------------------------------------------------------------------===//
607 let Predicates = [HasStdExtV] in {
// 7.4. Vector Unit-Stride Instructions
foreach vti = AllVectors in {
  defvar load_instr = !cast<Instruction>("PseudoVLE"#vti.SEW#"_V_"#vti.LMul.MX);
  defvar store_instr = !cast<Instruction>("PseudoVSE"#vti.SEW#"_V_"#vti.LMul.MX);
  // Load
  def : Pat<(vti.Vector (riscv_vle_vl BaseAddr:$rs1, VLOpFrag)),
            (load_instr BaseAddr:$rs1, GPR:$vl, vti.Log2SEW)>;
  // Store
  def : Pat<(riscv_vse_vl (vti.Vector vti.RegClass:$rs2), BaseAddr:$rs1,
                          VLOpFrag),
            (store_instr vti.RegClass:$rs2, BaseAddr:$rs1, GPR:$vl, vti.Log2SEW)>;
}

// Mask loads/stores use the EEW=1 pseudos (PseudoVLE1/PseudoVSE1).
foreach mti = AllMasks in {
  defvar load_instr = !cast<Instruction>("PseudoVLE1_V_"#mti.BX);
  defvar store_instr = !cast<Instruction>("PseudoVSE1_V_"#mti.BX);
  def : Pat<(mti.Mask (riscv_vle_vl BaseAddr:$rs1, VLOpFrag)),
            (load_instr BaseAddr:$rs1, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(riscv_vse_vl (mti.Mask VR:$rs2), BaseAddr:$rs1,
                          VLOpFrag),
            (store_instr VR:$rs2, BaseAddr:$rs1, GPR:$vl, mti.Log2SEW)>;
}
// 12.1. Vector Single-Width Integer Add and Subtract
// VADD has vv/vx/vi forms; VSUB has no vi form (negated-immediate adds and
// reversed subtracts are handled by the VRSUB patterns that follow).
defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">;
defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands.
foreach vti = AllIntegerVectors in {
  def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                          (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask),
                          VLOpFrag),
            (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                          (vti.Vector vti.RegClass:$rs1), (vti.Mask VMV0:$vm),
                          VLOpFrag),
            (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK")
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
                 VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                          (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask),
                          VLOpFrag),
            (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
                 vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                          (vti.Vector vti.RegClass:$rs1), (vti.Mask VMV0:$vm),
                          VLOpFrag),
            (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK")
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, simm5:$rs2,
                 VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
}
// 12.3. Vector Integer Extension
defm : VPatExtendSDNode_V_VL<riscv_zext_vl, "PseudoVZEXT", "VF2",
                             AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V_VL<riscv_sext_vl, "PseudoVSEXT", "VF2",
                             AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V_VL<riscv_zext_vl, "PseudoVZEXT", "VF4",
                             AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V_VL<riscv_sext_vl, "PseudoVSEXT", "VF4",
                             AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V_VL<riscv_zext_vl, "PseudoVZEXT", "VF8",
                             AllFractionableVF8IntVectors>;
defm : VPatExtendSDNode_V_VL<riscv_sext_vl, "PseudoVSEXT", "VF8",
                             AllFractionableVF8IntVectors>;

// 12.5. Vector Bitwise Logical Instructions
defm : VPatBinaryVL_VV_VX_VI<riscv_and_vl, "PseudoVAND">;
defm : VPatBinaryVL_VV_VX_VI<riscv_or_vl, "PseudoVOR">;
defm : VPatBinaryVL_VV_VX_VI<riscv_xor_vl, "PseudoVXOR">;

// 12.6. Vector Single-Width Bit Shift Instructions
// Shift-immediate forms use uimm5 instead of the default simm5.
defm : VPatBinaryVL_VV_VX_VI<riscv_shl_vl, "PseudoVSLL", uimm5>;
defm : VPatBinaryVL_VV_VX_VI<riscv_srl_vl, "PseudoVSRL", uimm5>;
defm : VPatBinaryVL_VV_VX_VI<riscv_sra_vl, "PseudoVSRA", uimm5>;
foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1),
                          (riscv_vmv_v_x_vl 1, (XLenVT srcvalue)),
                          (vti.Mask true_mask),
                          VLOpFrag),
            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
                 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
}
// 12.7. Vector Narrowing Integer Right Shift Instructions
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  // A plain truncate is a narrowing shift-right-logical by 0.
  def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
                                               (vti.Mask true_mask),
                                               VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX)
                wti.RegClass:$rs1, 0, GPR:$vl, vti.Log2SEW)>;

  // trunc(sra/srl(wide, splat)) folds to a single narrowing shift.
  def : Pat<(vti.Vector
             (riscv_trunc_vector_vl
              (wti.Vector
               (riscv_sra_vl wti.RegClass:$rs1, (SplatPat XLenVT:$rs2),
                             true_mask, VLOpFrag)), true_mask, VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRA_WX_"#vti.LMul.MX)
                wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector
             (riscv_trunc_vector_vl
              (wti.Vector
               (riscv_sra_vl wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2),
                             true_mask, VLOpFrag)), true_mask, VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRA_WI_"#vti.LMul.MX)
                wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector
             (riscv_trunc_vector_vl
              (wti.Vector
               (riscv_srl_vl wti.RegClass:$rs1, (SplatPat XLenVT:$rs2),
                             true_mask, VLOpFrag)), true_mask, VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRL_WX_"#vti.LMul.MX)
                wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector
             (riscv_trunc_vector_vl
              (wti.Vector
               (riscv_srl_vl wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2),
                             true_mask, VLOpFrag)), true_mask, VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX)
                wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW)>;
}
// 12.8. Vector Integer Comparison Instructions
foreach vti = AllIntegerVectors in {
  defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSEQ", SETEQ>;
  defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSNE", SETNE>;

  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;

  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;
  // There is no VMSGE(U)_VX instruction

  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;

  // Strict compares against an immediate become non-strict compares against
  // imm-1 (the nonzero variants avoid wrapping for the unsigned forms).
  defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSLE", SETLT,
                                    SplatPat_simm5_plus1>;
  defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSLEU", SETULT,
                                    SplatPat_simm5_plus1_nonzero>;
  defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSGT", SETGE,
                                    SplatPat_simm5_plus1>;
  defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSGTU", SETUGE,
                                    SplatPat_simm5_plus1_nonzero>;
} // foreach vti = AllIntegerVectors

// 12.9. Vector Integer Min/Max Instructions
defm : VPatBinaryVL_VV_VX<riscv_umin_vl, "PseudoVMINU">;
defm : VPatBinaryVL_VV_VX<riscv_smin_vl, "PseudoVMIN">;
defm : VPatBinaryVL_VV_VX<riscv_umax_vl, "PseudoVMAXU">;
defm : VPatBinaryVL_VV_VX<riscv_smax_vl, "PseudoVMAX">;

// 12.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinaryVL_VV_VX<riscv_mul_vl, "PseudoVMUL">;
defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH">;
defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU">;

// 12.11. Vector Integer Divide Instructions
defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU">;
defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV">;
defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU">;
defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM">;

// 12.12. Vector Widening Integer Multiply Instructions
defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">;
// 12.13 Vector Single-Width Integer Multiply-Add Instructions
// NOTE(review): the vti.RegClass:$rd operand lines inside the mul frags were
// reconstructed — verify against upstream.
foreach vti = AllIntegerVectors in {
  // NOTE: We choose VMADD because it has the most commuting freedom. So it
  // works best with how TwoAddressInstructionPass tries commuting.
  defvar suffix = vti.LMul.MX;
  def : Pat<(vti.Vector
             (riscv_add_vl vti.RegClass:$rs2,
                           (riscv_mul_vl_oneuse vti.RegClass:$rs1,
                                                vti.RegClass:$rd,
                                                (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVMADD_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector
             (riscv_sub_vl vti.RegClass:$rs2,
                           (riscv_mul_vl_oneuse vti.RegClass:$rs1,
                                                vti.RegClass:$rd,
                                                (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVNMSUB_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally
  // commutable.
  def : Pat<(vti.Vector
             (riscv_add_vl vti.RegClass:$rs2,
                           (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1),
                                                vti.RegClass:$rd,
                                                (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVMADD_VX_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector
             (riscv_sub_vl vti.RegClass:$rs2,
                           (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1),
                                                vti.RegClass:$rd,
                                                (vti.Mask true_mask),
                                                VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVNMSUB_VX_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
// 12.14. Vector Widening Integer Multiply-Add Instructions
// add(wide_acc, vwmul(narrow, narrow)) folds to vwmacc(u) when the widening
// multiply has a single use.
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  def : Pat<(wti.Vector
             (riscv_add_vl wti.RegClass:$rd,
                           (riscv_vwmul_vl_oneuse vti.RegClass:$rs1,
                                                  (vti.Vector vti.RegClass:$rs2),
                                                  (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACC_VV_" # vti.LMul.MX)
                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(wti.Vector
             (riscv_add_vl wti.RegClass:$rd,
                           (riscv_vwmulu_vl_oneuse vti.RegClass:$rs1,
                                                   (vti.Vector vti.RegClass:$rs2),
                                                   (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACCU_VV_" # vti.LMul.MX)
                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  def : Pat<(wti.Vector
             (riscv_add_vl wti.RegClass:$rd,
                           (riscv_vwmul_vl_oneuse (SplatPat XLenVT:$rs1),
                                                  (vti.Vector vti.RegClass:$rs2),
                                                  (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACC_VX_" # vti.LMul.MX)
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(wti.Vector
             (riscv_add_vl wti.RegClass:$rd,
                           (riscv_vwmulu_vl_oneuse (SplatPat XLenVT:$rs1),
                                                   (vti.Vector vti.RegClass:$rs2),
                                                   (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACCU_VX_" # vti.LMul.MX)
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
883 // 12.15. Vector Integer Merge Instructions
884 foreach vti = AllIntegerVectors in {
// vselect with the mask pinned to v0 and a vector true-operand -> vmerge.vvm.
885 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
889 (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
890 vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
891 GPR:$vl, vti.Log2SEW)>;
// Splatted GPR true-operand -> vmerge.vxm.
893 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
894 (SplatPat XLenVT:$rs1),
897 (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
898 vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
// Splatted 5-bit immediate true-operand -> vmerge.vim.
900 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
901 (SplatPat_simm5 simm5:$rs1),
904 (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
905 vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
908 // 12.16. Vector Integer Move Instructions
909 foreach vti = AllIntegerVectors in {
// Splat of a GPR -> vmv.v.x.
910 def : Pat<(vti.Vector (riscv_vmv_v_x_vl GPR:$rs2, VLOpFrag)),
911 (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
912 $rs2, GPR:$vl, vti.Log2SEW)>;
// Splats of immediates that fit simm5 at this SEW use vmv.v.i instead.
913 defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5");
914 def : Pat<(vti.Vector (riscv_vmv_v_x_vl (ImmPat XLenVT:$imm5),
916 (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
917 XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>;
920 // 12.1. Vector Single-Width Saturating Add and Subtract
// Note the add forms also get an immediate (VI) variant while the sub
// forms are VV/VX only (there is no vssub.vi encoding to target).
921 defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">;
922 defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">;
923 defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">;
924 defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;
926 } // Predicates = [HasStdExtV]
928 // 15.1. Vector Single-Width Integer Reduction Instructions
// Map each VL vecreduce node onto the matching single-width reduction
// pseudo; the is_float flag selects the FP variant of the shared helper.
929 let Predicates = [HasStdExtV] in {
930 defm : VPatReductionVL<rvv_vecreduce_ADD_vl, "PseudoVREDSUM", /*is_float*/0>;
931 defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", /*is_float*/0>;
932 defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", /*is_float*/0>;
933 defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", /*is_float*/0>;
934 defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", /*is_float*/0>;
935 defm : VPatReductionVL<rvv_vecreduce_AND_vl, "PseudoVREDAND", /*is_float*/0>;
936 defm : VPatReductionVL<rvv_vecreduce_OR_vl, "PseudoVREDOR", /*is_float*/0>;
937 defm : VPatReductionVL<rvv_vecreduce_XOR_vl, "PseudoVREDXOR", /*is_float*/0>;
938 } // Predicates = [HasStdExtV]
940 // 15.3. Vector Single-Width Floating-Point Reduction Instructions
// SEQ_FADD (sequential/ordered) selects the ordered sum vfredosum;
// plain FADD selects the unordered vfredsum.
941 let Predicates = [HasStdExtV, HasStdExtF] in {
942 defm : VPatReductionVL<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", /*is_float*/1>;
943 defm : VPatReductionVL<rvv_vecreduce_FADD_vl, "PseudoVFREDSUM", /*is_float*/1>;
944 defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", /*is_float*/1>;
945 defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", /*is_float*/1>;
946 } // Predicates = [HasStdExtV, HasStdExtF]
948 let Predicates = [HasStdExtV, HasStdExtF] in {
950 // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
// The _R_VF helpers cover the reversed scalar-first operand order,
// selecting the vfrsub/vfrdiv pseudos.
951 defm : VPatBinaryFPVL_VV_VF<riscv_fadd_vl, "PseudoVFADD">;
952 defm : VPatBinaryFPVL_VV_VF<riscv_fsub_vl, "PseudoVFSUB">;
953 defm : VPatBinaryFPVL_R_VF<riscv_fsub_vl, "PseudoVFRSUB">;
955 // 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
956 defm : VPatBinaryFPVL_VV_VF<riscv_fmul_vl, "PseudoVFMUL">;
957 defm : VPatBinaryFPVL_VV_VF<riscv_fdiv_vl, "PseudoVFDIV">;
958 defm : VPatBinaryFPVL_R_VF<riscv_fdiv_vl, "PseudoVFRDIV">;
960 // 14.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
961 foreach vti = AllFloatVectors in {
962 // NOTE: We choose VFMADD because it has the most commuting freedom. So it
963 // works best with how TwoAddressInstructionPass tries commuting.
964 defvar suffix = vti.LMul.MX;
// Plain fma -> vfmadd.vv.
965 def : Pat<(vti.Vector (riscv_fma_vl vti.RegClass:$rs1, vti.RegClass:$rd,
966 vti.RegClass:$rs2, (vti.Mask true_mask),
968 (!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
969 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
970 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
// Addend negated -> vfmsub.vv.
971 def : Pat<(vti.Vector (riscv_fma_vl vti.RegClass:$rs1, vti.RegClass:$rd,
972 (riscv_fneg_vl vti.RegClass:$rs2,
973 (vti.Mask true_mask),
975 (vti.Mask true_mask),
977 (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
978 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
979 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
// Multiplicand and addend both negated -> vfnmadd.vv.
980 def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl vti.RegClass:$rs1,
981 (vti.Mask true_mask),
984 (riscv_fneg_vl vti.RegClass:$rs2,
985 (vti.Mask true_mask),
987 (vti.Mask true_mask),
989 (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
990 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
991 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
// Only the multiplicand negated -> vfnmsub.vv.
992 def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl vti.RegClass:$rs1,
993 (vti.Mask true_mask),
995 vti.RegClass:$rd, vti.RegClass:$rs2,
996 (vti.Mask true_mask),
998 (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
999 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
1000 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1002 // The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
// commutable, so either would serve.
// Vector-scalar variants: the scalar multiplicand is a splatted FP register.
1004 def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
1005 vti.RegClass:$rd, vti.RegClass:$rs2,
1006 (vti.Mask true_mask),
1008 (!cast<Instruction>("PseudoVFMADD_V" # vti.ScalarSuffix # "_" # suffix)
1009 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1010 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1011 def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
1013 (riscv_fneg_vl vti.RegClass:$rs2,
1014 (vti.Mask true_mask),
1016 (vti.Mask true_mask),
1018 (!cast<Instruction>("PseudoVFMSUB_V" # vti.ScalarSuffix # "_" # suffix)
1019 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1020 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1021 def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
1022 (riscv_fneg_vl vti.RegClass:$rd,
1023 (vti.Mask true_mask),
1025 (riscv_fneg_vl vti.RegClass:$rs2,
1026 (vti.Mask true_mask),
1028 (vti.Mask true_mask),
1030 (!cast<Instruction>("PseudoVFNMADD_V" # vti.ScalarSuffix # "_" # suffix)
1031 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1032 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1033 def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
1034 (riscv_fneg_vl vti.RegClass:$rd,
1035 (vti.Mask true_mask),
1038 (vti.Mask true_mask),
1040 (!cast<Instruction>("PseudoVFNMSUB_V" # vti.ScalarSuffix # "_" # suffix)
1041 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1042 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1044 // The splat might be negated.
1045 def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl (SplatFPOp vti.ScalarRegClass:$rs1),
1046 (vti.Mask true_mask),
1049 (riscv_fneg_vl vti.RegClass:$rs2,
1050 (vti.Mask true_mask),
1052 (vti.Mask true_mask),
1054 (!cast<Instruction>("PseudoVFNMADD_V" # vti.ScalarSuffix # "_" # suffix)
1055 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1056 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1057 def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl (SplatFPOp vti.ScalarRegClass:$rs1),
1058 (vti.Mask true_mask),
1060 vti.RegClass:$rd, vti.RegClass:$rs2,
1061 (vti.Mask true_mask),
1063 (!cast<Instruction>("PseudoVFNMSUB_V" # vti.ScalarSuffix # "_" # suffix)
1064 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
1065 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1068 // 14.11. Vector Floating-Point MIN/MAX Instructions
1069 defm : VPatBinaryFPVL_VV_VF<riscv_fminnum_vl, "PseudoVFMIN">;
1070 defm : VPatBinaryFPVL_VV_VF<riscv_fmaxnum_vl, "PseudoVFMAX">;
1072 // 14.13. Vector Floating-Point Compare Instructions
// The helper takes two pseudos: the first is used for the vv/vf operand
// order, the second for the swapped (fv) order, hence VMFLT pairing with
// VMFGT and VMFLE with VMFGE. Both the don't-care and ordered condition
// codes map to the same compare instruction.
1073 defm : VPatFPSetCCVL_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
1074 defm : VPatFPSetCCVL_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
1076 defm : VPatFPSetCCVL_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">;
1077 defm : VPatFPSetCCVL_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">;
1079 defm : VPatFPSetCCVL_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">;
1080 defm : VPatFPSetCCVL_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">;
1082 defm : VPatFPSetCCVL_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">;
1083 defm : VPatFPSetCCVL_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;
1085 foreach vti = AllFloatVectors in {
1086 // 14.8. Vector Floating-Point Square-Root Instruction
1087 def : Pat<(riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask),
1089 (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX)
1090 vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
1092 // 14.12. Vector Floating-Point Sign-Injection Instructions
// fabs is sign-injection-xor of a value with itself (clears the sign bit).
1093 def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask true_mask),
1095 (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
1096 vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.Log2SEW)>;
1097 // Handle fneg with VFSGNJN using the same input for both operands.
1098 def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask true_mask),
1100 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
1101 vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.Log2SEW)>;
// copysign -> vfsgnj.vv; copysign of a negated sign source -> vfsgnjn.vv.
1102 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
1103 (vti.Vector vti.RegClass:$rs2),
1104 (vti.Mask true_mask),
1106 (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
1107 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
1108 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
1109 (riscv_fneg_vl vti.RegClass:$rs2,
1110 (vti.Mask true_mask),
1112 (vti.Mask true_mask),
1114 (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
1115 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
// Scalar sign source -> vfsgnj.vf.
1117 def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
1118 (SplatFPOp vti.ScalarRegClass:$rs2),
1119 (vti.Mask true_mask),
1121 (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX)
1122 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
1125 foreach fvti = AllFloatVectors in {
1126 // Floating-point vselects:
1127 // 12.15. Vector Integer Merge Instructions
1128 // 14.15. Vector Floating-Point Merge Instruction
// Vector true-operand reuses the integer vmerge.vvm pseudo.
1129 def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
1133 (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
1134 fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
1135 GPR:$vl, fvti.Log2SEW)>;
// Splatted FP scalar true-operand -> vfmerge.vfm.
1137 def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
1138 (SplatFPOp fvti.ScalarRegClass:$rs1),
1141 (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
1143 (fvti.Scalar fvti.ScalarRegClass:$rs1),
1144 VMV0:$vm, GPR:$vl, fvti.Log2SEW)>;
// A splat of +0.0 can use the integer vmerge.vim with immediate 0.
1146 def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
1147 (SplatFPOp (fvti.Scalar fpimm0)),
1150 (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
1151 fvti.RegClass:$rs2, 0, VMV0:$vm, GPR:$vl, fvti.Log2SEW)>;
1153 // 14.16. Vector Floating-Point Move Instruction
1154 // If we're splatting fpimm0, use the integer vmv.v.i with immediate 0
1154 // instead of a vfmv (the all-zero bit pattern is the same value).
1155 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
1156 (fvti.Scalar (fpimm0)), VLOpFrag)),
1157 (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
1158 0, GPR:$vl, fvti.Log2SEW)>;
// General FP scalar splat -> vfmv.v.f.
1160 def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
1161 (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
1162 (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
1164 (fvti.Scalar fvti.ScalarRegClass:$rs2),
1165 GPR:$vl, fvti.Log2SEW)>;
1167 // 14.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
// FP->int conversions use the round-towards-zero (RTZ) encodings so they
// match C-style truncating casts regardless of the dynamic rounding mode.
1168 defm : VPatConvertFP2ISDNode_V_VL<riscv_fp_to_sint_vl, "PseudoVFCVT_RTZ_X_F_V">;
1169 defm : VPatConvertFP2ISDNode_V_VL<riscv_fp_to_uint_vl, "PseudoVFCVT_RTZ_XU_F_V">;
1170 defm : VPatConvertI2FPSDNode_V_VL<riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">;
1171 defm : VPatConvertI2FPSDNode_V_VL<riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">;
1173 // 14.18. Widening Floating-Point/Integer Type-Convert Instructions
1174 defm : VPatWConvertFP2ISDNode_V_VL<riscv_fp_to_sint_vl, "PseudoVFWCVT_RTZ_X_F_V">;
1175 defm : VPatWConvertFP2ISDNode_V_VL<riscv_fp_to_uint_vl, "PseudoVFWCVT_RTZ_XU_F_V">;
1176 defm : VPatWConvertI2FPSDNode_V_VL<riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">;
1177 defm : VPatWConvertI2FPSDNode_V_VL<riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">;
// fpextend between widenable FP type pairs -> vfwcvt.f.f.v.
1178 foreach fvtiToFWti = AllWidenableFloatVectors in {
1179 defvar fvti = fvtiToFWti.Vti;
1180 defvar fwti = fvtiToFWti.Wti;
1181 def : Pat<(fwti.Vector (riscv_fpextend_vl (fvti.Vector fvti.RegClass:$rs1),
1182 (fvti.Mask true_mask),
1184 (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX)
1185 fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
1188 // 14.19 Narrowing Floating-Point/Integer Type-Convert Instructions
1189 defm : VPatNConvertFP2ISDNode_V_VL<riscv_fp_to_sint_vl, "PseudoVFNCVT_RTZ_X_F_W">;
1190 defm : VPatNConvertFP2ISDNode_V_VL<riscv_fp_to_uint_vl, "PseudoVFNCVT_RTZ_XU_F_W">;
1191 defm : VPatNConvertI2FPSDNode_V_VL<riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">;
1192 defm : VPatNConvertI2FPSDNode_V_VL<riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">;
// fpround -> vfncvt.f.f.w; the separate fncvt_rod node selects the
// round-towards-odd variant vfncvt.rod.f.f.w.
1193 foreach fvtiToFWti = AllWidenableFloatVectors in {
1194 defvar fvti = fvtiToFWti.Vti;
1195 defvar fwti = fvtiToFWti.Wti;
1196 def : Pat<(fvti.Vector (riscv_fpround_vl (fwti.Vector fwti.RegClass:$rs1),
1197 (fwti.Mask true_mask),
1199 (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
1200 fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
1202 def : Pat<(fvti.Vector (riscv_fncvt_rod_vl (fwti.Vector fwti.RegClass:$rs1),
1203 (fwti.Mask true_mask),
1205 (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX)
1206 fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
1210 } // Predicates = [HasStdExtV, HasStdExtF]
1212 let Predicates = [HasStdExtV] in {
1214 foreach mti = AllMasks in {
1215 // 16.1 Vector Mask-Register Logical Instructions
// Mask set/clear take only VL and SEW; no source operands.
1216 def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)),
1217 (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
1218 def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)),
1219 (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
1221 def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
1222 (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
1223 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
1224 def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
1225 (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
1226 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
1227 def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
1228 (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
1229 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
// Fold a NOT on the second operand into the and-not/or-not encodings.
1231 def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
1232 (riscv_vmnot_vl VR:$rs2, VLOpFrag),
1234 (!cast<Instruction>("PseudoVMANDNOT_MM_" # mti.LMul.MX)
1235 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
1236 def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
1237 (riscv_vmnot_vl VR:$rs2, VLOpFrag),
1239 (!cast<Instruction>("PseudoVMORNOT_MM_" # mti.LMul.MX)
1240 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
1241 // Two equivalent forms fold to VMXNOR: an xor with one inverted operand
1241 // (here) and an inverted xor (below), so both shapes need a pattern.
1242 def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
1244 VR:$rs2, VLOpFrag)),
1245 (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
1246 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
// Inverted and/or/xor -> the nand/nor/xnor encodings.
1248 def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
1251 (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
1252 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
1253 def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
1256 (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
1257 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
1258 def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
1261 (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
1262 VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
1264 // Match the not idiom to the vmnot.m pseudo.
1265 def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
1266 (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
1267 VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;
1269 // 16.2 Vector Mask Population Count vpopc
1270 def : Pat<(XLenVT (riscv_vpopc_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
1272 (!cast<Instruction>("PseudoVPOPC_M_" # mti.BX)
1273 VR:$rs2, GPR:$vl, mti.Log2SEW)>;
1276 } // Predicates = [HasStdExtV]
1278 let Predicates = [HasStdExtV] in {
1279 // 17.1. Integer Scalar Move Instructions
1280 // 17.4. Vector Register Gather Instruction
1281 foreach vti = AllIntegerVectors in {
// Insert a scalar into element 0, preserving the rest of $merge.
1282 def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge),
1283 vti.ScalarRegClass:$rs1,
1285 (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
1286 vti.RegClass:$merge,
1287 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
// Unmasked gathers: vector, GPR and uimm5 index forms.
1288 def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
1289 (vti.Vector vti.RegClass:$rs1),
1290 (vti.Mask true_mask),
1292 (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX)
1293 vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
1294 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
1295 (vti.Mask true_mask),
1297 (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX)
1298 vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>;
1299 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, uimm5:$imm,
1300 (vti.Mask true_mask),
1302 (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX)
1303 vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.Log2SEW)>;
// A vselect merging an unmasked gather folds into the _MASK pseudo,
// which takes the merge value and the mask as extra operands.
1305 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
1306 (riscv_vrgather_vv_vl
1309 (vti.Mask true_mask),
1311 vti.RegClass:$merge,
1313 (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
1314 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
1315 vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
1317 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
1318 (riscv_vrgather_vx_vl
1321 (vti.Mask true_mask),
1323 vti.RegClass:$merge,
1325 (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
1326 vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
1327 vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
// vrgatherei16 uses i16 indices, so the index operand's EMUL differs
// from the data LMUL:
1329 // emul = lmul * 16 / sew
// octuple values (8*LMUL) keep the arithmetic integral; only emit the
// pattern when the resulting EMUL is a legal register group (1/8..8).
1330 defvar vlmul = vti.LMul;
1331 defvar octuple_lmul = vlmul.octuple;
1332 defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
1333 if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
1334 defvar emul_str = octuple_to_str<octuple_emul>.ret;
1335 defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
1336 defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_" # emul_str;
1337 def : Pat<(vti.Vector (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
1338 (ivti.Vector ivti.RegClass:$rs1),
1339 (vti.Mask true_mask),
1341 (!cast<Instruction>(inst)
1342 vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
1344 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
1345 (riscv_vrgatherei16_vv_vl
1347 (ivti.Vector ivti.RegClass:$rs1),
1348 (vti.Mask true_mask),
1350 vti.RegClass:$merge,
1352 (!cast<Instruction>(inst#"_MASK")
1353 vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
1354 vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
1358 } // Predicates = [HasStdExtV]
1360 let Predicates = [HasStdExtV, HasStdExtF] in {
1362 // 17.2. Floating-Point Scalar Move Instructions
// FP mirror of the integer scalar-move/gather patterns above.
1363 foreach vti = AllFloatVectors in {
// Insert an FP scalar into element 0, preserving the rest of $merge.
1364 def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
1365 vti.ScalarRegClass:$rs1,
1367 (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
1368 vti.RegClass:$merge,
1369 (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
// Gather indices are integer vectors of the equivalent int type.
1370 defvar ivti = GetIntVTypeInfo<vti>.Vti;
1371 def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
1372 (ivti.Vector vti.RegClass:$rs1),
1373 (vti.Mask true_mask),
1375 (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX)
1376 vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
1377 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
1378 (vti.Mask true_mask),
1380 (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX)
1381 vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>;
1382 def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, uimm5:$imm,
1383 (vti.Mask true_mask),
1385 (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX)
1386 vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.Log2SEW)>;
// vselect of an unmasked gather folds into the _MASK pseudo.
1388 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
1389 (riscv_vrgather_vv_vl
1391 (ivti.Vector vti.RegClass:$rs1),
1392 (vti.Mask true_mask),
1394 vti.RegClass:$merge,
1396 (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
1397 vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
1398 vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
1400 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
1401 (riscv_vrgather_vx_vl
1404 (vti.Mask true_mask),
1406 vti.RegClass:$merge,
1408 (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
1409 vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
1410 vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
// vrgatherei16: compute the i16 index vector's EMUL (= LMUL*16/SEW, in
// octuple units) and only emit patterns for legal register groups.
1412 defvar vlmul = vti.LMul;
1413 defvar octuple_lmul = vlmul.octuple;
1414 defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
1415 if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
1416 defvar emul_str = octuple_to_str<octuple_emul>.ret;
1417 defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
1418 defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_" # emul_str;
1419 def : Pat<(vti.Vector (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
1420 (ivti.Vector ivti.RegClass:$rs1),
1421 (vti.Mask true_mask),
1423 (!cast<Instruction>(inst)
1424 vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
1426 def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
1427 (riscv_vrgatherei16_vv_vl
1429 (ivti.Vector ivti.RegClass:$rs1),
1430 (vti.Mask true_mask),
1432 vti.RegClass:$merge,
1434 (!cast<Instruction>(inst#"_MASK")
1435 vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
1436 vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
1440 } // Predicates = [HasStdExtV, HasStdExtF]
1442 //===----------------------------------------------------------------------===//
1443 // Miscellaneous RISCVISD SDNodes
1444 //===----------------------------------------------------------------------===//
// vid.v: results in a vector; operands are a mask (i1 elements, same
// element count as the result) and an XLenVT VL.
1446 def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2,
1447 [SDTCisVec<0>, SDTCVecEltisVT<1, i1>,
1448 SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>;
// Slide profile: (merge vec, source vec, XLenVT offset, mask, XLenVT VL),
// with both vector operands the same type as the result.
1450 def SDTRVVSlide : SDTypeProfile<1, 5, [
1451 SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>,
1452 SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>
// Slide1 profile: integer vectors only; the slid-in element is an
// XLenVT scalar rather than a second vector operand.
1454 def SDTRVVSlide1 : SDTypeProfile<1, 4, [
1455 SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisInt<0>, SDTCisVT<2, XLenVT>,
1456 SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, SDTCisVT<4, XLenVT>
1459 def riscv_slideup_vl : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>;
1460 def riscv_slide1up_vl : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>;
1461 def riscv_slidedown_vl : SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>;
1462 def riscv_slide1down_vl : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>;
1464 let Predicates = [HasStdExtV] in {
// vid and the scalar slide1up/slide1down only exist for integer vectors.
1466 foreach vti = AllIntegerVectors in {
1467 def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask true_mask),
1469 (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.Log2SEW)>;
1471 def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rs1),
1472 GPR:$rs2, (vti.Mask true_mask),
1474 (!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX)
1475 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
1476 def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rs1),
1477 GPR:$rs2, (vti.Mask true_mask),
1479 (!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX)
1480 vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
// Whole-vector slides apply to both integer and FP vectors; $rs3 is the
// merge/destination operand, $rs1 the source, with uimm5 and GPR offset
// forms for each direction.
1483 foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
1484 def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
1485 (vti.Vector vti.RegClass:$rs1),
1486 uimm5:$rs2, (vti.Mask true_mask),
1488 (!cast<Instruction>("PseudoVSLIDEUP_VI_"#vti.LMul.MX)
1489 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
1490 GPR:$vl, vti.Log2SEW)>;
1492 def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
1493 (vti.Vector vti.RegClass:$rs1),
1494 GPR:$rs2, (vti.Mask true_mask),
1496 (!cast<Instruction>("PseudoVSLIDEUP_VX_"#vti.LMul.MX)
1497 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
1498 GPR:$vl, vti.Log2SEW)>;
1500 def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
1501 (vti.Vector vti.RegClass:$rs1),
1502 uimm5:$rs2, (vti.Mask true_mask),
1504 (!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX)
1505 vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
1506 GPR:$vl, vti.Log2SEW)>;
1508 def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
1509 (vti.Vector vti.RegClass:$rs1),
1510 GPR:$rs2, (vti.Mask true_mask),
1512 (!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX)
1513 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
1514 GPR:$vl, vti.Log2SEW)>;
1517 } // Predicates = [HasStdExtV]