1 //===-- RISCVInstrInfoZvk.td - RISC-V 'Zvk' instructions ---*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the RISC-V instructions from the standard 'Zvk' Vector
10 // Cryptography extension, version 1.0.0.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // Operand and SDNode transformation definitions.
16 //===----------------------------------------------------------------------===//
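// A 5-bit unsigned immediate matched as a TargetConstant (TImmLeaf). The Zvk
// intrinsics carry their round-number immediates as ImmArg operands, so they
// reach instruction selection as target constants and need this operand rather
// than the plain uimm5.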
18 def tuimm5 : RISCVOp, TImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]>;
20 //===----------------------------------------------------------------------===//
21 // Instruction class templates
22 //===----------------------------------------------------------------------===//
24 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
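// Vector carry-less multiply: vclmul returns the low SEW bits of the 2*SEW-bit
// carry-less product and vclmulh the high bits; both have .vv and .vx forms.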
25 multiclass VCLMUL_MV_V_X<string opcodestr, bits<6> funct6> {
26 def V : VALUVV<funct6, OPMVV, opcodestr # "." # "vv">,
27 SchedBinaryMC<"WriteVCLMULV", "ReadVCLMULV", "ReadVCLMULV">;
28 def X : VALUVX<funct6, OPMVX, opcodestr # "." # "vx">,
29 SchedBinaryMC<"WriteVCLMULX", "ReadVCLMULV", "ReadVCLMULX">;
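// vror.vi takes a 6-bit rotate amount, which does not fit the usual 5-bit
// immediate field: imm[4:0] is encoded in bits 19-15 and imm[5] takes over
// bit 26, which normally holds funct6[0].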
32 class RVInstIVI_VROR<bits<6> funct6, dag outs, dag ins, string opcodestr,
33 string argstr>
34 : RVInst<outs, ins, opcodestr, argstr, [], InstFormatR> {
35 bits<5> vs2;
36 bits<6> imm;
37 bits<5> vd;
38 bit vm;
40 let Inst{31-27} = funct6{5-1};
41 let Inst{26} = imm{5};
42 let Inst{25} = vm;
43 let Inst{24-20} = vs2;
44 let Inst{19-15} = imm{4-0};
45 let Inst{14-12} = OPIVI.Value;
46 let Inst{11-7} = vd;
47 let Inst{6-0} = OPC_OP_V.Value;
49 let Uses = [VTYPE, VL];
50 let RVVConstraint = VMConstraint;
53 multiclass VROR_IV_V_X_I<string opcodestr, bits<6> funct6>
54 : VALU_IV_V_X<opcodestr, funct6> {
55 def I : RVInstIVI_VROR<funct6, (outs VR:$vd),
56 (ins VR:$vs2, uimm6:$imm, VMaskOp:$vm),
57 opcodestr # ".vi", "$vd, $vs2, $imm$vm">,
58 SchedUnaryMC<"WriteVRotI", "ReadVRotV">;
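// The PALU* classes below mirror the generic VALU* classes but encode into the
// OP-VE major opcode (OPC_OP_VE) used by the vector crypto extension, and none
// of them take a mask operand.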
62 class PALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
63 : VALUVVNoVm<funct6, opv, opcodestr> {
64 let Inst{6-0} = OPC_OP_VE.Value;
68 class PALUVVNoVmTernary<bits<6> funct6, RISCVVFormat opv, string opcodestr>
69 : RVInstVV<funct6, opv, (outs VR:$vd_wb),
70 (ins VR:$vd, VR:$vs2, VR:$vs1),
71 opcodestr, "$vd, $vs2, $vs1"> {
72 let Constraints = "$vd = $vd_wb";
74 let Inst{6-0} = OPC_OP_VE.Value;
78 class PALUVINoVm<bits<6> funct6, string opcodestr, Operand optype>
79 : VALUVINoVm<funct6, opcodestr, optype> {
80 let Inst{6-0} = OPC_OP_VE.Value;
81 let Inst{14-12} = OPMVV.Value;
84 // op vd, vs2, imm where vd is also a source regardless of tail policy
85 class PALUVINoVmBinary<bits<6> funct6, string opcodestr, Operand optype>
86 : RVInstIVI<funct6, (outs VR:$vd_wb),
87 (ins VR:$vd, VR:$vs2, optype:$imm),
88 opcodestr, "$vd, $vs2, $imm"> {
89 let Constraints = "$vd = $vd_wb";
91 let Inst{6-0} = OPC_OP_VE.Value;
92 let Inst{14-12} = OPMVV.Value;
95 // op vd, vs2 (the vs1 field is used as part of the instruction encoding), where
96 // vd is also a source regardless of tail policy
97 class PALUVs2NoVmBinary<bits<6> funct6, bits<5> vs1, RISCVVFormat opv,
99 : RVInstV<funct6, vs1, opv, (outs VR:$vd_wb), (ins VR:$vd, VR:$vs2),
100 opcodestr, "$vd, $vs2"> {
101 let Constraints = "$vd = $vd_wb";
103 let Inst{6-0} = OPC_OP_VE.Value;
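// The AES and SM4 all-round instructions share a funct6 pair (.vv vs .vs) and
// are distinguished by the value placed in the vs1 field, which acts as an
// extra opcode rather than a register operand.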
106 multiclass VAES_MV_V_S<bits<6> funct6_vv, bits<6> funct6_vs, bits<5> vs1,
107 RISCVVFormat opv, string opcodestr> {
108 let RVVConstraint = NoConstraint in
109 def NAME # _VV : PALUVs2NoVmBinary<funct6_vv, vs1, opv, opcodestr # ".vv">,
110 SchedBinaryMC<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV">;
111 let RVVConstraint = VS2Constraint in
112 def NAME # _VS : PALUVs2NoVmBinary<funct6_vs, vs1, opv, opcodestr # ".vs">,
113 SchedBinaryMC<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV">;
115 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
117 //===----------------------------------------------------------------------===//
118 // Instructions
119 //===----------------------------------------------------------------------===//
121 let Predicates = [HasStdExtZvbb] in {
122 def VBREV_V : VALUVs2<0b010010, 0b01010, OPMVV, "vbrev.v">;
123 def VCLZ_V : VALUVs2<0b010010, 0b01100, OPMVV, "vclz.v">;
124 def VCPOP_V : VALUVs2<0b010010, 0b01110, OPMVV, "vcpop.v">;
125 def VCTZ_V : VALUVs2<0b010010, 0b01101, OPMVV, "vctz.v">;
126 let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
127 DestEEW = EEWSEWx2 in
128 defm VWSLL_V : VSHT_IV_V_X_I<"vwsll", 0b110101>;
129 } // Predicates = [HasStdExtZvbb]
131 let Predicates = [HasStdExtZvbcOrZvbc32e] in {
132 defm VCLMUL_V : VCLMUL_MV_V_X<"vclmul", 0b001100>;
133 defm VCLMULH_V : VCLMUL_MV_V_X<"vclmulh", 0b001101>;
134 } // Predicates = [HasStdExtZvbcOrZvbc32e]
136 let Predicates = [HasStdExtZvkb] in {
137 defm VANDN_V : VALU_IV_V_X<"vandn", 0b000001>;
138 def VBREV8_V : VALUVs2<0b010010, 0b01000, OPMVV, "vbrev8.v">;
139 def VREV8_V : VALUVs2<0b010010, 0b01001, OPMVV, "vrev8.v">;
140 defm VROL_V : VALU_IV_V_X<"vrol", 0b010101>;
141 defm VROR_V : VROR_IV_V_X_I<"vror", 0b010100>;
142 } // Predicates = [HasStdExtZvkb]
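// The crypto instructions below operate on whole element groups, so each
// destination element depends on vl and the mask state rather than only on the
// corresponding source elements; EltDepsVLMask records that dependency.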
144 let ElementsDependOn = EltDepsVLMask in {
146 let Predicates = [HasStdExtZvkg], RVVConstraint = NoConstraint in {
147 def VGHSH_VV : PALUVVNoVmTernary<0b101100, OPMVV, "vghsh.vv">,
148 SchedTernaryMC<"WriteVGHSHV", "ReadVGHSHV", "ReadVGHSHV",
149 "ReadVGHSHV">;
150 def VGMUL_VV : PALUVs2NoVmBinary<0b101000, 0b10001, OPMVV, "vgmul.vv">,
151 SchedBinaryMC<"WriteVGMULV", "ReadVGMULV", "ReadVGMULV">;
152 } // Predicates = [HasStdExtZvkg]
154 let Predicates = [HasStdExtZvkgs], RVVConstraint = VS2Constraint in {
155 def VGHSH_VS : PALUVVNoVmTernary<0b100011, OPMVV, "vghsh.vs">,
156 SchedTernaryMC<"WriteVGHSHV", "ReadVGHSHV", "ReadVGHSHV",
157 "ReadVGHSHV">;
158 def VGMUL_VS : PALUVs2NoVmBinary<0b101001, 0b10001, OPMVV, "vgmul.vs">,
159 SchedBinaryMC<"WriteVGMULV", "ReadVGMULV", "ReadVGMULV">;
160 } // Predicates = [HasStdExtZvkgs]
162 let Predicates = [HasStdExtZvknhaOrZvknhb], RVVConstraint = Sha2Constraint in {
163 def VSHA2CH_VV : PALUVVNoVmTernary<0b101110, OPMVV, "vsha2ch.vv">,
164 SchedTernaryMC<"WriteVSHA2CHV", "ReadVSHA2CHV", "ReadVSHA2CHV",
165 "ReadVSHA2CHV">;
166 def VSHA2CL_VV : PALUVVNoVmTernary<0b101111, OPMVV, "vsha2cl.vv">,
167 SchedTernaryMC<"WriteVSHA2CLV", "ReadVSHA2CLV", "ReadVSHA2CLV",
168 "ReadVSHA2CLV">;
169 def VSHA2MS_VV : PALUVVNoVmTernary<0b101101, OPMVV, "vsha2ms.vv">,
170 SchedTernaryMC<"WriteVSHA2MSV", "ReadVSHA2MSV", "ReadVSHA2MSV",
171 "ReadVSHA2MSV">;
172 } // Predicates = [HasStdExtZvknhaOrZvknhb]
174 let Predicates = [HasStdExtZvkned] in {
175 defm VAESDF : VAES_MV_V_S<0b101000, 0b101001, 0b00001, OPMVV, "vaesdf">;
176 defm VAESDM : VAES_MV_V_S<0b101000, 0b101001, 0b00000, OPMVV, "vaesdm">;
177 defm VAESEF : VAES_MV_V_S<0b101000, 0b101001, 0b00011, OPMVV, "vaesef">;
178 defm VAESEM : VAES_MV_V_S<0b101000, 0b101001, 0b00010, OPMVV, "vaesem">;
179 def VAESKF1_VI : PALUVINoVm<0b100010, "vaeskf1.vi", uimm5>,
180 SchedUnaryMC<"WriteVAESKF1V", "ReadVAESKF1V">;
181 def VAESKF2_VI : PALUVINoVmBinary<0b101010, "vaeskf2.vi", uimm5>,
182 SchedBinaryMC<"WriteVAESKF2V", "ReadVAESKF2V", "ReadVAESKF2V">;
183 let RVVConstraint = VS2Constraint in
184 def VAESZ_VS : PALUVs2NoVmBinary<0b101001, 0b00111, OPMVV, "vaesz.vs">,
185 SchedBinaryMC<"WriteVAESZV", "ReadVAESZV", "ReadVAESZV">;
186 } // Predicates = [HasStdExtZvkned]
188 let Predicates = [HasStdExtZvksed] in {
189 let RVVConstraint = NoConstraint in
190 def VSM4K_VI : PALUVINoVm<0b100001, "vsm4k.vi", uimm5>,
191 SchedUnaryMC<"WriteVSM4KV", "ReadVSM4KV">;
192 defm VSM4R : VAES_MV_V_S<0b101000, 0b101001, 0b10000, OPMVV, "vsm4r">;
193 } // Predicates = [HasStdExtZvksed]
195 let Predicates = [HasStdExtZvksh], RVVConstraint = VS2Constraint in {
196 def VSM3C_VI : PALUVINoVmBinary<0b101011, "vsm3c.vi", uimm5>,
197 SchedBinaryMC<"WriteVSM3CV", "ReadVSM3CV", "ReadVSM3CV">;
198 def VSM3ME_VV : PALUVVNoVm<0b100000, OPMVV, "vsm3me.vv">,
199 SchedUnaryMC<"WriteVSM3MEV", "ReadVSM3MEV">;
200 } // Predicates = [HasStdExtZvksh]
202 } // ElementsDependOn = EltDepsVLMask
204 //===----------------------------------------------------------------------===//
205 // Pseudo instructions
206 //===----------------------------------------------------------------------===//
208 defvar I32IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 32));
209 defvar I32I64IntegerVectors = !filter(vti, AllIntegerVectors,
210 !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)));
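// For the .vs forms vs2 supplies a single element group and may therefore be
// encoded with a smaller LMUL than vd. These tables list, per destination LMUL,
// the vs2 types and LMULs that pseudos and patterns are generated for.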
212 class ZvkI32IntegerVectors<string vd_lmul> {
213 list<VTypeInfo> vs2_types = !cond(!eq(vd_lmul, "M8") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 32)),
214 !eq(vd_lmul, "M4") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 32)),
215 !eq(vd_lmul, "M2") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 16)),
216 !eq(vd_lmul, "M1") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 8)),
217 !eq(vd_lmul, "MF2") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 4)),
218 !eq(vd_lmul, "MF4") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 2)),
219 !eq(vd_lmul, "MF8") : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 1)));
222 class ZvkMxSet<string vd_lmul> {
223 list<LMULInfo> vs2_lmuls = !cond(!eq(vd_lmul, "M8") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4],
224 !eq(vd_lmul, "M4") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4],
225 !eq(vd_lmul, "M2") : [V_MF8, V_MF4, V_MF2, V_M1, V_M2],
226 !eq(vd_lmul, "M1") : [V_MF8, V_MF4, V_MF2, V_M1],
227 !eq(vd_lmul, "MF2") : [V_MF8, V_MF4, V_MF2],
228 !eq(vd_lmul, "MF4") : [V_MF8, V_MF4],
229 !eq(vd_lmul, "MF8") : [V_MF8]);
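// Unmasked pseudo with a tied destination: $rd is both a source and the result
// because these instructions update their destination in place, regardless of
// the tail policy.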
232 class VPseudoBinaryNoMask_Zvk<DAGOperand RetClass, VReg OpClass> :
233 Pseudo<(outs RetClass:$rd_wb),
234 (ins RetClass:$rd, OpClass:$rs2, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
238 let hasSideEffects = 0;
239 let Constraints = "$rd_wb = $rd";
242 let HasVecPolicyOp = 1;
243 let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
246 class VPseudoTernaryNoMask_Zvk<VReg RetClass,
248 DAGOperand Op2Class> :
249 Pseudo<(outs RetClass:$rd_wb),
250 (ins RetClass:$rd, Op1Class:$rs2, Op2Class:$rs1,
251 AVL:$vl, sew:$sew, vec_policy:$policy), []>,
255 let hasSideEffects = 0;
256 let Constraints = "$rd_wb = $rd";
259 let HasVecPolicyOp = 1;
260 let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
263 multiclass VPseudoBinaryNoMaskPolicy_Zvk<VReg RetClass,
267 string Constraint = ""> {
268 let VLMul = MInfo.value in {
269 def "_" # MInfo.MX : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
274 multiclass VPseudoTernaryNoMask_Zvk<VReg RetClass,
277 LMULInfo MInfo, int sew = 0> {
278 let VLMul = MInfo.value, SEW = sew in {
279 defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
280 def suffix : VPseudoTernaryNoMask_Zvk<RetClass, Op1Class, Op2Class>;
284 multiclass VPseudoBinaryV_V_NoMask_Zvk<LMULInfo m> {
285 let VLMul = m.value in {
286 def "_VV_" # m.MX : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass>;
290 multiclass VPseudoBinaryV_S_NoMask_Zvk<LMULInfo m> {
291 let VLMul = m.value in
292 foreach vs2_lmul = ZvkMxSet<m.MX>.vs2_lmuls in
293 def "_VS_" # m.MX # "_" # vs2_lmul.MX : VPseudoBinaryNoMask_Zvk<m.vrclass, vs2_lmul.vrclass>;
296 multiclass VPseudoVGMUL {
297 foreach m = MxListVF4 in {
299 defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>,
300 SchedBinary<"WriteVGMULV", "ReadVGMULV", "ReadVGMULV", mx>;
304 multiclass VPseudoVAESMV {
305 foreach m = MxListVF4 in {
307 defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>,
308 SchedBinary<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV", mx>;
309 defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>,
310 SchedBinary<"WriteVAESMVV", "ReadVAESMVV", "ReadVAESMVV", mx>;
315 multiclass VPseudoVSM4R {
316 foreach m = MxListVF4 in {
318 defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>,
319 SchedBinary<"WriteVSM4RV", "ReadVSM4RV", "ReadVSM4RV", mx>;
320 defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>,
321 SchedBinary<"WriteVSM4RV", "ReadVSM4RV", "ReadVSM4RV", mx>;
326 multiclass VPseudoVGHSH {
327 foreach m = MxListVF4 in {
329 defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
330 SchedTernary<"WriteVGHSHV", "ReadVGHSHV", "ReadVGHSHV",
331 "ReadVGHSHV", mx>;
335 multiclass VPseudoVSHA2CH {
336 foreach m = MxListVF4 in {
338 defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
339 SchedTernary<"WriteVSHA2CHV", "ReadVSHA2CHV", "ReadVSHA2CHV",
340 "ReadVSHA2CHV", mx>;
344 multiclass VPseudoVSHA2CL {
345 foreach m = MxListVF4 in {
347 defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
348 SchedTernary<"WriteVSHA2CLV", "ReadVSHA2CLV", "ReadVSHA2CLV",
349 "ReadVSHA2CLV", mx>;
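// vsha2ms is SEW-aware: the SEW=64 (SHA-512, Zvknhb-only) forms use element
// groups twice as wide as the SEW=32 ones, hence the different LMUL list and
// the _E<sew> suffix on the pseudo names.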
353 multiclass VPseudoVSHA2MS<int sew = 0> {
354 foreach m = !if(!eq(sew, 64), MxListVF8, MxListVF4) in {
356 defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m, sew = sew>,
357 SchedTernary<"WriteVSHA2MSV", "ReadVSHA2MSV", "ReadVSHA2MSV",
358 "ReadVSHA2MSV", mx, sew>;
362 multiclass VPseudoVAESKF1 {
363 foreach m = MxListVF4 in {
365 defm _VI : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, uimm5, m>,
366 SchedBinary<"WriteVAESKF1V", "ReadVAESKF1V", "ReadVAESKF1V", mx,
367 forcePassthruRead=true>;
371 multiclass VPseudoVAESKF2 {
372 foreach m = MxListVF4 in {
374 defm _VI : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, uimm5, m>,
375 SchedTernary<"WriteVAESKF2V", "ReadVAESKF2V", "ReadVAESKF2V",
376 "ReadVAESKF2V", mx>;
380 multiclass VPseudoVAESZ {
381 foreach m = MxListVF4 in {
383 defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>,
384 SchedBinary<"WriteVAESZV", "ReadVAESZV", "ReadVAESZV", mx>;
388 multiclass VPseudoVSM3C {
389 foreach m = MxListVF4 in {
391 defm _VI : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, uimm5, m>,
392 SchedTernary<"WriteVSM3CV", "ReadVSM3CV", "ReadVSM3CV",
393 "ReadVSM3CV", mx>;
397 multiclass VPseudoVSM4K {
398 foreach m = MxListVF4 in {
400 defm _VI : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, uimm5, m>,
401 SchedBinary<"WriteVSM4KV", "ReadVSM4KV", "ReadVSM4KV", mx,
402 forcePassthruRead=true>;
406 multiclass VPseudoVSM3ME {
407 foreach m = MxListVF4 in {
409 defm _VV : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
410 SchedBinary<"WriteVSM3MEV", "ReadVSM3MEV", "ReadVSM3MEV", mx,
411 forcePassthruRead=true>;
415 multiclass VPseudoVCLMUL_VV_VX {
416 foreach m = MxList in {
418 defm "" : VPseudoBinaryV_VV<m>,
419 SchedBinary<"WriteVCLMULV", "ReadVCLMULV", "ReadVCLMULV", mx,
420 forcePassthruRead=true>;
421 defm "" : VPseudoBinaryV_VX<m>,
422 SchedBinary<"WriteVCLMULX", "ReadVCLMULV", "ReadVCLMULX", mx,
423 forcePassthruRead=true>;
427 multiclass VPseudoUnaryV_V<LMULInfo m> {
428 let VLMul = m.value in {
429 defvar suffix = "_V_" # m.MX;
430 def suffix : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
431 def suffix # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
432 RISCVMaskedPseudo<MaskIdx=2>;
436 multiclass VPseudoVBREV {
437 foreach m = MxList in {
439 defm "" : VPseudoUnaryV_V<m>,
440 SchedUnary<"WriteVBREVV", "ReadVBREVV", mx, forcePassthruRead=true>;
444 multiclass VPseudoVCLZ {
445 foreach m = MxList in {
447 defm "" : VPseudoUnaryV_V<m>,
448 SchedUnary<"WriteVCLZV", "ReadVCLZV", mx, forcePassthruRead=true>;
452 multiclass VPseudoVCTZ {
453 foreach m = MxList in {
455 defm "" : VPseudoUnaryV_V<m>,
456 SchedUnary<"WriteVCTZV", "ReadVCTZV", mx, forcePassthruRead=true>;
460 multiclass VPseudoVCPOP {
461 foreach m = MxList in {
463 defm "" : VPseudoUnaryV_V<m>,
464 SchedUnary<"WriteVCPOPV", "ReadVCPOPV", mx, forcePassthruRead=true>;
468 multiclass VPseudoVWSLL {
469 foreach m = MxListW in {
471 defm "" : VPseudoBinaryW_VV<m>,
472 SchedBinary<"WriteVWSLLV", "ReadVWSLLV", "ReadVWSLLV", mx,
473 forcePassthruRead=true>;
474 defm "" : VPseudoBinaryW_VX<m>,
475 SchedBinary<"WriteVWSLLX", "ReadVWSLLV", "ReadVWSLLX", mx,
476 forcePassthruRead=true>;
477 defm "" : VPseudoBinaryW_VI<uimm5, m>,
478 SchedUnary<"WriteVWSLLI", "ReadVWSLLV", mx,
479 forcePassthruRead=true>;
483 multiclass VPseudoVANDN {
484 foreach m = MxList in {
485 defm "" : VPseudoBinaryV_VV<m>,
486 SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
487 forcePassthruRead=true>;
488 defm "" : VPseudoBinaryV_VX<m>,
489 SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX,
490 forcePassthruRead=true>;
494 multiclass VPseudoVBREV8 {
495 foreach m = MxList in {
497 defm "" : VPseudoUnaryV_V<m>,
498 SchedUnary<"WriteVBREV8V", "ReadVBREV8V", mx, forcePassthruRead=true>;
502 multiclass VPseudoVREV8 {
503 foreach m = MxList in {
505 defm "" : VPseudoUnaryV_V<m>,
506 SchedUnary<"WriteVREV8V", "ReadVREV8V", mx, forcePassthruRead=true>;
510 multiclass VPseudoVROT_VV_VX {
511 foreach m = MxList in {
512 defm "" : VPseudoBinaryV_VV<m>,
513 SchedBinary<"WriteVRotV", "ReadVRotV", "ReadVRotV", m.MX,
514 forcePassthruRead=true>;
515 defm "" : VPseudoBinaryV_VX<m>,
516 SchedBinary<"WriteVRotX", "ReadVRotV", "ReadVRotX", m.MX,
517 forcePassthruRead=true>;
521 multiclass VPseudoVROT_VV_VX_VI
522 : VPseudoVROT_VV_VX {
523 foreach m = MxList in {
524 defm "" : VPseudoBinaryV_VI<uimm6, m>,
525 SchedUnary<"WriteVRotI", "ReadVRotV", m.MX,
526 forcePassthruRead=true>;
530 let Predicates = [HasStdExtZvbb] in {
531 defm PseudoVBREV : VPseudoVBREV;
532 defm PseudoVCLZ : VPseudoVCLZ;
533 defm PseudoVCTZ : VPseudoVCTZ;
534 defm PseudoVCPOP : VPseudoVCPOP;
535 defm PseudoVWSLL : VPseudoVWSLL;
536 } // Predicates = [HasStdExtZvbb]
538 let Predicates = [HasStdExtZvbc] in {
539 defm PseudoVCLMUL : VPseudoVCLMUL_VV_VX;
540 defm PseudoVCLMULH : VPseudoVCLMUL_VV_VX;
541 } // Predicates = [HasStdExtZvbc]
543 let Predicates = [HasStdExtZvkb] in {
544 defm PseudoVANDN : VPseudoVANDN;
545 defm PseudoVBREV8 : VPseudoVBREV8;
546 defm PseudoVREV8 : VPseudoVREV8;
547 defm PseudoVROL : VPseudoVROT_VV_VX;
548 defm PseudoVROR : VPseudoVROT_VV_VX_VI;
549 } // Predicates = [HasStdExtZvkb]
551 let Predicates = [HasStdExtZvkg] in {
552 defm PseudoVGHSH : VPseudoVGHSH;
553 defm PseudoVGMUL : VPseudoVGMUL;
554 } // Predicates = [HasStdExtZvkg]
556 let Predicates = [HasStdExtZvkned] in {
557 defm PseudoVAESDF : VPseudoVAESMV;
558 defm PseudoVAESDM : VPseudoVAESMV;
559 defm PseudoVAESEF : VPseudoVAESMV;
560 defm PseudoVAESEM : VPseudoVAESMV;
561 defm PseudoVAESKF1 : VPseudoVAESKF1;
562 defm PseudoVAESKF2 : VPseudoVAESKF2;
563 defm PseudoVAESZ : VPseudoVAESZ;
564 } // Predicates = [HasStdExtZvkned]
566 let Predicates = [HasStdExtZvknhaOrZvknhb] in {
567 defm PseudoVSHA2CH : VPseudoVSHA2CH;
568 defm PseudoVSHA2CL : VPseudoVSHA2CL;
569 defm PseudoVSHA2MS : VPseudoVSHA2MS<sew=32>;
570 let Predicates = [HasStdExtZvknhb] in
571 defm PseudoVSHA2MS : VPseudoVSHA2MS<sew=64>;
572 } // Predicates = [HasStdExtZvknhaOrZvknhb]
574 let Predicates = [HasStdExtZvksed] in {
575 defm PseudoVSM4K : VPseudoVSM4K;
576 defm PseudoVSM4R : VPseudoVSM4R;
577 } // Predicates = [HasStdExtZvksed]
579 let Predicates = [HasStdExtZvksh] in {
580 defm PseudoVSM3C : VPseudoVSM3C;
581 defm PseudoVSM3ME : VPseudoVSM3ME;
582 } // Predicates = [HasStdExtZvksh]
584 //===----------------------------------------------------------------------===//
585 // SDNode patterns
586 //===----------------------------------------------------------------------===//
588 multiclass VPatUnarySDNode_V<SDPatternOperator op, string instruction_name,
589 Predicate predicate = HasStdExtZvbb> {
590 foreach vti = AllIntegerVectors in {
591 let Predicates = !listconcat([predicate],
592 GetVTypePredicates<vti>.Predicates) in {
593 def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1))),
594 (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX)
595 (vti.Vector (IMPLICIT_DEF)),
597 vti.AVL, vti.Log2SEW, TA_MA)>;
602 // Helpers for detecting splats, since splat_vector is preprocessed into
603 // riscv_vmv_v_x_vl. This should match the logic in RISCVDAGToDAGISel::selectVSplat.
604 def riscv_splat_vector : PatFrag<(ops node:$rs1),
605 (riscv_vmv_v_x_vl undef, node:$rs1, srcvalue)>;
606 def riscv_vnot : PatFrag<(ops node:$rs1), (xor node:$rs1,
607 (riscv_splat_vector -1))>;
609 foreach vti = AllIntegerVectors in {
610 let Predicates = !listconcat([HasStdExtZvkb],
611 GetVTypePredicates<vti>.Predicates) in {
612 def : Pat<(vti.Vector (and (riscv_vnot vti.RegClass:$rs1),
614 (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX)
615 (vti.Vector (IMPLICIT_DEF)),
618 vti.AVL, vti.Log2SEW, TA_MA)>;
619 def : Pat<(vti.Vector (and (riscv_splat_vector
620 (not vti.ScalarRegClass:$rs1)),
622 (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX)
623 (vti.Vector (IMPLICIT_DEF)),
625 vti.ScalarRegClass:$rs1,
626 vti.AVL, vti.Log2SEW, TA_MA)>;
630 defm : VPatUnarySDNode_V<bitreverse, "PseudoVBREV">;
631 defm : VPatUnarySDNode_V<bswap, "PseudoVREV8", HasStdExtZvkb>;
632 defm : VPatUnarySDNode_V<ctlz, "PseudoVCLZ">;
633 defm : VPatUnarySDNode_V<cttz, "PseudoVCTZ">;
634 defm : VPatUnarySDNode_V<ctpop, "PseudoVCPOP">;
636 defm : VPatBinarySDNode_VV_VX<rotl, "PseudoVROL">;
638 // Negate the rotate amount and mask it to SEW-1 (i.e. (-amt) mod SEW) so the vror immediate stays canonical.
639 def InvRot8Imm : SDNodeXForm<imm, [{
640 return CurDAG->getTargetConstant(0x7 & (64 - N->getZExtValue()), SDLoc(N),
643 def InvRot16Imm : SDNodeXForm<imm, [{
644 return CurDAG->getTargetConstant(0xf & (64 - N->getZExtValue()), SDLoc(N),
647 def InvRot32Imm : SDNodeXForm<imm, [{
648 return CurDAG->getTargetConstant(0x1f & (64 - N->getZExtValue()), SDLoc(N),
651 def InvRot64Imm : SDNodeXForm<imm, [{
652 return CurDAG->getTargetConstant(0x3f & (64 - N->getZExtValue()), SDLoc(N),
656 // Although there is no vrol.vi, an immediate rotate left can be implemented as
657 // vror.vi with the rotate amount negated modulo SEW.
658 foreach vti = AllIntegerVectors in {
659 let Predicates = !listconcat([HasStdExtZvkb],
660 GetVTypePredicates<vti>.Predicates) in {
661 def : Pat<(vti.Vector (rotl vti.RegClass:$rs2,
662 (vti.Vector (SplatPat_uimm6 uimm6:$rs1)))),
663 (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX)
664 (vti.Vector (IMPLICIT_DEF)),
666 (!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
667 vti.AVL, vti.Log2SEW, TA_MA)>;
670 defm : VPatBinarySDNode_VV_VX_VI<rotr, "PseudoVROR", uimm6>;
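// vwsll: a left shift of a zero-extended narrow source produces the same
// 2*SEW-bit result as the widening shift itself. The *_oneuse predicates keep
// the combine from leaving both the extend and the widening shift live.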
672 foreach vtiToWti = AllWidenableIntVectors in {
673 defvar vti = vtiToWti.Vti;
674 defvar wti = vtiToWti.Wti;
675 let Predicates = !listconcat([HasStdExtZvbb],
676 GetVTypePredicates<vti>.Predicates,
677 GetVTypePredicates<wti>.Predicates) in {
678 def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
679 (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))),
680 (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX)
681 (wti.Vector (IMPLICIT_DEF)),
682 vti.RegClass:$rs2, vti.RegClass:$rs1,
683 vti.AVL, vti.Log2SEW, TA_MA)>;
685 def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
686 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))),
687 (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX)
688 (wti.Vector (IMPLICIT_DEF)),
689 vti.RegClass:$rs2, GPR:$rs1,
690 vti.AVL, vti.Log2SEW, TA_MA)>;
692 def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
693 (wti.Vector (SplatPat_uimm5 uimm5:$rs1))),
694 (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX)
695 (wti.Vector (IMPLICIT_DEF)),
696 vti.RegClass:$rs2, uimm5:$rs1,
697 vti.AVL, vti.Log2SEW, TA_MA)>;
701 //===----------------------------------------------------------------------===//
702 // VL patterns
703 //===----------------------------------------------------------------------===//
705 multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
706 Predicate predicate = HasStdExtZvbb> {
707 foreach vti = AllIntegerVectors in {
708 let Predicates = !listconcat([predicate],
709 GetVTypePredicates<vti>.Predicates) in {
710 def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
711 (vti.Vector vti.RegClass:$passthru),
714 (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
715 vti.RegClass:$passthru,
725 foreach vti = AllIntegerVectors in {
726 let Predicates = !listconcat([HasStdExtZvkb],
727 GetVTypePredicates<vti>.Predicates) in {
728 def : Pat<(vti.Vector (riscv_and_vl (riscv_xor_vl
729 (vti.Vector vti.RegClass:$rs1),
730 (riscv_splat_vector -1),
731 (vti.Vector vti.RegClass:$passthru),
734 (vti.Vector vti.RegClass:$rs2),
735 (vti.Vector vti.RegClass:$passthru),
738 (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX#"_MASK")
739 vti.RegClass:$passthru,
747 def : Pat<(vti.Vector (riscv_and_vl (riscv_splat_vector
748 (not vti.ScalarRegClass:$rs1)),
749 (vti.Vector vti.RegClass:$rs2),
750 (vti.Vector vti.RegClass:$passthru),
753 (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK")
754 vti.RegClass:$passthru,
756 vti.ScalarRegClass:$rs1,
764 defm : VPatUnaryVL_V<riscv_bitreverse_vl, "PseudoVBREV">;
765 defm : VPatUnaryVL_V<riscv_bswap_vl, "PseudoVREV8", HasStdExtZvkb>;
766 defm : VPatUnaryVL_V<riscv_ctlz_vl, "PseudoVCLZ">;
767 defm : VPatUnaryVL_V<riscv_cttz_vl, "PseudoVCTZ">;
768 defm : VPatUnaryVL_V<riscv_ctpop_vl, "PseudoVCPOP">;
770 defm : VPatBinaryVL_VV_VX<riscv_rotl_vl, "PseudoVROL">;
771 // Although there is no vrol.vi, an immediate rotate left can be implemented as
772 // vror.vi with the rotate amount negated modulo SEW.
773 foreach vti = AllIntegerVectors in {
774 let Predicates = !listconcat([HasStdExtZvkb],
775 GetVTypePredicates<vti>.Predicates) in {
776 def : Pat<(riscv_rotl_vl vti.RegClass:$rs2,
777 (vti.Vector (SplatPat_uimm6 uimm6:$rs1)),
778 (vti.Vector vti.RegClass:$passthru),
779 (vti.Mask V0), VLOpFrag),
780 (!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX#"_MASK")
781 vti.RegClass:$passthru,
783 (!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
784 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
787 defm : VPatBinaryVL_VV_VX_VI<riscv_rotr_vl, "PseudoVROR", uimm6>;
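// The same vwsll combines as in the SDNode section, applied to the VL
// (riscv_*_vl) nodes, plus patterns for the dedicated riscv_vwsll_vl node.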
789 foreach vtiToWti = AllWidenableIntVectors in {
790 defvar vti = vtiToWti.Vti;
791 defvar wti = vtiToWti.Wti;
792 let Predicates = !listconcat([HasStdExtZvbb],
793 GetVTypePredicates<vti>.Predicates,
794 GetVTypePredicates<wti>.Predicates) in {
795 def : Pat<(riscv_shl_vl
796 (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
797 (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1))),
798 (wti.Vector wti.RegClass:$passthru),
799 (vti.Mask V0), VLOpFrag),
800 (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
801 wti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
802 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
804 def : Pat<(riscv_shl_vl
805 (wti.Vector (riscv_zext_vl_oneuse
806 (vti.Vector vti.RegClass:$rs2),
807 (vti.Mask V0), VLOpFrag)),
808 (wti.Vector (riscv_ext_vl_oneuse
809 (vti.Vector vti.RegClass:$rs1),
810 (vti.Mask V0), VLOpFrag)),
811 (wti.Vector wti.RegClass:$passthru),
812 (vti.Mask V0), VLOpFrag),
813 (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
814 wti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
815 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
817 def : Pat<(riscv_shl_vl
818 (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
819 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
820 (wti.Vector wti.RegClass:$passthru),
821 (vti.Mask V0), VLOpFrag),
822 (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
823 wti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
824 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
826 def : Pat<(riscv_shl_vl
827 (wti.Vector (riscv_zext_vl_oneuse
828 (vti.Vector vti.RegClass:$rs2),
829 (vti.Mask V0), VLOpFrag)),
830 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
831 (wti.Vector wti.RegClass:$passthru),
832 (vti.Mask V0), VLOpFrag),
833 (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
834 wti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
835 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
837 def : Pat<(riscv_shl_vl
838 (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
839 (wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
840 (wti.Vector wti.RegClass:$passthru),
841 (vti.Mask V0), VLOpFrag),
842 (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
843 wti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$rs1,
844 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
846 def : Pat<(riscv_shl_vl
847 (wti.Vector (riscv_zext_vl_oneuse
848 (vti.Vector vti.RegClass:$rs2),
849 (vti.Mask V0), VLOpFrag)),
850 (wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
851 (wti.Vector wti.RegClass:$passthru),
852 (vti.Mask V0), VLOpFrag),
853 (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
854 wti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$rs1,
855 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
857 def : Pat<(riscv_vwsll_vl
858 (vti.Vector vti.RegClass:$rs2),
859 (vti.Vector vti.RegClass:$rs1),
860 (wti.Vector wti.RegClass:$passthru),
861 (vti.Mask V0), VLOpFrag),
862 (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
863 wti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
864 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
866 def : Pat<(riscv_vwsll_vl
867 (vti.Vector vti.RegClass:$rs2),
868 (vti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
869 (wti.Vector wti.RegClass:$passthru),
870 (vti.Mask V0), VLOpFrag),
871 (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
872 wti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
873 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
875 def : Pat<(riscv_vwsll_vl
876 (vti.Vector vti.RegClass:$rs2),
877 (vti.Vector (SplatPat_uimm5 uimm5:$rs1)),
878 (wti.Vector wti.RegClass:$passthru),
879 (vti.Mask V0), VLOpFrag),
880 (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
881 wti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$rs1,
882 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
886 //===----------------------------------------------------------------------===//
887 // Codegen patterns
888 //===----------------------------------------------------------------------===//
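// Patterns mapping the unmasked, tied-destination Zvk intrinsics onto the
// pseudos defined above: the intrinsic's first operand becomes the tied $rd of
// the pseudo and the trailing policy operand is forwarded unchanged.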
890 class VPatUnaryNoMask_Zvk<string intrinsic_name,
893 ValueType result_type,
897 VReg result_reg_class,
898 VReg op2_reg_class> :
899 Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
900 (result_type result_reg_class:$rd),
901 (op2_type op2_reg_class:$rs2),
902 VLOpFrag, (XLenVT timm:$policy))),
903 (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
904 (result_type result_reg_class:$rd),
905 (op2_type op2_reg_class:$rs2),
906 GPR:$vl, sew, (XLenVT timm:$policy))>;
908 class VPatUnaryNoMask_VS_Zvk<string intrinsic_name,
911 ValueType result_type,
916 VReg result_reg_class,
917 VReg op2_reg_class> :
918 Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
919 (result_type result_reg_class:$rd),
920 (op2_type op2_reg_class:$rs2),
921 VLOpFrag, (XLenVT timm:$policy))),
922 (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_"#vs2_lmul.MX)
923 (result_type result_reg_class:$rd),
924 (op2_type op2_reg_class:$rs2),
925 GPR:$vl, sew, (XLenVT timm:$policy))>;
927 multiclass VPatUnaryV_V_NoMask_Zvk<string intrinsic, string instruction,
928 list<VTypeInfo> vtilist> {
929 foreach vti = vtilist in
930 def : VPatUnaryNoMask_Zvk<intrinsic # "_vv", instruction, "VV",
931 vti.Vector, vti.Vector, vti.Log2SEW,
932 vti.LMul, vti.RegClass, vti.RegClass>;
935 multiclass VPatUnaryV_S_NoMaskVectorCrypto<string intrinsic, string instruction,
936 list<VTypeInfo> vtilist> {
937 foreach vti = vtilist in
938 foreach vti_vs2 = ZvkI32IntegerVectors<vti.LMul.MX>.vs2_types in
939 def : VPatUnaryNoMask_VS_Zvk<intrinsic # "_vs", instruction, "VS",
940 vti.Vector, vti_vs2.Vector, vti.Log2SEW,
941 vti.LMul, vti_vs2.LMul, vti.RegClass, vti_vs2.RegClass>;
944 multiclass VPatUnaryV_V_S_NoMask_Zvk<string intrinsic, string instruction,
945 list<VTypeInfo> vtilist> {
946 defm : VPatUnaryV_V_NoMask_Zvk<intrinsic, instruction, vtilist>;
947 defm : VPatUnaryV_S_NoMaskVectorCrypto<intrinsic, instruction, vtilist>;
950 multiclass VPatBinaryV_VV_NoMask<string intrinsic, string instruction,
951 list<VTypeInfo> vtilist,
952 bit isSEWAware = false> {
953 foreach vti = vtilist in
954 def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VV",
955 vti.Vector, vti.Vector, vti.Vector,
956 vti.Log2SEW, vti.LMul, vti.RegClass,
957 vti.RegClass, vti.RegClass,
958 isSEWAware = isSEWAware>;
961 multiclass VPatBinaryV_VI_NoMask<string intrinsic, string instruction,
962 list<VTypeInfo> vtilist,
963 Operand imm_type = tuimm5> {
964 foreach vti = vtilist in
965 def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VI",
966 vti.Vector, vti.Vector, XLenVT,
967 vti.Log2SEW, vti.LMul, vti.RegClass,
968 vti.RegClass, imm_type>;
971 multiclass VPatBinaryV_VI_NoMaskTU<string intrinsic, string instruction,
972 list<VTypeInfo> vtilist,
973 Operand imm_type = tuimm5> {
974 foreach vti = vtilist in
975 def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VI_" # vti.LMul.MX,
976 vti.Vector, vti.Vector, XLenVT, vti.Log2SEW,
977 vti.RegClass, vti.RegClass, imm_type>;
980 multiclass VPatBinaryV_VV_NoMaskTU<string intrinsic, string instruction,
981 list<VTypeInfo> vtilist> {
982 foreach vti = vtilist in
983 def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VV_" # vti.LMul.MX,
984 vti.Vector, vti.Vector, vti.Vector, vti.Log2SEW,
985 vti.RegClass, vti.RegClass, vti.RegClass>;
988 multiclass VPatBinaryV_VX_VROTATE<string intrinsic, string instruction,
989 list<VTypeInfo> vtilist, bit isSEWAware = 0> {
990 foreach vti = vtilist in {
991 defvar kind = "V"#vti.ScalarSuffix;
992 let Predicates = GetVTypePredicates<vti>.Predicates in
993 defm : VPatBinary<intrinsic,
995 instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
996 instruction#"_"#kind#"_"#vti.LMul.MX),
997 vti.Vector, vti.Vector, XLenVT, vti.Mask,
998 vti.Log2SEW, vti.RegClass,
999 vti.RegClass, vti.ScalarRegClass>;
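// There is no vrol.vi, so the vrol intrinsic with an immediate amount is
// selected as vror.vi with the amount negated via InvRot64Imm, for both the
// unmasked and the masked intrinsic.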
1003 multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
1004 list<VTypeInfo> vtilist, bit isSEWAware = 0> {
1005 foreach vti = vtilist in {
1006 defvar Intr = !cast<Intrinsic>(intrinsic);
1007 defvar Pseudo = !cast<Instruction>(
1008 !if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW,
1009 instruction#"_VI_"#vti.LMul.MX));
1010 let Predicates = GetVTypePredicates<vti>.Predicates in
1011 def : Pat<(vti.Vector (Intr (vti.Vector vti.RegClass:$passthru),
1012 (vti.Vector vti.RegClass:$rs2),
1013 (XLenVT uimm6:$rs1),
1015 (Pseudo (vti.Vector vti.RegClass:$passthru),
1016 (vti.Vector vti.RegClass:$rs2),
1017 (InvRot64Imm uimm6:$rs1),
1018 GPR:$vl, vti.Log2SEW, TU_MU)>;
1020 defvar IntrMask = !cast<Intrinsic>(intrinsic#"_mask");
1021 defvar PseudoMask = !cast<Instruction>(
1022 !if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK",
1023 instruction#"_VI_"#vti.LMul.MX#"_MASK"));
1024 let Predicates = GetVTypePredicates<vti>.Predicates in
1025 def : Pat<(vti.Vector (IntrMask (vti.Vector vti.RegClass:$passthru),
1026 (vti.Vector vti.RegClass:$rs2),
1027 (XLenVT uimm6:$rs1),
1029 VLOpFrag, (XLenVT timm:$policy))),
1030 (PseudoMask (vti.Vector vti.RegClass:$passthru),
1031 (vti.Vector vti.RegClass:$rs2),
1032 (InvRot64Imm uimm6:$rs1),
1034 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
1038 multiclass VPatBinaryV_VV_VX_VROL<string intrinsic, string instruction,
1039 string instruction2, list<VTypeInfo> vtilist>
1040 : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
1041 VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
1042 VPatBinaryV_VI_VROL<intrinsic, instruction2, vtilist>;
1044 multiclass VPatBinaryV_VV_VX_VI_VROR<string intrinsic, string instruction,
1045 list<VTypeInfo> vtilist>
1046 : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
1047 VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
1048 VPatBinaryV_VI<intrinsic, instruction, vtilist, uimm6>;
1050 multiclass VPatBinaryW_VV_VX_VI_VWSLL<string intrinsic, string instruction,
1051 list<VTypeInfoToWide> vtilist>
1052 : VPatBinaryW_VV<intrinsic, instruction, vtilist> {
1053 foreach VtiToWti = vtilist in {
1054 defvar Vti = VtiToWti.Vti;
1055 defvar Wti = VtiToWti.Wti;
1056 defvar kind = "V"#Vti.ScalarSuffix;
1057 let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
1058 GetVTypePredicates<Wti>.Predicates) in {
1059 defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
1060 Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
1061 Vti.Log2SEW, Wti.RegClass,
1062 Vti.RegClass, Vti.ScalarRegClass>;
1063 defm : VPatBinary<intrinsic, instruction # "_VI_" # Vti.LMul.MX,
1064 Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
1065 Vti.Log2SEW, Wti.RegClass,
1066 Vti.RegClass, uimm5>;
1071 let Predicates = [HasStdExtZvbb] in {
1072 defm : VPatUnaryV_V<"int_riscv_vbrev", "PseudoVBREV", AllIntegerVectors>;
1073 defm : VPatUnaryV_V<"int_riscv_vclz", "PseudoVCLZ", AllIntegerVectors>;
1074 defm : VPatUnaryV_V<"int_riscv_vctz", "PseudoVCTZ", AllIntegerVectors>;
1075 defm : VPatUnaryV_V<"int_riscv_vcpopv", "PseudoVCPOP", AllIntegerVectors>;
1076 defm : VPatBinaryW_VV_VX_VI_VWSLL<"int_riscv_vwsll", "PseudoVWSLL", AllWidenableIntVectors>;
1077 } // Predicates = [HasStdExtZvbb]
1079 let Predicates = [HasStdExtZvbc] in {
1080 defm : VPatBinaryV_VV_VX<"int_riscv_vclmul", "PseudoVCLMUL", I64IntegerVectors>;
1081 defm : VPatBinaryV_VV_VX<"int_riscv_vclmulh", "PseudoVCLMULH", I64IntegerVectors>;
1082 } // Predicates = [HasStdExtZvbc]
1084 let Predicates = [HasStdExtZvkb] in {
1085 defm : VPatBinaryV_VV_VX<"int_riscv_vandn", "PseudoVANDN", AllIntegerVectors>;
1086 defm : VPatUnaryV_V<"int_riscv_vbrev8", "PseudoVBREV8", AllIntegerVectors>;
1087 defm : VPatUnaryV_V<"int_riscv_vrev8", "PseudoVREV8", AllIntegerVectors>;
1088 defm : VPatBinaryV_VV_VX_VROL<"int_riscv_vrol", "PseudoVROL", "PseudoVROR", AllIntegerVectors>;
1089 defm : VPatBinaryV_VV_VX_VI_VROR<"int_riscv_vror", "PseudoVROR", AllIntegerVectors>;
1090 } // Predicates = [HasStdExtZvkb]
1092 let Predicates = [HasStdExtZvkg] in {
1093 defm : VPatBinaryV_VV_NoMask<"int_riscv_vghsh", "PseudoVGHSH", I32IntegerVectors>;
1094 defm : VPatUnaryV_V_NoMask_Zvk<"int_riscv_vgmul", "PseudoVGMUL", I32IntegerVectors>;
1095 } // Predicates = [HasStdExtZvkg]
1097 let Predicates = [HasStdExtZvkned] in {
1098 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdf", "PseudoVAESDF", I32IntegerVectors>;
1099 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdm", "PseudoVAESDM", I32IntegerVectors>;
1100 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesef", "PseudoVAESEF", I32IntegerVectors>;
1101 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesem", "PseudoVAESEM", I32IntegerVectors>;
1102 defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vaeskf1", "PseudoVAESKF1", I32IntegerVectors>;
1103 defm : VPatBinaryV_VI_NoMask<"int_riscv_vaeskf2", "PseudoVAESKF2", I32IntegerVectors>;
1104 defm : VPatUnaryV_S_NoMaskVectorCrypto<"int_riscv_vaesz", "PseudoVAESZ", I32IntegerVectors>;
1105 } // Predicates = [HasStdExtZvkned]
1107 let Predicates = [HasStdExtZvknha] in {
1108 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH", I32IntegerVectors>;
1109 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CL", I32IntegerVectors>;
1110 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS", I32IntegerVectors, isSEWAware=true>;
1111 } // Predicates = [HasStdExtZvknha]
1113 let Predicates = [HasStdExtZvknhb] in {
1114 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH", I32I64IntegerVectors>;
1115 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CL", I32I64IntegerVectors>;
1116 defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS", I32I64IntegerVectors, isSEWAware=true>;
1117 } // Predicates = [HasStdExtZvknhb]
1119 let Predicates = [HasStdExtZvksed] in {
1120 defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vsm4k", "PseudoVSM4K", I32IntegerVectors>;
1121 defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vsm4r", "PseudoVSM4R", I32IntegerVectors>;
1122 } // Predicates = [HasStdExtZvksed]
1124 let Predicates = [HasStdExtZvksh] in {
1125 defm : VPatBinaryV_VI_NoMask<"int_riscv_vsm3c", "PseudoVSM3C", I32IntegerVectors>;
1126 defm : VPatBinaryV_VV_NoMaskTU<"int_riscv_vsm3me", "PseudoVSM3ME", I32IntegerVectors>;
1127 } // Predicates = [HasStdExtZvksh]