1 //===-- RISCVInstrInfoZb.td - RISC-V Bitmanip instructions -*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the RISC-V instructions from the standard Bitmanip
10 // extensions, versions:
16 // This file also describes RISC-V instructions from the Zbk* extensions in
17 // Cryptography Extensions Volume I: Scalar & Entropy Source Instructions,
23 //===----------------------------------------------------------------------===//
25 //===----------------------------------------------------------------------===//
26 // Operand and SDNode transformation definitions.
27 //===----------------------------------------------------------------------===//
// Target-specific SelectionDAG nodes emitted by RISC-V ISel lowering for the
// Bitmanip extensions; each maps 1:1 onto a RISCVISD opcode. The *W variants
// operate on the low 32 bits of a 64-bit register (RV64 "word" operations).
29 def riscv_clzw : SDNode<"RISCVISD::CLZW", SDT_RISCVIntUnaryOpW>;
30 def riscv_ctzw : SDNode<"RISCVISD::CTZW", SDT_RISCVIntUnaryOpW>;
31 def riscv_rolw : SDNode<"RISCVISD::ROLW", SDT_RISCVIntBinOpW>;
32 def riscv_rorw : SDNode<"RISCVISD::RORW", SDT_RISCVIntBinOpW>;
33 def riscv_brev8 : SDNode<"RISCVISD::BREV8", SDTIntUnaryOp>;
34 def riscv_orc_b : SDNode<"RISCVISD::ORC_B", SDTIntUnaryOp>;
35 def riscv_zip : SDNode<"RISCVISD::ZIP", SDTIntUnaryOp>;
36 def riscv_unzip : SDNode<"RISCVISD::UNZIP", SDTIntUnaryOp>;
37 def riscv_absw : SDNode<"RISCVISD::ABSW", SDTIntUnaryOp>;
38 def riscv_clmul : SDNode<"RISCVISD::CLMUL", SDTIntBinOp>;
39 def riscv_clmulh : SDNode<"RISCVISD::CLMULH", SDTIntBinOp>;
40 def riscv_clmulr : SDNode<"RISCVISD::CLMULR", SDTIntBinOp>;
// Assembler operand class for an unsigned immediate in [0, XLen/2).
42 def UImmLog2XLenHalfAsmOperand : AsmOperandClass {
43 let Name = "UImmLog2XLenHalf";
44 let RenderMethod = "addImmOperands";
45 let DiagnosticType = "InvalidUImmLog2XLenHalf";
// Shuffle-style immediate: validated as uimm5 on RV64 and uimm4 on RV32,
// both at ISel time (ImmLeaf) and at MC level (MCOperandPredicate).
48 def shfl_uimm : RISCVOp, ImmLeaf<XLenVT, [{
49 if (Subtarget->is64Bit())
50 return isUInt<5>(Imm);
51 return isUInt<4>(Imm);
53 let ParserMatchClass = UImmLog2XLenHalfAsmOperand;
54 let DecoderMethod = "decodeUImmOperand<5>";
55 let OperandType = "OPERAND_UIMM_SHFL";
56 let MCOperandPredicate = [{
58 if (!MCOp.evaluateAsConstantImm(Imm))
60 if (STI.getTargetTriple().isArch64Bit())
61 return isUInt<5>(Imm);
62 return isUInt<4>(Imm);
// Convert a mask with a single clear bit into the index of that bit
// (countr_one of the mask), for use as the BCLRI shift amount.
66 def BCLRXForm : SDNodeXForm<imm, [{
68 return CurDAG->getTargetConstant(llvm::countr_one(N->getZExtValue()),
69 SDLoc(N), N->getValueType(0));
// Convert a single-set-bit mask into the index of that bit (countr_zero),
// for use as the BSETI/BINVI/BEXTI shift amount.
72 def SingleBitSetMaskToIndex : SDNodeXForm<imm, [{
74 return CurDAG->getTargetConstant(llvm::countr_zero(N->getZExtValue()),
75 SDLoc(N), N->getValueType(0));
78 // Checks if this mask has a single 0 bit and cannot be used with ANDI.
// (simm12 masks are rejected since ANDI handles them in one instruction.)
79 def BCLRMask : ImmLeaf<XLenVT, [{
80 if (Subtarget->is64Bit())
81 return !isInt<12>(Imm) && isPowerOf2_64(~Imm);
82 return !isInt<12>(Imm) && isPowerOf2_32(~Imm);
85 // Checks if this mask has a single 1 bit and cannot be used with ORI/XORI.
86 def SingleBitSetMask : ImmLeaf<XLenVT, [{
87 if (Subtarget->is64Bit())
88 return !isInt<12>(Imm) && isPowerOf2_64(Imm);
89 return !isInt<12>(Imm) && isPowerOf2_32(Imm);
90 }], SingleBitSetMaskToIndex>;
92 // Check if (or r, i) can be optimized to (BSETI (BSETI r, i0), i1),
93 // in which i = (1 << i0) | (1 << i1).
94 def BSETINVTwoBitsMask : PatLeaf<(imm), [{
97 // The immediate should not be a simm12.
98 if (isInt<12>(N->getSExtValue()))
100 // The immediate must have exactly two bits set.
101 return llvm::popcount(N->getZExtValue()) == 2;
// Extract the index of the higher of the two set bits (Log2 of the value).
104 def BSETINVTwoBitsMaskHigh : SDNodeXForm<imm, [{
105 uint64_t I = N->getZExtValue();
106 return CurDAG->getTargetConstant(llvm::Log2_64(I), SDLoc(N),
110 // Check if (or r, imm) can be optimized to (BSETI (ORI r, i0), i1),
111 // in which imm = i0 | (1 << i1).
112 def BSETINVORIMask : PatLeaf<(imm), [{
115 // The immediate should not be a simm12.
116 if (isInt<12>(N->getSExtValue()))
118 // There should be only one set bit from bit 11 to the top.
119 return isPowerOf2_64(N->getZExtValue() & ~0x7ff);
// Extract the low 11 bits of the immediate (the ORI/XORI portion).
122 def BSETINVORIMaskLow : SDNodeXForm<imm, [{
123 return CurDAG->getTargetConstant(N->getZExtValue() & 0x7ff,
124 SDLoc(N), N->getValueType(0));
127 // Check if (and r, i) can be optimized to (BCLRI (BCLRI r, i0), i1),
128 // in which i = ~((1<<i0) | (1<<i1)).
129 def BCLRITwoBitsMask : PatLeaf<(imm), [{
132 // The immediate should not be a simm12.
133 if (isInt<12>(N->getSExtValue()))
135 // The immediate must have exactly two bits clear.
136 return (unsigned)llvm::popcount(N->getZExtValue()) == Subtarget->getXLen() - 2;
// Extract the index of the lower of the two clear bits.
139 def BCLRITwoBitsMaskLow : SDNodeXForm<imm, [{
140 return CurDAG->getTargetConstant(llvm::countr_zero(~N->getZExtValue()),
141 SDLoc(N), N->getValueType(0));
// Extract the index of the higher of the two clear bits. On RV32 the upper
// 32 bits of the sign-extended value are forced to ones first so that
// Log2_64(~I) cannot land in the (nonexistent) upper word.
144 def BCLRITwoBitsMaskHigh : SDNodeXForm<imm, [{
145 uint64_t I = N->getSExtValue();
146 if (!Subtarget->is64Bit())
147 I |= 0xffffffffull << 32;
148 return CurDAG->getTargetConstant(llvm::Log2_64(~I), SDLoc(N),
152 // Check if (and r, i) can be optimized to (BCLRI (ANDI r, i0), i1),
153 // in which i = i0 & ~(1<<i1).
154 def BCLRIANDIMask : PatLeaf<(imm), [{
157 // The immediate should not be a simm12.
158 if (isInt<12>(N->getSExtValue()))
160 // There should be only one clear bit from bit 11 to the top.
161 uint64_t I = N->getZExtValue() | 0x7ff;
162 return Subtarget->is64Bit() ? isPowerOf2_64(~I) : isPowerOf2_32(~I);
// Extract the low 11 bits and sign-extend ones above them (the ANDI portion).
165 def BCLRIANDIMaskLow : SDNodeXForm<imm, [{
166 return CurDAG->getTargetConstant((N->getZExtValue() & 0x7ff) | ~0x7ffull,
167 SDLoc(N), N->getValueType(0));
// Constant of the form (3 << C), C >= 1: matchable as SH1ADD followed by SLLI.
170 def C3LeftShift : PatLeaf<(imm), [{
171 uint64_t C = N->getZExtValue();
172 return C > 3 && (C >> llvm::countr_zero(C)) == 3;
// Constant of the form (5 << C), C >= 1: matchable as SH2ADD followed by SLLI.
175 def C5LeftShift : PatLeaf<(imm), [{
176 uint64_t C = N->getZExtValue();
177 return C > 5 && (C >> llvm::countr_zero(C)) == 5;
// Constant of the form (9 << C), C >= 1: matchable as SH3ADD followed by SLLI.
180 def C9LeftShift : PatLeaf<(imm), [{
181 uint64_t C = N->getZExtValue();
182 return C > 9 && (C >> llvm::countr_zero(C)) == 9;
185 // Constant of the form (3 << C) where C is less than 32 (and at least 1).
186 def C3LeftShiftUW : PatLeaf<(imm), [{
187 uint64_t C = N->getZExtValue();
188 unsigned Shift = llvm::countr_zero(C);
189 return 1 <= Shift && Shift < 32 && (C >> Shift) == 3;
192 // Constant of the form (5 << C) where C is less than 32 (and at least 1).
193 def C5LeftShiftUW : PatLeaf<(imm), [{
194 uint64_t C = N->getZExtValue();
195 unsigned Shift = llvm::countr_zero(C);
196 return 1 <= Shift && Shift < 32 && (C >> Shift) == 5;
199 // Constant of the form (9 << C) where C is less than 32 (and at least 1).
200 def C9LeftShiftUW : PatLeaf<(imm), [{
201 uint64_t C = N->getZExtValue();
202 unsigned Shift = llvm::countr_zero(C);
203 return 1 <= Shift && Shift < 32 && (C >> Shift) == 9;
// A constant that is 4 * simm12 (so ADDI + SH2ADD can materialize the add).
206 def CSImm12MulBy4 : PatLeaf<(imm), [{
209 int64_t C = N->getSExtValue();
210 // Skip if C is simm12, an lui, or can be optimized by the PatLeaf AddiPair.
211 return !isInt<13>(C) && !isShiftedInt<20, 12>(C) && isShiftedInt<12, 2>(C);
// A constant that is 8 * simm12 (so ADDI + SH3ADD can materialize the add).
214 def CSImm12MulBy8 : PatLeaf<(imm), [{
217 int64_t C = N->getSExtValue();
218 // Skip if C is simm12, an lui or can be optimized by the PatLeaf AddiPair or
220 return !isInt<14>(C) && !isShiftedInt<20, 12>(C) && isShiftedInt<12, 3>(C);
// Arithmetic-shift the immediate right by 2 (divide by 4) for CSImm12MulBy4.
223 def SimmShiftRightBy2XForm : SDNodeXForm<imm, [{
224 return CurDAG->getTargetConstant(N->getSExtValue() >> 2, SDLoc(N),
// Arithmetic-shift the immediate right by 3 (divide by 8) for CSImm12MulBy8.
228 def SimmShiftRightBy3XForm : SDNodeXForm<imm, [{
229 return CurDAG->getTargetConstant(N->getSExtValue() >> 3, SDLoc(N),
233 // Pattern to exclude simm12 immediates from matching, namely `non_imm12`.
234 // GISel currently doesn't support PatFrag for leaf nodes, so `non_imm12`
235 // cannot be implemented in that way. To reuse patterns between the two
236 // ISels, we instead create PatFrag on operators that use `non_imm12`.
237 class binop_with_non_imm12<SDPatternOperator binop>
238 : PatFrag<(ops node:$x, node:$y), (binop node:$x, node:$y), [{
239 auto *C = dyn_cast<ConstantSDNode>(Operands[1]);
240 return !C || !isInt<12>(C->getSExtValue());
242 let PredicateCodeUsesOperands = 1;
// GISel equivalent of the SDAG predicate above: look through the vreg for a
// constant and reject it if it fits in a signed 12-bit immediate.
243 let GISelPredicateCode = [{
244 const MachineOperand &ImmOp = *Operands[1];
245 const MachineFunction &MF = *MI.getParent()->getParent();
246 const MachineRegisterInfo &MRI = MF.getRegInfo();
248 if (ImmOp.isReg() && ImmOp.getReg())
249 if (auto Val = getIConstantVRegValWithLookThrough(ImmOp.getReg(), MRI)) {
250 // We do NOT want immediates that fit in 12 bits.
251 return !isInt<12>(Val->Value.getSExtValue());
257 def add_non_imm12 : binop_with_non_imm12<add>;
258 def or_is_add_non_imm12 : binop_with_non_imm12<or_is_add>;
// Matches a mask that is 0xFFFFFFFF shifted left by 1..31 bits, i.e. a
// shifted run of exactly 32 ones (selectable as SRLI + SLLI_UW).
260 def Shifted32OnesMask : IntImmLeaf<XLenVT, [{
261 if (!Imm.isShiftedMask())
264 unsigned TrailingZeros = Imm.countr_zero();
265 return TrailingZeros > 0 && TrailingZeros < 32 &&
266 Imm == UINT64_C(0xFFFFFFFF) << TrailingZeros;
// ComplexPatterns handing the harder shNadd(.uw) operand forms to C++
// selection code in RISCVISelDAGToDAG.
269 def sh1add_op : ComplexPattern<XLenVT, 1, "selectSHXADDOp<1>", [], [], 6>;
270 def sh2add_op : ComplexPattern<XLenVT, 1, "selectSHXADDOp<2>", [], [], 6>;
271 def sh3add_op : ComplexPattern<XLenVT, 1, "selectSHXADDOp<3>", [], [], 6>;
273 def sh1add_uw_op : ComplexPattern<XLenVT, 1, "selectSHXADD_UWOp<1>", [], [], 6>;
274 def sh2add_uw_op : ComplexPattern<XLenVT, 1, "selectSHXADD_UWOp<2>", [], [], 6>;
275 def sh3add_uw_op : ComplexPattern<XLenVT, 1, "selectSHXADD_UWOp<3>", [], [], 6>;
277 //===----------------------------------------------------------------------===//
278 // Instruction class templates
279 //===----------------------------------------------------------------------===//
// Unary Bitmanip op: full 12-bit immediate field encodes the operation.
281 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
282 class RVBUnary<bits<12> imm12, bits<3> funct3,
283 RISCVOpcode opcode, string opcodestr>
284 : RVInstIUnary<imm12, funct3, opcode, (outs GPR:$rd), (ins GPR:$rs1),
285 opcodestr, "$rd, $rs1">;
// Shift-immediate Bitmanip op with an XLen-dependent shamt (uimmlog2xlen).
287 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
288 class RVBShift_ri<bits<5> imm11_7, bits<3> funct3, RISCVOpcode opcode,
290 : RVInstIShift<imm11_7, funct3, opcode, (outs GPR:$rd),
291 (ins GPR:$rs1, uimmlog2xlen:$shamt), opcodestr,
292 "$rd, $rs1, $shamt">;
// Word (RV64 *W) shift-immediate Bitmanip op with a 5-bit shamt.
294 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
295 class RVBShiftW_ri<bits<7> imm11_5, bits<3> funct3, RISCVOpcode opcode,
297 : RVInstIShiftW<imm11_5, funct3, opcode, (outs GPR:$rd),
298 (ins GPR:$rs1, uimm5:$shamt), opcodestr,
299 "$rd, $rs1, $shamt">;
301 //===----------------------------------------------------------------------===//
303 //===----------------------------------------------------------------------===//
// Logic-with-complement instructions (Zbb/Zbkb).
305 let Predicates = [HasStdExtZbbOrZbkb] in {
306 def ANDN : ALU_rr<0b0100000, 0b111, "andn">,
307 Sched<[WriteIALU, ReadIALU, ReadIALU]>;
308 def ORN : ALU_rr<0b0100000, 0b110, "orn">,
309 Sched<[WriteIALU, ReadIALU, ReadIALU]>;
310 def XNOR : ALU_rr<0b0100000, 0b100, "xnor">,
311 Sched<[WriteIALU, ReadIALU, ReadIALU]>;
312 } // Predicates = [HasStdExtZbbOrZbkb]
// Shift-and-add for address generation (Zba): rd = (rs1 << N) + rs2.
314 let Predicates = [HasStdExtZba] in {
315 def SH1ADD : ALU_rr<0b0010000, 0b010, "sh1add">,
316 Sched<[WriteSHXADD, ReadSHXADD, ReadSHXADD]>;
317 def SH2ADD : ALU_rr<0b0010000, 0b100, "sh2add">,
318 Sched<[WriteSHXADD, ReadSHXADD, ReadSHXADD]>;
319 def SH3ADD : ALU_rr<0b0010000, 0b110, "sh3add">,
320 Sched<[WriteSHXADD, ReadSHXADD, ReadSHXADD]>;
321 } // Predicates = [HasStdExtZba]
// RV64-only Zba: the .uw forms zero-extend the low 32 bits of rs1 first.
323 let Predicates = [HasStdExtZba, IsRV64] in {
324 def SLLI_UW : RVBShift_ri<0b00001, 0b001, OPC_OP_IMM_32, "slli.uw">,
325 Sched<[WriteShiftImm32, ReadShiftImm32]>;
326 def ADD_UW : ALUW_rr<0b0000100, 0b000, "add.uw">,
327 Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
328 def SH1ADD_UW : ALUW_rr<0b0010000, 0b010, "sh1add.uw">,
329 Sched<[WriteSHXADD32, ReadSHXADD32, ReadSHXADD32]>;
330 def SH2ADD_UW : ALUW_rr<0b0010000, 0b100, "sh2add.uw">,
331 Sched<[WriteSHXADD32, ReadSHXADD32, ReadSHXADD32]>;
332 def SH3ADD_UW : ALUW_rr<0b0010000, 0b110, "sh3add.uw">,
333 Sched<[WriteSHXADD32, ReadSHXADD32, ReadSHXADD32]>;
334 } // Predicates = [HasStdExtZba, IsRV64]
// Rotates (Zbb/Zbkb). Only rotate-right has an immediate form; see the
// rotate-left patterns below which negate the shift amount.
336 let Predicates = [HasStdExtZbbOrZbkb] in {
337 def ROL : ALU_rr<0b0110000, 0b001, "rol">,
338 Sched<[WriteRotateReg, ReadRotateReg, ReadRotateReg]>;
339 def ROR : ALU_rr<0b0110000, 0b101, "ror">,
340 Sched<[WriteRotateReg, ReadRotateReg, ReadRotateReg]>;
342 def RORI : RVBShift_ri<0b01100, 0b101, OPC_OP_IMM, "rori">,
343 Sched<[WriteRotateImm, ReadRotateImm]>;
344 } // Predicates = [HasStdExtZbbOrZbkb]
346 let Predicates = [HasStdExtZbbOrZbkb, IsRV64], IsSignExtendingOpW = 1 in {
347 def ROLW : ALUW_rr<0b0110000, 0b001, "rolw">,
348 Sched<[WriteRotateReg32, ReadRotateReg32, ReadRotateReg32]>;
349 def RORW : ALUW_rr<0b0110000, 0b101, "rorw">,
350 Sched<[WriteRotateReg32, ReadRotateReg32, ReadRotateReg32]>;
352 def RORIW : RVBShiftW_ri<0b0110000, 0b101, OPC_OP_IMM_32, "roriw">,
353 Sched<[WriteRotateImm32, ReadRotateImm32]>;
354 } // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
// Single-bit manipulation (Zbs): clear/set/invert/extract one bit.
356 let Predicates = [HasStdExtZbs] in {
357 def BCLR : ALU_rr<0b0100100, 0b001, "bclr">,
358 Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
359 def BSET : ALU_rr<0b0010100, 0b001, "bset">,
360 Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
361 def BINV : ALU_rr<0b0110100, 0b001, "binv">,
362 Sched<[WriteSingleBit, ReadSingleBit, ReadSingleBit]>;
363 let IsSignExtendingOpW = 1 in
364 def BEXT : ALU_rr<0b0100100, 0b101, "bext">,
365 Sched<[WriteBEXT, ReadSingleBit, ReadSingleBit]>;
367 def BCLRI : RVBShift_ri<0b01001, 0b001, OPC_OP_IMM, "bclri">,
368 Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
369 def BSETI : RVBShift_ri<0b00101, 0b001, OPC_OP_IMM, "bseti">,
370 Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
371 def BINVI : RVBShift_ri<0b01101, 0b001, OPC_OP_IMM, "binvi">,
372 Sched<[WriteSingleBitImm, ReadSingleBitImm]>;
373 let IsSignExtendingOpW = 1 in
374 def BEXTI : RVBShift_ri<0b01001, 0b101, OPC_OP_IMM, "bexti">,
375 Sched<[WriteBEXTI, ReadSingleBitImm]>;
376 } // Predicates = [HasStdExtZbs]
378 // These instructions were named xperm.n and xperm.b in the last version of
379 // the draft bit manipulation specification they were included in. However, we
380 // use the mnemonics given to them in the ratified Zbkx extension.
381 let Predicates = [HasStdExtZbkx] in {
382 def XPERM4 : ALU_rr<0b0010100, 0b010, "xperm4">,
383 Sched<[WriteXPERM, ReadXPERM, ReadXPERM]>;
384 def XPERM8 : ALU_rr<0b0010100, 0b100, "xperm8">,
385 Sched<[WriteXPERM, ReadXPERM, ReadXPERM]>;
386 } // Predicates = [HasStdExtZbkx]
// Bit-counting instructions (Zbb): leading/trailing zeros and popcount.
388 let Predicates = [HasStdExtZbb], IsSignExtendingOpW = 1 in {
389 def CLZ : RVBUnary<0b011000000000, 0b001, OPC_OP_IMM, "clz">,
390 Sched<[WriteCLZ, ReadCLZ]>;
391 def CTZ : RVBUnary<0b011000000001, 0b001, OPC_OP_IMM, "ctz">,
392 Sched<[WriteCTZ, ReadCTZ]>;
393 def CPOP : RVBUnary<0b011000000010, 0b001, OPC_OP_IMM, "cpop">,
394 Sched<[WriteCPOP, ReadCPOP]>;
395 } // Predicates = [HasStdExtZbb]
// RV64 word variants of the counting instructions.
397 let Predicates = [HasStdExtZbb, IsRV64], IsSignExtendingOpW = 1 in {
398 def CLZW : RVBUnary<0b011000000000, 0b001, OPC_OP_IMM_32, "clzw">,
399 Sched<[WriteCLZ32, ReadCLZ32]>;
400 def CTZW : RVBUnary<0b011000000001, 0b001, OPC_OP_IMM_32, "ctzw">,
401 Sched<[WriteCTZ32, ReadCTZ32]>;
402 def CPOPW : RVBUnary<0b011000000010, 0b001, OPC_OP_IMM_32, "cpopw">,
403 Sched<[WriteCPOP32, ReadCPOP32]>;
404 } // Predicates = [HasStdExtZbb, IsRV64]
// Sign-extension of byte/halfword (Zbb).
406 let Predicates = [HasStdExtZbb], IsSignExtendingOpW = 1 in {
407 def SEXT_B : RVBUnary<0b011000000100, 0b001, OPC_OP_IMM, "sext.b">,
408 Sched<[WriteIALU, ReadIALU]>;
409 def SEXT_H : RVBUnary<0b011000000101, 0b001, OPC_OP_IMM, "sext.h">,
410 Sched<[WriteIALU, ReadIALU]>;
411 } // Predicates = [HasStdExtZbb]
// Carry-less multiply (clmulr is Zbc-only; clmul/clmulh are also in Zbkc).
413 let Predicates = [HasStdExtZbc] in {
414 def CLMULR : ALU_rr<0b0000101, 0b010, "clmulr", Commutable=1>,
415 Sched<[WriteCLMUL, ReadCLMUL, ReadCLMUL]>;
416 } // Predicates = [HasStdExtZbc]
418 let Predicates = [HasStdExtZbcOrZbkc] in {
419 def CLMUL : ALU_rr<0b0000101, 0b001, "clmul", Commutable=1>,
420 Sched<[WriteCLMUL, ReadCLMUL, ReadCLMUL]>;
421 def CLMULH : ALU_rr<0b0000101, 0b011, "clmulh", Commutable=1>,
422 Sched<[WriteCLMUL, ReadCLMUL, ReadCLMUL]>;
423 } // Predicates = [HasStdExtZbcOrZbkc]
// Integer min/max (Zbb).
425 let Predicates = [HasStdExtZbb] in {
426 def MIN : ALU_rr<0b0000101, 0b100, "min", Commutable=1>,
427 Sched<[WriteIALU, ReadIALU, ReadIALU]>;
428 def MINU : ALU_rr<0b0000101, 0b101, "minu", Commutable=1>,
429 Sched<[WriteIALU, ReadIALU, ReadIALU]>;
430 def MAX : ALU_rr<0b0000101, 0b110, "max", Commutable=1>,
431 Sched<[WriteIALU, ReadIALU, ReadIALU]>;
432 def MAXU : ALU_rr<0b0000101, 0b111, "maxu", Commutable=1>,
433 Sched<[WriteIALU, ReadIALU, ReadIALU]>;
434 } // Predicates = [HasStdExtZbb]
// Packing instructions (Zbkb): concatenate low halves/bytes of the sources.
436 let Predicates = [HasStdExtZbkb] in {
437 def PACK : ALU_rr<0b0000100, 0b100, "pack">,
438 Sched<[WritePACK, ReadPACK, ReadPACK]>;
439 let IsSignExtendingOpW = 1 in
440 def PACKH : ALU_rr<0b0000100, 0b111, "packh">,
441 Sched<[WritePACK, ReadPACK, ReadPACK]>;
442 } // Predicates = [HasStdExtZbkb]
444 let Predicates = [HasStdExtZbkb, IsRV64], IsSignExtendingOpW = 1 in
445 def PACKW : ALUW_rr<0b0000100, 0b100, "packw">,
446 Sched<[WritePACK32, ReadPACK32, ReadPACK32]>;
// zext.h has different encodings on RV32 (OPC_OP) and RV64 (OPC_OP_32),
// so separate records are needed.
448 let Predicates = [HasStdExtZbb, IsRV32] in {
449 def ZEXT_H_RV32 : RVBUnary<0b000010000000, 0b100, OPC_OP, "zext.h">,
450 Sched<[WriteIALU, ReadIALU]>;
451 } // Predicates = [HasStdExtZbb, IsRV32]
453 let Predicates = [HasStdExtZbb, IsRV64], IsSignExtendingOpW = 1 in {
454 def ZEXT_H_RV64 : RVBUnary<0b000010000000, 0b100, OPC_OP_32, "zext.h">,
455 Sched<[WriteIALU, ReadIALU]>;
456 } // Predicates = [HasStdExtZbb, IsRV64]
// rev8's immediate encodes XLen, so RV32 and RV64 use different records.
458 let Predicates = [HasStdExtZbbOrZbkb, IsRV32] in {
459 def REV8_RV32 : RVBUnary<0b011010011000, 0b101, OPC_OP_IMM, "rev8">,
460 Sched<[WriteREV8, ReadREV8]>;
461 } // Predicates = [HasStdExtZbbOrZbkb, IsRV32]
463 let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
464 def REV8_RV64 : RVBUnary<0b011010111000, 0b101, OPC_OP_IMM, "rev8">,
465 Sched<[WriteREV8, ReadREV8]>;
466 } // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
468 let Predicates = [HasStdExtZbb] in {
469 def ORC_B : RVBUnary<0b001010000111, 0b101, OPC_OP_IMM, "orc.b">,
470 Sched<[WriteORCB, ReadORCB]>;
471 } // Predicates = [HasStdExtZbb]
473 let Predicates = [HasStdExtZbkb] in
474 def BREV8 : RVBUnary<0b011010000111, 0b101, OPC_OP_IMM, "brev8">,
475 Sched<[WriteBREV8, ReadBREV8]>;
// zip/unzip exist only on RV32 (Zbkb).
477 let Predicates = [HasStdExtZbkb, IsRV32] in {
478 def ZIP_RV32 : RVBUnary<0b000010001111, 0b001, OPC_OP_IMM, "zip">,
479 Sched<[WriteZIP, ReadZIP]>;
480 def UNZIP_RV32 : RVBUnary<0b000010001111, 0b101, OPC_OP_IMM, "unzip">,
481 Sched<[WriteZIP, ReadZIP]>;
482 } // Predicates = [HasStdExtZbkb, IsRV32]
485 //===----------------------------------------------------------------------===//
486 // Pseudo Instructions
487 //===----------------------------------------------------------------------===//
// zext.w is an alias for add.uw with rs2 = x0.
489 let Predicates = [HasStdExtZba, IsRV64] in {
490 def : InstAlias<"zext.w $rd, $rs", (ADD_UW GPR:$rd, GPR:$rs, X0)>;
491 } // Predicates = [HasStdExtZba, IsRV64]
// Parse-only aliases (trailing 0 disables printing): accept the register-form
// mnemonic with an immediate operand and emit the immediate-form instruction.
493 let Predicates = [HasStdExtZbb] in {
494 def : InstAlias<"ror $rd, $rs1, $shamt",
495 (RORI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
496 } // Predicates = [HasStdExtZbb]
498 let Predicates = [HasStdExtZbb, IsRV64] in {
499 def : InstAlias<"rorw $rd, $rs1, $shamt",
500 (RORIW GPR:$rd, GPR:$rs1, uimm5:$shamt), 0>;
501 } // Predicates = [HasStdExtZbb, IsRV64]
503 let Predicates = [HasStdExtZbs] in {
504 def : InstAlias<"bset $rd, $rs1, $shamt",
505 (BSETI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
506 def : InstAlias<"bclr $rd, $rs1, $shamt",
507 (BCLRI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
508 def : InstAlias<"binv $rd, $rs1, $shamt",
509 (BINVI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
510 def : InstAlias<"bext $rd, $rs1, $shamt",
511 (BEXTI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt), 0>;
512 } // Predicates = [HasStdExtZbs]
514 //===----------------------------------------------------------------------===//
516 //===----------------------------------------------------------------------===//
// Logic-with-complement selection: fold the inner `not` into andn/orn/xnor.
518 let Predicates = [HasStdExtZbbOrZbkb] in {
519 def : Pat<(XLenVT (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
520 def : Pat<(XLenVT (or GPR:$rs1, (not GPR:$rs2))), (ORN GPR:$rs1, GPR:$rs2)>;
521 def : Pat<(XLenVT (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;
522 } // Predicates = [HasStdExtZbbOrZbkb]
524 let Predicates = [HasStdExtZbbOrZbkb] in {
525 def : PatGprGpr<shiftop<rotl>, ROL>;
526 def : PatGprGpr<shiftop<rotr>, ROR>;
528 def : PatGprImm<rotr, RORI, uimmlog2xlen>;
529 // There's no encoding for roli in the 'B' extension as it can be
530 // implemented with rori by negating the immediate.
531 def : Pat<(XLenVT (rotl GPR:$rs1, uimmlog2xlen:$shamt)),
532 (RORI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
533 } // Predicates = [HasStdExtZbbOrZbkb]
// Word-rotate selection on RV64; rolw-by-immediate becomes roriw by 32-shamt.
535 let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
536 def : PatGprGpr<shiftopw<riscv_rolw>, ROLW>;
537 def : PatGprGpr<shiftopw<riscv_rorw>, RORW>;
538 def : PatGprImm<riscv_rorw, RORIW, uimm5>;
539 def : Pat<(riscv_rolw GPR:$rs1, uimm5:$rs2),
540 (RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
541 } // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
// Single-bit operations against a variable bit index.
543 let Predicates = [HasStdExtZbs] in {
544 def : Pat<(XLenVT (and (not (shiftop<shl> 1, (XLenVT GPR:$rs2))), GPR:$rs1)),
545 (BCLR GPR:$rs1, GPR:$rs2)>;
// (rotl -2, n) is another way of producing the ~(1 << n) mask.
546 def : Pat<(XLenVT (and (rotl -2, (XLenVT GPR:$rs2)), GPR:$rs1)),
547 (BCLR GPR:$rs1, GPR:$rs2)>;
548 def : Pat<(XLenVT (or (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
549 (BSET GPR:$rs1, GPR:$rs2)>;
550 def : Pat<(XLenVT (xor (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
551 (BINV GPR:$rs1, GPR:$rs2)>;
552 def : Pat<(XLenVT (and (shiftop<srl> GPR:$rs1, (XLenVT GPR:$rs2)), 1)),
553 (BEXT GPR:$rs1, GPR:$rs2)>;
// Materialize (1 << n) by setting bit n in x0.
555 def : Pat<(XLenVT (shiftop<shl> 1, (XLenVT GPR:$rs2))),
556 (BSET (XLenVT X0), GPR:$rs2)>;
// Single-bit operations against a constant bit index.
558 def : Pat<(XLenVT (and GPR:$rs1, BCLRMask:$mask)),
559 (BCLRI GPR:$rs1, BCLRMask:$mask)>;
560 def : Pat<(XLenVT (or GPR:$rs1, SingleBitSetMask:$mask)),
561 (BSETI GPR:$rs1, SingleBitSetMask:$mask)>;
562 def : Pat<(XLenVT (xor GPR:$rs1, SingleBitSetMask:$mask)),
563 (BINVI GPR:$rs1, SingleBitSetMask:$mask)>;
565 def : Pat<(XLenVT (and (srl GPR:$rs1, uimmlog2xlen:$shamt), (XLenVT 1))),
566 (BEXTI GPR:$rs1, uimmlog2xlen:$shamt)>;
// (x & mask) == 0  ->  extract the inverted bit.
568 def : Pat<(XLenVT (seteq (XLenVT (and GPR:$rs1, SingleBitSetMask:$mask)), 0)),
569 (BEXTI (XORI GPR:$rs1, -1), SingleBitSetMask:$mask)>;
// Two-instruction expansions of and/or/xor with masks that have exactly
// two interesting bits; see the PatLeaf/SDNodeXForm helpers above.
571 def : Pat<(XLenVT (or GPR:$r, BSETINVTwoBitsMask:$i)),
572 (BSETI (BSETI GPR:$r, (TrailingZeros BSETINVTwoBitsMask:$i)),
573 (BSETINVTwoBitsMaskHigh BSETINVTwoBitsMask:$i))>;
574 def : Pat<(XLenVT (xor GPR:$r, BSETINVTwoBitsMask:$i)),
575 (BINVI (BINVI GPR:$r, (TrailingZeros BSETINVTwoBitsMask:$i)),
576 (BSETINVTwoBitsMaskHigh BSETINVTwoBitsMask:$i))>;
577 def : Pat<(XLenVT (or GPR:$r, BSETINVORIMask:$i)),
578 (BSETI (ORI GPR:$r, (BSETINVORIMaskLow BSETINVORIMask:$i)),
579 (BSETINVTwoBitsMaskHigh BSETINVORIMask:$i))>;
580 def : Pat<(XLenVT (xor GPR:$r, BSETINVORIMask:$i)),
581 (BINVI (XORI GPR:$r, (BSETINVORIMaskLow BSETINVORIMask:$i)),
582 (BSETINVTwoBitsMaskHigh BSETINVORIMask:$i))>;
583 def : Pat<(XLenVT (and GPR:$r, BCLRITwoBitsMask:$i)),
584 (BCLRI (BCLRI GPR:$r, (BCLRITwoBitsMaskLow BCLRITwoBitsMask:$i)),
585 (BCLRITwoBitsMaskHigh BCLRITwoBitsMask:$i))>;
586 def : Pat<(XLenVT (and GPR:$r, BCLRIANDIMask:$i)),
587 (BCLRI (ANDI GPR:$r, (BCLRIANDIMaskLow BCLRIANDIMask:$i)),
588 (BCLRITwoBitsMaskHigh BCLRIANDIMask:$i))>;
589 } // Predicates = [HasStdExtZbs]
591 let Predicates = [HasStdExtZbb] in
592 def : PatGpr<riscv_orc_b, ORC_B>;
594 let Predicates = [HasStdExtZbkb] in
595 def : PatGpr<riscv_brev8, BREV8>;
597 let Predicates = [HasStdExtZbkb, IsRV32] in {
598 // We treat zip and unzip as separate instructions, so match it directly.
599 def : PatGpr<riscv_zip, ZIP_RV32, i32>;
600 def : PatGpr<riscv_unzip, UNZIP_RV32, i32>;
601 } // Predicates = [HasStdExtZbkb, IsRV32]
// Full-width bit counting.
603 let Predicates = [HasStdExtZbb] in {
604 def : PatGpr<ctlz, CLZ>;
605 def : PatGpr<cttz, CTZ>;
606 def : PatGpr<ctpop, CPOP>;
607 } // Predicates = [HasStdExtZbb]
// Word bit counting on RV64; popcount of a zero-extended 32-bit value can
// use cpopw directly.
609 let Predicates = [HasStdExtZbb, IsRV64] in {
610 def : PatGpr<riscv_clzw, CLZW>;
611 def : PatGpr<riscv_ctzw, CTZW>;
612 def : Pat<(i64 (ctpop (i64 (zexti32 (i64 GPR:$rs1))))), (CPOPW GPR:$rs1)>;
// absw = max(x, -x) on the sign-extended 32-bit value.
614 def : Pat<(i64 (riscv_absw GPR:$rs1)),
615 (MAX GPR:$rs1, (SUBW (XLenVT X0), GPR:$rs1))>;
616 } // Predicates = [HasStdExtZbb, IsRV64]
618 let Predicates = [HasStdExtZbb] in {
619 def : Pat<(XLenVT (sext_inreg GPR:$rs1, i8)), (SEXT_B GPR:$rs1)>;
620 def : Pat<(XLenVT (sext_inreg GPR:$rs1, i16)), (SEXT_H GPR:$rs1)>;
621 } // Predicates = [HasStdExtZbb]
623 let Predicates = [HasStdExtZbb] in {
624 def : PatGprGpr<smin, MIN>;
625 def : PatGprGpr<smax, MAX>;
626 def : PatGprGpr<umin, MINU>;
627 def : PatGprGpr<umax, MAXU>;
628 } // Predicates = [HasStdExtZbb]
630 let Predicates = [HasStdExtZbbOrZbkb, IsRV32] in
631 def : PatGpr<bswap, REV8_RV32, i32>;
633 let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in
634 def : PatGpr<bswap, REV8_RV64, i64>;
// packh matching: the low bytes of rs1 and rs2 concatenated into the low
// halfword, recognized in the several DAG shapes the combiner produces.
636 let Predicates = [HasStdExtZbkb] in {
637 def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFFFF),
638 (zexti8 (XLenVT GPR:$rs1))),
639 (PACKH GPR:$rs1, GPR:$rs2)>;
640 def : Pat<(or (shl (zexti8 (XLenVT GPR:$rs2)), (XLenVT 8)),
641 (zexti8 (XLenVT GPR:$rs1))),
642 (PACKH GPR:$rs1, GPR:$rs2)>;
643 def : Pat<(and (or (shl GPR:$rs2, (XLenVT 8)),
644 (zexti8 (XLenVT GPR:$rs1))), 0xFFFF),
645 (PACKH GPR:$rs1, GPR:$rs2)>;
646 } // Predicates = [HasStdExtZbkb]
// pack/packw matching: concatenate the low halves of the two sources.
648 let Predicates = [HasStdExtZbkb, IsRV32] in
649 def : Pat<(i32 (or (zexti16 (i32 GPR:$rs1)), (shl GPR:$rs2, (i32 16)))),
650 (PACK GPR:$rs1, GPR:$rs2)>;
652 let Predicates = [HasStdExtZbkb, IsRV64] in {
653 def : Pat<(i64 (or (zexti32 (i64 GPR:$rs1)), (shl GPR:$rs2, (i64 32)))),
654 (PACK GPR:$rs1, GPR:$rs2)>;
656 def : Pat<(binop_allwusers<or> (shl GPR:$rs2, (i64 16)),
657 (zexti16 (i64 GPR:$rs1))),
658 (PACKW GPR:$rs1, GPR:$rs2)>;
659 def : Pat<(i64 (or (sext_inreg (shl GPR:$rs2, (i64 16)), i32),
660 (zexti16 (i64 GPR:$rs1)))),
661 (PACKW GPR:$rs1, GPR:$rs2)>;
662 } // Predicates = [HasStdExtZbkb, IsRV64]
664 let Predicates = [HasStdExtZbb, IsRV32] in
665 def : Pat<(i32 (and GPR:$rs, 0xFFFF)), (ZEXT_H_RV32 GPR:$rs)>;
666 let Predicates = [HasStdExtZbb, IsRV64] in
667 def : Pat<(i64 (and GPR:$rs, 0xFFFF)), (ZEXT_H_RV64 GPR:$rs)>;
669 let Predicates = [HasStdExtZba] in {
// Select shNadd for (x << N) + y when y is not a simm12 (otherwise
// shift+addi is at least as good).
671 foreach i = {1,2,3} in {
672 defvar shxadd = !cast<Instruction>("SH"#i#"ADD");
673 def : Pat<(XLenVT (add_non_imm12 (shl GPR:$rs1, (XLenVT i)), GPR:$rs2)),
674 (shxadd GPR:$rs1, GPR:$rs2)>;
676 defvar pat = !cast<ComplexPattern>("sh"#i#"add_op");
677 // More complex cases use a ComplexPattern.
678 def : Pat<(XLenVT (add_non_imm12 pat:$rs1, GPR:$rs2)),
679 (shxadd pat:$rs1, GPR:$rs2)>;
// (x * (2^a + 1) << b) + y expansions: the constants 6,10,18,... are
// (2^a + 1) << b for a,b in {1,2,3}, selectable as two chained shNadds.
682 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 6)), GPR:$rs2),
683 (SH1ADD (SH1ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
684 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 10)), GPR:$rs2),
685 (SH1ADD (SH2ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
686 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 18)), GPR:$rs2),
687 (SH1ADD (SH3ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
688 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 12)), GPR:$rs2),
689 (SH2ADD (SH1ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
690 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 20)), GPR:$rs2),
691 (SH2ADD (SH2ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
692 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 36)), GPR:$rs2),
693 (SH2ADD (SH3ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
694 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 24)), GPR:$rs2),
695 (SH3ADD (SH1ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
696 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 40)), GPR:$rs2),
697 (SH3ADD (SH2ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
698 def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 72)), GPR:$rs2),
699 (SH3ADD (SH3ADD GPR:$rs1, GPR:$rs1), GPR:$rs2)>;
// Add of a 4x/8x simm12 constant: materialize C/4 (C/8) with ADDI and fold
// the scale into SH2ADD/SH3ADD.
701 def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy4:$i),
702 (SH2ADD (ADDI (XLenVT X0), (SimmShiftRightBy2XForm CSImm12MulBy4:$i)),
704 def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy8:$i),
705 (SH3ADD (ADDI (XLenVT X0), (SimmShiftRightBy3XForm CSImm12MulBy8:$i)),
// Multiply by (3|5|9) << C: shNadd then shift by C.
708 def : Pat<(mul (XLenVT GPR:$r), C3LeftShift:$i),
709 (SLLI (SH1ADD GPR:$r, GPR:$r),
710 (TrailingZeros C3LeftShift:$i))>;
711 def : Pat<(mul (XLenVT GPR:$r), C5LeftShift:$i),
712 (SLLI (SH2ADD GPR:$r, GPR:$r),
713 (TrailingZeros C5LeftShift:$i))>;
714 def : Pat<(mul (XLenVT GPR:$r), C9LeftShift:$i),
715 (SLLI (SH3ADD GPR:$r, GPR:$r),
716 (TrailingZeros C9LeftShift:$i))>;
// Multiply by small odd constants expressible as nested shNadds,
// e.g. 11 = (4+1)*2 + 1, 27 = 3*9, 45 = 5*9, 81 = 9*9.
718 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 11)),
719 (SH1ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r)>;
720 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 19)),
721 (SH1ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r)>;
722 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 13)),
723 (SH2ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r)>;
724 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 21)),
725 (SH2ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r)>;
726 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 37)),
727 (SH2ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r)>;
728 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 25)),
729 (SH3ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r)>;
730 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 41)),
731 (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r)>;
732 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 73)),
733 (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r)>;
734 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 27)),
735 (SH1ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
736 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 45)),
737 (SH2ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
738 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 81)),
739 (SH3ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
740 } // Predicates = [HasStdExtZba]
742 let Predicates = [HasStdExtZba, IsRV64] in {
// slli.uw: shift of a value whose high 32 bits are masked off.
743 def : Pat<(i64 (shl (and GPR:$rs1, 0xFFFFFFFF), uimm5:$shamt)),
744 (SLLI_UW GPR:$rs1, uimm5:$shamt)>;
745 // Match a shifted 0xffffffff mask. Use SRLI to clear the LSBs and SLLI_UW to
747 def : Pat<(i64 (and GPR:$rs1, Shifted32OnesMask:$mask)),
748 (SLLI_UW (SRLI GPR:$rs1, Shifted32OnesMask:$mask),
749 Shifted32OnesMask:$mask)>;
750 def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
751 (ADD_UW GPR:$rs1, GPR:$rs2)>;
// zext.w: add.uw with x0 as the addend.
752 def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, (XLenVT X0))>;
754 def : Pat<(i64 (or_is_add_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
755 (ADD_UW GPR:$rs1, GPR:$rs2)>;
// shNadd.uw: zero-extended rs1 shifted then added.
757 foreach i = {1,2,3} in {
758 defvar shxadd_uw = !cast<Instruction>("SH"#i#"ADD_UW");
759 def : Pat<(i64 (add_non_imm12 (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 i)), (XLenVT GPR:$rs2))),
760 (shxadd_uw GPR:$rs1, GPR:$rs2)>;
// Same, but with the mask applied after the shift (mask widened accordingly).
763 def : Pat<(i64 (add_non_imm12 (and (shl GPR:$rs1, (i64 1)), 0x1FFFFFFFF), (XLenVT GPR:$rs2))),
764 (SH1ADD_UW GPR:$rs1, GPR:$rs2)>;
765 def : Pat<(i64 (add_non_imm12 (and (shl GPR:$rs1, (i64 2)), 0x3FFFFFFFF), (XLenVT GPR:$rs2))),
766 (SH2ADD_UW GPR:$rs1, GPR:$rs2)>;
767 def : Pat<(i64 (add_non_imm12 (and (shl GPR:$rs1, (i64 3)), 0x7FFFFFFFF), (XLenVT GPR:$rs2))),
768 (SH3ADD_UW GPR:$rs1, GPR:$rs2)>;
770 // More complex cases use a ComplexPattern.
771 foreach i = {1,2,3} in {
772 defvar pat = !cast<ComplexPattern>("sh"#i#"add_uw_op");
773 def : Pat<(i64 (add_non_imm12 pat:$rs1, (XLenVT GPR:$rs2))),
774 (!cast<Instruction>("SH"#i#"ADD_UW") pat:$rs1, GPR:$rs2)>;
// Masks with the low N bits clear: shift right with SRLIW then shNadd.
777 def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFFE), (XLenVT GPR:$rs2))),
778 (SH1ADD (SRLIW GPR:$rs1, 1), GPR:$rs2)>;
779 def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFFC), (XLenVT GPR:$rs2))),
780 (SH2ADD (SRLIW GPR:$rs1, 2), GPR:$rs2)>;
781 def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFF8), (XLenVT GPR:$rs2))),
782 (SH3ADD (SRLIW GPR:$rs1, 3), GPR:$rs2)>;
784 // Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
785 def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0x1FFFFFFFE), (XLenVT GPR:$rs2))),
786 (SH1ADD_UW (SRLI GPR:$rs1, 1), GPR:$rs2)>;
787 def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0x3FFFFFFFC), (XLenVT GPR:$rs2))),
788 (SH2ADD_UW (SRLI GPR:$rs1, 2), GPR:$rs2)>;
789 def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0x7FFFFFFF8), (XLenVT GPR:$rs2))),
790 (SH3ADD_UW (SRLI GPR:$rs1, 3), GPR:$rs2)>;
// Multiply of a zero-extended value by (3|5|9) << C: two slli.uw feeding a
// shNadd, keeping the zero-extension implicit.
792 def : Pat<(i64 (mul (and_oneuse GPR:$r, 0xFFFFFFFF), C3LeftShiftUW:$i)),
793 (SH1ADD (SLLI_UW GPR:$r, (TrailingZeros C3LeftShiftUW:$i)),
794 (SLLI_UW GPR:$r, (TrailingZeros C3LeftShiftUW:$i)))>;
795 def : Pat<(i64 (mul (and_oneuse GPR:$r, 0xFFFFFFFF), C5LeftShiftUW:$i)),
796 (SH2ADD (SLLI_UW GPR:$r, (TrailingZeros C5LeftShiftUW:$i)),
797 (SLLI_UW GPR:$r, (TrailingZeros C5LeftShiftUW:$i)))>;
798 def : Pat<(i64 (mul (and_oneuse GPR:$r, 0xFFFFFFFF), C9LeftShiftUW:$i)),
799 (SH3ADD (SLLI_UW GPR:$r, (TrailingZeros C9LeftShiftUW:$i)),
800 (SLLI_UW GPR:$r, (TrailingZeros C9LeftShiftUW:$i)))>;
801 } // Predicates = [HasStdExtZba, IsRV64]
803 let Predicates = [HasStdExtZbcOrZbkc] in {
804 def : PatGprGpr<riscv_clmul, CLMUL>;
805 def : PatGprGpr<riscv_clmulh, CLMULH>;
806 } // Predicates = [HasStdExtZbcOrZbkc]
808 let Predicates = [HasStdExtZbc] in
809 def : PatGprGpr<riscv_clmulr, CLMULR>;
// xperm has no generic DAG node; match the target intrinsics directly.
811 let Predicates = [HasStdExtZbkx] in {
812 def : PatGprGpr<int_riscv_xperm4, XPERM4>;
813 def : PatGprGpr<int_riscv_xperm8, XPERM8>;
814 } // Predicates = [HasStdExtZbkx]
816 //===----------------------------------------------------------------------===//
817 // Experimental RV64 i32 legalization patterns.
818 //===----------------------------------------------------------------------===//
// i32-typed variants of the patterns above, used when i32 is a legal type
// on RV64.
820 let Predicates = [HasStdExtZbb, IsRV64] in {
821 def : PatGpr<ctlz, CLZW, i32>;
822 def : PatGpr<cttz, CTZW, i32>;
823 def : PatGpr<ctpop, CPOPW, i32>;
825 def : Pat<(i32 (sext_inreg GPR:$rs1, i8)), (SEXT_B GPR:$rs1)>;
826 def : Pat<(i32 (sext_inreg GPR:$rs1, i16)), (SEXT_H GPR:$rs1)>;
827 } // Predicates = [HasStdExtZbb, IsRV64]
829 let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
830 def : Pat<(i32 (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
831 def : Pat<(i32 (or GPR:$rs1, (not GPR:$rs2))), (ORN GPR:$rs1, GPR:$rs2)>;
832 def : Pat<(i32 (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;
834 def : PatGprGpr<shiftopw<rotl>, ROLW, i32, i64>;
835 def : PatGprGpr<shiftopw<rotr>, RORW, i32, i64>;
836 def : PatGprImm<rotr, RORIW, uimm5, i32>;
838 def : Pat<(i32 (rotl GPR:$rs1, uimm5:$rs2)),
839 (RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
840 } // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
842 let Predicates = [HasStdExtZba, IsRV64] in {
843 def : Pat<(zext GPR:$src), (ADD_UW GPR:$src, (XLenVT X0))>;