1 //== LoongArchInstrInfo.td - Target Description for LoongArch -*- tablegen -*-//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the LoongArch instructions in TableGen format.
11 //===----------------------------------------------------------------------===//
13 //===----------------------------------------------------------------------===//
14 // LoongArch specific DAG Nodes.
15 //===----------------------------------------------------------------------===//
17 // Target-independent type requirements, but with target-specific formats.
def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
                                       SDTCisVT<1, i32>]>;
def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
                                   SDTCisVT<1, i32>]>;

// Target-dependent type requirements.
def SDT_LoongArchCall : SDTypeProfile<0, -1, [SDTCisVT<0, GRLenVT>]>;
def SDT_LoongArchIntBinOpW : SDTypeProfile<1, 2, [
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>
]>;

def SDT_LoongArchBStrIns: SDTypeProfile<1, 4, [
  SDTCisInt<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<3>,
  SDTCisSameAs<3, 4>
]>;

def SDT_LoongArchBStrPick: SDTypeProfile<1, 3, [
  SDTCisInt<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisSameAs<2, 3>
]>;

// "VI" means no output and an integer input.
def SDT_LoongArchVI : SDTypeProfile<0, 1, [SDTCisVT<0, GRLenVT>]>;

def SDT_LoongArchCsrrd : SDTypeProfile<1, 1, [SDTCisInt<0>,
                                              SDTCisVT<1, GRLenVT>]>;
def SDT_LoongArchCsrwr : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                              SDTCisVT<2, GRLenVT>]>;
def SDT_LoongArchCsrxchg : SDTypeProfile<1, 3, [SDTCisInt<0>,
                                                SDTCisSameAs<0, 1>,
                                                SDTCisSameAs<0, 2>,
                                                SDTCisVT<3, GRLenVT>]>;
def SDT_LoongArchIocsrwr : SDTypeProfile<0, 2, [SDTCisInt<0>,
                                                SDTCisSameAs<0, 1>]>;
def SDT_LoongArchMovgr2fcsr : SDTypeProfile<0, 2, [SDTCisVT<0, GRLenVT>,
                                                   SDTCisSameAs<0, 1>]>;
def SDT_LoongArchMovfcsr2gr : SDTypeProfile<1, 1, [SDTCisVT<0, GRLenVT>,
                                                   SDTCisSameAs<0, 1>]>;
56 // TODO: Add LoongArch specific DAG Nodes
57 // Target-independent nodes, but with target-specific formats.
58 def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
59 [SDNPHasChain, SDNPOutGlue]>;
60 def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
61 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
63 // Target-dependent nodes.
def loongarch_call : SDNode<"LoongArchISD::CALL", SDT_LoongArchCall,
                            [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                             SDNPVariadic]>;
def loongarch_ret : SDNode<"LoongArchISD::RET", SDTNone,
                           [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def loongarch_tail : SDNode<"LoongArchISD::TAIL", SDT_LoongArchCall,
                            [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                             SDNPVariadic]>;
72 def loongarch_sll_w : SDNode<"LoongArchISD::SLL_W", SDT_LoongArchIntBinOpW>;
73 def loongarch_sra_w : SDNode<"LoongArchISD::SRA_W", SDT_LoongArchIntBinOpW>;
74 def loongarch_srl_w : SDNode<"LoongArchISD::SRL_W", SDT_LoongArchIntBinOpW>;
75 def loongarch_rotr_w : SDNode<"LoongArchISD::ROTR_W", SDT_LoongArchIntBinOpW>;
76 def loongarch_rotl_w : SDNode<"LoongArchISD::ROTL_W", SDT_LoongArchIntBinOpW>;
77 def loongarch_crc_w_b_w
78 : SDNode<"LoongArchISD::CRC_W_B_W", SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
79 def loongarch_crc_w_h_w
80 : SDNode<"LoongArchISD::CRC_W_H_W", SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
81 def loongarch_crc_w_w_w
82 : SDNode<"LoongArchISD::CRC_W_W_W", SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
83 def loongarch_crc_w_d_w
84 : SDNode<"LoongArchISD::CRC_W_D_W", SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
85 def loongarch_crcc_w_b_w : SDNode<"LoongArchISD::CRCC_W_B_W",
86 SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
87 def loongarch_crcc_w_h_w : SDNode<"LoongArchISD::CRCC_W_H_W",
88 SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
89 def loongarch_crcc_w_w_w : SDNode<"LoongArchISD::CRCC_W_W_W",
90 SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
91 def loongarch_crcc_w_d_w : SDNode<"LoongArchISD::CRCC_W_D_W",
92 SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
def loongarch_bstrins
    : SDNode<"LoongArchISD::BSTRINS", SDT_LoongArchBStrIns>;
95 def loongarch_bstrpick
96 : SDNode<"LoongArchISD::BSTRPICK", SDT_LoongArchBStrPick>;
97 def loongarch_revb_2h : SDNode<"LoongArchISD::REVB_2H", SDTUnaryOp>;
98 def loongarch_revb_2w : SDNode<"LoongArchISD::REVB_2W", SDTUnaryOp>;
99 def loongarch_bitrev_4b : SDNode<"LoongArchISD::BITREV_4B", SDTUnaryOp>;
100 def loongarch_bitrev_w : SDNode<"LoongArchISD::BITREV_W", SDTUnaryOp>;
101 def loongarch_clzw : SDNode<"LoongArchISD::CLZ_W", SDTIntBitCountUnaryOp>;
102 def loongarch_ctzw : SDNode<"LoongArchISD::CTZ_W", SDTIntBitCountUnaryOp>;
103 def loongarch_dbar : SDNode<"LoongArchISD::DBAR", SDT_LoongArchVI,
104 [SDNPHasChain, SDNPSideEffect]>;
105 def loongarch_ibar : SDNode<"LoongArchISD::IBAR", SDT_LoongArchVI,
106 [SDNPHasChain, SDNPSideEffect]>;
107 def loongarch_break : SDNode<"LoongArchISD::BREAK", SDT_LoongArchVI,
108 [SDNPHasChain, SDNPSideEffect]>;
109 def loongarch_movfcsr2gr : SDNode<"LoongArchISD::MOVFCSR2GR",
110 SDT_LoongArchMovfcsr2gr, [SDNPHasChain]>;
111 def loongarch_movgr2fcsr : SDNode<"LoongArchISD::MOVGR2FCSR",
112 SDT_LoongArchMovgr2fcsr,
113 [SDNPHasChain, SDNPSideEffect]>;
114 def loongarch_syscall : SDNode<"LoongArchISD::SYSCALL", SDT_LoongArchVI,
115 [SDNPHasChain, SDNPSideEffect]>;
116 def loongarch_csrrd : SDNode<"LoongArchISD::CSRRD", SDT_LoongArchCsrrd,
117 [SDNPHasChain, SDNPSideEffect]>;
118 def loongarch_csrwr : SDNode<"LoongArchISD::CSRWR", SDT_LoongArchCsrwr,
119 [SDNPHasChain, SDNPSideEffect]>;
120 def loongarch_csrxchg : SDNode<"LoongArchISD::CSRXCHG",
121 SDT_LoongArchCsrxchg,
122 [SDNPHasChain, SDNPSideEffect]>;
123 def loongarch_iocsrrd_b : SDNode<"LoongArchISD::IOCSRRD_B", SDTUnaryOp,
124 [SDNPHasChain, SDNPSideEffect]>;
125 def loongarch_iocsrrd_h : SDNode<"LoongArchISD::IOCSRRD_H", SDTUnaryOp,
126 [SDNPHasChain, SDNPSideEffect]>;
127 def loongarch_iocsrrd_w : SDNode<"LoongArchISD::IOCSRRD_W", SDTUnaryOp,
128 [SDNPHasChain, SDNPSideEffect]>;
129 def loongarch_iocsrrd_d : SDNode<"LoongArchISD::IOCSRRD_D", SDTUnaryOp,
130 [SDNPHasChain, SDNPSideEffect]>;
131 def loongarch_iocsrwr_b : SDNode<"LoongArchISD::IOCSRWR_B",
132 SDT_LoongArchIocsrwr,
133 [SDNPHasChain, SDNPSideEffect]>;
134 def loongarch_iocsrwr_h : SDNode<"LoongArchISD::IOCSRWR_H",
135 SDT_LoongArchIocsrwr,
136 [SDNPHasChain, SDNPSideEffect]>;
137 def loongarch_iocsrwr_w : SDNode<"LoongArchISD::IOCSRWR_W",
138 SDT_LoongArchIocsrwr,
139 [SDNPHasChain, SDNPSideEffect]>;
140 def loongarch_iocsrwr_d : SDNode<"LoongArchISD::IOCSRWR_D",
141 SDT_LoongArchIocsrwr,
142 [SDNPHasChain, SDNPSideEffect]>;
def loongarch_cpucfg : SDNode<"LoongArchISD::CPUCFG", SDTUnaryOp,
                              [SDNPHasChain]>;
def to_fclass_mask: SDNodeXForm<timm, [{
  uint64_t Check = N->getZExtValue();
  uint64_t Mask = 0;
  if (Check & fcSNan)
    Mask |= LoongArch::FClassMaskSignalingNaN;
  if (Check & fcQNan)
    Mask |= LoongArch::FClassMaskQuietNaN;
153 if (Check & fcPosInf)
154 Mask |= LoongArch::FClassMaskPositiveInfinity;
155 if (Check & fcNegInf)
156 Mask |= LoongArch::FClassMaskNegativeInfinity;
157 if (Check & fcPosNormal)
158 Mask |= LoongArch::FClassMaskPositiveNormal;
159 if (Check & fcNegNormal)
160 Mask |= LoongArch::FClassMaskNegativeNormal;
161 if (Check & fcPosSubnormal)
162 Mask |= LoongArch::FClassMaskPositiveSubnormal;
163 if (Check & fcNegSubnormal)
164 Mask |= LoongArch::FClassMaskNegativeSubnormal;
165 if (Check & fcPosZero)
166 Mask |= LoongArch::FClassMaskPositiveZero;
167 if (Check & fcNegZero)
168 Mask |= LoongArch::FClassMaskNegativeZero;
  return CurDAG->getTargetConstant(Mask, SDLoc(N), Subtarget->getGRLenVT());
}]>;
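// Illustrative example (not part of the original comments): a class test for
// any NaN, i.e. (fcSNan | fcQNan), is rewritten by to_fclass_mask into
// FClassMaskSignalingNaN | FClassMaskQuietNaN.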
172 //===----------------------------------------------------------------------===//
173 // Operand and SDNode transformation definitions.
174 //===----------------------------------------------------------------------===//
class ImmAsmOperand<string prefix, int width, string suffix>
    : AsmOperandClass {
  let Name = prefix # "Imm" # width # suffix;
  let DiagnosticType = !strconcat("Invalid", Name);
  let RenderMethod = "addImmOperands";
}

class SImmAsmOperand<int width, string suffix = "">
    : ImmAsmOperand<"S", width, suffix> {
}

class UImmAsmOperand<int width, string suffix = "">
    : ImmAsmOperand<"U", width, suffix> {
}
// A parse method for "$r*" or "$r*, 0", where the 0 is silently ignored.
// Only used for "AM*" instructions, in order to be compatible with GAS.
def AtomicMemAsmOperand : AsmOperandClass {
  let Name = "AtomicMemAsmOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR";
  let ParserMethod = "parseAtomicMemOp";
}

def GPRMemAtomic : RegisterOperand<GPR> {
  let ParserMatchClass = AtomicMemAsmOperand;
  let PrintMethod = "printAtomicMemOp";
}
205 // A parameterized register class alternative to i32imm/i64imm from Target.td.
206 def grlenimm : Operand<GRLenVT>;
207 def imm32 : Operand<GRLenVT> {
208 let ParserMatchClass = ImmAsmOperand<"", 32, "">;
211 def uimm1 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<1>(Imm);}]>{
212 let ParserMatchClass = UImmAsmOperand<1>;
215 def uimm2 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<2>(Imm);}]> {
216 let ParserMatchClass = UImmAsmOperand<2>;
219 def uimm2_plus1 : Operand<GRLenVT>,
220 ImmLeaf<GRLenVT, [{return isUInt<2>(Imm - 1);}]> {
221 let ParserMatchClass = UImmAsmOperand<2, "plus1">;
222 let EncoderMethod = "getImmOpValueSub1";
223 let DecoderMethod = "decodeUImmOperand<2, 1>";
226 def uimm3 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<3>(Imm);}]> {
227 let ParserMatchClass = UImmAsmOperand<3>;
230 def uimm4 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<4>(Imm);}]> {
231 let ParserMatchClass = UImmAsmOperand<4>;
234 def uimm5 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<5>(Imm);}]> {
235 let ParserMatchClass = UImmAsmOperand<5>;
238 def uimm6 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<6>(Imm);}]> {
239 let ParserMatchClass = UImmAsmOperand<6>;
242 def uimm7 : Operand<GRLenVT> {
243 let ParserMatchClass = UImmAsmOperand<7>;
246 def uimm8 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<8>(Imm);}]> {
247 let ParserMatchClass = UImmAsmOperand<8>;
250 class UImm12Operand : Operand<GRLenVT>,
251 ImmLeaf <GRLenVT, [{return isUInt<12>(Imm);}]> {
252 let DecoderMethod = "decodeUImmOperand<12>";
255 def uimm12 : UImm12Operand {
256 let ParserMatchClass = UImmAsmOperand<12>;
259 def uimm12_ori : UImm12Operand {
260 let ParserMatchClass = UImmAsmOperand<12, "ori">;
263 def uimm14 : Operand<GRLenVT>,
264 ImmLeaf <GRLenVT, [{return isUInt<14>(Imm);}]> {
265 let ParserMatchClass = UImmAsmOperand<14>;
268 def uimm15 : Operand<GRLenVT>,
269 ImmLeaf <GRLenVT, [{return isUInt<15>(Imm);}]> {
270 let ParserMatchClass = UImmAsmOperand<15>;
273 def simm5 : Operand<GRLenVT> {
274 let ParserMatchClass = SImmAsmOperand<5>;
275 let DecoderMethod = "decodeSImmOperand<5>";
278 def simm8 : Operand<GRLenVT> {
279 let ParserMatchClass = SImmAsmOperand<8>;
280 let DecoderMethod = "decodeSImmOperand<8>";
283 foreach I = [1, 2, 3] in {
284 def simm8_lsl # I : Operand<GRLenVT> {
285 let ParserMatchClass = SImmAsmOperand<8, "lsl" # I>;
286 let EncoderMethod = "getImmOpValueAsr<" # I # ">";
287 let DecoderMethod = "decodeSImmOperand<8," # I # ">";
291 def simm9_lsl3 : Operand<GRLenVT> {
292 let ParserMatchClass = SImmAsmOperand<9, "lsl3">;
293 let EncoderMethod = "getImmOpValueAsr<3>";
294 let DecoderMethod = "decodeSImmOperand<9, 3>";
297 def simm10 : Operand<GRLenVT> {
298 let ParserMatchClass = SImmAsmOperand<10>;
301 def simm10_lsl2 : Operand<GRLenVT> {
302 let ParserMatchClass = SImmAsmOperand<10, "lsl2">;
303 let EncoderMethod = "getImmOpValueAsr<2>";
304 let DecoderMethod = "decodeSImmOperand<10, 2>";
307 def simm11_lsl1 : Operand<GRLenVT> {
308 let ParserMatchClass = SImmAsmOperand<11, "lsl1">;
309 let EncoderMethod = "getImmOpValueAsr<1>";
310 let DecoderMethod = "decodeSImmOperand<11, 1>";
313 class SImm12Operand : Operand<GRLenVT>,
314 ImmLeaf <GRLenVT, [{return isInt<12>(Imm);}]> {
315 let DecoderMethod = "decodeSImmOperand<12>";
318 def simm12 : SImm12Operand {
319 let ParserMatchClass = SImmAsmOperand<12>;
322 def simm12_addlike : SImm12Operand {
323 let ParserMatchClass = SImmAsmOperand<12, "addlike">;
326 def simm12_lu52id : SImm12Operand {
327 let ParserMatchClass = SImmAsmOperand<12, "lu52id">;
330 def simm13 : Operand<GRLenVT> {
331 let ParserMatchClass = SImmAsmOperand<13>;
332 let DecoderMethod = "decodeSImmOperand<13>";
335 def simm14_lsl2 : Operand<GRLenVT>,
336 ImmLeaf<GRLenVT, [{return isShiftedInt<14,2>(Imm);}]> {
337 let ParserMatchClass = SImmAsmOperand<14, "lsl2">;
338 let EncoderMethod = "getImmOpValueAsr<2>";
339 let DecoderMethod = "decodeSImmOperand<14, 2>";
342 def simm16 : Operand<GRLenVT> {
343 let ParserMatchClass = SImmAsmOperand<16>;
344 let DecoderMethod = "decodeSImmOperand<16>";
347 def simm16_lsl2 : Operand<GRLenVT>,
348 ImmLeaf<GRLenVT, [{return isInt<16>(Imm>>2);}]> {
349 let ParserMatchClass = SImmAsmOperand<16, "lsl2">;
350 let EncoderMethod = "getImmOpValueAsr<2>";
351 let DecoderMethod = "decodeSImmOperand<16, 2>";
354 def simm16_lsl2_br : Operand<OtherVT> {
355 let ParserMatchClass = SImmAsmOperand<16, "lsl2">;
356 let EncoderMethod = "getImmOpValueAsr<2>";
357 let DecoderMethod = "decodeSImmOperand<16, 2>";
360 class SImm20Operand : Operand<GRLenVT> {
361 let DecoderMethod = "decodeSImmOperand<20>";
364 def simm20 : SImm20Operand {
365 let ParserMatchClass = SImmAsmOperand<20>;
368 def simm20_pcalau12i : SImm20Operand {
369 let ParserMatchClass = SImmAsmOperand<20, "pcalau12i">;
372 def simm20_lu12iw : SImm20Operand {
373 let ParserMatchClass = SImmAsmOperand<20, "lu12iw">;
376 def simm20_lu32id : SImm20Operand {
377 let ParserMatchClass = SImmAsmOperand<20, "lu32id">;
380 def simm21_lsl2 : Operand<OtherVT> {
381 let ParserMatchClass = SImmAsmOperand<21, "lsl2">;
382 let EncoderMethod = "getImmOpValueAsr<2>";
383 let DecoderMethod = "decodeSImmOperand<21, 2>";
386 def SImm26OperandB: AsmOperandClass {
387 let Name = "SImm26OperandB";
388 let PredicateMethod = "isSImm26Operand";
389 let RenderMethod = "addImmOperands";
390 let DiagnosticType = "InvalidSImm26Operand";
391 let ParserMethod = "parseImmediate";
394 // A symbol or an imm used in B/PseudoBR.
395 def simm26_b : Operand<OtherVT> {
396 let ParserMatchClass = SImm26OperandB;
397 let EncoderMethod = "getImmOpValueAsr<2>";
398 let DecoderMethod = "decodeSImmOperand<26, 2>";
401 def SImm26OperandBL: AsmOperandClass {
402 let Name = "SImm26OperandBL";
403 let PredicateMethod = "isSImm26Operand";
404 let RenderMethod = "addImmOperands";
405 let DiagnosticType = "InvalidSImm26Operand";
406 let ParserMethod = "parseSImm26Operand";
409 // A symbol or an imm used in BL/PseudoCALL/PseudoTAIL.
410 def simm26_symbol : Operand<GRLenVT> {
411 let ParserMatchClass = SImm26OperandBL;
412 let EncoderMethod = "getImmOpValueAsr<2>";
413 let DecoderMethod = "decodeSImmOperand<26, 2>";
416 // A 32-bit signed immediate with the lowest 16 bits zeroed, suitable for
417 // direct use with `addu16i.d`.
418 def simm16_lsl16 : Operand<GRLenVT>,
419 ImmLeaf<GRLenVT, [{return isShiftedInt<16, 16>(Imm);}]>;
// A 32-bit signed immediate expressible with a pair of `addu16i.d + addi` for
// use in additions.
def simm32_hi16_lo12: Operand<GRLenVT>, ImmLeaf<GRLenVT, [{
  return isShiftedInt<16, 16>(Imm - SignExtend64<12>(Imm));
}]>;
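// Note the subtraction of the sign-extended low 12 bits. Illustrative example
// (not from the original sources): for Imm = 0x2f800 the `addi` part is -2048
// (0x800 sign-extended), so the remainder 0x2f800 - (-2048) = 0x30000 must be
// a 16-bit value shifted left by 16, which it is (3 << 16).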
def BareSymbol : AsmOperandClass {
  let Name = "BareSymbol";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidBareSymbol";
  let ParserMethod = "parseImmediate";
}

// A bare symbol used in "PseudoLA_*" instructions.
def bare_symbol : Operand<GRLenVT> {
  let ParserMatchClass = BareSymbol;
}
439 // Standalone (codegen-only) immleaf patterns.
441 // A 12-bit signed immediate plus one where the imm range will be [-2047, 2048].
442 def simm12_plus1 : ImmLeaf<GRLenVT,
443 [{return (isInt<12>(Imm) && Imm != -2048) || Imm == 2048;}]>;
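// Illustrative example (not from the original comments): (seteq $rj, 2048)
// becomes (SLTUI (ADDI_W/ADDI_D $rj, -2048), 1) via the NegImm transform
// below; -2048 is encodable as a simm12 while +2048 is not, which is why the
// range is shifted by one to [-2047, 2048].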
445 // Return the negation of an immediate value.
def NegImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(-N->getSExtValue(), SDLoc(N),
                                   N->getValueType(0));
}]>;
451 // FP immediate patterns.
452 def fpimm0 : PatLeaf<(fpimm), [{return N->isExactlyValue(+0.0);}]>;
453 def fpimm0neg : PatLeaf<(fpimm), [{return N->isExactlyValue(-0.0);}]>;
454 def fpimm1 : PatLeaf<(fpimm), [{return N->isExactlyValue(+1.0);}]>;
456 // Return an immediate subtracted from 32.
def ImmSubFrom32 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N),
                                   N->getValueType(0));
}]>;
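// For example (illustrative), a 32-bit rotate-left by 5 can be selected as
// ROTRI.W with amount 32 - 5 = 27; see the loongarch_rotl_w patterns further
// below.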
462 // Return the lowest 12 bits of the signed immediate.
def LO12: SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(SignExtend64<12>(N->getSExtValue()),
                                   SDLoc(N), N->getValueType(0));
}]>;
468 // Return the higher 16 bits of the signed immediate.
def HI16 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() >> 16, SDLoc(N),
                                   N->getValueType(0));
}]>;
474 // Return the higher 16 bits of the signed immediate, adjusted for use within an
475 // `addu16i.d + addi` pair.
def HI16ForAddu16idAddiPair: SDNodeXForm<imm, [{
  auto Imm = N->getSExtValue();
  return CurDAG->getTargetConstant((Imm - SignExtend64<12>(Imm)) >> 16,
                                   SDLoc(N), N->getValueType(0));
}]>;
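// Worked example (illustrative): for Imm = 0x30005, LO12 gives 5 and
// HI16ForAddu16idAddiPair gives (0x30005 - 5) >> 16 = 3, so the addition is
// selected as `addu16i.d $rd, $rj, 3` followed by `addi.{w,d} $rd, $rd, 5`.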
482 def BaseAddr : ComplexPattern<iPTR, 1, "SelectBaseAddr">;
483 def AddrConstant : ComplexPattern<iPTR, 2, "SelectAddrConstant">;
484 def NonFIBaseAddr : ComplexPattern<iPTR, 1, "selectNonFIBaseAddr">;
def fma_nsz : PatFrag<(ops node:$fj, node:$fk, node:$fa),
                      (fma node:$fj, node:$fk, node:$fa), [{
  return N->getFlags().hasNoSignedZeros();
}]>;
491 // Check if (add r, imm) can be optimized to (ADDI (ADDI r, imm0), imm1),
492 // in which imm = imm0 + imm1, and both imm0 & imm1 are simm12.
def AddiPair : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  // The immediate operand must be in range [-4096,-2049] or [2048,4094].
  int64_t Imm = N->getSExtValue();
  return (-4096 <= Imm && Imm <= -2049) || (2048 <= Imm && Imm <= 4094);
}]>;
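// Worked example (illustrative): imm = 4000 is split as 2047 + 1953, so
// (add $r, 4000) becomes (ADDI (ADDI $r, 2047), 1953); likewise imm = -3000
// becomes (ADDI (ADDI $r, -2048), -952).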
501 // Return -2048 if immediate is negative or 2047 if positive.
def AddiPairImmLarge : SDNodeXForm<imm, [{
  int64_t Imm = N->getSExtValue() < 0 ? -2048 : 2047;
  return CurDAG->getTargetConstant(Imm, SDLoc(N),
                                   N->getValueType(0));
}]>;
508 // Return imm - (imm < 0 ? -2048 : 2047).
def AddiPairImmSmall : SDNodeXForm<imm, [{
  int64_t Imm = N->getSExtValue();
  int64_t Adj = Imm < 0 ? -2048 : 2047;
  return CurDAG->getTargetConstant(Imm - Adj, SDLoc(N),
                                   N->getValueType(0));
}]>;
516 // Check if (mul r, imm) can be optimized to (SLLI (ALSL r, r, i0), i1),
517 // in which imm = (1 + (1 << i0)) << i1.
def AlslSlliImm : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  uint64_t Imm = N->getZExtValue();
  unsigned I1 = llvm::countr_zero(Imm);
  uint64_t Rem = Imm >> I1;
  return Rem == 3 || Rem == 5 || Rem == 9 || Rem == 17;
}]>;
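// Worked example (illustrative): imm = 40 = 5 << 3, so I1 = 3 and Rem = 5;
// (mul $r, 40) is selected as (SLLI (ALSL $r, $r, 2), 3), i.e.
// ((r << 2) + r) << 3 = 40 * r.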
def AlslSlliImmI1 : SDNodeXForm<imm, [{
  uint64_t Imm = N->getZExtValue();
  unsigned I1 = llvm::countr_zero(Imm);
  return CurDAG->getTargetConstant(I1, SDLoc(N),
                                   N->getValueType(0));
}]>;
def AlslSlliImmI0 : SDNodeXForm<imm, [{
  uint64_t Imm = N->getZExtValue();
  unsigned I1 = llvm::countr_zero(Imm);
  unsigned I0;
  switch (Imm >> I1) {
  case 3: I0 = 1; break;
  case 5: I0 = 2; break;
  case 9: I0 = 3; break;
  default: I0 = 4; break;
  }
  return CurDAG->getTargetConstant(I0, SDLoc(N),
                                   N->getValueType(0));
}]>;
548 // Check if (and r, imm) can be optimized to (BSTRINS r, R0, msb, lsb),
549 // in which imm = ~((2^^(msb-lsb+1) - 1) << lsb).
def BstrinsImm : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  uint64_t Imm = N->getZExtValue();
  // andi can be used instead if Imm <= 0xfff.
  if (Imm <= 0xfff)
    return false;
  unsigned MaskIdx, MaskLen;
  return N->getValueType(0).getSizeInBits() == 32
             ? llvm::isShiftedMask_32(~Imm, MaskIdx, MaskLen)
             : llvm::isShiftedMask_64(~Imm, MaskIdx, MaskLen);
}]>;
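// Worked example (illustrative): for the 32-bit immediate 0xffff00ff,
// ~imm = 0x0000ff00 is a shifted mask with MaskIdx = 8 and MaskLen = 8, so
// (and $r, 0xffff00ff) is selected as (BSTRINS_W $r, R0, 15, 8), clearing
// bits 15..8.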
def BstrinsMsb: SDNodeXForm<imm, [{
  uint64_t Imm = N->getZExtValue();
  unsigned MaskIdx, MaskLen;
  N->getValueType(0).getSizeInBits() == 32
      ? llvm::isShiftedMask_32(~Imm, MaskIdx, MaskLen)
      : llvm::isShiftedMask_64(~Imm, MaskIdx, MaskLen);
  return CurDAG->getTargetConstant(MaskIdx + MaskLen - 1, SDLoc(N),
                                   N->getValueType(0));
}]>;
def BstrinsLsb: SDNodeXForm<imm, [{
  uint64_t Imm = N->getZExtValue();
  unsigned MaskIdx, MaskLen;
  N->getValueType(0).getSizeInBits() == 32
      ? llvm::isShiftedMask_32(~Imm, MaskIdx, MaskLen)
      : llvm::isShiftedMask_64(~Imm, MaskIdx, MaskLen);
  return CurDAG->getTargetConstant(MaskIdx, SDLoc(N), N->getValueType(0));
}]>;
582 //===----------------------------------------------------------------------===//
583 // Instruction Formats
584 //===----------------------------------------------------------------------===//
586 include "LoongArchInstrFormats.td"
587 include "LoongArchFloatInstrFormats.td"
588 include "LoongArchLSXInstrFormats.td"
589 include "LoongArchLASXInstrFormats.td"
590 include "LoongArchLBTInstrFormats.td"
592 //===----------------------------------------------------------------------===//
593 // Instruction Class Templates
594 //===----------------------------------------------------------------------===//
596 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
597 class ALU_3R<bits<32> op>
598 : Fmt3R<op, (outs GPR:$rd), (ins GPR:$rj, GPR:$rk), "$rd, $rj, $rk">;
599 class ALU_2R<bits<32> op>
600 : Fmt2R<op, (outs GPR:$rd), (ins GPR:$rj), "$rd, $rj">;
602 class ALU_3RI2<bits<32> op, Operand ImmOpnd>
603 : Fmt3RI2<op, (outs GPR:$rd), (ins GPR:$rj, GPR:$rk, ImmOpnd:$imm2),
604 "$rd, $rj, $rk, $imm2">;
605 class ALU_3RI3<bits<32> op, Operand ImmOpnd>
606 : Fmt3RI3<op, (outs GPR:$rd), (ins GPR:$rj, GPR:$rk, ImmOpnd:$imm3),
607 "$rd, $rj, $rk, $imm3">;
class ALU_2RI5<bits<32> op, Operand ImmOpnd>
    : Fmt2RI5<op, (outs GPR:$rd), (ins GPR:$rj, ImmOpnd:$imm5),
              "$rd, $rj, $imm5">;
class ALU_2RI6<bits<32> op, Operand ImmOpnd>
    : Fmt2RI6<op, (outs GPR:$rd), (ins GPR:$rj, ImmOpnd:$imm6),
              "$rd, $rj, $imm6">;
class ALU_2RI12<bits<32> op, Operand ImmOpnd>
    : Fmt2RI12<op, (outs GPR:$rd), (ins GPR:$rj, ImmOpnd:$imm12),
               "$rd, $rj, $imm12">;
class ALU_2RI16<bits<32> op, Operand ImmOpnd>
    : Fmt2RI16<op, (outs GPR:$rd), (ins GPR:$rj, ImmOpnd:$imm16),
               "$rd, $rj, $imm16">;
class ALU_1RI20<bits<32> op, Operand ImmOpnd>
    : Fmt1RI20<op, (outs GPR:$rd), (ins ImmOpnd:$imm20), "$rd, $imm20">;
622 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
624 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
625 class MISC_I15<bits<32> op>
626 : FmtI15<op, (outs), (ins uimm15:$imm15), "$imm15">;
628 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
629 class RDTIME_2R<bits<32> op>
630 : Fmt2R<op, (outs GPR:$rd, GPR:$rj), (ins), "$rd, $rj">;
632 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
class BrCC_2RI16<bits<32> op>
    : Fmt2RI16<op, (outs), (ins GPR:$rj, GPR:$rd, simm16_lsl2_br:$imm16),
               "$rj, $rd, $imm16"> {
  let isBranch = 1;
  let isTerminator = 1;
}
class BrCCZ_1RI21<bits<32> op>
    : Fmt1RI21<op, (outs), (ins GPR:$rj, simm21_lsl2:$imm21),
               "$rj, $imm21"> {
  let isBranch = 1;
  let isTerminator = 1;
}
class Br_I26<bits<32> op>
    : FmtI26<op, (outs), (ins simm26_b:$imm26), "$imm26"> {
  let isBranch = 1;
  let isTerminator = 1;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
652 let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
653 class LOAD_3R<bits<32> op>
654 : Fmt3R<op, (outs GPR:$rd), (ins GPR:$rj, GPR:$rk), "$rd, $rj, $rk">;
class LOAD_2RI12<bits<32> op>
    : Fmt2RI12<op, (outs GPR:$rd), (ins GPR:$rj, simm12_addlike:$imm12),
               "$rd, $rj, $imm12">;
class LOAD_2RI14<bits<32> op>
    : Fmt2RI14<op, (outs GPR:$rd), (ins GPR:$rj, simm14_lsl2:$imm14),
               "$rd, $rj, $imm14">;
661 } // hasSideEffects = 0, mayLoad = 1, mayStore = 0
663 let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
class STORE_3R<bits<32> op>
    : Fmt3R<op, (outs), (ins GPR:$rd, GPR:$rj, GPR:$rk),
            "$rd, $rj, $rk">;
class STORE_2RI12<bits<32> op>
    : Fmt2RI12<op, (outs), (ins GPR:$rd, GPR:$rj, simm12_addlike:$imm12),
               "$rd, $rj, $imm12">;
class STORE_2RI14<bits<32> op>
    : Fmt2RI14<op, (outs), (ins GPR:$rd, GPR:$rj, simm14_lsl2:$imm14),
               "$rd, $rj, $imm14">;
673 } // hasSideEffects = 0, mayLoad = 0, mayStore = 1
675 let hasSideEffects = 0, mayLoad = 1, mayStore = 1, Constraints = "@earlyclobber $rd" in
class AM_3R<bits<32> op>
    : Fmt3R<op, (outs GPR:$rd), (ins GPR:$rk, GPRMemAtomic:$rj),
            "$rd, $rk, $rj">;
680 let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
class LLBase<bits<32> op>
    : Fmt2RI14<op, (outs GPR:$rd), (ins GPR:$rj, simm14_lsl2:$imm14),
               "$rd, $rj, $imm14">;
685 let hasSideEffects = 0, mayLoad = 0, mayStore = 1, Constraints = "$rd = $dst" in
class SCBase<bits<32> op>
    : Fmt2RI14<op, (outs GPR:$dst), (ins GPR:$rd, GPR:$rj, simm14_lsl2:$imm14),
               "$rd, $rj, $imm14">;
690 let hasSideEffects = 1 in
691 class IOCSRRD<bits<32> op>
692 : Fmt2R<op, (outs GPR:$rd), (ins GPR:$rj), "$rd, $rj">;
694 let hasSideEffects = 1 in
695 class IOCSRWR<bits<32> op>
696 : Fmt2R<op, (outs), (ins GPR:$rd, GPR:$rj), "$rd, $rj">;
698 //===----------------------------------------------------------------------===//
699 // Basic Integer Instructions
700 //===----------------------------------------------------------------------===//
702 // Arithmetic Operation Instructions
703 def ADD_W : ALU_3R<0x00100000>;
704 def SUB_W : ALU_3R<0x00110000>;
705 def ADDI_W : ALU_2RI12<0x02800000, simm12_addlike>;
706 def ALSL_W : ALU_3RI2<0x00040000, uimm2_plus1>;
707 def LU12I_W : ALU_1RI20<0x14000000, simm20_lu12iw>;
708 def SLT : ALU_3R<0x00120000>;
709 def SLTU : ALU_3R<0x00128000>;
710 def SLTI : ALU_2RI12<0x02000000, simm12>;
711 def SLTUI : ALU_2RI12<0x02400000, simm12>;
712 def PCADDI : ALU_1RI20<0x18000000, simm20>;
713 def PCADDU12I : ALU_1RI20<0x1c000000, simm20>;
714 def PCALAU12I : ALU_1RI20<0x1a000000, simm20_pcalau12i>;
715 def AND : ALU_3R<0x00148000>;
716 def OR : ALU_3R<0x00150000>;
717 def NOR : ALU_3R<0x00140000>;
718 def XOR : ALU_3R<0x00158000>;
719 def ANDN : ALU_3R<0x00168000>;
720 def ORN : ALU_3R<0x00160000>;
721 def ANDI : ALU_2RI12<0x03400000, uimm12>;
722 def ORI : ALU_2RI12<0x03800000, uimm12_ori>;
723 def XORI : ALU_2RI12<0x03c00000, uimm12>;
724 def MUL_W : ALU_3R<0x001c0000>;
725 def MULH_W : ALU_3R<0x001c8000>;
726 def MULH_WU : ALU_3R<0x001d0000>;
727 let usesCustomInserter = true in {
728 def DIV_W : ALU_3R<0x00200000>;
729 def MOD_W : ALU_3R<0x00208000>;
730 def DIV_WU : ALU_3R<0x00210000>;
731 def MOD_WU : ALU_3R<0x00218000>;
732 } // usesCustomInserter = true
734 // Bit-shift Instructions
735 def SLL_W : ALU_3R<0x00170000>;
736 def SRL_W : ALU_3R<0x00178000>;
737 def SRA_W : ALU_3R<0x00180000>;
738 def ROTR_W : ALU_3R<0x001b0000>;
740 def SLLI_W : ALU_2RI5<0x00408000, uimm5>;
741 def SRLI_W : ALU_2RI5<0x00448000, uimm5>;
742 def SRAI_W : ALU_2RI5<0x00488000, uimm5>;
743 def ROTRI_W : ALU_2RI5<0x004c8000, uimm5>;
745 // Bit-manipulation Instructions
746 def EXT_W_B : ALU_2R<0x00005c00>;
747 def EXT_W_H : ALU_2R<0x00005800>;
748 def CLO_W : ALU_2R<0x00001000>;
749 def CLZ_W : ALU_2R<0x00001400>;
750 def CTO_W : ALU_2R<0x00001800>;
751 def CTZ_W : ALU_2R<0x00001c00>;
752 def BYTEPICK_W : ALU_3RI2<0x00080000, uimm2>;
753 def REVB_2H : ALU_2R<0x00003000>;
754 def BITREV_4B : ALU_2R<0x00004800>;
755 def BITREV_W : ALU_2R<0x00005000>;
756 let Constraints = "$rd = $dst" in {
def BSTRINS_W : FmtBSTR_W<0x00600000, (outs GPR:$dst),
                          (ins GPR:$rd, GPR:$rj, uimm5:$msbw, uimm5:$lsbw),
                          "$rd, $rj, $msbw, $lsbw">;
}
761 def BSTRPICK_W : FmtBSTR_W<0x00608000, (outs GPR:$rd),
762 (ins GPR:$rj, uimm5:$msbw, uimm5:$lsbw),
763 "$rd, $rj, $msbw, $lsbw">;
764 def MASKEQZ : ALU_3R<0x00130000>;
765 def MASKNEZ : ALU_3R<0x00138000>;
767 // Branch Instructions
768 def BEQ : BrCC_2RI16<0x58000000>;
769 def BNE : BrCC_2RI16<0x5c000000>;
770 def BLT : BrCC_2RI16<0x60000000>;
771 def BGE : BrCC_2RI16<0x64000000>;
772 def BLTU : BrCC_2RI16<0x68000000>;
773 def BGEU : BrCC_2RI16<0x6c000000>;
774 def BEQZ : BrCCZ_1RI21<0x40000000>;
775 def BNEZ : BrCCZ_1RI21<0x44000000>;
776 def B : Br_I26<0x50000000>;
778 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCall = 1, Defs=[R1] in
779 def BL : FmtI26<0x54000000, (outs), (ins simm26_symbol:$imm26), "$imm26">;
780 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
781 def JIRL : Fmt2RI16<0x4c000000, (outs GPR:$rd),
782 (ins GPR:$rj, simm16_lsl2:$imm16), "$rd, $rj, $imm16">;
784 // Common Memory Access Instructions
785 def LD_B : LOAD_2RI12<0x28000000>;
786 def LD_H : LOAD_2RI12<0x28400000>;
787 def LD_W : LOAD_2RI12<0x28800000>;
788 def LD_BU : LOAD_2RI12<0x2a000000>;
789 def LD_HU : LOAD_2RI12<0x2a400000>;
790 def ST_B : STORE_2RI12<0x29000000>;
791 def ST_H : STORE_2RI12<0x29400000>;
792 def ST_W : STORE_2RI12<0x29800000>;
793 let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
794 def PRELD : FmtPRELD<(outs), (ins uimm5:$imm5, GPR:$rj, simm12:$imm12),
795 "$imm5, $rj, $imm12">;
797 // Atomic Memory Access Instructions
798 def LL_W : LLBase<0x20000000>;
799 def SC_W : SCBase<0x21000000>;
801 // Barrier Instructions
802 def DBAR : MISC_I15<0x38720000>;
803 def IBAR : MISC_I15<0x38728000>;
805 // Other Miscellaneous Instructions
806 def SYSCALL : MISC_I15<0x002b0000>;
807 def BREAK : MISC_I15<0x002a0000>;
808 def RDTIMEL_W : RDTIME_2R<0x00006000>;
809 def RDTIMEH_W : RDTIME_2R<0x00006400>;
810 def CPUCFG : ALU_2R<0x00006c00>;
812 // Cache Maintenance Instructions
def CACOP : FmtCACOP<(outs), (ins uimm5:$op, GPR:$rj, simm12:$imm12),
                     "$op, $rj, $imm12">;
816 /// LA64 instructions
818 let Predicates = [IsLA64] in {
820 // Arithmetic Operation Instructions for 64-bits
821 def ADD_D : ALU_3R<0x00108000>;
822 def SUB_D : ALU_3R<0x00118000>;
823 def ADDI_D : ALU_2RI12<0x02c00000, simm12_addlike>;
824 def ADDU16I_D : ALU_2RI16<0x10000000, simm16>;
825 def ALSL_WU : ALU_3RI2<0x00060000, uimm2_plus1>;
826 def ALSL_D : ALU_3RI2<0x002c0000, uimm2_plus1>;
827 let Constraints = "$rd = $dst" in {
828 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
829 def LU32I_D : Fmt1RI20<0x16000000, (outs GPR:$dst),
                       (ins GPR:$rd, simm20_lu32id:$imm20),
                       "$rd, $imm20">;
}
833 def LU52I_D : ALU_2RI12<0x03000000, simm12_lu52id>;
834 def PCADDU18I : ALU_1RI20<0x1e000000, simm20>;
835 def MUL_D : ALU_3R<0x001d8000>;
836 def MULH_D : ALU_3R<0x001e0000>;
837 def MULH_DU : ALU_3R<0x001e8000>;
838 def MULW_D_W : ALU_3R<0x001f0000>;
839 def MULW_D_WU : ALU_3R<0x001f8000>;
840 let usesCustomInserter = true in {
841 def DIV_D : ALU_3R<0x00220000>;
842 def MOD_D : ALU_3R<0x00228000>;
843 def DIV_DU : ALU_3R<0x00230000>;
844 def MOD_DU : ALU_3R<0x00238000>;
845 } // usesCustomInserter = true
847 // Bit-shift Instructions for 64-bits
848 def SLL_D : ALU_3R<0x00188000>;
849 def SRL_D : ALU_3R<0x00190000>;
850 def SRA_D : ALU_3R<0x00198000>;
851 def ROTR_D : ALU_3R<0x001b8000>;
852 def SLLI_D : ALU_2RI6<0x00410000, uimm6>;
853 def SRLI_D : ALU_2RI6<0x00450000, uimm6>;
854 def SRAI_D : ALU_2RI6<0x00490000, uimm6>;
855 def ROTRI_D : ALU_2RI6<0x004d0000, uimm6>;
857 // Bit-manipulation Instructions for 64-bits
858 def CLO_D : ALU_2R<0x00002000>;
859 def CLZ_D : ALU_2R<0x00002400>;
860 def CTO_D : ALU_2R<0x00002800>;
861 def CTZ_D : ALU_2R<0x00002c00>;
862 def BYTEPICK_D : ALU_3RI3<0x000c0000, uimm3>;
863 def REVB_4H : ALU_2R<0x00003400>;
864 def REVB_2W : ALU_2R<0x00003800>;
865 def REVB_D : ALU_2R<0x00003c00>;
866 def REVH_2W : ALU_2R<0x00004000>;
867 def REVH_D : ALU_2R<0x00004400>;
868 def BITREV_8B : ALU_2R<0x00004c00>;
869 def BITREV_D : ALU_2R<0x00005400>;
870 let Constraints = "$rd = $dst" in {
def BSTRINS_D : FmtBSTR_D<0x00800000, (outs GPR:$dst),
                          (ins GPR:$rd, GPR:$rj, uimm6:$msbd, uimm6:$lsbd),
                          "$rd, $rj, $msbd, $lsbd">;
}
875 def BSTRPICK_D : FmtBSTR_D<0x00c00000, (outs GPR:$rd),
876 (ins GPR:$rj, uimm6:$msbd, uimm6:$lsbd),
877 "$rd, $rj, $msbd, $lsbd">;
879 // Common Memory Access Instructions for 64-bits
880 def LD_WU : LOAD_2RI12<0x2a800000>;
881 def LD_D : LOAD_2RI12<0x28c00000>;
882 def ST_D : STORE_2RI12<0x29c00000>;
883 def LDX_B : LOAD_3R<0x38000000>;
884 def LDX_H : LOAD_3R<0x38040000>;
885 def LDX_W : LOAD_3R<0x38080000>;
886 def LDX_D : LOAD_3R<0x380c0000>;
887 def LDX_BU : LOAD_3R<0x38200000>;
888 def LDX_HU : LOAD_3R<0x38240000>;
889 def LDX_WU : LOAD_3R<0x38280000>;
890 def STX_B : STORE_3R<0x38100000>;
891 def STX_H : STORE_3R<0x38140000>;
892 def STX_W : STORE_3R<0x38180000>;
893 def STX_D : STORE_3R<0x381c0000>;
894 def LDPTR_W : LOAD_2RI14<0x24000000>;
895 def LDPTR_D : LOAD_2RI14<0x26000000>;
896 def STPTR_W : STORE_2RI14<0x25000000>;
897 def STPTR_D : STORE_2RI14<0x27000000>;
898 let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
def PRELDX : FmtPRELDX<(outs), (ins uimm5:$imm5, GPR:$rj, GPR:$rk),
                       "$imm5, $rj, $rk">;
902 // Bound Check Memory Access Instructions
903 def LDGT_B : LOAD_3R<0x38780000>;
904 def LDGT_H : LOAD_3R<0x38788000>;
905 def LDGT_W : LOAD_3R<0x38790000>;
906 def LDGT_D : LOAD_3R<0x38798000>;
907 def LDLE_B : LOAD_3R<0x387a0000>;
908 def LDLE_H : LOAD_3R<0x387a8000>;
909 def LDLE_W : LOAD_3R<0x387b0000>;
910 def LDLE_D : LOAD_3R<0x387b8000>;
911 def STGT_B : STORE_3R<0x387c0000>;
912 def STGT_H : STORE_3R<0x387c8000>;
913 def STGT_W : STORE_3R<0x387d0000>;
914 def STGT_D : STORE_3R<0x387d8000>;
915 def STLE_B : STORE_3R<0x387e0000>;
916 def STLE_H : STORE_3R<0x387e8000>;
917 def STLE_W : STORE_3R<0x387f0000>;
918 def STLE_D : STORE_3R<0x387f8000>;
920 // Atomic Memory Access Instructions for 64-bits
921 def AMSWAP_W : AM_3R<0x38600000>;
922 def AMSWAP_D : AM_3R<0x38608000>;
923 def AMADD_W : AM_3R<0x38610000>;
924 def AMADD_D : AM_3R<0x38618000>;
925 def AMAND_W : AM_3R<0x38620000>;
926 def AMAND_D : AM_3R<0x38628000>;
927 def AMOR_W : AM_3R<0x38630000>;
928 def AMOR_D : AM_3R<0x38638000>;
929 def AMXOR_W : AM_3R<0x38640000>;
930 def AMXOR_D : AM_3R<0x38648000>;
931 def AMMAX_W : AM_3R<0x38650000>;
932 def AMMAX_D : AM_3R<0x38658000>;
933 def AMMIN_W : AM_3R<0x38660000>;
934 def AMMIN_D : AM_3R<0x38668000>;
935 def AMMAX_WU : AM_3R<0x38670000>;
936 def AMMAX_DU : AM_3R<0x38678000>;
937 def AMMIN_WU : AM_3R<0x38680000>;
938 def AMMIN_DU : AM_3R<0x38688000>;
939 def AMSWAP__DB_W : AM_3R<0x38690000>;
940 def AMSWAP__DB_D : AM_3R<0x38698000>;
941 def AMADD__DB_W : AM_3R<0x386a0000>;
942 def AMADD__DB_D : AM_3R<0x386a8000>;
943 def AMAND__DB_W : AM_3R<0x386b0000>;
944 def AMAND__DB_D : AM_3R<0x386b8000>;
945 def AMOR__DB_W : AM_3R<0x386c0000>;
946 def AMOR__DB_D : AM_3R<0x386c8000>;
947 def AMXOR__DB_W : AM_3R<0x386d0000>;
948 def AMXOR__DB_D : AM_3R<0x386d8000>;
949 def AMMAX__DB_W : AM_3R<0x386e0000>;
950 def AMMAX__DB_D : AM_3R<0x386e8000>;
951 def AMMIN__DB_W : AM_3R<0x386f0000>;
952 def AMMIN__DB_D : AM_3R<0x386f8000>;
953 def AMMAX__DB_WU : AM_3R<0x38700000>;
954 def AMMAX__DB_DU : AM_3R<0x38708000>;
955 def AMMIN__DB_WU : AM_3R<0x38710000>;
956 def AMMIN__DB_DU : AM_3R<0x38718000>;
957 def LL_D : LLBase<0x22000000>;
958 def SC_D : SCBase<0x23000000>;
960 // CRC Check Instructions
961 def CRC_W_B_W : ALU_3R<0x00240000>;
962 def CRC_W_H_W : ALU_3R<0x00248000>;
963 def CRC_W_W_W : ALU_3R<0x00250000>;
964 def CRC_W_D_W : ALU_3R<0x00258000>;
965 def CRCC_W_B_W : ALU_3R<0x00260000>;
966 def CRCC_W_H_W : ALU_3R<0x00268000>;
967 def CRCC_W_W_W : ALU_3R<0x00270000>;
968 def CRCC_W_D_W : ALU_3R<0x00278000>;
970 // Other Miscellaneous Instructions for 64-bits
def ASRTLE_D : FmtASRT<0x00010000, (outs), (ins GPR:$rj, GPR:$rk),
                       "$rj, $rk">;
def ASRTGT_D : FmtASRT<0x00018000, (outs), (ins GPR:$rj, GPR:$rk),
                       "$rj, $rk">;
975 def RDTIME_D : RDTIME_2R<0x00006800>;
976 } // Predicates = [IsLA64]
978 //===----------------------------------------------------------------------===//
979 // Pseudo-instructions and codegen patterns
981 // Naming convention: For 'generic' pattern classes, we use the naming
982 // convention PatTy1Ty2.
983 //===----------------------------------------------------------------------===//
985 /// Generic pattern classes
987 class PatGprGpr<SDPatternOperator OpNode, LAInst Inst>
988 : Pat<(OpNode GPR:$rj, GPR:$rk), (Inst GPR:$rj, GPR:$rk)>;
989 class PatGprGpr_32<SDPatternOperator OpNode, LAInst Inst>
990 : Pat<(sext_inreg (OpNode GPR:$rj, GPR:$rk), i32), (Inst GPR:$rj, GPR:$rk)>;
991 class PatGpr<SDPatternOperator OpNode, LAInst Inst>
992 : Pat<(OpNode GPR:$rj), (Inst GPR:$rj)>;
994 class PatGprImm<SDPatternOperator OpNode, LAInst Inst, Operand ImmOpnd>
995 : Pat<(OpNode GPR:$rj, ImmOpnd:$imm),
996 (Inst GPR:$rj, ImmOpnd:$imm)>;
997 class PatGprImm_32<SDPatternOperator OpNode, LAInst Inst, Operand ImmOpnd>
998 : Pat<(sext_inreg (OpNode GPR:$rj, ImmOpnd:$imm), i32),
999 (Inst GPR:$rj, ImmOpnd:$imm)>;
def AddLike: PatFrags<(ops node:$A, node:$B),
                      [(add node:$A, node:$B), (or node:$A, node:$B)], [{
  return N->getOpcode() == ISD::ADD || isOrEquivalentToAdd(N);
}]>;
1007 /// Simple arithmetic operations
1009 // Match both a plain shift and one where the shift amount is masked (this is
1010 // typically introduced when the legalizer promotes the shift amount and
1011 // zero-extends it). For LoongArch, the mask is unnecessary as shifts in the
1012 // base ISA only read the least significant 5 bits (LA32) or 6 bits (LA64).
def shiftMaskGRLen
    : ComplexPattern<GRLenVT, 1, "selectShiftMaskGRLen", [], [], 0>;
1015 def shiftMask32 : ComplexPattern<i64, 1, "selectShiftMask32", [], [], 0>;
1017 def sexti32 : ComplexPattern<i64, 1, "selectSExti32">;
1018 def zexti32 : ComplexPattern<i64, 1, "selectZExti32">;
1020 class shiftop<SDPatternOperator operator>
1021 : PatFrag<(ops node:$val, node:$count),
1022 (operator node:$val, (GRLenVT (shiftMaskGRLen node:$count)))>;
1023 class shiftopw<SDPatternOperator operator>
1024 : PatFrag<(ops node:$val, node:$count),
1025 (operator node:$val, (i64 (shiftMask32 node:$count)))>;
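// Illustrative example (not part of the original comments): on LA64,
// (srl GPR:$a, (and GPR:$b, (i64 63))) can be selected as plain SRL_D $a, $b;
// the masking `and` is dropped because SRL.D only reads the low 6 bits of the
// shift amount anyway.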
def mul_const_oneuse : PatFrag<(ops node:$A, node:$B),
                               (mul node:$A, node:$B), [{
  if (auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return N1C->hasOneUse();
  return false;
}]>;
1034 let Predicates = [IsLA32] in {
1035 def : PatGprGpr<add, ADD_W>;
1036 def : PatGprImm<add, ADDI_W, simm12>;
1037 def : PatGprGpr<sub, SUB_W>;
1038 def : PatGprGpr<sdiv, DIV_W>;
1039 def : PatGprGpr<udiv, DIV_WU>;
1040 def : PatGprGpr<srem, MOD_W>;
1041 def : PatGprGpr<urem, MOD_WU>;
1042 def : PatGprGpr<mul, MUL_W>;
1043 def : PatGprGpr<mulhs, MULH_W>;
1044 def : PatGprGpr<mulhu, MULH_WU>;
1045 def : PatGprGpr<rotr, ROTR_W>;
1046 def : PatGprImm<rotr, ROTRI_W, uimm5>;
1048 foreach Idx = 1...3 in {
1049 defvar ShamtA = !mul(8, Idx);
1050 defvar ShamtB = !mul(8, !sub(4, Idx));
1051 def : Pat<(or (shl GPR:$rk, (i32 ShamtA)), (srl GPR:$rj, (i32 ShamtB))),
            (BYTEPICK_W GPR:$rj, GPR:$rk, Idx)>;
}
1054 } // Predicates = [IsLA32]
1056 let Predicates = [IsLA64] in {
1057 def : PatGprGpr<add, ADD_D>;
1058 def : PatGprGpr_32<add, ADD_W>;
1059 def : PatGprImm<add, ADDI_D, simm12>;
1060 def : PatGprImm_32<add, ADDI_W, simm12>;
1061 def : PatGprGpr<sub, SUB_D>;
1062 def : PatGprGpr_32<sub, SUB_W>;
1063 def : PatGprGpr<sdiv, DIV_D>;
1064 def : PatGprGpr<udiv, DIV_DU>;
1065 def : PatGprGpr<srem, MOD_D>;
1066 def : PatGprGpr<urem, MOD_DU>;
1067 def : PatGprGpr<rotr, ROTR_D>;
1068 def : PatGprGpr<loongarch_rotr_w, ROTR_W>;
1069 def : PatGprImm<rotr, ROTRI_D, uimm6>;
1070 def : PatGprImm_32<rotr, ROTRI_W, uimm5>;
1071 def : Pat<(loongarch_rotl_w GPR:$rj, uimm5:$imm),
1072 (ROTRI_W GPR:$rj, (ImmSubFrom32 uimm5:$imm))>;
1073 def : Pat<(sext_inreg (loongarch_rotl_w GPR:$rj, uimm5:$imm), i32),
1074 (ROTRI_W GPR:$rj, (ImmSubFrom32 uimm5:$imm))>;
1075 // TODO: Select "_W[U]" instructions for i32xi32 if only lower 32 bits of the
1076 // product are used.
1077 def : PatGprGpr<mul, MUL_D>;
1078 def : PatGprGpr<mulhs, MULH_D>;
1079 def : PatGprGpr<mulhu, MULH_DU>;
// Select MULW_D_W for calculating the full 64 bits product of i32xi32 signed
// multiplication.
1082 def : Pat<(i64 (mul (sext_inreg GPR:$rj, i32), (sext_inreg GPR:$rk, i32))),
1083 (MULW_D_W GPR:$rj, GPR:$rk)>;
1084 // Select MULW_D_WU for calculating the full 64 bits product of i32xi32
1085 // unsigned multiplication.
1086 def : Pat<(i64 (mul (loongarch_bstrpick GPR:$rj, (i64 31), (i64 0)),
1087 (loongarch_bstrpick GPR:$rk, (i64 31), (i64 0)))),
1088 (MULW_D_WU GPR:$rj, GPR:$rk)>;
1090 def : Pat<(add GPR:$rj, simm16_lsl16:$imm),
1091 (ADDU16I_D GPR:$rj, (HI16 $imm))>;
def : Pat<(add GPR:$rj, simm32_hi16_lo12:$imm),
          (ADDI_D (ADDU16I_D GPR:$rj, (HI16ForAddu16idAddiPair $imm)),
                  (LO12 $imm))>;
def : Pat<(sext_inreg (add GPR:$rj, simm32_hi16_lo12:$imm), i32),
          (ADDI_W (ADDU16I_D GPR:$rj, (HI16ForAddu16idAddiPair $imm)),
                  (LO12 $imm))>;
} // Predicates = [IsLA64]
1099 let Predicates = [IsLA32] in {
1100 def : Pat<(add GPR:$rj, (AddiPair:$im)),
1101 (ADDI_W (ADDI_W GPR:$rj, (AddiPairImmLarge AddiPair:$im)),
1102 (AddiPairImmSmall AddiPair:$im))>;
1103 } // Predicates = [IsLA32]
1105 let Predicates = [IsLA64] in {
1106 def : Pat<(add GPR:$rj, (AddiPair:$im)),
1107 (ADDI_D (ADDI_D GPR:$rj, (AddiPairImmLarge AddiPair:$im)),
1108 (AddiPairImmSmall AddiPair:$im))>;
1109 def : Pat<(sext_inreg (add GPR:$rj, (AddiPair:$im)), i32),
1110 (ADDI_W (ADDI_W GPR:$rj, (AddiPairImmLarge AddiPair:$im)),
1111 (AddiPairImmSmall AddiPair:$im))>;
1112 } // Predicates = [IsLA64]
1114 let Predicates = [IsLA32] in {
1115 foreach Idx0 = 1...4 in {
1116 foreach Idx1 = 1...4 in {
1117 defvar CImm = !add(1, !shl(!add(1, !shl(1, Idx0)), Idx1));
1118 def : Pat<(mul_const_oneuse GPR:$r, (i32 CImm)),
1119 (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 Idx0)),
                    GPR:$r, (i32 Idx1))>;
  }
}
1123 foreach Idx0 = 1...4 in {
1124 foreach Idx1 = 1...4 in {
1125 defvar Cb = !add(1, !shl(1, Idx0));
1126 defvar CImm = !add(Cb, !shl(Cb, Idx1));
1127 def : Pat<(mul_const_oneuse GPR:$r, (i32 CImm)),
1128 (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 Idx0)),
                    (ALSL_W GPR:$r, GPR:$r, (i32 Idx0)), (i32 Idx1))>;
  }
}
1132 } // Predicates = [IsLA32]
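// Illustrative example (not part of the original comments): with Idx0 = 2 and
// Idx1 = 1 the first loop covers CImm = 1 + (5 << 1) = 11, so (mul $r, 11) is
// selected as (ALSL_W (ALSL_W $r, $r, 2), $r, 1), i.e. ((r * 5) << 1) + r.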
1134 let Predicates = [IsLA64] in {
1135 foreach Idx0 = 1...4 in {
1136 foreach Idx1 = 1...4 in {
1137 defvar CImm = !add(1, !shl(!add(1, !shl(1, Idx0)), Idx1));
1138 def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 CImm)), i32),
1139 (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 Idx0)),
1140 GPR:$r, (i64 Idx1))>;
1141 def : Pat<(mul_const_oneuse GPR:$r, (i64 CImm)),
1142 (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 Idx0)),
                    GPR:$r, (i64 Idx1))>;
  }
}
1146 foreach Idx0 = 1...4 in {
1147 foreach Idx1 = 1...4 in {
1148 defvar Cb = !add(1, !shl(1, Idx0));
1149 defvar CImm = !add(Cb, !shl(Cb, Idx1));
1150 def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 CImm)), i32),
1151 (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 Idx0)),
1152 (ALSL_W GPR:$r, GPR:$r, (i64 Idx0)), (i64 Idx1))>;
1153 def : Pat<(mul_const_oneuse GPR:$r, (i64 CImm)),
1154 (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 Idx0)),
                    (ALSL_D GPR:$r, GPR:$r, (i64 Idx0)), (i64 Idx1))>;
  }
}
1158 } // Predicates = [IsLA64]
1160 let Predicates = [IsLA32] in {
1161 def : Pat<(mul GPR:$rj, (AlslSlliImm:$im)),
1162 (SLLI_W (ALSL_W GPR:$rj, GPR:$rj, (AlslSlliImmI0 AlslSlliImm:$im)),
1163 (AlslSlliImmI1 AlslSlliImm:$im))>;
1164 } // Predicates = [IsLA32]
1166 let Predicates = [IsLA64] in {
1167 def : Pat<(sext_inreg (mul GPR:$rj, (AlslSlliImm:$im)), i32),
1168 (SLLI_W (ALSL_W GPR:$rj, GPR:$rj, (AlslSlliImmI0 AlslSlliImm:$im)),
1169 (AlslSlliImmI1 AlslSlliImm:$im))>;
1170 def : Pat<(mul GPR:$rj, (AlslSlliImm:$im)),
1171 (SLLI_D (ALSL_D GPR:$rj, GPR:$rj, (AlslSlliImmI0 AlslSlliImm:$im)),
1172 (AlslSlliImmI1 AlslSlliImm:$im))>;
} // Predicates = [IsLA64]

let Predicates = [IsLA64] in {
foreach Idx = 1...7 in {
1176 defvar ShamtA = !mul(8, Idx);
1177 defvar ShamtB = !mul(8, !sub(8, Idx));
1178 def : Pat<(or (shl GPR:$rk, (i64 ShamtA)), (srl GPR:$rj, (i64 ShamtB))),
            (BYTEPICK_D GPR:$rj, GPR:$rk, Idx)>;
}
1182 foreach Idx = 1...3 in {
1183 defvar ShamtA = !mul(8, Idx);
1184 defvar ShamtB = !mul(8, !sub(4, Idx));
1185 // NOTE: the srl node would already be transformed into a loongarch_bstrpick
1186 // by the time this pattern gets to execute, hence the weird construction.
1187 def : Pat<(sext_inreg (or (shl GPR:$rk, (i64 ShamtA)),
1188 (loongarch_bstrpick GPR:$rj, (i64 31),
1189 (i64 ShamtB))), i32),
            (BYTEPICK_W GPR:$rj, GPR:$rk, Idx)>;
}
1192 } // Predicates = [IsLA64]
1194 def : PatGprGpr<and, AND>;
1195 def : PatGprImm<and, ANDI, uimm12>;
1196 def : PatGprGpr<or, OR>;
1197 def : PatGprImm<or, ORI, uimm12>;
1198 def : PatGprGpr<xor, XOR>;
1199 def : PatGprImm<xor, XORI, uimm12>;
1200 def : Pat<(not GPR:$rj), (NOR GPR:$rj, R0)>;
1201 def : Pat<(not (or GPR:$rj, GPR:$rk)), (NOR GPR:$rj, GPR:$rk)>;
1202 def : Pat<(or GPR:$rj, (not GPR:$rk)), (ORN GPR:$rj, GPR:$rk)>;
1203 def : Pat<(and GPR:$rj, (not GPR:$rk)), (ANDN GPR:$rj, GPR:$rk)>;
1205 let Predicates = [IsLA32] in {
1206 def : Pat<(and GPR:$rj, BstrinsImm:$imm),
1207 (BSTRINS_W GPR:$rj, R0, (BstrinsMsb BstrinsImm:$imm),
1208 (BstrinsLsb BstrinsImm:$imm))>;
1209 } // Predicates = [IsLA32]
1211 let Predicates = [IsLA64] in {
1212 def : Pat<(and GPR:$rj, BstrinsImm:$imm),
1213 (BSTRINS_D GPR:$rj, R0, (BstrinsMsb BstrinsImm:$imm),
1214 (BstrinsLsb BstrinsImm:$imm))>;
1215 } // Predicates = [IsLA64]
1219 // We lower `trap` to `amswap.w rd:$r0, rk:$r1, rj:$r0`, as this is guaranteed
1220 // to trap with an INE (non-existent on LA32, explicitly documented to INE on
// LA64). The resulting signal also differs from that of `debugtrap`, as on
// some other existing ports, so programs/porters might have an easier time.
1223 def PseudoUNIMP : Pseudo<(outs), (ins), [(trap)]>,
1224 PseudoInstExpansion<(AMSWAP_W R0, R1, R0)>;
1226 // We lower `debugtrap` to `break 0`, as this is guaranteed to exist and work,
1227 // even for LA32 Primary. Also, because so far the ISA does not provide a
1228 // specific trap instruction/kind exclusively for alerting the debugger,
1229 // every other project uses the generic immediate of 0 for this.
1230 def : Pat<(debugtrap), (BREAK 0)>;
1232 /// Bit counting operations
1234 let Predicates = [IsLA64] in {
1235 def : PatGpr<ctlz, CLZ_D>;
1236 def : PatGpr<cttz, CTZ_D>;
1237 def : Pat<(ctlz (not GPR:$rj)), (CLO_D GPR:$rj)>;
1238 def : Pat<(cttz (not GPR:$rj)), (CTO_D GPR:$rj)>;
1239 def : PatGpr<loongarch_clzw, CLZ_W>;
1240 def : PatGpr<loongarch_ctzw, CTZ_W>;
1241 def : Pat<(loongarch_clzw (not GPR:$rj)), (CLO_W GPR:$rj)>;
1242 def : Pat<(loongarch_ctzw (not GPR:$rj)), (CTO_W GPR:$rj)>;
1243 } // Predicates = [IsLA64]
1245 let Predicates = [IsLA32] in {
1246 def : PatGpr<ctlz, CLZ_W>;
1247 def : PatGpr<cttz, CTZ_W>;
1248 def : Pat<(ctlz (not GPR:$rj)), (CLO_W GPR:$rj)>;
1249 def : Pat<(cttz (not GPR:$rj)), (CTO_W GPR:$rj)>;
1250 } // Predicates = [IsLA32]
1252 /// FrameIndex calculations
1253 let Predicates = [IsLA32] in {
1254 def : Pat<(AddLike (i32 BaseAddr:$rj), simm12:$imm12),
1255 (ADDI_W (i32 BaseAddr:$rj), simm12:$imm12)>;
1256 } // Predicates = [IsLA32]
1257 let Predicates = [IsLA64] in {
1258 def : Pat<(AddLike (i64 BaseAddr:$rj), simm12:$imm12),
1259 (ADDI_D (i64 BaseAddr:$rj), simm12:$imm12)>;
1260 } // Predicates = [IsLA64]
1262 /// Shifted addition
1263 let Predicates = [IsLA32] in {
1264 def : Pat<(add GPR:$rk, (shl GPR:$rj, uimm2_plus1:$imm2)),
1265 (ALSL_W GPR:$rj, GPR:$rk, uimm2_plus1:$imm2)>;
1266 } // Predicates = [IsLA32]
1267 let Predicates = [IsLA64] in {
1268 def : Pat<(add GPR:$rk, (shl GPR:$rj, uimm2_plus1:$imm2)),
1269 (ALSL_D GPR:$rj, GPR:$rk, uimm2_plus1:$imm2)>;
1270 def : Pat<(sext_inreg (add GPR:$rk, (shl GPR:$rj, uimm2_plus1:$imm2)), i32),
1271 (ALSL_W GPR:$rj, GPR:$rk, uimm2_plus1:$imm2)>;
def : Pat<(loongarch_bstrpick (add GPR:$rk, (shl GPR:$rj, uimm2_plus1:$imm2)),
                              (i64 31), (i64 0)),
          (ALSL_WU GPR:$rj, GPR:$rk, uimm2_plus1:$imm2)>;
1275 } // Predicates = [IsLA64]
1279 let Predicates = [IsLA32] in {
1280 def : PatGprGpr<shiftop<shl>, SLL_W>;
1281 def : PatGprGpr<shiftop<sra>, SRA_W>;
1282 def : PatGprGpr<shiftop<srl>, SRL_W>;
1283 def : PatGprImm<shl, SLLI_W, uimm5>;
1284 def : PatGprImm<sra, SRAI_W, uimm5>;
1285 def : PatGprImm<srl, SRLI_W, uimm5>;
1286 } // Predicates = [IsLA32]
1288 let Predicates = [IsLA64] in {
1289 def : PatGprGpr<shiftopw<loongarch_sll_w>, SLL_W>;
1290 def : PatGprGpr<shiftopw<loongarch_sra_w>, SRA_W>;
1291 def : PatGprGpr<shiftopw<loongarch_srl_w>, SRL_W>;
1292 def : PatGprGpr<shiftop<shl>, SLL_D>;
1293 def : PatGprGpr<shiftop<sra>, SRA_D>;
1294 def : PatGprGpr<shiftop<srl>, SRL_D>;
1295 def : PatGprImm<shl, SLLI_D, uimm6>;
1296 def : PatGprImm<sra, SRAI_D, uimm6>;
1297 def : PatGprImm<srl, SRLI_D, uimm6>;
1298 } // Predicates = [IsLA64]
1302 def : Pat<(sext_inreg GPR:$rj, i8), (EXT_W_B GPR:$rj)>;
1303 def : Pat<(sext_inreg GPR:$rj, i16), (EXT_W_H GPR:$rj)>;
1305 let Predicates = [IsLA64] in {
1306 def : Pat<(sext_inreg GPR:$rj, i32), (ADDI_W GPR:$rj, 0)>;
1307 } // Predicates = [IsLA64]
1311 def : PatGprGpr<setlt, SLT>;
1312 def : PatGprImm<setlt, SLTI, simm12>;
1313 def : PatGprGpr<setult, SLTU>;
1314 def : PatGprImm<setult, SLTUI, simm12>;
1316 // Define pattern expansions for setcc operations that aren't directly
1317 // handled by a LoongArch instruction.
1318 def : Pat<(seteq GPR:$rj, 0), (SLTUI GPR:$rj, 1)>;
1319 def : Pat<(seteq GPR:$rj, GPR:$rk), (SLTUI (XOR GPR:$rj, GPR:$rk), 1)>;
1320 let Predicates = [IsLA32] in {
1321 def : Pat<(seteq GPR:$rj, simm12_plus1:$imm12),
1322 (SLTUI (ADDI_W GPR:$rj, (NegImm simm12_plus1:$imm12)), 1)>;
1323 } // Predicates = [IsLA32]
1324 let Predicates = [IsLA64] in {
1325 def : Pat<(seteq GPR:$rj, simm12_plus1:$imm12),
1326 (SLTUI (ADDI_D GPR:$rj, (NegImm simm12_plus1:$imm12)), 1)>;
1327 } // Predicates = [IsLA64]
1328 def : Pat<(setne GPR:$rj, 0), (SLTU R0, GPR:$rj)>;
1329 def : Pat<(setne GPR:$rj, GPR:$rk), (SLTU R0, (XOR GPR:$rj, GPR:$rk))>;
1330 let Predicates = [IsLA32] in {
1331 def : Pat<(setne GPR:$rj, simm12_plus1:$imm12),
1332 (SLTU R0, (ADDI_W GPR:$rj, (NegImm simm12_plus1:$imm12)))>;
1333 } // Predicates = [IsLA32]
1334 let Predicates = [IsLA64] in {
1335 def : Pat<(setne GPR:$rj, simm12_plus1:$imm12),
1336 (SLTU R0, (ADDI_D GPR:$rj, (NegImm simm12_plus1:$imm12)))>;
1337 } // Predicates = [IsLA64]
1338 def : Pat<(setugt GPR:$rj, GPR:$rk), (SLTU GPR:$rk, GPR:$rj)>;
1339 def : Pat<(setuge GPR:$rj, GPR:$rk), (XORI (SLTU GPR:$rj, GPR:$rk), 1)>;
1340 def : Pat<(setule GPR:$rj, GPR:$rk), (XORI (SLTU GPR:$rk, GPR:$rj), 1)>;
1341 def : Pat<(setgt GPR:$rj, GPR:$rk), (SLT GPR:$rk, GPR:$rj)>;
1342 def : Pat<(setge GPR:$rj, GPR:$rk), (XORI (SLT GPR:$rj, GPR:$rk), 1)>;
1343 def : Pat<(setle GPR:$rj, GPR:$rk), (XORI (SLT GPR:$rk, GPR:$rj), 1)>;
1347 def : Pat<(select GPR:$cond, GPR:$t, 0), (MASKEQZ GPR:$t, GPR:$cond)>;
1348 def : Pat<(select GPR:$cond, 0, GPR:$f), (MASKNEZ GPR:$f, GPR:$cond)>;
1349 def : Pat<(select GPR:$cond, GPR:$t, GPR:$f),
1350 (OR (MASKEQZ GPR:$t, GPR:$cond), (MASKNEZ GPR:$f, GPR:$cond))>;
1352 /// Branches and jumps
1354 class BccPat<PatFrag CondOp, LAInst Inst>
1355 : Pat<(brcond (GRLenVT (CondOp GPR:$rj, GPR:$rd)), bb:$imm16),
1356 (Inst GPR:$rj, GPR:$rd, bb:$imm16)>;
1358 def : BccPat<seteq, BEQ>;
1359 def : BccPat<setne, BNE>;
1360 def : BccPat<setlt, BLT>;
1361 def : BccPat<setge, BGE>;
1362 def : BccPat<setult, BLTU>;
1363 def : BccPat<setuge, BGEU>;
1365 class BccSwapPat<PatFrag CondOp, LAInst InstBcc>
1366 : Pat<(brcond (GRLenVT (CondOp GPR:$rd, GPR:$rj)), bb:$imm16),
1367 (InstBcc GPR:$rj, GPR:$rd, bb:$imm16)>;
1369 // Condition codes that don't have matching LoongArch branch instructions, but
1370 // are trivially supported by swapping the two input operands.
1371 def : BccSwapPat<setgt, BLT>;
1372 def : BccSwapPat<setle, BGE>;
1373 def : BccSwapPat<setugt, BLTU>;
1374 def : BccSwapPat<setule, BGEU>;
1376 // An extra pattern is needed for a brcond without a setcc (i.e. where the
1377 // condition was calculated elsewhere).
1378 def : Pat<(brcond GPR:$rj, bb:$imm21), (BNEZ GPR:$rj, bb:$imm21)>;
1380 def : Pat<(brcond (GRLenVT (seteq GPR:$rj, 0)), bb:$imm21),
1381 (BEQZ GPR:$rj, bb:$imm21)>;
1382 def : Pat<(brcond (GRLenVT (setne GPR:$rj, 0)), bb:$imm21),
1383 (BNEZ GPR:$rj, bb:$imm21)>;
1385 let isBarrier = 1, isBranch = 1, isTerminator = 1 in
1386 def PseudoBR : Pseudo<(outs), (ins simm26_b:$imm26), [(br bb:$imm26)]>,
1387 PseudoInstExpansion<(B simm26_b:$imm26)>;
1389 let isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in
1390 def PseudoBRIND : Pseudo<(outs), (ins GPR:$rj, simm16_lsl2:$imm16)>,
1391 PseudoInstExpansion<(JIRL R0, GPR:$rj, simm16_lsl2:$imm16)>;
1393 def : Pat<(brind GPR:$rj), (PseudoBRIND GPR:$rj, 0)>;
1394 def : Pat<(brind (add GPR:$rj, simm16_lsl2:$imm16)),
1395 (PseudoBRIND GPR:$rj, simm16_lsl2:$imm16)>;
1397 let isCall = 1, Defs = [R1] in
1398 def PseudoCALL : Pseudo<(outs), (ins simm26_symbol:$func)>;
1400 def : Pat<(loongarch_call tglobaladdr:$func), (PseudoCALL tglobaladdr:$func)>;
1401 def : Pat<(loongarch_call texternalsym:$func), (PseudoCALL texternalsym:$func)>;
1403 let isCall = 1, Defs = [R1] in
1404 def PseudoCALLIndirect : Pseudo<(outs), (ins GPR:$rj),
1405 [(loongarch_call GPR:$rj)]>,
1406 PseudoInstExpansion<(JIRL R1, GPR:$rj, 0)>;
1408 let isCall = 1, hasSideEffects = 0, mayStore = 0, mayLoad = 0, Defs = [R1] in
1409 def PseudoJIRL_CALL : Pseudo<(outs), (ins GPR:$rj, simm16_lsl2:$imm16)>,
1410 PseudoInstExpansion<(JIRL R1, GPR:$rj,
1411 simm16_lsl2:$imm16)>;
1413 let isBarrier = 1, isReturn = 1, isTerminator = 1 in
1414 def PseudoRET : Pseudo<(outs), (ins), [(loongarch_ret)]>,
1415 PseudoInstExpansion<(JIRL R0, R1, 0)>;
1417 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [R3] in
1418 def PseudoTAIL : Pseudo<(outs), (ins simm26_symbol:$dst)>;
1420 def : Pat<(loongarch_tail (iPTR tglobaladdr:$dst)),
1421 (PseudoTAIL tglobaladdr:$dst)>;
1422 def : Pat<(loongarch_tail (iPTR texternalsym:$dst)),
1423 (PseudoTAIL texternalsym:$dst)>;
1425 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [R3] in
1426 def PseudoTAILIndirect : Pseudo<(outs), (ins GPRT:$rj),
1427 [(loongarch_tail GPRT:$rj)]>,
1428 PseudoInstExpansion<(JIRL R0, GPR:$rj, 0)>;
1430 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
1431 hasSideEffects = 0, mayStore = 0, mayLoad = 0, Uses = [R3] in
1432 def PseudoB_TAIL : Pseudo<(outs), (ins simm26_b:$imm26)>,
1433 PseudoInstExpansion<(B simm26_b:$imm26)>;
1435 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
1436 hasSideEffects = 0, mayStore = 0, mayLoad = 0, Uses = [R3] in
1437 def PseudoJIRL_TAIL : Pseudo<(outs), (ins GPR:$rj, simm16_lsl2:$imm16)>,
1438 PseudoInstExpansion<(JIRL R0, GPR:$rj,
1439 simm16_lsl2:$imm16)>;
1441 /// Load address (la*) macro instructions.
1443 // Define isCodeGenOnly = 0 to expose them to tablegened assembly parser.
1444 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0,
1445 isAsmParserOnly = 1 in {
1446 def PseudoLA_ABS : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1447 "la.abs", "$dst, $src">;
1448 def PseudoLA_ABS_LARGE : Pseudo<(outs GPR:$dst),
1449 (ins GPR:$tmp, bare_symbol:$src), [],
1450 "la.abs", "$dst, $src">;
1451 def PseudoLA_PCREL : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1452 "la.pcrel", "$dst, $src">;
def PseudoLA_PCREL_LARGE : Pseudo<(outs GPR:$dst),
                                  (ins GPR:$tmp, bare_symbol:$src), [],
                                  "la.pcrel", "$dst, $tmp, $src">,
                           Requires<[IsLA64]>;
def PseudoLA_TLS_LE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.tls.le", "$dst, $src">;
}
1460 let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
1461 isAsmParserOnly = 1 in {
1462 def PseudoLA_GOT : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1463 "la.got", "$dst, $src">;
def PseudoLA_GOT_LARGE : Pseudo<(outs GPR:$dst),
                                (ins GPR:$tmp, bare_symbol:$src), [],
                                "la.got", "$dst, $tmp, $src">,
                         Requires<[IsLA64]>;
1468 def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1469 "la.tls.ie", "$dst, $src">;
def PseudoLA_TLS_IE_LARGE : Pseudo<(outs GPR:$dst),
                                   (ins GPR:$tmp, bare_symbol:$src), [],
                                   "la.tls.ie", "$dst, $tmp, $src">,
                            Requires<[IsLA64]>;
1474 def PseudoLA_TLS_LD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1475 "la.tls.ld", "$dst, $src">;
def PseudoLA_TLS_LD_LARGE : Pseudo<(outs GPR:$dst),
                                   (ins GPR:$tmp, bare_symbol:$src), [],
                                   "la.tls.ld", "$dst, $tmp, $src">,
                            Requires<[IsLA64]>;
1480 def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1481 "la.tls.gd", "$dst, $src">;
def PseudoLA_TLS_GD_LARGE : Pseudo<(outs GPR:$dst),
                                   (ins GPR:$tmp, bare_symbol:$src), [],
                                   "la.tls.gd", "$dst, $tmp, $src">,
                            Requires<[IsLA64]>;
}
// Load address inst alias: "la", "la.global" and "la.local".
// Default:
// la = la.global = la.got
// la.local = la.pcrel
// With feature "+la-global-with-pcrel":
// la = la.global = la.pcrel
// With feature "+la-global-with-abs":
// la = la.global = la.abs
// With feature "+la-local-with-abs":
// la.local = la.abs
// With features "+la-global-with-pcrel,+la-global-with-abs" (in any order):
// la = la.global = la.pcrel
// Note: To keep consistent with GNU as behavior, "la" can only have one
// register operand.
1502 def : InstAlias<"la $dst, $src", (PseudoLA_GOT GPR:$dst, bare_symbol:$src)>;
1503 def : InstAlias<"la.global $dst, $src",
1504 (PseudoLA_GOT GPR:$dst, bare_symbol:$src)>;
1505 def : InstAlias<"la.global $dst, $tmp, $src",
1506 (PseudoLA_GOT_LARGE GPR:$dst, GPR:$tmp, bare_symbol:$src)>;
1507 def : InstAlias<"la.local $dst, $src",
1508 (PseudoLA_PCREL GPR:$dst, bare_symbol:$src)>;
1509 def : InstAlias<"la.local $dst, $tmp, $src",
1510 (PseudoLA_PCREL_LARGE GPR:$dst, GPR:$tmp, bare_symbol:$src)>;
// Note: Keep HasLaGlobalWithPcrel before HasLaGlobalWithAbs to ensure
// "la-global-with-pcrel" takes effect when both "la-global-with-pcrel" and
// "la-global-with-abs" are enabled.
1515 let Predicates = [HasLaGlobalWithPcrel] in {
1516 def : InstAlias<"la $dst, $src", (PseudoLA_PCREL GPR:$dst, bare_symbol:$src)>;
1517 def : InstAlias<"la.global $dst, $src",
1518 (PseudoLA_PCREL GPR:$dst, bare_symbol:$src)>;
1519 def : InstAlias<"la.global $dst, $tmp, $src",
1520 (PseudoLA_PCREL_LARGE GPR:$dst, GPR:$tmp, bare_symbol:$src)>;
1521 } // Predicates = [HasLaGlobalWithPcrel]
1523 let Predicates = [HasLaGlobalWithAbs] in {
1524 def : InstAlias<"la $dst, $src", (PseudoLA_ABS GPR:$dst, bare_symbol:$src)>;
1525 def : InstAlias<"la.global $dst, $src",
1526 (PseudoLA_ABS GPR:$dst, bare_symbol:$src)>;
1527 def : InstAlias<"la.global $dst, $tmp, $src",
1528 (PseudoLA_ABS_LARGE GPR:$dst, GPR:$tmp, bare_symbol:$src)>;
1529 } // Predicates = [HasLaGlobalWithAbs]
1531 let Predicates = [HasLaLocalWithAbs] in {
1532 def : InstAlias<"la.local $dst, $src",
1533 (PseudoLA_ABS GPR:$dst, bare_symbol:$src)>;
1534 def : InstAlias<"la.local $dst, $tmp, $src",
1535 (PseudoLA_ABS_LARGE GPR:$dst, GPR:$tmp, bare_symbol:$src)>;
1536 } // Predicates = [HasLaLocalWithAbs]
1538 /// BSTRINS and BSTRPICK
1540 let Predicates = [IsLA32] in {
1541 def : Pat<(loongarch_bstrins GPR:$rd, GPR:$rj, uimm5:$msbd, uimm5:$lsbd),
1542 (BSTRINS_W GPR:$rd, GPR:$rj, uimm5:$msbd, uimm5:$lsbd)>;
1543 def : Pat<(loongarch_bstrpick GPR:$rj, uimm5:$msbd, uimm5:$lsbd),
1544 (BSTRPICK_W GPR:$rj, uimm5:$msbd, uimm5:$lsbd)>;
1545 } // Predicates = [IsLA32]
1547 let Predicates = [IsLA64] in {
1548 def : Pat<(loongarch_bstrins GPR:$rd, GPR:$rj, uimm6:$msbd, uimm6:$lsbd),
1549 (BSTRINS_D GPR:$rd, GPR:$rj, uimm6:$msbd, uimm6:$lsbd)>;
1550 def : Pat<(loongarch_bstrpick GPR:$rj, uimm6:$msbd, uimm6:$lsbd),
1551 (BSTRPICK_D GPR:$rj, uimm6:$msbd, uimm6:$lsbd)>;
1552 } // Predicates = [IsLA64]
1554 /// Byte-swapping and bit-reversal
1556 def : Pat<(loongarch_revb_2h GPR:$rj), (REVB_2H GPR:$rj)>;
1557 def : Pat<(loongarch_bitrev_4b GPR:$rj), (BITREV_4B GPR:$rj)>;
1559 let Predicates = [IsLA32] in {
1560 def : Pat<(bswap GPR:$rj), (ROTRI_W (REVB_2H GPR:$rj), 16)>;
1561 def : Pat<(bitreverse GPR:$rj), (BITREV_W GPR:$rj)>;
1562 def : Pat<(bswap (bitreverse GPR:$rj)), (BITREV_4B GPR:$rj)>;
1563 def : Pat<(bitreverse (bswap GPR:$rj)), (BITREV_4B GPR:$rj)>;
1564 } // Predicates = [IsLA32]
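// The LA32 bswap pattern above works because revb.2h byte-swaps each 16-bit
// half independently and the following rotri.w by 16 then exchanges the two
// halves, which together amount to a full 32-bit byte swap.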
1566 let Predicates = [IsLA64] in {
1567 def : Pat<(loongarch_revb_2w GPR:$rj), (REVB_2W GPR:$rj)>;
1568 def : Pat<(bswap GPR:$rj), (REVB_D GPR:$rj)>;
1569 def : Pat<(loongarch_bitrev_w GPR:$rj), (BITREV_W GPR:$rj)>;
1570 def : Pat<(bitreverse GPR:$rj), (BITREV_D GPR:$rj)>;
1571 def : Pat<(bswap (bitreverse GPR:$rj)), (BITREV_8B GPR:$rj)>;
1572 def : Pat<(bitreverse (bswap GPR:$rj)), (BITREV_8B GPR:$rj)>;
1573 } // Predicates = [IsLA64]
1577 multiclass LdPat<PatFrag LoadOp, LAInst Inst, ValueType vt = GRLenVT> {
1578 def : Pat<(vt (LoadOp BaseAddr:$rj)), (Inst BaseAddr:$rj, 0)>;
1579 def : Pat<(vt (LoadOp (AddrConstant GPR:$rj, simm12:$imm12))),
1580 (Inst GPR:$rj, simm12:$imm12)>;
1581 def : Pat<(vt (LoadOp (AddLike BaseAddr:$rj, simm12:$imm12))),
1582 (Inst BaseAddr:$rj, simm12:$imm12)>;
1583 }
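// Roughly speaking, an instantiation such as "defm : LdPat<sextloadi8, LD_B>"
// below expands to selection patterns along the lines of
//   (GRLenVT (sextloadi8 BaseAddr:$rj))  -> (LD_B BaseAddr:$rj, 0)
//   (GRLenVT (sextloadi8 (AddLike BaseAddr:$rj, simm12:$imm12)))
//                                        -> (LD_B BaseAddr:$rj, simm12:$imm12)
// i.e. plain base and base+simm12 addresses (plus the AddrConstant form) are
// folded into the load's immediate offset.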
1585 defm : LdPat<sextloadi8, LD_B>;
1586 defm : LdPat<extloadi8, LD_B>;
1587 defm : LdPat<sextloadi16, LD_H>;
1588 defm : LdPat<extloadi16, LD_H>;
1589 defm : LdPat<load, LD_W>, Requires<[IsLA32]>;
1590 defm : LdPat<zextloadi8, LD_BU>;
1591 defm : LdPat<zextloadi16, LD_HU>;
1592 let Predicates = [IsLA64] in {
1593 defm : LdPat<sextloadi32, LD_W, i64>;
1594 defm : LdPat<extloadi32, LD_W, i64>;
1595 defm : LdPat<zextloadi32, LD_WU, i64>;
1596 defm : LdPat<load, LD_D, i64>;
1597 } // Predicates = [IsLA64]
1599 // LA64 register-register-addressed loads
1600 let Predicates = [IsLA64] in {
1601 class RegRegLdPat<PatFrag LoadOp, LAInst Inst, ValueType vt>
1602 : Pat<(vt (LoadOp (add NonFIBaseAddr:$rj, GPR:$rk))),
1603 (Inst NonFIBaseAddr:$rj, GPR:$rk)>;
1605 def : RegRegLdPat<extloadi8, LDX_B, i64>;
1606 def : RegRegLdPat<sextloadi8, LDX_B, i64>;
1607 def : RegRegLdPat<zextloadi8, LDX_BU, i64>;
1608 def : RegRegLdPat<extloadi16, LDX_H, i64>;
1609 def : RegRegLdPat<sextloadi16, LDX_H, i64>;
1610 def : RegRegLdPat<zextloadi16, LDX_HU, i64>;
1611 def : RegRegLdPat<extloadi32, LDX_W, i64>;
1612 def : RegRegLdPat<sextloadi32, LDX_W, i64>;
1613 def : RegRegLdPat<zextloadi32, LDX_WU, i64>;
1614 def : RegRegLdPat<load, LDX_D, i64>;
1615 } // Predicates = [IsLA64]
1619 multiclass StPat<PatFrag StoreOp, LAInst Inst, RegisterClass StTy,
1620 ValueType vt> {
1621 def : Pat<(StoreOp (vt StTy:$rd), BaseAddr:$rj),
1622 (Inst StTy:$rd, BaseAddr:$rj, 0)>;
1623 def : Pat<(StoreOp (vt StTy:$rs2), (AddrConstant GPR:$rj, simm12:$imm12)),
1624 (Inst StTy:$rs2, GPR:$rj, simm12:$imm12)>;
1625 def : Pat<(StoreOp (vt StTy:$rd), (AddLike BaseAddr:$rj, simm12:$imm12)),
1626 (Inst StTy:$rd, BaseAddr:$rj, simm12:$imm12)>;
1627 }
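// As with LdPat above, each StPat instantiation folds plain base, constant
// address and base+simm12 forms into the store's immediate offset; the stored
// value comes first so the operands line up with the instruction's
// (value, base, offset) order.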
1629 defm : StPat<truncstorei8, ST_B, GPR, GRLenVT>;
1630 defm : StPat<truncstorei16, ST_H, GPR, GRLenVT>;
1631 defm : StPat<store, ST_W, GPR, i32>, Requires<[IsLA32]>;
1632 let Predicates = [IsLA64] in {
1633 defm : StPat<truncstorei32, ST_W, GPR, i64>;
1634 defm : StPat<store, ST_D, GPR, i64>;
1635 } // Predicates = [IsLA64]
1637 let Predicates = [IsLA64] in {
1638 def : Pat<(i64 (sextloadi32 (AddLike BaseAddr:$rj, simm14_lsl2:$imm14))),
1639 (LDPTR_W BaseAddr:$rj, simm14_lsl2:$imm14)>;
1640 def : Pat<(i64 (load (AddLike BaseAddr:$rj, simm14_lsl2:$imm14))),
1641 (LDPTR_D BaseAddr:$rj, simm14_lsl2:$imm14)>;
1642 def : Pat<(truncstorei32 (i64 GPR:$rd),
1643 (AddLike BaseAddr:$rj, simm14_lsl2:$imm14)),
1644 (STPTR_W GPR:$rd, BaseAddr:$rj, simm14_lsl2:$imm14)>;
1645 def : Pat<(store (i64 GPR:$rd), (AddLike BaseAddr:$rj, simm14_lsl2:$imm14)),
1646 (STPTR_D GPR:$rd, BaseAddr:$rj, simm14_lsl2:$imm14)>;
1647 } // Predicates = [IsLA64]
1649 // LA64 register-register-addressed stores
1650 let Predicates = [IsLA64] in {
1651 class RegRegStPat<PatFrag StoreOp, LAInst Inst, RegisterClass StTy,
1652 ValueType vt>
1653 : Pat<(StoreOp (vt StTy:$rd), (add NonFIBaseAddr:$rj, GPR:$rk)),
1654 (Inst StTy:$rd, NonFIBaseAddr:$rj, GPR:$rk)>;
1656 def : RegRegStPat<truncstorei8, STX_B, GPR, i64>;
1657 def : RegRegStPat<truncstorei16, STX_H, GPR, i64>;
1658 def : RegRegStPat<truncstorei32, STX_W, GPR, i64>;
1659 def : RegRegStPat<store, STX_D, GPR, i64>;
1660 } // Predicates = [IsLA64]
1662 /// Atomic loads and stores
1664 // DBAR hint encoding for LA664 and later micro-architectures, paraphrased from
1665 // the Linux patch revealing it [1]:
1667 // - Bit 4: kind of constraint (0: completion, 1: ordering)
1668 // - Bit 3: barrier for previous read (0: true, 1: false)
1669 // - Bit 2: barrier for previous write (0: true, 1: false)
1670 // - Bit 1: barrier for succeeding read (0: true, 1: false)
1671 // - Bit 0: barrier for succeeding write (0: true, 1: false)
1673 // Hint 0x700: barrier for "read after read" from the same address, which is
1674 // e.g. needed by LL-SC loops on older models. (DBAR 0x700 behaves the same as
1675 // a nop on newer models where such reordering is disabled.)
1677 // [1]: https://lore.kernel.org/loongarch/20230516124536.535343-1-chenhuacai@loongson.cn/
1679 // Implementations without support for the finer-grained hints simply treat
1680 // them all as the full barrier (DBAR 0), so we can unconditionally start
1681 // emitting the more precise hints right away.
1683 def : Pat<(atomic_fence 4, timm), (DBAR 0b10100)>; // acquire
1684 def : Pat<(atomic_fence 5, timm), (DBAR 0b10010)>; // release
1685 def : Pat<(atomic_fence 6, timm), (DBAR 0b10000)>; // acqrel
1686 def : Pat<(atomic_fence 7, timm), (DBAR 0b10000)>; // seqcst
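// Worked example of the hint encoding described above: the acquire hint
// 0b10100 has bit 4 set (ordering constraint) and bit 2 set (no barrier for
// previous writes) while bits 3, 1 and 0 are clear, so previous reads are
// ordered before all succeeding reads and writes, which is exactly acquire
// semantics. Likewise 0b10010 orders previous reads and writes before
// succeeding writes (release), and 0b10000 orders everything (acq_rel/seq_cst).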
1688 defm : LdPat<atomic_load_8, LD_B>;
1689 defm : LdPat<atomic_load_16, LD_H>;
1690 defm : LdPat<atomic_load_32, LD_W>;
1692 class release_seqcst_store<PatFrag base>
1693 : PatFrag<(ops node:$val, node:$ptr), (base node:$val, node:$ptr), [{
1694 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getSuccessOrdering();
1695 return isReleaseOrStronger(Ordering);
1696 }]>;
1698 class unordered_monotonic_store<PatFrag base>
1699 : PatFrag<(ops node:$val, node:$ptr), (base node:$val, node:$ptr), [{
1700 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getSuccessOrdering();
1701 return !isReleaseOrStronger(Ordering);
1702 }]>;
1704 def atomic_store_release_seqcst_32 : release_seqcst_store<atomic_store_32>;
1705 def atomic_store_release_seqcst_64 : release_seqcst_store<atomic_store_64>;
1706 def atomic_store_unordered_monotonic_32
1707 : unordered_monotonic_store<atomic_store_32>;
1708 def atomic_store_unordered_monotonic_64
1709 : unordered_monotonic_store<atomic_store_64>;
1711 defm : StPat<atomic_store_8, ST_B, GPR, GRLenVT>;
1712 defm : StPat<atomic_store_16, ST_H, GPR, GRLenVT>;
1713 defm : StPat<atomic_store_unordered_monotonic_32, ST_W, GPR, i32>,
1714 Requires<[IsLA32]>;
1716 def PseudoAtomicStoreW
1717 : Pseudo<(outs GPR:$dst), (ins GPR:$rk, GPR:$rj)>,
1718 PseudoInstExpansion<(AMSWAP__DB_W R0, GPR:$rk, GPRMemAtomic:$rj)>;
1720 def : Pat<(atomic_store_release_seqcst_32 GPR:$rj, GPR:$rk),
1721 (PseudoAtomicStoreW GPR:$rj, GPR:$rk)>;
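// Release and seq_cst 32-bit atomic stores are expanded (via the
// PseudoInstExpansion above) to AMSWAP__DB_W with the destination fixed to R0,
// so the old memory value is swapped out and discarded while the _db (data
// barrier) form supplies the required ordering.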
1723 let Predicates = [IsLA64] in {
1724 def PseudoAtomicStoreD
1725 : Pseudo<(outs GPR:$dst), (ins GPR:$rk, GPR:$rj)>,
1726 PseudoInstExpansion<(AMSWAP__DB_D R0, GPR:$rk, GPRMemAtomic:$rj)>;
1728 def : Pat<(atomic_store_release_seqcst_64 GPR:$rj, GPR:$rk),
1729 (PseudoAtomicStoreD GPR:$rj, GPR:$rk)>;
1731 defm : LdPat<atomic_load_64, LD_D>;
1732 defm : StPat<atomic_store_unordered_monotonic_32, ST_W, GPR, i64>;
1733 defm : StPat<atomic_store_unordered_monotonic_64, ST_D, GPR, i64>;
1734 } // Predicates = [IsLA64]
1738 class PseudoMaskedAM
1739 : Pseudo<(outs GPR:$res, GPR:$scratch),
1740 (ins GPR:$addr, GPR:$incr, GPR:$mask, grlenimm:$ordering)> {
1741 let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
1742 let mayLoad = 1;
1743 let mayStore = 1;
1744 let hasSideEffects = 0;
1745 }
1748 def PseudoMaskedAtomicSwap32 : PseudoMaskedAM;
1749 def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAM;
1750 def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAM;
1751 def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAM;
1753 class PseudoAM : Pseudo<(outs GPR:$res, GPR:$scratch),
1754 (ins GPR:$addr, GPR:$incr, grlenimm:$ordering)> {
1755 let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
1756 let mayLoad = 1;
1757 let mayStore = 1;
1758 let hasSideEffects = 0;
1759 }
1762 def PseudoAtomicSwap32 : PseudoAM;
1763 def PseudoAtomicLoadNand32 : PseudoAM;
1764 def PseudoAtomicLoadNand64 : PseudoAM;
1765 def PseudoAtomicLoadAdd32 : PseudoAM;
1766 def PseudoAtomicLoadSub32 : PseudoAM;
1767 def PseudoAtomicLoadAnd32 : PseudoAM;
1768 def PseudoAtomicLoadOr32 : PseudoAM;
1769 def PseudoAtomicLoadXor32 : PseudoAM;
1771 multiclass PseudoBinPat<string Op, Pseudo BinInst> {
1772 def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$incr),
1773 (BinInst GPR:$addr, GPR:$incr, 2)>;
1774 def : Pat<(!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$incr),
1775 (BinInst GPR:$addr, GPR:$incr, 4)>;
1776 def : Pat<(!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$incr),
1777 (BinInst GPR:$addr, GPR:$incr, 5)>;
1778 def : Pat<(!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$incr),
1779 (BinInst GPR:$addr, GPR:$incr, 6)>;
1780 def : Pat<(!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$incr),
1781 (BinInst GPR:$addr, GPR:$incr, 7)>;
1782 }
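// For example, instantiating this with Op = "atomic_load_nand_32" and
// PseudoAtomicLoadNand32 (as done further below) yields one pattern per
// memory ordering, e.g.
//   (atomic_load_nand_32_acquire GPR:$addr, GPR:$incr)
//     -> (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)
// where the trailing constant is the AtomicOrdering value.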
1784 class PseudoMaskedAMUMinUMax
1785 : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
1786 (ins GPR:$addr, GPR:$incr, GPR:$mask, grlenimm:$ordering)> {
1787 let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
1788 "@earlyclobber $scratch2";
1789 let mayLoad = 1;
1790 let mayStore = 1;
1791 let hasSideEffects = 0;
1792 }
1795 def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMUMinUMax;
1796 def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMUMinUMax;
1798 class PseudoMaskedAMMinMax
1799 : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
1800 (ins GPR:$addr, GPR:$incr, GPR:$mask, grlenimm:$sextshamt,
1801 grlenimm:$ordering)> {
1802 let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
1803 "@earlyclobber $scratch2";
1804 let mayLoad = 1;
1805 let mayStore = 1;
1806 let hasSideEffects = 0;
1807 }
1810 def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMMinMax;
1811 def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMMinMax;
1813 /// Compare and exchange
1815 class PseudoCmpXchg
1816 : Pseudo<(outs GPR:$res, GPR:$scratch),
1817 (ins GPR:$addr, GPR:$cmpval, GPR:$newval, grlenimm:$fail_order)> {
1818 let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
1819 let mayLoad = 1;
1820 let mayStore = 1;
1821 let hasSideEffects = 0;
1822 }
1825 def PseudoCmpXchg32 : PseudoCmpXchg;
1826 def PseudoCmpXchg64 : PseudoCmpXchg;
1828 def PseudoMaskedCmpXchg32
1829 : Pseudo<(outs GPR:$res, GPR:$scratch),
1830 (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
1831 grlenimm:$fail_order)> {
1832 let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
1833 let mayLoad = 1;
1834 let mayStore = 1;
1835 let hasSideEffects = 0;
1836 }
1839 class PseudoMaskedAMMinMaxPat<Intrinsic intrin, Pseudo AMInst>
1840 : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
1841 timm:$ordering),
1842 (AMInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
1843 timm:$ordering)>;
1845 class AtomicPat<Intrinsic intrin, Pseudo AMInst>
1846 : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
1847 (AMInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;
1849 // These atomic cmpxchg PatFrags only care about the failure ordering.
1850 // The PatFrags defined by multiclass `ternary_atomic_op_ord` in
1851 // TargetSelectionDAG.td care about the merged memory ordering that is the
1852 // stronger one between success and failure. But for LoongArch LL-SC we only
1853 // need to care about the failure ordering as explained in PR #67391. So we
1854 // define these PatFrags that will be used to define cmpxchg pats below.
1855 multiclass ternary_atomic_op_failure_ord {
1856 def NAME#_failure_monotonic : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
1857 (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
1858 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getFailureOrdering();
1859 return Ordering == AtomicOrdering::Monotonic;
1860 }]>;
1861 def NAME#_failure_acquire : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
1862 (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
1863 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getFailureOrdering();
1864 return Ordering == AtomicOrdering::Acquire;
1865 }]>;
1866 def NAME#_failure_release : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
1867 (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
1868 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getFailureOrdering();
1869 return Ordering == AtomicOrdering::Release;
1870 }]>;
1871 def NAME#_failure_acq_rel : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
1872 (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
1873 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getFailureOrdering();
1874 return Ordering == AtomicOrdering::AcquireRelease;
1875 }]>;
1876 def NAME#_failure_seq_cst : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
1877 (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
1878 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getFailureOrdering();
1879 return Ordering == AtomicOrdering::SequentiallyConsistent;
1880 }]>;
1881 }
1883 defm atomic_cmp_swap_32 : ternary_atomic_op_failure_ord;
1884 defm atomic_cmp_swap_64 : ternary_atomic_op_failure_ord;
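// The two defms above provide PatFrags named like
// atomic_cmp_swap_32_failure_acquire and atomic_cmp_swap_64_failure_seq_cst,
// which the PseudoCmpXchgPat multiclass below keys on.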
1886 let Predicates = [IsLA64] in {
1887 def : AtomicPat<int_loongarch_masked_atomicrmw_xchg_i64,
1888 PseudoMaskedAtomicSwap32>;
1889 def : Pat<(atomic_swap_32 GPR:$addr, GPR:$incr),
1890 (AMSWAP__DB_W GPR:$incr, GPR:$addr)>;
1891 def : Pat<(atomic_swap_64 GPR:$addr, GPR:$incr),
1892 (AMSWAP__DB_D GPR:$incr, GPR:$addr)>;
1893 def : Pat<(atomic_load_add_64 GPR:$rj, GPR:$rk),
1894 (AMADD__DB_D GPR:$rk, GPR:$rj)>;
1895 def : AtomicPat<int_loongarch_masked_atomicrmw_add_i64,
1896 PseudoMaskedAtomicLoadAdd32>;
1897 def : Pat<(atomic_load_sub_32 GPR:$rj, GPR:$rk),
1898 (AMADD__DB_W (SUB_W R0, GPR:$rk), GPR:$rj)>;
1899 def : Pat<(atomic_load_sub_64 GPR:$rj, GPR:$rk),
1900 (AMADD__DB_D (SUB_D R0, GPR:$rk), GPR:$rj)>;
1901 def : AtomicPat<int_loongarch_masked_atomicrmw_sub_i64,
1902 PseudoMaskedAtomicLoadSub32>;
1903 defm : PseudoBinPat<"atomic_load_nand_64", PseudoAtomicLoadNand64>;
1904 def : AtomicPat<int_loongarch_masked_atomicrmw_nand_i64,
1905 PseudoMaskedAtomicLoadNand32>;
1906 def : Pat<(atomic_load_add_32 GPR:$rj, GPR:$rk),
1907 (AMADD__DB_W GPR:$rk, GPR:$rj)>;
1908 def : Pat<(atomic_load_and_32 GPR:$rj, GPR:$rk),
1909 (AMAND__DB_W GPR:$rk, GPR:$rj)>;
1910 def : Pat<(atomic_load_and_64 GPR:$rj, GPR:$rk),
1911 (AMAND__DB_D GPR:$rk, GPR:$rj)>;
1912 def : Pat<(atomic_load_or_32 GPR:$rj, GPR:$rk),
1913 (AMOR__DB_W GPR:$rk, GPR:$rj)>;
1914 def : Pat<(atomic_load_or_64 GPR:$rj, GPR:$rk),
1915 (AMOR__DB_D GPR:$rk, GPR:$rj)>;
1916 def : Pat<(atomic_load_xor_32 GPR:$rj, GPR:$rk),
1917 (AMXOR__DB_W GPR:$rk, GPR:$rj)>;
1918 def : Pat<(atomic_load_xor_64 GPR:$rj, GPR:$rk),
1919 (AMXOR__DB_D GPR:$rk, GPR:$rj)>;
1921 def : Pat<(atomic_load_umin_32 GPR:$rj, GPR:$rk),
1922 (AMMIN__DB_WU GPR:$rk, GPR:$rj)>;
1923 def : Pat<(atomic_load_umin_64 GPR:$rj, GPR:$rk),
1924 (AMMIN__DB_DU GPR:$rk, GPR:$rj)>;
1925 def : Pat<(atomic_load_umax_32 GPR:$rj, GPR:$rk),
1926 (AMMAX__DB_WU GPR:$rk, GPR:$rj)>;
1927 def : Pat<(atomic_load_umax_64 GPR:$rj, GPR:$rk),
1928 (AMMAX__DB_DU GPR:$rk, GPR:$rj)>;
1930 def : Pat<(atomic_load_min_32 GPR:$rj, GPR:$rk),
1931 (AMMIN__DB_W GPR:$rk, GPR:$rj)>;
1932 def : Pat<(atomic_load_min_64 GPR:$rj, GPR:$rk),
1933 (AMMIN__DB_D GPR:$rk, GPR:$rj)>;
1934 def : Pat<(atomic_load_max_32 GPR:$rj, GPR:$rk),
1935 (AMMAX__DB_W GPR:$rk, GPR:$rj)>;
1936 def : Pat<(atomic_load_max_64 GPR:$rj, GPR:$rk),
1937 (AMMAX__DB_D GPR:$rk, GPR:$rj)>;
1939 def : AtomicPat<int_loongarch_masked_atomicrmw_umax_i64,
1940 PseudoMaskedAtomicLoadUMax32>;
1941 def : AtomicPat<int_loongarch_masked_atomicrmw_umin_i64,
1942 PseudoMaskedAtomicLoadUMin32>;
1944 // Ordering constants must be kept in sync with the AtomicOrdering enum in
1945 // AtomicOrdering.h.
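// For reference, the values used below are Monotonic = 2, Acquire = 4,
// Release = 5, AcquireRelease = 6 and SequentiallyConsistent = 7.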
1946 multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst,
1947 ValueType vt = GRLenVT> {
1948 def : Pat<(vt (!cast<PatFrag>(Op#"_failure_monotonic") GPR:$addr, GPR:$cmp, GPR:$new)),
1949 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
1950 def : Pat<(vt (!cast<PatFrag>(Op#"_failure_acquire") GPR:$addr, GPR:$cmp, GPR:$new)),
1951 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
1952 def : Pat<(vt (!cast<PatFrag>(Op#"_failure_release") GPR:$addr, GPR:$cmp, GPR:$new)),
1953 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
1954 def : Pat<(vt (!cast<PatFrag>(Op#"_failure_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new)),
1955 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
1956 def : Pat<(vt (!cast<PatFrag>(Op#"_failure_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new)),
1957 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
1958 }
1960 defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;
1961 defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64, i64>;
1962 def : Pat<(int_loongarch_masked_cmpxchg_i64
1963 GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$fail_order),
1964 (PseudoMaskedCmpXchg32
1965 GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$fail_order)>;
1967 def : PseudoMaskedAMMinMaxPat<int_loongarch_masked_atomicrmw_max_i64,
1968 PseudoMaskedAtomicLoadMax32>;
1969 def : PseudoMaskedAMMinMaxPat<int_loongarch_masked_atomicrmw_min_i64,
1970 PseudoMaskedAtomicLoadMin32>;
1971 } // Predicates = [IsLA64]
1973 defm : PseudoBinPat<"atomic_load_nand_32", PseudoAtomicLoadNand32>;
1975 let Predicates = [IsLA32] in {
1976 def : AtomicPat<int_loongarch_masked_atomicrmw_xchg_i32,
1977 PseudoMaskedAtomicSwap32>;
1978 defm : PseudoBinPat<"atomic_swap_32", PseudoAtomicSwap32>;
1979 def : AtomicPat<int_loongarch_masked_atomicrmw_add_i32,
1980 PseudoMaskedAtomicLoadAdd32>;
1981 def : AtomicPat<int_loongarch_masked_atomicrmw_sub_i32,
1982 PseudoMaskedAtomicLoadSub32>;
1983 def : AtomicPat<int_loongarch_masked_atomicrmw_nand_i32,
1984 PseudoMaskedAtomicLoadNand32>;
1985 defm : PseudoBinPat<"atomic_load_add_32", PseudoAtomicLoadAdd32>;
1986 defm : PseudoBinPat<"atomic_load_sub_32", PseudoAtomicLoadSub32>;
1987 defm : PseudoBinPat<"atomic_load_and_32", PseudoAtomicLoadAnd32>;
1988 defm : PseudoBinPat<"atomic_load_or_32", PseudoAtomicLoadOr32>;
1989 defm : PseudoBinPat<"atomic_load_xor_32", PseudoAtomicLoadXor32>;
1990 } // Predicates = [IsLA32]
1994 def : Pat<(int_loongarch_cacop_d timm:$op, i64:$rj, timm:$imm12),
1995 (CACOP timm:$op, GPR:$rj, timm:$imm12)>;
1996 def : Pat<(int_loongarch_cacop_w i32:$op, i32:$rj, i32:$imm12),
1997 (CACOP timm:$op, GPR:$rj, timm:$imm12)>;
1998 def : Pat<(loongarch_dbar uimm15:$imm15), (DBAR uimm15:$imm15)>;
1999 def : Pat<(loongarch_ibar uimm15:$imm15), (IBAR uimm15:$imm15)>;
2000 def : Pat<(loongarch_break uimm15:$imm15), (BREAK uimm15:$imm15)>;
2001 def : Pat<(loongarch_syscall uimm15:$imm15), (SYSCALL uimm15:$imm15)>;
2003 let Predicates = [IsLA64] in {
2004 // CRC Check Instructions
2005 def : PatGprGpr<loongarch_crc_w_b_w, CRC_W_B_W>;
2006 def : PatGprGpr<loongarch_crc_w_h_w, CRC_W_H_W>;
2007 def : PatGprGpr<loongarch_crc_w_w_w, CRC_W_W_W>;
2008 def : PatGprGpr<loongarch_crc_w_d_w, CRC_W_D_W>;
2009 def : PatGprGpr<loongarch_crcc_w_b_w, CRCC_W_B_W>;
2010 def : PatGprGpr<loongarch_crcc_w_h_w, CRCC_W_H_W>;
2011 def : PatGprGpr<loongarch_crcc_w_w_w, CRCC_W_W_W>;
2012 def : PatGprGpr<loongarch_crcc_w_d_w, CRCC_W_D_W>;
2013 } // Predicates = [IsLA64]
2015 /// Other pseudo-instructions
2017 // Pessimistically assume the stack pointer will be clobbered
2018 let Defs = [R3], Uses = [R3] in {
2019 def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
2020 [(callseq_start timm:$amt1, timm:$amt2)]>;
2021 def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
2022 [(callseq_end timm:$amt1, timm:$amt2)]>;
2023 } // Defs = [R3], Uses = [R3]
2025 //===----------------------------------------------------------------------===//
2026 // Assembler Pseudo Instructions
2027 //===----------------------------------------------------------------------===//
2029 def : InstAlias<"nop", (ANDI R0, R0, 0)>;
2030 def : InstAlias<"move $dst, $src", (OR GPR:$dst, GPR:$src, R0)>;
2031 // `ret` is supported since binutils commit 20f2e2686c79a5ac (version 2.40
2032 // and later).
2033 def : InstAlias<"ret", (JIRL R0, R1, 0)>;
2034 def : InstAlias<"jr $rj", (JIRL R0, GPR:$rj, 0)>;
2036 // Branches implemented with alias.
2037 // Always output the canonical mnemonic for the pseudo branch instructions.
2038 // The GNU tools emit the canonical mnemonic for the branch pseudo instructions
2039 // as well (e.g. "bgt" will be recognised by the assembler but never printed by
2040 // objdump). Match this behaviour by setting a zero weight.
2041 def : InstAlias<"bgt $rj, $rd, $imm16",
2042 (BLT GPR:$rd, GPR:$rj, simm16_lsl2_br:$imm16), 0>;
2043 def : InstAlias<"bgtu $rj, $rd, $imm16",
2044 (BLTU GPR:$rd, GPR:$rj, simm16_lsl2_br:$imm16), 0>;
2045 def : InstAlias<"ble $rj, $rd, $imm16",
2046 (BGE GPR:$rd, GPR:$rj, simm16_lsl2_br:$imm16), 0>;
2047 def : InstAlias<"bleu $rj, $rd, $imm16",
2048 (BGEU GPR:$rd, GPR:$rj, simm16_lsl2_br:$imm16), 0>;
2049 def : InstAlias<"bltz $rd, $imm16",
2050 (BLT GPR:$rd, R0, simm16_lsl2_br:$imm16), 0>;
2051 def : InstAlias<"bgtz $rj, $imm16",
2052 (BLT R0, GPR:$rj, simm16_lsl2_br:$imm16), 0>;
2053 def : InstAlias<"blez $rj, $imm16",
2054 (BGE R0, GPR:$rj, simm16_lsl2_br:$imm16), 0>;
2055 def : InstAlias<"bgez $rd, $imm16",
2056 (BGE GPR:$rd, R0, simm16_lsl2_br:$imm16), 0>;
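// For example, "bgt $a0, $a1, 16" is accepted and encoded as
// "blt $a1, $a0, 16", and a disassembler will only ever print the canonical
// blt form.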
2059 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0,
2060 isAsmParserOnly = 1 in {
2061 def PseudoLI_W : Pseudo<(outs GPR:$rd), (ins imm32:$imm), [],
2062 "li.w", "$rd, $imm">;
2063 def PseudoLI_D : Pseudo<(outs GPR:$rd), (ins grlenimm:$imm), [],
2064 "li.d", "$rd, $imm">, Requires<[IsLA64]>;
2065 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0, isAsmParserOnly = 1
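// li.w and li.d take an arbitrary 32- or 64-bit immediate; they are expanded
// by the assembler (outside this file), typically into a lu12i.w/ori pair and,
// for li.d, additional lu32i.d/lu52i.d steps when the constant needs them.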
2067 //===----------------------------------------------------------------------===//
2068 // Basic Floating-Point Instructions
2069 //===----------------------------------------------------------------------===//
2071 include "LoongArchFloat32InstrInfo.td"
2072 include "LoongArchFloat64InstrInfo.td"
2074 let Predicates = [HasBasicF], usesCustomInserter = 1 in {
2075 def WRFCSR : Pseudo<(outs), (ins uimm2:$fcsr, GPR:$src),
2076 [(loongarch_movgr2fcsr uimm2:$fcsr, GRLenVT:$src)]>;
2077 def RDFCSR : Pseudo<(outs GPR:$rd), (ins uimm2:$fcsr),
2078 [(set GPR:$rd, (loongarch_movfcsr2gr uimm2:$fcsr))]>;
2079 } // Predicates = [HasBasicF], usesCustomInserter = 1
2081 //===----------------------------------------------------------------------===//
2082 // Privilege Instructions
2083 //===----------------------------------------------------------------------===//
2085 // CSR Access Instructions
2086 let hasSideEffects = 1 in
2087 def CSRRD : FmtCSR<0x04000000, (outs GPR:$rd), (ins uimm14:$csr_num),
2088 "$rd, $csr_num">;
2089 let hasSideEffects = 1, Constraints = "$rd = $dst" in {
2090 def CSRWR : FmtCSR<0x04000020, (outs GPR:$dst),
2091 (ins GPR:$rd, uimm14:$csr_num), "$rd, $csr_num">;
2092 def CSRXCHG : FmtCSRXCHG<0x04000000, (outs GPR:$dst),
2093 (ins GPR:$rd, GPR:$rj, uimm14:$csr_num),
2094 "$rd, $rj, $csr_num">;
2095 } // hasSideEffects = 1, Constraints = "$rd = $dst"
2097 // IOCSR Access Instructions
2098 def IOCSRRD_B : IOCSRRD<0x06480000>;
2099 def IOCSRRD_H : IOCSRRD<0x06480400>;
2100 def IOCSRRD_W : IOCSRRD<0x06480800>;
2101 def IOCSRWR_B : IOCSRWR<0x06481000>;
2102 def IOCSRWR_H : IOCSRWR<0x06481400>;
2103 def IOCSRWR_W : IOCSRWR<0x06481800>;
2104 let Predicates = [IsLA64] in {
2105 def IOCSRRD_D : IOCSRRD<0x06480c00>;
2106 def IOCSRWR_D : IOCSRWR<0x06481c00>;
2107 } // Predicates = [IsLA64]
2109 // TLB Maintenance Instructions
2110 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
2111 def TLBSRCH : FmtI32<0x06482800>;
2112 def TLBRD : FmtI32<0x06482c00>;
2113 def TLBWR : FmtI32<0x06483000>;
2114 def TLBFILL : FmtI32<0x06483400>;
2115 def TLBCLR : FmtI32<0x06482000>;
2116 def TLBFLUSH : FmtI32<0x06482400>;
2117 def INVTLB : FmtINVTLB<(outs), (ins GPR:$rk, GPR:$rj, uimm5:$op),
2118 "$op, $rj, $rk">;
2119 } // hasSideEffects = 1, mayLoad = 0, mayStore = 0
2121 // Software Page Walking Instructions
2122 def LDDIR : Fmt2RI8<0x06400000, (outs GPR:$rd),
2123 (ins GPR:$rj, uimm8:$imm8), "$rd, $rj, $imm8">;
2124 def LDPTE : FmtLDPTE<(outs), (ins GPR:$rj, uimm8:$seq), "$rj, $seq">;
2127 // Other Miscellaneous Instructions
2128 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
2129 def ERTN : FmtI32<0x06483800>;
2130 def DBCL : MISC_I15<0x002a8000>;
2131 def IDLE : MISC_I15<0x06488000>;
2133 //===----------------------------------------------------------------------===//
2134 // Privilege Intrinsics
2135 //===----------------------------------------------------------------------===//
2137 def : Pat<(loongarch_csrrd uimm14:$imm14), (CSRRD uimm14:$imm14)>;
2138 def : Pat<(loongarch_csrwr GPR:$rd, uimm14:$imm14),
2139 (CSRWR GPR:$rd, uimm14:$imm14)>;
2140 def : Pat<(loongarch_csrxchg GPR:$rd, GPR:$rj, uimm14:$imm14),
2141 (CSRXCHG GPR:$rd, GPR:$rj, uimm14:$imm14)>;
2143 def : Pat<(loongarch_iocsrrd_b GPR:$rj), (IOCSRRD_B GPR:$rj)>;
2144 def : Pat<(loongarch_iocsrrd_h GPR:$rj), (IOCSRRD_H GPR:$rj)>;
2145 def : Pat<(loongarch_iocsrrd_w GPR:$rj), (IOCSRRD_W GPR:$rj)>;
2147 def : Pat<(loongarch_iocsrwr_b GPR:$rd, GPR:$rj), (IOCSRWR_B GPR:$rd, GPR:$rj)>;
2148 def : Pat<(loongarch_iocsrwr_h GPR:$rd, GPR:$rj), (IOCSRWR_H GPR:$rd, GPR:$rj)>;
2149 def : Pat<(loongarch_iocsrwr_w GPR:$rd, GPR:$rj), (IOCSRWR_W GPR:$rd, GPR:$rj)>;
2151 def : Pat<(loongarch_cpucfg GPR:$rj), (CPUCFG GPR:$rj)>;
2153 let Predicates = [IsLA64] in {
2154 def : Pat<(loongarch_iocsrrd_d GPR:$rj), (IOCSRRD_D GPR:$rj)>;
2155 def : Pat<(loongarch_iocsrwr_d GPR:$rd, GPR:$rj), (IOCSRWR_D GPR:$rd, GPR:$rj)>;
2156 def : Pat<(int_loongarch_asrtle_d GPR:$rj, GPR:$rk),
2157 (ASRTLE_D GPR:$rj, GPR:$rk)>;
2158 def : Pat<(int_loongarch_asrtgt_d GPR:$rj, GPR:$rk),
2159 (ASRTGT_D GPR:$rj, GPR:$rk)>;
2160 def : Pat<(int_loongarch_lddir_d GPR:$rj, timm:$imm8),
2161 (LDDIR GPR:$rj, timm:$imm8)>;
2162 def : Pat<(int_loongarch_ldpte_d GPR:$rj, timm:$imm8),
2163 (LDPTE GPR:$rj, timm:$imm8)>;
2164 } // Predicates = [IsLA64]
2166 //===----------------------------------------------------------------------===//
2167 // LSX Instructions
2168 //===----------------------------------------------------------------------===//
2169 include "LoongArchLSXInstrInfo.td"
2171 //===----------------------------------------------------------------------===//
2172 // LASX Instructions
2173 //===----------------------------------------------------------------------===//
2174 include "LoongArchLASXInstrInfo.td"
2176 //===----------------------------------------------------------------------===//
2177 // LVZ Instructions
2178 //===----------------------------------------------------------------------===//
2179 include "LoongArchLVZInstrInfo.td"
2181 //===----------------------------------------------------------------------===//
2182 // LBT Instructions
2183 //===----------------------------------------------------------------------===//
2184 include "LoongArchLBTInstrInfo.td"