1 //== LoongArchInstrInfo.td - Target Description for LoongArch -*- tablegen -*-//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the LoongArch instructions in TableGen format.
11 //===----------------------------------------------------------------------===//
13 //===----------------------------------------------------------------------===//
14 // LoongArch specific DAG Nodes.
15 //===----------------------------------------------------------------------===//
17 // Target-independent type requirements, but with target-specific formats.
18 def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
20 def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
23 // Target-dependent type requirements.
24 def SDT_LoongArchCall : SDTypeProfile<0, -1, [SDTCisVT<0, GRLenVT>]>;
25 def SDT_LoongArchIntBinOpW : SDTypeProfile<1, 2, [
26 SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>
29 def SDT_LoongArchBStrIns: SDTypeProfile<1, 4, [
30 SDTCisInt<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<3>,
34 def SDT_LoongArchBStrPick: SDTypeProfile<1, 3, [
35 SDTCisInt<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisSameAs<2, 3>
38 // "VI" means no output and an integer input.
39 def SDT_LoongArchVI : SDTypeProfile<0, 1, [SDTCisVT<0, GRLenVT>]>;
41 def SDT_LoongArchCsrrd : SDTypeProfile<1, 1, [SDTCisInt<0>,
42 SDTCisVT<1, GRLenVT>]>;
43 def SDT_LoongArchCsrwr : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
44 SDTCisVT<2, GRLenVT>]>;
45 def SDT_LoongArchCsrxchg : SDTypeProfile<1, 3, [SDTCisInt<0>,
48 SDTCisVT<3, GRLenVT>]>;
49 def SDT_LoongArchIocsrwr : SDTypeProfile<0, 2, [SDTCisInt<0>,
51 def SDT_LoongArchMovgr2fcsr : SDTypeProfile<0, 2, [SDTCisVT<0, GRLenVT>,
53 def SDT_LoongArchMovfcsr2gr : SDTypeProfile<1, 1, [SDTCisVT<0, GRLenVT>,
56 // TODO: Add LoongArch specific DAG Nodes
57 // Target-independent nodes, but with target-specific formats.
58 def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
59 [SDNPHasChain, SDNPOutGlue]>;
60 def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
61 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
63 // Target-dependent nodes.
64 def loongarch_call : SDNode<"LoongArchISD::CALL", SDT_LoongArchCall,
65 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
67 def loongarch_ret : SDNode<"LoongArchISD::RET", SDTNone,
68 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
69 def loongarch_tail : SDNode<"LoongArchISD::TAIL", SDT_LoongArchCall,
70 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
72 def loongarch_call_medium : SDNode<"LoongArchISD::CALL_MEDIUM", SDT_LoongArchCall,
73 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
75 def loongarch_tail_medium : SDNode<"LoongArchISD::TAIL_MEDIUM", SDT_LoongArchCall,
76 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
78 def loongarch_call_large : SDNode<"LoongArchISD::CALL_LARGE", SDT_LoongArchCall,
79 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
81 def loongarch_tail_large : SDNode<"LoongArchISD::TAIL_LARGE", SDT_LoongArchCall,
82 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
84 def loongarch_sll_w : SDNode<"LoongArchISD::SLL_W", SDT_LoongArchIntBinOpW>;
85 def loongarch_sra_w : SDNode<"LoongArchISD::SRA_W", SDT_LoongArchIntBinOpW>;
86 def loongarch_srl_w : SDNode<"LoongArchISD::SRL_W", SDT_LoongArchIntBinOpW>;
87 def loongarch_rotr_w : SDNode<"LoongArchISD::ROTR_W", SDT_LoongArchIntBinOpW>;
88 def loongarch_div_wu : SDNode<"LoongArchISD::DIV_WU", SDT_LoongArchIntBinOpW>;
89 def loongarch_mod_wu : SDNode<"LoongArchISD::MOD_WU", SDT_LoongArchIntBinOpW>;
90 def loongarch_crc_w_b_w
91 : SDNode<"LoongArchISD::CRC_W_B_W", SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
92 def loongarch_crc_w_h_w
93 : SDNode<"LoongArchISD::CRC_W_H_W", SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
94 def loongarch_crc_w_w_w
95 : SDNode<"LoongArchISD::CRC_W_W_W", SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
96 def loongarch_crc_w_d_w
97 : SDNode<"LoongArchISD::CRC_W_D_W", SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
98 def loongarch_crcc_w_b_w : SDNode<"LoongArchISD::CRCC_W_B_W",
99 SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
100 def loongarch_crcc_w_h_w : SDNode<"LoongArchISD::CRCC_W_H_W",
101 SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
102 def loongarch_crcc_w_w_w : SDNode<"LoongArchISD::CRCC_W_W_W",
103 SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
104 def loongarch_crcc_w_d_w : SDNode<"LoongArchISD::CRCC_W_D_W",
105 SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
106 def loongarch_bstrins
107 : SDNode<"LoongArchISD::BSTRINS", SDT_LoongArchBStrIns>;
108 def loongarch_bstrpick
109 : SDNode<"LoongArchISD::BSTRPICK", SDT_LoongArchBStrPick>;
110 def loongarch_revb_2h : SDNode<"LoongArchISD::REVB_2H", SDTUnaryOp>;
111 def loongarch_revb_2w : SDNode<"LoongArchISD::REVB_2W", SDTUnaryOp>;
112 def loongarch_bitrev_4b : SDNode<"LoongArchISD::BITREV_4B", SDTUnaryOp>;
113 def loongarch_bitrev_w : SDNode<"LoongArchISD::BITREV_W", SDTUnaryOp>;
114 def loongarch_clzw : SDNode<"LoongArchISD::CLZ_W", SDTIntBitCountUnaryOp>;
115 def loongarch_ctzw : SDNode<"LoongArchISD::CTZ_W", SDTIntBitCountUnaryOp>;
116 def loongarch_dbar : SDNode<"LoongArchISD::DBAR", SDT_LoongArchVI,
117 [SDNPHasChain, SDNPSideEffect]>;
118 def loongarch_ibar : SDNode<"LoongArchISD::IBAR", SDT_LoongArchVI,
119 [SDNPHasChain, SDNPSideEffect]>;
120 def loongarch_break : SDNode<"LoongArchISD::BREAK", SDT_LoongArchVI,
121 [SDNPHasChain, SDNPSideEffect]>;
122 def loongarch_movfcsr2gr : SDNode<"LoongArchISD::MOVFCSR2GR",
123 SDT_LoongArchMovfcsr2gr, [SDNPHasChain]>;
124 def loongarch_movgr2fcsr : SDNode<"LoongArchISD::MOVGR2FCSR",
125 SDT_LoongArchMovgr2fcsr,
126 [SDNPHasChain, SDNPSideEffect]>;
127 def loongarch_syscall : SDNode<"LoongArchISD::SYSCALL", SDT_LoongArchVI,
128 [SDNPHasChain, SDNPSideEffect]>;
129 def loongarch_csrrd : SDNode<"LoongArchISD::CSRRD", SDT_LoongArchCsrrd,
130 [SDNPHasChain, SDNPSideEffect]>;
131 def loongarch_csrwr : SDNode<"LoongArchISD::CSRWR", SDT_LoongArchCsrwr,
132 [SDNPHasChain, SDNPSideEffect]>;
133 def loongarch_csrxchg : SDNode<"LoongArchISD::CSRXCHG",
134 SDT_LoongArchCsrxchg,
135 [SDNPHasChain, SDNPSideEffect]>;
136 def loongarch_iocsrrd_b : SDNode<"LoongArchISD::IOCSRRD_B", SDTUnaryOp,
137 [SDNPHasChain, SDNPSideEffect]>;
138 def loongarch_iocsrrd_h : SDNode<"LoongArchISD::IOCSRRD_H", SDTUnaryOp,
139 [SDNPHasChain, SDNPSideEffect]>;
140 def loongarch_iocsrrd_w : SDNode<"LoongArchISD::IOCSRRD_W", SDTUnaryOp,
141 [SDNPHasChain, SDNPSideEffect]>;
142 def loongarch_iocsrrd_d : SDNode<"LoongArchISD::IOCSRRD_D", SDTUnaryOp,
143 [SDNPHasChain, SDNPSideEffect]>;
144 def loongarch_iocsrwr_b : SDNode<"LoongArchISD::IOCSRWR_B",
145 SDT_LoongArchIocsrwr,
146 [SDNPHasChain, SDNPSideEffect]>;
147 def loongarch_iocsrwr_h : SDNode<"LoongArchISD::IOCSRWR_H",
148 SDT_LoongArchIocsrwr,
149 [SDNPHasChain, SDNPSideEffect]>;
150 def loongarch_iocsrwr_w : SDNode<"LoongArchISD::IOCSRWR_W",
151 SDT_LoongArchIocsrwr,
152 [SDNPHasChain, SDNPSideEffect]>;
153 def loongarch_iocsrwr_d : SDNode<"LoongArchISD::IOCSRWR_D",
154 SDT_LoongArchIocsrwr,
155 [SDNPHasChain, SDNPSideEffect]>;
156 def loongarch_cpucfg : SDNode<"LoongArchISD::CPUCFG", SDTUnaryOp,
159 def to_fclass_mask: SDNodeXForm<timm, [{
160 uint64_t Check = N->getZExtValue();
163 Mask |= LoongArch::FClassMaskSignalingNaN;
165 Mask |= LoongArch::FClassMaskQuietNaN;
166 if (Check & fcPosInf)
167 Mask |= LoongArch::FClassMaskPositiveInfinity;
168 if (Check & fcNegInf)
169 Mask |= LoongArch::FClassMaskNegativeInfinity;
170 if (Check & fcPosNormal)
171 Mask |= LoongArch::FClassMaskPositiveNormal;
172 if (Check & fcNegNormal)
173 Mask |= LoongArch::FClassMaskNegativeNormal;
174 if (Check & fcPosSubnormal)
175 Mask |= LoongArch::FClassMaskPositiveSubnormal;
176 if (Check & fcNegSubnormal)
177 Mask |= LoongArch::FClassMaskNegativeSubnormal;
178 if (Check & fcPosZero)
179 Mask |= LoongArch::FClassMaskPositiveZero;
180 if (Check & fcNegZero)
181 Mask |= LoongArch::FClassMaskNegativeZero;
182 return CurDAG->getTargetConstant(Mask, SDLoc(N), Subtarget->getGRLenVT());
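// For example, a test mask of (fcPosInf | fcNegInf) is rewritten by the
// transform above into
// (FClassMaskPositiveInfinity | FClassMaskNegativeInfinity).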
185 //===----------------------------------------------------------------------===//
186 // Operand and SDNode transformation definitions.
187 //===----------------------------------------------------------------------===//
189 class ImmAsmOperand<string prefix, int width, string suffix>
191 let Name = prefix # "Imm" # width # suffix;
192 let DiagnosticType = !strconcat("Invalid", Name);
193 let RenderMethod = "addImmOperands";
196 class SImmAsmOperand<int width, string suffix = "">
197 : ImmAsmOperand<"S", width, suffix> {
200 class UImmAsmOperand<int width, string suffix = "">
201 : ImmAsmOperand<"U", width, suffix> {
204 // A parse method for "$r*" or "$r*, 0", where the 0 is silently ignored.
205 // Only used for "AM*" instructions, in order to be compatible with GAS.
206 def AtomicMemAsmOperand : AsmOperandClass {
207 let Name = "AtomicMemAsmOperand";
208 let RenderMethod = "addRegOperands";
209 let PredicateMethod = "isGPR";
210 let ParserMethod = "parseAtomicMemOp";
213 def GPRMemAtomic : RegisterOperand<GPR> {
214 let ParserMatchClass = AtomicMemAsmOperand;
215 let PrintMethod = "printAtomicMemOp";
218 // A parameterized immediate operand alternative to i32imm/i64imm from Target.td.
219 def grlenimm : Operand<GRLenVT>;
220 def imm32 : Operand<GRLenVT> {
221 let ParserMatchClass = ImmAsmOperand<"", 32, "">;
223 def imm64 : Operand<i64> {
224 let ParserMatchClass = ImmAsmOperand<"", 64, "">;
227 def uimm1 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<1>(Imm);}]>{
228 let ParserMatchClass = UImmAsmOperand<1>;
231 def uimm2 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<2>(Imm);}]> {
232 let ParserMatchClass = UImmAsmOperand<2>;
235 def uimm2_plus1 : Operand<GRLenVT>,
236 ImmLeaf<GRLenVT, [{return isUInt<2>(Imm - 1);}]> {
237 let ParserMatchClass = UImmAsmOperand<2, "plus1">;
238 let EncoderMethod = "getImmOpValueSub1";
239 let DecoderMethod = "decodeUImmOperand<2, 1>";
242 def uimm3 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<3>(Imm);}]> {
243 let ParserMatchClass = UImmAsmOperand<3>;
246 def uimm4 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<4>(Imm);}]> {
247 let ParserMatchClass = UImmAsmOperand<4>;
250 def uimm5 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<5>(Imm);}]> {
251 let ParserMatchClass = UImmAsmOperand<5>;
254 def uimm6 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<6>(Imm);}]> {
255 let ParserMatchClass = UImmAsmOperand<6>;
258 def uimm7 : Operand<GRLenVT> {
259 let ParserMatchClass = UImmAsmOperand<7>;
262 def uimm8 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<8>(Imm);}]> {
263 let ParserMatchClass = UImmAsmOperand<8>;
266 class UImm12Operand : Operand<GRLenVT>,
267 ImmLeaf <GRLenVT, [{return isUInt<12>(Imm);}]> {
268 let DecoderMethod = "decodeUImmOperand<12>";
271 def uimm12 : UImm12Operand {
272 let ParserMatchClass = UImmAsmOperand<12>;
275 def uimm12_ori : UImm12Operand {
276 let ParserMatchClass = UImmAsmOperand<12, "ori">;
279 def uimm14 : Operand<GRLenVT>,
280 ImmLeaf <GRLenVT, [{return isUInt<14>(Imm);}]> {
281 let ParserMatchClass = UImmAsmOperand<14>;
284 def uimm15 : Operand<GRLenVT>,
285 ImmLeaf <GRLenVT, [{return isUInt<15>(Imm);}]> {
286 let ParserMatchClass = UImmAsmOperand<15>;
289 def simm5 : Operand<GRLenVT> {
290 let ParserMatchClass = SImmAsmOperand<5>;
291 let DecoderMethod = "decodeSImmOperand<5>";
294 def simm8 : Operand<GRLenVT> {
295 let ParserMatchClass = SImmAsmOperand<8>;
296 let DecoderMethod = "decodeSImmOperand<8>";
299 foreach I = [1, 2, 3] in {
300 def simm8_lsl # I : Operand<GRLenVT> {
301 let ParserMatchClass = SImmAsmOperand<8, "lsl" # I>;
302 let EncoderMethod = "getImmOpValueAsr<" # I # ">";
303 let DecoderMethod = "decodeSImmOperand<8," # I # ">";
307 def simm9_lsl3 : Operand<GRLenVT> {
308 let ParserMatchClass = SImmAsmOperand<9, "lsl3">;
309 let EncoderMethod = "getImmOpValueAsr<3>";
310 let DecoderMethod = "decodeSImmOperand<9, 3>";
313 def simm10 : Operand<GRLenVT> {
314 let ParserMatchClass = SImmAsmOperand<10>;
317 def simm10_lsl2 : Operand<GRLenVT> {
318 let ParserMatchClass = SImmAsmOperand<10, "lsl2">;
319 let EncoderMethod = "getImmOpValueAsr<2>";
320 let DecoderMethod = "decodeSImmOperand<10, 2>";
323 def simm11_lsl1 : Operand<GRLenVT> {
324 let ParserMatchClass = SImmAsmOperand<11, "lsl1">;
325 let EncoderMethod = "getImmOpValueAsr<1>";
326 let DecoderMethod = "decodeSImmOperand<11, 1>";
329 class SImm12Operand : Operand<GRLenVT>,
330 ImmLeaf <GRLenVT, [{return isInt<12>(Imm);}]> {
331 let DecoderMethod = "decodeSImmOperand<12>";
334 def simm12 : SImm12Operand {
335 let ParserMatchClass = SImmAsmOperand<12>;
338 def simm12_addlike : SImm12Operand {
339 let ParserMatchClass = SImmAsmOperand<12, "addlike">;
342 def simm12_lu52id : SImm12Operand {
343 let ParserMatchClass = SImmAsmOperand<12, "lu52id">;
346 def simm13 : Operand<GRLenVT> {
347 let ParserMatchClass = SImmAsmOperand<13>;
348 let DecoderMethod = "decodeSImmOperand<13>";
351 def simm14_lsl2 : Operand<GRLenVT>,
352 ImmLeaf<GRLenVT, [{return isShiftedInt<14,2>(Imm);}]> {
353 let ParserMatchClass = SImmAsmOperand<14, "lsl2">;
354 let EncoderMethod = "getImmOpValueAsr<2>";
355 let DecoderMethod = "decodeSImmOperand<14, 2>";
358 def simm16 : Operand<GRLenVT> {
359 let ParserMatchClass = SImmAsmOperand<16>;
360 let DecoderMethod = "decodeSImmOperand<16>";
363 def simm16_lsl2 : Operand<GRLenVT>,
364 ImmLeaf<GRLenVT, [{return isInt<16>(Imm>>2);}]> {
365 let ParserMatchClass = SImmAsmOperand<16, "lsl2">;
366 let EncoderMethod = "getImmOpValueAsr<2>";
367 let DecoderMethod = "decodeSImmOperand<16, 2>";
370 def simm16_lsl2_br : Operand<OtherVT> {
371 let ParserMatchClass = SImmAsmOperand<16, "lsl2">;
372 let EncoderMethod = "getImmOpValueAsr<2>";
373 let DecoderMethod = "decodeSImmOperand<16, 2>";
376 class SImm20Operand : Operand<GRLenVT> {
377 let DecoderMethod = "decodeSImmOperand<20>";
380 def simm20 : SImm20Operand {
381 let ParserMatchClass = SImmAsmOperand<20>;
384 def simm20_pcalau12i : SImm20Operand {
385 let ParserMatchClass = SImmAsmOperand<20, "pcalau12i">;
388 def simm20_lu12iw : SImm20Operand {
389 let ParserMatchClass = SImmAsmOperand<20, "lu12iw">;
392 def simm20_lu32id : SImm20Operand {
393 let ParserMatchClass = SImmAsmOperand<20, "lu32id">;
396 def simm20_pcaddu18i : SImm20Operand {
397 let ParserMatchClass = SImmAsmOperand<20, "pcaddu18i">;
400 def simm20_pcaddi : SImm20Operand {
401 let ParserMatchClass = SImmAsmOperand<20, "pcaddi">;
404 def simm21_lsl2 : Operand<OtherVT> {
405 let ParserMatchClass = SImmAsmOperand<21, "lsl2">;
406 let EncoderMethod = "getImmOpValueAsr<2>";
407 let DecoderMethod = "decodeSImmOperand<21, 2>";
410 def SImm26OperandB: AsmOperandClass {
411 let Name = "SImm26OperandB";
412 let PredicateMethod = "isSImm26Operand";
413 let RenderMethod = "addImmOperands";
414 let DiagnosticType = "InvalidSImm26Operand";
415 let ParserMethod = "parseImmediate";
418 // A symbol or an imm used in B/PseudoBR.
419 def simm26_b : Operand<OtherVT> {
420 let ParserMatchClass = SImm26OperandB;
421 let EncoderMethod = "getImmOpValueAsr<2>";
422 let DecoderMethod = "decodeSImmOperand<26, 2>";
425 def SImm26OperandBL: AsmOperandClass {
426 let Name = "SImm26OperandBL";
427 let PredicateMethod = "isSImm26Operand";
428 let RenderMethod = "addImmOperands";
429 let DiagnosticType = "InvalidSImm26Operand";
430 let ParserMethod = "parseSImm26Operand";
433 // A symbol or an imm used in BL/PseudoCALL/PseudoTAIL.
434 def simm26_symbol : Operand<GRLenVT> {
435 let ParserMatchClass = SImm26OperandBL;
436 let EncoderMethod = "getImmOpValueAsr<2>";
437 let DecoderMethod = "decodeSImmOperand<26, 2>";
440 // A 32-bit signed immediate with the lowest 16 bits zeroed, suitable for
441 // direct use with `addu16i.d`.
442 def simm16_lsl16 : Operand<GRLenVT>,
443 ImmLeaf<GRLenVT, [{return isShiftedInt<16, 16>(Imm);}]>;
445 // A 32-bit signed immediate expressible with a pair of `addu16i.d + addi` for use in additions.
447 def simm32_hi16_lo12: Operand<GRLenVT>, ImmLeaf<GRLenVT, [{
448 return !isInt<12>(Imm) && isShiftedInt<16, 16>(Imm - SignExtend64<12>(Imm));
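// For example, 0x90001 is not a simm12, but 0x90001 minus its sign-extended
// low 12 bits (1) is 0x90000 = 9 << 16, so the addition can be done as
// ADDU16I_D (adding 9 << 16) followed by an ADDI adding 1.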
451 def BareSymbol : AsmOperandClass {
452 let Name = "BareSymbol";
453 let RenderMethod = "addImmOperands";
454 let DiagnosticType = "InvalidBareSymbol";
455 let ParserMethod = "parseImmediate";
458 // A bare symbol used in "PseudoLA_*" instructions.
459 def bare_symbol : Operand<GRLenVT> {
460 let ParserMatchClass = BareSymbol;
463 def TPRelAddSymbol : AsmOperandClass {
464 let Name = "TPRelAddSymbol";
465 let RenderMethod = "addImmOperands";
466 let DiagnosticType = "InvalidTPRelAddSymbol";
467 let ParserMethod = "parseOperandWithModifier";
470 // A bare symbol with the %le_add_r variant.
471 def tprel_add_symbol : Operand<GRLenVT> {
472 let ParserMatchClass = TPRelAddSymbol;
476 // Standalone (codegen-only) immleaf patterns.
478 // A 12-bit signed immediate plus one, i.e. an immediate in the range [-2047, 2048].
479 def simm12_plus1 : ImmLeaf<GRLenVT,
480 [{return (isInt<12>(Imm) && Imm != -2048) || Imm == 2048;}]>;
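// The asymmetric range exists because these immediates are negated via NegImm
// (defined just below) before being used as the simm12 operand of an ADDI in
// the seteq/setne expansions; negating [-2047, 2048] yields [-2048, 2047],
// exactly the simm12 range.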
482 // Return the negation of an immediate value.
483 def NegImm : SDNodeXForm<imm, [{
484 return CurDAG->getTargetConstant(-N->getSExtValue(), SDLoc(N),
488 // FP immediate patterns.
489 def fpimm0 : PatLeaf<(fpimm), [{return N->isExactlyValue(+0.0);}]>;
490 def fpimm0neg : PatLeaf<(fpimm), [{return N->isExactlyValue(-0.0);}]>;
491 def fpimm1 : PatLeaf<(fpimm), [{return N->isExactlyValue(+1.0);}]>;
493 // Return an immediate subtracted from 32.
494 def ImmSubFrom32 : SDNodeXForm<imm, [{
495 return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N),
499 // Return the lowest 12 bits of the immediate, sign-extended.
500 def LO12: SDNodeXForm<imm, [{
501 return CurDAG->getTargetConstant(SignExtend64<12>(N->getSExtValue()),
502 SDLoc(N), N->getValueType(0));
505 // Return the higher 16 bits of the signed immediate.
506 def HI16 : SDNodeXForm<imm, [{
507 return CurDAG->getTargetConstant(N->getSExtValue() >> 16, SDLoc(N),
511 // Return the higher 16 bits of the signed immediate, adjusted for use within an
512 // `addu16i.d + addi` pair.
513 def HI16ForAddu16idAddiPair: SDNodeXForm<imm, [{
514 auto Imm = N->getSExtValue();
515 return CurDAG->getTargetConstant((Imm - SignExtend64<12>(Imm)) >> 16,
516 SDLoc(N), N->getValueType(0));
519 def BaseAddr : ComplexPattern<iPTR, 1, "SelectBaseAddr">;
520 def AddrConstant : ComplexPattern<iPTR, 2, "SelectAddrConstant">;
521 def NonFIBaseAddr : ComplexPattern<iPTR, 1, "selectNonFIBaseAddr">;
523 def fma_nsz : PatFrag<(ops node:$fj, node:$fk, node:$fa),
524 (fma node:$fj, node:$fk, node:$fa), [{
525 return N->getFlags().hasNoSignedZeros();
528 // Check if (add r, imm) can be optimized to (ADDI (ADDI r, imm0), imm1),
529 // in which imm = imm0 + imm1, and both imm0 & imm1 are simm12.
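// For example, (add $r, 3000) cannot use a single ADDI since 3000 > 2047, but
// it can be emitted as ADDI $r, 2047 followed by another ADDI adding 953,
// because 2047 + 953 = 3000.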
530 def AddiPair : PatLeaf<(imm), [{
533 // The immediate operand must be in range [-4096,-2049] or [2048,4094].
534 int64_t Imm = N->getSExtValue();
535 return (-4096 <= Imm && Imm <= -2049) || (2048 <= Imm && Imm <= 4094);
538 // Return -2048 if immediate is negative or 2047 if positive.
539 def AddiPairImmLarge : SDNodeXForm<imm, [{
540 int64_t Imm = N->getSExtValue() < 0 ? -2048 : 2047;
541 return CurDAG->getTargetConstant(Imm, SDLoc(N),
545 // Return imm - (imm < 0 ? -2048 : 2047).
546 def AddiPairImmSmall : SDNodeXForm<imm, [{
547 int64_t Imm = N->getSExtValue();
548 int64_t Adj = Imm < 0 ? -2048 : 2047;
549 return CurDAG->getTargetConstant(Imm - Adj, SDLoc(N),
553 // Check if (mul r, imm) can be optimized to (SLLI (ALSL r, r, i0), i1),
554 // in which imm = (1 + (1 << i0)) << i1.
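// For example, imm = 40 = (1 + (1 << 2)) << 3, so (mul $r, 40) can be emitted
// as ALSL $r, $r, 2 (computing 5 * $r) followed by SLLI by 3.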
555 def AlslSlliImm : PatLeaf<(imm), [{
558 uint64_t Imm = N->getZExtValue();
559 unsigned I1 = llvm::countr_zero(Imm);
560 uint64_t Rem = Imm >> I1;
561 return Rem == 3 || Rem == 5 || Rem == 9 || Rem == 17;
564 def AlslSlliImmI1 : SDNodeXForm<imm, [{
565 uint64_t Imm = N->getZExtValue();
566 unsigned I1 = llvm::countr_zero(Imm);
567 return CurDAG->getTargetConstant(I1, SDLoc(N),
571 def AlslSlliImmI0 : SDNodeXForm<imm, [{
572 uint64_t Imm = N->getZExtValue();
573 unsigned I1 = llvm::countr_zero(Imm);
576 case 3: I0 = 1; break;
577 case 5: I0 = 2; break;
578 case 9: I0 = 3; break;
579 default: I0 = 4; break;
581 return CurDAG->getTargetConstant(I0, SDLoc(N),
585 // Check if (and r, imm) can be optimized to (BSTRINS r, R0, msb, lsb),
586 // in which imm = ~((2^(msb-lsb+1) - 1) << lsb).
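// For example, on LA32 (and $r, 0xffff00ff) clears bits 15..8, so it can be
// emitted as (BSTRINS_W $r, R0, 15, 8) instead of materializing the mask.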
587 def BstrinsImm : PatLeaf<(imm), [{
590 uint64_t Imm = N->getZExtValue();
591 // andi can be used instead if Imm <= 0xfff.
594 unsigned MaskIdx, MaskLen;
595 return N->getValueType(0).getSizeInBits() == 32
596 ? llvm::isShiftedMask_32(~Imm, MaskIdx, MaskLen)
597 : llvm::isShiftedMask_64(~Imm, MaskIdx, MaskLen);
600 def BstrinsMsb: SDNodeXForm<imm, [{
601 uint64_t Imm = N->getZExtValue();
602 unsigned MaskIdx, MaskLen;
603 N->getValueType(0).getSizeInBits() == 32
604 ? llvm::isShiftedMask_32(~Imm, MaskIdx, MaskLen)
605 : llvm::isShiftedMask_64(~Imm, MaskIdx, MaskLen);
606 return CurDAG->getTargetConstant(MaskIdx + MaskLen - 1, SDLoc(N),
610 def BstrinsLsb: SDNodeXForm<imm, [{
611 uint64_t Imm = N->getZExtValue();
612 unsigned MaskIdx, MaskLen;
613 N->getValueType(0).getSizeInBits() == 32
614 ? llvm::isShiftedMask_32(~Imm, MaskIdx, MaskLen)
615 : llvm::isShiftedMask_64(~Imm, MaskIdx, MaskLen);
616 return CurDAG->getTargetConstant(MaskIdx, SDLoc(N), N->getValueType(0));
619 //===----------------------------------------------------------------------===//
620 // Instruction Formats
621 //===----------------------------------------------------------------------===//
623 include "LoongArchInstrFormats.td"
624 include "LoongArchFloatInstrFormats.td"
625 include "LoongArchLSXInstrFormats.td"
626 include "LoongArchLASXInstrFormats.td"
627 include "LoongArchLBTInstrFormats.td"
629 //===----------------------------------------------------------------------===//
630 // Instruction Class Templates
631 //===----------------------------------------------------------------------===//
633 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
634 class ALU_3R<bits<32> op>
635 : Fmt3R<op, (outs GPR:$rd), (ins GPR:$rj, GPR:$rk), "$rd, $rj, $rk">;
636 class ALU_2R<bits<32> op>
637 : Fmt2R<op, (outs GPR:$rd), (ins GPR:$rj), "$rd, $rj">;
639 class ALU_3RI2<bits<32> op, Operand ImmOpnd>
640 : Fmt3RI2<op, (outs GPR:$rd), (ins GPR:$rj, GPR:$rk, ImmOpnd:$imm2),
641 "$rd, $rj, $rk, $imm2">;
642 class ALU_3RI3<bits<32> op, Operand ImmOpnd>
643 : Fmt3RI3<op, (outs GPR:$rd), (ins GPR:$rj, GPR:$rk, ImmOpnd:$imm3),
644 "$rd, $rj, $rk, $imm3">;
645 class ALU_2RI5<bits<32> op, Operand ImmOpnd>
646 : Fmt2RI5<op, (outs GPR:$rd), (ins GPR:$rj, ImmOpnd:$imm5),
648 class ALU_2RI6<bits<32> op, Operand ImmOpnd>
649 : Fmt2RI6<op, (outs GPR:$rd), (ins GPR:$rj, ImmOpnd:$imm6),
651 class ALU_2RI12<bits<32> op, Operand ImmOpnd>
652 : Fmt2RI12<op, (outs GPR:$rd), (ins GPR:$rj, ImmOpnd:$imm12),
654 class ALU_2RI16<bits<32> op, Operand ImmOpnd>
655 : Fmt2RI16<op, (outs GPR:$rd), (ins GPR:$rj, ImmOpnd:$imm16),
657 class ALU_1RI20<bits<32> op, Operand ImmOpnd>
658 : Fmt1RI20<op, (outs GPR:$rd), (ins ImmOpnd:$imm20), "$rd, $imm20">;
659 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
661 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
662 class MISC_I15<bits<32> op>
663 : FmtI15<op, (outs), (ins uimm15:$imm15), "$imm15">;
665 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
666 class RDTIME_2R<bits<32> op>
667 : Fmt2R<op, (outs GPR:$rd, GPR:$rj), (ins), "$rd, $rj">;
669 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
670 class BrCC_2RI16<bits<32> op>
671 : Fmt2RI16<op, (outs), (ins GPR:$rj, GPR:$rd, simm16_lsl2_br:$imm16),
672 "$rj, $rd, $imm16"> {
674 let isTerminator = 1;
676 class BrCCZ_1RI21<bits<32> op>
677 : Fmt1RI21<op, (outs), (ins GPR:$rj, simm21_lsl2:$imm21),
680 let isTerminator = 1;
682 class Br_I26<bits<32> op>
683 : FmtI26<op, (outs), (ins simm26_b:$imm26), "$imm26"> {
685 let isTerminator = 1;
688 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
690 let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
691 class LOAD_3R<bits<32> op>
692 : Fmt3R<op, (outs GPR:$rd), (ins GPR:$rj, GPR:$rk), "$rd, $rj, $rk">;
693 class LOAD_2RI12<bits<32> op>
694 : Fmt2RI12<op, (outs GPR:$rd), (ins GPR:$rj, simm12_addlike:$imm12),
696 class LOAD_2RI14<bits<32> op>
697 : Fmt2RI14<op, (outs GPR:$rd), (ins GPR:$rj, simm14_lsl2:$imm14),
699 } // hasSideEffects = 0, mayLoad = 1, mayStore = 0
701 let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
702 class STORE_3R<bits<32> op>
703 : Fmt3R<op, (outs), (ins GPR:$rd, GPR:$rj, GPR:$rk),
705 class STORE_2RI12<bits<32> op>
706 : Fmt2RI12<op, (outs), (ins GPR:$rd, GPR:$rj, simm12_addlike:$imm12),
708 class STORE_2RI14<bits<32> op>
709 : Fmt2RI14<op, (outs), (ins GPR:$rd, GPR:$rj, simm14_lsl2:$imm14),
711 } // hasSideEffects = 0, mayLoad = 0, mayStore = 1
713 let hasSideEffects = 0, mayLoad = 1, mayStore = 1, Constraints = "@earlyclobber $rd" in
714 class AM_3R<bits<32> op>
715 : Fmt3R<op, (outs GPR:$rd), (ins GPR:$rk, GPRMemAtomic:$rj),
718 let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
719 class LLBase<bits<32> op>
720 : Fmt2RI14<op, (outs GPR:$rd), (ins GPR:$rj, simm14_lsl2:$imm14),
722 class LLBase_ACQ<bits<32> op>
723 : Fmt2R<op, (outs GPR:$rd), (ins GPR:$rj), "$rd, $rj">;
726 let hasSideEffects = 0, mayLoad = 0, mayStore = 1, Constraints = "$rd = $dst" in {
727 class SCBase<bits<32> op>
728 : Fmt2RI14<op, (outs GPR:$dst), (ins GPR:$rd, GPR:$rj, simm14_lsl2:$imm14),
730 class SCBase_128<bits<32> op>
731 : Fmt3R<op, (outs GPR:$dst), (ins GPR:$rd, GPR:$rk, GPR:$rj),
733 class SCBase_REL<bits<32> op>
734 : Fmt2R<op, (outs GPR:$dst), (ins GPR:$rd, GPR:$rj), "$rd, $rj">;
737 let hasSideEffects = 1 in
738 class IOCSRRD<bits<32> op>
739 : Fmt2R<op, (outs GPR:$rd), (ins GPR:$rj), "$rd, $rj">;
741 let hasSideEffects = 1 in
742 class IOCSRWR<bits<32> op>
743 : Fmt2R<op, (outs), (ins GPR:$rd, GPR:$rj), "$rd, $rj">;
745 //===----------------------------------------------------------------------===//
746 // Basic Integer Instructions
747 //===----------------------------------------------------------------------===//
749 // Arithmetic Operation Instructions
750 def ADD_W : ALU_3R<0x00100000>;
751 def SUB_W : ALU_3R<0x00110000>;
752 def ADDI_W : ALU_2RI12<0x02800000, simm12_addlike>;
753 def ALSL_W : ALU_3RI2<0x00040000, uimm2_plus1>;
754 let isReMaterializable = 1 in {
755 def LU12I_W : ALU_1RI20<0x14000000, simm20_lu12iw>;
757 def SLT : ALU_3R<0x00120000>;
758 def SLTU : ALU_3R<0x00128000>;
759 def SLTI : ALU_2RI12<0x02000000, simm12>;
760 def SLTUI : ALU_2RI12<0x02400000, simm12>;
761 def PCADDI : ALU_1RI20<0x18000000, simm20_pcaddi>;
762 def PCADDU12I : ALU_1RI20<0x1c000000, simm20>;
763 def PCALAU12I : ALU_1RI20<0x1a000000, simm20_pcalau12i>;
764 def AND : ALU_3R<0x00148000>;
765 def OR : ALU_3R<0x00150000>;
766 def NOR : ALU_3R<0x00140000>;
767 def XOR : ALU_3R<0x00158000>;
768 def ANDN : ALU_3R<0x00168000>;
769 def ORN : ALU_3R<0x00160000>;
770 def ANDI : ALU_2RI12<0x03400000, uimm12>;
771 // See LoongArchInstrInfo::isAsCheapAsAMove for more details.
772 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
773 def ORI : ALU_2RI12<0x03800000, uimm12_ori>;
774 def XORI : ALU_2RI12<0x03c00000, uimm12>;
776 def MUL_W : ALU_3R<0x001c0000>;
777 def MULH_W : ALU_3R<0x001c8000>;
778 def MULH_WU : ALU_3R<0x001d0000>;
779 let usesCustomInserter = true in {
780 def DIV_W : ALU_3R<0x00200000>;
781 def MOD_W : ALU_3R<0x00208000>;
782 def DIV_WU : ALU_3R<0x00210000>;
783 def MOD_WU : ALU_3R<0x00218000>;
784 } // usesCustomInserter = true
786 // Bit-shift Instructions
787 def SLL_W : ALU_3R<0x00170000>;
788 def SRL_W : ALU_3R<0x00178000>;
789 def SRA_W : ALU_3R<0x00180000>;
790 def ROTR_W : ALU_3R<0x001b0000>;
792 def SLLI_W : ALU_2RI5<0x00408000, uimm5>;
793 def SRLI_W : ALU_2RI5<0x00448000, uimm5>;
794 def SRAI_W : ALU_2RI5<0x00488000, uimm5>;
795 def ROTRI_W : ALU_2RI5<0x004c8000, uimm5>;
797 // Bit-manipulation Instructions
798 def EXT_W_B : ALU_2R<0x00005c00>;
799 def EXT_W_H : ALU_2R<0x00005800>;
800 def CLO_W : ALU_2R<0x00001000>;
801 def CLZ_W : ALU_2R<0x00001400>;
802 def CTO_W : ALU_2R<0x00001800>;
803 def CTZ_W : ALU_2R<0x00001c00>;
804 def BYTEPICK_W : ALU_3RI2<0x00080000, uimm2>;
805 def REVB_2H : ALU_2R<0x00003000>;
806 def BITREV_4B : ALU_2R<0x00004800>;
807 def BITREV_W : ALU_2R<0x00005000>;
808 let Constraints = "$rd = $dst" in {
809 def BSTRINS_W : FmtBSTR_W<0x00600000, (outs GPR:$dst),
810 (ins GPR:$rd, GPR:$rj, uimm5:$msbw, uimm5:$lsbw),
811 "$rd, $rj, $msbw, $lsbw">;
813 def BSTRPICK_W : FmtBSTR_W<0x00608000, (outs GPR:$rd),
814 (ins GPR:$rj, uimm5:$msbw, uimm5:$lsbw),
815 "$rd, $rj, $msbw, $lsbw">;
816 def MASKEQZ : ALU_3R<0x00130000>;
817 def MASKNEZ : ALU_3R<0x00138000>;
819 // Branch Instructions
820 def BEQ : BrCC_2RI16<0x58000000>;
821 def BNE : BrCC_2RI16<0x5c000000>;
822 def BLT : BrCC_2RI16<0x60000000>;
823 def BGE : BrCC_2RI16<0x64000000>;
824 def BLTU : BrCC_2RI16<0x68000000>;
825 def BGEU : BrCC_2RI16<0x6c000000>;
826 def BEQZ : BrCCZ_1RI21<0x40000000>;
827 def BNEZ : BrCCZ_1RI21<0x44000000>;
828 def B : Br_I26<0x50000000>;
830 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCall = 1, Defs=[R1] in
831 def BL : FmtI26<0x54000000, (outs), (ins simm26_symbol:$imm26), "$imm26">;
832 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
833 def JIRL : Fmt2RI16<0x4c000000, (outs GPR:$rd),
834 (ins GPR:$rj, simm16_lsl2:$imm16), "$rd, $rj, $imm16">;
836 // Common Memory Access Instructions
837 def LD_B : LOAD_2RI12<0x28000000>;
838 def LD_H : LOAD_2RI12<0x28400000>;
839 def LD_W : LOAD_2RI12<0x28800000>;
840 def LD_BU : LOAD_2RI12<0x2a000000>;
841 def LD_HU : LOAD_2RI12<0x2a400000>;
842 def ST_B : STORE_2RI12<0x29000000>;
843 def ST_H : STORE_2RI12<0x29400000>;
844 def ST_W : STORE_2RI12<0x29800000>;
845 let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
846 def PRELD : FmtPRELD<(outs), (ins uimm5:$imm5, GPR:$rj, simm12:$imm12),
847 "$imm5, $rj, $imm12">;
849 // Atomic Memory Access Instructions
850 def LL_W : LLBase<0x20000000>;
851 def SC_W : SCBase<0x21000000>;
852 def LLACQ_W : LLBase_ACQ<0x38578000>;
853 def SCREL_W : SCBase_REL<0x38578400>;
855 // Barrier Instructions
856 def DBAR : MISC_I15<0x38720000>;
857 def IBAR : MISC_I15<0x38728000>;
859 // Other Miscellaneous Instructions
860 def SYSCALL : MISC_I15<0x002b0000>;
861 def BREAK : MISC_I15<0x002a0000>;
862 def RDTIMEL_W : RDTIME_2R<0x00006000>;
863 def RDTIMEH_W : RDTIME_2R<0x00006400>;
864 def CPUCFG : ALU_2R<0x00006c00>;
866 // Cache Maintenance Instructions
867 def CACOP : FmtCACOP<(outs), (ins uimm5:$op, GPR:$rj, simm12:$imm12),
870 /// LA64 instructions
872 let Predicates = [IsLA64] in {
874 // Arithmetic Operation Instructions for 64-bits
875 def ADD_D : ALU_3R<0x00108000>;
876 def SUB_D : ALU_3R<0x00118000>;
877 // ADDI_D isn't always rematerializable, but isReMaterializable will be used as
878 // a hint which is verified in isReallyTriviallyReMaterializable.
879 // See LoongArchInstrInfo::isAsCheapAsAMove for more details.
880 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
881 def ADDI_D : ALU_2RI12<0x02c00000, simm12_addlike>;
883 def ADDU16I_D : ALU_2RI16<0x10000000, simm16>;
884 def ALSL_WU : ALU_3RI2<0x00060000, uimm2_plus1>;
885 def ALSL_D : ALU_3RI2<0x002c0000, uimm2_plus1>;
886 let Constraints = "$rd = $dst" in {
887 let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
888 isReMaterializable = 1 in
889 def LU32I_D : Fmt1RI20<0x16000000, (outs GPR:$dst),
890 (ins GPR:$rd, simm20_lu32id:$imm20),
893 let isReMaterializable = 1 in {
894 def LU52I_D : ALU_2RI12<0x03000000, simm12_lu52id>;
896 def PCADDU18I : ALU_1RI20<0x1e000000, simm20_pcaddu18i>;
897 def MUL_D : ALU_3R<0x001d8000>;
898 def MULH_D : ALU_3R<0x001e0000>;
899 def MULH_DU : ALU_3R<0x001e8000>;
900 def MULW_D_W : ALU_3R<0x001f0000>;
901 def MULW_D_WU : ALU_3R<0x001f8000>;
902 let usesCustomInserter = true in {
903 def DIV_D : ALU_3R<0x00220000>;
904 def MOD_D : ALU_3R<0x00228000>;
905 def DIV_DU : ALU_3R<0x00230000>;
906 def MOD_DU : ALU_3R<0x00238000>;
907 } // usesCustomInserter = true
909 // Bit-shift Instructions for 64-bits
910 def SLL_D : ALU_3R<0x00188000>;
911 def SRL_D : ALU_3R<0x00190000>;
912 def SRA_D : ALU_3R<0x00198000>;
913 def ROTR_D : ALU_3R<0x001b8000>;
914 def SLLI_D : ALU_2RI6<0x00410000, uimm6>;
915 def SRLI_D : ALU_2RI6<0x00450000, uimm6>;
916 def SRAI_D : ALU_2RI6<0x00490000, uimm6>;
917 def ROTRI_D : ALU_2RI6<0x004d0000, uimm6>;
919 // Bit-manipulation Instructions for 64-bits
920 def CLO_D : ALU_2R<0x00002000>;
921 def CLZ_D : ALU_2R<0x00002400>;
922 def CTO_D : ALU_2R<0x00002800>;
923 def CTZ_D : ALU_2R<0x00002c00>;
924 def BYTEPICK_D : ALU_3RI3<0x000c0000, uimm3>;
925 def REVB_4H : ALU_2R<0x00003400>;
926 def REVB_2W : ALU_2R<0x00003800>;
927 def REVB_D : ALU_2R<0x00003c00>;
928 def REVH_2W : ALU_2R<0x00004000>;
929 def REVH_D : ALU_2R<0x00004400>;
930 def BITREV_8B : ALU_2R<0x00004c00>;
931 def BITREV_D : ALU_2R<0x00005400>;
932 let Constraints = "$rd = $dst" in {
933 def BSTRINS_D : FmtBSTR_D<0x00800000, (outs GPR:$dst),
934 (ins GPR:$rd, GPR:$rj, uimm6:$msbd, uimm6:$lsbd),
935 "$rd, $rj, $msbd, $lsbd">;
937 def BSTRPICK_D : FmtBSTR_D<0x00c00000, (outs GPR:$rd),
938 (ins GPR:$rj, uimm6:$msbd, uimm6:$lsbd),
939 "$rd, $rj, $msbd, $lsbd">;
941 // Common Memory Access Instructions for 64-bits
942 def LD_WU : LOAD_2RI12<0x2a800000>;
943 def LD_D : LOAD_2RI12<0x28c00000>;
944 def ST_D : STORE_2RI12<0x29c00000>;
945 def LDX_B : LOAD_3R<0x38000000>;
946 def LDX_H : LOAD_3R<0x38040000>;
947 def LDX_W : LOAD_3R<0x38080000>;
948 def LDX_D : LOAD_3R<0x380c0000>;
949 def LDX_BU : LOAD_3R<0x38200000>;
950 def LDX_HU : LOAD_3R<0x38240000>;
951 def LDX_WU : LOAD_3R<0x38280000>;
952 def STX_B : STORE_3R<0x38100000>;
953 def STX_H : STORE_3R<0x38140000>;
954 def STX_W : STORE_3R<0x38180000>;
955 def STX_D : STORE_3R<0x381c0000>;
956 def LDPTR_W : LOAD_2RI14<0x24000000>;
957 def LDPTR_D : LOAD_2RI14<0x26000000>;
958 def STPTR_W : STORE_2RI14<0x25000000>;
959 def STPTR_D : STORE_2RI14<0x27000000>;
960 let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
961 def PRELDX : FmtPRELDX<(outs), (ins uimm5:$imm5, GPR:$rj, GPR:$rk),
964 // Bound Check Memory Access Instructions
965 def LDGT_B : LOAD_3R<0x38780000>;
966 def LDGT_H : LOAD_3R<0x38788000>;
967 def LDGT_W : LOAD_3R<0x38790000>;
968 def LDGT_D : LOAD_3R<0x38798000>;
969 def LDLE_B : LOAD_3R<0x387a0000>;
970 def LDLE_H : LOAD_3R<0x387a8000>;
971 def LDLE_W : LOAD_3R<0x387b0000>;
972 def LDLE_D : LOAD_3R<0x387b8000>;
973 def STGT_B : STORE_3R<0x387c0000>;
974 def STGT_H : STORE_3R<0x387c8000>;
975 def STGT_W : STORE_3R<0x387d0000>;
976 def STGT_D : STORE_3R<0x387d8000>;
977 def STLE_B : STORE_3R<0x387e0000>;
978 def STLE_H : STORE_3R<0x387e8000>;
979 def STLE_W : STORE_3R<0x387f0000>;
980 def STLE_D : STORE_3R<0x387f8000>;
982 // Atomic Memory Access Instructions for 64-bits
983 def AMSWAP_B : AM_3R<0x385c0000>;
984 def AMSWAP_H : AM_3R<0x385c8000>;
985 def AMSWAP_W : AM_3R<0x38600000>;
986 def AMSWAP_D : AM_3R<0x38608000>;
987 def AMADD_B : AM_3R<0x385d0000>;
988 def AMADD_H : AM_3R<0x385d8000>;
989 def AMADD_W : AM_3R<0x38610000>;
990 def AMADD_D : AM_3R<0x38618000>;
991 def AMAND_W : AM_3R<0x38620000>;
992 def AMAND_D : AM_3R<0x38628000>;
993 def AMOR_W : AM_3R<0x38630000>;
994 def AMOR_D : AM_3R<0x38638000>;
995 def AMXOR_W : AM_3R<0x38640000>;
996 def AMXOR_D : AM_3R<0x38648000>;
997 def AMMAX_W : AM_3R<0x38650000>;
998 def AMMAX_D : AM_3R<0x38658000>;
999 def AMMIN_W : AM_3R<0x38660000>;
1000 def AMMIN_D : AM_3R<0x38668000>;
1001 def AMMAX_WU : AM_3R<0x38670000>;
1002 def AMMAX_DU : AM_3R<0x38678000>;
1003 def AMMIN_WU : AM_3R<0x38680000>;
1004 def AMMIN_DU : AM_3R<0x38688000>;
1005 def AMSWAP__DB_B : AM_3R<0x385e0000>;
1006 def AMSWAP__DB_H : AM_3R<0x385e8000>;
1007 def AMSWAP__DB_W : AM_3R<0x38690000>;
1008 def AMSWAP__DB_D : AM_3R<0x38698000>;
1009 def AMADD__DB_B : AM_3R<0x385f0000>;
1010 def AMADD__DB_H : AM_3R<0x385f8000>;
1011 def AMADD__DB_W : AM_3R<0x386a0000>;
1012 def AMADD__DB_D : AM_3R<0x386a8000>;
1013 def AMAND__DB_W : AM_3R<0x386b0000>;
1014 def AMAND__DB_D : AM_3R<0x386b8000>;
1015 def AMOR__DB_W : AM_3R<0x386c0000>;
1016 def AMOR__DB_D : AM_3R<0x386c8000>;
1017 def AMXOR__DB_W : AM_3R<0x386d0000>;
1018 def AMXOR__DB_D : AM_3R<0x386d8000>;
1019 def AMMAX__DB_W : AM_3R<0x386e0000>;
1020 def AMMAX__DB_D : AM_3R<0x386e8000>;
1021 def AMMIN__DB_W : AM_3R<0x386f0000>;
1022 def AMMIN__DB_D : AM_3R<0x386f8000>;
1023 def AMMAX__DB_WU : AM_3R<0x38700000>;
1024 def AMMAX__DB_DU : AM_3R<0x38708000>;
1025 def AMMIN__DB_WU : AM_3R<0x38710000>;
1026 def AMMIN__DB_DU : AM_3R<0x38718000>;
1027 def AMCAS_B : AM_3R<0x38580000>;
1028 def AMCAS_H : AM_3R<0x38588000>;
1029 def AMCAS_W : AM_3R<0x38590000>;
1030 def AMCAS_D : AM_3R<0x38598000>;
1031 def AMCAS__DB_B : AM_3R<0x385a0000>;
1032 def AMCAS__DB_H : AM_3R<0x385a8000>;
1033 def AMCAS__DB_W : AM_3R<0x385b0000>;
1034 def AMCAS__DB_D : AM_3R<0x385b8000>;
1035 def LL_D : LLBase<0x22000000>;
1036 def SC_D : SCBase<0x23000000>;
1037 def SC_Q : SCBase_128<0x38570000>;
1038 def LLACQ_D : LLBase_ACQ<0x38578800>;
1039 def SCREL_D : SCBase_REL<0x38578C00>;
1041 // CRC Check Instructions
1042 def CRC_W_B_W : ALU_3R<0x00240000>;
1043 def CRC_W_H_W : ALU_3R<0x00248000>;
1044 def CRC_W_W_W : ALU_3R<0x00250000>;
1045 def CRC_W_D_W : ALU_3R<0x00258000>;
1046 def CRCC_W_B_W : ALU_3R<0x00260000>;
1047 def CRCC_W_H_W : ALU_3R<0x00268000>;
1048 def CRCC_W_W_W : ALU_3R<0x00270000>;
1049 def CRCC_W_D_W : ALU_3R<0x00278000>;
1051 // Other Miscellaneous Instructions for 64-bits
1052 def ASRTLE_D : FmtASRT<0x00010000, (outs), (ins GPR:$rj, GPR:$rk),
1054 def ASRTGT_D : FmtASRT<0x00018000, (outs), (ins GPR:$rj, GPR:$rk),
1056 def RDTIME_D : RDTIME_2R<0x00006800>;
1057 } // Predicates = [IsLA64]
1059 //===----------------------------------------------------------------------===//
1060 // Pseudo-instructions and codegen patterns
1062 // Naming convention: For 'generic' pattern classes, we use the naming
1063 // convention PatTy1Ty2.
1064 //===----------------------------------------------------------------------===//
1066 /// Generic pattern classes
1068 class PatGprGpr<SDPatternOperator OpNode, LAInst Inst>
1069 : Pat<(OpNode GPR:$rj, GPR:$rk), (Inst GPR:$rj, GPR:$rk)>;
1070 class PatGprGpr_32<SDPatternOperator OpNode, LAInst Inst>
1071 : Pat<(sext_inreg (OpNode GPR:$rj, GPR:$rk), i32), (Inst GPR:$rj, GPR:$rk)>;
1072 class PatGpr<SDPatternOperator OpNode, LAInst Inst>
1073 : Pat<(OpNode GPR:$rj), (Inst GPR:$rj)>;
1075 class PatGprImm<SDPatternOperator OpNode, LAInst Inst, Operand ImmOpnd>
1076 : Pat<(OpNode GPR:$rj, ImmOpnd:$imm),
1077 (Inst GPR:$rj, ImmOpnd:$imm)>;
1078 class PatGprImm_32<SDPatternOperator OpNode, LAInst Inst, Operand ImmOpnd>
1079 : Pat<(sext_inreg (OpNode GPR:$rj, ImmOpnd:$imm), i32),
1080 (Inst GPR:$rj, ImmOpnd:$imm)>;
1083 def AddLike: PatFrags<(ops node:$A, node:$B),
1084 [(add node:$A, node:$B), (or node:$A, node:$B)], [{
1085 return CurDAG->isBaseWithConstantOffset(SDValue(N, 0));
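// This lets an `or` that the DAG combiner can prove behaves like an add (the
// base's low bits are known to be zero, so adding the offset cannot carry) be
// selected exactly like `add`, e.g. when folding offsets into addressing.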
1088 /// Simple arithmetic operations
1090 // Match both a plain shift and one where the shift amount is masked (this is
1091 // typically introduced when the legalizer promotes the shift amount and
1092 // zero-extends it). For LoongArch, the mask is unnecessary as shifts in the
1093 // base ISA only read the least significant 5 bits (LA32) or 6 bits (LA64).
1095 : ComplexPattern<GRLenVT, 1, "selectShiftMaskGRLen", [], [], 0>;
1096 def shiftMask32 : ComplexPattern<i64, 1, "selectShiftMask32", [], [], 0>;
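// For example, on LA64 (shl GPR:$x, (and GPR:$y, 63)) selects to SLL_D $x, $y
// with the mask dropped, since SLL.D only reads bits [5:0] of $y anyway.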
1098 def sexti32 : ComplexPattern<i64, 1, "selectSExti32">;
1099 def zexti32 : ComplexPattern<i64, 1, "selectZExti32">;
1101 class shiftop<SDPatternOperator operator>
1102 : PatFrag<(ops node:$val, node:$count),
1103 (operator node:$val, (GRLenVT (shiftMaskGRLen node:$count)))>;
1104 class shiftopw<SDPatternOperator operator>
1105 : PatFrag<(ops node:$val, node:$count),
1106 (operator node:$val, (i64 (shiftMask32 node:$count)))>;
1108 def mul_const_oneuse : PatFrag<(ops node:$A, node:$B),
1109 (mul node:$A, node:$B), [{
1110 if (auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
1111 return N1C->hasOneUse();
1115 let Predicates = [IsLA32] in {
1116 def : PatGprGpr<add, ADD_W>;
1117 def : PatGprImm<add, ADDI_W, simm12>;
1118 def : PatGprGpr<sub, SUB_W>;
1119 def : PatGprGpr<sdiv, DIV_W>;
1120 def : PatGprGpr<udiv, DIV_WU>;
1121 def : PatGprGpr<srem, MOD_W>;
1122 def : PatGprGpr<urem, MOD_WU>;
1123 def : PatGprGpr<mul, MUL_W>;
1124 def : PatGprGpr<mulhs, MULH_W>;
1125 def : PatGprGpr<mulhu, MULH_WU>;
1126 def : PatGprGpr<rotr, ROTR_W>;
1127 def : PatGprImm<rotr, ROTRI_W, uimm5>;
1129 foreach Idx = 1...3 in {
1130 defvar ShamtA = !mul(8, Idx);
1131 defvar ShamtB = !mul(8, !sub(4, Idx));
1132 def : Pat<(or (shl GPR:$rk, (i32 ShamtA)), (srl GPR:$rj, (i32 ShamtB))),
1133 (BYTEPICK_W GPR:$rj, GPR:$rk, Idx)>;
1135 } // Predicates = [IsLA32]
1137 let Predicates = [IsLA64] in {
1138 def : PatGprGpr<add, ADD_D>;
1139 def : PatGprImm<add, ADDI_D, simm12>;
1140 def : PatGprGpr<sub, SUB_D>;
1141 def : PatGprGpr<sdiv, DIV_D>;
1142 def : PatGprGpr_32<sdiv, DIV_W>;
1143 def : PatGprGpr<udiv, DIV_DU>;
1144 def : PatGprGpr<loongarch_div_wu, DIV_WU>;
1145 def : PatGprGpr<srem, MOD_D>;
1146 def : PatGprGpr_32<srem, MOD_W>;
1147 def : PatGprGpr<urem, MOD_DU>;
1148 def : PatGprGpr<loongarch_mod_wu, MOD_WU>;
1149 def : PatGprGpr<rotr, ROTR_D>;
1150 def : PatGprGpr<loongarch_rotr_w, ROTR_W>;
1151 def : PatGprImm<rotr, ROTRI_D, uimm6>;
1152 def : PatGprImm_32<rotr, ROTRI_W, uimm5>;
1153 def : PatGprImm<loongarch_rotr_w, ROTRI_W, uimm5>;
1154 // TODO: Select "_W[U]" instructions for i32xi32 if only lower 32 bits of the
1155 // product are used.
1156 def : PatGprGpr<mul, MUL_D>;
1157 def : PatGprGpr<mulhs, MULH_D>;
1158 def : PatGprGpr<mulhu, MULH_DU>;
1159 // Select MULW_D_W for calculating the full 64-bit product of i32xi32 signed multiplication.
1161 def : Pat<(i64 (mul (sext_inreg GPR:$rj, i32), (sext_inreg GPR:$rk, i32))),
1162 (MULW_D_W GPR:$rj, GPR:$rk)>;
1163 // Select MULW_D_WU for calculating the full 64-bit product of i32xi32
1164 // unsigned multiplication.
1165 def : Pat<(i64 (mul (loongarch_bstrpick GPR:$rj, (i64 31), (i64 0)),
1166 (loongarch_bstrpick GPR:$rk, (i64 31), (i64 0)))),
1167 (MULW_D_WU GPR:$rj, GPR:$rk)>;
1169 def : Pat<(add GPR:$rj, simm16_lsl16:$imm),
1170 (ADDU16I_D GPR:$rj, (HI16 $imm))>;
1171 def : Pat<(add GPR:$rj, simm32_hi16_lo12:$imm),
1172 (ADDI_D (ADDU16I_D GPR:$rj, (HI16ForAddu16idAddiPair $imm)),
1174 def : Pat<(sext_inreg (add GPR:$rj, simm32_hi16_lo12:$imm), i32),
1175 (ADDI_W (ADDU16I_D GPR:$rj, (HI16ForAddu16idAddiPair $imm)),
1178 let Predicates = [IsLA32] in {
1179 def : Pat<(add GPR:$rj, (AddiPair:$im)),
1180 (ADDI_W (ADDI_W GPR:$rj, (AddiPairImmLarge AddiPair:$im)),
1181 (AddiPairImmSmall AddiPair:$im))>;
1182 } // Predicates = [IsLA32]
1184 let Predicates = [IsLA64] in {
1185 def : Pat<(add GPR:$rj, (AddiPair:$im)),
1186 (ADDI_D (ADDI_D GPR:$rj, (AddiPairImmLarge AddiPair:$im)),
1187 (AddiPairImmSmall AddiPair:$im))>;
1188 def : Pat<(sext_inreg (add GPR:$rj, (AddiPair:$im)), i32),
1189 (ADDI_W (ADDI_W GPR:$rj, (AddiPairImmLarge AddiPair:$im)),
1190 (AddiPairImmSmall AddiPair:$im))>;
1191 } // Predicates = [IsLA64]
1193 let Predicates = [IsLA32] in {
1194 foreach Idx0 = 1...4 in {
1195 foreach Idx1 = 1...4 in {
1196 defvar CImm = !add(1, !shl(!add(1, !shl(1, Idx0)), Idx1));
1197 def : Pat<(mul_const_oneuse GPR:$r, (i32 CImm)),
1198 (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 Idx0)),
1199 GPR:$r, (i32 Idx1))>;
1202 foreach Idx0 = 1...4 in {
1203 foreach Idx1 = 1...4 in {
1204 defvar Cb = !add(1, !shl(1, Idx0));
1205 defvar CImm = !add(Cb, !shl(Cb, Idx1));
1206 def : Pat<(mul_const_oneuse GPR:$r, (i32 CImm)),
1207 (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 Idx0)),
1208 (ALSL_W GPR:$r, GPR:$r, (i32 Idx0)), (i32 Idx1))>;
1211 } // Predicates = [IsLA32]
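// The chained ALSL patterns above (LA32) and below (LA64) cover one-use
// constant multipliers such as 7 = 1 + 3*2 (ALSL by 1, then ALSL by 1 over the
// original register) or 15 = 3 + 3*4 (two ALSLs reusing the 3x intermediate),
// avoiding a real multiply.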
1213 let Predicates = [IsLA64] in {
1214 foreach Idx0 = 1...4 in {
1215 foreach Idx1 = 1...4 in {
1216 defvar CImm = !add(1, !shl(!add(1, !shl(1, Idx0)), Idx1));
1217 def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 CImm)), i32),
1218 (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 Idx0)),
1219 GPR:$r, (i64 Idx1))>;
1220 def : Pat<(mul_const_oneuse GPR:$r, (i64 CImm)),
1221 (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 Idx0)),
1222 GPR:$r, (i64 Idx1))>;
1225 foreach Idx0 = 1...4 in {
1226 foreach Idx1 = 1...4 in {
1227 defvar Cb = !add(1, !shl(1, Idx0));
1228 defvar CImm = !add(Cb, !shl(Cb, Idx1));
1229 def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 CImm)), i32),
1230 (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 Idx0)),
1231 (ALSL_W GPR:$r, GPR:$r, (i64 Idx0)), (i64 Idx1))>;
1232 def : Pat<(mul_const_oneuse GPR:$r, (i64 CImm)),
1233 (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 Idx0)),
1234 (ALSL_D GPR:$r, GPR:$r, (i64 Idx0)), (i64 Idx1))>;
1237 } // Predicates = [IsLA64]
1239 let Predicates = [IsLA32] in {
1240 def : Pat<(mul GPR:$rj, (AlslSlliImm:$im)),
1241 (SLLI_W (ALSL_W GPR:$rj, GPR:$rj, (AlslSlliImmI0 AlslSlliImm:$im)),
1242 (AlslSlliImmI1 AlslSlliImm:$im))>;
1243 } // Predicates = [IsLA32]
1245 let Predicates = [IsLA64] in {
1246 def : Pat<(sext_inreg (mul GPR:$rj, (AlslSlliImm:$im)), i32),
1247 (SLLI_W (ALSL_W GPR:$rj, GPR:$rj, (AlslSlliImmI0 AlslSlliImm:$im)),
1248 (AlslSlliImmI1 AlslSlliImm:$im))>;
1249 def : Pat<(mul GPR:$rj, (AlslSlliImm:$im)),
1250 (SLLI_D (ALSL_D GPR:$rj, GPR:$rj, (AlslSlliImmI0 AlslSlliImm:$im)),
1251 (AlslSlliImmI1 AlslSlliImm:$im))>;
1252 } // Predicates = [IsLA64]
1254 foreach Idx = 1...7 in {
1255 defvar ShamtA = !mul(8, Idx);
1256 defvar ShamtB = !mul(8, !sub(8, Idx));
1257 def : Pat<(or (shl GPR:$rk, (i64 ShamtA)), (srl GPR:$rj, (i64 ShamtB))),
1258 (BYTEPICK_D GPR:$rj, GPR:$rk, Idx)>;
1261 foreach Idx = 1...3 in {
1262 defvar ShamtA = !mul(8, Idx);
1263 defvar ShamtB = !mul(8, !sub(4, Idx));
1264 // NOTE: the srl node would already be transformed into a loongarch_bstrpick
1265 // by the time this pattern gets to execute, hence the weird construction.
1266 def : Pat<(sext_inreg (or (shl GPR:$rk, (i64 ShamtA)),
1267 (loongarch_bstrpick GPR:$rj, (i64 31),
1268 (i64 ShamtB))), i32),
1269 (BYTEPICK_W GPR:$rj, GPR:$rk, Idx)>;
1271 } // Predicates = [IsLA64]
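// The BYTEPICK patterns above implement byte-granular funnel shifts; e.g. with
// Idx = 1, (or (shl $rk, 8), (srl $rj, 56)) keeps the top byte of $rj as the
// low byte and the low seven bytes of $rk above it, which is
// BYTEPICK_D $rj, $rk, 1.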
1273 def : PatGprGpr<and, AND>;
1274 def : PatGprImm<and, ANDI, uimm12>;
1275 def : PatGprGpr<or, OR>;
1276 def : PatGprImm<or, ORI, uimm12>;
1277 def : PatGprGpr<xor, XOR>;
1278 def : PatGprImm<xor, XORI, uimm12>;
1279 def : Pat<(not GPR:$rj), (NOR GPR:$rj, R0)>;
1280 def : Pat<(not (or GPR:$rj, GPR:$rk)), (NOR GPR:$rj, GPR:$rk)>;
1281 def : Pat<(or GPR:$rj, (not GPR:$rk)), (ORN GPR:$rj, GPR:$rk)>;
1282 def : Pat<(and GPR:$rj, (not GPR:$rk)), (ANDN GPR:$rj, GPR:$rk)>;
1284 let Predicates = [IsLA32] in {
1285 def : Pat<(and GPR:$rj, BstrinsImm:$imm),
1286 (BSTRINS_W GPR:$rj, R0, (BstrinsMsb BstrinsImm:$imm),
1287 (BstrinsLsb BstrinsImm:$imm))>;
1288 } // Predicates = [IsLA32]
1290 let Predicates = [IsLA64] in {
1291 def : Pat<(and GPR:$rj, BstrinsImm:$imm),
1292 (BSTRINS_D GPR:$rj, R0, (BstrinsMsb BstrinsImm:$imm),
1293 (BstrinsLsb BstrinsImm:$imm))>;
1294 } // Predicates = [IsLA64]
1298 // We lower `trap` to `amswap.w rd:$r0, rk:$r1, rj:$r0`, as this is guaranteed
1299 // to trap with an INE (non-existent on LA32, explicitly documented to INE on
1300 // LA64). The resulting signal also differs from that of `debugtrap`, as on
1301 // some other existing ports, so programs and porters might have an easier time.
1302 def PseudoUNIMP : Pseudo<(outs), (ins), [(trap)]>,
1303 PseudoInstExpansion<(AMSWAP_W R0, R1, R0)>;
1305 // We lower `debugtrap` to `break 0`, as this is guaranteed to exist and work,
1306 // even for LA32 Primary. Also, because so far the ISA does not provide a
1307 // specific trap instruction/kind exclusively for alerting the debugger,
1308 // every other project uses the generic immediate of 0 for this.
1309 def : Pat<(debugtrap), (BREAK 0)>;
1311 /// Bit counting operations
1313 let Predicates = [IsLA64] in {
1314 def : PatGpr<ctlz, CLZ_D>;
1315 def : PatGpr<cttz, CTZ_D>;
1316 def : Pat<(ctlz (not GPR:$rj)), (CLO_D GPR:$rj)>;
1317 def : Pat<(cttz (not GPR:$rj)), (CTO_D GPR:$rj)>;
1318 def : PatGpr<loongarch_clzw, CLZ_W>;
1319 def : PatGpr<loongarch_ctzw, CTZ_W>;
1320 def : Pat<(loongarch_clzw (not GPR:$rj)), (CLO_W GPR:$rj)>;
1321 def : Pat<(loongarch_ctzw (not GPR:$rj)), (CTO_W GPR:$rj)>;
1322 } // Predicates = [IsLA64]
1324 let Predicates = [IsLA32] in {
1325 def : PatGpr<ctlz, CLZ_W>;
1326 def : PatGpr<cttz, CTZ_W>;
1327 def : Pat<(ctlz (not GPR:$rj)), (CLO_W GPR:$rj)>;
1328 def : Pat<(cttz (not GPR:$rj)), (CTO_W GPR:$rj)>;
1329 } // Predicates = [IsLA32]
1331 /// FrameIndex calculations
1332 let Predicates = [IsLA32] in {
1333 def : Pat<(AddLike (i32 BaseAddr:$rj), simm12:$imm12),
1334 (ADDI_W (i32 BaseAddr:$rj), simm12:$imm12)>;
1335 } // Predicates = [IsLA32]
1336 let Predicates = [IsLA64] in {
1337 def : Pat<(AddLike (i64 BaseAddr:$rj), simm12:$imm12),
1338 (ADDI_D (i64 BaseAddr:$rj), simm12:$imm12)>;
1339 } // Predicates = [IsLA64]
1341 /// Shifted addition
1342 let Predicates = [IsLA32] in {
1343 def : Pat<(add GPR:$rk, (shl GPR:$rj, uimm2_plus1:$imm2)),
1344 (ALSL_W GPR:$rj, GPR:$rk, uimm2_plus1:$imm2)>;
1345 } // Predicates = [IsLA32]
1346 let Predicates = [IsLA64] in {
1347 def : Pat<(add GPR:$rk, (shl GPR:$rj, uimm2_plus1:$imm2)),
1348 (ALSL_D GPR:$rj, GPR:$rk, uimm2_plus1:$imm2)>;
1349 def : Pat<(sext_inreg (add GPR:$rk, (shl GPR:$rj, uimm2_plus1:$imm2)), i32),
1350 (ALSL_W GPR:$rj, GPR:$rk, uimm2_plus1:$imm2)>;
1351 def : Pat<(loongarch_bstrpick (add GPR:$rk, (shl GPR:$rj, uimm2_plus1:$imm2)),
1353 (ALSL_WU GPR:$rj, GPR:$rk, uimm2_plus1:$imm2)>;
1354 } // Predicates = [IsLA64]
1358 let Predicates = [IsLA32] in {
1359 def : PatGprGpr<shiftop<shl>, SLL_W>;
1360 def : PatGprGpr<shiftop<sra>, SRA_W>;
1361 def : PatGprGpr<shiftop<srl>, SRL_W>;
1362 def : PatGprImm<shl, SLLI_W, uimm5>;
1363 def : PatGprImm<sra, SRAI_W, uimm5>;
1364 def : PatGprImm<srl, SRLI_W, uimm5>;
1365 } // Predicates = [IsLA32]
1367 let Predicates = [IsLA64] in {
1368 def : PatGprGpr<shiftopw<loongarch_sll_w>, SLL_W>;
1369 def : PatGprGpr<shiftopw<loongarch_sra_w>, SRA_W>;
1370 def : PatGprGpr<shiftopw<loongarch_srl_w>, SRL_W>;
1371 def : PatGprGpr<shiftop<shl>, SLL_D>;
1372 def : PatGprGpr<shiftop<sra>, SRA_D>;
1373 def : PatGprGpr<shiftop<srl>, SRL_D>;
1374 def : PatGprImm<shl, SLLI_D, uimm6>;
1375 def : PatGprImm<sra, SRAI_D, uimm6>;
1376 def : PatGprImm<srl, SRLI_D, uimm6>;
1377 } // Predicates = [IsLA64]
1381 def : Pat<(sext_inreg GPR:$rj, i8), (EXT_W_B GPR:$rj)>;
1382 def : Pat<(sext_inreg GPR:$rj, i16), (EXT_W_H GPR:$rj)>;
1384 let Predicates = [IsLA64] in {
1385 def : Pat<(sext_inreg GPR:$rj, i32), (ADDI_W GPR:$rj, 0)>;
1386 } // Predicates = [IsLA64]
1390 def : PatGprGpr<setlt, SLT>;
1391 def : PatGprImm<setlt, SLTI, simm12>;
1392 def : PatGprGpr<setult, SLTU>;
1393 def : PatGprImm<setult, SLTUI, simm12>;
1395 // Define pattern expansions for setcc operations that aren't directly
1396 // handled by a LoongArch instruction.
1397 def : Pat<(seteq GPR:$rj, 0), (SLTUI GPR:$rj, 1)>;
1398 def : Pat<(seteq GPR:$rj, GPR:$rk), (SLTUI (XOR GPR:$rj, GPR:$rk), 1)>;
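// SLTUI ..., 1 produces 1 exactly when its first operand is 0 (the only value
// that is unsigned-less-than 1), so XOR-ing the operands first reduces the
// equality test to a compare-against-zero.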
1399 let Predicates = [IsLA32] in {
1400 def : Pat<(seteq GPR:$rj, simm12_plus1:$imm12),
1401 (SLTUI (ADDI_W GPR:$rj, (NegImm simm12_plus1:$imm12)), 1)>;
1402 } // Predicates = [IsLA32]
1403 let Predicates = [IsLA64] in {
1404 def : Pat<(seteq GPR:$rj, simm12_plus1:$imm12),
1405 (SLTUI (ADDI_D GPR:$rj, (NegImm simm12_plus1:$imm12)), 1)>;
1406 } // Predicates = [IsLA64]
1407 def : Pat<(setne GPR:$rj, 0), (SLTU R0, GPR:$rj)>;
1408 def : Pat<(setne GPR:$rj, GPR:$rk), (SLTU R0, (XOR GPR:$rj, GPR:$rk))>;
1409 let Predicates = [IsLA32] in {
1410 def : Pat<(setne GPR:$rj, simm12_plus1:$imm12),
1411 (SLTU R0, (ADDI_W GPR:$rj, (NegImm simm12_plus1:$imm12)))>;
1412 } // Predicates = [IsLA32]
1413 let Predicates = [IsLA64] in {
1414 def : Pat<(setne GPR:$rj, simm12_plus1:$imm12),
1415 (SLTU R0, (ADDI_D GPR:$rj, (NegImm simm12_plus1:$imm12)))>;
1416 } // Predicates = [IsLA64]
1417 def : Pat<(setugt GPR:$rj, GPR:$rk), (SLTU GPR:$rk, GPR:$rj)>;
1418 def : Pat<(setuge GPR:$rj, GPR:$rk), (XORI (SLTU GPR:$rj, GPR:$rk), 1)>;
1419 def : Pat<(setule GPR:$rj, GPR:$rk), (XORI (SLTU GPR:$rk, GPR:$rj), 1)>;
1420 def : Pat<(setgt GPR:$rj, GPR:$rk), (SLT GPR:$rk, GPR:$rj)>;
1421 def : Pat<(setge GPR:$rj, GPR:$rk), (XORI (SLT GPR:$rj, GPR:$rk), 1)>;
1422 def : Pat<(setle GPR:$rj, GPR:$rk), (XORI (SLT GPR:$rk, GPR:$rj), 1)>;
1426 def : Pat<(select GPR:$cond, GPR:$t, 0), (MASKEQZ GPR:$t, GPR:$cond)>;
1427 def : Pat<(select GPR:$cond, 0, GPR:$f), (MASKNEZ GPR:$f, GPR:$cond)>;
1428 def : Pat<(select GPR:$cond, GPR:$t, GPR:$f),
1429 (OR (MASKEQZ GPR:$t, GPR:$cond), (MASKNEZ GPR:$f, GPR:$cond))>;
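// MASKEQZ $t, $cond yields $t when $cond is non-zero and 0 otherwise, while
// MASKNEZ $f, $cond yields $f when $cond is zero and 0 otherwise; OR-ing the
// two results therefore implements a branchless select.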
1431 /// Branches and jumps
1433 class BccPat<PatFrag CondOp, LAInst Inst>
1434 : Pat<(brcond (GRLenVT (CondOp GPR:$rj, GPR:$rd)), bb:$imm16),
1435 (Inst GPR:$rj, GPR:$rd, bb:$imm16)>;
1437 def : BccPat<seteq, BEQ>;
1438 def : BccPat<setne, BNE>;
1439 def : BccPat<setlt, BLT>;
1440 def : BccPat<setge, BGE>;
1441 def : BccPat<setult, BLTU>;
1442 def : BccPat<setuge, BGEU>;
1444 class BccSwapPat<PatFrag CondOp, LAInst InstBcc>
1445 : Pat<(brcond (GRLenVT (CondOp GPR:$rd, GPR:$rj)), bb:$imm16),
1446 (InstBcc GPR:$rj, GPR:$rd, bb:$imm16)>;
1448 // Condition codes that don't have matching LoongArch branch instructions, but
1449 // are trivially supported by swapping the two input operands.
1450 def : BccSwapPat<setgt, BLT>;
1451 def : BccSwapPat<setle, BGE>;
1452 def : BccSwapPat<setugt, BLTU>;
1453 def : BccSwapPat<setule, BGEU>;
1455 // An extra pattern is needed for a brcond without a setcc (i.e. where the
1456 // condition was calculated elsewhere).
1457 def : Pat<(brcond GPR:$rj, bb:$imm21), (BNEZ GPR:$rj, bb:$imm21)>;
1459 def : Pat<(brcond (GRLenVT (seteq GPR:$rj, 0)), bb:$imm21),
1460 (BEQZ GPR:$rj, bb:$imm21)>;
1461 def : Pat<(brcond (GRLenVT (setne GPR:$rj, 0)), bb:$imm21),
1462 (BNEZ GPR:$rj, bb:$imm21)>;
1464 let isBarrier = 1, isBranch = 1, isTerminator = 1 in
1465 def PseudoBR : Pseudo<(outs), (ins simm26_b:$imm26), [(br bb:$imm26)]>,
1466 PseudoInstExpansion<(B simm26_b:$imm26)>;
1468 let isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in
1469 def PseudoBRIND : Pseudo<(outs), (ins GPR:$rj, simm16_lsl2:$imm16)>,
1470 PseudoInstExpansion<(JIRL R0, GPR:$rj, simm16_lsl2:$imm16)>;
1472 def : Pat<(brind GPR:$rj), (PseudoBRIND GPR:$rj, 0)>;
1473 def : Pat<(brind (add GPR:$rj, simm16_lsl2:$imm16)),
1474 (PseudoBRIND GPR:$rj, simm16_lsl2:$imm16)>;
1476 // Function call with 'Small' code model.
1477 let isCall = 1, Defs = [R1] in
1478 def PseudoCALL : Pseudo<(outs), (ins bare_symbol:$func)>;
1480 def : Pat<(loongarch_call tglobaladdr:$func), (PseudoCALL tglobaladdr:$func)>;
1481 def : Pat<(loongarch_call texternalsym:$func), (PseudoCALL texternalsym:$func)>;
1483 // Function call with 'Medium' code model.
1484 let isCall = 1, Defs = [R1, R20], Size = 8 in
1485 def PseudoCALL_MEDIUM : Pseudo<(outs), (ins bare_symbol:$func)>;
1487 let Predicates = [IsLA64] in {
1488 def : Pat<(loongarch_call_medium tglobaladdr:$func),
1489 (PseudoCALL_MEDIUM tglobaladdr:$func)>;
1490 def : Pat<(loongarch_call_medium texternalsym:$func),
1491 (PseudoCALL_MEDIUM texternalsym:$func)>;
1492 } // Predicates = [IsLA64]
1494 // Function call with 'Large' code model.
1495 let isCall = 1, Defs = [R1, R20], Size = 24 in
1496 def PseudoCALL_LARGE: Pseudo<(outs), (ins bare_symbol:$func)>;
1498 let Predicates = [IsLA64] in {
1499 def : Pat<(loongarch_call_large tglobaladdr:$func),
1500 (PseudoCALL_LARGE tglobaladdr:$func)>;
1501 def : Pat<(loongarch_call_large texternalsym:$func),
1502 (PseudoCALL_LARGE texternalsym:$func)>;
1503 } // Predicates = [IsLA64]
1505 let isCall = 1, Defs = [R1] in
1506 def PseudoCALLIndirect : Pseudo<(outs), (ins GPR:$rj),
1507 [(loongarch_call GPR:$rj)]>,
1508 PseudoInstExpansion<(JIRL R1, GPR:$rj, 0)>;
1509 let Predicates = [IsLA64] in {
1510 def : Pat<(loongarch_call_medium GPR:$rj), (PseudoCALLIndirect GPR:$rj)>;
1511 def : Pat<(loongarch_call_large GPR:$rj), (PseudoCALLIndirect GPR:$rj)>;
1514 let isCall = 1, hasSideEffects = 0, mayStore = 0, mayLoad = 0, Defs = [R1] in
1515 def PseudoJIRL_CALL : Pseudo<(outs), (ins GPR:$rj, simm16_lsl2:$imm16)>,
1516 PseudoInstExpansion<(JIRL R1, GPR:$rj,
1517 simm16_lsl2:$imm16)>;
1519 let isBarrier = 1, isReturn = 1, isTerminator = 1 in
1520 def PseudoRET : Pseudo<(outs), (ins), [(loongarch_ret)]>,
1521 PseudoInstExpansion<(JIRL R0, R1, 0)>;
1523 // Tail call with 'Small' code model.
1524 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [R3] in
1525 def PseudoTAIL : Pseudo<(outs), (ins bare_symbol:$dst)>;
1527 def : Pat<(loongarch_tail (iPTR tglobaladdr:$dst)),
1528 (PseudoTAIL tglobaladdr:$dst)>;
1529 def : Pat<(loongarch_tail (iPTR texternalsym:$dst)),
1530 (PseudoTAIL texternalsym:$dst)>;
1532 // Tail call with 'Medium' code model.
1533 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
1534 Uses = [R3], Defs = [R20], Size = 8 in
1535 def PseudoTAIL_MEDIUM : Pseudo<(outs), (ins bare_symbol:$dst)>;
1537 let Predicates = [IsLA64] in {
1538 def : Pat<(loongarch_tail_medium (iPTR tglobaladdr:$dst)),
1539 (PseudoTAIL_MEDIUM tglobaladdr:$dst)>;
1540 def : Pat<(loongarch_tail_medium (iPTR texternalsym:$dst)),
1541 (PseudoTAIL_MEDIUM texternalsym:$dst)>;
1542 } // Predicates = [IsLA64]
1544 // Tail call with 'Large' code model.
1545 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
1546 Uses = [R3], Defs = [R19, R20], Size = 24 in
1547 def PseudoTAIL_LARGE : Pseudo<(outs), (ins bare_symbol:$dst)>;
1549 let Predicates = [IsLA64] in {
1550 def : Pat<(loongarch_tail_large (iPTR tglobaladdr:$dst)),
1551 (PseudoTAIL_LARGE tglobaladdr:$dst)>;
1552 def : Pat<(loongarch_tail_large (iPTR texternalsym:$dst)),
1553 (PseudoTAIL_LARGE texternalsym:$dst)>;
1554 } // Predicates = [IsLA64]
1556 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [R3] in
1557 def PseudoTAILIndirect : Pseudo<(outs), (ins GPRT:$rj),
1558 [(loongarch_tail GPRT:$rj)]>,
1559 PseudoInstExpansion<(JIRL R0, GPR:$rj, 0)>;
1560 let Predicates = [IsLA64] in {
1561 def : Pat<(loongarch_tail_medium GPR:$rj), (PseudoTAILIndirect GPR:$rj)>;
1562 def : Pat<(loongarch_tail_large GPR:$rj), (PseudoTAILIndirect GPR:$rj)>;
} // Predicates = [IsLA64]
1565 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
1566 hasSideEffects = 0, mayStore = 0, mayLoad = 0, Uses = [R3] in
1567 def PseudoB_TAIL : Pseudo<(outs), (ins simm26_b:$imm26)>,
1568 PseudoInstExpansion<(B simm26_b:$imm26)>;
1570 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
1571 hasSideEffects = 0, mayStore = 0, mayLoad = 0, Uses = [R3] in
1572 def PseudoJIRL_TAIL : Pseudo<(outs), (ins GPR:$rj, simm16_lsl2:$imm16)>,
1573 PseudoInstExpansion<(JIRL R0, GPR:$rj,
1574 simm16_lsl2:$imm16)>;
1576 /// call36/taill36 macro instructions
1577 let isCall = 1, isBarrier = 1, isCodeGenOnly = 0, isAsmParserOnly = 1,
1578 Defs = [R1], Size = 8, hasSideEffects = 0, mayStore = 0, mayLoad = 0 in
1579 def PseudoCALL36 : Pseudo<(outs), (ins bare_symbol:$dst), [],
"call36", "$dst">,
Requires<[IsLA64]>;
1582 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [R3],
1583 isCodeGenOnly = 0, isAsmParserOnly = 1, Size = 8, hasSideEffects = 0,
1584 mayStore = 0, mayLoad = 0 in
1585 def PseudoTAIL36 : Pseudo<(outs), (ins GPR:$tmp, bare_symbol:$dst), [],
1586 "tail36", "$tmp, $dst">,
1589 // This is a special case of the ADD_W/D instruction used to facilitate the use
1590 // of a fourth operand to emit a relocation on a symbol relating to this
1591 // instruction. The relocation does not affect any bits of the instruction itself
1592 // but is used as a hint to the linker.
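// In assembly this shows up as a four-operand add, e.g.
// `add.d $rd, $rj, $tp, sym`, where the symbol operand only attaches the
// TLS relocation; the instruction still encodes as a normal ADD_D.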
1593 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0 in {
1594 def PseudoAddTPRel_W : Pseudo<(outs GPR:$rd),
1595 (ins GPR:$rj, GPR:$rk, tprel_add_symbol:$sym), [],
1596 "add.w", "$rd, $rj, $rk, $sym">,
1598 def PseudoAddTPRel_D : Pseudo<(outs GPR:$rd),
1599 (ins GPR:$rj, GPR:$rk, tprel_add_symbol:$sym), [],
1600 "add.d", "$rd, $rj, $rk, $sym">,
1604 /// Load address (la*) macro instructions.
1606 // Define isCodeGenOnly = 0 to expose them to the tablegen'ed assembly parser.
1607 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0,
1608 isAsmParserOnly = 1 in {
1609 def PseudoLA_ABS : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1610 "la.abs", "$dst, $src">;
1611 def PseudoLA_ABS_LARGE : Pseudo<(outs GPR:$dst),
1612 (ins GPR:$tmp, bare_symbol:$src), [],
1613 "la.abs", "$dst, $src">;
1614 def PseudoLA_PCREL : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1615 "la.pcrel", "$dst, $src">;
1616 def PseudoLA_TLS_LD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1617 "la.tls.ld", "$dst, $src">;
1618 def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1619 "la.tls.gd", "$dst, $src">;
1620 let Defs = [R20], Size = 20 in {
1621 def PseudoLA_PCREL_LARGE : Pseudo<(outs GPR:$dst),
1622 (ins GPR:$tmp, bare_symbol:$src), [],
1623 "la.pcrel", "$dst, $tmp, $src">,
1625 def PseudoLA_TLS_LE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1626 "la.tls.le", "$dst, $src">;
1627 def PseudoLA_TLS_LD_LARGE : Pseudo<(outs GPR:$dst),
1628 (ins GPR:$tmp, bare_symbol:$src), [],
1629 "la.tls.ld", "$dst, $tmp, $src">,
1631 def PseudoLA_TLS_GD_LARGE : Pseudo<(outs GPR:$dst),
1632 (ins GPR:$tmp, bare_symbol:$src), [],
1633 "la.tls.gd", "$dst, $tmp, $src">,
1635 } // Defs = [R20], Size = 20
}
1637 let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
1638 isAsmParserOnly = 1 in {
1639 def PseudoLA_GOT : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1640 "la.got", "$dst, $src">;
1641 def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1642 "la.tls.ie", "$dst, $src">;
1643 let Defs = [R20], Size = 20 in {
1644 def PseudoLA_GOT_LARGE : Pseudo<(outs GPR:$dst),
1645 (ins GPR:$tmp, bare_symbol:$src), [],
1646 "la.got", "$dst, $tmp, $src">,
1648 def PseudoLA_TLS_IE_LARGE : Pseudo<(outs GPR:$dst),
1649 (ins GPR:$tmp, bare_symbol:$src), [],
1650 "la.tls.ie", "$dst, $tmp, $src">,
1652 } // Defs = [R20], Size = 20
}
1655 // Used for expanding PseudoLA_TLS_DESC_* instructions.
1656 let isCall = 1, isBarrier = 1, hasSideEffects = 0, mayStore = 0, mayLoad = 0,
1657 Defs = [R4], Uses = [R4] in
1658 def PseudoDESC_CALL : Pseudo<(outs GPR:$rd), (ins GPR:$rj, simm16_lsl2:$imm16)>,
1659 PseudoInstExpansion<(JIRL GPR:$rd, GPR:$rj,
1660 simm16_lsl2:$imm16)>;
1663 let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
1664 isAsmParserOnly = 1, Defs = [R1] in {
1665 def PseudoLA_TLS_DESC_ABS : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src),
1666 [], "la.tls.desc", "$dst, $src">,
1667 Requires<[IsLA32, HasLaGlobalWithAbs]>;
1668 def PseudoLA_TLS_DESC_ABS_LARGE : Pseudo<(outs GPR:$dst),
1669 (ins GPR:$tmp, bare_symbol:$src), [],
1670 "la.tls.desc", "$dst, $src">,
1671 Requires<[IsLA64, HasLaGlobalWithAbs]>;
1672 def PseudoLA_TLS_DESC_PC : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
1673 "la.tls.desc", "$dst, $src">;
1676 let isCall = 1, isBarrier = 1, hasSideEffects = 0, mayStore = 0, mayLoad = 0,
1677 isCodeGenOnly = 0, isAsmParserOnly = 1, Defs = [R1, R4, R20], Size = 32 in
1678 def PseudoLA_TLS_DESC_PC_LARGE : Pseudo<(outs GPR:$dst),
1679 (ins GPR:$tmp, bare_symbol:$src), [],
1680 "la.tls.desc", "$dst, $tmp, $src">,
1683 // Load address inst alias: "la", "la.global" and "la.local".
// Default:
1685 // la = la.global = la.got
1686 // la.local = la.pcrel
1687 // With feature "+la-global-with-pcrel":
1688 // la = la.global = la.pcrel
1689 // With feature "+la-global-with-abs":
1690 // la = la.global = la.abs
1691 // With feature "+la-local-with-abs":
1692 // la.local = la.abs
1693 // With features "+la-global-with-pcrel,+la-global-with-abs" (in either order):
1694 // la = la.global = la.pcrel
1695 // Note: To keep consistent with gnu-as behavior, the "la" can only have one
1696 // register operand.
1697 def : InstAlias<"la $dst, $src", (PseudoLA_GOT GPR:$dst, bare_symbol:$src)>;
1698 def : InstAlias<"la.global $dst, $src",
1699 (PseudoLA_GOT GPR:$dst, bare_symbol:$src)>;
1700 def : InstAlias<"la.global $dst, $tmp, $src",
1701 (PseudoLA_GOT_LARGE GPR:$dst, GPR:$tmp, bare_symbol:$src)>;
1702 def : InstAlias<"la.local $dst, $src",
1703 (PseudoLA_PCREL GPR:$dst, bare_symbol:$src)>;
1704 def : InstAlias<"la.local $dst, $tmp, $src",
1705 (PseudoLA_PCREL_LARGE GPR:$dst, GPR:$tmp, bare_symbol:$src)>;
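// Under the default mapping above, e.g. `la $a0, sym` assembles the same as
// `la.got $a0, sym`, i.e. a GOT access (typically a PCALAU12I plus a load of
// the GOT entry); with +la-global-with-pcrel it becomes a pure PC-relative
// pair instead.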
1707 // Note: Keep HasLaGlobalWithPcrel before HasLaGlobalWithAbs to ensure
1708 // "la-global-with-pcrel" takes effect when bose "la-global-with-pcrel" and
1709 // "la-global-with-abs" are enabled.
1710 let Predicates = [HasLaGlobalWithPcrel] in {
1711 def : InstAlias<"la $dst, $src", (PseudoLA_PCREL GPR:$dst, bare_symbol:$src)>;
1712 def : InstAlias<"la.global $dst, $src",
1713 (PseudoLA_PCREL GPR:$dst, bare_symbol:$src)>;
1714 def : InstAlias<"la.global $dst, $tmp, $src",
1715 (PseudoLA_PCREL_LARGE GPR:$dst, GPR:$tmp, bare_symbol:$src)>;
1716 } // Predicates = [HasLaGlobalWithPcrel]
1718 let Predicates = [HasLaGlobalWithAbs] in {
1719 def : InstAlias<"la $dst, $src", (PseudoLA_ABS GPR:$dst, bare_symbol:$src)>;
1720 def : InstAlias<"la.global $dst, $src",
1721 (PseudoLA_ABS GPR:$dst, bare_symbol:$src)>;
1722 def : InstAlias<"la.global $dst, $tmp, $src",
1723 (PseudoLA_ABS_LARGE GPR:$dst, GPR:$tmp, bare_symbol:$src)>;
1724 } // Predicates = [HasLaGlobalWithAbs]
1726 let Predicates = [HasLaLocalWithAbs] in {
1727 def : InstAlias<"la.local $dst, $src",
1728 (PseudoLA_ABS GPR:$dst, bare_symbol:$src)>;
1729 def : InstAlias<"la.local $dst, $tmp, $src",
1730 (PseudoLA_ABS_LARGE GPR:$dst, GPR:$tmp, bare_symbol:$src)>;
1731 } // Predicates = [HasLaLocalWithAbs]
1733 /// BSTRINS and BSTRPICK
1735 let Predicates = [IsLA32] in {
1736 def : Pat<(loongarch_bstrins GPR:$rd, GPR:$rj, uimm5:$msbd, uimm5:$lsbd),
1737 (BSTRINS_W GPR:$rd, GPR:$rj, uimm5:$msbd, uimm5:$lsbd)>;
1738 def : Pat<(loongarch_bstrpick GPR:$rj, uimm5:$msbd, uimm5:$lsbd),
1739 (BSTRPICK_W GPR:$rj, uimm5:$msbd, uimm5:$lsbd)>;
1740 } // Predicates = [IsLA32]
1742 let Predicates = [IsLA64] in {
1743 def : Pat<(loongarch_bstrins GPR:$rd, GPR:$rj, uimm6:$msbd, uimm6:$lsbd),
1744 (BSTRINS_D GPR:$rd, GPR:$rj, uimm6:$msbd, uimm6:$lsbd)>;
1745 def : Pat<(loongarch_bstrpick GPR:$rj, uimm6:$msbd, uimm6:$lsbd),
1746 (BSTRPICK_D GPR:$rj, uimm6:$msbd, uimm6:$lsbd)>;
1747 } // Predicates = [IsLA64]
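// For example, zero-extending the low 16 bits of a register is matched by
// (loongarch_bstrpick $rj, 15, 0) and selected to `bstrpick.d $rd, $rj, 15, 0`
// on LA64 (bstrpick.w on LA32).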
1749 /// Byte-swapping and bit-reversal
1751 def : Pat<(loongarch_revb_2h GPR:$rj), (REVB_2H GPR:$rj)>;
1752 def : Pat<(loongarch_bitrev_4b GPR:$rj), (BITREV_4B GPR:$rj)>;
1754 let Predicates = [IsLA32] in {
1755 def : Pat<(bswap GPR:$rj), (ROTRI_W (REVB_2H GPR:$rj), 16)>;
1756 def : Pat<(bitreverse GPR:$rj), (BITREV_W GPR:$rj)>;
1757 def : Pat<(bswap (bitreverse GPR:$rj)), (BITREV_4B GPR:$rj)>;
1758 def : Pat<(bitreverse (bswap GPR:$rj)), (BITREV_4B GPR:$rj)>;
1759 } // Predicates = [IsLA32]
1761 let Predicates = [IsLA64] in {
1762 def : Pat<(loongarch_revb_2w GPR:$rj), (REVB_2W GPR:$rj)>;
1763 def : Pat<(bswap GPR:$rj), (REVB_D GPR:$rj)>;
1764 def : Pat<(loongarch_bitrev_w GPR:$rj), (BITREV_W GPR:$rj)>;
1765 def : Pat<(bitreverse GPR:$rj), (BITREV_D GPR:$rj)>;
1766 def : Pat<(bswap (bitreverse GPR:$rj)), (BITREV_8B GPR:$rj)>;
1767 def : Pat<(bitreverse (bswap GPR:$rj)), (BITREV_8B GPR:$rj)>;
1768 } // Predicates = [IsLA64]
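// Note the LA32 bswap above: there is no 32-bit revb.w, so an i32 byte swap is
// built from revb.2h (swap bytes within each halfword) followed by rotri.w 16
// (swap the two halfwords).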
1772 multiclass LdPat<PatFrag LoadOp, LAInst Inst, ValueType vt = GRLenVT> {
1773 def : Pat<(vt (LoadOp BaseAddr:$rj)), (Inst BaseAddr:$rj, 0)>;
1774 def : Pat<(vt (LoadOp (AddrConstant GPR:$rj, simm12:$imm12))),
1775 (Inst GPR:$rj, simm12:$imm12)>;
1776 def : Pat<(vt (LoadOp (AddLike BaseAddr:$rj, simm12:$imm12))),
1777 (Inst BaseAddr:$rj, simm12:$imm12)>;
}
1780 defm : LdPat<sextloadi8, LD_B>;
1781 defm : LdPat<extloadi8, LD_B>;
1782 defm : LdPat<sextloadi16, LD_H>;
1783 defm : LdPat<extloadi16, LD_H>;
1784 defm : LdPat<load, LD_W>, Requires<[IsLA32]>;
1785 defm : LdPat<zextloadi8, LD_BU>;
1786 defm : LdPat<zextloadi16, LD_HU>;
1787 let Predicates = [IsLA64] in {
1788 defm : LdPat<sextloadi32, LD_W, i64>;
1789 defm : LdPat<extloadi32, LD_W, i64>;
1790 defm : LdPat<zextloadi32, LD_WU, i64>;
1791 defm : LdPat<load, LD_D, i64>;
1792 } // Predicates = [IsLA64]
1794 // LA64 register-register-addressed loads
1795 let Predicates = [IsLA64] in {
1796 class RegRegLdPat<PatFrag LoadOp, LAInst Inst, ValueType vt>
1797 : Pat<(vt (LoadOp (add NonFIBaseAddr:$rj, GPR:$rk))),
1798 (Inst NonFIBaseAddr:$rj, GPR:$rk)>;
1800 def : RegRegLdPat<extloadi8, LDX_B, i64>;
1801 def : RegRegLdPat<sextloadi8, LDX_B, i64>;
1802 def : RegRegLdPat<zextloadi8, LDX_BU, i64>;
1803 def : RegRegLdPat<extloadi16, LDX_H, i64>;
1804 def : RegRegLdPat<sextloadi16, LDX_H, i64>;
1805 def : RegRegLdPat<zextloadi16, LDX_HU, i64>;
1806 def : RegRegLdPat<extloadi32, LDX_W, i64>;
1807 def : RegRegLdPat<sextloadi32, LDX_W, i64>;
1808 def : RegRegLdPat<zextloadi32, LDX_WU, i64>;
1809 def : RegRegLdPat<load, LDX_D, i64>;
1810 } // Predicates = [IsLA64]
1814 multiclass StPat<PatFrag StoreOp, LAInst Inst, RegisterClass StTy,
ValueType vt> {
1816 def : Pat<(StoreOp (vt StTy:$rd), BaseAddr:$rj),
1817 (Inst StTy:$rd, BaseAddr:$rj, 0)>;
1818 def : Pat<(StoreOp (vt StTy:$rs2), (AddrConstant GPR:$rj, simm12:$imm12)),
1819 (Inst StTy:$rs2, GPR:$rj, simm12:$imm12)>;
1820 def : Pat<(StoreOp (vt StTy:$rd), (AddLike BaseAddr:$rj, simm12:$imm12)),
1821 (Inst StTy:$rd, BaseAddr:$rj, simm12:$imm12)>;
}
1824 defm : StPat<truncstorei8, ST_B, GPR, GRLenVT>;
1825 defm : StPat<truncstorei16, ST_H, GPR, GRLenVT>;
1826 defm : StPat<store, ST_W, GPR, i32>, Requires<[IsLA32]>;
1827 let Predicates = [IsLA64] in {
1828 defm : StPat<truncstorei32, ST_W, GPR, i64>;
1829 defm : StPat<store, ST_D, GPR, i64>;
1830 } // Predicates = [IsLA64]
1832 let Predicates = [IsLA64] in {
1833 def : Pat<(i64 (sextloadi32 (AddLike BaseAddr:$rj, simm14_lsl2:$imm14))),
1834 (LDPTR_W BaseAddr:$rj, simm14_lsl2:$imm14)>;
1835 def : Pat<(i64 (load (AddLike BaseAddr:$rj, simm14_lsl2:$imm14))),
1836 (LDPTR_D BaseAddr:$rj, simm14_lsl2:$imm14)>;
1837 def : Pat<(truncstorei32 (i64 GPR:$rd),
1838 (AddLike BaseAddr:$rj, simm14_lsl2:$imm14)),
1839 (STPTR_W GPR:$rd, BaseAddr:$rj, simm14_lsl2:$imm14)>;
1840 def : Pat<(store (i64 GPR:$rd), (AddLike BaseAddr:$rj, simm14_lsl2:$imm14)),
1841 (STPTR_D GPR:$rd, BaseAddr:$rj, simm14_lsl2:$imm14)>;
1842 } // Predicates = [IsLA64]
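// ldptr/stptr take a 16-bit offset that must be 4-byte aligned (simm14_lsl2),
// so they reach a wider +/-32KiB range than the plain 12-bit immediate forms.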
1844 // LA64 register-register-addressed stores
1845 let Predicates = [IsLA64] in {
1846 class RegRegStPat<PatFrag StoreOp, LAInst Inst, RegisterClass StTy,
ValueType vt>
1848 : Pat<(StoreOp (vt StTy:$rd), (add NonFIBaseAddr:$rj, GPR:$rk)),
1849 (Inst StTy:$rd, NonFIBaseAddr:$rj, GPR:$rk)>;
1851 def : RegRegStPat<truncstorei8, STX_B, GPR, i64>;
1852 def : RegRegStPat<truncstorei16, STX_H, GPR, i64>;
1853 def : RegRegStPat<truncstorei32, STX_W, GPR, i64>;
1854 def : RegRegStPat<store, STX_D, GPR, i64>;
1855 } // Predicates = [IsLA64]
1857 /// Atomic loads and stores
1859 // DBAR hint encoding for LA664 and later micro-architectures, paraphrased from
1860 // the Linux patch revealing it [1]:
1862 // - Bit 4: kind of constraint (0: completion, 1: ordering)
1863 // - Bit 3: barrier for previous read (0: true, 1: false)
1864 // - Bit 2: barrier for previous write (0: true, 1: false)
1865 // - Bit 1: barrier for succeeding read (0: true, 1: false)
1866 // - Bit 0: barrier for succeeding write (0: true, 1: false)
1868 // Hint 0x700: barrier for "read after read" from the same address, which is
1869 // e.g. needed by LL-SC loops on older models. (DBAR 0x700 behaves the same as
1870 // a nop on newer models where such reordering is disabled.)
1872 // [1]: https://lore.kernel.org/loongarch/20230516124536.535343-1-chenhuacai@loongson.cn/
1874 // Implementations without support for the finer-granularity hints simply treat
1875 // all as the full barrier (DBAR 0), so we can unconditionally start emitting the
1876 // more precise hints right away.
1878 def : Pat<(atomic_fence 4, timm), (DBAR 0b10100)>; // acquire
1879 def : Pat<(atomic_fence 5, timm), (DBAR 0b10010)>; // release
1880 def : Pat<(atomic_fence 6, timm), (DBAR 0b10000)>; // acqrel
1881 def : Pat<(atomic_fence 7, timm), (DBAR 0b10000)>; // seqcst
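// Decoding the acquire hint 0b10100 with the table above: bit 4 = 1 (ordering
// constraint), bit 2 = 1 (no barrier needed for preceding writes), all other
// bits 0, i.e. preceding reads are ordered before all succeeding accesses.
// Release (0b10010) is the mirror image, and 0b10000 is a full ordering barrier.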
1883 defm : LdPat<atomic_load_8, LD_B>;
1884 defm : LdPat<atomic_load_16, LD_H>;
1885 defm : LdPat<atomic_load_32, LD_W>;
1887 class release_seqcst_store<PatFrag base>
1888 : PatFrag<(ops node:$val, node:$ptr), (base node:$val, node:$ptr), [{
1889 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getSuccessOrdering();
1890 return isReleaseOrStronger(Ordering);
}]>;
1893 class unordered_monotonic_store<PatFrag base>
1894 : PatFrag<(ops node:$val, node:$ptr), (base node:$val, node:$ptr), [{
1895 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getSuccessOrdering();
1896 return !isReleaseOrStronger(Ordering);
}]>;
1899 def atomic_store_release_seqcst_32 : release_seqcst_store<atomic_store_32>;
1900 def atomic_store_release_seqcst_64 : release_seqcst_store<atomic_store_64>;
1901 def atomic_store_unordered_monotonic_32
1902 : unordered_monotonic_store<atomic_store_32>;
1903 def atomic_store_unordered_monotonic_64
1904 : unordered_monotonic_store<atomic_store_64>;
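// Atomic stores at unordered/monotonic are selected to plain st.w/st.d below,
// while release and seq_cst stores go through the amswap_db pseudos further
// down, whose _db (barrier) semantics provide the required ordering.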
1906 defm : StPat<atomic_store_8, ST_B, GPR, GRLenVT>;
1907 defm : StPat<atomic_store_16, ST_H, GPR, GRLenVT>;
1908 defm : StPat<atomic_store_unordered_monotonic_32, ST_W, GPR, i32>,
Requires<[IsLA32]>;
1911 def PseudoAtomicStoreW
1912 : Pseudo<(outs GPR:$dst), (ins GPR:$rk, GPR:$rj)>,
1913 PseudoInstExpansion<(AMSWAP__DB_W R0, GPR:$rk, GPRMemAtomic:$rj)>;
1915 def : Pat<(atomic_store_release_seqcst_32 GPR:$rj, GPR:$rk),
1916 (PseudoAtomicStoreW GPR:$rj, GPR:$rk)>;
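// i.e. the store is performed as `amswap_db.w $zero, $rk, $rj`, discarding the
// old value; using the _db AM form rather than st.w is what provides the
// release/seq_cst ordering.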
1918 let Predicates = [IsLA64] in {
1919 def PseudoAtomicStoreD
1920 : Pseudo<(outs GPR:$dst), (ins GPR:$rk, GPR:$rj)>,
1921 PseudoInstExpansion<(AMSWAP__DB_D R0, GPR:$rk, GPRMemAtomic:$rj)>;
1923 def : Pat<(atomic_store_release_seqcst_64 GPR:$rj, GPR:$rk),
1924 (PseudoAtomicStoreD GPR:$rj, GPR:$rk)>;
1926 defm : LdPat<atomic_load_64, LD_D>;
1927 defm : StPat<atomic_store_unordered_monotonic_32, ST_W, GPR, i64>;
1928 defm : StPat<atomic_store_unordered_monotonic_64, ST_D, GPR, i64>;
1929 } // Predicates = [IsLA64]
1933 class PseudoMaskedAM
1934 : Pseudo<(outs GPR:$res, GPR:$scratch),
1935 (ins GPR:$addr, GPR:$incr, GPR:$mask, grlenimm:$ordering)> {
1936 let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
let mayLoad = 1;
let mayStore = 1;
1939 let hasSideEffects = 0;
}
1943 def PseudoMaskedAtomicSwap32 : PseudoMaskedAM;
1944 def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAM;
1945 def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAM;
1946 def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAM;
1948 class PseudoAM : Pseudo<(outs GPR:$res, GPR:$scratch),
1949 (ins GPR:$addr, GPR:$incr, grlenimm:$ordering)> {
1950 let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
let mayLoad = 1;
let mayStore = 1;
1953 let hasSideEffects = 0;
}
1957 def PseudoAtomicSwap32 : PseudoAM;
1958 def PseudoAtomicLoadNand32 : PseudoAM;
1959 def PseudoAtomicLoadNand64 : PseudoAM;
1960 def PseudoAtomicLoadAdd32 : PseudoAM;
1961 def PseudoAtomicLoadSub32 : PseudoAM;
1962 def PseudoAtomicLoadAnd32 : PseudoAM;
1963 def PseudoAtomicLoadOr32 : PseudoAM;
1964 def PseudoAtomicLoadXor32 : PseudoAM;
1966 multiclass PseudoBinPat<string Op, Pseudo BinInst> {
1967 def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$incr),
1968 (BinInst GPR:$addr, GPR:$incr, 2)>;
1969 def : Pat<(!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$incr),
1970 (BinInst GPR:$addr, GPR:$incr, 4)>;
1971 def : Pat<(!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$incr),
1972 (BinInst GPR:$addr, GPR:$incr, 5)>;
1973 def : Pat<(!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$incr),
1974 (BinInst GPR:$addr, GPR:$incr, 6)>;
1975 def : Pat<(!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$incr),
1976 (BinInst GPR:$addr, GPR:$incr, 7)>;
}
1979 class PseudoMaskedAMUMinUMax
1980 : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
1981 (ins GPR:$addr, GPR:$incr, GPR:$mask, grlenimm:$ordering)> {
1982 let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
1983 "@earlyclobber $scratch2";
1986 let hasSideEffects = 0;
}
1990 def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMUMinUMax;
1991 def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMUMinUMax;
1993 class PseudoMaskedAMMinMax
1994 : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
1995 (ins GPR:$addr, GPR:$incr, GPR:$mask, grlenimm:$sextshamt,
1996 grlenimm:$ordering)> {
1997 let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
1998 "@earlyclobber $scratch2";
2001 let hasSideEffects = 0;
}
2005 def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMMinMax;
2006 def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMMinMax;
2008 /// Compare and exchange
class PseudoCmpXchg
2011 : Pseudo<(outs GPR:$res, GPR:$scratch),
2012 (ins GPR:$addr, GPR:$cmpval, GPR:$newval, grlenimm:$fail_order)> {
2013 let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
let mayLoad = 1;
let mayStore = 1;
2016 let hasSideEffects = 0;
}
2020 def PseudoCmpXchg32 : PseudoCmpXchg;
2021 def PseudoCmpXchg64 : PseudoCmpXchg;
2023 def PseudoMaskedCmpXchg32
2024 : Pseudo<(outs GPR:$res, GPR:$scratch),
2025 (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
2026 grlenimm:$fail_order)> {
2027 let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
let mayLoad = 1;
let mayStore = 1;
2030 let hasSideEffects = 0;
}
2034 class PseudoMaskedAMMinMaxPat<Intrinsic intrin, Pseudo AMInst>
2035 : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
timm:$ordering),
2037 (AMInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
timm:$ordering)>;
2040 class AtomicPat<Intrinsic intrin, Pseudo AMInst>
2041 : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
2042 (AMInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;
2044 // These atomic cmpxchg PatFrags only care about the failure ordering.
2045 // The PatFrags defined by multiclass `ternary_atomic_op_ord` in
2046 // TargetSelectionDAG.td care about the merged memory ordering that is the
2047 // stronger one between success and failure. But for LoongArch LL-SC we only
2048 // need to care about the failure ordering as explained in PR #67391. So we
2049 // define these PatFrags that will be used to define cmpxchg pats below.
2050 multiclass ternary_atomic_op_failure_ord {
2051 def NAME#_failure_monotonic : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
2052 (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
2053 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getFailureOrdering();
2054 return Ordering == AtomicOrdering::Monotonic;
}]>;
2056 def NAME#_failure_acquire : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
2057 (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
2058 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getFailureOrdering();
2059 return Ordering == AtomicOrdering::Acquire;
}]>;
2061 def NAME#_failure_release : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
2062 (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
2063 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getFailureOrdering();
2064 return Ordering == AtomicOrdering::Release;
}]>;
2066 def NAME#_failure_acq_rel : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
2067 (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
2068 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getFailureOrdering();
2069 return Ordering == AtomicOrdering::AcquireRelease;
}]>;
2071 def NAME#_failure_seq_cst : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
2072 (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val), [{
2073 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getFailureOrdering();
2074 return Ordering == AtomicOrdering::SequentiallyConsistent;
}]>;
}
2078 defm atomic_cmp_swap_i32 : ternary_atomic_op_failure_ord;
2079 defm atomic_cmp_swap_i64 : ternary_atomic_op_failure_ord;
2081 let Predicates = [IsLA64] in {
2082 def : AtomicPat<int_loongarch_masked_atomicrmw_xchg_i64,
2083 PseudoMaskedAtomicSwap32>;
2084 def : Pat<(atomic_swap_i32 GPR:$addr, GPR:$incr),
2085 (AMSWAP__DB_W GPR:$incr, GPR:$addr)>;
2086 def : Pat<(atomic_swap_i64 GPR:$addr, GPR:$incr),
2087 (AMSWAP__DB_D GPR:$incr, GPR:$addr)>;
2088 def : Pat<(atomic_load_add_i64 GPR:$rj, GPR:$rk),
2089 (AMADD__DB_D GPR:$rk, GPR:$rj)>;
2090 def : AtomicPat<int_loongarch_masked_atomicrmw_add_i64,
2091 PseudoMaskedAtomicLoadAdd32>;
2092 def : Pat<(atomic_load_sub_i32 GPR:$rj, GPR:$rk),
2093 (AMADD__DB_W (SUB_W R0, GPR:$rk), GPR:$rj)>;
2094 def : Pat<(atomic_load_sub_i64 GPR:$rj, GPR:$rk),
2095 (AMADD__DB_D (SUB_D R0, GPR:$rk), GPR:$rj)>;
2096 def : AtomicPat<int_loongarch_masked_atomicrmw_sub_i64,
2097 PseudoMaskedAtomicLoadSub32>;
2098 defm : PseudoBinPat<"atomic_load_nand_i64", PseudoAtomicLoadNand64>;
2099 def : AtomicPat<int_loongarch_masked_atomicrmw_nand_i64,
2100 PseudoMaskedAtomicLoadNand32>;
2101 def : Pat<(atomic_load_add_i32 GPR:$rj, GPR:$rk),
2102 (AMADD__DB_W GPR:$rk, GPR:$rj)>;
2103 def : Pat<(atomic_load_and_i32 GPR:$rj, GPR:$rk),
2104 (AMAND__DB_W GPR:$rk, GPR:$rj)>;
2105 def : Pat<(atomic_load_and_i64 GPR:$rj, GPR:$rk),
2106 (AMAND__DB_D GPR:$rk, GPR:$rj)>;
2107 def : Pat<(atomic_load_or_i32 GPR:$rj, GPR:$rk),
2108 (AMOR__DB_W GPR:$rk, GPR:$rj)>;
2109 def : Pat<(atomic_load_or_i64 GPR:$rj, GPR:$rk),
2110 (AMOR__DB_D GPR:$rk, GPR:$rj)>;
2111 def : Pat<(atomic_load_xor_i32 GPR:$rj, GPR:$rk),
2112 (AMXOR__DB_W GPR:$rk, GPR:$rj)>;
2113 def : Pat<(atomic_load_xor_i64 GPR:$rj, GPR:$rk),
2114 (AMXOR__DB_D GPR:$rk, GPR:$rj)>;
2116 def : Pat<(atomic_load_umin_i32 GPR:$rj, GPR:$rk),
2117 (AMMIN__DB_WU GPR:$rk, GPR:$rj)>;
2118 def : Pat<(atomic_load_umin_i64 GPR:$rj, GPR:$rk),
2119 (AMMIN__DB_DU GPR:$rk, GPR:$rj)>;
2120 def : Pat<(atomic_load_umax_i32 GPR:$rj, GPR:$rk),
2121 (AMMAX__DB_WU GPR:$rk, GPR:$rj)>;
2122 def : Pat<(atomic_load_umax_i64 GPR:$rj, GPR:$rk),
2123 (AMMAX__DB_DU GPR:$rk, GPR:$rj)>;
2125 def : Pat<(atomic_load_min_i32 GPR:$rj, GPR:$rk),
2126 (AMMIN__DB_W GPR:$rk, GPR:$rj)>;
2127 def : Pat<(atomic_load_min_i64 GPR:$rj, GPR:$rk),
2128 (AMMIN__DB_D GPR:$rk, GPR:$rj)>;
2129 def : Pat<(atomic_load_max_i32 GPR:$rj, GPR:$rk),
2130 (AMMAX__DB_W GPR:$rk, GPR:$rj)>;
2131 def : Pat<(atomic_load_max_i64 GPR:$rj, GPR:$rk),
2132 (AMMAX__DB_D GPR:$rk, GPR:$rj)>;
2134 def : AtomicPat<int_loongarch_masked_atomicrmw_umax_i64,
2135 PseudoMaskedAtomicLoadUMax32>;
2136 def : AtomicPat<int_loongarch_masked_atomicrmw_umin_i64,
2137 PseudoMaskedAtomicLoadUMin32>;
2139 // Ordering constants must be kept in sync with the AtomicOrdering enum in
2140 // AtomicOrdering.h.
2141 multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst,
2142 ValueType vt = GRLenVT> {
2143 def : Pat<(vt (!cast<PatFrag>(Op#"_failure_monotonic") GPR:$addr, GPR:$cmp, GPR:$new)),
2144 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
2145 def : Pat<(vt (!cast<PatFrag>(Op#"_failure_acquire") GPR:$addr, GPR:$cmp, GPR:$new)),
2146 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
2147 def : Pat<(vt (!cast<PatFrag>(Op#"_failure_release") GPR:$addr, GPR:$cmp, GPR:$new)),
2148 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
2149 def : Pat<(vt (!cast<PatFrag>(Op#"_failure_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new)),
2150 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
2151 def : Pat<(vt (!cast<PatFrag>(Op#"_failure_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new)),
2152 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
}
2155 defm : PseudoCmpXchgPat<"atomic_cmp_swap_i32", PseudoCmpXchg32>;
2156 defm : PseudoCmpXchgPat<"atomic_cmp_swap_i64", PseudoCmpXchg64, i64>;
2157 def : Pat<(int_loongarch_masked_cmpxchg_i64
2158 GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$fail_order),
2159 (PseudoMaskedCmpXchg32
2160 GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$fail_order)>;
2162 def : PseudoMaskedAMMinMaxPat<int_loongarch_masked_atomicrmw_max_i64,
2163 PseudoMaskedAtomicLoadMax32>;
2164 def : PseudoMaskedAMMinMaxPat<int_loongarch_masked_atomicrmw_min_i64,
2165 PseudoMaskedAtomicLoadMin32>;
2166 } // Predicates = [IsLA64]
2168 defm : PseudoBinPat<"atomic_load_nand_i32", PseudoAtomicLoadNand32>;
2170 let Predicates = [IsLA32] in {
2171 def : AtomicPat<int_loongarch_masked_atomicrmw_xchg_i32,
2172 PseudoMaskedAtomicSwap32>;
2173 defm : PseudoBinPat<"atomic_swap_i32", PseudoAtomicSwap32>;
2174 def : AtomicPat<int_loongarch_masked_atomicrmw_add_i32,
2175 PseudoMaskedAtomicLoadAdd32>;
2176 def : AtomicPat<int_loongarch_masked_atomicrmw_sub_i32,
2177 PseudoMaskedAtomicLoadSub32>;
2178 def : AtomicPat<int_loongarch_masked_atomicrmw_nand_i32,
2179 PseudoMaskedAtomicLoadNand32>;
2180 defm : PseudoBinPat<"atomic_load_add_i32", PseudoAtomicLoadAdd32>;
2181 defm : PseudoBinPat<"atomic_load_sub_i32", PseudoAtomicLoadSub32>;
2182 defm : PseudoBinPat<"atomic_load_and_i32", PseudoAtomicLoadAnd32>;
2183 defm : PseudoBinPat<"atomic_load_or_i32", PseudoAtomicLoadOr32>;
2184 defm : PseudoBinPat<"atomic_load_xor_i32", PseudoAtomicLoadXor32>;
2185 } // Predicates = [IsLA32]
2189 def : Pat<(int_loongarch_cacop_d timm:$op, i64:$rj, timm:$imm12),
2190 (CACOP timm:$op, GPR:$rj, timm:$imm12)>;
2191 def : Pat<(int_loongarch_cacop_w i32:$op, i32:$rj, i32:$imm12),
2192 (CACOP timm:$op, GPR:$rj, timm:$imm12)>;
2193 def : Pat<(loongarch_dbar uimm15:$imm15), (DBAR uimm15:$imm15)>;
2194 def : Pat<(loongarch_ibar uimm15:$imm15), (IBAR uimm15:$imm15)>;
2195 def : Pat<(loongarch_break uimm15:$imm15), (BREAK uimm15:$imm15)>;
2196 def : Pat<(loongarch_syscall uimm15:$imm15), (SYSCALL uimm15:$imm15)>;
2198 let Predicates = [IsLA64] in {
2199 // CRC Check Instructions
2200 def : PatGprGpr<loongarch_crc_w_b_w, CRC_W_B_W>;
2201 def : PatGprGpr<loongarch_crc_w_h_w, CRC_W_H_W>;
2202 def : PatGprGpr<loongarch_crc_w_w_w, CRC_W_W_W>;
2203 def : PatGprGpr<loongarch_crc_w_d_w, CRC_W_D_W>;
2204 def : PatGprGpr<loongarch_crcc_w_b_w, CRCC_W_B_W>;
2205 def : PatGprGpr<loongarch_crcc_w_h_w, CRCC_W_H_W>;
2206 def : PatGprGpr<loongarch_crcc_w_w_w, CRCC_W_W_W>;
2207 def : PatGprGpr<loongarch_crcc_w_d_w, CRCC_W_D_W>;
2208 } // Predicates = [IsLA64]
2210 /// Other pseudo-instructions
2212 // Pessimistically assume the stack pointer will be clobbered
2213 let Defs = [R3], Uses = [R3] in {
2214 def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
2215 [(callseq_start timm:$amt1, timm:$amt2)]>;
2216 def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
2217 [(callseq_end timm:$amt1, timm:$amt2)]>;
2218 } // Defs = [R3], Uses = [R3]
2220 //===----------------------------------------------------------------------===//
2221 // Assembler Pseudo Instructions
2222 //===----------------------------------------------------------------------===//
2224 def : InstAlias<"nop", (ANDI R0, R0, 0)>;
2225 def : InstAlias<"move $dst, $src", (OR GPR:$dst, GPR:$src, R0)>;
2226 // `ret` is supported since binutils commit 20f2e2686c79a5ac (version 2.40 and
// later).
2228 def : InstAlias<"ret", (JIRL R0, R1, 0)>;
2229 def : InstAlias<"jr $rj", (JIRL R0, GPR:$rj, 0)>;
2231 // Branches implemented with aliases.
2232 // Always output the canonical mnemonic for the pseudo branch instructions.
2233 // The GNU tools emit the canonical mnemonic for the branch pseudo instructions
2234 // as well (e.g. "bgt" will be recognised by the assembler but never printed by
2235 // objdump). Match this behaviour by setting a zero weight.
2236 def : InstAlias<"bgt $rj, $rd, $imm16",
2237 (BLT GPR:$rd, GPR:$rj, simm16_lsl2_br:$imm16), 0>;
2238 def : InstAlias<"bgtu $rj, $rd, $imm16",
2239 (BLTU GPR:$rd, GPR:$rj, simm16_lsl2_br:$imm16), 0>;
2240 def : InstAlias<"ble $rj, $rd, $imm16",
2241 (BGE GPR:$rd, GPR:$rj, simm16_lsl2_br:$imm16), 0>;
2242 def : InstAlias<"bleu $rj, $rd, $imm16",
2243 (BGEU GPR:$rd, GPR:$rj, simm16_lsl2_br:$imm16), 0>;
2244 def : InstAlias<"bltz $rd, $imm16",
2245 (BLT GPR:$rd, R0, simm16_lsl2_br:$imm16), 0>;
2246 def : InstAlias<"bgtz $rj, $imm16",
2247 (BLT R0, GPR:$rj, simm16_lsl2_br:$imm16), 0>;
2248 def : InstAlias<"blez $rj, $imm16",
2249 (BGE R0, GPR:$rj, simm16_lsl2_br:$imm16), 0>;
2250 def : InstAlias<"bgez $rd, $imm16",
2251 (BGE GPR:$rd, R0, simm16_lsl2_br:$imm16), 0>;
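// e.g. `bgt $a0, $a1, foo` is accepted and encoded as `blt $a1, $a0, foo`;
// disassemblers will accordingly print the blt form.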
2254 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0,
2255 isAsmParserOnly = 1 in {
2256 def PseudoLI_W : Pseudo<(outs GPR:$rd), (ins imm32:$imm), [],
2257 "li.w", "$rd, $imm">;
2258 def PseudoLI_D : Pseudo<(outs GPR:$rd), (ins imm64:$imm), [],
2259 "li.d", "$rd, $imm">, Requires<[IsLA64]>;
2262 //===----------------------------------------------------------------------===//
2263 // Basic Floating-Point Instructions
2264 //===----------------------------------------------------------------------===//
2266 include "LoongArchFloat32InstrInfo.td"
2267 include "LoongArchFloat64InstrInfo.td"
2269 let Predicates = [HasBasicF], usesCustomInserter = 1 in {
2270 def WRFCSR : Pseudo<(outs), (ins uimm2:$fcsr, GPR:$src),
2271 [(loongarch_movgr2fcsr uimm2:$fcsr, GRLenVT:$src)]>;
2272 def RDFCSR : Pseudo<(outs GPR:$rd), (ins uimm2:$fcsr),
2273 [(set GPR:$rd, (loongarch_movfcsr2gr uimm2:$fcsr))]>;
}
2276 //===----------------------------------------------------------------------===//
2277 // Privilege Instructions
2278 //===----------------------------------------------------------------------===//
2280 // CSR Access Instructions
2281 let hasSideEffects = 1 in
2282 def CSRRD : FmtCSR<0x04000000, (outs GPR:$rd), (ins uimm14:$csr_num),
"$rd, $csr_num">;
2284 let hasSideEffects = 1, Constraints = "$rd = $dst" in {
2285 def CSRWR : FmtCSR<0x04000020, (outs GPR:$dst),
2286 (ins GPR:$rd, uimm14:$csr_num), "$rd, $csr_num">;
2287 def CSRXCHG : FmtCSRXCHG<0x04000000, (outs GPR:$dst),
2288 (ins GPR:$rd, GPR:$rj, uimm14:$csr_num),
2289 "$rd, $rj, $csr_num">;
2290 } // hasSideEffects = 1, Constraints = "$rd = $dst"
2292 // IOCSR Access Instructions
2293 def IOCSRRD_B : IOCSRRD<0x06480000>;
2294 def IOCSRRD_H : IOCSRRD<0x06480400>;
2295 def IOCSRRD_W : IOCSRRD<0x06480800>;
2296 def IOCSRWR_B : IOCSRWR<0x06481000>;
2297 def IOCSRWR_H : IOCSRWR<0x06481400>;
2298 def IOCSRWR_W : IOCSRWR<0x06481800>;
2299 let Predicates = [IsLA64] in {
2300 def IOCSRRD_D : IOCSRRD<0x06480c00>;
2301 def IOCSRWR_D : IOCSRWR<0x06481c00>;
2302 } // Predicates = [IsLA64]
2304 // TLB Maintenance Instructions
2305 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
2306 def TLBSRCH : FmtI32<0x06482800>;
2307 def TLBRD : FmtI32<0x06482c00>;
2308 def TLBWR : FmtI32<0x06483000>;
2309 def TLBFILL : FmtI32<0x06483400>;
2310 def TLBCLR : FmtI32<0x06482000>;
2311 def TLBFLUSH : FmtI32<0x06482400>;
2312 def INVTLB : FmtINVTLB<(outs), (ins GPR:$rk, GPR:$rj, uimm5:$op),
"$op, $rj, $rk">;
2314 } // hasSideEffects = 1, mayLoad = 0, mayStore = 0
2316 // Software Page Walking Instructions
2317 def LDDIR : Fmt2RI8<0x06400000, (outs GPR:$rd),
2318 (ins GPR:$rj, uimm8:$imm8), "$rd, $rj, $imm8">;
2319 def LDPTE : FmtLDPTE<(outs), (ins GPR:$rj, uimm8:$seq), "$rj, $seq">;
2322 // Other Miscellaneous Instructions
2323 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
2324 def ERTN : FmtI32<0x06483800>;
2325 def DBCL : MISC_I15<0x002a8000>;
2326 def IDLE : MISC_I15<0x06488000>;
2328 //===----------------------------------------------------------------------===//
2329 // Privilege Intrinsics
2330 //===----------------------------------------------------------------------===//
2332 def : Pat<(loongarch_csrrd uimm14:$imm14), (CSRRD uimm14:$imm14)>;
2333 def : Pat<(loongarch_csrwr GPR:$rd, uimm14:$imm14),
2334 (CSRWR GPR:$rd, uimm14:$imm14)>;
2335 def : Pat<(loongarch_csrxchg GPR:$rd, GPR:$rj, uimm14:$imm14),
2336 (CSRXCHG GPR:$rd, GPR:$rj, uimm14:$imm14)>;
2338 def : Pat<(loongarch_iocsrrd_b GPR:$rj), (IOCSRRD_B GPR:$rj)>;
2339 def : Pat<(loongarch_iocsrrd_h GPR:$rj), (IOCSRRD_H GPR:$rj)>;
2340 def : Pat<(loongarch_iocsrrd_w GPR:$rj), (IOCSRRD_W GPR:$rj)>;
2342 def : Pat<(loongarch_iocsrwr_b GPR:$rd, GPR:$rj), (IOCSRWR_B GPR:$rd, GPR:$rj)>;
2343 def : Pat<(loongarch_iocsrwr_h GPR:$rd, GPR:$rj), (IOCSRWR_H GPR:$rd, GPR:$rj)>;
2344 def : Pat<(loongarch_iocsrwr_w GPR:$rd, GPR:$rj), (IOCSRWR_W GPR:$rd, GPR:$rj)>;
2346 def : Pat<(loongarch_cpucfg GPR:$rj), (CPUCFG GPR:$rj)>;
2348 let Predicates = [IsLA64] in {
2349 def : Pat<(loongarch_iocsrrd_d GPR:$rj), (IOCSRRD_D GPR:$rj)>;
2350 def : Pat<(loongarch_iocsrwr_d GPR:$rd, GPR:$rj), (IOCSRWR_D GPR:$rd, GPR:$rj)>;
2351 def : Pat<(int_loongarch_asrtle_d GPR:$rj, GPR:$rk),
2352 (ASRTLE_D GPR:$rj, GPR:$rk)>;
2353 def : Pat<(int_loongarch_asrtgt_d GPR:$rj, GPR:$rk),
2354 (ASRTGT_D GPR:$rj, GPR:$rk)>;
2355 def : Pat<(int_loongarch_lddir_d GPR:$rj, timm:$imm8),
2356 (LDDIR GPR:$rj, timm:$imm8)>;
2357 def : Pat<(int_loongarch_ldpte_d GPR:$rj, timm:$imm8),
2358 (LDPTE GPR:$rj, timm:$imm8)>;
2359 } // Predicates = [IsLA64]
2361 //===----------------------------------------------------------------------===//
// LSX Instructions
2363 //===----------------------------------------------------------------------===//
2364 include "LoongArchLSXInstrInfo.td"
2366 //===----------------------------------------------------------------------===//
2367 // LASX Instructions
2368 //===----------------------------------------------------------------------===//
2369 include "LoongArchLASXInstrInfo.td"
2371 //===----------------------------------------------------------------------===//
// LVZ Instructions
2373 //===----------------------------------------------------------------------===//
2374 include "LoongArchLVZInstrInfo.td"
2376 //===----------------------------------------------------------------------===//
// LBT Instructions
2378 //===----------------------------------------------------------------------===//
2379 include "LoongArchLBTInstrInfo.td"