1 //===-- AVRInstrInfo.td - AVR Instruction defs -------------*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the AVR instructions in TableGen format.
11 //===----------------------------------------------------------------------===//
13 include "AVRInstrFormats.td"
15 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
19 def SDT_AVRCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i16>, SDTCisVT<1, i16>]>;
20 def SDT_AVRCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i16>, SDTCisVT<1, i16>]>;
21 def SDT_AVRCall : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
22 def SDT_AVRWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
24 : SDTypeProfile<0, 2, [SDTCisVT<0, OtherVT>, SDTCisVT<1, i8>]>;
25 def SDT_AVRCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
26 def SDT_AVRTst : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
29 [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
31 //===----------------------------------------------------------------------===//
32 // AVR Specific Node Definitions
33 //===----------------------------------------------------------------------===//
35 def AVRretflag : SDNode<"AVRISD::RET_FLAG", SDTNone,
36 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
37 def AVRretiflag : SDNode<"AVRISD::RETI_FLAG", SDTNone,
38 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
40 def AVRcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_AVRCallSeqStart,
41 [SDNPHasChain, SDNPOutGlue]>;
42 def AVRcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_AVRCallSeqEnd,
43 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
45 def AVRcall : SDNode<"AVRISD::CALL", SDT_AVRCall,
46 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>;
48 def AVRWrapper : SDNode<"AVRISD::WRAPPER", SDT_AVRWrapper>;
51 : SDNode<"AVRISD::BRCOND", SDT_AVRBrcond, [SDNPHasChain, SDNPInGlue]>;
52 def AVRcmp : SDNode<"AVRISD::CMP", SDT_AVRCmp, [SDNPOutGlue]>;
53 def AVRcmpc : SDNode<"AVRISD::CMPC", SDT_AVRCmp, [SDNPInGlue, SDNPOutGlue]>;
54 def AVRtst : SDNode<"AVRISD::TST", SDT_AVRTst, [SDNPOutGlue]>;
55 def AVRselectcc : SDNode<"AVRISD::SELECT_CC", SDT_AVRSelectCC, [SDNPInGlue]>;
58 def AVRlsl : SDNode<"AVRISD::LSL", SDTIntUnaryOp>;
59 def AVRlsr : SDNode<"AVRISD::LSR", SDTIntUnaryOp>;
60 def AVRrol : SDNode<"AVRISD::ROL", SDTIntUnaryOp>;
61 def AVRror : SDNode<"AVRISD::ROR", SDTIntUnaryOp>;
62 def AVRasr : SDNode<"AVRISD::ASR", SDTIntUnaryOp>;
63 def AVRlslhi : SDNode<"AVRISD::LSLHI", SDTIntUnaryOp>;
64 def AVRlsrlo : SDNode<"AVRISD::LSRLO", SDTIntUnaryOp>;
65 def AVRasrlo : SDNode<"AVRISD::ASRLO", SDTIntUnaryOp>;
66 def AVRlslbn : SDNode<"AVRISD::LSLBN", SDTIntBinOp>;
67 def AVRlsrbn : SDNode<"AVRISD::LSRBN", SDTIntBinOp>;
68 def AVRasrbn : SDNode<"AVRISD::ASRBN", SDTIntBinOp>;
69 def AVRlslwn : SDNode<"AVRISD::LSLWN", SDTIntBinOp>;
70 def AVRlsrwn : SDNode<"AVRISD::LSRWN", SDTIntBinOp>;
71 def AVRasrwn : SDNode<"AVRISD::ASRWN", SDTIntBinOp>;
73 // Pseudo shift nodes for non-constant shift amounts.
74 def AVRlslLoop : SDNode<"AVRISD::LSLLOOP", SDTIntShiftOp>;
75 def AVRlsrLoop : SDNode<"AVRISD::LSRLOOP", SDTIntShiftOp>;
76 def AVRrolLoop : SDNode<"AVRISD::ROLLOOP", SDTIntShiftOp>;
77 def AVRrorLoop : SDNode<"AVRISD::RORLOOP", SDTIntShiftOp>;
78 def AVRasrLoop : SDNode<"AVRISD::ASRLOOP", SDTIntShiftOp>;
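// Note (illustrative): because the shift amount here is not a constant, these
// LOOP nodes are later expanded into a small counting loop of single-bit
// shifts, conceptually:
//   loop: lsl  Rd        ; one single-bit shift step per iteration
//         dec  Rcount
//         brne loop
// The actual expansion is performed outside this file by the target's C++
// lowering code; the snippet above is only a sketch of the idea.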
81 def AVRSwap : SDNode<"AVRISD::SWAP", SDTIntUnaryOp>;
83 //===----------------------------------------------------------------------===//
84 // AVR Operands, Complex Patterns and Transformations Definitions.
85 //===----------------------------------------------------------------------===//
87 def imm8_neg_XFORM : SDNodeXForm<imm, [{
88 return CurDAG->getTargetConstant(
89 -N->getAPIntValue(), SDLoc(N), MVT::i8);
94 return CurDAG->getTargetConstant(-N->getAPIntValue(),
98 def imm0_63_neg : PatLeaf<(imm), [{
99 int64_t val = -N->getSExtValue();
100 return val >= 0 && val < 64;
104 def uimm6 : PatLeaf<(imm), [{ return isUInt<6>(N->getZExtValue()); }]>;
106 // imm_com8_XFORM - Return the complement of an imm_com8 value
108 : SDNodeXForm<imm, [{
109 return CurDAG->getTargetConstant(
110 ~((uint8_t) N->getZExtValue()), SDLoc(N), MVT::i8);
113 // imm_com8 - Match an immediate that is a complement
114 // of an 8-bit immediate.
115 // Note: this pattern doesn't require an encoder method and such, as it's
116 // only used on aliases (Pat<> and InstAlias<>). The actual encoding
117 // is handled by the destination instructions, which use imm_com8.
118 def imm_com8_asmoperand : AsmOperandClass { let Name = "ImmCom8"; }
119 def imm_com8 : Operand<i8> { let ParserMatchClass = imm_com8_asmoperand; }
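// For example, the `CBR` alias defined later in this file uses imm_com8, so
// `cbr r16, 0x0F` is assembled as `andi r16, 0xF0`; the complement of the
// mask is what is actually encoded.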
122 : SDNodeXForm<imm, [{
123 uint8_t offset = Subtarget->getIORegisterOffset();
124 return CurDAG->getTargetConstant(
125 uint8_t(N->getZExtValue()) - offset, SDLoc(N), MVT::i8);
129 : SDNodeXForm<imm, [{
130 return CurDAG->getTargetConstant(
131 Log2_32(uint8_t(N->getZExtValue())), SDLoc(N), MVT::i8);
134 def iobitposn8_XFORM : SDNodeXForm<imm, [{
135 return CurDAG->getTargetConstant(
136 Log2_32(uint8_t(~N->getZExtValue())),
140 def ioaddr8 : PatLeaf<(imm), [{
141 uint8_t offset = Subtarget->getIORegisterOffset();
142 uint64_t val = N->getZExtValue() - offset;
147 def lowioaddr8 : PatLeaf<(imm), [{
148 uint8_t offset = Subtarget->getIORegisterOffset();
149 uint64_t val = N->getZExtValue() - offset;
154 def ioaddr16 : PatLeaf<(imm), [{
155 uint8_t offset = Subtarget->getIORegisterOffset();
156 uint64_t val = N->getZExtValue() - offset;
162 : PatLeaf<(imm), [{ return isPowerOf2_32(uint8_t(N->getZExtValue())); }],
166 : PatLeaf<(imm), [{ return isPowerOf2_32(uint8_t(~N->getZExtValue())); }],
169 def MemriAsmOperand : AsmOperandClass {
171 let ParserMethod = "parseMemriOperand";
174 /// Address operand for `reg+imm` used by STD and LDD.
175 def memri : Operand<iPTR> {
176 let MIOperandInfo = (ops PTRDISPREGS, i16imm);
178 let PrintMethod = "printMemri";
179 let EncoderMethod = "encodeMemri";
180 let DecoderMethod = "decodeMemri";
182 let ParserMatchClass = MemriAsmOperand;
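// Example of the `reg+imm` form this operand models (assembly syntax):
//   ldd r24, Y+7   ; load from  Y + 7
//   std Z+2, r18   ; store to   Z + 2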
185 // Address operand for `SP+imm` used by STD{W}SPQRr
186 def memspi : Operand<iPTR> {
187 let MIOperandInfo = (ops GPRSP, i16imm);
188 let PrintMethod = "printMemspi";
191 def relbrtarget_7 : Operand<OtherVT> {
192 let PrintMethod = "printPCRelImm";
193 let EncoderMethod = "encodeRelCondBrTarget<AVR::fixup_7_pcrel>";
196 def brtarget_13 : Operand<OtherVT> {
197 let PrintMethod = "printPCRelImm";
198 let EncoderMethod = "encodeRelCondBrTarget<AVR::fixup_13_pcrel>";
201 def rcalltarget_13 : Operand<i16> {
202 let PrintMethod = "printPCRelImm";
203 let EncoderMethod = "encodeRelCondBrTarget<AVR::fixup_13_pcrel>";
206 // The target of a 22- or 16-bit call/jmp instruction.
207 def call_target : Operand<iPTR> {
208 let EncoderMethod = "encodeCallTarget";
209 let DecoderMethod = "decodeCallTarget";
212 // A 16-bit address (which can lead to an R_AVR_16 relocation).
213 def imm16 : Operand<i16> { let EncoderMethod = "encodeImm<AVR::fixup_16, 2>"; }
215 // A 7-bit address (which can lead to an R_AVR_LDS_STS_16 relocation).
216 def imm7tiny : Operand<i8> {
217 let EncoderMethod = "encodeImm<AVR::fixup_lds_sts_16, 0>";
220 /// A 6-bit immediate used in the ADIW/SBIW instructions.
221 def imm_arith6 : Operand<i16> {
222 let EncoderMethod = "encodeImm<AVR::fixup_6_adiw, 0>";
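// For example, `adiw r24, 12` adds the 6-bit constant 12 to the register pair
// r25:r24; the constant must lie in the range 0..63.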
225 /// An 8-bit immediate inside an instruction with the same format
226 /// as the `LDI` instruction (the `FRdK` format).
227 def imm_ldi8 : Operand<i8> {
228 let EncoderMethod = "encodeImm<AVR::fixup_ldi, 0>";
231 /// A 5-bit port number used in SBIC and friends (the `FIOBIT` format).
232 def imm_port5 : Operand<i8> {
233 let EncoderMethod = "encodeImm<AVR::fixup_port5, 0>";
236 /// A 6-bit port number used in the `IN` instruction and friends (the
238 def imm_port6 : Operand<i8> {
239 let EncoderMethod = "encodeImm<AVR::fixup_port6, 0>";
242 // Addressing mode pattern reg+imm6
243 def addr : ComplexPattern<iPTR, 2, "SelectAddr", [], [SDNPWantRoot]>;
245 // AsmOperand class for a pointer register.
246 // Used with the LD/ST family of instructions.
247 // See FSTLD in AVRInstrFormats.td
248 def PtrRegAsmOperand : AsmOperandClass { let Name = "Reg"; }
250 // A special operand type for the LD/ST instructions.
251 // It converts the pointer register number into a two-bit field used in the
253 def LDSTPtrReg : Operand<i16> {
254 let MIOperandInfo = (ops PTRREGS);
255 let EncoderMethod = "encodeLDSTPtrReg";
257 let ParserMatchClass = PtrRegAsmOperand;
260 // A special operand type for the LDD/STD instructions.
261 // It behaves identically to the LD/ST version, except restricts
262 // the pointer registers to Y and Z.
263 def LDDSTDPtrReg : Operand<i16> {
264 let MIOperandInfo = (ops PTRDISPREGS);
265 let EncoderMethod = "encodeLDSTPtrReg";
267 let ParserMatchClass = PtrRegAsmOperand;
270 //===----------------------------------------------------------------------===//
271 // AVR predicates for subtarget features
272 //===----------------------------------------------------------------------===//
274 def HasSRAM : Predicate<"Subtarget->hasSRAM()">,
275 AssemblerPredicate<(all_of FeatureSRAM)>;
277 def HasJMPCALL : Predicate<"Subtarget->hasJMPCALL()">,
278 AssemblerPredicate<(all_of FeatureJMPCALL)>;
280 def HasIJMPCALL : Predicate<"Subtarget->hasIJMPCALL()">,
281 AssemblerPredicate<(all_of FeatureIJMPCALL)>;
283 def HasEIJMPCALL : Predicate<"Subtarget->hasEIJMPCALL()">,
284 AssemblerPredicate<(all_of FeatureEIJMPCALL)>;
286 def HasADDSUBIW : Predicate<"Subtarget->hasADDSUBIW()">,
287 AssemblerPredicate<(all_of FeatureADDSUBIW)>;
289 def HasSmallStack : Predicate<"Subtarget->hasSmallStack()">,
290 AssemblerPredicate<(all_of FeatureSmallStack)>;
292 def HasMOVW : Predicate<"Subtarget->hasMOVW()">,
293 AssemblerPredicate<(all_of FeatureMOVW)>;
295 def HasLPM : Predicate<"Subtarget->hasLPM()">,
296 AssemblerPredicate<(all_of FeatureLPM)>;
298 def HasLPMX : Predicate<"Subtarget->hasLPMX()">,
299 AssemblerPredicate<(all_of FeatureLPMX)>;
301 def HasELPM : Predicate<"Subtarget->hasELPM()">,
302 AssemblerPredicate<(all_of FeatureELPM)>;
304 def HasELPMX : Predicate<"Subtarget->hasELPMX()">,
305 AssemblerPredicate<(all_of FeatureELPMX)>;
307 def HasSPM : Predicate<"Subtarget->hasSPM()">,
308 AssemblerPredicate<(all_of FeatureSPM)>;
310 def HasSPMX : Predicate<"Subtarget->hasSPMX()">,
311 AssemblerPredicate<(all_of FeatureSPMX)>;
313 def HasDES : Predicate<"Subtarget->hasDES()">,
314 AssemblerPredicate<(all_of FeatureDES)>;
316 def SupportsRMW : Predicate<"Subtarget->supportsRMW()">,
317 AssemblerPredicate<(all_of FeatureRMW)>;
319 def SupportsMultiplication : Predicate<"Subtarget->supportsMultiplication()">,
320 AssemblerPredicate<(all_of FeatureMultiplication)>;
322 def HasBREAK : Predicate<"Subtarget->hasBREAK()">,
323 AssemblerPredicate<(all_of FeatureBREAK)>;
325 def HasTinyEncoding : Predicate<"Subtarget->hasTinyEncoding()">,
326 AssemblerPredicate<(all_of FeatureTinyEncoding)>;
328 def HasNonTinyEncoding : Predicate<"!Subtarget->hasTinyEncoding()">,
329 AssemblerPredicate<(any_of (not FeatureTinyEncoding))>;
331 // AVR specific condition codes. These correspond to AVR_*_COND in
332 // AVRInstrInfo.td. They must be kept in sync.
333 def AVR_COND_EQ : PatLeaf<(i8 0)>;
334 def AVR_COND_NE : PatLeaf<(i8 1)>;
335 def AVR_COND_GE : PatLeaf<(i8 2)>;
336 def AVR_COND_LT : PatLeaf<(i8 3)>;
337 def AVR_COND_SH : PatLeaf<(i8 4)>;
338 def AVR_COND_LO : PatLeaf<(i8 5)>;
339 def AVR_COND_MI : PatLeaf<(i8 6)>;
340 def AVR_COND_PL : PatLeaf<(i8 7)>;
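// For example, the conditional branch definitions below match these leaves
// directly: BREQk is selected from (AVRbrcond bb:$k, AVR_COND_EQ), BRNEk from
// (AVRbrcond bb:$k, AVR_COND_NE), and so on.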
342 //===----------------------------------------------------------------------===//
343 //===----------------------------------------------------------------------===//
344 // AVR Instruction list
345 //===----------------------------------------------------------------------===//
346 //===----------------------------------------------------------------------===//
348 // ADJCALLSTACKDOWN/UP implicitly use/def SP because they may be expanded into
349 // a stack adjustment and the codegen must know that they may modify the stack
350 // pointer before prolog-epilog rewriting occurs.
351 // Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
352 // sub / add which can clobber SREG.
353 let Defs = [SP, SREG], Uses = [SP] in {
354 def ADJCALLSTACKDOWN : Pseudo<(outs),
358 "#ADJCALLSTACKDOWN", [(AVRcallseq_start timm
362 // R31R30 is used to update SP. It is normally free because it is a
363 // call-clobbered register but it is necessary to set it as a def as the
364 // register allocator might use it in rare cases (for rematerialization, it
365 // seems). hasSideEffects needs to be set to true so this instruction isn't
367 let Defs = [R31R30], hasSideEffects = 1 in def ADJCALLSTACKUP
372 "#ADJCALLSTACKUP", [(AVRcallseq_end timm
377 //===----------------------------------------------------------------------===//
379 //===----------------------------------------------------------------------===//
380 let isCommutable = 1, Constraints = "$src = $rd", Defs = [SREG] in {
382 // Adds two 8-bit registers.
384 : FRdRr<0b0000, 0b11,
397 // ADDW Rd+1:Rd, Rr+1:Rr
398 // Pseudo instruction to add four 8-bit registers as two 16-bit values.
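// (Sketch, not taken from this file: the ADDW pseudo is later split into an
// 8-bit `add` of the low bytes followed by an `adc` of the high bytes, e.g.
//   add r24, r22
//   adc r25, r23
// for r25:r24 += r23:r22.)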
417 // Adds two 8-bit registers with carry.
418 let Uses = [SREG] in def ADCRdRr
419 : FRdRr<0b0001, 0b11,
432 // ADCW Rd+1:Rd, Rr+1:Rr
433 // Pseudo instruction to add four 8-bit registers as two 16-bit values with
439 let Uses = [SREG] in def ADCWRdRr : Pseudo<(outs DREGS
453 // Adds an immediate 6-bit value K to Rd, placing the result in Rd.
467 Requires<[HasADDSUBIW]>;
470 //===----------------------------------------------------------------------===//
472 //===----------------------------------------------------------------------===//
473 let Constraints = "$src = $rd", Defs = [SREG] in {
475 // Subtracts the 8-bit value of Rr from Rd and places the value in Rd.
477 : FRdRr<0b0001, 0b10,
490 // SUBW Rd+1:Rd, Rr+1:Rr
491 // Subtracts two 16-bit values and places the result into Rd.
523 // SUBIW Rd+1:Rd, K+1:K
529 : Pseudo<(outs DLDREGS
554 Requires<[HasADDSUBIW]>;
556 // Subtract with carry operations which must read the carry flag in SREG.
557 let Uses = [SREG] in {
559 : FRdRr<0b0000, 0b10,
572 // SBCW Rd+1:Rd, Rr+1:Rr
577 def SBCWRdRr : Pseudo<(outs DREGS
604 // SBCIW Rd+1:Rd, K+1:K
607 def SBCIWRdK : Pseudo<(outs DLDREGS
622 //===----------------------------------------------------------------------===//
623 // Increment and Decrement
624 //===----------------------------------------------------------------------===//
625 let Constraints = "$src = $rd", Defs = [SREG] in {
627 : FRd<0b1001, 0b0100011,
638 : FRd<0b1001, 0b0101010,
649 //===----------------------------------------------------------------------===//
651 //===----------------------------------------------------------------------===//
653 let isCommutable = 1, Defs = [R1, R0, SREG] in {
655 // Multiplies Rd by Rr and places the result into R1:R0.
656 let usesCustomInserter = 1 in {
657 def MULRdRr : FRdRr<0b1001, 0b11, (outs),
662 [/*(set R1, R0, (smullohi i8:$rd, i8:$rr))*/]>,
663 Requires<[SupportsMultiplication]>;
665 def MULSRdRr : FMUL2RdRr<0, (outs),
669 "muls\t$rd, $rr", []>,
670 Requires<[SupportsMultiplication]>;
673 def MULSURdRr : FMUL2RdRr<1, (outs),
677 "mulsu\t$rd, $rr", []>,
678 Requires<[SupportsMultiplication]>;
680 def FMUL : FFMULRdRr<0b01, (outs),
684 "fmul\t$rd, $rr", []>,
685 Requires<[SupportsMultiplication]>;
687 def FMULS : FFMULRdRr<0b10, (outs),
691 "fmuls\t$rd, $rr", []>,
692 Requires<[SupportsMultiplication]>;
694 def FMULSU : FFMULRdRr<0b11, (outs),
698 "fmulsu\t$rd, $rr", []>,
699 Requires<[SupportsMultiplication]>;
703 [R15, R14, R13, R12, R11, R10, R9, R8, R7, R6, R5, R4, R3, R2, R1,
704 R0] in def DESK : FDES<(outs),
710 //===----------------------------------------------------------------------===//
712 //===----------------------------------------------------------------------===//
713 let Constraints = "$src = $rd", Defs = [SREG] in {
714 // Register-Register logic instructions (which have the
715 // property of commutativity).
716 let isCommutable = 1 in {
718 : FRdRr<0b0010, 0b00,
731 // ANDW Rd+1:Rd, Rr+1:Rr
736 def ANDWRdRr : Pseudo<(outs DREGS
750 : FRdRr<0b0010, 0b10,
763 // ORW Rd+1:Rd, Rr+1:Rr
768 def ORWRdRr : Pseudo<(outs DREGS
782 : FRdRr<0b0010, 0b01,
795 // EORW Rd+1:Rd, Rr+1:Rr
800 def EORWRdRr : Pseudo<(outs DREGS
828 // ANDIW Rd+1:Rd, K+1:K
834 : Pseudo<(outs DLDREGS
860 // ORIW Rd+1:Rd, K+1:K
866 : Pseudo<(outs DLDREGS
879 //===----------------------------------------------------------------------===//
880 // One's/Two's Complement
881 //===----------------------------------------------------------------------===//
882 let Constraints = "$src = $rd", Defs = [SREG] in {
884 : FRd<0b1001, 0b0100000,
899 def COMWRd : Pseudo<(outs DREGS
910 : FRd<0b1001, 0b0100001,
926 let hasSideEffects=0 in
927 def NEGWRd : Pseudo<(outs DREGS:$rd),
928 (ins DREGS:$src, GPR8:$zero),
934 // Test for zero or minus.
935 // This operation is identical to `AND Rd, Rd`.
936 def : InstAlias<"tst\t$rd", (ANDRdRr GPR8 : $rd, GPR8 : $rd)>;
940 // Mnemonic alias to 'ORI Rd, K'. Same bit pattern, same operands,
942 def : InstAlias<"sbr\t$rd, $k",
946 /* Disable display, so we don't override ORI */ 0>;
948 //===----------------------------------------------------------------------===//
950 //===----------------------------------------------------------------------===//
951 let isBarrier = 1, isBranch = 1, isTerminator = 1 in {
952 def RJMPk : FBRk<0, (outs),
958 let isIndirectBranch = 1,
959 Uses = [R31R30] in def IJMP
960 : F16<0b1001010000001001, (outs), (ins), "ijmp", []>,
961 Requires<[HasIJMPCALL]>;
963 let isIndirectBranch = 1,
964 Uses = [R31R30] in def EIJMP
965 : F16<0b1001010000011001, (outs), (ins), "eijmp", []>,
966 Requires<[HasEIJMPCALL]>;
968 def JMPk : F32BRk<0b110, (outs),
972 Requires<[HasJMPCALL]>;
975 //===----------------------------------------------------------------------===//
977 //===----------------------------------------------------------------------===//
979 // SP is marked as a use to prevent stack-pointer assignments that appear
980 // immediately before calls from potentially appearing dead.
981 let Uses = [SP] in def RCALLk : FBRk<1, (outs), (ins rcalltarget_13:$k),
982 "rcall\t$k", [(AVRcall imm:$k)]>;
984 // SP is marked as a use to prevent stack-pointer assignments that appear
985 // immediately before calls from potentially appearing dead.
986 let Uses = [SP, R31R30] in def ICALL
987 : F16<0b1001010100001001, (outs), (ins variable_ops), "icall", []>,
988 Requires<[HasIJMPCALL]>;
990 // SP is marked as a use to prevent stack-pointer assignments that appear
991 // immediately before calls from potentially appearing dead.
992 let Uses = [SP, R31R30] in def EICALL
993 : F16<0b1001010100011001, (outs), (ins variable_ops), "eicall", []>,
994 Requires<[HasEIJMPCALL]>;
996 // SP is marked as a use to prevent stack-pointer assignments that appear
997 // immediately before calls from potentially appearing dead.
999 // TODO: the imm field can be either 16 or 22 bits in devices with more
1000 // than 64k of ROM, fix it once we support the largest devices.
1001 let Uses = [SP] in def CALLk : F32BRk<0b111, (outs), (ins call_target:$k),
1002 "call\t$k", [(AVRcall imm:$k)]>,
1003 Requires<[HasJMPCALL]>;
1006 //===----------------------------------------------------------------------===//
1007 // Return instructions.
1008 //===----------------------------------------------------------------------===//
1009 let isTerminator = 1, isReturn = 1, isBarrier = 1 in {
1010 def RET : F16<0b1001010100001000, (outs), (ins), "ret", [(AVRretflag)]>;
1012 def RETI : F16<0b1001010100011000, (outs), (ins), "reti", [(AVRretiflag)]>;
1015 //===----------------------------------------------------------------------===//
1016 // Compare operations.
1017 //===----------------------------------------------------------------------===//
1018 let Defs = [SREG] in {
1020 // Compare Rd and Rr, skipping the next instruction if they are equal.
1021 let isBarrier = 1, isBranch = 1,
1022 isTerminator = 1 in def CPSE : FRdRr<0b0001, 0b00, (outs),
1026 "cpse\t$rd, $rr", []>;
1029 : FRdRr<0b0001, 0b01, (outs),
1033 "cp\t$rd, $rr", [(AVRcmp i8
1038 // CPW Rd+1:Rd, Rr+1:Rr
1043 def CPWRdRr : Pseudo<(outs),
1053 let Uses = [SREG] in def CPCRdRr
1054 : FRdRr<0b0000, 0b01, (outs),
1058 "cpc\t$rd, $rr", [(AVRcmpc i8
1063 // CPCW Rd+1:Rd, Rr+1:Rr
1068 let Uses = [SREG] in def CPCWRdRr
1073 "cpcw\t$src, $src2",
1080 // Compares a register with an 8-bit immediate.
1082 : FRdK<0b0011, (outs),
1086 "cpi\t$rd, $k", [(AVRcmp i8
1092 //===----------------------------------------------------------------------===//
1093 // Register conditional skipping/branching operations.
1094 //===----------------------------------------------------------------------===//
1095 let isBranch = 1, isTerminator = 1 in {
1096 // Conditional skipping on GPR register bits, and
1097 // conditional skipping on IO register bits.
1098 let isBarrier = 1 in {
1099 def SBRCRrB : FRdB<0b10, (outs),
1103 "sbrc\t$rd, $b", []>;
1105 def SBRSRrB : FRdB<0b11, (outs),
1109 "sbrs\t$rd, $b", []>;
1111 def SBICAb : FIOBIT<0b01, (outs),
1115 "sbic\t$addr, $b", []>;
1117 def SBISAb : FIOBIT<0b11, (outs),
1121 "sbis\t$addr, $b", []>;
1124 // Relative branches on status flag bits.
1125 let Uses = [SREG] in {
1127 // Branch if `s` flag in status register is set.
1128 def BRBSsk : FSK<0, (outs),
1132 "brbs\t$s, $k", []>;
1135 // Branch if `s` flag in status register is clear.
1136 def BRBCsk : FSK<1, (outs),
1140 "brbc\t$s, $k", []>;
1145 // Branch if carry flag is set
1146 def : InstAlias<"brcs\t$k", (BRBSsk 0, relbrtarget_7 : $k)>;
1149 // Branch if carry flag is clear
1150 def : InstAlias<"brcc\t$k", (BRBCsk 0, relbrtarget_7 : $k)>;
1153 // Branch if half carry flag is set
1154 def : InstAlias<"brhs\t$k", (BRBSsk 5, relbrtarget_7 : $k)>;
1157 // Branch if half carry flag is clear
1158 def : InstAlias<"brhc\t$k", (BRBCsk 5, relbrtarget_7 : $k)>;
1161 // Branch if the T flag is set
1162 def : InstAlias<"brts\t$k", (BRBSsk 6, relbrtarget_7 : $k)>;
1165 // Branch if the T flag is clear
1166 def : InstAlias<"brtc\t$k", (BRBCsk 6, relbrtarget_7 : $k)>;
1169 // Branch if the overflow flag is set
1170 def : InstAlias<"brvs\t$k", (BRBSsk 3, relbrtarget_7 : $k)>;
1173 // Branch if the overflow flag is clear
1174 def : InstAlias<"brvc\t$k", (BRBCsk 3, relbrtarget_7 : $k)>;
1177 // Branch if the global interrupt flag is enabled
1178 def : InstAlias<"brie\t$k", (BRBSsk 7, relbrtarget_7 : $k)>;
1181 // Branch if the global interrupt flag is disabled
1182 def : InstAlias<"brid\t$k", (BRBCsk 7, relbrtarget_7 : $k)>;
1184 //===----------------------------------------------------------------------===//
1185 // PC-relative conditional branches
1186 //===----------------------------------------------------------------------===//
1187 // Based on status register. We cannot simplify these into instruction aliases
1188 // because we also need to be able to specify a pattern to match for ISel.
1189 let isBranch = 1, isTerminator = 1, Uses = [SREG] in {
1190 def BREQk : FBRsk<0, 0b001, (outs),
1193 "breq\t$k", [(AVRbrcond bb
1194 : $k, AVR_COND_EQ)]>;
1196 def BRNEk : FBRsk<1, 0b001, (outs),
1199 "brne\t$k", [(AVRbrcond bb
1200 : $k, AVR_COND_NE)]>;
1202 def BRSHk : FBRsk<1, 0b000, (outs),
1205 "brsh\t$k", [(AVRbrcond bb
1206 : $k, AVR_COND_SH)]>;
1208 def BRLOk : FBRsk<0, 0b000, (outs),
1211 "brlo\t$k", [(AVRbrcond bb
1212 : $k, AVR_COND_LO)]>;
1214 def BRMIk : FBRsk<0, 0b010, (outs),
1217 "brmi\t$k", [(AVRbrcond bb
1218 : $k, AVR_COND_MI)]>;
1220 def BRPLk : FBRsk<1, 0b010, (outs),
1223 "brpl\t$k", [(AVRbrcond bb
1224 : $k, AVR_COND_PL)]>;
1226 def BRGEk : FBRsk<1, 0b100, (outs),
1229 "brge\t$k", [(AVRbrcond bb
1230 : $k, AVR_COND_GE)]>;
1232 def BRLTk : FBRsk<0, 0b100, (outs),
1235 "brlt\t$k", [(AVRbrcond bb
1236 : $k, AVR_COND_LT)]>;
1239 //===----------------------------------------------------------------------===//
1240 // Data transfer instructions
1241 //===----------------------------------------------------------------------===//
1242 // 8- and 16-bit register move instructions.
1243 let hasSideEffects = 0 in {
1244 def MOVRdRr : FRdRr<0b0010, 0b11,
1249 "mov\t$rd, $rr", []>;
1251 def MOVWRdRr : FMOVWRdRr<(outs DREGS
1255 "movw\t$rd, $rr", []>,
1256 Requires<[HasMOVW]>;
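// For example, `movw r24, r30` copies the pair r31:r30 into r25:r24 with a
// single instruction on devices that have the MOVW feature.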
1259 // Load immediate values into registers.
1260 let isReMaterializable = 1 in {
1261 def LDIRdK : FRdK<0b1110,
1266 "ldi\t$rd, $k", [(set i8
1270 // LDIW Rd+1:Rd, K+1:K
1275 def LDIWRdK : Pseudo<(outs DLDREGS
1279 "ldiw\t$dst, $src", [(set i16
1284 // Load from data space into register.
1285 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1286 def LDSRdK : F32DM<0b0,
1291 "lds\t$rd, $k", [(set i8
1294 Requires<[HasSRAM, HasNonTinyEncoding]>;
1296 // Load from data space into register, which is only available on AVRTiny.
1297 def LDSRdKTiny : FLDSSTSTINY<0b0, (outs LD8:$rd), (ins imm7tiny:$k),
1298 "lds\t$rd, $k", []>,
1299 Requires<[HasSRAM, HasTinyEncoding]>;
1301 // LDSW Rd+1:Rd, K+1:K
1305 // lds Rd+1 (K+1:K) + 1
1306 def LDSWRdK : Pseudo<(outs DREGS
1310 "ldsw\t$dst, $src", [(set i16
1313 Requires<[HasSRAM, HasNonTinyEncoding]>;
1317 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1318 def LDRdPtr : FSTLD<0, 0b00,
1323 "ld\t$reg, $ptrreg", [(set GPR8
1326 Requires<[HasSRAM]>;
1333 // On reduced tiny cores, this instruction expands to:
1337 let Constraints = "@earlyclobber $reg" in def LDWRdPtr
1338 : Pseudo<(outs DREGS
1342 "ldw\t$reg, $ptrreg", [(set i16
1345 Requires<[HasSRAM]>;
1348 // Indirect loads (with postincrement or predecrement).
1349 let mayLoad = 1, hasSideEffects = 0,
1350 Constraints = "$ptrreg = $base_wb,@earlyclobber $reg" in {
1351 def LDRdPtrPi : FSTLD<0, 0b01,
1357 "ld\t$reg, $ptrreg+", []>,
1358 Requires<[HasSRAM]>;
1364 def LDWRdPtrPi : Pseudo<(outs DREGS
1369 "ldw\t$reg, $ptrreg+", []>,
1370 Requires<[HasSRAM]>;
1372 def LDRdPtrPd : FSTLD<0, 0b10,
1378 "ld\t$reg, -$ptrreg", []>,
1379 Requires<[HasSRAM]>;
1386 def LDWRdPtrPd : Pseudo<(outs DREGS
1391 "ldw\t$reg, -$ptrreg", []>,
1392 Requires<[HasSRAM]>;
1395 // Load indirect with displacement operations.
1396 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1397 let Constraints = "@earlyclobber $reg" in def LDDRdPtrQ
1403 "ldd\t$reg, $memri", [(set i8
1406 Requires<[HasSRAM, HasNonTinyEncoding]>;
1408 // LDDW Rd+1:Rd, P+q
1413 // On reduced tiny cores, this instruction expands to:
1418 let Constraints = "@earlyclobber $dst" in def LDDWRdPtrQ
1419 : Pseudo<(outs DREGS
1423 "lddw\t$dst, $memri", [(set i16
1426 Requires<[HasSRAM]>;
1428 // An identical pseudo instruction to LDDWRdPtrQ, except restricted to the Y
1429 // register and without the @earlyclobber flag.
1431 // Used to work around a bug caused by the register allocator not
1432 // being able to handle the expansion of a COPY into a machine instruction
1433 // that has an earlyclobber flag. This is because the register allocator will
1434 // try to expand a copy from a register slot into an earlyclobber instruction.
1435 // Instructions that are earlyclobber need to be in a dedicated earlyclobber
1438 // This pseudo instruction can be used pre-AVR pseudo expansion in order to
1439 // get a frame index load without directly using earlyclobber instructions.
1441 // The pseudo expansion pass trivially expands this into LDDWRdPtrQ.
1443 // This instruction may be removed once PR13375 is fixed.
1445 hasSideEffects = 0 in def LDDWRdYQ : Pseudo<(outs DREGS
1449 "lddw\t$dst, $memri", []>,
1450 Requires<[HasSRAM]>;
1453 class AtomicLoad<PatFrag Op, RegisterClass DRC, RegisterClass PTRRC>
1458 "atomic_op", [(set DRC
1462 class AtomicStore<PatFrag Op, RegisterClass DRC, RegisterClass PTRRC>
1467 "atomic_op", [(Op i16
1471 class AtomicLoadOp<PatFrag Op, RegisterClass DRC, RegisterClass PTRRC>
1472 : Pseudo<(outs DRC:$rd),
1473 (ins PTRRC:$rr, DRC:$operand),
1474 "atomic_op", [(set DRC:$rd, (Op i16:$rr, DRC:$operand))]>;
1476 // Atomic instructions
1477 // ===================
1479 // 8-bit operations can use any pointer register because
1480 // they are expanded directly into an LD/ST instruction.
1482 // 16-bit operations use 16-bit load/store postincrement instructions,
1483 // which require PTRDISPREGS.
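// Sketch (the expansion itself is done outside this file): each atomic pseudo
// is lowered to the corresponding plain load/store or load-modify-store
// sequence, wrapped so that interrupts cannot observe a partial update,
// conceptually:
//   in   Rtmp, SREG   ; save the status register (including the I flag)
//   cli               ; disable interrupts
//   ...load/modify/store...
//   out  SREG, Rtmp   ; restore SREG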
1485 def AtomicLoad8 : AtomicLoad<atomic_load_8, GPR8, PTRREGS>;
1486 def AtomicLoad16 : AtomicLoad<atomic_load_16, DREGS, PTRDISPREGS>;
1488 def AtomicStore8 : AtomicStore<atomic_store_8, GPR8, PTRREGS>;
1489 def AtomicStore16 : AtomicStore<atomic_store_16, DREGS, PTRDISPREGS>;
1491 class AtomicLoadOp8<PatFrag Op> : AtomicLoadOp<Op, GPR8, PTRREGS>;
1492 class AtomicLoadOp16<PatFrag Op> : AtomicLoadOp<Op, DREGS, PTRDISPREGS>;
1494 let usesCustomInserter=1 in {
1495 def AtomicLoadAdd8 : AtomicLoadOp8<atomic_load_add_8>;
1496 def AtomicLoadAdd16 : AtomicLoadOp16<atomic_load_add_16>;
1497 def AtomicLoadSub8 : AtomicLoadOp8<atomic_load_sub_8>;
1498 def AtomicLoadSub16 : AtomicLoadOp16<atomic_load_sub_16>;
1499 def AtomicLoadAnd8 : AtomicLoadOp8<atomic_load_and_8>;
1500 def AtomicLoadAnd16 : AtomicLoadOp16<atomic_load_and_16>;
1501 def AtomicLoadOr8 : AtomicLoadOp8<atomic_load_or_8>;
1502 def AtomicLoadOr16 : AtomicLoadOp16<atomic_load_or_16>;
1503 def AtomicLoadXor8 : AtomicLoadOp8<atomic_load_xor_8>;
1504 def AtomicLoadXor16 : AtomicLoadOp16<atomic_load_xor_16>;
1507 : Pseudo<(outs), (ins), "atomic_fence", [(atomic_fence timm, timm)]>;
1509 // Store from register to data space.
1510 def STSKRr : F32DM<0b1, (outs),
1514 "sts\t$k, $rd", [(store i8
1517 Requires<[HasSRAM, HasNonTinyEncoding]>;
1519 // Store from register to data space, which is only available on AVRTiny.
1520 def STSKRrTiny : FLDSSTSTINY<0b1, (outs), (ins imm7tiny:$k, LD8:$rd),
1521 "sts\t$k, $rd", []>,
1522 Requires<[HasSRAM, HasTinyEncoding]>;
1524 // STSW K+1:K, Rr+1:Rr
1527 // sts (K+1:K) + 1, Rr+1
1529 def STSWKRr : Pseudo<(outs),
1533 "stsw\t$dst, $src", [(store i16
1536 Requires<[HasSRAM, HasNonTinyEncoding]>;
1540 // Stores the value of Rr into the location addressed by pointer P.
1541 def STPtrRr : FSTLD<1, 0b00, (outs),
1545 "st\t$ptrreg, $reg", [(store GPR8
1548 Requires<[HasSRAM]>;
1551 // Stores the value of Rr into the location addressed by pointer P.
1556 // On reduced tiny cores, this instruction expands to:
1560 def STWPtrRr : Pseudo<(outs),
1564 "stw\t$ptrreg, $reg", [(store i16
1567 Requires<[HasSRAM]>;
1569 // Indirect stores (with postincrement or predecrement).
1570 let Constraints = "$ptrreg = $base_wb,@earlyclobber $base_wb" in {
1573 // Stores the value of Rr into the location addressed by pointer P.
1574 // Post increments P.
1575 def STPtrPiRr : FSTLD<1, 0b01,
1582 "st\t$ptrreg+, $reg", [(set i16
1583 : $base_wb, (post_store GPR8
1587 Requires<[HasSRAM]>;
1590 // Stores the value of Rr into the location addressed by pointer P.
1591 // Post increments P.
1596 def STWPtrPiRr : Pseudo<(outs PTRREGS
1602 "stw\t$ptrreg+, $trh", [(set PTRREGS
1603 : $base_wb, (post_store DREGS
1607 Requires<[HasSRAM]>;
1610 // Stores the value of Rr into the location addressed by pointer P.
1611 // Pre decrements P.
1612 def STPtrPdRr : FSTLD<1, 0b10,
1619 "st\t-$ptrreg, $reg", [(set i16
1620 : $base_wb, (pre_store GPR8
1624 Requires<[HasSRAM]>;
1627 // Stores the value of Rr into the location addressed by pointer P.
1628 // Pre decrements P.
1633 def STWPtrPdRr : Pseudo<(outs PTRREGS
1639 "stw\t-$ptrreg, $reg", [(set PTRREGS
1640 : $base_wb, (pre_store i16
1644 Requires<[HasSRAM]>;
1647 // Store indirect with displacement operations.
1649 // Stores the value of Rr into the location addressed by pointer P with a
1650 // displacement of q. Does not modify P.
1651 def STDPtrQRr : FSTDLDD<1, (outs),
1655 "std\t$memri, $reg", [(store i8
1658 Requires<[HasSRAM, HasNonTinyEncoding]>;
1660 // STDW P+q, Rr+1:Rr
1661 // Stores the value of Rr into the location addressed by pointer P with a
1662 // displacement of q. Does not modify P.
1667 // On reduced tiny cores, this instruction expands to:
1672 def STDWPtrQRr : Pseudo<(outs),
1676 "stdw\t$memri, $src", [(store i16
1679 Requires<[HasSRAM]>;
1681 // Load program memory operations.
1682 let canFoldAsLoad = 1, isReMaterializable = 1, mayLoad = 1,
1683 hasSideEffects = 0 in {
1685 Uses = [R31R30] in def LPM
1686 : F16<0b1001010111001000, (outs), (ins), "lpm", []>,
1689 def LPMRdZ : FLPMX<0, 0,
1694 "lpm\t$rd, $z", []>,
1695 Requires<[HasLPMX]>;
1697 // Load program memory, while postincrementing the Z register.
1698 let Defs = [R31R30] in {
1699 def LPMRdZPi : FLPMX<0, 1,
1704 "lpm\t$rd, $z+", []>,
1705 Requires<[HasLPMX]>;
1707 let Constraints = "@earlyclobber $dst" in
1708 def LPMWRdZ : Pseudo<(outs DREGS
1712 "lpmw\t$dst, $z", []>,
1713 Requires<[HasLPMX]>;
1715 def LPMWRdZPi : Pseudo<(outs DREGS
1719 "lpmw\t$dst, $z+", []>,
1720 Requires<[HasLPMX]>;
1724 // Extended load program memory operations.
1725 let mayLoad = 1, hasSideEffects = 0 in {
1727 Uses = [R31R30] in def ELPM
1728 : F16<0b1001010111011000, (outs), (ins), "elpm", []>,
1729 Requires<[HasELPM]>;
1731 def ELPMRdZ : FLPMX<1, 0, (outs GPR8:$rd), (ins ZREG:$z),
1732 "elpm\t$rd, $z", []>,
1733 Requires<[HasELPMX]>;
1735 let Defs = [R31R30] in {
1736 def ELPMRdZPi : FLPMX<1, 1, (outs GPR8:$rd), (ins ZREG:$z),
1737 "elpm\t$rd, $z+", []>,
1738 Requires<[HasELPMX]>;
1741 // These pseudos are combinations of the OUT and ELPM instructions.
1742 let Defs = [R31R30], hasSideEffects = 1 in {
1743 def ELPMBRdZ : Pseudo<(outs GPR8:$dst), (ins ZREG:$z, LD8:$p),
1744 "elpmb\t$dst, $z, $p", []>,
1745 Requires<[HasELPMX]>;
1747 let Constraints = "@earlyclobber $dst" in
1748 def ELPMWRdZ : Pseudo<(outs DREGS:$dst), (ins ZREG:$z, LD8:$p),
1749 "elpmw\t$dst, $z, $p", []>,
1750 Requires<[HasELPMX]>;
1752 def ELPMBRdZPi : Pseudo<(outs GPR8:$dst), (ins ZREG:$z, LD8:$p),
1753 "elpmb\t$dst, $z+, $p", []>,
1754 Requires<[HasELPMX]>;
1756 def ELPMWRdZPi : Pseudo<(outs DREGS:$dst), (ins ZREG:$z, LD8:$p),
1757 "elpmw\t$dst, $z+, $p", []>,
1758 Requires<[HasELPMX]>;
1762 // Store program memory operations.
1763 let Uses = [R1, R0] in {
1764 let Uses = [R31R30, R1, R0] in def SPM
1765 : F16<0b1001010111101000, (outs), (ins), "spm", []>,
1768 let Defs = [R31R30] in def SPMZPi : F16<0b1001010111111000, (outs),
1772 Requires<[HasSPMX]>;
1775 // Operations that read data from IO locations.
1776 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1777 def INRdA : FIORdA<(outs GPR8
1781 "in\t$rd, $A", [(set i8
1782 : $rd, (load ioaddr8
1785 def INWRdA : Pseudo<(outs DREGS
1789 "inw\t$dst, $src", [(set i16
1790 : $dst, (load ioaddr16
1794 // Operations that write data to IO locations.
1795 def OUTARr : FIOARr<(outs),
1799 "out\t$A, $rr", [(store i8
1803 def OUTWARr : Pseudo<(outs),
1807 "outw\t$dst, $src", [(store i16
1811 // Stack push/pop operations.
1812 let Defs = [SP], Uses = [SP], hasSideEffects = 0 in {
1813 // Stack push operations.
1814 let mayStore = 1 in {
1815 def PUSHRr : FRd<0b1001, 0b0011111, (outs),
1819 Requires<[HasSRAM]>;
1821 def PUSHWRr : Pseudo<(outs),
1825 Requires<[HasSRAM]>;
1828 // Stack pop operations.
1829 let mayLoad = 1 in {
1830 def POPRd : FRd<0b1001, 0b0001111,
1833 (ins), "pop\t$rd", []>,
1834 Requires<[HasSRAM]>;
1836 def POPWRd : Pseudo<(outs DREGS
1838 (ins), "popw\t$reg", []>,
1839 Requires<[HasSRAM]>;
1843 // Read-Modify-Write (RMW) instructions.
1844 def XCHZRd : FZRd<0b100,
1849 "xch\t$z, $rd", []>,
1850 Requires<[SupportsRMW]>;
1852 def LASZRd : FZRd<0b101,
1857 "las\t$z, $rd", []>,
1858 Requires<[SupportsRMW]>;
1860 def LACZRd : FZRd<0b110,
1865 "lac\t$z, $rd", []>,
1866 Requires<[SupportsRMW]>;
1868 def LATZRd : FZRd<0b111,
1873 "lat\t$z, $rd", []>,
1874 Requires<[SupportsRMW]>;
1876 //===----------------------------------------------------------------------===//
1877 // Bit and bit-test instructions
1878 //===----------------------------------------------------------------------===//
1880 // Bit shift/rotate operations.
1881 let Constraints = "$src = $rd", Defs = [SREG] in {
1882 // 8-bit LSL is an alias of ADD Rd, Rd
1884 def LSLWRd : Pseudo<(outs DREGS
1894 def LSLWHiRd : Pseudo<(outs DREGS:$rd), (ins DREGS:$src), "lslwhi\t$rd",
1895 [(set i16:$rd, (AVRlslhi i16:$src)), (implicit SREG)]>;
1897 def LSLWNRd : Pseudo<(outs DLDREGS
1902 "lslwn\t$rd, $bits", [
1904 : $rd, (AVRlslwn i16
1910 def LSLBNRd : Pseudo<(outs LD8
1915 "lslbn\t$rd, $bits", [
1924 : FRd<0b1001, 0b0100110,
1929 "lsr\t$rd", [(set i8
1934 def LSRWRd : Pseudo<(outs DREGS
1944 def LSRWLoRd : Pseudo<(outs DREGS:$rd), (ins DREGS:$src), "lsrwlo\t$rd",
1945 [(set i16:$rd, (AVRlsrlo i16:$src)), (implicit SREG)]>;
1947 def LSRWNRd : Pseudo<(outs DLDREGS
1952 "lsrwn\t$rd, $bits", [
1954 : $rd, (AVRlsrwn i16
1960 def LSRBNRd : Pseudo<(outs LD8
1965 "lsrbn\t$rd, $bits", [
1974 : FRd<0b1001, 0b0100101,
1979 "asr\t$rd", [(set i8
1984 def ASRWNRd : Pseudo<(outs DREGS
1989 "asrwn\t$rd, $bits", [
1991 : $rd, (AVRasrwn i16
1997 def ASRBNRd : Pseudo<(outs LD8
2002 "asrbn\t$rd, $bits", [
2010 def ASRWRd : Pseudo<(outs DREGS
2020 def ASRWLoRd : Pseudo<(outs DREGS:$rd), (ins DREGS:$src), "asrwlo\t$rd",
2021 [(set i16:$rd, (AVRasrlo i16:$src)), (implicit SREG)]>;
2023 let hasSideEffects=0 in
2024 def ROLBRd : Pseudo<(outs GPR8
2026 (ins GPR8:$src, GPR8:$zero),
2030 def RORBRd : Pseudo<(outs GPR8
2040 // Bit rotate operations.
2041 let Uses = [SREG] in {
2044 : Pseudo<(outs DREGS
2054 def RORRd : FRd<0b1001, 0b0100111,
2062 : Pseudo<(outs DREGS
2075 // Swaps the high and low nibbles in a register.
2077 "$src = $rd" in def SWAPRd : FRd<0b1001, 0b0100010,
2082 "swap\t$rd", [(set i8
2086 // IO register bit set/clear operations.
2087 //: TODO: add patterns when popcount(imm)==2 to be expanded with 2 sbi/cbi
2088 // instead of in+ori+out which requires one more instr.
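// For example, the patterns below turn a read-modify-write of a single bit in
// a low IO register, such as (store (or (load ioaddr), 0x04), ioaddr), into a
// single `sbi addr, 2` rather than an in+ori+out sequence.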
2089 def SBIAb : FIOBIT<0b10, (outs),
2093 "sbi\t$addr, $b", [(store(or(i8(load lowioaddr8
2100 def CBIAb : FIOBIT<0b00, (outs),
2104 "cbi\t$addr, $b", [(store(and(i8(load lowioaddr8
2111 // Status register bit load/store operations.
2112 let Defs = [SREG] in def BST : FRdB<0b01, (outs),
2116 "bst\t$rd, $b", []>;
2118 let Constraints = "$src = $rd",
2119 Uses = [SREG] in def BLD : FRdB<0b00,
2125 "bld\t$rd, $b", []>;
2127 def CBR : InstAlias<"cbr\t$rd, $k", (ANDIRdK LD8 : $rd, imm_com8 : $k), 0>;
2130 // Alias for EOR Rd, Rd
2132 // Clears all bits in a register.
2133 def CLR : InstAlias<"clr\t$rd", (EORRdRr GPR8 : $rd, GPR8 : $rd)>;
2136 // Alias for ADD Rd, Rd
2138 // Logical shift left one bit.
2139 def LSL : InstAlias<"lsl\t$rd", (ADDRdRr GPR8 : $rd, GPR8 : $rd)>;
2141 def ROL : InstAlias<"rol\t$rd", (ADCRdRr GPR8 : $rd, GPR8 : $rd)>;
2144 // Alias for LDI Rd, 0xff
2146 // Sets all bits in a register.
2147 def : InstAlias<"ser\t$rd", (LDIRdK LD8 : $rd, 0xff), 0>;
2149 let hasSideEffects=1 in {
2150 let Defs = [SREG] in def BSETs : FS<0,
2155 let Defs = [SREG] in def BCLRs : FS<1,
2161 // Set/clear aliases for the carry (C) status flag (bit 0).
2162 def : InstAlias<"sec", (BSETs 0)>;
2163 def : InstAlias<"clc", (BCLRs 0)>;
2165 // Set/clear aliases for the zero (Z) status flag (bit 1).
2166 def : InstAlias<"sez", (BSETs 1)>;
2167 def : InstAlias<"clz", (BCLRs 1)>;
2169 // Set/clear aliases for the negative (N) status flag (bit 2).
2170 def : InstAlias<"sen", (BSETs 2)>;
2171 def : InstAlias<"cln", (BCLRs 2)>;
2173 // Set/clear aliases for the overflow (V) status flag (bit 3).
2174 def : InstAlias<"sev", (BSETs 3)>;
2175 def : InstAlias<"clv", (BCLRs 3)>;
2177 // Set/clear aliases for the sign (S) status flag (bit 4).
2178 def : InstAlias<"ses", (BSETs 4)>;
2179 def : InstAlias<"cls", (BCLRs 4)>;
2181 // Set/clear aliases for the half-carry (H) status flag (bit 5).
2182 def : InstAlias<"seh", (BSETs 5)>;
2183 def : InstAlias<"clh", (BCLRs 5)>;
2185 // Set/clear aliases for the T status flag (bit 6).
2186 def : InstAlias<"set", (BSETs 6)>;
2187 def : InstAlias<"clt", (BCLRs 6)>;
2189 // Set/clear aliases for the interrupt (I) status flag (bit 7).
2190 def : InstAlias<"sei", (BSETs 7)>;
2191 def : InstAlias<"cli", (BCLRs 7)>;
2193 //===----------------------------------------------------------------------===//
2194 // Special/Control instructions
2195 //===----------------------------------------------------------------------===//
2198 // Breakpoint instruction
2200 // <|1001|0101|1001|1000>
2201 def BREAK : F16<0b1001010110011000, (outs), (ins), "break", []>,
2202 Requires<[HasBREAK]>;
2205 // No-operation instruction
2207 // <|0000|0000|0000|0000>
2208 def NOP : F16<0b0000000000000000, (outs), (ins), "nop", []>;
2211 // Sleep instruction
2213 // <|1001|0101|1000|1000>
2214 def SLEEP : F16<0b1001010110001000, (outs), (ins), "sleep", []>;
2219 // <|1001|0101|1010|1000>
2220 def WDR : F16<0b1001010110101000, (outs), (ins), "wdr", []>;
2222 //===----------------------------------------------------------------------===//
2223 // Pseudo instructions for later expansion
2224 //===----------------------------------------------------------------------===//
2226 //: TODO: Optimize this for wider types AND optimize the following code
2227 //   (compile `int foo(char a, char b, char c, char d) { return d + b; }`),
2228 // which looks like a missed sext_inreg opportunity.
2230 : ExtensionPseudo<(outs DREGS
2241 : ExtensionPseudo<(outs DREGS
2251 // This pseudo gets expanded into a movw+adiw sequence, so it clobbers SREG.
2253 hasSideEffects = 0 in def FRMIDX : Pseudo<(outs DLDREGS
2258 "frmidx\t$dst, $src, $src2", []>;
2260 // This pseudo is either converted to a regular store or a push which clobbers
2262 def STDSPQRr : StorePseudo<(outs),
2266 "stdstk\t$dst, $src", [(store i8
2270 // This pseudo is either converted to a regular store or a push which clobbers
2272 def STDWSPQRr : StorePseudo<(outs),
2276 "stdwstk\t$dst, $src", [(store i16
2280 // SP read/write pseudos.
2281 let hasSideEffects = 0 in {
2282 let Uses = [SP] in def SPREAD : Pseudo<(outs DREGS
2286 "spread\t$dst, $src", []>;
2288 let Defs = [SP] in def SPWRITE : Pseudo<(outs GPRSP
2292 "spwrite\t$dst, $src", []>;
2295 def Select8 : SelectPseudo<(outs GPR8
2301 "# Select8 PSEUDO", [(set i8
2302 : $dst, (AVRselectcc i8
2307 def Select16 : SelectPseudo<(outs DREGS
2313 "# Select16 PSEUDO", [(set i16
2314 : $dst, (AVRselectcc i16
2319 def Lsl8 : ShiftPseudo<(outs GPR8
2324 "# Lsl8 PSEUDO", [(set i8
2325 : $dst, (AVRlslLoop i8
2329 def Lsl16 : ShiftPseudo<(outs DREGS
2334 "# Lsl16 PSEUDO", [(set i16
2335 : $dst, (AVRlslLoop i16
2339 def Lsr8 : ShiftPseudo<(outs GPR8
2344 "# Lsr8 PSEUDO", [(set i8
2345 : $dst, (AVRlsrLoop i8
2349 def Lsr16 : ShiftPseudo<(outs DREGS
2354 "# Lsr16 PSEUDO", [(set i16
2355 : $dst, (AVRlsrLoop i16
2359 def Rol8 : ShiftPseudo<(outs GPR8
2364 "# Rol8 PSEUDO", [(set i8
2365 : $dst, (AVRrolLoop i8
2369 def Rol16 : ShiftPseudo<(outs DREGS
2374 "# Rol16 PSEUDO", [(set i16
2375 : $dst, (AVRrolLoop i16
2379 def Ror8 : ShiftPseudo<(outs GPR8
2384 "# Ror8 PSEUDO", [(set i8
2385 : $dst, (AVRrorLoop i8
2389 def Ror16 : ShiftPseudo<(outs DREGS
2394 "# Ror16 PSEUDO", [(set i16
2395 : $dst, (AVRrorLoop i16
2399 def Asr8 : ShiftPseudo<(outs GPR8
2404 "# Asr8 PSEUDO", [(set i8
2405 : $dst, (AVRasrLoop i8
2409 def Asr16 : ShiftPseudo<(outs DREGS
2414 "# Asr16 PSEUDO", [(set i16
2415 : $dst, (AVRasrLoop i16
2419 // lowered to a copy from the zero register.
2420 let usesCustomInserter=1 in
2421 def CopyZero : Pseudo<(outs GPR8:$rd), (ins), "clrz\t$rd", [(set i8:$rd, 0)]>;
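// Using a copy (rather than `ldi Rd, 0`) works for any GPR8 register, since
// `ldi` can only target the upper registers r16-r31.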
2423 //===----------------------------------------------------------------------===//
2424 // Non-Instruction Patterns
2425 //===----------------------------------------------------------------------===//
2427 //: TODO: look in x86InstrCompiler.td for odd encoding trick related to
2428 // add x, 128 -> sub x, -128. Clang is emitting an eor for this (ldi+eor)
2430 // The ADD instruction always writes the carry flag.
2431 def : Pat<(addc i8 : $src, i8 : $src2), (ADDRdRr i8 : $src, i8 : $src2)>;
2432 def : Pat<(addc DREGS
2439 // All SUB instruction variants always write the carry flag.
2440 def : Pat<(subc i8 : $src, i8 : $src2), (SUBRdRr i8 : $src, i8 : $src2)>;
2441 def : Pat<(subc i16 : $src, i16 : $src2), (SUBWRdRr i16 : $src, i16 : $src2)>;
2442 def : Pat<(subc i8 : $src, imm : $src2), (SUBIRdK i8 : $src, imm : $src2)>;
2443 def : Pat<(subc i16 : $src, imm : $src2), (SUBIWRdK i16 : $src, imm : $src2)>;
2445 // These patterns convert add (x, -imm) to sub (x, imm) since we don't have
2446 // any add-with-immediate instructions. They also handle the adiw/sbiw instructions.
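// For example, a node like (add i8:$x, -5) ends up selected as SUBIRdK with
// the immediate 5; imm8_neg_XFORM (defined earlier) supplies the negated
// constant.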
2448 : $src1, imm0_63_neg
2451 : $src1, (imm0_63_neg
2453 Requires<[HasADDSUBIW]>;
2458 : $src1, (imm16_neg_XFORM imm
2464 : $src1, (imm16_neg_XFORM imm
2471 : $src1, (imm8_neg_XFORM imm
2477 : $src1, (imm8_neg_XFORM imm
2483 : $src1, (imm8_neg_XFORM imm
2486 // Emit NEGWRd with an extra zero register operand.
2487 def : Pat<(ineg i16:$src),
2488 (NEGWRd i16:$src, (CopyZero))>;
2491 let Predicates = [HasJMPCALL] in {
2492 def : Pat<(AVRcall(i16 tglobaladdr:$dst)), (CALLk tglobaladdr:$dst)>;
2493 def : Pat<(AVRcall(i16 texternalsym:$dst)), (CALLk texternalsym:$dst)>;
2495 def : Pat<(AVRcall(i16 tglobaladdr:$dst)), (RCALLk tglobaladdr:$dst)>;
2496 def : Pat<(AVRcall(i16 texternalsym:$dst)), (RCALLk texternalsym:$dst)>;
2499 def : Pat<(i16(anyext i8
2501 (INSERT_SUBREG(i16(IMPLICIT_DEF)), i8
2505 def : Pat<(i8(trunc i16 : $src)), (EXTRACT_SUBREG i16 : $src, sub_lo)>;
2508 def : Pat<(sext_inreg i16
2510 (SEXT(i8(EXTRACT_SUBREG i16
2514 def : Pat<(i16(AVRWrapper tglobaladdr : $dst)), (LDIWRdK tglobaladdr : $dst)>;
2516 : $src, (AVRWrapper tglobaladdr
2521 def : Pat<(i8(load(AVRWrapper tglobaladdr:$dst))),
2522 (LDSRdK tglobaladdr:$dst)>,
2523 Requires<[HasSRAM, HasNonTinyEncoding]>;
2524 def : Pat<(i16(load(AVRWrapper tglobaladdr:$dst))),
2525 (LDSWRdK tglobaladdr:$dst)>,
2526 Requires<[HasSRAM, HasNonTinyEncoding]>;
2527 def : Pat<(store i8:$src, (i16(AVRWrapper tglobaladdr:$dst))),
2528 (STSKRr tglobaladdr:$dst, i8:$src)>,
2529 Requires<[HasSRAM, HasNonTinyEncoding]>;
2530 def : Pat<(store i16:$src, (i16(AVRWrapper tglobaladdr:$dst))),
2531 (STSWKRr tglobaladdr:$dst, i16:$src)>,
2532 Requires<[HasSRAM, HasNonTinyEncoding]>;
2535 def : Pat<(i16(AVRWrapper tblockaddress
2537 (LDIWRdK tblockaddress
2540 def : Pat<(i8(trunc(AVRlsrwn DLDREGS
2542 (EXTRACT_SUBREG DREGS
2545 // :FIXME: DAGCombiner produces an shl node after legalization from this sequence:
2546 // BR_JT -> (mul x, 2) -> (shl x, 1)
2547 def : Pat<(shl i16 : $src1, (i8 1)), (LSLWRd i16 : $src1)>;
2549 // Lowering of 'tst' node to 'TST' instruction.
2550 // TST is an alias of AND Rd, Rd.
2551 def : Pat<(AVRtst i8 : $rd), (ANDRdRr GPR8 : $rd, GPR8 : $rd)>;
2553 // Lowering of 'lsl' node to 'LSL' instruction.
2554 // LSL is an alias of 'ADD Rd, Rd'
2555 def : Pat<(AVRlsl i8 : $rd), (ADDRdRr GPR8 : $rd, GPR8 : $rd)>;