//===-- LanaiInstrInfo.cpp - Lanai Instruction Information ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Lanai implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "LanaiInstrInfo.h"
#include "LanaiAluCode.h"
#include "LanaiCondCode.h"
#include "MCTargetDesc/LanaiBaseInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;
#define GET_INSTRINFO_CTOR_DTOR
#include "LanaiGenInstrInfo.inc"

LanaiInstrInfo::LanaiInstrInfo()
    : LanaiGenInstrInfo(Lanai::ADJCALLSTACKDOWN, Lanai::ADJCALLSTACKUP),
      RegisterInfo() {}
void LanaiInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator Position,
                                 const DebugLoc &DL,
                                 MCRegister DestinationRegister,
                                 MCRegister SourceRegister,
                                 bool KillSource) const {
  if (!Lanai::GPRRegClass.contains(DestinationRegister, SourceRegister)) {
    llvm_unreachable("Impossible reg-to-reg copy");
  }

  BuildMI(MBB, Position, DL, get(Lanai::OR_I_LO), DestinationRegister)
      .addReg(SourceRegister, getKillRegState(KillSource))
      .addImm(0);
}
void LanaiInstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
    Register SourceRegister, bool IsKill, int FrameIndex,
    const TargetRegisterClass *RegisterClass,
    const TargetRegisterInfo * /*RegisterInfo*/, Register /*VReg*/) const {
  DebugLoc DL;
  if (Position != MBB.end()) {
    DL = Position->getDebugLoc();
  }

  if (!Lanai::GPRRegClass.hasSubClassEq(RegisterClass)) {
    llvm_unreachable("Can't store this register to stack slot");
  }
  BuildMI(MBB, Position, DL, get(Lanai::SW_RI))
      .addReg(SourceRegister, getKillRegState(IsKill))
      .addFrameIndex(FrameIndex)
      .addImm(0)
      .addImm(LPAC::ADD);
}
void LanaiInstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
    Register DestinationRegister, int FrameIndex,
    const TargetRegisterClass *RegisterClass,
    const TargetRegisterInfo * /*RegisterInfo*/, Register /*VReg*/) const {
  DebugLoc DL;
  if (Position != MBB.end()) {
    DL = Position->getDebugLoc();
  }

  if (!Lanai::GPRRegClass.hasSubClassEq(RegisterClass)) {
    llvm_unreachable("Can't load this register from stack slot");
  }
  BuildMI(MBB, Position, DL, get(Lanai::LDW_RI), DestinationRegister)
      .addFrameIndex(FrameIndex)
      .addImm(0)
      .addImm(LPAC::ADD);
}
bool LanaiInstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {
  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If the
  // base registers are identical, and the offset of the lower memory access
  // plus its width does not reach the offset of the higher memory access,
  // then the memory accesses are disjoint.
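  // Illustrative example (not taken from the source): with the same base
  // register, a 4-byte access at offset 0 and a 2-byte access at offset 8
  // give LowOffset + LowWidth = 0 + 4 <= 8 = HighOffset, so the accesses
  // cannot overlap; with offsets 0 and 2 the check below fails and a possible
  // overlap is conservatively reported.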
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  int64_t OffsetA = 0, OffsetB = 0;
  LocationSize WidthA = 0, WidthB = 0;
  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
      int LowOffset = std::min(OffsetA, OffsetB);
      int HighOffset = std::max(OffsetA, OffsetB);
      LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowWidth.hasValue() &&
          LowOffset + (int)LowWidth.getValue() <= HighOffset)
        return true;
    }
  }
  return false;
}
bool LanaiInstrInfo::expandPostRAPseudo(MachineInstr & /*MI*/) const {
  return false;
}
static LPCC::CondCode getOppositeCondition(LPCC::CondCode CC) {
  switch (CC) {
  case LPCC::ICC_T: // true
    return LPCC::ICC_F;
  case LPCC::ICC_F: // false
    return LPCC::ICC_T;
  case LPCC::ICC_HI: // high
    return LPCC::ICC_LS;
  case LPCC::ICC_LS: // low or same
    return LPCC::ICC_HI;
  case LPCC::ICC_CC: // carry cleared
    return LPCC::ICC_CS;
  case LPCC::ICC_CS: // carry set
    return LPCC::ICC_CC;
  case LPCC::ICC_NE: // not equal
    return LPCC::ICC_EQ;
  case LPCC::ICC_EQ: // equal
    return LPCC::ICC_NE;
  case LPCC::ICC_VC: // oVerflow cleared
    return LPCC::ICC_VS;
  case LPCC::ICC_VS: // oVerflow set
    return LPCC::ICC_VC;
  case LPCC::ICC_PL: // plus (note: 0 is "minus" too here)
    return LPCC::ICC_MI;
  case LPCC::ICC_MI: // minus
    return LPCC::ICC_PL;
  case LPCC::ICC_GE: // greater than or equal
    return LPCC::ICC_LT;
  case LPCC::ICC_LT: // less than
    return LPCC::ICC_GE;
  case LPCC::ICC_GT: // greater than
    return LPCC::ICC_LE;
  case LPCC::ICC_LE: // less than or equal
    return LPCC::ICC_GT;
  default:
    llvm_unreachable("Invalid conditional code");
  }
}
std::pair<unsigned, unsigned>
LanaiInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  return std::make_pair(TF, 0u);
}
ArrayRef<std::pair<unsigned, const char *>>
LanaiInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace LanaiII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_ABS_HI, "lanai-hi"},
      {MO_ABS_LO, "lanai-lo"},
      {MO_NO_FLAG, "lanai-nf"}};
  return ArrayRef(TargetFlags);
}
bool LanaiInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                                    Register &SrcReg2, int64_t &CmpMask,
                                    int64_t &CmpValue) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case Lanai::SFSUB_F_RI_LO:
  case Lanai::SFSUB_F_RI_HI:
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = Register();
    CmpMask = ~0;
    CmpValue = MI.getOperand(1).getImm();
    return true;
  case Lanai::SFSUB_F_RR:
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = MI.getOperand(1).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  }

  return false;
}
// isRedundantFlagInstr - check whether the first instruction, whose only
// purpose is to update flags, can be made redundant.
// * SFSUB_F_RR can be made redundant by SUB_R if the operands are the same.
// * SFSUB_F_RI_LO/HI can be made redundant by SUB_I_LO/HI if the operands
//   are the same.
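// For example (a sketch, register names hypothetical):
//   %a = SUB_R %x, %y     ; already computes x - y
//   SFSUB_F_RR %x, %y     ; only sets flags for x - y
// Here the compare is redundant once SUB_R is rewritten to its flag-setting
// variant; the check below also accepts the SUB operands in swapped order.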
inline static bool isRedundantFlagInstr(MachineInstr *CmpI, unsigned SrcReg,
                                        unsigned SrcReg2, int64_t ImmValue,
                                        MachineInstr *OI) {
  if (CmpI->getOpcode() == Lanai::SFSUB_F_RR &&
      OI->getOpcode() == Lanai::SUB_R &&
      ((OI->getOperand(1).getReg() == SrcReg &&
        OI->getOperand(2).getReg() == SrcReg2) ||
       (OI->getOperand(1).getReg() == SrcReg2 &&
        OI->getOperand(2).getReg() == SrcReg)))
    return true;

  if (((CmpI->getOpcode() == Lanai::SFSUB_F_RI_LO &&
        OI->getOpcode() == Lanai::SUB_I_LO) ||
       (CmpI->getOpcode() == Lanai::SFSUB_F_RI_HI &&
        OI->getOpcode() == Lanai::SUB_I_HI)) &&
      OI->getOperand(1).getReg() == SrcReg &&
      OI->getOperand(2).getImm() == ImmValue)
    return true;

  return false;
}
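// Sketch of the rewrite this mapping enables (registers hypothetical):
// optimizeCompareInstr turns
//   %a = SUB_R %x, %y
//   SFSUB_F_RR %x, %y
// into a single flag-setting subtraction
//   %a = SUB_F_R %x, %y   ; also defines SR
// and erases the compare.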
// Map an opcode to its flag-setting (*_F_*) variant, or to Lanai::NOP if no
// such variant exists.
inline static unsigned flagSettingOpcodeVariant(unsigned OldOpcode) {
  switch (OldOpcode) {
  case Lanai::ADD_I_HI:
    return Lanai::ADD_F_I_HI;
  case Lanai::ADD_I_LO:
    return Lanai::ADD_F_I_LO;
  case Lanai::ADD_R:
    return Lanai::ADD_F_R;
  case Lanai::ADDC_I_HI:
    return Lanai::ADDC_F_I_HI;
  case Lanai::ADDC_I_LO:
    return Lanai::ADDC_F_I_LO;
  case Lanai::ADDC_R:
    return Lanai::ADDC_F_R;
  case Lanai::AND_I_HI:
    return Lanai::AND_F_I_HI;
  case Lanai::AND_I_LO:
    return Lanai::AND_F_I_LO;
  case Lanai::AND_R:
    return Lanai::AND_F_R;
  case Lanai::OR_I_HI:
    return Lanai::OR_F_I_HI;
  case Lanai::OR_I_LO:
    return Lanai::OR_F_I_LO;
  case Lanai::OR_R:
    return Lanai::OR_F_R;
  case Lanai::SL_I:
    return Lanai::SL_F_I;
  case Lanai::SRL_R:
    return Lanai::SRL_F_R;
  case Lanai::SA_I:
    return Lanai::SA_F_I;
  case Lanai::SRA_R:
    return Lanai::SRA_F_R;
  case Lanai::SUB_I_HI:
    return Lanai::SUB_F_I_HI;
  case Lanai::SUB_I_LO:
    return Lanai::SUB_F_I_LO;
  case Lanai::SUB_R:
    return Lanai::SUB_F_R;
  case Lanai::SUBB_I_HI:
    return Lanai::SUBB_F_I_HI;
  case Lanai::SUBB_I_LO:
    return Lanai::SUBB_F_I_LO;
  case Lanai::SUBB_R:
    return Lanai::SUBB_F_R;
  case Lanai::XOR_I_HI:
    return Lanai::XOR_F_I_HI;
  case Lanai::XOR_I_LO:
    return Lanai::XOR_F_I_LO;
  case Lanai::XOR_R:
    return Lanai::XOR_F_R;
  default:
    return Lanai::NOP;
  }
}
bool LanaiInstrInfo::optimizeCompareInstr(
    MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2,
    int64_t /*CmpMask*/, int64_t CmpValue,
    const MachineRegisterInfo *MRI) const {
  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  // Get ready to iterate backward from CmpInstr.
  MachineBasicBlock::iterator I = CmpInstr, E = MI,
                              B = CmpInstr.getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B)
    return false;

  // There are two possible candidates which can be changed to set SR:
  // one is MI, the other is a SUB instruction.
  // * For SFSUB_F_RR(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
  // * For SFSUB_F_RI(r1, CmpValue), we are looking for SUB(r1, CmpValue).
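  // Illustrative example (a sketch, virtual registers hypothetical): for
  //   %r1 = ADD_R %r2, %r3
  //   SFSUB_F_RI_LO %r1, 0
  // the ADD is MI and can simply become ADD_F_R; for SFSUB_F_RR %r1, %r2 a
  // separate SUB(%r1, %r2) or SUB(%r2, %r1) found while scanning backwards
  // becomes Sub.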
  MachineInstr *Sub = nullptr;
  if (SrcReg2 != 0)
    // MI is not a candidate to transform into a flag setting instruction.
    MI = nullptr;
  else if (MI->getParent() != CmpInstr.getParent() || CmpValue != 0) {
    // Conservatively refuse to convert an instruction which isn't in the same
    // BB as the comparison. Don't return if SFSUB_F_RI and CmpValue != 0 as
    // Sub may still be a candidate.
    if (CmpInstr.getOpcode() == Lanai::SFSUB_F_RI_LO)
      MI = nullptr;
    else
      return false;
  }

  // Check that SR isn't set between the comparison instruction and the
  // instruction we want to change while searching for Sub.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(Lanai::SR, TRI) ||
        Instr.readsRegister(Lanai::SR, TRI))
      // This instruction modifies or uses SR after the one we want to change.
      // We can't do this transformation.
      return false;

    // Check whether CmpInstr can be made redundant by the current instruction.
    if (isRedundantFlagInstr(&CmpInstr, SrcReg, SrcReg2, CmpValue, &*I)) {
      Sub = &*I;
      break;
    }

    // Don't search outside the containing basic block.
    if (I == B)
      return false;
  }

  // Return false if no candidates exist.
  if (!MI && !Sub)
    return false;

  // The single candidate is called MI.
  if (!MI)
    MI = Sub;

  if (flagSettingOpcodeVariant(MI->getOpcode()) != Lanai::NOP) {
    bool isSafe = false;

    SmallVector<std::pair<MachineOperand *, LPCC::CondCode>, 4>
        OperandsToUpdate;
    I = CmpInstr;
    E = CmpInstr.getParent()->end();
    while (!isSafe && ++I != E) {
      const MachineInstr &Instr = *I;
      for (unsigned IO = 0, EO = Instr.getNumOperands(); !isSafe && IO != EO;
           ++IO) {
        const MachineOperand &MO = Instr.getOperand(IO);
        if (MO.isRegMask() && MO.clobbersPhysReg(Lanai::SR)) {
          isSafe = true;
          break;
        }
        if (!MO.isReg() || MO.getReg() != Lanai::SR)
          continue;
        if (MO.isDef()) {
          isSafe = true;
          break;
        }
        // Condition code is after the operand before SR.
        LPCC::CondCode CC;
        CC = (LPCC::CondCode)Instr.getOperand(IO - 1).getImm();

        if (Sub != nullptr) {
          LPCC::CondCode NewCC = getOppositeCondition(CC);
          if (NewCC == LPCC::ICC_T)
            return false;
          // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based on
          // CMP needs to be updated to be based on SUB. Push the condition
          // code operands to OperandsToUpdate. If it is safe to remove
          // CmpInstr, the condition code of these operands will be modified.
          if (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
              Sub->getOperand(2).getReg() == SrcReg) {
            OperandsToUpdate.push_back(
                std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
          }
        } else {
          // No Sub, so this is x = <op> y, z; cmp x, 0.
          switch (CC) {
          case LPCC::ICC_EQ: // Z
          case LPCC::ICC_NE: // Z
          case LPCC::ICC_MI: // N
          case LPCC::ICC_PL: // N
          case LPCC::ICC_F:  // none
          case LPCC::ICC_T:  // none
            // SR can be used multiple times, we should continue.
            break;
          case LPCC::ICC_CS: // C
          case LPCC::ICC_CC: // C
          case LPCC::ICC_VS: // V
          case LPCC::ICC_VC: // V
          case LPCC::ICC_HI: // C Z
          case LPCC::ICC_LS: // C Z
          case LPCC::ICC_GE: // N V
          case LPCC::ICC_LT: // N V
          case LPCC::ICC_GT: // Z N V
          case LPCC::ICC_LE: // Z N V
            // The instruction uses the V bit or C bit which is not safe.
            return false;
          case LPCC::UNKNOWN:
            return false;
          }
        }
      }
    }

    // If SR is not killed nor re-defined, we should check whether it is
    // live-out. If it is live-out, do not optimize.
    if (!isSafe) {
      MachineBasicBlock *MBB = CmpInstr.getParent();
      for (const MachineBasicBlock *Succ : MBB->successors())
        if (Succ->isLiveIn(Lanai::SR))
          return false;
    }

    // Toggle the optional operand to SR.
    MI->setDesc(get(flagSettingOpcodeVariant(MI->getOpcode())));
    MI->addRegisterDefined(Lanai::SR);
    CmpInstr.eraseFromParent();
    return true;
  }

  return false;
}
bool LanaiInstrInfo::analyzeSelect(const MachineInstr &MI,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == Lanai::SELECT && "unknown select instruction");
  // Select operands:
  // 0: Def.
  // 1: True use.
  // 2: False use.
  // 3: Condition code.
  TrueOp = 1;
  FalseOp = 2;
  Cond.push_back(MI.getOperand(3));
  Optimizable = true;
  return false;
}
// Identify instructions that can be folded into a SELECT instruction, and
// return the defining instruction.
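// Folding sketch (virtual register names hypothetical): given
//   %t = ADD_R %a, %b
//   %d = SELECT %t, %f, cc
// where %t has no other use, optimizeSelect below re-emits the ADD as a
// predicated instruction defining %d directly, ties %f to the result for the
// false case, and the SELECT itself is erased by the caller.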
static MachineInstr *canFoldIntoSelect(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  if (!Reg.isVirtual())
    return nullptr;
  if (!MRI.hasOneNonDBGUse(Reg))
    return nullptr;
  MachineInstr *MI = MRI.getVRegDef(Reg);
  if (!MI)
    return nullptr;
  // MI is folded into the SELECT by predicating it.
  if (!MI->isPredicable())
    return nullptr;
  // Check if MI has any non-dead defs or physreg uses. This also detects
  // predicated instructions which will be reading SR.
  for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 1)) {
    // Reject frame index operands.
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return nullptr;
    if (!MO.isReg())
      continue;
    // MI can't have any tied operands, that would conflict with predication.
    if (MO.isTied())
      return nullptr;
    if (MO.getReg().isPhysical())
      return nullptr;
    if (MO.isDef() && !MO.isDead())
      return nullptr;
  }
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(/*AliasAnalysis=*/nullptr, DontMoveAcrossStores))
    return nullptr;
  return MI;
}
MachineInstr *
LanaiInstrInfo::optimizeSelect(MachineInstr &MI,
                               SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                               bool /*PreferFalse*/) const {
  assert(MI.getOpcode() == Lanai::SELECT && "unknown select instruction");
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI = canFoldIntoSelect(MI.getOperand(1).getReg(), MRI);
  bool Invert = !DefMI;
  if (Invert)
    DefMI = canFoldIntoSelect(MI.getOperand(2).getReg(), MRI);
  if (!DefMI)
    return nullptr;

  // Find new register class to use.
  MachineOperand FalseReg = MI.getOperand(Invert ? 1 : 2);
  Register DestReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return nullptr;

  // Create a new predicated version of DefMI.
  MachineInstrBuilder NewMI =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), DefMI->getDesc(), DestReg);

  // Copy all the DefMI operands, excluding its (null) predicate.
  const MCInstrDesc &DefDesc = DefMI->getDesc();
  for (unsigned i = 1, e = DefDesc.getNumOperands();
       i != e && !DefDesc.operands()[i].isPredicate(); ++i)
    NewMI.add(DefMI->getOperand(i));

  unsigned CondCode = MI.getOperand(3).getImm();
  if (Invert)
    NewMI.addImm(getOppositeCondition(LPCC::CondCode(CondCode)));
  else
    NewMI.addImm(CondCode);
  NewMI.copyImplicitOps(MI);

  // The output register value when the predicate is false is an implicit
  // register operand tied to the first def. The tie makes the register
  // allocator ensure the FalseReg is allocated the same register as operand 0.
  FalseReg.setImplicit();
  NewMI.add(FalseReg);
  NewMI->tieOperands(0, NewMI->getNumOperands() - 1);

  // Update SeenMIs set: register newly created MI and erase removed DefMI.
  SeenMIs.insert(NewMI);
  SeenMIs.erase(DefMI);

  // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
  // DefMI would be invalid when transferred inside the loop. Checking for a
  // loop is expensive, but at least remove kill flags if they are in different
  // basic blocks.
  if (DefMI->getParent() != MI.getParent())
    NewMI->clearKillInfo();

  // The caller will erase MI, but not DefMI.
  DefMI->eraseFromParent();
  return NewMI;
}
// The analyzeBranch function is used to examine conditional instructions and
// remove unnecessary instructions. This method is used by BranchFolder and
// IfConverter machine function passes to improve the CFG.
// - TrueBlock is set to the destination if the condition evaluates true (it is
//   the nullptr if the destination is the fall-through branch);
// - FalseBlock is set to the destination if the condition evaluates to false
//   (it is the nullptr if the branch is unconditional);
// - Condition is populated with the machine operands needed to generate the
//   branch to insert in insertBranch;
// Returns: false if the branch could successfully be analyzed.
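// For example (a sketch), a block ending in
//   BRCC %bb.then, ICC_EQ
//   BT %bb.else
// is reported with TrueBlock = %bb.then, FalseBlock = %bb.else and
// Condition = {ICC_EQ}; a block with no branch terminators is reported as a
// plain fall-through.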
bool LanaiInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TrueBlock,
                                   MachineBasicBlock *&FalseBlock,
                                   SmallVectorImpl<MachineOperand> &Condition,
                                   bool AllowModify) const {
  // Iterator to current instruction being considered.
  MachineBasicBlock::iterator Instruction = MBB.end();

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  while (Instruction != MBB.begin()) {
    --Instruction;

    // Skip over debug instructions.
    if (Instruction->isDebugInstr())
      continue;

    // Working from the bottom, when we see a non-terminator
    // instruction, we're done.
    if (!isUnpredicatedTerminator(*Instruction))
      break;

    // A terminator that isn't a branch can't easily be handled
    // by this analysis.
    if (!Instruction->isBranch())
      return true;

    // Handle unconditional branches.
    if (Instruction->getOpcode() == Lanai::BT) {
      if (!AllowModify) {
        TrueBlock = Instruction->getOperand(0).getMBB();
        continue;
      }

      // If the block has any instructions after a branch, delete them.
      MBB.erase(std::next(Instruction), MBB.end());

      Condition.clear();
      FalseBlock = nullptr;

      // Delete the jump if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Instruction->getOperand(0).getMBB())) {
        TrueBlock = nullptr;
        Instruction->eraseFromParent();
        Instruction = MBB.end();
        continue;
      }

      // TrueBlock is used to indicate the unconditional destination.
      TrueBlock = Instruction->getOperand(0).getMBB();
      continue;
    }

    // Handle conditional branches.
    unsigned Opcode = Instruction->getOpcode();
    if (Opcode != Lanai::BRCC)
      return true; // Unknown opcode.

    // Multiple conditional branches are not handled here so only proceed if
    // there are no conditions enqueued.
    if (Condition.empty()) {
      LPCC::CondCode BranchCond =
          static_cast<LPCC::CondCode>(Instruction->getOperand(1).getImm());

      // TrueBlock is the target of the previously seen unconditional branch.
      FalseBlock = TrueBlock;
      TrueBlock = Instruction->getOperand(0).getMBB();
      Condition.push_back(MachineOperand::CreateImm(BranchCond));
      continue;
    }

    // Multiple conditional branches are not handled.
    return true;
  }

  // Return false indicating the branch was successfully analyzed.
  return false;
}
// reverseBranchCondition - Reverses the branch condition of the specified
// condition list, returning false on success and true if it cannot be
// reversed.
bool LanaiInstrInfo::reverseBranchCondition(
    SmallVectorImpl<llvm::MachineOperand> &Condition) const {
  assert((Condition.size() == 1) &&
         "Lanai branch conditions should have one component.");

  LPCC::CondCode BranchCond =
      static_cast<LPCC::CondCode>(Condition[0].getImm());
  Condition[0].setImm(getOppositeCondition(BranchCond));
  return false;
}
// Insert the branch with condition specified in Condition and given targets
// (TrueBlock and FalseBlock). This function returns the number of machine
// instructions inserted.
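// For example (a sketch): with Condition = {ICC_GT}, TrueBlock = %bb.a and
// FalseBlock = %bb.b this emits
//   BRCC %bb.a, ICC_GT
//   BT %bb.b
// and returns 2; with an empty condition it emits a single BT and returns 1.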
unsigned LanaiInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                      MachineBasicBlock *TrueBlock,
                                      MachineBasicBlock *FalseBlock,
                                      ArrayRef<MachineOperand> Condition,
                                      const DebugLoc &DL,
                                      int *BytesAdded) const {
  // Shouldn't be a fall through.
  assert(TrueBlock && "insertBranch must not be told to insert a fallthrough");
  assert(!BytesAdded && "code size not handled");

  // If condition is empty then an unconditional branch is being inserted.
  if (Condition.empty()) {
    assert(!FalseBlock && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(Lanai::BT)).addMBB(TrueBlock);
    return 1;
  }

  // Else a conditional branch is inserted.
  assert((Condition.size() == 1) &&
         "Lanai branch conditions should have one component.");
  unsigned ConditionalCode = Condition[0].getImm();
  BuildMI(&MBB, DL, get(Lanai::BRCC)).addMBB(TrueBlock).addImm(ConditionalCode);

  // If no false block, then false behavior is fall through and no branch needs
  // to be inserted.
  if (!FalseBlock)
    return 1;

  BuildMI(&MBB, DL, get(Lanai::BT)).addMBB(FalseBlock);
  return 2;
}
unsigned LanaiInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator Instruction = MBB.end();
  unsigned Count = 0;

  while (Instruction != MBB.begin()) {
    --Instruction;
    if (Instruction->isDebugInstr())
      continue;
    if (Instruction->getOpcode() != Lanai::BT &&
        Instruction->getOpcode() != Lanai::BRCC) {
      break;
    }

    // Remove the branch.
    Instruction->eraseFromParent();
    Instruction = MBB.end();
    ++Count;
  }

  return Count;
}
Register LanaiInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  if (MI.getOpcode() == Lanai::LDW_RI)
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
  return 0;
}
Register LanaiInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                   int &FrameIndex) const {
  if (MI.getOpcode() == Lanai::LDW_RI) {
    unsigned Reg;
    if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations.
    SmallVector<const MachineMemOperand *, 1> Accesses;
    if (hasLoadFromStackSlot(MI, Accesses)) {
      FrameIndex =
          cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
              ->getFrameIndex();
      return 1;
    }
  }
  return 0;
}
Register LanaiInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  if (MI.getOpcode() == Lanai::SW_RI)
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(2).getReg();
    }
  return 0;
}
bool LanaiInstrInfo::getMemOperandWithOffsetWidth(
    const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
    LocationSize &Width, const TargetRegisterInfo * /*TRI*/) const {
  // Handle only loads/stores with base register followed by immediate offset
  // and with add as ALU op.
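  // Expected operand layout, as implied by the checks below: operand 0 is the
  // loaded/stored register, operand 1 the base register, operand 2 the
  // immediate offset, and operand 3 the ALU opcode, which must be LPAC::ADD.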
  if (LdSt.getNumOperands() != 4)
    return false;
  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm() ||
      !(LdSt.getOperand(3).isImm() && LdSt.getOperand(3).getImm() == LPAC::ADD))
    return false;

  switch (LdSt.getOpcode()) {
  default:
    return false;
  case Lanai::LDW_RI:
  case Lanai::LDW_RR:
  case Lanai::SW_RR:
  case Lanai::SW_RI:
    Width = 4;
    break;
  case Lanai::LDHs_RI:
  case Lanai::LDHz_RI:
  case Lanai::STH_RI:
    Width = 2;
    break;
  case Lanai::LDBs_RI:
  case Lanai::LDBz_RI:
  case Lanai::STB_RI:
    Width = 1;
    break;
  }

  BaseOp = &LdSt.getOperand(1);
  Offset = LdSt.getOperand(2).getImm();

  if (!BaseOp->isReg())
    return false;

  return true;
}
bool LanaiInstrInfo::getMemOperandsWithOffsetWidth(
    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
    const TargetRegisterInfo *TRI) const {
  switch (LdSt.getOpcode()) {
  default:
    return false;
  case Lanai::LDW_RI:
  case Lanai::LDW_RR:
  case Lanai::SW_RR:
  case Lanai::SW_RI:
  case Lanai::LDHs_RI:
  case Lanai::LDHz_RI:
  case Lanai::STH_RI:
  case Lanai::LDBs_RI:
  case Lanai::LDBz_RI:
    const MachineOperand *BaseOp;
    OffsetIsScalable = false;
    if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI))
      return false;
    BaseOps.push_back(BaseOp);
    return true;
  }
}