//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
14 #include "ARMBaseInstrInfo.h"
16 #include "ARMAddressingModes.h"
17 #include "ARMGenInstrInfo.inc"
18 #include "ARMMachineFunctionInfo.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/CodeGen/LiveVariables.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineJumpTableInfo.h"
24 #include "llvm/MC/MCAsmInfo.h"
25 #include "llvm/Support/CommandLine.h"
26 #include "llvm/Support/ErrorHandling.h"
30 EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden
,
31 cl::desc("Enable ARM 2-addr to 3-addr conv"));
33 ARMBaseInstrInfo::ARMBaseInstrInfo()
34 : TargetInstrInfoImpl(ARMInsts
, array_lengthof(ARMInsts
)) {
38 ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator
&MFI
,
39 MachineBasicBlock::iterator
&MBBI
,
40 LiveVariables
*LV
) const {
41 // FIXME: Thumb2 support.
46 MachineInstr
*MI
= MBBI
;
47 MachineFunction
&MF
= *MI
->getParent()->getParent();
48 unsigned TSFlags
= MI
->getDesc().TSFlags
;
50 switch ((TSFlags
& ARMII::IndexModeMask
) >> ARMII::IndexModeShift
) {
52 case ARMII::IndexModePre
:
55 case ARMII::IndexModePost
:
59 // Try splitting an indexed load/store to an un-indexed one plus an add/sub
61 unsigned MemOpc
= getUnindexedOpcode(MI
->getOpcode());
65 MachineInstr
*UpdateMI
= NULL
;
66 MachineInstr
*MemMI
= NULL
;
67 unsigned AddrMode
= (TSFlags
& ARMII::AddrModeMask
);
68 const TargetInstrDesc
&TID
= MI
->getDesc();
69 unsigned NumOps
= TID
.getNumOperands();
70 bool isLoad
= !TID
.mayStore();
71 const MachineOperand
&WB
= isLoad
? MI
->getOperand(1) : MI
->getOperand(0);
72 const MachineOperand
&Base
= MI
->getOperand(2);
73 const MachineOperand
&Offset
= MI
->getOperand(NumOps
-3);
74 unsigned WBReg
= WB
.getReg();
75 unsigned BaseReg
= Base
.getReg();
76 unsigned OffReg
= Offset
.getReg();
77 unsigned OffImm
= MI
->getOperand(NumOps
-2).getImm();
78 ARMCC::CondCodes Pred
= (ARMCC::CondCodes
)MI
->getOperand(NumOps
-1).getImm();
81 assert(false && "Unknown indexed op!");
83 case ARMII::AddrMode2
: {
84 bool isSub
= ARM_AM::getAM2Op(OffImm
) == ARM_AM::sub
;
85 unsigned Amt
= ARM_AM::getAM2Offset(OffImm
);
87 if (ARM_AM::getSOImmVal(Amt
) == -1)
88 // Can't encode it in a so_imm operand. This transformation will
89 // add more than 1 instruction. Abandon!
91 UpdateMI
= BuildMI(MF
, MI
->getDebugLoc(),
92 get(isSub
? ARM::SUBri
: ARM::ADDri
), WBReg
)
93 .addReg(BaseReg
).addImm(Amt
)
94 .addImm(Pred
).addReg(0).addReg(0);
95 } else if (Amt
!= 0) {
96 ARM_AM::ShiftOpc ShOpc
= ARM_AM::getAM2ShiftOpc(OffImm
);
97 unsigned SOOpc
= ARM_AM::getSORegOpc(ShOpc
, Amt
);
98 UpdateMI
= BuildMI(MF
, MI
->getDebugLoc(),
99 get(isSub
? ARM::SUBrs
: ARM::ADDrs
), WBReg
)
100 .addReg(BaseReg
).addReg(OffReg
).addReg(0).addImm(SOOpc
)
101 .addImm(Pred
).addReg(0).addReg(0);
103 UpdateMI
= BuildMI(MF
, MI
->getDebugLoc(),
104 get(isSub
? ARM::SUBrr
: ARM::ADDrr
), WBReg
)
105 .addReg(BaseReg
).addReg(OffReg
)
106 .addImm(Pred
).addReg(0).addReg(0);
109 case ARMII::AddrMode3
: {
110 bool isSub
= ARM_AM::getAM3Op(OffImm
) == ARM_AM::sub
;
111 unsigned Amt
= ARM_AM::getAM3Offset(OffImm
);
113 // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
114 UpdateMI
= BuildMI(MF
, MI
->getDebugLoc(),
115 get(isSub
? ARM::SUBri
: ARM::ADDri
), WBReg
)
116 .addReg(BaseReg
).addImm(Amt
)
117 .addImm(Pred
).addReg(0).addReg(0);
119 UpdateMI
= BuildMI(MF
, MI
->getDebugLoc(),
120 get(isSub
? ARM::SUBrr
: ARM::ADDrr
), WBReg
)
121 .addReg(BaseReg
).addReg(OffReg
)
122 .addImm(Pred
).addReg(0).addReg(0);
127 std::vector
<MachineInstr
*> NewMIs
;
130 MemMI
= BuildMI(MF
, MI
->getDebugLoc(),
131 get(MemOpc
), MI
->getOperand(0).getReg())
132 .addReg(WBReg
).addReg(0).addImm(0).addImm(Pred
);
134 MemMI
= BuildMI(MF
, MI
->getDebugLoc(),
135 get(MemOpc
)).addReg(MI
->getOperand(1).getReg())
136 .addReg(WBReg
).addReg(0).addImm(0).addImm(Pred
);
137 NewMIs
.push_back(MemMI
);
138 NewMIs
.push_back(UpdateMI
);
141 MemMI
= BuildMI(MF
, MI
->getDebugLoc(),
142 get(MemOpc
), MI
->getOperand(0).getReg())
143 .addReg(BaseReg
).addReg(0).addImm(0).addImm(Pred
);
145 MemMI
= BuildMI(MF
, MI
->getDebugLoc(),
146 get(MemOpc
)).addReg(MI
->getOperand(1).getReg())
147 .addReg(BaseReg
).addReg(0).addImm(0).addImm(Pred
);
149 UpdateMI
->getOperand(0).setIsDead();
150 NewMIs
.push_back(UpdateMI
);
151 NewMIs
.push_back(MemMI
);
154 // Transfer LiveVariables states, kill / dead info.
156 for (unsigned i
= 0, e
= MI
->getNumOperands(); i
!= e
; ++i
) {
157 MachineOperand
&MO
= MI
->getOperand(i
);
158 if (MO
.isReg() && MO
.getReg() &&
159 TargetRegisterInfo::isVirtualRegister(MO
.getReg())) {
160 unsigned Reg
= MO
.getReg();
162 LiveVariables::VarInfo
&VI
= LV
->getVarInfo(Reg
);
164 MachineInstr
*NewMI
= (Reg
== WBReg
) ? UpdateMI
: MemMI
;
166 LV
->addVirtualRegisterDead(Reg
, NewMI
);
168 if (MO
.isUse() && MO
.isKill()) {
169 for (unsigned j
= 0; j
< 2; ++j
) {
170 // Look at the two new MI's in reverse order.
171 MachineInstr
*NewMI
= NewMIs
[j
];
172 if (!NewMI
->readsRegister(Reg
))
174 LV
->addVirtualRegisterKilled(Reg
, NewMI
);
175 if (VI
.removeKill(MI
))
176 VI
.Kills
.push_back(NewMI
);
184 MFI
->insert(MBBI
, NewMIs
[1]);
185 MFI
->insert(MBBI
, NewMIs
[0]);
191 ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock
&MBB
,MachineBasicBlock
*&TBB
,
192 MachineBasicBlock
*&FBB
,
193 SmallVectorImpl
<MachineOperand
> &Cond
,
194 bool AllowModify
) const {
195 // If the block has no terminators, it just falls into the block after it.
196 MachineBasicBlock::iterator I
= MBB
.end();
197 if (I
== MBB
.begin() || !isUnpredicatedTerminator(--I
))
200 // Get the last instruction in the block.
201 MachineInstr
*LastInst
= I
;
203 // If there is only one terminator instruction, process it.
204 unsigned LastOpc
= LastInst
->getOpcode();
205 if (I
== MBB
.begin() || !isUnpredicatedTerminator(--I
)) {
206 if (isUncondBranchOpcode(LastOpc
)) {
207 TBB
= LastInst
->getOperand(0).getMBB();
210 if (isCondBranchOpcode(LastOpc
)) {
211 // Block ends with fall-through condbranch.
212 TBB
= LastInst
->getOperand(0).getMBB();
213 Cond
.push_back(LastInst
->getOperand(1));
214 Cond
.push_back(LastInst
->getOperand(2));
217 return true; // Can't handle indirect branch.
220 // Get the instruction before it if it is a terminator.
221 MachineInstr
*SecondLastInst
= I
;
223 // If there are three terminators, we don't know what sort of block this is.
224 if (SecondLastInst
&& I
!= MBB
.begin() && isUnpredicatedTerminator(--I
))
227 // If the block ends with a B and a Bcc, handle it.
228 unsigned SecondLastOpc
= SecondLastInst
->getOpcode();
229 if (isCondBranchOpcode(SecondLastOpc
) && isUncondBranchOpcode(LastOpc
)) {
230 TBB
= SecondLastInst
->getOperand(0).getMBB();
231 Cond
.push_back(SecondLastInst
->getOperand(1));
232 Cond
.push_back(SecondLastInst
->getOperand(2));
233 FBB
= LastInst
->getOperand(0).getMBB();
237 // If the block ends with two unconditional branches, handle it. The second
238 // one is not executed, so remove it.
239 if (isUncondBranchOpcode(SecondLastOpc
) && isUncondBranchOpcode(LastOpc
)) {
240 TBB
= SecondLastInst
->getOperand(0).getMBB();
243 I
->eraseFromParent();
247 // ...likewise if it ends with a branch table followed by an unconditional
248 // branch. The branch folder can create these, and we must get rid of them for
249 // correctness of Thumb constant islands.
250 if (isJumpTableBranchOpcode(SecondLastOpc
) &&
251 isUncondBranchOpcode(LastOpc
)) {
254 I
->eraseFromParent();
258 // Otherwise, can't handle this.
263 unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock
&MBB
) const {
264 MachineBasicBlock::iterator I
= MBB
.end();
265 if (I
== MBB
.begin()) return 0;
267 if (!isUncondBranchOpcode(I
->getOpcode()) &&
268 !isCondBranchOpcode(I
->getOpcode()))
271 // Remove the branch.
272 I
->eraseFromParent();
276 if (I
== MBB
.begin()) return 1;
278 if (!isCondBranchOpcode(I
->getOpcode()))
281 // Remove the branch.
282 I
->eraseFromParent();
287 ARMBaseInstrInfo::InsertBranch(MachineBasicBlock
&MBB
, MachineBasicBlock
*TBB
,
288 MachineBasicBlock
*FBB
,
289 const SmallVectorImpl
<MachineOperand
> &Cond
) const {
290 // FIXME this should probably have a DebugLoc argument
291 DebugLoc dl
= DebugLoc::getUnknownLoc();
293 ARMFunctionInfo
*AFI
= MBB
.getParent()->getInfo
<ARMFunctionInfo
>();
294 int BOpc
= !AFI
->isThumbFunction()
295 ? ARM::B
: (AFI
->isThumb2Function() ? ARM::t2B
: ARM::tB
);
296 int BccOpc
= !AFI
->isThumbFunction()
297 ? ARM::Bcc
: (AFI
->isThumb2Function() ? ARM::t2Bcc
: ARM::tBcc
);
299 // Shouldn't be a fall through.
300 assert(TBB
&& "InsertBranch must not be told to insert a fallthrough");
301 assert((Cond
.size() == 2 || Cond
.size() == 0) &&
302 "ARM branch conditions have two components!");
305 if (Cond
.empty()) // Unconditional branch?
306 BuildMI(&MBB
, dl
, get(BOpc
)).addMBB(TBB
);
308 BuildMI(&MBB
, dl
, get(BccOpc
)).addMBB(TBB
)
309 .addImm(Cond
[0].getImm()).addReg(Cond
[1].getReg());
313 // Two-way conditional branch.
314 BuildMI(&MBB
, dl
, get(BccOpc
)).addMBB(TBB
)
315 .addImm(Cond
[0].getImm()).addReg(Cond
[1].getReg());
316 BuildMI(&MBB
, dl
, get(BOpc
)).addMBB(FBB
);
320 bool ARMBaseInstrInfo::
321 ReverseBranchCondition(SmallVectorImpl
<MachineOperand
> &Cond
) const {
322 ARMCC::CondCodes CC
= (ARMCC::CondCodes
)(int)Cond
[0].getImm();
323 Cond
[0].setImm(ARMCC::getOppositeCondition(CC
));
327 bool ARMBaseInstrInfo::
328 PredicateInstruction(MachineInstr
*MI
,
329 const SmallVectorImpl
<MachineOperand
> &Pred
) const {
330 unsigned Opc
= MI
->getOpcode();
331 if (isUncondBranchOpcode(Opc
)) {
332 MI
->setDesc(get(getMatchingCondBranchOpcode(Opc
)));
333 MI
->addOperand(MachineOperand::CreateImm(Pred
[0].getImm()));
334 MI
->addOperand(MachineOperand::CreateReg(Pred
[1].getReg(), false));
338 int PIdx
= MI
->findFirstPredOperandIdx();
340 MachineOperand
&PMO
= MI
->getOperand(PIdx
);
341 PMO
.setImm(Pred
[0].getImm());
342 MI
->getOperand(PIdx
+1).setReg(Pred
[1].getReg());
348 bool ARMBaseInstrInfo::
349 SubsumesPredicate(const SmallVectorImpl
<MachineOperand
> &Pred1
,
350 const SmallVectorImpl
<MachineOperand
> &Pred2
) const {
351 if (Pred1
.size() > 2 || Pred2
.size() > 2)
354 ARMCC::CondCodes CC1
= (ARMCC::CondCodes
)Pred1
[0].getImm();
355 ARMCC::CondCodes CC2
= (ARMCC::CondCodes
)Pred2
[0].getImm();
365 return CC2
== ARMCC::HI
;
367 return CC2
== ARMCC::LO
|| CC2
== ARMCC::EQ
;
369 return CC2
== ARMCC::GT
;
371 return CC2
== ARMCC::LT
;
375 bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr
*MI
,
376 std::vector
<MachineOperand
> &Pred
) const {
377 // FIXME: This confuses implicit_def with optional CPSR def.
378 const TargetInstrDesc
&TID
= MI
->getDesc();
379 if (!TID
.getImplicitDefs() && !TID
.hasOptionalDef())
383 for (unsigned i
= 0, e
= MI
->getNumOperands(); i
!= e
; ++i
) {
384 const MachineOperand
&MO
= MI
->getOperand(i
);
385 if (MO
.isReg() && MO
.getReg() == ARM::CPSR
) {
395 /// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
396 static unsigned getNumJTEntries(const std::vector
<MachineJumpTableEntry
> &JT
,
397 unsigned JTI
) DISABLE_INLINE
;
398 static unsigned getNumJTEntries(const std::vector
<MachineJumpTableEntry
> &JT
,
400 return JT
[JTI
].MBBs
.size();
403 /// GetInstSize - Return the size of the specified MachineInstr.
405 unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr
*MI
) const {
406 const MachineBasicBlock
&MBB
= *MI
->getParent();
407 const MachineFunction
*MF
= MBB
.getParent();
408 const MCAsmInfo
*MAI
= MF
->getTarget().getMCAsmInfo();
410 // Basic size info comes from the TSFlags field.
411 const TargetInstrDesc
&TID
= MI
->getDesc();
412 unsigned TSFlags
= TID
.TSFlags
;
414 unsigned Opc
= MI
->getOpcode();
415 switch ((TSFlags
& ARMII::SizeMask
) >> ARMII::SizeShift
) {
417 // If this machine instr is an inline asm, measure it.
418 if (MI
->getOpcode() == ARM::INLINEASM
)
419 return getInlineAsmLength(MI
->getOperand(0).getSymbolName(), *MAI
);
424 llvm_unreachable("Unknown or unset size field for instr!");
425 case TargetInstrInfo::IMPLICIT_DEF
:
426 case TargetInstrInfo::DBG_LABEL
:
427 case TargetInstrInfo::EH_LABEL
:
432 case ARMII::Size8Bytes
: return 8; // ARM instruction x 2.
433 case ARMII::Size4Bytes
: return 4; // ARM / Thumb2 instruction.
434 case ARMII::Size2Bytes
: return 2; // Thumb1 instruction.
435 case ARMII::SizeSpecial
: {
437 case ARM::CONSTPOOL_ENTRY
:
438 // If this machine instr is a constant pool entry, its size is recorded as
440 return MI
->getOperand(2).getImm();
441 case ARM::Int_eh_sjlj_setjmp
:
443 case ARM::t2Int_eh_sjlj_setjmp
:
452 // These are jumptable branches, i.e. a branch followed by an inlined
453 // jumptable. The size is 4 + 4 * number of entries. For TBB, each
454 // entry is one byte; TBH two byte each.
455 unsigned EntrySize
= (Opc
== ARM::t2TBB
)
456 ? 1 : ((Opc
== ARM::t2TBH
) ? 2 : 4);
457 unsigned NumOps
= TID
.getNumOperands();
458 MachineOperand JTOP
=
459 MI
->getOperand(NumOps
- (TID
.isPredicable() ? 3 : 2));
460 unsigned JTI
= JTOP
.getIndex();
461 const MachineJumpTableInfo
*MJTI
= MF
->getJumpTableInfo();
462 const std::vector
<MachineJumpTableEntry
> &JT
= MJTI
->getJumpTables();
463 assert(JTI
< JT
.size());
464 // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
465 // 4 aligned. The assembler / linker may add 2 byte padding just before
466 // the JT entries. The size does not include this padding; the
467 // constant islands pass does separate bookkeeping for it.
468 // FIXME: If we know the size of the function is less than (1 << 16) *2
469 // bytes, we can use 16-bit entries instead. Then there won't be an
471 unsigned InstSize
= (Opc
== ARM::tBR_JTr
|| Opc
== ARM::t2BR_JT
) ? 2 : 4;
472 unsigned NumEntries
= getNumJTEntries(JT
, JTI
);
473 if (Opc
== ARM::t2TBB
&& (NumEntries
& 1))
474 // Make sure the instruction that follows TBB is 2-byte aligned.
475 // FIXME: Constant island pass should insert an "ALIGN" instruction
478 return NumEntries
* EntrySize
+ InstSize
;
481 // Otherwise, pseudo-instruction sizes are zero.
486 return 0; // Not reached
489 /// Return true if the instruction is a register to register move and
490 /// leave the source and dest operands in the passed parameters.
493 ARMBaseInstrInfo::isMoveInstr(const MachineInstr
&MI
,
494 unsigned &SrcReg
, unsigned &DstReg
,
495 unsigned& SrcSubIdx
, unsigned& DstSubIdx
) const {
496 SrcSubIdx
= DstSubIdx
= 0; // No sub-registers.
498 switch (MI
.getOpcode()) {
504 SrcReg
= MI
.getOperand(1).getReg();
505 DstReg
= MI
.getOperand(0).getReg();
510 case ARM::tMOVgpr2tgpr
:
511 case ARM::tMOVtgpr2gpr
:
512 case ARM::tMOVgpr2gpr
:
514 assert(MI
.getDesc().getNumOperands() >= 2 &&
515 MI
.getOperand(0).isReg() &&
516 MI
.getOperand(1).isReg() &&
517 "Invalid ARM MOV instruction");
518 SrcReg
= MI
.getOperand(1).getReg();
519 DstReg
= MI
.getOperand(0).getReg();
528 ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr
*MI
,
529 int &FrameIndex
) const {
530 switch (MI
->getOpcode()) {
533 case ARM::t2LDRs
: // FIXME: don't use t2LDRs to access frame.
534 if (MI
->getOperand(1).isFI() &&
535 MI
->getOperand(2).isReg() &&
536 MI
->getOperand(3).isImm() &&
537 MI
->getOperand(2).getReg() == 0 &&
538 MI
->getOperand(3).getImm() == 0) {
539 FrameIndex
= MI
->getOperand(1).getIndex();
540 return MI
->getOperand(0).getReg();
545 if (MI
->getOperand(1).isFI() &&
546 MI
->getOperand(2).isImm() &&
547 MI
->getOperand(2).getImm() == 0) {
548 FrameIndex
= MI
->getOperand(1).getIndex();
549 return MI
->getOperand(0).getReg();
554 if (MI
->getOperand(1).isFI() &&
555 MI
->getOperand(2).isImm() &&
556 MI
->getOperand(2).getImm() == 0) {
557 FrameIndex
= MI
->getOperand(1).getIndex();
558 return MI
->getOperand(0).getReg();
567 ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr
*MI
,
568 int &FrameIndex
) const {
569 switch (MI
->getOpcode()) {
572 case ARM::t2STRs
: // FIXME: don't use t2STRs to access frame.
573 if (MI
->getOperand(1).isFI() &&
574 MI
->getOperand(2).isReg() &&
575 MI
->getOperand(3).isImm() &&
576 MI
->getOperand(2).getReg() == 0 &&
577 MI
->getOperand(3).getImm() == 0) {
578 FrameIndex
= MI
->getOperand(1).getIndex();
579 return MI
->getOperand(0).getReg();
584 if (MI
->getOperand(1).isFI() &&
585 MI
->getOperand(2).isImm() &&
586 MI
->getOperand(2).getImm() == 0) {
587 FrameIndex
= MI
->getOperand(1).getIndex();
588 return MI
->getOperand(0).getReg();
593 if (MI
->getOperand(1).isFI() &&
594 MI
->getOperand(2).isImm() &&
595 MI
->getOperand(2).getImm() == 0) {
596 FrameIndex
= MI
->getOperand(1).getIndex();
597 return MI
->getOperand(0).getReg();
606 ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock
&MBB
,
607 MachineBasicBlock::iterator I
,
608 unsigned DestReg
, unsigned SrcReg
,
609 const TargetRegisterClass
*DestRC
,
610 const TargetRegisterClass
*SrcRC
) const {
611 DebugLoc DL
= DebugLoc::getUnknownLoc();
612 if (I
!= MBB
.end()) DL
= I
->getDebugLoc();
614 if (DestRC
!= SrcRC
) {
615 // Allow DPR / DPR_VFP2 / DPR_8 cross-class copies
616 // Allow QPR / QPR_VFP2 cross-class copies
617 if (DestRC
== ARM::DPRRegisterClass
) {
618 if (SrcRC
== ARM::DPR_VFP2RegisterClass
||
619 SrcRC
== ARM::DPR_8RegisterClass
) {
622 } else if (DestRC
== ARM::DPR_VFP2RegisterClass
) {
623 if (SrcRC
== ARM::DPRRegisterClass
||
624 SrcRC
== ARM::DPR_8RegisterClass
) {
627 } else if (DestRC
== ARM::DPR_8RegisterClass
) {
628 if (SrcRC
== ARM::DPRRegisterClass
||
629 SrcRC
== ARM::DPR_VFP2RegisterClass
) {
632 } else if ((DestRC
== ARM::QPRRegisterClass
&&
633 SrcRC
== ARM::QPR_VFP2RegisterClass
) ||
634 (DestRC
== ARM::QPR_VFP2RegisterClass
&&
635 SrcRC
== ARM::QPRRegisterClass
)) {
640 if (DestRC
== ARM::GPRRegisterClass
) {
641 AddDefaultCC(AddDefaultPred(BuildMI(MBB
, I
, DL
, get(ARM::MOVr
),
642 DestReg
).addReg(SrcReg
)));
643 } else if (DestRC
== ARM::SPRRegisterClass
) {
644 AddDefaultPred(BuildMI(MBB
, I
, DL
, get(ARM::FCPYS
), DestReg
)
646 } else if ((DestRC
== ARM::DPRRegisterClass
) ||
647 (DestRC
== ARM::DPR_VFP2RegisterClass
) ||
648 (DestRC
== ARM::DPR_8RegisterClass
)) {
649 AddDefaultPred(BuildMI(MBB
, I
, DL
, get(ARM::FCPYD
), DestReg
)
651 } else if (DestRC
== ARM::QPRRegisterClass
||
652 DestRC
== ARM::QPR_VFP2RegisterClass
) {
653 BuildMI(MBB
, I
, DL
, get(ARM::VMOVQ
), DestReg
).addReg(SrcReg
);
661 void ARMBaseInstrInfo::
662 storeRegToStackSlot(MachineBasicBlock
&MBB
, MachineBasicBlock::iterator I
,
663 unsigned SrcReg
, bool isKill
, int FI
,
664 const TargetRegisterClass
*RC
) const {
665 DebugLoc DL
= DebugLoc::getUnknownLoc();
666 if (I
!= MBB
.end()) DL
= I
->getDebugLoc();
668 if (RC
== ARM::GPRRegisterClass
) {
669 AddDefaultPred(BuildMI(MBB
, I
, DL
, get(ARM::STR
))
670 .addReg(SrcReg
, getKillRegState(isKill
))
671 .addFrameIndex(FI
).addReg(0).addImm(0));
672 } else if (RC
== ARM::DPRRegisterClass
||
673 RC
== ARM::DPR_VFP2RegisterClass
||
674 RC
== ARM::DPR_8RegisterClass
) {
675 AddDefaultPred(BuildMI(MBB
, I
, DL
, get(ARM::FSTD
))
676 .addReg(SrcReg
, getKillRegState(isKill
))
677 .addFrameIndex(FI
).addImm(0));
678 } else if (RC
== ARM::SPRRegisterClass
) {
679 AddDefaultPred(BuildMI(MBB
, I
, DL
, get(ARM::FSTS
))
680 .addReg(SrcReg
, getKillRegState(isKill
))
681 .addFrameIndex(FI
).addImm(0));
683 assert((RC
== ARM::QPRRegisterClass
||
684 RC
== ARM::QPR_VFP2RegisterClass
) && "Unknown regclass!");
685 // FIXME: Neon instructions should support predicates
686 BuildMI(MBB
, I
, DL
, get(ARM::VSTRQ
)).addReg(SrcReg
, getKillRegState(isKill
))
687 .addFrameIndex(FI
).addImm(0);
691 void ARMBaseInstrInfo::
692 loadRegFromStackSlot(MachineBasicBlock
&MBB
, MachineBasicBlock::iterator I
,
693 unsigned DestReg
, int FI
,
694 const TargetRegisterClass
*RC
) const {
695 DebugLoc DL
= DebugLoc::getUnknownLoc();
696 if (I
!= MBB
.end()) DL
= I
->getDebugLoc();
698 if (RC
== ARM::GPRRegisterClass
) {
699 AddDefaultPred(BuildMI(MBB
, I
, DL
, get(ARM::LDR
), DestReg
)
700 .addFrameIndex(FI
).addReg(0).addImm(0));
701 } else if (RC
== ARM::DPRRegisterClass
||
702 RC
== ARM::DPR_VFP2RegisterClass
||
703 RC
== ARM::DPR_8RegisterClass
) {
704 AddDefaultPred(BuildMI(MBB
, I
, DL
, get(ARM::FLDD
), DestReg
)
705 .addFrameIndex(FI
).addImm(0));
706 } else if (RC
== ARM::SPRRegisterClass
) {
707 AddDefaultPred(BuildMI(MBB
, I
, DL
, get(ARM::FLDS
), DestReg
)
708 .addFrameIndex(FI
).addImm(0));
710 assert((RC
== ARM::QPRRegisterClass
||
711 RC
== ARM::QPR_VFP2RegisterClass
) && "Unknown regclass!");
712 // FIXME: Neon instructions should support predicates
713 BuildMI(MBB
, I
, DL
, get(ARM::VLDRQ
), DestReg
).addFrameIndex(FI
).addImm(0);
717 MachineInstr
*ARMBaseInstrInfo::
718 foldMemoryOperandImpl(MachineFunction
&MF
, MachineInstr
*MI
,
719 const SmallVectorImpl
<unsigned> &Ops
, int FI
) const {
720 if (Ops
.size() != 1) return NULL
;
722 unsigned OpNum
= Ops
[0];
723 unsigned Opc
= MI
->getOpcode();
724 MachineInstr
*NewMI
= NULL
;
725 if (Opc
== ARM::MOVr
|| Opc
== ARM::t2MOVr
) {
726 // If it is updating CPSR, then it cannot be folded.
727 if (MI
->getOperand(4).getReg() == ARM::CPSR
&& !MI
->getOperand(4).isDead())
729 unsigned Pred
= MI
->getOperand(2).getImm();
730 unsigned PredReg
= MI
->getOperand(3).getReg();
731 if (OpNum
== 0) { // move -> store
732 unsigned SrcReg
= MI
->getOperand(1).getReg();
733 bool isKill
= MI
->getOperand(1).isKill();
734 bool isUndef
= MI
->getOperand(1).isUndef();
735 if (Opc
== ARM::MOVr
)
736 NewMI
= BuildMI(MF
, MI
->getDebugLoc(), get(ARM::STR
))
737 .addReg(SrcReg
, getKillRegState(isKill
) | getUndefRegState(isUndef
))
738 .addFrameIndex(FI
).addReg(0).addImm(0).addImm(Pred
).addReg(PredReg
);
740 NewMI
= BuildMI(MF
, MI
->getDebugLoc(), get(ARM::t2STRi12
))
741 .addReg(SrcReg
, getKillRegState(isKill
) | getUndefRegState(isUndef
))
742 .addFrameIndex(FI
).addImm(0).addImm(Pred
).addReg(PredReg
);
743 } else { // move -> load
744 unsigned DstReg
= MI
->getOperand(0).getReg();
745 bool isDead
= MI
->getOperand(0).isDead();
746 bool isUndef
= MI
->getOperand(0).isUndef();
747 if (Opc
== ARM::MOVr
)
748 NewMI
= BuildMI(MF
, MI
->getDebugLoc(), get(ARM::LDR
))
751 getDeadRegState(isDead
) |
752 getUndefRegState(isUndef
))
753 .addFrameIndex(FI
).addReg(0).addImm(0).addImm(Pred
).addReg(PredReg
);
755 NewMI
= BuildMI(MF
, MI
->getDebugLoc(), get(ARM::t2LDRi12
))
758 getDeadRegState(isDead
) |
759 getUndefRegState(isUndef
))
760 .addFrameIndex(FI
).addImm(0).addImm(Pred
).addReg(PredReg
);
762 } else if (Opc
== ARM::tMOVgpr2gpr
||
763 Opc
== ARM::tMOVtgpr2gpr
||
764 Opc
== ARM::tMOVgpr2tgpr
) {
765 if (OpNum
== 0) { // move -> store
766 unsigned SrcReg
= MI
->getOperand(1).getReg();
767 bool isKill
= MI
->getOperand(1).isKill();
768 bool isUndef
= MI
->getOperand(1).isUndef();
769 NewMI
= BuildMI(MF
, MI
->getDebugLoc(), get(ARM::t2STRi12
))
770 .addReg(SrcReg
, getKillRegState(isKill
) | getUndefRegState(isUndef
))
771 .addFrameIndex(FI
).addImm(0).addImm(ARMCC::AL
).addReg(0);
772 } else { // move -> load
773 unsigned DstReg
= MI
->getOperand(0).getReg();
774 bool isDead
= MI
->getOperand(0).isDead();
775 bool isUndef
= MI
->getOperand(0).isUndef();
776 NewMI
= BuildMI(MF
, MI
->getDebugLoc(), get(ARM::t2LDRi12
))
779 getDeadRegState(isDead
) |
780 getUndefRegState(isUndef
))
781 .addFrameIndex(FI
).addImm(0).addImm(ARMCC::AL
).addReg(0);
783 } else if (Opc
== ARM::FCPYS
) {
784 unsigned Pred
= MI
->getOperand(2).getImm();
785 unsigned PredReg
= MI
->getOperand(3).getReg();
786 if (OpNum
== 0) { // move -> store
787 unsigned SrcReg
= MI
->getOperand(1).getReg();
788 bool isKill
= MI
->getOperand(1).isKill();
789 bool isUndef
= MI
->getOperand(1).isUndef();
790 NewMI
= BuildMI(MF
, MI
->getDebugLoc(), get(ARM::FSTS
))
791 .addReg(SrcReg
, getKillRegState(isKill
) | getUndefRegState(isUndef
))
793 .addImm(0).addImm(Pred
).addReg(PredReg
);
794 } else { // move -> load
795 unsigned DstReg
= MI
->getOperand(0).getReg();
796 bool isDead
= MI
->getOperand(0).isDead();
797 bool isUndef
= MI
->getOperand(0).isUndef();
798 NewMI
= BuildMI(MF
, MI
->getDebugLoc(), get(ARM::FLDS
))
801 getDeadRegState(isDead
) |
802 getUndefRegState(isUndef
))
803 .addFrameIndex(FI
).addImm(0).addImm(Pred
).addReg(PredReg
);
806 else if (Opc
== ARM::FCPYD
) {
807 unsigned Pred
= MI
->getOperand(2).getImm();
808 unsigned PredReg
= MI
->getOperand(3).getReg();
809 if (OpNum
== 0) { // move -> store
810 unsigned SrcReg
= MI
->getOperand(1).getReg();
811 bool isKill
= MI
->getOperand(1).isKill();
812 bool isUndef
= MI
->getOperand(1).isUndef();
813 NewMI
= BuildMI(MF
, MI
->getDebugLoc(), get(ARM::FSTD
))
814 .addReg(SrcReg
, getKillRegState(isKill
) | getUndefRegState(isUndef
))
815 .addFrameIndex(FI
).addImm(0).addImm(Pred
).addReg(PredReg
);
816 } else { // move -> load
817 unsigned DstReg
= MI
->getOperand(0).getReg();
818 bool isDead
= MI
->getOperand(0).isDead();
819 bool isUndef
= MI
->getOperand(0).isUndef();
820 NewMI
= BuildMI(MF
, MI
->getDebugLoc(), get(ARM::FLDD
))
823 getDeadRegState(isDead
) |
824 getUndefRegState(isUndef
))
825 .addFrameIndex(FI
).addImm(0).addImm(Pred
).addReg(PredReg
);
833 ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction
&MF
,
835 const SmallVectorImpl
<unsigned> &Ops
,
836 MachineInstr
* LoadMI
) const {
842 ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr
*MI
,
843 const SmallVectorImpl
<unsigned> &Ops
) const {
844 if (Ops
.size() != 1) return false;
846 unsigned Opc
= MI
->getOpcode();
847 if (Opc
== ARM::MOVr
|| Opc
== ARM::t2MOVr
) {
848 // If it is updating CPSR, then it cannot be folded.
849 return MI
->getOperand(4).getReg() != ARM::CPSR
||
850 MI
->getOperand(4).isDead();
851 } else if (Opc
== ARM::tMOVgpr2gpr
||
852 Opc
== ARM::tMOVtgpr2gpr
||
853 Opc
== ARM::tMOVgpr2tgpr
) {
855 } else if (Opc
== ARM::FCPYS
|| Opc
== ARM::FCPYD
) {
857 } else if (Opc
== ARM::VMOVD
|| Opc
== ARM::VMOVQ
) {
858 return false; // FIXME
864 /// getInstrPredicate - If instruction is predicated, returns its predicate
865 /// condition, otherwise returns AL. It also returns the condition code
866 /// register by reference.
867 ARMCC::CondCodes
llvm::getInstrPredicate(MachineInstr
*MI
, unsigned &PredReg
) {
868 int PIdx
= MI
->findFirstPredOperandIdx();
874 PredReg
= MI
->getOperand(PIdx
+1).getReg();
875 return (ARMCC::CondCodes
)MI
->getOperand(PIdx
).getImm();
879 int llvm::getMatchingCondBranchOpcode(int Opc
) {
882 else if (Opc
== ARM::tB
)
884 else if (Opc
== ARM::t2B
)
887 llvm_unreachable("Unknown unconditional branch opcode!");
892 void llvm::emitARMRegPlusImmediate(MachineBasicBlock
&MBB
,
893 MachineBasicBlock::iterator
&MBBI
, DebugLoc dl
,
894 unsigned DestReg
, unsigned BaseReg
, int NumBytes
,
895 ARMCC::CondCodes Pred
, unsigned PredReg
,
896 const ARMBaseInstrInfo
&TII
) {
897 bool isSub
= NumBytes
< 0;
898 if (isSub
) NumBytes
= -NumBytes
;
901 unsigned RotAmt
= ARM_AM::getSOImmValRotate(NumBytes
);
902 unsigned ThisVal
= NumBytes
& ARM_AM::rotr32(0xFF, RotAmt
);
903 assert(ThisVal
&& "Didn't extract field correctly");
905 // We will handle these bits from offset, clear them.
906 NumBytes
&= ~ThisVal
;
908 assert(ARM_AM::getSOImmVal(ThisVal
) != -1 && "Bit extraction didn't work?");
910 // Build the new ADD / SUB.
911 unsigned Opc
= isSub
? ARM::SUBri
: ARM::ADDri
;
912 BuildMI(MBB
, MBBI
, dl
, TII
.get(Opc
), DestReg
)
913 .addReg(BaseReg
, RegState::Kill
).addImm(ThisVal
)
914 .addImm((unsigned)Pred
).addReg(PredReg
).addReg(0);
919 bool llvm::rewriteARMFrameIndex(MachineInstr
&MI
, unsigned FrameRegIdx
,
920 unsigned FrameReg
, int &Offset
,
921 const ARMBaseInstrInfo
&TII
) {
922 unsigned Opcode
= MI
.getOpcode();
923 const TargetInstrDesc
&Desc
= MI
.getDesc();
924 unsigned AddrMode
= (Desc
.TSFlags
& ARMII::AddrModeMask
);
927 // Memory operands in inline assembly always use AddrMode2.
928 if (Opcode
== ARM::INLINEASM
)
929 AddrMode
= ARMII::AddrMode2
;
931 if (Opcode
== ARM::ADDri
) {
932 Offset
+= MI
.getOperand(FrameRegIdx
+1).getImm();
934 // Turn it into a move.
935 MI
.setDesc(TII
.get(ARM::MOVr
));
936 MI
.getOperand(FrameRegIdx
).ChangeToRegister(FrameReg
, false);
937 MI
.RemoveOperand(FrameRegIdx
+1);
940 } else if (Offset
< 0) {
943 MI
.setDesc(TII
.get(ARM::SUBri
));
946 // Common case: small offset, fits into instruction.
947 if (ARM_AM::getSOImmVal(Offset
) != -1) {
948 // Replace the FrameIndex with sp / fp
949 MI
.getOperand(FrameRegIdx
).ChangeToRegister(FrameReg
, false);
950 MI
.getOperand(FrameRegIdx
+1).ChangeToImmediate(Offset
);
955 // Otherwise, pull as much of the immedidate into this ADDri/SUBri
957 unsigned RotAmt
= ARM_AM::getSOImmValRotate(Offset
);
958 unsigned ThisImmVal
= Offset
& ARM_AM::rotr32(0xFF, RotAmt
);
960 // We will handle these bits from offset, clear them.
961 Offset
&= ~ThisImmVal
;
963 // Get the properly encoded SOImmVal field.
964 assert(ARM_AM::getSOImmVal(ThisImmVal
) != -1 &&
965 "Bit extraction didn't work?");
966 MI
.getOperand(FrameRegIdx
+1).ChangeToImmediate(ThisImmVal
);
970 unsigned NumBits
= 0;
973 case ARMII::AddrMode2
: {
974 ImmIdx
= FrameRegIdx
+2;
975 InstrOffs
= ARM_AM::getAM2Offset(MI
.getOperand(ImmIdx
).getImm());
976 if (ARM_AM::getAM2Op(MI
.getOperand(ImmIdx
).getImm()) == ARM_AM::sub
)
981 case ARMII::AddrMode3
: {
982 ImmIdx
= FrameRegIdx
+2;
983 InstrOffs
= ARM_AM::getAM3Offset(MI
.getOperand(ImmIdx
).getImm());
984 if (ARM_AM::getAM3Op(MI
.getOperand(ImmIdx
).getImm()) == ARM_AM::sub
)
989 case ARMII::AddrMode4
:
990 // Can't fold any offset even if it's zero.
992 case ARMII::AddrMode5
: {
993 ImmIdx
= FrameRegIdx
+1;
994 InstrOffs
= ARM_AM::getAM5Offset(MI
.getOperand(ImmIdx
).getImm());
995 if (ARM_AM::getAM5Op(MI
.getOperand(ImmIdx
).getImm()) == ARM_AM::sub
)
1002 llvm_unreachable("Unsupported addressing mode!");
1006 Offset
+= InstrOffs
* Scale
;
1007 assert((Offset
& (Scale
-1)) == 0 && "Can't encode this offset!");
1013 // Attempt to fold address comp. if opcode has offset bits
1015 // Common case: small offset, fits into instruction.
1016 MachineOperand
&ImmOp
= MI
.getOperand(ImmIdx
);
1017 int ImmedOffset
= Offset
/ Scale
;
1018 unsigned Mask
= (1 << NumBits
) - 1;
1019 if ((unsigned)Offset
<= Mask
* Scale
) {
1020 // Replace the FrameIndex with sp
1021 MI
.getOperand(FrameRegIdx
).ChangeToRegister(FrameReg
, false);
1023 ImmedOffset
|= 1 << NumBits
;
1024 ImmOp
.ChangeToImmediate(ImmedOffset
);
1029 // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
1030 ImmedOffset
= ImmedOffset
& Mask
;
1032 ImmedOffset
|= 1 << NumBits
;
1033 ImmOp
.ChangeToImmediate(ImmedOffset
);
1034 Offset
&= ~(Mask
*Scale
);
1038 Offset
= (isSub
) ? -Offset
: Offset
;