//===- ARMInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the "Instituto Nokia de Tecnologia" and
// is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool> EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
                                    cl::desc("Enable ARM 2-addr to 3-addr conv"));

ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
  : TargetInstrInfo(ARMInsts, array_lengthof(ARMInsts)),
    RI(*this, STI) {
}

const TargetRegisterClass *ARMInstrInfo::getPointerRegClass() const {
  return &ARM::GPRRegClass;
}

/// Return true if the instruction is a register-to-register move, and leave
/// the source and destination registers in the passed parameters.
///
bool ARMInstrInfo::isMoveInstr(const MachineInstr &MI,
                               unsigned &SrcReg, unsigned &DstReg) const {
  MachineOpCode oc = MI.getOpcode();
  switch (oc) {
  default:
    return false;
  case ARM::FCPYS:
  case ARM::FCPYD:
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  case ARM::MOVr:
  case ARM::tMOVr:
    assert(MI.getInstrDescriptor()->numOperands >= 2 &&
           MI.getOperand(0).isRegister() &&
           MI.getOperand(1).isRegister() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
}
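
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot (the base operand is a FrameIndex, there is no
/// offset register, and the immediate offset is zero), return the destination
/// register and set FrameIndex; otherwise return 0.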
unsigned ARMInstrInfo::isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDR:
    if (MI->getOperand(1).isFrameIndex() &&
        MI->getOperand(2).isRegister() &&
        MI->getOperand(3).isImmediate() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FLDD:
  case ARM::FLDS:
    if (MI->getOperand(1).isFrameIndex() &&
        MI->getOperand(2).isImmediate() &&
        MI->getOperand(2).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::tRestore:
    if (MI->getOperand(1).isFrameIndex() &&
        MI->getOperand(2).isImmediate() &&
        MI->getOperand(2).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}
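
/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot (the base operand is a FrameIndex, there is no
/// offset register, and the immediate offset is zero), return the register
/// being stored and set FrameIndex; otherwise return 0.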
unsigned ARMInstrInfo::isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STR:
    if (MI->getOperand(1).isFrameIndex() &&
        MI->getOperand(2).isRegister() &&
        MI->getOperand(3).isImmediate() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FSTD:
  case ARM::FSTS:
    if (MI->getOperand(1).isFrameIndex() &&
        MI->getOperand(2).isImmediate() &&
        MI->getOperand(2).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::tSpill:
    if (MI->getOperand(1).isFrameIndex() &&
        MI->getOperand(2).isImmediate() &&
        MI->getOperand(2).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

static unsigned getUnindexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    break;
  case ARM::LDRSH_POST:
    return ARM::LDRSH;
  case ARM::LDRSB_POST:
    return ARM::LDRSB;
  }
  return 0;
}

MachineInstr *
ARMInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables &LV) const {
  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  unsigned TSFlags = MI->getInstrDescriptor()->TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load / store to an un-indexed one plus an add/sub
  // operation.
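  // For example, a post-indexed load such as
  //   ldr r0, [r1], #4
  // becomes an un-indexed load followed by an explicit base-register update:
  //   ldr r0, [r1]
  //   add r1, r1, #4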
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  unsigned NumOps = TID->numOperands;
  bool isLoad = (TID->Flags & M_LOAD_FLAG) != 0;
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
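  // Note that OffImm is an addressing-mode-encoded immediate, not a plain byte
  // offset: the ARM_AM helpers below unpack the add/sub flag, the shift kind,
  // and the offset amount that are packed into this single value.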
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
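      // An ARM so_imm operand encodes an 8-bit value rotated right by an even
      // amount, so only a subset of 32-bit constants is representable;
      // getSOImmVal returns -1 when Amt has no such encoding.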
      int SOImmVal = ARM_AM::getSOImmVal(Amt);
      if (SOImmVal == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(SOImmVal)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3: {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isRegister() && MO.getReg() &&
        MRegisterInfo::isVirtualRegister(MO.getReg())) {
      unsigned Reg = MO.getReg();
      LiveVariables::VarInfo &VI = LV.getVarInfo(Reg);
      if (MO.isDef()) {
        MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
        if (MO.isDead())
          LV.addVirtualRegisterDead(Reg, NewMI);
        // Update the defining instruction.
        if (VI.DefInst == MI)
          VI.DefInst = NewMI;
      }
      if (MO.isUse() && MO.isKill()) {
        for (unsigned j = 0; j < 2; ++j) {
          // Look at the two new MI's in reverse order.
          MachineInstr *NewMI = NewMIs[j];
          int NIdx = NewMI->findRegisterUseOperandIdx(Reg);
          if (NIdx == -1)
            continue;
          LV.addVirtualRegisterKilled(Reg, NewMI);
          if (VI.removeKill(MI))
            VI.Kills.push_back(NewMI);
          break;
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}
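
/// AnalyzeBranch - Examine the terminators of MBB. On success this returns
/// false, sets TBB to the taken destination, sets FBB to the second
/// destination of a two-way branch (or leaves it null), and fills Cond with
/// the condition operands (empty for an unconditional branch). Returning true
/// means the terminator sequence could not be understood.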
bool ARMInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 std::vector<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (LastOpc == ARM::B || LastOpc == ARM::tB) {
      TBB = LastInst->getOperand(0).getMachineBasicBlock();
      return false;
    }
    if (LastOpc == ARM::Bcc || LastOpc == ARM::tBcc) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMachineBasicBlock();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with an ARM::B/ARM::tB and an ARM::Bcc/ARM::tBcc, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if ((SecondLastOpc == ARM::Bcc && LastOpc == ARM::B) ||
      (SecondLastOpc == ARM::tBcc && LastOpc == ARM::tB)) {
    TBB = SecondLastInst->getOperand(0).getMachineBasicBlock();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMachineBasicBlock();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if ((SecondLastOpc == ARM::B || SecondLastOpc == ARM::tB) &&
      (LastOpc == ARM::B || LastOpc == ARM::tB)) {
    TBB = SecondLastInst->getOperand(0).getMachineBasicBlock();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Likewise if it ends with a branch table followed by an unconditional branch.
  // The branch folder can create these, and we must get rid of them for
  // correctness of Thumb constant islands.
  if ((SecondLastOpc == ARM::BR_JTr || SecondLastOpc == ARM::BR_JTm ||
       SecondLastOpc == ARM::BR_JTadd || SecondLastOpc == ARM::tBR_JTr) &&
      (LastOpc == ARM::B || LastOpc == ARM::tB)) {
    I = LastInst;
    I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
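
/// RemoveBranch - Strip the branch terminators off the end of MBB (at most an
/// unconditional branch and a preceding conditional branch) and return the
/// number of instructions removed.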
unsigned ARMInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int BOpc = AFI->isThumbFunction() ? ARM::tB : ARM::B;
  int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc;

  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (I->getOpcode() != BccOpc)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
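
/// InsertBranch - Emit the branch sequence for an unconditional branch (TBB
/// only), a conditional branch with fall-through (TBB plus Cond), or a two-way
/// conditional branch (TBB, Cond, and FBB), and return how many instructions
/// were inserted.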
unsigned ARMInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    const std::vector<MachineOperand> &Cond) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int BOpc = AFI->isThumbFunction() ? ARM::tB : ARM::B;
  int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc;

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMInstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case ARM::BX_RET:   // Return.
  case ARM::tBX_RET_vararg:
  case ARM::B:
  case ARM::tB:       // Uncond branch.
  case ARM::BR_JTr:   // Jumptable branch.
  case ARM::BR_JTm:   // Jumptable branch through mem.
  case ARM::BR_JTadd: // Jumptable branch add to pc.
    return true;
  default: return false;
  }
}

bool ARMInstrInfo::
ReverseBranchCondition(std::vector<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMInstrInfo::isPredicated(const MachineInstr *MI) const {
  int PIdx = MI->findFirstPredOperandIdx();
  return PIdx != -1 && MI->getOperand(PIdx).getImmedValue() != ARMCC::AL;
}
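
/// PredicateInstruction - Convert MI to a predicated form using the operands
/// in Pred: an unconditional B/tB is rewritten into Bcc/tBcc with the
/// predicate appended, while an already-predicable instruction simply has its
/// predicate operands overwritten. Returns true on success.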
bool ARMInstrInfo::PredicateInstruction(MachineInstr *MI,
                                const std::vector<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::B || Opc == ARM::tB) {
    MI->setInstrDescriptor(get(Opc == ARM::B ? ARM::Bcc : ARM::tBcc));
    MI->addImmOperand(Pred[0].getImmedValue());
    MI->addRegOperand(Pred[1].getReg(), false);
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImmedValue());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}
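
/// SubsumesPredicate - Return true if the first predicate subsumes the
/// second, i.e. whenever the second condition holds, the first does too.
/// For example, HS (unsigned >=) subsumes HI (unsigned >), and LE subsumes LT.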
bool
ARMInstrInfo::SubsumesPredicate(const std::vector<MachineOperand> &Pred1,
                                const std::vector<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImmedValue();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImmedValue();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  if (!TID->ImplicitDefs && (TID->Flags & M_HAS_OPTIONAL_DEF) == 0)
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isRegister() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) DISABLE_INLINE;
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARM::GetInstSize(MachineInstr *MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  unsigned TSFlags = TID->TSFlags;

  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default:
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
    if (MI->getOpcode() == ARM::LABEL)
      return 0;
    assert(0 && "Unknown or unset size field for instr!");
    break;
  case ARMII::Size8Bytes: return 8;          // Arm instruction x 2.
  case ARMII::Size4Bytes: return 4;          // Arm instruction.
  case ARMII::Size2Bytes: return 2;          // Thumb instruction.
  case ARMII::SizeSpecial: {
    switch (MI->getOpcode()) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded
      // as operand #2.
      return MI->getOperand(2).getImm();
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::tBR_JTr: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is 4 + 4 * number of entries.
      unsigned NumOps = TID->numOperands;
      MachineOperand JTOP =
        MI->getOperand(NumOps - ((TID->Flags & M_PREDICABLE) ? 3 : 2));
      unsigned JTI = JTOP.getJumpTableIndex();
      MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
      // aligned. The assembler / linker may add 2 byte padding just before
      // the JT entries. The size does not include this padding; the
      // constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) * 2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
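      // For example, an ARM BR_JTr with 8 jump-table entries is counted as
      // 8 * 4 + 4 = 36 bytes, while a Thumb tBR_JTr contributes only 2 bytes
      // for the branch itself.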
      return getNumJTEntries(JT, JTI) * 4 +
             (MI->getOpcode() == ARM::tBR_JTr ? 2 : 4);
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached.
}

/// GetFunctionSize - Returns the size of the specified MachineFunction.
///
unsigned ARM::GetFunctionSize(MachineFunction &MF) {
  unsigned FnSize = 0;
  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ++I)
      FnSize += ARM::GetInstSize(I);
  }
  return FnSize;
}