//===- ARMInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
14 #include "ARMInstrInfo.h"
16 #include "ARMAddressingModes.h"
17 #include "ARMGenInstrInfo.inc"
18 #include "ARMMachineFunctionInfo.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/CodeGen/LiveVariables.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineJumpTableInfo.h"
24 #include "llvm/Target/TargetAsmInfo.h"
25 #include "llvm/Support/CommandLine.h"
29 EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden
,
30 cl::desc("Enable ARM 2-addr to 3-addr conv"));

static inline
const MachineInstrBuilder &AddDefaultPred(const MachineInstrBuilder &MIB) {
  return MIB.addImm((int64_t)ARMCC::AL).addReg(0);
}

static inline
const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
  return MIB.addReg(0);
}
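
// Most ARM instructions are predicable: they carry a condition-code immediate
// plus a predicate register operand, and many also carry an optional cc_out
// register (CPSR or 0).  The helpers above append the "always execute"
// predicate (ARMCC::AL, no predicate register) and the "don't set flags"
// cc_out operand, so callers can write e.g.
//   AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
//                               .addReg(SrcReg)));
// as is done in copyRegToReg below.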

ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
    RI(*this, STI) {
}

/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool ARMInstrInfo::isMoveInstr(const MachineInstr &MI,
                               unsigned &SrcReg, unsigned &DstReg,
                               unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
  SrcSubIdx = DstSubIdx = 0; // No sub-registers.

  unsigned oc = MI.getOpcode();
  switch (oc) {
  default:
    return false;
  case ARM::FCPYS:
  case ARM::FCPYD:
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  case ARM::MOVr:
  case ARM::tMOVr:
  case ARM::tMOVhir2lor:
  case ARM::tMOVlor2hir:
  case ARM::tMOVhir2hir:
    assert(MI.getDesc().getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
}

unsigned ARMInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }
  return 0;
}

unsigned ARMInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }
  return 0;
}

void ARMInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned DestReg,
                                 const MachineInstr *Orig) const {
  DebugLoc dl = Orig->getDebugLoc();
  if (Orig->getOpcode() == ARM::MOVi2pieces) {
    RI.emitLoadConstPool(MBB, I, DestReg, Orig->getOperand(1).getImm(),
                         Orig->getOperand(2).getImm(),
                         Orig->getOperand(3).getReg(), this, false, dl);
    return;
  }

  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->getOperand(0).setReg(DestReg);
  MBB.insert(I, MI);
}

static unsigned getUnindexedOpcode(unsigned Opc) {
  switch (Opc) {
  case ARM::LDRSH_POST:
    return ARM::LDRSH;
  case ARM::LDRSB_POST:
    return ARM::LDRSB;
  }
  return 0;
}

MachineInstr *
ARMInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  unsigned TSFlags = MI->getDesc().TSFlags;

  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
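  // For example (illustrative only): a pre-indexed "ldr r0, [r1, #4]!" can be
  // split into "add r1, r1, #4" followed by "ldr r0, [r1]", while a
  // post-indexed "ldr r0, [r1], #4" becomes "ldr r0, [r1]" followed by
  // "add r1, r1, #4".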
  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
240 assert(false && "Unknown indexed op!");
242 case ARMII::AddrMode2
: {
243 bool isSub
= ARM_AM::getAM2Op(OffImm
) == ARM_AM::sub
;
244 unsigned Amt
= ARM_AM::getAM2Offset(OffImm
);
246 int SOImmVal
= ARM_AM::getSOImmVal(Amt
);
248 // Can't encode it in a so_imm operand. This transformation will
249 // add more than 1 instruction. Abandon!
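      // (A so_imm is an 8-bit value rotated right by an even amount, so e.g.
      // 255, 0x3FC and 0xFF000000 are representable but 0x101 is not.)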
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(SOImmVal)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else {
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    }
    break;
  }
  case ARMII::AddrMode3: {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0) {
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
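      // (AddrMode3 is the halfword / signed-byte load/store form, e.g. LDRH
      // and LDRSB; its offset field is a plain 8-bit immediate.)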
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else {
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    }
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
    MemMI = BuildMI(MF, MI->getDebugLoc(),
                    get(MemOpc), MI->getOperand(0).getReg())
      .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    MemMI = BuildMI(MF, MI->getDebugLoc(),
                    get(MemOpc)).addReg(MI->getOperand(1).getReg())
      .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
    MemMI = BuildMI(MF, MI->getDebugLoc(),
                    get(MemOpc), MI->getOperand(0).getReg())
      .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    MemMI = BuildMI(MF, MI->getDebugLoc(),
                    get(MemOpc)).addReg(MI->getOperand(1).getReg())
      .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);

  // Transfer LiveVariables states, kill / dead info.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() &&
        TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      unsigned Reg = MO.getReg();

      LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
      MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
      LV->addVirtualRegisterDead(Reg, NewMI);
      if (MO.isUse() && MO.isKill()) {
        for (unsigned j = 0; j < 2; ++j) {
          // Look at the two new MI's in reverse order.
          MachineInstr *NewMI = NewMIs[j];
          if (!NewMI->readsRegister(Reg))
            continue;
          LV->addVirtualRegisterKilled(Reg, NewMI);
          if (VI.removeKill(MI))
            VI.Kills.push_back(NewMI);
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

bool ARMInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (LastOpc == ARM::B || LastOpc == ARM::tB) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (LastOpc == ARM::Bcc || LastOpc == ARM::tBcc) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with ARM::B/ARM::tB and a ARM::Bcc/ARM::tBcc, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if ((SecondLastOpc == ARM::Bcc && LastOpc == ARM::B) ||
      (SecondLastOpc == ARM::tBcc && LastOpc == ARM::tB)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if ((SecondLastOpc == ARM::B || SecondLastOpc == ARM::tB) &&
      (LastOpc == ARM::B || LastOpc == ARM::tB)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them
  // for correctness of Thumb constant islands.
  if ((SecondLastOpc == ARM::BR_JTr || SecondLastOpc == ARM::BR_JTm ||
       SecondLastOpc == ARM::BR_JTadd || SecondLastOpc == ARM::tBR_JTr) &&
      (LastOpc == ARM::B || LastOpc == ARM::tB)) {
    I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned ARMInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int BOpc = AFI->isThumbFunction() ? ARM::tB : ARM::B;
  int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc;

  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (I->getOpcode() != BccOpc)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned
ARMInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const SmallVectorImpl<MachineOperand> &Cond) const {
  // FIXME this should probably have a DebugLoc argument
  DebugLoc dl = DebugLoc::getUnknownLoc();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int BOpc = AFI->isThumbFunction() ? ARM::tB : ARM::B;
  int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc;

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
  return 2;
}
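
// Thumb-1 register-to-register moves are encoding dependent: moves within the
// low registers (r0-r7, the tGPR class) use tMOVr, while moves involving the
// high registers need the tMOVlor2hir / tMOVhir2lor / tMOVhir2hir forms,
// which is why copyRegToReg below dispatches on both the destination and
// source register classes.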

bool ARMInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator I,
                                unsigned DestReg, unsigned SrcReg,
                                const TargetRegisterClass *DestRC,
                                const TargetRegisterClass *SrcRC) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (!AFI->isThumbFunction()) {
    if (DestRC == ARM::GPRRegisterClass) {
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
                                  .addReg(SrcReg)));
      return true;
    }
  } else {
    if (DestRC == ARM::GPRRegisterClass) {
      if (SrcRC == ARM::GPRRegisterClass) {
        BuildMI(MBB, I, DL, get(ARM::tMOVhir2hir), DestReg).addReg(SrcReg);
        return true;
      } else if (SrcRC == ARM::tGPRRegisterClass) {
        BuildMI(MBB, I, DL, get(ARM::tMOVlor2hir), DestReg).addReg(SrcReg);
        return true;
      }
    } else if (DestRC == ARM::tGPRRegisterClass) {
      if (SrcRC == ARM::GPRRegisterClass) {
        BuildMI(MBB, I, DL, get(ARM::tMOVhir2lor), DestReg).addReg(SrcReg);
        return true;
      } else if (SrcRC == ARM::tGPRRegisterClass) {
        BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg).addReg(SrcReg);
        return true;
      }
    }
  }

  if (DestRC != SrcRC) {
    // Not yet supported!
    return false;
  }

  if (DestRC == ARM::SPRRegisterClass)
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYS), DestReg)
                   .addReg(SrcReg));
  else if (DestRC == ARM::DPRRegisterClass)
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYD), DestReg)
                   .addReg(SrcReg));

  return true;
}

void ARMInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == ARM::GPRRegisterClass) {
    MachineFunction &MF = *MBB.getParent();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    assert(!AFI->isThumbFunction());
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
                   .addReg(SrcReg, false, false, isKill)
                   .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::tGPRRegisterClass) {
    MachineFunction &MF = *MBB.getParent();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    assert(AFI->isThumbFunction());
    BuildMI(MBB, I, DL, get(ARM::tSpill))
      .addReg(SrcReg, false, false, isKill)
      .addFrameIndex(FI).addImm(0);
  } else if (RC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTD))
                   .addReg(SrcReg, false, false, isKill)
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTS))
                   .addReg(SrcReg, false, false, isKill)
                   .addFrameIndex(FI).addImm(0));
  }
}

void ARMInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  unsigned Opc = 0;

  if (RC == ARM::GPRRegisterClass) {
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    if (AFI->isThumbFunction()) {
      Opc = Addr[0].isFI() ? ARM::tSpill : ARM::tSTR;
      MachineInstrBuilder MIB =
        BuildMI(MF, DL, get(Opc)).addReg(SrcReg, false, false, isKill);
      for (unsigned i = 0, e = Addr.size(); i != e; ++i)
        MIB.addOperand(Addr[i]);
      NewMIs.push_back(MIB);
      return;
    }
    Opc = ARM::STR;
  } else if (RC == ARM::DPRRegisterClass) {
    Opc = ARM::FSTD;
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    Opc = ARM::FSTS;
  }

  MachineInstrBuilder MIB =
    BuildMI(MF, DL, get(Opc)).addReg(SrcReg, false, false, isKill);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB.addOperand(Addr[i]);

  NewMIs.push_back(MIB);
}

void ARMInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == ARM::GPRRegisterClass) {
    MachineFunction &MF = *MBB.getParent();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    assert(!AFI->isThumbFunction());
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
                   .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::tGPRRegisterClass) {
    MachineFunction &MF = *MBB.getParent();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    assert(AFI->isThumbFunction());
    BuildMI(MBB, I, DL, get(ARM::tRestore), DestReg)
      .addFrameIndex(FI).addImm(0);
  } else if (RC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDD), DestReg)
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDS), DestReg)
                   .addFrameIndex(FI).addImm(0));
  }
}

void ARMInstrInfo::
loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                SmallVectorImpl<MachineOperand> &Addr,
                const TargetRegisterClass *RC,
                SmallVectorImpl<MachineInstr*> &NewMIs) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  unsigned Opc = 0;

  if (RC == ARM::GPRRegisterClass) {
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    if (AFI->isThumbFunction()) {
      Opc = Addr[0].isFI() ? ARM::tRestore : ARM::tLDR;
      MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
      for (unsigned i = 0, e = Addr.size(); i != e; ++i)
        MIB.addOperand(Addr[i]);
      NewMIs.push_back(MIB);
      return;
    }
    Opc = ARM::LDR;
  } else if (RC == ARM::DPRRegisterClass) {
    Opc = ARM::FLDD;
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    Opc = ARM::FLDS;
  }

  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB.addOperand(Addr[i]);

  NewMIs.push_back(MIB);
}

bool ARMInstrInfo::
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI,
                          const std::vector<CalleeSavedInfo> &CSI) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  if (!AFI->isThumbFunction() || CSI.empty())
    return false;

  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (MI != MBB.end()) DL = MI->getDebugLoc();

  MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, get(ARM::tPUSH));
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    MIB.addReg(Reg, false/*isDef*/, false/*isImp*/, true/*isKill*/);
  }
  return true;
}

bool ARMInstrInfo::
restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            const std::vector<CalleeSavedInfo> &CSI) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  if (!AFI->isThumbFunction() || CSI.empty())
    return false;

  bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
  MachineInstr *PopMI = MF.CreateMachineInstr(get(ARM::tPOP), MI->getDebugLoc());
  MBB.insert(MI, PopMI);
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (Reg == ARM::LR) {
      // Special epilogue for vararg functions. See emitEpilogue
      PopMI->setDesc(get(ARM::tPOP_RET));
    }
    PopMI->addOperand(MachineOperand::CreateReg(Reg, true));
  }
  return true;
}
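
// foldMemoryOperandImpl replaces a register-to-register move, one of whose
// operands is being spilled, with a direct load or store of the stack slot.
// For example (illustrative): folding operand 0 of "rD = MOVr rS" into frame
// index FI yields a store of rS to FI, carrying the original predicate
// operands along, while folding operand 1 yields a load of rD from FI.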

MachineInstr *ARMInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  switch (Opc) {
  default: break;
  case ARM::MOVr: {
    if (MI->getOperand(4).getReg() == ARM::CPSR)
      // If it is updating CPSR, then it cannot be folded.
      return NULL;
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
        .addReg(SrcReg, false, false, isKill)
        .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
        .addReg(DstReg, true, false, false, isDead)
        .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
    }
    break;
  }
  case ARM::tMOVlor2hir:
  case ARM::tMOVhir2lor:
  case ARM::tMOVhir2hir: {
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
        // tSpill cannot take a high register operand.
        return NULL;
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::tSpill))
        .addReg(SrcReg, false, false, isKill)
        .addFrameIndex(FI).addImm(0);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
        // tRestore cannot target a high register operand.
        return NULL;
      bool isDead = MI->getOperand(0).isDead();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::tRestore))
        .addReg(DstReg, true, false, false, isDead)
        .addFrameIndex(FI).addImm(0);
    }
    break;
  }
  case ARM::FCPYS: {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTS))
        .addReg(SrcReg).addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDS), DstReg)
        .addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    }
    break;
  }
  case ARM::FCPYD: {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTD))
        .addReg(SrcReg, false, false, isKill)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDD))
        .addReg(DstReg, true, false, false, isDead)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
    break;
  }
  }

  return NewMI;
}

bool ARMInstrInfo::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  if (Ops.size() != 1) return false;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  default: break;
  case ARM::MOVr:
    // If it is updating CPSR, then it cannot be folded.
    return MI->getOperand(4).getReg() != ARM::CPSR;
  case ARM::tMOVlor2hir:
  case ARM::tMOVhir2lor:
  case ARM::tMOVhir2hir: {
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
        // tSpill cannot take a high register operand.
        return false;
    } else { // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
        // tRestore cannot target a high register operand.
        return false;
    }
    return true;
  }
  }

  return false;
}

bool ARMInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case ARM::BX_RET:   // Return.
  case ARM::tBX_RET_vararg:
  case ARM::tB:       // Uncond branch.
  case ARM::BR_JTr:   // Jumptable branch.
  case ARM::BR_JTm:   // Jumptable branch through mem.
  case ARM::BR_JTadd: // Jumptable branch add to pc.
    return true;
  default: return false;
  }
}

bool ARMInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}
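
// An ARM instruction is considered predicated when its predicate operand
// holds something other than ARMCC::AL; e.g. a conditional MOVGT carries
// pred == ARMCC::GT, while an ordinary MOV carries ARMCC::AL and is not
// treated as predicated.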
bool ARMInstrInfo::isPredicated(const MachineInstr *MI) const {
  int PIdx = MI->findFirstPredOperandIdx();
  return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
}

bool ARMInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::B || Opc == ARM::tB) {
    MI->setDesc(get(Opc == ARM::B ? ARM::Bcc : ARM::tBcc));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

bool ARMInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
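  // One predicate subsumes another when it holds whenever the other does;
  // for example HS (unsigned >=) subsumes HI (unsigned >), and LS
  // (unsigned <=) subsumes both LO and EQ.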
  switch (CC1) {
  default:
    return false;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) DISABLE_INLINE;
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned TSFlags = TID.TSFlags;

  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default: {
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
    switch (MI->getOpcode()) {
    default:
      assert(0 && "Unknown or unset size field for instr!");
      break;
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::DECLARE:
    case TargetInstrInfo::DBG_LABEL:
    case TargetInstrInfo::EH_LABEL:
      return 0;
    }
    break;
  }
  case ARMII::Size8Bytes: return 8;   // Arm instruction x 2.
  case ARMII::Size4Bytes: return 4;   // Arm instruction.
  case ARMII::Size2Bytes: return 2;   // Thumb instruction.
  case ARMII::SizeSpecial: {
    switch (MI->getOpcode()) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded
      // as operand #2.
      return MI->getOperand(2).getImm();
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::tBR_JTr: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is 4 + 4 * number of entries.
      unsigned NumOps = TID.getNumOperands();
      MachineOperand JTOP =
        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
      unsigned JTI = JTOP.getIndex();
      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
      // aligned. The assembler / linker may add 2 byte padding just before
      // the JT entries. The size does not include this padding; the
      // constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) * 2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
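      // For example, a Thumb tBR_JTr whose table has 10 entries is counted as
      // 10 * 4 + 2 = 42 bytes, while the ARM forms with the same table count
      // as 10 * 4 + 4 = 44 bytes (padding excluded, as noted above).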
      return getNumJTEntries(JT, JTI) * 4 +
             (MI->getOpcode() == ARM::tBR_JTr ? 2 : 4);
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached
}