//===- ARMInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the "Instituto Nokia de Tecnologia" and
// is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool> EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
                                    cl::desc("Enable ARM 2-addr to 3-addr conv"));

ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
  : TargetInstrInfo(ARMInsts, array_lengthof(ARMInsts)),
    RI(*this, STI) {
}

const TargetRegisterClass *ARMInstrInfo::getPointerRegClass() const {
  return &ARM::GPRRegClass;
}

/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool ARMInstrInfo::isMoveInstr(const MachineInstr &MI,
                               unsigned &SrcReg, unsigned &DstReg) const {
  MachineOpCode oc = MI.getOpcode();
  switch (oc) {
  default:
    return false;
  case ARM::FCPYS:
  case ARM::FCPYD:
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  case ARM::MOVr:
  case ARM::tMOVr:
    assert(MI.getInstrDescriptor()->numOperands >= 2 &&
           MI.getOperand(0).isRegister() &&
           MI.getOperand(1).isRegister() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
}

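/// isLoadFromStackSlot - If the specified instruction is a direct load from a
/// stack slot (no index register, zero offset), set FrameIndex to that stack
/// slot and return the destination register; otherwise return 0.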
unsigned ARMInstrInfo::isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const{
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDR:
    if (MI->getOperand(1).isFrameIndex() &&
        MI->getOperand(2).isRegister() &&
        MI->getOperand(3).isImmediate() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FLDD:
  case ARM::FLDS:
    if (MI->getOperand(1).isFrameIndex() &&
        MI->getOperand(2).isImmediate() &&
        MI->getOperand(2).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::tRestore:
    if (MI->getOperand(1).isFrameIndex() &&
        MI->getOperand(2).isImmediate() &&
        MI->getOperand(2).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

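/// isStoreToStackSlot - If the specified instruction is a direct store to a
/// stack slot (no index register, zero offset), set FrameIndex to that stack
/// slot and return the source register; otherwise return 0.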
unsigned ARMInstrInfo::isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STR:
    if (MI->getOperand(1).isFrameIndex() &&
        MI->getOperand(2).isRegister() &&
        MI->getOperand(3).isImmediate() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FSTD:
  case ARM::FSTS:
    if (MI->getOperand(1).isFrameIndex() &&
        MI->getOperand(2).isImmediate() &&
        MI->getOperand(2).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::tSpill:
    if (MI->getOperand(1).isFrameIndex() &&
        MI->getOperand(2).isImmediate() &&
        MI->getOperand(2).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

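/// getUnindexedOpcode - Map a pre- or post-indexed load/store opcode to its
/// un-indexed counterpart, or return 0 if there is none.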
static unsigned getUnindexedOpcode(unsigned Opc) {
  switch (Opc) {
  default: break;
  case ARM::LDR_PRE:
  case ARM::LDR_POST:
    return ARM::LDR;
  case ARM::LDRH_PRE:
  case ARM::LDRH_POST:
    return ARM::LDRH;
  case ARM::LDRB_PRE:
  case ARM::LDRB_POST:
    return ARM::LDRB;
  case ARM::LDRSH_PRE:
  case ARM::LDRSH_POST:
    return ARM::LDRSH;
  case ARM::LDRSB_PRE:
  case ARM::LDRSB_POST:
    return ARM::LDRSB;
  case ARM::STR_PRE:
  case ARM::STR_POST:
    return ARM::STR;
  case ARM::STRH_PRE:
  case ARM::STRH_POST:
    return ARM::STRH;
  case ARM::STRB_PRE:
  case ARM::STRB_POST:
    return ARM::STRB;
  }
  return 0;
}

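/// convertToThreeAddress - If 2-addr to 3-addr conversion is enabled, split a
/// pre/post-indexed load or store into an un-indexed memory instruction plus a
/// separate instruction that updates the base register, and transfer the
/// liveness (kill / dead) information to the new instructions.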
MachineInstr *
ARMInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables &LV) const {
  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  unsigned TSFlags = MI->getInstrDescriptor()->TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load / store into an un-indexed one plus an
  // add/sub operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  unsigned NumOps = TID->numOperands;
  bool isLoad = (TID->Flags & M_LOAD_FLAG) != 0;
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      int SOImmVal = ARM_AM::getSOImmVal(Amt);
      if (SOImmVal == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(SOImmVal)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isRegister() && MO.getReg() &&
        MRegisterInfo::isVirtualRegister(MO.getReg())) {
      unsigned Reg = MO.getReg();
      LiveVariables::VarInfo &VI = LV.getVarInfo(Reg);
      if (MO.isDef()) {
        MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
        if (MO.isDead())
          LV.addVirtualRegisterDead(Reg, NewMI);
        // Update the defining instruction.
        if (VI.DefInst == MI)
          VI.DefInst = NewMI;
      }
      if (MO.isUse() && MO.isKill()) {
        for (unsigned j = 0; j < 2; ++j) {
          // Look at the two new MI's in reverse order.
          MachineInstr *NewMI = NewMIs[j];
          int NIdx = NewMI->findRegisterUseOperandIdx(Reg);
          if (NIdx == -1)
            continue;
          LV.addVirtualRegisterKilled(Reg, NewMI);
          if (VI.removeKill(MI))
            VI.Kills.push_back(NewMI);
          break;
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
bool ARMInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 std::vector<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (LastOpc == ARM::B || LastOpc == ARM::tB) {
      TBB = LastInst->getOperand(0).getMachineBasicBlock();
      return false;
    }
    if (LastOpc == ARM::Bcc || LastOpc == ARM::tBcc) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMachineBasicBlock();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with an ARM::B/ARM::tB and an ARM::Bcc/ARM::tBcc, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if ((SecondLastOpc == ARM::Bcc && LastOpc == ARM::B) ||
      (SecondLastOpc == ARM::tBcc && LastOpc == ARM::tB)) {
    TBB = SecondLastInst->getOperand(0).getMachineBasicBlock();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMachineBasicBlock();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if ((SecondLastOpc == ARM::B || SecondLastOpc == ARM::tB) &&
      (LastOpc == ARM::B || LastOpc == ARM::tB)) {
    TBB = SecondLastInst->getOperand(0).getMachineBasicBlock();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Likewise if it ends with a branch table followed by an unconditional branch.
  // The branch folder can create these, and we must get rid of them for
  // correctness of Thumb constant islands.
  if ((SecondLastOpc == ARM::BR_JTr || SecondLastOpc == ARM::BR_JTm ||
       SecondLastOpc == ARM::BR_JTadd || SecondLastOpc == ARM::tBR_JTr) &&
      (LastOpc == ARM::B || LastOpc == ARM::tB)) {
    I = LastInst;
    I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

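/// RemoveBranch - Remove the branch instruction(s) at the end of MBB (an
/// unconditional branch, a conditional branch, or both) and return the number
/// of instructions that were removed.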
unsigned ARMInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int BOpc   = AFI->isThumbFunction() ? ARM::tB : ARM::B;
  int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc;

  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (I->getOpcode() != BccOpc)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

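/// InsertBranch - Insert an unconditional branch, a conditional branch, or
/// both (for a two-way conditional branch) at the end of MBB and return the
/// number of instructions inserted.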
unsigned ARMInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    const std::vector<MachineOperand> &Cond) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int BOpc   = AFI->isThumbFunction() ? ARM::tB : ARM::B;
  int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc;

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, get(BOpc)).addMBB(FBB);
  return 2;
}

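/// BlockHasNoFallThrough - Return true if MBB ends in an instruction that
/// never falls through to the next block: a return, an unconditional branch,
/// or a jump table branch.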
bool ARMInstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case ARM::BX_RET:   // Return.
  case ARM::LDM_RET:
  case ARM::tBX_RET:
  case ARM::tBX_RET_vararg:
  case ARM::tPOP_RET:
  case ARM::B:
  case ARM::tB:       // Uncond branch.
  case ARM::tBR_JTr:
  case ARM::BR_JTr:   // Jumptable branch.
  case ARM::BR_JTm:   // Jumptable branch through mem.
  case ARM::BR_JTadd: // Jumptable branch add to pc.
    return true;
  default: return false;
  }
}

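/// ReverseBranchCondition - Invert the ARM condition code held in Cond[0].
/// This always succeeds, so it returns false.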
bool ARMInstrInfo::
ReverseBranchCondition(std::vector<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMInstrInfo::isPredicated(const MachineInstr *MI) const {
  int PIdx = MI->findFirstPredOperandIdx();
  return PIdx != -1 && MI->getOperand(PIdx).getImmedValue() != ARMCC::AL;
}

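/// PredicateInstruction - Apply the given predicate to MI: rewrite an
/// unconditional branch into a conditional one, or update the predicate
/// operands of an already predicable instruction.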
bool ARMInstrInfo::PredicateInstruction(MachineInstr *MI,
                                  const std::vector<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::B || Opc == ARM::tB) {
    MI->setInstrDescriptor(get(Opc == ARM::B ? ARM::Bcc : ARM::tBcc));
    MI->addImmOperand(Pred[0].getImmedValue());
    MI->addRegOperand(Pred[1].getReg(), false);
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImmedValue());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

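/// SubsumesPredicate - Return true if the first predicate subsumes the second,
/// i.e. whenever the second condition code holds, the first one does too
/// (e.g. AL subsumes everything, HS subsumes HI).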
bool
ARMInstrInfo::SubsumesPredicate(const std::vector<MachineOperand> &Pred1,
                                const std::vector<MachineOperand> &Pred2) const{
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImmedValue();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImmedValue();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

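/// DefinesPredicate - If MI defines CPSR (explicitly or via an optional def),
/// collect the CPSR operands in Pred and return true.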
bool ARMInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  if (!TID->ImplicitDefs && (TID->Flags & M_HAS_OPTIONAL_DEF) == 0)
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isRegister() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) DISABLE_INLINE;
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARM::GetInstSize(MachineInstr *MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  unsigned TSFlags = TID->TSFlags;

  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default:
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
    if (MI->getOpcode() == ARM::LABEL)
      return 0;
    assert(0 && "Unknown or unset size field for instr!");
    break;
  case ARMII::Size8Bytes: return 8;          // Arm instruction x 2.
  case ARMII::Size4Bytes: return 4;          // Arm instruction.
  case ARMII::Size2Bytes: return 2;          // Thumb instruction.
  case ARMII::SizeSpecial: {
    switch (MI->getOpcode()) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded
      // as operand #2.
      return MI->getOperand(2).getImm();
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::tBR_JTr: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is 4 + 4 * number of entries.
      unsigned NumOps = TID->numOperands;
      MachineOperand JTOP =
        MI->getOperand(NumOps - ((TID->Flags & M_PREDICABLE) ? 3 : 2));
      unsigned JTI = JTOP.getJumpTableIndex();
      MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
      // aligned. The assembler / linker may add 2 byte padding just before
      // the JT entries. The size does not include this padding; the
      // constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) * 2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
      return getNumJTEntries(JT, JTI) * 4 +
             (MI->getOpcode() == ARM::tBR_JTr ? 2 : 4);
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0;
}

/// GetFunctionSize - Returns the size of the specified MachineFunction.
///
unsigned ARM::GetFunctionSize(MachineFunction &MF) {
  unsigned FnSize = 0;
  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ++I)
      FnSize += ARM::GetInstSize(I);
  }
  return FnSize;
}