//===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMFrameLowering.h"
#include "ARMInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CommandLine.h"

#define GET_REGINFO_MC_DESC
#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"
using namespace llvm;

static cl::opt<bool>
ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
          cl::desc("Force use of virtual base registers for stack load/store"));
static cl::opt<bool>
EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
          cl::desc("Enable pre-regalloc stack frame index allocation"));
static cl::opt<bool>
EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));
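
// These are ordinary llvm::cl options, so they can be toggled from the tool
// command line. A hypothetical invocation (assuming a standard llc build)
// might look like:
//   llc -mtriple=armv7-apple-darwin -arm-use-base-pointer=false foo.ll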

ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
                                         const ARMSubtarget &sti)
  : ARMGenRegisterInfo(), TII(tii), STI(sti),
    FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
    BasePtr(ARM::R6) {
}

const unsigned*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  static const unsigned CalleeSavedRegs[] = {
    ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
    ARM::R7, ARM::R6, ARM::R5, ARM::R4,

    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
    ARM::D11, ARM::D10, ARM::D9, ARM::D8,
    0
  };

  static const unsigned DarwinCalleeSavedRegs[] = {
    // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
    // register.
    ARM::LR, ARM::R7, ARM::R6, ARM::R5, ARM::R4,
    ARM::R11, ARM::R10, ARM::R8,

    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
    ARM::D11, ARM::D10, ARM::D9, ARM::D8,
    0
  };
  return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
}

BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  Reserved.set(ARM::SP);
  Reserved.set(ARM::PC);
  Reserved.set(ARM::FPSCR);
  if (TFI->hasFP(MF))
    Reserved.set(FramePtr);
  if (hasBasePointer(MF))
    Reserved.set(BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    Reserved.set(ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
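  // (That is, only a full VFP3/NEON register file exposes D16-D31; on a
  // VFP2-only or d16-limited core the allocator may only use D0-D15.)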
  if (!STI.hasVFP3() || STI.hasD16()) {
    assert(ARM::D31 == ARM::D16 + 15);
    for (unsigned i = 0; i != 16; ++i)
      Reserved.set(ARM::D16 + i);
  }

  return Reserved;
}

bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
                                        unsigned Reg) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (Reg) {
  default: break;
  case ARM::SP:
  case ARM::PC:
    return true;
  case ARM::R6:
    if (hasBasePointer(MF))
      return true;
    break;
  case ARM::R7:
  case ARM::R11:
    if (FramePtr == Reg && TFI->hasFP(MF))
      return true;
    break;
  case ARM::R9:
    return STI.isR9Reserved();
  }

  return false;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                              const TargetRegisterClass *B,
                                              unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case ARM::ssub_0:
  case ARM::ssub_1:
  case ARM::ssub_2:
  case ARM::ssub_3: {
    // S sub-registers.
    if (A->getSize() == 8) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::DPR_8RegClass;
      assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
      if (A == &ARM::DPR_8RegClass)
        return A;
      return &ARM::DPR_VFP2RegClass;
    }

    if (A->getSize() == 16) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::QPR_8RegClass;
      return &ARM::QPR_VFP2RegClass;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::SPR_8RegClass)
        return 0; // Do not allow coalescing!
      return &ARM::QQPR_VFP2RegClass;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    return 0; // Do not allow coalescing!
  }
  case ARM::dsub_0:
  case ARM::dsub_1:
  case ARM::dsub_2:
  case ARM::dsub_3: {
    // D sub-registers.
    if (A->getSize() == 16) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0; // Do not allow coalescing!
      return A;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0; // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B != &ARM::DPRRegClass)
      return 0; // Do not allow coalescing!
    return A;
  }
  case ARM::dsub_4:
  case ARM::dsub_5:
  case ARM::dsub_6:
  case ARM::dsub_7: {
    // D sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::DPRRegClass)
      return A;
    return 0; // Do not allow coalescing!
  }

  case ARM::qsub_0:
  case ARM::qsub_1: {
    // Q sub-registers.
    if (A->getSize() == 32) {
      if (B == &ARM::QPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::QPR_8RegClass)
        return 0; // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B == &ARM::QPRRegClass)
      return A;
    return 0; // Do not allow coalescing!
  }
  case ARM::qsub_2:
  case ARM::qsub_3: {
    // Q sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::QPRRegClass)
      return A;
    return 0; // Do not allow coalescing!
  }
  }
  return 0;
}

bool
ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
                                          SmallVectorImpl<unsigned> &SubIndices,
                                          unsigned &NewSubIdx) const {

  unsigned Size = RC->getSize() * 8;
  if (Size < 6)
    return 0;

  NewSubIdx = 0; // Whole register.
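  // For example, on a 256-bit QQ class the pair { dsub_0, dsub_1 } combines
  // into qsub_0, while { dsub_0, dsub_1, dsub_2, dsub_3 } covers the whole
  // register and leaves NewSubIdx at 0.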
  unsigned NumRegs = SubIndices.size();
  if (NumRegs == 8) {
    // 8 D registers -> 1 QQQQ register.
    return (Size == 512 &&
            SubIndices[0] == ARM::dsub_0 &&
            SubIndices[1] == ARM::dsub_1 &&
            SubIndices[2] == ARM::dsub_2 &&
            SubIndices[3] == ARM::dsub_3 &&
            SubIndices[4] == ARM::dsub_4 &&
            SubIndices[5] == ARM::dsub_5 &&
            SubIndices[6] == ARM::dsub_6 &&
            SubIndices[7] == ARM::dsub_7);
  } else if (NumRegs == 4) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 4 Q registers -> 1 QQQQ register.
      return (Size == 512 &&
              SubIndices[1] == ARM::qsub_1 &&
              SubIndices[2] == ARM::qsub_2 &&
              SubIndices[3] == ARM::qsub_3);
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 4 D registers -> 1 QQ register.
      if (Size >= 256 &&
          SubIndices[1] == ARM::dsub_1 &&
          SubIndices[2] == ARM::dsub_2 &&
          SubIndices[3] == ARM::dsub_3) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 4 D registers -> 1 QQ register (2nd).
      if (Size == 512 &&
          SubIndices[1] == ARM::dsub_5 &&
          SubIndices[2] == ARM::dsub_6 &&
          SubIndices[3] == ARM::dsub_7) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 4 S registers -> 1 Q register.
      if (Size >= 128 &&
          SubIndices[1] == ARM::ssub_1 &&
          SubIndices[2] == ARM::ssub_2 &&
          SubIndices[3] == ARM::ssub_3) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    }
  } else if (NumRegs == 2) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 2 Q registers -> 1 QQ register.
      if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::qsub_2) {
      // 2 Q registers -> 1 QQ register (2nd).
      if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 2 D registers -> 1 Q register.
      if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_2) {
      // 2 D registers -> 1 Q register (2nd).
      if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
        NewSubIdx = ARM::qsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 2 D registers -> 1 Q register (3rd).
      if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
        NewSubIdx = ARM::qsub_2;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_6) {
      // 2 D registers -> 1 Q register (4th).
      if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
        NewSubIdx = ARM::qsub_3;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 2 S registers -> 1 D register.
      if (SubIndices[1] == ARM::ssub_1) {
        if (Size >= 128)
          NewSubIdx = ARM::dsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_2) {
      // 2 S registers -> 1 D register (2nd).
      if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
        NewSubIdx = ARM::dsub_1;
        return true;
      }
    }
  }
  return false;
}

const TargetRegisterClass*
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)
                                                                         const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->superclasses_begin();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
  return ARM::GPRRegisterClass;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
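    // For example, with a frame pointer and a reserved R9 this yields
    // 10 - 1 - 1 = 8 GPRs for register-pressure purposes.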
    return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

/// getRawAllocationOrder - Returns the register allocation order for a
/// specified register class with a target-dependent hint.
ArrayRef<unsigned>
ARMBaseRegisterInfo::getRawAllocationOrder(const TargetRegisterClass *RC,
                                           unsigned HintType, unsigned HintReg,
                                           const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  // Alternative register allocation orders when favoring even / odd registers
  // of register pairs.
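  // (ARM-mode LDRD/STRD want an even register paired with the next odd one,
  // which is why the even/odd hint orders below exist.)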

  // No FP, R9 is available.
  static const unsigned GPREven1[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd1[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R7, R9 is available.
  static const unsigned GPREven2[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd2[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R11, R9 is available.
  static const unsigned GPREven3[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9
  };
  static const unsigned GPROdd3[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
    ARM::R8
  };

  // No FP, R9 is not available.
  static const unsigned GPREven4[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd4[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R7, R9 is not available.
  static const unsigned GPREven5[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd5[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R11, R9 is not available.
  static const unsigned GPREven6[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
  };
  static const unsigned GPROdd6[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
  };

  // We only support even/odd hints for GPR and rGPR.
  if (RC != ARM::GPRRegisterClass && RC != ARM::rGPRRegisterClass)
    return RC->getRawAllocationOrder(MF);

  if (HintType == ARMRI::RegPairEven) {
    if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return RC->getRawAllocationOrder(MF);

    if (!TFI->hasFP(MF)) {
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPREven1);
      else
        return ArrayRef<unsigned>(GPREven4);
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPREven2);
      else
        return ArrayRef<unsigned>(GPREven5);
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPREven3);
      else
        return ArrayRef<unsigned>(GPREven6);
    }
  } else if (HintType == ARMRI::RegPairOdd) {
    if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return RC->getRawAllocationOrder(MF);

    if (!TFI->hasFP(MF)) {
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPROdd1);
      else
        return ArrayRef<unsigned>(GPROdd4);
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPROdd2);
      else
        return ArrayRef<unsigned>(GPROdd5);
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPROdd3);
      else
        return ArrayRef<unsigned>(GPROdd6);
    }
  }
  return RC->getRawAllocationOrder(MF);
}

/// ResolveRegAllocHint - Resolves the specified register allocation hint
/// to a physical register. Returns the physical register if it is successful.
unsigned
ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
                                         const MachineFunction &MF) const {
  if (Reg == 0 || !isPhysicalRegister(Reg))
    return 0;
  if (Type == 0)
    return Reg;
  else if (Type == (unsigned)ARMRI::RegPairOdd)
    // Odd register.
    return getRegisterPairOdd(Reg, MF);
  else if (Type == (unsigned)ARMRI::RegPairEven)
    // Even register.
    return getRegisterPairEven(Reg, MF);
  return 0;
}

void
ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one of the even / odd register pair and it's now changed
    // (e.g. coalesced) into a different register, the other register of the
    // pair allocation hint must be updated to reflect the relationship
    // change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    if (Hint.second == Reg)
      // Make sure the pair has not already divorced.
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
  }
}

bool
ARMBaseRegisterInfo::avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
  // CortexA9 has a Write-after-write hazard for NEON registers.
  if (!STI.isCortexA9())
    return false;

  switch (RC->getID()) {
  case ARM::DPRRegClassID:
  case ARM::DPR_8RegClassID:
  case ARM::DPR_VFP2RegClassID:
  case ARM::QPRRegClassID:
  case ARM::QPR_8RegClassID:
  case ARM::QPR_VFP2RegClassID:
  case ARM::SPRRegClassID:
  case ARM::SPR_8RegClassID:
    // Avoid reusing S, D, and Q registers.
    // Don't increase register pressure for QQ and QQQQ.
    return true;
  default:
    return false;
  }
}

bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (!EnableBasePointer)
    return false;

  if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and thumb1 is positive offsets only.
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, the scavenger will still enable access to work, it just
    // won't be optimal.
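    // For example, a Thumb2 function whose pre-allocated locals occupy 200
    // bytes is assumed to need the base pointer, while one using fewer than
    // 128 bytes is assumed reachable from the frame pointer.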
    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
  // 3. There are VLAs in the function and the base pointer is disabled.
  return (RealignStack && !AFI->isThumb1OnlyFunction() &&
          (!MFI->hasVarSizedObjects() || EnableBasePointer));
}

bool ARMBaseRegisterInfo::
needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
  bool requiresRealignment = ((MFI->getLocalFrameMaxAlign() > StackAlign) ||
                              F->hasFnAttr(Attribute::StackAlignment));

  return requiresRealignment && canRealignStack(MF);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  if (DisableFramePointerElim(MF) && MFI->adjustsStack())
    return true;
  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
    || needsStackRealignment(MF);
}

unsigned ARMBaseRegisterInfo::getRARegister() const {
  return ARM::LR;
}

unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (TFI->hasFP(MF))
    return FramePtr;
  return ARM::SP;
}

unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

int ARMBaseRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
  return ARMGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
}

int ARMBaseRegisterInfo::getLLVMRegNum(unsigned DwarfRegNo, bool isEH) const {
  return ARMGenRegisterInfo::getLLVMRegNumFull(DwarfRegNo, 0);
}

unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
                                               const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R1:  return ARM::R0;
  case ARM::R3:  return ARM::R2;
  case ARM::R5:  return ARM::R4;
  case ARM::R7:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R6;
  case ARM::R9:  return isReservedReg(MF, ARM::R9)  ? 0 : ARM::R8;
  case ARM::R11: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;

  case ARM::S1:  return ARM::S0;
  case ARM::S3:  return ARM::S2;
  case ARM::S5:  return ARM::S4;
  case ARM::S7:  return ARM::S6;
  case ARM::S9:  return ARM::S8;
  case ARM::S11: return ARM::S10;
  case ARM::S13: return ARM::S12;
  case ARM::S15: return ARM::S14;
  case ARM::S17: return ARM::S16;
  case ARM::S19: return ARM::S18;
  case ARM::S21: return ARM::S20;
  case ARM::S23: return ARM::S22;
  case ARM::S25: return ARM::S24;
  case ARM::S27: return ARM::S26;
  case ARM::S29: return ARM::S28;
  case ARM::S31: return ARM::S30;

  case ARM::D1:  return ARM::D0;
  case ARM::D3:  return ARM::D2;
  case ARM::D5:  return ARM::D4;
  case ARM::D7:  return ARM::D6;
  case ARM::D9:  return ARM::D8;
  case ARM::D11: return ARM::D10;
  case ARM::D13: return ARM::D12;
  case ARM::D15: return ARM::D14;
  case ARM::D17: return ARM::D16;
  case ARM::D19: return ARM::D18;
  case ARM::D21: return ARM::D20;
  case ARM::D23: return ARM::D22;
  case ARM::D25: return ARM::D24;
  case ARM::D27: return ARM::D26;
  case ARM::D29: return ARM::D28;
  case ARM::D31: return ARM::D30;
  }

  return 0;
}

unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
                                               const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  // So no R12, etc.
  case ARM::R0:  return ARM::R1;
  case ARM::R2:  return ARM::R3;
  case ARM::R4:  return ARM::R5;
  case ARM::R6:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R7;
  case ARM::R8:  return isReservedReg(MF, ARM::R9)  ? 0 : ARM::R9;
  case ARM::R10: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;

  case ARM::S0:  return ARM::S1;
  case ARM::S2:  return ARM::S3;
  case ARM::S4:  return ARM::S5;
  case ARM::S6:  return ARM::S7;
  case ARM::S8:  return ARM::S9;
  case ARM::S10: return ARM::S11;
  case ARM::S12: return ARM::S13;
  case ARM::S14: return ARM::S15;
  case ARM::S16: return ARM::S17;
  case ARM::S18: return ARM::S19;
  case ARM::S20: return ARM::S21;
  case ARM::S22: return ARM::S23;
  case ARM::S24: return ARM::S25;
  case ARM::S26: return ARM::S27;
  case ARM::S28: return ARM::S29;
  case ARM::S30: return ARM::S31;

  case ARM::D0:  return ARM::D1;
  case ARM::D2:  return ARM::D3;
  case ARM::D4:  return ARM::D5;
  case ARM::D6:  return ARM::D7;
  case ARM::D8:  return ARM::D9;
  case ARM::D10: return ARM::D11;
  case ARM::D12: return ARM::D13;
  case ARM::D14: return ARM::D15;
  case ARM::D16: return ARM::D17;
  case ARM::D18: return ARM::D19;
  case ARM::D20: return ARM::D21;
  case ARM::D22: return ARM::D23;
  case ARM::D24: return ARM::D25;
  case ARM::D26: return ARM::D27;
  case ARM::D28: return ARM::D29;
  case ARM::D30: return ARM::D31;
  }

  return 0;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::
emitLoadConstPool(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator &MBBI,
                  DebugLoc dl,
                  unsigned DestReg, unsigned SubIdx, int Val,
                  ARMCC::CondCodes Pred,
                  unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
        ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
    .addReg(DestReg, getDefRegState(true), SubIdx)
    .addConstantPoolIndex(Idx)
    .addImm(0).addImm(Pred).addReg(PredReg)
    .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return EnableLocalStackAlloc;
}

static void
emitSPUpdate(bool isARM,
             MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
             DebugLoc dl, const ARMBaseInstrInfo &TII,
             int NumBytes,
             ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
  if (isARM)
    emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                            Pred, PredReg, TII);
  else
    emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                           Pred, PredReg, TII);
}

void ARMBaseRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    // ADJCALLSTACKDOWN -> sub, sp, sp, amount
    // ADJCALLSTACKUP   -> add, sp, sp, amount
    MachineInstr *Old = I;
    DebugLoc dl = Old->getDebugLoc();
    unsigned Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = TFI->getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;
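      // For example, with an 8-byte stack alignment an Amount of 20 rounds up
      // to (20 + 7) / 8 * 8 == 24 bytes.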

      ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
      assert(!AFI->isThumb1OnlyFunction() &&
             "This eliminateCallFramePseudoInstr does not support Thumb1!");
      bool isARM = !AFI->isThumbFunction();

      // Replace the pseudo instruction with a new instruction...
      unsigned Opc = Old->getOpcode();
      int PIdx = Old->findFirstPredOperandIdx();
      ARMCC::CondCodes Pred = (PIdx == -1)
        ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
      if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
        // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
        unsigned PredReg = Old->getOperand(2).getReg();
        emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
      } else {
        // Note: PredReg is operand 3 for ADJCALLSTACKUP.
        unsigned PredReg = Old->getOperand(3).getReg();
        assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
        emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
      }
    }
  }
  MBB.erase(I);
}

int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
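    // AM5 encodes its offset in words, so the raw immediate is scaled by 4
    // below (e.g. an encoded offset of 3 means 12 bytes).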
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrMode3: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrModeT1_s: {
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  }
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    if (ForceAllBaseRegAlloc)
      return true;
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer,
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
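  // For example, an incoming SP-relative offset of -16 in an ARM or Thumb2
  // function yields an estimated FP-relative offset of -16 - 8 - 80 = -104.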
  // Estimate an offset from the stack pointer.
  // The incoming offset is relating to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset = -Offset;
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots
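  // Continuing the example above: with a 64-byte local block the SP-relative
  // guess becomes 16 + 64 + 128 = 208 bytes.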

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, FPOffset))
      return false;
  }

  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
    return false;

  // The offset likely isn't legal, we want to allocate a virtual base register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);
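  // i.e. ADDri for ARM mode, tADDrSPi for Thumb1, and t2ADDri for Thumb2.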

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    AddDefaultCC(AddDefaultPred(MIB));
}

void
ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
                                       unsigned BaseReg, int64_t Offset) const {
  MachineInstr &MI = *I;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert(Done && "Unable to resolve frame index!");
}

bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = 5;
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
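  // For example, AddrMode5 (VLDRD/VSTRD) has NumBits == 8 and Scale == 4, so
  // any multiple of 4 up to 255 * 4 = 1020 bytes is considered encodable.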
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, RegScavenger *RS) const {
  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMFrameLowering *TFI =
    static_cast<const ARMFrameLowering*>(MF.getTarget().getFrameLowering());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(i).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(i+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
  }
}