//===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
15 #include "ARMAddressingModes.h"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMBaseRegisterInfo.h"
18 #include "ARMFrameLowering.h"
19 #include "ARMInstrInfo.h"
20 #include "ARMMachineFunctionInfo.h"
21 #include "ARMSubtarget.h"
22 #include "llvm/Constants.h"
23 #include "llvm/DerivedTypes.h"
24 #include "llvm/Function.h"
25 #include "llvm/LLVMContext.h"
26 #include "llvm/CodeGen/MachineConstantPool.h"
27 #include "llvm/CodeGen/MachineFrameInfo.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineInstrBuilder.h"
30 #include "llvm/CodeGen/MachineLocation.h"
31 #include "llvm/CodeGen/MachineRegisterInfo.h"
32 #include "llvm/CodeGen/RegisterScavenging.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/Target/TargetFrameLowering.h"
37 #include "llvm/Target/TargetMachine.h"
38 #include "llvm/Target/TargetOptions.h"
39 #include "llvm/ADT/BitVector.h"
40 #include "llvm/ADT/SmallVector.h"
41 #include "llvm/Support/CommandLine.h"
43 #define GET_REGINFO_MC_DESC
44 #define GET_REGINFO_TARGET_DESC
45 #include "ARMGenRegisterInfo.inc"
using namespace llvm;

static cl::opt<bool>
ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
          cl::desc("Force use of virtual base registers for stack load/store"));
static cl::opt<bool>
EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
          cl::desc("Enable pre-regalloc stack frame index allocation"));
static cl::opt<bool>
EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));
ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
                                         const ARMSubtarget &sti)
  : ARMGenRegisterInfo(), TII(tii), STI(sti),
    FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
    BasePtr(ARM::R6) {
}
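
/// getCalleeSavedRegs - Return the list of callee-saved registers for this
/// function. The Darwin ABI deviates from the standard ARM ABI in its save
/// order and does not treat R9 as callee-saved.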
const unsigned*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  static const unsigned CalleeSavedRegs[] = {
    ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
    ARM::R7, ARM::R6,  ARM::R5,  ARM::R4,

    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
    0
  };

  static const unsigned DarwinCalleeSavedRegs[] = {
    // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
    // register.
    ARM::LR,  ARM::R7,  ARM::R6, ARM::R5, ARM::R4,
    ARM::R11, ARM::R10, ARM::R8,

    ARM::D15, ARM::D14, ARM::D13, ARM::D12,
    ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
    0
  };
  return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
}
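
/// getReservedRegs - Mark the registers that are never available for
/// allocation: SP, PC and FPSCR, plus the frame pointer when one is used, the
/// base pointer when needed, R9 on subtargets that reserve it, and D16-D31
/// when the subtarget does not implement them.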
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  Reserved.set(ARM::SP);
  Reserved.set(ARM::PC);
  Reserved.set(ARM::FPSCR);
  if (TFI->hasFP(MF))
    Reserved.set(FramePtr);
  if (hasBasePointer(MF))
    Reserved.set(BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    Reserved.set(ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasVFP3() || STI.hasD16()) {
    assert(ARM::D31 == ARM::D16 + 15);
    for (unsigned i = 0; i != 16; ++i)
      Reserved.set(ARM::D16 + i);
  }
  return Reserved;
}
bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
                                        unsigned Reg) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (Reg) {
  default: break;
  case ARM::SP:
  case ARM::PC:
    return true;
  case ARM::R6:
    if (hasBasePointer(MF))
      return true;
    break;
  case ARM::R7:
  case ARM::R11:
    if (FramePtr == Reg && TFI->hasFP(MF))
      return true;
    break;
  case ARM::R9:
    return STI.isR9Reserved();
  }

  return false;
}
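
/// getMatchingSuperRegClass - Return the largest subclass of A whose SubIdx
/// sub-registers are all contained in B, or null to tell the coalescer not to
/// coalesce across this pair of register classes.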
const TargetRegisterClass *
ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                              const TargetRegisterClass *B,
                                              unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case ARM::ssub_0:
  case ARM::ssub_1:
  case ARM::ssub_2:
  case ARM::ssub_3: {
    // S sub-registers.
    if (A->getSize() == 8) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::DPR_8RegClass;
      assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
      if (A == &ARM::DPR_8RegClass)
        return A;
      return &ARM::DPR_VFP2RegClass;
    }

    if (A->getSize() == 16) {
      if (B == &ARM::SPR_8RegClass)
        return &ARM::QPR_8RegClass;
      return &ARM::QPR_VFP2RegClass;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::SPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return &ARM::QQPR_VFP2RegClass;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    return 0;  // Do not allow coalescing!
  }
  case ARM::dsub_0:
  case ARM::dsub_1:
  case ARM::dsub_2:
  case ARM::dsub_3: {
    // D sub-registers.
    if (A->getSize() == 16) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    if (A->getSize() == 32) {
      if (B == &ARM::DPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::DPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B != &ARM::DPRRegClass)
      return 0;  // Do not allow coalescing!
    return A;
  }
  case ARM::dsub_4:
  case ARM::dsub_5:
  case ARM::dsub_6:
  case ARM::dsub_7: {
    // D sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::DPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }

  case ARM::qsub_0:
  case ARM::qsub_1: {
    // Q sub-registers.
    if (A->getSize() == 32) {
      if (B == &ARM::QPR_VFP2RegClass)
        return &ARM::QQPR_VFP2RegClass;
      if (B == &ARM::QPR_8RegClass)
        return 0;  // Do not allow coalescing!
      return A;
    }

    assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
    if (B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  case ARM::qsub_2:
  case ARM::qsub_3: {
    // Q sub-registers of QQQQ registers.
    if (A->getSize() == 64 && B == &ARM::QPRRegClass)
      return A;
    return 0;  // Do not allow coalescing!
  }
  }
  return 0;
}
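
/// canCombineSubRegIndices - Given a register class and a list of sub-register
/// indices, return true if the indices together address one contiguous larger
/// register (e.g. dsub_0..dsub_7 covering a QQQQ register). NewSubIdx is set
/// to the covering sub-register index, or to 0 when the indices span the whole
/// register.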
bool
ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
                                          SmallVectorImpl<unsigned> &SubIndices,
                                             unsigned &NewSubIdx) const {

  unsigned Size = RC->getSize() * 8;

  NewSubIdx = 0;  // Whole register.
  unsigned NumRegs = SubIndices.size();
  if (NumRegs == 8) {
    // 8 D registers -> 1 QQQQ register.
    return (Size == 512 &&
            SubIndices[0] == ARM::dsub_0 &&
            SubIndices[1] == ARM::dsub_1 &&
            SubIndices[2] == ARM::dsub_2 &&
            SubIndices[3] == ARM::dsub_3 &&
            SubIndices[4] == ARM::dsub_4 &&
            SubIndices[5] == ARM::dsub_5 &&
            SubIndices[6] == ARM::dsub_6 &&
            SubIndices[7] == ARM::dsub_7);
  } else if (NumRegs == 4) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 4 Q registers -> 1 QQQQ register.
      return (Size == 512 &&
              SubIndices[1] == ARM::qsub_1 &&
              SubIndices[2] == ARM::qsub_2 &&
              SubIndices[3] == ARM::qsub_3);
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 4 D registers -> 1 QQ register.
      if (Size >= 256 &&
          SubIndices[1] == ARM::dsub_1 &&
          SubIndices[2] == ARM::dsub_2 &&
          SubIndices[3] == ARM::dsub_3) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 4 D registers -> 1 QQ register (2nd).
      if (Size == 512 &&
          SubIndices[1] == ARM::dsub_5 &&
          SubIndices[2] == ARM::dsub_6 &&
          SubIndices[3] == ARM::dsub_7) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 4 S registers -> 1 Q register.
      if (Size >= 128 &&
          SubIndices[1] == ARM::ssub_1 &&
          SubIndices[2] == ARM::ssub_2 &&
          SubIndices[3] == ARM::ssub_3) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    }
  } else if (NumRegs == 2) {
    if (SubIndices[0] == ARM::qsub_0) {
      // 2 Q registers -> 1 QQ register.
      if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
        if (Size == 512)
          NewSubIdx = ARM::qqsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::qsub_2) {
      // 2 Q registers -> 1 QQ register (2nd).
      if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
        NewSubIdx = ARM::qqsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_0) {
      // 2 D registers -> 1 Q register.
      if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
        if (Size >= 256)
          NewSubIdx = ARM::qsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_2) {
      // 2 D registers -> 1 Q register (2nd).
      if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
        NewSubIdx = ARM::qsub_1;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_4) {
      // 2 D registers -> 1 Q register (3rd).
      if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
        NewSubIdx = ARM::qsub_2;
        return true;
      }
    } else if (SubIndices[0] == ARM::dsub_6) {
      // 2 D registers -> 1 Q register (4th).
      if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
        NewSubIdx = ARM::qsub_3;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_0) {
      // 2 S registers -> 1 D register.
      if (SubIndices[1] == ARM::ssub_1) {
        if (Size >= 128)
          NewSubIdx = ARM::dsub_0;
        return true;
      }
    } else if (SubIndices[0] == ARM::ssub_2) {
      // 2 S registers -> 1 D register (2nd).
      if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
        NewSubIdx = ARM::dsub_1;
        return true;
      }
    }
  }
  return false;
}
const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)
                                                                         const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->superclasses_begin();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}
const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
  return ARM::GPRRegisterClass;
}
unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}
/// getRawAllocationOrder - Returns the register allocation order for a
/// specified register class with a target-dependent hint.
ArrayRef<unsigned>
ARMBaseRegisterInfo::getRawAllocationOrder(const TargetRegisterClass *RC,
                                           unsigned HintType, unsigned HintReg,
                                           const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  // Alternative register allocation orders when favoring even / odd registers
  // of register pairs.

  // No FP, R9 is available.
  static const unsigned GPREven1[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd1[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R7, R9 is available.
  static const unsigned GPREven2[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R8, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
    ARM::R9, ARM::R11
  };
  static const unsigned GPROdd2[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R9, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
    ARM::R8, ARM::R10
  };

  // FP is R11, R9 is available.
  static const unsigned GPREven3[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
    ARM::R9
  };
  static const unsigned GPROdd3[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
    ARM::R8
  };

  // No FP, R9 is not available.
  static const unsigned GPREven4[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd4[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R7, R9 is not available.
  static const unsigned GPREven5[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R10,
    ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
    ARM::R11
  };
  static const unsigned GPROdd5[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R11,
    ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
    ARM::R10
  };

  // FP is R11, R9 is not available.
  static const unsigned GPREven6[] = {
    ARM::R0, ARM::R2, ARM::R4, ARM::R6,
    ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
  };
  static const unsigned GPROdd6[] = {
    ARM::R1, ARM::R3, ARM::R5, ARM::R7,
    ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
  };

  // We only support even/odd hints for GPR and rGPR.
  if (RC != ARM::GPRRegisterClass && RC != ARM::rGPRRegisterClass)
    return RC->getRawAllocationOrder(MF);

  if (HintType == ARMRI::RegPairEven) {
    if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return RC->getRawAllocationOrder(MF);

    if (!TFI->hasFP(MF)) {
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPREven1);
      else
        return ArrayRef<unsigned>(GPREven4);
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPREven2);
      else
        return ArrayRef<unsigned>(GPREven5);
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPREven3);
      else
        return ArrayRef<unsigned>(GPREven6);
    }
  } else if (HintType == ARMRI::RegPairOdd) {
    if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
      // It's no longer possible to fulfill this hint. Return the default
      // allocation order.
      return RC->getRawAllocationOrder(MF);

    if (!TFI->hasFP(MF)) {
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPROdd1);
      else
        return ArrayRef<unsigned>(GPROdd4);
    } else if (FramePtr == ARM::R7) {
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPROdd2);
      else
        return ArrayRef<unsigned>(GPROdd5);
    } else { // FramePtr == ARM::R11
      if (!STI.isR9Reserved())
        return ArrayRef<unsigned>(GPROdd3);
      else
        return ArrayRef<unsigned>(GPROdd6);
    }
  }
  return RC->getRawAllocationOrder(MF);
}
/// ResolveRegAllocHint - Resolves the specified register allocation hint
/// to a physical register. Returns the physical register if it is successful.
unsigned
ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
                                         const MachineFunction &MF) const {
  if (Reg == 0 || !isPhysicalRegister(Reg))
    return 0;
  if (Type == 0)
    return Reg;
  else if (Type == (unsigned)ARMRI::RegPairOdd)
    return getRegisterPairOdd(Reg, MF);
  else if (Type == (unsigned)ARMRI::RegPairEven)
    return getRegisterPairEven(Reg, MF);
  return 0;
}
void
ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one of the even / odd register pair and it's now changed
    // (e.g. coalesced) into a different register, the allocation hint of the
    // other register of the pair must be updated to reflect the relationship
    // change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    if (Hint.second == Reg)
      // Make sure the pair has not already divorced.
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
  }
}
bool
ARMBaseRegisterInfo::avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
  // CortexA9 has a Write-after-write hazard for NEON registers.
  if (!STI.isCortexA9())
    return false;

  switch (RC->getID()) {
  case ARM::DPRRegClassID:
  case ARM::DPR_8RegClassID:
  case ARM::DPR_VFP2RegClassID:
  case ARM::QPRRegClassID:
  case ARM::QPR_8RegClassID:
  case ARM::QPR_VFP2RegClassID:
  case ARM::SPRRegClassID:
  case ARM::SPR_8RegClassID:
    // Avoid reusing S, D, and Q registers.
    // Don't increase register pressure for QQ and QQQQ.
    return true;
  default:
    return false;
  }
}
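
/// hasBasePointer - Return true when this function needs a dedicated base
/// pointer register to access its stack frame, e.g. when the stack must be
/// realigned and the frame also contains variable-sized objects, or when a
/// Thumb function with variable-sized objects has a frame too large to address
/// reliably through the frame pointer.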
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (!EnableBasePointer)
    return false;

  if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and thumb1 is positive offsets only.
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, the scavenger will still enable access to work, it just
    // won't be optimal.
    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}
bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
  // 3. There are VLAs in the function and the base pointer is disabled.
  return (RealignStack && !AFI->isThumb1OnlyFunction() &&
          (!MFI->hasVarSizedObjects() || EnableBasePointer));
}
bool ARMBaseRegisterInfo::
needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
  bool requiresRealignment = ((MFI->getLocalFrameMaxAlign() > StackAlign) ||
                              F->hasFnAttr(Attribute::StackAlignment));

  return requiresRealignment && canRealignStack(MF);
}
bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  if (DisableFramePointerElim(MF) && MFI->adjustsStack())
    return true;
  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
    || needsStackRealignment(MF);
}
unsigned ARMBaseRegisterInfo::getRARegister() const {
  return ARM::LR;
}
unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (TFI->hasFP(MF))
    return FramePtr;
  return ARM::SP;
}
unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}
int ARMBaseRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
  return ARMGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
}

int ARMBaseRegisterInfo::getLLVMRegNum(unsigned DwarfRegNo, bool isEH) const {
  return ARMGenRegisterInfo::getLLVMRegNumFull(DwarfRegNo, 0);
}
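
/// getRegisterPairEven / getRegisterPairOdd - Return the even (respectively
/// odd) register that forms a pair with Reg for the register-pair allocation
/// hints, or 0 when the pair cannot be used because one of its registers is
/// reserved.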
unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
                                              const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  case ARM::R7:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R6;
  case ARM::R9:
    return isReservedReg(MF, ARM::R9)  ? 0 : ARM::R8;
  case ARM::R11:
    return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;
  }

  return 0;
}
unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
                                             const MachineFunction &MF) const {
  switch (Reg) {
  default: break;
  // Return 0 if either register of the pair is a special register.
  case ARM::R6:
    return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
      ? 0 : ARM::R7;
  case ARM::R8:
    return isReservedReg(MF, ARM::R9)  ? 0 : ARM::R9;
  case ARM::R10:
    return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;
  }

  return 0;
}
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::
emitLoadConstPool(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator &MBBI,
                  DebugLoc dl,
                  unsigned DestReg, unsigned SubIdx, int Val,
                  ARMCC::CondCodes Pred,
                  unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
        ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
    .addReg(DestReg, getDefRegState(true), SubIdx)
    .addConstantPoolIndex(Idx)
    .addImm(0).addImm(Pred).addReg(PredReg)
    .setMIFlags(MIFlags);
}
bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return EnableLocalStackAlloc;
}
static void
emitSPUpdate(bool isARM,
             MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
             DebugLoc dl, const ARMBaseInstrInfo &TII,
             int NumBytes,
             ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
  if (isARM)
    emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                            Pred, PredReg, TII);
  else
    emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
                           Pred, PredReg, TII);
}
void ARMBaseRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    // ADJCALLSTACKDOWN -> sub, sp, sp, amount
    // ADJCALLSTACKUP   -> add, sp, sp, amount
    MachineInstr *Old = I;
    DebugLoc dl = Old->getDebugLoc();
    unsigned Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = TFI->getStackAlignment();
      Amount = (Amount+Align-1)/Align*Align;

      ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
      assert(!AFI->isThumb1OnlyFunction() &&
             "This eliminateCallFramePseudoInstr does not support Thumb1!");
      bool isARM = !AFI->isThumbFunction();

      // Replace the pseudo instruction with a new instruction...
      unsigned Opc = Old->getOpcode();
      int PIdx = Old->findFirstPredOperandIdx();
      ARMCC::CondCodes Pred = (PIdx == -1)
        ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
      if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
        // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
        unsigned PredReg = Old->getOperand(2).getReg();
        emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
      } else {
        // Note: PredReg is operand 3 for ADJCALLSTACKUP.
        unsigned PredReg = Old->getOperand(3).getReg();
        assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
        emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
      }
    }
  }
  MBB.erase(I);
}
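
/// getFrameIndexInstrOffset - Return the byte offset already encoded in the
/// frame-index operand of MI, taking the scaling of the instruction's
/// addressing mode into account.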
int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrMode3: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrModeT1_s: {
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  }
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  return InstrOffs * Scale;
}
/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    if (ForceAllBaseRegAlloc)
      return true;
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer,
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relating to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, FPOffset))
      return false;
  }

  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
    return false;

  // The offset likely isn't legal, we want to allocate a virtual base register.
  return true;
}
/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    AddDefaultCC(AddDefaultPred(MIB));
}
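
/// resolveFrameIndex - Rewrite the frame-index operand of the instruction at I
/// to be BaseReg plus Offset. The rewrite is expected to succeed, since the
/// offset was previously vetted by needsFrameBaseReg/isFrameOffsetLegal.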
void
ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
                                       unsigned BaseReg, int64_t Offset) const {
  MachineInstr &MI = *I;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert (Done && "Unable to resolve frame index!");
}
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = 5;
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
    break;
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}
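
/// eliminateFrameIndex - Replace the abstract frame-index operand of MI with
/// the actual frame register and offset. If the final offset does not fit in
/// the instruction's immediate field, the remainder is first materialized into
/// a scratch register.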
void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, RegScavenger *RS) const {
  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMFrameLowering *TFI =
    static_cast<const ARMFrameLowering*>(MF.getTarget().getFrameLowering());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(i).  ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(i+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
  }
}