//===-- ThumbRegisterInfo.cpp - Thumb-1 Register Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-1 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//
#include "ThumbRegisterInfo.h"
#include "ARMBaseInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
33 extern cl::opt
<bool> ReuseFrameIndexVals
;
38 ThumbRegisterInfo::ThumbRegisterInfo() = default;
40 const TargetRegisterClass
*
41 ThumbRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass
*RC
,
42 const MachineFunction
&MF
) const {
43 if (!MF
.getSubtarget
<ARMSubtarget
>().isThumb1Only())
44 return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC
, MF
);
46 if (ARM::tGPRRegClass
.hasSubClassEq(RC
))
47 return &ARM::tGPRRegClass
;
48 return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC
, MF
);
51 const TargetRegisterClass
*
52 ThumbRegisterInfo::getPointerRegClass(const MachineFunction
&MF
,
53 unsigned Kind
) const {
54 if (!MF
.getSubtarget
<ARMSubtarget
>().isThumb1Only())
55 return ARMBaseRegisterInfo::getPointerRegClass(MF
, Kind
);
56 return &ARM::tGPRRegClass
;
59 static void emitThumb1LoadConstPool(MachineBasicBlock
&MBB
,
60 MachineBasicBlock::iterator
&MBBI
,
61 const DebugLoc
&dl
, unsigned DestReg
,
62 unsigned SubIdx
, int Val
,
63 ARMCC::CondCodes Pred
, unsigned PredReg
,
65 MachineFunction
&MF
= *MBB
.getParent();
66 const ARMSubtarget
&STI
= MF
.getSubtarget
<ARMSubtarget
>();
67 const TargetInstrInfo
&TII
= *STI
.getInstrInfo();
68 MachineConstantPool
*ConstantPool
= MF
.getConstantPool();
69 const Constant
*C
= ConstantInt::get(
70 Type::getInt32Ty(MBB
.getParent()->getFunction().getContext()), Val
);
71 unsigned Idx
= ConstantPool
->getConstantPoolIndex(C
, Align(4));
73 BuildMI(MBB
, MBBI
, dl
, TII
.get(ARM::tLDRpci
))
74 .addReg(DestReg
, getDefRegState(true), SubIdx
)
75 .addConstantPoolIndex(Idx
).addImm(Pred
).addReg(PredReg
)
79 static void emitThumb2LoadConstPool(MachineBasicBlock
&MBB
,
80 MachineBasicBlock::iterator
&MBBI
,
81 const DebugLoc
&dl
, unsigned DestReg
,
82 unsigned SubIdx
, int Val
,
83 ARMCC::CondCodes Pred
, unsigned PredReg
,
85 MachineFunction
&MF
= *MBB
.getParent();
86 const TargetInstrInfo
&TII
= *MF
.getSubtarget().getInstrInfo();
87 MachineConstantPool
*ConstantPool
= MF
.getConstantPool();
88 const Constant
*C
= ConstantInt::get(
89 Type::getInt32Ty(MBB
.getParent()->getFunction().getContext()), Val
);
90 unsigned Idx
= ConstantPool
->getConstantPoolIndex(C
, Align(4));
92 BuildMI(MBB
, MBBI
, dl
, TII
.get(ARM::t2LDRpci
))
93 .addReg(DestReg
, getDefRegState(true), SubIdx
)
94 .addConstantPoolIndex(Idx
)
95 .add(predOps(ARMCC::AL
))
99 /// emitLoadConstPool - Emits a load from constpool to materialize the
100 /// specified immediate.
101 void ThumbRegisterInfo::emitLoadConstPool(
102 MachineBasicBlock
&MBB
, MachineBasicBlock::iterator
&MBBI
,
103 const DebugLoc
&dl
, Register DestReg
, unsigned SubIdx
, int Val
,
104 ARMCC::CondCodes Pred
, Register PredReg
, unsigned MIFlags
) const {
105 MachineFunction
&MF
= *MBB
.getParent();
106 const ARMSubtarget
&STI
= MF
.getSubtarget
<ARMSubtarget
>();
107 if (STI
.isThumb1Only()) {
108 assert((isARMLowRegister(DestReg
) || DestReg
.isVirtual()) &&
109 "Thumb1 does not have ldr to high register");
110 return emitThumb1LoadConstPool(MBB
, MBBI
, dl
, DestReg
, SubIdx
, Val
, Pred
,
113 return emitThumb2LoadConstPool(MBB
, MBBI
, dl
, DestReg
, SubIdx
, Val
, Pred
,
117 /// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize a
118 /// destreg = basereg + immediate in Thumb code. Materialize the immediate in a
119 /// register using mov / mvn (armv6-M >) sequences, movs / lsls / adds / lsls /
120 /// adds / lsls / adds sequences (armv6-M) or load the immediate from a
122 static void emitThumbRegPlusImmInReg(
123 MachineBasicBlock
&MBB
, MachineBasicBlock::iterator
&MBBI
,
124 const DebugLoc
&dl
, Register DestReg
, Register BaseReg
, int NumBytes
,
125 bool CanChangeCC
, const TargetInstrInfo
&TII
,
126 const ARMBaseRegisterInfo
&MRI
, unsigned MIFlags
= MachineInstr::NoFlags
) {
127 MachineFunction
&MF
= *MBB
.getParent();
128 const ARMSubtarget
&ST
= MF
.getSubtarget
<ARMSubtarget
>();
130 // Use a single sp-relative add if the immediate is small enough.
131 if (BaseReg
== ARM::SP
&&
132 (DestReg
.isVirtual() || isARMLowRegister(DestReg
)) && NumBytes
>= 0 &&
133 NumBytes
<= 1020 && (NumBytes
% 4) == 0) {
134 BuildMI(MBB
, MBBI
, dl
, TII
.get(ARM::tADDrSPi
), DestReg
)
136 .addImm(NumBytes
/ 4)
137 .add(predOps(ARMCC::AL
))
138 .setMIFlags(MIFlags
);
142 bool isHigh
= !isARMLowRegister(DestReg
) ||
143 (BaseReg
!= 0 && !isARMLowRegister(BaseReg
));
145 // Subtract doesn't have high register version. Load the negative value
146 // if either base or dest register is a high register. Also, if do not
147 // issue sub as part of the sequence if condition register is to be
149 if (NumBytes
< 0 && !isHigh
&& CanChangeCC
) {
151 NumBytes
= -NumBytes
;
153 Register LdReg
= DestReg
;
154 if (DestReg
== ARM::SP
)
155 assert(BaseReg
== ARM::SP
&& "Unexpected!");
156 if (!isARMLowRegister(DestReg
) && !DestReg
.isVirtual())
157 LdReg
= MF
.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass
);
159 if (NumBytes
<= 255 && NumBytes
>= 0 && CanChangeCC
) {
160 BuildMI(MBB
, MBBI
, dl
, TII
.get(ARM::tMOVi8
), LdReg
)
163 .setMIFlags(MIFlags
);
164 } else if (NumBytes
< 0 && NumBytes
>= -255 && CanChangeCC
) {
165 BuildMI(MBB
, MBBI
, dl
, TII
.get(ARM::tMOVi8
), LdReg
)
168 .setMIFlags(MIFlags
);
169 BuildMI(MBB
, MBBI
, dl
, TII
.get(ARM::tRSB
), LdReg
)
171 .addReg(LdReg
, RegState::Kill
)
172 .setMIFlags(MIFlags
);
173 } else if (ST
.genExecuteOnly()) {
175 BuildMI(MBB
, MBBI
, dl
, TII
.get(ARM::t2MOVi32imm
), LdReg
)
177 .setMIFlags(MIFlags
);
178 } else if (!CanChangeCC
) {
179 // tMOVi32imm is lowered to a sequence of flag-setting instructions, so
180 // if CPSR is live we need to save and restore CPSR around it.
181 // TODO Try inserting the tMOVi32imm at an earlier point, where CPSR is
183 bool LiveCpsr
= false, CpsrWrite
= false;
184 auto isCpsr
= [](auto &MO
) { return MO
.getReg() == ARM::CPSR
; };
185 for (auto Iter
= MBBI
; Iter
!= MBB
.instr_end(); ++Iter
) {
186 // If CPSR is used after this instruction (and there's not a def before
187 // that) then CPSR is live.
188 if (any_of(Iter
->all_uses(), isCpsr
)) {
192 if (any_of(Iter
->all_defs(), isCpsr
)) {
197 // If there's no use or def of CPSR then it may be live if it's a
199 auto liveOutIsCpsr
= [](auto &Out
) { return Out
.PhysReg
== ARM::CPSR
; };
200 if (!LiveCpsr
&& !CpsrWrite
)
201 LiveCpsr
= any_of(MBB
.liveouts(), liveOutIsCpsr
);
203 Register CPSRSaveReg
;
204 unsigned APSREncoding
;
206 CPSRSaveReg
= MF
.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass
);
208 ARMSysReg::lookupMClassSysRegByName("apsr_nzcvq")->Encoding
;
209 BuildMI(MBB
, MBBI
, dl
, TII
.get(ARM::t2MRS_M
), CPSRSaveReg
)
210 .addImm(APSREncoding
)
211 .add(predOps(ARMCC::AL
))
212 .addReg(ARM::CPSR
, RegState::Implicit
);
214 BuildMI(MBB
, MBBI
, dl
, TII
.get(ARM::tMOVi32imm
), LdReg
)
216 .setMIFlags(MIFlags
);
218 BuildMI(MBB
, MBBI
, dl
, TII
.get(ARM::t2MSR_M
))
219 .addImm(APSREncoding
)
220 .addReg(CPSRSaveReg
, RegState::Kill
)
221 .add(predOps(ARMCC::AL
));
224 BuildMI(MBB
, MBBI
, dl
, TII
.get(ARM::tMOVi32imm
), LdReg
)
226 .setMIFlags(MIFlags
);
229 MRI
.emitLoadConstPool(MBB
, MBBI
, dl
, LdReg
, 0, NumBytes
, ARMCC::AL
, 0,
233 int Opc
= (isSub
) ? ARM::tSUBrr
234 : ((isHigh
|| !CanChangeCC
) ? ARM::tADDhirr
: ARM::tADDrr
);
235 MachineInstrBuilder MIB
= BuildMI(MBB
, MBBI
, dl
, TII
.get(Opc
), DestReg
);
236 if (Opc
!= ARM::tADDhirr
)
237 MIB
= MIB
.add(t1CondCodeOp());
238 if (DestReg
== ARM::SP
|| isSub
)
239 MIB
.addReg(BaseReg
).addReg(LdReg
, RegState::Kill
);
241 MIB
.addReg(LdReg
).addReg(BaseReg
, RegState::Kill
);
242 MIB
.add(predOps(ARMCC::AL
));
245 /// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
246 /// a destreg = basereg + immediate in Thumb code. Tries a series of ADDs or
247 /// SUBs first, and uses a constant pool value if the instruction sequence would
248 /// be too long. This is allowed to modify the condition flags.
249 void llvm::emitThumbRegPlusImmediate(MachineBasicBlock
&MBB
,
250 MachineBasicBlock::iterator
&MBBI
,
251 const DebugLoc
&dl
, Register DestReg
,
252 Register BaseReg
, int NumBytes
,
253 const TargetInstrInfo
&TII
,
254 const ARMBaseRegisterInfo
&MRI
,
256 bool isSub
= NumBytes
< 0;
257 unsigned Bytes
= (unsigned)NumBytes
;
258 if (isSub
) Bytes
= -NumBytes
;
261 unsigned CopyBits
= 0;
262 unsigned CopyScale
= 1;
263 bool CopyNeedsCC
= false;
265 unsigned ExtraBits
= 0;
266 unsigned ExtraScale
= 1;
267 bool ExtraNeedsCC
= false;
270 // We need to select two types of instruction, maximizing the available
271 // immediate range of each. The instructions we use will depend on whether
272 // DestReg and BaseReg are low, high or the stack pointer.
273 // * CopyOpc - DestReg = BaseReg + imm
274 // This will be emitted once if DestReg != BaseReg, and never if
275 // DestReg == BaseReg.
276 // * ExtraOpc - DestReg = DestReg + imm
277 // This will be emitted as many times as necessary to add the
279 // If the immediate ranges of these instructions are not large enough to cover
280 // NumBytes with a reasonable number of instructions, we fall back to using a
281 // value loaded from a constant pool.
282 if (DestReg
== ARM::SP
) {
283 if (BaseReg
== ARM::SP
) {
285 // Already in right reg, no copy needed
287 // low -> sp or high -> sp
288 CopyOpc
= ARM::tMOVr
;
291 ExtraOpc
= isSub
? ARM::tSUBspi
: ARM::tADDspi
;
294 } else if (isARMLowRegister(DestReg
)) {
295 if (BaseReg
== ARM::SP
) {
297 assert(!isSub
&& "Thumb1 does not have tSUBrSPi");
298 CopyOpc
= ARM::tADDrSPi
;
301 } else if (DestReg
== BaseReg
) {
303 // Already in right reg, no copy needed
304 } else if (isARMLowRegister(BaseReg
)) {
305 // low -> different low
306 CopyOpc
= isSub
? ARM::tSUBi3
: ARM::tADDi3
;
311 CopyOpc
= ARM::tMOVr
;
314 ExtraOpc
= isSub
? ARM::tSUBi8
: ARM::tADDi8
;
317 } else /* DestReg is high */ {
318 if (DestReg
== BaseReg
) {
320 // Already in right reg, no copy needed
322 // {low,high,sp} -> high
323 CopyOpc
= ARM::tMOVr
;
329 // We could handle an unaligned immediate with an unaligned copy instruction
330 // and an aligned extra instruction, but this case is not currently needed.
331 assert(((Bytes
& 3) == 0 || ExtraScale
== 1) &&
332 "Unaligned offset, but all instructions require alignment");
334 unsigned CopyRange
= ((1 << CopyBits
) - 1) * CopyScale
;
335 // If we would emit the copy with an immediate of 0, just use tMOVr.
336 if (CopyOpc
&& Bytes
< CopyScale
) {
337 CopyOpc
= ARM::tMOVr
;
342 unsigned ExtraRange
= ((1 << ExtraBits
) - 1) * ExtraScale
; // per instruction
343 unsigned RequiredCopyInstrs
= CopyOpc
? 1 : 0;
344 unsigned RangeAfterCopy
= (CopyRange
> Bytes
) ? 0 : (Bytes
- CopyRange
);
346 // We could handle this case when the copy instruction does not require an
347 // aligned immediate, but we do not currently do this.
348 assert(RangeAfterCopy
% ExtraScale
== 0 &&
349 "Extra instruction requires immediate to be aligned");
351 unsigned RequiredExtraInstrs
;
353 RequiredExtraInstrs
= alignTo(RangeAfterCopy
, ExtraRange
) / ExtraRange
;
354 else if (RangeAfterCopy
> 0)
355 // We need an extra instruction but none is available
356 RequiredExtraInstrs
= 1000000;
358 RequiredExtraInstrs
= 0;
359 unsigned RequiredInstrs
= RequiredCopyInstrs
+ RequiredExtraInstrs
;
360 unsigned Threshold
= (DestReg
== ARM::SP
) ? 3 : 2;
362 // Use a constant pool, if the sequence of ADDs/SUBs is too expensive.
363 if (RequiredInstrs
> Threshold
) {
364 emitThumbRegPlusImmInReg(MBB
, MBBI
, dl
,
365 DestReg
, BaseReg
, NumBytes
, true,
370 // Emit zero or one copy instructions
372 unsigned CopyImm
= std::min(Bytes
, CopyRange
) / CopyScale
;
373 Bytes
-= CopyImm
* CopyScale
;
375 MachineInstrBuilder MIB
= BuildMI(MBB
, MBBI
, dl
, TII
.get(CopyOpc
), DestReg
);
377 MIB
= MIB
.add(t1CondCodeOp());
378 MIB
.addReg(BaseReg
, RegState::Kill
);
379 if (CopyOpc
!= ARM::tMOVr
) {
382 MIB
.setMIFlags(MIFlags
).add(predOps(ARMCC::AL
));
387 // Emit zero or more in-place add/sub instructions
389 unsigned ExtraImm
= std::min(Bytes
, ExtraRange
) / ExtraScale
;
390 Bytes
-= ExtraImm
* ExtraScale
;
392 MachineInstrBuilder MIB
= BuildMI(MBB
, MBBI
, dl
, TII
.get(ExtraOpc
), DestReg
);
394 MIB
= MIB
.add(t1CondCodeOp());
397 .add(predOps(ARMCC::AL
))
398 .setMIFlags(MIFlags
);
402 static void removeOperands(MachineInstr
&MI
, unsigned i
) {
404 for (unsigned e
= MI
.getNumOperands(); i
!= e
; ++i
)
405 MI
.removeOperand(Op
);
408 /// convertToNonSPOpcode - Change the opcode to the non-SP version, because
409 /// we're replacing the frame index with a non-SP register.
410 static unsigned convertToNonSPOpcode(unsigned Opcode
) {
422 bool ThumbRegisterInfo::rewriteFrameIndex(MachineBasicBlock::iterator II
,
423 unsigned FrameRegIdx
,
424 Register FrameReg
, int &Offset
,
425 const ARMBaseInstrInfo
&TII
) const {
426 MachineInstr
&MI
= *II
;
427 MachineBasicBlock
&MBB
= *MI
.getParent();
428 MachineFunction
&MF
= *MBB
.getParent();
429 assert(MBB
.getParent()->getSubtarget
<ARMSubtarget
>().isThumb1Only() &&
430 "This isn't needed for thumb2!");
431 DebugLoc dl
= MI
.getDebugLoc();
432 MachineInstrBuilder
MIB(*MBB
.getParent(), &MI
);
433 unsigned Opcode
= MI
.getOpcode();
434 const MCInstrDesc
&Desc
= MI
.getDesc();
435 unsigned AddrMode
= (Desc
.TSFlags
& ARMII::AddrModeMask
);
437 if (Opcode
== ARM::tADDframe
) {
438 Offset
+= MI
.getOperand(FrameRegIdx
+1).getImm();
439 Register DestReg
= MI
.getOperand(0).getReg();
441 emitThumbRegPlusImmediate(MBB
, II
, dl
, DestReg
, FrameReg
, Offset
, TII
,
446 if (AddrMode
!= ARMII::AddrModeT1_s
)
447 llvm_unreachable("Unsupported addressing mode!");
449 unsigned ImmIdx
= FrameRegIdx
+ 1;
450 int InstrOffs
= MI
.getOperand(ImmIdx
).getImm();
451 unsigned NumBits
= (FrameReg
== ARM::SP
) ? 8 : 5;
454 Offset
+= InstrOffs
* Scale
;
455 assert((Offset
& (Scale
- 1)) == 0 && "Can't encode this offset!");
457 // Common case: small offset, fits into instruction.
458 MachineOperand
&ImmOp
= MI
.getOperand(ImmIdx
);
459 int ImmedOffset
= Offset
/ Scale
;
460 unsigned Mask
= (1 << NumBits
) - 1;
462 if ((unsigned)Offset
<= Mask
* Scale
) {
463 // Replace the FrameIndex with the frame register (e.g., sp).
464 Register DestReg
= FrameReg
;
466 // In case FrameReg is a high register, move it to a low reg to ensure it
467 // can be used as an operand.
468 if (ARM::hGPRRegClass
.contains(FrameReg
) && FrameReg
!= ARM::SP
) {
469 DestReg
= MF
.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass
);
470 BuildMI(MBB
, II
, dl
, TII
.get(ARM::tMOVr
), DestReg
)
472 .add(predOps(ARMCC::AL
));
475 MI
.getOperand(FrameRegIdx
).ChangeToRegister(DestReg
, false);
476 ImmOp
.ChangeToImmediate(ImmedOffset
);
478 // If we're using a register where sp was stored, convert the instruction
479 // to the non-SP version.
480 unsigned NewOpc
= convertToNonSPOpcode(Opcode
);
481 if (NewOpc
!= Opcode
&& FrameReg
!= ARM::SP
)
482 MI
.setDesc(TII
.get(NewOpc
));
487 // The offset doesn't fit, but we may be able to put some of the offset into
488 // the ldr to simplify the generation of the rest of it.
490 Mask
= (1 << NumBits
) - 1;
492 auto &ST
= MF
.getSubtarget
<ARMSubtarget
>();
493 // If using the maximum ldr offset will put the rest into the range of a
494 // single sp-relative add then do so.
495 if (FrameReg
== ARM::SP
&& Offset
- (Mask
* Scale
) <= 1020) {
497 } else if (ST
.genExecuteOnly()) {
498 // With execute-only the offset is generated either with movw+movt or an
499 // add+lsl sequence. If subtracting an offset will make the top half zero
500 // then that saves a movt or lsl+add. Otherwise if we don't have movw then
501 // we may be able to subtract a value such that it makes the bottom byte
502 // zero, saving an add.
503 unsigned BottomBits
= (Offset
/ Scale
) & Mask
;
504 bool CanMakeBottomByteZero
= ((Offset
- BottomBits
* Scale
) & 0xff) == 0;
505 bool TopHalfZero
= (Offset
& 0xffff0000) == 0;
506 bool CanMakeTopHalfZero
= ((Offset
- Mask
* Scale
) & 0xffff0000) == 0;
507 if (!TopHalfZero
&& CanMakeTopHalfZero
)
509 else if (!ST
.useMovt() && CanMakeBottomByteZero
)
510 InstrOffs
= BottomBits
;
512 ImmOp
.ChangeToImmediate(InstrOffs
);
513 Offset
-= InstrOffs
* Scale
;
519 void ThumbRegisterInfo::resolveFrameIndex(MachineInstr
&MI
, Register BaseReg
,
520 int64_t Offset
) const {
521 const MachineFunction
&MF
= *MI
.getParent()->getParent();
522 const ARMSubtarget
&STI
= MF
.getSubtarget
<ARMSubtarget
>();
523 if (!STI
.isThumb1Only())
524 return ARMBaseRegisterInfo::resolveFrameIndex(MI
, BaseReg
, Offset
);
526 const ARMBaseInstrInfo
&TII
= *STI
.getInstrInfo();
527 int Off
= Offset
; // ARM doesn't need the general 64-bit offsets
530 while (!MI
.getOperand(i
).isFI()) {
532 assert(i
< MI
.getNumOperands() && "Instr doesn't have FrameIndex operand!");
534 bool Done
= rewriteFrameIndex(MI
, i
, BaseReg
, Off
, TII
);
535 assert (Done
&& "Unable to resolve frame index!");
539 bool ThumbRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II
,
540 int SPAdj
, unsigned FIOperandNum
,
541 RegScavenger
*RS
) const {
542 MachineInstr
&MI
= *II
;
543 MachineBasicBlock
&MBB
= *MI
.getParent();
544 MachineFunction
&MF
= *MBB
.getParent();
545 const ARMSubtarget
&STI
= MF
.getSubtarget
<ARMSubtarget
>();
546 if (!STI
.isThumb1Only())
547 return ARMBaseRegisterInfo::eliminateFrameIndex(II
, SPAdj
, FIOperandNum
,
551 const ARMBaseInstrInfo
&TII
= *STI
.getInstrInfo();
552 DebugLoc dl
= MI
.getDebugLoc();
553 MachineInstrBuilder
MIB(*MBB
.getParent(), &MI
);
556 int FrameIndex
= MI
.getOperand(FIOperandNum
).getIndex();
557 const ARMFrameLowering
*TFI
= getFrameLowering(MF
);
558 int Offset
= TFI
->ResolveFrameIndexReference(MF
, FrameIndex
, FrameReg
, SPAdj
);
560 // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
561 // call frame setup/destroy instructions have already been eliminated. That
562 // means the stack pointer cannot be used to access the emergency spill slot
563 // when !hasReservedCallFrame().
565 if (RS
&& FrameReg
== ARM::SP
&& RS
->isScavengingFrameIndex(FrameIndex
)){
566 assert(STI
.getFrameLowering()->hasReservedCallFrame(MF
) &&
567 "Cannot use SP to access the emergency spill slot in "
568 "functions without a reserved call frame");
569 assert(!MF
.getFrameInfo().hasVarSizedObjects() &&
570 "Cannot use SP to access the emergency spill slot in "
571 "functions with variable sized frame objects");
575 // Special handling of dbg_value instructions.
576 if (MI
.isDebugValue()) {
577 MI
.getOperand(FIOperandNum
). ChangeToRegister(FrameReg
, false /*isDef*/);
578 MI
.getOperand(FIOperandNum
+1).ChangeToImmediate(Offset
);
582 // Modify MI as necessary to handle as much of 'Offset' as possible
583 assert(MF
.getInfo
<ARMFunctionInfo
>()->isThumbFunction() &&
584 "This eliminateFrameIndex only supports Thumb1!");
585 if (rewriteFrameIndex(MI
, FIOperandNum
, FrameReg
, Offset
, TII
))
588 // If we get here, the immediate doesn't fit into the instruction. We folded
589 // as much as possible above, handle the rest, providing a register that is
591 assert(Offset
&& "This code isn't needed if offset already handled!");
593 unsigned Opcode
= MI
.getOpcode();
595 // Remove predicate first.
596 int PIdx
= MI
.findFirstPredOperandIdx();
598 removeOperands(MI
, PIdx
);
601 // Use the destination register to materialize sp + offset.
602 Register TmpReg
= MI
.getOperand(0).getReg();
604 if (Opcode
== ARM::tLDRspi
) {
605 if (FrameReg
== ARM::SP
|| STI
.genExecuteOnly())
606 emitThumbRegPlusImmInReg(MBB
, II
, dl
, TmpReg
, FrameReg
,
607 Offset
, false, TII
, *this);
609 emitLoadConstPool(MBB
, II
, dl
, TmpReg
, 0, Offset
);
610 if (!ARM::hGPRRegClass
.contains(FrameReg
)) {
613 // If FrameReg is a high register, add the reg values in a separate
614 // instruction as the load won't be able to access it.
615 BuildMI(MBB
, II
, dl
, TII
.get(ARM::tADDhirr
), TmpReg
)
618 .add(predOps(ARMCC::AL
));
622 emitThumbRegPlusImmediate(MBB
, II
, dl
, TmpReg
, FrameReg
, Offset
, TII
,
626 MI
.setDesc(TII
.get(UseRR
? ARM::tLDRr
: ARM::tLDRi
));
627 MI
.getOperand(FIOperandNum
).ChangeToRegister(TmpReg
, false, false, true);
629 assert(!ARM::hGPRRegClass
.contains(FrameReg
) &&
630 "Thumb1 loads can't use high register");
631 // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
632 // register. The offset is already handled in the vreg value.
633 MI
.getOperand(FIOperandNum
+1).ChangeToRegister(FrameReg
, false, false,
636 } else if (MI
.mayStore()) {
637 VReg
= MF
.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass
);
640 if (Opcode
== ARM::tSTRspi
) {
641 if (FrameReg
== ARM::SP
|| STI
.genExecuteOnly())
642 emitThumbRegPlusImmInReg(MBB
, II
, dl
, VReg
, FrameReg
,
643 Offset
, false, TII
, *this);
645 emitLoadConstPool(MBB
, II
, dl
, VReg
, 0, Offset
);
646 if (!ARM::hGPRRegClass
.contains(FrameReg
)) {
649 // If FrameReg is a high register, add the reg values in a separate
650 // instruction as the load won't be able to access it.
651 BuildMI(MBB
, II
, dl
, TII
.get(ARM::tADDhirr
), VReg
)
654 .add(predOps(ARMCC::AL
));
658 emitThumbRegPlusImmediate(MBB
, II
, dl
, VReg
, FrameReg
, Offset
, TII
,
660 MI
.setDesc(TII
.get(UseRR
? ARM::tSTRr
: ARM::tSTRi
));
661 MI
.getOperand(FIOperandNum
).ChangeToRegister(VReg
, false, false, true);
663 assert(!ARM::hGPRRegClass
.contains(FrameReg
) &&
664 "Thumb1 stores can't use high register");
665 // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
666 // register. The offset is already handled in the vreg value.
667 MI
.getOperand(FIOperandNum
+1).ChangeToRegister(FrameReg
, false, false,
671 llvm_unreachable("Unexpected opcode!");
674 // Add predicate back if it's needed.
675 if (MI
.isPredicable())
676 MIB
.add(predOps(ARMCC::AL
));
681 ThumbRegisterInfo::useFPForScavengingIndex(const MachineFunction
&MF
) const {
682 if (MF
.getSubtarget
<ARMSubtarget
>().isThumb1Only()) {
683 // For Thumb1, the emergency spill slot must be some small positive
684 // offset from the base/stack pointer.
687 // For Thumb2, put the emergency spill slot next to FP.