//===-- ThumbRegisterInfo.cpp - Thumb-1 Register Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-1 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

14 #include "ThumbRegisterInfo.h"
15 #include "ARMBaseInstrInfo.h"
16 #include "ARMMachineFunctionInfo.h"
17 #include "ARMSubtarget.h"
18 #include "MCTargetDesc/ARMAddressingModes.h"
19 #include "llvm/CodeGen/MachineConstantPool.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/CodeGen/RegisterScavenging.h"
25 #include "llvm/IR/Constants.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/LLVMContext.h"
29 #include "llvm/Support/CommandLine.h"
30 #include "llvm/Support/ErrorHandling.h"
31 #include "llvm/CodeGen/TargetFrameLowering.h"
32 #include "llvm/Target/TargetMachine.h"
namespace llvm {
extern cl::opt<bool> ReuseFrameIndexVals;
}

using namespace llvm;

ThumbRegisterInfo::ThumbRegisterInfo() : ARMBaseRegisterInfo() {}

const TargetRegisterClass *
ThumbRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &MF) const {
  if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
    return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);

  if (ARM::tGPRRegClass.hasSubClassEq(RC))
    return &ARM::tGPRRegClass;
  return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
}

const TargetRegisterClass *
ThumbRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                      unsigned Kind) const {
  if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
    return ARMBaseRegisterInfo::getPointerRegClass(MF, Kind);
  return &ARM::tGPRRegClass;
}

static void emitThumb1LoadConstPool(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    const DebugLoc &dl, unsigned DestReg,
                                    unsigned SubIdx, int Val,
                                    ARMCC::CondCodes Pred, unsigned PredReg,
                                    unsigned MIFlags) {
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C = ConstantInt::get(
      Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(Pred)
      .addReg(PredReg)
      .setMIFlags(MIFlags);
}

static void emitThumb2LoadConstPool(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    const DebugLoc &dl, unsigned DestReg,
                                    unsigned SubIdx, int Val,
                                    ARMCC::CondCodes Pred, unsigned PredReg,
                                    unsigned MIFlags) {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C = ConstantInt::get(
      Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .add(predOps(ARMCC::AL))
      .setMIFlags(MIFlags);
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
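/// On Thumb1 targets this expands to a tLDRpci of a constant-pool entry (see
/// emitThumb1LoadConstPool above); on Thumb2 targets it uses t2LDRpci instead.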
void ThumbRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (STI.isThumb1Only()) {
    assert(
        (isARMLowRegister(DestReg) || Register::isVirtualRegister(DestReg)) &&
        "Thumb1 does not have ldr to high register");
    return emitThumb1LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
                                   PredReg, MIFlags);
  }
  return emitThumb2LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
                                 PredReg, MIFlags);
}

/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
/// in a register using mov / mvn sequences or load the immediate from a
/// constpool entry.
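///
/// For example (illustrative register choice), "r0 = r1 - 200" with
/// clobberable flags is emitted as:
///   movs r0, #200     ; materialize the magnitude in LdReg (here the dest)
///   subs r0, r1, r0   ; tSUBrr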
static void emitThumbRegPlusImmInReg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, unsigned DestReg, unsigned BaseReg, int NumBytes,
    bool CanChangeCC, const TargetInstrInfo &TII,
    const ARMBaseRegisterInfo &MRI, unsigned MIFlags = MachineInstr::NoFlags) {
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &ST = MF.getSubtarget<ARMSubtarget>();
  bool isHigh = !isARMLowRegister(DestReg) ||
                (BaseReg != 0 && !isARMLowRegister(BaseReg));
  bool isSub = false;
  // Subtract doesn't have a high register version. Load the negative value
  // if either the base or dest register is a high register. Also, do not
  // issue sub as part of the sequence if the condition register is to be
  // preserved.
  if (NumBytes < 0 && !isHigh && CanChangeCC) {
    isSub = true;
    NumBytes = -NumBytes;
  }
  unsigned LdReg = DestReg;
  if (DestReg == ARM::SP)
    assert(BaseReg == ARM::SP && "Unexpected!");
  if (!isARMLowRegister(DestReg) && !Register::isVirtualRegister(DestReg))
    LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);

  if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg)
        .add(t1CondCodeOp())
        .addImm(NumBytes)
        .setMIFlags(MIFlags);
  } else if (NumBytes < 0 && NumBytes >= -255 && CanChangeCC) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg)
        .add(t1CondCodeOp())
        .addImm(NumBytes)
        .setMIFlags(MIFlags);
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg)
        .add(t1CondCodeOp())
        .addReg(LdReg, RegState::Kill)
        .setMIFlags(MIFlags);
  } else if (ST.genExecuteOnly()) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi32imm), LdReg)
        .addImm(NumBytes).setMIFlags(MIFlags);
  } else
    MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes, ARMCC::AL, 0,
                          MIFlags);

  // Emit add / sub.
  int Opc = (isSub) ? ARM::tSUBrr
                    : ((isHigh || !CanChangeCC) ? ARM::tADDhirr : ARM::tADDrr);
  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
  if (Opc != ARM::tADDhirr)
    MIB = MIB.add(t1CondCodeOp());
  if (DestReg == ARM::SP || isSub)
    MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
  else
    MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
  MIB.add(predOps(ARMCC::AL));
}

/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Tries a series of ADDs or
/// SUBs first, and uses a constant pool value if the instruction sequence would
/// be too long. This is allowed to modify the condition flags.
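///
/// For example (illustrative; a low destination register is assumed),
/// "r0 = sp + 1024" is emitted as one copy plus one in-place add:
///   add  r0, sp, #1020   ; tADDrSPi, 8-bit immediate scaled by 4
///   adds r0, #4          ; tADDi8
/// Offsets that would need more instructions than the threshold computed
/// below are materialized through emitThumbRegPlusImmInReg instead.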
void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     const DebugLoc &dl, unsigned DestReg,
                                     unsigned BaseReg, int NumBytes,
                                     const TargetInstrInfo &TII,
                                     const ARMBaseRegisterInfo &MRI,
                                     unsigned MIFlags) {
  bool isSub = NumBytes < 0;
  unsigned Bytes = (unsigned)NumBytes;
  if (isSub) Bytes = -NumBytes;

  int CopyOpc = 0;
  unsigned CopyBits = 0;
  unsigned CopyScale = 1;
  bool CopyNeedsCC = false;
  int ExtraOpc = 0;
  unsigned ExtraBits = 0;
  unsigned ExtraScale = 1;
  bool ExtraNeedsCC = false;

  // Strategy:
  // We need to select two types of instruction, maximizing the available
  // immediate range of each. The instructions we use will depend on whether
  // DestReg and BaseReg are low, high or the stack pointer.
  // * CopyOpc  - DestReg = BaseReg + imm
  //              This will be emitted once if DestReg != BaseReg, and never if
  //              DestReg == BaseReg.
  // * ExtraOpc - DestReg = DestReg + imm
  //              This will be emitted as many times as necessary to add the
  //              required number of bytes to DestReg.
  //
  // If the immediate ranges of these instructions are not large enough to cover
  // NumBytes with a reasonable number of instructions, we fall back to using a
  // value loaded from a constant pool.
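  //
  // Immediate ranges of the candidate instructions (bits * scale):
  //   tADDspi / tSUBspi : 7-bit immediate, scaled by 4  (0..508)
  //   tADDrSPi          : 8-bit immediate, scaled by 4  (0..1020)
  //   tADDi3 / tSUBi3   : 3-bit immediate               (0..7)
  //   tADDi8 / tSUBi8   : 8-bit immediate               (0..255)
  //   tMOVr             : plain copy, no immediate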
  if (DestReg == ARM::SP) {
    if (BaseReg == ARM::SP) {
      // sp -> sp
      // Already in right reg, no copy needed
    } else {
      // low -> sp or high -> sp
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = isSub ? ARM::tSUBspi : ARM::tADDspi;
    ExtraBits = 7;
    ExtraScale = 4;
  } else if (isARMLowRegister(DestReg)) {
    if (BaseReg == ARM::SP) {
      // sp -> low
      assert(!isSub && "Thumb1 does not have tSUBrSPi");
      CopyOpc = ARM::tADDrSPi;
      CopyBits = 8;
      CopyScale = 4;
    } else if (DestReg == BaseReg) {
      // low -> same low
      // Already in right reg, no copy needed
    } else if (isARMLowRegister(BaseReg)) {
      // low -> different low
      CopyOpc = isSub ? ARM::tSUBi3 : ARM::tADDi3;
      CopyBits = 3;
      CopyNeedsCC = true;
    } else {
      // high -> low
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
    ExtraBits = 8;
    ExtraNeedsCC = true;
  } else /* DestReg is high */ {
    if (DestReg == BaseReg) {
      // high -> same high
      // Already in right reg, no copy needed
    } else {
      // {low,high,sp} -> high
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = 0;
  }

  // We could handle an unaligned immediate with an unaligned copy instruction
  // and an aligned extra instruction, but this case is not currently needed.
  assert(((Bytes & 3) == 0 || ExtraScale == 1) &&
         "Unaligned offset, but all instructions require alignment");

  unsigned CopyRange = ((1 << CopyBits) - 1) * CopyScale;
  // If we would emit the copy with an immediate of 0, just use tMOVr.
  if (CopyOpc && Bytes < CopyScale) {
    CopyOpc = ARM::tMOVr;
    CopyScale = 1;
    CopyNeedsCC = false;
    CopyRange = 0;
  }
  unsigned ExtraRange = ((1 << ExtraBits) - 1) * ExtraScale; // per instruction
  unsigned RequiredCopyInstrs = CopyOpc ? 1 : 0;
  unsigned RangeAfterCopy = (CopyRange > Bytes) ? 0 : (Bytes - CopyRange);

  // We could handle this case when the copy instruction does not require an
  // aligned immediate, but we do not currently do this.
  assert(RangeAfterCopy % ExtraScale == 0 &&
         "Extra instruction requires immediate to be aligned");

  unsigned RequiredExtraInstrs;
  if (ExtraRange)
    RequiredExtraInstrs = alignTo(RangeAfterCopy, ExtraRange) / ExtraRange;
  else if (RangeAfterCopy > 0)
    // We need an extra instruction but none is available
    RequiredExtraInstrs = 1000000;
  else
    RequiredExtraInstrs = 0;
  unsigned RequiredInstrs = RequiredCopyInstrs + RequiredExtraInstrs;
  unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;

  // Use a constant pool, if the sequence of ADDs/SUBs is too expensive.
  if (RequiredInstrs > Threshold) {
    emitThumbRegPlusImmInReg(MBB, MBBI, dl,
                             DestReg, BaseReg, NumBytes, true,
                             TII, MRI, MIFlags);
    return;
  }

  // Emit zero or one copy instructions
  if (CopyOpc) {
    unsigned CopyImm = std::min(Bytes, CopyRange) / CopyScale;
    Bytes -= CopyImm * CopyScale;

    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(CopyOpc), DestReg);
    if (CopyNeedsCC)
      MIB = MIB.add(t1CondCodeOp());
    MIB.addReg(BaseReg, RegState::Kill);
    if (CopyOpc != ARM::tMOVr) {
      MIB.addImm(CopyImm);
    }
    MIB.setMIFlags(MIFlags).add(predOps(ARMCC::AL));

    BaseReg = DestReg;
  }

  // Emit zero or more in-place add/sub instructions
  while (Bytes) {
    unsigned ExtraImm = std::min(Bytes, ExtraRange) / ExtraScale;
    Bytes -= ExtraImm * ExtraScale;

    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg);
    if (ExtraNeedsCC)
      MIB = MIB.add(t1CondCodeOp());
    MIB.addReg(DestReg)
        .addImm(ExtraImm)
        .add(predOps(ARMCC::AL))
        .setMIFlags(MIFlags);
  }
}

static void removeOperands(MachineInstr &MI, unsigned i) {
  unsigned Op = i;
  for (unsigned e = MI.getNumOperands(); i != e; ++i)
    MI.RemoveOperand(Op);
}

/// convertToNonSPOpcode - Change the opcode to the non-SP version, because
/// we're replacing the frame index with a non-SP register.
static unsigned convertToNonSPOpcode(unsigned Opcode) {
  switch (Opcode) {
  case ARM::tLDRspi:
    return ARM::tLDRi;

  case ARM::tSTRspi:
    return ARM::tSTRi;
  }

  return Opcode;
}

bool ThumbRegisterInfo::rewriteFrameIndex(MachineBasicBlock::iterator II,
                                          unsigned FrameRegIdx,
                                          unsigned FrameReg, int &Offset,
                                          const ARMBaseInstrInfo &TII) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  assert(MBB.getParent()->getSubtarget<ARMSubtarget>().isThumb1Only() &&
         "This isn't needed for thumb2!");
  DebugLoc dl = MI.getDebugLoc();
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);

  if (Opcode == ARM::tADDframe) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    Register DestReg = MI.getOperand(0).getReg();

    emitThumbRegPlusImmediate(MBB, II, dl, DestReg, FrameReg, Offset, TII,
                              *this);
    MBB.erase(II);
    return true;
  }

  if (AddrMode != ARMII::AddrModeT1_s)
    llvm_unreachable("Unsupported addressing mode!");

  unsigned ImmIdx = FrameRegIdx + 1;
  int InstrOffs = MI.getOperand(ImmIdx).getImm();
  unsigned NumBits = (FrameReg == ARM::SP) ? 8 : 5;
  unsigned Scale = 4;

  Offset += InstrOffs * Scale;
  assert((Offset & (Scale - 1)) == 0 && "Can't encode this offset!");

  // Common case: small offset, fits into instruction.
  MachineOperand &ImmOp = MI.getOperand(ImmIdx);
  int ImmedOffset = Offset / Scale;
  unsigned Mask = (1 << NumBits) - 1;
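
  // With an SP-relative frame register this accepts word-aligned offsets up
  // to 1020 (8-bit immediate scaled by 4); otherwise the limit is 124.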
  if ((unsigned)Offset <= Mask * Scale) {
    // Replace the FrameIndex with the frame register (e.g., sp).
    MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    ImmOp.ChangeToImmediate(ImmedOffset);

    // If we're using a register where sp was stored, convert the instruction
    // to the non-SP version.
    unsigned NewOpc = convertToNonSPOpcode(Opcode);
    if (NewOpc != Opcode && FrameReg != ARM::SP)
      MI.setDesc(TII.get(NewOpc));

    return true;
  }

  NumBits = 5;
  Mask = (1 << NumBits) - 1;

  // If this is a thumb spill / restore, we will be using a constpool load to
  // materialize the offset.
  if (Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
    ImmOp.ChangeToImmediate(0);
  } else {
    // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
    ImmedOffset = ImmedOffset & Mask;
    ImmOp.ChangeToImmediate(ImmedOffset);
    Offset &= ~(Mask * Scale);
  }

  return Offset == 0;
}

void ThumbRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                          int64_t Offset) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::resolveFrameIndex(MI, BaseReg, Offset);

  const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = rewriteFrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

void ThumbRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::eliminateFrameIndex(II, SPAdj, FIOperandNum,
                                                    RS);

  Register VReg;
  const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);

  Register FrameReg;
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

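  // FrameReg/Offset now describe the slot's address as FrameReg + Offset; the
  // code below folds as much of that offset into MI as its encoding allows and
  // uses a scratch register for any remainder.
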
  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(STI.getFrameLowering()->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  assert(MF.getInfo<ARMFunctionInfo>()->isThumbFunction() &&
         "This eliminateFrameIndex only supports Thumb1!");
  if (rewriteFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert(Offset && "This code isn't needed if offset already handled!");

  unsigned Opcode = MI.getOpcode();

  // Remove predicate first.
  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx != -1)
    removeOperands(MI, PIdx);

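  // A load can reuse its destination register as the scratch that receives
  // FrameReg + Offset; a store has no spare register, so a fresh virtual
  // register is created instead and left for the register scavenger to assign.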
  if (MI.mayLoad()) {
    // Use the destination register to materialize sp + offset.
    Register TmpReg = MI.getOperand(0).getReg();
    bool UseRR = false;
    if (Opcode == ARM::tLDRspi) {
      if (FrameReg == ARM::SP || STI.genExecuteOnly())
        emitThumbRegPlusImmInReg(MBB, II, dl, TmpReg, FrameReg,
                                 Offset, false, TII, *this);
      else {
        emitLoadConstPool(MBB, II, dl, TmpReg, 0, Offset);
        UseRR = true;
      }
    } else
      emitThumbRegPlusImmediate(MBB, II, dl, TmpReg, FrameReg, Offset, TII,
                                *this);

    MI.setDesc(TII.get(UseRR ? ARM::tLDRr : ARM::tLDRi));
    MI.getOperand(FIOperandNum).ChangeToRegister(TmpReg, false, false, true);
    if (UseRR)
      // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
      // register. The offset is already handled in the vreg value.
      MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                     false);
  } else if (MI.mayStore()) {
    VReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
    bool UseRR = false;

    if (Opcode == ARM::tSTRspi) {
      if (FrameReg == ARM::SP || STI.genExecuteOnly())
        emitThumbRegPlusImmInReg(MBB, II, dl, VReg, FrameReg,
                                 Offset, false, TII, *this);
      else {
        emitLoadConstPool(MBB, II, dl, VReg, 0, Offset);
        UseRR = true;
      }
    } else
      emitThumbRegPlusImmediate(MBB, II, dl, VReg, FrameReg, Offset, TII,
                                *this);
    MI.setDesc(TII.get(UseRR ? ARM::tSTRr : ARM::tSTRi));
    MI.getOperand(FIOperandNum).ChangeToRegister(VReg, false, false, true);
    if (UseRR)
      // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
      // register. The offset is already handled in the vreg value.
      MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                     false);
  } else
    llvm_unreachable("Unexpected opcode!");

  // Add predicate back if it's needed.
  if (MI.isPredicable())
    MIB.add(predOps(ARMCC::AL));
}

bool
ThumbRegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  if (MF.getSubtarget<ARMSubtarget>().isThumb1Only()) {
    // For Thumb1, the emergency spill slot must be some small positive
    // offset from the base/stack pointer.
    return false;
  }
  // For Thumb2, put the emergency spill slot next to FP.
  return true;
}