AMDGPU: Mark test as XFAIL in expensive_checks builds
[llvm-project.git] / llvm / lib / Target / ARM / Thumb2InstrInfo.cpp
blob4d759d3bd5a3c7e2e331aee0a94a46c28757ef76
//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "Thumb2InstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
36 using namespace llvm;
38 static cl::opt<bool>
39 OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
40 cl::desc("Use old-style Thumb2 if-conversion heuristics"),
41 cl::init(false));
43 static cl::opt<bool>
44 PreferNoCSEL("prefer-no-csel", cl::Hidden,
45 cl::desc("Prefer predicated Move to CSEL"),
46 cl::init(false));
48 Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
49 : ARMBaseInstrInfo(STI) {}
51 /// Return the noop instruction to use for a noop.
52 MCInst Thumb2InstrInfo::getNop() const {
53 return MCInstBuilder(ARM::tHINT).addImm(0).addImm(ARMCC::AL).addReg(0);
56 unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
57 // FIXME
58 return 0;
61 void
62 Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
63 MachineBasicBlock *NewDest) const {
64 MachineBasicBlock *MBB = Tail->getParent();
65 ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
66 if (!AFI->hasITBlocks() || Tail->isBranch()) {
67 TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
68 return;
71 // If the first instruction of Tail is predicated, we may have to update
72 // the IT instruction.
73 Register PredReg;
74 ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
75 MachineBasicBlock::iterator MBBI = Tail;
76 if (CC != ARMCC::AL)
77 // Expecting at least the t2IT instruction before it.
78 --MBBI;
80 // Actually replace the tail.
81 TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
83 // Fix up IT.
84 if (CC != ARMCC::AL) {
85 MachineBasicBlock::iterator E = MBB->begin();
86 unsigned Count = 4; // At most 4 instructions in an IT block.
87 while (Count && MBBI != E) {
88 if (MBBI->isDebugInstr()) {
89 --MBBI;
90 continue;
92 if (MBBI->getOpcode() == ARM::t2IT) {
93 unsigned Mask = MBBI->getOperand(1).getImm();
94 if (Count == 4)
95 MBBI->eraseFromParent();
96 else {
97 unsigned MaskOn = 1 << Count;
98 unsigned MaskOff = ~(MaskOn - 1);
99 MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
101 return;
103 --MBBI;
104 --Count;
107 // Ctrl flow can reach here if branch folding is run before IT block
108 // formation pass.
112 bool
113 Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
114 MachineBasicBlock::iterator MBBI) const {
115 while (MBBI->isDebugInstr()) {
116 ++MBBI;
117 if (MBBI == MBB.end())
118 return false;
121 Register PredReg;
122 return getITInstrPredicate(*MBBI, PredReg) == ARMCC::AL;
125 MachineInstr *
126 Thumb2InstrInfo::optimizeSelect(MachineInstr &MI,
127 SmallPtrSetImpl<MachineInstr *> &SeenMIs,
128 bool PreferFalse) const {
129 // Try to use the base optimizeSelect, which uses canFoldIntoMOVCC to fold the
130 // MOVCC into another instruction. If that fails on 8.1-M fall back to using a
131 // CSEL.
132 MachineInstr *RV = ARMBaseInstrInfo::optimizeSelect(MI, SeenMIs, PreferFalse);
133 if (!RV && getSubtarget().hasV8_1MMainlineOps() && !PreferNoCSEL) {
134 Register DestReg = MI.getOperand(0).getReg();
136 if (!DestReg.isVirtual())
137 return nullptr;
139 MachineInstrBuilder NewMI = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
140 get(ARM::t2CSEL), DestReg)
141 .add(MI.getOperand(2))
142 .add(MI.getOperand(1))
143 .add(MI.getOperand(3));
144 SeenMIs.insert(NewMI);
145 return NewMI;
147 return RV;
150 void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
151 MachineBasicBlock::iterator I,
152 const DebugLoc &DL, MCRegister DestReg,
153 MCRegister SrcReg, bool KillSrc,
154 bool RenamableDest, bool RenamableSrc) const {
155 // Handle SPR, DPR, and QPR copies.
156 if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
157 return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);
159 BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
160 .addReg(SrcReg, getKillRegState(KillSrc))
161 .add(predOps(ARMCC::AL));
164 void Thumb2InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
165 MachineBasicBlock::iterator I,
166 Register SrcReg, bool isKill, int FI,
167 const TargetRegisterClass *RC,
168 const TargetRegisterInfo *TRI,
169 Register VReg) const {
170 DebugLoc DL;
171 if (I != MBB.end()) DL = I->getDebugLoc();
173 MachineFunction &MF = *MBB.getParent();
174 MachineFrameInfo &MFI = MF.getFrameInfo();
175 MachineMemOperand *MMO = MF.getMachineMemOperand(
176 MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
177 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
179 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
180 BuildMI(MBB, I, DL, get(ARM::t2STRi12))
181 .addReg(SrcReg, getKillRegState(isKill))
182 .addFrameIndex(FI)
183 .addImm(0)
184 .addMemOperand(MMO)
185 .add(predOps(ARMCC::AL));
186 return;
189 if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
190 // Thumb2 STRD expects its dest-registers to be in rGPR. Not a problem for
191 // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
192 // otherwise).
193 if (SrcReg.isVirtual()) {
194 MachineRegisterInfo *MRI = &MF.getRegInfo();
195 MRI->constrainRegClass(SrcReg, &ARM::GPRPairnospRegClass);
198 MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
199 AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
200 AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
201 MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
202 return;
205 ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI,
206 Register());
209 void Thumb2InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
210 MachineBasicBlock::iterator I,
211 Register DestReg, int FI,
212 const TargetRegisterClass *RC,
213 const TargetRegisterInfo *TRI,
214 Register VReg) const {
215 MachineFunction &MF = *MBB.getParent();
216 MachineFrameInfo &MFI = MF.getFrameInfo();
217 MachineMemOperand *MMO = MF.getMachineMemOperand(
218 MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
219 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
220 DebugLoc DL;
221 if (I != MBB.end()) DL = I->getDebugLoc();
223 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
224 BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
225 .addFrameIndex(FI)
226 .addImm(0)
227 .addMemOperand(MMO)
228 .add(predOps(ARMCC::AL));
229 return;
232 if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
233 // Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
234 // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
235 // otherwise).
236 if (DestReg.isVirtual()) {
237 MachineRegisterInfo *MRI = &MF.getRegInfo();
238 MRI->constrainRegClass(DestReg, &ARM::GPRPairnospRegClass);
241 MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
242 AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
243 AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
244 MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
246 if (DestReg.isPhysical())
247 MIB.addReg(DestReg, RegState::ImplicitDefine);
248 return;
251 ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI,
252 Register());
255 void Thumb2InstrInfo::expandLoadStackGuard(
256 MachineBasicBlock::iterator MI) const {
257 MachineFunction &MF = *MI->getParent()->getParent();
258 Module &M = *MF.getFunction().getParent();
260 if (M.getStackProtectorGuard() == "tls") {
261 expandLoadStackGuardBase(MI, ARM::t2MRC, ARM::t2LDRi12);
262 return;
265 const auto *GV = cast<GlobalValue>((*MI->memoperands_begin())->getValue());
266 const ARMSubtarget &Subtarget = MF.getSubtarget<ARMSubtarget>();
267 if (Subtarget.isTargetELF() && !GV->isDSOLocal())
268 expandLoadStackGuardBase(MI, ARM::t2LDRLIT_ga_pcrel, ARM::t2LDRi12);
269 else if (!Subtarget.useMovt())
270 expandLoadStackGuardBase(MI, ARM::tLDRLIT_ga_abs, ARM::t2LDRi12);
271 else if (MF.getTarget().isPositionIndependent())
272 expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
273 else
274 expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
277 MachineInstr *Thumb2InstrInfo::commuteInstructionImpl(MachineInstr &MI,
278 bool NewMI,
279 unsigned OpIdx1,
280 unsigned OpIdx2) const {
281 switch (MI.getOpcode()) {
282 case ARM::MVE_VMAXNMAf16:
283 case ARM::MVE_VMAXNMAf32:
284 case ARM::MVE_VMINNMAf16:
285 case ARM::MVE_VMINNMAf32:
286 // Don't allow predicated instructions to be commuted.
287 if (getVPTInstrPredicate(MI) != ARMVCC::None)
288 return nullptr;
290 return ARMBaseInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
293 bool Thumb2InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
294 const MachineBasicBlock *MBB,
295 const MachineFunction &MF) const {
296 // BTI clearing instructions shall not take part in scheduling regions as
297 // they must stay in their intended place. Although PAC isn't BTI clearing,
298 // it can be transformed into PACBTI after the pre-RA Machine Scheduling
299 // has taken place, so its movement must also be restricted.
300 switch (MI.getOpcode()) {
301 case ARM::t2BTI:
302 case ARM::t2PAC:
303 case ARM::t2PACBTI:
304 case ARM::t2SG:
305 return true;
306 default:
307 break;
309 return ARMBaseInstrInfo::isSchedulingBoundary(MI, MBB, MF);
312 void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
313 MachineBasicBlock::iterator &MBBI,
314 const DebugLoc &dl, Register DestReg,
315 Register BaseReg, int NumBytes,
316 ARMCC::CondCodes Pred, Register PredReg,
317 const ARMBaseInstrInfo &TII,
318 unsigned MIFlags) {
319 if (NumBytes == 0 && DestReg != BaseReg) {
320 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
321 .addReg(BaseReg, RegState::Kill)
322 .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
323 return;
326 bool isSub = NumBytes < 0;
327 if (isSub) NumBytes = -NumBytes;
329 // If profitable, use a movw or movt to materialize the offset.
330 // FIXME: Use the scavenger to grab a scratch register.
331 if (DestReg != ARM::SP && DestReg != BaseReg &&
332 NumBytes >= 4096 &&
333 ARM_AM::getT2SOImmVal(NumBytes) == -1) {
334 bool Fits = false;
335 if (NumBytes < 65536) {
336 // Use a movw to materialize the 16-bit constant.
337 BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
338 .addImm(NumBytes)
339 .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
340 Fits = true;
341 } else if ((NumBytes & 0xffff) == 0) {
342 // Use a movt to materialize the 32-bit constant.
343 BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
344 .addReg(DestReg)
345 .addImm(NumBytes >> 16)
346 .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
347 Fits = true;
350 if (Fits) {
351 if (isSub) {
352 BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
353 .addReg(BaseReg)
354 .addReg(DestReg, RegState::Kill)
355 .add(predOps(Pred, PredReg))
356 .add(condCodeOp())
357 .setMIFlags(MIFlags);
358 } else {
359 // Here we know that DestReg is not SP but we do not
360 // know anything about BaseReg. t2ADDrr is an invalid
361 // instruction is SP is used as the second argument, but
362 // is fine if SP is the first argument. To be sure we
363 // do not generate invalid encoding, put BaseReg first.
364 BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
365 .addReg(BaseReg)
366 .addReg(DestReg, RegState::Kill)
367 .add(predOps(Pred, PredReg))
368 .add(condCodeOp())
369 .setMIFlags(MIFlags);
371 return;
375 while (NumBytes) {
376 unsigned ThisVal = NumBytes;
377 unsigned Opc = 0;
378 if (DestReg == ARM::SP && BaseReg != ARM::SP) {
379 // mov sp, rn. Note t2MOVr cannot be used.
380 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
381 .addReg(BaseReg)
382 .setMIFlags(MIFlags)
383 .add(predOps(ARMCC::AL));
384 BaseReg = ARM::SP;
385 continue;
388 assert((DestReg != ARM::SP || BaseReg == ARM::SP) &&
389 "Writing to SP, from other register.");
391 // Try to use T1, as it smaller
392 if ((DestReg == ARM::SP) && (ThisVal < ((1 << 7) - 1) * 4)) {
393 assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
394 Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
395 BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
396 .addReg(BaseReg)
397 .addImm(ThisVal / 4)
398 .setMIFlags(MIFlags)
399 .add(predOps(ARMCC::AL));
400 break;
402 bool HasCCOut = true;
403 int ImmIsT2SO = ARM_AM::getT2SOImmVal(ThisVal);
404 bool ToSP = DestReg == ARM::SP;
405 unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
406 unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
407 unsigned t2SUBi12 = ToSP ? ARM::t2SUBspImm12 : ARM::t2SUBri12;
408 unsigned t2ADDi12 = ToSP ? ARM::t2ADDspImm12 : ARM::t2ADDri12;
409 Opc = isSub ? t2SUB : t2ADD;
410 // Prefer T2: sub rd, rn, so_imm | sub sp, sp, so_imm
411 if (ImmIsT2SO != -1) {
412 NumBytes = 0;
413 } else if (ThisVal < 4096) {
414 // Prefer T3 if can make it in a single go: subw rd, rn, imm12 | subw sp,
415 // sp, imm12
416 Opc = isSub ? t2SUBi12 : t2ADDi12;
417 HasCCOut = false;
418 NumBytes = 0;
419 } else {
420 // Use one T2 instruction to reduce NumBytes
421 // FIXME: Move this to ARMAddressingModes.h?
422 unsigned RotAmt = llvm::countl_zero(ThisVal);
423 ThisVal = ThisVal & llvm::rotr<uint32_t>(0xff000000U, RotAmt);
424 NumBytes &= ~ThisVal;
425 assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
426 "Bit extraction didn't work?");
429 // Build the new ADD / SUB.
430 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
431 .addReg(BaseReg, RegState::Kill)
432 .addImm(ThisVal)
433 .add(predOps(ARMCC::AL))
434 .setMIFlags(MIFlags);
435 if (HasCCOut)
436 MIB.add(condCodeOp());
438 BaseReg = DestReg;
442 static unsigned
443 negativeOffsetOpcode(unsigned opcode)
445 switch (opcode) {
446 case ARM::t2LDRi12: return ARM::t2LDRi8;
447 case ARM::t2LDRHi12: return ARM::t2LDRHi8;
448 case ARM::t2LDRBi12: return ARM::t2LDRBi8;
449 case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
450 case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
451 case ARM::t2STRi12: return ARM::t2STRi8;
452 case ARM::t2STRBi12: return ARM::t2STRBi8;
453 case ARM::t2STRHi12: return ARM::t2STRHi8;
454 case ARM::t2PLDi12: return ARM::t2PLDi8;
455 case ARM::t2PLDWi12: return ARM::t2PLDWi8;
456 case ARM::t2PLIi12: return ARM::t2PLIi8;
458 case ARM::t2LDRi8:
459 case ARM::t2LDRHi8:
460 case ARM::t2LDRBi8:
461 case ARM::t2LDRSHi8:
462 case ARM::t2LDRSBi8:
463 case ARM::t2STRi8:
464 case ARM::t2STRBi8:
465 case ARM::t2STRHi8:
466 case ARM::t2PLDi8:
467 case ARM::t2PLDWi8:
468 case ARM::t2PLIi8:
469 return opcode;
471 default:
472 llvm_unreachable("unknown thumb2 opcode.");
476 static unsigned
477 positiveOffsetOpcode(unsigned opcode)
479 switch (opcode) {
480 case ARM::t2LDRi8: return ARM::t2LDRi12;
481 case ARM::t2LDRHi8: return ARM::t2LDRHi12;
482 case ARM::t2LDRBi8: return ARM::t2LDRBi12;
483 case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
484 case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
485 case ARM::t2STRi8: return ARM::t2STRi12;
486 case ARM::t2STRBi8: return ARM::t2STRBi12;
487 case ARM::t2STRHi8: return ARM::t2STRHi12;
488 case ARM::t2PLDi8: return ARM::t2PLDi12;
489 case ARM::t2PLDWi8: return ARM::t2PLDWi12;
490 case ARM::t2PLIi8: return ARM::t2PLIi12;
492 case ARM::t2LDRi12:
493 case ARM::t2LDRHi12:
494 case ARM::t2LDRBi12:
495 case ARM::t2LDRSHi12:
496 case ARM::t2LDRSBi12:
497 case ARM::t2STRi12:
498 case ARM::t2STRBi12:
499 case ARM::t2STRHi12:
500 case ARM::t2PLDi12:
501 case ARM::t2PLDWi12:
502 case ARM::t2PLIi12:
503 return opcode;
505 default:
506 llvm_unreachable("unknown thumb2 opcode.");
510 static unsigned
511 immediateOffsetOpcode(unsigned opcode)
513 switch (opcode) {
514 case ARM::t2LDRs: return ARM::t2LDRi12;
515 case ARM::t2LDRHs: return ARM::t2LDRHi12;
516 case ARM::t2LDRBs: return ARM::t2LDRBi12;
517 case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
518 case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
519 case ARM::t2STRs: return ARM::t2STRi12;
520 case ARM::t2STRBs: return ARM::t2STRBi12;
521 case ARM::t2STRHs: return ARM::t2STRHi12;
522 case ARM::t2PLDs: return ARM::t2PLDi12;
523 case ARM::t2PLDWs: return ARM::t2PLDWi12;
524 case ARM::t2PLIs: return ARM::t2PLIi12;
526 case ARM::t2LDRi12:
527 case ARM::t2LDRHi12:
528 case ARM::t2LDRBi12:
529 case ARM::t2LDRSHi12:
530 case ARM::t2LDRSBi12:
531 case ARM::t2STRi12:
532 case ARM::t2STRBi12:
533 case ARM::t2STRHi12:
534 case ARM::t2PLDi12:
535 case ARM::t2PLDWi12:
536 case ARM::t2PLIi12:
537 case ARM::t2LDRi8:
538 case ARM::t2LDRHi8:
539 case ARM::t2LDRBi8:
540 case ARM::t2LDRSHi8:
541 case ARM::t2LDRSBi8:
542 case ARM::t2STRi8:
543 case ARM::t2STRBi8:
544 case ARM::t2STRHi8:
545 case ARM::t2PLDi8:
546 case ARM::t2PLDWi8:
547 case ARM::t2PLIi8:
548 return opcode;
550 default:
551 llvm_unreachable("unknown thumb2 opcode.");
555 bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
556 Register FrameReg, int &Offset,
557 const ARMBaseInstrInfo &TII,
558 const TargetRegisterInfo *TRI) {
559 unsigned Opcode = MI.getOpcode();
560 const MCInstrDesc &Desc = MI.getDesc();
561 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
562 bool isSub = false;
564 MachineFunction &MF = *MI.getParent()->getParent();
565 const TargetRegisterClass *RegClass =
566 TII.getRegClass(Desc, FrameRegIdx, TRI, MF);
568 // Memory operands in inline assembly always use AddrModeT2_i12.
569 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
570 AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?
572 const bool IsSP = Opcode == ARM::t2ADDspImm12 || Opcode == ARM::t2ADDspImm;
573 if (IsSP || Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
574 Offset += MI.getOperand(FrameRegIdx+1).getImm();
576 Register PredReg;
577 if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL &&
578 !MI.definesRegister(ARM::CPSR, /*TRI=*/nullptr)) {
579 // Turn it into a move.
580 MI.setDesc(TII.get(ARM::tMOVr));
581 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
582 // Remove offset and remaining explicit predicate operands.
583 do MI.removeOperand(FrameRegIdx+1);
584 while (MI.getNumOperands() > FrameRegIdx+1);
585 MachineInstrBuilder MIB(*MI.getParent()->getParent(), &MI);
586 MIB.add(predOps(ARMCC::AL));
587 return true;
590 bool HasCCOut = (Opcode != ARM::t2ADDspImm12 && Opcode != ARM::t2ADDri12);
592 if (Offset < 0) {
593 Offset = -Offset;
594 isSub = true;
595 MI.setDesc(IsSP ? TII.get(ARM::t2SUBspImm) : TII.get(ARM::t2SUBri));
596 } else {
597 MI.setDesc(IsSP ? TII.get(ARM::t2ADDspImm) : TII.get(ARM::t2ADDri));
600 // Common case: small offset, fits into instruction.
601 if (ARM_AM::getT2SOImmVal(Offset) != -1) {
602 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
603 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
604 // Add cc_out operand if the original instruction did not have one.
605 if (!HasCCOut)
606 MI.addOperand(MachineOperand::CreateReg(0, false));
607 Offset = 0;
608 return true;
610 // Another common case: imm12.
611 if (Offset < 4096 &&
612 (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
613 unsigned NewOpc = isSub ? IsSP ? ARM::t2SUBspImm12 : ARM::t2SUBri12
614 : IsSP ? ARM::t2ADDspImm12 : ARM::t2ADDri12;
615 MI.setDesc(TII.get(NewOpc));
616 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
617 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
618 // Remove the cc_out operand.
619 if (HasCCOut)
620 MI.removeOperand(MI.getNumOperands()-1);
621 Offset = 0;
622 return true;
625 // Otherwise, extract 8 adjacent bits from the immediate into this
626 // t2ADDri/t2SUBri.
627 unsigned RotAmt = llvm::countl_zero<unsigned>(Offset);
628 unsigned ThisImmVal = Offset & llvm::rotr<uint32_t>(0xff000000U, RotAmt);
630 // We will handle these bits from offset, clear them.
631 Offset &= ~ThisImmVal;
633 assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
634 "Bit extraction didn't work?");
635 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
636 // Add cc_out operand if the original instruction did not have one.
637 if (!HasCCOut)
638 MI.addOperand(MachineOperand::CreateReg(0, false));
639 } else {
640 // AddrMode4 and AddrMode6 cannot handle any offset.
641 if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
642 return false;
644 // AddrModeT2_so cannot handle any offset. If there is no offset
645 // register then we change to an immediate version.
646 unsigned NewOpc = Opcode;
647 if (AddrMode == ARMII::AddrModeT2_so) {
648 Register OffsetReg = MI.getOperand(FrameRegIdx + 1).getReg();
649 if (OffsetReg != 0) {
650 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
651 return Offset == 0;
654 MI.removeOperand(FrameRegIdx+1);
655 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
656 NewOpc = immediateOffsetOpcode(Opcode);
657 AddrMode = ARMII::AddrModeT2_i12;
660 unsigned NumBits = 0;
661 unsigned Scale = 1;
662 if (AddrMode == ARMII::AddrModeT2_i8neg ||
663 AddrMode == ARMII::AddrModeT2_i12) {
664 // i8 supports only negative, and i12 supports only positive, so
665 // based on Offset sign convert Opcode to the appropriate
666 // instruction
667 Offset += MI.getOperand(FrameRegIdx+1).getImm();
668 if (Offset < 0) {
669 NewOpc = negativeOffsetOpcode(Opcode);
670 NumBits = 8;
671 isSub = true;
672 Offset = -Offset;
673 } else {
674 NewOpc = positiveOffsetOpcode(Opcode);
675 NumBits = 12;
677 } else if (AddrMode == ARMII::AddrMode5) {
678 // VFP address mode.
679 const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
680 int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
681 if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
682 InstrOffs *= -1;
683 NumBits = 8;
684 Scale = 4;
685 Offset += InstrOffs * 4;
686 assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
687 if (Offset < 0) {
688 Offset = -Offset;
689 isSub = true;
691 } else if (AddrMode == ARMII::AddrMode5FP16) {
692 // VFP address mode.
693 const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
694 int InstrOffs = ARM_AM::getAM5FP16Offset(OffOp.getImm());
695 if (ARM_AM::getAM5FP16Op(OffOp.getImm()) == ARM_AM::sub)
696 InstrOffs *= -1;
697 NumBits = 8;
698 Scale = 2;
699 Offset += InstrOffs * 2;
700 assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
701 if (Offset < 0) {
702 Offset = -Offset;
703 isSub = true;
705 } else if (AddrMode == ARMII::AddrModeT2_i7s4 ||
706 AddrMode == ARMII::AddrModeT2_i7s2 ||
707 AddrMode == ARMII::AddrModeT2_i7) {
708 Offset += MI.getOperand(FrameRegIdx + 1).getImm();
709 unsigned OffsetMask;
710 switch (AddrMode) {
711 case ARMII::AddrModeT2_i7s4: NumBits = 9; OffsetMask = 0x3; break;
712 case ARMII::AddrModeT2_i7s2: NumBits = 8; OffsetMask = 0x1; break;
713 default: NumBits = 7; OffsetMask = 0x0; break;
715 // MCInst operand expects already scaled value.
716 Scale = 1;
717 assert((Offset & OffsetMask) == 0 && "Can't encode this offset!");
718 (void)OffsetMask; // squash unused-variable warning at -NDEBUG
719 } else if (AddrMode == ARMII::AddrModeT2_i8s4) {
720 Offset += MI.getOperand(FrameRegIdx + 1).getImm();
721 NumBits = 8 + 2;
722 // MCInst operand expects already scaled value.
723 Scale = 1;
724 assert((Offset & 3) == 0 && "Can't encode this offset!");
725 } else if (AddrMode == ARMII::AddrModeT2_ldrex) {
726 Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
727 NumBits = 8; // 8 bits scaled by 4
728 Scale = 4;
729 assert((Offset & 3) == 0 && "Can't encode this offset!");
730 } else {
731 llvm_unreachable("Unsupported addressing mode!");
734 if (NewOpc != Opcode)
735 MI.setDesc(TII.get(NewOpc));
737 MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);
739 // Attempt to fold address computation
740 // Common case: small offset, fits into instruction. We need to make sure
741 // the register class is correct too, for instructions like the MVE
742 // VLDRH.32, which only accepts low tGPR registers.
743 int ImmedOffset = Offset / Scale;
744 unsigned Mask = (1 << NumBits) - 1;
745 if ((unsigned)Offset <= Mask * Scale &&
746 (FrameReg.isVirtual() || RegClass->contains(FrameReg))) {
747 if (FrameReg.isVirtual()) {
748 // Make sure the register class for the virtual register is correct
749 MachineRegisterInfo *MRI = &MF.getRegInfo();
750 if (!MRI->constrainRegClass(FrameReg, RegClass))
751 llvm_unreachable("Unable to constrain virtual register class.");
754 // Replace the FrameIndex with fp/sp
755 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
756 if (isSub) {
757 if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
758 // FIXME: Not consistent.
759 ImmedOffset |= 1 << NumBits;
760 else
761 ImmedOffset = -ImmedOffset;
763 ImmOp.ChangeToImmediate(ImmedOffset);
764 Offset = 0;
765 return true;
768 // Otherwise, offset doesn't fit. Pull in what we can to simplify
769 ImmedOffset = ImmedOffset & Mask;
770 if (isSub) {
771 if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
772 // FIXME: Not consistent.
773 ImmedOffset |= 1 << NumBits;
774 else {
775 ImmedOffset = -ImmedOffset;
776 if (ImmedOffset == 0)
777 // Change the opcode back if the encoded offset is zero.
778 MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
781 ImmOp.ChangeToImmediate(ImmedOffset);
782 Offset &= ~(Mask*Scale);
785 Offset = (isSub) ? -Offset : Offset;
786 return Offset == 0 && (FrameReg.isVirtual() || RegClass->contains(FrameReg));
789 ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
790 Register &PredReg) {
791 unsigned Opc = MI.getOpcode();
792 if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
793 return ARMCC::AL;
794 return getInstrPredicate(MI, PredReg);
797 int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) {
798 const MCInstrDesc &MCID = MI.getDesc();
800 for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
801 if (ARM::isVpred(MCID.operands()[i].OperandType))
802 return i;
804 return -1;
807 ARMVCC::VPTCodes llvm::getVPTInstrPredicate(const MachineInstr &MI,
808 Register &PredReg) {
809 int PIdx = findFirstVPTPredOperandIdx(MI);
810 if (PIdx == -1) {
811 PredReg = 0;
812 return ARMVCC::None;
815 PredReg = MI.getOperand(PIdx+1).getReg();
816 return (ARMVCC::VPTCodes)MI.getOperand(PIdx).getImm();
819 void llvm::recomputeVPTBlockMask(MachineInstr &Instr) {
820 assert(isVPTOpcode(Instr.getOpcode()) && "Not a VPST or VPT Instruction!");
822 MachineOperand &MaskOp = Instr.getOperand(0);
823 assert(MaskOp.isImm() && "Operand 0 is not the block mask of the VPT/VPST?!");
825 MachineBasicBlock::iterator Iter = ++Instr.getIterator(),
826 End = Instr.getParent()->end();
828 while (Iter != End && Iter->isDebugInstr())
829 ++Iter;
831 // Verify that the instruction after the VPT/VPST is predicated (it should
832 // be), and skip it.
833 assert(Iter != End && "Expected some instructions in any VPT block");
834 assert(
835 getVPTInstrPredicate(*Iter) == ARMVCC::Then &&
836 "VPT/VPST should be followed by an instruction with a 'then' predicate!");
837 ++Iter;
839 // Iterate over the predicated instructions, updating the BlockMask as we go.
840 ARM::PredBlockMask BlockMask = ARM::PredBlockMask::T;
841 while (Iter != End) {
842 if (Iter->isDebugInstr()) {
843 ++Iter;
844 continue;
846 ARMVCC::VPTCodes Pred = getVPTInstrPredicate(*Iter);
847 if (Pred == ARMVCC::None)
848 break;
849 BlockMask = expandPredBlockMask(BlockMask, Pred);
850 ++Iter;
853 // Rewrite the BlockMask.
854 MaskOp.setImm((int64_t)(BlockMask));