//===- AArch64InstrInfo.cpp - AArch64 Instruction Information ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"
static cl::opt<unsigned> TBZDisplacementBits(
    "aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
    cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned> CBZDisplacementBits(
    "aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
    cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
    BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
                        cl::desc("Restrict range of Bcc instructions (DEBUG)"));
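
// With the default values above, TB[N]Z (14 displacement bits) reaches
// roughly +/-32 KiB and CB[N]Z/Bcc (19 bits) roughly +/-1 MiB, because branch
// offsets are counted in 4-byte instructions (see isBranchOffsetInRange
// below).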
AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP,
                          AArch64::CATCHRET),
      RI(STI.getTargetTriple()), Subtarget(STI) {}
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be.  This returns the maximum number of bytes.
unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  auto Op = MI.getOpcode();
  if (Op == AArch64::INLINEASM || Op == AArch64::INLINEASM_BR)
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
  // FIXME: We currently only handle pseudoinstructions that don't get expanded
  //        before the assembly printer.
  unsigned NumBytes = 0;
  const MCInstrDesc &Desc = MI.getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    NumBytes = 4;
    break;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    NumBytes = 0;
    break;
  case TargetOpcode::STACKMAP:
    // The upper bound for a stackmap intrinsic is the full length of its
    // shadow.
    NumBytes = StackMapOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case TargetOpcode::PATCHPOINT:
    // The size of the patchpoint intrinsic is the number of bytes requested.
    NumBytes = PatchPointOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case AArch64::TLSDESC_CALLSEQ:
    // This gets lowered to an instruction sequence which takes 16 bytes.
    NumBytes = 16;
    break;
  case AArch64::JumpTableDest32:
  case AArch64::JumpTableDest16:
  case AArch64::JumpTableDest8:
    NumBytes = 12;
    break;
  case AArch64::SPACE:
    NumBytes = MI.getOperand(1).getImm();
    break;
  }

  return NumBytes;
}
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
  }
}
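
// parseCondBranch() above encodes compare-and-branch conditions as:
//   Cond[0] = -1 (marker), Cond[1] = original opcode, Cond[2] = source
//   register, and for TB[N]Z additionally Cond[3] = bit number.
// reverseBranchCondition() and instantiateCondBranch() below rely on this
// layout.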
static unsigned getBranchDisplacementBits(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return 64;
  case AArch64::TBNZW:
  case AArch64::TBZW:
  case AArch64::TBNZX:
  case AArch64::TBZX:
    return TBZDisplacementBits;
  case AArch64::CBNZW:
  case AArch64::CBZW:
  case AArch64::CBNZX:
  case AArch64::CBZX:
    return CBZDisplacementBits;
  case AArch64::Bcc:
    return BCCDisplacementBits;
  }
}
bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                             int64_t BrOffset) const {
  unsigned Bits = getBranchDisplacementBits(BranchOp);
  assert(Bits >= 3 && "max branch displacement must be enough to jump "
                      "over conditional branch expansion");
  return isIntN(Bits, BrOffset / 4);
}
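
// Example for isBranchOffsetInRange() above: with the default 19 displacement
// bits, a Bcc accepts any byte offset in [-2^20, 2^20 - 4]; the byte offset
// is divided by the 4-byte instruction size before the signed N-bit check.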
MachineBasicBlock *
AArch64InstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return MI.getOperand(0).getMBB();
  case AArch64::TBZW:
  case AArch64::TBNZW:
  case AArch64::TBZX:
  case AArch64::TBNZX:
    return MI.getOperand(2).getMBB();
  case AArch64::CBZW:
  case AArch64::CBNZW:
  case AArch64::CBZX:
  case AArch64::CBNZX:
  case AArch64::Bcc:
    return MI.getOperand(1).getMBB();
  }
}
bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now if the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
bool AArch64InstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}
unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }
  --I;
  if (!isCondBranchOpcode(I->getOpcode())) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }

  // Remove the branch.
  I->eraseFromParent();
  if (BytesRemoved)
    *BytesRemoved = 8;

  return 2;
}
void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    // Note that we use addOperand instead of addReg to keep the flags.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}
unsigned AArch64InstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);

    if (BytesAdded)
      *BytesAdded = 4;

    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);

  if (BytesAdded)
    *BytesAdded = 8;

  return 2;
}
// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}
// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // If NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // Fall-through to ADDXri and ADDWri.
    LLVM_FALLTHROUGH;
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // If NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // Fall-through to SUBXrr and SUBWrr.
    LLVM_FALLTHROUGH;
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}
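
// Example for canFoldIntoCSel() above: if TrueReg is defined by
// "%t = ADDWri %a, 1, 0" (i.e. %a + 1), it returns CSINCWr with
// *NewVReg = %a, so the select can be emitted as a single
// "csinc dst, ..., %a, cc" instead of a separate add plus a csel.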
bool AArch64InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                       ArrayRef<MachineOperand> Cond,
                                       unsigned TrueReg, unsigned FalseReg,
                                       int &CondCycles, int &TrueCycles,
                                       int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  return false;
}
void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, unsigned DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = false;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = true;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = false;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = true;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg)
      .addReg(TrueReg)
      .addReg(FalseReg)
      .addImm(CC);
}
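
// Example for insertSelect() above: a select controlled by "tbnz w0, #3"
// (Cond.size() == 4) is lowered to:
//   ands wzr, w0, #0x8        ; 0x8 == encodeLogicalImmediate(1 << 3, 32)
//   csel dst, tval, fval, ne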
/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
  uint64_t Imm = MI.getOperand(1).getImm();
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
}
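
// In canBeExpandedToORR() above, the shift pair truncates Imm to its low
// BitSize bits: for BitSize == 32, 0xFFFFFFFF00000001 becomes 0x1 before the
// logical-immediate check. For example, "mov w0, #0xff" passes the check and
// can be expanded to "orr w0, wzr, #0xff".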
// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  if (!Subtarget.hasCustomCheapAsMoveHandling())
    return MI.isAsCheapAsAMove();

  const unsigned Opcode = MI.getOpcode();

  // Firstly, check cases gated by features.

  if (Subtarget.hasZeroCycleZeroingFP()) {
    if (Opcode == AArch64::FMOVH0 ||
        Opcode == AArch64::FMOVS0 ||
        Opcode == AArch64::FMOVD0)
      return true;
  }

  if (Subtarget.hasZeroCycleZeroingGP()) {
    if (Opcode == TargetOpcode::COPY &&
        (MI.getOperand(1).getReg() == AArch64::WZR ||
         MI.getOperand(1).getReg() == AArch64::XZR))
      return true;
  }

  // Secondly, check cases specific to sub-targets.

  if (Subtarget.hasExynosCheapAsMoveHandling()) {
    if (isExynosCheapAsMove(MI))
      return true;

    return MI.isAsCheapAsAMove();
  }

  // Finally, check generic cases.

  switch (Opcode) {
  default:
    return false;

  // add/sub on register without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (MI.getOperand(3).getImm() == 0);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;

  // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
  // ORRXri, it is as cheap as MOV.
  case AArch64::MOVi32imm:
    return canBeExpandedToORR(MI, 32);
  case AArch64::MOVi64imm:
    return canBeExpandedToORR(MI, 64);
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}
bool AArch64InstrInfo::isFalkorShiftExtFast(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;

  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    if (ShiftVal == 0)
      return true;
    return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5;
  }

  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64: {
    unsigned Imm = MI.getOperand(3).getImm();
    switch (AArch64_AM::getArithExtendType(Imm)) {
    default:
      return false;
    case AArch64_AM::UXTB:
    case AArch64_AM::UXTH:
    case AArch64_AM::UXTW:
    case AArch64_AM::UXTX:
      return AArch64_AM::getArithShiftValue(Imm) <= 4;
    }
  }

  case AArch64::SUBWrs:
  case AArch64::SUBSWrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    return ShiftVal == 0 ||
           (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 31);
  }

  case AArch64::SUBXrs:
  case AArch64::SUBSXrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    return ShiftVal == 0 ||
           (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 63);
  }

  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64: {
    unsigned Imm = MI.getOperand(3).getImm();
    switch (AArch64_AM::getArithExtendType(Imm)) {
    default:
      return false;
    case AArch64_AM::UXTB:
    case AArch64_AM::UXTH:
    case AArch64_AM::UXTW:
    case AArch64_AM::UXTX:
      return AArch64_AM::getArithShiftValue(Imm) == 0;
    }
  }

  case AArch64::LDRBBroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroW:
  case AArch64::LDRBroX:
  case AArch64::LDRDroW:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroW:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroW:
  case AArch64::LDRHroX:
  case AArch64::LDRQroW:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroW:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroW:
  case AArch64::LDRSroX:
  case AArch64::LDRWroW:
  case AArch64::LDRWroX:
  case AArch64::LDRXroW:
  case AArch64::LDRXroX:
  case AArch64::PRFMroW:
  case AArch64::PRFMroX:
  case AArch64::STRBBroW:
  case AArch64::STRBBroX:
  case AArch64::STRBroW:
  case AArch64::STRBroX:
  case AArch64::STRDroW:
  case AArch64::STRDroX:
  case AArch64::STRHHroW:
  case AArch64::STRHHroX:
  case AArch64::STRHroW:
  case AArch64::STRHroX:
  case AArch64::STRQroW:
  case AArch64::STRQroX:
  case AArch64::STRSroW:
  case AArch64::STRSroX:
  case AArch64::STRWroW:
  case AArch64::STRWroX:
  case AArch64::STRXroW:
  case AArch64::STRXroX: {
    unsigned IsSigned = MI.getOperand(3).getImm();
    return !IsSigned;
  }
  }
}
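
// For the register-offset forms at the end of isFalkorShiftExtFast() above,
// operand 3 holds the sign-extension flag of the offset register; these are
// treated as fast only when the offset is not sign-extended (e.g. LSL/UXTW
// rather than SXTW).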
bool AArch64InstrInfo::isSEHInstruction(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
  default:
    return false;
  case AArch64::SEH_StackAlloc:
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveFPLR_X:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveReg_X:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveRegP_X:
  case AArch64::SEH_SaveFReg:
  case AArch64::SEH_SaveFReg_X:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFRegP_X:
  case AArch64::SEH_SetFP:
  case AArch64::SEH_AddFP:
  case AArch64::SEH_Nop:
  case AArch64::SEH_PrologEnd:
  case AArch64::SEH_EpilogStart:
  case AArch64::SEH_EpilogEnd:
    return true;
  }
}
bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}
bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb, AliasAnalysis *AA) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned WidthA = 0, WidthB = 0;

  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base, the offset from the base, and the width. Width is the
  // size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If the
  // bases are identical, and the offset of the lower memory access plus its
  // width does not reach the offset of the higher memory access, then the
  // memory accesses are disjoint.
  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}
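
// Example for areMemAccessesTriviallyDisjoint() above: "ldr x1, [x0, #8]"
// (offset 8, width 8) and "ldr x2, [x0, #16]" share the base x0, and
// 8 + 8 <= 16, so the two accesses cannot overlap.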
bool AArch64InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  if (TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF))
    return true;
  switch (MI.getOpcode()) {
  case AArch64::HINT:
    // CSDB hints are scheduling barriers.
    if (MI.getOperand(0).getImm() == 0x14)
      return true;
    break;
  case AArch64::DSB:
  case AArch64::ISB:
    // DSB and ISB also are scheduling barriers.
    return true;
  default:;
  }
  return isSEHInstruction(MI);
}
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  // The first operand can be a frame index where we'd normally expect a
  // register.
  assert(MI.getNumOperands() >= 2 && "All AArch64 cmps should have 2 operands");
  if (!MI.getOperand(1).isReg())
    return false;

  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = MI.getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: In order to convert CmpValue to 0 or 1
    CmpValue = MI.getOperand(2).getImm() != 0;
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: The return value type of decodeLogicalImmediate is uint64_t,
    // while the type of CmpValue is int. When converting uint64_t to int,
    // the high 32 bits of uint64_t will be lost.
    // In fact it causes a bug in spec2006-483.xalancbmk.
    // CmpValue is only used to compare with zero in OptimizeCompareInstr.
    CmpValue = AArch64_AM::decodeLogicalImmediate(
                   MI.getOperand(2).getImm(),
                   MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0;
    return true;
  }

  return false;
}
static bool UpdateOperandRegClass(MachineInstr &Instr) {
  MachineBasicBlock *MBB = Instr.getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr.getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr.getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;

    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}
/// Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible for doing the actual
/// substitution and legality checking.
static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
  bool MIDefinesZeroReg = false;
  if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI.getOpcode()) {
  default:
    return MI.getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}
enum AccessKind { AK_Write = 0x01, AK_Read = 0x10, AK_All = 0x11 };

/// True when condition flags are accessed (either by writing or reading)
/// on the instruction trace starting at From and ending at To.
///
/// Note: If From and To are from different blocks it's assumed CC are accessed
///       on the path.
static bool areCFlagsAccessedBetweenInstrs(
    MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
    const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
  // Early exit if To is at the beginning of the BB.
  if (To == To->getParent()->begin())
    return true;

  // Check whether the instructions are in the same basic block.
  // If not, assume the condition flags might get modified somewhere.
  if (To->getParent() != From->getParent())
    return true;

  // From must be above To.
  assert(std::find_if(++To.getReverse(), To->getParent()->rend(),
                      [From](MachineInstr &MI) {
                        return MI.getIterator() == From;
                      }) != To->getParent()->rend());

  // We iterate backward starting at \p To until we hit \p From.
  for (--To; To != From; --To) {
    const MachineInstr &Instr = *To;

    if (((AccessToCheck & AK_Write) &&
         Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
        ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
      return true;
  }
  return false;
}
/// Try to optimize a compare instruction. A compare instruction is an
/// instruction which produces AArch64::NZCV. It is truly a compare
/// instruction when there are no uses of its destination register.
///
/// The following steps are tried in order:
/// 1. Convert CmpInstr into an unconditional version.
/// 2. Remove CmpInstr if above there is an instruction producing a needed
///    condition code or an instruction which can be converted into such an
///    instruction.
///    Only comparison with zero is supported.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {
  assert(CmpInstr.getParent());
  assert(MRI);

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (DeadNZCVIdx != -1) {
    if (CmpInstr.definesRegister(AArch64::WZR) ||
        CmpInstr.definesRegister(AArch64::XZR)) {
      CmpInstr.eraseFromParent();
      return true;
    }
    unsigned Opc = CmpInstr.getOpcode();
    unsigned NewOpc = convertToNonFlagSettingOpc(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr.setDesc(MCID);
    CmpInstr.RemoveOperand(DeadNZCVIdx);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where the immediate is zero.
  // FIXME: CmpValue has already been converted to 0 or 1 in analyzeCompare
  // function.
  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
    return false;

  return substituteCmpToZero(CmpInstr, SrcReg, MRI);
}
/// Get opcode of S version of Instr.
/// If Instr is S version its opcode is returned.
/// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have S version
/// or we are not interested in it.
static unsigned sForm(MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64::INSTRUCTION_LIST_END;

  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    return Instr.getOpcode();

  case AArch64::ADDWrr:
    return AArch64::ADDSWrr;
  case AArch64::ADDWri:
    return AArch64::ADDSWri;
  case AArch64::ADDXrr:
    return AArch64::ADDSXrr;
  case AArch64::ADDXri:
    return AArch64::ADDSXri;
  case AArch64::ADCWr:
    return AArch64::ADCSWr;
  case AArch64::ADCXr:
    return AArch64::ADCSXr;
  case AArch64::SUBWrr:
    return AArch64::SUBSWrr;
  case AArch64::SUBWri:
    return AArch64::SUBSWri;
  case AArch64::SUBXrr:
    return AArch64::SUBSXrr;
  case AArch64::SUBXri:
    return AArch64::SUBSXri;
  case AArch64::SBCWr:
    return AArch64::SBCSWr;
  case AArch64::SBCXr:
    return AArch64::SBCSXr;
  case AArch64::ANDWri:
    return AArch64::ANDSWri;
  case AArch64::ANDXri:
    return AArch64::ANDSXri;
  }
}
/// Check if AArch64::NZCV should be alive in successors of MBB.
static bool areCFlagsAliveInSuccessors(MachineBasicBlock *MBB) {
  for (auto *BB : MBB->successors())
    if (BB->isLiveIn(AArch64::NZCV))
      return true;
  return false;
}

namespace {

struct UsedNZCV {
  bool N = false;
  bool Z = false;
  bool C = false;
  bool V = false;

  UsedNZCV() = default;

  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    this->N |= UsedFlags.N;
    this->Z |= UsedFlags.Z;
    this->C |= UsedFlags.C;
    this->V |= UsedFlags.V;
    return *this;
  }
};

} // end anonymous namespace
/// Find a condition code used by the instruction.
/// Returns AArch64CC::Invalid if either the instruction does not use condition
/// codes or we don't optimize CmpInstr in the presence of such instructions.
static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64CC::Invalid;

  case AArch64::Bcc: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 2);
    return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 2).getImm());
  }

  case AArch64::CSINVWr:
  case AArch64::CSINVXr:
  case AArch64::CSINCWr:
  case AArch64::CSINCXr:
  case AArch64::CSELWr:
  case AArch64::CSELXr:
  case AArch64::CSNEGWr:
  case AArch64::CSNEGXr:
  case AArch64::FCSELSrrr:
  case AArch64::FCSELDrrr: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 1);
    return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 1).getImm());
  }
  }
}
static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
  assert(CC != AArch64CC::Invalid);
  UsedNZCV UsedFlags;
  switch (CC) {
  default:
    break;

  case AArch64CC::EQ: // Z set
  case AArch64CC::NE: // Z clear
    UsedFlags.Z = true;
    break;

  case AArch64CC::HI: // Z clear and C set
  case AArch64CC::LS: // Z set   or  C clear
    UsedFlags.Z = true;
    LLVM_FALLTHROUGH;
  case AArch64CC::HS: // C set
  case AArch64CC::LO: // C clear
    UsedFlags.C = true;
    break;

  case AArch64CC::MI: // N set
  case AArch64CC::PL: // N clear
    UsedFlags.N = true;
    break;

  case AArch64CC::VS: // V set
  case AArch64CC::VC: // V clear
    UsedFlags.V = true;
    break;

  case AArch64CC::GT: // Z clear, N and V the same
  case AArch64CC::LE: // Z set,   N and V differ
    UsedFlags.Z = true;
    LLVM_FALLTHROUGH;
  case AArch64CC::GE: // N and V the same
  case AArch64CC::LT: // N and V differ
    UsedFlags.N = true;
    UsedFlags.V = true;
    break;
  }
  return UsedFlags;
}
static bool isADDSRegImm(unsigned Opcode) {
  return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
}

static bool isSUBSRegImm(unsigned Opcode) {
  return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
}
/// Check if CmpInstr can be substituted by MI.
///
/// CmpInstr can be substituted:
/// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
/// - and, MI and CmpInstr are from the same MachineBB
/// - and, condition flags are not alive in successors of the CmpInstr parent
/// - and, if MI opcode is the S form there must be no defs of flags between
///        MI and CmpInstr
///   or if MI opcode is not the S form there must be neither defs of flags
///   nor uses of flags between MI and CmpInstr.
/// - and, C/V flags are not used after CmpInstr
static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr,
                                       const TargetRegisterInfo *TRI) {
  assert(MI);
  assert(sForm(*MI) != AArch64::INSTRUCTION_LIST_END);
  assert(CmpInstr);

  const unsigned CmpOpcode = CmpInstr->getOpcode();
  if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
    return false;

  if (MI->getParent() != CmpInstr->getParent())
    return false;

  if (areCFlagsAliveInSuccessors(CmpInstr->getParent()))
    return false;

  AccessKind AccessToCheck = AK_Write;
  if (sForm(*MI) != MI->getOpcode())
    AccessToCheck = AK_All;
  if (areCFlagsAccessedBetweenInstrs(MI, CmpInstr, TRI, AccessToCheck))
    return false;

  UsedNZCV NZCVUsedAfterCmp;
  for (auto I = std::next(CmpInstr->getIterator()),
            E = CmpInstr->getParent()->instr_end();
       I != E; ++I) {
    const MachineInstr &Instr = *I;
    if (Instr.readsRegister(AArch64::NZCV, TRI)) {
      AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr);
      if (CC == AArch64CC::Invalid) // Unsupported conditional instruction
        return false;
      NZCVUsedAfterCmp |= getUsedNZCV(CC);
    }

    if (Instr.modifiesRegister(AArch64::NZCV, TRI))
      break;
  }

  return !NZCVUsedAfterCmp.C && !NZCVUsedAfterCmp.V;
}
/// Substitute an instruction comparing to zero with another instruction
/// which produces the needed condition flags.
///
/// Return true on success.
bool AArch64InstrInfo::substituteCmpToZero(
    MachineInstr &CmpInstr, unsigned SrcReg,
    const MachineRegisterInfo *MRI) const {
  assert(MRI);
  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();

  unsigned NewOpc = sForm(*MI);
  if (NewOpc == AArch64::INSTRUCTION_LIST_END)
    return false;

  if (!canInstrSubstituteCmpInstr(MI, &CmpInstr, TRI))
    return false;

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr.eraseFromParent();
  bool succeeded = UpdateOperandRegClass(*MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}
bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD &&
      MI.getOpcode() != AArch64::CATCHRET)
    return false;

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MI.getOpcode() == AArch64::CATCHRET) {
    // Skip to the first instruction before the epilog.
    const TargetInstrInfo *TII =
        MBB.getParent()->getSubtarget().getInstrInfo();
    MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
    auto MBBI = MachineBasicBlock::iterator(MI);
    MachineBasicBlock::iterator FirstEpilogSEH = std::prev(MBBI);
    while (FirstEpilogSEH->getFlag(MachineInstr::FrameDestroy) &&
           FirstEpilogSEH != MBB.begin())
      FirstEpilogSEH = std::prev(FirstEpilogSEH);
    if (FirstEpilogSEH != MBB.begin())
      FirstEpilogSEH = std::next(FirstEpilogSEH);
    BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADRP))
        .addReg(AArch64::X0, RegState::Define)
        .addMBB(TargetMBB);
    BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADDXri))
        .addReg(AArch64::X0, RegState::Define)
        .addReg(AArch64::X0)
        .addMBB(TargetMBB)
        .addImm(0);
    return true;
  }

  unsigned Reg = MI.getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI.memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, OpFlags);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI.memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC)
        .addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC)
        .addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC)
        .addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3)
        .addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI.memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Tiny) {
    BuildMI(MBB, MI, DL, get(AArch64::ADR), Reg)
        .addGlobalAddress(GV, 0, OpFlags);
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI.memoperands_begin());
  }

  MI.eraseFromParent();

  return true;
}
// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
      assert(MI.getDesc().getNumOperands() == 3 &&
             MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI.getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI.getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI.getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}
// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs.
    unsigned DstReg = MI.getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI.getOperand(1).getReg() == AArch64::XZR) {
      assert(MI.getDesc().getNumOperands() == 4 &&
             MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI.getOperand(2).getImm() == 0) {
      assert(MI.getDesc().getNumOperands() == 4 &&
             MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}
// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b.
    unsigned DstReg = MI.getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}
unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
        MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
        MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }
  return 0;
}
/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) {
  return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
    return MMO->getFlags() & MOSuppressPair;
  });
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) {
  if (MI.memoperands_empty())
    return;
  (*MI.memoperands_begin())->setFlags(MOSuppressPair);
}

/// Check all MachineMemOperands for a hint that the load/store is strided.
bool AArch64InstrInfo::isStridedAccess(const MachineInstr &MI) {
  return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
    return MMO->getFlags() & MOStridedAccess;
  });
}
bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}
Optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default: return {};
  case AArch64::PRFMui: return AArch64::PRFUMi;
  case AArch64::LDRXui: return AArch64::LDURXi;
  case AArch64::LDRWui: return AArch64::LDURWi;
  case AArch64::LDRBui: return AArch64::LDURBi;
  case AArch64::LDRHui: return AArch64::LDURHi;
  case AArch64::LDRSui: return AArch64::LDURSi;
  case AArch64::LDRDui: return AArch64::LDURDi;
  case AArch64::LDRQui: return AArch64::LDURQi;
  case AArch64::LDRBBui: return AArch64::LDURBBi;
  case AArch64::LDRHHui: return AArch64::LDURHHi;
  case AArch64::LDRSBXui: return AArch64::LDURSBXi;
  case AArch64::LDRSBWui: return AArch64::LDURSBWi;
  case AArch64::LDRSHXui: return AArch64::LDURSHXi;
  case AArch64::LDRSHWui: return AArch64::LDURSHWi;
  case AArch64::LDRSWui: return AArch64::LDURSWi;
  case AArch64::STRXui: return AArch64::STURXi;
  case AArch64::STRWui: return AArch64::STURWi;
  case AArch64::STRBui: return AArch64::STURBi;
  case AArch64::STRHui: return AArch64::STURHi;
  case AArch64::STRSui: return AArch64::STURSi;
  case AArch64::STRDui: return AArch64::STURDi;
  case AArch64::STRQui: return AArch64::STURQi;
  case AArch64::STRBBui: return AArch64::STURBBi;
  case AArch64::STRHHui: return AArch64::STURHHi;
  }
}
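
// The scaled (LDR/STR) forms take an unsigned 12-bit immediate scaled by the
// access size, while the unscaled (LDUR/STUR) forms take a signed 9-bit byte
// offset, so the mapping in getUnscaledLdSt() above is used when an offset is
// negative or not a multiple of the access size.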
unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {
  switch (Opc) {
  default:
    return 2;
  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
  case AArch64::LDNPXi:
  case AArch64::LDNPDi:
  case AArch64::STNPXi:
  case AArch64::STNPDi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
  case AArch64::LDNPQi:
  case AArch64::STNPQi:
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
  case AArch64::LDNPWi:
  case AArch64::LDNPSi:
  case AArch64::STNPWi:
  case AArch64::STNPSi:
  case AArch64::LDG:
  case AArch64::STGPi:
    return 3;
  case AArch64::ADDG:
  case AArch64::STGOffset:
    return 1;
  }
}
bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  // Scaled instructions.
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui:
  case AArch64::LDRSWui:
  // Unscaled instructions.
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
    return true;
  }
}
unsigned AArch64InstrInfo::convertToFlagSettingOpc(unsigned Opc,
                                                   bool &Is64Bit) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no flag setting equivalent!");
  // 32-bit cases:
  case AArch64::ADDWri:
    Is64Bit = false;
    return AArch64::ADDSWri;
  case AArch64::ADDWrr:
    Is64Bit = false;
    return AArch64::ADDSWrr;
  case AArch64::ADDWrs:
    Is64Bit = false;
    return AArch64::ADDSWrs;
  case AArch64::ADDWrx:
    Is64Bit = false;
    return AArch64::ADDSWrx;
  case AArch64::ANDWri:
    Is64Bit = false;
    return AArch64::ANDSWri;
  case AArch64::ANDWrr:
    Is64Bit = false;
    return AArch64::ANDSWrr;
  case AArch64::ANDWrs:
    Is64Bit = false;
    return AArch64::ANDSWrs;
  case AArch64::BICWrr:
    Is64Bit = false;
    return AArch64::BICSWrr;
  case AArch64::BICWrs:
    Is64Bit = false;
    return AArch64::BICSWrs;
  case AArch64::SUBWri:
    Is64Bit = false;
    return AArch64::SUBSWri;
  case AArch64::SUBWrr:
    Is64Bit = false;
    return AArch64::SUBSWrr;
  case AArch64::SUBWrs:
    Is64Bit = false;
    return AArch64::SUBSWrs;
  case AArch64::SUBWrx:
    Is64Bit = false;
    return AArch64::SUBSWrx;
  // 64-bit cases:
  case AArch64::ADDXri:
    Is64Bit = true;
    return AArch64::ADDSXri;
  case AArch64::ADDXrr:
    Is64Bit = true;
    return AArch64::ADDSXrr;
  case AArch64::ADDXrs:
    Is64Bit = true;
    return AArch64::ADDSXrs;
  case AArch64::ADDXrx:
    Is64Bit = true;
    return AArch64::ADDSXrx;
  case AArch64::ANDXri:
    Is64Bit = true;
    return AArch64::ANDSXri;
  case AArch64::ANDXrr:
    Is64Bit = true;
    return AArch64::ANDSXrr;
  case AArch64::ANDXrs:
    Is64Bit = true;
    return AArch64::ANDSXrs;
  case AArch64::BICXrr:
    Is64Bit = true;
    return AArch64::BICSXrr;
  case AArch64::BICXrs:
    Is64Bit = true;
    return AArch64::BICSXrs;
  case AArch64::SUBXri:
    Is64Bit = true;
    return AArch64::SUBSXri;
  case AArch64::SUBXrr:
    Is64Bit = true;
    return AArch64::SUBSXrr;
  case AArch64::SUBXrs:
    Is64Bit = true;
    return AArch64::SUBSXrs;
  case AArch64::SUBXrx:
    Is64Bit = true;
    return AArch64::SUBSXrx;
  }
}
// Is this a candidate for ld/st merging or pairing?  For example, we don't
// touch volatiles or load/stores that have a hint to avoid pair formation.
bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
  // If this is a volatile load/store, don't mess with it.
  if (MI.hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg/fi+imm (as opposed to an address reloc).
  assert((MI.getOperand(1).isReg() || MI.getOperand(1).isFI()) &&
         "Expected a reg or frame index operand.");
  if (!MI.getOperand(2).isImm())
    return false;

  // Can't merge/pair if the instruction modifies the base register.
  // e.g., ldr x0, [x0]
  // This case will never occur with an FI base.
  if (MI.getOperand(1).isReg()) {
    unsigned BaseReg = MI.getOperand(1).getReg();
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    if (MI.modifiesRegister(BaseReg, TRI))
      return false;
  }

  // Check if this load/store has a hint to avoid pair formation.
  // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
  if (isLdStPairSuppressed(MI))
    return false;

  // On some CPUs quad load/store pairs are slower than two single load/stores.
  if (Subtarget.isPaired128Slow()) {
    switch (MI.getOpcode()) {
    default:
      break;
    case AArch64::LDURQi:
    case AArch64::STURQi:
    case AArch64::LDRQui:
    case AArch64::STRQui:
      return false;
    }
  }

  return true;
}
bool AArch64InstrInfo::getMemOperandWithOffset(
    const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
    const TargetRegisterInfo *TRI) const {
  unsigned Width;
  return getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI);
}
bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
    const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
    unsigned &Width, const TargetRegisterInfo *TRI) const {
  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
  // Handle only loads/stores with base register followed by immediate offset.
  if (LdSt.getNumExplicitOperands() == 3) {
    // Non-paired instruction (e.g., ldr x1, [x0, #8]).
    if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) ||
        !LdSt.getOperand(2).isImm())
      return false;
  } else if (LdSt.getNumExplicitOperands() == 4) {
    // Paired instruction (e.g., ldp x1, x2, [x0, #8]).
    if (!LdSt.getOperand(1).isReg() ||
        (!LdSt.getOperand(2).isReg() && !LdSt.getOperand(2).isFI()) ||
        !LdSt.getOperand(3).isImm())
      return false;
  } else
    return false;

  // Get the scaling factor for the instruction and set the width for the
  // instruction.
  unsigned Scale = 0;
  int64_t Dummy1, Dummy2;

  // If this returns false, then it's an instruction we don't want to handle.
  if (!getMemOpInfo(LdSt.getOpcode(), Scale, Width, Dummy1, Dummy2))
    return false;

  // Compute the offset. Offset is calculated as the immediate operand
  // multiplied by the scaling factor. Unscaled instructions have a scaling
  // factor of 1.
  if (LdSt.getNumExplicitOperands() == 3) {
    BaseOp = &LdSt.getOperand(1);
    Offset = LdSt.getOperand(2).getImm() * Scale;
  } else {
    assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
    BaseOp = &LdSt.getOperand(2);
    Offset = LdSt.getOperand(3).getImm() * Scale;
  }

  assert((BaseOp->isReg() || BaseOp->isFI()) &&
         "getMemOperandWithOffset only supports base "
         "operands of type register or frame index.");

  return true;
}
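
// Example for getMemOperandWithOffsetWidth() above: for
// "ldp x1, x2, [x0, #16]" the immediate operand holds 2 and the scale is 8,
// so BaseOp points at the x0 operand, Offset is 16 and Width is 16 bytes.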
MachineOperand &
AArch64InstrInfo::getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const {
  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
  MachineOperand &OfsOp = LdSt.getOperand(LdSt.getNumExplicitOperands() - 1);
  assert(OfsOp.isImm() && "Offset operand wasn't immediate.");
  return OfsOp;
}
bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, unsigned &Scale,
                                    unsigned &Width, int64_t &MinOffset,
                                    int64_t &MaxOffset) {
  switch (Opcode) {
  // Not a memory operation or something we want to handle.
  default:
    Scale = Width = 0;
    MinOffset = MaxOffset = 0;
    return false;
  case AArch64::STRWpost:
  case AArch64::LDRWpost:
    Width = 32;
    Scale = 4;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    Width = 16;
    Scale = 1;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::PRFUMi:
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    Width = 8;
    Scale = 1;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    Width = 4;
    Scale = 1;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::LDURHi:
  case AArch64::LDURHHi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::STURHi:
  case AArch64::STURHHi:
    Width = 2;
    Scale = 1;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::LDURBi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::STURBi:
  case AArch64::STURBBi:
    Width = 1;
    Scale = 1;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::LDPQi:
  case AArch64::LDNPQi:
  case AArch64::STPQi:
  case AArch64::STNPQi:
    Scale = 16;
    Width = 32;
    MinOffset = -64;
    MaxOffset = 63;
    break;
  case AArch64::LDRQui:
  case AArch64::STRQui:
    Scale = Width = 16;
    MinOffset = 0;
    MaxOffset = 4095;
    break;
  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::LDNPXi:
  case AArch64::LDNPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
  case AArch64::STNPXi:
  case AArch64::STNPDi:
    Scale = 8;
    Width = 16;
    MinOffset = -64;
    MaxOffset = 63;
    break;
  case AArch64::PRFMui:
  case AArch64::LDRXui:
  case AArch64::LDRDui:
  case AArch64::STRXui:
  case AArch64::STRDui:
    Scale = Width = 8;
    MinOffset = 0;
    MaxOffset = 4095;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::LDNPWi:
  case AArch64::LDNPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
  case AArch64::STNPWi:
  case AArch64::STNPSi:
    Scale = 4;
    Width = 8;
    MinOffset = -64;
    MaxOffset = 63;
    break;
  case AArch64::LDRWui:
  case AArch64::LDRSui:
  case AArch64::LDRSWui:
  case AArch64::STRWui:
  case AArch64::STRSui:
    Scale = Width = 4;
    MinOffset = 0;
    MaxOffset = 4095;
    break;
  case AArch64::LDRHui:
  case AArch64::LDRHHui:
  case AArch64::LDRSHWui:
  case AArch64::LDRSHXui:
  case AArch64::STRHui:
  case AArch64::STRHHui:
    Scale = Width = 2;
    MinOffset = 0;
    MaxOffset = 4095;
    break;
  case AArch64::LDRBui:
  case AArch64::LDRBBui:
  case AArch64::LDRSBWui:
  case AArch64::LDRSBXui:
  case AArch64::STRBui:
  case AArch64::STRBBui:
    Scale = Width = 1;
    MinOffset = 0;
    MaxOffset = 4095;
    break;
  case AArch64::TAGPstack:
    Scale = 16;
    Width = 0;
    MinOffset = -63;
    MaxOffset = 63;
    break;
  case AArch64::STGOffset:
  case AArch64::STZGOffset:
    Scale = Width = 16;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::ST2GOffset:
  case AArch64::STZ2GOffset:
    Scale = 16;
    Width = 32;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::STGPi:
    Scale = Width = 16;
    MinOffset = -64;
    MaxOffset = 63;
    break;
  }

  return true;
}
static unsigned getOffsetStride(unsigned Opc) {
  switch (Opc) {
  default:
    return 0;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    return 16;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    return 8;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    return 4;
  }
}
// Scale the unscaled offsets. Returns false if the unscaled offset can't be
// handled.
static bool scaleOffset(unsigned Opc, int64_t &Offset) {
  unsigned OffsetStride = getOffsetStride(Opc);
  if (OffsetStride == 0)
    return false;
  // If the byte-offset isn't a multiple of the stride, we can't scale this
  // offset.
  if (Offset % OffsetStride != 0)
    return false;

  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  Offset /= OffsetStride;
  return true;
}
// Unscale the scaled offsets. Returns false if the scaled offset can't be
// handled.
static bool unscaleOffset(unsigned Opc, int64_t &Offset) {
  unsigned OffsetStride = getOffsetStride(Opc);
  if (OffsetStride == 0)
    return false;

  // Convert the "element" offset used by scaled pair load/store instructions
  // into the byte-offset used by unscaled.
  Offset *= OffsetStride;
  return true;
}
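// Worked example (illustrative): for AArch64::STURXi the stride is 8, so
// scaleOffset turns a byte offset of 16 into the element offset 2, while a
// byte offset of 12 fails the stride check and can't be paired.
// unscaleOffset is the inverse, mapping element offset 2 back to byte
// offset 16.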
static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
  if (FirstOpc == SecondOpc)
    return true;
  // We can also pair sign-ext and zero-ext instructions.
  switch (FirstOpc) {
  default:
    return false;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
  }
  // These instructions can't be paired based on their opcodes.
  return false;
}
static bool shouldClusterFI(const MachineFrameInfo &MFI, int FI1,
                            int64_t Offset1, unsigned Opcode1, int FI2,
                            int64_t Offset2, unsigned Opcode2) {
  // Accesses through fixed stack object frame indices may access a different
  // fixed stack slot. Check that the object offsets + offsets match.
  if (MFI.isFixedObjectIndex(FI1) && MFI.isFixedObjectIndex(FI2)) {
    int64_t ObjectOffset1 = MFI.getObjectOffset(FI1);
    int64_t ObjectOffset2 = MFI.getObjectOffset(FI2);
    assert(ObjectOffset1 <= ObjectOffset2 && "Object offsets are not ordered.");
    // Get the byte-offset from the object offset.
    if (!unscaleOffset(Opcode1, Offset1) || !unscaleOffset(Opcode2, Offset2))
      return false;
    ObjectOffset1 += Offset1;
    ObjectOffset2 += Offset2;
    // Get the "element" index in the object.
    if (!scaleOffset(Opcode1, ObjectOffset1) ||
        !scaleOffset(Opcode2, ObjectOffset2))
      return false;
    return ObjectOffset1 + 1 == ObjectOffset2;
  }

  return FI1 == FI2;
}
/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOperandWithOffset returns true.
bool AArch64InstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
                                           const MachineOperand &BaseOp2,
                                           unsigned NumLoads) const {
  const MachineInstr &FirstLdSt = *BaseOp1.getParent();
  const MachineInstr &SecondLdSt = *BaseOp2.getParent();
  if (BaseOp1.getType() != BaseOp2.getType())
    return false;

  assert((BaseOp1.isReg() || BaseOp1.isFI()) &&
         "Only base registers and frame indices are supported.");

  // Check for both base regs and base FI.
  if (BaseOp1.isReg() && BaseOp1.getReg() != BaseOp2.getReg())
    return false;

  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;

  if (!isPairableLdStInst(FirstLdSt) || !isPairableLdStInst(SecondLdSt))
    return false;

  // Can we pair these instructions based on their opcodes?
  unsigned FirstOpc = FirstLdSt.getOpcode();
  unsigned SecondOpc = SecondLdSt.getOpcode();
  if (!canPairLdStOpc(FirstOpc, SecondOpc))
    return false;

  // Can't merge volatiles or load/stores that have a hint to avoid pair
  // formation, for example.
  if (!isCandidateToMergeOrPair(FirstLdSt) ||
      !isCandidateToMergeOrPair(SecondLdSt))
    return false;

  // isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
  int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
  if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
    return false;

  int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
  if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
    return false;

  // Pairwise instructions have a 7-bit signed offset field.
  if (Offset1 > 63 || Offset1 < -64)
    return false;

  // The caller should already have ordered First/SecondLdSt by offset.
  // Note: except for non-equal frame index bases.
  if (BaseOp1.isFI()) {
    assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 >= Offset2) &&
           "Caller should have ordered offsets.");

    const MachineFrameInfo &MFI =
        FirstLdSt.getParent()->getParent()->getFrameInfo();
    return shouldClusterFI(MFI, BaseOp1.getIndex(), Offset1, FirstOpc,
                           BaseOp2.getIndex(), Offset2, SecondOpc);
  }

  assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 <= Offset2) &&
         "Caller should have ordered offsets.");

  return Offset1 + 1 == Offset2;
}
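// Illustrative example (register numbers are arbitrary): for
//   ldr x1, [x0, #8]
//   ldr x2, [x0, #16]
// the bases match, the opcodes can pair, and the element offsets are 1 and 2,
// so Offset1 + 1 == Offset2 holds and the two loads are clustered, allowing a
// later pass to form
//   ldp x1, x2, [x0, #8]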
static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}
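// Worked example (illustrative): copying the tuple D1_D2_D3 into D3_D4_D5
// gives (3 - 1) & 0x1f == 2 < 3, so a forward sub-register copy would
// overwrite D3 before it is read, and the tuple copy below must iterate
// backwards. For D3_D4_D5 into D1_D2_D3 the masked remainder is 30 >= 3, so
// forward order is safe.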
void AArch64InstrInfo::copyPhysRegTuple(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        const DebugLoc &DL, unsigned DestReg,
                                        unsigned SrcReg, bool KillSrc,
                                        unsigned Opcode,
                                        ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() && "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}
void AArch64InstrInfo::copyGPRRegTuple(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator I,
                                       DebugLoc DL, unsigned DestReg,
                                       unsigned SrcReg, bool KillSrc,
                                       unsigned Opcode, unsigned ZeroReg,
                                       llvm::ArrayRef<unsigned> Indices) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned NumRegs = Indices.size();

#if !defined(NDEBUG)
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  assert(DestEncoding % NumRegs == 0 && SrcEncoding % NumRegs == 0 &&
         "GPR reg sequences should not be able to overlap");
#endif

  for (unsigned SubReg = 0; SubReg != NumRegs; ++SubReg) {
    const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    MIB.addReg(ZeroReg);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL, unsigned DestReg,
                                   unsigned SrcReg, bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroingGP()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg)
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroingGP()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg)
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1,
                                       AArch64::dsub2, AArch64::dsub3};
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1,
                                       AArch64::dsub2};
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1};
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1,
                                       AArch64::qsub2, AArch64::qsub3};
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1,
                                       AArch64::qsub2};
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1};
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  if (AArch64::XSeqPairsClassRegClass.contains(DestReg) &&
      AArch64::XSeqPairsClassRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = {AArch64::sube64, AArch64::subo64};
    copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRXrs,
                    AArch64::XZR, Indices);
    return;
  }

  if (AArch64::WSeqPairsClassRegClass.contains(DestReg) &&
      AArch64::WSeqPairsClassRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = {AArch64::sube32, AArch64::subo32};
    copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRWrs,
                    AArch64::WZR, Indices);
    return;
  }

  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }

  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}
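// Illustrative expansions (register numbers are arbitrary): a plain
// W-register copy "w1 = COPY w2" becomes "orr w1, wzr, w2"; when WSP is
// involved it becomes "add w1, w2, #0" instead; and a NEON copy
// "q0 = COPY q1" becomes "orr v0.16b, v1.16b, v1.16b".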
static void storeRegPairToStackSlot(const TargetRegisterInfo &TRI,
                                    MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator InsertBefore,
                                    const MCInstrDesc &MCID,
                                    unsigned SrcReg, bool IsKill,
                                    unsigned SubIdx0, unsigned SubIdx1, int FI,
                                    MachineMemOperand *MMO) {
  unsigned SrcReg0 = SrcReg;
  unsigned SrcReg1 = SrcReg;
  if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    SrcReg0 = TRI.getSubReg(SrcReg, SubIdx0);
    SubIdx0 = 0;
    SrcReg1 = TRI.getSubReg(SrcReg, SubIdx1);
    SubIdx1 = 0;
  }
  BuildMI(MBB, InsertBefore, DebugLoc(), MCID)
      .addReg(SrcReg0, getKillRegState(IsKill), SubIdx0)
      .addReg(SrcReg1, getKillRegState(IsKill), SubIdx1)
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMO);
}
void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (TRI->getSpillSize(*RC)) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRDui;
    } else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) {
      storeRegPairToStackSlot(getRegisterInfo(), MBB, MBBI,
                              get(AArch64::STPWi), SrcReg, isKill,
                              AArch64::sube32, AArch64::subo32, FI, MMO);
      return;
    }
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d;
      Offset = false;
    } else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) {
      storeRegPairToStackSlot(getRegisterInfo(), MBB, MBBI,
                              get(AArch64::STPXi), SrcReg, isKill,
                              AArch64::sube64, AArch64::subo64, FI, MMO);
      return;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d;
      Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d;
      Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d;
      Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d;
      Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d;
      Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc))
                                     .addReg(SrcReg, getKillRegState(isKill))
                                     .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
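// Illustrative example: spilling a GPR64 virtual register to %stack.0
// selects Opc = AArch64::STRXui and emits
//   STRXui %reg, %stack.0, 0 :: (store 8 into %stack.0)
// whereas a QQ tuple uses ST1Twov2d, which takes no immediate offset
// (hence Offset == false above).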
static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator InsertBefore,
                                     const MCInstrDesc &MCID,
                                     unsigned DestReg, unsigned SubIdx0,
                                     unsigned SubIdx1, int FI,
                                     MachineMemOperand *MMO) {
  unsigned DestReg0 = DestReg;
  unsigned DestReg1 = DestReg;
  bool IsUndef = true;
  if (TargetRegisterInfo::isPhysicalRegister(DestReg)) {
    DestReg0 = TRI.getSubReg(DestReg, SubIdx0);
    SubIdx0 = 0;
    DestReg1 = TRI.getSubReg(DestReg, SubIdx1);
    SubIdx1 = 0;
    IsUndef = false;
  }
  BuildMI(MBB, InsertBefore, DebugLoc(), MCID)
      .addReg(DestReg0, RegState::Define | getUndefRegState(IsUndef), SubIdx0)
      .addReg(DestReg1, RegState::Define | getUndefRegState(IsUndef), SubIdx1)
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMO);
}
void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (TRI->getSpillSize(*RC)) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRDui;
    } else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) {
      loadRegPairFromStackSlot(getRegisterInfo(), MBB, MBBI,
                               get(AArch64::LDPWi), DestReg, AArch64::sube32,
                               AArch64::subo32, FI, MMO);
      return;
    }
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d;
      Offset = false;
    } else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) {
      loadRegPairFromStackSlot(getRegisterInfo(), MBB, MBBI,
                               get(AArch64::LDPXi), DestReg, AArch64::sube64,
                               AArch64::subo64, FI, MMO);
      return;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d;
      Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d;
      Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d;
      Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d;
      Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d;
      Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc))
                                     .addReg(DestReg, getDefRegState(true))
                                     .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV,
                           bool NeedsWinCFI, bool *HasWinCFI) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  assert((DestReg != AArch64::SP || Offset % 16 == 0) &&
         "SP increment/decrement not 16-byte aligned");

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24-bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
  // register can be loaded with offset%8 and the add/sub can use an extending
  // instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  //  assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    if (NeedsWinCFI && SrcReg == AArch64::SP && DestReg == AArch64::SP) {
      if (HasWinCFI)
        *HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
          .addImm(ThisVal)
          .setMIFlag(Flag);
    }

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);

  if (NeedsWinCFI) {
    if ((DestReg == AArch64::FP && SrcReg == AArch64::SP) ||
        (SrcReg == AArch64::FP && DestReg == AArch64::SP)) {
      if (HasWinCFI)
        *HasWinCFI = true;
      if (Offset == 0)
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_SetFP)).
            setMIFlag(Flag);
      else
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_AddFP)).
            addImm(Offset).setMIFlag(Flag);
    } else if (DestReg == AArch64::SP) {
      if (HasWinCFI)
        *HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc)).
          addImm(Offset).setMIFlag(Flag);
    }
  }
}
MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex,
    LiveIntervals *LIS, VirtRegMap *VRM) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %0 = COPY %sp; GPR64all:%0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %0 may even spill. We can't spill %sp, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI.isFullCopy()) {
    unsigned DstReg = MI.getOperand(0).getReg();
    unsigned SrcReg = MI.getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Handle the case where a copy is being spilled or filled but the source
  // and destination register classes don't match. For example:
  //
  //   %0 = COPY %xzr; GPR64common:%0
  //
  // In this case we can still safely fold away the COPY and generate the
  // following spill code:
  //
  //   STRXui %xzr, %stack.0
  //
  // This also eliminates spilled cross register class COPYs (e.g. between x and
  // d regs) of the same size. For example:
  //
  //   %0 = COPY %1; GPR64:%0, FPR64:%1
  //
  // will be filled as
  //
  //   LDRDui %0, fi<#0>
  //
  // instead of
  //
  //   LDRXui %Temp, fi<#0>
  //   %0 = FMOV %Temp
  //
  if (MI.isCopy() && Ops.size() == 1 &&
      // Make sure we're only folding the explicit COPY defs/uses.
      (Ops[0] == 0 || Ops[0] == 1)) {
    bool IsSpill = Ops[0] == 0;
    bool IsFill = !IsSpill;
    const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
    const MachineRegisterInfo &MRI = MF.getRegInfo();
    MachineBasicBlock &MBB = *MI.getParent();
    const MachineOperand &DstMO = MI.getOperand(0);
    const MachineOperand &SrcMO = MI.getOperand(1);
    unsigned DstReg = DstMO.getReg();
    unsigned SrcReg = SrcMO.getReg();
    // This is slightly expensive to compute for physical regs since
    // getMinimalPhysRegClass is slow.
    auto getRegClass = [&](unsigned Reg) {
      return TargetRegisterInfo::isVirtualRegister(Reg)
                 ? MRI.getRegClass(Reg)
                 : TRI.getMinimalPhysRegClass(Reg);
    };

    if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
      assert(TRI.getRegSizeInBits(*getRegClass(DstReg)) ==
                 TRI.getRegSizeInBits(*getRegClass(SrcReg)) &&
             "Mismatched register size in non subreg COPY");
      if (IsSpill)
        storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
                            getRegClass(SrcReg), &TRI);
      else
        loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex,
                             getRegClass(DstReg), &TRI);
      return &*--InsertPt;
    }

    // Handle cases like spilling def of:
    //
    //   %0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%0
    //
    // where the physical register source can be widened and stored to the full
    // virtual reg destination stack slot, in this case producing:
    //
    //   STRXui %xzr, %stack.0
    //
    if (IsSpill && DstMO.isUndef() &&
        TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
      assert(SrcMO.getSubReg() == 0 &&
             "Unexpected subreg on physical register");
      const TargetRegisterClass *SpillRC;
      unsigned SpillSubreg;
      switch (DstMO.getSubReg()) {
      default:
        SpillRC = nullptr;
        break;
      case AArch64::sub_32:
      case AArch64::ssub:
        if (AArch64::GPR32RegClass.contains(SrcReg)) {
          SpillRC = &AArch64::GPR64RegClass;
          SpillSubreg = AArch64::sub_32;
        } else if (AArch64::FPR32RegClass.contains(SrcReg)) {
          SpillRC = &AArch64::FPR64RegClass;
          SpillSubreg = AArch64::ssub;
        } else
          SpillRC = nullptr;
        break;
      case AArch64::dsub:
        if (AArch64::FPR64RegClass.contains(SrcReg)) {
          SpillRC = &AArch64::FPR128RegClass;
          SpillSubreg = AArch64::dsub;
        } else
          SpillRC = nullptr;
        break;
      }

      if (SpillRC)
        if (unsigned WidenedSrcReg =
                TRI.getMatchingSuperReg(SrcReg, SpillSubreg, SpillRC)) {
          storeRegToStackSlot(MBB, InsertPt, WidenedSrcReg, SrcMO.isKill(),
                              FrameIndex, SpillRC, &TRI);
          return &*--InsertPt;
        }
    }

    // Handle cases like filling use of:
    //
    //   %0:sub_32<def,read-undef> = COPY %1; GPR64:%0, GPR32:%1
    //
    // where we can load the full virtual reg source stack slot, into the
    // subreg destination, in this case producing:
    //
    //   LDRWui %0:sub_32<def,read-undef>, %stack.0
    //
    if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
      const TargetRegisterClass *FillRC;
      switch (DstMO.getSubReg()) {
      default:
        FillRC = nullptr;
        break;
      case AArch64::sub_32:
        FillRC = &AArch64::GPR32RegClass;
        break;
      case AArch64::ssub:
        FillRC = &AArch64::FPR32RegClass;
        break;
      case AArch64::dsub:
        FillRC = &AArch64::FPR64RegClass;
        break;
      }

      if (FillRC) {
        assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) ==
                   TRI.getRegSizeInBits(*FillRC) &&
               "Mismatched regclass size on folded subreg COPY");
        loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI);
        MachineInstr &LoadMI = *--InsertPt;
        MachineOperand &LoadDst = LoadMI.getOperand(0);
        assert(LoadDst.getSubReg() == 0 && "unexpected subreg on fill load");
        LoadDst.setSubReg(DstMO.getSubReg());
        LoadDst.setIsUndef();
        return &LoadMI;
      }
    }
  }

  // Cannot fold.
  return nullptr;
}
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int *EmittableOffset) {
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;

  // Exit early for structured vector spills/fills as they can't take an
  // immediate offset.
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::LD1Twov2d:
  case AArch64::LD1Threev2d:
  case AArch64::LD1Fourv2d:
  case AArch64::LD1Twov1d:
  case AArch64::LD1Threev1d:
  case AArch64::LD1Fourv1d:
  case AArch64::ST1Twov2d:
  case AArch64::ST1Threev2d:
  case AArch64::ST1Fourv2d:
  case AArch64::ST1Twov1d:
  case AArch64::ST1Threev1d:
  case AArch64::ST1Fourv1d:
  case AArch64::IRG:
  case AArch64::IRGstack:
    return AArch64FrameOffsetCannotUpdate;
  }

  // Get the min/max offset and the scale.
  unsigned Scale, Width;
  int64_t MinOff, MaxOff;
  if (!AArch64InstrInfo::getMemOpInfo(MI.getOpcode(), Scale, Width, MinOff,
                                      MaxOff))
    llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");

  // Construct the complete offset.
  const MachineOperand &ImmOpnd =
      MI.getOperand(AArch64InstrInfo::getLoadStoreImmIdx(MI.getOpcode()));
  Offset += ImmOpnd.getImm() * Scale;

  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset and there is an unscaled op to use.
  Optional<unsigned> UnscaledOp =
      AArch64InstrInfo::getUnscaledLdSt(MI.getOpcode());
  bool useUnscaledOp = UnscaledOp && (Offset % Scale || Offset < 0);
  if (useUnscaledOp &&
      !AArch64InstrInfo::getMemOpInfo(*UnscaledOp, Scale, Width, MinOff,
                                      MaxOff))
    llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");

  int64_t Remainder = Offset % Scale;
  assert(!(Remainder && useUnscaledOp) &&
         "Cannot have remainder when using unscaled op");

  assert(MinOff < MaxOff && "Unexpected Min/Max offsets");
  int NewOffset = Offset / Scale;
  if (MinOff <= NewOffset && NewOffset <= MaxOff)
    Offset = Remainder;
  else {
    NewOffset = NewOffset < 0 ? MinOff : MaxOff;
    Offset = Offset - NewOffset * Scale + Remainder;
  }

  if (EmittableOffset)
    *EmittableOffset = NewOffset;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp && UnscaledOp)
    *OutUnscaledOp = *UnscaledOp;

  return AArch64FrameOffsetCanUpdate |
         (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}
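// Worked example (illustrative): for "ldr x0, [sp, #8]" (LDRXui, Scale = 8)
// an incoming frame offset of 40 gives Offset = 40 + 8 = 48 and
// NewOffset = 6, which is within the scaled range, so the rewrite is legal
// with no residual offset. An offset that is not a multiple of 8 instead
// switches to the unscaled LDURXi form.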
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, int &Offset,
                                    const AArch64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
                                         &UnscaledOp, &NewOffset);
  if (Status & AArch64FrameOffsetCanUpdate) {
    if (Status & AArch64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return Offset == 0;
  }

  return false;
}
void AArch64InstrInfo::getNoop(MCInst &NopInst) const {
  NopInst.setOpcode(AArch64::HINT);
  NopInst.addOperand(MCOperand::createImm(0));
}

// AArch64 supports MachineCombiner.
bool AArch64InstrInfo::useMachineCombiner() const { return true; }
// True when Opc sets flag
static bool isCombineInstrSettingFlag(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}
// 32b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate32(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDWrr:
  case AArch64::ADDWri:
  case AArch64::SUBWrr:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::SUBSWrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBWri:
  case AArch64::SUBSWri:
    return true;
  default:
    break;
  }
  return false;
}
// 64b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate64(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDXrr:
  case AArch64::ADDXri:
  case AArch64::SUBXrr:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBXri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}
// FP Opcodes that can be combined with a FMUL
static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {
  switch (Inst.getOpcode()) {
  default:
    break;
  case AArch64::FADDSrr:
  case AArch64::FADDDrr:
  case AArch64::FADDv2f32:
  case AArch64::FADDv2f64:
  case AArch64::FADDv4f32:
  case AArch64::FSUBSrr:
  case AArch64::FSUBDrr:
  case AArch64::FSUBv2f32:
  case AArch64::FSUBv2f64:
  case AArch64::FSUBv4f32:
    TargetOptions Options = Inst.getParent()->getParent()->getTarget().Options;
    return (Options.UnsafeFPMath ||
            Options.AllowFPOpFusion == FPOpFusion::Fast);
  }
  return false;
}
// Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate(unsigned Opc) {
  return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
}
//
// Utility routine that checks if \param MO is defined by an
// \param CombineOpc instruction in the basic block \param MBB
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
                       unsigned CombineOpc, unsigned ZeroReg = 0,
                       bool CheckZeroReg = false) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineInstr *MI = nullptr;

  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    MI = MRI.getUniqueVRegDef(MO.getReg());
  // And it needs to be in the trace (otherwise, it won't have a depth).
  if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
    return false;
  // Must only be used by the user we combine with.
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

  if (CheckZeroReg) {
    assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
           MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
           MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
    // The third input reg must be zero.
    if (MI->getOperand(3).getReg() != ZeroReg)
      return false;
  }

  return true;
}
//
// Is \param MO defined by an integer multiply and can be combined?
static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                              unsigned MulOpc, unsigned ZeroReg) {
  return canCombine(MBB, MO, MulOpc, ZeroReg, true);
}
//
// Is \param MO defined by a floating-point multiply and can be combined?
static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                               unsigned MulOpc) {
  return canCombine(MBB, MO, MulOpc);
}
// TODO: There are many more machine instruction opcodes to match:
//       1. Other data types (integer, vectors)
//       2. Other math / logic operations (xor, or)
//       3. Other forms of the same operation (intrinsics and other variants)
bool AArch64InstrInfo::isAssociativeAndCommutative(
    const MachineInstr &Inst) const {
  switch (Inst.getOpcode()) {
  case AArch64::FADDDrr:
  case AArch64::FADDSrr:
  case AArch64::FADDv2f32:
  case AArch64::FADDv2f64:
  case AArch64::FADDv4f32:
  case AArch64::FMULDrr:
  case AArch64::FMULSrr:
  case AArch64::FMULX32:
  case AArch64::FMULX64:
  case AArch64::FMULXv2f32:
  case AArch64::FMULXv2f64:
  case AArch64::FMULXv4f32:
  case AArch64::FMULv2f32:
  case AArch64::FMULv2f64:
  case AArch64::FMULv4f32:
    return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
  default:
    return false;
  }
}
/// Find instructions that can be turned into madd.
static bool getMaddPatterns(MachineInstr &Root,
                            SmallVectorImpl<MachineCombinerPattern> &Patterns) {
  unsigned Opc = Root.getOpcode();
  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  if (!isCombineInstrCandidate(Opc))
    return false;
  if (isCombineInstrSettingFlag(Opc)) {
    int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
    // When NZCV is live bail out.
    if (Cmp_NZCV == -1)
      return false;
    unsigned NewOpc = convertToNonFlagSettingOpc(Root);
    // When opcode can't change bail out.
    // CHECKME: do we miss any cases for opcode conversion?
    if (NewOpc == Opc)
      return false;
    Opc = NewOpc;
  }

  switch (Opc) {
  default:
    break;
  case AArch64::ADDWrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "ADDWrr does not have register operands");
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBWrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
      Found = true;
    }
    break;
  case AArch64::ADDXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
      Found = true;
    }
    break;
  }
  return Found;
}
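// Illustrative example (virtual register numbers are arbitrary): for
//   %2 = MADDWrrr %0, %1, $wzr    ; i.e. %2 = %0 * %1
//   %4 = ADDWrr %3, %2
// the multiply feeding operand 2 of the ADD is matched and MULADDW_OP2 is
// recorded, letting the combiner later fuse the pair into
//   %4 = MADDWrrr %0, %1, %3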
/// Floating-Point Support

/// Find instructions that can be turned into madd.
static bool getFMAPatterns(MachineInstr &Root,
                           SmallVectorImpl<MachineCombinerPattern> &Patterns) {

  if (!isCombineInstrCandidateFP(Root))
    return false;

  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  switch (Root.getOpcode()) {
  default:
    assert(false && "Unsupported FP instruction in combiner\n");
    break;
  case AArch64::FADDSrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "FADDSrr does not have register operands");
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULADDS_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv1i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULADDS_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv1i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP2);
      Found = true;
    }
    break;
  case AArch64::FADDDrr:
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULADDD_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv1i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULADDD_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv1i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP2);
      Found = true;
    }
    break;
  case AArch64::FADDv2f32:
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv2i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv2f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv2i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv2f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP2);
      Found = true;
    }
    break;
  case AArch64::FADDv2f64:
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv2i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv2f64)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv2i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv2f64)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP2);
      Found = true;
    }
    break;
  case AArch64::FADDv4f32:
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv4i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv4f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv4i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv4f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP2);
      Found = true;
    }
    break;
  case AArch64::FSUBSrr:
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv1i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv1i32_indexed_OP2);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FNMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FNMULSUBS_OP1);
      Found = true;
    }
    break;
  case AArch64::FSUBDrr:
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv1i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv1i64_indexed_OP2);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FNMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FNMULSUBD_OP1);
      Found = true;
    }
    break;
  case AArch64::FSUBv2f32:
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv2i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2i32_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv2f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2f32_OP2);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv2i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2i32_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv2f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2f32_OP1);
      Found = true;
    }
    break;
  case AArch64::FSUBv2f64:
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv2i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2i64_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv2f64)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2f64_OP2);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv2i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2i64_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv2f64)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2f64_OP1);
      Found = true;
    }
    break;
  case AArch64::FSUBv4f32:
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv4i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv4i32_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv4f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv4f32_OP2);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv4i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv4i32_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv4f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv4f32_OP1);
      Found = true;
    }
    break;
  }
  return Found;
}
/// Return true when a code sequence can improve throughput. It
/// should be called only for instructions in loops.
/// \param Pattern - combiner pattern
bool AArch64InstrInfo::isThroughputPattern(
    MachineCombinerPattern Pattern) const {
  switch (Pattern) {
  default:
    break;
  case MachineCombinerPattern::FMULADDS_OP1:
  case MachineCombinerPattern::FMULADDS_OP2:
  case MachineCombinerPattern::FMULSUBS_OP1:
  case MachineCombinerPattern::FMULSUBS_OP2:
  case MachineCombinerPattern::FMULADDD_OP1:
  case MachineCombinerPattern::FMULADDD_OP2:
  case MachineCombinerPattern::FMULSUBD_OP1:
  case MachineCombinerPattern::FMULSUBD_OP2:
  case MachineCombinerPattern::FNMULSUBS_OP1:
  case MachineCombinerPattern::FNMULSUBD_OP1:
  case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
  case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f32_OP2:
  case MachineCombinerPattern::FMLAv2f32_OP1:
  case MachineCombinerPattern::FMLAv2f64_OP1:
  case MachineCombinerPattern::FMLAv2f64_OP2:
  case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
  case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
  case MachineCombinerPattern::FMLAv4f32_OP1:
  case MachineCombinerPattern::FMLAv4f32_OP2:
  case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
  case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
  case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
  case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
  case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
  case MachineCombinerPattern::FMLSv2f32_OP2:
  case MachineCombinerPattern::FMLSv2f64_OP2:
  case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
  case MachineCombinerPattern::FMLSv4f32_OP2:
    return true;
  } // end switch (Pattern)
  return false;
}
/// Return true when there is potentially a faster code sequence for an
/// instruction chain ending in \p Root. All potential patterns are listed in
/// the \p Pattern vector. Pattern should be sorted in priority order since the
/// pattern evaluator stops checking as soon as it finds a faster sequence.
bool AArch64InstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  // Integer patterns
  if (getMaddPatterns(Root, Patterns))
    return true;
  // Floating point patterns
  if (getFMAPatterns(Root, Patterns))
    return true;

  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
}
enum class FMAInstKind { Default, Indexed, Accumulator };
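
// Illustrative mapping from the three kinds to the operand orders built in
// genFusedMultiply below (a sketch; the assembly forms are assumptions, not
// taken from this file):
//   Default:     fmadd s0, s1, s2, s3         // Rd, Rn, Rm, Ra
//   Indexed:     fmla  v0.2s, v1.2s, v2.s[1]  // accumulator first, lane imm
//   Accumulator: fmla  v0.2s, v1.2s, v2.2s    // accumulator first, no lane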
/// genFusedMultiply - Generate fused multiply instructions.
/// This function supports both integer and floating point instructions.
/// A typical example:
///  F|MUL I=A,B,0
///  F|ADD R,I,C
///  ==> F|MADD R,A,B,C
/// \param MF Containing MachineFunction
/// \param MRI Register information
/// \param TII Target information
/// \param Root is the F|ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the F|MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the f|madd instruction
/// \param RC Register class of operands
/// \param kind of fma instruction (addressing mode) to be generated
/// \param ReplacedAddend is the result register from the instruction
/// replacing the non-combined operand, if any.
static MachineInstr *
genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
                 const TargetInstrInfo *TII, MachineInstr &Root,
                 SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
                 unsigned MaddOpc, const TargetRegisterClass *RC,
                 FMAInstKind kind = FMAInstKind::Default,
                 const unsigned *ReplacedAddend = nullptr) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();

  unsigned SrcReg2;
  bool Src2IsKill;
  if (ReplacedAddend) {
    // If we just generated a new addend, we must be its only use.
    SrcReg2 = *ReplacedAddend;
    Src2IsKill = true;
  } else {
    SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
    Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
  }

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
    MRI.constrainRegClass(SrcReg2, RC);

  MachineInstrBuilder MIB;
  if (kind == FMAInstKind::Default)
    MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
              .addReg(SrcReg0, getKillRegState(Src0IsKill))
              .addReg(SrcReg1, getKillRegState(Src1IsKill))
              .addReg(SrcReg2, getKillRegState(Src2IsKill));
  else if (kind == FMAInstKind::Indexed)
    MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
              .addReg(SrcReg2, getKillRegState(Src2IsKill))
              .addReg(SrcReg0, getKillRegState(Src0IsKill))
              .addReg(SrcReg1, getKillRegState(Src1IsKill))
              .addImm(MUL->getOperand(3).getImm());
  else if (kind == FMAInstKind::Accumulator)
    MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
              .addReg(SrcReg2, getKillRegState(Src2IsKill))
              .addReg(SrcReg0, getKillRegState(Src0IsKill))
              .addReg(SrcReg1, getKillRegState(Src1IsKill));
  else
    assert(false && "Invalid FMA instruction kind \n");
  // Insert the MADD (MADD, FMA, FMS, FMLA, FMSL)
  InsInstrs.push_back(MIB);
  return MUL;
}
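
// Worked example (illustrative; the virtual-register numbers are
// hypothetical): given
//   %5 = FMULSrr %1, %2
//   %6 = FADDSrr %5, %3
// a call with IdxMulOpd = 1 and MaddOpc = AArch64::FMADDSrrr appends
//   %6 = FMADDSrrr %1, %2, %3
// to InsInstrs and returns the FMULSrr so the caller can delete it.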
/// genMaddR - Generate madd instruction and combine mul and add using
/// an extra virtual register
/// Example - an ADD intermediate needs to be stored in a register:
///   MUL I=A,B,0
///   ADD R,I,Imm
///   ==> ORR  V, ZR, Imm
///   ==> MADD R,A,B,V
/// \param MF Containing MachineFunction
/// \param MRI Register information
/// \param TII Target information
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
/// \param RC Register class of operands
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
                              const TargetInstrInfo *TII, MachineInstr &Root,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              unsigned IdxMulOpd, unsigned MaddOpc, unsigned VR,
                              const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(VR))
    MRI.constrainRegClass(VR, RC);

  MachineInstrBuilder MIB =
      BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
          .addReg(SrcReg0, getKillRegState(Src0IsKill))
          .addReg(SrcReg1, getKillRegState(Src1IsKill))
          .addReg(VR);
  InsInstrs.push_back(MIB);
  return MUL;
}
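
// Assembly-level sketch (illustrative registers, not from this file): for
//   mul w8, w1, w2
//   add w0, w8, #16
// the combiner first materializes V = 16 with an ORRWri (mov w9, #16) and
// then this helper emits
//   madd w0, w1, w2, w9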
/// When getMachineCombinerPatterns() finds potential patterns,
/// this function generates the instructions that could replace the
/// original code sequence
void AArch64InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  MachineInstr *MUL;
  const TargetRegisterClass *RC;
  unsigned Opc;
  switch (Pattern) {
  default:
    // Reassociate instructions.
    TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
                                                DelInstrs, InstrIdxForVirtReg);
    return;
  case MachineCombinerPattern::MULADDW_OP1:
  case MachineCombinerPattern::MULADDX_OP1:
    // MUL I=A,B,0
    // ADD R,I,C
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::MULADDW_OP2:
  case MachineCombinerPattern::MULADDX_OP2:
    // MUL I=A,B,0
    // ADD R,C,I
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MULADDWI_OP1:
  case MachineCombinerPattern::MULADDXI_OP1: {
    // MUL I=A,B,0
    // ADD R,I,Imm
    // ==> ORR  V, ZR, Imm
    // ==> MADD R,A,B,V
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    uint64_t Imm = Root.getOperand(2).getImm();

    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = SignExtend64(Imm, BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  case MachineCombinerPattern::MULSUBW_OP1:
  case MachineCombinerPattern::MULSUBX_OP1: {
    // MUL I=A,B,0
    // SUB R,I, C
    // ==> SUB  V, 0, C
    // ==> MADD R,A,B,V // = -C + A*B
    // --- Create(MADD);
    const TargetRegisterClass *SubRC;
    unsigned SubOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
      SubOpc = AArch64::SUBWrr;
      SubRC = &AArch64::GPR32spRegClass;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      SubOpc = AArch64::SUBXrr;
      SubRC = &AArch64::GPR64spRegClass;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(SubRC);
    // SUB NewVR, 0, C
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
            .addReg(ZeroReg)
            .add(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    break;
  }
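
  // Assembly-level sketch of MULSUBW_OP1 (illustrative registers):
  //   mul w8, w1, w2
  //   sub w0, w8, w3        // w0 = w1*w2 - w3
  // becomes
  //   sub  w9, wzr, w3      // w9 = -w3
  //   madd w0, w1, w2, w9   // w0 = w1*w2 + (-w3)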
  case MachineCombinerPattern::MULSUBW_OP2:
  case MachineCombinerPattern::MULSUBX_OP2:
    // MUL I=A,B,0
    // SUB R,C,I
    // ==> MSUB R,A,B,C (computes C - A*B)
    // --- Create(MSUB);
    if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
      Opc = AArch64::MSUBWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MSUBXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MULSUBWI_OP1:
  case MachineCombinerPattern::MULSUBXI_OP1: {
    // MUL I=A,B,0
    // SUB R,I, Imm
    // ==> ORR  V, ZR, -Imm
    // ==> MADD R,A,B,V // = -Imm + A*B
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    uint64_t Imm = Root.getOperand(2).getImm();
    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = SignExtend64(-Imm, BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  // Floating Point Support
  case MachineCombinerPattern::FMULADDS_OP1:
  case MachineCombinerPattern::FMULADDD_OP1:
    // FMUL I=A,B,0
    // FADD R,I,C
    // ==> FMADD R,A,B,C
    // --- Create(FMADD);
    if (Pattern == MachineCombinerPattern::FMULADDS_OP1) {
      Opc = AArch64::FMADDSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMADDDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::FMULADDS_OP2:
  case MachineCombinerPattern::FMULADDD_OP2:
    // FMUL I=A,B,0
    // FADD R,C,I
    // ==> FMADD R,A,B,C
    // --- Create(FMADD);
    if (Pattern == MachineCombinerPattern::FMULADDS_OP2) {
      Opc = AArch64::FMADDSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMADDDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
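
  // Assembly-level sketch of FMULADDS_OP2 (illustrative registers):
  //   fmul s1, s2, s3
  //   fadd s0, s4, s1
  // becomes
  //   fmadd s0, s2, s3, s4  // s0 = s2*s3 + s4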
  case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
    Opc = AArch64::FMLAv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
    Opc = AArch64::FMLAv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
    Opc = AArch64::FMLAv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
    Opc = AArch64::FMLAv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv2f32_OP1:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP1) {
      Opc = AArch64::FMLAv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f32_OP2:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP2) {
      Opc = AArch64::FMLAv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
  case MachineCombinerPattern::FMLAv2f64_OP1:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP1) {
      Opc = AArch64::FMLAv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f64_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP2) {
      Opc = AArch64::FMLAv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv4f32_OP1:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP1) {
      Opc = AArch64::FMLAv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv4f32_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP2) {
      Opc = AArch64::FMLAv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
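
  // For the vector FMLA patterns above, the choice between Indexed and
  // Accumulator simply follows the multiply that fed the add. A sketch for
  // FMLAv2i32_indexed_OP2 (illustrative registers):
  //   fmul v1.2s, v2.2s, v3.s[1]
  //   fadd v0.2s, v0.2s, v1.2s
  // becomes
  //   fmla v0.2s, v2.2s, v3.s[1]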
  case MachineCombinerPattern::FMULSUBS_OP1:
  case MachineCombinerPattern::FMULSUBD_OP1: {
    // FMUL I=A,B,0
    // FSUB R,I,C
    // ==> FNMSUB R,A,B,C // = -C + A*B
    // --- Create(FNMSUB);
    if (Pattern == MachineCombinerPattern::FMULSUBS_OP1) {
      Opc = AArch64::FNMSUBSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FNMSUBDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  }

  case MachineCombinerPattern::FNMULSUBS_OP1:
  case MachineCombinerPattern::FNMULSUBD_OP1: {
    // FNMUL I=A,B,0
    // FSUB R,I,C
    // ==> FNMADD R,A,B,C // = -A*B - C
    // --- Create(FNMADD);
    if (Pattern == MachineCombinerPattern::FNMULSUBS_OP1) {
      Opc = AArch64::FNMADDSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FNMADDDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  }

  case MachineCombinerPattern::FMULSUBS_OP2:
  case MachineCombinerPattern::FMULSUBD_OP2: {
    // FMUL I=A,B,0
    // FSUB R,C,I
    // ==> FMSUB R,A,B,C (computes C - A*B)
    // --- Create(FMSUB);
    if (Pattern == MachineCombinerPattern::FMULSUBS_OP2) {
      Opc = AArch64::FMSUBSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMSUBDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  }

  case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
    Opc = AArch64::FMLSv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
    Opc = AArch64::FMLSv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLSv2f32_OP2:
  case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP2) {
      Opc = AArch64::FMLSv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLSv2f64_OP2:
  case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP2) {
      Opc = AArch64::FMLSv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLSv4f32_OP2:
  case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP2) {
      Opc = AArch64::FMLSv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLSv2f32_OP1:
  case MachineCombinerPattern::FMLSv2i32_indexed_OP1: {
    RC = &AArch64::FPR64RegClass;
    unsigned NewVR = MRI.createVirtualRegister(RC);
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f32), NewVR)
            .add(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP1) {
      Opc = AArch64::FMLAv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed, &NewVR);
    } else {
      Opc = AArch64::FMLAv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator, &NewVR);
    }
    break;
  }
  case MachineCombinerPattern::FMLSv4f32_OP1:
  case MachineCombinerPattern::FMLSv4i32_indexed_OP1: {
    RC = &AArch64::FPR128RegClass;
    unsigned NewVR = MRI.createVirtualRegister(RC);
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv4f32), NewVR)
            .add(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP1) {
      Opc = AArch64::FMLAv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed, &NewVR);
    } else {
      Opc = AArch64::FMLAv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator, &NewVR);
    }
    break;
  }
  case MachineCombinerPattern::FMLSv2f64_OP1:
  case MachineCombinerPattern::FMLSv2i64_indexed_OP1: {
    RC = &AArch64::FPR128RegClass;
    unsigned NewVR = MRI.createVirtualRegister(RC);
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f64), NewVR)
            .add(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP1) {
      Opc = AArch64::FMLAv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed, &NewVR);
    } else {
      Opc = AArch64::FMLAv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator, &NewVR);
    }
    break;
  }
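
  // Sketch of the FMLS..._OP1 cases above (hypothetical registers): the
  // multiply feeds operand 1 of the FSUB, so the addend is negated first and
  // an FMLA accumulates into it:
  //   fmul v8.2s, v0.2s, v1.2s
  //   fsub v2.2s, v8.2s, v3.2s   // v2 = v0*v1 - v3
  // becomes
  //   fneg v9.2s, v3.2s          // v9 = -v3
  //   fmla v9.2s, v0.2s, v1.2s   // v9 += v0*v1, i.e. v0*v1 - v3
  // with the FMLA's destination rewritten to the original result register
  // when the virtual registers are allocated.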
  } // end switch (Pattern)
  // Record MUL and ADD/SUB for deletion
  DelInstrs.push_back(MUL);
  DelInstrs.push_back(&Root);
}
/// Replace csinc-branch sequence by simple conditional branch
///
/// Examples:
/// 1. \code
///   csinc  w9, wzr, wzr, <condition code>
///   tbnz   w9, #0, 0x44
///    \endcode
/// to
///    \code
///   b.<inverted condition code>
///    \endcode
///
/// 2. \code
///   csinc w9, wzr, wzr, <condition code>
///   tbz w9, #0, 0x44
///    \endcode
/// to
///    \code
///   b.<condition code>
///    \endcode
///
/// Replace compare and branch sequence by TBZ/TBNZ instruction when the
/// compare's constant operand is power of 2.
///
/// Examples:
///    \code
///   and  w8, w8, #0x400
///   cbnz w8, L1
///    \endcode
/// to
///    \code
///   tbnz w8, #10, L1
///    \endcode
///
/// \param  MI Conditional Branch
/// \return True when the simple conditional branch is generated
///
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
  bool IsNegativeBranch = false;
  bool IsTestAndBranch = false;
  unsigned TargetBBInMI = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    return false;
  case AArch64::CBZW:
  case AArch64::CBZX:
    TargetBBInMI = 1;
    break;
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    TargetBBInMI = 1;
    IsNegativeBranch = true;
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
    TargetBBInMI = 2;
    IsTestAndBranch = true;
    break;
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    TargetBBInMI = 2;
    IsNegativeBranch = true;
    IsTestAndBranch = true;
    break;
  }
  // So we increment a zero register and test for bits other
  // than bit 0? Conservatively bail out in case the verifier
  // missed this case.
  if (IsTestAndBranch && MI.getOperand(1).getImm())
    return false;

  // Find Definition.
  assert(MI.getParent() && "Incomplete machine instruction\n");
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  unsigned VReg = MI.getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(VReg);

  // Look through COPY instructions to find definition.
  while (DefMI->isCopy()) {
    unsigned CopyVReg = DefMI->getOperand(1).getReg();
    if (!MRI->hasOneNonDBGUse(CopyVReg))
      return false;
    if (!MRI->hasOneDef(CopyVReg))
      return false;
    DefMI = MRI->getVRegDef(CopyVReg);
  }

  switch (DefMI->getOpcode()) {
  default:
    return false;
  // Fold AND into a TBZ/TBNZ if constant operand is power of 2.
  case AArch64::ANDWri:
  case AArch64::ANDXri: {
    if (IsTestAndBranch)
      return false;
    if (DefMI->getParent() != MBB)
      return false;
    if (!MRI->hasOneNonDBGUse(VReg))
      return false;

    bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
    uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
        DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64);
    if (!isPowerOf2_64(Mask))
      return false;

    MachineOperand &MO = DefMI->getOperand(1);
    unsigned NewReg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(NewReg))
      return false;

    assert(!MRI->def_empty(NewReg) && "Register must be defined.");

    MachineBasicBlock &RefToMBB = *MBB;
    MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
    DebugLoc DL = MI.getDebugLoc();
    unsigned Imm = Log2_64(Mask);
    unsigned Opc = (Imm < 32)
                       ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
                       : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
    MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc))
                              .addReg(NewReg)
                              .addImm(Imm)
                              .addMBB(TBB);
    // Register lives on to the CBZ now.
    MO.setIsKill(false);

    // For immediate smaller than 32, we need to use the 32-bit
    // variant (W) in all cases. Indeed the 64-bit variant does not
    // allow to encode them.
    // Therefore, if the input register is 64-bit, we need to take the
    // 32-bit sub-register.
    if (!Is32Bit && Imm < 32)
      NewMI->getOperand(0).setSubReg(AArch64::sub_32);
    MI.eraseFromParent();
    return true;
  }
  // Look for CSINC
  case AArch64::CSINCWr:
  case AArch64::CSINCXr: {
    if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
          DefMI->getOperand(2).getReg() == AArch64::WZR) &&
        !(DefMI->getOperand(1).getReg() == AArch64::XZR &&
          DefMI->getOperand(2).getReg() == AArch64::XZR))
      return false;

    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
      return false;

    AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
    // Convert only when the condition code is not modified between
    // the CSINC and the branch. The CC may be used by other
    // instructions in between.
    if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write))
      return false;
    MachineBasicBlock &RefToMBB = *MBB;
    MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
    DebugLoc DL = MI.getDebugLoc();
    if (IsNegativeBranch)
      CC = AArch64CC::getInvertedCondCode(CC);
    BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
    MI.eraseFromParent();
    return true;
  }
  }
}
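
// Concrete sketch of the CSINC fold (illustrative): since
//   csinc w9, wzr, wzr, eq   // w9 = (eq) ? 0 : 1
//   tbnz  w9, #0, LBB        // taken exactly when eq is false
// the pair is equivalent to
//   b.ne LBB
// which is what the inverted-condition Bcc built above produces.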
std::pair<unsigned, unsigned>
AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = AArch64II::MO_FRAGMENT;
  return std::make_pair(TF & Mask, TF & ~Mask);
}
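
// For instance (a sketch; the flag layout lives in AArch64BaseInfo.h): a
// flag word of MO_PAGEOFF | MO_NC decomposes into the direct fragment
// MO_PAGEOFF and the bitmask remainder MO_NC, since MO_NC lies outside
// MO_FRAGMENT.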
ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace AArch64II;

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PAGE, "aarch64-page"}, {MO_PAGEOFF, "aarch64-pageoff"},
      {MO_G3, "aarch64-g3"},     {MO_G2, "aarch64-g2"},
      {MO_G1, "aarch64-g1"},     {MO_G0, "aarch64-g0"},
      {MO_HI12, "aarch64-hi12"}};
  return makeArrayRef(TargetFlags);
}
ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace AArch64II;

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_COFFSTUB, "aarch64-coffstub"},
      {MO_GOT, "aarch64-got"}, {MO_NC, "aarch64-nc"},
      {MO_S, "aarch64-s"},     {MO_TLS, "aarch64-tls"},
      {MO_DLLIMPORT, "aarch64-dllimport"}};
  return makeArrayRef(TargetFlags);
}
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
AArch64InstrInfo::getSerializableMachineMemOperandTargetFlags() const {
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      {{MOSuppressPair, "aarch64-suppress-pair"},
       {MOStridedAccess, "aarch64-strided-access"}};
  return makeArrayRef(TargetFlags);
}
/// Constants defining how certain sequences should be outlined.
/// This encompasses how an outlined function should be called, and what kind of
/// frame should be emitted for that outlined function.
///
/// \p MachineOutlinerDefault implies that the function should be called with
/// a save and restore of LR to the stack.
///
/// That is,
///
/// I1     Save LR                    OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3     Restore LR                 I2
///                                   I3
///                                   RET
///
/// * Call construction overhead: 3 (save + BL + restore)
/// * Frame construction overhead: 1 (ret)
/// * Requires stack fixups? Yes
///
/// \p MachineOutlinerTailCall implies that the function is being created from
/// a sequence of instructions ending in a return.
///
/// That is,
///
/// I1                             OUTLINED_FUNCTION:
/// I2 --> B OUTLINED_FUNCTION     I1
/// RET                            I2
///                                RET
///
/// * Call construction overhead: 1 (B)
/// * Frame construction overhead: 0 (Return included in sequence)
/// * Requires stack fixups? No
///
/// \p MachineOutlinerNoLRSave implies that the function should be called using
/// a BL instruction, but doesn't require LR to be saved and restored. This
/// happens when LR is known to be dead.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3                                I2
///                                   I3
///                                   RET
///
/// * Call construction overhead: 1 (BL)
/// * Frame construction overhead: 1 (RET)
/// * Requires stack fixups? No
///
/// \p MachineOutlinerThunk implies that the function is being created from
/// a sequence of instructions ending in a call. The outlined function is
/// called with a BL instruction, and the outlined function tail-calls the
/// original call destination.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// BL f                              I2
///                                   B f
///
/// * Call construction overhead: 1 (BL)
/// * Frame construction overhead: 0
/// * Requires stack fixups? No
///
/// \p MachineOutlinerRegSave implies that the function should be called with a
/// save and restore of LR to an available register. This allows us to avoid
/// stack fixups. Note that this outlining variant is compatible with the
/// NoLRSave case.
///
/// That is,
///
/// I1     Save LR                    OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3     Restore LR                 I2
///                                   I3
///                                   RET
///
/// * Call construction overhead: 3 (save + BL + restore)
/// * Frame construction overhead: 1 (ret)
/// * Requires stack fixups? No
enum MachineOutlinerClass {
  MachineOutlinerDefault,  /// Emit a save, restore, call, and return.
  MachineOutlinerTailCall, /// Only emit a branch.
  MachineOutlinerNoLRSave, /// Emit a call and return.
  MachineOutlinerThunk,    /// Emit a call and tail-call.
  MachineOutlinerRegSave   /// Same as default, but save to a register.
};

enum MachineOutlinerMBBFlags {
  LRUnavailableSomewhere = 0x2,
  HasCalls = 0x4,
  UnsafeRegsDead = 0x8
};
unsigned
AArch64InstrInfo::findRegisterToSaveLRTo(const outliner::Candidate &C) const {
  assert(C.LRUWasSet && "LRU wasn't set?");
  MachineFunction *MF = C.getMF();
  const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>(
      MF->getSubtarget().getRegisterInfo());

  // Check if there is an available register across the sequence that we can
  // use.
  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (!ARI->isReservedReg(*MF, Reg) &&
        Reg != AArch64::LR &&  // LR is not reserved, but don't use it.
        Reg != AArch64::X16 && // X16 is not guaranteed to be preserved.
        Reg != AArch64::X17 && // Ditto for X17.
        C.LRU.available(Reg) && C.UsedInSequence.available(Reg))
      return Reg;
  }

  // No suitable register. Return 0.
  return 0u;
}
outliner::OutlinedFunction AArch64InstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
  outliner::Candidate &FirstCand = RepeatedSequenceLocs[0];
  unsigned SequenceSize =
      std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0,
                      [this](unsigned Sum, const MachineInstr &MI) {
                        return Sum + getInstSizeInBytes(MI);
                      });

  // Properties about candidate MBBs that hold for all of them.
  unsigned FlagsSetInAll = 0xF;

  // Compute liveness information for each candidate, and set FlagsSetInAll.
  const TargetRegisterInfo &TRI = getRegisterInfo();
  std::for_each(RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
                [&FlagsSetInAll](outliner::Candidate &C) {
                  FlagsSetInAll &= C.Flags;
                });

  // According to the AArch64 Procedure Call Standard, the following are
  // undefined on entry/exit from a function call:
  //
  // * Registers x16, x17, (and thus w16, w17)
  // * Condition codes (and thus the NZCV register)
  //
  // Because of this, we can't outline any sequence of instructions where one
  // of these registers is live into/across it. Thus, we need to delete
  // those candidates.
  auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) {
    // If the unsafe registers in this block are all dead, then we don't need
    // to compute liveness here.
    if (C.Flags & UnsafeRegsDead)
      return false;
    C.initLRU(TRI);
    LiveRegUnits LRU = C.LRU;
    return (!LRU.available(AArch64::W16) || !LRU.available(AArch64::W17) ||
            !LRU.available(AArch64::NZCV));
  };

  // Are there any candidates where those registers are live?
  if (!(FlagsSetInAll & UnsafeRegsDead)) {
    // Erase every candidate that violates the restrictions above. (It could be
    // true that we have viable candidates, so it's not worth bailing out in
    // the case that, say, 1 out of 20 candidates violate the restrictions.)
    RepeatedSequenceLocs.erase(std::remove_if(RepeatedSequenceLocs.begin(),
                                              RepeatedSequenceLocs.end(),
                                              CantGuaranteeValueAcrossCall),
                               RepeatedSequenceLocs.end());

    // If the sequence doesn't have enough candidates left, then we're done.
    if (RepeatedSequenceLocs.size() < 2)
      return outliner::OutlinedFunction();
  }

  // At this point, we have only "safe" candidates to outline. Figure out
  // frame + call instruction information.

  unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode();

  // Helper lambda which sets call information for every candidate.
  auto SetCandidateCallInfo =
      [&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) {
        for (outliner::Candidate &C : RepeatedSequenceLocs)
          C.setCallInfo(CallID, NumBytesForCall);
      };

  unsigned FrameID = MachineOutlinerDefault;
  unsigned NumBytesToCreateFrame = 4;

  bool HasBTI = any_of(RepeatedSequenceLocs, [](outliner::Candidate &C) {
    return C.getMF()->getFunction().hasFnAttribute("branch-target-enforcement");
  });

  // Returns true if an instruction is safe to fix up, false otherwise.
  auto IsSafeToFixup = [this, &TRI](MachineInstr &MI) {
    if (MI.isCall())
      return true;

    if (!MI.modifiesRegister(AArch64::SP, &TRI) &&
        !MI.readsRegister(AArch64::SP, &TRI))
      return true;

    // Any modification of SP will break our code to save/restore LR.
    // FIXME: We could handle some instructions which add a constant
    // offset to SP, with a bit more work.
    if (MI.modifiesRegister(AArch64::SP, &TRI))
      return false;

    // At this point, we have a stack instruction that we might need to
    // fix up. We'll handle it if it's a load or store.
    if (MI.mayLoadOrStore()) {
      const MachineOperand *Base; // Filled with the base operand of MI.
      int64_t Offset;             // Filled with the offset of MI.

      // Does it allow us to offset the base operand and is the base the
      // register SP?
      if (!getMemOperandWithOffset(MI, Base, Offset, &TRI) || !Base->isReg() ||
          Base->getReg() != AArch64::SP)
        return false;

      // Find the minimum/maximum offset for this instruction and check
      // if fixing it up would be in range.
      int64_t MinOffset,
          MaxOffset;  // Unscaled offsets for the instruction.
      unsigned Scale; // The scale to multiply the offsets by.
      unsigned DummyWidth;
      getMemOpInfo(MI.getOpcode(), Scale, DummyWidth, MinOffset, MaxOffset);

      Offset += 16; // Update the offset to what it would be if we outlined.
      if (Offset < MinOffset * Scale || Offset > MaxOffset * Scale)
        return false;

      // It's in range, so we can outline it.
      return true;
    }

    // FIXME: Add handling for instructions like "add x0, sp, #8".

    // We can't fix it up, so don't outline it.
    return false;
  };

  // True if it's possible to fix up each stack instruction in this sequence.
  // Important for frames/call variants that modify the stack.
  bool AllStackInstrsSafe = std::all_of(
      FirstCand.front(), std::next(FirstCand.back()), IsSafeToFixup);

  // If the last instruction in any candidate is a terminator, then we should
  // tail call all of the candidates.
  if (RepeatedSequenceLocs[0].back()->isTerminator()) {
    FrameID = MachineOutlinerTailCall;
    NumBytesToCreateFrame = 0;
    SetCandidateCallInfo(MachineOutlinerTailCall, 4);
  } else if (LastInstrOpcode == AArch64::BL ||
             (LastInstrOpcode == AArch64::BLR && !HasBTI)) {
    // FIXME: Do we need to check if the code after this uses the value of LR?
    FrameID = MachineOutlinerThunk;
    NumBytesToCreateFrame = 0;
    SetCandidateCallInfo(MachineOutlinerThunk, 4);
  } else {
    // We need to decide how to emit calls + frames. We can always emit the same
    // frame if we don't need to save to the stack. If we have to save to the
    // stack, then we need a different frame.
    unsigned NumBytesNoStackCalls = 0;
    std::vector<outliner::Candidate> CandidatesWithoutStackFixups;

    for (outliner::Candidate &C : RepeatedSequenceLocs) {
      C.initLRU(TRI);

      // Is LR available? If so, we don't need a save.
      if (C.LRU.available(AArch64::LR)) {
        NumBytesNoStackCalls += 4;
        C.setCallInfo(MachineOutlinerNoLRSave, 4);
        CandidatesWithoutStackFixups.push_back(C);
      }

      // Is an unused register available? If so, we won't modify the stack, so
      // we can outline with the same frame type as those that don't save LR.
      else if (findRegisterToSaveLRTo(C)) {
        NumBytesNoStackCalls += 12;
        C.setCallInfo(MachineOutlinerRegSave, 12);
        CandidatesWithoutStackFixups.push_back(C);
      }

      // Is SP used in the sequence at all? If not, we don't have to modify
      // the stack, so we are guaranteed to get the same frame.
      else if (C.UsedInSequence.available(AArch64::SP)) {
        NumBytesNoStackCalls += 12;
        C.setCallInfo(MachineOutlinerDefault, 12);
        CandidatesWithoutStackFixups.push_back(C);
      }

      // If we outline this, we need to modify the stack. Pretend we don't
      // outline this by saving all of its bytes.
      else {
        NumBytesNoStackCalls += SequenceSize;
      }
    }

    // If there are no places where we have to save LR, then note that we
    // don't have to update the stack. Otherwise, give every candidate the
    // default call type, as long as it's safe to do so.
    if (!AllStackInstrsSafe ||
        NumBytesNoStackCalls <= RepeatedSequenceLocs.size() * 12) {
      RepeatedSequenceLocs = CandidatesWithoutStackFixups;
      FrameID = MachineOutlinerNoLRSave;
    } else
      SetCandidateCallInfo(MachineOutlinerDefault, 12);

    // If we dropped all of the candidates, bail out here.
    if (RepeatedSequenceLocs.size() < 2) {
      RepeatedSequenceLocs.clear();
      return outliner::OutlinedFunction();
    }
  }

  // Does every candidate's MBB contain a call? If so, then we might have a call
  // in the range.
  if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
    // Check if the range contains a call. These require a save + restore of the
    // link register.
    bool ModStackToSaveLR = false;
    if (std::any_of(FirstCand.front(), FirstCand.back(),
                    [](const MachineInstr &MI) { return MI.isCall(); }))
      ModStackToSaveLR = true;

    // Handle the last instruction separately. If this is a tail call, then the
    // last instruction is a call. We don't want to save + restore in this case.
    // However, it could be possible that the last instruction is a call without
    // it being valid to tail call this sequence. We should consider this as
    // well.
    else if (FrameID != MachineOutlinerThunk &&
             FrameID != MachineOutlinerTailCall && FirstCand.back()->isCall())
      ModStackToSaveLR = true;

    if (ModStackToSaveLR) {
      // We can't fix up the stack. Bail out.
      if (!AllStackInstrsSafe) {
        RepeatedSequenceLocs.clear();
        return outliner::OutlinedFunction();
      }

      // Save + restore LR.
      NumBytesToCreateFrame += 8;
    }
  }

  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
                                    NumBytesToCreateFrame, FrameID);
}
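
// Cost-model sketch (hypothetical numbers): with four candidates where two
// can use MachineOutlinerNoLRSave (4 bytes each) and two need
// MachineOutlinerRegSave (12 bytes each), NumBytesNoStackCalls is 32, which
// is <= 4 * 12 = 48, so the no-stack-fixup candidate set above is preferred
// over giving every candidate the default save-to-stack call.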
bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;

  // Don't outline from functions with section markings; the program could
  // expect that all the code is in the named section.
  // FIXME: Allow outlining from multiple functions with the same section
  // marking.
  if (F.hasSection())
    return false;

  // Outlining from functions with redzones is unsafe since the outliner may
  // modify the stack. Check if hasRedZone is true or unknown; if yes, don't
  // outline from it.
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  if (!AFI || AFI->hasRedZone().getValueOr(true))
    return false;

  // It's safe to outline from MF.
  return true;
}
bool AArch64InstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                              unsigned &Flags) const {
  // Check if LR is available through all of the MBB. If it's not, then set
  // a flag.
  assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
         "Suitable Machine Function for outlining must track liveness");
  LiveRegUnits LRU(getRegisterInfo());

  std::for_each(MBB.rbegin(), MBB.rend(),
                [&LRU](MachineInstr &MI) { LRU.accumulate(MI); });

  // Check if each of the unsafe registers are available...
  bool W16AvailableInBlock = LRU.available(AArch64::W16);
  bool W17AvailableInBlock = LRU.available(AArch64::W17);
  bool NZCVAvailableInBlock = LRU.available(AArch64::NZCV);

  // If all of these are dead (and not live out), we know we don't have to check
  // them later.
  if (W16AvailableInBlock && W17AvailableInBlock && NZCVAvailableInBlock)
    Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;

  // Now, add the live outs to the set.
  LRU.addLiveOuts(MBB);

  // If any of these registers is available in the MBB, but also a live out of
  // the block, then we know outlining is unsafe.
  if (W16AvailableInBlock && !LRU.available(AArch64::W16))
    return false;
  if (W17AvailableInBlock && !LRU.available(AArch64::W17))
    return false;
  if (NZCVAvailableInBlock && !LRU.available(AArch64::NZCV))
    return false;

  // Check if there's a call inside this MachineBasicBlock. If there is, then
  // set a flag.
  if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); }))
    Flags |= MachineOutlinerMBBFlags::HasCalls;

  MachineFunction *MF = MBB.getParent();

  // In the event that we outline, we may have to save LR. If there is an
  // available register in the MBB, then we'll always save LR there. Check if
  // this is true.
  bool CanSaveLR = false;
  const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>(
      MF->getSubtarget().getRegisterInfo());

  // Check if there is an available register across the sequence that we can
  // use.
  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (!ARI->isReservedReg(*MF, Reg) && Reg != AArch64::LR &&
        Reg != AArch64::X16 && Reg != AArch64::X17 && LRU.available(Reg)) {
      CanSaveLR = true;
      break;
    }
  }

  // Check if we have a register we can save LR to, and if LR was used
  // somewhere. If both of those things are true, then we need to evaluate the
  // safety of outlining stack instructions later.
  if (!CanSaveLR && !LRU.available(AArch64::LR))
    Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;

  return true;
}
outliner::InstrType
AArch64InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
                                   unsigned Flags) const {
  MachineInstr &MI = *MIT;
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  AArch64FunctionInfo *FuncInfo = MF->getInfo<AArch64FunctionInfo>();

  // Don't outline LOHs.
  if (FuncInfo->getLOHRelated().count(&MI))
    return outliner::InstrType::Illegal;

  // Don't allow debug values to impact outlining type.
  if (MI.isDebugInstr() || MI.isIndirectDebugValue())
    return outliner::InstrType::Invisible;

  // At this point, KILL instructions don't really tell us much so we can go
  // ahead and skip over them.
  if (MI.isKill())
    return outliner::InstrType::Invisible;

  // Is this a terminator for a basic block?
  if (MI.isTerminator()) {

    // Is this the end of a function?
    if (MI.getParent()->succ_empty())
      return outliner::InstrType::Legal;

    // It's not, so don't outline it.
    return outliner::InstrType::Illegal;
  }

  // Make sure none of the operands are un-outlinable.
  for (const MachineOperand &MOP : MI.operands()) {
    if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
        MOP.isTargetIndex())
      return outliner::InstrType::Illegal;

    // If it uses LR or W30 explicitly, then don't touch it.
    if (MOP.isReg() && !MOP.isImplicit() &&
        (MOP.getReg() == AArch64::LR || MOP.getReg() == AArch64::W30))
      return outliner::InstrType::Illegal;
  }

  // Special cases for instructions that can always be outlined, but will fail
  // the later tests. e.g., ADRPs, which are PC-relative and use LR, but can
  // always be outlined because they don't require a *specific* value to be
  // in LR.
  if (MI.getOpcode() == AArch64::ADRP)
    return outliner::InstrType::Legal;

  // If MI is a call we might be able to outline it. We don't want to outline
  // any calls that rely on the position of items on the stack. When we outline
  // something containing a call, we have to emit a save and restore of LR in
  // the outlined function. Currently, this always happens by saving LR to the
  // stack. Thus, if we outline, say, half the parameters for a function call
  // plus the call, then we'll break the callee's expectations for the layout
  // of the stack.
  //
  // FIXME: Allow calls to functions which construct a stack frame, as long
  // as they don't access arguments on the stack.
  // FIXME: Figure out some way to analyze functions defined in other modules.
  // We should be able to compute the memory usage based on the IR calling
  // convention, even if we can't see the definition.
  if (MI.isCall()) {
    // Get the function associated with the call. Look at each operand and find
    // the one that represents the callee and get its name.
    const Function *Callee = nullptr;
    for (const MachineOperand &MOP : MI.operands()) {
      if (MOP.isGlobal()) {
        Callee = dyn_cast<Function>(MOP.getGlobal());
        break;
      }
    }

    // Never outline calls to mcount. There isn't any rule that would require
    // this, but the Linux kernel's "ftrace" feature depends on it.
    if (Callee && Callee->getName() == "\01_mcount")
      return outliner::InstrType::Illegal;

    // If we don't know anything about the callee, assume it depends on the
    // stack layout of the caller. In that case, it's only legal to outline
    // as a tail-call. Whitelist the call instructions we know about so we
    // don't get unexpected results with call pseudo-instructions.
    auto UnknownCallOutlineType = outliner::InstrType::Illegal;
    if (MI.getOpcode() == AArch64::BLR || MI.getOpcode() == AArch64::BL)
      UnknownCallOutlineType = outliner::InstrType::LegalTerminator;

    if (!Callee)
      return UnknownCallOutlineType;

    // We have a function we have information about. Check if it's something
    // we can safely outline.
    MachineFunction *CalleeMF = MF->getMMI().getMachineFunction(*Callee);

    // We don't know what's going on with the callee at all. Don't touch it.
    if (!CalleeMF)
      return UnknownCallOutlineType;

    // Check if we know anything about the callee saves on the function. If we
    // don't, then don't touch it, since that implies that we haven't
    // computed anything about its stack frame yet.
    MachineFrameInfo &MFI = CalleeMF->getFrameInfo();
    if (!MFI.isCalleeSavedInfoValid() || MFI.getStackSize() > 0 ||
        MFI.getNumObjects() > 0)
      return UnknownCallOutlineType;

    // At this point, we can say that CalleeMF ought to not pass anything on the
    // stack. Therefore, we can outline it.
    return outliner::InstrType::Legal;
  }

  // Don't outline positions.
  if (MI.isPosition())
    return outliner::InstrType::Illegal;

  // Don't touch the link register or W30.
  if (MI.readsRegister(AArch64::W30, &getRegisterInfo()) ||
      MI.modifiesRegister(AArch64::W30, &getRegisterInfo()))
    return outliner::InstrType::Illegal;

  // Don't outline BTI instructions, because that will prevent the outlining
  // site from being indirectly callable.
  if (MI.getOpcode() == AArch64::HINT) {
    int64_t Imm = MI.getOperand(0).getImm();
    if (Imm == 32 || Imm == 34 || Imm == 36 || Imm == 38)
      return outliner::InstrType::Illegal;
  }

  return outliner::InstrType::Legal;
}
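
// Note on the HINT immediates checked above: assuming the standard ARMv8.5
// encoding, #32, #34, #36 and #38 correspond to BTI, BTI c, BTI j and
// BTI jc respectively.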
void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
  for (MachineInstr &MI : MBB) {
    const MachineOperand *Base;
    unsigned Width;
    int64_t Offset;

    // Is this a load or store with an immediate offset with SP as the base?
    if (!MI.mayLoadOrStore() ||
        !getMemOperandWithOffsetWidth(MI, Base, Offset, Width, &RI) ||
        (Base->isReg() && Base->getReg() != AArch64::SP))
      continue;

    // It is, so we have to fix it up.
    unsigned Scale;
    int64_t Dummy1, Dummy2;

    MachineOperand &StackOffsetOperand = getMemOpBaseRegImmOfsOffsetOperand(MI);
    assert(StackOffsetOperand.isImm() && "Stack offset wasn't immediate!");
    getMemOpInfo(MI.getOpcode(), Scale, Width, Dummy1, Dummy2);
    assert(Scale != 0 && "Unexpected opcode!");

    // We've pushed the return address to the stack, so add 16 to the offset.
    // This is safe, since we already checked if it would overflow when we
    // checked if this instruction was legal to outline.
    int64_t NewImm = (Offset + 16) / Scale;
    StackOffsetOperand.setImm(NewImm);
  }
}
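
// Arithmetic sketch (illustrative): an 8-byte load "ldr x0, [sp, #8]" has
// Scale = 8 and Offset = 8; after outlining pushes LR, the new immediate is
// (8 + 16) / 8 = 3, i.e. "ldr x0, [sp, #24]".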
void AArch64InstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {
  // For thunk outlining, rewrite the last instruction from a call to a
  // tail-call.
  if (OF.FrameConstructionID == MachineOutlinerThunk) {
    MachineInstr *Call = &*--MBB.instr_end();
    unsigned TailOpcode;
    if (Call->getOpcode() == AArch64::BL) {
      TailOpcode = AArch64::TCRETURNdi;
    } else {
      assert(Call->getOpcode() == AArch64::BLR);
      TailOpcode = AArch64::TCRETURNriALL;
    }
    MachineInstr *TC = BuildMI(MF, DebugLoc(), get(TailOpcode))
                           .add(Call->getOperand(0))
                           .addImm(0);
    MBB.insert(MBB.end(), TC);
    Call->eraseFromParent();
  }

  // Is there a call in the outlined range?
  auto IsNonTailCall = [](MachineInstr &MI) {
    return MI.isCall() && !MI.isReturn();
  };
  if (std::any_of(MBB.instr_begin(), MBB.instr_end(), IsNonTailCall)) {
    // Fix up the instructions in the range, since we're going to modify the
    // stack.
    assert(OF.FrameConstructionID != MachineOutlinerDefault &&
           "Can only fix up stack references once");
    fixupPostOutline(MBB);

    // LR has to be a live in so that we can save it.
    MBB.addLiveIn(AArch64::LR);

    MachineBasicBlock::iterator It = MBB.begin();
    MachineBasicBlock::iterator Et = MBB.end();

    if (OF.FrameConstructionID == MachineOutlinerTailCall ||
        OF.FrameConstructionID == MachineOutlinerThunk)
      Et = std::prev(MBB.end());

    // Insert a save before the outlined region
    MachineInstr *STRXpre = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
                                .addReg(AArch64::SP, RegState::Define)
                                .addReg(AArch64::LR)
                                .addReg(AArch64::SP)
                                .addImm(-16);
    It = MBB.insert(It, STRXpre);

    const TargetSubtargetInfo &STI = MF.getSubtarget();
    const MCRegisterInfo *MRI = STI.getRegisterInfo();
    unsigned DwarfReg = MRI->getDwarfRegNum(AArch64::LR, true);

    // Add a CFI saying the stack was moved 16 B down.
    int64_t StackPosEntry =
        MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, 16));
    BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION))
        .addCFIIndex(StackPosEntry)
        .setMIFlags(MachineInstr::FrameSetup);

    // Add a CFI saying that the LR that we want to find is now 16 B higher than
    // before.
    int64_t LRPosEntry =
        MF.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg, 16));
    BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION))
        .addCFIIndex(LRPosEntry)
        .setMIFlags(MachineInstr::FrameSetup);

    // Insert a restore before the terminator for the function.
    MachineInstr *LDRXpost = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
                                 .addReg(AArch64::SP, RegState::Define)
                                 .addReg(AArch64::LR, RegState::Define)
                                 .addReg(AArch64::SP)
                                 .addImm(16);
    Et = MBB.insert(Et, LDRXpost);
  }

  // If this is a tail call outlined function, then there's already a return.
  if (OF.FrameConstructionID == MachineOutlinerTailCall ||
      OF.FrameConstructionID == MachineOutlinerThunk)
    return;

  // It's not a tail call, so we have to insert the return ourselves.
  MachineInstr *ret = BuildMI(MF, DebugLoc(), get(AArch64::RET))
                          .addReg(AArch64::LR, RegState::Undef);
  MBB.insert(MBB.end(), ret);

  // Did we have to modify the stack by saving the link register?
  if (OF.FrameConstructionID != MachineOutlinerDefault)
    return;

  // We modified the stack.
  // Walk over the basic block and fix up all the stack accesses.
  fixupPostOutline(MBB);
}
MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, const outliner::Candidate &C) const {

  // Are we tail calling?
  if (C.CallConstructionID == MachineOutlinerTailCall) {
    // If yes, then we can just branch to the label.
    It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::TCRETURNdi))
                            .addGlobalAddress(M.getNamedValue(MF.getName()))
                            .addImm(0));
    return It;
  }

  // Are we saving the link register?
  if (C.CallConstructionID == MachineOutlinerNoLRSave ||
      C.CallConstructionID == MachineOutlinerThunk) {
    // No, so just insert the call.
    It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL))
                            .addGlobalAddress(M.getNamedValue(MF.getName())));
    return It;
  }

  // We want to return the spot where we inserted the call.
  MachineBasicBlock::iterator CallPt;

  // Instructions for saving and restoring LR around the call instruction we're
  // going to insert.
  MachineInstr *Save;
  MachineInstr *Restore;
  // Can we save to a register?
  if (C.CallConstructionID == MachineOutlinerRegSave) {
    // FIXME: This logic should be sunk into a target-specific interface so that
    // we don't have to recompute the register.
    unsigned Reg = findRegisterToSaveLRTo(C);
    assert(Reg != 0 && "No callee-saved register available?");

    // Save and restore LR from that register.
    Save = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), Reg)
               .addReg(AArch64::XZR)
               .addReg(AArch64::LR)
               .addImm(0);
    Restore = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), AArch64::LR)
                  .addReg(AArch64::XZR)
                  .addReg(Reg)
                  .addImm(0);
  } else {
    // We have the default case. Save and restore from SP.
    Save = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
               .addReg(AArch64::SP, RegState::Define)
               .addReg(AArch64::LR)
               .addReg(AArch64::SP)
               .addImm(-16);
    Restore = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
                  .addReg(AArch64::SP, RegState::Define)
                  .addReg(AArch64::LR, RegState::Define)
                  .addReg(AArch64::SP)
                  .addImm(16);
  }

  It = MBB.insert(It, Save);
  It++;

  // Insert the call.
  It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL))
                          .addGlobalAddress(M.getNamedValue(MF.getName())));
  CallPt = It;
  It++;

  It = MBB.insert(It, Restore);
  return CallPt;
}
bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault(
    MachineFunction &MF) const {
  return MF.getFunction().hasMinSize();
}
bool AArch64InstrInfo::isCopyInstrImpl(
    const MachineInstr &MI, const MachineOperand *&Source,
    const MachineOperand *&Destination) const {

  // AArch64::ORRWrs and AArch64::ORRXrs with WZR/XZR reg
  // and zero immediate operands used as an alias for mov instruction.
  if (MI.getOpcode() == AArch64::ORRWrs &&
      MI.getOperand(1).getReg() == AArch64::WZR &&
      MI.getOperand(3).getImm() == 0x0) {
    Destination = &MI.getOperand(0);
    Source = &MI.getOperand(2);
    return true;
  }

  if (MI.getOpcode() == AArch64::ORRXrs &&
      MI.getOperand(1).getReg() == AArch64::XZR &&
      MI.getOperand(3).getImm() == 0x0) {
    Destination = &MI.getOperand(0);
    Source = &MI.getOperand(2);
    return true;
  }

  return false;
}
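
// Example of the alias being matched (illustrative): "orr w0, wzr, w2" with
// a zero shift amount is the canonical "mov w0, w2", so Destination is w0
// and Source is w2.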
#define GET_INSTRINFO_HELPERS
#include "AArch64GenInstrInfo.inc"