//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

static cl::opt<unsigned> TBZDisplacementBits(
    "aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
    cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned> CBZDisplacementBits(
    "aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
    cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
    BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
                        cl::desc("Restrict range of Bcc instructions (DEBUG)"));

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP,
                          AArch64::CATCHRET),
      RI(STI.getTargetTriple()), Subtarget(STI) {}

/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  {
    auto Op = MI.getOpcode();
    if (Op == AArch64::INLINEASM || Op == AArch64::INLINEASM_BR)
      return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
  }

  // FIXME: We currently only handle pseudoinstructions that don't get expanded
  //        before the assembly printer.
  unsigned NumBytes = 0;
  const MCInstrDesc &Desc = MI.getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    NumBytes = 4;
    break;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    NumBytes = 0;
    break;
  case TargetOpcode::STACKMAP:
    // The upper bound for a stackmap intrinsic is the full length of its shadow
    NumBytes = StackMapOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case TargetOpcode::PATCHPOINT:
    // The size of the patchpoint intrinsic is the number of bytes requested
    NumBytes = PatchPointOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case AArch64::TLSDESC_CALLSEQ:
    // This gets lowered to an instruction sequence which takes 16 bytes
    NumBytes = 16;
    break;
  case AArch64::JumpTableDest32:
  case AArch64::JumpTableDest16:
  case AArch64::JumpTableDest8:
    NumBytes = 12;
    break;
  case AArch64::SPACE:
    NumBytes = MI.getOperand(1).getImm();
    break;
  }

  return NumBytes;
}

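// For example, the 16-byte bound for TLSDESC_CALLSEQ above corresponds to its
// eventual four-instruction TLS descriptor lowering (roughly adrp; ldr; add;
// blr), and the 12-byte jump-table pseudos expand to a three-instruction
// sequence; every other real instruction is a fixed 4 bytes on AArch64.
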
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
  }
}

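// The Cond vector built above therefore encodes either a plain condition code
// ([CC] for Bcc) or a folded compare-and-branch: [-1, Opcode, Reg] for
// cb(n)z and [-1, Opcode, Reg, BitNo] for tb(n)z. For instance,
// "tbnz w0, #3, bb" becomes [-1, TBNZW, w0, 3]. reverseBranchCondition() and
// instantiateCondBranch() below consume exactly this encoding.
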
static unsigned getBranchDisplacementBits(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return 64;
  case AArch64::TBNZW:
  case AArch64::TBZW:
  case AArch64::TBNZX:
  case AArch64::TBZX:
    return TBZDisplacementBits;
  case AArch64::CBNZW:
  case AArch64::CBZW:
  case AArch64::CBNZX:
  case AArch64::CBZX:
    return CBZDisplacementBits;
  case AArch64::Bcc:
    return BCCDisplacementBits;
  }
}

bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                             int64_t BrOffset) const {
  unsigned Bits = getBranchDisplacementBits(BranchOp);
  assert(Bits >= 3 && "max branch displacement must be enough to jump "
                      "over conditional branch expansion");
  return isIntN(Bits, BrOffset / 4);
}

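// With the default widths this matches the architectural encodings: Bcc and
// CB(N)Z carry a 19-bit word displacement (roughly +/-1MiB) and TB(N)Z a
// 14-bit word displacement (roughly +/-32KiB); isIntN() checks that the byte
// offset, scaled down by the 4-byte instruction size, fits in that field.
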
MachineBasicBlock *
AArch64InstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return MI.getOperand(0).getMBB();
  case AArch64::TBZW:
  case AArch64::TBNZW:
  case AArch64::TBZX:
  case AArch64::TBNZX:
    return MI.getOperand(2).getMBB();
  case AArch64::CBZW:
  case AArch64::CBNZW:
  case AArch64::CBZX:
  case AArch64::CBNZX:
  case AArch64::Bcc:
    return MI.getOperand(1).getMBB();
  }
}

// Branch analysis.
bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only remaining terminator is an unconditional
        // branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

bool AArch64InstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}

unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }
  --I;
  if (!isCondBranchOpcode(I->getOpcode())) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }

  // Remove the branch.
  I->eraseFromParent();
  if (BytesRemoved)
    *BytesRemoved = 8;

  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    // Note that we use add (rather than addReg) so that the operand's flags
    // are preserved.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);

    if (BytesAdded)
      *BytesAdded = 4;

    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);

  if (BytesAdded)
    *BytesAdded = 8;

  return 2;
}

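// For example, a one-way conditional insertion emits just "b.eq %tbb"
// (4 bytes, return value 1), while the two-way form emits
// "b.eq %tbb; b %fbb" (8 bytes, return value 2). The branch relaxation pass
// uses BytesAdded/BytesRemoved to keep its block-size estimates current.
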
// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to ADDXri and ADDWri.
    LLVM_FALLTHROUGH;
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to SUBXrr and SUBWrr.
    LLVM_FALLTHROUGH;
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}

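// In summary, the folds recognized above are: "add r, #1" -> csinc,
// "orn dst, zr, r" (bitwise not) -> csinv, and "sub dst, zr, r" (negate)
// -> csneg. For instance, given "%t = ADDWri %a, 1, 0" this returns CSINCWr
// with *NewVReg = %a, letting insertSelect() below build a single csinc in
// place of a csel plus the add.
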
bool AArch64InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                       ArrayRef<MachineOperand> Cond,
                                       unsigned TrueReg, unsigned FalseReg,
                                       int &CondCycles, int &TrueCycles,
                                       int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, unsigned DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = false;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = true;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = false;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = true;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinv, csinc and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg)
      .addReg(TrueReg)
      .addReg(FalseReg)
      .addImm(CC);
}

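// As a concrete illustration: selecting on EQ between a true value defined by
// "%t = ADDWri %a, 1, 0" and a false value %f folds into
// "CSINCWr %dst, %f, %a, NE". The condition is inverted because csinc applies
// the increment to its second source operand, so on EQ the result is %a + 1
// (the original true value) and on NE it is %f, as required.
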
/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
  uint64_t Imm = MI.getOperand(1).getImm();
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
}

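// The shift pair above simply truncates Imm to its low BitSize bits before
// asking whether it is a valid logical immediate. For example,
// "MOVi32imm #0x00ff00ff" qualifies (a repeating 16-bit element with eight
// contiguous set bits) and can be materialized as a single
// "orr wd, wzr, #0x00ff00ff".
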
// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  if (!Subtarget.hasCustomCheapAsMoveHandling())
    return MI.isAsCheapAsAMove();

  const unsigned Opcode = MI.getOpcode();

  // Firstly, check cases gated by features.

  if (Subtarget.hasZeroCycleZeroingFP()) {
    if (Opcode == AArch64::FMOVH0 ||
        Opcode == AArch64::FMOVS0 ||
        Opcode == AArch64::FMOVD0)
      return true;
  }

  if (Subtarget.hasZeroCycleZeroingGP()) {
    if (Opcode == TargetOpcode::COPY &&
        (MI.getOperand(1).getReg() == AArch64::WZR ||
         MI.getOperand(1).getReg() == AArch64::XZR))
      return true;
  }

  // Secondly, check cases specific to sub-targets.

  if (Subtarget.hasExynosCheapAsMoveHandling()) {
    if (isExynosCheapAsMove(MI))
      return true;

    return MI.isAsCheapAsAMove();
  }

  // Finally, check generic cases.

  switch (Opcode) {
  default:
    return false;

  // add/sub on register without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (MI.getOperand(3).getImm() == 0);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;

  // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
  // ORRXri, it is as cheap as MOV
  case AArch64::MOVi32imm:
    return canBeExpandedToORR(MI, 32);
  case AArch64::MOVi64imm:
    return canBeExpandedToORR(MI, 64);
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}

bool AArch64InstrInfo::isFalkorShiftExtFast(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;

  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    if (ShiftVal == 0)
      return true;
    return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5;
  }

  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64: {
    unsigned Imm = MI.getOperand(3).getImm();
    switch (AArch64_AM::getArithExtendType(Imm)) {
    default:
      return false;
    case AArch64_AM::UXTB:
    case AArch64_AM::UXTH:
    case AArch64_AM::UXTW:
    case AArch64_AM::UXTX:
      return AArch64_AM::getArithShiftValue(Imm) <= 4;
    }
  }

  case AArch64::SUBWrs:
  case AArch64::SUBSWrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    return ShiftVal == 0 ||
           (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 31);
  }

  case AArch64::SUBXrs:
  case AArch64::SUBSXrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    return ShiftVal == 0 ||
           (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 63);
  }

  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64: {
    unsigned Imm = MI.getOperand(3).getImm();
    switch (AArch64_AM::getArithExtendType(Imm)) {
    default:
      return false;
    case AArch64_AM::UXTB:
    case AArch64_AM::UXTH:
    case AArch64_AM::UXTW:
    case AArch64_AM::UXTX:
      return AArch64_AM::getArithShiftValue(Imm) == 0;
    }
  }

  case AArch64::LDRBBroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroW:
  case AArch64::LDRBroX:
  case AArch64::LDRDroW:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroW:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroW:
  case AArch64::LDRHroX:
  case AArch64::LDRQroW:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroW:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroW:
  case AArch64::LDRSroX:
  case AArch64::LDRWroW:
  case AArch64::LDRWroX:
  case AArch64::LDRXroW:
  case AArch64::LDRXroX:
  case AArch64::PRFMroW:
  case AArch64::PRFMroX:
  case AArch64::STRBBroW:
  case AArch64::STRBBroX:
  case AArch64::STRBroW:
  case AArch64::STRBroX:
  case AArch64::STRDroW:
  case AArch64::STRDroX:
  case AArch64::STRHHroW:
  case AArch64::STRHHroX:
  case AArch64::STRHroW:
  case AArch64::STRHroX:
  case AArch64::STRQroW:
  case AArch64::STRQroX:
  case AArch64::STRSroW:
  case AArch64::STRSroX:
  case AArch64::STRWroW:
  case AArch64::STRWroX:
  case AArch64::STRXroW:
  case AArch64::STRXroX: {
    unsigned IsSigned = MI.getOperand(3).getImm();
    return !IsSigned;
  }
  }
}

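// For instance, on Falkor "add x0, x1, x2, lsl #4" stays on the fast path
// (LSL with amount <= 5), while "add x0, x1, x2, lsl #6" or a non-LSL shift
// does not; likewise the register-offset loads/stores above are fast only
// when the offset register is zero-extended rather than sign-extended
// (operand 3's "signed" field is zero).
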
bool AArch64InstrInfo::isSEHInstruction(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
  default:
    return false;
  case AArch64::SEH_StackAlloc:
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveFPLR_X:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveReg_X:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveRegP_X:
  case AArch64::SEH_SaveFReg:
  case AArch64::SEH_SaveFReg_X:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFRegP_X:
  case AArch64::SEH_SetFP:
  case AArch64::SEH_AddFP:
  case AArch64::SEH_Nop:
  case AArch64::SEH_PrologEnd:
  case AArch64::SEH_EpilogStart:
  case AArch64::SEH_EpilogEnd:
    return true;
  }
}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb, AliasAnalysis *AA) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned WidthA = 0, WidthB = 0;

  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base, the offset from the base, and the width. The width is
  // the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If the
  // bases are identical, and the offset of the lower memory access plus its
  // width does not overlap the offset of the higher memory access, then the
  // memory accesses are disjoint.
  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}

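// For example, "str x0, [x8]" and "ldr x1, [x8, #8]" share an identical base
// operand, and LowOffset (0) + LowWidth (8) <= HighOffset (8), so the two
// accesses provably do not alias. With "#4" instead of "#8" the byte ranges
// would overlap and this conservatively returns false.
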
bool AArch64InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  if (TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF))
    return true;
  switch (MI.getOpcode()) {
  case AArch64::HINT:
    // CSDB hints are scheduling barriers.
    if (MI.getOperand(0).getImm() == 0x14)
      return true;
    break;
  case AArch64::DSB:
  case AArch64::ISB:
    // DSB and ISB also are scheduling barriers.
    return true;
  default:;
  }
  return isSEHInstruction(MI);
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  // The first operand can be a frame index where we'd normally expect a
  // register.
  assert(MI.getNumOperands() >= 2 && "All AArch64 cmps should have 2 operands");
  if (!MI.getOperand(1).isReg())
    return false;

  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = MI.getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: CmpValue is only tracked as zero or nonzero (0 or 1) here; the
    // actual immediate is lost.
    CmpValue = MI.getOperand(2).getImm() != 0;
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: The return value type of decodeLogicalImmediate is uint64_t,
    // while the type of CmpValue is int. When converting uint64_t to int,
    // the high 32 bits of the uint64_t will be lost.
    // In fact this caused a bug in spec2006-483.xalancbmk.
    // CmpValue is only used to compare with zero in OptimizeCompareInstr.
    CmpValue = AArch64_AM::decodeLogicalImmediate(
                   MI.getOperand(2).getImm(),
                   MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0;
    return true;
  }

  return false;
}

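// For example, "subs w0, w1, #42" is reported as SrcReg = w1, SrcReg2 = 0,
// CmpValue = 1 (the immediate is tracked only as zero/nonzero), whereas
// "subs w0, w1, w2" yields SrcReg = w1, SrcReg2 = w2, CmpValue = 0;
// optimizeCompareInstr() below only acts on the compare-with-zero shape.
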
static bool UpdateOperandRegClass(MachineInstr &Instr) {
  MachineBasicBlock *MBB = Instr.getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr.getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr.getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible to do the actual
/// substitution and legality checking.
static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
  bool MIDefinesZeroReg = false;
  if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI.getOpcode()) {
  default:
    return MI.getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}

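// The MIDefinesZeroReg special case exists because dropping the S suffix
// changes how register number 31 decodes in the destination slot: in
// "adds wzr, w1, #1" the destination is WZR, but the same bits in the
// non-flag-setting ADDWri encoding would name WSP instead, so such
// instructions must keep their S form.
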
enum AccessKind { AK_Write = 0x01, AK_Read = 0x10, AK_All = 0x11 };

/// True when condition flags are accessed (either by writing or reading)
/// on the instruction trace starting at From and ending at To.
///
/// Note: If From and To are from different blocks it's assumed CC are accessed
///       on the path.
static bool areCFlagsAccessedBetweenInstrs(
    MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
    const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
  // Early exit if To is at the beginning of the BB.
  if (To == To->getParent()->begin())
    return true;

  // Check whether the instructions are in the same basic block.
  // If not, assume the condition flags might get modified somewhere.
  if (To->getParent() != From->getParent())
    return true;

  // From must be above To.
  assert(std::find_if(++To.getReverse(), To->getParent()->rend(),
                      [From](MachineInstr &MI) {
                        return MI.getIterator() == From;
                      }) != To->getParent()->rend());

  // We iterate backward starting at \p To until we hit \p From.
  for (--To; To != From; --To) {
    const MachineInstr &Instr = *To;

    if (((AccessToCheck & AK_Write) &&
         Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
        ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
      return true;
  }
  return false;
}

/// Try to optimize a compare instruction. A compare instruction is an
/// instruction which produces AArch64::NZCV. It is truly a compare
/// instruction only when there are no uses of its destination register.
///
/// The following steps are tried in order:
/// 1. Convert CmpInstr into an unconditional version.
/// 2. Remove CmpInstr if above there is an instruction producing a needed
///    condition code or an instruction which can be converted into such an
///    instruction.
///    Only comparison with zero is supported.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {
  assert(CmpInstr.getParent());
  assert(MRI);

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (DeadNZCVIdx != -1) {
    if (CmpInstr.definesRegister(AArch64::WZR) ||
        CmpInstr.definesRegister(AArch64::XZR)) {
      CmpInstr.eraseFromParent();
      return true;
    }
    unsigned Opc = CmpInstr.getOpcode();
    unsigned NewOpc = convertToNonFlagSettingOpc(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr.setDesc(MCID);
    CmpInstr.RemoveOperand(DeadNZCVIdx);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" form where the immediate is zero.
  // FIXME: CmpValue has already been converted to 0 or 1 in the
  // analyzeCompare function.
  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
    return false;

  return substituteCmpToZero(CmpInstr, SrcReg, MRI);
}

/// Get opcode of S version of Instr.
/// If Instr is S version its opcode is returned.
/// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have S version
/// or we are not interested in it.
static unsigned sForm(MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64::INSTRUCTION_LIST_END;

  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    return Instr.getOpcode();

  case AArch64::ADDWrr:
    return AArch64::ADDSWrr;
  case AArch64::ADDWri:
    return AArch64::ADDSWri;
  case AArch64::ADDXrr:
    return AArch64::ADDSXrr;
  case AArch64::ADDXri:
    return AArch64::ADDSXri;
  case AArch64::ADCWr:
    return AArch64::ADCSWr;
  case AArch64::ADCXr:
    return AArch64::ADCSXr;
  case AArch64::SUBWrr:
    return AArch64::SUBSWrr;
  case AArch64::SUBWri:
    return AArch64::SUBSWri;
  case AArch64::SUBXrr:
    return AArch64::SUBSXrr;
  case AArch64::SUBXri:
    return AArch64::SUBSXri;
  case AArch64::SBCWr:
    return AArch64::SBCSWr;
  case AArch64::SBCXr:
    return AArch64::SBCSXr;
  case AArch64::ANDWri:
    return AArch64::ANDSWri;
  case AArch64::ANDXri:
    return AArch64::ANDSXri;
  }
}

/// Check if AArch64::NZCV should be alive in successors of MBB.
static bool areCFlagsAliveInSuccessors(MachineBasicBlock *MBB) {
  for (auto *BB : MBB->successors())
    if (BB->isLiveIn(AArch64::NZCV))
      return true;
  return false;
}

namespace {

struct UsedNZCV {
  bool N = false;
  bool Z = false;
  bool C = false;
  bool V = false;

  UsedNZCV() = default;

  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    this->N |= UsedFlags.N;
    this->Z |= UsedFlags.Z;
    this->C |= UsedFlags.C;
    this->V |= UsedFlags.V;
    return *this;
  }
};

} // end anonymous namespace

/// Find a condition code used by the instruction.
/// Returns AArch64CC::Invalid if either the instruction does not use condition
/// codes or we don't optimize CmpInstr in the presence of such instructions.
static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64CC::Invalid;

  case AArch64::Bcc: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 2);
    return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 2).getImm());
  }

  case AArch64::CSINVWr:
  case AArch64::CSINVXr:
  case AArch64::CSINCWr:
  case AArch64::CSINCXr:
  case AArch64::CSELWr:
  case AArch64::CSELXr:
  case AArch64::CSNEGWr:
  case AArch64::CSNEGXr:
  case AArch64::FCSELSrrr:
  case AArch64::FCSELDrrr: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 1);
    return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 1).getImm());
  }
  }
}

static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
  assert(CC != AArch64CC::Invalid);
  UsedNZCV UsedFlags;
  switch (CC) {
  default:
    break;

  case AArch64CC::EQ: // Z set
  case AArch64CC::NE: // Z clear
    UsedFlags.Z = true;
    break;

  case AArch64CC::HI: // Z clear and C set
  case AArch64CC::LS: // Z set or C clear
    UsedFlags.Z = true;
    LLVM_FALLTHROUGH;
  case AArch64CC::HS: // C set
  case AArch64CC::LO: // C clear
    UsedFlags.C = true;
    break;

  case AArch64CC::MI: // N set
  case AArch64CC::PL: // N clear
    UsedFlags.N = true;
    break;

  case AArch64CC::VS: // V set
  case AArch64CC::VC: // V clear
    UsedFlags.V = true;
    break;

  case AArch64CC::GT: // Z clear, N and V the same
  case AArch64CC::LE: // Z set, N and V differ
    UsedFlags.Z = true;
    LLVM_FALLTHROUGH;
  case AArch64CC::GE: // N and V the same
  case AArch64CC::LT: // N and V differ
    UsedFlags.N = true;
    UsedFlags.V = true;
    break;
  }
  return UsedFlags;
}

static bool isADDSRegImm(unsigned Opcode) {
  return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
}

static bool isSUBSRegImm(unsigned Opcode) {
  return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
}

/// Check if CmpInstr can be substituted by MI.
///
/// CmpInstr can be substituted:
/// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
/// - and, MI and CmpInstr are from the same MachineBB
/// - and, condition flags are not alive in successors of the CmpInstr parent
/// - and, if MI opcode is the S form there must be no defs of flags between
///        MI and CmpInstr
///        or if MI opcode is not the S form there must be neither defs of flags
///        nor uses of flags between MI and CmpInstr.
/// - and, C/V flags are not used after CmpInstr
static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr,
                                       const TargetRegisterInfo *TRI) {
  assert(MI);
  assert(sForm(*MI) != AArch64::INSTRUCTION_LIST_END);
  assert(CmpInstr);

  const unsigned CmpOpcode = CmpInstr->getOpcode();
  if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
    return false;

  if (MI->getParent() != CmpInstr->getParent())
    return false;

  if (areCFlagsAliveInSuccessors(CmpInstr->getParent()))
    return false;

  AccessKind AccessToCheck = AK_Write;
  if (sForm(*MI) != MI->getOpcode())
    AccessToCheck = AK_All;
  if (areCFlagsAccessedBetweenInstrs(MI, CmpInstr, TRI, AccessToCheck))
    return false;

  UsedNZCV NZCVUsedAfterCmp;
  for (auto I = std::next(CmpInstr->getIterator()),
            E = CmpInstr->getParent()->instr_end();
       I != E; ++I) {
    const MachineInstr &Instr = *I;
    if (Instr.readsRegister(AArch64::NZCV, TRI)) {
      AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr);
      if (CC == AArch64CC::Invalid) // Unsupported conditional instruction
        return false;
      NZCVUsedAfterCmp |= getUsedNZCV(CC);
    }

    if (Instr.modifiesRegister(AArch64::NZCV, TRI))
      break;
  }

  return !NZCVUsedAfterCmp.C && !NZCVUsedAfterCmp.V;
}

/// Substitute an instruction comparing to zero with another instruction
/// which produces needed condition flags.
///
/// Return true on success.
bool AArch64InstrInfo::substituteCmpToZero(
    MachineInstr &CmpInstr, unsigned SrcReg,
    const MachineRegisterInfo *MRI) const {
  assert(MRI);
  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();

  unsigned NewOpc = sForm(*MI);
  if (NewOpc == AArch64::INSTRUCTION_LIST_END)
    return false;

  if (!canInstrSubstituteCmpInstr(MI, &CmpInstr, TRI))
    return false;

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr.eraseFromParent();
  bool succeeded = UpdateOperandRegClass(*MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}

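// For example, "add w0, w1, w2; ...; cmp w0, #0; b.eq" becomes
// "adds w0, w1, w2; ...; b.eq": the defining add is switched to its
// flag-setting form and the explicit compare with zero is deleted. The C/V
// restriction in canInstrSubstituteCmpInstr() matters because an adds and a
// "subs ..., #0" compute carry and overflow differently even though their
// N and Z results agree.
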
bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD &&
      MI.getOpcode() != AArch64::CATCHRET)
    return false;

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MI.getOpcode() == AArch64::CATCHRET) {
    // Skip to the first instruction before the epilog.
    const TargetInstrInfo *TII =
        MBB.getParent()->getSubtarget().getInstrInfo();
    MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
    auto MBBI = MachineBasicBlock::iterator(MI);
    MachineBasicBlock::iterator FirstEpilogSEH = std::prev(MBBI);
    while (FirstEpilogSEH->getFlag(MachineInstr::FrameDestroy) &&
           FirstEpilogSEH != MBB.begin())
      FirstEpilogSEH = std::prev(FirstEpilogSEH);
    if (FirstEpilogSEH != MBB.begin())
      FirstEpilogSEH = std::next(FirstEpilogSEH);
    BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADRP))
        .addReg(AArch64::X0, RegState::Define)
        .addMBB(TargetMBB);
    BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADDXri))
        .addReg(AArch64::X0, RegState::Define)
        .addReg(AArch64::X0)
        .addMBB(TargetMBB)
        .addImm(0);
    return true;
  }

  unsigned Reg = MI.getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI.memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, OpFlags);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI.memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC)
        .addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC)
        .addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC)
        .addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3)
        .addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI.memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Tiny) {
    BuildMI(MBB, MI, DL, get(AArch64::ADR), Reg)
        .addGlobalAddress(GV, 0, OpFlags);
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI.memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}

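// For the default (small, non-GOT) case the final branch above produces the
// usual page/pageoff pair, roughly:
//   adrp x0, __stack_chk_guard
//   ldr  x0, [x0, :lo12:__stack_chk_guard]
// (illustrative only; the actual symbol comes from the pseudo's memory
// operand, and the GOT/Large/Tiny code models use the other sequences).
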
// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
      assert(MI.getDesc().getNumOperands() == 3 &&
             MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI.getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI.getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI.getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI.getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI.getOperand(1).getReg() == AArch64::XZR) {
      assert(MI.getDesc().getNumOperands() == 4 &&
             MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI.getOperand(2).getImm() == 0) {
      assert(MI.getDesc().getNumOperands() == 4 &&
             MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a floating-point register
// without modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI.getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
        MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
        MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) {
  return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
    return MMO->getFlags() & MOSuppressPair;
  });
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) {
  if (MI.memoperands_empty())
    return;
  (*MI.memoperands_begin())->setFlags(MOSuppressPair);
}

/// Check all MachineMemOperands for a hint that the load/store is strided.
bool AArch64InstrInfo::isStridedAccess(const MachineInstr &MI) {
  return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
    return MMO->getFlags() & MOStridedAccess;
  });
}

bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

Optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) {
  switch (Opc) {
  default: return {};
  case AArch64::PRFMui: return AArch64::PRFUMi;
  case AArch64::LDRXui: return AArch64::LDURXi;
  case AArch64::LDRWui: return AArch64::LDURWi;
  case AArch64::LDRBui: return AArch64::LDURBi;
  case AArch64::LDRHui: return AArch64::LDURHi;
  case AArch64::LDRSui: return AArch64::LDURSi;
  case AArch64::LDRDui: return AArch64::LDURDi;
  case AArch64::LDRQui: return AArch64::LDURQi;
  case AArch64::LDRBBui: return AArch64::LDURBBi;
  case AArch64::LDRHHui: return AArch64::LDURHHi;
  case AArch64::LDRSBXui: return AArch64::LDURSBXi;
  case AArch64::LDRSBWui: return AArch64::LDURSBWi;
  case AArch64::LDRSHXui: return AArch64::LDURSHXi;
  case AArch64::LDRSHWui: return AArch64::LDURSHWi;
  case AArch64::LDRSWui: return AArch64::LDURSWi;
  case AArch64::STRXui: return AArch64::STURXi;
  case AArch64::STRWui: return AArch64::STURWi;
  case AArch64::STRBui: return AArch64::STURBi;
  case AArch64::STRHui: return AArch64::STURHi;
  case AArch64::STRSui: return AArch64::STURSi;
  case AArch64::STRDui: return AArch64::STURDi;
  case AArch64::STRQui: return AArch64::STURQi;
  case AArch64::STRBBui: return AArch64::STURBBi;
  case AArch64::STRHHui: return AArch64::STURHHi;
  }
}

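// Each scaled form (12-bit unsigned immediate, scaled by the access size)
// maps to its unscaled twin (9-bit signed byte offset), e.g.
// LDRXui <-> LDURXi. Callers typically switch to the unscaled opcode when an
// offset is negative or stops being a multiple of the access size.
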
unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {
  switch (Opc) {
  default:
    return 2;
  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
  case AArch64::LDNPXi:
  case AArch64::LDNPDi:
  case AArch64::STNPXi:
  case AArch64::STNPDi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
  case AArch64::LDNPQi:
  case AArch64::STNPQi:
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
  case AArch64::LDNPWi:
  case AArch64::LDNPSi:
  case AArch64::STNPWi:
  case AArch64::STNPSi:
  case AArch64::LDG:
  case AArch64::STGPi:
    return 3;
  case AArch64::ADDG:
  case AArch64::STGOffset:
    return 2;
  }
}

1783 bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) {
1784 switch (MI.getOpcode()) {
1785 default:
1786 return false;
1787 // Scaled instructions.
1788 case AArch64::STRSui:
1789 case AArch64::STRDui:
1790 case AArch64::STRQui:
1791 case AArch64::STRXui:
1792 case AArch64::STRWui:
1793 case AArch64::LDRSui:
1794 case AArch64::LDRDui:
1795 case AArch64::LDRQui:
1796 case AArch64::LDRXui:
1797 case AArch64::LDRWui:
1798 case AArch64::LDRSWui:
1799 // Unscaled instructions.
1800 case AArch64::STURSi:
1801 case AArch64::STURDi:
1802 case AArch64::STURQi:
1803 case AArch64::STURWi:
1804 case AArch64::STURXi:
1805 case AArch64::LDURSi:
1806 case AArch64::LDURDi:
1807 case AArch64::LDURQi:
1808 case AArch64::LDURWi:
1809 case AArch64::LDURXi:
1810 case AArch64::LDURSWi:
1811 return true;
1812 }
1813 }
1815 unsigned AArch64InstrInfo::convertToFlagSettingOpc(unsigned Opc,
1816 bool &Is64Bit) {
1817 switch (Opc) {
1818 default:
1819 llvm_unreachable("Opcode has no flag setting equivalent!");
1820 // 32-bit cases:
1821 case AArch64::ADDWri:
1822 Is64Bit = false;
1823 return AArch64::ADDSWri;
1824 case AArch64::ADDWrr:
1825 Is64Bit = false;
1826 return AArch64::ADDSWrr;
1827 case AArch64::ADDWrs:
1828 Is64Bit = false;
1829 return AArch64::ADDSWrs;
1830 case AArch64::ADDWrx:
1831 Is64Bit = false;
1832 return AArch64::ADDSWrx;
1833 case AArch64::ANDWri:
1834 Is64Bit = false;
1835 return AArch64::ANDSWri;
1836 case AArch64::ANDWrr:
1837 Is64Bit = false;
1838 return AArch64::ANDSWrr;
1839 case AArch64::ANDWrs:
1840 Is64Bit = false;
1841 return AArch64::ANDSWrs;
1842 case AArch64::BICWrr:
1843 Is64Bit = false;
1844 return AArch64::BICSWrr;
1845 case AArch64::BICWrs:
1846 Is64Bit = false;
1847 return AArch64::BICSWrs;
1848 case AArch64::SUBWri:
1849 Is64Bit = false;
1850 return AArch64::SUBSWri;
1851 case AArch64::SUBWrr:
1852 Is64Bit = false;
1853 return AArch64::SUBSWrr;
1854 case AArch64::SUBWrs:
1855 Is64Bit = false;
1856 return AArch64::SUBSWrs;
1857 case AArch64::SUBWrx:
1858 Is64Bit = false;
1859 return AArch64::SUBSWrx;
1860 // 64-bit cases:
1861 case AArch64::ADDXri:
1862 Is64Bit = true;
1863 return AArch64::ADDSXri;
1864 case AArch64::ADDXrr:
1865 Is64Bit = true;
1866 return AArch64::ADDSXrr;
1867 case AArch64::ADDXrs:
1868 Is64Bit = true;
1869 return AArch64::ADDSXrs;
1870 case AArch64::ADDXrx:
1871 Is64Bit = true;
1872 return AArch64::ADDSXrx;
1873 case AArch64::ANDXri:
1874 Is64Bit = true;
1875 return AArch64::ANDSXri;
1876 case AArch64::ANDXrr:
1877 Is64Bit = true;
1878 return AArch64::ANDSXrr;
1879 case AArch64::ANDXrs:
1880 Is64Bit = true;
1881 return AArch64::ANDSXrs;
1882 case AArch64::BICXrr:
1883 Is64Bit = true;
1884 return AArch64::BICSXrr;
1885 case AArch64::BICXrs:
1886 Is64Bit = true;
1887 return AArch64::BICSXrs;
1888 case AArch64::SUBXri:
1889 Is64Bit = true;
1890 return AArch64::SUBSXri;
1891 case AArch64::SUBXrr:
1892 Is64Bit = true;
1893 return AArch64::SUBSXrr;
1894 case AArch64::SUBXrs:
1895 Is64Bit = true;
1896 return AArch64::SUBSXrs;
1897 case AArch64::SUBXrx:
1898 Is64Bit = true;
1899 return AArch64::SUBSXrx;
1900 }
1901 }
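// Illustrative usage sketch, added for exposition (not part of the original
// file). A peephole that needs the flag-setting twin of an opcode can write:
//
//   bool Is64Bit;
//   unsigned NewOpc =
//       AArch64InstrInfo::convertToFlagSettingOpc(AArch64::ADDWri, Is64Bit);
//   // NewOpc == AArch64::ADDSWri and Is64Bit == false here.
//
// Callers must check that a flag-setting form exists first; unsupported
// opcodes hit the llvm_unreachable above.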
1903 // Is this a candidate for ld/st merging or pairing? For example, we don't
1904 // touch volatiles or load/stores that have a hint to avoid pair formation.
1905 bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
1906 // If this is a volatile load/store, don't mess with it.
1907 if (MI.hasOrderedMemoryRef())
1908 return false;
1910 // Make sure this is a reg/fi+imm (as opposed to an address reloc).
1911 assert((MI.getOperand(1).isReg() || MI.getOperand(1).isFI()) &&
1912 "Expected a reg or frame index operand.");
1913 if (!MI.getOperand(2).isImm())
1914 return false;
1916 // Can't merge/pair if the instruction modifies the base register.
1917 // e.g., ldr x0, [x0]
1918 // This case will never occur with an FI base.
1919 if (MI.getOperand(1).isReg()) {
1920 unsigned BaseReg = MI.getOperand(1).getReg();
1921 const TargetRegisterInfo *TRI = &getRegisterInfo();
1922 if (MI.modifiesRegister(BaseReg, TRI))
1923 return false;
1924 }
1926 // Check if this load/store has a hint to avoid pair formation.
1927 // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
1928 if (isLdStPairSuppressed(MI))
1929 return false;
1931 // On some CPUs quad load/store pairs are slower than two single load/stores.
1932 if (Subtarget.isPaired128Slow()) {
1933 switch (MI.getOpcode()) {
1934 default:
1935 break;
1936 case AArch64::LDURQi:
1937 case AArch64::STURQi:
1938 case AArch64::LDRQui:
1939 case AArch64::STRQui:
1940 return false;
1941 }
1942 }
1944 return true;
1945 }
1947 bool AArch64InstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
1948 const MachineOperand *&BaseOp,
1949 int64_t &Offset,
1950 const TargetRegisterInfo *TRI) const {
1951 unsigned Width;
1952 return getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI);
1953 }
1955 bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
1956 const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
1957 unsigned &Width, const TargetRegisterInfo *TRI) const {
1958 assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
1959 // Handle only loads/stores with base register followed by immediate offset.
1960 if (LdSt.getNumExplicitOperands() == 3) {
1961 // Non-paired instruction (e.g., ldr x1, [x0, #8]).
1962 if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) ||
1963 !LdSt.getOperand(2).isImm())
1964 return false;
1965 } else if (LdSt.getNumExplicitOperands() == 4) {
1966 // Paired instruction (e.g., ldp x1, x2, [x0, #8]).
1967 if (!LdSt.getOperand(1).isReg() ||
1968 (!LdSt.getOperand(2).isReg() && !LdSt.getOperand(2).isFI()) ||
1969 !LdSt.getOperand(3).isImm())
1970 return false;
1971 } else
1972 return false;
1974 // Get the scaling factor for the instruction and set the width for the
1975 // instruction.
1976 unsigned Scale = 0;
1977 int64_t Dummy1, Dummy2;
1979 // If this returns false, then it's an instruction we don't want to handle.
1980 if (!getMemOpInfo(LdSt.getOpcode(), Scale, Width, Dummy1, Dummy2))
1981 return false;
1983 // Compute the offset. Offset is calculated as the immediate operand
1984 // multiplied by the scaling factor. Unscaled instructions have scaling factor
1985 // set to 1.
1986 if (LdSt.getNumExplicitOperands() == 3) {
1987 BaseOp = &LdSt.getOperand(1);
1988 Offset = LdSt.getOperand(2).getImm() * Scale;
1989 } else {
1990 assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
1991 BaseOp = &LdSt.getOperand(2);
1992 Offset = LdSt.getOperand(3).getImm() * Scale;
1993 }
1995 assert((BaseOp->isReg() || BaseOp->isFI()) &&
1996 "getMemOperandWithOffset only supports base "
1997 "operands of type register or frame index.");
1999 return true;
2000 }
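// Worked example, added for exposition (not in the original source): for the
// paired load "ldp x1, x2, [x0, #16]" (LDPXi), operand 2 is the base x0 and
// operand 3 holds the element immediate 2. With Scale == 8 from getMemOpInfo,
// the function reports that base operand and Offset == 2 * 8 == 16 bytes.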
2002 MachineOperand &
2003 AArch64InstrInfo::getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const {
2004 assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
2005 MachineOperand &OfsOp = LdSt.getOperand(LdSt.getNumExplicitOperands() - 1);
2006 assert(OfsOp.isImm() && "Offset operand wasn't immediate.");
2007 return OfsOp;
2008 }
2010 bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, unsigned &Scale,
2011 unsigned &Width, int64_t &MinOffset,
2012 int64_t &MaxOffset) {
2013 switch (Opcode) {
2014 // Not a memory operation or something we want to handle.
2015 default:
2016 Scale = Width = 0;
2017 MinOffset = MaxOffset = 0;
2018 return false;
2019 case AArch64::STRWpost:
2020 case AArch64::LDRWpost:
2021 Width = 32;
2022 Scale = 4;
2023 MinOffset = -256;
2024 MaxOffset = 255;
2025 break;
2026 case AArch64::LDURQi:
2027 case AArch64::STURQi:
2028 Width = 16;
2029 Scale = 1;
2030 MinOffset = -256;
2031 MaxOffset = 255;
2032 break;
2033 case AArch64::PRFUMi:
2034 case AArch64::LDURXi:
2035 case AArch64::LDURDi:
2036 case AArch64::STURXi:
2037 case AArch64::STURDi:
2038 Width = 8;
2039 Scale = 1;
2040 MinOffset = -256;
2041 MaxOffset = 255;
2042 break;
2043 case AArch64::LDURWi:
2044 case AArch64::LDURSi:
2045 case AArch64::LDURSWi:
2046 case AArch64::STURWi:
2047 case AArch64::STURSi:
2048 Width = 4;
2049 Scale = 1;
2050 MinOffset = -256;
2051 MaxOffset = 255;
2052 break;
2053 case AArch64::LDURHi:
2054 case AArch64::LDURHHi:
2055 case AArch64::LDURSHXi:
2056 case AArch64::LDURSHWi:
2057 case AArch64::STURHi:
2058 case AArch64::STURHHi:
2059 Width = 2;
2060 Scale = 1;
2061 MinOffset = -256;
2062 MaxOffset = 255;
2063 break;
2064 case AArch64::LDURBi:
2065 case AArch64::LDURBBi:
2066 case AArch64::LDURSBXi:
2067 case AArch64::LDURSBWi:
2068 case AArch64::STURBi:
2069 case AArch64::STURBBi:
2070 Width = 1;
2071 Scale = 1;
2072 MinOffset = -256;
2073 MaxOffset = 255;
2074 break;
2075 case AArch64::LDPQi:
2076 case AArch64::LDNPQi:
2077 case AArch64::STPQi:
2078 case AArch64::STNPQi:
2079 Scale = 16;
2080 Width = 32;
2081 MinOffset = -64;
2082 MaxOffset = 63;
2083 break;
2084 case AArch64::LDRQui:
2085 case AArch64::STRQui:
2086 Scale = Width = 16;
2087 MinOffset = 0;
2088 MaxOffset = 4095;
2089 break;
2090 case AArch64::LDPXi:
2091 case AArch64::LDPDi:
2092 case AArch64::LDNPXi:
2093 case AArch64::LDNPDi:
2094 case AArch64::STPXi:
2095 case AArch64::STPDi:
2096 case AArch64::STNPXi:
2097 case AArch64::STNPDi:
2098 Scale = 8;
2099 Width = 16;
2100 MinOffset = -64;
2101 MaxOffset = 63;
2102 break;
2103 case AArch64::PRFMui:
2104 case AArch64::LDRXui:
2105 case AArch64::LDRDui:
2106 case AArch64::STRXui:
2107 case AArch64::STRDui:
2108 Scale = Width = 8;
2109 MinOffset = 0;
2110 MaxOffset = 4095;
2111 break;
2112 case AArch64::LDPWi:
2113 case AArch64::LDPSi:
2114 case AArch64::LDNPWi:
2115 case AArch64::LDNPSi:
2116 case AArch64::STPWi:
2117 case AArch64::STPSi:
2118 case AArch64::STNPWi:
2119 case AArch64::STNPSi:
2120 Scale = 4;
2121 Width = 8;
2122 MinOffset = -64;
2123 MaxOffset = 63;
2124 break;
2125 case AArch64::LDRWui:
2126 case AArch64::LDRSui:
2127 case AArch64::LDRSWui:
2128 case AArch64::STRWui:
2129 case AArch64::STRSui:
2130 Scale = Width = 4;
2131 MinOffset = 0;
2132 MaxOffset = 4095;
2133 break;
2134 case AArch64::LDRHui:
2135 case AArch64::LDRHHui:
2136 case AArch64::LDRSHWui:
2137 case AArch64::LDRSHXui:
2138 case AArch64::STRHui:
2139 case AArch64::STRHHui:
2140 Scale = Width = 2;
2141 MinOffset = 0;
2142 MaxOffset = 4095;
2143 break;
2144 case AArch64::LDRBui:
2145 case AArch64::LDRBBui:
2146 case AArch64::LDRSBWui:
2147 case AArch64::LDRSBXui:
2148 case AArch64::STRBui:
2149 case AArch64::STRBBui:
2150 Scale = Width = 1;
2151 MinOffset = 0;
2152 MaxOffset = 4095;
2153 break;
2154 case AArch64::ADDG:
2155 case AArch64::TAGPstack:
2156 Scale = 16;
2157 Width = 0;
2158 MinOffset = 0;
2159 MaxOffset = 63;
2160 break;
2161 case AArch64::LDG:
2162 case AArch64::STGOffset:
2163 case AArch64::STZGOffset:
2164 Scale = Width = 16;
2165 MinOffset = -256;
2166 MaxOffset = 255;
2167 break;
2168 case AArch64::ST2GOffset:
2169 case AArch64::STZ2GOffset:
2170 Scale = 16;
2171 Width = 32;
2172 MinOffset = -256;
2173 MaxOffset = 255;
2174 break;
2175 case AArch64::STGPi:
2176 Scale = Width = 16;
2177 MinOffset = -64;
2178 MaxOffset = 63;
2179 break;
2179 break;
2180 }
2182 return true;
2183 }
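// Illustrative query of the table above, added for exposition (not part of
// the original file):
//
//   unsigned Scale, Width;
//   int64_t MinOff, MaxOff;
//   if (AArch64InstrInfo::getMemOpInfo(AArch64::STPXi, Scale, Width, MinOff,
//                                      MaxOff)) {
//     // Scale == 8, Width == 16, MinOff == -64, MaxOff == 63, so
//     // "stp x0, x1, [sp, #16]" encodes the immediate 16 / 8 == 2.
//   }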
2185 static unsigned getOffsetStride(unsigned Opc) {
2186 switch (Opc) {
2187 default:
2188 return 0;
2189 case AArch64::LDURQi:
2190 case AArch64::STURQi:
2191 return 16;
2192 case AArch64::LDURXi:
2193 case AArch64::LDURDi:
2194 case AArch64::STURXi:
2195 case AArch64::STURDi:
2196 return 8;
2197 case AArch64::LDURWi:
2198 case AArch64::LDURSi:
2199 case AArch64::LDURSWi:
2200 case AArch64::STURWi:
2201 case AArch64::STURSi:
2202 return 4;
2203 }
2204 }
2206 // Scale the unscaled offsets. Returns false if the unscaled offset can't be
2207 // scaled.
2208 static bool scaleOffset(unsigned Opc, int64_t &Offset) {
2209 unsigned OffsetStride = getOffsetStride(Opc);
2210 if (OffsetStride == 0)
2211 return false;
2212 // If the byte-offset isn't a multiple of the stride, we can't scale this
2213 // offset.
2214 if (Offset % OffsetStride != 0)
2215 return false;
2217 // Convert the byte-offset used by unscaled into an "element" offset used
2218 // by the scaled pair load/store instructions.
2219 Offset /= OffsetStride;
2220 return true;
2221 }
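// Worked example, added for exposition (not in the original source): an
// unscaled LDURXi at byte offset 24 has an 8-byte stride, 24 % 8 == 0, and
// scaleOffset rewrites Offset to the element offset 24 / 8 == 3. A byte
// offset of 20 fails, since 20 is not a multiple of the stride.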
2223 // Unscale the scaled offsets. Returns false if the scaled offset can't be
2224 // unscaled.
2225 static bool unscaleOffset(unsigned Opc, int64_t &Offset) {
2226 unsigned OffsetStride = getOffsetStride(Opc);
2227 if (OffsetStride == 0)
2228 return false;
2230 // Convert the "element" offset used by scaled pair load/store instructions
2231 // into the byte-offset used by unscaled.
2232 Offset *= OffsetStride;
2233 return true;
2234 }
2236 static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
2237 if (FirstOpc == SecondOpc)
2238 return true;
2239 // We can also pair sign-ext and zero-ext instructions.
2240 switch (FirstOpc) {
2241 default:
2242 return false;
2243 case AArch64::LDRWui:
2244 case AArch64::LDURWi:
2245 return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
2246 case AArch64::LDRSWui:
2247 case AArch64::LDURSWi:
2248 return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
2249 }
2250 // These instructions can't be paired based on their opcodes.
2251 return false;
2252 }
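// Example, added for exposition (not in the original source): a zero-extending
// "ldr w0, [x2]" (LDRWui) and a sign-extending "ldrsw x1, [x2, #4]" (LDRSWui)
// are accepted as a pair candidate in either order, which is what the two
// symmetric cases above express.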
2254 static bool shouldClusterFI(const MachineFrameInfo &MFI, int FI1,
2255 int64_t Offset1, unsigned Opcode1, int FI2,
2256 int64_t Offset2, unsigned Opcode2) {
2257 // Accesses through fixed stack object frame indices may access a different
2258 // fixed stack slot. Check that the object offsets + offsets match.
2259 if (MFI.isFixedObjectIndex(FI1) && MFI.isFixedObjectIndex(FI2)) {
2260 int64_t ObjectOffset1 = MFI.getObjectOffset(FI1);
2261 int64_t ObjectOffset2 = MFI.getObjectOffset(FI2);
2262 assert(ObjectOffset1 <= ObjectOffset2 && "Object offsets are not ordered.");
2263 // Get the byte-offset from the object offset.
2264 if (!unscaleOffset(Opcode1, Offset1) || !unscaleOffset(Opcode2, Offset2))
2265 return false;
2266 ObjectOffset1 += Offset1;
2267 ObjectOffset2 += Offset2;
2268 // Get the "element" index in the object.
2269 if (!scaleOffset(Opcode1, ObjectOffset1) ||
2270 !scaleOffset(Opcode2, ObjectOffset2))
2271 return false;
2272 return ObjectOffset1 + 1 == ObjectOffset2;
2273 }
2275 return FI1 == FI2;
2276 }
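// Worked example, added for exposition (not in the original source): two
// LDURXi accesses to fixed stack objects at object offsets 0 and 8, each with
// element offset 0, unscale to byte offsets 0 and 0; adding the object
// offsets gives 0 and 8, and rescaling by the 8-byte stride yields indices
// 0 and 1, so 0 + 1 == 1 holds and the accesses are clustered even though
// their frame indices differ.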
2278 /// Detect opportunities for ldp/stp formation.
2280 /// Only called for LdSt for which getMemOperandWithOffset returns true.
2281 bool AArch64InstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
2282 const MachineOperand &BaseOp2,
2283 unsigned NumLoads) const {
2284 const MachineInstr &FirstLdSt = *BaseOp1.getParent();
2285 const MachineInstr &SecondLdSt = *BaseOp2.getParent();
2286 if (BaseOp1.getType() != BaseOp2.getType())
2287 return false;
2289 assert((BaseOp1.isReg() || BaseOp1.isFI()) &&
2290 "Only base registers and frame indices are supported.");
2292 // Check for both base regs and base FI.
2293 if (BaseOp1.isReg() && BaseOp1.getReg() != BaseOp2.getReg())
2294 return false;
2296 // Only cluster up to a single pair.
2297 if (NumLoads > 1)
2298 return false;
2300 if (!isPairableLdStInst(FirstLdSt) || !isPairableLdStInst(SecondLdSt))
2301 return false;
2303 // Can we pair these instructions based on their opcodes?
2304 unsigned FirstOpc = FirstLdSt.getOpcode();
2305 unsigned SecondOpc = SecondLdSt.getOpcode();
2306 if (!canPairLdStOpc(FirstOpc, SecondOpc))
2307 return false;
2309 // Can't merge volatiles or load/stores that have a hint to avoid pair
2310 // formation, for example.
2311 if (!isCandidateToMergeOrPair(FirstLdSt) ||
2312 !isCandidateToMergeOrPair(SecondLdSt))
2313 return false;
2315 // isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
2316 int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
2317 if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
2318 return false;
2320 int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
2321 if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
2322 return false;
2324 // Pairwise instructions have a 7-bit signed offset field.
2325 if (Offset1 > 63 || Offset1 < -64)
2326 return false;
2328 // The caller should already have ordered First/SecondLdSt by offset.
2329 // Note: except for non-equal frame index bases
2330 if (BaseOp1.isFI()) {
2331 assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 >= Offset2) &&
2332 "Caller should have ordered offsets.");
2334 const MachineFrameInfo &MFI =
2335 FirstLdSt.getParent()->getParent()->getFrameInfo();
2336 return shouldClusterFI(MFI, BaseOp1.getIndex(), Offset1, FirstOpc,
2337 BaseOp2.getIndex(), Offset2, SecondOpc);
2338 }
2340 assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 <= Offset2) &&
2341 "Caller should have ordered offsets.");
2343 return Offset1 + 1 == Offset2;
2344 }
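// Example, added for exposition (not in the original source): for
// "ldr x1, [x0, #8]" followed by "ldr x2, [x0, #16]" (both LDRXui), the
// scaled immediates are 1 and 2, so Offset1 + 1 == Offset2 holds and the
// scheduler is told to cluster the two loads, enabling later ldp formation.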
2346 static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
2347 unsigned Reg, unsigned SubIdx,
2348 unsigned State,
2349 const TargetRegisterInfo *TRI) {
2350 if (!SubIdx)
2351 return MIB.addReg(Reg, State);
2353 if (TargetRegisterInfo::isPhysicalRegister(Reg))
2354 return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
2355 return MIB.addReg(Reg, State, SubIdx);
2356 }
2358 static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
2359 unsigned NumRegs) {
2360 // We really want the positive remainder mod 32 here, that happens to be
2361 // easily obtainable with a mask.
2362 return ((DestReg - SrcReg) & 0x1f) < NumRegs;
2363 }
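// Worked example, added for exposition (not in the original source): copying
// the tuple {q0,q1} into {q1,q2} gives (DestReg - SrcReg) & 0x1f == 1, which
// is < NumRegs == 2, so a forward sub-register copy would clobber q1 before
// it is read; copyPhysRegTuple below copies in reverse order instead.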
2365 void AArch64InstrInfo::copyPhysRegTuple(MachineBasicBlock &MBB,
2366 MachineBasicBlock::iterator I,
2367 const DebugLoc &DL, unsigned DestReg,
2368 unsigned SrcReg, bool KillSrc,
2369 unsigned Opcode,
2370 ArrayRef<unsigned> Indices) const {
2371 assert(Subtarget.hasNEON() && "Unexpected register copy without NEON");
2372 const TargetRegisterInfo *TRI = &getRegisterInfo();
2373 uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
2374 uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
2375 unsigned NumRegs = Indices.size();
2377 int SubReg = 0, End = NumRegs, Incr = 1;
2378 if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
2379 SubReg = NumRegs - 1;
2380 End = -1;
2381 Incr = -1;
2382 }
2384 for (; SubReg != End; SubReg += Incr) {
2385 const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
2386 AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
2387 AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
2388 AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
2389 }
2390 }
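// Example expansion, added for exposition (not in the original source):
// copying the Q-register pair q2_q3 into q0_q1 with Opcode == ORRv16i8 emits
//
//   orr v0.16b, v2.16b, v2.16b
//   orr v1.16b, v3.16b, v3.16b
//
// and the loop runs in reverse when forwardCopyWillClobberTuple detects an
// overlap between the two tuples.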
2392 void AArch64InstrInfo::copyGPRRegTuple(MachineBasicBlock &MBB,
2393 MachineBasicBlock::iterator I,
2394 DebugLoc DL, unsigned DestReg,
2395 unsigned SrcReg, bool KillSrc,
2396 unsigned Opcode, unsigned ZeroReg,
2397 llvm::ArrayRef<unsigned> Indices) const {
2398 const TargetRegisterInfo *TRI = &getRegisterInfo();
2399 unsigned NumRegs = Indices.size();
2401 #ifndef NDEBUG
2402 uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
2403 uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
2404 assert(DestEncoding % NumRegs == 0 && SrcEncoding % NumRegs == 0 &&
2405 "GPR reg sequences should not be able to overlap");
2406 #endif
2408 for (unsigned SubReg = 0; SubReg != NumRegs; ++SubReg) {
2409 const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
2410 AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
2411 MIB.addReg(ZeroReg);
2412 AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
2413 MIB.addImm(0);
2414 }
2415 }
2417 void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
2418 MachineBasicBlock::iterator I,
2419 const DebugLoc &DL, unsigned DestReg,
2420 unsigned SrcReg, bool KillSrc) const {
2421 if (AArch64::GPR32spRegClass.contains(DestReg) &&
2422 (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
2423 const TargetRegisterInfo *TRI = &getRegisterInfo();
2425 if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
2426 // If either operand is WSP, expand to ADD #0.
2427 if (Subtarget.hasZeroCycleRegMove()) {
2428 // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
2429 unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
2430 &AArch64::GPR64spRegClass);
2431 unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
2432 &AArch64::GPR64spRegClass);
2433 // This instruction is reading and writing X registers. This may upset
2434 // the register scavenger and machine verifier, so we need to indicate
2435 // that we are reading an undefined value from SrcRegX, but a proper
2436 // value from SrcReg.
2437 BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
2438 .addReg(SrcRegX, RegState::Undef)
2439 .addImm(0)
2440 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
2441 .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
2442 } else {
2443 BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
2444 .addReg(SrcReg, getKillRegState(KillSrc))
2445 .addImm(0)
2446 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2447 }
2448 } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroingGP()) {
2449 BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg)
2450 .addImm(0)
2451 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2452 } else {
2453 if (Subtarget.hasZeroCycleRegMove()) {
2454 // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
2455 unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
2456 &AArch64::GPR64spRegClass);
2457 unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
2458 &AArch64::GPR64spRegClass);
2459 // This instruction is reading and writing X registers. This may upset
2460 // the register scavenger and machine verifier, so we need to indicate
2461 // that we are reading an undefined value from SrcRegX, but a proper
2462 // value from SrcReg.
2463 BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
2464 .addReg(AArch64::XZR)
2465 .addReg(SrcRegX, RegState::Undef)
2466 .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
2467 } else {
2468 // Otherwise, expand to ORR WZR.
2469 BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
2470 .addReg(AArch64::WZR)
2471 .addReg(SrcReg, getKillRegState(KillSrc));
2472 }
2473 }
2474 return;
2475 }
2477 if (AArch64::GPR64spRegClass.contains(DestReg) &&
2478 (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
2479 if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
2480 // If either operand is SP, expand to ADD #0.
2481 BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
2482 .addReg(SrcReg, getKillRegState(KillSrc))
2483 .addImm(0)
2484 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2485 } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroingGP()) {
2486 BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg)
2487 .addImm(0)
2488 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2489 } else {
2490 // Otherwise, expand to ORR XZR.
2491 BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
2492 .addReg(AArch64::XZR)
2493 .addReg(SrcReg, getKillRegState(KillSrc));
2494 }
2495 return;
2496 }
2498 // Copy a DDDD register quad by copying the individual sub-registers.
2499 if (AArch64::DDDDRegClass.contains(DestReg) &&
2500 AArch64::DDDDRegClass.contains(SrcReg)) {
2501 static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1,
2502 AArch64::dsub2, AArch64::dsub3};
2503 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
2504 Indices);
2505 return;
2506 }
2508 // Copy a DDD register triple by copying the individual sub-registers.
2509 if (AArch64::DDDRegClass.contains(DestReg) &&
2510 AArch64::DDDRegClass.contains(SrcReg)) {
2511 static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1,
2512 AArch64::dsub2};
2513 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
2514 Indices);
2515 return;
2516 }
2518 // Copy a DD register pair by copying the individual sub-registers.
2519 if (AArch64::DDRegClass.contains(DestReg) &&
2520 AArch64::DDRegClass.contains(SrcReg)) {
2521 static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1};
2522 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
2523 Indices);
2524 return;
2525 }
2527 // Copy a QQQQ register quad by copying the individual sub-registers.
2528 if (AArch64::QQQQRegClass.contains(DestReg) &&
2529 AArch64::QQQQRegClass.contains(SrcReg)) {
2530 static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1,
2531 AArch64::qsub2, AArch64::qsub3};
2532 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
2533 Indices);
2534 return;
2535 }
2537 // Copy a QQQ register triple by copying the individual sub-registers.
2538 if (AArch64::QQQRegClass.contains(DestReg) &&
2539 AArch64::QQQRegClass.contains(SrcReg)) {
2540 static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1,
2541 AArch64::qsub2};
2542 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
2543 Indices);
2544 return;
2545 }
2547 // Copy a QQ register pair by copying the individual sub-registers.
2548 if (AArch64::QQRegClass.contains(DestReg) &&
2549 AArch64::QQRegClass.contains(SrcReg)) {
2550 static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1};
2551 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
2552 Indices);
2553 return;
2554 }
2556 if (AArch64::XSeqPairsClassRegClass.contains(DestReg) &&
2557 AArch64::XSeqPairsClassRegClass.contains(SrcReg)) {
2558 static const unsigned Indices[] = {AArch64::sube64, AArch64::subo64};
2559 copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRXrs,
2560 AArch64::XZR, Indices);
2561 return;
2562 }
2564 if (AArch64::WSeqPairsClassRegClass.contains(DestReg) &&
2565 AArch64::WSeqPairsClassRegClass.contains(SrcReg)) {
2566 static const unsigned Indices[] = {AArch64::sube32, AArch64::subo32};
2567 copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRWrs,
2568 AArch64::WZR, Indices);
2569 return;
2570 }
2572 if (AArch64::FPR128RegClass.contains(DestReg) &&
2573 AArch64::FPR128RegClass.contains(SrcReg)) {
2574 if (Subtarget.hasNEON()) {
2575 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2576 .addReg(SrcReg)
2577 .addReg(SrcReg, getKillRegState(KillSrc));
2578 } else {
2579 BuildMI(MBB, I, DL, get(AArch64::STRQpre))
2580 .addReg(AArch64::SP, RegState::Define)
2581 .addReg(SrcReg, getKillRegState(KillSrc))
2582 .addReg(AArch64::SP)
2583 .addImm(-16);
2584 BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
2585 .addReg(AArch64::SP, RegState::Define)
2586 .addReg(DestReg, RegState::Define)
2587 .addReg(AArch64::SP)
2588 .addImm(16);
2589 }
2590 return;
2591 }
2593 if (AArch64::FPR64RegClass.contains(DestReg) &&
2594 AArch64::FPR64RegClass.contains(SrcReg)) {
2595 if (Subtarget.hasNEON()) {
2596 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
2597 &AArch64::FPR128RegClass);
2598 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
2599 &AArch64::FPR128RegClass);
2600 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2601 .addReg(SrcReg)
2602 .addReg(SrcReg, getKillRegState(KillSrc));
2603 } else {
2604 BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
2605 .addReg(SrcReg, getKillRegState(KillSrc));
2606 }
2607 return;
2608 }
2610 if (AArch64::FPR32RegClass.contains(DestReg) &&
2611 AArch64::FPR32RegClass.contains(SrcReg)) {
2612 if (Subtarget.hasNEON()) {
2613 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
2614 &AArch64::FPR128RegClass);
2615 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
2616 &AArch64::FPR128RegClass);
2617 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2618 .addReg(SrcReg)
2619 .addReg(SrcReg, getKillRegState(KillSrc));
2620 } else {
2621 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2622 .addReg(SrcReg, getKillRegState(KillSrc));
2623 }
2624 return;
2625 }
2627 if (AArch64::FPR16RegClass.contains(DestReg) &&
2628 AArch64::FPR16RegClass.contains(SrcReg)) {
2629 if (Subtarget.hasNEON()) {
2630 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
2631 &AArch64::FPR128RegClass);
2632 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
2633 &AArch64::FPR128RegClass);
2634 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2635 .addReg(SrcReg)
2636 .addReg(SrcReg, getKillRegState(KillSrc));
2637 } else {
2638 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
2639 &AArch64::FPR32RegClass);
2640 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
2641 &AArch64::FPR32RegClass);
2642 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2643 .addReg(SrcReg, getKillRegState(KillSrc));
2644 }
2645 return;
2646 }
2648 if (AArch64::FPR8RegClass.contains(DestReg) &&
2649 AArch64::FPR8RegClass.contains(SrcReg)) {
2650 if (Subtarget.hasNEON()) {
2651 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
2652 &AArch64::FPR128RegClass);
2653 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
2654 &AArch64::FPR128RegClass);
2655 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2656 .addReg(SrcReg)
2657 .addReg(SrcReg, getKillRegState(KillSrc));
2658 } else {
2659 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
2660 &AArch64::FPR32RegClass);
2661 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
2662 &AArch64::FPR32RegClass);
2663 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2664 .addReg(SrcReg, getKillRegState(KillSrc));
2665 }
2666 return;
2667 }
2669 // Copies between GPR64 and FPR64.
2670 if (AArch64::FPR64RegClass.contains(DestReg) &&
2671 AArch64::GPR64RegClass.contains(SrcReg)) {
2672 BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
2673 .addReg(SrcReg, getKillRegState(KillSrc));
2674 return;
2675 }
2676 if (AArch64::GPR64RegClass.contains(DestReg) &&
2677 AArch64::FPR64RegClass.contains(SrcReg)) {
2678 BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
2679 .addReg(SrcReg, getKillRegState(KillSrc));
2680 return;
2681 }
2682 // Copies between GPR32 and FPR32.
2683 if (AArch64::FPR32RegClass.contains(DestReg) &&
2684 AArch64::GPR32RegClass.contains(SrcReg)) {
2685 BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
2686 .addReg(SrcReg, getKillRegState(KillSrc));
2687 return;
2688 }
2689 if (AArch64::GPR32RegClass.contains(DestReg) &&
2690 AArch64::FPR32RegClass.contains(SrcReg)) {
2691 BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
2692 .addReg(SrcReg, getKillRegState(KillSrc));
2693 return;
2694 }
2696 if (DestReg == AArch64::NZCV) {
2697 assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
2698 BuildMI(MBB, I, DL, get(AArch64::MSR))
2699 .addImm(AArch64SysReg::NZCV)
2700 .addReg(SrcReg, getKillRegState(KillSrc))
2701 .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
2702 return;
2703 }
2705 if (SrcReg == AArch64::NZCV) {
2706 assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
2707 BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
2708 .addImm(AArch64SysReg::NZCV)
2709 .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
2710 return;
2711 }
2713 llvm_unreachable("unimplemented reg-to-reg copy");
2714 }
2716 static void storeRegPairToStackSlot(const TargetRegisterInfo &TRI,
2717 MachineBasicBlock &MBB,
2718 MachineBasicBlock::iterator InsertBefore,
2719 const MCInstrDesc &MCID,
2720 unsigned SrcReg, bool IsKill,
2721 unsigned SubIdx0, unsigned SubIdx1, int FI,
2722 MachineMemOperand *MMO) {
2723 unsigned SrcReg0 = SrcReg;
2724 unsigned SrcReg1 = SrcReg;
2725 if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
2726 SrcReg0 = TRI.getSubReg(SrcReg, SubIdx0);
2727 SubIdx0 = 0;
2728 SrcReg1 = TRI.getSubReg(SrcReg, SubIdx1);
2729 SubIdx1 = 0;
2730 }
2731 BuildMI(MBB, InsertBefore, DebugLoc(), MCID)
2732 .addReg(SrcReg0, getKillRegState(IsKill), SubIdx0)
2733 .addReg(SrcReg1, getKillRegState(IsKill), SubIdx1)
2734 .addFrameIndex(FI)
2735 .addImm(0)
2736 .addMemOperand(MMO);
2737 }
2739 void AArch64InstrInfo::storeRegToStackSlot(
2740 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
2741 bool isKill, int FI, const TargetRegisterClass *RC,
2742 const TargetRegisterInfo *TRI) const {
2743 MachineFunction &MF = *MBB.getParent();
2744 MachineFrameInfo &MFI = MF.getFrameInfo();
2745 unsigned Align = MFI.getObjectAlignment(FI);
2747 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
2748 MachineMemOperand *MMO = MF.getMachineMemOperand(
2749 PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
2750 unsigned Opc = 0;
2751 bool Offset = true;
2752 switch (TRI->getSpillSize(*RC)) {
2753 case 1:
2754 if (AArch64::FPR8RegClass.hasSubClassEq(RC))
2755 Opc = AArch64::STRBui;
2756 break;
2757 case 2:
2758 if (AArch64::FPR16RegClass.hasSubClassEq(RC))
2759 Opc = AArch64::STRHui;
2760 break;
2761 case 4:
2762 if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
2763 Opc = AArch64::STRWui;
2764 if (TargetRegisterInfo::isVirtualRegister(SrcReg))
2765 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
2766 else
2767 assert(SrcReg != AArch64::WSP);
2768 } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
2769 Opc = AArch64::STRSui;
2770 break;
2771 case 8:
2772 if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
2773 Opc = AArch64::STRXui;
2774 if (TargetRegisterInfo::isVirtualRegister(SrcReg))
2775 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
2776 else
2777 assert(SrcReg != AArch64::SP);
2778 } else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) {
2779 Opc = AArch64::STRDui;
2780 } else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) {
2781 storeRegPairToStackSlot(getRegisterInfo(), MBB, MBBI,
2782 get(AArch64::STPWi), SrcReg, isKill,
2783 AArch64::sube32, AArch64::subo32, FI, MMO);
2784 return;
2785 }
2786 break;
2787 case 16:
2788 if (AArch64::FPR128RegClass.hasSubClassEq(RC))
2789 Opc = AArch64::STRQui;
2790 else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
2791 assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
2792 Opc = AArch64::ST1Twov1d;
2793 Offset = false;
2794 } else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) {
2795 storeRegPairToStackSlot(getRegisterInfo(), MBB, MBBI,
2796 get(AArch64::STPXi), SrcReg, isKill,
2797 AArch64::sube64, AArch64::subo64, FI, MMO);
2798 return;
2799 }
2800 break;
2801 case 24:
2802 if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
2803 assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
2804 Opc = AArch64::ST1Threev1d;
2805 Offset = false;
2806 }
2807 break;
2808 case 32:
2809 if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
2810 assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
2811 Opc = AArch64::ST1Fourv1d;
2812 Offset = false;
2813 } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
2814 assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
2815 Opc = AArch64::ST1Twov2d;
2816 Offset = false;
2817 }
2818 break;
2819 case 48:
2820 if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
2821 assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
2822 Opc = AArch64::ST1Threev2d;
2823 Offset = false;
2824 }
2825 break;
2826 case 64:
2827 if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
2828 assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
2829 Opc = AArch64::ST1Fourv2d;
2830 Offset = false;
2831 }
2832 break;
2833 }
2834 assert(Opc && "Unknown register class");
2836 const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc))
2837 .addReg(SrcReg, getKillRegState(isKill))
2838 .addFrameIndex(FI);
2840 if (Offset)
2841 MI.addImm(0);
2842 MI.addMemOperand(MMO);
2843 }
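// Example, added for exposition (not in the original source): spilling a
// virtual GPR64 register to frame index 0 selects STRXui, roughly
//
//   STRXui %0, %stack.0, 0 :: (store 8 into %stack.0)
//
// whereas a QQ tuple takes the ST1Twov2d path, whose addressing mode cannot
// encode an immediate, hence Offset == false and no trailing 0 operand.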
2845 static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI,
2846 MachineBasicBlock &MBB,
2847 MachineBasicBlock::iterator InsertBefore,
2848 const MCInstrDesc &MCID,
2849 unsigned DestReg, unsigned SubIdx0,
2850 unsigned SubIdx1, int FI,
2851 MachineMemOperand *MMO) {
2852 unsigned DestReg0 = DestReg;
2853 unsigned DestReg1 = DestReg;
2854 bool IsUndef = true;
2855 if (TargetRegisterInfo::isPhysicalRegister(DestReg)) {
2856 DestReg0 = TRI.getSubReg(DestReg, SubIdx0);
2857 SubIdx0 = 0;
2858 DestReg1 = TRI.getSubReg(DestReg, SubIdx1);
2859 SubIdx1 = 0;
2860 IsUndef = false;
2861 }
2862 BuildMI(MBB, InsertBefore, DebugLoc(), MCID)
2863 .addReg(DestReg0, RegState::Define | getUndefRegState(IsUndef), SubIdx0)
2864 .addReg(DestReg1, RegState::Define | getUndefRegState(IsUndef), SubIdx1)
2865 .addFrameIndex(FI)
2866 .addImm(0)
2867 .addMemOperand(MMO);
2868 }
2870 void AArch64InstrInfo::loadRegFromStackSlot(
2871 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
2872 int FI, const TargetRegisterClass *RC,
2873 const TargetRegisterInfo *TRI) const {
2874 MachineFunction &MF = *MBB.getParent();
2875 MachineFrameInfo &MFI = MF.getFrameInfo();
2876 unsigned Align = MFI.getObjectAlignment(FI);
2877 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
2878 MachineMemOperand *MMO = MF.getMachineMemOperand(
2879 PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);
2881 unsigned Opc = 0;
2882 bool Offset = true;
2883 switch (TRI->getSpillSize(*RC)) {
2884 case 1:
2885 if (AArch64::FPR8RegClass.hasSubClassEq(RC))
2886 Opc = AArch64::LDRBui;
2887 break;
2888 case 2:
2889 if (AArch64::FPR16RegClass.hasSubClassEq(RC))
2890 Opc = AArch64::LDRHui;
2891 break;
2892 case 4:
2893 if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
2894 Opc = AArch64::LDRWui;
2895 if (TargetRegisterInfo::isVirtualRegister(DestReg))
2896 MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
2897 else
2898 assert(DestReg != AArch64::WSP);
2899 } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
2900 Opc = AArch64::LDRSui;
2901 break;
2902 case 8:
2903 if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
2904 Opc = AArch64::LDRXui;
2905 if (TargetRegisterInfo::isVirtualRegister(DestReg))
2906 MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
2907 else
2908 assert(DestReg != AArch64::SP);
2909 } else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) {
2910 Opc = AArch64::LDRDui;
2911 } else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) {
2912 loadRegPairFromStackSlot(getRegisterInfo(), MBB, MBBI,
2913 get(AArch64::LDPWi), DestReg, AArch64::sube32,
2914 AArch64::subo32, FI, MMO);
2915 return;
2916 }
2917 break;
2918 case 16:
2919 if (AArch64::FPR128RegClass.hasSubClassEq(RC))
2920 Opc = AArch64::LDRQui;
2921 else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
2922 assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
2923 Opc = AArch64::LD1Twov1d;
2924 Offset = false;
2925 } else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) {
2926 loadRegPairFromStackSlot(getRegisterInfo(), MBB, MBBI,
2927 get(AArch64::LDPXi), DestReg, AArch64::sube64,
2928 AArch64::subo64, FI, MMO);
2929 return;
2930 }
2931 break;
2932 case 24:
2933 if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
2934 assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
2935 Opc = AArch64::LD1Threev1d;
2936 Offset = false;
2937 }
2938 break;
2939 case 32:
2940 if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
2941 assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
2942 Opc = AArch64::LD1Fourv1d;
2943 Offset = false;
2944 } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
2945 assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
2946 Opc = AArch64::LD1Twov2d;
2947 Offset = false;
2948 }
2949 break;
2950 case 48:
2951 if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
2952 assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
2953 Opc = AArch64::LD1Threev2d;
2954 Offset = false;
2955 }
2956 break;
2957 case 64:
2958 if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
2959 assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
2960 Opc = AArch64::LD1Fourv2d;
2961 Offset = false;
2962 }
2963 break;
2964 }
2965 assert(Opc && "Unknown register class");
2967 const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc))
2968 .addReg(DestReg, getDefRegState(true))
2969 .addFrameIndex(FI);
2970 if (Offset)
2971 MI.addImm(0);
2972 MI.addMemOperand(MMO);
2973 }
2975 void llvm::emitFrameOffset(MachineBasicBlock &MBB,
2976 MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
2977 unsigned DestReg, unsigned SrcReg, int Offset,
2978 const TargetInstrInfo *TII,
2979 MachineInstr::MIFlag Flag, bool SetNZCV,
2980 bool NeedsWinCFI, bool *HasWinCFI) {
2981 if (DestReg == SrcReg && Offset == 0)
2982 return;
2984 assert((DestReg != AArch64::SP || Offset % 16 == 0) &&
2985 "SP increment/decrement not 16-byte aligned");
2987 bool isSub = Offset < 0;
2988 if (isSub)
2989 Offset = -Offset;
2991 // FIXME: If the offset won't fit in 24-bits, compute the offset into a
2992 // scratch register. If DestReg is a virtual register, use it as the
2993 // scratch register; otherwise, create a new virtual register (to be
2994 // replaced by the scavenger at the end of PEI). That case can be optimized
2995 // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
2996 // register can be loaded with offset%8 and the add/sub can use an extending
2997 // instruction with LSL#3.
2998 // Currently the function handles any offsets but generates a poor sequence
2999 // of code.
3000 // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");
3002 unsigned Opc;
3003 if (SetNZCV)
3004 Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
3005 else
3006 Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
3007 const unsigned MaxEncoding = 0xfff;
3008 const unsigned ShiftSize = 12;
3009 const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
3010 while (((unsigned)Offset) >= (1 << ShiftSize)) {
3011 unsigned ThisVal;
3012 if (((unsigned)Offset) > MaxEncodableValue) {
3013 ThisVal = MaxEncodableValue;
3014 } else {
3015 ThisVal = Offset & MaxEncodableValue;
3016 }
3017 assert((ThisVal >> ShiftSize) <= MaxEncoding &&
3018 "Encoding cannot handle value that big");
3019 BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
3020 .addReg(SrcReg)
3021 .addImm(ThisVal >> ShiftSize)
3022 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
3023 .setMIFlag(Flag);
3025 if (NeedsWinCFI && SrcReg == AArch64::SP && DestReg == AArch64::SP) {
3026 if (HasWinCFI)
3027 *HasWinCFI = true;
3028 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
3029 .addImm(ThisVal)
3030 .setMIFlag(Flag);
3031 }
3033 SrcReg = DestReg;
3034 Offset -= ThisVal;
3035 if (Offset == 0)
3036 return;
3037 }
3038 BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
3039 .addReg(SrcReg)
3040 .addImm(Offset)
3041 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
3042 .setMIFlag(Flag);
3044 if (NeedsWinCFI) {
3045 if ((DestReg == AArch64::FP && SrcReg == AArch64::SP) ||
3046 (SrcReg == AArch64::FP && DestReg == AArch64::SP)) {
3047 if (HasWinCFI)
3048 *HasWinCFI = true;
3049 if (Offset == 0)
3050 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_SetFP)).
3051 setMIFlag(Flag);
3052 else
3053 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_AddFP)).
3054 addImm(Offset).setMIFlag(Flag);
3055 } else if (DestReg == AArch64::SP) {
3056 if (HasWinCFI)
3057 *HasWinCFI = true;
3058 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc)).
3059 addImm(Offset).setMIFlag(Flag);
3060 }
3061 }
3062 }
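// Worked example, added for exposition (not in the original source): for
// Offset == 0x100234 the loop above first emits
//
//   add sp, sp, #0x100, lsl #12   // consumes 0x100000
//
// and the tail then emits "add sp, sp, #0x234", because the remaining 0x234
// fits in the unshifted 12-bit immediate field.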
3064 MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
3065 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
3066 MachineBasicBlock::iterator InsertPt, int FrameIndex,
3067 LiveIntervals *LIS, VirtRegMap *VRM) const {
3068 // This is a bit of a hack. Consider this instruction:
3070 // %0 = COPY %sp; GPR64all:%0
3072 // We explicitly chose GPR64all for the virtual register so such a copy might
3073 // be eliminated by RegisterCoalescer. However, that may not be possible, and
3074 // %0 may even spill. We can't spill %sp, and since it is in the GPR64all
3075 // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
3077 // To prevent that, we are going to constrain the %0 register class here.
3079 // <rdar://problem/11522048>
3081 if (MI.isFullCopy()) {
3082 unsigned DstReg = MI.getOperand(0).getReg();
3083 unsigned SrcReg = MI.getOperand(1).getReg();
3084 if (SrcReg == AArch64::SP &&
3085 TargetRegisterInfo::isVirtualRegister(DstReg)) {
3086 MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
3087 return nullptr;
3088 }
3089 if (DstReg == AArch64::SP &&
3090 TargetRegisterInfo::isVirtualRegister(SrcReg)) {
3091 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
3092 return nullptr;
3093 }
3094 }
3096 // Handle the case where a copy is being spilled or filled but the source
3097 // and destination register class don't match. For example:
3099 // %0 = COPY %xzr; GPR64common:%0
3101 // In this case we can still safely fold away the COPY and generate the
3102 // following spill code:
3104 // STRXui %xzr, %stack.0
3106 // This also eliminates spilled cross register class COPYs (e.g. between x and
3107 // d regs) of the same size. For example:
3109 // %0 = COPY %1; GPR64:%0, FPR64:%1
3111 // will be filled as
3113 // LDRDui %0, fi<#0>
3115 // instead of
3117 // LDRXui %Temp, fi<#0>
3118 // %0 = FMOV %Temp
3120 if (MI.isCopy() && Ops.size() == 1 &&
3121 // Make sure we're only folding the explicit COPY defs/uses.
3122 (Ops[0] == 0 || Ops[0] == 1)) {
3123 bool IsSpill = Ops[0] == 0;
3124 bool IsFill = !IsSpill;
3125 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
3126 const MachineRegisterInfo &MRI = MF.getRegInfo();
3127 MachineBasicBlock &MBB = *MI.getParent();
3128 const MachineOperand &DstMO = MI.getOperand(0);
3129 const MachineOperand &SrcMO = MI.getOperand(1);
3130 unsigned DstReg = DstMO.getReg();
3131 unsigned SrcReg = SrcMO.getReg();
3132 // This is slightly expensive to compute for physical regs since
3133 // getMinimalPhysRegClass is slow.
3134 auto getRegClass = [&](unsigned Reg) {
3135 return TargetRegisterInfo::isVirtualRegister(Reg)
3136 ? MRI.getRegClass(Reg)
3137 : TRI.getMinimalPhysRegClass(Reg);
3138 };
3140 if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
3141 assert(TRI.getRegSizeInBits(*getRegClass(DstReg)) ==
3142 TRI.getRegSizeInBits(*getRegClass(SrcReg)) &&
3143 "Mismatched register size in non subreg COPY");
3144 if (IsSpill)
3145 storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
3146 getRegClass(SrcReg), &TRI);
3147 else
3148 loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex,
3149 getRegClass(DstReg), &TRI);
3150 return &*--InsertPt;
3151 }
3153 // Handle cases like spilling def of:
3155 // %0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%0
3157 // where the physical register source can be widened and stored to the full
3158 // virtual reg destination stack slot, in this case producing:
3160 // STRXui %xzr, %stack.0
3162 if (IsSpill && DstMO.isUndef() &&
3163 TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
3164 assert(SrcMO.getSubReg() == 0 &&
3165 "Unexpected subreg on physical register");
3166 const TargetRegisterClass *SpillRC;
3167 unsigned SpillSubreg;
3168 switch (DstMO.getSubReg()) {
3169 default:
3170 SpillRC = nullptr;
3171 break;
3172 case AArch64::sub_32:
3173 case AArch64::ssub:
3174 if (AArch64::GPR32RegClass.contains(SrcReg)) {
3175 SpillRC = &AArch64::GPR64RegClass;
3176 SpillSubreg = AArch64::sub_32;
3177 } else if (AArch64::FPR32RegClass.contains(SrcReg)) {
3178 SpillRC = &AArch64::FPR64RegClass;
3179 SpillSubreg = AArch64::ssub;
3180 } else
3181 SpillRC = nullptr;
3182 break;
3183 case AArch64::dsub:
3184 if (AArch64::FPR64RegClass.contains(SrcReg)) {
3185 SpillRC = &AArch64::FPR128RegClass;
3186 SpillSubreg = AArch64::dsub;
3187 } else
3188 SpillRC = nullptr;
3189 break;
3190 }
3192 if (SpillRC)
3193 if (unsigned WidenedSrcReg =
3194 TRI.getMatchingSuperReg(SrcReg, SpillSubreg, SpillRC)) {
3195 storeRegToStackSlot(MBB, InsertPt, WidenedSrcReg, SrcMO.isKill(),
3196 FrameIndex, SpillRC, &TRI);
3197 return &*--InsertPt;
3198 }
3199 }
3201 // Handle cases like filling use of:
3203 // %0:sub_32<def,read-undef> = COPY %1; GPR64:%0, GPR32:%1
3205 // where we can load the full virtual reg source stack slot, into the subreg
3206 // destination, in this case producing:
3208 // LDRWui %0:sub_32<def,read-undef>, %stack.0
3210 if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
3211 const TargetRegisterClass *FillRC;
3212 switch (DstMO.getSubReg()) {
3213 default:
3214 FillRC = nullptr;
3215 break;
3216 case AArch64::sub_32:
3217 FillRC = &AArch64::GPR32RegClass;
3218 break;
3219 case AArch64::ssub:
3220 FillRC = &AArch64::FPR32RegClass;
3221 break;
3222 case AArch64::dsub:
3223 FillRC = &AArch64::FPR64RegClass;
3224 break;
3225 }
3227 if (FillRC) {
3228 assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) ==
3229 TRI.getRegSizeInBits(*FillRC) &&
3230 "Mismatched regclass size on folded subreg COPY");
3231 loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI);
3232 MachineInstr &LoadMI = *--InsertPt;
3233 MachineOperand &LoadDst = LoadMI.getOperand(0);
3234 assert(LoadDst.getSubReg() == 0 && "unexpected subreg on fill load");
3235 LoadDst.setSubReg(DstMO.getSubReg());
3236 LoadDst.setIsUndef();
3237 return &LoadMI;
3238 }
3239 }
3240 }
3242 // Cannot fold.
3243 return nullptr;
3244 }
3246 int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
3247 bool *OutUseUnscaledOp,
3248 unsigned *OutUnscaledOp,
3249 int *EmittableOffset) {
3250 // Set output values in case of early exit.
3251 if (EmittableOffset)
3252 *EmittableOffset = 0;
3253 if (OutUseUnscaledOp)
3254 *OutUseUnscaledOp = false;
3255 if (OutUnscaledOp)
3256 *OutUnscaledOp = 0;
3258 // Exit early for structured vector spills/fills as they can't take an
3259 // immediate offset.
3260 switch (MI.getOpcode()) {
3261 default:
3262 break;
3263 case AArch64::LD1Twov2d:
3264 case AArch64::LD1Threev2d:
3265 case AArch64::LD1Fourv2d:
3266 case AArch64::LD1Twov1d:
3267 case AArch64::LD1Threev1d:
3268 case AArch64::LD1Fourv1d:
3269 case AArch64::ST1Twov2d:
3270 case AArch64::ST1Threev2d:
3271 case AArch64::ST1Fourv2d:
3272 case AArch64::ST1Twov1d:
3273 case AArch64::ST1Threev1d:
3274 case AArch64::ST1Fourv1d:
3275 case AArch64::IRG:
3276 case AArch64::IRGstack:
3277 return AArch64FrameOffsetCannotUpdate;
3278 }
3280 // Get the min/max offset and the scale.
3281 unsigned Scale, Width;
3282 int64_t MinOff, MaxOff;
3283 if (!AArch64InstrInfo::getMemOpInfo(MI.getOpcode(), Scale, Width, MinOff,
3284 MaxOff))
3285 llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
3287 // Construct the complete offset.
3288 const MachineOperand &ImmOpnd =
3289 MI.getOperand(AArch64InstrInfo::getLoadStoreImmIdx(MI.getOpcode()));
3290 Offset += ImmOpnd.getImm() * Scale;
3292 // If the offset doesn't match the scale, we rewrite the instruction to
3293 // use the unscaled instruction instead. Likewise, if we have a negative
3294 // offset and there is an unscaled op to use.
3295 Optional<unsigned> UnscaledOp =
3296 AArch64InstrInfo::getUnscaledLdSt(MI.getOpcode());
3297 bool useUnscaledOp = UnscaledOp && (Offset % Scale || Offset < 0);
3298 if (useUnscaledOp &&
3299 !AArch64InstrInfo::getMemOpInfo(*UnscaledOp, Scale, Width, MinOff, MaxOff))
3300 llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
3302 int64_t Remainder = Offset % Scale;
3303 assert(!(Remainder && useUnscaledOp) &&
3304 "Cannot have remainder when using unscaled op");
3306 assert(MinOff < MaxOff && "Unexpected Min/Max offsets");
3307 int NewOffset = Offset / Scale;
3308 if (MinOff <= NewOffset && NewOffset <= MaxOff)
3309 Offset = Remainder;
3310 else {
3311 NewOffset = NewOffset < 0 ? MinOff : MaxOff;
3312 Offset = Offset - NewOffset * Scale + Remainder;
3313 }
3315 if (EmittableOffset)
3316 *EmittableOffset = NewOffset;
3317 if (OutUseUnscaledOp)
3318 *OutUseUnscaledOp = useUnscaledOp;
3319 if (OutUnscaledOp && UnscaledOp)
3320 *OutUnscaledOp = *UnscaledOp;
3322 return AArch64FrameOffsetCanUpdate |
3323 (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
3324 }
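// Worked example, added for exposition (not in the original source): an
// LDRXui whose total Offset is 4104 has Scale == 8 and MaxOff == 4095, so
// NewOffset == 513 is emittable, the residual Offset becomes 0, and the
// result is AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal. With
// Offset == 4 instead, 4 % 8 != 0 forces the unscaled LDURXi form.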
3326 bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
3327 unsigned FrameReg, int &Offset,
3328 const AArch64InstrInfo *TII) {
3329 unsigned Opcode = MI.getOpcode();
3330 unsigned ImmIdx = FrameRegIdx + 1;
3332 if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
3333 Offset += MI.getOperand(ImmIdx).getImm();
3334 emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
3335 MI.getOperand(0).getReg(), FrameReg, Offset, TII,
3336 MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
3337 MI.eraseFromParent();
3338 Offset = 0;
3339 return true;
3340 }
3342 int NewOffset;
3343 unsigned UnscaledOp;
3344 bool UseUnscaledOp;
3345 int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
3346 &UnscaledOp, &NewOffset);
3347 if (Status & AArch64FrameOffsetCanUpdate) {
3348 if (Status & AArch64FrameOffsetIsLegal)
3349 // Replace the FrameIndex with FrameReg.
3350 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
3351 if (UseUnscaledOp)
3352 MI.setDesc(TII->get(UnscaledOp));
3354 MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
3355 return Offset == 0;
3356 }
3358 return false;
3359 }
3361 void AArch64InstrInfo::getNoop(MCInst &NopInst) const {
3362 NopInst.setOpcode(AArch64::HINT);
3363 NopInst.addOperand(MCOperand::createImm(0));
3364 }
3366 // AArch64 supports MachineCombiner.
3367 bool AArch64InstrInfo::useMachineCombiner() const { return true; }
3369 // True when Opc sets flags.
3370 static bool isCombineInstrSettingFlag(unsigned Opc) {
3371 switch (Opc) {
3372 case AArch64::ADDSWrr:
3373 case AArch64::ADDSWri:
3374 case AArch64::ADDSXrr:
3375 case AArch64::ADDSXri:
3376 case AArch64::SUBSWrr:
3377 case AArch64::SUBSXrr:
3378 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3379 case AArch64::SUBSWri:
3380 case AArch64::SUBSXri:
3381 return true;
3382 default:
3383 break;
3384 }
3385 return false;
3386 }
3388 // 32b Opcodes that can be combined with a MUL
3389 static bool isCombineInstrCandidate32(unsigned Opc) {
3390 switch (Opc) {
3391 case AArch64::ADDWrr:
3392 case AArch64::ADDWri:
3393 case AArch64::SUBWrr:
3394 case AArch64::ADDSWrr:
3395 case AArch64::ADDSWri:
3396 case AArch64::SUBSWrr:
3397 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3398 case AArch64::SUBWri:
3399 case AArch64::SUBSWri:
3400 return true;
3401 default:
3402 break;
3403 }
3404 return false;
3405 }
3407 // 64b Opcodes that can be combined with a MUL
3408 static bool isCombineInstrCandidate64(unsigned Opc) {
3409 switch (Opc) {
3410 case AArch64::ADDXrr:
3411 case AArch64::ADDXri:
3412 case AArch64::SUBXrr:
3413 case AArch64::ADDSXrr:
3414 case AArch64::ADDSXri:
3415 case AArch64::SUBSXrr:
3416 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3417 case AArch64::SUBXri:
3418 case AArch64::SUBSXri:
3419 return true;
3420 default:
3421 break;
3422 }
3423 return false;
3424 }
3426 // FP Opcodes that can be combined with a FMUL
3427 static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {
3428 switch (Inst.getOpcode()) {
3429 default:
3430 break;
3431 case AArch64::FADDSrr:
3432 case AArch64::FADDDrr:
3433 case AArch64::FADDv2f32:
3434 case AArch64::FADDv2f64:
3435 case AArch64::FADDv4f32:
3436 case AArch64::FSUBSrr:
3437 case AArch64::FSUBDrr:
3438 case AArch64::FSUBv2f32:
3439 case AArch64::FSUBv2f64:
3440 case AArch64::FSUBv4f32:
3441 TargetOptions Options = Inst.getParent()->getParent()->getTarget().Options;
3442 return (Options.UnsafeFPMath ||
3443 Options.AllowFPOpFusion == FPOpFusion::Fast);
3444 }
3445 return false;
3446 }
3448 // Opcodes that can be combined with a MUL
3449 static bool isCombineInstrCandidate(unsigned Opc) {
3450 return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
3451 }
3454 // Utility routine that checks if \param MO is defined by an
3455 // \param CombineOpc instruction in the basic block \param MBB
3456 static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
3457 unsigned CombineOpc, unsigned ZeroReg = 0,
3458 bool CheckZeroReg = false) {
3459 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3460 MachineInstr *MI = nullptr;
3462 if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
3463 MI = MRI.getUniqueVRegDef(MO.getReg());
3464 // And it needs to be in the trace (otherwise, it won't have a depth).
3465 if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
3466 return false;
3467 // Must only be used by the user we combine with.
3468 if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
3469 return false;
3471 if (CheckZeroReg) {
3472 assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
3473 MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
3474 MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
3475 // The third input reg must be zero.
3476 if (MI->getOperand(3).getReg() != ZeroReg)
3477 return false;
3478 }
3480 return true;
3481 }
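// Note, added for exposition (not in the original source): a plain multiply
// is encoded on AArch64 as MADD with the zero register as addend, e.g.
// "madd w0, w1, w2, wzr" is "mul w0, w1, w2". CheckZeroReg verifies exactly
// that shape, so only true multiplies are folded into madd/msub patterns.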
3484 // Is \param MO defined by an integer multiply and can be combined?
3485 static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
3486 unsigned MulOpc, unsigned ZeroReg) {
3487 return canCombine(MBB, MO, MulOpc, ZeroReg, true);
3488 }
3491 // Is \param MO defined by a floating-point multiply and can be combined?
3492 static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
3493 unsigned MulOpc) {
3494 return canCombine(MBB, MO, MulOpc);
3495 }
// TODO: There are many more machine instruction opcodes to match:
//       1. Other data types (integer, vectors)
//       2. Other math / logic operations (xor, or)
//       3. Other forms of the same operation (intrinsics and other variants)
bool AArch64InstrInfo::isAssociativeAndCommutative(
    const MachineInstr &Inst) const {
  switch (Inst.getOpcode()) {
  case AArch64::FADDDrr:
  case AArch64::FADDSrr:
  case AArch64::FADDv2f32:
  case AArch64::FADDv2f64:
  case AArch64::FADDv4f32:
  case AArch64::FMULDrr:
  case AArch64::FMULSrr:
  case AArch64::FMULX32:
  case AArch64::FMULX64:
  case AArch64::FMULXv2f32:
  case AArch64::FMULXv2f64:
  case AArch64::FMULXv4f32:
  case AArch64::FMULv2f32:
  case AArch64::FMULv2f64:
  case AArch64::FMULv4f32:
    return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
  default:
    return false;
  }
}
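
// As a sketch of what this hook enables (illustrative, under unsafe-fp-math
// or equivalent fast-math flags): the MachineCombiner may reassociate a
// serial chain
//   ((a + b) + c) + d
// into
//   (a + b) + (c + d)
// so the two partial sums can issue in parallel, shortening the critical
// path at the cost of a different rounding order.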
/// Find instructions that can be turned into madd.
static bool getMaddPatterns(MachineInstr &Root,
                            SmallVectorImpl<MachineCombinerPattern> &Patterns) {
  unsigned Opc = Root.getOpcode();
  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  if (!isCombineInstrCandidate(Opc))
    return false;
  if (isCombineInstrSettingFlag(Opc)) {
    int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
    // When NZCV is live bail out.
    if (Cmp_NZCV == -1)
      return false;
    unsigned NewOpc = convertToNonFlagSettingOpc(Root);
    // When opcode can't change bail out.
    // CHECKME: do we miss any cases for opcode conversion?
    if (NewOpc == Opc)
      return false;
    Opc = NewOpc;
  }

  switch (Opc) {
  default:
    break;
  case AArch64::ADDWrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "ADDWrr does not have register operands");
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBWrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
      Found = true;
    }
    break;
  case AArch64::ADDXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
      Found = true;
    }
    break;
  }
  return Found;
}
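
// Worked example (assumed MIR, for illustration only): given
//   %2:gpr32 = MADDWrrr %0, %1, $wzr   ; i.e. %2 = %0 * %1
//   %3:gpr32 = ADDWrr %2, %4
// getMaddPatterns records MULADDW_OP1, and the combiner can later fold the
// pair into a single
//   %3:gpr32 = MADDWrrr %0, %1, %4     ; %3 = %0 * %1 + %4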
/// Floating-Point Support

/// Find instructions that can be turned into madd.
static bool getFMAPatterns(MachineInstr &Root,
                           SmallVectorImpl<MachineCombinerPattern> &Patterns) {

  if (!isCombineInstrCandidateFP(Root))
    return false;

  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  switch (Root.getOpcode()) {
  default:
    assert(false && "Unsupported FP instruction in combiner\n");
    break;
  case AArch64::FADDSrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "FADDSrr does not have register operands");
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULADDS_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv1i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULADDS_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv1i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP2);
      Found = true;
    }
    break;
  case AArch64::FADDDrr:
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULADDD_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv1i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULADDD_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv1i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP2);
      Found = true;
    }
    break;
  case AArch64::FADDv2f32:
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv2i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv2f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv2i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv2f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP2);
      Found = true;
    }
    break;
  case AArch64::FADDv2f64:
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv2i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv2f64)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv2i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv2f64)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP2);
      Found = true;
    }
    break;
  case AArch64::FADDv4f32:
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv4i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv4f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv4i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv4f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP2);
      Found = true;
    }
    break;

  case AArch64::FSUBSrr:
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv1i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv1i32_indexed_OP2);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FNMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FNMULSUBS_OP1);
      Found = true;
    }
    break;
  case AArch64::FSUBDrr:
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv1i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv1i64_indexed_OP2);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FNMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FNMULSUBD_OP1);
      Found = true;
    }
    break;
  case AArch64::FSUBv2f32:
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv2i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2i32_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv2f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2f32_OP2);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv2i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2i32_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv2f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2f32_OP1);
      Found = true;
    }
    break;
  case AArch64::FSUBv2f64:
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv2i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2i64_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv2f64)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2f64_OP2);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv2i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2i64_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv2f64)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2f64_OP1);
      Found = true;
    }
    break;
  case AArch64::FSUBv4f32:
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv4i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv4i32_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv4f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv4f32_OP2);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv4i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv4i32_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv4f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv4f32_OP1);
      Found = true;
    }
    break;
  }
  return Found;
}
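
// Worked example (assumed MIR, for illustration only): given
//   %2:fpr32 = FMULSrr %0, %1
//   %3:fpr32 = FSUBSrr %2, %4
// FMULSUBS_OP1 is recorded, which genAlternativeCodeSequence can turn into
//   %3:fpr32 = FNMSUBSrrr %0, %1, %4   ; %3 = -%4 + %0 * %1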
/// Return true when a code sequence can improve throughput. It
/// should be called only for instructions in loops.
/// \param Pattern - combiner pattern
bool AArch64InstrInfo::isThroughputPattern(
    MachineCombinerPattern Pattern) const {
  switch (Pattern) {
  default:
    break;
  case MachineCombinerPattern::FMULADDS_OP1:
  case MachineCombinerPattern::FMULADDS_OP2:
  case MachineCombinerPattern::FMULSUBS_OP1:
  case MachineCombinerPattern::FMULSUBS_OP2:
  case MachineCombinerPattern::FMULADDD_OP1:
  case MachineCombinerPattern::FMULADDD_OP2:
  case MachineCombinerPattern::FMULSUBD_OP1:
  case MachineCombinerPattern::FMULSUBD_OP2:
  case MachineCombinerPattern::FNMULSUBS_OP1:
  case MachineCombinerPattern::FNMULSUBD_OP1:
  case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
  case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f32_OP2:
  case MachineCombinerPattern::FMLAv2f32_OP1:
  case MachineCombinerPattern::FMLAv2f64_OP1:
  case MachineCombinerPattern::FMLAv2f64_OP2:
  case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
  case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
  case MachineCombinerPattern::FMLAv4f32_OP1:
  case MachineCombinerPattern::FMLAv4f32_OP2:
  case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
  case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
  case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
  case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
  case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
  case MachineCombinerPattern::FMLSv2f32_OP2:
  case MachineCombinerPattern::FMLSv2f64_OP2:
  case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
  case MachineCombinerPattern::FMLSv4f32_OP2:
    return true;
  } // end switch (Pattern)
  return false;
}
/// Return true when there is potentially a faster code sequence for an
/// instruction chain ending in \p Root. All potential patterns are listed in
/// the \p Patterns vector. Patterns should be sorted in priority order since
/// the pattern evaluator stops checking as soon as it finds a faster sequence.

bool AArch64InstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  // Integer patterns
  if (getMaddPatterns(Root, Patterns))
    return true;
  // Floating point patterns
  if (getFMAPatterns(Root, Patterns))
    return true;

  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
}
enum class FMAInstKind { Default, Indexed, Accumulator };

/// genFusedMultiply - Generate fused multiply instructions.
/// This function supports both integer and floating point instructions.
/// A typical example:
///  F|MUL I=A,B,0
///  F|ADD R,I,C
///  ==> F|MADD R,A,B,C
/// \param MF Containing MachineFunction
/// \param MRI Register information
/// \param TII Target information
/// \param Root is the F|ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the F|MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the f|madd instruction
/// \param RC Register class of operands
/// \param kind The kind of fma instruction (addressing mode) to be generated
/// \param ReplacedAddend is the result register from the instruction
/// replacing the non-combined operand, if any.
static MachineInstr *
genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
                 const TargetInstrInfo *TII, MachineInstr &Root,
                 SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
                 unsigned MaddOpc, const TargetRegisterClass *RC,
                 FMAInstKind kind = FMAInstKind::Default,
                 const unsigned *ReplacedAddend = nullptr) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();

  unsigned SrcReg2;
  bool Src2IsKill;
  if (ReplacedAddend) {
    // If we just generated a new addend, we must be its only use.
    SrcReg2 = *ReplacedAddend;
    Src2IsKill = true;
  } else {
    SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
    Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
  }

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
    MRI.constrainRegClass(SrcReg2, RC);

  MachineInstrBuilder MIB;
  if (kind == FMAInstKind::Default)
    MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
              .addReg(SrcReg0, getKillRegState(Src0IsKill))
              .addReg(SrcReg1, getKillRegState(Src1IsKill))
              .addReg(SrcReg2, getKillRegState(Src2IsKill));
  else if (kind == FMAInstKind::Indexed)
    MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
              .addReg(SrcReg2, getKillRegState(Src2IsKill))
              .addReg(SrcReg0, getKillRegState(Src0IsKill))
              .addReg(SrcReg1, getKillRegState(Src1IsKill))
              .addImm(MUL->getOperand(3).getImm());
  else if (kind == FMAInstKind::Accumulator)
    MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
              .addReg(SrcReg2, getKillRegState(Src2IsKill))
              .addReg(SrcReg0, getKillRegState(Src0IsKill))
              .addReg(SrcReg1, getKillRegState(Src1IsKill));
  else
    assert(false && "Invalid FMA instruction kind \n");
  // Insert the MADD (MADD, FMA, FMS, FMLA, FMLS)
  InsInstrs.push_back(MIB);
  return MUL;
}
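
// Note on operand order (spelled out here for clarity; inferred from the
// builders above): the scalar Default form emits MADD/FMADD as
//   <opc> Rd, Rn(=A), Rm(=B), Ra(=addend)
// while the Indexed and Accumulator SIMD forms take the accumulator first,
//   <opc> Vd(=addend), Vn(=A), Vm(=B) [, lane]
// with the lane immediate copied from the original indexed FMUL.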
/// genMaddR - Generate madd instruction and combine mul and add using
/// an extra virtual register
/// Example - an ADD intermediate needs to be stored in a register:
///   MUL I=A,B,0
///   ADD R,I,Imm
///   ==> ORR  V, ZR, Imm
///   ==> MADD R,A,B,V
/// \param MF Containing MachineFunction
/// \param MRI Register information
/// \param TII Target information
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
/// \param RC Register class of operands
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
                              const TargetInstrInfo *TII, MachineInstr &Root,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              unsigned IdxMulOpd, unsigned MaddOpc, unsigned VR,
                              const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(VR))
    MRI.constrainRegClass(VR, RC);

  MachineInstrBuilder MIB =
      BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
          .addReg(SrcReg0, getKillRegState(Src0IsKill))
          .addReg(SrcReg1, getKillRegState(Src1IsKill))
          .addReg(VR);
  // Insert the MADD
  InsInstrs.push_back(MIB);
  return MUL;
}
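
// Illustrative end-to-end example for the immediate case (assumed assembly;
// register numbers are hypothetical):
//   mul  w9, w0, w1
//   add  w2, w9, #255        ; #255 is also encodable as logical imm 0xff
// becomes
//   orr  w8, wzr, #0xff      ; materialize V = 255
//   madd w2, w0, w1, w8      ; w2 = w0 * w1 + 255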
/// When getMachineCombinerPatterns() finds potential patterns,
/// this function generates the instructions that could replace the
/// original code sequence.
void AArch64InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  MachineInstr *MUL;
  const TargetRegisterClass *RC;
  unsigned Opc;
  switch (Pattern) {
  default:
    // Reassociate instructions.
    TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
                                                DelInstrs, InstrIdxForVirtReg);
    return;
  case MachineCombinerPattern::MULADDW_OP1:
  case MachineCombinerPattern::MULADDX_OP1:
    // MUL I=A,B,0
    // ADD R,I,C
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::MULADDW_OP2:
  case MachineCombinerPattern::MULADDX_OP2:
    // MUL I=A,B,0
    // ADD R,C,I
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MULADDWI_OP1:
  case MachineCombinerPattern::MULADDXI_OP1: {
    // MUL I=A,B,0
    // ADD R,I,Imm
    // ==> ORR  V, ZR, Imm
    // ==> MADD R,A,B,V
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    uint64_t Imm = Root.getOperand(2).getImm();

    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = SignExtend64(Imm, BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  case MachineCombinerPattern::MULSUBW_OP1:
  case MachineCombinerPattern::MULSUBX_OP1: {
    // MUL I=A,B,0
    // SUB R,I, C
    // ==> SUB  V, 0, C
    // ==> MADD R,A,B,V // = -C + A*B
    // --- Create(MADD);
    const TargetRegisterClass *SubRC;
    unsigned SubOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
      SubOpc = AArch64::SUBWrr;
      SubRC = &AArch64::GPR32spRegClass;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      SubOpc = AArch64::SUBXrr;
      SubRC = &AArch64::GPR64spRegClass;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(SubRC);
    // SUB NewVR, 0, C
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
            .addReg(ZeroReg)
            .add(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    break;
  }
  case MachineCombinerPattern::MULSUBW_OP2:
  case MachineCombinerPattern::MULSUBX_OP2:
    // MUL I=A,B,0
    // SUB R,C,I
    // ==> MSUB R,A,B,C (computes C - A*B)
    // --- Create(MSUB);
    if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
      Opc = AArch64::MSUBWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MSUBXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MULSUBWI_OP1:
  case MachineCombinerPattern::MULSUBXI_OP1: {
    // MUL I=A,B,0
    // SUB R,I, Imm
    // ==> ORR  V, ZR, -Imm
    // ==> MADD R,A,B,V // = -Imm + A*B
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    uint64_t Imm = Root.getOperand(2).getImm();
    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = SignExtend64(-Imm, BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  // Floating Point Support
  case MachineCombinerPattern::FMULADDS_OP1:
  case MachineCombinerPattern::FMULADDD_OP1:
    // FMUL I=A,B,0
    // FADD R,I,C
    // ==> FMADD R,A,B,C
    // --- Create(FMADD);
    if (Pattern == MachineCombinerPattern::FMULADDS_OP1) {
      Opc = AArch64::FMADDSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMADDDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::FMULADDS_OP2:
  case MachineCombinerPattern::FMULADDD_OP2:
    // FMUL I=A,B,0
    // FADD R,C,I
    // ==> FMADD R,A,B,C
    // --- Create(FMADD);
    if (Pattern == MachineCombinerPattern::FMULADDS_OP2) {
      Opc = AArch64::FMADDSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMADDDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
    Opc = AArch64::FMLAv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
    Opc = AArch64::FMLAv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
    Opc = AArch64::FMLAv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
    Opc = AArch64::FMLAv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv2f32_OP1:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP1) {
      Opc = AArch64::FMLAv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f32_OP2:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP2) {
      Opc = AArch64::FMLAv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
  case MachineCombinerPattern::FMLAv2f64_OP1:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP1) {
      Opc = AArch64::FMLAv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f64_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP2) {
      Opc = AArch64::FMLAv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv4f32_OP1:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP1) {
      Opc = AArch64::FMLAv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv4f32_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP2) {
      Opc = AArch64::FMLAv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMULSUBS_OP1:
  case MachineCombinerPattern::FMULSUBD_OP1: {
    // FMUL I=A,B,0
    // FSUB R,I,C
    // ==> FNMSUB R,A,B,C // = -C + A*B
    // --- Create(FNMSUB);
    if (Pattern == MachineCombinerPattern::FMULSUBS_OP1) {
      Opc = AArch64::FNMSUBSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FNMSUBDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  }

  case MachineCombinerPattern::FNMULSUBS_OP1:
  case MachineCombinerPattern::FNMULSUBD_OP1: {
    // FNMUL I=A,B,0
    // FSUB R,I,C
    // ==> FNMADD R,A,B,C // = -A*B - C
    // --- Create(FNMADD);
    if (Pattern == MachineCombinerPattern::FNMULSUBS_OP1) {
      Opc = AArch64::FNMADDSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FNMADDDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  }

  case MachineCombinerPattern::FMULSUBS_OP2:
  case MachineCombinerPattern::FMULSUBD_OP2: {
    // FMUL I=A,B,0
    // FSUB R,C,I
    // ==> FMSUB R,A,B,C (computes C - A*B)
    // --- Create(FMSUB);
    if (Pattern == MachineCombinerPattern::FMULSUBS_OP2) {
      Opc = AArch64::FMSUBSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMSUBDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  }

  case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
    Opc = AArch64::FMLSv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
    Opc = AArch64::FMLSv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLSv2f32_OP2:
  case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP2) {
      Opc = AArch64::FMLSv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLSv2f64_OP2:
  case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP2) {
      Opc = AArch64::FMLSv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLSv4f32_OP2:
  case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP2) {
      Opc = AArch64::FMLSv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLSv2f32_OP1:
  case MachineCombinerPattern::FMLSv2i32_indexed_OP1: {
    RC = &AArch64::FPR64RegClass;
    unsigned NewVR = MRI.createVirtualRegister(RC);
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f32), NewVR)
            .add(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP1) {
      Opc = AArch64::FMLAv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed, &NewVR);
    } else {
      Opc = AArch64::FMLAv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator, &NewVR);
    }
    break;
  }
  case MachineCombinerPattern::FMLSv4f32_OP1:
  case MachineCombinerPattern::FMLSv4i32_indexed_OP1: {
    RC = &AArch64::FPR128RegClass;
    unsigned NewVR = MRI.createVirtualRegister(RC);
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv4f32), NewVR)
            .add(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP1) {
      Opc = AArch64::FMLAv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed, &NewVR);
    } else {
      Opc = AArch64::FMLAv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator, &NewVR);
    }
    break;
  }
  case MachineCombinerPattern::FMLSv2f64_OP1:
  case MachineCombinerPattern::FMLSv2i64_indexed_OP1: {
    RC = &AArch64::FPR128RegClass;
    unsigned NewVR = MRI.createVirtualRegister(RC);
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f64), NewVR)
            .add(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP1) {
      Opc = AArch64::FMLAv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed, &NewVR);
    } else {
      Opc = AArch64::FMLAv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator, &NewVR);
    }
    break;
  }
  } // end switch (Pattern)
  // Record MUL and ADD/SUB for deletion
  DelInstrs.push_back(MUL);
  DelInstrs.push_back(&Root);
}
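
// Worked example for the vector FMLS OP1 rewrite above (assumed assembly;
// register numbers are illustrative, the pass really uses fresh vregs):
//   fmul v2.2s, v0.2s, v1.2s
//   fsub v3.2s, v2.2s, v4.2s          ; v3 = v0*v1 - v4
// is replaced, via the extra FNEG, with
//   fneg v5.2s, v4.2s
//   fmla v5.2s, v0.2s, v1.2s          ; v5 = -v4 + v0*v1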
/// Replace a csinc-branch sequence by a simple conditional branch.
///
/// Examples:
/// 1. \code
///   csinc w9, wzr, wzr, <condition code>
///   tbnz  w9, #0, 0x44
///    \endcode
/// to
///    \code
///   b.<inverted condition code>
///    \endcode
///
/// 2. \code
///   csinc w9, wzr, wzr, <condition code>
///   tbz   w9, #0, 0x44
///    \endcode
/// to
///    \code
///   b.<condition code>
///    \endcode
///
/// Replace a compare-and-branch sequence by a TBZ/TBNZ instruction when the
/// compare's constant operand is a power of 2.
///
/// Examples:
///    \code
///   and  w8, w8, #0x400
///   cbnz w8, L1
///    \endcode
/// to
///    \code
///   tbnz w8, #10, L1
///    \endcode
///
/// \param  MI Conditional Branch
/// \return True when the simple conditional branch is generated
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
  bool IsNegativeBranch = false;
  bool IsTestAndBranch = false;
  unsigned TargetBBInMI = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    return false;
  case AArch64::CBZW:
  case AArch64::CBZX:
    TargetBBInMI = 1;
    break;
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    TargetBBInMI = 1;
    IsNegativeBranch = true;
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
    TargetBBInMI = 2;
    IsTestAndBranch = true;
    break;
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    TargetBBInMI = 2;
    IsNegativeBranch = true;
    IsTestAndBranch = true;
    break;
  }
  // So we increment a zero register and test for bits other
  // than bit 0? Conservatively bail out in case the verifier
  // missed this case.
  if (IsTestAndBranch && MI.getOperand(1).getImm())
    return false;

  // Find Definition.
  assert(MI.getParent() && "Incomplete machine instruction\n");
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  unsigned VReg = MI.getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(VReg);

  // Look through COPY instructions to find definition.
  while (DefMI->isCopy()) {
    unsigned CopyVReg = DefMI->getOperand(1).getReg();
    if (!MRI->hasOneNonDBGUse(CopyVReg))
      return false;
    if (!MRI->hasOneDef(CopyVReg))
      return false;
    DefMI = MRI->getVRegDef(CopyVReg);
  }

  switch (DefMI->getOpcode()) {
  default:
    return false;
  // Fold AND into a TBZ/TBNZ if constant operand is power of 2.
  case AArch64::ANDWri:
  case AArch64::ANDXri: {
    if (IsTestAndBranch)
      return false;
    if (DefMI->getParent() != MBB)
      return false;
    if (!MRI->hasOneNonDBGUse(VReg))
      return false;

    bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
    uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
        DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64);
    if (!isPowerOf2_64(Mask))
      return false;

    MachineOperand &MO = DefMI->getOperand(1);
    unsigned NewReg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(NewReg))
      return false;

    assert(!MRI->def_empty(NewReg) && "Register must be defined.");

    MachineBasicBlock &RefToMBB = *MBB;
    MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
    DebugLoc DL = MI.getDebugLoc();
    unsigned Imm = Log2_64(Mask);
    unsigned Opc = (Imm < 32)
                       ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
                       : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
    MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc))
                              .addReg(NewReg)
                              .addImm(Imm)
                              .addMBB(TBB);
    // Register lives on to the TBZ/TBNZ now.
    MO.setIsKill(false);

    // For immediates smaller than 32, we need to use the 32-bit
    // variant (W) in all cases, because the 64-bit variant cannot
    // encode them.
    // Therefore, if the input register is 64-bit, we need to take the
    // 32-bit sub-register.
    if (!Is32Bit && Imm < 32)
      NewMI->getOperand(0).setSubReg(AArch64::sub_32);
    MI.eraseFromParent();
    return true;
  }
  // Look for CSINC
  case AArch64::CSINCWr:
  case AArch64::CSINCXr: {
    if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
          DefMI->getOperand(2).getReg() == AArch64::WZR) &&
        !(DefMI->getOperand(1).getReg() == AArch64::XZR &&
          DefMI->getOperand(2).getReg() == AArch64::XZR))
      return false;

    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
      return false;

    AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
    // Convert only when the condition code is not modified between
    // the CSINC and the branch. The CC may be used by other
    // instructions in between.
    if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write))
      return false;
    MachineBasicBlock &RefToMBB = *MBB;
    MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
    DebugLoc DL = MI.getDebugLoc();
    if (IsNegativeBranch)
      CC = AArch64CC::getInvertedCondCode(CC);
    BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
    MI.eraseFromParent();
    return true;
  }
  }
}
std::pair<unsigned, unsigned>
AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = AArch64II::MO_FRAGMENT;
  return std::make_pair(TF & Mask, TF & ~Mask);
}
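
// For example (illustrative): an operand carrying MO_PAGEOFF | MO_NC
// decomposes into the direct fragment MO_PAGEOFF plus the MO_NC bitmask bit,
// since MO_FRAGMENT masks out only the mutually exclusive fragment kinds
// (page, pageoff, g0-g3, hi12) and leaves the orthogonal modifier bits.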
ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace AArch64II;

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PAGE, "aarch64-page"}, {MO_PAGEOFF, "aarch64-pageoff"},
      {MO_G3, "aarch64-g3"},     {MO_G2, "aarch64-g2"},
      {MO_G1, "aarch64-g1"},     {MO_G0, "aarch64-g0"},
      {MO_HI12, "aarch64-hi12"}};
  return makeArrayRef(TargetFlags);
}

ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace AArch64II;

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_COFFSTUB, "aarch64-coffstub"},
      {MO_GOT, "aarch64-got"},   {MO_NC, "aarch64-nc"},
      {MO_S, "aarch64-s"},       {MO_TLS, "aarch64-tls"},
      {MO_DLLIMPORT, "aarch64-dllimport"}};
  return makeArrayRef(TargetFlags);
}

ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
AArch64InstrInfo::getSerializableMachineMemOperandTargetFlags() const {
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      {{MOSuppressPair, "aarch64-suppress-pair"},
       {MOStridedAccess, "aarch64-strided-access"}};
  return makeArrayRef(TargetFlags);
}
/// Constants defining how certain sequences should be outlined.
/// This encompasses how an outlined function should be called, and what kind of
/// frame should be emitted for that outlined function.
///
/// \p MachineOutlinerDefault implies that the function should be called with
/// a save and restore of LR to the stack.
///
/// That is,
///
/// I1     Save LR                    OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3     Restore LR                 I2
///                                   I3
///                                   RET
///
/// * Call construction overhead: 3 (save + BL + restore)
/// * Frame construction overhead: 1 (ret)
/// * Requires stack fixups? Yes
///
/// \p MachineOutlinerTailCall implies that the function is being created from
/// a sequence of instructions ending in a return.
///
/// That is,
///
/// I1                             OUTLINED_FUNCTION:
/// I2 --> B OUTLINED_FUNCTION     I1
/// RET                            I2
///                                RET
///
/// * Call construction overhead: 1 (B)
/// * Frame construction overhead: 0 (Return included in sequence)
/// * Requires stack fixups? No
///
/// \p MachineOutlinerNoLRSave implies that the function should be called using
/// a BL instruction, but doesn't require LR to be saved and restored. This
/// happens when LR is known to be dead.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3                                I2
///                                   I3
///                                   RET
///
/// * Call construction overhead: 1 (BL)
/// * Frame construction overhead: 1 (RET)
/// * Requires stack fixups? No
///
/// \p MachineOutlinerThunk implies that the function is being created from
/// a sequence of instructions ending in a call. The outlined function is
/// called with a BL instruction, and the outlined function tail-calls the
/// original call destination.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// BL f                              I2
///                                   B f
///
/// * Call construction overhead: 1 (BL)
/// * Frame construction overhead: 0
/// * Requires stack fixups? No
///
/// \p MachineOutlinerRegSave implies that the function should be called with a
/// save and restore of LR to an available register. This allows us to avoid
/// stack fixups. Note that this outlining variant is compatible with the
/// NoLRSave case.
///
/// That is,
///
/// I1     Save LR                    OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3     Restore LR                 I2
///                                   I3
///                                   RET
///
/// * Call construction overhead: 3 (save + BL + restore)
/// * Frame construction overhead: 1 (ret)
/// * Requires stack fixups? No
enum MachineOutlinerClass {
  MachineOutlinerDefault,  /// Emit a save, restore, call, and return.
  MachineOutlinerTailCall, /// Only emit a branch.
  MachineOutlinerNoLRSave, /// Emit a call and return.
  MachineOutlinerThunk,    /// Emit a call and tail-call.
  MachineOutlinerRegSave   /// Same as default, but save to a register.
};

enum MachineOutlinerMBBFlags {
  LRUnavailableSomewhere = 0x2,
  HasCalls = 0x4,
  UnsafeRegsDead = 0x8
};
unsigned
AArch64InstrInfo::findRegisterToSaveLRTo(const outliner::Candidate &C) const {
  assert(C.LRUWasSet && "LRU wasn't set?");
  MachineFunction *MF = C.getMF();
  const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>(
      MF->getSubtarget().getRegisterInfo());

  // Check if there is an available register across the sequence that we can
  // use.
  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (!ARI->isReservedReg(*MF, Reg) &&
        Reg != AArch64::LR &&  // LR is not reserved, but don't use it.
        Reg != AArch64::X16 && // X16 is not guaranteed to be preserved.
        Reg != AArch64::X17 && // Ditto for X17.
        C.LRU.available(Reg) && C.UsedInSequence.available(Reg))
      return Reg;
  }

  // No suitable register. Return 0.
  return 0u;
}
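
// Sketch of the resulting RegSave call sequence (illustrative; the actual
// instructions are produced by the outliner's call-insertion code, and the
// register is whatever findRegisterToSaveLRTo returned, x20 here):
//   mov x20, lr               ; save LR in the free register
//   bl  OUTLINED_FUNCTION_N
//   mov lr, x20               ; restore LR
// which matches the 3 instructions / 12 bytes costed for
// MachineOutlinerRegSave below.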
outliner::OutlinedFunction
AArch64InstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
  outliner::Candidate &FirstCand = RepeatedSequenceLocs[0];
  unsigned SequenceSize =
      std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0,
                      [this](unsigned Sum, const MachineInstr &MI) {
                        return Sum + getInstSizeInBytes(MI);
                      });

  // Properties about candidate MBBs that hold for all of them.
  unsigned FlagsSetInAll = 0xF;

  // Compute liveness information for each candidate, and set FlagsSetInAll.
  const TargetRegisterInfo &TRI = getRegisterInfo();
  std::for_each(RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
                [&FlagsSetInAll](outliner::Candidate &C) {
                  FlagsSetInAll &= C.Flags;
                });

  // According to the AArch64 Procedure Call Standard, the following are
  // undefined on entry/exit from a function call:
  //
  // * Registers x16, x17, (and thus w16, w17)
  // * Condition codes (and thus the NZCV register)
  //
  // Because of this, we can't outline any sequence of instructions where
  // one of these registers is live into/across it. Thus, we need to delete
  // those candidates.
  auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) {
    // If the unsafe registers in this block are all dead, then we don't need
    // to compute liveness here.
    if (C.Flags & UnsafeRegsDead)
      return false;
    C.initLRU(TRI);
    LiveRegUnits LRU = C.LRU;
    return (!LRU.available(AArch64::W16) || !LRU.available(AArch64::W17) ||
            !LRU.available(AArch64::NZCV));
  };

  // Are there any candidates where those registers are live?
  if (!(FlagsSetInAll & UnsafeRegsDead)) {
    // Erase every candidate that violates the restrictions above. (It could be
    // true that we have viable candidates, so it's not worth bailing out in
    // the case that, say, 1 out of 20 candidates violate the restrictions.)
    RepeatedSequenceLocs.erase(std::remove_if(RepeatedSequenceLocs.begin(),
                                              RepeatedSequenceLocs.end(),
                                              CantGuaranteeValueAcrossCall),
                               RepeatedSequenceLocs.end());

    // If the sequence doesn't have enough candidates left, then we're done.
    if (RepeatedSequenceLocs.size() < 2)
      return outliner::OutlinedFunction();
  }

  // At this point, we have only "safe" candidates to outline. Figure out
  // frame + call instruction information.

  unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode();

  // Helper lambda which sets call information for every candidate.
  auto SetCandidateCallInfo =
      [&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) {
        for (outliner::Candidate &C : RepeatedSequenceLocs)
          C.setCallInfo(CallID, NumBytesForCall);
      };

  unsigned FrameID = MachineOutlinerDefault;
  unsigned NumBytesToCreateFrame = 4;

  bool HasBTI = any_of(RepeatedSequenceLocs, [](outliner::Candidate &C) {
    return C.getMF()->getFunction().hasFnAttribute("branch-target-enforcement");
  });
  // Returns true if an instruction is safe to fix up, false otherwise.
  auto IsSafeToFixup = [this, &TRI](MachineInstr &MI) {
    if (MI.isCall())
      return true;

    if (!MI.modifiesRegister(AArch64::SP, &TRI) &&
        !MI.readsRegister(AArch64::SP, &TRI))
      return true;

    // Any modification of SP will break our code to save/restore LR.
    // FIXME: We could handle some instructions which add a constant
    // offset to SP, with a bit more work.
    if (MI.modifiesRegister(AArch64::SP, &TRI))
      return false;

    // At this point, we have a stack instruction that we might need to
    // fix up. We'll handle it if it's a load or store.
    if (MI.mayLoadOrStore()) {
      const MachineOperand *Base; // Filled with the base operand of MI.
      int64_t Offset;             // Filled with the offset of MI.

      // Does it allow us to offset the base operand and is the base the
      // register SP?
      if (!getMemOperandWithOffset(MI, Base, Offset, &TRI) || !Base->isReg() ||
          Base->getReg() != AArch64::SP)
        return false;

      // Find the minimum/maximum offset for this instruction and check
      // if fixing it up would be in range.
      int64_t MinOffset,
          MaxOffset;       // Unscaled offsets for the instruction.
      unsigned Scale;      // The scale to multiply the offsets by.
      unsigned DummyWidth;
      getMemOpInfo(MI.getOpcode(), Scale, DummyWidth, MinOffset, MaxOffset);

      Offset += 16; // Update the offset to what it would be if we outlined.
      if (Offset < MinOffset * Scale || Offset > MaxOffset * Scale)
        return false;

      // It's in range, so we can outline it.
      return true;
    }

    // FIXME: Add handling for instructions like "add x0, sp, #8".

    // We can't fix it up, so don't outline it.
    return false;
  };
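
  // For illustration (not from the source): if the sequence contains
  // "ldr x1, [sp, #8]", outlining inserts a 16-byte LR save below it, so the
  // load must be rewritten as "ldr x1, [sp, #24]"; IsSafeToFixup checks that
  // such adjusted offsets still fit the instruction's encodable range.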
4995 // True if it's possible to fix up each stack instruction in this sequence.
4996 // Important for frames/call variants that modify the stack.
4997 bool AllStackInstrsSafe = std::all_of(
4998 FirstCand.front(), std::next(FirstCand.back()), IsSafeToFixup);
5000 // If the last instruction in any candidate is a terminator, then we should
5001 // tail call all of the candidates.
5002 if (RepeatedSequenceLocs[0].back()->isTerminator()) {
5003 FrameID = MachineOutlinerTailCall;
5004 NumBytesToCreateFrame = 0;
5005 SetCandidateCallInfo(MachineOutlinerTailCall, 4);
5008 else if (LastInstrOpcode == AArch64::BL ||
5009 (LastInstrOpcode == AArch64::BLR && !HasBTI)) {
5010 // FIXME: Do we need to check if the code after this uses the value of LR?
5011 FrameID = MachineOutlinerThunk;
5012 NumBytesToCreateFrame = 0;
5013 SetCandidateCallInfo(MachineOutlinerThunk, 4);
5016 else {
5017 // We need to decide how to emit calls + frames. We can always emit the same
5018 // frame if we don't need to save to the stack. If we have to save to the
5019 // stack, then we need a different frame.
5020 unsigned NumBytesNoStackCalls = 0;
5021 std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
5023 for (outliner::Candidate &C : RepeatedSequenceLocs) {
5024 C.initLRU(TRI);
5026 // Is LR available? If so, we don't need a save.
5027 if (C.LRU.available(AArch64::LR)) {
5028 NumBytesNoStackCalls += 4;
5029 C.setCallInfo(MachineOutlinerNoLRSave, 4);
5030 CandidatesWithoutStackFixups.push_back(C);
5033 // Is an unused register available? If so, we won't modify the stack, so
5034 // we can outline with the same frame type as those that don't save LR.
5035 else if (findRegisterToSaveLRTo(C)) {
5036 NumBytesNoStackCalls += 12;
5037 C.setCallInfo(MachineOutlinerRegSave, 12);
5038 CandidatesWithoutStackFixups.push_back(C);
5041 // Is SP used in the sequence at all? If not, we don't have to modify
5042 // the stack, so we are guaranteed to get the same frame.
5043 else if (C.UsedInSequence.available(AArch64::SP)) {
5044 NumBytesNoStackCalls += 12;
5045 C.setCallInfo(MachineOutlinerDefault, 12);
5046 CandidatesWithoutStackFixups.push_back(C);
5049 // If we outline this, we need to modify the stack. Pretend we don't
5050 // outline this by saving all of its bytes.
5051 else {
5052 NumBytesNoStackCalls += SequenceSize;
5056 // If there are no places where we have to save LR, then note that we
5057 // don't have to update the stack. Otherwise, give every candidate the
5058 // default call type, as long as it's safe to do so.
5059 if (!AllStackInstrsSafe ||
5060 NumBytesNoStackCalls <= RepeatedSequenceLocs.size() * 12) {
5061 RepeatedSequenceLocs = CandidatesWithoutStackFixups;
5062 FrameID = MachineOutlinerNoLRSave;
5063 } else {
5064 SetCandidateCallInfo(MachineOutlinerDefault, 12);
5067 // If we dropped all of the candidates, bail out here.
5068 if (RepeatedSequenceLocs.size() < 2) {
5069 RepeatedSequenceLocs.clear();
5070 return outliner::OutlinedFunction();
  // Does every candidate's MBB contain a call? If so, then we might have a
  // call in the range.
  if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
    // Check if the range contains a call. These require a save + restore of
    // the link register.
    bool ModStackToSaveLR = false;
    if (std::any_of(FirstCand.front(), FirstCand.back(),
                    [](const MachineInstr &MI) { return MI.isCall(); }))
      ModStackToSaveLR = true;

    // Handle the last instruction separately. If this is a tail call, then
    // the last instruction is a call. We don't want to save + restore in
    // this case. However, it could be possible that the last instruction is
    // a call without it being valid to tail call this sequence. We should
    // consider this as well.
    else if (FrameID != MachineOutlinerThunk &&
             FrameID != MachineOutlinerTailCall && FirstCand.back()->isCall())
      ModStackToSaveLR = true;

    if (ModStackToSaveLR) {
      // We can't fix up the stack. Bail out.
      if (!AllStackInstrsSafe) {
        RepeatedSequenceLocs.clear();
        return outliner::OutlinedFunction();
      }

      // Save + restore LR.
      NumBytesToCreateFrame += 8;
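      // That is one str/ldr pair (4 bytes each) spilling and reloading LR
      // around the inner call.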
    }
  }

  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
                                    NumBytesToCreateFrame, FrameID);
}
bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;

  // Don't outline from functions with section markings; the program could
  // expect that all the code is in the named section.
  // FIXME: Allow outlining from multiple functions with the same section
  // marking.
  if (F.hasSection())
    return false;

  // Outlining from functions with redzones is unsafe since the outliner may
  // modify the stack. Check if hasRedZone is true or unknown; if yes, don't
  // outline from it.
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  if (!AFI || AFI->hasRedZone().getValueOr(true))
    return false;

  // It's safe to outline from MF.
  return true;
}
bool AArch64InstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                              unsigned &Flags) const {
  // Check if LR is available through all of the MBB. If it's not, then set
  // a flag.
  assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
         "Suitable Machine Function for outlining must track liveness");
  LiveRegUnits LRU(getRegisterInfo());

  std::for_each(MBB.rbegin(), MBB.rend(),
                [&LRU](MachineInstr &MI) { LRU.accumulate(MI); });

  // Check if each of the unsafe registers is available...
  bool W16AvailableInBlock = LRU.available(AArch64::W16);
  bool W17AvailableInBlock = LRU.available(AArch64::W17);
  bool NZCVAvailableInBlock = LRU.available(AArch64::NZCV);

  // If all of these are dead (and not live out), we know we don't have to
  // check them later.
  if (W16AvailableInBlock && W17AvailableInBlock && NZCVAvailableInBlock)
    Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
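  // (These registers are "unsafe" because inserting a call can clobber
  //  them: a bl may be routed through a linker-inserted veneer that uses
  //  x16/x17, and NZCV is not preserved across calls.)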
  // Now, add the live outs to the set.
  LRU.addLiveOuts(MBB);

  // If any of these registers is available in the MBB, but also a live out
  // of the block, then we know outlining is unsafe.
  if (W16AvailableInBlock && !LRU.available(AArch64::W16))
    return false;
  if (W17AvailableInBlock && !LRU.available(AArch64::W17))
    return false;
  if (NZCVAvailableInBlock && !LRU.available(AArch64::NZCV))
    return false;

  // Check if there's a call inside this MachineBasicBlock. If there is, then
  // set a flag.
  if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); }))
    Flags |= MachineOutlinerMBBFlags::HasCalls;

  MachineFunction *MF = MBB.getParent();

  // In the event that we outline, we may have to save LR. If there is an
  // available register in the MBB, then we'll always save LR there. Check if
  // this is true.
  bool CanSaveLR = false;
  const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>(
      MF->getSubtarget().getRegisterInfo());

  // Check if there is an available register across the sequence that we can
  // use.
  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (!ARI->isReservedReg(*MF, Reg) && Reg != AArch64::LR &&
        Reg != AArch64::X16 && Reg != AArch64::X17 && LRU.available(Reg)) {
      CanSaveLR = true;
      break;
    }
  }

  // Check if we have a register we can save LR to, and if LR was used
  // somewhere. If both of those things are true, then we need to evaluate
  // the safety of outlining stack instructions later.
  if (!CanSaveLR && !LRU.available(AArch64::LR))
    Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;

  return true;
}
outliner::InstrType
AArch64InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
                                   unsigned Flags) const {
  MachineInstr &MI = *MIT;
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  AArch64FunctionInfo *FuncInfo = MF->getInfo<AArch64FunctionInfo>();

  // Don't outline LOHs.
  if (FuncInfo->getLOHRelated().count(&MI))
    return outliner::InstrType::Illegal;

  // Don't allow debug values to impact outlining type.
  if (MI.isDebugInstr() || MI.isIndirectDebugValue())
    return outliner::InstrType::Invisible;

  // At this point, KILL instructions don't really tell us much so we can go
  // ahead and skip over them.
  if (MI.isKill())
    return outliner::InstrType::Invisible;

  // Is this a terminator for a basic block?
  if (MI.isTerminator()) {

    // Is this the end of a function?
    if (MI.getParent()->succ_empty())
      return outliner::InstrType::Legal;

    // It's not, so don't outline it.
    return outliner::InstrType::Illegal;
  }
  // Make sure none of the operands are un-outlinable.
  for (const MachineOperand &MOP : MI.operands()) {
    if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
        MOP.isTargetIndex())
      return outliner::InstrType::Illegal;

    // If it uses LR or W30 explicitly, then don't touch it.
    if (MOP.isReg() && !MOP.isImplicit() &&
        (MOP.getReg() == AArch64::LR || MOP.getReg() == AArch64::W30))
      return outliner::InstrType::Illegal;
  }
  // Special cases for instructions that can always be outlined, but will
  // fail the later tests. e.g. ADRP, which is PC-relative but can always be
  // outlined because it doesn't require a *specific* value to be in LR.
  if (MI.getOpcode() == AArch64::ADRP)
    return outliner::InstrType::Legal;
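  // For example, "adrp x0, sym" computes the 4 KiB page address of sym
  // relative to the instruction's own PC, but the relocation is resolved
  // against the symbol, so the result is the same wherever the instruction
  // ends up after outlining.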
  // If MI is a call we might be able to outline it. We don't want to outline
  // any calls that rely on the position of items on the stack. When we
  // outline something containing a call, we have to emit a save and restore
  // of LR in the outlined function. Currently, this always happens by saving
  // LR to the stack. Thus, if we outline, say, half the parameters for a
  // function call plus the call, then we'll break the callee's expectations
  // for the layout of the stack.
  //
  // FIXME: Allow calls to functions which construct a stack frame, as long
  // as they don't access arguments on the stack.
  // FIXME: Figure out some way to analyze functions defined in other
  // modules. We should be able to compute the memory usage based on the IR
  // calling convention, even if we can't see the definition.
  if (MI.isCall()) {
    // Get the function associated with the call. Look at each operand and
    // find the one that represents the callee and get its name.
    const Function *Callee = nullptr;
    for (const MachineOperand &MOP : MI.operands()) {
      if (MOP.isGlobal()) {
        Callee = dyn_cast<Function>(MOP.getGlobal());
        break;
      }
    }
    // Never outline calls to mcount. There isn't any rule that would require
    // this, but the Linux kernel's "ftrace" feature depends on it.
    if (Callee && Callee->getName() == "\01_mcount")
      return outliner::InstrType::Illegal;

    // If we don't know anything about the callee, assume it depends on the
    // stack layout of the caller. In that case, it's only legal to outline
    // as a tail-call. Whitelist the call instructions we know about so we
    // don't get unexpected results with call pseudo-instructions.
    auto UnknownCallOutlineType = outliner::InstrType::Illegal;
    if (MI.getOpcode() == AArch64::BLR || MI.getOpcode() == AArch64::BL)
      UnknownCallOutlineType = outliner::InstrType::LegalTerminator;

    if (!Callee)
      return UnknownCallOutlineType;

    // We have a function we have information about. Check if it's something
    // we can safely outline.
    MachineFunction *CalleeMF = MF->getMMI().getMachineFunction(*Callee);

    // We don't know what's going on with the callee at all. Don't touch it.
    if (!CalleeMF)
      return UnknownCallOutlineType;

    // Check if we know anything about the callee saves on the function. If
    // we don't, then don't touch it, since that implies that we haven't
    // computed anything about its stack frame yet.
    MachineFrameInfo &MFI = CalleeMF->getFrameInfo();
    if (!MFI.isCalleeSavedInfoValid() || MFI.getStackSize() > 0 ||
        MFI.getNumObjects() > 0)
      return UnknownCallOutlineType;

    // At this point, we can say that CalleeMF ought to not pass anything on
    // the stack. Therefore, we can outline it.
    return outliner::InstrType::Legal;
  }
  // Don't outline positions.
  if (MI.isPosition())
    return outliner::InstrType::Illegal;

  // Don't touch the link register or W30.
  if (MI.readsRegister(AArch64::W30, &getRegisterInfo()) ||
      MI.modifiesRegister(AArch64::W30, &getRegisterInfo()))
    return outliner::InstrType::Illegal;

  // Don't outline BTI instructions, because that will prevent the outlining
  // site from being indirectly callable.
  if (MI.getOpcode() == AArch64::HINT) {
    int64_t Imm = MI.getOperand(0).getImm();
    if (Imm == 32 || Imm == 34 || Imm == 36 || Imm == 38)
      return outliner::InstrType::Illegal;
  }
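  // (Those HINT immediates encode the BTI variants: #32 = bti, #34 = bti c,
  //  #36 = bti j, #38 = bti jc.)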
  return outliner::InstrType::Legal;
}
void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
  for (MachineInstr &MI : MBB) {
    const MachineOperand *Base;
    unsigned Width;
    int64_t Offset;

    // Is this a load or store with an immediate offset with SP as the base?
    if (!MI.mayLoadOrStore() ||
        !getMemOperandWithOffsetWidth(MI, Base, Offset, Width, &RI) ||
        (Base->isReg() && Base->getReg() != AArch64::SP))
      continue;

    // It is, so we have to fix it up.
    unsigned Scale;
    int64_t Dummy1, Dummy2;

    MachineOperand &StackOffsetOperand = getMemOpBaseRegImmOfsOffsetOperand(MI);
    assert(StackOffsetOperand.isImm() && "Stack offset wasn't immediate!");
    getMemOpInfo(MI.getOpcode(), Scale, Width, Dummy1, Dummy2);
    assert(Scale != 0 && "Unexpected opcode!");

    // We've pushed the return address to the stack, so add 16 to the offset.
    // This is safe, since we already checked if it would overflow when we
    // checked if this instruction was legal to outline.
    int64_t NewImm = (Offset + 16) / Scale;
    StackOffsetOperand.setImm(NewImm);
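    // Worked example (illustrative): for "ldr x0, [sp, #8]", Offset is 8
    // and Scale is 8, so NewImm = (8 + 16) / 8 = 3, which prints as
    // "ldr x0, [sp, #24]".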
  }
}

void AArch64InstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {
  // For thunk outlining, rewrite the last instruction from a call to a
  // tail-call.
  if (OF.FrameConstructionID == MachineOutlinerThunk) {
    MachineInstr *Call = &*--MBB.instr_end();
    unsigned TailOpcode;
    if (Call->getOpcode() == AArch64::BL) {
      TailOpcode = AArch64::TCRETURNdi;
    } else {
      assert(Call->getOpcode() == AArch64::BLR);
      TailOpcode = AArch64::TCRETURNriALL;
    }
    MachineInstr *TC = BuildMI(MF, DebugLoc(), get(TailOpcode))
                           .add(Call->getOperand(0))
                           .addImm(0);
    MBB.insert(MBB.end(), TC);
    Call->eraseFromParent();
  }
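  // TCRETURNdi/TCRETURNriALL are tail-call pseudos that are later expanded
  // to a plain "b"/"br", so the thunk hands control to the original callee
  // without needing to preserve LR.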
  // Is there a call in the outlined range?
  auto IsNonTailCall = [](MachineInstr &MI) {
    return MI.isCall() && !MI.isReturn();
  };
  if (std::any_of(MBB.instr_begin(), MBB.instr_end(), IsNonTailCall)) {
    // Fix up the instructions in the range, since we're going to modify the
    // stack.
    assert(OF.FrameConstructionID != MachineOutlinerDefault &&
           "Can only fix up stack references once");
    fixupPostOutline(MBB);

    // LR has to be a live in so that we can save it.
    MBB.addLiveIn(AArch64::LR);

    MachineBasicBlock::iterator It = MBB.begin();
    MachineBasicBlock::iterator Et = MBB.end();

    if (OF.FrameConstructionID == MachineOutlinerTailCall ||
        OF.FrameConstructionID == MachineOutlinerThunk)
      Et = std::prev(MBB.end());

    // Insert a save before the outlined region.
    MachineInstr *STRXpre = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
                                .addReg(AArch64::SP, RegState::Define)
                                .addReg(AArch64::LR)
                                .addReg(AArch64::SP)
                                .addImm(-16);
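    // This builds "str x30, [sp, #-16]!": pre-indexed, so SP is moved down
    // 16 bytes and LR is stored at the new SP in a single instruction.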
    It = MBB.insert(It, STRXpre);
    const TargetSubtargetInfo &STI = MF.getSubtarget();
    const MCRegisterInfo *MRI = STI.getRegisterInfo();
    unsigned DwarfReg = MRI->getDwarfRegNum(AArch64::LR, true);

    // Add a CFI saying the stack was moved 16 B down.
    int64_t StackPosEntry =
        MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, 16));
    BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION))
        .addCFIIndex(StackPosEntry)
        .setMIFlags(MachineInstr::FrameSetup);

    // Add a CFI saying that the LR that we want to find is now 16 B higher
    // than before.
    int64_t LRPosEntry =
        MF.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg, 16));
    BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION))
        .addCFIIndex(LRPosEntry)
        .setMIFlags(MachineInstr::FrameSetup);
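    // In the emitted assembly these appear as unwind directives along the
    // lines of ".cfi_def_cfa_offset 16" and ".cfi_offset w30, 16".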
    // Insert a restore before the terminator for the function.
    MachineInstr *LDRXpost = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
                                 .addReg(AArch64::SP, RegState::Define)
                                 .addReg(AArch64::LR, RegState::Define)
                                 .addReg(AArch64::SP)
                                 .addImm(16);
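    // This builds "ldr x30, [sp], #16": post-indexed, reloading LR and
    // popping the 16-byte slot in one instruction.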
    Et = MBB.insert(Et, LDRXpost);
  }
  // If this is a tail call outlined function, then there's already a return.
  if (OF.FrameConstructionID == MachineOutlinerTailCall ||
      OF.FrameConstructionID == MachineOutlinerThunk)
    return;

  // It's not a tail call, so we have to insert the return ourselves.
  MachineInstr *ret = BuildMI(MF, DebugLoc(), get(AArch64::RET))
                          .addReg(AArch64::LR, RegState::Undef);
  MBB.insert(MBB.end(), ret);
  // Did we have to modify the stack by saving the link register?
  if (OF.FrameConstructionID != MachineOutlinerDefault)
    return;

  // We modified the stack.
  // Walk over the basic block and fix up all the stack accesses.
  fixupPostOutline(MBB);
}
MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, const outliner::Candidate &C) const {

  // Are we tail calling?
  if (C.CallConstructionID == MachineOutlinerTailCall) {
    // If yes, then we can just branch to the label.
    It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::TCRETURNdi))
                            .addGlobalAddress(M.getNamedValue(MF.getName()))
                            .addImm(0));
    return It;
  }
  // Are we saving the link register?
  if (C.CallConstructionID == MachineOutlinerNoLRSave ||
      C.CallConstructionID == MachineOutlinerThunk) {
    // No, so just insert the call.
    It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL))
                            .addGlobalAddress(M.getNamedValue(MF.getName())));
    return It;
  }
  // We want to return the spot where we inserted the call.
  MachineBasicBlock::iterator CallPt;

  // Instructions for saving and restoring LR around the call instruction
  // we're going to insert.
  MachineInstr *Save;
  MachineInstr *Restore;
  // Can we save to a register?
  if (C.CallConstructionID == MachineOutlinerRegSave) {
    // FIXME: This logic should be sunk into a target-specific interface so
    // that we don't have to recompute the register.
    unsigned Reg = findRegisterToSaveLRTo(C);
    assert(Reg != 0 && "No callee-saved register available?");

    // Save and restore LR from that register.
    Save = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), Reg)
               .addReg(AArch64::XZR)
               .addReg(AArch64::LR)
               .addImm(0);
    Restore = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), AArch64::LR)
                  .addReg(AArch64::XZR)
                  .addReg(Reg)
                  .addImm(0);
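    // ORRXrs with XZR is the canonical encoding of a register move, so
    // these are "mov xN, x30" and "mov x30, xN" respectively.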
  } else {
    // We have the default case. Save and restore from SP.
    Save = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
               .addReg(AArch64::SP, RegState::Define)
               .addReg(AArch64::LR)
               .addReg(AArch64::SP)
               .addImm(-16);
    Restore = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
                  .addReg(AArch64::SP, RegState::Define)
                  .addReg(AArch64::LR, RegState::Define)
                  .addReg(AArch64::SP)
                  .addImm(16);
  }
  It = MBB.insert(It, Save);
  It++;

  // Insert the call.
  It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL))
                          .addGlobalAddress(M.getNamedValue(MF.getName())));
  CallPt = It;
  It++;

  It = MBB.insert(It, Restore);
  return CallPt;
}
bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault(
    MachineFunction &MF) const {
  return MF.getFunction().hasMinSize();
}
bool AArch64InstrInfo::isCopyInstrImpl(
    const MachineInstr &MI, const MachineOperand *&Source,
    const MachineOperand *&Destination) const {

  // AArch64::ORRWrs and AArch64::ORRXrs with WZR/XZR as the first source
  // register and a zero shift immediate are used as aliases for the mov
  // instruction.
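  // For example, "mov w0, w1" is really "orr w0, wzr, w1, lsl #0", and
  // "mov x0, x1" is "orr x0, xzr, x1, lsl #0".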
  if (MI.getOpcode() == AArch64::ORRWrs &&
      MI.getOperand(1).getReg() == AArch64::WZR &&
      MI.getOperand(3).getImm() == 0x0) {
    Destination = &MI.getOperand(0);
    Source = &MI.getOperand(2);
    return true;
  }

  if (MI.getOpcode() == AArch64::ORRXrs &&
      MI.getOperand(1).getReg() == AArch64::XZR &&
      MI.getOperand(3).getImm() == 0x0) {
    Destination = &MI.getOperand(0);
    Source = &MI.getOperand(2);
    return true;
  }

  return false;
}
#define GET_INSTRINFO_HELPERS
#include "AArch64GenInstrInfo.inc"