//===-- SIFoldOperands.cpp - Fold operands ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"

using namespace llvm;

namespace {
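
// A FoldCandidate records one pending fold: the instruction that uses the
// value, the operand index being rewritten, and the value itself. The union
// is discriminated by Kind, so a candidate may carry a register operand, an
// immediate, or a frame index (global addresses keep the original operand).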
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;

      unsigned Opc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(Opc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

// TODO: Add heuristic that the frame index might not fit in the addressing mode
// immediate offset to avoid materializing in loops.
static bool frameIndexMayFold(const SIInstrInfo *TII,
                              const MachineInstr &UseMI,
                              int OpNo,
                              const MachineOperand &OpToFold) {
  return OpToFold.isFI() &&
    (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
    OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}
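
// Apply a previously collected FoldCandidate by rewriting the use operand in
// place. Packed 16-bit immediates may instead be folded by adjusting the
// op_sel/op_sel_hi source modifiers, and carry-out instructions marked for
// shrinking are rebuilt as their 32-bit encodings, with VCC copied out if the
// carry result has non-debug uses.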
static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
        AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
                                       ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // Only apply the following transformation if that operand requires
      // a packed immediate.
      switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
      case AMDGPU::OPERAND_REG_IMM_V2FP16:
      case AMDGPU::OPERAND_REG_IMM_V2INT16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
        // If upper part is all zero we do not need op_sel_hi.
        if (!isUInt<16>(Fold.ImmToFold)) {
          if (!(Fold.ImmToFold & 0xffff)) {
            Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
            return true;
          }
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
          return true;
        }
        break;
      default:
        break;
      }
    }
  }

  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
    if (Liveness != MachineBasicBlock::LQR_Dead)
      return false;

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    Register NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}
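
// Check whether OpToFold can legally replace operand OpNo of MI and, if so,
// record a FoldCandidate. If the operand is not legal in its original
// position, this falls back to rewriting mac/fmac into mad/fma, converting
// s_setreg_b32 to its immediate form, or commuting the instruction.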
static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of operands might be an Imm operand, and OpNo may refer to it after
    // the call of commuteInstruction() below. Such situations are avoided
    // here explicitly as OpNo must be a register operand to be a candidate
    // for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true,
                                         Op32));
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}
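
// Try to fold an inline constant into an accumulator (AGPR) operand, either
// directly as an immediate or by looking through a REG_SEQUENCE whose
// sub-registers are all defined by identical move-immediates (a splat).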
static bool tryToFoldACImm(const SIInstrInfo *TII,
                           const MachineOperand &OpToFold,
                           MachineInstr *UseMI,
                           unsigned UseOpIdx,
                           SmallVectorImpl<FoldCandidate> &FoldList) {
  const MCInstrDesc &Desc = UseMI->getDesc();
  const MCOperandInfo *OpInfo = Desc.OpInfo;
  if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
    return false;

  uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
  if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
      OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return false;

  if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
      TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
    UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    return true;
  }

  if (!OpToFold.isReg())
    return false;

  Register UseReg = OpToFold.getReg();
  if (!Register::isVirtualRegister(UseReg))
    return false;

  if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
        return FC.UseMI == UseMI; }) != FoldList.end())
    return false;

  MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
  const MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
  if (!Def || !Def->isRegSequence())
    return false;

  int64_t Imm;
  MachineOperand *Op;
  for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
    const MachineOperand &Sub = Def->getOperand(I);
    if (!Sub.isReg() || Sub.getSubReg())
      return false;
    MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub.getReg());
    while (SubDef && !SubDef->isMoveImmediate() &&
           !SubDef->getOperand(1).isImm() && TII->isFoldableCopy(*SubDef))
      SubDef = MRI.getUniqueVRegDef(SubDef->getOperand(1).getReg());
    if (!SubDef || !SubDef->isMoveImmediate() || !SubDef->getOperand(1).isImm())
      return false;
    Op = &SubDef->getOperand(1);
    auto SubImm = Op->getImm();
    if (I == 1) {
      if (!TII->isInlineConstant(SubDef->getOperand(1), OpTy))
        return false;

      Imm = SubImm;
      continue;
    }
    if (Imm != SubImm)
      return false; // Can only fold splat constants
  }

  if (!TII->isOperandLegal(*UseMI, UseOpIdx, Op))
    return false;

  FoldList.push_back(FoldCandidate(UseMI, UseOpIdx, Op));
  return true;
}
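
// Collect fold candidates for a single use of OpToFold. Literals are pushed
// through REG_SEQUENCE into its users, frame indices are folded into scratch
// addressing operands, copies may be rewritten into mov/accvgpr forms, and
// v_readfirstlane/v_readlane of a uniform value is turned into s_mov or a
// copy before the generic legality checks run.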
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  int UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands, only if it is a full
    // copy since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
    //
    // %1 = COPY %0:sub1
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
    //
    // into
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    Register RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    MachineRegisterInfo::use_iterator Next;
    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; RSUse = Next) {
      Next = std::next(RSUse);

      MachineInstr *RSUseMI = RSUse->getParent();

      if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
                         RSUse.getOperandNo(), FoldList))
        continue;

      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
    return;

  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    // Sanity check that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.
    MachineOperand *SOff = TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
    if (!SOff->isReg() || (SOff->getReg() != MFI->getScratchWaveOffsetReg() &&
                           SOff->getReg() != MFI->getStackPtrOffsetReg()))
      return;

    if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
        MFI->getScratchRSrcReg())
      return;

    // A frame index will resolve to a positive constant, so it should always be
    // safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
    SOff->setReg(MFI->getStackPtrOffsetReg());
    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    Register DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC = Register::isVirtualRegister(DestReg)
                                            ? MRI->getRegClass(DestReg)
                                            : TRI->getPhysRegClass(DestReg);

    Register SrcReg = UseMI->getOperand(1).getReg();
    if (Register::isVirtualRegister(DestReg) &&
        Register::isVirtualRegister(SrcReg)) {
      const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(),
                                           Use.getOperandNo(), &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto & F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    if (DestRC == &AMDGPU::AGPR_32RegClass &&
        TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
      UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
      CopiesToReplace.push_back(UseMI);
      return;
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
    MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
    while (ImpOpI != ImpOpE) {
      MachineInstr::mop_iterator Tmp = ImpOpI;
      ImpOpI++;
      UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
    }
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        Register::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
        TRI->isVectorRegister(*MRI, UseMI->getOperand(0).getReg()) &&
        TRI->isVectorRegister(*MRI, UseMI->getOperand(1).getReg()) &&
        !UseMI->getOperand(1).getSubReg()) {
      unsigned Size = TII->getOpSize(*UseMI, 1);
      UseMI->getOperand(1).setReg(OpToFold.getReg());
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);
      if (Size != 4)
        return;
      if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
               TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
           AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        // FIXME: ChangeToImmediate should clear subreg
        UseMI->getOperand(1).setSubReg(0);
        if (OpToFold.isImm())
          UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        else
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->getOperand(1).setReg(OpToFold.getReg());
        UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
        UseMI->getOperand(1).setIsKill(false);
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    Register UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}
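
// Constant-evaluate a 32-bit bitwise or shift operation for the
// constant-folding path below; returns false for opcodes it does not handle.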
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !Register::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}
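
// Driver for folding one definition: walk all uses of MI's destination
// register, collect FoldCandidates (constant-folding users along the way for
// immediate-like sources), then apply the candidates and patch up implicit
// operands on any copies that were turned into movs.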
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList,
                    CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    SmallVector <MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (Fold.isReg() && Register::isVirtualRegister(Fold.OpToFold->getReg())) {
      Register Reg = Fold.OpToFold->getReg();
      MachineInstr *DefMI = Fold.OpToFold->getParent();
      if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
          execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
        continue;
    }
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restoring instruction's original operand order if fold has failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}
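
// Map a multiplication constant to the equivalent output modifier:
// 0.5 -> DIV2, 2.0 -> MUL2, 4.0 -> MUL4, anything else -> NONE.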
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}
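
// Pass entry point: visit blocks in depth-first order. Instructions that are
// foldable copies have their source operand propagated via foldInstOperand;
// everything else is given a chance to fold omod or clamp modifiers into its
// defining instruction.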
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm =
          OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}