//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers all occurrences of i1 values (with a vreg_1 register class)
// to lane masks (32 / 64-bit scalar registers). The pass assumes machine SSA
// form and a wave-level control flow graph.
//
// Before this pass, values that are semantically i1 and are defined and used
// within the same basic block are already represented as lane masks in scalar
// registers. However, values that cross basic blocks are always transferred
// between basic blocks in vreg_1 virtual registers and are lowered by this
// pass.
//
// The only instructions that use or define vreg_1 virtual registers are COPY,
// PHI, and IMPLICIT_DEF.
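//
// As an illustrative sketch (the register numbers and blocks here are
// invented for this comment, not taken from real compiler output), a wave64
// i1 value that crosses blocks, such as
//
//   bb.0:
//     %0:vreg_1 = COPY %cond
//   bb.1:
//     %1:vreg_1 = PHI %0:vreg_1, %bb.0, %2:vreg_1, %bb.1
//
// is rewritten so that %0 and %1 become sreg_64 lane masks, with
// S_AND_B64 / S_ANDN2_B64 / S_OR_B64 sequences inserted wherever masks
// arriving from different paths or loop iterations have to be merged.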
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/InitializePasses.h"

#define DEBUG_TYPE "si-i1-copies"

using namespace llvm;

static unsigned createLaneMaskReg(MachineFunction &MF);
static unsigned insertUndefLaneMask(MachineBasicBlock &MBB);
namespace {

class SILowerI1Copies : public MachineFunctionPass {
public:
  static char ID;

private:
  bool IsWave32 = false;
  MachineFunction *MF = nullptr;
  MachineDominatorTree *DT = nullptr;
  MachinePostDominatorTree *PDT = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  const GCNSubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;

  unsigned ExecReg;
  unsigned MovOp;
  unsigned AndOp;
  unsigned OrOp;
  unsigned XorOp;
  unsigned AndN2Op;
  unsigned OrN2Op;

  DenseSet<unsigned> ConstrainRegs;

public:
  SILowerI1Copies() : MachineFunctionPass(ID) {
    initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Lower i1 Copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<MachinePostDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

private:
  bool lowerCopiesFromI1();
  bool lowerPhis();
  bool lowerCopiesToI1();
  bool isConstantLaneMask(Register Reg, bool &Val) const;
  void buildMergeLaneMasks(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, const DebugLoc &DL,
                           unsigned DstReg, unsigned PrevReg, unsigned CurReg);
  MachineBasicBlock::iterator
  getSaluInsertionAtEnd(MachineBasicBlock &MBB) const;

  bool isVreg1(Register Reg) const {
    return Reg.isVirtual() && MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
  }

  bool isLaneMaskReg(unsigned Reg) const {
    return TII->getRegisterInfo().isSGPRReg(*MRI, Reg) &&
           TII->getRegisterInfo().getRegSizeInBits(Reg, *MRI) ==
               ST->getWavefrontSize();
  }
};
/// Helper class that analyzes the relationship between the incoming values of
/// a phi in the control flow graph to determine where an incoming value can
/// simply be taken as a scalar lane mask as-is, and where it needs to be
/// merged with another, previously defined lane mask.
///
/// The approach is as follows:
///  - Determine all basic blocks which, starting from the incoming blocks,
///    a wave may reach before entering the def block (the block containing the
///    phi).
///  - If an incoming block has no predecessors in this set, we can take the
///    incoming value as a scalar lane mask as-is.
///  -- A special case of this is when the def block has a self-loop.
///  - Otherwise, the incoming value needs to be merged with a previously
///    defined lane mask.
///  - If there is a path into the set of reachable blocks that does _not_ go
///    through an incoming block where we can take the scalar lane mask as-is,
///    we need to invent an available value for the SSAUpdater. Choices are
///    0 and undef, with differing consequences for how to merge values etc.
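///
/// A small worked example (the blocks are hypothetical): for a phi in block
/// D with incoming blocks B and C, where a wave may flow both B -> D and
/// B -> C -> D, block C has a predecessor (B) inside the reachable set, so
/// C's incoming value must be merged with a previously defined lane mask,
/// while B has none and is a source whose incoming value is usable as-is.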
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
/// the traversal.
class PhiIncomingAnalysis {
  MachinePostDominatorTree &PDT;

  // For each reachable basic block, whether it is a source in the induced
  // subgraph of the CFG.
  DenseMap<MachineBasicBlock *, bool> ReachableMap;
  SmallVector<MachineBasicBlock *, 4> ReachableOrdered;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> Predecessors;

public:
  PhiIncomingAnalysis(MachinePostDominatorTree &PDT) : PDT(PDT) {}

  /// Returns whether \p MBB is a source in the induced subgraph of reachable
  /// blocks.
  bool isSource(MachineBasicBlock &MBB) const {
    return ReachableMap.find(&MBB)->second;
  }

  ArrayRef<MachineBasicBlock *> predecessors() const { return Predecessors; }
  void analyze(MachineBasicBlock &DefBlock,
               ArrayRef<MachineBasicBlock *> IncomingBlocks) {
    assert(Stack.empty());
    ReachableMap.clear();
    ReachableOrdered.clear();
    Predecessors.clear();

    // Insert the def block first, so that it acts as an end point for the
    // traversal.
    ReachableMap.try_emplace(&DefBlock, false);
    ReachableOrdered.push_back(&DefBlock);

    for (MachineBasicBlock *MBB : IncomingBlocks) {
      if (MBB == &DefBlock) {
        ReachableMap[&DefBlock] = true; // self-loop on DefBlock
        continue;
      }

      ReachableMap.try_emplace(MBB, false);
      ReachableOrdered.push_back(MBB);

      // If this block has a divergent terminator and the def block is its
      // post-dominator, the wave may first visit the other successors.
      bool Divergent = false;
      for (MachineInstr &MI : MBB->terminators()) {
        if (MI.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO ||
            MI.getOpcode() == AMDGPU::SI_IF ||
            MI.getOpcode() == AMDGPU::SI_ELSE ||
            MI.getOpcode() == AMDGPU::SI_LOOP) {
          Divergent = true;
          break;
        }
      }

      if (Divergent && PDT.dominates(&DefBlock, MBB))
        append_range(Stack, MBB->successors());
    }

    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!ReachableMap.try_emplace(MBB, false).second)
        continue;
      ReachableOrdered.push_back(MBB);

      append_range(Stack, MBB->successors());
    }

    for (MachineBasicBlock *MBB : ReachableOrdered) {
      bool HaveReachablePred = false;
      for (MachineBasicBlock *Pred : MBB->predecessors()) {
        if (ReachableMap.count(Pred)) {
          HaveReachablePred = true;
        } else {
          Stack.push_back(Pred);
        }
      }
      if (!HaveReachablePred)
        ReachableMap[MBB] = true;
      if (HaveReachablePred) {
        for (MachineBasicBlock *UnreachablePred : Stack) {
          if (!llvm::is_contained(Predecessors, UnreachablePred))
            Predecessors.push_back(UnreachablePred);
        }
      }
      Stack.clear();
    }
  }
};
/// Helper class that detects loops which require us to lower an i1 COPY into
/// bitwise manipulation.
///
/// Unfortunately, we cannot use LoopInfo because LoopInfo does not distinguish
/// between loops with the same header. Consider this example:
///
///  A-+-+
///  | | |
///  B-+ |
///  |   |
///  C---+
///
/// A is the header of a loop containing A, B, and C as far as LoopInfo is
/// concerned. However, an i1 COPY in B that is used in C must be lowered to
/// bitwise operations to combine results from different loop iterations when
/// B has a divergent branch (since by default we will compile this code such
/// that threads in a wave are merged at the entry of C).
///
/// The following rule is implemented to determine whether bitwise operations
/// are required: use the bitwise lowering for a def in block B if a backward
/// edge to B is reachable without going through the nearest common
/// post-dominator of B and all uses of the def.
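///
/// Applied to the example above (a sketch of the rule, not real output): for
/// a def in B with a use in C, the nearest common post-dominator of B and all
/// uses is C, and a backward edge to B is reachable from B via A without
/// passing through C, so the def must use the bitwise lowering.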
///
/// TODO: This rule is conservative because it does not check whether the
/// relevant branches are actually divergent.
///
/// The class is designed to cache the CFG traversal so that it can be re-used
/// for multiple defs within the same basic block.
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
/// the traversal.
class LoopFinder {
  MachineDominatorTree &DT;
  MachinePostDominatorTree &PDT;

  // All visited / reachable blocks, tagged by level (level 0 is the def block,
  // level 1 are all blocks reachable including but not going through the def
  // block's IPDOM, etc.).
  DenseMap<MachineBasicBlock *, unsigned> Visited;

  // Nearest common dominator of all visited blocks by level (level 0 is the
  // def block). Used for seeding the SSAUpdater.
  SmallVector<MachineBasicBlock *, 4> CommonDominators;

  // Post-dominator of all visited blocks.
  MachineBasicBlock *VisitedPostDom = nullptr;

  // Level at which a loop was found: 0 is not possible; 1 = a backward edge is
  // reachable without going through the IPDOM of the def block (if the IPDOM
  // itself has an edge to the def block, the loop level is 2), etc.
  unsigned FoundLoopLevel = ~0u;

  MachineBasicBlock *DefBlock = nullptr;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> NextLevel;

public:
  LoopFinder(MachineDominatorTree &DT, MachinePostDominatorTree &PDT)
      : DT(DT), PDT(PDT) {}

  void initialize(MachineBasicBlock &MBB) {
    Visited.clear();
    CommonDominators.clear();
    Stack.clear();
    NextLevel.clear();
    VisitedPostDom = nullptr;
    FoundLoopLevel = ~0u;

    DefBlock = &MBB;
  }
  /// Check whether a backward edge can be reached without going through the
  /// given \p PostDom of the def block.
  ///
  /// Return the level of \p PostDom if a loop was found, or 0 otherwise.
  unsigned findLoop(MachineBasicBlock *PostDom) {
    MachineDomTreeNode *PDNode = PDT.getNode(DefBlock);

    if (!VisitedPostDom)
      advanceLevel();

    unsigned Level = 0;
    while (PDNode->getBlock() != PostDom) {
      if (PDNode->getBlock() == VisitedPostDom)
        advanceLevel();
      PDNode = PDNode->getIDom();
      Level++;
      if (FoundLoopLevel == Level)
        return Level;
    }

    return 0;
  }
  /// Add undef values dominating the loop and the optionally given additional
  /// blocks, so that the SSA updater doesn't have to search all the way to the
  /// function entry.
  void addLoopEntries(unsigned LoopLevel, MachineSSAUpdater &SSAUpdater,
                      ArrayRef<MachineBasicBlock *> Blocks = {}) {
    assert(LoopLevel < CommonDominators.size());

    MachineBasicBlock *Dom = CommonDominators[LoopLevel];
    for (MachineBasicBlock *MBB : Blocks)
      Dom = DT.findNearestCommonDominator(Dom, MBB);

    if (!inLoopLevel(*Dom, LoopLevel, Blocks)) {
      SSAUpdater.AddAvailableValue(Dom, insertUndefLaneMask(*Dom));
    } else {
      // The dominator is part of the loop or the given blocks, so add the
      // undef value to unreachable predecessors instead.
      for (MachineBasicBlock *Pred : Dom->predecessors()) {
        if (!inLoopLevel(*Pred, LoopLevel, Blocks))
          SSAUpdater.AddAvailableValue(Pred, insertUndefLaneMask(*Pred));
      }
    }
  }

private:
  bool inLoopLevel(MachineBasicBlock &MBB, unsigned LoopLevel,
                   ArrayRef<MachineBasicBlock *> Blocks) const {
    auto DomIt = Visited.find(&MBB);
    if (DomIt != Visited.end() && DomIt->second <= LoopLevel)
      return true;

    if (llvm::is_contained(Blocks, &MBB))
      return true;

    return false;
  }
  void advanceLevel() {
    MachineBasicBlock *VisitedDom;

    if (!VisitedPostDom) {
      VisitedPostDom = DefBlock;
      VisitedDom = DefBlock;
      Stack.push_back(DefBlock);
    } else {
      VisitedPostDom = PDT.getNode(VisitedPostDom)->getIDom()->getBlock();
      VisitedDom = CommonDominators.back();

      for (unsigned i = 0; i < NextLevel.size();) {
        if (PDT.dominates(VisitedPostDom, NextLevel[i])) {
          Stack.push_back(NextLevel[i]);

          NextLevel[i] = NextLevel.back();
          NextLevel.pop_back();
        } else {
          i++;
        }
      }
    }

    unsigned Level = CommonDominators.size();
    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!PDT.dominates(VisitedPostDom, MBB))
        NextLevel.push_back(MBB);

      Visited[MBB] = Level;
      VisitedDom = DT.findNearestCommonDominator(VisitedDom, MBB);

      for (MachineBasicBlock *Succ : MBB->successors()) {
        if (Succ == DefBlock) {
          if (MBB == VisitedPostDom)
            FoundLoopLevel = std::min(FoundLoopLevel, Level + 1);
          else
            FoundLoopLevel = std::min(FoundLoopLevel, Level);
          continue;
        }

        if (Visited.try_emplace(Succ, ~0u).second) {
          if (MBB == VisitedPostDom)
            NextLevel.push_back(Succ);
          else
            Stack.push_back(Succ);
        }
      }
    }

    CommonDominators.push_back(VisitedDom);
  }
};

} // End anonymous namespace.
INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                    false)

char SILowerI1Copies::ID = 0;

char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;

FunctionPass *llvm::createSILowerI1CopiesPass() {
  return new SILowerI1Copies();
}
static unsigned createLaneMaskReg(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  return MRI.createVirtualRegister(ST.isWave32() ? &AMDGPU::SReg_32RegClass
                                                 : &AMDGPU::SReg_64RegClass);
}

static unsigned insertUndefLaneMask(MachineBasicBlock &MBB) {
  MachineFunction &MF = *MBB.getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned UndefReg = createLaneMaskReg(MF);
  BuildMI(MBB, MBB.getFirstTerminator(), {}, TII->get(AMDGPU::IMPLICIT_DEF),
          UndefReg);
  return UndefReg;
}
/// Lower all instructions that def or use vreg_1 registers.
///
/// In a first pass, we lower COPYs from vreg_1 to vector registers, as can
/// occur around inline assembly. We do this first, before vreg_1 registers
/// are changed to scalar mask registers.
///
/// Then we lower all defs of vreg_1 registers. Phi nodes are lowered before
/// all others, because phi lowering looks through copies and can therefore
/// often make copy lowering unnecessary.
bool SILowerI1Copies::runOnMachineFunction(MachineFunction &TheMF) {
  // This pass only needs to run in the SelectionDAG path.
  if (TheMF.getProperties().hasProperty(
          MachineFunctionProperties::Property::Selected))
    return false;

  MF = &TheMF;
  MRI = &MF->getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();

  ST = &MF->getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  IsWave32 = ST->isWave32();

  if (IsWave32) {
    ExecReg = AMDGPU::EXEC_LO;
    MovOp = AMDGPU::S_MOV_B32;
    AndOp = AMDGPU::S_AND_B32;
    OrOp = AMDGPU::S_OR_B32;
    XorOp = AMDGPU::S_XOR_B32;
    AndN2Op = AMDGPU::S_ANDN2_B32;
    OrN2Op = AMDGPU::S_ORN2_B32;
  } else {
    ExecReg = AMDGPU::EXEC;
    MovOp = AMDGPU::S_MOV_B64;
    AndOp = AMDGPU::S_AND_B64;
    OrOp = AMDGPU::S_OR_B64;
    XorOp = AMDGPU::S_XOR_B64;
    AndN2Op = AMDGPU::S_ANDN2_B64;
    OrN2Op = AMDGPU::S_ORN2_B64;
  }

  bool Changed = false;
  Changed |= lowerCopiesFromI1();
  Changed |= lowerPhis();
  Changed |= lowerCopiesToI1();

  assert(Changed || ConstrainRegs.empty());
  for (unsigned Reg : ConstrainRegs)
    MRI->constrainRegClass(Reg, &AMDGPU::SReg_1_XEXECRegClass);
  ConstrainRegs.clear();

  return Changed;
}
#ifndef NDEBUG
static bool isVRegCompatibleReg(const SIRegisterInfo &TRI,
                                const MachineRegisterInfo &MRI,
                                Register Reg) {
  unsigned Size = TRI.getRegSizeInBits(Reg, MRI);
  return Size == 1 || Size == 32;
}
#endif
bool SILowerI1Copies::lowerCopiesFromI1() {
  bool Changed = false;
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      Register SrcReg = MI.getOperand(1).getReg();
      if (!isVreg1(SrcReg))
        continue;

      if (isLaneMaskReg(DstReg) || isVreg1(DstReg))
        continue;

      Changed = true;

      // Copy into a 32-bit vector register: the V_CNDMASK writes -1 to each
      // active lane whose bit is set in the lane mask and 0 otherwise.
      LLVM_DEBUG(dbgs() << "Lower copy from i1: " << MI);
      DebugLoc DL = MI.getDebugLoc();

      assert(isVRegCompatibleReg(TII->getRegisterInfo(), *MRI, DstReg));
      assert(!MI.getOperand(0).getSubReg());

      ConstrainRegs.insert(SrcReg);
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addImm(0)
          .addImm(0)
          .addImm(-1)
          .addReg(SrcReg);
      DeadCopies.push_back(&MI);
    }
  }
  for (MachineInstr *MI : DeadCopies)
    MI->eraseFromParent();
  DeadCopies.clear();

  return Changed;
}
bool SILowerI1Copies::lowerPhis() {
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  PhiIncomingAnalysis PIA(*PDT);
  SmallVector<MachineInstr *, 4> Vreg1Phis;
  SmallVector<MachineBasicBlock *, 4> IncomingBlocks;
  SmallVector<unsigned, 4> IncomingRegs;
  SmallVector<unsigned, 4> IncomingUpdated;
#ifndef NDEBUG
  DenseSet<unsigned> PhiRegisters;
#endif

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB.phis()) {
      if (isVreg1(MI.getOperand(0).getReg()))
        Vreg1Phis.push_back(&MI);
    }
  }
  if (Vreg1Phis.empty())
    return false;
  MachineBasicBlock *PrevMBB = nullptr;
  for (MachineInstr *MI : Vreg1Phis) {
    MachineBasicBlock &MBB = *MI->getParent();
    if (&MBB != PrevMBB) {
      LF.initialize(MBB);
      PrevMBB = &MBB;
    }

    LLVM_DEBUG(dbgs() << "Lower PHI: " << *MI);

    Register DstReg = MI->getOperand(0).getReg();
    MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass
                                      : &AMDGPU::SReg_64RegClass);

    // Collect incoming values.
    for (unsigned i = 1; i < MI->getNumOperands(); i += 2) {
      assert(i + 1 < MI->getNumOperands());
      Register IncomingReg = MI->getOperand(i).getReg();
      MachineBasicBlock *IncomingMBB = MI->getOperand(i + 1).getMBB();
      MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg);

      if (IncomingDef->getOpcode() == AMDGPU::COPY) {
        IncomingReg = IncomingDef->getOperand(1).getReg();
        assert(isLaneMaskReg(IncomingReg) || isVreg1(IncomingReg));
        assert(!IncomingDef->getOperand(1).getSubReg());
      } else if (IncomingDef->getOpcode() == AMDGPU::IMPLICIT_DEF) {
        continue;
      } else {
        assert(IncomingDef->isPHI() || PhiRegisters.count(IncomingReg));
      }

      IncomingBlocks.push_back(IncomingMBB);
      IncomingRegs.push_back(IncomingReg);
    }

#ifndef NDEBUG
    PhiRegisters.insert(DstReg);
#endif

    // Phis in a loop that are observed outside the loop receive a simple but
    // conservatively correct treatment.
    std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
    for (MachineInstr &Use : MRI->use_instructions(DstReg))
      DomBlocks.push_back(Use.getParent());

    MachineBasicBlock *PostDomBound =
        PDT->findNearestCommonDominator(DomBlocks);

    // FIXME: This fails to find irreducible cycles. If we have a def (other
    // than a constant) in a pair of blocks that end up looping back to each
    // other, it will be mishandled. Due to structurization, this shouldn't
    // occur in practice.
    unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
    SSAUpdater.Initialize(DstReg);

    if (FoundLoopLevel) {
      LF.addLoopEntries(FoundLoopLevel, SSAUpdater, IncomingBlocks);

      for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
        IncomingUpdated.push_back(createLaneMaskReg(*MF));
        SSAUpdater.AddAvailableValue(IncomingBlocks[i],
                                     IncomingUpdated.back());
      }

      for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
        MachineBasicBlock &IMBB = *IncomingBlocks[i];
        buildMergeLaneMasks(
            IMBB, getSaluInsertionAtEnd(IMBB), {}, IncomingUpdated[i],
            SSAUpdater.GetValueInMiddleOfBlock(&IMBB), IncomingRegs[i]);
      }
    } else {
      // The phi is not observed from outside a loop. Use a more accurate
      // lowering.
      PIA.analyze(MBB, IncomingBlocks);

      for (MachineBasicBlock *MBB : PIA.predecessors())
        SSAUpdater.AddAvailableValue(MBB, insertUndefLaneMask(*MBB));

      for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
        MachineBasicBlock &IMBB = *IncomingBlocks[i];
        if (PIA.isSource(IMBB)) {
          IncomingUpdated.push_back(0);
          SSAUpdater.AddAvailableValue(&IMBB, IncomingRegs[i]);
        } else {
          IncomingUpdated.push_back(createLaneMaskReg(*MF));
          SSAUpdater.AddAvailableValue(&IMBB, IncomingUpdated.back());
        }
      }

      for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
        if (!IncomingUpdated[i])
          continue;

        MachineBasicBlock &IMBB = *IncomingBlocks[i];
        buildMergeLaneMasks(
            IMBB, getSaluInsertionAtEnd(IMBB), {}, IncomingUpdated[i],
            SSAUpdater.GetValueInMiddleOfBlock(&IMBB), IncomingRegs[i]);
      }
    }

    Register NewReg = SSAUpdater.GetValueInMiddleOfBlock(&MBB);
    if (NewReg != DstReg) {
      MRI->replaceRegWith(NewReg, DstReg);
      MI->eraseFromParent();
    }

    IncomingBlocks.clear();
    IncomingRegs.clear();
    IncomingUpdated.clear();
  }
  return true;
}
bool SILowerI1Copies::lowerCopiesToI1() {
  bool Changed = false;
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    LF.initialize(MBB);

    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::IMPLICIT_DEF &&
          MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      if (!isVreg1(DstReg))
        continue;

      Changed = true;

      if (MRI->use_empty(DstReg)) {
        DeadCopies.push_back(&MI);
        continue;
      }

      LLVM_DEBUG(dbgs() << "Lower Other: " << MI);

      MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass
                                        : &AMDGPU::SReg_64RegClass);
      if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF)
        continue;

      DebugLoc DL = MI.getDebugLoc();
      Register SrcReg = MI.getOperand(1).getReg();
      assert(!MI.getOperand(1).getSubReg());

      if (!SrcReg.isVirtual() || (!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
        // Materialize a lane mask from a plain 32-bit source by comparing it
        // against zero in each lane.
        assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
        unsigned TmpReg = createLaneMaskReg(*MF);
        BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64), TmpReg)
            .addReg(SrcReg)
            .addImm(0);
        MI.getOperand(1).setReg(TmpReg);
        SrcReg = TmpReg;
      }

      // Defs in a loop that are observed outside the loop must be transformed
      // into appropriate bit manipulation.
      std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
      for (MachineInstr &Use : MRI->use_instructions(DstReg))
        DomBlocks.push_back(Use.getParent());

      MachineBasicBlock *PostDomBound =
          PDT->findNearestCommonDominator(DomBlocks);
      unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
      if (FoundLoopLevel) {
        SSAUpdater.Initialize(DstReg);
        SSAUpdater.AddAvailableValue(&MBB, DstReg);
        LF.addLoopEntries(FoundLoopLevel, SSAUpdater);

        buildMergeLaneMasks(MBB, MI, DL, DstReg,
                            SSAUpdater.GetValueInMiddleOfBlock(&MBB), SrcReg);
        DeadCopies.push_back(&MI);
      }
    }
  }
  for (MachineInstr *MI : DeadCopies)
    MI->eraseFromParent();
  DeadCopies.clear();

  return Changed;
}
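/// Return true if \p Reg is a constant lane mask, looking through copies of
/// lane-mask registers. An IMPLICIT_DEF counts as constant (\p Val is left
/// unchanged); an S_MOV of 0 or -1 sets \p Val to false or true, respectively.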
bool SILowerI1Copies::isConstantLaneMask(Register Reg, bool &Val) const {
  const MachineInstr *MI;
  for (;;) {
    MI = MRI->getUniqueVRegDef(Reg);
    if (MI->getOpcode() == AMDGPU::IMPLICIT_DEF)
      return true;

    if (MI->getOpcode() != AMDGPU::COPY)
      break;

    Reg = MI->getOperand(1).getReg();
    if (!Reg.isVirtual())
      return false;
    if (!isLaneMaskReg(Reg))
      return false;
  }

  if (MI->getOpcode() != MovOp)
    return false;

  if (!MI->getOperand(1).isImm())
    return false;

  int64_t Imm = MI->getOperand(1).getImm();
  if (Imm == 0) {
    Val = false;
    return true;
  }
  if (Imm == -1) {
    Val = true;
    return true;
  }

  return false;
}
static void instrDefsUsesSCC(const MachineInstr &MI, bool &Def, bool &Use) {
  Def = false;
  Use = false;

  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.getReg() == AMDGPU::SCC) {
      if (MO.isUse())
        Use = true;
      else
        Def = true;
    }
  }
}
/// Return a point at the end of the given \p MBB to insert SALU instructions
/// for lane mask calculation. Take terminators and SCC into account.
MachineBasicBlock::iterator
SILowerI1Copies::getSaluInsertionAtEnd(MachineBasicBlock &MBB) const {
  auto InsertionPt = MBB.getFirstTerminator();
  bool TerminatorsUseSCC = false;
  for (auto I = InsertionPt, E = MBB.end(); I != E; ++I) {
    bool DefsSCC;
    instrDefsUsesSCC(*I, DefsSCC, TerminatorsUseSCC);
    if (TerminatorsUseSCC || DefsSCC)
      break;
  }

  if (!TerminatorsUseSCC)
    return InsertionPt;

  while (InsertionPt != MBB.begin()) {
    InsertionPt--;

    bool DefSCC, UseSCC;
    instrDefsUsesSCC(*InsertionPt, DefSCC, UseSCC);
    if (DefSCC)
      return InsertionPt;
  }

  // We should have at least seen an IMPLICIT_DEF or COPY.
  llvm_unreachable("SCC used by terminator but no def in block");
}
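/// Merge the lane masks \p PrevReg and \p CurReg into \p DstReg: lanes that
/// are inactive in EXEC keep their value from PrevReg, while active lanes take
/// the value from CurReg. In the fully general case (neither input is a known
/// constant) this expands to the equivalent of
///
///   PrevMasked = PrevReg & ~EXEC          // AndN2Op
///   CurMasked  = CurReg  &  EXEC          // AndOp
///   DstReg     = PrevMasked | CurMasked   // OrOp
///
/// with cheaper sequences emitted below when either input is constant.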
void SILowerI1Copies::buildMergeLaneMasks(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          const DebugLoc &DL, unsigned DstReg,
                                          unsigned PrevReg, unsigned CurReg) {
  bool PrevVal = false;
  bool PrevConstant = isConstantLaneMask(PrevReg, PrevVal);
  bool CurVal = false;
  bool CurConstant = isConstantLaneMask(CurReg, CurVal);

  if (PrevConstant && CurConstant) {
    if (PrevVal == CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg);
    } else if (CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(ExecReg);
    } else {
      BuildMI(MBB, I, DL, TII->get(XorOp), DstReg)
          .addReg(ExecReg)
          .addImm(-1);
    }
    return;
  }

  unsigned PrevMaskedReg = 0;
  unsigned CurMaskedReg = 0;
  if (!PrevConstant) {
    if (CurConstant && CurVal) {
      PrevMaskedReg = PrevReg;
    } else {
      PrevMaskedReg = createLaneMaskReg(*MF);
      BuildMI(MBB, I, DL, TII->get(AndN2Op), PrevMaskedReg)
          .addReg(PrevReg)
          .addReg(ExecReg);
    }
  }
  if (!CurConstant) {
    // TODO: check whether CurReg is already masked by EXEC
    if (PrevConstant && PrevVal) {
      CurMaskedReg = CurReg;
    } else {
      CurMaskedReg = createLaneMaskReg(*MF);
      BuildMI(MBB, I, DL, TII->get(AndOp), CurMaskedReg)
          .addReg(CurReg)
          .addReg(ExecReg);
    }
  }

  if (PrevConstant && !PrevVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(CurMaskedReg);
  } else if (CurConstant && !CurVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(PrevMaskedReg);
  } else if (PrevConstant && PrevVal) {
    BuildMI(MBB, I, DL, TII->get(OrN2Op), DstReg)
        .addReg(CurMaskedReg)
        .addReg(ExecReg);
  } else {
    BuildMI(MBB, I, DL, TII->get(OrOp), DstReg)
        .addReg(PrevMaskedReg)
        .addReg(CurMaskedReg ? CurMaskedReg : ExecReg);
  }
}