//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers all occurrences of i1 values (with a vreg_1 register class)
// to lane masks (32 / 64-bit scalar registers). The pass assumes machine SSA
// form and a wave-level control flow graph.
//
// Before this pass, values that are semantically i1 and are defined and used
// within the same basic block are already represented as lane masks in scalar
// registers. However, values that cross basic blocks are always transferred
// between basic blocks in vreg_1 virtual registers and are lowered by this
// pass.
//
// The only instructions that use or define vreg_1 virtual registers are COPY,
// PHI, and IMPLICIT_DEF.
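//
// When a lowered value is carried across divergent control flow, the lane
// masks arriving from different paths may need to be combined under the EXEC
// mask; see buildMergeLaneMasks() below for how this is done.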
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-i1-copies"

using namespace llvm;

static unsigned createLaneMaskReg(MachineFunction &MF);
static unsigned insertUndefLaneMask(MachineBasicBlock &MBB);

namespace {

class SILowerI1Copies : public MachineFunctionPass {
public:
  static char ID;

private:
  bool IsWave32 = false;
  MachineFunction *MF = nullptr;
  MachineDominatorTree *DT = nullptr;
  MachinePostDominatorTree *PDT = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  const GCNSubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;

  unsigned ExecReg;
  unsigned MovOp;
  unsigned AndOp;
  unsigned OrOp;
  unsigned XorOp;
  unsigned AndN2Op;
  unsigned OrN2Op;

  DenseSet<unsigned> ConstrainRegs;

public:
  SILowerI1Copies() : MachineFunctionPass(ID) {
    initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Lower i1 Copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<MachinePostDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

private:
  void lowerCopiesFromI1();
  void lowerPhis();
  void lowerCopiesToI1();
  bool isConstantLaneMask(unsigned Reg, bool &Val) const;
  void buildMergeLaneMasks(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, const DebugLoc &DL,
                           unsigned DstReg, unsigned PrevReg, unsigned CurReg);
  MachineBasicBlock::iterator
  getSaluInsertionAtEnd(MachineBasicBlock &MBB) const;

  bool isVreg1(unsigned Reg) const {
    return Register::isVirtualRegister(Reg) &&
           MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
  }

  bool isLaneMaskReg(unsigned Reg) const {
    return TII->getRegisterInfo().isSGPRReg(*MRI, Reg) &&
           TII->getRegisterInfo().getRegSizeInBits(Reg, *MRI) ==
               ST->getWavefrontSize();
  }
};

/// Helper class that determines the relationship between incoming values of a
/// phi in the control flow graph to determine where an incoming value can
/// simply be taken as a scalar lane mask as-is, and where it needs to be
/// merged with another, previously defined lane mask.
///
/// The approach is as follows:
///  - Determine all basic blocks which, starting from the incoming blocks,
///    a wave may reach before entering the def block (the block containing the
///    phi).
///  - If an incoming block has no predecessors in this set, we can take the
///    incoming value as a scalar lane mask as-is.
///  -- A special case of this is when the def block has a self-loop.
///  - Otherwise, the incoming value needs to be merged with a previously
///    defined lane mask.
///  - If there is a path into the set of reachable blocks that does _not_ go
///    through an incoming block where we can take the scalar lane mask as-is,
///    we need to invent an available value for the SSAUpdater. Choices are
///    0 and undef, with differing consequences for how to merge values etc.
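///
/// For example, if the def block post-dominates an incoming block that ends in
/// a divergent branch, the wave may execute that block's other successors
/// before re-converging at the def block; those successors become part of the
/// reachable set, and the incoming value can only be used as-is if none of the
/// incoming block's predecessors are in that set.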
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
/// the traversal.

class PhiIncomingAnalysis {
  MachinePostDominatorTree &PDT;

  // For each reachable basic block, whether it is a source in the induced
  // subgraph of the CFG.
  DenseMap<MachineBasicBlock *, bool> ReachableMap;
  SmallVector<MachineBasicBlock *, 4> ReachableOrdered;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> Predecessors;

public:
  PhiIncomingAnalysis(MachinePostDominatorTree &PDT) : PDT(PDT) {}

  /// Returns whether \p MBB is a source in the induced subgraph of reachable
  /// blocks.
  bool isSource(MachineBasicBlock &MBB) const {
    return ReachableMap.find(&MBB)->second;
  }

  ArrayRef<MachineBasicBlock *> predecessors() const { return Predecessors; }

  void analyze(MachineBasicBlock &DefBlock,
               ArrayRef<MachineBasicBlock *> IncomingBlocks) {
    assert(Stack.empty());
    ReachableMap.clear();
    ReachableOrdered.clear();
    Predecessors.clear();

    // Insert the def block first, so that it acts as an end point for the
    // traversal.
    ReachableMap.try_emplace(&DefBlock, false);
    ReachableOrdered.push_back(&DefBlock);

    for (MachineBasicBlock *MBB : IncomingBlocks) {
      if (MBB == &DefBlock) {
        ReachableMap[&DefBlock] = true; // self-loop on DefBlock
        continue;
      }

      ReachableMap.try_emplace(MBB, false);
      ReachableOrdered.push_back(MBB);

      // If this block has a divergent terminator and the def block is its
      // post-dominator, the wave may first visit the other successors.
      bool Divergent = false;
      for (MachineInstr &MI : MBB->terminators()) {
        if (MI.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO ||
            MI.getOpcode() == AMDGPU::SI_IF ||
            MI.getOpcode() == AMDGPU::SI_ELSE ||
            MI.getOpcode() == AMDGPU::SI_LOOP) {
          Divergent = true;
          break;
        }
      }

      if (Divergent && PDT.dominates(&DefBlock, MBB)) {
        for (MachineBasicBlock *Succ : MBB->successors())
          Stack.push_back(Succ);
      }
    }

    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!ReachableMap.try_emplace(MBB, false).second)
        continue;
      ReachableOrdered.push_back(MBB);

      for (MachineBasicBlock *Succ : MBB->successors())
        Stack.push_back(Succ);
    }

    for (MachineBasicBlock *MBB : ReachableOrdered) {
      bool HaveReachablePred = false;
      for (MachineBasicBlock *Pred : MBB->predecessors()) {
        if (ReachableMap.count(Pred)) {
          HaveReachablePred = true;
        } else {
          Stack.push_back(Pred);
        }
      }
      if (!HaveReachablePred)
        ReachableMap[MBB] = true;
      if (HaveReachablePred) {
        for (MachineBasicBlock *UnreachablePred : Stack) {
          if (llvm::find(Predecessors, UnreachablePred) == Predecessors.end())
            Predecessors.push_back(UnreachablePred);
        }
      }
      Stack.clear();
    }
  }
};

/// Helper class that detects loops which require us to lower an i1 COPY into
/// bitwise manipulation.
///
/// Unfortunately, we cannot use LoopInfo because LoopInfo does not distinguish
/// between loops with the same header. Consider this example:
///
///  A-+-+
///  | | |
///  B-+ |
///  |   |
///  C---+
///
/// A is the header of a loop containing A, B, and C as far as LoopInfo is
/// concerned. However, an i1 COPY in B that is used in C must be lowered to
/// bitwise operations to combine results from different loop iterations when
/// B has a divergent branch (since by default we will compile this code such
/// that threads in a wave are merged at the entry of C).
///
/// The following rule is implemented to determine whether bitwise operations
/// are required: use the bitwise lowering for a def in block B if a backward
/// edge to B is reachable without going through the nearest common
/// post-dominator of B and all uses of the def.
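///
/// In the example above, for a def in B that is used in C, the nearest common
/// post-dominator is C itself, and a backward edge into B (via B->A->B) is
/// reachable without passing through C, so the bitwise lowering is used.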
///
/// TODO: This rule is conservative because it does not check whether the
/// relevant branches are actually divergent.
///
/// The class is designed to cache the CFG traversal so that it can be re-used
/// for multiple defs within the same basic block.
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
/// the traversal.

class LoopFinder {
  MachineDominatorTree &DT;
  MachinePostDominatorTree &PDT;

  // All visited / reachable blocks, tagged by level (level 0 is the def block,
  // level 1 are all blocks reachable including but not going through the def
  // block's IPDOM, etc.).
  DenseMap<MachineBasicBlock *, unsigned> Visited;

  // Nearest common dominator of all visited blocks by level (level 0 is the
  // def block). Used for seeding the SSAUpdater.
  SmallVector<MachineBasicBlock *, 4> CommonDominators;

  // Post-dominator of all visited blocks.
  MachineBasicBlock *VisitedPostDom = nullptr;

  // Level at which a loop was found: 0 is not possible; 1 = a backward edge is
  // reachable without going through the IPDOM of the def block (if the IPDOM
  // itself has an edge to the def block, the loop level is 2), etc.
  unsigned FoundLoopLevel = ~0u;

  MachineBasicBlock *DefBlock = nullptr;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> NextLevel;

public:
  LoopFinder(MachineDominatorTree &DT, MachinePostDominatorTree &PDT)
      : DT(DT), PDT(PDT) {}

  void initialize(MachineBasicBlock &MBB) {
    Visited.clear();
    CommonDominators.clear();
    Stack.clear();
    NextLevel.clear();
    VisitedPostDom = nullptr;
    FoundLoopLevel = ~0u;

    DefBlock = &MBB;
  }

  /// Check whether a backward edge can be reached without going through the
  /// given \p PostDom of the def block.
  ///
  /// Return the level of \p PostDom if a loop was found, or 0 otherwise.
  unsigned findLoop(MachineBasicBlock *PostDom) {
    MachineDomTreeNode *PDNode = PDT.getNode(DefBlock);

    if (!VisitedPostDom)
      advanceLevel();

    unsigned Level = 0;
    while (PDNode->getBlock() != PostDom) {
      if (PDNode->getBlock() == VisitedPostDom)
        advanceLevel();
      PDNode = PDNode->getIDom();
      Level++;
      if (FoundLoopLevel == Level)
        return Level;
    }

    return 0;
  }

  /// Add undef values dominating the loop and the optionally given additional
  /// blocks, so that the SSA updater doesn't have to search all the way to the
  /// function entry.
  void addLoopEntries(unsigned LoopLevel, MachineSSAUpdater &SSAUpdater,
                      ArrayRef<MachineBasicBlock *> Blocks = {}) {
    assert(LoopLevel < CommonDominators.size());

    MachineBasicBlock *Dom = CommonDominators[LoopLevel];
    for (MachineBasicBlock *MBB : Blocks)
      Dom = DT.findNearestCommonDominator(Dom, MBB);

    if (!inLoopLevel(*Dom, LoopLevel, Blocks)) {
      SSAUpdater.AddAvailableValue(Dom, insertUndefLaneMask(*Dom));
    } else {
      // The dominator is part of the loop or the given blocks, so add the
      // undef value to unreachable predecessors instead.
      for (MachineBasicBlock *Pred : Dom->predecessors()) {
        if (!inLoopLevel(*Pred, LoopLevel, Blocks))
          SSAUpdater.AddAvailableValue(Pred, insertUndefLaneMask(*Pred));
      }
    }
  }

private:
  bool inLoopLevel(MachineBasicBlock &MBB, unsigned LoopLevel,
                   ArrayRef<MachineBasicBlock *> Blocks) const {
    auto DomIt = Visited.find(&MBB);
    if (DomIt != Visited.end() && DomIt->second <= LoopLevel)
      return true;

    if (llvm::find(Blocks, &MBB) != Blocks.end())
      return true;

    return false;
  }

  void advanceLevel() {
    MachineBasicBlock *VisitedDom;

    if (!VisitedPostDom) {
      VisitedPostDom = DefBlock;
      VisitedDom = DefBlock;
      Stack.push_back(DefBlock);
    } else {
      VisitedPostDom = PDT.getNode(VisitedPostDom)->getIDom()->getBlock();
      VisitedDom = CommonDominators.back();

      for (unsigned i = 0; i < NextLevel.size();) {
        if (PDT.dominates(VisitedPostDom, NextLevel[i])) {
          Stack.push_back(NextLevel[i]);

          NextLevel[i] = NextLevel.back();
          NextLevel.pop_back();
        } else {
          i++;
        }
      }
    }

    unsigned Level = CommonDominators.size();
    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!PDT.dominates(VisitedPostDom, MBB))
        NextLevel.push_back(MBB);

      Visited[MBB] = Level;
      VisitedDom = DT.findNearestCommonDominator(VisitedDom, MBB);

      for (MachineBasicBlock *Succ : MBB->successors()) {
        if (Succ == DefBlock) {
          if (MBB == VisitedPostDom)
            FoundLoopLevel = std::min(FoundLoopLevel, Level + 1);
          else
            FoundLoopLevel = std::min(FoundLoopLevel, Level);
          continue;
        }

        if (Visited.try_emplace(Succ, ~0u).second) {
          if (MBB == VisitedPostDom)
            NextLevel.push_back(Succ);
          else
            Stack.push_back(Succ);
        }
      }
    }

    CommonDominators.push_back(VisitedDom);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                    false)

char SILowerI1Copies::ID = 0;

char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;

FunctionPass *llvm::createSILowerI1CopiesPass() {
  return new SILowerI1Copies();
}

static unsigned createLaneMaskReg(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  return MRI.createVirtualRegister(ST.isWave32() ? &AMDGPU::SReg_32RegClass
                                                 : &AMDGPU::SReg_64RegClass);
}

static unsigned insertUndefLaneMask(MachineBasicBlock &MBB) {
  MachineFunction &MF = *MBB.getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned UndefReg = createLaneMaskReg(MF);
  BuildMI(MBB, MBB.getFirstTerminator(), {}, TII->get(AMDGPU::IMPLICIT_DEF),
          UndefReg);
  return UndefReg;
}

/// Lower all instructions that def or use vreg_1 registers.
///
/// In a first pass, we lower COPYs from vreg_1 to vector registers, as can
/// occur around inline assembly. We do this first, before vreg_1 registers
/// are changed to scalar mask registers.
///
/// Then we lower all defs of vreg_1 registers. Phi nodes are lowered before
/// all others, because phi lowering looks through copies and can therefore
/// often make copy lowering unnecessary.
bool SILowerI1Copies::runOnMachineFunction(MachineFunction &TheMF) {
  MF = &TheMF;
  MRI = &MF->getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();

  ST = &MF->getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  IsWave32 = ST->isWave32();

  if (IsWave32) {
    ExecReg = AMDGPU::EXEC_LO;
    MovOp = AMDGPU::S_MOV_B32;
    AndOp = AMDGPU::S_AND_B32;
    OrOp = AMDGPU::S_OR_B32;
    XorOp = AMDGPU::S_XOR_B32;
    AndN2Op = AMDGPU::S_ANDN2_B32;
    OrN2Op = AMDGPU::S_ORN2_B32;
  } else {
    ExecReg = AMDGPU::EXEC;
    MovOp = AMDGPU::S_MOV_B64;
    AndOp = AMDGPU::S_AND_B64;
    OrOp = AMDGPU::S_OR_B64;
    XorOp = AMDGPU::S_XOR_B64;
    AndN2Op = AMDGPU::S_ANDN2_B64;
    OrN2Op = AMDGPU::S_ORN2_B64;
  }

  lowerCopiesFromI1();
  lowerPhis();
  lowerCopiesToI1();

  for (unsigned Reg : ConstrainRegs)
    MRI->constrainRegClass(Reg, &AMDGPU::SReg_1_XEXECRegClass);
  ConstrainRegs.clear();

  return true;
}
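
/// Lower COPYs from a vreg_1 source into 32-bit vector registers, as can occur
/// around inline assembly: each such copy becomes a V_CNDMASK_B32 that selects
/// -1 or 0 per lane according to the lane mask.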
void SILowerI1Copies::lowerCopiesFromI1() {
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      Register SrcReg = MI.getOperand(1).getReg();
      if (!isVreg1(SrcReg))
        continue;

      if (isLaneMaskReg(DstReg) || isVreg1(DstReg))
        continue;

      // Copy into a 32-bit vector register.
      LLVM_DEBUG(dbgs() << "Lower copy from i1: " << MI);
      DebugLoc DL = MI.getDebugLoc();

      assert(TII->getRegisterInfo().getRegSizeInBits(DstReg, *MRI) == 32);
      assert(!MI.getOperand(0).getSubReg());

      ConstrainRegs.insert(SrcReg);
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addImm(0)
          .addImm(0)
          .addImm(-1)
          .addReg(SrcReg);
      DeadCopies.push_back(&MI);
    }

    for (MachineInstr *MI : DeadCopies)
      MI->eraseFromParent();
    DeadCopies.clear();
  }
}
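
/// Lower all PHI nodes that produce a vreg_1 value: the PHI result is switched
/// to a lane-mask register class, and MachineSSAUpdater together with
/// buildMergeLaneMasks() is used to merge the incoming lane masks in the
/// predecessor blocks where that is necessary.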
void SILowerI1Copies::lowerPhis() {
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  PhiIncomingAnalysis PIA(*PDT);
  SmallVector<MachineInstr *, 4> DeadPhis;
  SmallVector<MachineBasicBlock *, 4> IncomingBlocks;
  SmallVector<unsigned, 4> IncomingRegs;
  SmallVector<unsigned, 4> IncomingUpdated;
#ifndef NDEBUG
  DenseSet<unsigned> PhiRegisters;
#endif

  for (MachineBasicBlock &MBB : *MF) {
    LF.initialize(MBB);

    for (MachineInstr &MI : MBB.phis()) {
      Register DstReg = MI.getOperand(0).getReg();
      if (!isVreg1(DstReg))
        continue;

      LLVM_DEBUG(dbgs() << "Lower PHI: " << MI);

      MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass
                                        : &AMDGPU::SReg_64RegClass);

      // Collect incoming values.
      for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
        assert(i + 1 < MI.getNumOperands());
        Register IncomingReg = MI.getOperand(i).getReg();
        MachineBasicBlock *IncomingMBB = MI.getOperand(i + 1).getMBB();
        MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg);

        if (IncomingDef->getOpcode() == AMDGPU::COPY) {
          IncomingReg = IncomingDef->getOperand(1).getReg();
          assert(isLaneMaskReg(IncomingReg) || isVreg1(IncomingReg));
          assert(!IncomingDef->getOperand(1).getSubReg());
        } else if (IncomingDef->getOpcode() == AMDGPU::IMPLICIT_DEF) {
          continue;
        } else {
          assert(IncomingDef->isPHI() || PhiRegisters.count(IncomingReg));
        }

        IncomingBlocks.push_back(IncomingMBB);
        IncomingRegs.push_back(IncomingReg);
      }

#ifndef NDEBUG
      PhiRegisters.insert(DstReg);
#endif

      // Phis in a loop that are observed outside the loop receive a simple but
      // conservatively correct treatment.
      MachineBasicBlock *PostDomBound = &MBB;
      for (MachineInstr &Use : MRI->use_instructions(DstReg)) {
        PostDomBound =
            PDT->findNearestCommonDominator(PostDomBound, Use.getParent());
      }

      unsigned FoundLoopLevel = LF.findLoop(PostDomBound);

      SSAUpdater.Initialize(DstReg);

      if (FoundLoopLevel) {
        LF.addLoopEntries(FoundLoopLevel, SSAUpdater, IncomingBlocks);

        for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
          IncomingUpdated.push_back(createLaneMaskReg(*MF));
          SSAUpdater.AddAvailableValue(IncomingBlocks[i],
                                       IncomingUpdated.back());
        }

        for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
          MachineBasicBlock &IMBB = *IncomingBlocks[i];
          buildMergeLaneMasks(
              IMBB, getSaluInsertionAtEnd(IMBB), {}, IncomingUpdated[i],
              SSAUpdater.GetValueInMiddleOfBlock(&IMBB), IncomingRegs[i]);
        }
      } else {
        // The phi is not observed from outside a loop. Use a more accurate
        // lowering.
        PIA.analyze(MBB, IncomingBlocks);

        for (MachineBasicBlock *MBB : PIA.predecessors())
          SSAUpdater.AddAvailableValue(MBB, insertUndefLaneMask(*MBB));

        for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
          MachineBasicBlock &IMBB = *IncomingBlocks[i];
          if (PIA.isSource(IMBB)) {
            IncomingUpdated.push_back(0);
            SSAUpdater.AddAvailableValue(&IMBB, IncomingRegs[i]);
          } else {
            IncomingUpdated.push_back(createLaneMaskReg(*MF));
            SSAUpdater.AddAvailableValue(&IMBB, IncomingUpdated.back());
          }
        }

        for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
          if (!IncomingUpdated[i])
            continue;

          MachineBasicBlock &IMBB = *IncomingBlocks[i];
          buildMergeLaneMasks(
              IMBB, getSaluInsertionAtEnd(IMBB), {}, IncomingUpdated[i],
              SSAUpdater.GetValueInMiddleOfBlock(&IMBB), IncomingRegs[i]);
        }
      }

      unsigned NewReg = SSAUpdater.GetValueInMiddleOfBlock(&MBB);
      if (NewReg != DstReg) {
        MRI->replaceRegWith(NewReg, DstReg);

        // Ensure that DstReg has a single def and mark the old PHI node for
        // deletion.
        MI.getOperand(0).setReg(NewReg);
        DeadPhis.push_back(&MI);
      }

      IncomingBlocks.clear();
      IncomingRegs.clear();
      IncomingUpdated.clear();
    }

    for (MachineInstr *MI : DeadPhis)
      MI->eraseFromParent();
    DeadPhis.clear();
  }
}
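
/// Lower COPY and IMPLICIT_DEF instructions that define a vreg_1 register.
/// Sources that are not already lane masks (e.g. a 32-bit VGPR) are first
/// converted with V_CMP_NE_U32; defs inside a loop that are observed outside
/// the loop are additionally merged across iterations via
/// buildMergeLaneMasks().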
void SILowerI1Copies::lowerCopiesToI1() {
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    LF.initialize(MBB);

    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::IMPLICIT_DEF &&
          MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      if (!isVreg1(DstReg))
        continue;

      if (MRI->use_empty(DstReg)) {
        DeadCopies.push_back(&MI);
        continue;
      }

      LLVM_DEBUG(dbgs() << "Lower Other: " << MI);

      MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass
                                        : &AMDGPU::SReg_64RegClass);
      if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF)
        continue;

      DebugLoc DL = MI.getDebugLoc();
      Register SrcReg = MI.getOperand(1).getReg();
      assert(!MI.getOperand(1).getSubReg());

      if (!Register::isVirtualRegister(SrcReg) ||
          (!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
        assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
        unsigned TmpReg = createLaneMaskReg(*MF);
        BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64), TmpReg)
            .addReg(SrcReg)
            .addImm(0);
        MI.getOperand(1).setReg(TmpReg);
        SrcReg = TmpReg;
      }

      // Defs in a loop that are observed outside the loop must be transformed
      // into appropriate bit manipulation.
      MachineBasicBlock *PostDomBound = &MBB;
      for (MachineInstr &Use : MRI->use_instructions(DstReg)) {
        PostDomBound =
            PDT->findNearestCommonDominator(PostDomBound, Use.getParent());
      }

      unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
      if (FoundLoopLevel) {
        SSAUpdater.Initialize(DstReg);
        SSAUpdater.AddAvailableValue(&MBB, DstReg);
        LF.addLoopEntries(FoundLoopLevel, SSAUpdater);

        buildMergeLaneMasks(MBB, MI, DL, DstReg,
                            SSAUpdater.GetValueInMiddleOfBlock(&MBB), SrcReg);
        DeadCopies.push_back(&MI);
      }
    }

    for (MachineInstr *MI : DeadCopies)
      MI->eraseFromParent();
    DeadCopies.clear();
  }
}
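
/// Return true if \p Reg is known to be a constant lane mask, looking through
/// copies of lane-mask registers; \p Val is set to true for an all-ones mask
/// and false for an all-zeros mask.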
bool SILowerI1Copies::isConstantLaneMask(unsigned Reg, bool &Val) const {
  const MachineInstr *MI;
  for (;;) {
    MI = MRI->getUniqueVRegDef(Reg);
    if (MI->getOpcode() != AMDGPU::COPY)
      break;

    Reg = MI->getOperand(1).getReg();
    if (!Register::isVirtualRegister(Reg))
      return false;
    if (!isLaneMaskReg(Reg))
      return false;
  }

  if (MI->getOpcode() != MovOp)
    return false;

  if (!MI->getOperand(1).isImm())
    return false;

  int64_t Imm = MI->getOperand(1).getImm();
  if (Imm == 0) {
    Val = false;
    return true;
  }
  if (Imm == -1) {
    Val = true;
    return true;
  }

  return false;
}
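
/// Set \p Def and \p Use to whether \p MI defines and/or uses the SCC
/// register.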
static void instrDefsUsesSCC(const MachineInstr &MI, bool &Def, bool &Use) {
  Def = false;
  Use = false;

  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.getReg() == AMDGPU::SCC) {
      if (MO.isUse())
        Use = true;
      else
        Def = true;
    }
  }
}

/// Return a point at the end of the given \p MBB to insert SALU instructions
/// for lane mask calculation. Take terminators and SCC into account.
MachineBasicBlock::iterator
SILowerI1Copies::getSaluInsertionAtEnd(MachineBasicBlock &MBB) const {
  auto InsertionPt = MBB.getFirstTerminator();
  bool TerminatorsUseSCC = false;
  for (auto I = InsertionPt, E = MBB.end(); I != E; ++I) {
    bool DefsSCC;
    instrDefsUsesSCC(*I, DefsSCC, TerminatorsUseSCC);
    if (TerminatorsUseSCC || DefsSCC)
      break;
  }

  if (!TerminatorsUseSCC)
    return InsertionPt;

  while (InsertionPt != MBB.begin()) {
    InsertionPt--;

    bool DefSCC, UseSCC;
    instrDefsUsesSCC(*InsertionPt, DefSCC, UseSCC);
    if (DefSCC)
      return InsertionPt;
  }

  // We should have at least seen an IMPLICIT_DEF or COPY
  llvm_unreachable("SCC used by terminator but no def in block");
}
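
/// Merge two lane masks into \p DstReg: keep the value of \p PrevReg for lanes
/// that are inactive in EXEC and take the value of \p CurReg for active lanes,
/// i.e. roughly DstReg = (PrevReg & ~EXEC) | (CurReg & EXEC). Constant inputs
/// (all-zeros or all-ones) are folded into simpler COPY / S_XOR / S_ANDN2 /
/// S_ORN2 forms.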
void SILowerI1Copies::buildMergeLaneMasks(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          const DebugLoc &DL, unsigned DstReg,
                                          unsigned PrevReg, unsigned CurReg) {
  bool PrevVal;
  bool PrevConstant = isConstantLaneMask(PrevReg, PrevVal);
  bool CurVal;
  bool CurConstant = isConstantLaneMask(CurReg, CurVal);

  if (PrevConstant && CurConstant) {
    if (PrevVal == CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg);
    } else if (CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(ExecReg);
    } else {
      BuildMI(MBB, I, DL, TII->get(XorOp), DstReg)
          .addReg(ExecReg)
          .addImm(-1);
    }
    return;
  }

  unsigned PrevMaskedReg = 0;
  unsigned CurMaskedReg = 0;
  if (!PrevConstant) {
    if (CurConstant && CurVal) {
      PrevMaskedReg = PrevReg;
    } else {
      PrevMaskedReg = createLaneMaskReg(*MF);
      BuildMI(MBB, I, DL, TII->get(AndN2Op), PrevMaskedReg)
          .addReg(PrevReg)
          .addReg(ExecReg);
    }
  }
  if (!CurConstant) {
    // TODO: check whether CurReg is already masked by EXEC
    if (PrevConstant && PrevVal) {
      CurMaskedReg = CurReg;
    } else {
      CurMaskedReg = createLaneMaskReg(*MF);
      BuildMI(MBB, I, DL, TII->get(AndOp), CurMaskedReg)
          .addReg(CurReg)
          .addReg(ExecReg);
    }
  }

  if (PrevConstant && !PrevVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(CurMaskedReg);
  } else if (CurConstant && !CurVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(PrevMaskedReg);
  } else if (PrevConstant && PrevVal) {
    BuildMI(MBB, I, DL, TII->get(OrN2Op), DstReg)
        .addReg(CurMaskedReg)
        .addReg(ExecReg);
  } else {
    BuildMI(MBB, I, DL, TII->get(OrOp), DstReg)
        .addReg(PrevMaskedReg)
        .addReg(CurMaskedReg ? CurMaskedReg : ExecReg);
  }
}