1 //===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This pass lowers all occurrences of i1 values (with a vreg_1 register class)
10 // to lane masks (32 / 64-bit scalar registers). The pass assumes machine SSA
11 // form and a wave-level control flow graph.
13 // Before this pass, values that are semantically i1 and are defined and used
14 // within the same basic block are already represented as lane masks in scalar
15 // registers. However, values that cross basic blocks are always transferred
16 // between basic blocks in vreg_1 virtual registers and are lowered by this
19 // The only instructions that use or define vreg_1 virtual registers are COPY,
20 // PHI, and IMPLICIT_DEF.
22 //===----------------------------------------------------------------------===//
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/InitializePasses.h"
33 #define DEBUG_TYPE "si-i1-copies"
37 static unsigned createLaneMaskReg(MachineFunction
&MF
);
38 static unsigned insertUndefLaneMask(MachineBasicBlock
&MBB
);
42 class SILowerI1Copies
: public MachineFunctionPass
{
47 bool IsWave32
= false;
48 MachineFunction
*MF
= nullptr;
49 MachineDominatorTree
*DT
= nullptr;
50 MachinePostDominatorTree
*PDT
= nullptr;
51 MachineRegisterInfo
*MRI
= nullptr;
52 const GCNSubtarget
*ST
= nullptr;
53 const SIInstrInfo
*TII
= nullptr;
63 DenseSet
<unsigned> ConstrainRegs
;
66 SILowerI1Copies() : MachineFunctionPass(ID
) {
67 initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
70 bool runOnMachineFunction(MachineFunction
&MF
) override
;
72 StringRef
getPassName() const override
{ return "SI Lower i1 Copies"; }
74 void getAnalysisUsage(AnalysisUsage
&AU
) const override
{
76 AU
.addRequired
<MachineDominatorTree
>();
77 AU
.addRequired
<MachinePostDominatorTree
>();
78 MachineFunctionPass::getAnalysisUsage(AU
);
82 bool lowerCopiesFromI1();
84 bool lowerCopiesToI1();
85 bool isConstantLaneMask(Register Reg
, bool &Val
) const;
86 void buildMergeLaneMasks(MachineBasicBlock
&MBB
,
87 MachineBasicBlock::iterator I
, const DebugLoc
&DL
,
88 unsigned DstReg
, unsigned PrevReg
, unsigned CurReg
);
89 MachineBasicBlock::iterator
90 getSaluInsertionAtEnd(MachineBasicBlock
&MBB
) const;
92 bool isVreg1(Register Reg
) const {
93 return Reg
.isVirtual() && MRI
->getRegClass(Reg
) == &AMDGPU::VReg_1RegClass
;
96 bool isLaneMaskReg(unsigned Reg
) const {
97 return TII
->getRegisterInfo().isSGPRReg(*MRI
, Reg
) &&
98 TII
->getRegisterInfo().getRegSizeInBits(Reg
, *MRI
) ==
99 ST
->getWavefrontSize();
103 /// Helper class that determines the relationship between incoming values of a
104 /// phi in the control flow graph to determine where an incoming value can
105 /// simply be taken as a scalar lane mask as-is, and where it needs to be
106 /// merged with another, previously defined lane mask.
108 /// The approach is as follows:
109 /// - Determine all basic blocks which, starting from the incoming blocks,
110 /// a wave may reach before entering the def block (the block containing the
112 /// - If an incoming block has no predecessors in this set, we can take the
113 /// incoming value as a scalar lane mask as-is.
114 /// -- A special case of this is when the def block has a self-loop.
115 /// - Otherwise, the incoming value needs to be merged with a previously
116 /// defined lane mask.
117 /// - If there is a path into the set of reachable blocks that does _not_ go
118 /// through an incoming block where we can take the scalar lane mask as-is,
119 /// we need to invent an available value for the SSAUpdater. Choices are
120 /// 0 and undef, with differing consequences for how to merge values etc.
122 /// TODO: We could use region analysis to quickly skip over SESE regions during
125 class PhiIncomingAnalysis
{
126 MachinePostDominatorTree
&PDT
;
128 // For each reachable basic block, whether it is a source in the induced
129 // subgraph of the CFG.
130 DenseMap
<MachineBasicBlock
*, bool> ReachableMap
;
131 SmallVector
<MachineBasicBlock
*, 4> ReachableOrdered
;
132 SmallVector
<MachineBasicBlock
*, 4> Stack
;
133 SmallVector
<MachineBasicBlock
*, 4> Predecessors
;
136 PhiIncomingAnalysis(MachinePostDominatorTree
&PDT
) : PDT(PDT
) {}
138 /// Returns whether \p MBB is a source in the induced subgraph of reachable
140 bool isSource(MachineBasicBlock
&MBB
) const {
141 return ReachableMap
.find(&MBB
)->second
;
144 ArrayRef
<MachineBasicBlock
*> predecessors() const { return Predecessors
; }
146 void analyze(MachineBasicBlock
&DefBlock
,
147 ArrayRef
<MachineBasicBlock
*> IncomingBlocks
) {
148 assert(Stack
.empty());
149 ReachableMap
.clear();
150 ReachableOrdered
.clear();
151 Predecessors
.clear();
153 // Insert the def block first, so that it acts as an end point for the
155 ReachableMap
.try_emplace(&DefBlock
, false);
156 ReachableOrdered
.push_back(&DefBlock
);
158 for (MachineBasicBlock
*MBB
: IncomingBlocks
) {
159 if (MBB
== &DefBlock
) {
160 ReachableMap
[&DefBlock
] = true; // self-loop on DefBlock
164 ReachableMap
.try_emplace(MBB
, false);
165 ReachableOrdered
.push_back(MBB
);
167 // If this block has a divergent terminator and the def block is its
168 // post-dominator, the wave may first visit the other successors.
169 bool Divergent
= false;
170 for (MachineInstr
&MI
: MBB
->terminators()) {
171 if (MI
.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO
||
172 MI
.getOpcode() == AMDGPU::SI_IF
||
173 MI
.getOpcode() == AMDGPU::SI_ELSE
||
174 MI
.getOpcode() == AMDGPU::SI_LOOP
) {
180 if (Divergent
&& PDT
.dominates(&DefBlock
, MBB
))
181 append_range(Stack
, MBB
->successors());
184 while (!Stack
.empty()) {
185 MachineBasicBlock
*MBB
= Stack
.pop_back_val();
186 if (!ReachableMap
.try_emplace(MBB
, false).second
)
188 ReachableOrdered
.push_back(MBB
);
190 append_range(Stack
, MBB
->successors());
193 for (MachineBasicBlock
*MBB
: ReachableOrdered
) {
194 bool HaveReachablePred
= false;
195 for (MachineBasicBlock
*Pred
: MBB
->predecessors()) {
196 if (ReachableMap
.count(Pred
)) {
197 HaveReachablePred
= true;
199 Stack
.push_back(Pred
);
202 if (!HaveReachablePred
)
203 ReachableMap
[MBB
] = true;
204 if (HaveReachablePred
) {
205 for (MachineBasicBlock
*UnreachablePred
: Stack
) {
206 if (!llvm::is_contained(Predecessors
, UnreachablePred
))
207 Predecessors
.push_back(UnreachablePred
);
215 /// Helper class that detects loops which require us to lower an i1 COPY into
216 /// bitwise manipulation.
218 /// Unfortunately, we cannot use LoopInfo because LoopInfo does not distinguish
219 /// between loops with the same header. Consider this example:
227 /// A is the header of a loop containing A, B, and C as far as LoopInfo is
228 /// concerned. However, an i1 COPY in B that is used in C must be lowered to
229 /// bitwise operations to combine results from different loop iterations when
230 /// B has a divergent branch (since by default we will compile this code such
231 /// that threads in a wave are merged at the entry of C).
233 /// The following rule is implemented to determine whether bitwise operations
234 /// are required: use the bitwise lowering for a def in block B if a backward
235 /// edge to B is reachable without going through the nearest common
236 /// post-dominator of B and all uses of the def.
238 /// TODO: This rule is conservative because it does not check whether the
239 /// relevant branches are actually divergent.
241 /// The class is designed to cache the CFG traversal so that it can be re-used
242 /// for multiple defs within the same basic block.
244 /// TODO: We could use region analysis to quickly skip over SESE regions during
248 MachineDominatorTree
&DT
;
249 MachinePostDominatorTree
&PDT
;
251 // All visited / reachable block, tagged by level (level 0 is the def block,
252 // level 1 are all blocks reachable including but not going through the def
253 // block's IPDOM, etc.).
254 DenseMap
<MachineBasicBlock
*, unsigned> Visited
;
256 // Nearest common dominator of all visited blocks by level (level 0 is the
257 // def block). Used for seeding the SSAUpdater.
258 SmallVector
<MachineBasicBlock
*, 4> CommonDominators
;
260 // Post-dominator of all visited blocks.
261 MachineBasicBlock
*VisitedPostDom
= nullptr;
263 // Level at which a loop was found: 0 is not possible; 1 = a backward edge is
264 // reachable without going through the IPDOM of the def block (if the IPDOM
265 // itself has an edge to the def block, the loop level is 2), etc.
266 unsigned FoundLoopLevel
= ~0u;
268 MachineBasicBlock
*DefBlock
= nullptr;
269 SmallVector
<MachineBasicBlock
*, 4> Stack
;
270 SmallVector
<MachineBasicBlock
*, 4> NextLevel
;
273 LoopFinder(MachineDominatorTree
&DT
, MachinePostDominatorTree
&PDT
)
274 : DT(DT
), PDT(PDT
) {}
276 void initialize(MachineBasicBlock
&MBB
) {
278 CommonDominators
.clear();
281 VisitedPostDom
= nullptr;
282 FoundLoopLevel
= ~0u;
287 /// Check whether a backward edge can be reached without going through the
288 /// given \p PostDom of the def block.
290 /// Return the level of \p PostDom if a loop was found, or 0 otherwise.
291 unsigned findLoop(MachineBasicBlock
*PostDom
) {
292 MachineDomTreeNode
*PDNode
= PDT
.getNode(DefBlock
);
298 while (PDNode
->getBlock() != PostDom
) {
299 if (PDNode
->getBlock() == VisitedPostDom
)
301 PDNode
= PDNode
->getIDom();
303 if (FoundLoopLevel
== Level
)
310 /// Add undef values dominating the loop and the optionally given additional
311 /// blocks, so that the SSA updater doesn't have to search all the way to the
313 void addLoopEntries(unsigned LoopLevel
, MachineSSAUpdater
&SSAUpdater
,
314 ArrayRef
<MachineBasicBlock
*> Blocks
= {}) {
315 assert(LoopLevel
< CommonDominators
.size());
317 MachineBasicBlock
*Dom
= CommonDominators
[LoopLevel
];
318 for (MachineBasicBlock
*MBB
: Blocks
)
319 Dom
= DT
.findNearestCommonDominator(Dom
, MBB
);
321 if (!inLoopLevel(*Dom
, LoopLevel
, Blocks
)) {
322 SSAUpdater
.AddAvailableValue(Dom
, insertUndefLaneMask(*Dom
));
324 // The dominator is part of the loop or the given blocks, so add the
325 // undef value to unreachable predecessors instead.
326 for (MachineBasicBlock
*Pred
: Dom
->predecessors()) {
327 if (!inLoopLevel(*Pred
, LoopLevel
, Blocks
))
328 SSAUpdater
.AddAvailableValue(Pred
, insertUndefLaneMask(*Pred
));
334 bool inLoopLevel(MachineBasicBlock
&MBB
, unsigned LoopLevel
,
335 ArrayRef
<MachineBasicBlock
*> Blocks
) const {
336 auto DomIt
= Visited
.find(&MBB
);
337 if (DomIt
!= Visited
.end() && DomIt
->second
<= LoopLevel
)
340 if (llvm::is_contained(Blocks
, &MBB
))
346 void advanceLevel() {
347 MachineBasicBlock
*VisitedDom
;
349 if (!VisitedPostDom
) {
350 VisitedPostDom
= DefBlock
;
351 VisitedDom
= DefBlock
;
352 Stack
.push_back(DefBlock
);
354 VisitedPostDom
= PDT
.getNode(VisitedPostDom
)->getIDom()->getBlock();
355 VisitedDom
= CommonDominators
.back();
357 for (unsigned i
= 0; i
< NextLevel
.size();) {
358 if (PDT
.dominates(VisitedPostDom
, NextLevel
[i
])) {
359 Stack
.push_back(NextLevel
[i
]);
361 NextLevel
[i
] = NextLevel
.back();
362 NextLevel
.pop_back();
369 unsigned Level
= CommonDominators
.size();
370 while (!Stack
.empty()) {
371 MachineBasicBlock
*MBB
= Stack
.pop_back_val();
372 if (!PDT
.dominates(VisitedPostDom
, MBB
))
373 NextLevel
.push_back(MBB
);
375 Visited
[MBB
] = Level
;
376 VisitedDom
= DT
.findNearestCommonDominator(VisitedDom
, MBB
);
378 for (MachineBasicBlock
*Succ
: MBB
->successors()) {
379 if (Succ
== DefBlock
) {
380 if (MBB
== VisitedPostDom
)
381 FoundLoopLevel
= std::min(FoundLoopLevel
, Level
+ 1);
383 FoundLoopLevel
= std::min(FoundLoopLevel
, Level
);
387 if (Visited
.try_emplace(Succ
, ~0u).second
) {
388 if (MBB
== VisitedPostDom
)
389 NextLevel
.push_back(Succ
);
391 Stack
.push_back(Succ
);
396 CommonDominators
.push_back(VisitedDom
);
400 } // End anonymous namespace.
402 INITIALIZE_PASS_BEGIN(SILowerI1Copies
, DEBUG_TYPE
, "SI Lower i1 Copies", false,
404 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree
)
405 INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree
)
406 INITIALIZE_PASS_END(SILowerI1Copies
, DEBUG_TYPE
, "SI Lower i1 Copies", false,
409 char SILowerI1Copies::ID
= 0;
411 char &llvm::SILowerI1CopiesID
= SILowerI1Copies::ID
;
413 FunctionPass
*llvm::createSILowerI1CopiesPass() {
414 return new SILowerI1Copies();
417 static unsigned createLaneMaskReg(MachineFunction
&MF
) {
418 const GCNSubtarget
&ST
= MF
.getSubtarget
<GCNSubtarget
>();
419 MachineRegisterInfo
&MRI
= MF
.getRegInfo();
420 return MRI
.createVirtualRegister(ST
.isWave32() ? &AMDGPU::SReg_32RegClass
421 : &AMDGPU::SReg_64RegClass
);
424 static unsigned insertUndefLaneMask(MachineBasicBlock
&MBB
) {
425 MachineFunction
&MF
= *MBB
.getParent();
426 const GCNSubtarget
&ST
= MF
.getSubtarget
<GCNSubtarget
>();
427 const SIInstrInfo
*TII
= ST
.getInstrInfo();
428 unsigned UndefReg
= createLaneMaskReg(MF
);
429 BuildMI(MBB
, MBB
.getFirstTerminator(), {}, TII
->get(AMDGPU::IMPLICIT_DEF
),
434 /// Lower all instructions that def or use vreg_1 registers.
436 /// In a first pass, we lower COPYs from vreg_1 to vector registers, as can
437 /// occur around inline assembly. We do this first, before vreg_1 registers
438 /// are changed to scalar mask registers.
440 /// Then we lower all defs of vreg_1 registers. Phi nodes are lowered before
441 /// all others, because phi lowering looks through copies and can therefore
442 /// often make copy lowering unnecessary.
443 bool SILowerI1Copies::runOnMachineFunction(MachineFunction
&TheMF
) {
444 // Only need to run this in SelectionDAG path.
445 if (TheMF
.getProperties().hasProperty(
446 MachineFunctionProperties::Property::Selected
))
450 MRI
= &MF
->getRegInfo();
451 DT
= &getAnalysis
<MachineDominatorTree
>();
452 PDT
= &getAnalysis
<MachinePostDominatorTree
>();
454 ST
= &MF
->getSubtarget
<GCNSubtarget
>();
455 TII
= ST
->getInstrInfo();
456 IsWave32
= ST
->isWave32();
459 ExecReg
= AMDGPU::EXEC_LO
;
460 MovOp
= AMDGPU::S_MOV_B32
;
461 AndOp
= AMDGPU::S_AND_B32
;
462 OrOp
= AMDGPU::S_OR_B32
;
463 XorOp
= AMDGPU::S_XOR_B32
;
464 AndN2Op
= AMDGPU::S_ANDN2_B32
;
465 OrN2Op
= AMDGPU::S_ORN2_B32
;
467 ExecReg
= AMDGPU::EXEC
;
468 MovOp
= AMDGPU::S_MOV_B64
;
469 AndOp
= AMDGPU::S_AND_B64
;
470 OrOp
= AMDGPU::S_OR_B64
;
471 XorOp
= AMDGPU::S_XOR_B64
;
472 AndN2Op
= AMDGPU::S_ANDN2_B64
;
473 OrN2Op
= AMDGPU::S_ORN2_B64
;
476 bool Changed
= false;
477 Changed
|= lowerCopiesFromI1();
478 Changed
|= lowerPhis();
479 Changed
|= lowerCopiesToI1();
481 assert(Changed
|| ConstrainRegs
.empty());
482 for (unsigned Reg
: ConstrainRegs
)
483 MRI
->constrainRegClass(Reg
, &AMDGPU::SReg_1_XEXECRegClass
);
484 ConstrainRegs
.clear();
490 static bool isVRegCompatibleReg(const SIRegisterInfo
&TRI
,
491 const MachineRegisterInfo
&MRI
,
493 unsigned Size
= TRI
.getRegSizeInBits(Reg
, MRI
);
494 return Size
== 1 || Size
== 32;
498 bool SILowerI1Copies::lowerCopiesFromI1() {
499 bool Changed
= false;
500 SmallVector
<MachineInstr
*, 4> DeadCopies
;
502 for (MachineBasicBlock
&MBB
: *MF
) {
503 for (MachineInstr
&MI
: MBB
) {
504 if (MI
.getOpcode() != AMDGPU::COPY
)
507 Register DstReg
= MI
.getOperand(0).getReg();
508 Register SrcReg
= MI
.getOperand(1).getReg();
509 if (!isVreg1(SrcReg
))
512 if (isLaneMaskReg(DstReg
) || isVreg1(DstReg
))
517 // Copy into a 32-bit vector register.
518 LLVM_DEBUG(dbgs() << "Lower copy from i1: " << MI
);
519 DebugLoc DL
= MI
.getDebugLoc();
521 assert(isVRegCompatibleReg(TII
->getRegisterInfo(), *MRI
, DstReg
));
522 assert(!MI
.getOperand(0).getSubReg());
524 ConstrainRegs
.insert(SrcReg
);
525 BuildMI(MBB
, MI
, DL
, TII
->get(AMDGPU::V_CNDMASK_B32_e64
), DstReg
)
531 DeadCopies
.push_back(&MI
);
534 for (MachineInstr
*MI
: DeadCopies
)
535 MI
->eraseFromParent();
541 bool SILowerI1Copies::lowerPhis() {
542 MachineSSAUpdater
SSAUpdater(*MF
);
543 LoopFinder
LF(*DT
, *PDT
);
544 PhiIncomingAnalysis
PIA(*PDT
);
545 SmallVector
<MachineInstr
*, 4> Vreg1Phis
;
546 SmallVector
<MachineBasicBlock
*, 4> IncomingBlocks
;
547 SmallVector
<unsigned, 4> IncomingRegs
;
548 SmallVector
<unsigned, 4> IncomingUpdated
;
550 DenseSet
<unsigned> PhiRegisters
;
553 for (MachineBasicBlock
&MBB
: *MF
) {
554 for (MachineInstr
&MI
: MBB
.phis()) {
555 if (isVreg1(MI
.getOperand(0).getReg()))
556 Vreg1Phis
.push_back(&MI
);
559 if (Vreg1Phis
.empty())
562 MachineBasicBlock
*PrevMBB
= nullptr;
563 for (MachineInstr
*MI
: Vreg1Phis
) {
564 MachineBasicBlock
&MBB
= *MI
->getParent();
565 if (&MBB
!= PrevMBB
) {
570 LLVM_DEBUG(dbgs() << "Lower PHI: " << *MI
);
572 Register DstReg
= MI
->getOperand(0).getReg();
573 MRI
->setRegClass(DstReg
, IsWave32
? &AMDGPU::SReg_32RegClass
574 : &AMDGPU::SReg_64RegClass
);
576 // Collect incoming values.
577 for (unsigned i
= 1; i
< MI
->getNumOperands(); i
+= 2) {
578 assert(i
+ 1 < MI
->getNumOperands());
579 Register IncomingReg
= MI
->getOperand(i
).getReg();
580 MachineBasicBlock
*IncomingMBB
= MI
->getOperand(i
+ 1).getMBB();
581 MachineInstr
*IncomingDef
= MRI
->getUniqueVRegDef(IncomingReg
);
583 if (IncomingDef
->getOpcode() == AMDGPU::COPY
) {
584 IncomingReg
= IncomingDef
->getOperand(1).getReg();
585 assert(isLaneMaskReg(IncomingReg
) || isVreg1(IncomingReg
));
586 assert(!IncomingDef
->getOperand(1).getSubReg());
587 } else if (IncomingDef
->getOpcode() == AMDGPU::IMPLICIT_DEF
) {
590 assert(IncomingDef
->isPHI() || PhiRegisters
.count(IncomingReg
));
593 IncomingBlocks
.push_back(IncomingMBB
);
594 IncomingRegs
.push_back(IncomingReg
);
598 PhiRegisters
.insert(DstReg
);
601 // Phis in a loop that are observed outside the loop receive a simple but
602 // conservatively correct treatment.
603 std::vector
<MachineBasicBlock
*> DomBlocks
= {&MBB
};
604 for (MachineInstr
&Use
: MRI
->use_instructions(DstReg
))
605 DomBlocks
.push_back(Use
.getParent());
607 MachineBasicBlock
*PostDomBound
=
608 PDT
->findNearestCommonDominator(DomBlocks
);
610 // FIXME: This fails to find irreducible cycles. If we have a def (other
611 // than a constant) in a pair of blocks that end up looping back to each
612 // other, it will be mishandle. Due to structurization this shouldn't occur
614 unsigned FoundLoopLevel
= LF
.findLoop(PostDomBound
);
616 SSAUpdater
.Initialize(DstReg
);
618 if (FoundLoopLevel
) {
619 LF
.addLoopEntries(FoundLoopLevel
, SSAUpdater
, IncomingBlocks
);
621 for (unsigned i
= 0; i
< IncomingRegs
.size(); ++i
) {
622 IncomingUpdated
.push_back(createLaneMaskReg(*MF
));
623 SSAUpdater
.AddAvailableValue(IncomingBlocks
[i
],
624 IncomingUpdated
.back());
627 for (unsigned i
= 0; i
< IncomingRegs
.size(); ++i
) {
628 MachineBasicBlock
&IMBB
= *IncomingBlocks
[i
];
630 IMBB
, getSaluInsertionAtEnd(IMBB
), {}, IncomingUpdated
[i
],
631 SSAUpdater
.GetValueInMiddleOfBlock(&IMBB
), IncomingRegs
[i
]);
634 // The phi is not observed from outside a loop. Use a more accurate
636 PIA
.analyze(MBB
, IncomingBlocks
);
638 for (MachineBasicBlock
*MBB
: PIA
.predecessors())
639 SSAUpdater
.AddAvailableValue(MBB
, insertUndefLaneMask(*MBB
));
641 for (unsigned i
= 0; i
< IncomingRegs
.size(); ++i
) {
642 MachineBasicBlock
&IMBB
= *IncomingBlocks
[i
];
643 if (PIA
.isSource(IMBB
)) {
644 IncomingUpdated
.push_back(0);
645 SSAUpdater
.AddAvailableValue(&IMBB
, IncomingRegs
[i
]);
647 IncomingUpdated
.push_back(createLaneMaskReg(*MF
));
648 SSAUpdater
.AddAvailableValue(&IMBB
, IncomingUpdated
.back());
652 for (unsigned i
= 0; i
< IncomingRegs
.size(); ++i
) {
653 if (!IncomingUpdated
[i
])
656 MachineBasicBlock
&IMBB
= *IncomingBlocks
[i
];
658 IMBB
, getSaluInsertionAtEnd(IMBB
), {}, IncomingUpdated
[i
],
659 SSAUpdater
.GetValueInMiddleOfBlock(&IMBB
), IncomingRegs
[i
]);
663 Register NewReg
= SSAUpdater
.GetValueInMiddleOfBlock(&MBB
);
664 if (NewReg
!= DstReg
) {
665 MRI
->replaceRegWith(NewReg
, DstReg
);
666 MI
->eraseFromParent();
669 IncomingBlocks
.clear();
670 IncomingRegs
.clear();
671 IncomingUpdated
.clear();
676 bool SILowerI1Copies::lowerCopiesToI1() {
677 bool Changed
= false;
678 MachineSSAUpdater
SSAUpdater(*MF
);
679 LoopFinder
LF(*DT
, *PDT
);
680 SmallVector
<MachineInstr
*, 4> DeadCopies
;
682 for (MachineBasicBlock
&MBB
: *MF
) {
685 for (MachineInstr
&MI
: MBB
) {
686 if (MI
.getOpcode() != AMDGPU::IMPLICIT_DEF
&&
687 MI
.getOpcode() != AMDGPU::COPY
)
690 Register DstReg
= MI
.getOperand(0).getReg();
691 if (!isVreg1(DstReg
))
696 if (MRI
->use_empty(DstReg
)) {
697 DeadCopies
.push_back(&MI
);
701 LLVM_DEBUG(dbgs() << "Lower Other: " << MI
);
703 MRI
->setRegClass(DstReg
, IsWave32
? &AMDGPU::SReg_32RegClass
704 : &AMDGPU::SReg_64RegClass
);
705 if (MI
.getOpcode() == AMDGPU::IMPLICIT_DEF
)
708 DebugLoc DL
= MI
.getDebugLoc();
709 Register SrcReg
= MI
.getOperand(1).getReg();
710 assert(!MI
.getOperand(1).getSubReg());
712 if (!SrcReg
.isVirtual() || (!isLaneMaskReg(SrcReg
) && !isVreg1(SrcReg
))) {
713 assert(TII
->getRegisterInfo().getRegSizeInBits(SrcReg
, *MRI
) == 32);
714 unsigned TmpReg
= createLaneMaskReg(*MF
);
715 BuildMI(MBB
, MI
, DL
, TII
->get(AMDGPU::V_CMP_NE_U32_e64
), TmpReg
)
718 MI
.getOperand(1).setReg(TmpReg
);
722 // Defs in a loop that are observed outside the loop must be transformed
723 // into appropriate bit manipulation.
724 std::vector
<MachineBasicBlock
*> DomBlocks
= {&MBB
};
725 for (MachineInstr
&Use
: MRI
->use_instructions(DstReg
))
726 DomBlocks
.push_back(Use
.getParent());
728 MachineBasicBlock
*PostDomBound
=
729 PDT
->findNearestCommonDominator(DomBlocks
);
730 unsigned FoundLoopLevel
= LF
.findLoop(PostDomBound
);
731 if (FoundLoopLevel
) {
732 SSAUpdater
.Initialize(DstReg
);
733 SSAUpdater
.AddAvailableValue(&MBB
, DstReg
);
734 LF
.addLoopEntries(FoundLoopLevel
, SSAUpdater
);
736 buildMergeLaneMasks(MBB
, MI
, DL
, DstReg
,
737 SSAUpdater
.GetValueInMiddleOfBlock(&MBB
), SrcReg
);
738 DeadCopies
.push_back(&MI
);
742 for (MachineInstr
*MI
: DeadCopies
)
743 MI
->eraseFromParent();
749 bool SILowerI1Copies::isConstantLaneMask(Register Reg
, bool &Val
) const {
750 const MachineInstr
*MI
;
752 MI
= MRI
->getUniqueVRegDef(Reg
);
753 if (MI
->getOpcode() == AMDGPU::IMPLICIT_DEF
)
756 if (MI
->getOpcode() != AMDGPU::COPY
)
759 Reg
= MI
->getOperand(1).getReg();
760 if (!Reg
.isVirtual())
762 if (!isLaneMaskReg(Reg
))
766 if (MI
->getOpcode() != MovOp
)
769 if (!MI
->getOperand(1).isImm())
772 int64_t Imm
= MI
->getOperand(1).getImm();
785 static void instrDefsUsesSCC(const MachineInstr
&MI
, bool &Def
, bool &Use
) {
789 for (const MachineOperand
&MO
: MI
.operands()) {
790 if (MO
.isReg() && MO
.getReg() == AMDGPU::SCC
) {
799 /// Return a point at the end of the given \p MBB to insert SALU instructions
800 /// for lane mask calculation. Take terminators and SCC into account.
801 MachineBasicBlock::iterator
802 SILowerI1Copies::getSaluInsertionAtEnd(MachineBasicBlock
&MBB
) const {
803 auto InsertionPt
= MBB
.getFirstTerminator();
804 bool TerminatorsUseSCC
= false;
805 for (auto I
= InsertionPt
, E
= MBB
.end(); I
!= E
; ++I
) {
807 instrDefsUsesSCC(*I
, DefsSCC
, TerminatorsUseSCC
);
808 if (TerminatorsUseSCC
|| DefsSCC
)
812 if (!TerminatorsUseSCC
)
815 while (InsertionPt
!= MBB
.begin()) {
819 instrDefsUsesSCC(*InsertionPt
, DefSCC
, UseSCC
);
824 // We should have at least seen an IMPLICIT_DEF or COPY
825 llvm_unreachable("SCC used by terminator but no def in block");
828 void SILowerI1Copies::buildMergeLaneMasks(MachineBasicBlock
&MBB
,
829 MachineBasicBlock::iterator I
,
830 const DebugLoc
&DL
, unsigned DstReg
,
831 unsigned PrevReg
, unsigned CurReg
) {
832 bool PrevVal
= false;
833 bool PrevConstant
= isConstantLaneMask(PrevReg
, PrevVal
);
835 bool CurConstant
= isConstantLaneMask(CurReg
, CurVal
);
837 if (PrevConstant
&& CurConstant
) {
838 if (PrevVal
== CurVal
) {
839 BuildMI(MBB
, I
, DL
, TII
->get(AMDGPU::COPY
), DstReg
).addReg(CurReg
);
841 BuildMI(MBB
, I
, DL
, TII
->get(AMDGPU::COPY
), DstReg
).addReg(ExecReg
);
843 BuildMI(MBB
, I
, DL
, TII
->get(XorOp
), DstReg
)
850 unsigned PrevMaskedReg
= 0;
851 unsigned CurMaskedReg
= 0;
853 if (CurConstant
&& CurVal
) {
854 PrevMaskedReg
= PrevReg
;
856 PrevMaskedReg
= createLaneMaskReg(*MF
);
857 BuildMI(MBB
, I
, DL
, TII
->get(AndN2Op
), PrevMaskedReg
)
863 // TODO: check whether CurReg is already masked by EXEC
864 if (PrevConstant
&& PrevVal
) {
865 CurMaskedReg
= CurReg
;
867 CurMaskedReg
= createLaneMaskReg(*MF
);
868 BuildMI(MBB
, I
, DL
, TII
->get(AndOp
), CurMaskedReg
)
874 if (PrevConstant
&& !PrevVal
) {
875 BuildMI(MBB
, I
, DL
, TII
->get(AMDGPU::COPY
), DstReg
)
876 .addReg(CurMaskedReg
);
877 } else if (CurConstant
&& !CurVal
) {
878 BuildMI(MBB
, I
, DL
, TII
->get(AMDGPU::COPY
), DstReg
)
879 .addReg(PrevMaskedReg
);
880 } else if (PrevConstant
&& PrevVal
) {
881 BuildMI(MBB
, I
, DL
, TII
->get(OrN2Op
), DstReg
)
882 .addReg(CurMaskedReg
)
885 BuildMI(MBB
, I
, DL
, TII
->get(OrOp
), DstReg
)
886 .addReg(PrevMaskedReg
)
887 .addReg(CurMaskedReg
? CurMaskedReg
: ExecReg
);