//===-- SIOptimizeExecMasking.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
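
/// \file
/// This pass cleans up exec mask manipulation emitted for control flow
/// lowering: it rewrites the *_term terminator pseudos back into ordinary
/// instructions, and folds the pattern
///   x = COPY exec; z = s_<op> x, y; exec = COPY z
/// into a single fused s_<op>_saveexec instruction.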

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking"

namespace {

class SIOptimizeExecMasking : public MachineFunctionPass {
public:
  static char ID;

public:
  SIOptimizeExecMasking() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMasking, DEBUG_TYPE,
                      "SI optimize exec mask operations", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMasking, DEBUG_TYPE,
                    "SI optimize exec mask operations", false, false)

char SIOptimizeExecMasking::ID = 0;

char &llvm::SIOptimizeExecMaskingID = SIOptimizeExecMasking::ID;

/// If \p MI is a copy from exec, return the register copied to.
static unsigned isCopyFromExec(const MachineInstr &MI, const GCNSubtarget &ST) {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_term:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B32_term: {
    const MachineOperand &Src = MI.getOperand(1);
    if (Src.isReg() &&
        Src.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC))
      return MI.getOperand(0).getReg();
  }
  }

  return AMDGPU::NoRegister;
}

/// If \p MI is a copy to exec, return the register copied from.
static unsigned isCopyToExec(const MachineInstr &MI, const GCNSubtarget &ST) {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B32: {
    const MachineOperand &Dst = MI.getOperand(0);
    if (Dst.isReg() &&
        Dst.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC) &&
        MI.getOperand(1).isReg())
      return MI.getOperand(1).getReg();
    break;
  }
  case AMDGPU::S_MOV_B64_term:
  case AMDGPU::S_MOV_B32_term:
    llvm_unreachable("should have been replaced");
  }

  return AMDGPU::NoRegister;
}

/// If \p MI is a logical operation on an exec value,
/// return the register copied to.
static unsigned isLogicalOpOnExec(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_AND_B64:
  case AMDGPU::S_OR_B64:
  case AMDGPU::S_XOR_B64:
  case AMDGPU::S_ANDN2_B64:
  case AMDGPU::S_ORN2_B64:
  case AMDGPU::S_NAND_B64:
  case AMDGPU::S_NOR_B64:
  case AMDGPU::S_XNOR_B64: {
    const MachineOperand &Src1 = MI.getOperand(1);
    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
    const MachineOperand &Src2 = MI.getOperand(2);
    if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
    break;
  }
  case AMDGPU::S_AND_B32:
  case AMDGPU::S_OR_B32:
  case AMDGPU::S_XOR_B32:
  case AMDGPU::S_ANDN2_B32:
  case AMDGPU::S_ORN2_B32:
  case AMDGPU::S_NAND_B32:
  case AMDGPU::S_NOR_B32:
  case AMDGPU::S_XNOR_B32: {
    const MachineOperand &Src1 = MI.getOperand(1);
    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC_LO)
      return MI.getOperand(0).getReg();
    const MachineOperand &Src2 = MI.getOperand(2);
    if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC_LO)
      return MI.getOperand(0).getReg();
    break;
  }
  }

  return AMDGPU::NoRegister;
}
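
/// Map a scalar logical operation opcode to its fused *_SAVEEXEC_* form, or
/// INSTRUCTION_LIST_END if no fused form exists.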
static unsigned getSaveExecOp(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::S_AND_B64:
    return AMDGPU::S_AND_SAVEEXEC_B64;
  case AMDGPU::S_OR_B64:
    return AMDGPU::S_OR_SAVEEXEC_B64;
  case AMDGPU::S_XOR_B64:
    return AMDGPU::S_XOR_SAVEEXEC_B64;
  case AMDGPU::S_ANDN2_B64:
    return AMDGPU::S_ANDN2_SAVEEXEC_B64;
  case AMDGPU::S_ORN2_B64:
    return AMDGPU::S_ORN2_SAVEEXEC_B64;
  case AMDGPU::S_NAND_B64:
    return AMDGPU::S_NAND_SAVEEXEC_B64;
  case AMDGPU::S_NOR_B64:
    return AMDGPU::S_NOR_SAVEEXEC_B64;
  case AMDGPU::S_XNOR_B64:
    return AMDGPU::S_XNOR_SAVEEXEC_B64;
  case AMDGPU::S_AND_B32:
    return AMDGPU::S_AND_SAVEEXEC_B32;
  case AMDGPU::S_OR_B32:
    return AMDGPU::S_OR_SAVEEXEC_B32;
  case AMDGPU::S_XOR_B32:
    return AMDGPU::S_XOR_SAVEEXEC_B32;
  case AMDGPU::S_ANDN2_B32:
    return AMDGPU::S_ANDN2_SAVEEXEC_B32;
  case AMDGPU::S_ORN2_B32:
    return AMDGPU::S_ORN2_SAVEEXEC_B32;
  case AMDGPU::S_NAND_B32:
    return AMDGPU::S_NAND_SAVEEXEC_B32;
  case AMDGPU::S_NOR_B32:
    return AMDGPU::S_NOR_SAVEEXEC_B32;
  case AMDGPU::S_XNOR_B32:
    return AMDGPU::S_XNOR_SAVEEXEC_B32;
  default:
    return AMDGPU::INSTRUCTION_LIST_END;
  }
}

// These are only terminators to get correct spill code placement during
// register allocation, so turn them back into normal instructions. Only one of
// these is expected per block.
static bool removeTerminatorBit(const SIInstrInfo &TII, MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_MOV_B64_term:
  case AMDGPU::S_MOV_B32_term: {
    MI.setDesc(TII.get(AMDGPU::COPY));
    return true;
  }
  case AMDGPU::S_XOR_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII.get(AMDGPU::S_XOR_B64));
    return true;
  }
  case AMDGPU::S_XOR_B32_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII.get(AMDGPU::S_XOR_B32));
    return true;
  }
  case AMDGPU::S_OR_B32_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII.get(AMDGPU::S_OR_B32));
    return true;
  }
  case AMDGPU::S_ANDN2_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII.get(AMDGPU::S_ANDN2_B64));
    return true;
  }
  case AMDGPU::S_ANDN2_B32_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(TII.get(AMDGPU::S_ANDN2_B32));
    return true;
  }
  default:
    return false;
  }
}
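
/// Walk backwards through the terminators of \p MBB, turning any *_term
/// pseudo encountered back into its ordinary form. Returns the first
/// non-terminator reached or the first terminator that was rewritten,
/// or MBB.rend() if neither exists.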
static MachineBasicBlock::reverse_iterator fixTerminators(
  const SIInstrInfo &TII,
  MachineBasicBlock &MBB) {
  MachineBasicBlock::reverse_iterator I = MBB.rbegin(), E = MBB.rend();
  for (; I != E; ++I) {
    if (!I->isTerminator())
      return I;

    if (removeTerminatorBit(TII, *I))
      return I;
  }

  return E;
}
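
/// Scan backwards from \p I (inclusive) for at most InstLimit instructions,
/// looking for a copy from exec. Returns an iterator to the copy if found,
/// otherwise MBB.rend().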
static MachineBasicBlock::reverse_iterator findExecCopy(
  const SIInstrInfo &TII,
  const GCNSubtarget &ST,
  MachineBasicBlock &MBB,
  MachineBasicBlock::reverse_iterator I,
  unsigned CopyToExec) {
  const unsigned InstLimit = 25;

  auto E = MBB.rend();
  for (unsigned N = 0; N <= InstLimit && I != E; ++I, ++N) {
    unsigned CopyFromExec = isCopyFromExec(*I, ST);
    if (CopyFromExec != AMDGPU::NoRegister)
      return I;
  }

  return E;
}

// XXX - Seems LivePhysRegs doesn't work correctly since it will incorrectly
// report the register as unavailable because a super-register with a lane mask
// is unavailable.
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg) {
  for (MachineBasicBlock *Succ : MBB.successors()) {
    if (Succ->isLiveIn(Reg))
      return true;
  }

  return false;
}

bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;

  // Optimize sequences emitted for control flow lowering. They are originally
  // emitted as the separate operations because spill code may need to be
  // inserted for the saved copy of exec.
  //
  //     x = copy exec
  //     z = s_<op>_b64 x, y
  //     exec = copy z
  // =>
  //     x = s_<op>_saveexec_b64 y
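  //
  // A concrete wave64 instance (register choices here are illustrative only):
  //
  //     %sgpr0_sgpr1 = COPY %exec
  //     %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, %vcc
  //     %exec = COPY %sgpr2_sgpr3
  // =>
  //     %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc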

  for (MachineBasicBlock &MBB : MF) {
    MachineBasicBlock::reverse_iterator I = fixTerminators(*TII, MBB);
    MachineBasicBlock::reverse_iterator E = MBB.rend();
    if (I == E)
      continue;

    unsigned CopyToExec = isCopyToExec(*I, ST);
    if (CopyToExec == AMDGPU::NoRegister)
      continue;

    // Scan backwards to find the def.
    auto CopyToExecInst = &*I;
    auto CopyFromExecInst = findExecCopy(*TII, ST, MBB, I, CopyToExec);
    if (CopyFromExecInst == E) {
      auto PrepareExecInst = std::next(I);
      if (PrepareExecInst == E)
        continue;
      // Fold exec = COPY (S_AND_B64 reg, exec) -> exec = S_AND_B64 reg, exec
      if (CopyToExecInst->getOperand(1).isKill() &&
          isLogicalOpOnExec(*PrepareExecInst) == CopyToExec) {
        LLVM_DEBUG(dbgs() << "Fold exec copy: " << *PrepareExecInst);

        PrepareExecInst->getOperand(0).setReg(Exec);

        LLVM_DEBUG(dbgs() << "into: " << *PrepareExecInst << '\n');

        CopyToExecInst->eraseFromParent();
      }

      continue;
    }

    if (isLiveOut(MBB, CopyToExec)) {
      // The copied register is live out and has a second use in another block.
      LLVM_DEBUG(dbgs() << "Exec copy source register is live out\n");
      continue;
    }

    Register CopyFromExec = CopyFromExecInst->getOperand(0).getReg();
    MachineInstr *SaveExecInst = nullptr;
    SmallVector<MachineInstr *, 4> OtherUseInsts;

    for (MachineBasicBlock::iterator J
           = std::next(CopyFromExecInst->getIterator()), JE = I->getIterator();
         J != JE; ++J) {
      if (SaveExecInst && J->readsRegister(Exec, TRI)) {
        LLVM_DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n');
        // Make sure this is inserted after any VALU ops that may have been
        // scheduled in between.
        SaveExecInst = nullptr;
        break;
      }

      bool ReadsCopyFromExec = J->readsRegister(CopyFromExec, TRI);

      if (J->modifiesRegister(CopyToExec, TRI)) {
        if (SaveExecInst) {
          LLVM_DEBUG(dbgs() << "Multiple instructions modify "
                            << printReg(CopyToExec, TRI) << '\n');
          SaveExecInst = nullptr;
          break;
        }

        unsigned SaveExecOp = getSaveExecOp(J->getOpcode());
        if (SaveExecOp == AMDGPU::INSTRUCTION_LIST_END)
          break;

        if (ReadsCopyFromExec) {
          SaveExecInst = &*J;
          LLVM_DEBUG(dbgs() << "Found save exec op: " << *SaveExecInst << '\n');
          continue;
        } else {
          LLVM_DEBUG(dbgs()
                     << "Instruction does not read exec copy: " << *J << '\n');
          break;
        }
      } else if (ReadsCopyFromExec && !SaveExecInst) {
        // Make sure no other instruction is trying to use this copy, before it
        // will be rewritten by the saveexec, i.e. hasOneUse. There may have
        // been another use, such as an inserted spill. For example:
        //
        // %sgpr0_sgpr1 = COPY %exec
        // spill %sgpr0_sgpr1
        // %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1
        //
        LLVM_DEBUG(dbgs() << "Found second use of save inst candidate: " << *J
                          << '\n');
        break;
      }

      if (SaveExecInst && J->readsRegister(CopyToExec, TRI)) {
        assert(SaveExecInst != &*J);
        OtherUseInsts.push_back(&*J);
      }
    }

    if (!SaveExecInst)
      continue;

    LLVM_DEBUG(dbgs() << "Insert save exec op: " << *SaveExecInst << '\n');

    MachineOperand &Src0 = SaveExecInst->getOperand(1);
    MachineOperand &Src1 = SaveExecInst->getOperand(2);

    MachineOperand *OtherOp = nullptr;

    if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
      OtherOp = &Src1;
    } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {
      if (!SaveExecInst->isCommutable())
        break;

      OtherOp = &Src0;
    } else
      llvm_unreachable("unexpected");

    CopyFromExecInst->eraseFromParent();

    auto InsPt = SaveExecInst->getIterator();
    const DebugLoc &DL = SaveExecInst->getDebugLoc();

    BuildMI(MBB, InsPt, DL, TII->get(getSaveExecOp(SaveExecInst->getOpcode())),
            CopyFromExec)
        .addReg(OtherOp->getReg());
    SaveExecInst->eraseFromParent();

    CopyToExecInst->eraseFromParent();

    for (MachineInstr *OtherInst : OtherUseInsts) {
      OtherInst->substituteRegister(CopyToExec, Exec,
                                    AMDGPU::NoSubRegister, *TRI);
    }
  }

  return true;
}