//===-- DelaySlotFiller.cpp - SPARC delay slot filler ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a simple local pass that attempts to fill delay slots with useful
// instructions. If no instructions can be moved into the delay slot, then a
// NOP is placed.
//===----------------------------------------------------------------------===//
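
// A minimal illustrative sketch (hand-written; not actual compiler output):
// for an unconditional branch whose delay slot would otherwise hold a NOP,
//
//     mov 1, %o0
//     ba  .LBB0_2
//     nop               ! unfilled delay slot
//
// the filler instead moves a safe preceding instruction into the slot:
//
//     ba  .LBB0_2
//     mov 1, %o0        ! filled delay slot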

#include "Sparc.h"
#include "SparcSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

#define DEBUG_TYPE "delay-slot-filler"

STATISTIC(FilledSlots, "Number of delay slots filled");

static cl::opt<bool> DisableDelaySlotFiller(
  "disable-sparc-delay-filler",
  cl::init(false),
  cl::desc("Disable the Sparc delay slot filler."),
  cl::Hidden);
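
// For reference, this (hidden) flag can be passed to llc, e.g. in an
// illustrative invocation such as:
//   llc -march=sparc -disable-sparc-delay-filler foo.ll
// When the filler is disabled, every delay slot is simply filled with a NOP
// and the RESTORE-combining below is skipped.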

namespace {
  struct Filler : public MachineFunctionPass {
    const SparcSubtarget *Subtarget = nullptr;

    static char ID;
    Filler() : MachineFunctionPass(ID) {}

    StringRef getPassName() const override { return "SPARC Delay Slot Filler"; }

    bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
    bool runOnMachineFunction(MachineFunction &F) override {
      bool Changed = false;
      Subtarget = &F.getSubtarget<SparcSubtarget>();

      // This pass invalidates liveness information when it reorders
      // instructions to fill delay slots.
      F.getRegInfo().invalidateLiveness();

      for (MachineBasicBlock &MBB : F)
        Changed |= runOnMachineBasicBlock(MBB);
      return Changed;
    }

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    void insertCallDefsUses(MachineBasicBlock::iterator MI,
                            SmallSet<unsigned, 32>& RegDefs,
                            SmallSet<unsigned, 32>& RegUses);

    void insertDefsUses(MachineBasicBlock::iterator MI,
                        SmallSet<unsigned, 32>& RegDefs,
                        SmallSet<unsigned, 32>& RegUses);

    bool IsRegInSet(SmallSet<unsigned, 32>& RegSet,
                    unsigned Reg);

    bool delayHasHazard(MachineBasicBlock::iterator candidate,
                        bool &sawLoad, bool &sawStore,
                        SmallSet<unsigned, 32> &RegDefs,
                        SmallSet<unsigned, 32> &RegUses);

    MachineBasicBlock::iterator
    findDelayInstr(MachineBasicBlock &MBB, MachineBasicBlock::iterator slot);

    bool needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize);

    bool tryCombineRestoreWithPrevInst(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI);

  };
  char Filler::ID = 0;
} // end of anonymous namespace

/// createSparcDelaySlotFillerPass - Returns a pass that fills in delay
/// slots in Sparc MachineFunctions
///
FunctionPass *llvm::createSparcDelaySlotFillerPass() {
  return new Filler;
}
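
// The Sparc target adds this pass late in its codegen pipeline (see
// SparcPassConfig::addPreEmitPass in SparcTargetMachine.cpp), i.e. after
// register allocation, consistent with the NoVRegs property required above.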

/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
/// We assume there is only one delay slot per delayed instruction.
///
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
  bool Changed = false;
  Subtarget = &MBB.getParent()->getSubtarget<SparcSubtarget>();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();

  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
    MachineBasicBlock::iterator MI = I;
    ++I;

    // If MI is restore, try combining it with previous inst.
    if (!DisableDelaySlotFiller &&
        (MI->getOpcode() == SP::RESTORErr
         || MI->getOpcode() == SP::RESTOREri)) {
      Changed |= tryCombineRestoreWithPrevInst(MBB, MI);
      continue;
    }

    // TODO: If we ever want to support v7, this needs to be extended
    // to cover all floating point operations.
    if (!Subtarget->isV9() &&
        (MI->getOpcode() == SP::FCMPS || MI->getOpcode() == SP::FCMPD
         || MI->getOpcode() == SP::FCMPQ)) {
      BuildMI(MBB, I, MI->getDebugLoc(), TII->get(SP::NOP));
      Changed = true;
      continue;
    }

    // If MI has no delay slot, skip.
    if (!MI->hasDelaySlot())
      continue;

    MachineBasicBlock::iterator D = MBB.end();

    if (!DisableDelaySlotFiller)
      D = findDelayInstr(MBB, MI);

    ++FilledSlots;
    Changed = true;

    if (D == MBB.end())
      BuildMI(MBB, I, MI->getDebugLoc(), TII->get(SP::NOP));
    else
      MBB.splice(I, &MBB, D);

    unsigned structSize = 0;
    if (needsUnimp(MI, structSize)) {
      MachineBasicBlock::iterator J = MI;
      ++J; // skip the delay filler.
      assert(J != MBB.end() && "MI needs a delay instruction.");
      BuildMI(MBB, ++J, MI->getDebugLoc(),
              TII->get(SP::UNIMP)).addImm(structSize);
      // Bundle the delay filler and unimp with the instruction.
      MIBundleBuilder(MBB, MachineBasicBlock::iterator(MI), J);
    } else {
      MIBundleBuilder(MBB, MachineBasicBlock::iterator(MI), I);
    }
  }
  return Changed;
}

MachineBasicBlock::iterator
Filler::findDelayInstr(MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator slot)
{
  SmallSet<unsigned, 32> RegDefs;
  SmallSet<unsigned, 32> RegUses;
  bool sawLoad = false;
  bool sawStore = false;

  if (slot == MBB.begin())
    return MBB.end();

  unsigned Opc = slot->getOpcode();

  if (Opc == SP::RET || Opc == SP::TLS_CALL)
    return MBB.end();

  if (Opc == SP::RETL || Opc == SP::TAIL_CALL || Opc == SP::TAIL_CALLri) {
    MachineBasicBlock::iterator J = slot;
    --J;

    if (J->getOpcode() == SP::RESTORErr
        || J->getOpcode() == SP::RESTOREri) {
      // change retl to ret.
      if (Opc == SP::RETL)
        slot->setDesc(Subtarget->getInstrInfo()->get(SP::RET));
      return J;
    }
  }

  // Call's delay filler can def some of call's uses.
  if (slot->isCall())
    insertCallDefsUses(slot, RegDefs, RegUses);
  else
    insertDefsUses(slot, RegDefs, RegUses);

  bool done = false;

  MachineBasicBlock::iterator I = slot;

  while (!done) {
    done = (I == MBB.begin());

    if (!done)
      --I;

    // skip debug instruction
    if (I->isDebugInstr())
      continue;

    if (I->hasUnmodeledSideEffects() || I->isInlineAsm() || I->isPosition() ||
        I->hasDelaySlot() || I->isBundledWithSucc())
      break;

    if (delayHasHazard(I, sawLoad, sawStore, RegDefs, RegUses)) {
      insertDefsUses(I, RegDefs, RegUses);
      continue;
    }

    return I;
  }
  return MBB.end();
}

bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
                            bool &sawLoad,
                            bool &sawStore,
                            SmallSet<unsigned, 32> &RegDefs,
                            SmallSet<unsigned, 32> &RegUses)
{
  if (candidate->isImplicitDef() || candidate->isKill())
    return true;

  if (candidate->mayLoad()) {
    sawLoad = true;
    if (sawStore)
      return true;
  }

  if (candidate->mayStore()) {
    if (sawStore)
      return true;
    sawStore = true;
    if (sawLoad)
      return true;
  }

  for (const MachineOperand &MO : candidate->operands()) {
    if (!MO.isReg())
      continue; // skip

    Register Reg = MO.getReg();

    if (MO.isDef()) {
      // check whether Reg is defined or used before delay slot.
      if (IsRegInSet(RegDefs, Reg) || IsRegInSet(RegUses, Reg))
        return true;
    }
    if (MO.isUse()) {
      // check whether Reg is defined before delay slot.
      if (IsRegInSet(RegDefs, Reg))
        return true;
    }
  }

  unsigned Opcode = candidate->getOpcode();
  // LD and LDD may have NOPs inserted afterwards in the case of some LEON
  // processors, so we can't use the delay slot if this feature is switched on.
  if (Subtarget->insertNOPLoad()
      && Opcode >= SP::LDDArr && Opcode <= SP::LDrr)
    return true;

  // Same as above for FDIV and FSQRT on some LEON processors.
  if (Subtarget->fixAllFDIVSQRT()
      && Opcode >= SP::FDIVD && Opcode <= SP::FSQRTD)
    return true;

  if (Subtarget->fixTN0009() && candidate->mayStore())
    return true;

  if (Subtarget->fixTN0013()) {
    switch (Opcode) {
    case SP::FDIVS:
    case SP::FDIVD:
    case SP::FSQRTS:
    case SP::FSQRTD:
      return true;
    default:
      break;
    }
  }

  return false;
}

void Filler::insertCallDefsUses(MachineBasicBlock::iterator MI,
                                SmallSet<unsigned, 32>& RegDefs,
                                SmallSet<unsigned, 32>& RegUses)
{
  // Call defines o7, which is visible to the instruction in delay slot.
  RegDefs.insert(SP::O7);

  switch(MI->getOpcode()) {
  default: llvm_unreachable("Unknown opcode.");
  case SP::CALL:
    break;
  case SP::CALLrr:
  case SP::CALLri:
    assert(MI->getNumOperands() >= 2);
    const MachineOperand &Reg = MI->getOperand(0);
    assert(Reg.isReg() && "CALL first operand is not a register.");
    assert(Reg.isUse() && "CALL first operand is not a use.");
    RegUses.insert(Reg.getReg());

    const MachineOperand &Operand1 = MI->getOperand(1);
    if (Operand1.isImm() || Operand1.isGlobal())
      break;
    assert(Operand1.isReg() && "CALLrr second operand is not a register.");
    assert(Operand1.isUse() && "CALLrr second operand is not a use.");
    RegUses.insert(Operand1.getReg());
    break;
  }
}

// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
                            SmallSet<unsigned, 32>& RegDefs,
                            SmallSet<unsigned, 32>& RegUses)
{
  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg())
      continue;

    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;
    if (MO.isDef())
      RegDefs.insert(Reg);
    if (MO.isUse()) {
      // Implicit register uses of retl are return values and
      // retl does not use them.
      if (MO.isImplicit() && MI->getOpcode() == SP::RETL)
        continue;
      RegUses.insert(Reg);
    }
  }
}

// returns true if the Reg or its alias is in the RegSet.
bool Filler::IsRegInSet(SmallSet<unsigned, 32>& RegSet, unsigned Reg)
{
  // Check Reg and all aliased Registers.
  for (MCRegAliasIterator AI(Reg, Subtarget->getRegisterInfo(), true);
       AI.isValid(); ++AI)
    if (RegSet.count(*AI))
      return true;
  return false;
}
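
// Background (summarized from the SPARC V8 ABI): a call to a function that
// returns a struct by value is followed, after the delay slot, by an UNIMP
// instruction whose immediate encodes the size of the returned struct; the
// callee checks that size and returns past the UNIMP word. needsUnimp()
// recognizes such calls so runOnMachineBasicBlock() above can emit the UNIMP
// and bundle it with the call.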

bool Filler::needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize)
{
  if (!I->isCall())
    return false;

  unsigned structSizeOpNum = 0;
  switch (I->getOpcode()) {
  default: llvm_unreachable("Unknown call opcode.");
  case SP::CALL:
    structSizeOpNum = 1;
    break;
  case SP::CALLrr:
  case SP::CALLri:
    structSizeOpNum = 2;
    break;
  case SP::TLS_CALL: return false;
  case SP::TAIL_CALLri:
  case SP::TAIL_CALL: return false;
  }

  const MachineOperand &MO = I->getOperand(structSizeOpNum);
  if (!MO.isImm())
    return false;
  StructSize = MO.getImm();
  return true;
}

static bool combineRestoreADD(MachineBasicBlock::iterator RestoreMI,
                              MachineBasicBlock::iterator AddMI,
                              const TargetInstrInfo *TII)
{
  // Before:  add  <op0>, <op1>, %i[0-7]
  //          restore %g0, %g0, %i[0-7]
  //
  // After :  restore <op0>, <op1>, %o[0-7]
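  //
  // For example (illustrative): "add %i0, %i1, %i0" followed by the trivial
  // restore collapses into "restore %i0, %i1, %o0"; the %i destination is
  // remapped to the matching %o register, which is where the caller sees the
  // value once the register window is restored.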

  Register reg = AddMI->getOperand(0).getReg();
  if (reg < SP::I0 || reg > SP::I7)
    return false;

  // Erase RESTORE.
  RestoreMI->eraseFromParent();

  // Change ADD to RESTORE.
  AddMI->setDesc(TII->get((AddMI->getOpcode() == SP::ADDrr)
                          ? SP::RESTORErr
                          : SP::RESTOREri));

  // Map the destination register.
  AddMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);

  return true;
}

static bool combineRestoreOR(MachineBasicBlock::iterator RestoreMI,
                             MachineBasicBlock::iterator OrMI,
                             const TargetInstrInfo *TII)
{
  // Before:  or  <op0>, <op1>, %i[0-7]
  //          restore %g0, %g0, %i[0-7]
  //   and <op0> or <op1> is zero,
  //
  // After :  restore <op0>, <op1>, %o[0-7]
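  //
  // Note: this pattern matters because SPARC's synthetic "mov" is encoded as
  // an OR with %g0 (e.g. "mov %o1, %i0" is "or %g0, %o1, %i0"), so simple
  // copies into a return register show up here as ORs.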

  Register reg = OrMI->getOperand(0).getReg();
  if (reg < SP::I0 || reg > SP::I7)
    return false;

  // check whether it is a copy.
  if (OrMI->getOpcode() == SP::ORrr
      && OrMI->getOperand(1).getReg() != SP::G0
      && OrMI->getOperand(2).getReg() != SP::G0)
    return false;

  if (OrMI->getOpcode() == SP::ORri
      && OrMI->getOperand(1).getReg() != SP::G0
      && (!OrMI->getOperand(2).isImm() || OrMI->getOperand(2).getImm() != 0))
    return false;

  // Erase RESTORE.
  RestoreMI->eraseFromParent();

  // Change OR to RESTORE.
  OrMI->setDesc(TII->get((OrMI->getOpcode() == SP::ORrr)
                         ? SP::RESTORErr
                         : SP::RESTOREri));

  // Map the destination register.
  OrMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);

  return true;
}

static bool combineRestoreSETHIi(MachineBasicBlock::iterator RestoreMI,
                                 MachineBasicBlock::iterator SetHiMI,
                                 const TargetInstrInfo *TII)
{
  // Before:  sethi imm3, %i[0-7]
  //          restore %g0, %g0, %g0
  //
  // After :  restore %g0, (imm3<<10), %o[0-7]
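  //
  // For example (illustrative): "sethi 3, %i0" materializes 3 << 10 = 3072,
  // which still fits in RESTORE's signed 13-bit immediate, so the pair
  // becomes "restore %g0, 3072, %o0".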

  Register reg = SetHiMI->getOperand(0).getReg();
  if (reg < SP::I0 || reg > SP::I7)
    return false;

  if (!SetHiMI->getOperand(1).isImm())
    return false;

  int64_t imm = SetHiMI->getOperand(1).getImm();

  // Is it a 3 bit immediate?
  if (!isInt<3>(imm))
    return false;

  // Make it a 13 bit immediate.
  imm = (imm << 10) & 0x1FFF;

  assert(RestoreMI->getOpcode() == SP::RESTORErr);

  RestoreMI->setDesc(TII->get(SP::RESTOREri));

  RestoreMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);
  RestoreMI->getOperand(1).setReg(SP::G0);
  RestoreMI->getOperand(2).ChangeToImmediate(imm);

  // Erase the original SETHI.
  SetHiMI->eraseFromParent();

  return true;
}

bool Filler::tryCombineRestoreWithPrevInst(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI)
{
  // No previous instruction.
  if (MBBI == MBB.begin())
    return false;

  // assert that MBBI is a "restore %g0, %g0, %g0".
  assert(MBBI->getOpcode() == SP::RESTORErr
         && MBBI->getOperand(0).getReg() == SP::G0
         && MBBI->getOperand(1).getReg() == SP::G0
         && MBBI->getOperand(2).getReg() == SP::G0);

  MachineBasicBlock::iterator PrevInst = std::prev(MBBI);

  // It cannot be combined with a bundled instruction.
  if (PrevInst->isBundledWithSucc())
    return false;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();

  switch (PrevInst->getOpcode()) {
  default: break;
  case SP::ADDrr:
  case SP::ADDri:  return combineRestoreADD(MBBI, PrevInst, TII); break;
  case SP::ORrr:
  case SP::ORri:   return combineRestoreOR(MBBI, PrevInst, TII); break;
  case SP::SETHIi: return combineRestoreSETHIi(MBBI, PrevInst, TII); break;
  }
  // It cannot combine with the previous instruction.
  return false;
}