//===- MacroFusion.cpp - Macro Fusion -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file contains the implementation of the DAG scheduling mutation
/// to pair instructions back to back.
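/// For example, many x86 and AArch64 cores can execute a compare and the
/// immediately following conditional branch as a single macro-fused operation,
/// so the scheduler benefits from keeping such pairs adjacent.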
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MacroFusion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "machine-scheduler"

STATISTIC(NumFused, "Number of instr pairs fused");

using namespace llvm;

static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

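// An anti (write-after-read) or output (write-after-write) dependency is a
// register ordering hazard rather than a true data dependency; such edges are
// never fusion candidates and are skipped when artificial edges are added.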
static bool isHazard(const SDep &Dep) {
  return Dep.getKind() == SDep::Anti || Dep.getKind() == SDep::Output;
}

static bool fuseInstructionPair(ScheduleDAGInstrs &DAG, SUnit &FirstSU,
                                SUnit &SecondSU) {
  // Check that neither instr is already paired with another along the edge
  // between them.
  for (SDep &SI : FirstSU.Succs)
    if (SI.isCluster())
      return false;

  for (SDep &SI : SecondSU.Preds)
    if (SI.isCluster())
      return false;
  // Though the reachability checks above could be made more generic,
  // perhaps as part of ScheduleDAGInstrs::addEdge(), since such edges are valid,
  // the extra computation cost makes it less interesting in general cases.

  // Create a single weak edge between the adjacent instrs. The only effect is
  // to cause bottom-up scheduling to heavily prioritize the clustered instrs.
  if (!DAG.addEdge(&SecondSU, SDep(&FirstSU, SDep::Cluster)))
    return false;

  // Adjust the latency between both instrs.
  for (SDep &SI : FirstSU.Succs)
    if (SI.getSUnit() == &SecondSU)
      SI.setLatency(0);

  for (SDep &SI : SecondSU.Preds)
    if (SI.getSUnit() == &FirstSU)
      SI.setLatency(0);

  LLVM_DEBUG(
      dbgs() << "Macro fuse: "; DAG.dumpNodeName(FirstSU); dbgs() << " - ";
      DAG.dumpNodeName(SecondSU); dbgs() << " / ";
      dbgs() << DAG.TII->getName(FirstSU.getInstr()->getOpcode()) << " - "
             << DAG.TII->getName(SecondSU.getInstr()->getOpcode()) << '\n';);

  // Make data dependencies from the FirstSU also dependent on the SecondSU to
  // prevent them from being scheduled between the FirstSU and the SecondSU.
  if (&SecondSU != &DAG.ExitSU)
    for (const SDep &SI : FirstSU.Succs) {
      SUnit *SU = SI.getSUnit();
      if (SI.isWeak() || isHazard(SI) ||
          SU == &DAG.ExitSU || SU == &SecondSU || SU->isPred(&SecondSU))
        continue;
      LLVM_DEBUG(dbgs() << " Bind "; DAG.dumpNodeName(SecondSU);
                 dbgs() << " - "; DAG.dumpNodeName(*SU); dbgs() << '\n';);
      DAG.addEdge(SU, SDep(&SecondSU, SDep::Artificial));
    }

  // Make the FirstSU also dependent on the dependencies of the SecondSU to
  // prevent them from being scheduled between the FirstSU and the SecondSU.
  if (&FirstSU != &DAG.EntrySU) {
    for (const SDep &SI : SecondSU.Preds) {
      SUnit *SU = SI.getSUnit();
      if (SI.isWeak() || isHazard(SI) || &FirstSU == SU || FirstSU.isSucc(SU))
        continue;
      LLVM_DEBUG(dbgs() << " Bind "; DAG.dumpNodeName(*SU); dbgs() << " - ";
                 DAG.dumpNodeName(FirstSU); dbgs() << '\n';);
      DAG.addEdge(&FirstSU, SDep(SU, SDep::Artificial));
    }
    // ExitSU comes last by design, which acts like an implicit dependency
    // between ExitSU and any bottom root in the graph. We should transfer
    // this to FirstSU as well.
    if (&SecondSU == &DAG.ExitSU) {
      for (SUnit &SU : DAG.SUnits) {
        if (SU.Succs.empty())
          DAG.addEdge(&FirstSU, SDep(&SU, SDep::Artificial));
      }
    }
  }

  ++NumFused;
  return true;
}

namespace {

/// Post-process the DAG to create cluster edges between instrs that may
/// be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  ShouldSchedulePredTy shouldScheduleAdjacent;
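  // If true, try to fuse pairs anywhere in the scheduling block; if false,
  // only try to fuse the block's exit (branch) instruction with one of its
  // predecessors.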
  bool FuseBlock;
  bool scheduleAdjacentImpl(ScheduleDAGInstrs &DAG, SUnit &AnchorSU);

public:
  MacroFusion(ShouldSchedulePredTy shouldScheduleAdjacent, bool FuseBlock)
    : shouldScheduleAdjacent(shouldScheduleAdjacent), FuseBlock(FuseBlock) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;
};

} // end anonymous namespace

void MacroFusion::apply(ScheduleDAGInstrs *DAG) {
  if (FuseBlock)
    // For each of the SUnits in the scheduling block, try to fuse the instr in
    // it with one in its predecessors.
    for (SUnit &ISU : DAG->SUnits)
      scheduleAdjacentImpl(*DAG, ISU);

  if (DAG->ExitSU.getInstr())
    // Try to fuse the instr in the ExitSU with one in its predecessors.
    scheduleAdjacentImpl(*DAG, DAG->ExitSU);
}

/// Implement the fusion of instr pairs in the scheduling DAG,
/// anchored at the instr in AnchorSU.
bool MacroFusion::scheduleAdjacentImpl(ScheduleDAGInstrs &DAG, SUnit &AnchorSU) {
  const MachineInstr &AnchorMI = *AnchorSU.getInstr();
  const TargetInstrInfo &TII = *DAG.TII;
  const TargetSubtargetInfo &ST = DAG.MF.getSubtarget();

  // Check if the anchor instr may be fused at all: passing nullptr as the
  // first instr asks whether AnchorMI can be the second half of some pair.
  if (!shouldScheduleAdjacent(TII, ST, nullptr, AnchorMI))
    return false;

  // Explore the dependencies of the anchor instr for fusion candidates.
  for (SDep &Dep : AnchorSU.Preds) {
    // Ignore dependencies other than data or strong ordering.
    if (Dep.isWeak() || isHazard(Dep))
      continue;

    SUnit &DepSU = *Dep.getSUnit();
    if (DepSU.isBoundaryNode())
      continue;

    const MachineInstr *DepMI = DepSU.getInstr();
    if (!shouldScheduleAdjacent(TII, ST, DepMI, AnchorMI))
      continue;

    if (fuseInstructionPair(DAG, DepSU, AnchorSU))
      return true;
  }

  return false;
}

std::unique_ptr<ScheduleDAGMutation>
llvm::createMacroFusionDAGMutation(
    ShouldSchedulePredTy shouldScheduleAdjacent) {
  if (EnableMacroFusion)
    return std::make_unique<MacroFusion>(shouldScheduleAdjacent, true);
  return nullptr;
}

std::unique_ptr<ScheduleDAGMutation>
llvm::createBranchMacroFusionDAGMutation(
    ShouldSchedulePredTy shouldScheduleAdjacent) {
  if (EnableMacroFusion)
    return std::make_unique<MacroFusion>(shouldScheduleAdjacent, false);
  return nullptr;
}
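
// Illustrative usage sketch (not part of this file's logic): a target defines
// a ShouldSchedulePredTy callback and registers the mutation with its machine
// scheduler. The name "shouldFusePair" and the hook shown below are
// placeholders, not existing LLVM symbols.
//
//   static bool shouldFusePair(const TargetInstrInfo &TII,
//                              const TargetSubtargetInfo &STI,
//                              const MachineInstr *FirstMI,
//                              const MachineInstr &SecondMI) {
//     // FirstMI may be nullptr, meaning: can SecondMI be fused with anything?
//     return false; // target-specific opcode checks go here
//   }
//
//   // In the target's createMachineScheduler() hook:
//   // DAG->addMutation(createMacroFusionDAGMutation(shouldFusePair));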