//===- X86MacroFusion.cpp - X86 Macro Fusion ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file contains the X86 implementation of the DAG scheduling
/// mutation to pair instructions back to back.
//
//===----------------------------------------------------------------------===//
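
// For illustration: on a core that supports macro-fusion, a flag-setting
// instruction immediately followed by a conditional branch, e.g.
//
//   cmpl  $8, %edi
//   jne   .LBB0_2
//
// can decode as a single fused micro-op. The scheduling mutation defined
// below keeps such candidate pairs adjacent so the decoder can fuse them.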

#include "X86MacroFusion.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/MacroFusion.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

namespace {

// The classification for the first instruction.
enum class FirstInstrKind { Test, Cmp, And, ALU, IncDec, Invalid };

// The classification for the second instruction (jump).
enum class JumpKind {
  // JE, JL, JG and variants.
  ELG,
  // JA, JB and variants.
  AB,
  // JS, JP, JO and variants.
  SPO,
  // Not a fusable jump.
  Invalid,
};

} // namespace

static FirstInstrKind classifyFirst(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return FirstInstrKind::Invalid;
  case X86::TEST8rr:
  case X86::TEST16rr:
  case X86::TEST32rr:
  case X86::TEST64rr:
  case X86::TEST8ri:
  case X86::TEST16ri:
  case X86::TEST32ri:
  case X86::TEST64ri32:
  case X86::TEST8mr:
  case X86::TEST16mr:
  case X86::TEST32mr:
  case X86::TEST64mr:
    return FirstInstrKind::Test;
  case X86::AND16ri:
  case X86::AND16ri8:
  case X86::AND16rm:
  case X86::AND16rr:
  case X86::AND32ri:
  case X86::AND32ri8:
  case X86::AND32rm:
  case X86::AND32rr:
  case X86::AND64ri32:
  case X86::AND64ri8:
  case X86::AND64rm:
  case X86::AND64rr:
  case X86::AND8ri:
  case X86::AND8rm:
  case X86::AND8rr:
    return FirstInstrKind::And;
  case X86::CMP16ri:
  case X86::CMP16ri8:
  case X86::CMP16rm:
  case X86::CMP16rr:
  case X86::CMP16mr:
  case X86::CMP32ri:
  case X86::CMP32ri8:
  case X86::CMP32rm:
  case X86::CMP32rr:
  case X86::CMP32mr:
  case X86::CMP64ri32:
  case X86::CMP64ri8:
  case X86::CMP64rm:
  case X86::CMP64rr:
  case X86::CMP64mr:
  case X86::CMP8ri:
  case X86::CMP8rm:
  case X86::CMP8rr:
  case X86::CMP8mr:
    return FirstInstrKind::Cmp;
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD16ri8_DB:
  case X86::ADD16ri_DB:
  case X86::ADD16rm:
  case X86::ADD16rr:
  case X86::ADD16rr_DB:
  case X86::ADD32ri:
  case X86::ADD32ri8:
  case X86::ADD32ri8_DB:
  case X86::ADD32ri_DB:
  case X86::ADD32rm:
  case X86::ADD32rr:
  case X86::ADD32rr_DB:
  case X86::ADD64ri32:
  case X86::ADD64ri32_DB:
  case X86::ADD64ri8:
  case X86::ADD64ri8_DB:
  case X86::ADD64rm:
  case X86::ADD64rr:
  case X86::ADD64rr_DB:
  case X86::ADD8ri:
  case X86::ADD8ri_DB:
  case X86::ADD8rm:
  case X86::ADD8rr:
  case X86::ADD8rr_DB:
  case X86::SUB16ri:
  case X86::SUB16ri8:
  case X86::SUB16rm:
  case X86::SUB16rr:
  case X86::SUB32ri:
  case X86::SUB32ri8:
  case X86::SUB32rm:
  case X86::SUB32rr:
  case X86::SUB64ri32:
  case X86::SUB64ri8:
  case X86::SUB64rm:
  case X86::SUB64rr:
  case X86::SUB8ri:
  case X86::SUB8rm:
  case X86::SUB8rr:
    return FirstInstrKind::ALU;
  case X86::INC16r:
  case X86::INC32r:
  case X86::INC64r:
  case X86::INC8r:
  case X86::DEC16r:
  case X86::DEC32r:
  case X86::DEC64r:
  case X86::DEC8r:
    return FirstInstrKind::IncDec;
  }
}

static JumpKind classifySecond(const MachineInstr &MI) {
  X86::CondCode CC = X86::getCondFromBranch(MI);
  if (CC == X86::COND_INVALID)
    return JumpKind::Invalid;

  switch (CC) {
  default:
    return JumpKind::Invalid;
  case X86::COND_E:
  case X86::COND_NE:
  case X86::COND_L:
  case X86::COND_LE:
  case X86::COND_G:
  case X86::COND_GE:
    return JumpKind::ELG;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_A:
  case X86::COND_AE:
    return JumpKind::AB;
  case X86::COND_S:
  case X86::COND_NS:
  case X86::COND_P:
  case X86::COND_NP:
  case X86::COND_O:
  case X86::COND_NO:
    return JumpKind::SPO;
  }
}

/// Check if the instruction pair, FirstMI and SecondMI, should be fused
/// together. When FirstMI is unspecified, check whether SecondMI can be
/// part of a fused pair at all.
static bool shouldScheduleAdjacent(const TargetInstrInfo &TII,
                                   const TargetSubtargetInfo &TSI,
                                   const MachineInstr *FirstMI,
                                   const MachineInstr &SecondMI) {
  const X86Subtarget &ST = static_cast<const X86Subtarget &>(TSI);

  // Check if this processor supports any kind of fusion.
  if (!(ST.hasBranchFusion() || ST.hasMacroFusion()))
    return false;

  const JumpKind BranchKind = classifySecond(SecondMI);

  if (BranchKind == JumpKind::Invalid)
    return false; // Second cannot be fused with anything.

  if (FirstMI == nullptr)
    return true; // We're only checking whether Second can be fused at all.

  const FirstInstrKind TestKind = classifyFirst(*FirstMI);

  if (ST.hasBranchFusion()) {
    // Branch fusion can merge CMP and TEST with all conditional jumps.
    return (TestKind == FirstInstrKind::Cmp ||
            TestKind == FirstInstrKind::Test);
  }

  if (ST.hasMacroFusion()) {
    // Macro Fusion rules are a bit more complex. See Agner Fog's
    // Microarchitecture table 9.2 "Instruction Fusion".
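    // In short: TEST and AND pair with any fusible jump; CMP and the ADD/SUB
    // forms pair only with the equality/signed (ELG) and unsigned (AB) jumps;
    // INC and DEC leave the carry flag unmodified, which is why the
    // carry-reading AB jumps are excluded for them.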
    switch (TestKind) {
    case FirstInstrKind::Test:
    case FirstInstrKind::And:
      return true;
    case FirstInstrKind::Cmp:
    case FirstInstrKind::ALU:
      return BranchKind == JumpKind::ELG || BranchKind == JumpKind::AB;
    case FirstInstrKind::IncDec:
      return BranchKind == JumpKind::ELG;
    case FirstInstrKind::Invalid:
      return false;
    }
  }

  llvm_unreachable("unknown branch fusion type");
}

namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createX86MacroFusionDAGMutation() {
  return createBranchMacroFusionDAGMutation(shouldScheduleAdjacent);
}

} // end namespace llvm
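
// Usage sketch (for illustration; mirrors the createMachineScheduler override
// in X86TargetMachine.cpp): the mutation returned above is attached to the
// machine scheduler's DAG so that fusion candidates stay back to back:
//
//   ScheduleDAGInstrs *
//   createMachineScheduler(MachineSchedContext *C) const override {
//     ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//     DAG->addMutation(createX86MacroFusionDAGMutation());
//     return DAG;
//   }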