//===- AggressiveInstCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the aggressive expression pattern combiner classes.
// Currently, it handles expression patterns for:
//  * Truncate instruction
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
#include "AggressiveInstCombineInternal.h"
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/AggressiveInstCombine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "aggressive-instcombine"

namespace {
/// Contains expression pattern combiner logic.
/// This class provides the logic to match expression patterns and combine
/// them. It differs from the InstCombiner class in that each pattern combiner
/// runs only once, as opposed to InstCombine's multi-iteration scheme, which
/// allows a pattern combiner to have higher complexity than the O(1) required
/// by the instruction combiner.
class AggressiveInstCombinerLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AggressiveInstCombinerLegacyPass() : FunctionPass(ID) {
    initializeAggressiveInstCombinerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Run all expression pattern optimizations on the given \p F function.
  ///
  /// \param F function to optimize.
  /// \returns true if the IR is changed.
  bool runOnFunction(Function &F) override;
};
} // namespace

/// Match a pattern for a bitwise rotate operation that partially guards
/// against undefined behavior by branching around the rotation when the shift
/// amount is 0.
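/// A typical source idiom producing this IR shape (an illustrative C snippet,
/// assumed for exposition rather than taken from this file):
///   r = (Amt == 0) ? X : ((X << Amt) | (X >> (32 - Amt)));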
static bool foldGuardedRotateToFunnelShift(Instruction &I) {
  if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)
    return false;

  // As with the one-use checks below, this is not strictly necessary, but we
  // are being cautious to avoid potential perf regressions on targets that
  // do not actually have a rotate instruction (where the funnel shift would be
  // expanded back into math/shift/logic ops).
  if (!isPowerOf2_32(I.getType()->getScalarSizeInBits()))
    return false;

  // Match V to funnel shift left/right and capture the source operand and
  // shift amount in X and Y.
  auto matchRotate = [](Value *V, Value *&X, Value *&Y) {
    Value *L0, *L1, *R0, *R1;
    unsigned Width = V->getType()->getScalarSizeInBits();
    auto Sub = m_Sub(m_SpecificInt(Width), m_Value(R1));

    // rotate_left(X, Y) == (X << Y) | (X >> (Width - Y))
    auto RotL = m_OneUse(
        m_c_Or(m_Shl(m_Value(L0), m_Value(L1)), m_LShr(m_Value(R0), Sub)));
    if (RotL.match(V) && L0 == R0 && L1 == R1) {
      X = L0;
      Y = L1;
      return Intrinsic::fshl;
    }

    // rotate_right(X, Y) == (X >> Y) | (X << (Width - Y))
    auto RotR = m_OneUse(
        m_c_Or(m_LShr(m_Value(L0), m_Value(L1)), m_Shl(m_Value(R0), Sub)));
    if (RotR.match(V) && L0 == R0 && L1 == R1) {
      X = L0;
      Y = L1;
      return Intrinsic::fshr;
    }

    return Intrinsic::not_intrinsic;
  };

  // One phi operand must be a rotate operation, and the other phi operand must
  // be the source value of that rotate operation:
  // phi [ rotate(RotSrc, RotAmt), RotBB ], [ RotSrc, GuardBB ]
  PHINode &Phi = cast<PHINode>(I);
  Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);
  Value *RotSrc, *RotAmt;
  Intrinsic::ID IID = matchRotate(P0, RotSrc, RotAmt);
  if (IID == Intrinsic::not_intrinsic || RotSrc != P1) {
    IID = matchRotate(P1, RotSrc, RotAmt);
    if (IID == Intrinsic::not_intrinsic || RotSrc != P0)
      return false;
  }
  assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
         "Pattern must match funnel shift left or right");

  // The incoming block with our source operand must be the "guard" block.
  // That must contain a cmp+branch to avoid the rotate when the shift amount
  // is equal to 0. The other incoming block is the block with the rotate.
  BasicBlock *GuardBB = Phi.getIncomingBlock(RotSrc == P1);
  BasicBlock *RotBB = Phi.getIncomingBlock(RotSrc != P1);
  Instruction *TermI = GuardBB->getTerminator();
  BasicBlock *TrueBB, *FalseBB;
  ICmpInst::Predicate Pred;
  if (!match(TermI, m_Br(m_ICmp(Pred, m_Specific(RotAmt), m_ZeroInt()), TrueBB,
                         FalseBB)))
    return false;

  BasicBlock *PhiBB = Phi.getParent();
  if (Pred != CmpInst::ICMP_EQ || TrueBB != PhiBB || FalseBB != RotBB)
    return false;

  // We matched a variation of this IR pattern:
  // GuardBB:
  //   %cmp = icmp eq i32 %RotAmt, 0
  //   br i1 %cmp, label %PhiBB, label %RotBB
  // RotBB:
  //   %sub = sub i32 32, %RotAmt
  //   %shr = lshr i32 %X, %sub
  //   %shl = shl i32 %X, %RotAmt
  //   %rot = or i32 %shr, %shl
  //   br label %PhiBB
  // PhiBB:
  //   %cond = phi i32 [ %rot, %RotBB ], [ %X, %GuardBB ]
  // -->
  // llvm.fshl.i32(i32 %X, i32 %X, i32 %RotAmt)
  IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());
  Function *F = Intrinsic::getDeclaration(Phi.getModule(), IID, Phi.getType());
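  // A funnel shift with both value operands equal to the source acts as a
  // rotate, so RotSrc is passed twice below to form rotl/rotr from fshl/fshr.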
  Phi.replaceAllUsesWith(Builder.CreateCall(F, {RotSrc, RotSrc, RotAmt}));
  return true;
}

namespace {
/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
/// the bit indexes (Mask) needed by a masked compare. If we're matching a chain
/// of 'and' ops, then we also need to capture the fact that we saw an
/// "and X, 1", so that's an extra return value for that case.
struct MaskOps {
  Value *Root;
  APInt Mask;
  bool MatchAndChain;
  bool FoundAnd1;

  MaskOps(unsigned BitWidth, bool MatchAnds)
      : Root(nullptr), Mask(APInt::getNullValue(BitWidth)),
        MatchAndChain(MatchAnds), FoundAnd1(false) {}
};
} // namespace

/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
/// chain of 'and' or 'or' instructions looking for shift ops of a common
/// source value. Examples:
///   or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
/// returns { X, 0x129 }
///   and (and (X >> 1), 1), (X >> 4)
/// returns { X, 0x12 }
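/// (Worked out: the bare X contributes bit 0 while the shifts contribute bits
/// 3, 5, and 8, giving the mask 0x129; in the second example, bits 1 and 4
/// give 0x12.)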
static bool matchAndOrChain(Value *V, MaskOps &MOps) {
  Value *Op0, *Op1;
  if (MOps.MatchAndChain) {
    // Recurse through a chain of 'and' operands. This requires an extra check
    // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
    // in the chain to know that all of the high bits are cleared.
    if (match(V, m_And(m_Value(Op0), m_One()))) {
      MOps.FoundAnd1 = true;
      return matchAndOrChain(Op0, MOps);
    }
    if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  } else {
    // Recurse through a chain of 'or' operands.
    if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  }

  // We need a shift-right or a bare value representing a compare of bit 0 of
  // the original source operand.
  Value *Candidate;
  uint64_t BitIndex = 0;
  if (!match(V, m_LShr(m_Value(Candidate), m_ConstantInt(BitIndex))))
    Candidate = V;

  // Initialize result source operand.
  if (!MOps.Root)
    MOps.Root = Candidate;

  // The shift constant is out-of-range? This code hasn't been simplified.
  if (BitIndex >= MOps.Mask.getBitWidth())
    return false;

  // Fill in the mask bit derived from the shift constant.
  MOps.Mask.setBit(BitIndex);
  return MOps.Root == Candidate;
}

/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
/// These will include a chain of 'or' or 'and'-shifted bits from a
/// common source value:
/// and (or  (lshr X, C), ...), 1 --> (X & CMask) != 0
/// and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
/// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns
/// that differ only with a final 'not' of the result. We expect that final
/// 'not' to be folded with the compare that we create here (invert predicate).
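/// For example, an 'or' chain such as the following (an illustrative i32
/// instance, not quoted from a test):
///   and (or (or X, (lshr X, 3)), (lshr X, 8)), 1
/// folds to (X & 0x109) != 0, zero-extended back to i32.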
static bool foldAnyOrAllBitsSet(Instruction &I) {
  // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
  // final "and X, 1" instruction must be the final op in the sequence.
  bool MatchAllBitsSet;
  if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value())))
    MatchAllBitsSet = true;
  else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
    MatchAllBitsSet = false;
  else
    return false;

  MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);
  if (MatchAllBitsSet) {
    if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1)
      return false;
  } else {
    if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps))
      return false;
  }

  // The pattern was found. Create a masked compare that replaces all of the
  // shift and logic ops.
  IRBuilder<> Builder(&I);
  Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
  Value *And = Builder.CreateAnd(MOps.Root, Mask);
  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)
                               : Builder.CreateIsNotNull(And);
  Value *Zext = Builder.CreateZExt(Cmp, I.getType());
  I.replaceAllUsesWith(Zext);
  return true;
}

/// This is the entry point for folds that could be implemented in regular
/// InstCombine, but they are separated because they are not expected to
/// occur frequently and/or have more than a constant-length pattern match.
static bool foldUnusualPatterns(Function &F, DominatorTree &DT) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Do not delete instructions under here and invalidate the iterator.
    // Walk the block backwards for efficiency. We're matching a chain of
    // use->defs, so we're more likely to succeed by starting from the bottom.
    // Also, we want to avoid matching partial patterns.
    // TODO: It would be more efficient if we removed dead instructions
    // iteratively in this loop rather than waiting until the end.
    for (Instruction &I : make_range(BB.rbegin(), BB.rend())) {
      MadeChange |= foldAnyOrAllBitsSet(I);
      MadeChange |= foldGuardedRotateToFunnelShift(I);
    }
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
static bool runImpl(Function &F, TargetLibraryInfo &TLI, DominatorTree &DT) {
  bool MadeChange = false;
  const DataLayout &DL = F.getParent()->getDataLayout();
  TruncInstCombine TIC(TLI, DL, DT);
  MadeChange |= TIC.run(F);
  MadeChange |= foldUnusualPatterns(F, DT);
  return MadeChange;
}

void AggressiveInstCombinerLegacyPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
}

bool AggressiveInstCombinerLegacyPass::runOnFunction(Function &F) {
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  return runImpl(F, TLI, DT);
}

PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, TLI, DT)) {
    // No changes, all analyses are preserved.
    return PreservedAnalyses::all();
  }

  // Mark all the analyses that instcombine updates as preserved.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<GlobalsAA>();
  return PA;
}

char AggressiveInstCombinerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(AggressiveInstCombinerLegacyPass,
                      "aggressive-instcombine",
                      "Combine pattern based expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AggressiveInstCombinerLegacyPass, "aggressive-instcombine",
                    "Combine pattern based expressions", false, false)

// Initialization Routines
void llvm::initializeAggressiveInstCombine(PassRegistry &Registry) {
  initializeAggressiveInstCombinerLegacyPassPass(Registry);
}

void LLVMInitializeAggressiveInstCombiner(LLVMPassRegistryRef R) {
  initializeAggressiveInstCombinerLegacyPassPass(*unwrap(R));
}

FunctionPass *llvm::createAggressiveInstCombinerPass() {
  return new AggressiveInstCombinerLegacyPass();
}

void LLVMAddAggressiveInstCombinerPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createAggressiveInstCombinerPass());
}