//===- AggressiveInstCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the aggressive expression pattern combiner classes.
// Currently, it handles expression patterns for:
//  * Truncate instruction
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
#include "AggressiveInstCombineInternal.h"
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/AggressiveInstCombine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "aggressive-instcombine"

namespace {
/// Contains expression pattern combiner logic.
/// This class provides both the logic to detect expression patterns and to
/// combine them. It differs from the InstCombiner class in that each pattern
/// combiner runs only once as opposed to InstCombine's multi-iteration,
/// which allows a pattern combiner to have higher complexity than the O(1)
/// required by the instruction combiner.
class AggressiveInstCombinerLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AggressiveInstCombinerLegacyPass() : FunctionPass(ID) {
    initializeAggressiveInstCombinerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Run all expression pattern optimizations on the given \p F function.
  ///
  /// \param F function to optimize.
  /// \returns true if the IR is changed.
  bool runOnFunction(Function &F) override;
};
} // namespace

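// For orientation, a source-level shape that typically produces the guarded
// rotate handled by the fold below (illustrative sketch only; the names here
// are hypothetical and not taken from any test):
//
//   unsigned rotl32(unsigned X, unsigned Y) {
//     // The branch guards the undefined shift-by-32 when Y == 0.
//     return Y == 0 ? X : (X << Y) | (X >> (32 - Y));
//   }
//
// Funnel shifts (llvm.fshl/llvm.fshr) are defined for a zero shift amount, so
// the fold can replace the compare, branch, and phi with a single intrinsic
// call.
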
/// Match a pattern for a bitwise rotate operation that partially guards
/// against undefined behavior by branching around the rotation when the shift
/// amount is 0.
static bool foldGuardedRotateToFunnelShift(Instruction &I) {
  if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)
    return false;

  // As with the one-use checks below, this is not strictly necessary, but we
  // are being cautious to avoid potential perf regressions on targets that
  // do not actually have a rotate instruction (where the funnel shift would be
  // expanded back into math/shift/logic ops).
  if (!isPowerOf2_32(I.getType()->getScalarSizeInBits()))
    return false;

  // Match V to funnel shift left/right and capture the source operand and
  // shift amount in X and Y.
  auto matchRotate = [](Value *V, Value *&X, Value *&Y) {
    Value *L0, *L1, *R0, *R1;
    unsigned Width = V->getType()->getScalarSizeInBits();
    auto Sub = m_Sub(m_SpecificInt(Width), m_Value(R1));

    // rotate_left(X, Y) == (X << Y) | (X >> (Width - Y))
    auto RotL = m_OneUse(
        m_c_Or(m_Shl(m_Value(L0), m_Value(L1)), m_LShr(m_Value(R0), Sub)));
    if (RotL.match(V) && L0 == R0 && L1 == R1) {
      X = L0;
      Y = L1;
      return Intrinsic::fshl;
    }

    // rotate_right(X, Y) == (X >> Y) | (X << (Width - Y))
    auto RotR = m_OneUse(
        m_c_Or(m_LShr(m_Value(L0), m_Value(L1)), m_Shl(m_Value(R0), Sub)));
    if (RotR.match(V) && L0 == R0 && L1 == R1) {
      X = L0;
      Y = L1;
      return Intrinsic::fshr;
    }

    return Intrinsic::not_intrinsic;
  };

  // One phi operand must be a rotate operation, and the other phi operand must
  // be the source value of that rotate operation:
  // phi [ rotate(RotSrc, RotAmt), RotBB ], [ RotSrc, GuardBB ]
  PHINode &Phi = cast<PHINode>(I);
  Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);
  Value *RotSrc, *RotAmt;
  Intrinsic::ID IID = matchRotate(P0, RotSrc, RotAmt);
  if (IID == Intrinsic::not_intrinsic || RotSrc != P1) {
    IID = matchRotate(P1, RotSrc, RotAmt);
    if (IID == Intrinsic::not_intrinsic || RotSrc != P0)
      return false;
    assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
           "Pattern must match funnel shift left or right");
  }

  // The incoming block with our source operand must be the "guard" block.
  // That block must contain a cmp+branch to avoid the rotate when the shift
  // amount is equal to 0. The other incoming block is the block with the
  // rotate.
  BasicBlock *GuardBB = Phi.getIncomingBlock(RotSrc == P1);
  BasicBlock *RotBB = Phi.getIncomingBlock(RotSrc != P1);
  Instruction *TermI = GuardBB->getTerminator();
  ICmpInst::Predicate Pred;
  BasicBlock *PhiBB = Phi.getParent();
  if (!match(TermI, m_Br(m_ICmp(Pred, m_Specific(RotAmt), m_ZeroInt()),
                         m_SpecificBB(PhiBB), m_SpecificBB(RotBB))))
    return false;

  if (Pred != CmpInst::ICMP_EQ)
    return false;

  // We matched a variation of this IR pattern:
  // GuardBB:
  //   %cmp = icmp eq i32 %RotAmt, 0
  //   br i1 %cmp, label %PhiBB, label %RotBB
  // RotBB:
  //   %sub = sub i32 32, %RotAmt
  //   %shr = lshr i32 %X, %sub
  //   %shl = shl i32 %X, %RotAmt
  //   %rot = or i32 %shr, %shl
  //   br label %PhiBB
  // PhiBB:
  //   %cond = phi i32 [ %rot, %RotBB ], [ %X, %GuardBB ]
  // -->
  // llvm.fshl.i32(i32 %X, i32 %X, i32 %RotAmt)
  IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());
  Function *F = Intrinsic::getDeclaration(Phi.getModule(), IID, Phi.getType());
  Phi.replaceAllUsesWith(Builder.CreateCall(F, {RotSrc, RotSrc, RotAmt}));
  return true;
}

/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
/// the bit indexes (Mask) needed by a masked compare. If we're matching a
/// chain of 'and' ops, then we also need to capture the fact that we saw an
/// "and X, 1", so that's an extra return value for that case.
struct MaskOps {
  Value *Root;
  APInt Mask;
  bool MatchAndChain;
  bool FoundAnd1;

  MaskOps(unsigned BitWidth, bool MatchAnds)
      : Root(nullptr), Mask(APInt::getNullValue(BitWidth)),
        MatchAndChain(MatchAnds), FoundAnd1(false) {}
};

/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
/// chain of 'and' or 'or' instructions looking for shift ops of a common
/// source value. Examples:
///   or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
/// returns { X, 0x129 }
///   and (and (X >> 1), 1), (X >> 4)
/// returns { X, 0x12 }
static bool matchAndOrChain(Value *V, MaskOps &MOps) {
  Value *Op0, *Op1;
  if (MOps.MatchAndChain) {
    // Recurse through a chain of 'and' operands. This requires an extra check
    // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
    // in the chain to know that all of the high bits are cleared.
    if (match(V, m_And(m_Value(Op0), m_One()))) {
      MOps.FoundAnd1 = true;
      return matchAndOrChain(Op0, MOps);
    }
    if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  } else {
    // Recurse through a chain of 'or' operands.
    if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  }

  // We need a shift-right or a bare value representing a compare of bit 0 of
  // the original source operand.
  Value *Candidate;
  uint64_t BitIndex = 0;
  if (!match(V, m_LShr(m_Value(Candidate), m_ConstantInt(BitIndex))))
    Candidate = V;

  // Initialize the result source operand on the first leaf we reach.
  if (!MOps.Root)
    MOps.Root = Candidate;

  // The shift constant is out-of-range? This code hasn't been simplified.
  if (BitIndex >= MOps.Mask.getBitWidth())
    return false;

  // Fill in the mask bit derived from the shift constant.
  MOps.Mask.setBit(BitIndex);
  return MOps.Root == Candidate;
}

/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
/// These will include a chain of 'or' or 'and'-shifted bits from a
/// common source value:
/// and (or  (lshr X, C), ...), 1 --> (X & CMask) != 0
/// and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
/// Note: "any-bits-clear" and "all-bits-clear" are variations of these
/// patterns that differ only with a final 'not' of the result. We expect that
/// final 'not' to be folded with the compare that we create here (invert
/// predicate).
static bool foldAnyOrAllBitsSet(Instruction &I) {
  // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
  // final "and X, 1" instruction must be the final op in the sequence.
  bool MatchAllBitsSet;
  if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value())))
    MatchAllBitsSet = true;
  else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
    MatchAllBitsSet = false;
  else
    return false;

  MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);
  if (MatchAllBitsSet) {
    if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1)
      return false;
  } else {
    if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps))
      return false;
  }

  // The pattern was found. Create a masked compare that replaces all of the
  // shift and logic ops.
  IRBuilder<> Builder(&I);
  Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
  Value *And = Builder.CreateAnd(MOps.Root, Mask);
  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)
                               : Builder.CreateIsNotNull(And);
  Value *Zext = Builder.CreateZExt(Cmp, I.getType());
  I.replaceAllUsesWith(Zext);
  return true;
}

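// For reference, a concrete IR instance of the 'any-bits-set' ('or' chain)
// case handled above (illustrative example; the value names are hypothetical):
//
//   %s3 = lshr i32 %x, 3
//   %o1 = or i32 %x, %s3
//   %s5 = lshr i32 %x, 5
//   %o2 = or i32 %o1, %s5
//   %r  = and i32 %o2, 1
// -->
//   %m  = and i32 %x, 41        ; mask = bits {0, 3, 5} = 0x29
//   %c  = icmp ne i32 %m, 0
//   %r  = zext i1 %c to i32
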
// Try to recognize the function below as a popcount intrinsic.
// This is the "best" algorithm from
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
// Also used in TargetLowering::expandCTPOP().
//
// int popcount(unsigned int i) {
//   i = i - ((i >> 1) & 0x55555555);
//   i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
//   i = ((i + (i >> 4)) & 0x0F0F0F0F);
//   return (i * 0x01010101) >> 24;
// }
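//
// The matcher below generalizes this to any width that is a multiple of 8,
// greater than 8, and at most 128 bits, by byte-splatting the masks and
// shifting by Len - 8. For example (illustrative), the 64-bit variant of the
// final step would be:
//   return (i * 0x0101010101010101) >> 56;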
static bool tryToRecognizePopCount(Instruction &I) {
  if (I.getOpcode() != Instruction::LShr)
    return false;

  Type *Ty = I.getType();
  if (!Ty->isIntOrIntVectorTy())
    return false;

  unsigned Len = Ty->getScalarSizeInBits();
  // FIXME: fix Len == 8 and other irregular type lengths.
  if (!(Len <= 128 && Len > 8 && Len % 8 == 0))
    return false;

  APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55));
  APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33));
  APInt Mask0F = APInt::getSplat(Len, APInt(8, 0x0F));
  APInt Mask01 = APInt::getSplat(Len, APInt(8, 0x01));
  APInt MaskShift = APInt(Len, Len - 8);

  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *MulOp0;
  // Matching "(i * 0x01010101...) >> 24".
  if ((match(Op0, m_Mul(m_Value(MulOp0), m_SpecificInt(Mask01)))) &&
      match(Op1, m_SpecificInt(MaskShift))) {
    Value *ShiftOp0;
    // Matching "((i + (i >> 4)) & 0x0F0F0F0F...)".
    if (match(MulOp0, m_And(m_c_Add(m_LShr(m_Value(ShiftOp0), m_SpecificInt(4)),
                                    m_Deferred(ShiftOp0)),
                            m_SpecificInt(Mask0F)))) {
      Value *AndOp0;
      // Matching "(i & 0x33333333...) + ((i >> 2) & 0x33333333...)".
      if (match(ShiftOp0,
                m_c_Add(m_And(m_Value(AndOp0), m_SpecificInt(Mask33)),
                        m_And(m_LShr(m_Deferred(AndOp0), m_SpecificInt(2)),
                              m_SpecificInt(Mask33))))) {
        Value *Root, *SubOp1;
        // Matching "i - ((i >> 1) & 0x55555555...)".
        if (match(AndOp0, m_Sub(m_Value(Root), m_Value(SubOp1))) &&
            match(SubOp1, m_And(m_LShr(m_Specific(Root), m_SpecificInt(1)),
                                m_SpecificInt(Mask55)))) {
          LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");
          IRBuilder<> Builder(&I);
          Function *Func = Intrinsic::getDeclaration(
              I.getModule(), Intrinsic::ctpop, I.getType());
          I.replaceAllUsesWith(Builder.CreateCall(Func, {Root}));
          return true;
        }
      }
    }
  }

  return false;
}

/// This is the entry point for folds that could be implemented in regular
/// InstCombine, but they are separated because they are not expected to
/// occur frequently and/or have more than a constant-length pattern match.
static bool foldUnusualPatterns(Function &F, DominatorTree &DT) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Do not delete instructions under here, as that would invalidate the
    // iterator.
    // Walk the block backwards for efficiency. We're matching a chain of
    // use->defs, so we're more likely to succeed by starting from the bottom.
    // Also, we want to avoid matching partial patterns.
    // TODO: It would be more efficient if we removed dead instructions
    // iteratively in this loop rather than waiting until the end.
    for (Instruction &I : make_range(BB.rbegin(), BB.rend())) {
      MadeChange |= foldAnyOrAllBitsSet(I);
      MadeChange |= foldGuardedRotateToFunnelShift(I);
      MadeChange |= tryToRecognizePopCount(I);
    }
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
static bool runImpl(Function &F, TargetLibraryInfo &TLI, DominatorTree &DT) {
  bool MadeChange = false;
  const DataLayout &DL = F.getParent()->getDataLayout();
  TruncInstCombine TIC(TLI, DL, DT);
  MadeChange |= TIC.run(F);
  MadeChange |= foldUnusualPatterns(F, DT);
  return MadeChange;
}

void AggressiveInstCombinerLegacyPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
}

bool AggressiveInstCombinerLegacyPass::runOnFunction(Function &F) {
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  return runImpl(F, TLI, DT);
}

PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, TLI, DT)) {
    // No changes, all analyses are preserved.
    return PreservedAnalyses::all();
  }
  // Mark all the analyses that instcombine updates as preserved.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<GlobalsAA>();
  return PA;
}

char AggressiveInstCombinerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(AggressiveInstCombinerLegacyPass,
                      "aggressive-instcombine",
                      "Combine pattern based expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AggressiveInstCombinerLegacyPass, "aggressive-instcombine",
                    "Combine pattern based expressions", false, false)

// Initialization Routines
void llvm::initializeAggressiveInstCombine(PassRegistry &Registry) {
  initializeAggressiveInstCombinerLegacyPassPass(Registry);
}

void LLVMInitializeAggressiveInstCombiner(LLVMPassRegistryRef R) {
  initializeAggressiveInstCombinerLegacyPassPass(*unwrap(R));
}

FunctionPass *llvm::createAggressiveInstCombinerPass() {
  return new AggressiveInstCombinerLegacyPass();
}

void LLVMAddAggressiveInstCombinerPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createAggressiveInstCombinerPass());
}