//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions. This pass does not modify the CFG. This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
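//
// As a quick illustration of rules 1 and 6 (an added example, not an
// exhaustive list), the pass rewrites:
//    %Y = add i32 7, %X
//    %Z = mul i32 %Y, 8
// into:
//    %Y = add i32 %X, 7
//    %Z = shl i32 %Y, 3
//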
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LastRunTrackingAnalysis.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;
STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");
STATISTIC(NumOneIteration, "Number of functions with one iteration");
STATISTIC(NumTwoIterations, "Number of functions with two iterations");
STATISTIC(NumThreeIterations, "Number of functions with three iterations");
STATISTIC(NumFourOrMoreIterations,
          "Number of functions with four or more iterations");

STATISTIC(NumCombined, "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst, "Number of dead inst eliminated");
STATISTIC(NumSunkInst, "Number of instructions sunk");
STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumFactor, "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations");
DEBUG_COUNTER(VisitCounter, "instcombine-visit",
              "Controls which instructions are visited");
static cl::opt<bool>
    EnableCodeSinking("instcombine-code-sinking",
                      cl::desc("Enable code sinking"), cl::init(true));

static cl::opt<unsigned> MaxSinkNumUsers(
    "instcombine-max-sink-users", cl::init(32),
    cl::desc("Maximum number of undroppable users for instruction sinking"));

static cl::opt<unsigned>
    MaxArraySize("instcombine-maxarray-size", cl::init(1024),
                 cl::desc("Maximum array size considered when doing a combine"));

// FIXME: Remove this flag when it is no longer necessary to convert
// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
// increases variable availability at the cost of accuracy. Variables that
// cannot be promoted by mem2reg or SROA will be described as living in memory
// for their entire lifetime. However, passes like DSE and instcombine can
// delete stores to the alloca, leading to misleading and inaccurate debug
// information. This flag can be removed when those passes are fixed.
static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
                                               cl::Hidden, cl::init(true));
std::optional<Instruction *>
InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
  }
  return std::nullopt;
}
std::optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
    IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
        *this, II, DemandedMask, Known, KnownBitsComputed);
  }
  return std::nullopt;
}
std::optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
    IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
    APInt &PoisonElts2, APInt &PoisonElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
        *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
        SimplifyAndSetOp);
  }
  return std::nullopt;
}
bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
  // Approved exception for TTI use: This queries a legality property of the
  // target, not a profitability heuristic. Ideally this should be part of
  // DataLayout instead.
  return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
}
Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
  if (!RewriteGEP)
    return llvm::emitGEPOffset(&Builder, DL, GEP);

  IRBuilderBase::InsertPointGuard Guard(Builder);
  auto *Inst = dyn_cast<Instruction>(GEP);
  if (Inst)
    Builder.SetInsertPoint(Inst);

  Value *Offset = EmitGEPOffset(GEP);
  // If a non-trivial GEP has other uses, rewrite it to avoid duplicating
  // the offset arithmetic.
  if (Inst && !GEP->hasOneUse() && !GEP->hasAllConstantIndices() &&
      !GEP->getSourceElementType()->isIntegerTy(8)) {
    replaceInstUsesWith(
        *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
                                 Offset, "", GEP->getNoWrapFlags()));
    eraseInstFromFunction(*Inst);
  }
  return Offset;
}
/// Legal integers and common types are considered desirable. This is used to
/// avoid creating instructions with types that may not be supported well by
/// the backend.
/// NOTE: This treats i8, i16 and i32 specially because they are common
/// types in frontend languages.
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
  switch (BitWidth) {
  case 8:
  case 16:
  case 32:
    return true;
  default:
    return DL.isLegalInteger(BitWidth);
  }
}
/// Return true if it is desirable to convert an integer computation from a
/// given bit width to a new bit width.
/// We don't want to convert from a legal or desirable type (like i8) to an
/// illegal type or from a smaller to a larger illegal type. A width of '1'
/// is always treated as a desirable type because i1 is a fundamental type in
/// IR, and there are many specialized optimizations for i1 types.
/// Common/desirable widths are equally treated as legal to convert to, in
/// order to open up more combining opportunities.
bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {
  bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
  bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);

  // Convert to desirable widths even if they are not legal types.
  // Only shrink types, to prevent infinite loops.
  if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
    return true;

  // If this is a legal or desirable integer from type, and the result would be
  // an illegal type, don't do the transformation.
  if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}
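
// Illustrative examples of the rules above (assuming a typical 64-bit
// DataLayout where i8/i16/i32/i64 are legal): i64 -> i32 and i160 -> i64 are
// allowed, i64 -> i160 is rejected, and i33 -> i32 is allowed because i32 is
// a desirable common width even though i33 is illegal.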
/// Return true if it is desirable to convert a computation from 'From' to 'To'.
/// We don't want to convert from a legal to an illegal type or from a smaller
/// to a larger illegal type. i1 is always treated as a legal type because it is
/// a fundamental type in IR, and there are many specialized optimizations for
/// i1 types.
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
  // TODO: This could be extended to allow vectors. Datalayout changes might be
  // needed to properly support that.
  if (!From->isIntegerTy() || !To->isIntegerTy())
    return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  return shouldChangeType(FromWidth, ToWidth);
}
// Return true if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add and Sub opcodes. For
// all other opcodes, the function conservatively returns false.
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())
    return false;

  // We reason about Add and Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
    return false;

  const APInt *BVal, *CVal;
  if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
    return false;

  bool Overflow = false;
  if (Opcode == Instruction::Add)
    (void)BVal->sadd_ov(*CVal, Overflow);
  else
    (void)BVal->ssub_ov(*CVal, Overflow);

  return !Overflow;
}
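
// Illustrative example (added for exposition): when reassociating
//    %a = add nsw i32 %x, 5
//    %r = add nsw i32 %a, 7
// into "%r = add i32 %x, 12", nsw can be kept because 5 + 7 does not overflow
// signed i32; had the constants been INT_MAX and 1, it would be dropped.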
static bool hasNoUnsignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();
}

static bool hasNoSignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();
}
/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. Fast-math flags are the exception: they remain valid across
/// these transforms, so when applicable they are preserved.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}
/// Combine constant operands of associative operations either before or after a
/// cast to eliminate one of the associative operations:
/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
                                   InstCombinerImpl &IC) {
  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())
    return false;

  // TODO: Enhance logic for other casts and remove this check.
  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)
    return false;

  // TODO: Enhance logic for other BinOps and remove this check.
  if (!BinOp1->isBitwiseLogicOp())
    return false;

  auto AssocOpcode = BinOp1->getOpcode();
  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
    return false;

  Constant *C1, *C2;
  if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
      !match(BinOp2->getOperand(1), m_Constant(C2)))
    return false;

  // TODO: This assumes a zext cast.
  // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
  // to the destination type might lose bits.

  // Fold the constants together in the destination type:
  // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
  const DataLayout &DL = IC.getDataLayout();
  Type *DestTy = C1->getType();
  Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
  if (!CastC2)
    return false;
  Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
  if (!FoldedC)
    return false;

  IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
  IC.replaceOperand(*BinOp1, 1, FoldedC);
  BinOp1->dropPoisonGeneratingFlags();
  Cast->dropPoisonGeneratingFlags();
  return true;
}
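
// Illustrative example of the fold above (added for exposition):
//    %i = and i8 %x, 5
//    %z = zext i8 %i to i32
//    %r = and i32 %z, 12
// becomes
//    %z = zext i8 %x to i32
//    %r = and i32 %z, 4        ; 12 & zext(5) == 4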
// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
// inttoptr ( ptrtoint (x) ) --> x
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
  auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
  if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
                      DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
    auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
    Type *CastTy = IntToPtr->getDestTy();
    if (PtrToInt &&
        CastTy->getPointerAddressSpace() ==
            PtrToInt->getSrcTy()->getPointerAddressSpace() &&
        DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
            DL.getTypeSizeInBits(PtrToInt->getDestTy()))
      return PtrToInt->getOperand(0);
  }
  return nullptr;
}
/// This performs a few simplifications for operators that are associative or
/// commutative:
///
///  Commutative operators:
///
///  1. Order operands such that they are listed from right (least complex) to
///     left (most complex). This puts constants before unary operators before
///     binary operators.
///
///  Associative operators:
///
///  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
///  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
///
///  Associative and commutative operators:
///
///  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
///  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
///  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
///     if C1 and C2 are constants.
bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex). This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
                                 getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    if (I.isCommutative()) {
      if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
        replaceOperand(I, 0, Pair->first);
        replaceOperand(I, 1, Pair->second);
        Changed = true;
      }
    }

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "A op V".
          replaceOperand(I, 0, A);
          replaceOperand(I, 1, V);
          bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
          bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);

          // Conservatively clear all optional flags since they may not be
          // preserved by the reassociation. Reset nsw/nuw based on the above
          // analysis.
          ClearSubclassDataAfterReassociation(I);

          // Note: this is only valid because SimplifyBinOp doesn't look at
          // the operands to Op0.
          if (IsNUW)
            I.setHasNoUnsignedWrap(true);

          if (IsNSW)
            I.setHasNoSignedWrap(true);

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op C".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      if (simplifyAssocCastAssoc(&I, *this)) {
        Changed = true;
        ++NumReassoc;
        continue;
      }

      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op B".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "B op V".
          replaceOperand(I, 0, B);
          replaceOperand(I, 1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
    // if C1 and C2 are constants.
    Value *A, *B;
    Constant *C1, *C2, *CRes;
    if (Op0 && Op1 && Op0->getOpcode() == Opcode &&
        Op1->getOpcode() == Opcode &&
        match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
        match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
        (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
      bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0) &&
                   hasNoUnsignedWrap(*Op1);
      BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add)
                                  ? BinaryOperator::CreateNUW(Opcode, A, B)
                                  : BinaryOperator::Create(Opcode, A, B);

      if (isa<FPMathOperator>(NewBO)) {
        FastMathFlags Flags = I.getFastMathFlags() & Op0->getFastMathFlags() &
                              Op1->getFastMathFlags();
        NewBO->setFastMathFlags(Flags);
      }
      InsertNewInstWith(NewBO, I.getIterator());
      NewBO->takeName(Op1);
      replaceOperand(I, 0, NewBO);
      replaceOperand(I, 1, CRes);
      // Conservatively clear the optional flags, since they may not be
      // preserved by the reassociation.
      ClearSubclassDataAfterReassociation(I);
      if (IsNUW)
        I.setHasNoUnsignedWrap(true);

      Changed = true;
      continue;
    }

    // No further simplifications.
    return Changed;
  } while (true);
}
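
// Illustrative example of transform 6 above (added for exposition; assumes
// no wrap flags need preserving):
//    (add (add %x, 3), (add %y, 7)) --> (add (add %x, %y), 10)
// so the two inner constants fold into one.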
/// Return whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  // X & (Y | Z) <--> (X & Y) | (X & Z)
  // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
  if (LOp == Instruction::And)
    return ROp == Instruction::Or || ROp == Instruction::Xor;

  // X | (Y & Z) <--> (X | Y) & (X | Z)
  if (LOp == Instruction::Or)
    return ROp == Instruction::And;

  // X * (Y + Z) <--> (X * Y) + (X * Z)
  // X * (Y - Z) <--> (X * Y) - (X * Z)
  if (LOp == Instruction::Mul)
    return ROp == Instruction::Add || ROp == Instruction::Sub;

  return false;
}

/// Return whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return leftDistributesOverRight(ROp, LOp);

  // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
  return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);

  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
}
/// This function returns identity value for given opcode, which can be used to
/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
  if (isa<Constant>(V))
    return nullptr;

  return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
}
/// This function predicates factorization using distributive laws. By default,
/// it just returns the 'Op' inputs. But for special-cases like
/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
/// allow more factorization opportunities.
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
                          Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
  assert(Op && "Expected a binary operator");
  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);
  if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
    Constant *C;
    if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
      // X << C --> X * (1 << C)
      RHS = ConstantFoldBinaryInstruction(
          Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
      assert(RHS && "Constant folding of immediate constants failed");
      return Instruction::Mul;
    }
    // TODO: We can add other conversions e.g. shr => div etc.
  }
  if (Instruction::isBitwiseLogicOp(TopOpcode)) {
    if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
        match(Op, m_LShr(m_NonNegative(), m_Value()))) {
      // lshr nneg C, X --> ashr nneg C, X
      return Instruction::AShr;
    }
  }
  return Op->getOpcode();
}
/// This tries to simplify binary operations by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
                               InstCombiner::BuilderTy &Builder,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {
  assert(A && B && C && D && "All values must be provided");

  Value *V = nullptr;
  Value *RetVal = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));

      // If "B op D" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
    }
  }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      // If "A op C" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));

      // If "A op C" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
    }
  }

  if (!RetVal)
    return nullptr;

  ++NumFactor;
  RetVal->takeName(&I);

  // Try to add no-overflow flags to the final value.
  if (isa<OverflowingBinaryOperator>(RetVal)) {
    bool HasNSW = false;
    bool HasNUW = false;
    if (isa<OverflowingBinaryOperator>(&I)) {
      HasNSW = I.hasNoSignedWrap();
      HasNUW = I.hasNoUnsignedWrap();
    }
    if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
      HasNSW &= LOBO->hasNoSignedWrap();
      HasNUW &= LOBO->hasNoUnsignedWrap();
    }

    if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
      HasNSW &= ROBO->hasNoSignedWrap();
      HasNUW &= ROBO->hasNoUnsignedWrap();
    }

    if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
      // We can propagate 'nsw' if we know that
      //  %Y = mul nsw i16 %X, C
      //  %Z = add nsw i16 %Y, %X
      // =>
      //  %Z = mul nsw i16 %X, C+1
      //
      // iff C+1 isn't INT_MIN
      const APInt *CInt;
      if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
        cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);

      // nuw can be propagated with any constant or nuw value.
      cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
    }
  }
  return RetVal;
}
// If `I` has one Const operand and the other matches `(ctpop (not x))`,
// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
// This is only useful if the new subtract can fold, so we only handle the
// following cases:
//    1) (add/sub/disjoint_or C, (ctpop (not x))
//        -> (add/sub/disjoint_or C', (ctpop x))
//    2) (cmp pred C, (ctpop (not x))
//        -> (cmp pred C', (ctpop x))
Instruction *InstCombinerImpl::tryFoldInstWithCtpopWithNot(Instruction *I) {
  unsigned Opc = I->getOpcode();
  unsigned ConstIdx = 1;
  switch (Opc) {
  default:
    return nullptr;
    // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
    // We can fold the BitWidth(x) with add/sub/icmp as long the other operand
    // is constant.
  case Instruction::Sub:
    ConstIdx = 0;
    break;
  case Instruction::ICmp:
    // Signed predicates aren't correct in some edge cases like for i2 types.
    // As well, since (ctpop x) is known to be in [0, BitWidth(x)], almost all
    // signed comparisons against it are simplified to unsigned ones anyway.
    if (cast<ICmpInst>(I)->isSigned())
      return nullptr;
    break;
  case Instruction::Or:
    if (!match(I, m_DisjointOr(m_Value(), m_Value())))
      return nullptr;
    [[fallthrough]];
  case Instruction::Add:
    break;
  }

  Value *Op;
  // Find ctpop.
  if (!match(I->getOperand(1 - ConstIdx),
             m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(Op)))))
    return nullptr;

  Constant *C;
  // Check other operand is ImmConstant.
  if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
    return nullptr;

  Type *Ty = Op->getType();
  Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
  // Need extra check for icmp. Note if this check is true, it generally means
  // the icmp will simplify to true/false.
  if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
    Constant *Cmp =
        ConstantFoldCompareInstOperands(ICmpInst::ICMP_UGT, C, BitWidthC, DL);
    if (!Cmp || !Cmp->isZeroValue())
      return nullptr;
  }

  // Check we can invert `(not x)` for free.
  bool Consumes = false;
  if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
    return nullptr;
  Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
  assert(NotOp != nullptr &&
         "Desync between isFreeToInvert and getFreelyInverted");

  Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);

  Value *R = nullptr;

  // Do the transformation here to avoid potentially introducing an infinite
  // loop.
  switch (Opc) {
  case Instruction::Sub:
    R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
    break;
  case Instruction::Or:
  case Instruction::Add:
    R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
    break;
  case Instruction::ICmp:
    R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
                           CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
    break;
  default:
    llvm_unreachable("Unhandled Opcode");
  }
  assert(R != nullptr);
  return replaceInstUsesWith(*I, R);
}
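
// Illustrative example of the add case above (added for exposition), with
// i32 operands:
//    %n = xor i32 %x, -1                    ; (not x)
//    %c = call i32 @llvm.ctpop.i32(i32 %n)
//    %r = add i32 10, %c
// becomes "%r = sub i32 42, %c2" with "%c2 = ctpop(%x)", because
// ctpop(~x) == 32 - ctpop(x) and 10 + 32 == 42.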
// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
//    IFF
//    1) the logic_shifts match
//    2) either BinOp1 is `and` (any mask C1 works), or the binop/shift pair
//       distributes completely and
//       (logic_shift (inv_logic_shift C1, C), C) == C1
//
//    -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
//
// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
//    IFF
//    1) the logic_shifts match
//    2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
//
//    -> (BinOp (logic_shift (BinOp X, Y)), Mask)
//
// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
//    IFF
//    1) Binop1 is bitwise logical operator `and`, `or` or `xor`
//    2) Binop2 is `not`
//
//    -> (arithmetic_shift Binop1((not X), Y), Amt)
//
Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
  const DataLayout &DL = I.getDataLayout();
  auto IsValidBinOpc = [](unsigned Opc) {
    switch (Opc) {
    default:
      return false;
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Add:
      // Skip Sub as we only match constant masks which will canonicalize to use
      // add.
      return true;
    }
  };

  // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
  // constraints.
  auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
                                      unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
           ShOpc == Instruction::Shl;
  };

  auto GetInvShift = [](unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
  };

  auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
                                 unsigned ShOpc, Constant *CMask,
                                 Constant *CShift) {
    // If the BinOp1 is `and` we don't need to check the mask.
    if (BinOpc1 == Instruction::And)
      return true;

    // For all other possible transfers we need complete distributable
    // binop/shift (anything but `add` + `lshr`).
    if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
      return false;

    // If BinOp2 is `and`, any mask works (this only really helps for non-splat
    // vecs, otherwise the mask will be simplified and the following check will
    // handle it).
    if (BinOpc2 == Instruction::And)
      return true;

    // Otherwise, need mask that meets the below requirement.
    // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
    Constant *MaskInvShift =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
           CMask;
  };

  auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
    Constant *CMask, *CShift;
    Value *X, *Y, *ShiftedX, *Mask, *Shift;
    if (!match(I.getOperand(ShOpnum),
               m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
      return nullptr;
    if (!match(I.getOperand(1 - ShOpnum),
               m_BinOp(m_Value(ShiftedX), m_Value(Mask))))
      return nullptr;

    if (!match(ShiftedX, m_OneUse(m_Shift(m_Value(X), m_Specific(Shift)))))
      return nullptr;

    // Make sure we are matching instruction shifts and not ConstantExpr
    auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
    auto *IX = dyn_cast<Instruction>(ShiftedX);
    if (!IY || !IX)
      return nullptr;

    // LHS and RHS need same shift opcode
    unsigned ShOpc = IY->getOpcode();
    if (ShOpc != IX->getOpcode())
      return nullptr;

    // Make sure binop is real instruction and not ConstantExpr
    auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
    if (!BO2)
      return nullptr;

    unsigned BinOpc = BO2->getOpcode();
    // Make sure we have valid binops.
    if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
      return nullptr;

    if (ShOpc == Instruction::AShr) {
      if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
          BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
        Value *NotX = Builder.CreateNot(X);
        Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
        return BinaryOperator::Create(
            static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
      }

      return nullptr;
    }

    // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
    // distribute to drop the shift regardless of the constants.
    if (BinOpc == I.getOpcode() &&
        IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
      Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
      Value *NewBinOp1 = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
      return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
    }

    // Otherwise we can only distribute by constant shifting the mask, so
    // ensure we have constants.
    if (!match(Shift, m_ImmConstant(CShift)))
      return nullptr;
    if (!match(Mask, m_ImmConstant(CMask)))
      return nullptr;

    // Check if we can distribute the binops.
    if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
      return nullptr;

    Constant *NewCMask =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    Value *NewBinOp2 = Builder.CreateBinOp(
        static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
    Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
    return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
                                  NewBinOp1, CShift);
  };

  if (Instruction *R = MatchBinOp(0))
    return R;
  return MatchBinOp(1);
}
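
// Illustrative example of the constant-mask path above (constants invented
// for illustration): with Binop1 = or, Binop2 = and, and a shl by 4,
//    (or (and (shl %x, 4), 240), (shl %y, 4))
// --> (shl (or %y, (and %x, 15)), 4)
// where 15 == lshr(240, 4), i.e. the mask is moved below the shift.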
// (Binop (zext C), (select C, T, F))
//    -> (select C, (binop 1, T), (binop 0, F))
//
// (Binop (sext C), (select C, T, F))
//    -> (select C, (binop -1, T), (binop 0, F))
//
// Attempt to simplify binary operations into a select with folded args, when
// one operand of the binop is a select instruction and the other operand is a
// zext/sext extension, whose value is the select condition.
Instruction *
InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I) {
  // TODO: this simplification may be extended to any speculatable instruction,
  // not just binops, and would possibly be handled better in FoldOpIntoSelect.
  Instruction::BinaryOps Opc = I.getOpcode();
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *A, *CondVal, *TrueVal, *FalseVal;
  Value *CastOp;

  auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
    return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
           A->getType()->getScalarSizeInBits() == 1 &&
           match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
                                    m_Value(FalseVal)));
  };

  // Make sure one side of the binop is a select instruction, and the other is a
  // zero/sign extension operating on an i1.
  if (MatchSelectAndCast(LHS, RHS))
    CastOp = LHS;
  else if (MatchSelectAndCast(RHS, LHS))
    CastOp = RHS;
  else
    return nullptr;

  auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
    bool IsCastOpRHS = (CastOp == RHS);
    bool IsZExt = isa<ZExtInst>(CastOp);
    Constant *C;

    if (IsTrueArm) {
      C = Constant::getNullValue(V->getType());
    } else if (IsZExt) {
      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
    } else {
      C = Constant::getAllOnesValue(V->getType());
    }

    return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
                       : Builder.CreateBinOp(Opc, C, V);
  };

  // If the value used in the zext/sext is the select condition, or the negation
  // of the select condition, the binop can be simplified.
  if (CondVal == A) {
    Value *NewTrueVal = NewFoldedConst(false, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(true, FalseVal));
  }

  if (match(A, m_Not(m_Specific(CondVal)))) {
    Value *NewTrueVal = NewFoldedConst(true, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(false, FalseVal));
  }

  return nullptr;
}
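
// Illustrative example of the fold above (added for exposition):
//    %e = zext i1 %c to i32
//    %s = select i1 %c, i32 %t, i32 %f
//    %r = add i32 %e, %s
// becomes "select i1 %c, (add i32 1, %t), (add i32 0, %f)", whose arms then
// constant-fold to "%t + 1" and "%f".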
Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
  Value *A, *B, *C, *D;
  Instruction::BinaryOps LHSOpcode, RHSOpcode;

  if (Op0)
    LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
  if (Op1)
    RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);

  // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
  // a common term.
  if (Op0 && Op1 && LHSOpcode == RHSOpcode)
    if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
      return V;

  // The instruction has the form "(A op' B) op (C)". Try to factorize common
  // term.
  if (Op0)
    if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
        return V;

  // The instruction has the form "(B) op (C op' D)". Try to factorize common
  // term.
  if (Op1)
    if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
        return V;

  return nullptr;
}
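
// Illustrative examples (added for exposition). The "(A op' B) op (C op' D)"
// case:
//    (add (mul %a, %b), (mul %a, %c)) --> (mul %a, (add %b, %c))
// and the identity-value case, where the bare %a is treated as "(mul %a, 1)":
//    (add (mul %a, 2), %a) --> (mul %a, 3)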
/// This tries to simplify binary operations which some other binary operation
/// distributes over either by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
/// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
/// Returns the simplified value, or null if it didn't simplify.
Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Factorization.
  if (Value *R = tryFactorizationFolds(I))
    return R;

  // Expansion.
  if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C". See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);

    // Do "A op C" and "B op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      C = Builder.CreateBinOp(InnerOpcode, L, R);
      C->takeName(&I);
      return C;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "B op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, B, C);
      C->takeName(&I);
      return C;
    }

    // Does "B op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, A, C);
      C->takeName(&I);
      return C;
    }
  }

  if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)". See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);

    // Do "A op B" and "A op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      A = Builder.CreateBinOp(InnerOpcode, L, R);
      A->takeName(&I);
      return A;
    }

    // Does "A op B" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, C);
      A->takeName(&I);
      return A;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op B".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, B);
      A->takeName(&I);
      return A;
    }
  }

  return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
}
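
// Illustrative example of a profitable expansion (constants invented):
//    (and (or %b, 12), 3)
// expands to "(and %b, 3) | (and 12, 3)"; the right half folds to 0, the
// identity of 'or', so the whole expression simplifies to "(and %b, 3)".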
static std::optional<std::pair<Value *, Value *>>
matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
  if (LHS->getParent() != RHS->getParent())
    return std::nullopt;

  if (LHS->getNumIncomingValues() < 2)
    return std::nullopt;

  if (!equal(LHS->blocks(), RHS->blocks()))
    return std::nullopt;

  Value *L0 = LHS->getIncomingValue(0);
  Value *R0 = RHS->getIncomingValue(0);

  for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
    Value *L1 = LHS->getIncomingValue(I);
    Value *R1 = RHS->getIncomingValue(I);

    if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
      continue;

    return std::nullopt;
  }

  return std::optional(std::pair(L0, R0));
}
std::optional<std::pair<Value *, Value *>>
InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
  Instruction *LHSInst = dyn_cast<Instruction>(LHS);
  Instruction *RHSInst = dyn_cast<Instruction>(RHS);
  if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
    return std::nullopt;
  switch (LHSInst->getOpcode()) {
  case Instruction::PHI:
    return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
  case Instruction::Select: {
    Value *Cond = LHSInst->getOperand(0);
    Value *TrueVal = LHSInst->getOperand(1);
    Value *FalseVal = LHSInst->getOperand(2);
    if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
        FalseVal == RHSInst->getOperand(1))
      return std::pair(TrueVal, FalseVal);
    return std::nullopt;
  }
  case Instruction::Call: {
    // Match min(a, b) and max(a, b)
    MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
    MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
    if (LHSMinMax && RHSMinMax &&
        LHSMinMax->getPredicate() ==
            ICmpInst::getSwappedPredicate(RHSMinMax->getPredicate()) &&
        ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
         (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getLHS())))
      return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
    return std::nullopt;
  }
  default:
    return std::nullopt;
  }
}
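
// Illustrative example (added for exposition): for a commutative 'add',
//    (add (select %c, %a, %b), (select %c, %b, %a))
// is recognized here as a symmetric pair, so the caller can rewrite it as
// "(add %a, %b)" regardless of %c.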
Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
                                                        Value *LHS,
                                                        Value *RHS) {
  Value *A, *B, *C, *D, *E, *F;
  bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
  bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
  if (!LHSIsSelect && !RHSIsSelect)
    return nullptr;

  FastMathFlags FMF;
  BuilderTy::FastMathFlagGuard Guard(Builder);
  if (isa<FPMathOperator>(&I)) {
    FMF = I.getFastMathFlags();
    Builder.setFastMathFlags(FMF);
  }

  Instruction::BinaryOps Opcode = I.getOpcode();
  SimplifyQuery Q = SQ.getWithInstruction(&I);

  Value *Cond, *True = nullptr, *False = nullptr;

  // Special-case for add/negate combination. Replace the zero in the negation
  // with the trailing add operand:
  // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
  // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
  auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
    // We need an 'add' and exactly 1 arm of the select to have been simplified.
    if (Opcode != Instruction::Add || (!True && !False) || (True && False))
      return nullptr;

    Value *N;
    if (True && match(FVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, True, Sub, I.getName());
    }
    if (False && match(TVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, Sub, False, I.getName());
    }
    return nullptr;
  };

  if (LHSIsSelect && RHSIsSelect && A == D) {
    // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
    Cond = A;
    True = simplifyBinOp(Opcode, B, E, FMF, Q);
    False = simplifyBinOp(Opcode, C, F, FMF, Q);

    if (LHS->hasOneUse() && RHS->hasOneUse()) {
      if (False && !True)
        True = Builder.CreateBinOp(Opcode, B, E);
      else if (True && !False)
        False = Builder.CreateBinOp(Opcode, C, F);
    }
  } else if (LHSIsSelect && LHS->hasOneUse()) {
    // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
    Cond = A;
    True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
    False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
    if (Value *NewSel = foldAddNegate(B, C, RHS))
      return NewSel;
  } else if (RHSIsSelect && RHS->hasOneUse()) {
    // X op (D ? E : F) -> D ? (X op E) : (X op F)
    Cond = D;
    True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
    False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
    if (Value *NewSel = foldAddNegate(E, F, LHS))
      return NewSel;
  }

  if (!True || !False)
    return nullptr;

  Value *SI = Builder.CreateSelect(Cond, True, False);
  SI->takeName(&I);
  return SI;
}
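
// Illustrative example of the add/negate special case above (added for
// exposition):
//    %s = select i1 %c, i32 0, i32 %neg   ; %neg = sub i32 0, %n
//    %r = add i32 %s, %z
// The true arm simplifies (0 + %z == %z) and the false arm is a negation, so
// this becomes "select i1 %c, i32 %z, i32 (sub i32 %z, %n)".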
/// Freely adapt every user of V as-if V was changed to !V.
/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
  assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
  for (User *U : make_early_inc_range(I->users())) {
    if (U == IgnoredUser)
      continue; // Don't consider this user.
    switch (cast<Instruction>(U)->getOpcode()) {
    case Instruction::Select: {
      auto *SI = cast<SelectInst>(U);
      SI->swapValues();
      SI->swapProfMetadata();
      break;
    }
    case Instruction::Br: {
      BranchInst *BI = cast<BranchInst>(U);
      BI->swapSuccessors(); // swaps prof metadata too
      if (BPI)
        BPI->swapSuccEdgesProbabilities(BI->getParent());
      break;
    }
    case Instruction::Xor:
      replaceInstUsesWith(cast<Instruction>(*U), I);
      // Add to worklist for DCE.
      addToWorklist(cast<Instruction>(U));
      break;
    default:
      llvm_unreachable("Got unexpected user - out of sync with "
                       "canFreelyInvertAllUsersOf() ?");
    }
  }
}
/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
/// constant zero (which is the 'negate' form).
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
  Value *NegV;
  if (match(V, m_Neg(m_Value(NegV))))
    return NegV;

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Elt = CV->getAggregateElement(i);
      if (!Elt)
        return nullptr;

      if (isa<UndefValue>(Elt))
        continue;

      if (!isa<ConstantInt>(Elt))
        return nullptr;
    }
    return ConstantExpr::getNeg(CV);
  }

  // Negate integer vector splats.
  if (auto *CV = dyn_cast<Constant>(V))
    if (CV->getType()->isVectorTy() &&
        CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
      return ConstantExpr::getNeg(CV);

  return nullptr;
}
1434 // Try to fold:
1435 // 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1436 // -> ({s|u}itofp (int_binop x, y))
1437 // 2) (fp_binop ({s|u}itofp x), FpC)
1438 // -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1440 // Assuming the sign of the cast for x/y is `OpsFromSigned`.
1441 Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1442 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
1443 Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown) {
1445 Type *FPTy = BO.getType();
1446 Type *IntTy = IntOps[0]->getType();
1448 unsigned IntSz = IntTy->getScalarSizeInBits();
1449 // This is the maximum number of inuse bits by the integer where the int -> fp
1450 // casts are exact.
1451 unsigned MaxRepresentableBits =
1452 APFloat::semanticsPrecision(FPTy->getScalarType()->getFltSemantics());
1454 // Preserve known number of leading bits. This can allow us to trivial nsw/nuw
1455 // checks later on.
1456 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1458 // NB: This only comes up if OpsFromSigned is true, so there is no need to
1459 // cache if between calls to `foldFBinOpOfIntCastsFromSign`.
1460 auto IsNonZero = [&](unsigned OpNo) -> bool {
1461 if (OpsKnown[OpNo].hasKnownBits() &&
1462 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1463 return true;
1464 return isKnownNonZero(IntOps[OpNo], SQ);
1467 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1468 // NB: This matches the impl in ValueTracking, we just try to use cached
1469 // knownbits here. If we ever start supporting WithCache for
1470 // `isKnownNonNegative`, change this to an explicit call.
1471 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1474 // Check if we know for certain that ({s|u}itofp op) is exact.
1475 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1476 // Can we treat this operand as the desired sign?
1477 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1478 !IsNonNeg(OpNo))
1479 return false;
1481 // If fp precision >= bitwidth(op) then its exact.
1482 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1483 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1484 // handled specially. We can't, however, increase the bound arbitrarily for
1485 // `sitofp` as for larger sizes, it won't sign extend.
1486 if (MaxRepresentableBits < IntSz) {
1487 // Otherwise if its signed cast check that fp precisions >= bitwidth(op) -
1488 // numSignBits(op).
1489 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1490 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1491 if (OpsFromSigned)
1492 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1493 // Finally for unsigned check that fp precision >= bitwidth(op) -
1494 // numLeadingZeros(op).
1495 else {
1496 NumUsedLeadingBits[OpNo] =
1497 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1500 // NB: We could also check if op is known to be a power of 2 or zero (which
1501 // will always be representable). Its unlikely, however, that is we are
1502 // unable to bound op in any way we will be able to pass the overflow checks
1503 // later on.
1505 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1506 return false;
1507 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1508 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1509 IsNonZero(OpNo);
1512 // If we have a constant rhs, see if we can losslessly convert it to an int.
1513 if (Op1FpC != nullptr) {
1514 // Signed + Mul req non-zero
1515 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1516 !match(Op1FpC, m_NonZeroFP()))
1517 return nullptr;
1519 Constant *Op1IntC = ConstantFoldCastOperand(
1520 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1521 IntTy, DL);
1522 if (Op1IntC == nullptr)
1523 return nullptr;
1524 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1525 : Instruction::UIToFP,
1526 Op1IntC, FPTy, DL) != Op1FpC)
1527 return nullptr;
1529 // First try to keep sign of cast the same.
1530 IntOps[1] = Op1IntC;
1533 // Ensure lhs/rhs integer types match.
1534 if (IntTy != IntOps[1]->getType())
1535 return nullptr;
1537 if (Op1FpC == nullptr) {
1538 if (!IsValidPromotion(1))
1539 return nullptr;
1541 if (!IsValidPromotion(0))
1542 return nullptr;
1544 // Final we check if the integer version of the binop will not overflow.
1545 BinaryOperator::BinaryOps IntOpc;
1546 // Because of the precision check, we can often rule out overflows.
1547 bool NeedsOverflowCheck = true;
1548 // Try to conservatively rule out overflow based on the already done precision
1549 // checks.
1550 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1551 unsigned OverflowMaxCurBits =
1552 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1553 bool OutputSigned = OpsFromSigned;
1554 switch (BO.getOpcode()) {
1555 case Instruction::FAdd:
1556 IntOpc = Instruction::Add;
1557 OverflowMaxOutputBits += OverflowMaxCurBits;
1558 break;
1559 case Instruction::FSub:
1560 IntOpc = Instruction::Sub;
1561 OverflowMaxOutputBits += OverflowMaxCurBits;
1562 break;
1563 case Instruction::FMul:
1564 IntOpc = Instruction::Mul;
1565 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1566 break;
1567 default:
1568 llvm_unreachable("Unsupported binop");
1570 // The precision check may have already ruled out overflow.
1571 if (OverflowMaxOutputBits < IntSz) {
1572 NeedsOverflowCheck = false;
// We can bound the unsigned overflow from sub to an in-range signed value
// (this is what allows us to avoid the overflow check for sub).
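// For example, in i8: sub 1, 2 wraps as unsigned (255) but is exactly -1 as
// a signed value, so emitting the sub with nsw and converting the result
// with sitofp stays exact.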
1575 if (IntOpc == Instruction::Sub)
1576 OutputSigned = true;
// The precision check did not rule out overflow, so we need to check.
1580 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1581 // `IntOps[...]` arguments to `KnownOps[...]`.
1582 if (NeedsOverflowCheck &&
1583 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1584 return nullptr;
1586 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1587 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1588 IntBO->setHasNoSignedWrap(OutputSigned);
1589 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1591 if (OutputSigned)
1592 return new SIToFPInst(IntBinOp, FPTy);
1593 return new UIToFPInst(IntBinOp, FPTy);
1596 // Try to fold:
1597 // 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1598 // -> ({s|u}itofp (int_binop x, y))
1599 // 2) (fp_binop ({s|u}itofp x), FpC)
1600 // -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
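// For example (illustrative only; the fold fires only once the checks in
// foldFBinOpOfIntCastsFromSign prove the narrow op exact and non-overflowing):
//   fadd (sitofp i16 %x to double), (sitofp i16 %y to double)
//     -> sitofp (add nsw i16 %x, %y) to double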
1601 Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1602 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1603 Constant *Op1FpC = nullptr;
1604 // Check for:
1605 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1606 // 2) (binop ({s|u}itofp x), FpC)
1607 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1608 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1609 return nullptr;
1611 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1612 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1613 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1614 return nullptr;
// Cache KnownBits for the operands to potentially save repeated analysis.
1617 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1619 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1620 // different constraints depending on the sign of the cast.
1621 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1622 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1623 IntOps, Op1FpC, OpsKnown))
1624 return R;
1625 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1626 Op1FpC, OpsKnown);
1629 /// A binop with a constant operand and a sign-extended boolean operand may be
1630 /// converted into a select of constants by applying the binary operation to
1631 /// the constant with the two possible values of the extended boolean (0 or -1).
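/// For example: add (sext i1 %b to i32), 1 --> select %b, i32 0, i32 1
/// (sext i1 true is -1, so the true arm is -1 + 1 == 0).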
1632 Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1633 // TODO: Handle non-commutative binop (constant is operand 0).
1634 // TODO: Handle zext.
1635 // TODO: Peek through 'not' of cast.
1636 Value *BO0 = BO.getOperand(0);
1637 Value *BO1 = BO.getOperand(1);
1638 Value *X;
1639 Constant *C;
1640 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1641 !X->getType()->isIntOrIntVectorTy(1))
1642 return nullptr;
1644 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1645 Constant *Ones = ConstantInt::getAllOnesValue(BO.getType());
1646 Constant *Zero = ConstantInt::getNullValue(BO.getType());
1647 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1648 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1649 return SelectInst::Create(X, TVal, FVal);
1652 static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1653 bool IsTrueArm) {
1654 SmallVector<Value *> Ops;
1655 for (Value *Op : I.operands()) {
1656 Value *V = nullptr;
1657 if (Op == SI) {
1658 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1659 } else if (match(SI->getCondition(),
1660 m_SpecificICmp(IsTrueArm ? ICmpInst::ICMP_EQ
1661 : ICmpInst::ICMP_NE,
1662 m_Specific(Op), m_Value(V))) &&
1663 isGuaranteedNotToBeUndefOrPoison(V)) {
1664 // Pass
1665 } else {
1666 V = Op;
1668 Ops.push_back(V);
1671 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1674 static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1675 Value *NewOp, InstCombiner &IC) {
1676 Instruction *Clone = I.clone();
1677 Clone->replaceUsesOfWith(SI, NewOp);
1678 Clone->dropUBImplyingAttrsAndMetadata();
1679 IC.InsertNewInstBefore(Clone, I.getIterator());
1680 return Clone;
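// A sketch of the effect of FoldOpIntoSelect below (values illustrative):
//   add (select %c, i32 1, i32 2), 8 --> select %c, i32 9, i32 10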
1683 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1684 bool FoldWithMultiUse) {
// Don't modify shared select instructions unless FoldWithMultiUse is set.
1686 if (!SI->hasOneUse() && !FoldWithMultiUse)
1687 return nullptr;
1689 Value *TV = SI->getTrueValue();
1690 Value *FV = SI->getFalseValue();
// Bool selects with constant operands can be folded to logical ops; leave
// them to those folds.
1693 if (SI->getType()->isIntOrIntVectorTy(1))
1694 return nullptr;
1696 // Test if a FCmpInst instruction is used exclusively by a select as
1697 // part of a minimum or maximum operation. If so, refrain from doing
1698 // any other folding. This helps out other analyses which understand
1699 // non-obfuscated minimum and maximum idioms. And in this case, at
1700 // least one of the comparison operands has at least one user besides
1701 // the compare (the select), which would often largely negate the
1702 // benefit of folding anyway.
1703 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1704 if (CI->hasOneUse()) {
1705 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1706 if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
1707 return nullptr;
1711 // Make sure that one of the select arms folds successfully.
1712 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1713 Value *NewFV =
1714 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1715 if (!NewTV && !NewFV)
1716 return nullptr;
1718 // Create an instruction for the arm that did not fold.
1719 if (!NewTV)
1720 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1721 if (!NewFV)
1722 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1723 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1726 static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1727 Value *InValue, BasicBlock *InBB,
1728 const DataLayout &DL,
1729 const SimplifyQuery SQ) {
1730 // NB: It is a precondition of this transform that the operands be
1731 // phi translatable!
1732 SmallVector<Value *> Ops;
1733 for (Value *Op : I.operands()) {
1734 if (Op == PN)
1735 Ops.push_back(InValue);
1736 else
1737 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1740 // Don't consider the simplification successful if we get back a constant
1741 // expression. That's just an instruction in hiding.
1742 // Also reject the case where we simplify back to the phi node. We wouldn't
1743 // be able to remove it in that case.
1744 Value *NewVal = simplifyInstructionWithOperands(
1745 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1746 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1747 return NewVal;
// Check if the incoming PHI value can be replaced with a constant
// based on an implied condition.
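// For example (names illustrative): if InBB ends in
//   br (icmp ult i32 %x, 4), %phi.block, %other
// and I is (icmp ult i32 %phi, 8) where %x is the value incoming from InBB,
// that incoming value folds to true.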
1751 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1752 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1753 if (TerminatorBI && TerminatorBI->isConditional() &&
1754 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1755 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1756 std::optional<bool> ImpliedCond = isImpliedCondition(
1757 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1758 DL, LHSIsTrue);
1759 if (ImpliedCond)
1760 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1763 return nullptr;
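// A sketch of the overall foldOpIntoPhi transform (names illustrative):
//   %p = phi i32 [ 1, %bb0 ], [ %x, %bb1 ]
//   %r = add i32 %p, 4
// becomes
//   %x.add = add i32 %x, 4   ; cloned into %bb1
//   %r = phi i32 [ 5, %bb0 ], [ %x.add, %bb1 ]
// provided %bb1 unconditionally branches into the phi block.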
1766 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1767 bool AllowMultipleUses) {
1768 unsigned NumPHIValues = PN->getNumIncomingValues();
1769 if (NumPHIValues == 0)
1770 return nullptr;
1772 // We normally only transform phis with a single use. However, if a PHI has
1773 // multiple uses and they are all the same operation, we can fold *all* of the
1774 // uses into the PHI.
1775 bool OneUse = PN->hasOneUse();
1776 bool IdenticalUsers = false;
1777 if (!AllowMultipleUses && !OneUse) {
1778 // Walk the use list for the instruction, comparing them to I.
1779 for (User *U : PN->users()) {
1780 Instruction *UI = cast<Instruction>(U);
1781 if (UI != &I && !I.isIdenticalTo(UI))
1782 return nullptr;
1784 // Otherwise, we can replace *all* users with the new PHI we form.
1785 IdenticalUsers = true;
1788 // Check that all operands are phi-translatable.
1789 for (Value *Op : I.operands()) {
1790 if (Op == PN)
1791 continue;
1793 // Non-instructions never require phi-translation.
1794 auto *I = dyn_cast<Instruction>(Op);
1795 if (!I)
1796 continue;
1798 // Phi-translate can handle phi nodes in the same block.
1799 if (isa<PHINode>(I))
1800 if (I->getParent() == PN->getParent())
1801 continue;
1803 // Operand dominates the block, no phi-translation necessary.
1804 if (DT.dominates(I, PN->getParent()))
1805 continue;
1807 // Not phi-translatable, bail out.
1808 return nullptr;
1811 // Check to see whether the instruction can be folded into each phi operand.
1812 // If there is one operand that does not fold, remember the BB it is in.
1813 SmallVector<Value *> NewPhiValues;
1814 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1815 bool SeenNonSimplifiedInVal = false;
1816 for (unsigned i = 0; i != NumPHIValues; ++i) {
1817 Value *InVal = PN->getIncomingValue(i);
1818 BasicBlock *InBB = PN->getIncomingBlock(i);
1820 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1821 NewPhiValues.push_back(NewVal);
1822 continue;
1825 // Handle some cases that can't be fully simplified, but where we know that
1826 // the two instructions will fold into one.
1827 auto WillFold = [&]() {
1828 if (!InVal->hasOneUser())
1829 return false;
1831 // icmp of ucmp/scmp with constant will fold to icmp.
1832 const APInt *Ignored;
1833 if (isa<CmpIntrinsic>(InVal) &&
1834 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1835 return true;
1837 // icmp eq zext(bool), 0 will fold to !bool.
1838 if (isa<ZExtInst>(InVal) &&
1839 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1840 match(&I,
1841 m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(PN), m_Zero())))
1842 return true;
1844 return false;
1847 if (WillFold()) {
1848 OpsToMoveUseToIncomingBB.push_back(i);
1849 NewPhiValues.push_back(nullptr);
1850 continue;
1853 if (!OneUse && !IdenticalUsers)
1854 return nullptr;
1856 if (SeenNonSimplifiedInVal)
1857 return nullptr; // More than one non-simplified value.
1858 SeenNonSimplifiedInVal = true;
1860 // If there is exactly one non-simplified value, we can insert a copy of the
1861 // operation in that block. However, if this is a critical edge, we would
1862 // be inserting the computation on some other paths (e.g. inside a loop).
1863 // Only do this if the pred block is unconditionally branching into the phi
1864 // block. Also, make sure that the pred block is not dead code.
1865 BranchInst *BI = dyn_cast<BranchInst>(InBB->getTerminator());
1866 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
1867 return nullptr;
1869 NewPhiValues.push_back(nullptr);
1870 OpsToMoveUseToIncomingBB.push_back(i);
1872 // If the InVal is an invoke at the end of the pred block, then we can't
1873 // insert a computation after it without breaking the edge.
1874 if (isa<InvokeInst>(InVal))
1875 if (cast<Instruction>(InVal)->getParent() == InBB)
1876 return nullptr;
1878 // Do not push the operation across a loop backedge. This could result in
1879 // an infinite combine loop, and is generally non-profitable (especially
1880 // if the operation was originally outside the loop).
1881 if (isBackEdge(InBB, PN->getParent()))
1882 return nullptr;
1885 // Clone the instruction that uses the phi node and move it into the incoming
1886 // BB because we know that the next iteration of InstCombine will simplify it.
1887 SmallDenseMap<BasicBlock *, Instruction *> Clones;
1888 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
1889 Value *Op = PN->getIncomingValue(OpIndex);
1890 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
1892 Instruction *Clone = Clones.lookup(OpBB);
1893 if (!Clone) {
1894 Clone = I.clone();
1895 for (Use &U : Clone->operands()) {
1896 if (U == PN)
1897 U = Op;
1898 else
1899 U = U->DoPHITranslation(PN->getParent(), OpBB);
1901 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
1902 Clones.insert({OpBB, Clone});
1905 NewPhiValues[OpIndex] = Clone;
1908 // Okay, we can do the transformation: create the new PHI node.
1909 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
1910 InsertNewInstBefore(NewPN, PN->getIterator());
1911 NewPN->takeName(PN);
1912 NewPN->setDebugLoc(PN->getDebugLoc());
1914 for (unsigned i = 0; i != NumPHIValues; ++i)
1915 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
1917 if (IdenticalUsers) {
1918 for (User *U : make_early_inc_range(PN->users())) {
1919 Instruction *User = cast<Instruction>(U);
1920 if (User == &I)
1921 continue;
1922 replaceInstUsesWith(*User, NewPN);
1923 eraseInstFromFunction(*User);
1925 OneUse = true;
1928 if (OneUse) {
1929 replaceAllDbgUsesWith(const_cast<PHINode &>(*PN),
1930 const_cast<PHINode &>(*NewPN),
1931 const_cast<PHINode &>(*PN), DT);
1933 return replaceInstUsesWith(I, NewPN);
1936 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
1937 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
1938 // we are guarding against replicating the binop in >1 predecessor.
1939 // This could miss matching a phi with 2 constant incoming values.
1940 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
1941 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
1942 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1943 Phi0->getNumOperands() != Phi1->getNumOperands())
1944 return nullptr;
1946 // TODO: Remove the restriction for binop being in the same block as the phis.
1947 if (BO.getParent() != Phi0->getParent() ||
1948 BO.getParent() != Phi1->getParent())
1949 return nullptr;
// Fold if, for every incoming block, one of phi0's or phi1's incoming values
// is the identity constant of the binary operator; the binop then simplifies
// to a phi of the other incoming values.
1954 // For example:
1955 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
1956 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
1957 // %add = add i32 %phi0, %phi1
1958 // ==>
1959 // %add = phi i32 [%j, %bb0], [%i, %bb1]
1960 Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
1961 /*AllowRHSConstant*/ false);
1962 if (C) {
1963 SmallVector<Value *, 4> NewIncomingValues;
1964 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
1965 auto &Phi0Use = std::get<0>(T);
1966 auto &Phi1Use = std::get<1>(T);
1967 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
1968 return false;
1969 Value *Phi0UseV = Phi0Use.get();
1970 Value *Phi1UseV = Phi1Use.get();
1971 if (Phi0UseV == C)
1972 NewIncomingValues.push_back(Phi1UseV);
1973 else if (Phi1UseV == C)
1974 NewIncomingValues.push_back(Phi0UseV);
1975 else
1976 return false;
1977 return true;
1980 if (all_of(zip(Phi0->operands(), Phi1->operands()),
1981 CanFoldIncomingValuePair)) {
1982 PHINode *NewPhi =
1983 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
1984 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
1985 "The number of collected incoming values should equal the number "
1986 "of the original PHINode operands!");
1987 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
1988 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
1989 return NewPhi;
1993 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
1994 return nullptr;
1996 // Match a pair of incoming constants for one of the predecessor blocks.
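// For example (names illustrative):
//   %phi0 = phi i32 [ 7, %const.bb ], [ %x, %other.bb ]
//   %phi1 = phi i32 [ 3, %const.bb ], [ %y, %other.bb ]
//   %r = add i32 %phi0, %phi1
// becomes %xy = add i32 %x, %y at the end of %other.bb and
//   %r = phi i32 [ %xy, %other.bb ], [ 10, %const.bb ]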
1997 BasicBlock *ConstBB, *OtherBB;
1998 Constant *C0, *C1;
1999 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2000 ConstBB = Phi0->getIncomingBlock(0);
2001 OtherBB = Phi0->getIncomingBlock(1);
2002 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2003 ConstBB = Phi0->getIncomingBlock(1);
2004 OtherBB = Phi0->getIncomingBlock(0);
2005 } else {
2006 return nullptr;
2008 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2009 return nullptr;
// The block that we are hoisting to must reach here unconditionally.
// Otherwise, we could be speculatively executing an expensive or
// non-speculatable op.
2014 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2015 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2016 !DT.isReachableFromEntry(OtherBB))
2017 return nullptr;
2019 // TODO: This check could be tightened to only apply to binops (div/rem) that
2020 // are not safe to speculatively execute. But that could allow hoisting
2021 // potentially expensive instructions (fdiv for example).
2022 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2023 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
2024 return nullptr;
2026 // Fold constants for the predecessor block with constant incoming values.
2027 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2028 if (!NewC)
2029 return nullptr;
2031 // Make a new binop in the predecessor block with the non-constant incoming
2032 // values.
2033 Builder.SetInsertPoint(PredBlockBranch);
2034 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2035 Phi0->getIncomingValueForBlock(OtherBB),
2036 Phi1->getIncomingValueForBlock(OtherBB));
2037 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2038 NotFoldedNewBO->copyIRFlags(&BO);
2040 // Replace the binop with a phi of the new values. The old phis are dead.
2041 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2042 NewPhi->addIncoming(NewBO, OtherBB);
2043 NewPhi->addIncoming(NewC, ConstBB);
2044 return NewPhi;
2047 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
2048 if (!isa<Constant>(I.getOperand(1)))
2049 return nullptr;
2051 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2052 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
2053 return NewSel;
2054 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2055 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2056 return NewPhi;
2058 return nullptr;
2061 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2062 // If this GEP has only 0 indices, it is the same pointer as
2063 // Src. If Src is not a trivial GEP too, don't combine
2064 // the indices.
2065 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2066 !Src.hasOneUse())
2067 return false;
2068 return true;
2071 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
2072 if (!isa<VectorType>(Inst.getType()))
2073 return nullptr;
2075 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2076 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2077 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2078 cast<VectorType>(Inst.getType())->getElementCount());
2079 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2080 cast<VectorType>(Inst.getType())->getElementCount());
2082 // If both operands of the binop are vector concatenations, then perform the
2083 // narrow binop on each pair of the source operands followed by concatenation
2084 // of the results.
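// For example: add (concat X, Y), (concat Z, W)
//          --> concat (add X, Z), (add Y, W)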
2085 Value *L0, *L1, *R0, *R1;
2086 ArrayRef<int> Mask;
2087 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2088 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2089 LHS->hasOneUse() && RHS->hasOneUse() &&
2090 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2091 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2092 // This transform does not have the speculative execution constraint as
2093 // below because the shuffle is a concatenation. The new binops are
2094 // operating on exactly the same elements as the existing binop.
2095 // TODO: We could ease the mask requirement to allow different undef lanes,
2096 // but that requires an analysis of the binop-with-undef output value.
2097 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2098 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2099 BO->copyIRFlags(&Inst);
2100 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2101 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2102 BO->copyIRFlags(&Inst);
2103 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2106 auto createBinOpReverse = [&](Value *X, Value *Y) {
2107 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2108 if (auto *BO = dyn_cast<BinaryOperator>(V))
2109 BO->copyIRFlags(&Inst);
2110 Module *M = Inst.getModule();
2111 Function *F = Intrinsic::getOrInsertDeclaration(
2112 M, Intrinsic::vector_reverse, V->getType());
2113 return CallInst::Create(F, V);
2116 // NOTE: Reverse shuffles don't require the speculative execution protection
2117 // below because they don't affect which lanes take part in the computation.
2119 Value *V1, *V2;
2120 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2121 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2122 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2123 (LHS->hasOneUse() || RHS->hasOneUse() ||
2124 (LHS == RHS && LHS->hasNUses(2))))
2125 return createBinOpReverse(V1, V2);
2127 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2128 if (LHS->hasOneUse() && isSplatValue(RHS))
2129 return createBinOpReverse(V1, RHS);
2131 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2132 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2133 return createBinOpReverse(LHS, V2);
2135 // It may not be safe to reorder shuffles and things like div, urem, etc.
2136 // because we may trap when executing those ops on unknown vector elements.
2137 // See PR20059.
2138 if (!isSafeToSpeculativelyExecuteWithVariableReplaced(&Inst))
2139 return nullptr;
2141 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2142 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2143 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2144 BO->copyIRFlags(&Inst);
2145 return new ShuffleVectorInst(XY, M);
2148 // If both arguments of the binary operation are shuffles that use the same
2149 // mask and shuffle within a single vector, move the shuffle after the binop.
2150 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2151 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2152 V1->getType() == V2->getType() &&
2153 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2154 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2155 return createBinOpShuffle(V1, V2, Mask);
2158 // If both arguments of a commutative binop are select-shuffles that use the
2159 // same mask with commuted operands, the shuffles are unnecessary.
2160 if (Inst.isCommutative() &&
2161 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2162 match(RHS,
2163 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2164 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2165 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2166 // TODO: Allow shuffles that contain undefs in the mask?
2167 // That is legal, but it reduces undef knowledge.
2168 // TODO: Allow arbitrary shuffles by shuffling after binop?
2169 // That might be legal, but we have to deal with poison.
2170 if (LShuf->isSelect() &&
2171 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2172 RShuf->isSelect() &&
2173 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2174 // Example:
2175 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2176 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2177 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2178 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2179 NewBO->copyIRFlags(&Inst);
2180 return NewBO;
2184 // If one argument is a shuffle within one vector and the other is a constant,
2185 // try moving the shuffle after the binary operation. This canonicalization
2186 // intends to move shuffles closer to other shuffles and binops closer to
2187 // other binops, so they can be folded. It may also enable demanded elements
2188 // transforms.
2189 Constant *C;
2190 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
2191 if (InstVTy &&
2192 match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
2193 m_Mask(Mask))),
2194 m_ImmConstant(C))) &&
2195 cast<FixedVectorType>(V1->getType())->getNumElements() <=
2196 InstVTy->getNumElements()) {
2197 assert(InstVTy->getScalarType() == V1->getType()->getScalarType() &&
2198 "Shuffle should not change scalar type");
2200 // Find constant NewC that has property:
2201 // shuffle(NewC, ShMask) = C
// If such a constant does not exist (example: ShMask=<0,0> and C=<1,2>),
// the reorder is not possible. A 1-to-1 mapping is not required. Example:
2204 // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef>
2205 bool ConstOp1 = isa<Constant>(RHS);
2206 ArrayRef<int> ShMask = Mask;
2207 unsigned SrcVecNumElts =
2208 cast<FixedVectorType>(V1->getType())->getNumElements();
2209 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2210 SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, PoisonScalar);
2211 bool MayChange = true;
2212 unsigned NumElts = InstVTy->getNumElements();
2213 for (unsigned I = 0; I < NumElts; ++I) {
2214 Constant *CElt = C->getAggregateElement(I);
2215 if (ShMask[I] >= 0) {
2216 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2217 Constant *NewCElt = NewVecC[ShMask[I]];
2218 // Bail out if:
2219 // 1. The constant vector contains a constant expression.
2220 // 2. The shuffle needs an element of the constant vector that can't
2221 // be mapped to a new constant vector.
2222 // 3. This is a widening shuffle that copies elements of V1 into the
2223 // extended elements (extending with poison is allowed).
2224 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2225 I >= SrcVecNumElts) {
2226 MayChange = false;
2227 break;
2229 NewVecC[ShMask[I]] = CElt;
2231 // If this is a widening shuffle, we must be able to extend with poison
2232 // elements. If the original binop does not produce a poison in the high
2233 // lanes, then this transform is not safe.
2234 // Similarly for poison lanes due to the shuffle mask, we can only
2235 // transform binops that preserve poison.
2236 // TODO: We could shuffle those non-poison constant values into the
// result by using a constant vector (rather than a poison vector)
2238 // as operand 1 of the new binop, but that might be too aggressive
2239 // for target-independent shuffle creation.
2240 if (I >= SrcVecNumElts || ShMask[I] < 0) {
2241 Constant *MaybePoison =
2242 ConstOp1
2243 ? ConstantFoldBinaryOpOperands(Opcode, PoisonScalar, CElt, DL)
2244 : ConstantFoldBinaryOpOperands(Opcode, CElt, PoisonScalar, DL);
2245 if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) {
2246 MayChange = false;
2247 break;
2251 if (MayChange) {
2252 Constant *NewC = ConstantVector::get(NewVecC);
2253 // It may not be safe to execute a binop on a vector with poison elements
2254 // because the entire instruction can be folded to undef or create poison
2255 // that did not exist in the original code.
2256 // TODO: The shift case should not be necessary.
2257 if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1))
2258 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2260 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2261 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2262 Value *NewLHS = ConstOp1 ? V1 : NewC;
2263 Value *NewRHS = ConstOp1 ? NewC : V1;
2264 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2268 // Try to reassociate to sink a splat shuffle after a binary operation.
2269 if (Inst.isAssociative() && Inst.isCommutative()) {
2270 // Canonicalize shuffle operand as LHS.
2271 if (isa<ShuffleVectorInst>(RHS))
2272 std::swap(LHS, RHS);
2274 Value *X;
2275 ArrayRef<int> MaskC;
2276 int SplatIndex;
2277 Value *Y, *OtherOp;
2278 if (!match(LHS,
2279 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2280 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2281 X->getType() != Inst.getType() ||
2282 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2283 return nullptr;
2285 // FIXME: This may not be safe if the analysis allows undef elements. By
2286 // moving 'Y' before the splat shuffle, we are implicitly assuming
2287 // that it is not undef/poison at the splat index.
2288 if (isSplatValue(OtherOp, SplatIndex)) {
2289 std::swap(Y, OtherOp);
2290 } else if (!isSplatValue(Y, SplatIndex)) {
2291 return nullptr;
2294 // X and Y are splatted values, so perform the binary operation on those
2295 // values followed by a splat followed by the 2nd binary operation:
2296 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2297 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2298 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2299 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2300 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2302 // Intersect FMF on both new binops. Other (poison-generating) flags are
2303 // dropped to be safe.
2304 if (isa<FPMathOperator>(R)) {
2305 R->copyFastMathFlags(&Inst);
2306 R->andIRFlags(RHS);
2308 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2309 NewInstBO->copyIRFlags(R);
2310 return R;
2313 return nullptr;
/// Try to narrow the width of a binop if at least 1 operand is an extend
/// of a value. This requires a potentially expensive known bits check to make
2318 /// sure the narrow op does not overflow.
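/// For example: add (zext i8 %x to i32), (zext i8 %y to i32)
///          --> zext (add nuw i8 %x, %y) to i32
/// when the narrow add is proven not to overflow.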
2319 Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2320 // We need at least one extended operand.
2321 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2323 // If this is a sub, we swap the operands since we always want an extension
2324 // on the RHS. The LHS can be an extension or a constant.
2325 if (BO.getOpcode() == Instruction::Sub)
2326 std::swap(Op0, Op1);
2328 Value *X;
2329 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2330 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2331 return nullptr;
2333 // If both operands are the same extension from the same source type and we
2334 // can eliminate at least one (hasOneUse), this might work.
2335 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2336 Value *Y;
2337 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2338 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2339 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2340 // If that did not match, see if we have a suitable constant operand.
2341 // Truncating and extending must produce the same constant.
2342 Constant *WideC;
2343 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2344 return nullptr;
2345 Constant *NarrowC = getLosslessTrunc(WideC, X->getType(), CastOpc);
2346 if (!NarrowC)
2347 return nullptr;
2348 Y = NarrowC;
2351 // Swap back now that we found our operands.
2352 if (BO.getOpcode() == Instruction::Sub)
2353 std::swap(X, Y);
2355 // Both operands have narrow versions. Last step: the math must not overflow
2356 // in the narrow width.
2357 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2358 return nullptr;
2360 // bo (ext X), (ext Y) --> ext (bo X, Y)
2361 // bo (ext X), C --> ext (bo X, C')
2362 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2363 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2364 if (IsSext)
2365 NewBinOp->setHasNoSignedWrap();
2366 else
2367 NewBinOp->setHasNoUnsignedWrap();
2369 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2372 /// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2373 /// transform.
2374 static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1,
2375 GEPOperator &GEP2) {
2376 return GEP1.getNoWrapFlags().intersectForOffsetAdd(GEP2.getNoWrapFlags());
2379 /// Thread a GEP operation with constant indices through the constant true/false
2380 /// arms of a select.
2381 static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
2382 InstCombiner::BuilderTy &Builder) {
2383 if (!GEP.hasAllConstantIndices())
2384 return nullptr;
2386 Instruction *Sel;
2387 Value *Cond;
2388 Constant *TrueC, *FalseC;
2389 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2390 !match(Sel,
2391 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2392 return nullptr;
2394 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2395 // Propagate 'inbounds' and metadata from existing instructions.
2396 // Note: using IRBuilder to create the constants for efficiency.
2397 SmallVector<Value *, 4> IndexC(GEP.indices());
2398 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2399 Type *Ty = GEP.getSourceElementType();
2400 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2401 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2402 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2405 // Canonicalization:
2406 // gep T, (gep i8, base, C1), (Index + C2) into
2407 // gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
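// For example, with T = i32 (sizeof(T) == 4):
//   gep i32, (gep i8, %base, 4), (add %i, 1)
//     -> gep i32, (gep i8, %base, 8), %i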
2408 static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2409 GEPOperator *Src,
2410 InstCombinerImpl &IC) {
2411 if (GEP.getNumIndices() != 1)
2412 return nullptr;
2413 auto &DL = IC.getDataLayout();
2414 Value *Base;
2415 const APInt *C1;
2416 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2417 return nullptr;
2418 Value *VarIndex;
2419 const APInt *C2;
2420 Type *PtrTy = Src->getType()->getScalarType();
2421 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2422 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2423 return nullptr;
2424 if (C1->getBitWidth() != IndexSizeInBits ||
2425 C2->getBitWidth() != IndexSizeInBits)
2426 return nullptr;
2427 Type *BaseType = GEP.getSourceElementType();
2428 if (isa<ScalableVectorType>(BaseType))
2429 return nullptr;
2430 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2431 APInt NewOffset = TypeSize * *C2 + *C1;
2432 if (NewOffset.isZero() ||
2433 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2434 Value *GEPConst =
2435 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset));
2436 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex);
2439 return nullptr;
2442 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2443 GEPOperator *Src) {
2444 // Combine Indices - If the source pointer to this getelementptr instruction
2445 // is a getelementptr instruction with matching element type, combine the
2446 // indices of the two getelementptr instructions into a single instruction.
2447 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2448 return nullptr;
2450 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2451 return I;
2453 // For constant GEPs, use a more general offset-based folding approach.
2454 Type *PtrTy = Src->getType()->getScalarType();
2455 if (GEP.hasAllConstantIndices() &&
2456 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2457 // Split Src into a variable part and a constant suffix.
2458 gep_type_iterator GTI = gep_type_begin(*Src);
2459 Type *BaseType = GTI.getIndexedType();
2460 bool IsFirstType = true;
2461 unsigned NumVarIndices = 0;
2462 for (auto Pair : enumerate(Src->indices())) {
2463 if (!isa<ConstantInt>(Pair.value())) {
2464 BaseType = GTI.getIndexedType();
2465 IsFirstType = false;
2466 NumVarIndices = Pair.index() + 1;
2468 ++GTI;
2471 // Determine the offset for the constant suffix of Src.
2472 APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
2473 if (NumVarIndices != Src->getNumIndices()) {
// FIXME: getIndexedOffsetInType() does not handle scalable vectors.
2475 if (BaseType->isScalableTy())
2476 return nullptr;
2478 SmallVector<Value *> ConstantIndices;
2479 if (!IsFirstType)
2480 ConstantIndices.push_back(
2481 Constant::getNullValue(Type::getInt32Ty(GEP.getContext())));
2482 append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices));
2483 Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);
2486 // Add the offset for GEP (which is fully constant).
2487 if (!GEP.accumulateConstantOffset(DL, Offset))
2488 return nullptr;
2490 // Convert the total offset back into indices.
2491 SmallVector<APInt> ConstIndices =
2492 DL.getGEPIndicesForOffset(BaseType, Offset);
2493 if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))
2494 return nullptr;
2496 GEPNoWrapFlags NW = getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP));
2497 SmallVector<Value *> Indices;
2498 append_range(Indices, drop_end(Src->indices(),
2499 Src->getNumIndices() - NumVarIndices));
2500 for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
2501 Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
2502 // Even if the total offset is inbounds, we may end up representing it
2503 // by first performing a larger negative offset, and then a smaller
2504 // positive one. The large negative offset might go out of bounds. Only
2505 // preserve inbounds if all signs are the same.
2506 if (Idx.isNonNegative() != ConstIndices[0].isNonNegative())
2507 NW = NW.withoutNoUnsignedSignedWrap();
2508 if (!Idx.isNonNegative())
2509 NW = NW.withoutNoUnsignedWrap();
2512 return replaceInstUsesWith(
2513 GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0),
2514 Indices, "", NW));
2517 if (Src->getResultElementType() != GEP.getSourceElementType())
2518 return nullptr;
2520 SmallVector<Value*, 8> Indices;
2522 // Find out whether the last index in the source GEP is a sequential idx.
2523 bool EndsWithSequential = false;
2524 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2525 I != E; ++I)
2526 EndsWithSequential = I.isSequential();
// Can we combine the two pointer arithmetic offsets?
2529 if (EndsWithSequential) {
2530 // Replace: gep (gep %P, long B), long A, ...
2531 // With: T = long A+B; gep %P, T, ...
2532 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2533 Value *GO1 = GEP.getOperand(1);
2535 // If they aren't the same type, then the input hasn't been processed
2536 // by the loop above yet (which canonicalizes sequential index types to
2537 // intptr_t). Just avoid transforming this until the input has been
2538 // normalized.
2539 if (SO1->getType() != GO1->getType())
2540 return nullptr;
2542 Value *Sum =
2543 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2544 // Only do the combine when we are sure the cost after the
2545 // merge is never more than that before the merge.
2546 if (Sum == nullptr)
2547 return nullptr;
2549 Indices.append(Src->op_begin()+1, Src->op_end()-1);
2550 Indices.push_back(Sum);
2551 Indices.append(GEP.op_begin()+2, GEP.op_end());
2552 } else if (isa<Constant>(*GEP.idx_begin()) &&
2553 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
2554 Src->getNumOperands() != 1) {
2555 // Otherwise we can do the fold if the first index of the GEP is a zero
2556 Indices.append(Src->op_begin()+1, Src->op_end());
2557 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
2560 if (!Indices.empty())
2561 return replaceInstUsesWith(
2562 GEP, Builder.CreateGEP(
2563 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2564 getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
2566 return nullptr;
2569 Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
2570 BuilderTy *Builder,
2571 bool &DoesConsume, unsigned Depth) {
2572 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2573 // ~(~(X)) -> X.
2574 Value *A, *B;
2575 if (match(V, m_Not(m_Value(A)))) {
2576 DoesConsume = true;
2577 return A;
2580 Constant *C;
2581 // Constants can be considered to be not'ed values.
2582 if (match(V, m_ImmConstant(C)))
2583 return ConstantExpr::getNot(C);
2585 if (Depth++ >= MaxAnalysisRecursionDepth)
2586 return nullptr;
2588 // The rest of the cases require that we invert all uses so don't bother
2589 // doing the analysis if we know we can't use the result.
2590 if (!WillInvertAllUses)
2591 return nullptr;
2593 // Compares can be inverted if all of their uses are being modified to use
2594 // the ~V.
2595 if (auto *I = dyn_cast<CmpInst>(V)) {
2596 if (Builder != nullptr)
2597 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2598 I->getOperand(1));
2599 return NonNull;
2602 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2603 // `(-1 - B) - A` if we are willing to invert all of the uses.
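// (This uses ~V == -1 - V, so ~(A + B) == -1 - (A + B) == (-1 - B) - A.)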
2604 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2605 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2606 DoesConsume, Depth))
2607 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2608 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2609 DoesConsume, Depth))
2610 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2611 return nullptr;
2614 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2615 // into `A ^ B` if we are willing to invert all of the uses.
2616 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2617 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2618 DoesConsume, Depth))
2619 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2620 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2621 DoesConsume, Depth))
2622 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2623 return nullptr;
2626 // If `V` is of the form `B - A` then `-1 - V` can be folded into
2627 // `A + (-1 - B)` if we are willing to invert all of the uses.
2628 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
2629 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2630 DoesConsume, Depth))
2631 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
2632 return nullptr;
2635 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
2636 // into `A s>> B` if we are willing to invert all of the uses.
2637 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
2638 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2639 DoesConsume, Depth))
2640 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
2641 return nullptr;
2644 Value *Cond;
2645 // LogicOps are special in that we canonicalize them at the cost of an
2646 // instruction.
2647 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
2648 !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
2649 // Selects/min/max with invertible operands are freely invertible
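// e.g. ~(smax A, B) == smin(~A, ~B) and ~(select C, A, B) == select C, ~A, ~B.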
2650 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
2651 bool LocalDoesConsume = DoesConsume;
2652 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
2653 LocalDoesConsume, Depth))
2654 return nullptr;
2655 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2656 LocalDoesConsume, Depth)) {
2657 DoesConsume = LocalDoesConsume;
2658 if (Builder != nullptr) {
2659 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2660 DoesConsume, Depth);
assert(NotB != nullptr &&
"Unable to build inverted value for known freely invertible op");
2663 if (auto *II = dyn_cast<IntrinsicInst>(V))
2664 return Builder->CreateBinaryIntrinsic(
2665 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
2666 return Builder->CreateSelect(Cond, NotA, NotB);
2668 return NonNull;
2672 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2673 bool LocalDoesConsume = DoesConsume;
2674 SmallVector<std::pair<Value *, BasicBlock *>, 8> IncomingValues;
2675 for (Use &U : PN->operands()) {
2676 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2677 Value *NewIncomingVal = getFreelyInvertedImpl(
2678 U.get(), /*WillInvertAllUses=*/false,
2679 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
2680 if (NewIncomingVal == nullptr)
2681 return nullptr;
2682 // Make sure that we can safely erase the original PHI node.
2683 if (NewIncomingVal == V)
2684 return nullptr;
2685 if (Builder != nullptr)
2686 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
2689 DoesConsume = LocalDoesConsume;
2690 if (Builder != nullptr) {
2691 IRBuilderBase::InsertPointGuard Guard(*Builder);
2692 Builder->SetInsertPoint(PN);
2693 PHINode *NewPN =
2694 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
2695 for (auto [Val, Pred] : IncomingValues)
2696 NewPN->addIncoming(Val, Pred);
2697 return NewPN;
2699 return NonNull;
2702 if (match(V, m_SExtLike(m_Value(A)))) {
2703 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2704 DoesConsume, Depth))
2705 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
2706 return nullptr;
2709 if (match(V, m_Trunc(m_Value(A)))) {
2710 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2711 DoesConsume, Depth))
2712 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
2713 return nullptr;
2716 // De Morgan's Laws:
2717 // (~(A | B)) -> (~A & ~B)
2718 // (~(A & B)) -> (~A | ~B)
2719 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
2720 bool IsLogical, Value *A,
2721 Value *B) -> Value * {
2722 bool LocalDoesConsume = DoesConsume;
2723 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
2724 LocalDoesConsume, Depth))
2725 return nullptr;
2726 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2727 LocalDoesConsume, Depth)) {
2728 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2729 LocalDoesConsume, Depth);
2730 DoesConsume = LocalDoesConsume;
2731 if (IsLogical)
2732 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
2733 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
2736 return nullptr;
2739 if (match(V, m_Or(m_Value(A), m_Value(B))))
2740 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
2743 if (match(V, m_And(m_Value(A), m_Value(B))))
2744 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
2747 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
2748 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
2751 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
2752 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
2755 return nullptr;
2758 /// Return true if we should canonicalize the gep to an i8 ptradd.
2759 static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
2760 Value *PtrOp = GEP.getOperand(0);
2761 Type *GEPEltType = GEP.getSourceElementType();
2762 if (GEPEltType->isIntegerTy(8))
2763 return false;
2765 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
2766 // intrinsic. This has better support in BasicAA.
2767 if (GEPEltType->isScalableTy())
2768 return true;
2770 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
2771 // together.
2772 if (GEP.getNumIndices() == 1 &&
2773 match(GEP.getOperand(1),
2774 m_OneUse(m_CombineOr(m_Mul(m_Value(), m_ConstantInt()),
2775 m_Shl(m_Value(), m_ConstantInt())))))
2776 return true;
2778 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
2779 // possibly be merged together.
2780 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
2781 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
2782 any_of(GEP.indices(), [](Value *V) {
2783 const APInt *C;
2784 return match(V, m_APInt(C)) && !C->isZero();
2788 static Instruction *foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN,
2789 IRBuilderBase &Builder) {
2790 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
2791 if (!Op1)
2792 return nullptr;
2794 // Don't fold a GEP into itself through a PHI node. This can only happen
2795 // through the back-edge of a loop. Folding a GEP into itself means that
2796 // the value of the previous iteration needs to be stored in the meantime,
2797 // thus requiring an additional register variable to be live, but not
2798 // actually achieving anything (the GEP still needs to be executed once per
2799 // loop iteration).
2800 if (Op1 == &GEP)
2801 return nullptr;
2802 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
2804 int DI = -1;
2806 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
2807 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
2808 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
2809 Op1->getSourceElementType() != Op2->getSourceElementType())
2810 return nullptr;
2812 // As for Op1 above, don't try to fold a GEP into itself.
2813 if (Op2 == &GEP)
2814 return nullptr;
2816 // Keep track of the type as we walk the GEP.
2817 Type *CurTy = nullptr;
2819 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
2820 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
2821 return nullptr;
2823 if (Op1->getOperand(J) != Op2->getOperand(J)) {
2824 if (DI == -1) {
// We have not yet seen any differences in the GEPs feeding the
// PHI, so we record this one if it is allowed to be a
// variable.
2829 // The first two arguments can vary for any GEP, the rest have to be
2830 // static for struct slots
2831 if (J > 1) {
2832 assert(CurTy && "No current type?");
2833 if (CurTy->isStructTy())
2834 return nullptr;
2837 DI = J;
2838 } else {
// The GEPs differ by more than one input. While this could be
// extended to support GEPs that vary by more than one variable, it
2841 // doesn't make sense since it greatly increases the complexity and
2842 // would result in an R+R+R addressing mode which no backend
2843 // directly supports and would need to be broken into several
2844 // simpler instructions anyway.
2845 return nullptr;
2849 // Sink down a layer of the type for the next iteration.
2850 if (J > 0) {
2851 if (J == 1) {
2852 CurTy = Op1->getSourceElementType();
2853 } else {
2854 CurTy =
2855 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
2860 NW &= Op2->getNoWrapFlags();
2863 // If not all GEPs are identical we'll have to create a new PHI node.
2864 // Check that the old PHI node has only one use so that it will get
2865 // removed.
2866 if (DI != -1 && !PN->hasOneUse())
2867 return nullptr;
2869 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
2870 NewGEP->setNoWrapFlags(NW);
2872 if (DI == -1) {
2873 // All the GEPs feeding the PHI are identical. Clone one down into our
2874 // BB so that it can be merged with the current GEP.
2875 } else {
2876 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
2877 // into the current block so it can be merged, and create a new PHI to
2878 // set that index.
2879 PHINode *NewPN;
2881 IRBuilderBase::InsertPointGuard Guard(Builder);
2882 Builder.SetInsertPoint(PN);
2883 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
2884 PN->getNumOperands());
2887 for (auto &I : PN->operands())
2888 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
2889 PN->getIncomingBlock(I));
2891 NewGEP->setOperand(DI, NewPN);
2894 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
2895 return NewGEP;
2898 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
2899 Value *PtrOp = GEP.getOperand(0);
2900 SmallVector<Value *, 8> Indices(GEP.indices());
2901 Type *GEPType = GEP.getType();
2902 Type *GEPEltType = GEP.getSourceElementType();
2903 if (Value *V =
2904 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
2905 SQ.getWithInstruction(&GEP)))
2906 return replaceInstUsesWith(GEP, V);
2908 // For vector geps, use the generic demanded vector support.
2909 // Skip if GEP return type is scalable. The number of elements is unknown at
2910 // compile-time.
2911 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
2912 auto VWidth = GEPFVTy->getNumElements();
2913 APInt PoisonElts(VWidth, 0);
2914 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
2915 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
2916 PoisonElts)) {
2917 if (V != &GEP)
2918 return replaceInstUsesWith(GEP, V);
2919 return &GEP;
2922 // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if
2923 // possible (decide on canonical form for pointer broadcast), 3) exploit
2924 // undef elements to decrease demanded bits
2927 // Eliminate unneeded casts for indices, and replace indices which displace
2928 // by multiples of a zero size type with zero.
2929 bool MadeChange = false;
2931 // Index width may not be the same width as pointer width.
2932 // Data layout chooses the right type based on supported integer types.
2933 Type *NewScalarIndexTy =
2934 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
2936 gep_type_iterator GTI = gep_type_begin(GEP);
2937 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
2938 ++I, ++GTI) {
2939 // Skip indices into struct types.
2940 if (GTI.isStruct())
2941 continue;
2943 Type *IndexTy = (*I)->getType();
2944 Type *NewIndexType =
2945 IndexTy->isVectorTy()
2946 ? VectorType::get(NewScalarIndexTy,
2947 cast<VectorType>(IndexTy)->getElementCount())
2948 : NewScalarIndexTy;
2950 // If the element type has zero size then any index over it is equivalent
2951 // to an index of zero, so replace it with zero if it is not zero already.
2952 Type *EltTy = GTI.getIndexedType();
2953 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
2954 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
2955 *I = Constant::getNullValue(NewIndexType);
2956 MadeChange = true;
2959 if (IndexTy != NewIndexType) {
2960 // If we are using a wider index than needed for this platform, shrink
2961 // it to what we need. If narrower, sign-extend it to what we need.
2962 // This explicit cast can make subsequent optimizations more obvious.
2963 *I = Builder.CreateIntCast(*I, NewIndexType, true);
2964 MadeChange = true;
2967 if (MadeChange)
2968 return &GEP;
2970 // Canonicalize constant GEPs to i8 type.
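// For example: gep i32, %p, i64 3 becomes gep i8, %p, i64 12 (the
// accumulated byte offset).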
2971 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
2972 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
2973 if (GEP.accumulateConstantOffset(DL, Offset))
2974 return replaceInstUsesWith(
2975 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
2976 GEP.getNoWrapFlags()));
2979 if (shouldCanonicalizeGEPToPtrAdd(GEP)) {
2980 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
2981 Value *NewGEP =
2982 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
2983 return replaceInstUsesWith(GEP, NewGEP);
2986 // Check to see if the inputs to the PHI node are getelementptr instructions.
2987 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
2988 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
2989 return replaceOperand(GEP, 0, NewPtrOp);
2992 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
2993 if (Instruction *I = visitGEPOfGEP(GEP, Src))
2994 return I;
2996 if (GEP.getNumIndices() == 1) {
2997 unsigned AS = GEP.getPointerAddressSpace();
2998 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
2999 DL.getIndexSizeInBits(AS)) {
3000 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3002 if (TyAllocSize == 1) {
3003 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3004 // but only if the result pointer is only used as if it were an integer,
3005 // or both point to the same underlying object (otherwise provenance is
3006 // not necessarily retained).
3007 Value *X = GEP.getPointerOperand();
3008 Value *Y;
3009 if (match(GEP.getOperand(1),
3010 m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
3011 GEPType == Y->getType()) {
3012 bool HasSameUnderlyingObject =
3013 getUnderlyingObject(X) == getUnderlyingObject(Y);
3014 bool Changed = false;
3015 GEP.replaceUsesWithIf(Y, [&](Use &U) {
3016 bool ShouldReplace = HasSameUnderlyingObject ||
3017 isa<ICmpInst>(U.getUser()) ||
3018 isa<PtrToIntInst>(U.getUser());
3019 Changed |= ShouldReplace;
3020 return ShouldReplace;
3021 });
3022 return Changed ? &GEP : nullptr;
3024 } else if (auto *ExactIns =
3025 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3026 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
3027 Value *V;
3028 if (ExactIns->isExact()) {
3029 if ((has_single_bit(TyAllocSize) &&
3030 match(GEP.getOperand(1),
3031 m_Shr(m_Value(V),
3032 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3033 match(GEP.getOperand(1),
3034 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3035 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3036 GEP.getPointerOperand(), V,
3037 GEP.getNoWrapFlags());
3040 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3041 // Try to canonicalize a non-i8 element type to i8 if the index is an
3042 // exact div/shr instruction with a constant RHS: we can fold the non-i8
3043 // element scale into the div/shr (similar to the mul case, just
3044 // inverted).
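// A sketch of the intended rewrite (assumed IR, element type i32, so the
// scale is 4 and countr_zero(4) == 2):
//   %i = lshr exact i64 %v, 3
//   %g = getelementptr i32, ptr %p, i64 %i
// -->
//   %i2 = lshr exact i64 %v, 1
//   %g  = getelementptr i8, ptr %p, i64 %i2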
3045 const APInt *C;
3046 std::optional<APInt> NewC;
3047 if (has_single_bit(TyAllocSize) &&
3048 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3049 C->uge(countr_zero(TyAllocSize)))
3050 NewC = *C - countr_zero(TyAllocSize);
3051 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3052 APInt Quot;
3053 uint64_t Rem;
3054 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3055 if (Rem == 0)
3056 NewC = Quot;
3057 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3058 APInt Quot;
3059 int64_t Rem;
3060 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3061 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3062 if (!Quot.isAllOnes() && Rem == 0)
3063 NewC = Quot;
3066 if (NewC.has_value()) {
3067 Value *NewOp = Builder.CreateBinOp(
3068 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3069 ConstantInt::get(V->getType(), *NewC));
3070 cast<BinaryOperator>(NewOp)->setIsExact();
3071 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3072 GEP.getPointerOperand(), NewOp,
3073 GEP.getNoWrapFlags());
3079 // We do not handle pointer-vector geps here.
3080 if (GEPType->isVectorTy())
3081 return nullptr;
3083 if (GEP.getNumIndices() == 1) {
3084 // We can only preserve inbounds if the original gep is inbounds, the add
3085 // is nsw, and the add operands are non-negative.
3086 auto CanPreserveInBounds = [&](bool AddIsNSW, Value *Idx1, Value *Idx2) {
3087 SimplifyQuery Q = SQ.getWithInstruction(&GEP);
3088 return GEP.isInBounds() && AddIsNSW && isKnownNonNegative(Idx1, Q) &&
3089 isKnownNonNegative(Idx2, Q);
3090 };
3092 // Try to replace ADD + GEP with GEP + GEP.
3093 Value *Idx1, *Idx2;
3094 if (match(GEP.getOperand(1),
3095 m_OneUse(m_Add(m_Value(Idx1), m_Value(Idx2))))) {
3096 // %idx = add i64 %idx1, %idx2
3097 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3098 // as:
3099 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3100 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3101 bool IsInBounds = CanPreserveInBounds(
3102 cast<OverflowingBinaryOperator>(GEP.getOperand(1))->hasNoSignedWrap(),
3103 Idx1, Idx2);
3104 auto *NewPtr =
3105 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3106 Idx1, "", IsInBounds);
3107 return replaceInstUsesWith(
3108 GEP, Builder.CreateGEP(GEP.getSourceElementType(), NewPtr, Idx2, "",
3109 IsInBounds));
3111 ConstantInt *C;
3112 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAdd(
3113 m_Value(Idx1), m_ConstantInt(C))))))) {
3114 // %add = add nsw i32 %idx1, idx2
3115 // %sidx = sext i32 %add to i64
3116 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3117 // as:
3118 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3119 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3120 bool IsInBounds = CanPreserveInBounds(
3121 /*IsNSW=*/true, Idx1, C);
3122 auto *NewPtr = Builder.CreateGEP(
3123 GEP.getSourceElementType(), GEP.getPointerOperand(),
3124 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "",
3125 IsInBounds);
3126 return replaceInstUsesWith(
3127 GEP,
3128 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3129 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3130 "", IsInBounds));
3134 if (!GEP.isInBounds()) {
3135 unsigned IdxWidth =
3136 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3137 APInt BasePtrOffset(IdxWidth, 0);
3138 Value *UnderlyingPtrOp =
3139 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
3140 BasePtrOffset);
3141 bool CanBeNull, CanBeFreed;
3142 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3143 DL, CanBeNull, CanBeFreed);
3144 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3145 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3146 BasePtrOffset.isNonNegative()) {
3147 APInt AllocSize(IdxWidth, DerefBytes);
3148 if (BasePtrOffset.ule(AllocSize)) {
3149 return GetElementPtrInst::CreateInBounds(
3150 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3156 // nusw + nneg -> nuw
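// Illustrative example (assumed IR): if every index is known non-negative,
//   %g = getelementptr nusw i8, ptr %p, i64 %nonneg
// cannot wrap in the unsigned sense either, so it can be upgraded to
//   %g = getelementptr nusw nuw i8, ptr %p, i64 %nonneg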
3157 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3158 all_of(GEP.indices(), [&](Value *Idx) {
3159 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3160 })) {
3161 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3162 return &GEP;
3165 if (Instruction *R = foldSelectGEP(GEP, Builder))
3166 return R;
3168 return nullptr;
3171 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3172 Instruction *AI) {
3173 if (isa<ConstantPointerNull>(V))
3174 return true;
3175 if (auto *LI = dyn_cast<LoadInst>(V))
3176 return isa<GlobalVariable>(LI->getPointerOperand());
3177 // Two distinct allocations will never be equal.
3178 return isAllocLikeFn(V, &TLI) && V != AI;
3181 /// Given a call CB which uses an address UsedV, return true if we can prove the
3182 /// call's only possible effect is storing to UsedV.
3183 static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3184 const TargetLibraryInfo &TLI) {
3185 if (!CB.use_empty())
3186 // TODO: add recursion if returned attribute is present
3187 return false;
3189 if (CB.isTerminator())
3190 // TODO: remove implementation restriction
3191 return false;
3193 if (!CB.willReturn() || !CB.doesNotThrow())
3194 return false;
3196 // If the only possible side effect of the call is writing to the alloca,
3197 // and the result isn't used, we can safely remove any reads implied by the
3198 // call including those which might read the alloca itself.
3199 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3200 return Dest && Dest->Ptr == UsedV;
3203 static bool isAllocSiteRemovable(Instruction *AI,
3204 SmallVectorImpl<WeakTrackingVH> &Users,
3205 const TargetLibraryInfo &TLI) {
3206 SmallVector<Instruction*, 4> Worklist;
3207 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3208 Worklist.push_back(AI);
3210 do {
3211 Instruction *PI = Worklist.pop_back_val();
3212 for (User *U : PI->users()) {
3213 Instruction *I = cast<Instruction>(U);
3214 switch (I->getOpcode()) {
3215 default:
3216 // Give up the moment we see something we can't handle.
3217 return false;
3219 case Instruction::AddrSpaceCast:
3220 case Instruction::BitCast:
3221 case Instruction::GetElementPtr:
3222 Users.emplace_back(I);
3223 Worklist.push_back(I);
3224 continue;
3226 case Instruction::ICmp: {
3227 ICmpInst *ICI = cast<ICmpInst>(I);
3228 // We can fold eq/ne comparisons with null to false/true, respectively.
3229 // We also fold comparisons under some conditions, provided the alloc has
3230 // not escaped (see isNeverEqualToUnescapedAlloc).
3231 if (!ICI->isEquality())
3232 return false;
3233 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3234 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3235 return false;
3237 // Do not fold compares to aligned_alloc calls, as they may have to
3238 // return null in case the required alignment cannot be satisfied,
3239 // unless we can prove that both alignment and size are valid.
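// For example (illustrative values): aligned_alloc(16, 32) passes the
// check below, since 16 is a power of 2 and 32 is a multiple of 16;
// aligned_alloc(16, 20) does not, so we conservatively give up.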
3240 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3241 // Check whether the alignment and size of a call to aligned_alloc are
3242 // valid, that is, the alignment is a power of 2 and the size is a
3243 // multiple of the alignment.
3244 const APInt *Alignment;
3245 const APInt *Size;
3246 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3247 match(CB->getArgOperand(1), m_APInt(Size)) &&
3248 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3249 };
3250 auto *CB = dyn_cast<CallBase>(AI);
3251 LibFunc TheLibFunc;
3252 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3253 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3254 !AlignmentAndSizeKnownValid(CB))
3255 return false;
3256 Users.emplace_back(I);
3257 continue;
3260 case Instruction::Call:
3261 // Ignore no-op and store intrinsics.
3262 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3263 switch (II->getIntrinsicID()) {
3264 default:
3265 return false;
3267 case Intrinsic::memmove:
3268 case Intrinsic::memcpy:
3269 case Intrinsic::memset: {
3270 MemIntrinsic *MI = cast<MemIntrinsic>(II);
3271 if (MI->isVolatile() || MI->getRawDest() != PI)
3272 return false;
3273 [[fallthrough]];
3275 case Intrinsic::assume:
3276 case Intrinsic::invariant_start:
3277 case Intrinsic::invariant_end:
3278 case Intrinsic::lifetime_start:
3279 case Intrinsic::lifetime_end:
3280 case Intrinsic::objectsize:
3281 Users.emplace_back(I);
3282 continue;
3283 case Intrinsic::launder_invariant_group:
3284 case Intrinsic::strip_invariant_group:
3285 Users.emplace_back(I);
3286 Worklist.push_back(I);
3287 continue;
3291 if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3292 Users.emplace_back(I);
3293 continue;
3296 if (getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3297 getAllocationFamily(I, &TLI) == Family) {
3298 assert(Family);
3299 Users.emplace_back(I);
3300 continue;
3303 if (getReallocatedOperand(cast<CallBase>(I)) == PI &&
3304 getAllocationFamily(I, &TLI) == Family) {
3305 assert(Family);
3306 Users.emplace_back(I);
3307 Worklist.push_back(I);
3308 continue;
3311 return false;
3313 case Instruction::Store: {
3314 StoreInst *SI = cast<StoreInst>(I);
3315 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3316 return false;
3317 Users.emplace_back(I);
3318 continue;
3321 llvm_unreachable("missing a return?");
3323 } while (!Worklist.empty());
3324 return true;
3327 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
3328 assert(isa<AllocaInst>(MI) || isRemovableAlloc(&cast<CallBase>(MI), &TLI));
3330 // If we have a malloc call which is used only in comparisons to null and in
3331 // free calls, delete the calls and replace the comparisons with true or
3332 // false as appropriate.
3334 // This is based on the principle that we can substitute our own allocation
3335 // function (which will never return null) rather than knowledge of the
3336 // specific function being called. In some sense this can change the permitted
3337 // outputs of a program (when we convert a malloc to an alloca, the fact that
3338 // the allocation is now on the stack is potentially visible, for example),
3339 // but we believe it changes them only in a permissible manner.
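// Illustrative sketch (assumed IR) of a removable allocation:
//   %p = call ptr @malloc(i64 4)
//   %c = icmp eq ptr %p, null
//   call void @free(ptr %p)
// Both calls are erased and %c is folded to false.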
3340 SmallVector<WeakTrackingVH, 64> Users;
3342 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3343 // before each store.
3344 SmallVector<DbgVariableIntrinsic *, 8> DVIs;
3345 SmallVector<DbgVariableRecord *, 8> DVRs;
3346 std::unique_ptr<DIBuilder> DIB;
3347 if (isa<AllocaInst>(MI)) {
3348 findDbgUsers(DVIs, &MI, &DVRs);
3349 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3352 if (isAllocSiteRemovable(&MI, Users, TLI)) {
3353 for (unsigned i = 0, e = Users.size(); i != e; ++i) {
3354 // Lower all @llvm.objectsize calls first, because they may
3355 // use a bitcast/GEP of the alloca we are removing.
3356 if (!Users[i])
3357 continue;
3359 Instruction *I = cast<Instruction>(&*Users[i]);
3361 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3362 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3363 SmallVector<Instruction *> InsertedInstructions;
3364 Value *Result = lowerObjectSizeCall(
3365 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3366 for (Instruction *Inserted : InsertedInstructions)
3367 Worklist.add(Inserted);
3368 replaceInstUsesWith(*I, Result);
3369 eraseInstFromFunction(*I);
3370 Users[i] = nullptr; // Skip examining in the next loop.
3374 for (unsigned i = 0, e = Users.size(); i != e; ++i) {
3375 if (!Users[i])
3376 continue;
3378 Instruction *I = cast<Instruction>(&*Users[i]);
3380 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3381 replaceInstUsesWith(*C,
3382 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3383 C->isFalseWhenEqual()));
3384 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3385 for (auto *DVI : DVIs)
3386 if (DVI->isAddressOfVariable())
3387 ConvertDebugDeclareToDebugValue(DVI, SI, *DIB);
3388 for (auto *DVR : DVRs)
3389 if (DVR->isAddressOfVariable())
3390 ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3391 } else {
3392 // Casts, GEP, or anything else: we're about to delete this instruction,
3393 // so it can not have any valid uses.
3394 replaceInstUsesWith(*I, PoisonValue::get(I->getType()));
3396 eraseInstFromFunction(*I);
3399 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
3400 // Replace invoke with a NOP intrinsic to maintain the original CFG
3401 Module *M = II->getModule();
3402 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3403 InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(), {}, "",
3404 II->getParent());
3407 // Remove debug intrinsics which describe the value contained within the
3408 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3409 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3411 // ```
3412 // define void @foo(i32 %0) {
3413 // %a = alloca i32 ; Deleted.
3414 // store i32 %0, i32* %a
3415 // dbg.value(i32 %0, "arg0") ; Not deleted.
3416 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
3417 // call void @trivially_inlinable_no_op(i32* %a)
3418 // ret void
3419 // }
3420 // ```
3422 // This may not be required if we stop describing the contents of allocas
3423 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
3424 // the LowerDbgDeclare utility.
3426 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
3427 // "arg0" dbg.value may be stale after the call. However, failing to remove
3428 // the DW_OP_deref dbg.value causes large gaps in location coverage.
3430 // FIXME: the Assignment Tracking project has now likely made this
3431 // redundant (and it's sometimes harmful).
3432 for (auto *DVI : DVIs)
3433 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
3434 DVI->eraseFromParent();
3435 for (auto *DVR : DVRs)
3436 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3437 DVR->eraseFromParent();
3439 return eraseInstFromFunction(MI);
3441 return nullptr;
3444 /// Move the call to free before a NULL test.
3446 /// Check if this free is accessed after its argument has been tested
3447 /// against NULL (property 0).
3448 /// If yes, it is legal to move this call in its predecessor block.
3450 /// The move is performed only if the block containing the call to free
3451 /// will be removed, i.e.:
3452 /// 1. it has only one predecessor P, and P has two successors
3453 /// 2. it contains the call, noops, and an unconditional branch
3454 /// 3. its successor is the same as its predecessor's successor
3456 /// Profitability is not a concern here; this function should be
3457 /// called only if the caller knows this transformation would be
3458 /// profitable (e.g., for code size).
3459 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
3460 const DataLayout &DL) {
3461 Value *Op = FI.getArgOperand(0);
3462 BasicBlock *FreeInstrBB = FI.getParent();
3463 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
3465 // Validate part of constraint #1: Only one predecessor
3466 // FIXME: We could extend the number of predecessors, but in that case we
3467 // would duplicate the call to free in each predecessor, and it may not be
3468 // profitable even for code size.
3469 if (!PredBB)
3470 return nullptr;
3472 // Validate constraint #2: Does this block contain only the call to
3473 // free, noops, and an unconditional branch?
3474 BasicBlock *SuccBB;
3475 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
3476 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
3477 return nullptr;
3479 // If there are only 2 instructions in the block, at this point
3480 // they are the call to free and the unconditional branch.
3481 // If there are more than 2 instructions, check that the extras are noops,
3482 // i.e., they won't hurt the performance of the generated code.
3483 if (FreeInstrBB->size() != 2) {
3484 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
3485 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3486 continue;
3487 auto *Cast = dyn_cast<CastInst>(&Inst);
3488 if (!Cast || !Cast->isNoopCast(DL))
3489 return nullptr;
3492 // Validate the rest of constraint #1 by matching on the pred branch.
3493 Instruction *TI = PredBB->getTerminator();
3494 BasicBlock *TrueBB, *FalseBB;
3495 CmpPredicate Pred;
3496 if (!match(TI, m_Br(m_ICmp(Pred,
3497 m_CombineOr(m_Specific(Op),
3498 m_Specific(Op->stripPointerCasts())),
3499 m_Zero()),
3500 TrueBB, FalseBB)))
3501 return nullptr;
3502 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
3503 return nullptr;
3505 // Validate constraint #3: Ensure the null case just falls through.
3506 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
3507 return nullptr;
3508 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
3509 "Broken CFG: missing edge from predecessor to successor");
3511 // At this point, we know that everything in FreeInstrBB can be moved
3512 // before TI.
3513 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
3514 if (&Instr == FreeInstrBBTerminator)
3515 break;
3516 Instr.moveBeforePreserving(TI);
3518 assert(FreeInstrBB->size() == 1 &&
3519 "Only the branch instruction should remain");
3521 // Now that we've moved the call to free before the NULL check, we have to
3522 // remove any attributes on its parameter that imply it's non-null, because
3523 // those attributes might have only been valid because of the NULL check, and
3524 // we can get miscompiles if we keep them. This is conservative if non-null is
3525 // also implied by something other than the NULL check, but it's guaranteed to
3526 // be correct, and the conservativeness won't matter in practice, since the
3527 // attributes are irrelevant for the call to free itself and the pointer
3528 // shouldn't be used after the call.
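// For instance (illustrative attributes): after the move, a call such as
//   call void @free(ptr nonnull dereferenceable(8) %p)
// is rewritten to
//   call void @free(ptr dereferenceable_or_null(8) %p)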
3529 AttributeList Attrs = FI.getAttributes();
3530 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
3531 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3532 if (Dereferenceable.isValid()) {
3533 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
3534 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
3535 Attribute::Dereferenceable);
3536 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
3538 FI.setAttributes(Attrs);
3540 return &FI;
3543 Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
3544 // free undef -> unreachable.
3545 if (isa<UndefValue>(Op)) {
3546 // Leave a marker since we can't modify the CFG here.
3547 CreateNonTerminatorUnreachable(&FI);
3548 return eraseInstFromFunction(FI);
3551 // If we have 'free null' delete the instruction. This can happen in stl code
3552 // when lots of inlining happens.
3553 if (isa<ConstantPointerNull>(Op))
3554 return eraseInstFromFunction(FI);
3556 // If we had free(realloc(...)) with no intervening uses, then eliminate the
3557 // realloc() entirely.
3558 CallInst *CI = dyn_cast<CallInst>(Op);
3559 if (CI && CI->hasOneUse())
3560 if (Value *ReallocatedOp = getReallocatedOperand(CI))
3561 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
3563 // If we optimize for code size, try to move the call to free before the null
3564 // test so that SimplifyCFG can remove the empty block and dead code
3565 // elimination can remove the branch. I.e., this helps to turn something like:
3566 // if (foo) free(foo);
3567 // into
3568 // free(foo);
3570 // Note that we can only do this for 'free' and not for any flavor of
3571 // 'operator delete'; there is no 'operator delete' symbol for which we are
3572 // permitted to invent a call, even if we're passing in a null pointer.
3573 if (MinimizeSize) {
3574 LibFunc Func;
3575 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
3576 if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
3577 return I;
3580 return nullptr;
3583 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
3584 Value *RetVal = RI.getReturnValue();
3585 if (!RetVal || !AttributeFuncs::isNoFPClassCompatibleType(RetVal->getType()))
3586 return nullptr;
3588 Function *F = RI.getFunction();
3589 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
3590 if (ReturnClass == fcNone)
3591 return nullptr;
3593 KnownFPClass KnownClass;
3594 Value *Simplified =
3595 SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, 0, &RI);
3596 if (!Simplified)
3597 return nullptr;
3599 return ReturnInst::Create(RI.getContext(), Simplified);
3602 // WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
3603 bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
3604 // Try to remove the previous instruction if it must lead to unreachable.
3605 // This includes instructions like stores and "llvm.assume" that may not get
3606 // removed by simple dead code elimination.
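// Illustrative sketch (assumed IR):
//   store i32 1, ptr %p
//   call void @llvm.assume(i1 %c)
//   unreachable
// Both instructions above the terminator are guaranteed to transfer
// execution to it, so their effects can never be observed and both are
// erased.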
3607 bool Changed = false;
3608 while (Instruction *Prev = I.getPrevNonDebugInstruction()) {
3609 // While we theoretically can erase EH, that would result in a block that
3610 // used to start with an EH no longer starting with EH, which is invalid.
3611 // To make it valid, we'd need to fixup predecessors to no longer refer to
3612 // this block, but that changes CFG, which is not allowed in InstCombine.
3613 if (Prev->isEHPad())
3614 break; // Can not drop any more instructions. We're done here.
3616 if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
3617 break; // Can not drop any more instructions. We're done here.
3618 // Otherwise, this instruction can be freely erased,
3619 // even if it is not side-effect free.
3621 // A value may still have uses before we process it here (for example, in
3622 // another unreachable block), so convert those to poison.
3623 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
3624 eraseInstFromFunction(*Prev);
3625 Changed = true;
3627 return Changed;
3630 Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
3631 removeInstructionsBeforeUnreachable(I);
3632 return nullptr;
3635 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
3636 assert(BI.isUnconditional() && "Only for unconditional branches.");
3638 // If this store is the second-to-last instruction in the basic block
3639 // (excluding debug info and bitcasts of pointers) and if the block ends with
3640 // an unconditional branch, try to move the store to the successor block.
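// Illustrative sketch (assumed IR):
//   store i32 %v, ptr %p
//   br label %succ
// If the store can be paired with an equivalent store reaching %succ from
// its other predecessor, mergeStoreIntoSuccessor may sink it into %succ
// behind a PHI.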
3642 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
3643 auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) {
3644 return BBI->isDebugOrPseudoInst() ||
3645 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3646 };
3648 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
3649 do {
3650 if (BBI != FirstInstr)
3651 --BBI;
3652 } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3654 return dyn_cast<StoreInst>(BBI);
3655 };
3657 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
3658 if (mergeStoreIntoSuccessor(*SI))
3659 return &BI;
3661 return nullptr;
3664 void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To,
3665 SmallVectorImpl<BasicBlock *> &Worklist) {
3666 if (!DeadEdges.insert({From, To}).second)
3667 return;
3669 // Replace phi node operands in successor with poison.
3670 for (PHINode &PN : To->phis())
3671 for (Use &U : PN.incoming_values())
3672 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
3673 replaceUse(U, PoisonValue::get(PN.getType()));
3674 addToWorklist(&PN);
3675 MadeIRChange = true;
3678 Worklist.push_back(To);
3681 // Under the assumption that I is unreachable, remove it and following
3682 // instructions. Changes are reported directly to MadeIRChange.
3683 void InstCombinerImpl::handleUnreachableFrom(
3684 Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) {
3685 BasicBlock *BB = I->getParent();
3686 for (Instruction &Inst : make_early_inc_range(
3687 make_range(std::next(BB->getTerminator()->getReverseIterator()),
3688 std::next(I->getReverseIterator())))) {
3689 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
3690 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
3691 MadeIRChange = true;
3693 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
3694 continue;
3695 // RemoveDIs: erase debug-info on this instruction manually.
3696 Inst.dropDbgRecords();
3697 eraseInstFromFunction(Inst);
3698 MadeIRChange = true;
3701 SmallVector<Value *> Changed;
3702 if (handleUnreachableTerminator(BB->getTerminator(), Changed)) {
3703 MadeIRChange = true;
3704 for (Value *V : Changed)
3705 addToWorklist(cast<Instruction>(V));
3708 // Handle potentially dead successors.
3709 for (BasicBlock *Succ : successors(BB))
3710 addDeadEdge(BB, Succ, Worklist);
3713 void InstCombinerImpl::handlePotentiallyDeadBlocks(
3714 SmallVectorImpl<BasicBlock *> &Worklist) {
3715 while (!Worklist.empty()) {
3716 BasicBlock *BB = Worklist.pop_back_val();
3717 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
3718 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
3719 }))
3720 continue;
3722 handleUnreachableFrom(&BB->front(), Worklist);
3726 void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
3727 BasicBlock *LiveSucc) {
3728 SmallVector<BasicBlock *> Worklist;
3729 for (BasicBlock *Succ : successors(BB)) {
3730 // The live successor isn't dead.
3731 if (Succ == LiveSucc)
3732 continue;
3734 addDeadEdge(BB, Succ, Worklist);
3737 handlePotentiallyDeadBlocks(Worklist);
3740 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
3741 if (BI.isUnconditional())
3742 return visitUnconditionalBranchInst(BI);
3744 // Change br (not X), label True, label False to: br X, label False, True
3745 Value *Cond = BI.getCondition();
3746 Value *X;
3747 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
3748 // Swap Destinations and condition...
3749 BI.swapSuccessors();
3750 if (BPI)
3751 BPI->swapSuccEdgesProbabilities(BI.getParent());
3752 return replaceOperand(BI, 0, X);
3755 // Canonicalize logical-and-with-invert as logical-or-with-invert.
3756 // This is done by inverting the condition and swapping successors:
3757 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
3758 Value *Y;
3759 if (isa<SelectInst>(Cond) &&
3760 match(Cond,
3761 m_OneUse(m_LogicalAnd(m_Value(X), m_OneUse(m_Not(m_Value(Y))))))) {
3762 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
3763 Value *Or = Builder.CreateLogicalOr(NotX, Y);
3764 BI.swapSuccessors();
3765 if (BPI)
3766 BPI->swapSuccEdgesProbabilities(BI.getParent());
3767 return replaceOperand(BI, 0, Or);
3770 // If the condition is irrelevant, remove the use so that other
3771 // transforms on the condition become more effective.
3772 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
3773 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
3775 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
3776 CmpPredicate Pred;
3777 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
3778 !isCanonicalPredicate(Pred)) {
3779 // Swap destinations and condition.
3780 auto *Cmp = cast<CmpInst>(Cond);
3781 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
3782 BI.swapSuccessors();
3783 if (BPI)
3784 BPI->swapSuccEdgesProbabilities(BI.getParent());
3785 Worklist.push(Cmp);
3786 return &BI;
3789 if (isa<UndefValue>(Cond)) {
3790 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
3791 return nullptr;
3793 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
3794 handlePotentiallyDeadSuccessors(BI.getParent(),
3795 BI.getSuccessor(!CI->getZExtValue()));
3796 return nullptr;
3799 // Replace all dominated uses of the condition with true/false.
3800 // Ignore constant expressions to avoid iterating over uses on other
3801 // functions.
3802 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
3803 for (auto &U : make_early_inc_range(Cond->uses())) {
3804 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
3805 if (DT.dominates(Edge0, U)) {
3806 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
3807 addToWorklist(cast<Instruction>(U.getUser()));
3808 continue;
3810 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
3811 if (DT.dominates(Edge1, U)) {
3812 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
3813 addToWorklist(cast<Instruction>(U.getUser()));
3818 DC.registerBranch(&BI);
3819 return nullptr;
3822 // Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
3823 // we can prove that both (switch C) and (switch X) go to the default when cond
3824 // is false/true.
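// Illustrative example (assumed IR):
//   %s = select i1 (icmp ult i32 %x, 4), i32 %x, i32 7
//   switch i32 %s, label %default [ cases 0, 1, 2, 3 ]
// Case 7 does not exist, so the constant arm reaches %default; and when the
// condition is false, %x is u>= 4, which also reaches %default. The switch
// can therefore be rewritten to use %x directly.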
3825 static Value *simplifySwitchOnSelectUsingRanges(SwitchInst &SI,
3826 SelectInst *Select,
3827 bool IsTrueArm) {
3828 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
3829 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
3830 if (!C)
3831 return nullptr;
3833 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
3834 if (CstBB != SI.getDefaultDest())
3835 return nullptr;
3836 Value *X = Select->getOperand(3 - CstOpIdx);
3837 CmpPredicate Pred;
3838 const APInt *RHSC;
3839 if (!match(Select->getCondition(),
3840 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
3841 return nullptr;
3842 if (IsTrueArm)
3843 Pred = ICmpInst::getInversePredicate(Pred);
3845 // See whether we can replace the select with X
3846 ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
3847 for (auto Case : SI.cases())
3848 if (!CR.contains(Case.getCaseValue()->getValue()))
3849 return nullptr;
3851 return X;
3854 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
3855 Value *Cond = SI.getCondition();
3856 Value *Op0;
3857 ConstantInt *AddRHS;
3858 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
3859 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
3860 for (auto Case : SI.cases()) {
3861 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
3862 assert(isa<ConstantInt>(NewCase) &&
3863 "Result of expression should be constant");
3864 Case.setValue(cast<ConstantInt>(NewCase));
3866 return replaceOperand(SI, 0, Op0);
3869 ConstantInt *SubLHS;
3870 if (match(Cond, m_Sub(m_ConstantInt(SubLHS), m_Value(Op0)))) {
3871 // Change 'switch (1-X) case 1:' into 'switch (X) case 0'.
3872 for (auto Case : SI.cases()) {
3873 Constant *NewCase = ConstantExpr::getSub(SubLHS, Case.getCaseValue());
3874 assert(isa<ConstantInt>(NewCase) &&
3875 "Result of expression should be constant");
3876 Case.setValue(cast<ConstantInt>(NewCase));
3878 return replaceOperand(SI, 0, Op0);
3881 uint64_t ShiftAmt;
3882 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
3883 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
3884 all_of(SI.cases(), [&](const auto &Case) {
3885 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
3886 })) {
3887 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
3888 OverflowingBinaryOperator *Shl = cast<OverflowingBinaryOperator>(Cond);
3889 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
3890 Shl->hasOneUse()) {
3891 Value *NewCond = Op0;
3892 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
3893 // If the shift may wrap, we need to mask off the shifted bits.
3894 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
3895 NewCond = Builder.CreateAnd(
3896 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
3898 for (auto Case : SI.cases()) {
3899 const APInt &CaseVal = Case.getCaseValue()->getValue();
3900 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
3901 : CaseVal.lshr(ShiftAmt);
3902 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
3904 return replaceOperand(SI, 0, NewCond);
3908 // Fold switch(zext/sext(X)) into switch(X) if possible.
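// Illustrative example (assumed IR): since every case value fits in the
// source type,
//   %w = zext i8 %x to i32
//   switch i32 %w [ cases 0, 10, 200 ]
// becomes a switch on i8 %x with the case values truncated to i8.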
3909 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
3910 bool IsZExt = isa<ZExtInst>(Cond);
3911 Type *SrcTy = Op0->getType();
3912 unsigned NewWidth = SrcTy->getScalarSizeInBits();
3914 if (all_of(SI.cases(), [&](const auto &Case) {
3915 const APInt &CaseVal = Case.getCaseValue()->getValue();
3916 return IsZExt ? CaseVal.isIntN(NewWidth)
3917 : CaseVal.isSignedIntN(NewWidth);
3918 })) {
3919 for (auto &Case : SI.cases()) {
3920 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3921 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3923 return replaceOperand(SI, 0, Op0);
3927 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
3928 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
3929 if (Value *V =
3930 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
3931 return replaceOperand(SI, 0, V);
3932 if (Value *V =
3933 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
3934 return replaceOperand(SI, 0, V);
3937 KnownBits Known = computeKnownBits(Cond, 0, &SI);
3938 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
3939 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
3941 // Compute the number of leading bits we can ignore.
3942 // TODO: A better way to determine this would use ComputeNumSignBits().
3943 for (const auto &C : SI.cases()) {
3944 LeadingKnownZeros =
3945 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
3946 LeadingKnownOnes =
3947 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
3950 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
3952 // Shrink the condition operand if the new type is smaller than the old type.
3953 // But do not shrink to a non-standard type, because backend can't generate
3954 // good code for that yet.
3955 // TODO: We can make it aggressive again after fixing PR39569.
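// Illustrative example (assumed IR): a switch on i32 %x with cases 0, 1,
// and 2, where %x is known to have at least 24 leading zero bits, yields
// NewWidth == 8 and is truncated to a switch on i8 (subject to
// shouldChangeType).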
3956 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
3957 shouldChangeType(Known.getBitWidth(), NewWidth)) {
3958 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
3959 Builder.SetInsertPoint(&SI);
3960 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
3962 for (auto Case : SI.cases()) {
3963 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3964 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3966 return replaceOperand(SI, 0, NewCond);
3969 if (isa<UndefValue>(Cond)) {
3970 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
3971 return nullptr;
3973 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
3974 handlePotentiallyDeadSuccessors(SI.getParent(),
3975 SI.findCaseValue(CI)->getCaseSuccessor());
3976 return nullptr;
3979 return nullptr;
3982 Instruction *
3983 InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
3984 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
3985 if (!WO)
3986 return nullptr;
3988 Intrinsic::ID OvID = WO->getIntrinsicID();
3989 const APInt *C = nullptr;
3990 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
3991 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
3992 OvID == Intrinsic::umul_with_overflow)) {
3993 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
3994 if (C->isAllOnes())
3995 return BinaryOperator::CreateNeg(WO->getLHS());
3996 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
3997 if (C->isPowerOf2()) {
3998 return BinaryOperator::CreateShl(
3999 WO->getLHS(),
4000 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4005 // We're extracting from an overflow intrinsic. See if we're the only user.
4006 // That allows us to simplify multiple result intrinsics to simpler things
4007 // that just get one value.
4008 if (!WO->hasOneUse())
4009 return nullptr;
4011 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4012 // and replace it with a traditional binary instruction.
4013 if (*EV.idx_begin() == 0) {
4014 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4015 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4016 // Replace the old instruction's uses with poison.
4017 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4018 eraseInstFromFunction(*WO);
4019 return BinaryOperator::Create(BinOp, LHS, RHS);
4022 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4024 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4025 if (OvID == Intrinsic::usub_with_overflow)
4026 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4028 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4029 // +1 is not possible because we assume signed values.
4030 if (OvID == Intrinsic::smul_with_overflow &&
4031 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4032 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4034 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4035 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4036 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4037 // Only handle even bitwidths for performance reasons.
4038 if (BitWidth % 2 == 0)
4039 return new ICmpInst(
4040 ICmpInst::ICMP_UGT, WO->getLHS(),
4041 ConstantInt::get(WO->getLHS()->getType(),
4042 APInt::getLowBitsSet(BitWidth, BitWidth / 2)));
4045 // If only the overflow result is used, and the right hand side is a
4046 // constant (or constant splat), we can remove the intrinsic by directly
4047 // checking for overflow.
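// For example (illustrative, i8 for brevity): the overflow bit of
//   llvm.uadd.with.overflow.i8(i8 %x, i8 100)
// has the no-wrap region [0, 156) for %x, so the extract becomes
//   icmp uge i8 %x, 156
// i.e. the add overflows exactly when %x u>= 156.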
4048 if (C) {
4049 // Compute the no-wrap range for LHS given RHS=C, then construct an
4050 // equivalent icmp, potentially using an offset.
4051 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
4052 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4054 CmpInst::Predicate Pred;
4055 APInt NewRHSC, Offset;
4056 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4057 auto *OpTy = WO->getRHS()->getType();
4058 auto *NewLHS = WO->getLHS();
4059 if (Offset != 0)
4060 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4061 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4062 ConstantInt::get(OpTy, NewRHSC));
4065 return nullptr;
4068 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
4069 Value *Agg = EV.getAggregateOperand();
4071 if (!EV.hasIndices())
4072 return replaceInstUsesWith(EV, Agg);
4074 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4075 SQ.getWithInstruction(&EV)))
4076 return replaceInstUsesWith(EV, V);
4078 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4079 // We're extracting from an insertvalue instruction; compare the indices.
4080 const unsigned *exti, *exte, *insi, *inse;
4081 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4082 exte = EV.idx_end(), inse = IV->idx_end();
4083 exti != exte && insi != inse;
4084 ++exti, ++insi) {
4085 if (*insi != *exti)
4086 // The insert and extract reference distinct elements.
4087 // This means the extract is not influenced by the insert, and we can
4088 // replace the aggregate operand of the extract with the aggregate
4089 // operand of the insert. i.e., replace
4090 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4091 // %E = extractvalue { i32, { i32 } } %I, 0
4092 // with
4093 // %E = extractvalue { i32, { i32 } } %A, 0
4094 return ExtractValueInst::Create(IV->getAggregateOperand(),
4095 EV.getIndices());
4097 if (exti == exte && insi == inse)
4098 // Both iterators are at the end: Index lists are identical. Replace
4099 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4100 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4101 // with "i32 42"
4102 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4103 if (exti == exte) {
4104 // The extract list is a prefix of the insert list. i.e. replace
4105 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4106 // %E = extractvalue { i32, { i32 } } %I, 1
4107 // with
4108 // %X = extractvalue { i32, { i32 } } %A, 1
4109 // %E = insertvalue { i32 } %X, i32 42, 0
4110 // by switching the order of the insert and extract (though the
4111 // insertvalue should be left in, since it may have other uses).
4112 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4113 EV.getIndices());
4114 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4115 ArrayRef(insi, inse));
4117 if (insi == inse)
4118 // The insert list is a prefix of the extract list
4119 // We can simply remove the common indices from the extract and make it
4120 // operate on the inserted value instead of the insertvalue result.
4121 // i.e., replace
4122 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4123 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4124 // with
4125 // %E extractvalue { i32 } { i32 42 }, 0
4126 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4127 ArrayRef(exti, exte));
4130 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4131 return R;
4133 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4134 // Bail out if the aggregate contains scalable vector type
4135 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4136 STy && STy->isScalableTy())
4137 return nullptr;
4139 // If the (non-volatile) load only has one use, we can rewrite this to a
4140 // load from a GEP. This reduces the size of the load. If a load is used
4141 // only by extractvalue instructions then this either must have been
4142 // optimized before, or it is a struct with padding, in which case we
4143 // don't want to do the transformation as it loses padding knowledge.
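// Illustrative sketch (assumed IR):
//   %agg = load {i32, i64}, ptr %p
//   %v   = extractvalue {i32, i64} %agg, 1
// becomes
//   %gep = getelementptr inbounds {i32, i64}, ptr %p, i32 0, i32 1
//   %v   = load i64, ptr %gep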
4144 if (L->isSimple() && L->hasOneUse()) {
4145 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4146 SmallVector<Value*, 4> Indices;
4147 // Prefix an i32 0 since we need the first element.
4148 Indices.push_back(Builder.getInt32(0));
4149 for (unsigned Idx : EV.indices())
4150 Indices.push_back(Builder.getInt32(Idx));
4152 // We need to insert these at the location of the old load, not at that of
4153 // the extractvalue.
4154 Builder.SetInsertPoint(L);
4155 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4156 L->getPointerOperand(), Indices);
4157 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4158 // Whatever aliasing information we had for the original load must also
4159 // hold for the smaller load, so propagate the annotations.
4160 NL->setAAMetadata(L->getAAMetadata());
4161 // Returning the load directly will cause the main loop to insert it in
4162 // the wrong spot, so use replaceInstUsesWith().
4163 return replaceInstUsesWith(EV, NL);
4167 if (auto *PN = dyn_cast<PHINode>(Agg))
4168 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4169 return Res;
4171 // Canonicalize extract (select Cond, TV, FV)
4172 // -> select cond, (extract TV), (extract FV)
4173 if (auto *SI = dyn_cast<SelectInst>(Agg))
4174 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4175 return R;
4177 // We could simplify extracts from other values. Note that nested extracts may
4178 // already be simplified implicitly by the above: extract (extract (insert) )
4179 // will be translated into extract ( insert ( extract ) ) first and then just
4180 // the value inserted, if appropriate. Similarly for extracts from single-use
4181 // loads: extract (extract (load)) will be translated to extract (load (gep))
4182 // and if again single-use then via load (gep (gep)) to load (gep).
4183 // However, double extracts from e.g. function arguments or return values
4184 // aren't handled yet.
4185 return nullptr;
4188 /// Return 'true' if the given typeinfo will match anything.
4189 static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4190 switch (Personality) {
4191 case EHPersonality::GNU_C:
4192 case EHPersonality::GNU_C_SjLj:
4193 case EHPersonality::Rust:
4194 // The GCC C EH and Rust personalities only exist to support cleanups, so
4195 // it's not clear what the semantics of catch clauses are.
4196 return false;
4197 case EHPersonality::Unknown:
4198 return false;
4199 case EHPersonality::GNU_Ada:
4200 // While __gnat_all_others_value will match any Ada exception, it doesn't
4201 // match foreign exceptions (or didn't, before gcc-4.7).
4202 return false;
4203 case EHPersonality::GNU_CXX:
4204 case EHPersonality::GNU_CXX_SjLj:
4205 case EHPersonality::GNU_ObjC:
4206 case EHPersonality::MSVC_X86SEH:
4207 case EHPersonality::MSVC_TableSEH:
4208 case EHPersonality::MSVC_CXX:
4209 case EHPersonality::CoreCLR:
4210 case EHPersonality::Wasm_CXX:
4211 case EHPersonality::XL_CXX:
4212 case EHPersonality::ZOS_CXX:
4213 return TypeInfo->isNullValue();
4215 llvm_unreachable("invalid enum");
4218 static bool shorter_filter(const Value *LHS, const Value *RHS) {
4219 return
4220 cast<ArrayType>(LHS->getType())->getNumElements()
4221 <
4222 cast<ArrayType>(RHS->getType())->getNumElements();
4225 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
4226 // The logic here should be correct for any real-world personality function.
4227 // However if that turns out not to be true, the offending logic can always
4228 // be conditioned on the personality function, like the catch-all logic is.
4229 EHPersonality Personality =
4230 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4232 // Simplify the list of clauses, e.g. by removing repeated catch clauses
4233 // (these are often created by inlining).
4234 bool MakeNewInstruction = false; // If true, recreate using the following:
4235 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4236 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4238 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4239 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4240 bool isLastClause = i + 1 == e;
4241 if (LI.isCatch(i)) {
4242 // A catch clause.
4243 Constant *CatchClause = LI.getClause(i);
4244 Constant *TypeInfo = CatchClause->stripPointerCasts();
4246 // If we already saw this clause, there is no point in having a second
4247 // copy of it.
4248 if (AlreadyCaught.insert(TypeInfo).second) {
4249 // This catch clause was not already seen.
4250 NewClauses.push_back(CatchClause);
4251 } else {
4252 // Repeated catch clause - drop the redundant copy.
4253 MakeNewInstruction = true;
4256 // If this is a catch-all then there is no point in keeping any following
4257 // clauses or marking the landingpad as having a cleanup.
4258 if (isCatchAll(Personality, TypeInfo)) {
4259 if (!isLastClause)
4260 MakeNewInstruction = true;
4261 CleanupFlag = false;
4262 break;
4264 } else {
4265 // A filter clause. If any of the filter elements were already caught
4266 // then they can be dropped from the filter. It is tempting to try to
4267 // exploit the filter further by saying that any typeinfo that does not
4268 // occur in the filter can't be caught later (and thus can be dropped).
4269 // However this would be wrong, since typeinfos can match without being
4270 // equal (for example if one represents a C++ class, and the other some
4271 // class derived from it).
4272 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4273 Constant *FilterClause = LI.getClause(i);
4274 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4275 unsigned NumTypeInfos = FilterType->getNumElements();
4277 // An empty filter catches everything, so there is no point in keeping any
4278 // following clauses or marking the landingpad as having a cleanup. By
4279 // dealing with this case here the following code is made a bit simpler.
4280 if (!NumTypeInfos) {
4281 NewClauses.push_back(FilterClause);
4282 if (!isLastClause)
4283 MakeNewInstruction = true;
4284 CleanupFlag = false;
4285 break;
4288 bool MakeNewFilter = false; // If true, make a new filter.
4289 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4290 if (isa<ConstantAggregateZero>(FilterClause)) {
4291 // Not an empty filter - it contains at least one null typeinfo.
4292 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4293 Constant *TypeInfo =
4294 Constant::getNullValue(FilterType->getElementType());
4295 // If this typeinfo is a catch-all then the filter can never match.
4296 if (isCatchAll(Personality, TypeInfo)) {
4297 // Throw the filter away.
4298 MakeNewInstruction = true;
4299 continue;
4302 // There is no point in having multiple copies of this typeinfo, so
4303 // discard all but the first copy if there is more than one.
4304 NewFilterElts.push_back(TypeInfo);
4305 if (NumTypeInfos > 1)
4306 MakeNewFilter = true;
4307 } else {
4308 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4309 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4310 NewFilterElts.reserve(NumTypeInfos);
4312 // Remove any filter elements that were already caught or that already
4313 // occurred in the filter. While there, see if any of the elements are
4314 // catch-alls. If so, the filter can be discarded.
4315 bool SawCatchAll = false;
4316 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4317 Constant *Elt = Filter->getOperand(j);
4318 Constant *TypeInfo = Elt->stripPointerCasts();
4319 if (isCatchAll(Personality, TypeInfo)) {
4320 // This element is a catch-all. Bail out, noting this fact.
4321 SawCatchAll = true;
4322 break;
4325 // Even if we've seen a type in a catch clause, we don't want to
4326 // remove it from the filter. An unexpected type handler may be
4327 // set up for a call site which throws an exception of the same
4328 // type caught. In order for the exception thrown by the unexpected
4329 // handler to propagate correctly, the filter must be correctly
4330 // described for the call site.
4332 // Example:
4334 // void unexpected() { throw 1;}
4335 // void foo() throw (int) {
4336 // std::set_unexpected(unexpected);
4337 // try {
4338 // throw 2.0;
4339 // } catch (int i) {}
4340 // }
4342 // There is no point in having multiple copies of the same typeinfo in
4343 // a filter, so only add it if we didn't already.
4344 if (SeenInFilter.insert(TypeInfo).second)
4345 NewFilterElts.push_back(cast<Constant>(Elt));
4347 // A filter containing a catch-all cannot match anything by definition.
4348 if (SawCatchAll) {
4349 // Throw the filter away.
4350 MakeNewInstruction = true;
4351 continue;
4354 // If we dropped something from the filter, make a new one.
4355 if (NewFilterElts.size() < NumTypeInfos)
4356 MakeNewFilter = true;
4358 if (MakeNewFilter) {
4359 FilterType = ArrayType::get(FilterType->getElementType(),
4360 NewFilterElts.size());
4361 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4362 MakeNewInstruction = true;
4365 NewClauses.push_back(FilterClause);
4367 // If the new filter is empty then it will catch everything so there is
4368 // no point in keeping any following clauses or marking the landingpad
4369 // as having a cleanup. The case of the original filter being empty was
4370 // already handled above.
4371 if (MakeNewFilter && !NewFilterElts.size()) {
4372 assert(MakeNewInstruction && "New filter but not a new instruction!");
4373 CleanupFlag = false;
4374 break;
4379 // If several filters occur in a row then reorder them so that the shortest
4380 // filters come first (those with the smallest number of elements). This is
4381 // advantageous because shorter filters are more likely to match, speeding up
4382 // unwinding, but mostly because it increases the effectiveness of the other
4383 // filter optimizations below.
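// For instance, a clause list [filter [3 x ptr] F3, filter [1 x ptr] F1]
// is rebuilt as [filter [1 x ptr] F1, filter [3 x ptr] F3]; F1 and F3 are
// hypothetical filter constants used only for illustration.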
4384 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4385 unsigned j;
4386 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4387 for (j = i; j != e; ++j)
4388 if (!isa<ArrayType>(NewClauses[j]->getType()))
4389 break;
4391 // Check whether the filters are already sorted by length. We need to know
4392 // if sorting them is actually going to do anything so that we only make a
4393 // new landingpad instruction if it does.
4394 for (unsigned k = i; k + 1 < j; ++k)
4395 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4396 // Not sorted, so sort the filters now. Doing an unstable sort would be
4397 // correct too but reordering filters pointlessly might confuse users.
4398 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
4399 shorter_filter);
4400 MakeNewInstruction = true;
4401 break;
4404 // Look for the next batch of filters.
4405 i = j + 1;
4408 // If typeinfos matched if and only if equal, then the elements of a filter L
4409 // that occurs later than a filter F could be replaced by the intersection of
4410 // the elements of F and L. In reality two typeinfos can match without being
4411 // equal (for example if one represents a C++ class, and the other some class
4412 // derived from it) so it would be wrong to perform this transform in general.
4413 // However the transform is correct and useful if F is a subset of L. In that
4414 // case L can be replaced by F, and thus removed altogether since repeating a
4415 // filter is pointless. So here we look at all pairs of filters F and L where
4416 // L follows F in the list of clauses, and remove L if every element of F is
4417 // an element of L. This can occur when inlining C++ functions with exception
4418 // specifications.
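// Illustrative example (hypothetical typeinfos @A and @B): given
//   filter [1 x ptr] [ptr @A]          ; F, earlier in the clause list
//   filter [2 x ptr] [ptr @A, ptr @B]  ; L, later in the clause list
// every element of F occurs in L, so L is erased.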
4419 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
4420 // Examine each filter in turn.
4421 Value *Filter = NewClauses[i];
4422 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
4423 if (!FTy)
4424 // Not a filter - skip it.
4425 continue;
4426 unsigned FElts = FTy->getNumElements();
4427 // Examine each filter following this one. Doing this backwards means that
4428 // we don't have to worry about filters disappearing under us when removed.
4429 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
4430 Value *LFilter = NewClauses[j];
4431 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
4432 if (!LTy)
4433 // Not a filter - skip it.
4434 continue;
4435 // If Filter is a subset of LFilter, i.e. every element of Filter is also
4436 // an element of LFilter, then discard LFilter.
4437 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
4438 // If Filter is empty then it is a subset of LFilter.
4439 if (!FElts) {
4440 // Discard LFilter.
4441 NewClauses.erase(J);
4442 MakeNewInstruction = true;
4443 // Move on to the next filter.
4444 continue;
4446 unsigned LElts = LTy->getNumElements();
4447 // If Filter is longer than LFilter then it cannot be a subset of it.
4448 if (FElts > LElts)
4449 // Move on to the next filter.
4450 continue;
4451 // At this point we know that LFilter has at least one element.
4452 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
4453 // Filter is a subset of LFilter iff Filter contains only zeros (as we
4454 // already know that Filter is not longer than LFilter).
4455 if (isa<ConstantAggregateZero>(Filter)) {
4456 assert(FElts <= LElts && "Should have handled this case earlier!");
4457 // Discard LFilter.
4458 NewClauses.erase(J);
4459 MakeNewInstruction = true;
4461 // Move on to the next filter.
4462 continue;
4464 ConstantArray *LArray = cast<ConstantArray>(LFilter);
4465 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
4466 // Since Filter is non-empty and contains only zeros, it is a subset of
4467 // LFilter iff LFilter contains a zero.
4468 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
4469 for (unsigned l = 0; l != LElts; ++l)
4470 if (LArray->getOperand(l)->isNullValue()) {
4471 // LFilter contains a zero - discard it.
4472 NewClauses.erase(J);
4473 MakeNewInstruction = true;
4474 break;
4476 // Move on to the next filter.
4477 continue;
4479 // At this point we know that both filters are ConstantArrays. Loop over
4480 // operands to see whether every element of Filter is also an element of
4481 // LFilter. Since filters tend to be short this is probably faster than
4482 // using a method that scales nicely.
4483 ConstantArray *FArray = cast<ConstantArray>(Filter);
4484 bool AllFound = true;
4485 for (unsigned f = 0; f != FElts; ++f) {
4486 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
4487 AllFound = false;
4488 for (unsigned l = 0; l != LElts; ++l) {
4489 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
4490 if (LTypeInfo == FTypeInfo) {
4491 AllFound = true;
4492 break;
4495 if (!AllFound)
4496 break;
4498 if (AllFound) {
4499 // Discard LFilter.
4500 NewClauses.erase(J);
4501 MakeNewInstruction = true;
4503 // Move on to the next filter.
4507 // If we changed any of the clauses, replace the old landingpad instruction
4508 // with a new one.
4509 if (MakeNewInstruction) {
4510 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
4511 NewClauses.size());
4512 for (Constant *C : NewClauses)
4513 NLI->addClause(C);
4514 // A landing pad with no clauses must have the cleanup flag set. It is
4515 // theoretically possible, though highly unlikely, that we eliminated all
4516 // clauses. If so, force the cleanup flag to true.
4517 if (NewClauses.empty())
4518 CleanupFlag = true;
4519 NLI->setCleanup(CleanupFlag);
4520 return NLI;
4523 // Even if none of the clauses changed, we may nonetheless have understood
4524 // that the cleanup flag is pointless. Clear it if so.
4525 if (LI.isCleanup() != CleanupFlag) {
4526 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
4527 LI.setCleanup(CleanupFlag);
4528 return &LI;
4531 return nullptr;
4534 Value *
4535 InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
4536 // Try to push freeze through instructions that propagate but don't produce
4537 // poison as far as possible. If an operand of freeze satisfies three
4538 // conditions: 1) it has one use, 2) it does not produce poison, and 3) all
4539 // but one of its operands are guaranteed non-poison, then push the freeze
4540 // through to the one operand that is not guaranteed non-poison. The actual
4541 // transform is as follows.
4542 // Op1 = ...                        ; Op1 can be poison
4543 // Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and exactly one
4544 //                                  ; maybe-poison operand (Op1)
4545 // ... = Freeze(Op0)
4546 // =>
4547 // Op1 = ...
4548 // Op1.fr = Freeze(Op1)
4549 // ... = Inst(Op1.fr, NonPoisonOps...)
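// For instance (an illustrative IR sketch; the value names are hypothetical):
//   %a = add nuw i32 %x, 1
//   %fr = freeze i32 %a
// =>
//   %x.fr = freeze i32 %x
//   %a = add i32 %x.fr, 1   ; nuw dropped, since flags were the only
//                           ; remaining poison source and the sole use of
//                           ; %a was the freeze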
4550 auto *OrigOp = OrigFI.getOperand(0);
4551 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
4553 // While we could change the other users of OrigOp to use freeze(OrigOp), that
4554 // potentially reduces their optimization potential, so let's only do this if
4555 // OrigOp is used solely by the freeze.
4556 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
4557 return nullptr;
4559 // We can't push the freeze through an instruction which can itself create
4560 // poison. If the only source of new poison is flags, we can simply
4561 // strip them (since we know the only use is the freeze and nothing can
4562 // benefit from them.)
4563 if (canCreateUndefOrPoison(cast<Operator>(OrigOp),
4564 /*ConsiderFlagsAndMetadata*/ false))
4565 return nullptr;
4567 // If an operand is guaranteed not to be poison, there is no need to add a
4568 // freeze to it. So we first find the single operand that is not guaranteed
4569 // to be non-poison, if any.
4570 Use *MaybePoisonOperand = nullptr;
4571 for (Use &U : OrigOpInst->operands()) {
4572 if (isa<MetadataAsValue>(U.get()) ||
4573 isGuaranteedNotToBeUndefOrPoison(U.get()))
4574 continue;
4575 if (!MaybePoisonOperand)
4576 MaybePoisonOperand = &U;
4577 else
4578 return nullptr;
4581 OrigOpInst->dropPoisonGeneratingAnnotations();
4583 // If all operands are guaranteed to be non-poison, we can drop freeze.
4584 if (!MaybePoisonOperand)
4585 return OrigOp;
4587 Builder.SetInsertPoint(OrigOpInst);
4588 auto *FrozenMaybePoisonOperand = Builder.CreateFreeze(
4589 MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");
4591 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
4592 return OrigOp;
4595 Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
4596 PHINode *PN) {
4597 // Detect whether this is a recurrence with a start value and some number of
4598 // backedge values. We'll check whether we can push the freeze through the
4599 // backedge values (possibly dropping poison flags along the way) until we
4600 // reach the phi again. In that case, we can move the freeze to the start
4601 // value.
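// For instance (an illustrative IR sketch; value names are hypothetical):
//   loop:
//     %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nsw i32 %iv, 1
//   ...
//   %fr = freeze i32 %iv
// =>
//   entry:
//     %start.fr = freeze i32 %start
//   loop:
//     %iv = phi i32 [ %start.fr, %entry ], [ %iv.next, %loop ]
//     %iv.next = add i32 %iv, 1   ; nsw dropped along the backedge;
//                                 ; uses of %fr now use %iv directly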
4602 Use *StartU = nullptr;
4603 SmallVector<Value *> Worklist;
4604 for (Use &U : PN->incoming_values()) {
4605 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
4606 // Add backedge value to worklist.
4607 Worklist.push_back(U.get());
4608 continue;
4611 // Don't bother handling multiple start values.
4612 if (StartU)
4613 return nullptr;
4614 StartU = &U;
4617 if (!StartU || Worklist.empty())
4618 return nullptr; // Not a recurrence.
4620 Value *StartV = StartU->get();
4621 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
4622 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
4623 // We can't insert freeze if the start value is the result of the
4624 // terminator (e.g. an invoke).
4625 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
4626 return nullptr;
4628 SmallPtrSet<Value *, 32> Visited;
4629 SmallVector<Instruction *> DropFlags;
4630 while (!Worklist.empty()) {
4631 Value *V = Worklist.pop_back_val();
4632 if (!Visited.insert(V).second)
4633 continue;
4635 if (Visited.size() > 32)
4636 return nullptr; // Limit the total number of values we inspect.
4638 // Assume that PN is non-poison, because it will be after the transform.
4639 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
4640 continue;
4642 Instruction *I = dyn_cast<Instruction>(V);
4643 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
4644 /*ConsiderFlagsAndMetadata*/ false))
4645 return nullptr;
4647 DropFlags.push_back(I);
4648 append_range(Worklist, I->operands());
4651 for (Instruction *I : DropFlags)
4652 I->dropPoisonGeneratingAnnotations();
4654 if (StartNeedsFreeze) {
4655 Builder.SetInsertPoint(StartBB->getTerminator());
4656 Value *FrozenStartV = Builder.CreateFreeze(StartV,
4657 StartV->getName() + ".fr");
4658 replaceUse(*StartU, FrozenStartV);
4660 return replaceInstUsesWith(FI, PN);
4663 bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
4664 Value *Op = FI.getOperand(0);
4666 if (isa<Constant>(Op) || Op->hasOneUse())
4667 return false;
4669 // Move the freeze directly after the definition of its operand, so that
4670 // it dominates the maximum number of uses. Note that it may not dominate
4671 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
4672 // the normal/default destination. This is why the domination check in the
4673 // replacement below is still necessary.
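// For example (an illustrative IR sketch; names are hypothetical):
//   %op = invoke i32 @g() to label %normal unwind label %lpad
// If a use of %op is an incoming value of a phi in %normal, the freeze
// placed after %op's definition point does not dominate that use, so the
// replaceUsesWithIf below deliberately leaves it alone.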
4674 BasicBlock::iterator MoveBefore;
4675 if (isa<Argument>(Op)) {
4676 MoveBefore =
4677 FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
4678 } else {
4679 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
4680 if (!MoveBeforeOpt)
4681 return false;
4682 MoveBefore = *MoveBeforeOpt;
4685 // Don't move to the position of a debug intrinsic.
4686 if (isa<DbgInfoIntrinsic>(MoveBefore))
4687 MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();
4688 // Re-point iterator to come after any debug-info records, if we're
4689 // running in "RemoveDIs" mode
4690 MoveBefore.setHeadBit(false);
4692 bool Changed = false;
4693 if (&FI != &*MoveBefore) {
4694 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
4695 Changed = true;
4698 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
4699 bool Dominates = DT.dominates(&FI, U);
4700 Changed |= Dominates;
4701 return Dominates;
4704 return Changed;
4707 // Check if any direct or bitcast user of this value is a shuffle instruction.
4708 static bool isUsedWithinShuffleVector(Value *V) {
4709 for (auto *U : V->users()) {
4710 if (isa<ShuffleVectorInst>(U))
4711 return true;
4712 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
4713 return true;
4715 return false;
4718 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
4719 Value *Op0 = I.getOperand(0);
4721 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
4722 return replaceInstUsesWith(I, V);
4724 // freeze (phi const, x) --> phi const, (freeze x)
4725 if (auto *PN = dyn_cast<PHINode>(Op0)) {
4726 if (Instruction *NV = foldOpIntoPhi(I, PN))
4727 return NV;
4728 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
4729 return NV;
4732 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
4733 return replaceInstUsesWith(I, NI);
4735 // If I is freeze(undef), check its uses and fold it to a fixed constant.
4736 // - or: pick -1
4737 // - select's condition: if the true value is constant, choose it by making
4738 // the condition true.
4739 // - default: pick 0
4741 // Note that this transform is intentionally done here rather than
4742 // via an analysis in InstSimplify or at individual user sites. That is
4743 // because we must produce the same value for all uses of the freeze -
4744 // it's the reason "freeze" exists!
4746 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
4747 // duplicating logic for binops at least.
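// For example (an illustrative IR sketch; names are hypothetical):
//   %fr = freeze i32 undef
//   %a = or i32 %fr, %x
//   %b = or i32 %fr, %y
// Both users prefer -1, so %fr is replaced by i32 -1. Had the users
// disagreed, we would have fallen back to 0.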
4748 auto getUndefReplacement = [&I](Type *Ty) {
4749 Constant *BestValue = nullptr;
4750 Constant *NullValue = Constant::getNullValue(Ty);
4751 for (const auto *U : I.users()) {
4752 Constant *C = NullValue;
4753 if (match(U, m_Or(m_Value(), m_Value())))
4754 C = ConstantInt::getAllOnesValue(Ty);
4755 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
4756 C = ConstantInt::getTrue(Ty);
4758 if (!BestValue)
4759 BestValue = C;
4760 else if (BestValue != C)
4761 BestValue = NullValue;
4763 assert(BestValue && "Must have at least one use");
4764 return BestValue;
4767 if (match(Op0, m_Undef())) {
4768 // Don't fold freeze(undef/poison) if it's used as a vector operand in
4769 // a shuffle. This may improve codegen for shuffles that allow
4770 // unspecified inputs.
4771 if (isUsedWithinShuffleVector(&I))
4772 return nullptr;
4773 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
4776 Constant *C;
4777 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement()) {
4778 Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType());
4779 return replaceInstUsesWith(I, Constant::replaceUndefsWith(C, ReplaceC));
4782 // Replace uses of Op with freeze(Op).
4783 if (freezeOtherUses(I))
4784 return &I;
4786 return nullptr;
4789 /// Check for case where the call writes to an otherwise dead alloca. This
4790 /// shows up for unused out-params in idiomatic C/C++ code. Note that this
4791 /// helper *only* analyzes the write; it doesn't check any other legality aspect.
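/// For example (an illustrative C sketch; names are hypothetical):
///   int unused;                   // otherwise-dead local
///   sscanf(buf, "%d", &unused);   // the write through the out-param lands
///                                 // in a local that is never read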
4792 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
4793 auto *CB = dyn_cast<CallBase>(I);
4794 if (!CB)
4795 // TODO: handle e.g. store to alloca here - only worth doing if we extend
4796 // to allow reload along used path as described below. Otherwise, this
4797 // is simply a store to a dead allocation which will be removed.
4798 return false;
4799 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
4800 if (!Dest)
4801 return false;
4802 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
4803 if (!AI)
4804 // TODO: allow malloc?
4805 return false;
4806 // TODO: allow memory access dominated by move point? Note that since AI
4807 // could have a reference to itself captured by the call, we would need to
4808 // account for cycles in doing so.
4809 SmallVector<const User *> AllocaUsers;
4810 SmallPtrSet<const User *, 4> Visited;
4811 auto pushUsers = [&](const Instruction &I) {
4812 for (const User *U : I.users()) {
4813 if (Visited.insert(U).second)
4814 AllocaUsers.push_back(U);
4817 pushUsers(*AI);
4818 while (!AllocaUsers.empty()) {
4819 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
4820 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
4821 pushUsers(*UserI);
4822 continue;
4824 if (UserI == CB)
4825 continue;
4826 // TODO: support lifetime.start/end here
4827 return false;
4829 return true;
4832 /// Try to move the specified instruction from its current block into the
4833 /// beginning of DestBlock, which can only happen if it's safe to move the
4834 /// instruction past all of the instructions between it and the end of its
4835 /// block.
4836 bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
4837 BasicBlock *DestBlock) {
4838 BasicBlock *SrcBlock = I->getParent();
4840 // Cannot move control-flow-involving instructions, volatile loads, vaargs, etc.
4841 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
4842 I->isTerminator())
4843 return false;
4845 // Do not sink static or dynamic alloca instructions. Static allocas must
4846 // remain in the entry block, and dynamic allocas must not be sunk in between
4847 // a stacksave / stackrestore pair, which would incorrectly shorten their
4848 // lifetime.
4849 if (isa<AllocaInst>(I))
4850 return false;
4852 // Do not sink into catchswitch blocks.
4853 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
4854 return false;
4856 // Do not sink convergent call instructions.
4857 if (auto *CI = dyn_cast<CallInst>(I)) {
4858 if (CI->isConvergent())
4859 return false;
4862 // Unless we can prove that the memory write isn't visible except on the
4863 // path we're sinking to, we must bail.
4864 if (I->mayWriteToMemory()) {
4865 if (!SoleWriteToDeadLocal(I, TLI))
4866 return false;
4869 // We can only sink load instructions if there is nothing between the load and
4870 // the end of block that could change the value.
4871 if (I->mayReadFromMemory() &&
4872 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
4873 // We don't want to do any sophisticated alias analysis, so we only check
4874 // the instructions after I in I's parent block if we try to sink to its
4875 // successor block.
4876 if (DestBlock->getUniquePredecessor() != I->getParent())
4877 return false;
4878 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
4879 E = I->getParent()->end();
4880 Scan != E; ++Scan)
4881 if (Scan->mayWriteToMemory())
4882 return false;
4885 I->dropDroppableUses([&](const Use *U) {
4886 auto *I = dyn_cast<Instruction>(U->getUser());
4887 if (I && I->getParent() != DestBlock) {
4888 Worklist.add(I);
4889 return true;
4891 return false;
4893 /// FIXME: We could remove droppable uses that are not dominated by
4894 /// the new position.
4896 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
4897 I->moveBefore(*DestBlock, InsertPos);
4898 ++NumSunkInst;
4900 // Also sink all related debug uses from the source basic block. Otherwise we
4901 // get a debug use before the def. Attempt to salvage debug uses first, to
4902 // maximise the range over which variables have a location. If we cannot
4903 // salvage, then mark the location undef: we know it was supposed to receive
4904 // a new location here, but that computation has been sunk.
4905 SmallVector<DbgVariableIntrinsic *, 2> DbgUsers;
4906 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
4907 findDbgUsers(DbgUsers, I, &DbgVariableRecords);
4908 if (!DbgUsers.empty())
4909 tryToSinkInstructionDbgValues(I, InsertPos, SrcBlock, DestBlock, DbgUsers);
4910 if (!DbgVariableRecords.empty())
4911 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
4912 DbgVariableRecords);
4914 // PS: there are numerous flaws with this behaviour, not least that right now
4915 // assignments can be re-ordered past other assignments to the same variable
4916 // if they use different Values. Creating more undef assignments can never be
4917 // undone. And salvaging all users outside of this block can unnecessarily
4918 // alter the lifetime of the live-value that the variable refers to.
4919 // Some of these things can be resolved by tolerating debug use-before-defs in
4920 // LLVM-IR; however, that depends on the instruction-referencing CodeGen
4921 // backend being used for more architectures.
4923 return true;
4926 void InstCombinerImpl::tryToSinkInstructionDbgValues(
4927 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
4928 BasicBlock *DestBlock, SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers) {
4929 // For all debug values in the destination block, the sunk instruction
4930 // will still be available, so they do not need to be dropped.
4931 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSalvage;
4932 for (auto &DbgUser : DbgUsers)
4933 if (DbgUser->getParent() != DestBlock)
4934 DbgUsersToSalvage.push_back(DbgUser);
4936 // Process the sinking DbgUsersToSalvage in reverse order, as we only want
4937 // to clone the last appearing debug intrinsic for each given variable.
4938 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSink;
4939 for (DbgVariableIntrinsic *DVI : DbgUsersToSalvage)
4940 if (DVI->getParent() == SrcBlock)
4941 DbgUsersToSink.push_back(DVI);
4942 llvm::sort(DbgUsersToSink,
4943 [](auto *A, auto *B) { return B->comesBefore(A); });
4945 SmallVector<DbgVariableIntrinsic *, 2> DIIClones;
4946 SmallSet<DebugVariable, 4> SunkVariables;
4947 for (auto *User : DbgUsersToSink) {
4948 // A dbg.declare instruction should not be cloned, since there can only be
4949 // one per variable fragment. It should be left in the original place
4950 // because the sunk instruction is not an alloca (otherwise we could not be
4951 // here).
4952 if (isa<DbgDeclareInst>(User))
4953 continue;
4955 DebugVariable DbgUserVariable =
4956 DebugVariable(User->getVariable(), User->getExpression(),
4957 User->getDebugLoc()->getInlinedAt());
4959 if (!SunkVariables.insert(DbgUserVariable).second)
4960 continue;
4962 // Leave dbg.assign intrinsics in their original positions and there should
4963 // be no need to insert a clone.
4964 if (isa<DbgAssignIntrinsic>(User))
4965 continue;
4967 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
4968 if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))
4969 DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));
4970 LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');
4973 // Perform salvaging without the clones, then sink the clones.
4974 if (!DIIClones.empty()) {
4975 salvageDebugInfoForDbgValues(*I, DbgUsersToSalvage, {});
4976 // The clones are in reverse order of original appearance, reverse again to
4977 // maintain the original order.
4978 for (auto &DIIClone : llvm::reverse(DIIClones)) {
4979 DIIClone->insertBefore(&*InsertPos);
4980 LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');
4985 void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
4986 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
4987 BasicBlock *DestBlock,
4988 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
4989 // Implementation of tryToSinkInstructionDbgValues, but for the
4990 // DbgVariableRecord form of variable assignments rather than dbg.values.
4992 // Fetch all DbgVariableRecords not already in the destination.
4993 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
4994 for (auto &DVR : DbgVariableRecords)
4995 if (DVR->getParent() != DestBlock)
4996 DbgVariableRecordsToSalvage.push_back(DVR);
4998 // Fetch a second collection of DbgVariableRecords in the source block that
4999 // we're going to sink.
5000 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5001 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5002 if (DVR->getParent() == SrcBlock)
5003 DbgVariableRecordsToSink.push_back(DVR);
5005 // Sort DbgVariableRecords according to their position in the block. This is a
5006 // partial order: DbgVariableRecords attached to different instructions will
5007 // be ordered by the instruction order, but DbgVariableRecords attached to the
5008 // same instruction won't have an order.
5009 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5010 return B->getInstruction()->comesBefore(A->getInstruction());
5012 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5014 // If there are two assignments to the same variable attached to the same
5015 // instruction, the ordering between the two assignments is important. Scan
5016 // for this (rare) case and establish which is the last assignment.
5017 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5018 SmallDenseMap<InstVarPair, DbgVariableRecord *> FilterOutMap;
5019 if (DbgVariableRecordsToSink.size() > 1) {
5020 SmallDenseMap<InstVarPair, unsigned> CountMap;
5021 // Count how many assignments to each variable there is per instruction.
5022 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5023 DebugVariable DbgUserVariable =
5024 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5025 DVR->getDebugLoc()->getInlinedAt());
5026 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5029 // If there are any instructions with two assignments, add them to the
5030 // FilterOutMap to record that they need extra filtering.
5031 SmallPtrSet<const Instruction *, 4> DupSet;
5032 for (auto It : CountMap) {
5033 if (It.second > 1) {
5034 FilterOutMap[It.first] = nullptr;
5035 DupSet.insert(It.first.first);
5039 // For all instruction/variable pairs needing extra filtering, find the
5040 // latest assignment.
5041 for (const Instruction *Inst : DupSet) {
5042 for (DbgVariableRecord &DVR :
5043 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5044 DebugVariable DbgUserVariable =
5045 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5046 DVR.getDebugLoc()->getInlinedAt());
5047 auto FilterIt =
5048 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5049 if (FilterIt == FilterOutMap.end())
5050 continue;
5051 if (FilterIt->second != nullptr)
5052 continue;
5053 FilterIt->second = &DVR;
5058 // Perform cloning of the DbgVariableRecords that we plan on sinking; filter
5059 // out any duplicate assignments identified above.
5060 SmallVector<DbgVariableRecord *, 2> DVRClones;
5061 SmallSet<DebugVariable, 4> SunkVariables;
5062 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5063 if (DVR->Type == DbgVariableRecord::LocationType::Declare)
5064 continue;
5066 DebugVariable DbgUserVariable =
5067 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5068 DVR->getDebugLoc()->getInlinedAt());
5070 // For any variable where there were multiple assignments in the same place,
5071 // ignore all but the last assignment.
5072 if (!FilterOutMap.empty()) {
5073 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5074 auto It = FilterOutMap.find(IVP);
5076 // Filter out.
5077 if (It != FilterOutMap.end() && It->second != DVR)
5078 continue;
5081 if (!SunkVariables.insert(DbgUserVariable).second)
5082 continue;
5084 if (DVR->isDbgAssign())
5085 continue;
5087 DVRClones.emplace_back(DVR->clone());
5088 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5091 // Perform salvaging without the clones, then sink the clones.
5092 if (DVRClones.empty())
5093 return;
5095 salvageDebugInfoForDbgValues(*I, {}, DbgVariableRecordsToSalvage);
5097 // The clones are in reverse order of original appearance. Assert that the
5098 // head bit is set on the iterator as we _should_ have received it via
5099 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5100 // we'll repeatedly insert at the head, such as:
5101 // DVR-3 (third insertion goes here)
5102 // DVR-2 (second insertion goes here)
5103 // DVR-1 (first insertion goes here)
5104 // Any-Prior-DVRs
5105 // InsertPtInst
5106 assert(InsertPos.getHeadBit());
5107 for (DbgVariableRecord *DVRClone : DVRClones) {
5108 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5109 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5113 bool InstCombinerImpl::run() {
5114 while (!Worklist.isEmpty()) {
5115 // Walk deferred instructions in reverse order, and push them to the
5116 // worklist, which means they'll end up popped from the worklist in-order.
5117 while (Instruction *I = Worklist.popDeferred()) {
5118 // Check to see if we can DCE the instruction. We do this already here to
5119 // reduce the number of uses and thus allow other folds to trigger.
5120 // Note that eraseInstFromFunction() may push additional instructions on
5121 // the deferred worklist, so this will DCE whole instruction chains.
5122 if (isInstructionTriviallyDead(I, &TLI)) {
5123 eraseInstFromFunction(*I);
5124 ++NumDeadInst;
5125 continue;
5128 Worklist.push(I);
5131 Instruction *I = Worklist.removeOne();
5132 if (I == nullptr) continue; // skip null values.
5134 // Check to see if we can DCE the instruction.
5135 if (isInstructionTriviallyDead(I, &TLI)) {
5136 eraseInstFromFunction(*I);
5137 ++NumDeadInst;
5138 continue;
5141 if (!DebugCounter::shouldExecute(VisitCounter))
5142 continue;
5144 // See if we can trivially sink this instruction to its user if we can
5145 // prove that the successor is not executed more frequently than our block.
5146 // Return the UserBlock if successful.
5147 auto getOptionalSinkBlockForInst =
5148 [this](Instruction *I) -> std::optional<BasicBlock *> {
5149 if (!EnableCodeSinking)
5150 return std::nullopt;
5152 BasicBlock *BB = I->getParent();
5153 BasicBlock *UserParent = nullptr;
5154 unsigned NumUsers = 0;
5156 for (Use &U : I->uses()) {
5157 User *User = U.getUser();
5158 if (User->isDroppable())
5159 continue;
5160 if (NumUsers > MaxSinkNumUsers)
5161 return std::nullopt;
5163 Instruction *UserInst = cast<Instruction>(User);
5164 // Special handling for Phi nodes - get the block the use occurs in.
5165 BasicBlock *UserBB = UserInst->getParent();
5166 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5167 UserBB = PN->getIncomingBlock(U);
5168 // Bail out if we have uses in different blocks. We don't do any
5169 // sophisticated analysis (i.e., finding the NearestCommonDominator of
5170 // these use blocks).
5171 if (UserParent && UserParent != UserBB)
5172 return std::nullopt;
5173 UserParent = UserBB;
5175 // Make sure these checks are done only once; naturally, we do them the
5176 // first time we see the user parent, which saves compile time.
5177 if (NumUsers == 0) {
5178 // Try sinking to another block. If that block is unreachable, then do
5179 // not bother. SimplifyCFG should handle it.
5180 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5181 return std::nullopt;
5183 auto *Term = UserParent->getTerminator();
5184 // See if the user is one of our successors that has only one
5185 // predecessor, so that we don't have to split the critical edge.
5186 // Another option where we can sink is a block that ends with a
5187 // terminator that does not pass control to another block (such as
5188 // return, unreachable, or resume). In this case:
5189 // - I dominates the User (by SSA form);
5190 // - the User will be executed at most once.
5191 // So sinking I down to User is always profitable or neutral.
5192 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5193 return std::nullopt;
5195 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5198 NumUsers++;
5201 // No users, or only droppable users.
5202 if (!UserParent)
5203 return std::nullopt;
5205 return UserParent;
5208 auto OptBB = getOptionalSinkBlockForInst(I);
5209 if (OptBB) {
5210 auto *UserParent = *OptBB;
5211 // Okay, the CFG is simple enough, try to sink this instruction.
5212 if (tryToSinkInstruction(I, UserParent)) {
5213 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5214 MadeIRChange = true;
5215 // We'll add uses of the sunk instruction below, but since
5216 // sinking can expose opportunities for its *operands*, add
5217 // them to the worklist.
5218 for (Use &U : I->operands())
5219 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5220 Worklist.push(OpI);
5224 // Now that we have an instruction, try combining it to simplify it.
5225 Builder.SetInsertPoint(I);
5226 Builder.CollectMetadataToCopy(
5227 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5229 #ifndef NDEBUG
5230 std::string OrigI;
5231 #endif
5232 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5233 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5235 if (Instruction *Result = visit(*I)) {
5236 ++NumCombined;
5237 // Should we replace the old instruction with a new one?
5238 if (Result != I) {
5239 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5240 << " New = " << *Result << '\n');
5242 // We copy the old instruction's DebugLoc to the new instruction, unless
5243 // InstCombine already assigned a DebugLoc to it, in which case we
5244 // should trust the more specifically selected DebugLoc.
5245 if (!Result->getDebugLoc())
5246 Result->setDebugLoc(I->getDebugLoc());
5247 // We also copy annotation metadata to the new instruction.
5248 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5249 // Everything uses the new instruction now.
5250 I->replaceAllUsesWith(Result);
5252 // Move the name to the new instruction first.
5253 Result->takeName(I);
5255 // Insert the new instruction into the basic block...
5256 BasicBlock *InstParent = I->getParent();
5257 BasicBlock::iterator InsertPos = I->getIterator();
5259 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5260 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5261 // We need to fix up the insertion point.
5262 if (isa<PHINode>(I)) // PHI -> Non-PHI
5263 InsertPos = InstParent->getFirstInsertionPt();
5264 else // Non-PHI -> PHI
5265 InsertPos = InstParent->getFirstNonPHIIt();
5268 Result->insertInto(InstParent, InsertPos);
5270 // Push the new instruction and any users onto the worklist.
5271 Worklist.pushUsersToWorkList(*Result);
5272 Worklist.push(Result);
5274 eraseInstFromFunction(*I);
5275 } else {
5276 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5277 << " New = " << *I << '\n');
5279 // If the instruction was modified, it's possible that it is now dead.
5280 // If so, remove it.
5281 if (isInstructionTriviallyDead(I, &TLI)) {
5282 eraseInstFromFunction(*I);
5283 } else {
5284 Worklist.pushUsersToWorkList(*I);
5285 Worklist.push(I);
5288 MadeIRChange = true;
5292 Worklist.zap();
5293 return MadeIRChange;
5296 // Track the scopes used by !alias.scope and !noalias. In a function, a
5297 // @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5298 // by both sets. If not, the declaration of the scope can be safely omitted.
5299 // The MDNode of the scope can be omitted as well for the instructions that are
5300 // part of this function. We do not do that at this point, as this might become
5301 // too time consuming to do.
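// For example (illustrative IR; the metadata numbering is hypothetical):
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
//   %v = load i8, ptr %p, !alias.scope !2
// Here !2 is used by an !alias.scope but by no !noalias, so the declaration
// is dead and isNoAliasScopeDeclDead() below reports it as removable.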
5302 class AliasScopeTracker {
5303 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5304 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5306 public:
5307 void analyse(Instruction *I) {
5308 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5309 if (!I->hasMetadataOtherThanDebugLoc())
5310 return;
5312 auto Track = [](Metadata *ScopeList, auto &Container) {
5313 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5314 if (!MDScopeList || !Container.insert(MDScopeList).second)
5315 return;
5316 for (const auto &MDOperand : MDScopeList->operands())
5317 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5318 Container.insert(MDScope);
5321 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5322 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5325 bool isNoAliasScopeDeclDead(Instruction *Inst) {
5326 NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5327 if (!Decl)
5328 return false;
5330 assert(Decl->use_empty() &&
5331 "llvm.experimental.noalias.scope.decl in use ?");
5332 const MDNode *MDSL = Decl->getScopeList();
5333 assert(MDSL->getNumOperands() == 1 &&
5334 "llvm.experimental.noalias.scope should refer to a single scope");
5335 auto &MDOperand = MDSL->getOperand(0);
5336 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5337 return !UsedAliasScopesAndLists.contains(MD) ||
5338 !UsedNoAliasScopesAndLists.contains(MD);
5340 // Not an MDNode? Throw it away.
5341 return true;
5345 /// Populate the IC worklist from a function, by walking it in reverse
5346 /// post-order and adding all reachable code to the worklist.
5348 /// This has a couple of tricks to make the code faster and more powerful. In
5349 /// particular, we constant fold and DCE instructions as we go, to avoid adding
5350 /// them to the worklist (this significantly speeds up instcombine on code where
5351 /// many instructions are dead or constant). Additionally, if we find a branch
5352 /// whose condition is a known constant, we only visit the reachable successors.
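/// For example (an illustrative IR sketch):
///   br i1 true, label %taken, label %untaken
/// Only %taken is treated as live; the edge to %untaken is recorded as dead,
/// and %untaken's instructions are removed later if no live edge reaches it.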
5353 bool InstCombinerImpl::prepareWorklist(Function &F) {
5354 bool MadeIRChange = false;
5355 SmallPtrSet<BasicBlock *, 32> LiveBlocks;
5356 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5357 DenseMap<Constant *, Constant *> FoldedConstants;
5358 AliasScopeTracker SeenAliasScopes;
5360 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5361 for (BasicBlock *Succ : successors(BB))
5362 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5363 for (PHINode &PN : Succ->phis())
5364 for (Use &U : PN.incoming_values())
5365 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5366 U.set(PoisonValue::get(PN.getType()));
5367 MadeIRChange = true;
5371 for (BasicBlock *BB : RPOT) {
5372 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5373 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5374 })) {
5375 HandleOnlyLiveSuccessor(BB, nullptr);
5376 continue;
5378 LiveBlocks.insert(BB);
5380 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5381 // ConstantProp instruction if trivially constant.
5382 if (!Inst.use_empty() &&
5383 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5384 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5385 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5386 << '\n');
5387 Inst.replaceAllUsesWith(C);
5388 ++NumConstProp;
5389 if (isInstructionTriviallyDead(&Inst, &TLI))
5390 Inst.eraseFromParent();
5391 MadeIRChange = true;
5392 continue;
5395 // See if we can constant fold its operands.
5396 for (Use &U : Inst.operands()) {
5397 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5398 continue;
5400 auto *C = cast<Constant>(U);
5401 Constant *&FoldRes = FoldedConstants[C];
5402 if (!FoldRes)
5403 FoldRes = ConstantFoldConstant(C, DL, &TLI);
5405 if (FoldRes != C) {
5406 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
5407 << "\n Old = " << *C
5408 << "\n New = " << *FoldRes << '\n');
5409 U = FoldRes;
5410 MadeIRChange = true;
5414 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
5415 // these call instructions consumes non-trivial amount of time and
5416 // provides no value for the optimization.
5417 if (!Inst.isDebugOrPseudoInst()) {
5418 InstrsForInstructionWorklist.push_back(&Inst);
5419 SeenAliasScopes.analyse(&Inst);
5423 // If this is a branch or switch on a constant, mark only the single
5424 // live successor. Otherwise assume all successors are live.
5425 Instruction *TI = BB->getTerminator();
5426 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
5427 if (isa<UndefValue>(BI->getCondition())) {
5428 // Branch on undef is UB.
5429 HandleOnlyLiveSuccessor(BB, nullptr);
5430 continue;
5432 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5433 bool CondVal = Cond->getZExtValue();
5434 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5435 continue;
5437 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5438 if (isa<UndefValue>(SI->getCondition())) {
5439 // Switch on undef is UB.
5440 HandleOnlyLiveSuccessor(BB, nullptr);
5441 continue;
5443 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5444 HandleOnlyLiveSuccessor(BB,
5445 SI->findCaseValue(Cond)->getCaseSuccessor());
5446 continue;
5451 // Remove instructions inside unreachable blocks. This prevents the
5452 // instcombine code from having to deal with some bad special cases, and
5453 // reduces use counts of instructions.
5454 for (BasicBlock &BB : F) {
5455 if (LiveBlocks.count(&BB))
5456 continue;
5458 unsigned NumDeadInstInBB;
5459 unsigned NumDeadDbgInstInBB;
5460 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
5461 removeAllNonTerminatorAndEHPadInstructions(&BB);
5463 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
5464 NumDeadInst += NumDeadInstInBB;
5467 // Once we've found all of the instructions to add to instcombine's worklist,
5468 // add them in reverse order. This way instcombine will visit from the top
5469 // of the function down. This jives well with the way that it adds all uses
5470 // of instructions to the worklist after doing a transformation, thus avoiding
5471 // some N^2 behavior in pathological cases.
5472 Worklist.reserve(InstrsForInstructionWorklist.size());
5473 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
5474 // DCE instruction if trivially dead. As we iterate in reverse program
5475 // order here, we will clean up whole chains of dead instructions.
5476 if (isInstructionTriviallyDead(Inst, &TLI) ||
5477 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
5478 ++NumDeadInst;
5479 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
5480 salvageDebugInfo(*Inst);
5481 Inst->eraseFromParent();
5482 MadeIRChange = true;
5483 continue;
5486 Worklist.push(Inst);
5489 return MadeIRChange;
5492 void InstCombiner::computeBackEdges() {
5493 // Collect backedges.
5494 SmallPtrSet<BasicBlock *, 16> Visited;
5495 for (BasicBlock *BB : RPOT) {
5496 Visited.insert(BB);
5497 for (BasicBlock *Succ : successors(BB))
5498 if (Visited.contains(Succ))
5499 BackEdges.insert({BB, Succ});
5501 ComputedBackEdges = true;
5504 static bool combineInstructionsOverFunction(
5505 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
5506 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
5507 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
5508 BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
5509 const InstCombineOptions &Opts) {
5510 auto &DL = F.getDataLayout();
5511 bool VerifyFixpoint = Opts.VerifyFixpoint &&
5512 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
5514 /// Builder - This is an IRBuilder that automatically inserts new
5515 /// instructions into the worklist when they are created.
5516 IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
5517 F.getContext(), TargetFolder(DL),
5518 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
5519 Worklist.add(I);
5520 if (auto *Assume = dyn_cast<AssumeInst>(I))
5521 AC.registerAssumption(Assume);
5522 }));
5524 ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());
5526 // Lower dbg.declare intrinsics, otherwise their value may be clobbered
5527 // by instcombiner.
5528 bool MadeIRChange = false;
5529 if (ShouldLowerDbgDeclare)
5530 MadeIRChange = LowerDbgDeclare(F);
5532 // Iterate while there is work to do.
5533 unsigned Iteration = 0;
5534 while (true) {
5535 ++Iteration;
5537 if (Iteration > Opts.MaxIterations && !VerifyFixpoint) {
5538 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
5539 << " on " << F.getName()
5540 << " reached; stopping without verifying fixpoint\n");
5541 break;
5544 ++NumWorklistIterations;
5545 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
5546 << F.getName() << "\n");
5548 InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
5549 ORE, BFI, BPI, PSI, DL, RPOT);
5550 IC.MaxArraySizeForCombine = MaxArraySize;
5551 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
5552 MadeChangeInThisIteration |= IC.run();
5553 if (!MadeChangeInThisIteration)
5554 break;
5556 MadeIRChange = true;
5557 if (Iteration > Opts.MaxIterations) {
5558 report_fatal_error(
5559 "Instruction Combining on " + Twine(F.getName()) +
5560 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
5561 " iterations. " +
5562 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5563 "'instcombine-no-verify-fixpoint' to suppress this error.",
5564 /*GenCrashDiag=*/false);
5568 if (Iteration == 1)
5569 ++NumOneIteration;
5570 else if (Iteration == 2)
5571 ++NumTwoIterations;
5572 else if (Iteration == 3)
5573 ++NumThreeIterations;
5574 else
5575 ++NumFourOrMoreIterations;
5577 return MadeIRChange;
5580 InstCombinePass::InstCombinePass(InstCombineOptions Opts) : Options(Opts) {}
5582 void InstCombinePass::printPipeline(
5583 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
5584 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
5585 OS, MapClassName2PassName);
5586 OS << '<';
5587 OS << "max-iterations=" << Options.MaxIterations << ";";
5588 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
5589 OS << '>';
5592 char InstCombinePass::ID = 0;
5594 PreservedAnalyses InstCombinePass::run(Function &F,
5595 FunctionAnalysisManager &AM) {
5596 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
5597 // No changes since last InstCombine pass, exit early.
5598 if (LRT.shouldSkip(&ID))
5599 return PreservedAnalyses::all();
5601 auto &AC = AM.getResult<AssumptionAnalysis>(F);
5602 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
5603 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
5604 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
5605 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
5607 auto *AA = &AM.getResult<AAManager>(F);
5608 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
5609 ProfileSummaryInfo *PSI =
5610 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
5611 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
5612 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
5613 auto *BPI = AM.getCachedResult<BranchProbabilityAnalysis>(F);
5615 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
5616 BFI, BPI, PSI, Options)) {
5617 // No changes, all analyses are preserved.
5618 LRT.update(&ID, /*Changed=*/false);
5619 return PreservedAnalyses::all();
5622 // Mark all the analyses that instcombine updates as preserved.
5623 PreservedAnalyses PA;
5624 LRT.update(&ID, /*Changed=*/true);
5625 PA.preserve<LastRunTrackingAnalysis>();
5626 PA.preserveSet<CFGAnalyses>();
5627 return PA;
5630 void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
5631 AU.setPreservesCFG();
5632 AU.addRequired<AAResultsWrapperPass>();
5633 AU.addRequired<AssumptionCacheTracker>();
5634 AU.addRequired<TargetLibraryInfoWrapperPass>();
5635 AU.addRequired<TargetTransformInfoWrapperPass>();
5636 AU.addRequired<DominatorTreeWrapperPass>();
5637 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
5638 AU.addPreserved<DominatorTreeWrapperPass>();
5639 AU.addPreserved<AAResultsWrapperPass>();
5640 AU.addPreserved<BasicAAWrapperPass>();
5641 AU.addPreserved<GlobalsAAWrapperPass>();
5642 AU.addRequired<ProfileSummaryInfoWrapperPass>();
5643 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
5646 bool InstructionCombiningPass::runOnFunction(Function &F) {
5647 if (skipFunction(F))
5648 return false;
5650 // Required analyses.
5651 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5652 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
5653 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
5654 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
5655 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5656 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5658 // Optional analyses.
5659 ProfileSummaryInfo *PSI =
5660 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
5661 BlockFrequencyInfo *BFI =
5662 (PSI && PSI->hasProfileSummary()) ?
5663 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
5664 nullptr;
5665 BranchProbabilityInfo *BPI = nullptr;
5666 if (auto *WrapperPass =
5667 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
5668 BPI = &WrapperPass->getBPI();
5670 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
5671 BFI, BPI, PSI, InstCombineOptions());
5674 char InstructionCombiningPass::ID = 0;
5676 InstructionCombiningPass::InstructionCombiningPass() : FunctionPass(ID) {
5677 initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
5680 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
5681 "Combine redundant instructions", false, false)
5682 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
5683 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
5684 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
5685 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
5686 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
5687 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
5688 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
5689 INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
5690 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
5691 INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
5692 "Combine redundant instructions", false, false)
5694 // Initialization Routines
5695 void llvm::initializeInstCombine(PassRegistry &Registry) {
5696 initializeInstructionCombiningPassPass(Registry);
5699 FunctionPass *llvm::createInstructionCombiningPass() {
5700 return new InstructionCombiningPass();