//===- InstCombinePHI.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitPHINode function.
//
//===----------------------------------------------------------------------===//

13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallPtrSet.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/InstructionSimplify.h"
18 #include "llvm/Analysis/ValueTracking.h"
19 #include "llvm/IR/PatternMatch.h"
20 #include "llvm/Support/CommandLine.h"
21 #include "llvm/Transforms/InstCombine/InstCombiner.h"
22 #include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"

static cl::opt<unsigned>
    MaxNumPhis("instcombine-max-num-phis", cl::init(512),
               cl::desc("Maximum number of phis to handle in intptr/ptrint "
                        "folding"));

STATISTIC(NumPHIsOfInsertValues,
          "Number of phi-of-insertvalue turned into insertvalue-of-phis");
STATISTIC(NumPHIsOfExtractValues,
          "Number of phi-of-extractvalue turned into extractvalue-of-phi");
STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");

/// The PHI arguments will be folded into a single operation with a PHI node
/// as input. The debug location of the single operation will be the merged
/// locations of the original PHI node arguments.
void InstCombinerImpl::PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN) {
  auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  Inst->setDebugLoc(FirstInst->getDebugLoc());
  // We do not expect a CallInst here, otherwise, N-way merging of DebugLoc
  // will be inefficient.
  assert(!isa<CallInst>(Inst));

  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = cast<Instruction>(V);
    Inst->applyMergedLocation(Inst->getDebugLoc(), I->getDebugLoc());
  }
}

// Replace Integer typed PHI PN if the PHI's value is used as a pointer value.
// If there is an existing pointer typed PHI that produces the same value as
// PN, replace PN and the IntToPtr operation with it. Otherwise, synthesize a
// new pointer typed PHI and replace the original one and the IntToPtr
// operation with it.
//
// Case-1:
// bb1:
//    int_init = PtrToInt(ptr_init)
// bb2:
//    int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ptr_val2 = IntToPtr(int_val)
//    ...
//    inc_val_inc = PtrToInt(ptr_val_inc)
// ==>
// bb2:
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ...
//
// Case-2:
// bb1:
//    int_ptr = BitCast(ptr_ptr)
//    int_init = Load(int_ptr)
// bb2:
//    int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
//    ptr_val2 = IntToPtr(int_val)
//    ...
//    inc_val_inc = PtrToInt(ptr_val_inc)
// ==>
// bb1:
//    ptr_init = Load(ptr_ptr)
// bb2:
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ...
bool InstCombinerImpl::foldIntegerTypedPHI(PHINode &PN) {
  if (!PN.getType()->isIntegerTy())
    return false;
  if (!PN.hasOneUse())
    return false;

  auto *IntToPtr = dyn_cast<IntToPtrInst>(PN.user_back());
  if (!IntToPtr)
    return false;

  // Check if the pointer is actually used as pointer:
  auto HasPointerUse = [](Instruction *IIP) {
    for (User *U : IIP->users()) {
      Value *Ptr = nullptr;
      if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {
        Ptr = LoadI->getPointerOperand();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        Ptr = SI->getPointerOperand();
      } else if (GetElementPtrInst *GI = dyn_cast<GetElementPtrInst>(U)) {
        Ptr = GI->getPointerOperand();
      }

      if (Ptr && Ptr == IIP)
        return true;
    }
    return false;
  };

  if (!HasPointerUse(IntToPtr))
    return false;

  if (DL.getPointerSizeInBits(IntToPtr->getAddressSpace()) !=
      DL.getTypeSizeInBits(IntToPtr->getOperand(0)->getType()))
    return false;

  SmallVector<Value *, 4> AvailablePtrVals;
  for (auto Incoming : zip(PN.blocks(), PN.incoming_values())) {
    BasicBlock *BB = std::get<0>(Incoming);
    Value *Arg = std::get<1>(Incoming);

    // First look backward:
    if (auto *PI = dyn_cast<PtrToIntInst>(Arg)) {
      AvailablePtrVals.emplace_back(PI->getOperand(0));
      continue;
    }

    // Next look forward:
    Value *ArgIntToPtr = nullptr;
    for (User *U : Arg->users()) {
      if (isa<IntToPtrInst>(U) && U->getType() == IntToPtr->getType() &&
          (DT.dominates(cast<Instruction>(U), BB) ||
           cast<Instruction>(U)->getParent() == BB)) {
        ArgIntToPtr = U;
        break;
      }
    }

    if (ArgIntToPtr) {
      AvailablePtrVals.emplace_back(ArgIntToPtr);
      continue;
    }

    // If Arg is defined by a PHI, allow it. This will also create
    // more opportunities iteratively.
    if (isa<PHINode>(Arg)) {
      AvailablePtrVals.emplace_back(Arg);
      continue;
    }

    // For a single use integer load:
    auto *LoadI = dyn_cast<LoadInst>(Arg);
    if (!LoadI)
      return false;

    if (!LoadI->hasOneUse())
      return false;

    // Push the integer typed Load instruction into the available
    // value set, and fix it up later when the pointer typed PHI
    // is synthesized.
    AvailablePtrVals.emplace_back(LoadI);
  }

  // Now search for a matching PHI
  auto *BB = PN.getParent();
  assert(AvailablePtrVals.size() == PN.getNumIncomingValues() &&
         "Not enough available ptr typed incoming values");
  PHINode *MatchingPtrPHI = nullptr;
  unsigned NumPhis = 0;
  for (PHINode &PtrPHI : BB->phis()) {
    // FIXME: consider handling this in AggressiveInstCombine
    if (NumPhis++ > MaxNumPhis)
      return false;
    if (&PtrPHI == &PN || PtrPHI.getType() != IntToPtr->getType())
      continue;
    if (any_of(zip(PN.blocks(), AvailablePtrVals),
               [&](const auto &BlockAndValue) {
                 BasicBlock *BB = std::get<0>(BlockAndValue);
                 Value *V = std::get<1>(BlockAndValue);
                 return PtrPHI.getIncomingValueForBlock(BB) != V;
               }))
      continue;
    MatchingPtrPHI = &PtrPHI;
    break;
  }

  if (MatchingPtrPHI) {
    assert(MatchingPtrPHI->getType() == IntToPtr->getType() &&
           "Phi's Type does not match with IntToPtr");
    // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
    // to make sure another transform can't undo it in the meantime.
    replaceInstUsesWith(*IntToPtr, MatchingPtrPHI);
    eraseInstFromFunction(*IntToPtr);
    eraseInstFromFunction(PN);
    return true;
  }

  // If it requires a conversion for every PHI operand, do not do it.
  if (all_of(AvailablePtrVals, [&](Value *V) {
        return (V->getType() != IntToPtr->getType()) || isa<IntToPtrInst>(V);
      }))
    return false;

  // If any of the operands that require casting is a terminator
  // instruction, do not do it. Similarly, do not do the transform if the value
  // is PHI in a block with no insertion point, for example, a catchswitch
  // block, since we will not be able to insert a cast after the PHI.
  if (any_of(AvailablePtrVals, [&](Value *V) {
        if (V->getType() == IntToPtr->getType())
          return false;
        auto *Inst = dyn_cast<Instruction>(V);
        if (!Inst)
          return false;
        if (Inst->isTerminator())
          return true;
        auto *BB = Inst->getParent();
        if (isa<PHINode>(Inst) && BB->getFirstInsertionPt() == BB->end())
          return true;
        return false;
      }))
    return false;

  PHINode *NewPtrPHI = PHINode::Create(
      IntToPtr->getType(), PN.getNumIncomingValues(), PN.getName() + ".ptr");

  InsertNewInstBefore(NewPtrPHI, PN.getIterator());
  SmallDenseMap<Value *, Instruction *> Casts;
  for (auto Incoming : zip(PN.blocks(), AvailablePtrVals)) {
    auto *IncomingBB = std::get<0>(Incoming);
    auto *IncomingVal = std::get<1>(Incoming);

    if (IncomingVal->getType() == IntToPtr->getType()) {
      NewPtrPHI->addIncoming(IncomingVal, IncomingBB);
      continue;
    }

#ifndef NDEBUG
    LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);
    assert((isa<PHINode>(IncomingVal) ||
            IncomingVal->getType()->isPointerTy() ||
            (LoadI && LoadI->hasOneUse())) &&
           "Can not replace LoadInst with multiple uses");
#endif
    // Need to insert a BitCast.
    // For an integer Load instruction with a single use, the load + IntToPtr
    // cast will be simplified into a pointer load:
    // %v = load i64, i64* %a.ip, align 8
    // %v.cast = inttoptr i64 %v to float **
    // ==>
    // %v.ptrp = bitcast i64 * %a.ip to float **
    // %v.cast = load float *, float ** %v.ptrp, align 8
    Instruction *&CI = Casts[IncomingVal];
    if (!CI) {
      CI = CastInst::CreateBitOrPointerCast(IncomingVal, IntToPtr->getType(),
                                            IncomingVal->getName() + ".ptr");
      if (auto *IncomingI = dyn_cast<Instruction>(IncomingVal)) {
        BasicBlock::iterator InsertPos(IncomingI);
        InsertPos++;
        BasicBlock *BB = IncomingI->getParent();
        if (isa<PHINode>(IncomingI))
          InsertPos = BB->getFirstInsertionPt();
        assert(InsertPos != BB->end() && "should have checked above");
        InsertNewInstBefore(CI, InsertPos);
      } else {
        auto *InsertBB = &IncomingBB->getParent()->getEntryBlock();
        InsertNewInstBefore(CI, InsertBB->getFirstInsertionPt());
      }
    }
    NewPtrPHI->addIncoming(CI, IncomingBB);
  }

  // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
  // to make sure another transform can't undo it in the meantime.
  replaceInstUsesWith(*IntToPtr, NewPtrPHI);
  eraseInstFromFunction(*IntToPtr);
  eraseInstFromFunction(PN);
  return true;
}

// Remove RoundTrip IntToPtr/PtrToInt Cast on PHI-Operand and
// fold Phi-operand to bitcast.
Instruction *InstCombinerImpl::foldPHIArgIntToPtrToPHI(PHINode &PN) {
  // convert ptr2int ( phi[ int2ptr(ptr2int(x))] ) --> ptr2int ( phi [ x ] )
  // Make sure all uses of phi are ptr2int.
  if (!all_of(PN.users(), [](User *U) { return isa<PtrToIntInst>(U); }))
    return nullptr;

  // Iterating over all operands to check presence of target pointers for
  // optimization.
  bool OperandWithRoundTripCast = false;
  for (unsigned OpNum = 0; OpNum != PN.getNumIncomingValues(); ++OpNum) {
    if (auto *NewOp =
            simplifyIntToPtrRoundTripCast(PN.getIncomingValue(OpNum))) {
      replaceOperand(PN, OpNum, NewOp);
      OperandWithRoundTripCast = true;
    }
  }
  if (!OperandWithRoundTripCast)
    return nullptr;
  return &PN;
}

/// If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)],
/// turn this into a phi[a,c] and phi[b,d] and a single insertvalue.
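///
/// A sketch of the transform on IR (block and value names here are
/// illustrative, not taken from any test):
///   %iv1 = insertvalue { i32, i32 } %agg1, i32 %b, 0    ; in %bb1
///   %iv2 = insertvalue { i32, i32 } %agg2, i32 %d, 0    ; in %bb2
///   %phi = phi { i32, i32 } [ %iv1, %bb1 ], [ %iv2, %bb2 ]
/// ==>
///   %agg.pn = phi { i32, i32 } [ %agg1, %bb1 ], [ %agg2, %bb2 ]
///   %b.pn   = phi i32 [ %b, %bb1 ], [ %d, %bb2 ]
///   %phi    = insertvalue { i32, i32 } %agg.pn, i32 %b.pn, 0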
Instruction *
InstCombinerImpl::foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN) {
  auto *FirstIVI = cast<InsertValueInst>(PN.getIncomingValue(0));

  // Scan to see if all operands are `insertvalue`'s with the same indices,
  // and all have a single use.
  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = dyn_cast<InsertValueInst>(V);
    if (!I || !I->hasOneUser() || I->getIndices() != FirstIVI->getIndices())
      return nullptr;
  }

  // For each operand of an `insertvalue`
  std::array<PHINode *, 2> NewOperands;
  for (int OpIdx : {0, 1}) {
    auto *&NewOperand = NewOperands[OpIdx];
    // Create a new PHI node to receive the values the operand has in each
    // incoming basic block.
    NewOperand = PHINode::Create(
        FirstIVI->getOperand(OpIdx)->getType(), PN.getNumIncomingValues(),
        FirstIVI->getOperand(OpIdx)->getName() + ".pn");
    // And populate each operand's PHI with said values.
    for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
      NewOperand->addIncoming(
          cast<InsertValueInst>(std::get<1>(Incoming))->getOperand(OpIdx),
          std::get<0>(Incoming));
    InsertNewInstBefore(NewOperand, PN.getIterator());
  }

  // And finally, create `insertvalue` over the newly-formed PHI nodes.
  auto *NewIVI = InsertValueInst::Create(NewOperands[0], NewOperands[1],
                                         FirstIVI->getIndices(), PN.getName());
  PHIArgMergedDebugLoc(NewIVI, PN);
  ++NumPHIsOfInsertValues;
  return NewIVI;
}

/// If we have something like phi [extractvalue(a,0), extractvalue(b,0)],
/// turn this into a phi[a,b] and a single extractvalue.
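///
/// A sketch of the transform on IR (names are illustrative):
///   %ev1 = extractvalue { i32, i32 } %a, 0    ; in %bb1
///   %ev2 = extractvalue { i32, i32 } %b, 0    ; in %bb2
///   %phi = phi i32 [ %ev1, %bb1 ], [ %ev2, %bb2 ]
/// ==>
///   %a.pn = phi { i32, i32 } [ %a, %bb1 ], [ %b, %bb2 ]
///   %phi  = extractvalue { i32, i32 } %a.pn, 0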
Instruction *
InstCombinerImpl::foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN) {
  auto *FirstEVI = cast<ExtractValueInst>(PN.getIncomingValue(0));

  // Scan to see if all operands are `extractvalue`'s with the same indices,
  // and all have a single use.
  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = dyn_cast<ExtractValueInst>(V);
    if (!I || !I->hasOneUser() || I->getIndices() != FirstEVI->getIndices() ||
        I->getAggregateOperand()->getType() !=
            FirstEVI->getAggregateOperand()->getType())
      return nullptr;
  }

  // Create a new PHI node to receive the values the aggregate operand has
  // in each incoming basic block.
  auto *NewAggregateOperand = PHINode::Create(
      FirstEVI->getAggregateOperand()->getType(), PN.getNumIncomingValues(),
      FirstEVI->getAggregateOperand()->getName() + ".pn");
  // And populate the PHI with said values.
  for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
    NewAggregateOperand->addIncoming(
        cast<ExtractValueInst>(std::get<1>(Incoming))->getAggregateOperand(),
        std::get<0>(Incoming));
  InsertNewInstBefore(NewAggregateOperand, PN.getIterator());

  // And finally, create `extractvalue` over the newly-formed PHI node.
  auto *NewEVI = ExtractValueInst::Create(NewAggregateOperand,
                                          FirstEVI->getIndices(), PN.getName());
  PHIArgMergedDebugLoc(NewEVI, PN);
  ++NumPHIsOfExtractValues;
  return NewEVI;
}

/// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
/// adds all have a single user, turn this into a phi and a single binop.
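///
/// A sketch of the transform on IR (names are illustrative):
///   %add1 = add i32 %a, %b    ; in %bb1
///   %add2 = add i32 %a, %c    ; in %bb2
///   %phi  = phi i32 [ %add1, %bb1 ], [ %add2, %bb2 ]
/// ==>
///   %b.pn = phi i32 [ %b, %bb1 ], [ %c, %bb2 ]
///   %phi  = add i32 %a, %b.pn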
Instruction *InstCombinerImpl::foldPHIArgBinOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
  unsigned Opc = FirstInst->getOpcode();
  Value *LHSVal = FirstInst->getOperand(0);
  Value *RHSVal = FirstInst->getOperand(1);

  Type *LHSType = LHSVal->getType();
  Type *RHSType = RHSVal->getType();

  // Scan to see if all operands are the same opcode, and all have one user.
  for (Value *V : drop_begin(PN.incoming_values())) {
    Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getOpcode() != Opc || !I->hasOneUser() ||
        // Verify type of the LHS matches so we don't fold cmp's of different
        // types.
        I->getOperand(0)->getType() != LHSType ||
        I->getOperand(1)->getType() != RHSType)
      return nullptr;

    // If they are CmpInst instructions, check their predicates
    if (CmpInst *CI = dyn_cast<CmpInst>(I))
      if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
        return nullptr;

    // Keep track of which operand needs a phi node.
    if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
    if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
  }

  // If both LHS and RHS would need a PHI, don't do this transformation,
  // because it would increase the number of PHIs entering the block,
  // which leads to higher register pressure. This is especially
  // bad when the PHIs are in the header of a loop.
  if (!LHSVal && !RHSVal)
    return nullptr;

  // Otherwise, this is safe to transform!

  Value *InLHS = FirstInst->getOperand(0);
  Value *InRHS = FirstInst->getOperand(1);
  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
  if (!LHSVal) {
    NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(0)->getName() + ".pn");
    NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewLHS, PN.getIterator());
    LHSVal = NewLHS;
  }

  if (!RHSVal) {
    NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(1)->getName() + ".pn");
    NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewRHS, PN.getIterator());
    RHSVal = NewRHS;
  }

  // Add all operands to the new PHIs.
  if (NewLHS || NewRHS) {
    for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
      BasicBlock *InBB = std::get<0>(Incoming);
      Value *InVal = std::get<1>(Incoming);
      Instruction *InInst = cast<Instruction>(InVal);
      if (NewLHS) {
        Value *NewInLHS = InInst->getOperand(0);
        NewLHS->addIncoming(NewInLHS, InBB);
      }
      if (NewRHS) {
        Value *NewInRHS = InInst->getOperand(1);
        NewRHS->addIncoming(NewInRHS, InBB);
      }
    }
  }

  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
    CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                     LHSVal, RHSVal);
    PHIArgMergedDebugLoc(NewCI, PN);
    return NewCI;
  }

  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
  BinaryOperator *NewBinOp =
      BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);

  NewBinOp->copyIRFlags(PN.getIncomingValue(0));

  for (Value *V : drop_begin(PN.incoming_values()))
    NewBinOp->andIRFlags(V);

  PHIArgMergedDebugLoc(NewBinOp, PN);
  return NewBinOp;
}

Instruction *InstCombinerImpl::foldPHIArgGEPIntoPHI(PHINode &PN) {
  GetElementPtrInst *FirstInst =
      cast<GetElementPtrInst>(PN.getIncomingValue(0));

  SmallVector<Value *, 16> FixedOperands(FirstInst->op_begin(),
                                         FirstInst->op_end());
  // This is true if all GEP bases are allocas and if all indices into them are
  // constants.
  bool AllBasePointersAreAllocas = true;

  // We don't want to replace this phi if the replacement would require
  // more than one phi, which leads to higher register pressure. This is
  // especially bad when the PHIs are in the header of a loop.
  bool NeededPhi = false;

  bool AllInBounds = true;

  // Scan to see if all operands are the same opcode, and all have one user.
  for (Value *V : drop_begin(PN.incoming_values())) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V);
    if (!GEP || !GEP->hasOneUser() ||
        GEP->getSourceElementType() != FirstInst->getSourceElementType() ||
        GEP->getNumOperands() != FirstInst->getNumOperands())
      return nullptr;

    AllInBounds &= GEP->isInBounds();

    // Keep track of whether or not all GEPs are of alloca pointers.
    if (AllBasePointersAreAllocas &&
        (!isa<AllocaInst>(GEP->getOperand(0)) ||
         !GEP->hasAllConstantIndices()))
      AllBasePointersAreAllocas = false;

    // Compare the operand lists.
    for (unsigned Op = 0, E = FirstInst->getNumOperands(); Op != E; ++Op) {
      if (FirstInst->getOperand(Op) == GEP->getOperand(Op))
        continue;

      // Don't merge two GEPs when two operands differ (introducing phi nodes)
      // if one of the PHIs has a constant for the index. The index may be
      // substantially cheaper to compute for the constants, so making it a
      // variable index could pessimize the path. This also handles the case
      // for struct indices, which must always be constant.
      if (isa<ConstantInt>(FirstInst->getOperand(Op)) ||
          isa<ConstantInt>(GEP->getOperand(Op)))
        return nullptr;

      if (FirstInst->getOperand(Op)->getType() !=
          GEP->getOperand(Op)->getType())
        return nullptr;

      // If we already needed a PHI for an earlier operand, and another operand
      // also requires a PHI, we'd be introducing more PHIs than we're
      // eliminating, which increases register pressure on entry to the PHI's
      // block.
      if (NeededPhi)
        return nullptr;

      FixedOperands[Op] = nullptr; // Needs a PHI.
      NeededPhi = true;
    }
  }

  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
  // bother doing this transformation. At best, this will just save a bit of
  // offset calculation, but all the predecessors will have to materialize the
  // stack address into a register anyway. We'd actually rather *clone* the
  // load up into the predecessors so that we have a load of a gep of an alloca,
  // which can usually all be folded into the load.
  if (AllBasePointersAreAllocas)
    return nullptr;

  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
  // that is not the same for all incoming values.
  SmallVector<PHINode *, 16> OperandPhis(FixedOperands.size());

  bool HasAnyPHIs = false;
  for (unsigned I = 0, E = FixedOperands.size(); I != E; ++I) {
    if (FixedOperands[I])
      continue; // operand doesn't need a phi.
    Value *FirstOp = FirstInst->getOperand(I);
    PHINode *NewPN =
        PHINode::Create(FirstOp->getType(), E, FirstOp->getName() + ".pn");
    InsertNewInstBefore(NewPN, PN.getIterator());

    NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
    OperandPhis[I] = NewPN;
    FixedOperands[I] = NewPN;
    HasAnyPHIs = true;
  }

  // Add all operands to the new PHIs.
  if (HasAnyPHIs) {
    for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
      BasicBlock *InBB = std::get<0>(Incoming);
      Value *InVal = std::get<1>(Incoming);
      GetElementPtrInst *InGEP = cast<GetElementPtrInst>(InVal);

      for (unsigned Op = 0, E = OperandPhis.size(); Op != E; ++Op)
        if (PHINode *OpPhi = OperandPhis[Op])
          OpPhi->addIncoming(InGEP->getOperand(Op), InBB);
    }
  }

  Value *Base = FixedOperands[0];
  GetElementPtrInst *NewGEP =
      GetElementPtrInst::Create(FirstInst->getSourceElementType(), Base,
                                ArrayRef(FixedOperands).slice(1));
  if (AllInBounds) NewGEP->setIsInBounds();
  PHIArgMergedDebugLoc(NewGEP, PN);
  return NewGEP;
}

/// Return true if we know that it is safe to sink the load out of the block
/// that defines it. This means that it must be obvious the value of the load is
/// not changed from the point of the load to the end of the block it is in.
///
/// Finally, it is safe, but not profitable, to sink a load targeting a
/// non-address-taken alloca. Doing so will cause us to not promote the alloca
/// to a register.
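///
/// Illustrative IR for the unsafe case: the store below may clobber the
/// loaded location, so %v cannot be sunk past the end of its block:
///   %v = load i32, ptr %p
///   store i32 0, ptr %q        ; may alias %p
///   br label %merge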
static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
  BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();

  for (++BBI; BBI != E; ++BBI)
    if (BBI->mayWriteToMemory()) {
      // Calls that only access inaccessible memory do not block sinking the
      // load.
      if (auto *CB = dyn_cast<CallBase>(BBI))
        if (CB->onlyAccessesInaccessibleMemory())
          continue;
      return false;
    }

  // Check for non-address taken alloca. If not address-taken already, it isn't
  // profitable to do this xform.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
    bool IsAddressTaken = false;
    for (User *U : AI->users()) {
      if (isa<LoadInst>(U)) continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        // If storing TO the alloca, then the address isn't taken.
        if (SI->getOperand(1) == AI) continue;
      }
      IsAddressTaken = true;
      break;
    }

    if (!IsAddressTaken && AI->isStaticAlloca())
      return false;
  }

  // If this load is a load from a GEP with a constant offset from an alloca,
  // then we don't want to sink it. In its present form, it will be
  // load [constant stack offset]. Sinking it will cause us to have to
  // materialize the stack addresses in each predecessor in a register only to
  // do a shared load from register in the successor.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
    if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
      if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
        return false;

  return true;
}

Instruction *InstCombinerImpl::foldPHIArgLoadIntoPHI(PHINode &PN) {
  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));

  // Can't forward swifterror through a phi.
  if (FirstLI->getOperand(0)->isSwiftError())
    return nullptr;

  // FIXME: This is overconservative; this transform is allowed in some cases
  // for atomic operations.
  if (FirstLI->isAtomic())
    return nullptr;

  // When processing loads, we need to propagate two bits of information to the
  // sunk load: whether it is volatile, and what its alignment is.
  bool IsVolatile = FirstLI->isVolatile();
  Align LoadAlignment = FirstLI->getAlign();
  const unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();

  // We can't sink the load if the loaded value could be modified between the
  // load and the PHI.
  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
      !isSafeAndProfitableToSinkLoad(FirstLI))
    return nullptr;

  // If the PHI is of volatile loads and the load block has multiple
  // successors, sinking it would remove a load of the volatile value from
  // the path through the other successor.
  if (IsVolatile &&
      FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
    return nullptr;

  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
    BasicBlock *InBB = std::get<0>(Incoming);
    Value *InVal = std::get<1>(Incoming);
    LoadInst *LI = dyn_cast<LoadInst>(InVal);
    if (!LI || !LI->hasOneUser() || LI->isAtomic())
      return nullptr;

    // Make sure all arguments are the same type of operation.
    if (LI->isVolatile() != IsVolatile ||
        LI->getPointerAddressSpace() != LoadAddrSpace)
      return nullptr;

    // Can't forward swifterror through a phi.
    if (LI->getOperand(0)->isSwiftError())
      return nullptr;

    // We can't sink the load if the loaded value could be modified between
    // the load and the PHI.
    if (LI->getParent() != InBB || !isSafeAndProfitableToSinkLoad(LI))
      return nullptr;

    LoadAlignment = std::min(LoadAlignment, LI->getAlign());

    // If the PHI is of volatile loads and the load block has multiple
    // successors, sinking it would remove a load of the volatile value from
    // the path through the other successor.
    if (IsVolatile &&
        LI->getParent()->getTerminator()->getNumSuccessors() != 1)
      return nullptr;
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName() + ".in");

  Value *InVal = FirstLI->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
  LoadInst *NewLI =
      new LoadInst(FirstLI->getType(), NewPN, "", IsVolatile, LoadAlignment);

  unsigned KnownIDs[] = {
      LLVMContext::MD_tbaa,
      LLVMContext::MD_range,
      LLVMContext::MD_invariant_load,
      LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias,
      LLVMContext::MD_nonnull,
      LLVMContext::MD_align,
      LLVMContext::MD_dereferenceable,
      LLVMContext::MD_dereferenceable_or_null,
      LLVMContext::MD_access_group,
      LLVMContext::MD_noundef,
  };

  for (unsigned ID : KnownIDs)
    NewLI->setMetadata(ID, FirstLI->getMetadata(ID));

  // Add all operands to the new PHI and combine TBAA metadata.
  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
    BasicBlock *BB = std::get<0>(Incoming);
    Value *V = std::get<1>(Incoming);
    LoadInst *LI = cast<LoadInst>(V);
    combineMetadata(NewLI, LI, KnownIDs, true);
    Value *NewInVal = LI->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, BB);
  }

  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    NewLI->setOperand(0, InVal);
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN.getIterator());
  }

  // If this was a volatile load that we are merging, make sure to loop through
  // and mark all the input loads as non-volatile. If we don't do this, we will
  // insert a new volatile load and the old ones will not be deletable.
  if (IsVolatile)
    for (Value *IncValue : PN.incoming_values())
      cast<LoadInst>(IncValue)->setVolatile(false);

  PHIArgMergedDebugLoc(NewLI, PN);
  return NewLI;
}

/// TODO: This function could handle other cast types, but then it might
/// require special-casing a cast from the 'i1' type. See the comment in
/// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
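///
/// A sketch of the transform on IR (names are illustrative); with at least
/// two zexts and at least one losslessly truncatable constant:
///   %z1  = zext i8 %a to i32    ; in %bb1
///   %z2  = zext i8 %b to i32    ; in %bb2
///   %phi = phi i32 [ %z1, %bb1 ], [ %z2, %bb2 ], [ 7, %bb3 ]
/// ==>
///   %phi.shrunk = phi i8 [ %a, %bb1 ], [ %b, %bb2 ], [ 7, %bb3 ]
///   %res        = zext i8 %phi.shrunk to i32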
Instruction *InstCombinerImpl::foldPHIArgZextsIntoPHI(PHINode &Phi) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (Instruction *TI = Phi.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  // Early exit for the common case of a phi with two operands. These are
  // handled elsewhere. See the comment below where we check the count of zexts
  // and constants for more details.
  unsigned NumIncomingValues = Phi.getNumIncomingValues();
  if (NumIncomingValues < 3)
    return nullptr;

  // Find the narrower type specified by the first zext.
  Type *NarrowType = nullptr;
  for (Value *V : Phi.incoming_values()) {
    if (auto *Zext = dyn_cast<ZExtInst>(V)) {
      NarrowType = Zext->getSrcTy();
      break;
    }
  }
  if (!NarrowType)
    return nullptr;

  // Walk the phi operands checking that we only have zexts or constants that
  // we can shrink for free. Store the new operands for the new phi.
  SmallVector<Value *, 4> NewIncoming;
  unsigned NumZexts = 0;
  unsigned NumConsts = 0;
  for (Value *V : Phi.incoming_values()) {
    if (auto *Zext = dyn_cast<ZExtInst>(V)) {
      // All zexts must be identical and have one user.
      if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUser())
        return nullptr;
      NewIncoming.push_back(Zext->getOperand(0));
      NumZexts++;
    } else if (auto *C = dyn_cast<Constant>(V)) {
      // Make sure that constants can fit in the new type.
      Constant *Trunc = getLosslessUnsignedTrunc(C, NarrowType);
      if (!Trunc)
        return nullptr;
      NewIncoming.push_back(Trunc);
      NumConsts++;
    } else {
      // If it's not a cast or a constant, bail out.
      return nullptr;
    }
  }

  // The more common cases of a phi with no constant operands or just one
  // variable operand are handled by FoldPHIArgOpIntoPHI() and foldOpIntoPhi()
  // respectively. foldOpIntoPhi() wants to do the opposite transform that is
  // performed here. It tries to replicate a cast in the phi operand's basic
  // block to expose other folding opportunities. Thus, InstCombine will
  // infinite loop without this check.
  if (NumConsts == 0 || NumZexts < 2)
    return nullptr;

  // All incoming values are zexts or constants that are safe to truncate.
  // Create a new phi node of the narrow type, phi together all of the new
  // operands, and zext the result back to the original type.
  PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
                                    Phi.getName() + ".shrunk");
  for (unsigned I = 0; I != NumIncomingValues; ++I)
    NewPhi->addIncoming(NewIncoming[I], Phi.getIncomingBlock(I));

  InsertNewInstBefore(NewPhi, Phi.getIterator());
  return CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());
}

/// If all operands to a PHI node are the same "unary" operator and they all are
/// only used by the PHI, PHI together their inputs, and do the operation once,
/// to the result of the PHI.
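///
/// A sketch of the transform on IR (names are illustrative):
///   %t1  = trunc i64 %a to i32    ; in %bb1
///   %t2  = trunc i64 %b to i32    ; in %bb2
///   %phi = phi i32 [ %t1, %bb1 ], [ %t2, %bb2 ]
/// ==>
///   %a.pn = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
///   %phi  = trunc i64 %a.pn to i32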
Instruction *InstCombinerImpl::foldPHIArgOpIntoPHI(PHINode &PN) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (Instruction *TI = PN.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  if (isa<GetElementPtrInst>(FirstInst))
    return foldPHIArgGEPIntoPHI(PN);
  if (isa<LoadInst>(FirstInst))
    return foldPHIArgLoadIntoPHI(PN);
  if (isa<InsertValueInst>(FirstInst))
    return foldPHIArgInsertValueInstructionIntoPHI(PN);
  if (isa<ExtractValueInst>(FirstInst))
    return foldPHIArgExtractValueInstructionIntoPHI(PN);

  // Scan the instruction, looking for input operations that can be folded away.
  // If all input operands to the phi are the same instruction (e.g. a cast from
  // the same type or "+42") we can pull the operation through the PHI, reducing
  // code size and simplifying code.
  Constant *ConstantOp = nullptr;
  Type *CastSrcTy = nullptr;

  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();

    // Be careful about transforming integer PHIs. We don't want to pessimize
    // the code by turning an i32 into an i1293.
    if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
      if (!shouldChangeType(PN.getType(), CastSrcTy))
        return nullptr;
    }
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (!ConstantOp)
      return foldPHIArgBinOpIntoPHI(PN);
  } else {
    return nullptr; // Cannot fold this operation.
  }

  // Check to see if all arguments are the same operation.
  for (Value *V : drop_begin(PN.incoming_values())) {
    Instruction *I = dyn_cast<Instruction>(V);
    if (!I || !I->hasOneUser() || !I->isSameOperationAs(FirstInst))
      return nullptr;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return nullptr; // Cast operation must match.
    } else if (I->getOperand(1) != ConstantOp) {
      return nullptr;
    }
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName() + ".in");

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
    BasicBlock *BB = std::get<0>(Incoming);
    Value *V = std::get<1>(Incoming);
    Value *NewInVal = cast<Instruction>(V)->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, BB);
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN.getIterator());
    PhiVal = NewPN;
  }

  // Insert and return the new operation.
  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
    CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
                                       PN.getType());
    PHIArgMergedDebugLoc(NewCI, PN);
    return NewCI;
  }

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
    BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
    BinOp->copyIRFlags(PN.getIncomingValue(0));

    for (Value *V : drop_begin(PN.incoming_values()))
      BinOp->andIRFlags(V);

    PHIArgMergedDebugLoc(BinOp, PN);
    return BinOp;
  }

  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                   PhiVal, ConstantOp);
  PHIArgMergedDebugLoc(NewCI, PN);
  return NewCI;
}

/// Return true if this PHI node is only used by a PHI node cycle that is dead.
static bool isDeadPHICycle(PHINode *PN,
                           SmallPtrSetImpl<PHINode *> &PotentiallyDeadPHIs) {
  if (PN->use_empty()) return true;
  if (!PN->hasOneUse()) return false;

  // Remember this node, and if we find the cycle, return.
  if (!PotentiallyDeadPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (PotentiallyDeadPHIs.size() == 16)
    return false;

  if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
    return isDeadPHICycle(PU, PotentiallyDeadPHIs);

  return false;
}

/// Return true if this phi node is always equal to NonPhiInVal.
/// This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *&NonPhiInVal,
                           SmallPtrSetImpl<PHINode *> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (Value *Op : PN->incoming_values()) {
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs)) {
        return false;
      }
    } else if (!NonPhiInVal) {
      // No non-phi value seen yet; this operand determines it.
      NonPhiInVal = Op;
    } else if (Op != NonPhiInVal)
      return false;
  }

  return true;
}

/// Return an existing non-zero constant if this phi node has one, otherwise
/// return constant 1.
static ConstantInt *getAnyNonZeroConstInt(PHINode &PN) {
  assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
  for (Value *V : PN.operands())
    if (auto *ConstVA = dyn_cast<ConstantInt>(V))
      if (!ConstVA->isZero())
        return ConstVA;
  return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
}

namespace {
struct PHIUsageRecord {
  unsigned PHIId;     // The ID # of the PHI (something deterministic to sort on)
  unsigned Shift;     // The amount shifted.
  Instruction *Inst;  // The trunc instruction.

  PHIUsageRecord(unsigned Pn, unsigned Sh, Instruction *User)
      : PHIId(Pn), Shift(Sh), Inst(User) {}

  bool operator<(const PHIUsageRecord &RHS) const {
    if (PHIId < RHS.PHIId) return true;
    if (PHIId > RHS.PHIId) return false;
    if (Shift < RHS.Shift) return true;
    if (Shift > RHS.Shift) return false;
    return Inst->getType()->getPrimitiveSizeInBits() <
           RHS.Inst->getType()->getPrimitiveSizeInBits();
  }
};

struct LoweredPHIRecord {
  PHINode *PN;        // The PHI that was lowered.
  unsigned Shift;     // The amount shifted.
  unsigned Width;     // The width extracted.

  LoweredPHIRecord(PHINode *Phi, unsigned Sh, Type *Ty)
      : PN(Phi), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}

  // Ctor form used by DenseMap.
  LoweredPHIRecord(PHINode *Phi, unsigned Sh) : PN(Phi), Shift(Sh), Width(0) {}
};
} // namespace

namespace llvm {
template <>
struct DenseMapInfo<LoweredPHIRecord> {
  static inline LoweredPHIRecord getEmptyKey() {
    return LoweredPHIRecord(nullptr, 0);
  }
  static inline LoweredPHIRecord getTombstoneKey() {
    return LoweredPHIRecord(nullptr, 1);
  }
  static unsigned getHashValue(const LoweredPHIRecord &Val) {
    return DenseMapInfo<PHINode *>::getHashValue(Val.PN) ^ (Val.Shift >> 3) ^
           (Val.Width >> 3);
  }
  static bool isEqual(const LoweredPHIRecord &LHS,
                      const LoweredPHIRecord &RHS) {
    return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
           LHS.Width == RHS.Width;
  }
};
} // namespace llvm

/// This is an integer PHI and we know that it has an illegal type: see if it is
/// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
/// the various pieces being extracted. This sort of thing is introduced when
/// SROA promotes an aggregate to large integer values.
///
/// TODO: The user of the trunc may be a bitcast to float/double/vector or an
/// inttoptr. We should produce new PHIs in the right type.
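///
/// A sketch of the transform on IR (names are illustrative):
///   %phi    = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
///   %lo     = trunc i64 %phi to i32
///   %hi.shr = lshr i64 %phi, 32
///   %hi     = trunc i64 %hi.shr to i32
/// ==>
/// two i32 PHIs, fed by truncs (and lshr+trunc for the high half) inserted
/// into the predecessor blocks.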
Instruction *InstCombinerImpl::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
  // PHIUsers - Keep track of all of the truncated values extracted from a set
  // of PHIs, along with their offset. These are the things we want to rewrite.
  SmallVector<PHIUsageRecord, 16> PHIUsers;

  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
  // nodes which are extracted from. PHIsToSlice is a set we use to avoid
  // revisiting PHIs, PHIsInspected is a ordered list of PHIs that we need to
  // check the uses of (to ensure they are all extracts).
  SmallVector<PHINode *, 8> PHIsToSlice;
  SmallPtrSet<PHINode *, 8> PHIsInspected;

  PHIsToSlice.push_back(&FirstPhi);
  PHIsInspected.insert(&FirstPhi);

  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
    PHINode *PN = PHIsToSlice[PHIId];

    // Scan the input list of the PHI. If any input is an invoke, and if the
    // input is defined in the predecessor, then we won't be split the critical
    // edge which is required to insert a truncate. Because of this, we have to
    // bail out.
    for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
      BasicBlock *BB = std::get<0>(Incoming);
      Value *V = std::get<1>(Incoming);
      InvokeInst *II = dyn_cast<InvokeInst>(V);
      if (!II)
        continue;
      if (II->getParent() != BB)
        continue;

      // If we have a phi, and if it's directly in the predecessor, then we have
      // a critical edge where we need to put the truncate. Since we can't
      // split the edge in instcombine, we have to bail out.
      return nullptr;
    }

    // If the incoming value is a PHI node before a catchswitch, we cannot
    // extract the value within that BB because we cannot insert any non-PHI
    // instructions in the BB.
    for (auto *Pred : PN->blocks())
      if (Pred->getFirstInsertionPt() == Pred->end())
        return nullptr;

    for (User *U : PN->users()) {
      Instruction *UserI = cast<Instruction>(U);

      // If the user is a PHI, inspect its uses recursively.
      if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
        if (PHIsInspected.insert(UserPN).second)
          PHIsToSlice.push_back(UserPN);
        continue;
      }

      // Truncates are always ok.
      if (isa<TruncInst>(UserI)) {
        PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
        continue;
      }

      // Otherwise it must be a lshr which can only be used by one trunc.
      if (UserI->getOpcode() != Instruction::LShr ||
          !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
          !isa<ConstantInt>(UserI->getOperand(1)))
        return nullptr;

      // Bail on out of range shifts.
      unsigned SizeInBits = UserI->getType()->getScalarSizeInBits();
      if (cast<ConstantInt>(UserI->getOperand(1))->getValue().uge(SizeInBits))
        return nullptr;

      unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
      PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
    }
  }

  // If we have no users, they must be all self uses, just nuke the PHI.
  if (PHIUsers.empty())
    return replaceInstUsesWith(FirstPhi, PoisonValue::get(FirstPhi.getType()));

  // If this phi node is transformable, create new PHIs for all the pieces
  // extracted out of it. First, sort the users by their offset and size.
  array_pod_sort(PHIUsers.begin(), PHIUsers.end());

  LLVM_DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
             for (unsigned I = 1; I != PHIsToSlice.size(); ++I) dbgs()
             << "AND USER PHI #" << I << ": " << *PHIsToSlice[I] << '\n');

  // PredValues - This is a temporary used when rewriting PHI nodes. It is
  // hoisted out here to avoid construction/destruction thrashing.
  DenseMap<BasicBlock *, Value *> PredValues;

  // ExtractedVals - Each new PHI we introduce is saved here so we don't
  // introduce redundant PHIs.
  DenseMap<LoweredPHIRecord, PHINode *> ExtractedVals;

  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
    unsigned PHIId = PHIUsers[UserI].PHIId;
    PHINode *PN = PHIsToSlice[PHIId];
    unsigned Offset = PHIUsers[UserI].Shift;
    Type *Ty = PHIUsers[UserI].Inst->getType();

    PHINode *EltPHI;

    // If we've already lowered a user like this, reuse the previously lowered
    // value.
    if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {

      // Otherwise, Create the new PHI node for this user.
      EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
                               PN->getName() + ".off" + Twine(Offset), PN);
      assert(EltPHI->getType() != PN->getType() &&
             "Truncate didn't shrink phi?");

      for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
        BasicBlock *Pred = std::get<0>(Incoming);
        Value *InVal = std::get<1>(Incoming);
        Value *&PredVal = PredValues[Pred];

        // If we already have a value for this predecessor, reuse it.
        if (PredVal) {
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        // Handle the PHI self-reuse case.
        if (InVal == PN) {
          PredVal = EltPHI;
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
          // If the incoming value was a PHI, and if it was one of the PHIs we
          // already rewrote it, just use the lowered value.
          if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
            PredVal = Res;
            EltPHI->addIncoming(PredVal, Pred);
            continue;
          }
        }

        // Otherwise, do an extract in the predecessor.
        Builder.SetInsertPoint(Pred->getTerminator());
        Value *Res = InVal;
        if (Offset)
          Res = Builder.CreateLShr(
              Res, ConstantInt::get(InVal->getType(), Offset), "extract");
        Res = Builder.CreateTrunc(Res, Ty, "extract.t");
        PredVal = Res;
        EltPHI->addIncoming(Res, Pred);

        // If the incoming value was a PHI, and if it was one of the PHIs we are
        // rewriting, we will ultimately delete the code we inserted. This
        // means we need to revisit that PHI to make sure we extract out the
        // same bits from it.
        if (PHINode *OldInVal = dyn_cast<PHINode>(InVal))
          if (PHIsInspected.count(OldInVal)) {
            unsigned RefPHIId =
                find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
            PHIUsers.push_back(
                PHIUsageRecord(RefPHIId, Offset, cast<Instruction>(Res)));
            ++UserE;
          }
      }
      PredValues.clear();

      LLVM_DEBUG(dbgs() << "  Made element PHI for offset " << Offset << ": "
                        << *EltPHI << '\n');
      ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
    }

    // Replace the use of this piece with the PHI node.
    replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
  }

  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
  // with poison.
  Value *Poison = PoisonValue::get(FirstPhi.getType());
  for (PHINode *PHI : drop_begin(PHIsToSlice))
    replaceInstUsesWith(*PHI, Poison);
  return replaceInstUsesWith(FirstPhi, Poison);
}

static Value *simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN,
                                       const DominatorTree &DT) {
  // Simplify the following patterns:
  //       if (cond)
  //       /       \
  //      ...      ...
  //       \       /
  //    phi [true] [false]
  // and
  //        switch (cond)
  // case v1: /       \ case v2:
  //           ...    ...
  //            \    /
  //          phi [v1] [v2]
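  //
  // For illustration only (illustrative IR), the branch form
  //   br i1 %cond, label %bb1, label %bb2
  //   ...
  //   %phi = phi i1 [ true, %bb1 ], [ false, %bb2 ]
  // folds to %cond itself, or to a newly inserted not(%cond) when the
  // constants are swapped.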
  // Make sure all inputs are constants.
  if (!all_of(PN.operands(), [](Value *V) { return isa<ConstantInt>(V); }))
    return nullptr;

  BasicBlock *BB = PN.getParent();
  // Do not bother with unreachable instructions.
  if (!DT.isReachableFromEntry(BB))
    return nullptr;

  // Determine which value the condition of the idom has for which successor.
  LLVMContext &Context = PN.getContext();
  auto *IDom = DT.getNode(BB)->getIDom()->getBlock();
  Value *Cond;
  SmallDenseMap<ConstantInt *, BasicBlock *, 8> SuccForValue;
  SmallDenseMap<BasicBlock *, unsigned, 8> SuccCount;
  auto AddSucc = [&](ConstantInt *C, BasicBlock *Succ) {
    SuccForValue[C] = Succ;
    ++SuccCount[Succ];
  };
  if (auto *BI = dyn_cast<BranchInst>(IDom->getTerminator())) {
    if (BI->isUnconditional())
      return nullptr;

    Cond = BI->getCondition();
    AddSucc(ConstantInt::getTrue(Context), BI->getSuccessor(0));
    AddSucc(ConstantInt::getFalse(Context), BI->getSuccessor(1));
  } else if (auto *SI = dyn_cast<SwitchInst>(IDom->getTerminator())) {
    Cond = SI->getCondition();
    ++SuccCount[SI->getDefaultDest()];
    for (auto Case : SI->cases())
      AddSucc(Case.getCaseValue(), Case.getCaseSuccessor());
  } else {
    return nullptr;
  }

  if (Cond->getType() != PN.getType())
    return nullptr;

  // Check that edges outgoing from the idom's terminators dominate respective
  // inputs of the Phi.
  std::optional<bool> Invert;
  for (auto Pair : zip(PN.incoming_values(), PN.blocks())) {
    auto *Input = cast<ConstantInt>(std::get<0>(Pair));
    BasicBlock *Pred = std::get<1>(Pair);
    auto IsCorrectInput = [&](ConstantInt *Input) {
      // The input needs to be dominated by the corresponding edge of the idom.
      // This edge cannot be a multi-edge, as that would imply that multiple
      // different condition values follow the same edge.
      auto It = SuccForValue.find(Input);
      return It != SuccForValue.end() && SuccCount[It->second] == 1 &&
             DT.dominates(BasicBlockEdge(IDom, It->second),
                          BasicBlockEdge(Pred, BB));
    };

    // Depending on the constant, the condition may need to be inverted.
    bool NeedsInvert;
    if (IsCorrectInput(Input))
      NeedsInvert = false;
    else if (IsCorrectInput(cast<ConstantInt>(ConstantExpr::getNot(Input))))
      NeedsInvert = true;
    else
      return nullptr;

    // Make sure the inversion requirement is always the same.
    if (Invert && *Invert != NeedsInvert)
      return nullptr;

    Invert = NeedsInvert;
  }

  if (!*Invert)
    return Cond;

  // This Phi is actually opposite to branching condition of IDom. We invert
  // the condition that will potentially open up some opportunities for
  // sinking.
  auto InsertPt = BB->getFirstInsertionPt();
  if (InsertPt != BB->end()) {
    Self.Builder.SetInsertPoint(&*BB, InsertPt);
    return Self.Builder.CreateNot(Cond);
  }

  return nullptr;
}

//===----------------------------------------------------------------------===//
// PHINode simplification
//===----------------------------------------------------------------------===//
Instruction *InstCombinerImpl::visitPHINode(PHINode &PN) {
  if (Value *V = simplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
    return replaceInstUsesWith(PN, V);

  if (Instruction *Result = foldPHIArgZextsIntoPHI(PN))
    return Result;

  if (Instruction *Result = foldPHIArgIntToPtrToPHI(PN))
    return Result;

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
  auto *Inst0 = dyn_cast<Instruction>(PN.getIncomingValue(0));
  auto *Inst1 = dyn_cast<Instruction>(PN.getIncomingValue(1));
  if (Inst0 && Inst1 && Inst0->getOpcode() == Inst1->getOpcode() &&
      Inst0->hasOneUser())
    if (Instruction *Result = foldPHIArgOpIntoPHI(PN))
      return Result;

  // If the incoming values are pointer casts of the same original value,
  // replace the phi with a single cast iff we can insert a non-PHI instruction.
  if (PN.getType()->isPointerTy() &&
      PN.getParent()->getFirstInsertionPt() != PN.getParent()->end()) {
    Value *IV0 = PN.getIncomingValue(0);
    Value *IV0Stripped = IV0->stripPointerCasts();
    // Set to keep track of values known to be equal to IV0Stripped after
    // stripping pointer casts.
    SmallPtrSet<Value *, 4> CheckedIVs;
    CheckedIVs.insert(IV0);
    if (IV0 != IV0Stripped &&
        all_of(PN.incoming_values(), [&CheckedIVs, IV0Stripped](Value *IV) {
          return !CheckedIVs.insert(IV).second ||
                 IV0Stripped == IV->stripPointerCasts();
        }))
      return CastInst::CreatePointerCast(IV0Stripped, PN.getType());
  }

  // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
  // PHI)... break the cycle.
  if (PN.hasOneUse()) {
    if (foldIntegerTypedPHI(PN))
      return nullptr;

    Instruction *PHIUser = cast<Instruction>(PN.user_back());
    if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
      SmallPtrSet<PHINode *, 16> PotentiallyDeadPHIs;
      PotentiallyDeadPHIs.insert(&PN);
      if (isDeadPHICycle(PU, PotentiallyDeadPHIs))
        return replaceInstUsesWith(PN, PoisonValue::get(PN.getType()));
    }

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi. This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->user_back() == &PN) {
      return replaceInstUsesWith(PN, PoisonValue::get(PN.getType()));
    }
  }

  // When a PHI is used only to be compared with zero, it is safe to replace
  // an incoming value proved as known nonzero with any non-zero constant.
  // For example, in the code below, the incoming value %v can be replaced
  // with any non-zero constant based on the fact that the PHI is only used to
  // be compared with zero and %v is a known non-zero value:
  // %v = select %cond, 1, 2
  // %p = phi [%v, BB] ...
  // icmp eq, %p, 0
  // FIXME: To be simple, handle only integer type for now.
  // This handles a small number of uses to keep the complexity down, and an
  // icmp(or(phi)) can equally be replaced with any non-zero constant as the
  // "or" will only add bits.
  if (!PN.hasNUsesOrMore(3)) {
    bool AllUsesOfPhiEndsInCmp = all_of(PN.users(), [&PN](User *U) {
      auto *CmpInst = dyn_cast<ICmpInst>(U);
      if (!CmpInst) {
        // This is always correct as OR only adds bits and we are checking
        // against 0.
        if (U->hasOneUse() && match(U, m_c_Or(m_Specific(&PN), m_Value())))
          CmpInst = dyn_cast<ICmpInst>(U->user_back());
      }
      if (!CmpInst || !isa<IntegerType>(PN.getType()) ||
          !CmpInst->isEquality() || !match(CmpInst->getOperand(1), m_Zero())) {
        return false;
      }
      return true;
    });
    // All uses of the PHI result in a compare with zero.
    if (AllUsesOfPhiEndsInCmp) {
      ConstantInt *NonZeroConst = nullptr;
      bool MadeChange = false;
      for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
        Instruction *CtxI = PN.getIncomingBlock(I)->getTerminator();
        Value *VA = PN.getIncomingValue(I);
        if (isKnownNonZero(VA, DL, 0, &AC, CtxI, &DT)) {
          if (!NonZeroConst)
            NonZeroConst = getAnyNonZeroConstInt(PN);
          if (NonZeroConst != VA) {
            replaceOperand(PN, I, NonZeroConst);
            MadeChange = true;
          }
        }
      }
      if (MadeChange)
        return &PN;
    }
  }

  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block. Do a
  // quick check to see if the PHI node only contains a single non-phi value, if
  // so, scan to see if the phi cycle is actually equal to that value. If the
  // phi has no non-phi values then allow the "NonPhiInVal" to be set later if
  // one of the phis itself does not have a single input.
  {
    unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
    // Scan for the first non-phi operand.
    while (InValNo != NumIncomingVals &&
           isa<PHINode>(PN.getIncomingValue(InValNo)))
      ++InValNo;

    Value *NonPhiInVal =
        InValNo != NumIncomingVals ? PN.getIncomingValue(InValNo) : nullptr;

    // Scan the rest of the operands to see if there are any conflicts, if so
    // there is no need to recursively scan other phis.
    if (NonPhiInVal)
      for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
        Value *OpVal = PN.getIncomingValue(InValNo);
        if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
          break;
      }

    // If we scanned over all operands, then we have one unique value plus
    // phi values. Scan PHI nodes to see if they all merge in each other or
    // the value.
    if (InValNo == NumIncomingVals) {
      SmallPtrSet<PHINode *, 16> ValueEqualPHIs;
      if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
        return replaceInstUsesWith(PN, NonPhiInVal);
    }
  }

  // If there are multiple PHIs, sort their operands so that they all list
  // the blocks in the same order. This will help identical PHIs be eliminated
  // by other passes. Other passes shouldn't depend on this for correctness
  // however.
  auto Res = PredOrder.try_emplace(PN.getParent());
  if (!Res.second) {
    const auto &Preds = Res.first->second;
    for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
      BasicBlock *BBA = PN.getIncomingBlock(I);
      BasicBlock *BBB = Preds[I];
      if (BBA != BBB) {
        Value *VA = PN.getIncomingValue(I);
        unsigned J = PN.getBasicBlockIndex(BBB);
        Value *VB = PN.getIncomingValue(J);
        PN.setIncomingBlock(I, BBB);
        PN.setIncomingValue(I, VB);
        PN.setIncomingBlock(J, BBA);
        PN.setIncomingValue(J, VA);
        // NOTE: Instcombine normally would want us to "return &PN" if we
        // modified any of the operands of an instruction. However, since we
        // aren't adding or removing uses (just rearranging them) we don't do
        // this in this case.
      }
    }
  } else {
    // Remember the block order of the first encountered phi node.
    append_range(Res.first->second, PN.blocks());
  }

  // Is there an identical PHI node in this basic block?
  for (PHINode &IdenticalPN : PN.getParent()->phis()) {
    // Ignore the PHI node itself.
    if (&IdenticalPN == &PN)
      continue;
    // Note that even though we've just canonicalized this PHI, due to the
    // worklist visitation order, there are no guarantees that *every* PHI
    // has been canonicalized, so we can't just compare operands ranges.
    if (!PN.isIdenticalToWhenDefined(&IdenticalPN))
      continue;
    // Just use that PHI instead then.
    ++NumPHICSEs;
    return replaceInstUsesWith(PN, &IdenticalPN);
  }

  // If this is an integer PHI and we know that it has an illegal type, see if
  // it is only used by trunc or trunc(lshr) operations. If so, we split the
  // PHI into the various pieces being extracted. This sort of thing is
  // introduced when SROA promotes an aggregate to a single large integer type.
  if (PN.getType()->isIntegerTy() &&
      !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
    if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
      return Res;

  // Ultimately, try to replace this Phi with a dominating condition.
  if (auto *V = simplifyUsingControlFlow(*this, PN, DT))
    return replaceInstUsesWith(PN, V);

  return nullptr;
}