//===- CodeGenPrepare.cpp - Prepare a function for code generation -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"
STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumMemoryInstsPhiCreated,
          "Number of phis created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumMemoryInstsSelectCreated,
          "Number of select created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
    "addr-sink-using-gep", cl::Hidden, cl::init(true),
    cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<bool> ProfileUnknownInSpecialSection(
    "profile-unknown-in-special-section", cl::Hidden, cl::init(false),
    cl::desc("In profiling mode like sampleFDO, if a function doesn't have "
             "profile, we cannot tell the function is cold for sure because "
             "it may be a function newly added without ever being sampled. "
             "With the flag enabled, the compiler can put such profile-unknown "
             "functions into a special section, so the runtime system can "
             "choose to handle them differently from the .text section, to "
             "save RAM for example."));

static cl::opt<unsigned> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool> EnableTypePromotionMerge(
    "cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one is dominating"
             " the other."),
    cl::init(true));

static cl::opt<bool> DisableComplexAddrModes(
    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
    cl::desc("Disables combining addressing modes with different parts "
             "in optimizeMemoryInst."));

static cl::opt<bool>
    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
                    cl::desc("Allow creation of Phis in Address sinking."));

static cl::opt<bool>
    AddrSinkNewSelects("addr-sink-new-select", cl::Hidden, cl::init(true),
                       cl::desc("Allow creation of selects in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseReg(
    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseReg field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseGV(
    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseGV field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseOffs(
    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseOffs field in Address sinking."));

static cl::opt<bool> AddrSinkCombineScaledReg(
    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of ScaledReg field in Address sinking."));

static cl::opt<bool>
    EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
                         cl::init(true),
                         cl::desc("Enable splitting large offset of GEP."));

static cl::opt<bool> EnableICMP_EQToICMP_ST(
    "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
    cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));

static cl::opt<bool>
    VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
                     cl::desc("Enable BFI update verification for "
                              "CodeGenPrepare."));

static cl::opt<bool> OptimizePhiTypes(
    "cgp-optimize-phi-types", cl::Hidden, cl::init(false),
    cl::desc("Enable converting phi types in CodeGenPrepare"));
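// Illustrative usage only: these cl::opt flags can be passed to any tool that
// runs this pass, e.g. `llc -disable-cgp-select2branch foo.ll` or
// `llc -cgp-freq-ratio-to-skip-merge=4 foo.ll`.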
namespace {

enum ExtType {
  ZeroExtension, // Zero extension has been seen.
  SignExtension, // Sign extension has been seen.
  BothExtension  // This extension type is used if we saw sext after
                 // ZeroExtension had been set, or if we saw zext after
                 // SignExtension had been set. It makes the type
                 // information of a promoted instruction invalid.
};

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = DenseMap<Value *, SExts>;

class TypePromotionTransaction;
class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI;
  const TargetTransformInfo *TTI = nullptr;
  const TargetLibraryInfo *TLInfo;
  const LoopInfo *LI;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  ProfileSummaryInfo *PSI;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;
287 /// Keeps track of non-local addresses that have been sunk into a block.
288 /// This allows us to avoid inserting duplicate code for blocks with
289 /// multiple load/stores of the same address. The usage of WeakTrackingVH
290 /// enables SunkAddrs to be treated as a cache whose entries can be
291 /// invalidated if a sunken address computation has been erased.
292 ValueMap
<Value
*, WeakTrackingVH
> SunkAddrs
;
294 /// Keeps track of all instructions inserted for the current function.
295 SetOfInstrs InsertedInsts
;
297 /// Keeps track of the type of the related instruction before their
298 /// promotion for the current function.
299 InstrToOrigTy PromotedInsts
;
301 /// Keep track of instructions removed during promotion.
302 SetOfInstrs RemovedInsts
;
304 /// Keep track of sext chains based on their initial value.
305 DenseMap
<Value
*, Instruction
*> SeenChainsForSExt
;
307 /// Keep track of GEPs accessing the same data structures such as structs or
308 /// arrays that are candidates to be split later because of their large
312 SmallVector
<std::pair
<AssertingVH
<GetElementPtrInst
>, int64_t>, 32>>
315 /// Keep track of new GEP base after splitting the GEPs having large offset.
316 SmallSet
<AssertingVH
<Value
>, 2> NewGEPBases
;
318 /// Map serial numbers to Large offset GEPs.
319 DenseMap
<AssertingVH
<GetElementPtrInst
>, int> LargeOffsetGEPID
;
321 /// Keep track of SExt promoted.
322 ValueToSExts ValToSExtendedUses
;
324 /// True if the function has the OptSize attribute.
327 /// DataLayout for the Function being processed.
328 const DataLayout
*DL
= nullptr;
330 /// Building the dominator tree can be expensive, so we only build it
331 /// lazily and update it when required.
332 std::unique_ptr
<DominatorTree
> DT
;
335 static char ID
; // Pass identification, replacement for typeid
337 CodeGenPrepare() : FunctionPass(ID
) {
338 initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
341 bool runOnFunction(Function
&F
) override
;
343 StringRef
getPassName() const override
{ return "CodeGen Prepare"; }
345 void getAnalysisUsage(AnalysisUsage
&AU
) const override
{
346 // FIXME: When we can selectively preserve passes, preserve the domtree.
347 AU
.addRequired
<ProfileSummaryInfoWrapperPass
>();
348 AU
.addRequired
<TargetLibraryInfoWrapperPass
>();
349 AU
.addRequired
<TargetPassConfig
>();
350 AU
.addRequired
<TargetTransformInfoWrapperPass
>();
351 AU
.addRequired
<LoopInfoWrapperPass
>();
355 template <typename F
>
356 void resetIteratorIfInvalidatedWhileCalling(BasicBlock
*BB
, F f
) {
357 // Substituting can cause recursive simplifications, which can invalidate
358 // our iterator. Use a WeakTrackingVH to hold onto it in case this
360 Value
*CurValue
= &*CurInstIterator
;
361 WeakTrackingVH
IterHandle(CurValue
);
365 // If the iterator instruction was recursively deleted, start over at the
366 // start of the block.
367 if (IterHandle
!= CurValue
) {
368 CurInstIterator
= BB
->begin();
373 // Get the DominatorTree, building if necessary.
374 DominatorTree
&getDT(Function
&F
) {
376 DT
= std::make_unique
<DominatorTree
>(F
);
380 void removeAllAssertingVHReferences(Value
*V
);
381 bool eliminateAssumptions(Function
&F
);
382 bool eliminateFallThrough(Function
&F
);
383 bool eliminateMostlyEmptyBlocks(Function
&F
);
384 BasicBlock
*findDestBlockOfMergeableEmptyBlock(BasicBlock
*BB
);
385 bool canMergeBlocks(const BasicBlock
*BB
, const BasicBlock
*DestBB
) const;
386 void eliminateMostlyEmptyBlock(BasicBlock
*BB
);
387 bool isMergingEmptyBlockProfitable(BasicBlock
*BB
, BasicBlock
*DestBB
,
389 bool makeBitReverse(Instruction
&I
);
390 bool optimizeBlock(BasicBlock
&BB
, bool &ModifiedDT
);
391 bool optimizeInst(Instruction
*I
, bool &ModifiedDT
);
392 bool optimizeMemoryInst(Instruction
*MemoryInst
, Value
*Addr
,
393 Type
*AccessTy
, unsigned AddrSpace
);
394 bool optimizeGatherScatterInst(Instruction
*MemoryInst
, Value
*Ptr
);
395 bool optimizeInlineAsmInst(CallInst
*CS
);
396 bool optimizeCallInst(CallInst
*CI
, bool &ModifiedDT
);
397 bool optimizeExt(Instruction
*&I
);
398 bool optimizeExtUses(Instruction
*I
);
399 bool optimizeLoadExt(LoadInst
*Load
);
400 bool optimizeShiftInst(BinaryOperator
*BO
);
401 bool optimizeFunnelShift(IntrinsicInst
*Fsh
);
402 bool optimizeSelectInst(SelectInst
*SI
);
403 bool optimizeShuffleVectorInst(ShuffleVectorInst
*SVI
);
404 bool optimizeSwitchInst(SwitchInst
*SI
);
405 bool optimizeExtractElementInst(Instruction
*Inst
);
406 bool dupRetToEnableTailCallOpts(BasicBlock
*BB
, bool &ModifiedDT
);
407 bool fixupDbgValue(Instruction
*I
);
408 bool placeDbgValues(Function
&F
);
409 bool placePseudoProbes(Function
&F
);
410 bool canFormExtLd(const SmallVectorImpl
<Instruction
*> &MovedExts
,
411 LoadInst
*&LI
, Instruction
*&Inst
, bool HasPromoted
);
412 bool tryToPromoteExts(TypePromotionTransaction
&TPT
,
413 const SmallVectorImpl
<Instruction
*> &Exts
,
414 SmallVectorImpl
<Instruction
*> &ProfitablyMovedExts
,
415 unsigned CreatedInstsCost
= 0);
416 bool mergeSExts(Function
&F
);
417 bool splitLargeGEPOffsets();
418 bool optimizePhiType(PHINode
*Inst
, SmallPtrSetImpl
<PHINode
*> &Visited
,
419 SmallPtrSetImpl
<Instruction
*> &DeletedInstrs
);
420 bool optimizePhiTypes(Function
&F
);
421 bool performAddressTypePromotion(
423 bool AllowPromotionWithoutCommonHeader
,
424 bool HasPromoted
, TypePromotionTransaction
&TPT
,
425 SmallVectorImpl
<Instruction
*> &SpeculativelyMovedExts
);
426 bool splitBranchCondition(Function
&F
, bool &ModifiedDT
);
427 bool simplifyOffsetableRelocate(GCStatepointInst
&I
);
429 bool tryToSinkFreeOperands(Instruction
*I
);
430 bool replaceMathCmpWithIntrinsic(BinaryOperator
*BO
, Value
*Arg0
,
431 Value
*Arg1
, CmpInst
*Cmp
,
433 bool optimizeCmp(CmpInst
*Cmp
, bool &ModifiedDT
);
434 bool combineToUSubWithOverflow(CmpInst
*Cmp
, bool &ModifiedDT
);
435 bool combineToUAddWithOverflow(CmpInst
*Cmp
, bool &ModifiedDT
);
436 void verifyBFIUpdates(Function
&F
);
439 } // end anonymous namespace
char CodeGenPrepare::ID = 0;

INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }
455 bool CodeGenPrepare::runOnFunction(Function
&F
) {
459 DL
= &F
.getParent()->getDataLayout();
461 bool EverMadeChange
= false;
462 // Clear per function information.
463 InsertedInsts
.clear();
464 PromotedInsts
.clear();
466 TM
= &getAnalysis
<TargetPassConfig
>().getTM
<TargetMachine
>();
467 SubtargetInfo
= TM
->getSubtargetImpl(F
);
468 TLI
= SubtargetInfo
->getTargetLowering();
469 TRI
= SubtargetInfo
->getRegisterInfo();
470 TLInfo
= &getAnalysis
<TargetLibraryInfoWrapperPass
>().getTLI(F
);
471 TTI
= &getAnalysis
<TargetTransformInfoWrapperPass
>().getTTI(F
);
472 LI
= &getAnalysis
<LoopInfoWrapperPass
>().getLoopInfo();
473 BPI
.reset(new BranchProbabilityInfo(F
, *LI
));
474 BFI
.reset(new BlockFrequencyInfo(F
, *BPI
, *LI
));
475 PSI
= &getAnalysis
<ProfileSummaryInfoWrapperPass
>().getPSI();
476 OptSize
= F
.hasOptSize();
477 if (ProfileGuidedSectionPrefix
) {
// The hot attribute overwrites profile count based hotness while profile
// counts based hotness overwrites the cold attribute.
// This is a conservative behavior.
481 if (F
.hasFnAttribute(Attribute::Hot
) ||
482 PSI
->isFunctionHotInCallGraph(&F
, *BFI
))
483 F
.setSectionPrefix("hot");
// If PSI shows this function is not hot, we will place the function
// into the unlikely section if (1) PSI shows this is a cold function, or
// (2) the function has the cold attribute.
487 else if (PSI
->isFunctionColdInCallGraph(&F
, *BFI
) ||
488 F
.hasFnAttribute(Attribute::Cold
))
489 F
.setSectionPrefix("unlikely");
490 else if (ProfileUnknownInSpecialSection
&& PSI
->hasPartialSampleProfile() &&
491 PSI
->isFunctionHotnessUnknown(F
))
492 F
.setSectionPrefix("unknown");
495 /// This optimization identifies DIV instructions that can be
496 /// profitably bypassed and carried out with a shorter, faster divide.
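// Illustrative sketch (not taken from this file): on a target that reports
// 64-bit division as slow with a 32-bit bypass width, bypassSlowDivision may
// rewrite
//   %q = udiv i64 %a, %b
// into a runtime check that both operands fit in 32 bits, taking a 32-bit
// divide on the fast path and the original 64-bit divide otherwise.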
497 if (!OptSize
&& !PSI
->hasHugeWorkingSetSize() && TLI
->isSlowDivBypassed()) {
498 const DenseMap
<unsigned int, unsigned int> &BypassWidths
=
499 TLI
->getBypassSlowDivWidths();
500 BasicBlock
* BB
= &*F
.begin();
501 while (BB
!= nullptr) {
502 // bypassSlowDivision may create new BBs, but we don't want to reapply the
503 // optimization to those blocks.
504 BasicBlock
* Next
= BB
->getNextNode();
505 // F.hasOptSize is already checked in the outer if statement.
506 if (!llvm::shouldOptimizeForSize(BB
, PSI
, BFI
.get()))
507 EverMadeChange
|= bypassSlowDivision(BB
, BypassWidths
);
512 // Get rid of @llvm.assume builtins before attempting to eliminate empty
513 // blocks, since there might be blocks that only contain @llvm.assume calls
514 // (plus arguments that we can get rid of).
515 EverMadeChange
|= eliminateAssumptions(F
);
517 // Eliminate blocks that contain only PHI nodes and an
518 // unconditional branch.
519 EverMadeChange
|= eliminateMostlyEmptyBlocks(F
);
521 bool ModifiedDT
= false;
522 if (!DisableBranchOpts
)
523 EverMadeChange
|= splitBranchCondition(F
, ModifiedDT
);
525 // Split some critical edges where one of the sources is an indirect branch,
526 // to help generate sane code for PHIs involving such edges.
527 EverMadeChange
|= SplitIndirectBrCriticalEdges(F
);
529 bool MadeChange
= true;
533 for (Function::iterator I
= F
.begin(); I
!= F
.end(); ) {
534 BasicBlock
*BB
= &*I
++;
535 bool ModifiedDTOnIteration
= false;
536 MadeChange
|= optimizeBlock(*BB
, ModifiedDTOnIteration
);
538 // Restart BB iteration if the dominator tree of the Function was changed
539 if (ModifiedDTOnIteration
)
542 if (EnableTypePromotionMerge
&& !ValToSExtendedUses
.empty())
543 MadeChange
|= mergeSExts(F
);
544 if (!LargeOffsetGEPMap
.empty())
545 MadeChange
|= splitLargeGEPOffsets();
546 MadeChange
|= optimizePhiTypes(F
);
549 eliminateFallThrough(F
);
551 // Really free removed instructions during promotion.
552 for (Instruction
*I
: RemovedInsts
)
555 EverMadeChange
|= MadeChange
;
556 SeenChainsForSExt
.clear();
557 ValToSExtendedUses
.clear();
558 RemovedInsts
.clear();
559 LargeOffsetGEPMap
.clear();
560 LargeOffsetGEPID
.clear();
566 if (!DisableBranchOpts
) {
568 // Use a set vector to get deterministic iteration order. The order the
569 // blocks are removed may affect whether or not PHI nodes in successors
571 SmallSetVector
<BasicBlock
*, 8> WorkList
;
572 for (BasicBlock
&BB
: F
) {
573 SmallVector
<BasicBlock
*, 2> Successors(successors(&BB
));
574 MadeChange
|= ConstantFoldTerminator(&BB
, true);
575 if (!MadeChange
) continue;
577 for (BasicBlock
*Succ
: Successors
)
578 if (pred_empty(Succ
))
579 WorkList
.insert(Succ
);
582 // Delete the dead blocks and any of their dead successors.
583 MadeChange
|= !WorkList
.empty();
584 while (!WorkList
.empty()) {
585 BasicBlock
*BB
= WorkList
.pop_back_val();
586 SmallVector
<BasicBlock
*, 2> Successors(successors(BB
));
590 for (BasicBlock
*Succ
: Successors
)
591 if (pred_empty(Succ
))
592 WorkList
.insert(Succ
);
595 // Merge pairs of basic blocks with unconditional branches, connected by
597 if (EverMadeChange
|| MadeChange
)
598 MadeChange
|= eliminateFallThrough(F
);
600 EverMadeChange
|= MadeChange
;
603 if (!DisableGCOpts
) {
604 SmallVector
<GCStatepointInst
*, 2> Statepoints
;
605 for (BasicBlock
&BB
: F
)
606 for (Instruction
&I
: BB
)
607 if (auto *SP
= dyn_cast
<GCStatepointInst
>(&I
))
608 Statepoints
.push_back(SP
);
609 for (auto &I
: Statepoints
)
610 EverMadeChange
|= simplifyOffsetableRelocate(*I
);
613 // Do this last to clean up use-before-def scenarios introduced by other
614 // preparatory transforms.
615 EverMadeChange
|= placeDbgValues(F
);
616 EverMadeChange
|= placePseudoProbes(F
);
619 if (VerifyBFIUpdates
)
623 return EverMadeChange
;
bool CodeGenPrepare::eliminateAssumptions(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    CurInstIterator = BB.begin();
    while (CurInstIterator != BB.end()) {
      Instruction *I = &*(CurInstIterator++);
      if (auto *Assume = dyn_cast<AssumeInst>(I)) {
        MadeChange = true;
        Value *Operand = Assume->getOperand(0);
        Assume->eraseFromParent();

        resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
          RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
        });
      }
    }
  }
  return MadeChange;
}
/// An instruction is about to be deleted, so remove all references to it in our
/// GEP-tracking data structures.
void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
  LargeOffsetGEPMap.erase(V);
  NewGEPBases.erase(V);

  auto GEP = dyn_cast<GetElementPtrInst>(V);
  if (!GEP)
    return;

  LargeOffsetGEPID.erase(GEP);

  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
  if (VecI == LargeOffsetGEPMap.end())
    return;

  auto &GEPVector = VecI->second;
  const auto &I =
      llvm::find_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
  if (I == GEPVector.end())
    return;

  GEPVector.erase(I);
  if (GEPVector.empty())
    LargeOffsetGEPMap.erase(VecI);
}
// Verify BFI has been updated correctly by recomputing BFI and comparing them.
void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
  DominatorTree NewDT(F);
  LoopInfo NewLI(NewDT);
  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
  NewBFI.verifyMatch(*BFI);
}
682 /// Merge basic blocks which are connected by a single edge, where one of the
683 /// basic blocks has a single successor pointing to the other basic block,
684 /// which has a single predecessor.
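///
/// Illustrative shape (assumed, not from this file): a block A ending in
/// `br label %B`, where %B has no other predecessors, gets %B's instructions
/// folded into A, after which %B is removed.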
685 bool CodeGenPrepare::eliminateFallThrough(Function
&F
) {
686 bool Changed
= false;
687 // Scan all of the blocks in the function, except for the entry block.
688 // Use a temporary array to avoid iterator being invalidated when
690 SmallVector
<WeakTrackingVH
, 16> Blocks
;
691 for (auto &Block
: llvm::drop_begin(F
))
692 Blocks
.push_back(&Block
);
694 SmallSet
<WeakTrackingVH
, 16> Preds
;
695 for (auto &Block
: Blocks
) {
696 auto *BB
= cast_or_null
<BasicBlock
>(Block
);
699 // If the destination block has a single pred, then this is a trivial
700 // edge, just collapse it.
701 BasicBlock
*SinglePred
= BB
->getSinglePredecessor();
703 // Don't merge if BB's address is taken.
704 if (!SinglePred
|| SinglePred
== BB
|| BB
->hasAddressTaken()) continue;
706 BranchInst
*Term
= dyn_cast
<BranchInst
>(SinglePred
->getTerminator());
707 if (Term
&& !Term
->isConditional()) {
709 LLVM_DEBUG(dbgs() << "To merge:\n" << *BB
<< "\n\n\n");
711 // Merge BB into SinglePred and delete it.
712 MergeBlockIntoPredecessor(BB
);
713 Preds
.insert(SinglePred
);
717 // (Repeatedly) merging blocks into their predecessors can create redundant
719 for (auto &Pred
: Preds
)
720 if (auto *BB
= cast_or_null
<BasicBlock
>(Pred
))
721 RemoveRedundantDbgInstrs(BB
);
726 /// Find a destination block from BB if BB is mergeable empty block.
727 BasicBlock
*CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock
*BB
) {
728 // If this block doesn't end with an uncond branch, ignore it.
729 BranchInst
*BI
= dyn_cast
<BranchInst
>(BB
->getTerminator());
730 if (!BI
|| !BI
->isUnconditional())
733 // If the instruction before the branch (skipping debug info) isn't a phi
734 // node, then other stuff is happening here.
735 BasicBlock::iterator BBI
= BI
->getIterator();
736 if (BBI
!= BB
->begin()) {
738 while (isa
<DbgInfoIntrinsic
>(BBI
)) {
739 if (BBI
== BB
->begin())
743 if (!isa
<DbgInfoIntrinsic
>(BBI
) && !isa
<PHINode
>(BBI
))
747 // Do not break infinite loops.
748 BasicBlock
*DestBB
= BI
->getSuccessor(0);
752 if (!canMergeBlocks(BB
, DestBB
))
758 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an
759 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
760 /// edges in ways that are non-optimal for isel. Start by eliminating these
761 /// blocks so we can split them the way we want them.
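///
/// A typical candidate (illustrative) looks like:
///   bb:
///     %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
///     br label %dest
/// and is folded away by rewriting the PHIs in %dest.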
762 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function
&F
) {
763 SmallPtrSet
<BasicBlock
*, 16> Preheaders
;
764 SmallVector
<Loop
*, 16> LoopList(LI
->begin(), LI
->end());
765 while (!LoopList
.empty()) {
766 Loop
*L
= LoopList
.pop_back_val();
767 llvm::append_range(LoopList
, *L
);
768 if (BasicBlock
*Preheader
= L
->getLoopPreheader())
769 Preheaders
.insert(Preheader
);
772 bool MadeChange
= false;
773 // Copy blocks into a temporary array to avoid iterator invalidation issues
774 // as we remove them.
775 // Note that this intentionally skips the entry block.
776 SmallVector
<WeakTrackingVH
, 16> Blocks
;
777 for (auto &Block
: llvm::drop_begin(F
))
778 Blocks
.push_back(&Block
);
780 for (auto &Block
: Blocks
) {
781 BasicBlock
*BB
= cast_or_null
<BasicBlock
>(Block
);
784 BasicBlock
*DestBB
= findDestBlockOfMergeableEmptyBlock(BB
);
786 !isMergingEmptyBlockProfitable(BB
, DestBB
, Preheaders
.count(BB
)))
789 eliminateMostlyEmptyBlock(BB
);
795 bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock
*BB
,
798 // Do not delete loop preheaders if doing so would create a critical edge.
799 // Loop preheaders can be good locations to spill registers. If the
800 // preheader is deleted and we create a critical edge, registers may be
801 // spilled in the loop body instead.
802 if (!DisablePreheaderProtect
&& isPreheader
&&
803 !(BB
->getSinglePredecessor() &&
804 BB
->getSinglePredecessor()->getSingleSuccessor()))
807 // Skip merging if the block's successor is also a successor to any callbr
808 // that leads to this block.
809 // FIXME: Is this really needed? Is this a correctness issue?
810 for (BasicBlock
*Pred
: predecessors(BB
)) {
811 if (auto *CBI
= dyn_cast
<CallBrInst
>((Pred
)->getTerminator()))
812 for (unsigned i
= 0, e
= CBI
->getNumSuccessors(); i
!= e
; ++i
)
813 if (DestBB
== CBI
->getSuccessor(i
))
// Try to skip merging if the unique predecessor of BB is terminated by a
// switch or indirect branch instruction, and BB is used as an incoming block
// of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
// add COPY instructions in the predecessor of BB instead of BB (if it is not
// merged). Note that the critical edge created by merging such blocks won't be
// split in MachineSink because the jump table is not analyzable. By keeping
// such empty block (BB), ISel will place COPY instructions in BB, not in the
// predecessor of BB.
825 BasicBlock
*Pred
= BB
->getUniquePredecessor();
827 !(isa
<SwitchInst
>(Pred
->getTerminator()) ||
828 isa
<IndirectBrInst
>(Pred
->getTerminator())))
831 if (BB
->getTerminator() != BB
->getFirstNonPHIOrDbg())
// We use a simple cost heuristic which determines that skipping merging is
// profitable if the cost of skipping merging is less than the cost of
// merging: Cost(skipping merging) < Cost(merging BB), where
// Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
// Cost(merging BB) is Freq(Pred) * Cost(Copy).
// Assuming Cost(Copy) == Cost(Branch), we can simplify this to:
// Freq(Pred) / Freq(BB) > 2.
// Note that if there are multiple empty blocks sharing the same incoming
// value for the PHIs in the DestBB, we consider them together. In such
// case, Cost(merging BB) will be the sum of their frequencies.
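//
// Worked example with made-up numbers: if Freq(Pred) = 600 and Freq(BB) = 100,
// then Freq(Pred) / Freq(BB) = 6 > FreqRatioToSkipMerge (default 2), so we
// skip merging; executing the COPY 600 times in Pred would cost more than
// taking the extra branch 100 times in BB.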
845 if (!isa
<PHINode
>(DestBB
->begin()))
848 SmallPtrSet
<BasicBlock
*, 16> SameIncomingValueBBs
;
850 // Find all other incoming blocks from which incoming values of all PHIs in
851 // DestBB are the same as the ones from BB.
852 for (BasicBlock
*DestBBPred
: predecessors(DestBB
)) {
853 if (DestBBPred
== BB
)
856 if (llvm::all_of(DestBB
->phis(), [&](const PHINode
&DestPN
) {
857 return DestPN
.getIncomingValueForBlock(BB
) ==
858 DestPN
.getIncomingValueForBlock(DestBBPred
);
860 SameIncomingValueBBs
.insert(DestBBPred
);
// See if all BB's incoming values are the same as the value from Pred. In this
// case, there is no reason to skip merging because COPYs are expected to be
// placed in Pred already.
866 if (SameIncomingValueBBs
.count(Pred
))
869 BlockFrequency PredFreq
= BFI
->getBlockFreq(Pred
);
870 BlockFrequency BBFreq
= BFI
->getBlockFreq(BB
);
872 for (auto *SameValueBB
: SameIncomingValueBBs
)
873 if (SameValueBB
->getUniquePredecessor() == Pred
&&
874 DestBB
== findDestBlockOfMergeableEmptyBlock(SameValueBB
))
875 BBFreq
+= BFI
->getBlockFreq(SameValueBB
);
877 return PredFreq
.getFrequency() <=
878 BBFreq
.getFrequency() * FreqRatioToSkipMerge
;
881 /// Return true if we can merge BB into DestBB if there is a single
882 /// unconditional branch between them, and BB contains no other non-phi
884 bool CodeGenPrepare::canMergeBlocks(const BasicBlock
*BB
,
885 const BasicBlock
*DestBB
) const {
886 // We only want to eliminate blocks whose phi nodes are used by phi nodes in
887 // the successor. If there are more complex condition (e.g. preheaders),
888 // don't mess around with them.
889 for (const PHINode
&PN
: BB
->phis()) {
890 for (const User
*U
: PN
.users()) {
891 const Instruction
*UI
= cast
<Instruction
>(U
);
892 if (UI
->getParent() != DestBB
|| !isa
<PHINode
>(UI
))
894 // If User is inside DestBB block and it is a PHINode then check
895 // incoming value. If incoming value is not from BB then this is
896 // a complex condition (e.g. preheaders) we want to avoid here.
897 if (UI
->getParent() == DestBB
) {
898 if (const PHINode
*UPN
= dyn_cast
<PHINode
>(UI
))
899 for (unsigned I
= 0, E
= UPN
->getNumIncomingValues(); I
!= E
; ++I
) {
900 Instruction
*Insn
= dyn_cast
<Instruction
>(UPN
->getIncomingValue(I
));
901 if (Insn
&& Insn
->getParent() == BB
&&
902 Insn
->getParent() != UPN
->getIncomingBlock(I
))
909 // If BB and DestBB contain any common predecessors, then the phi nodes in BB
910 // and DestBB may have conflicting incoming values for the block. If so, we
911 // can't merge the block.
912 const PHINode
*DestBBPN
= dyn_cast
<PHINode
>(DestBB
->begin());
913 if (!DestBBPN
) return true; // no conflict.
915 // Collect the preds of BB.
916 SmallPtrSet
<const BasicBlock
*, 16> BBPreds
;
917 if (const PHINode
*BBPN
= dyn_cast
<PHINode
>(BB
->begin())) {
918 // It is faster to get preds from a PHI than with pred_iterator.
919 for (unsigned i
= 0, e
= BBPN
->getNumIncomingValues(); i
!= e
; ++i
)
920 BBPreds
.insert(BBPN
->getIncomingBlock(i
));
922 BBPreds
.insert(pred_begin(BB
), pred_end(BB
));
925 // Walk the preds of DestBB.
926 for (unsigned i
= 0, e
= DestBBPN
->getNumIncomingValues(); i
!= e
; ++i
) {
927 BasicBlock
*Pred
= DestBBPN
->getIncomingBlock(i
);
928 if (BBPreds
.count(Pred
)) { // Common predecessor?
929 for (const PHINode
&PN
: DestBB
->phis()) {
930 const Value
*V1
= PN
.getIncomingValueForBlock(Pred
);
931 const Value
*V2
= PN
.getIncomingValueForBlock(BB
);
933 // If V2 is a phi node in BB, look up what the mapped value will be.
934 if (const PHINode
*V2PN
= dyn_cast
<PHINode
>(V2
))
935 if (V2PN
->getParent() == BB
)
936 V2
= V2PN
->getIncomingValueForBlock(Pred
);
938 // If there is a conflict, bail out.
939 if (V1
!= V2
) return false;
947 /// Eliminate a basic block that has only phi's and an unconditional branch in
949 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock
*BB
) {
950 BranchInst
*BI
= cast
<BranchInst
>(BB
->getTerminator());
951 BasicBlock
*DestBB
= BI
->getSuccessor(0);
953 LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
956 // If the destination block has a single pred, then this is a trivial edge,
958 if (BasicBlock
*SinglePred
= DestBB
->getSinglePredecessor()) {
959 if (SinglePred
!= DestBB
) {
960 assert(SinglePred
== BB
&&
961 "Single predecessor not the same as predecessor");
962 // Merge DestBB into SinglePred/BB and delete it.
963 MergeBlockIntoPredecessor(DestBB
);
964 // Note: BB(=SinglePred) will not be deleted on this path.
965 // DestBB(=its single successor) is the one that was deleted.
966 LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred
<< "\n\n\n");
971 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
972 // to handle the new incoming edges it is about to have.
973 for (PHINode
&PN
: DestBB
->phis()) {
974 // Remove the incoming value for BB, and remember it.
975 Value
*InVal
= PN
.removeIncomingValue(BB
, false);
977 // Two options: either the InVal is a phi node defined in BB or it is some
978 // value that dominates BB.
979 PHINode
*InValPhi
= dyn_cast
<PHINode
>(InVal
);
980 if (InValPhi
&& InValPhi
->getParent() == BB
) {
981 // Add all of the input values of the input PHI as inputs of this phi.
982 for (unsigned i
= 0, e
= InValPhi
->getNumIncomingValues(); i
!= e
; ++i
)
983 PN
.addIncoming(InValPhi
->getIncomingValue(i
),
984 InValPhi
->getIncomingBlock(i
));
986 // Otherwise, add one instance of the dominating value for each edge that
987 // we will be adding.
988 if (PHINode
*BBPN
= dyn_cast
<PHINode
>(BB
->begin())) {
989 for (unsigned i
= 0, e
= BBPN
->getNumIncomingValues(); i
!= e
; ++i
)
990 PN
.addIncoming(InVal
, BBPN
->getIncomingBlock(i
));
992 for (BasicBlock
*Pred
: predecessors(BB
))
993 PN
.addIncoming(InVal
, Pred
);
998 // The PHIs are now updated, change everything that refers to BB to use
999 // DestBB and remove BB.
1000 BB
->replaceAllUsesWith(DestBB
);
1001 BB
->eraseFromParent();
1004 LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB
<< "\n\n\n");
1007 // Computes a map of base pointer relocation instructions to corresponding
1008 // derived pointer relocation instructions given a vector of all relocate calls
1009 static void computeBaseDerivedRelocateMap(
1010 const SmallVectorImpl
<GCRelocateInst
*> &AllRelocateCalls
,
1011 DenseMap
<GCRelocateInst
*, SmallVector
<GCRelocateInst
*, 2>>
1013 // Collect information in two maps: one primarily for locating the base object
1014 // while filling the second map; the second map is the final structure holding
1015 // a mapping between Base and corresponding Derived relocate calls
1016 DenseMap
<std::pair
<unsigned, unsigned>, GCRelocateInst
*> RelocateIdxMap
;
1017 for (auto *ThisRelocate
: AllRelocateCalls
) {
1018 auto K
= std::make_pair(ThisRelocate
->getBasePtrIndex(),
1019 ThisRelocate
->getDerivedPtrIndex());
1020 RelocateIdxMap
.insert(std::make_pair(K
, ThisRelocate
));
1022 for (auto &Item
: RelocateIdxMap
) {
1023 std::pair
<unsigned, unsigned> Key
= Item
.first
;
1024 if (Key
.first
== Key
.second
)
1025 // Base relocation: nothing to insert
1028 GCRelocateInst
*I
= Item
.second
;
1029 auto BaseKey
= std::make_pair(Key
.first
, Key
.first
);
1031 // We're iterating over RelocateIdxMap so we cannot modify it.
1032 auto MaybeBase
= RelocateIdxMap
.find(BaseKey
);
1033 if (MaybeBase
== RelocateIdxMap
.end())
1034 // TODO: We might want to insert a new base object relocate and gep off
1035 // that, if there are enough derived object relocates.
1038 RelocateInstMap
[MaybeBase
->second
].push_back(I
);
1042 // Accepts a GEP and extracts the operands into a vector provided they're all
1043 // small integer constants
1044 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst
*GEP
,
1045 SmallVectorImpl
<Value
*> &OffsetV
) {
1046 for (unsigned i
= 1; i
< GEP
->getNumOperands(); i
++) {
1047 // Only accept small constant integer operands
1048 auto *Op
= dyn_cast
<ConstantInt
>(GEP
->getOperand(i
));
1049 if (!Op
|| Op
->getZExtValue() > 20)
1053 for (unsigned i
= 1; i
< GEP
->getNumOperands(); i
++)
1054 OffsetV
.push_back(GEP
->getOperand(i
));
// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
1061 simplifyRelocatesOffABase(GCRelocateInst
*RelocatedBase
,
1062 const SmallVectorImpl
<GCRelocateInst
*> &Targets
) {
1063 bool MadeChange
= false;
1064 // We must ensure the relocation of derived pointer is defined after
1065 // relocation of base pointer. If we find a relocation corresponding to base
1066 // defined earlier than relocation of base then we move relocation of base
1067 // right before found relocation. We consider only relocation in the same
1068 // basic block as relocation of base. Relocations from other basic block will
1069 // be skipped by optimization and we do not care about them.
1070 for (auto R
= RelocatedBase
->getParent()->getFirstInsertionPt();
1071 &*R
!= RelocatedBase
; ++R
)
1072 if (auto *RI
= dyn_cast
<GCRelocateInst
>(R
))
1073 if (RI
->getStatepoint() == RelocatedBase
->getStatepoint())
1074 if (RI
->getBasePtrIndex() == RelocatedBase
->getBasePtrIndex()) {
1075 RelocatedBase
->moveBefore(RI
);
1079 for (GCRelocateInst
*ToReplace
: Targets
) {
1080 assert(ToReplace
->getBasePtrIndex() == RelocatedBase
->getBasePtrIndex() &&
1081 "Not relocating a derived object of the original base object");
1082 if (ToReplace
->getBasePtrIndex() == ToReplace
->getDerivedPtrIndex()) {
1083 // A duplicate relocate call. TODO: coalesce duplicates.
1087 if (RelocatedBase
->getParent() != ToReplace
->getParent()) {
1088 // Base and derived relocates are in different basic blocks.
1089 // In this case transform is only valid when base dominates derived
1090 // relocate. However it would be too expensive to check dominance
1091 // for each such relocate, so we skip the whole transformation.
1095 Value
*Base
= ToReplace
->getBasePtr();
1096 auto *Derived
= dyn_cast
<GetElementPtrInst
>(ToReplace
->getDerivedPtr());
1097 if (!Derived
|| Derived
->getPointerOperand() != Base
)
1100 SmallVector
<Value
*, 2> OffsetV
;
1101 if (!getGEPSmallConstantIntOffsetV(Derived
, OffsetV
))
1104 // Create a Builder and replace the target callsite with a gep
1105 assert(RelocatedBase
->getNextNode() &&
1106 "Should always have one since it's not a terminator");
1108 // Insert after RelocatedBase
1109 IRBuilder
<> Builder(RelocatedBase
->getNextNode());
1110 Builder
.SetCurrentDebugLocation(ToReplace
->getDebugLoc());
1112 // If gc_relocate does not match the actual type, cast it to the right type.
1113 // In theory, there must be a bitcast after gc_relocate if the type does not
1114 // match, and we should reuse it to get the derived pointer. But it could be
1118 // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1123 // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1127 // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
// %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
//
// In this case, we can no longer find the bitcast. So we insert a new bitcast
// whether one already exists or not. In this way, we can handle all cases, and
// the extra bitcast should be optimized away in later passes.
1133 Value
*ActualRelocatedBase
= RelocatedBase
;
1134 if (RelocatedBase
->getType() != Base
->getType()) {
1135 ActualRelocatedBase
=
1136 Builder
.CreateBitCast(RelocatedBase
, Base
->getType());
1138 Value
*Replacement
= Builder
.CreateGEP(
1139 Derived
->getSourceElementType(), ActualRelocatedBase
, makeArrayRef(OffsetV
));
1140 Replacement
->takeName(ToReplace
);
1141 // If the newly generated derived pointer's type does not match the original derived
1142 // pointer's type, cast the new derived pointer to match it. Same reasoning as above.
1143 Value
*ActualReplacement
= Replacement
;
1144 if (Replacement
->getType() != ToReplace
->getType()) {
1146 Builder
.CreateBitCast(Replacement
, ToReplace
->getType());
1148 ToReplace
->replaceAllUsesWith(ActualReplacement
);
1149 ToReplace
->eraseFromParent();
1159 // %ptr = gep %base + 15
1160 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1161 // %base' = relocate(%tok, i32 4, i32 4)
1162 // %ptr' = relocate(%tok, i32 4, i32 5)
1163 // %val = load %ptr'
1168 // %ptr = gep %base + 15
1169 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1170 // %base' = gc.relocate(%tok, i32 4, i32 4)
1171 // %ptr' = gep %base' + 15
1172 // %val = load %ptr'
1173 bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst
&I
) {
1174 bool MadeChange
= false;
1175 SmallVector
<GCRelocateInst
*, 2> AllRelocateCalls
;
1176 for (auto *U
: I
.users())
1177 if (GCRelocateInst
*Relocate
= dyn_cast
<GCRelocateInst
>(U
))
1178 // Collect all the relocate calls associated with a statepoint
1179 AllRelocateCalls
.push_back(Relocate
);
1181 // We need at least one base pointer relocation + one derived pointer
1182 // relocation to mangle
1183 if (AllRelocateCalls
.size() < 2)
1186 // RelocateInstMap is a mapping from the base relocate instruction to the
1187 // corresponding derived relocate instructions
1188 DenseMap
<GCRelocateInst
*, SmallVector
<GCRelocateInst
*, 2>> RelocateInstMap
;
1189 computeBaseDerivedRelocateMap(AllRelocateCalls
, RelocateInstMap
);
1190 if (RelocateInstMap
.empty())
1193 for (auto &Item
: RelocateInstMap
)
1194 // Item.first is the RelocatedBase to offset against
1195 // Item.second is the vector of Targets to replace
1196 MadeChange
= simplifyRelocatesOffABase(Item
.first
, Item
.second
);
1200 /// Sink the specified cast instruction into its user blocks.
1201 static bool SinkCast(CastInst
*CI
) {
1202 BasicBlock
*DefBB
= CI
->getParent();
1204 /// InsertedCasts - Only insert a cast in each block once.
1205 DenseMap
<BasicBlock
*, CastInst
*> InsertedCasts
;
1207 bool MadeChange
= false;
1208 for (Value::user_iterator UI
= CI
->user_begin(), E
= CI
->user_end();
1210 Use
&TheUse
= UI
.getUse();
1211 Instruction
*User
= cast
<Instruction
>(*UI
);
1213 // Figure out which BB this cast is used in. For PHI's this is the
1214 // appropriate predecessor block.
1215 BasicBlock
*UserBB
= User
->getParent();
1216 if (PHINode
*PN
= dyn_cast
<PHINode
>(User
)) {
1217 UserBB
= PN
->getIncomingBlock(TheUse
);
1220 // Preincrement use iterator so we don't invalidate it.
1223 // The first insertion point of a block containing an EH pad is after the
1224 // pad. If the pad is the user, we cannot sink the cast past the pad.
1225 if (User
->isEHPad())
1228 // If the block selected to receive the cast is an EH pad that does not
1229 // allow non-PHI instructions before the terminator, we can't sink the
1231 if (UserBB
->getTerminator()->isEHPad())
1234 // If this user is in the same block as the cast, don't change the cast.
1235 if (UserBB
== DefBB
) continue;
1237 // If we have already inserted a cast into this block, use it.
1238 CastInst
*&InsertedCast
= InsertedCasts
[UserBB
];
1240 if (!InsertedCast
) {
1241 BasicBlock::iterator InsertPt
= UserBB
->getFirstInsertionPt();
1242 assert(InsertPt
!= UserBB
->end());
1243 InsertedCast
= CastInst::Create(CI
->getOpcode(), CI
->getOperand(0),
1244 CI
->getType(), "", &*InsertPt
);
1245 InsertedCast
->setDebugLoc(CI
->getDebugLoc());
1248 // Replace a use of the cast with a use of the new cast.
1249 TheUse
= InsertedCast
;
1254 // If we removed all uses, nuke the cast.
1255 if (CI
->use_empty()) {
1256 salvageDebugInfo(*CI
);
1257 CI
->eraseFromParent();
1264 /// If the specified cast instruction is a noop copy (e.g. it's casting from
1265 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1266 /// reduce the number of virtual registers that must be created and coalesced.
1268 /// Return true if any changes are made.
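///
/// Illustrative example (assumed typical case): a `bitcast i8* %p to i32*`
/// whose only uses are in other blocks gets a copy of the cast inserted into
/// each user block, so the original cast's value does not need to stay live in
/// a register across block boundaries.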
1269 static bool OptimizeNoopCopyExpression(CastInst
*CI
, const TargetLowering
&TLI
,
1270 const DataLayout
&DL
) {
1271 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
1272 // than sinking only nop casts, but is helpful on some platforms.
1273 if (auto *ASC
= dyn_cast
<AddrSpaceCastInst
>(CI
)) {
1274 if (!TLI
.isFreeAddrSpaceCast(ASC
->getSrcAddressSpace(),
1275 ASC
->getDestAddressSpace()))
1279 // If this is a noop copy,
1280 EVT SrcVT
= TLI
.getValueType(DL
, CI
->getOperand(0)->getType());
1281 EVT DstVT
= TLI
.getValueType(DL
, CI
->getType());
1283 // This is an fp<->int conversion?
1284 if (SrcVT
.isInteger() != DstVT
.isInteger())
1287 // If this is an extension, it will be a zero or sign extension, which
1289 if (SrcVT
.bitsLT(DstVT
)) return false;
1291 // If these values will be promoted, find out what they will be promoted
1292 // to. This helps us consider truncates on PPC as noop copies when they
1294 if (TLI
.getTypeAction(CI
->getContext(), SrcVT
) ==
1295 TargetLowering::TypePromoteInteger
)
1296 SrcVT
= TLI
.getTypeToTransformTo(CI
->getContext(), SrcVT
);
1297 if (TLI
.getTypeAction(CI
->getContext(), DstVT
) ==
1298 TargetLowering::TypePromoteInteger
)
1299 DstVT
= TLI
.getTypeToTransformTo(CI
->getContext(), DstVT
);
1301 // If, after promotion, these are the same types, this is a noop copy.
1305 return SinkCast(CI
);
1308 // Match a simple increment by constant operation. Note that if a sub is
1309 // matched, the step is negated (as if the step had been canonicalized to
1310 // an add, even though we leave the instruction alone.)
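// Illustrative example: for `%iv.next = add i32 %iv, 4` this sets LHS = %iv
// and Step = 4, while for `%iv.next = sub i32 %iv, 4` it sets LHS = %iv and
// Step = -4.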
1311 bool matchIncrement(const Instruction
* IVInc
, Instruction
*&LHS
,
1313 if (match(IVInc
, m_Add(m_Instruction(LHS
), m_Constant(Step
))) ||
1314 match(IVInc
, m_ExtractValue
<0>(m_Intrinsic
<Intrinsic::uadd_with_overflow
>(
1315 m_Instruction(LHS
), m_Constant(Step
)))))
1317 if (match(IVInc
, m_Sub(m_Instruction(LHS
), m_Constant(Step
))) ||
1318 match(IVInc
, m_ExtractValue
<0>(m_Intrinsic
<Intrinsic::usub_with_overflow
>(
1319 m_Instruction(LHS
), m_Constant(Step
))))) {
1320 Step
= ConstantExpr::getNeg(Step
);
1326 /// If given \p PN is an inductive variable with value IVInc coming from the
1327 /// backedge, and on each iteration it gets increased by Step, return pair
1328 /// <IVInc, Step>. Otherwise, return None.
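///
/// Illustrative example: for a loop with
///   %iv = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
///   %iv.next = add i32 %iv, 1
/// this returns the pair {%iv.next, i32 1}.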
1329 static Optional
<std::pair
<Instruction
*, Constant
*> >
1330 getIVIncrement(const PHINode
*PN
, const LoopInfo
*LI
) {
1331 const Loop
*L
= LI
->getLoopFor(PN
->getParent());
1332 if (!L
|| L
->getHeader() != PN
->getParent() || !L
->getLoopLatch())
1335 dyn_cast
<Instruction
>(PN
->getIncomingValueForBlock(L
->getLoopLatch()));
1336 if (!IVInc
|| LI
->getLoopFor(IVInc
->getParent()) != L
)
1338 Instruction
*LHS
= nullptr;
1339 Constant
*Step
= nullptr;
1340 if (matchIncrement(IVInc
, LHS
, Step
) && LHS
== PN
)
1341 return std::make_pair(IVInc
, Step
);
1345 static bool isIVIncrement(const Value
*V
, const LoopInfo
*LI
) {
1346 auto *I
= dyn_cast
<Instruction
>(V
);
1349 Instruction
*LHS
= nullptr;
1350 Constant
*Step
= nullptr;
1351 if (!matchIncrement(I
, LHS
, Step
))
1353 if (auto *PN
= dyn_cast
<PHINode
>(LHS
))
1354 if (auto IVInc
= getIVIncrement(PN
, LI
))
1355 return IVInc
->first
== I
;
1359 bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator
*BO
,
1360 Value
*Arg0
, Value
*Arg1
,
1362 Intrinsic::ID IID
) {
1363 auto IsReplacableIVIncrement
= [this, &Cmp
](BinaryOperator
*BO
) {
1364 if (!isIVIncrement(BO
, LI
))
1366 const Loop
*L
= LI
->getLoopFor(BO
->getParent());
1367 assert(L
&& "L should not be null after isIVIncrement()");
1368 // Do not risk on moving increment into a child loop.
1369 if (LI
->getLoopFor(Cmp
->getParent()) != L
)
1372 // Finally, we need to ensure that the insert point will dominate all
1373 // existing uses of the increment.
1375 auto &DT
= getDT(*BO
->getParent()->getParent());
1376 if (DT
.dominates(Cmp
->getParent(), BO
->getParent()))
1377 // If we're moving up the dom tree, all uses are trivially dominated.
1378 // (This is the common case for code produced by LSR.)
1381 // Otherwise, special case the single use in the phi recurrence.
1382 return BO
->hasOneUse() && DT
.dominates(Cmp
->getParent(), L
->getLoopLatch());
1384 if (BO
->getParent() != Cmp
->getParent() && !IsReplacableIVIncrement(BO
)) {
1385 // We used to use a dominator tree here to allow multi-block optimization.
1386 // But that was problematic because:
1387 // 1. It could cause a perf regression by hoisting the math op into the
1389 // 2. It could cause a perf regression by creating a value that was live
1390 // across multiple blocks and increasing register pressure.
1391 // 3. Use of a dominator tree could cause large compile-time regression.
1392 // This is because we recompute the DT on every change in the main CGP
1393 // run-loop. The recomputing is probably unnecessary in many cases, so if
1394 // that was fixed, using a DT here would be ok.
1396 // There is one important particular case we still want to handle: if BO is
1397 // the IV increment. Important properties that make it profitable:
1398 // - We can speculate IV increment anywhere in the loop (as long as the
1399 // indvar Phi is its only user);
1400 // - Upon computing Cmp, we effectively compute something equivalent to the
// IV increment (even though it looks different in the IR). So moving it up
1402 // to the cmp point does not really increase register pressure.
1406 // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1407 if (BO
->getOpcode() == Instruction::Add
&&
1408 IID
== Intrinsic::usub_with_overflow
) {
1409 assert(isa
<Constant
>(Arg1
) && "Unexpected input for usubo");
1410 Arg1
= ConstantExpr::getNeg(cast
<Constant
>(Arg1
));
1413 // Insert at the first instruction of the pair.
1414 Instruction
*InsertPt
= nullptr;
1415 for (Instruction
&Iter
: *Cmp
->getParent()) {
1416 // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1417 // the overflow intrinsic are defined.
1418 if ((BO
->getOpcode() != Instruction::Xor
&& &Iter
== BO
) || &Iter
== Cmp
) {
1423 assert(InsertPt
!= nullptr && "Parent block did not contain cmp or binop");
1425 IRBuilder
<> Builder(InsertPt
);
1426 Value
*MathOV
= Builder
.CreateBinaryIntrinsic(IID
, Arg0
, Arg1
);
1427 if (BO
->getOpcode() != Instruction::Xor
) {
1428 Value
*Math
= Builder
.CreateExtractValue(MathOV
, 0, "math");
1429 BO
->replaceAllUsesWith(Math
);
1431 assert(BO
->hasOneUse() &&
1432 "Patterns with XOr should use the BO only in the compare");
1433 Value
*OV
= Builder
.CreateExtractValue(MathOV
, 1, "ov");
1434 Cmp
->replaceAllUsesWith(OV
);
1435 Cmp
->eraseFromParent();
1436 BO
->eraseFromParent();
1440 /// Match special-case patterns that check for unsigned add overflow.
1441 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst
*Cmp
,
1442 BinaryOperator
*&Add
) {
1443 // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1444 // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1445 Value
*A
= Cmp
->getOperand(0), *B
= Cmp
->getOperand(1);
1447 // We are not expecting non-canonical/degenerate code. Just bail out.
1448 if (isa
<Constant
>(A
))
1451 ICmpInst::Predicate Pred
= Cmp
->getPredicate();
1452 if (Pred
== ICmpInst::ICMP_EQ
&& match(B
, m_AllOnes()))
1453 B
= ConstantInt::get(B
->getType(), 1);
1454 else if (Pred
== ICmpInst::ICMP_NE
&& match(B
, m_ZeroInt()))
1455 B
= ConstantInt::get(B
->getType(), -1);
1459 // Check the users of the variable operand of the compare looking for an add
1460 // with the adjusted constant.
1461 for (User
*U
: A
->users()) {
1462 if (match(U
, m_Add(m_Specific(A
), m_Specific(B
)))) {
1463 Add
= cast
<BinaryOperator
>(U
);
1470 /// Try to combine the compare into a call to the llvm.uadd.with.overflow
1471 /// intrinsic. Return true if any changes were made.
1472 bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst
*Cmp
,
1475 BinaryOperator
*Add
;
1476 if (!match(Cmp
, m_UAddWithOverflow(m_Value(A
), m_Value(B
), m_BinOp(Add
)))) {
1477 if (!matchUAddWithOverflowConstantEdgeCases(Cmp
, Add
))
1479 // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
1480 A
= Add
->getOperand(0);
1481 B
= Add
->getOperand(1);
1484 if (!TLI
->shouldFormOverflowOp(ISD::UADDO
,
1485 TLI
->getValueType(*DL
, Add
->getType()),
1486 Add
->hasNUsesOrMore(2)))
1489 // We don't want to move around uses of condition values this late, so we
1490 // check if it is legal to create the call to the intrinsic in the basic
1491 // block containing the icmp.
1492 if (Add
->getParent() != Cmp
->getParent() && !Add
->hasOneUse())
1495 if (!replaceMathCmpWithIntrinsic(Add
, A
, B
, Cmp
,
1496 Intrinsic::uadd_with_overflow
))
1499 // Reset callers - do not crash by iterating over a dead instruction.
bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
                                               bool &ModifiedDT) {
  // We are not expecting non-canonical/degenerate code. Just bail out.
  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
  if (isa<Constant>(A) && isa<Constant>(B))
    return false;

  // Convert (A u> B) to (A u< B) to simplify pattern matching.
  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred == ICmpInst::ICMP_UGT) {
    std::swap(A, B);
    Pred = ICmpInst::ICMP_ULT;
  }
  // Convert special-case: (A == 0) is the same as (A u< 1).
  if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
    B = ConstantInt::get(B->getType(), 1);
    Pred = ICmpInst::ICMP_ULT;
  }
  // Convert special-case: (A != 0) is the same as (0 u< A).
  if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
    std::swap(A, B);
    Pred = ICmpInst::ICMP_ULT;
  }
  if (Pred != ICmpInst::ICMP_ULT)
    return false;

  // Walk the users of a variable operand of a compare looking for a subtract or
  // add with that same operand. Also match the 2nd operand of the compare to
  // the add/sub, but that may be a negated constant operand of an add.
  Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
  BinaryOperator *Sub = nullptr;
  for (User *U : CmpVariableOperand->users()) {
    // A - B, A u< B --> usubo(A, B)
    if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
      Sub = cast<BinaryOperator>(U);
      break;
    }

    // A + (-C), A u< C (canonicalized form of (sub A, C))
    const APInt *CmpC, *AddC;
    if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
        match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
      Sub = cast<BinaryOperator>(U);
      break;
    }
  }
  if (!Sub)
    return false;

  if (!TLI->shouldFormOverflowOp(ISD::USUBO,
                                 TLI->getValueType(*DL, Sub->getType()),
                                 Sub->hasNUsesOrMore(2)))
    return false;

  if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
                                   Cmp, Intrinsic::usub_with_overflow))
    return false;

  // Reset callers - do not crash by iterating over a dead instruction.
  ModifiedDT = true;
  return true;
}
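// Illustrative before/after sketch (assumed IR, hypothetical value names):
//
//   %sub = sub i32 %a, %b
//   %ov  = icmp ult i32 %a, %b            ; borrow check for the subtract
// =>
//   %m   = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
//   %sub = extractvalue { i32, i1 } %m, 0
//   %ov  = extractvalue { i32, i1 } %m, 1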
/// Sink the given CmpInst into user blocks to reduce the number of virtual
/// registers that must be created and coalesced. This is a clear win except on
/// targets with multiple condition code registers (PowerPC), where it might
/// lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
  if (TLI.hasMultipleConditionRegisters())
    return false;

  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
  if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
    return false;

  // Only insert a cmp in each block once.
  DenseMap<BasicBlock *, CmpInst *> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();
    BasicBlock *DefBB = Cmp->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCmp =
          CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
                          Cmp->getOperand(0), Cmp->getOperand(1), "",
                          &*InsertPt);
      // Propagate the debug info.
      InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    MadeChange = true;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (Cmp->use_empty()) {
    Cmp->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
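// Illustrative before/after sketch (hypothetical IR, not taken from a test):
//
//   def:
//     %c = icmp eq i32 %x, %y        ; %c is live across the block boundary
//     br label %use
//   use:
//     br i1 %c, label %a, label %b
// =>
//   use:
//     %c.sunk = icmp eq i32 %x, %y   ; clone sits next to its user
//     br i1 %c.sunk, label %a, label %b
//
// Keeping the compare in the block of its user lets SelectionDAG fold it into
// the branch instead of materializing the i1 result in a virtual register.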
/// For pattern like:
///
///   DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
///   ...
/// DomBB:
///   ...
///   br DomCond, TrueBB, CmpBB
/// CmpBB: (with DomBB being the single predecessor)
///   ...
///   Cmp = icmp eq CmpOp0, CmpOp1
///   ...
///
/// It would use two comparisons on targets where the lowering of icmp sgt/slt
/// differs from the lowering of icmp eq (PowerPC). This function tries to
/// convert 'Cmp = icmp eq CmpOp0, CmpOp1' to 'Cmp = icmp slt/sgt CmpOp0,
/// CmpOp1'. After that, DomCond and Cmp can use the same comparison, saving
/// one comparison.
///
/// Return true if any changes are made.
static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
                                       const TargetLowering &TLI) {
  if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
    return false;

  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred != ICmpInst::ICMP_EQ)
    return false;

  // If icmp eq has users other than BranchInst and SelectInst, converting it to
  // icmp slt/sgt would introduce more redundant LLVM IR.
  for (User *U : Cmp->users()) {
    if (isa<BranchInst>(U))
      continue;
    if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
      continue;
    return false;
  }

  // This is a cheap/incomplete check for dominance - just match a single
  // predecessor with a conditional branch.
  BasicBlock *CmpBB = Cmp->getParent();
  BasicBlock *DomBB = CmpBB->getSinglePredecessor();
  if (!DomBB)
    return false;

  // We want to ensure that the only way control gets to the comparison of
  // interest is that a less/greater than comparison on the same operands is
  // true.
  Value *DomCond;
  BasicBlock *TrueBB, *FalseBB;
  if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
    return false;
  if (CmpBB != FalseBB)
    return false;

  Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
  ICmpInst::Predicate DomPred;
  if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
    return false;
  if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
    return false;

  // Convert the equality comparison to the opposite of the dominating
  // comparison and swap the direction for all branch/select users.
  // We have conceptually converted:
  // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
  // into
  // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>;
  // And similarly for branches.
  for (User *U : Cmp->users()) {
    if (auto *BI = dyn_cast<BranchInst>(U)) {
      assert(BI->isConditional() && "Must be conditional");
      BI->swapSuccessors();
      continue;
    }
    if (auto *SI = dyn_cast<SelectInst>(U)) {
      // Swap operands.
      SI->swapValues();
      SI->swapProfMetadata();
      continue;
    }
    llvm_unreachable("Must be a branch or a select");
  }
  Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
  return true;
}
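// Illustrative sketch (hypothetical IR): with a dominating 'icmp sgt' that
// branches to CmpBB on false, the equality test is rewritten to the swapped
// signed predicate and its branch/select users have their arms swapped, so
// both blocks can reuse one signed comparison:
//
//   dom:
//     %gt = icmp sgt i32 %a, %b
//     br i1 %gt, label %true, label %cmpbb
//   cmpbb:
//     %eq = icmp eq i32 %a, %b              ; becomes: icmp slt i32 %a, %b
//     br i1 %eq, label %eqbb, label %nebb   ; successors swapped accordingly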
bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, bool &ModifiedDT) {
  if (sinkCmpExpression(Cmp, *TLI))
    return true;

  if (combineToUAddWithOverflow(Cmp, ModifiedDT))
    return true;

  if (combineToUSubWithOverflow(Cmp, ModifiedDT))
    return true;

  if (foldICmpWithDominatingICmp(Cmp, *TLI))
    return true;

  return false;
}
/// Duplicate and sink the given 'and' instruction into user blocks where it is
/// used in a compare to allow isel to generate better code for targets where
/// this operation can be combined.
///
/// Return true if any changes are made.
static bool sinkAndCmp0Expression(Instruction *AndI,
                                  const TargetLowering &TLI,
                                  SetOfInstrs &InsertedInsts) {
  // Double-check that we're not trying to optimize an instruction that was
  // already optimized by some other part of this pass.
  assert(!InsertedInsts.count(AndI) &&
         "Attempting to optimize already optimized and instruction");
  (void) InsertedInsts;

  // Nothing to do for single use in same basic block.
  if (AndI->hasOneUse() &&
      AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
    return false;

  // Try to avoid cases where sinking/duplicating is likely to increase register
  // pressure.
  if (!isa<ConstantInt>(AndI->getOperand(0)) &&
      !isa<ConstantInt>(AndI->getOperand(1)) &&
      AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
    return false;

  for (auto *U : AndI->users()) {
    Instruction *User = cast<Instruction>(U);

    // Only sink 'and' feeding icmp with 0.
    if (!isa<ICmpInst>(User))
      return false;

    auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
    if (!CmpC || !CmpC->isZero())
      return false;
  }

  if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
    return false;

  LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
  LLVM_DEBUG(AndI->getParent()->dump());

  // Push the 'and' into the same block as the icmp 0. There should only be
  // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
  // others, so we don't need to keep track of which BBs we insert into.
  for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");

    // Keep the 'and' in the same place if the use is already in the same block.
    Instruction *InsertPt =
        User->getParent() == AndI->getParent() ? AndI : User;
    Instruction *InsertedAnd =
        BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
                               AndI->getOperand(1), "", InsertPt);
    // Propagate the debug info.
    InsertedAnd->setDebugLoc(AndI->getDebugLoc());

    // Replace a use of the 'and' with a use of the new 'and'.
    TheUse = InsertedAnd;
    ++NumAndUses;
    LLVM_DEBUG(User->getParent()->dump());
  }

  // We removed all uses, nuke the and.
  AndI->eraseFromParent();
  return true;
}
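// Illustrative sketch (hypothetical IR): the 'and' is re-created next to each
// icmp-with-zero user, so isel can combine the pair on targets where
// isMaskAndCmp0FoldingBeneficial() returns true:
//
//   bb0:
//     %m = and i64 %x, 255          ; original (erased once all uses move)
//   bb1:
//     %c = icmp eq i64 %m, 0
// =>
//   bb1:
//     %m.sunk = and i64 %x, 255
//     %c = icmp eq i64 %m.sunk, 0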
/// Check if the candidates could be combined with a shift instruction, which
/// includes:
/// 1. Truncate instruction
/// 2. And instruction and the imm is a mask of the low bits:
///    imm & (imm+1) == 0
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}
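// Worked example of the mask test above (values are illustrative):
//   imm = 0x00ff: 0x00ff & 0x0100 == 0  -> low-bit mask, candidate.
//   imm = 0x0ff0: 0x0ff0 & 0x0ff1 != 0  -> not a low-bit mask, rejected.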
/// Sink both shift and truncate instruction to the use of truncate's BB.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI, const DataLayout &DL) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  auto *TruncI = cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {

    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.
    ++TruncUI;

    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
    if (!ISDOpcode)
      continue;

    // If the use is actually a legal node, there will not be an
    // implicit truncate.
    // FIXME: always querying the result type is just an
    // approximation; some nodes' legality is determined by the
    // operand or other means. There's no good way to find out though.
    if (TLI.isOperationLegalOrCustom(
            ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
      continue;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(TruncUser))
      continue;

    BasicBlock *TruncUserBB = TruncUser->getParent();

    if (UserBB == TruncUserBB)
      continue;

    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];

    if (!InsertedShift && !InsertedTrunc) {
      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
      assert(InsertPt != TruncUserBB->end());

      // Sink the shift.
      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      else
        InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      InsertedShift->setDebugLoc(ShiftI->getDebugLoc());

      // Sink the trunc.
      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
      TruncInsertPt++;
      assert(TruncInsertPt != TruncUserBB->end());

      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
                                       TruncI->getType(), "", &*TruncInsertPt);
      InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());

      MadeChange = true;
    }

    TruncTheUse = InsertedTrunc;
  }
  return MadeChange;
}
/// Sink the shift *right* instruction into user blocks if the uses could
/// potentially be combined with this shift instruction and generate BitExtract
/// instruction. It will only be applied if the architecture supports BitExtract
/// instruction. Here is an example:
/// BB1:
///   %x.extract.shift = lshr i64 %arg1, 32
/// BB2:
///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
/// BB2:
///   %x.extract.shift.1 = lshr i64 %arg1, 32
///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate a BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
                                const TargetLowering &TLI,
                                const DataLayout &DL) {
  BasicBlock *DefBB = ShiftI->getParent();

  /// Only insert instructions in each block once.
  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;

  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));

  bool MadeChange = false;
  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);
    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    if (!isExtractBitsCandidateUse(User))
      continue;

    BasicBlock *UserBB = User->getParent();

    if (UserBB == DefBB) {
      // If the shift and truncate instructions are in the same BB, the use of
      // the truncate (TruncUse) may still introduce another truncate if its
      // type is not legal. In this case, we would like to sink both the shift
      // and the truncate into the BB of TruncUse.
      // For example:
      // BB1:
      //   i64 shift.result = lshr i64 opnd, imm
      //   trunc.result = trunc shift.result to i16
      //
      // BB2:
      //   ----> We will have an implicit truncate here if the architecture
      //   does not have an i16 compare.
      //   cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) && shiftIsLegal
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          &&
          (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
        MadeChange =
            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);

      continue;
    }
    // If we have already inserted a shift into this block, use it.
    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];

    if (!InsertedShift) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());

      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      else
        InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
                                                   "", &*InsertPt);
      InsertedShift->setDebugLoc(ShiftI->getDebugLoc());

      MadeChange = true;
    }

    // Replace a use of the shift with a use of the new shift.
    TheUse = InsertedShift;
  }

  // If we removed all uses, or there are none, nuke the shift.
  if (ShiftI->use_empty()) {
    salvageDebugInfo(*ShiftI);
    ShiftI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
/// If counting leading or trailing zeros is an expensive operation and a zero
/// input is defined, add a check for zero to avoid calling the intrinsic.
///
/// We want to transform:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
///
/// into:
///   entry:
///     %cmpz = icmp eq i64 %A, 0
///     br i1 %cmpz, label %cond.end, label %cond.false
///   cond.false:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
///     br label %cond.end
///   cond.end:
///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
///
/// If the transform is performed, return true and set ModifiedDT to true.
static bool despeculateCountZeros(IntrinsicInst *CountZeros,
                                  const TargetLowering *TLI,
                                  const DataLayout *DL,
                                  bool &ModifiedDT) {
  // If a zero input is undefined, it doesn't make sense to despeculate that.
  if (match(CountZeros->getOperand(1), m_One()))
    return false;

  // If it's cheap to speculate, there's nothing to do.
  auto IntrinsicID = CountZeros->getIntrinsicID();
  if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
      (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
    return false;

  // Only handle legal scalar cases. Anything else requires too much work.
  Type *Ty = CountZeros->getType();
  unsigned SizeInBits = Ty->getPrimitiveSizeInBits();
  if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
    return false;

  // Bail if the value is never zero.
  if (llvm::isKnownNonZero(CountZeros->getOperand(0), *DL))
    return false;

  // The intrinsic will be sunk behind a compare against zero and branch.
  BasicBlock *StartBlock = CountZeros->getParent();
  BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");

  // Create another block after the count zero intrinsic. A PHI will be added
  // in this block to select the result of the intrinsic or the bit-width
  // constant if the input to the intrinsic is zero.
  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");

  // Set up a builder to create a compare, conditional branch, and PHI.
  IRBuilder<> Builder(CountZeros->getContext());
  Builder.SetInsertPoint(StartBlock->getTerminator());
  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());

  // Replace the unconditional branch that was created by the first split with
  // a compare against zero and a conditional branch.
  Value *Zero = Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz");
  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
  StartBlock->getTerminator()->eraseFromParent();

  // Create a PHI in the end block to select either the output of the intrinsic
  // or the bit width of the operand.
  Builder.SetInsertPoint(&EndBlock->front());
  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
  CountZeros->replaceAllUsesWith(PN);
  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
  PN->addIncoming(BitWidth, StartBlock);
  PN->addIncoming(CountZeros, CallBlock);

  // We are explicitly handling the zero case, so we can set the intrinsic's
  // undefined zero argument to 'true'. This will also prevent reprocessing the
  // intrinsic; we only despeculate when a zero input is defined.
  CountZeros->setArgOperand(1, Builder.getTrue());
  ModifiedDT = true;
  return true;
}
bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (CI->isInlineAsm()) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (optimizeInlineAsmInst(CI))
      return true;
  }

  // Align the pointer arguments to this call if the target thinks it's a good
  // idea.
  unsigned MinSize, PrefAlign;
  if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
    for (auto &Arg : CI->arg_operands()) {
      // We want to align both objects whose address is used directly and
      // objects whose address is used in casts and GEPs, though it only makes
      // sense for GEPs if the offset is a multiple of the desired alignment and
      // if size - offset meets the size threshold.
      if (!Arg->getType()->isPointerTy())
        continue;
      APInt Offset(DL->getIndexSizeInBits(
                       cast<PointerType>(Arg->getType())->getAddressSpace()),
                   0);
      Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
      uint64_t Offset2 = Offset.getLimitedValue();
      if ((Offset2 & (PrefAlign - 1)) != 0)
        continue;
      AllocaInst *AI;
      if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
          DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
        AI->setAlignment(Align(PrefAlign));
      // Global variables can only be aligned if they are defined in this
      // object (i.e. they are uniquely initialized in this object), and
      // over-aligning global variables that have an explicit section is
      // forbidden.
      GlobalVariable *GV;
      if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
          GV->getPointerAlignment(*DL) < PrefAlign &&
          DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
        GV->setAlignment(MaybeAlign(PrefAlign));
    }
    // If this is a memcpy (or similar) then we may be able to improve the
    // alignment.
    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
      Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
      MaybeAlign MIDestAlign = MI->getDestAlign();
      if (!MIDestAlign || DestAlign > *MIDestAlign)
        MI->setDestAlignment(DestAlign);
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
        MaybeAlign MTISrcAlign = MTI->getSourceAlign();
        Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
        if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
          MTI->setSourceAlignment(SrcAlign);
      }
    }
  }

  // If we have a cold call site, try to sink addressing computation into the
  // cold block. This interacts with our handling for loads and stores to
  // ensure that we can fold all uses of a potential addressing computation
  // into their uses. TODO: generalize this to work over profiling data
  if (CI->hasFnAttr(Attribute::Cold) &&
      !OptSize && !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
    for (auto &Arg : CI->arg_operands()) {
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned AS = Arg->getType()->getPointerAddressSpace();
      return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
    }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::assume:
      llvm_unreachable("llvm.assume should have been removed already");
    case Intrinsic::experimental_widenable_condition: {
      // Give up on future widening opportunities so that we can fold away dead
      // paths and merge blocks before going into block-local instruction
      // selection.
      if (II->use_empty()) {
        II->eraseFromParent();
        return true;
      }
      Constant *RetVal = ConstantInt::getTrue(II->getContext());
      resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
        replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
      });
      return true;
    }
    case Intrinsic::objectsize:
      llvm_unreachable("llvm.objectsize.* should have been lowered already");
    case Intrinsic::is_constant:
      llvm_unreachable("llvm.is.constant.* should have been lowered already");
    case Intrinsic::aarch64_stlxr:
    case Intrinsic::aarch64_stxr: {
      ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
      if (!ExtVal || !ExtVal->hasOneUse() ||
          ExtVal->getParent() == CI->getParent())
        return false;
      // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
      ExtVal->moveBefore(CI);
      // Mark this instruction as "inserted by CGP", so that other
      // optimizations don't touch it.
      InsertedInsts.insert(ExtVal);
      return true;
    }

    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group: {
      Value *ArgVal = II->getArgOperand(0);
      auto it = LargeOffsetGEPMap.find(II);
      if (it != LargeOffsetGEPMap.end()) {
        // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
        // Make sure not to have to deal with iterator invalidation
        // after possibly adding ArgVal to LargeOffsetGEPMap.
        auto GEPs = std::move(it->second);
        LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
        LargeOffsetGEPMap.erase(II);
      }

      II->replaceAllUsesWith(ArgVal);
      II->eraseFromParent();
      return true;
    }
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      // If counting zeros is expensive, try to avoid it.
      return despeculateCountZeros(II, TLI, DL, ModifiedDT);
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      return optimizeFunnelShift(II);
    case Intrinsic::dbg_value:
      return fixupDbgValue(II);
    case Intrinsic::vscale: {
      // If datalayout has no special restrictions on vector data layout,
      // replace `llvm.vscale` by an equivalent constant expression
      // to benefit from cheap constant propagation.
      Type *ScalableVectorTy =
          VectorType::get(Type::getInt8Ty(II->getContext()), 1, true);
      if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) {
        auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo());
        auto *One = ConstantInt::getSigned(II->getType(), 1);
        auto *CGep =
            ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One);
        II->replaceAllUsesWith(ConstantExpr::getPtrToInt(CGep, II->getType()));
        II->eraseFromParent();
        return true;
      }
      break;
    }
    case Intrinsic::masked_gather:
      return optimizeGatherScatterInst(II, II->getArgOperand(0));
    case Intrinsic::masked_scatter:
      return optimizeGatherScatterInst(II, II->getArgOperand(1));
    }

    SmallVector<Value *, 2> PtrOps;
    Type *AccessTy;
    if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
      while (!PtrOps.empty()) {
        Value *PtrVal = PtrOps.pop_back_val();
        unsigned AS = PtrVal->getType()->getPointerAddressSpace();
        if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
          return true;
      }
  }

  // From here on out we're working with named functions.
  if (!CI->getCalledFunction()) return false;

  // Lower all default uses of _chk calls. This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // to fortified library functions (e.g. __memcpy_chk) that have the default
  // "don't know" as the objectsize. Anything else should be left alone.
  FortifiedLibCallSimplifier Simplifier(TLInfo, true);
  IRBuilder<> Builder(CI);
  if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
    CI->replaceAllUsesWith(V);
    CI->eraseFromParent();
    return true;
  }

  return false;
}
/// Look for opportunities to duplicate return instructions to the predecessor
/// to enable tail call optimizations. The case it is currently looking for is:
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) {
  ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RetI)
    return false;

  PHINode *PN = nullptr;
  ExtractValueInst *EVI = nullptr;
  BitCastInst *BCI = nullptr;
  Value *V = RetI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    EVI = dyn_cast<ExtractValueInst>(V);
    if (EVI) {
      V = EVI->getOperand(0);
      if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
        return false;
    }

    PN = dyn_cast<PHINode>(V);
    if (!PN)
      return false;
  }

  if (PN && PN->getParent() != BB)
    return false;

  auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
    const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
    if (BC && BC->hasOneUse())
      Inst = BC->user_back();

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      return II->getIntrinsicID() == Intrinsic::lifetime_end;
    return false;
  };

  // Make sure there are no instructions between the first instruction
  // and return.
  const Instruction *BI = BB->getFirstNonPHI();
  // Skip over debug and the bitcast.
  while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
         isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI))
    BI = BI->getNextNode();
  if (BI != RetI)
    return false;

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  const Function *F = BB->getParent();
  SmallVector<BasicBlock *, 4> TailCallBBs;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      // Look through bitcasts.
      Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
      CallInst *CI = dyn_cast<CallInst>(IncomingVal);
      BasicBlock *PredBB = PN->getIncomingBlock(I);
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
          TLI->mayBeEmittedAsTailCall(CI) &&
          attributesPermitTailCall(F, CI, RetI, *TLI))
        TailCallBBs.push_back(PredBB);
    }
  } else {
    SmallPtrSet<BasicBlock *, 4> VisitedBBs;
    for (BasicBlock *Pred : predecessors(BB)) {
      if (!VisitedBBs.insert(Pred).second)
        continue;
      if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
        CallInst *CI = dyn_cast<CallInst>(I);
        if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
            attributesPermitTailCall(F, CI, RetI, *TLI))
          TailCallBBs.push_back(Pred);
      }
    }
  }

  bool Changed = false;
  for (auto const &TailCallBB : TailCallBBs) {
    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into TailCallBB.
    (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
    assert(!VerifyBFIUpdates ||
           BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
    BFI->setBlockFreq(
        BB,
        (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency());
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_empty(BB))
    BB->eraseFromParent();

  return Changed;
}
//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg = nullptr;
  Value *ScaledReg = nullptr;
  Value *OriginalValue = nullptr;
  bool InBounds = true;

  enum FieldName {
    NoField        = 0x00,
    BaseRegField   = 0x01,
    BaseGVField    = 0x02,
    BaseOffsField  = 0x04,
    ScaledRegField = 0x08,
    ScaleField     = 0x10,
    MultipleFields = 0xff
  };

  ExtAddrMode() = default;

  void print(raw_ostream &OS) const;
  void dump() const;

  FieldName compare(const ExtAddrMode &other) {
    // First check that the types are the same on each field, as differing types
    // is something we can't cope with later on.
    if (BaseReg && other.BaseReg &&
        BaseReg->getType() != other.BaseReg->getType())
      return MultipleFields;
    if (BaseGV && other.BaseGV &&
        BaseGV->getType() != other.BaseGV->getType())
      return MultipleFields;
    if (ScaledReg && other.ScaledReg &&
        ScaledReg->getType() != other.ScaledReg->getType())
      return MultipleFields;

    // Conservatively reject 'inbounds' mismatches.
    if (InBounds != other.InBounds)
      return MultipleFields;

    // Check each field to see if it differs.
    unsigned Result = NoField;
    if (BaseReg != other.BaseReg)
      Result |= BaseRegField;
    if (BaseGV != other.BaseGV)
      Result |= BaseGVField;
    if (BaseOffs != other.BaseOffs)
      Result |= BaseOffsField;
    if (ScaledReg != other.ScaledReg)
      Result |= ScaledRegField;
    // Don't count 0 as being a different scale, because that actually means
    // unscaled (which will already be counted by having no ScaledReg).
    if (Scale && other.Scale && Scale != other.Scale)
      Result |= ScaleField;

    if (countPopulation(Result) > 1)
      return MultipleFields;

    return static_cast<FieldName>(Result);
  }

  // An AddrMode is trivial if it involves no calculation i.e. it is just a base
  // register.
  bool isTrivial() {
    // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
    // trivial if at most one of these terms is nonzero, except that BaseGV and
    // BaseReg both being zero actually means a null pointer value, which we
    // consider to be 'non-zero' here.
    return !BaseOffs && !Scale && !(BaseGV && BaseReg);
  }

  Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
    switch (Field) {
    default:
      return nullptr;
    case BaseRegField:
      return BaseReg;
    case BaseGVField:
      return BaseGV;
    case ScaledRegField:
      return ScaledReg;
    case BaseOffsField:
      return ConstantInt::get(IntPtrTy, BaseOffs);
    }
  }

  void SetCombinedField(FieldName Field, Value *V,
                        const SmallVectorImpl<ExtAddrMode> &AddrModes) {
    switch (Field) {
    default:
      llvm_unreachable("Unhandled fields are expected to be rejected earlier");
      break;
    case ExtAddrMode::BaseRegField:
      BaseReg = V;
      break;
    case ExtAddrMode::BaseGVField:
      // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
      // in the BaseReg field.
      assert(BaseReg == nullptr);
      BaseReg = V;
      BaseGV = nullptr;
      break;
    case ExtAddrMode::ScaledRegField:
      ScaledReg = V;
      // If we have a mix of scaled and unscaled addrmodes then we want scale
      // to be the scale and not zero.
      if (!Scale)
        for (const ExtAddrMode &AM : AddrModes)
          if (AM.Scale) {
            Scale = AM.Scale;
            break;
          }
      break;
    case ExtAddrMode::BaseOffsField:
      // The offset is no longer a constant, so it goes in ScaledReg with a
      // scale of 1.
      assert(ScaledReg == nullptr);
      ScaledReg = V;
      Scale = 1;
      BaseOffs = 0;
      break;
    }
  }
};

} // end anonymous namespace
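// Illustrative decomposition (hypothetical IR and numbers): for an access like
//
//   %p = getelementptr inbounds i32, i32* %base, i64 %idx
//   %v = load i32, i32* %p
//
// the matching code below would typically describe the address as an
// ExtAddrMode with BaseReg = %base, ScaledReg = %idx, Scale = 4 and
// BaseOffs = 0, i.e. the address is BaseGV + BaseReg + BaseOffs +
// ScaledReg * Scale.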
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (InBounds)
    OS << "inbounds ";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "")
       << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    BaseReg->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  }

  OS << ']';
}

LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif
namespace {

/// This class provides transaction based operation on the IR.
/// Every change made through this class is recorded in the internal state and
/// can be undone (rollback) until commit is called.
/// CGP does not check if instructions could be speculatively executed when
/// moved. Preserving the original location would pessimize the debugging
/// experience, as well as negatively impact the quality of sample PGO.
class TypePromotionTransaction {
  /// This represents the common interface of the individual transaction.
  /// Each class implements the logic for doing one specific modification on
  /// the IR via the TypePromotionTransaction.
  class TypePromotionAction {
  protected:
    /// The Instruction modified.
    Instruction *Inst;

  public:
    /// Constructor of the action.
    /// The constructor performs the related action on the IR.
    TypePromotionAction(Instruction *Inst) : Inst(Inst) {}

    virtual ~TypePromotionAction() = default;

    /// Undo the modification done by this action.
    /// When this method is called, the IR must be in the same state as it was
    /// before this action was applied.
    /// \pre Undoing the action works if and only if the IR is in the exact same
    /// state as it was directly after this action was applied.
    virtual void undo() = 0;

    /// Advocate every change made by this action.
    /// When the results on the IR of the action are to be kept, it is important
    /// to call this function, otherwise hidden information may be kept forever.
    virtual void commit() {
      // Nothing to be done, this action is not doing anything.
    }
  };

  /// Utility to remember the position of an instruction.
  class InsertionHandler {
    /// Position of an instruction.
    /// Either an instruction:
    /// - Is the first in a basic block: BB is used.
    /// - Has a previous instruction: PrevInst is used.
    union {
      Instruction *PrevInst;
      BasicBlock *BB;
    } Point;

    /// Remember whether or not the instruction had a previous instruction.
    bool HasPrevInstruction;

  public:
    /// Record the position of \p Inst.
    InsertionHandler(Instruction *Inst) {
      BasicBlock::iterator It = Inst->getIterator();
      HasPrevInstruction = (It != (Inst->getParent()->begin()));
      if (HasPrevInstruction)
        Point.PrevInst = &*--It;
      else
        Point.BB = Inst->getParent();
    }

    /// Insert \p Inst at the recorded position.
    void insert(Instruction *Inst) {
      if (HasPrevInstruction) {
        if (Inst->getParent())
          Inst->removeFromParent();
        Inst->insertAfter(Point.PrevInst);
      } else {
        Instruction *Position = &*Point.BB->getFirstInsertionPt();
        if (Inst->getParent())
          Inst->moveBefore(Position);
        else
          Inst->insertBefore(Position);
      }
    }
  };

  /// Move an instruction before another.
  class InstructionMoveBefore : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Position;

  public:
    /// Move \p Inst before \p Before.
    InstructionMoveBefore(Instruction *Inst, Instruction *Before)
        : TypePromotionAction(Inst), Position(Inst) {
      LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
                        << "\n");
      Inst->moveBefore(Before);
    }

    /// Move the instruction back to its original position.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
      Position.insert(Inst);
    }
  };
  /// Set the operand of an instruction with a new value.
  class OperandSetter : public TypePromotionAction {
    /// Original operand of the instruction.
    Value *Origin;

    /// Index of the modified instruction.
    unsigned Idx;

  public:
    /// Set \p Idx operand of \p Inst with \p NewVal.
    OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
        : TypePromotionAction(Inst), Idx(Idx) {
      LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
                        << "for:" << *Inst << "\n"
                        << "with:" << *NewVal << "\n");
      Origin = Inst->getOperand(Idx);
      Inst->setOperand(Idx, NewVal);
    }

    /// Restore the original value of the instruction.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
                        << "for: " << *Inst << "\n"
                        << "with: " << *Origin << "\n");
      Inst->setOperand(Idx, Origin);
    }
  };

  /// Hide the operands of an instruction.
  /// Do as if this instruction was not using any of its operands.
  class OperandsHider : public TypePromotionAction {
    /// The list of original operands.
    SmallVector<Value *, 4> OriginalValues;

  public:
    /// Remove \p Inst from the uses of the operands of \p Inst.
    OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
      LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
      unsigned NumOpnds = Inst->getNumOperands();
      OriginalValues.reserve(NumOpnds);
      for (unsigned It = 0; It < NumOpnds; ++It) {
        // Save the current operand.
        Value *Val = Inst->getOperand(It);
        OriginalValues.push_back(Val);
        // Set a dummy one.
        // We could use OperandSetter here, but that would imply an overhead
        // that we are not willing to pay.
        Inst->setOperand(It, UndefValue::get(Val->getType()));
      }
    }

    /// Restore the original list of uses.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
      for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
        Inst->setOperand(It, OriginalValues[It]);
    }
  };

  /// Build a truncate instruction.
  class TruncBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// Build a truncate instruction of \p Opnd producing a \p Ty
    /// result.
    /// trunc Opnd to Ty.
    TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
      IRBuilder<> Builder(Opnd);
      Builder.SetCurrentDebugLocation(DebugLoc());
      Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
      LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
    }

    /// Get the built value.
    Value *getBuiltValue() { return Val; }

    /// Remove the built instruction.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };
  /// Build a sign extension instruction.
  class SExtBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// Build a sign extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// sext Opnd to Ty.
    SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateSExt(Opnd, Ty, "promoted");
      LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
    }

    /// Get the built value.
    Value *getBuiltValue() { return Val; }

    /// Remove the built instruction.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// Build a zero extension instruction.
  class ZExtBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// Build a zero extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// zext Opnd to Ty.
    ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Builder.SetCurrentDebugLocation(DebugLoc());
      Val = Builder.CreateZExt(Opnd, Ty, "promoted");
      LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
    }

    /// Get the built value.
    Value *getBuiltValue() { return Val; }

    /// Remove the built instruction.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// Mutate an instruction to another type.
  class TypeMutator : public TypePromotionAction {
    /// Record the original type.
    Type *OrigTy;

  public:
    /// Mutate the type of \p Inst into \p NewTy.
    TypeMutator(Instruction *Inst, Type *NewTy)
        : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
      LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
                        << "\n");
      Inst->mutateType(NewTy);
    }

    /// Mutate the instruction back to its original type.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
                        << "\n");
      Inst->mutateType(OrigTy);
    }
  };
  /// Replace the uses of an instruction by another instruction.
  class UsesReplacer : public TypePromotionAction {
    /// Helper structure to keep track of the replaced uses.
    struct InstructionAndIdx {
      /// The instruction using the instruction.
      Instruction *Inst;

      /// The index where this instruction is used for Inst.
      unsigned Idx;

      InstructionAndIdx(Instruction *Inst, unsigned Idx)
          : Inst(Inst), Idx(Idx) {}
    };

    /// Keep track of the original uses (pair Instruction, Index).
    SmallVector<InstructionAndIdx, 4> OriginalUses;
    /// Keep track of the debug users.
    SmallVector<DbgValueInst *, 1> DbgValues;

    /// Keep track of the new value so that we can undo it by replacing
    /// instances of the new value with the original value.
    Value *New;

    using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;

  public:
    /// Replace all the use of \p Inst by \p New.
    UsesReplacer(Instruction *Inst, Value *New)
        : TypePromotionAction(Inst), New(New) {
      LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
                        << "\n");
      // Record the original uses.
      for (Use &U : Inst->uses()) {
        Instruction *UserI = cast<Instruction>(U.getUser());
        OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
      }
      // Record the debug uses separately. They are not in the instruction's
      // use list, but they are replaced by RAUW.
      findDbgValues(DbgValues, Inst);

      // Now, we can replace the uses.
      Inst->replaceAllUsesWith(New);
    }

    /// Reassign the original uses of Inst to Inst.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
      for (InstructionAndIdx &Use : OriginalUses)
        Use.Inst->setOperand(Use.Idx, Inst);
      // RAUW has replaced all original uses with references to the new value,
      // including the debug uses. Since we are undoing the replacements,
      // the original debug uses must also be reinstated to maintain the
      // correctness and utility of debug value instructions.
      for (auto *DVI : DbgValues)
        DVI->replaceVariableLocationOp(New, Inst);
    }
  };

  /// Remove an instruction from the IR.
  class InstructionRemover : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Inserter;

    /// Helper structure to hide all the link to the instruction. In other
    /// words, this helps to do as if the instruction was removed.
    OperandsHider Hider;

    /// Keep track of the uses replaced, if any.
    UsesReplacer *Replacer = nullptr;

    /// Keep track of instructions removed.
    SetOfInstrs &RemovedInsts;

  public:
    /// Remove all reference of \p Inst and optionally replace all its
    /// uses with New.
    /// \p RemovedInsts Keep track of the instructions removed by this Action.
    /// \pre If !Inst->use_empty(), then New != nullptr
    InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
                       Value *New = nullptr)
        : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
          RemovedInsts(RemovedInsts) {
      if (New)
        Replacer = new UsesReplacer(Inst, New);
      LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
      RemovedInsts.insert(Inst);
      /// The instructions removed here will be freed after completing
      /// optimizeBlock() for all blocks as we need to keep track of the
      /// removed instructions during promotion.
      Inst->removeFromParent();
    }

    ~InstructionRemover() override { delete Replacer; }

    /// Resurrect the instruction and reassign it to the proper uses if
    /// new value was provided when build this action.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
      Inserter.insert(Inst);
      if (Replacer)
        Replacer->undo();
      Hider.undo();
      RemovedInsts.erase(Inst);
    }
  };

public:
  /// Restoration point.
  /// The restoration point is a pointer to an action instead of an iterator
  /// because the iterator may be invalidated but not the pointer.
  using ConstRestorationPt = const TypePromotionAction *;

  TypePromotionTransaction(SetOfInstrs &RemovedInsts)
      : RemovedInsts(RemovedInsts) {}

  /// Advocate every changes made in that transaction. Return true if any change
  /// happen.
  bool commit();

  /// Undo all the changes made after the given point.
  void rollback(ConstRestorationPt Point);

  /// Get the current restoration point.
  ConstRestorationPt getRestorationPoint() const;

  /// \name API for IR modification with state keeping to support rollback.
  /// @{
  /// Same as Instruction::setOperand.
  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);

  /// Same as Instruction::eraseFromParent.
  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);

  /// Same as Value::replaceAllUsesWith.
  void replaceAllUsesWith(Instruction *Inst, Value *New);

  /// Same as Value::mutateType.
  void mutateType(Instruction *Inst, Type *NewTy);

  /// Same as IRBuilder::createTrunc.
  Value *createTrunc(Instruction *Opnd, Type *Ty);

  /// Same as IRBuilder::createSExt.
  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);

  /// Same as IRBuilder::createZExt.
  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);

  /// Same as Instruction::moveBefore.
  void moveBefore(Instruction *Inst, Instruction *Before);
  /// @}

private:
  /// The ordered list of actions made so far.
  SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;

  using CommitPt =
      SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;

  SetOfInstrs &RemovedInsts;
};

} // end anonymous namespace
void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
                                          Value *NewVal) {
  Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
      Inst, Idx, NewVal));
}

void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
                                                Value *NewVal) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::InstructionRemover>(
          Inst, RemovedInsts, NewVal));
}

void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
                                                  Value *New) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
}

void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
}

Value *TypePromotionTransaction::createTrunc(Instruction *Opnd,
                                             Type *Ty) {
  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createSExt(Instruction *Inst,
                                            Value *Opnd, Type *Ty) {
  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createZExt(Instruction *Inst,
                                            Value *Opnd, Type *Ty) {
  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

void TypePromotionTransaction::moveBefore(Instruction *Inst,
                                          Instruction *Before) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::InstructionMoveBefore>(
          Inst, Before));
}

TypePromotionTransaction::ConstRestorationPt
TypePromotionTransaction::getRestorationPoint() const {
  return !Actions.empty() ? Actions.back().get() : nullptr;
}

bool TypePromotionTransaction::commit() {
  for (std::unique_ptr<TypePromotionAction> &Action : Actions)
    Action->commit();
  bool Modified = !Actions.empty();
  Actions.clear();
  return Modified;
}

void TypePromotionTransaction::rollback(
    TypePromotionTransaction::ConstRestorationPt Point) {
  while (!Actions.empty() && Point != Actions.back().get()) {
    std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
    Curr->undo();
  }
}
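// Typical usage sketch (illustrative only): callers take a restoration point
// before speculative promotion and roll back if the result is unprofitable.
//
//   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
//       TPT.getRestorationPoint();
//   // ... mutate IR through TPT.setOperand / TPT.mutateType / ...
//   if (!Profitable)                 // 'Profitable' is a placeholder check
//     TPT.rollback(LastKnownGood);   // undo everything after the point
//   else
//     TPT.commit();                  // keep the changes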
namespace {

/// A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
class AddressingModeMatcher {
  SmallVectorImpl<Instruction *> &AddrModeInsts;
  const TargetLowering &TLI;
  const TargetRegisterInfo &TRI;
  const DataLayout &DL;
  const LoopInfo &LI;
  const std::function<const DominatorTree &()> getDTFn;

  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
  /// the memory instruction that we're computing this address for.
  Type *AccessTy;
  unsigned AddrSpace;
  Instruction *MemoryInst;

  /// This is the addressing mode that we're building up. This is
  /// part of the return value of this addressing mode matching stuff.
  ExtAddrMode &AddrMode;

  /// The instructions inserted by other CodeGenPrepare optimizations.
  const SetOfInstrs &InsertedInsts;

  /// A map from the instructions to their type before promotion.
  InstrToOrigTy &PromotedInsts;

  /// The ongoing transaction where every action should be registered.
  TypePromotionTransaction &TPT;

  // A GEP which has too large offset to be folded into the addressing mode.
  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;

  /// This is set to true when we should not do profitability checks.
  /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
  bool IgnoreProfitability;

  /// True if we are optimizing for size.
  bool OptSize;

  ProfileSummaryInfo *PSI;
  BlockFrequencyInfo *BFI;

  AddressingModeMatcher(
      SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
      const TargetRegisterInfo &TRI, const LoopInfo &LI,
      const std::function<const DominatorTree &()> getDTFn,
      Type *AT, unsigned AS, Instruction *MI, ExtAddrMode &AM,
      const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
      TypePromotionTransaction &TPT,
      std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
      bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
      : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
        DL(MI->getModule()->getDataLayout()), LI(LI), getDTFn(getDTFn),
        AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
        InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
        LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
    IgnoreProfitability = false;
  }

public:
  /// Find the maximal addressing mode that a load/store of V can fold,
  /// give an access type of AccessTy. This returns a list of involved
  /// instructions in AddrModeInsts.
  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
  /// optimizations.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p The ongoing transaction where every action should be registered.
  static ExtAddrMode
  Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
        SmallVectorImpl<Instruction *> &AddrModeInsts,
        const TargetLowering &TLI, const LoopInfo &LI,
        const std::function<const DominatorTree &()> getDTFn,
        const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
        InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
        std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
        bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
    ExtAddrMode Result;

    bool Success = AddressingModeMatcher(
        AddrModeInsts, TLI, TRI, LI, getDTFn, AccessTy, AS, MemoryInst, Result,
        InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
        BFI).matchAddr(V, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");
    return Result;
  }

private:
  bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  bool matchAddr(Value *Addr, unsigned Depth);
  bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
                          bool *MovedAway = nullptr);
  bool isProfitableToFoldIntoAddressingMode(Instruction *I,
                                            ExtAddrMode &AMBefore,
                                            ExtAddrMode &AMAfter);
  bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
  bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
                             Value *PromotedOperand) const;
};
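// Usage sketch (illustrative, placeholder names): the pass invokes the static
// entry point with the pointer operand of a load/store and the per-function
// state it threads through, roughly:
//
//   ExtAddrMode AM = AddressingModeMatcher::Match(
//       Addr, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI,
//       GetDominatorTree, *TRI, InsertedInsts, PromotedInsts, TPT,
//       LargeOffsetGEP, OptSize, PSI, BFI.get());
//
// See optimizeMemoryInst() for the real call site and how the resulting
// addressing mode is folded into the memory instruction.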
3205 /// An iterator for PhiNodeSet.
3206 class PhiNodeSetIterator
{
3207 PhiNodeSet
* const Set
;
3208 size_t CurrentIndex
= 0;
3211 /// The constructor. Start should point to either a valid element, or be equal
3212 /// to the size of the underlying SmallVector of the PhiNodeSet.
3213 PhiNodeSetIterator(PhiNodeSet
* const Set
, size_t Start
);
3214 PHINode
* operator*() const;
3215 PhiNodeSetIterator
& operator++();
3216 bool operator==(const PhiNodeSetIterator
&RHS
) const;
3217 bool operator!=(const PhiNodeSetIterator
&RHS
) const;
3220 /// Keeps a set of PHINodes.
3222 /// This is a minimal set implementation for a specific use case:
3223 /// It is very fast when there are very few elements, but also provides good
3224 /// performance when there are many. It is similar to SmallPtrSet, but also
3225 /// provides iteration by insertion order, which is deterministic and stable
3226 /// across runs. It is also similar to SmallSetVector, but provides removing
3227 /// elements in O(1) time. This is achieved by not actually removing the element
3228 /// from the underlying vector, so comes at the cost of using more memory, but
3229 /// that is fine, since PhiNodeSets are used as short lived objects.
3231 friend class PhiNodeSetIterator
;
3233 using MapType
= SmallDenseMap
<PHINode
*, size_t, 32>;
3234 using iterator
= PhiNodeSetIterator
;
3236 /// Keeps the elements in the order of their insertion in the underlying
3237 /// vector. To achieve constant time removal, it never deletes any element.
3238 SmallVector
<PHINode
*, 32> NodeList
;
3240 /// Keeps the elements in the underlying set implementation. This (and not the
3241 /// NodeList defined above) is the source of truth on whether an element
3242 /// is actually in the collection.
3245 /// Points to the first valid (not deleted) element when the set is not empty
3246 /// and the value is not zero. Equals to the size of the underlying vector
3247 /// when the set is empty. When the value is 0, as in the beginning, the
3248 /// first element may or may not be valid.
3249 size_t FirstValidElement
= 0;
public:
  /// Inserts a new element to the collection.
  /// \returns true if the element is actually added, i.e. was not in the
  /// collection before the operation.
  bool insert(PHINode *Ptr) {
    if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
      NodeList.push_back(Ptr);
      return true;
    }
    return false;
  }

  /// Removes the element from the collection.
  /// \returns whether the element is actually removed, i.e. was in the
  /// collection before the operation.
  bool erase(PHINode *Ptr) {
    if (NodeMap.erase(Ptr)) {
      SkipRemovedElements(FirstValidElement);
      return true;
    }
    return false;
  }

  /// Removes all elements and clears the collection.
  void clear() {
    NodeMap.clear();
    NodeList.clear();
    FirstValidElement = 0;
  }

  /// \returns an iterator that will iterate the elements in the order of
  /// insertion.
  iterator begin() {
    if (FirstValidElement == 0)
      SkipRemovedElements(FirstValidElement);
    return PhiNodeSetIterator(this, FirstValidElement);
  }

  /// \returns an iterator that points to the end of the collection.
  iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }

  /// Returns the number of elements in the collection.
  size_t size() const {
    return NodeMap.size();
  }

  /// \returns 1 if the given element is in the collection, and 0 if otherwise.
  size_t count(PHINode *Ptr) const {
    return NodeMap.count(Ptr);
  }

private:
  /// Updates the CurrentIndex so that it will point to a valid element.
  ///
  /// If the element of NodeList at CurrentIndex is valid, it does not
  /// change it. If there are no more valid elements, it updates CurrentIndex
  /// to point to the end of the NodeList.
  void SkipRemovedElements(size_t &CurrentIndex) {
    while (CurrentIndex < NodeList.size()) {
      auto it = NodeMap.find(NodeList[CurrentIndex]);
      // If the element has been deleted and added again later, NodeMap will
      // point to a different index, so CurrentIndex will still be invalid.
      if (it != NodeMap.end() && it->second == CurrentIndex)
        break;
      ++CurrentIndex;
    }
  }
};
PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
    : Set(Set), CurrentIndex(Start) {}

PHINode *PhiNodeSetIterator::operator*() const {
  assert(CurrentIndex < Set->NodeList.size() &&
         "PhiNodeSet access out of range");
  return Set->NodeList[CurrentIndex];
}

PhiNodeSetIterator &PhiNodeSetIterator::operator++() {
  assert(CurrentIndex < Set->NodeList.size() &&
         "PhiNodeSet access out of range");
  ++CurrentIndex;
  Set->SkipRemovedElements(CurrentIndex);
  return *this;
}

bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
  return CurrentIndex == RHS.CurrentIndex;
}

bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
  return !((*this) == RHS);
}
/// Keep track of simplification of Phi nodes.
/// Accept the set of all phi nodes and erase phi node from this set
/// if it is simplified.
class SimplificationTracker {
  DenseMap<Value *, Value *> Storage;
  const SimplifyQuery &SQ;
  // Tracks newly created Phi nodes. The elements are iterated by insertion
  // order.
  PhiNodeSet AllPhiNodes;
  // Tracks newly created Select nodes.
  SmallPtrSet<SelectInst *, 32> AllSelectNodes;

public:
  SimplificationTracker(const SimplifyQuery &sq) : SQ(sq) {}

  Value *Get(Value *V) {
    do {
      auto SV = Storage.find(V);
      if (SV == Storage.end())
        return V;
      V = SV->second;
    } while (true);
  }

  Value *Simplify(Value *Val) {
    SmallVector<Value *, 32> WorkList;
    SmallPtrSet<Value *, 32> Visited;
    WorkList.push_back(Val);
    while (!WorkList.empty()) {
      auto *P = WorkList.pop_back_val();
      if (!Visited.insert(P).second)
        continue;
      if (auto *PI = dyn_cast<Instruction>(P))
        if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) {
          for (auto *U : PI->users())
            WorkList.push_back(cast<Value>(U));
          Put(PI, V);
          PI->replaceAllUsesWith(V);
          if (auto *PHI = dyn_cast<PHINode>(PI))
            AllPhiNodes.erase(PHI);
          if (auto *Select = dyn_cast<SelectInst>(PI))
            AllSelectNodes.erase(Select);
          PI->eraseFromParent();
        }
    }
    return Get(Val);
  }

  void Put(Value *From, Value *To) {
    Storage.insert({ From, To });
  }

  void ReplacePhi(PHINode *From, PHINode *To) {
    Value *OldReplacement = Get(From);
    while (OldReplacement != From) {
      From = To;
      To = dyn_cast<PHINode>(OldReplacement);
      OldReplacement = Get(From);
    }
    assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
    Put(From, To);
    From->replaceAllUsesWith(To);
    AllPhiNodes.erase(From);
    From->eraseFromParent();
  }

  PhiNodeSet &newPhiNodes() { return AllPhiNodes; }

  void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }

  void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }

  unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }

  unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }

  void destroyNewNodes(Type *CommonType) {
    // For safe erasing, replace the uses with dummy value first.
    auto *Dummy = UndefValue::get(CommonType);
    for (auto *I : AllPhiNodes) {
      I->replaceAllUsesWith(Dummy);
      I->eraseFromParent();
    }
    AllPhiNodes.clear();
    for (auto *I : AllSelectNodes) {
      I->replaceAllUsesWith(Dummy);
      I->eraseFromParent();
    }
    AllSelectNodes.clear();
  }
};
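// Example (illustrative, with hypothetical values A, B and C): Put() records
// replacements, and Get() chases the chain to whatever value is still live.
//
//   ST.Put(A, B);   // A was simplified to B
//   ST.Put(B, C);   // later B itself was simplified to C
//   ST.Get(A);      // returns C, the surviving replacement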
/// A helper class for combining addressing modes.
class AddressingModeCombiner {
  typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
  typedef std::pair<PHINode *, PHINode *> PHIPair;

private:
  /// The addressing modes we've collected.
  SmallVector<ExtAddrMode, 16> AddrModes;

  /// The field in which the AddrModes differ, when we have more than one.
  ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;

  /// Are the AddrModes that we have all just equal to their original values?
  bool AllAddrModesTrivial = true;

  /// Common Type for all different fields in addressing modes.
  Type *CommonType;

  /// SimplifyQuery for simplifyInstruction utility.
  const SimplifyQuery &SQ;

  /// Original Address.
  Value *Original;

public:
  AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
      : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {}

  /// Get the combined AddrMode
  const ExtAddrMode &getAddrMode() const {
    return AddrModes[0];
  }

  /// Add a new AddrMode if it's compatible with the AddrModes we already
  /// have.
  /// \return True iff we succeeded in doing so.
  bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
    // Take note of if we have any non-trivial AddrModes, as we need to detect
    // when all AddrModes are trivial as then we would introduce a phi or select
    // which just duplicates what's already there.
    AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();

    // If this is the first addrmode then everything is fine.
    if (AddrModes.empty()) {
      AddrModes.emplace_back(NewAddrMode);
      return true;
    }

    // Figure out how different this is from the other address modes, which we
    // can do just by comparing against the first one given that we only care
    // about the cumulative difference.
    ExtAddrMode::FieldName ThisDifferentField =
        AddrModes[0].compare(NewAddrMode);
    if (DifferentField == ExtAddrMode::NoField)
      DifferentField = ThisDifferentField;
    else if (DifferentField != ThisDifferentField)
      DifferentField = ExtAddrMode::MultipleFields;

    // If NewAddrMode differs in more than one dimension we cannot handle it.
    bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;

    // If Scale Field is different then we reject.
    CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;

    // We also must reject the case when the base offset is different and the
    // scale reg is not null, as we cannot handle merging the different offsets
    // into the ScaleReg.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
                              !NewAddrMode.ScaledReg);

    // We also must reject the case when the GV is different and a BaseReg is
    // installed, because we want to use the base reg as a merge of GV values.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
                              !NewAddrMode.HasBaseReg);

    // Even if NewAddrMode is the same we still need to collect it because the
    // original value is different. And later we will need all original values
    // as anchors during finding the common Phi node.
    if (CanHandle)
      AddrModes.emplace_back(NewAddrMode);
    else
      AddrModes.clear();

    return CanHandle;
  }

  /// Combine the addressing modes we've collected into a single
  /// addressing mode.
  /// \return True iff we successfully combined them or we only had one so
  /// didn't need to combine them anyway.
  bool combineAddrModes() {
    // If we have no AddrModes then they can't be combined.
    if (AddrModes.size() == 0)
      return false;

    // A single AddrMode can trivially be combined.
    if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
      return true;

    // If the AddrModes we collected are all just equal to the value they are
    // derived from then combining them wouldn't do anything useful.
    if (AllAddrModesTrivial)
      return false;

    if (!addrModeCombiningAllowed())
      return false;

    // Build a map between <original value, basic block where we saw it> to
    // value of base register.
    // Bail out if there is no common type.
    FoldAddrToValueMapping Map;
    if (!initializeMap(Map))
      return false;

    Value *CommonValue = findCommon(Map);
    if (CommonValue)
      AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
    return CommonValue != nullptr;
  }

private:
  /// Initialize Map with anchor values. For each address seen
  /// we set the value of the differing field seen in this address.
  /// At the same time we find a common type for the differing field we will
  /// use to create new Phi/Select nodes. Keep it in CommonType field.
  /// Return false if there is no common type found.
  bool initializeMap(FoldAddrToValueMapping &Map) {
    // Keep track of keys where the value is null. We will need to replace it
    // with constant null when we know the common type.
    SmallVector<Value *, 2> NullValue;
    Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
    for (auto &AM : AddrModes) {
      Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
      if (DV) {
        auto *Type = DV->getType();
        if (CommonType && CommonType != Type)
          return false;
        CommonType = Type;
        Map[AM.OriginalValue] = DV;
      } else {
        NullValue.push_back(AM.OriginalValue);
      }
    }
    assert(CommonType && "At least one non-null value must be!");
    for (auto *V : NullValue)
      Map[V] = Constant::getNullValue(CommonType);
    return true;
  }
  /// We have mapping between value A and other value B where B was a field in
  /// addressing mode represented by A. Also we have an original value C
  /// representing an address we start with. Traversing from C through phi and
  /// selects we ended up with A's in a map. This utility function tries to
  /// find a value V which is a field in addressing mode C and, traversing
  /// through phi nodes and selects, we will end up in the corresponding
  /// values B in a map.
  /// The utility will create new Phi/Selects if needed.
  // The simple example looks as follows:
  //   BB3:
  //     p = phi [p1, BB1], [p2, BB2]
  //     v = load p
  //   Map is p1 -> b1, p2 -> b2.
  // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
  Value *findCommon(FoldAddrToValueMapping &Map) {
    // Tracks the simplification of newly created phi nodes. The reason we use
    // this mapping is because we will add new created Phi nodes in AddrToBase.
    // Simplification of Phi nodes is recursive, so some Phi node may
    // be simplified after we added it to AddrToBase. In reality this
    // simplification is possible only if original phi/selects were not
    // simplified yet.
    // Using this mapping we can find the current value in AddrToBase.
    SimplificationTracker ST(SQ);

    // First step, DFS to create PHI nodes for all intermediate blocks.
    // Also fill traverse order for the second step.
    SmallVector<Value *, 32> TraverseOrder;
    InsertPlaceholders(Map, TraverseOrder, ST);

    // Second Step, fill new nodes by merged values and simplify if possible.
    FillPlaceholders(Map, TraverseOrder, ST);

    if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
      ST.destroyNewNodes(CommonType);
      return nullptr;
    }

    // Now we'd like to match New Phi nodes to existed ones.
    unsigned PhiNotMatchedCount = 0;
    if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
      ST.destroyNewNodes(CommonType);
      return nullptr;
    }

    auto *Result = ST.Get(Map.find(Original)->second);
    if (Result) {
      NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
      NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
    }
    return Result;
  }
  /// Try to match PHI node to Candidate.
  /// Matcher tracks the matched Phi nodes.
  bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
                    SmallSetVector<PHIPair, 8> &Matcher,
                    PhiNodeSet &PhiNodesToMatch) {
    SmallVector<PHIPair, 8> WorkList;
    Matcher.insert({ PHI, Candidate });
    SmallSet<PHINode *, 8> MatchedPHIs;
    MatchedPHIs.insert(PHI);
    WorkList.push_back({ PHI, Candidate });
    SmallSet<PHIPair, 8> Visited;
    while (!WorkList.empty()) {
      auto Item = WorkList.pop_back_val();
      if (!Visited.insert(Item).second)
        continue;
      // We iterate over all incoming values to Phi to compare them.
      // If values are different and both of them are Phis, and the first one
      // is a Phi we added (subject to match), and both of them are in the same
      // basic block, then we can match our pair if values match. So we state
      // that these values match and add it to the work list to verify that.
      for (auto B : Item.first->blocks()) {
        Value *FirstValue = Item.first->getIncomingValueForBlock(B);
        Value *SecondValue = Item.second->getIncomingValueForBlock(B);
        if (FirstValue == SecondValue)
          continue;

        PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
        PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);

        // One of them is not Phi or
        // The first one is not Phi node from the set we'd like to match or
        // Phi nodes from different basic blocks then
        // we will not be able to match.
        if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
            FirstPhi->getParent() != SecondPhi->getParent())
          return false;

        // If we already matched them then continue.
        if (Matcher.count({ FirstPhi, SecondPhi }))
          continue;
        // So the values are different and do not match. So we need them to
        // match. (But we register no more than one match per PHI node, so that
        // we won't later try to replace them twice.)
        if (MatchedPHIs.insert(FirstPhi).second)
          Matcher.insert({ FirstPhi, SecondPhi });
        // But we must check it.
        WorkList.push_back({ FirstPhi, SecondPhi });
      }
    }
    return true;
  }
  /// For the given set of PHI nodes (in the SimplificationTracker) try
  /// to find their equivalents.
  /// Returns false if this matching fails and creation of new Phi is disabled.
  bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
                   unsigned &PhiNotMatchedCount) {
    // Matched and PhiNodesToMatch iterate their elements in a deterministic
    // order, so the replacements (ReplacePhi) are also done in a deterministic
    // order.
    SmallSetVector<PHIPair, 8> Matched;
    SmallPtrSet<PHINode *, 8> WillNotMatch;
    PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
    while (PhiNodesToMatch.size()) {
      PHINode *PHI = *PhiNodesToMatch.begin();

      // Add us, if no Phi nodes in the basic block we do not match.
      WillNotMatch.clear();
      WillNotMatch.insert(PHI);

      // Traverse all Phis until we found equivalent or fail to do that.
      bool IsMatched = false;
      for (auto &P : PHI->getParent()->phis()) {
        if (&P == PHI)
          continue;
        if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
          break;
        // If it does not match, collect all Phi nodes from matcher.
        // If we end up with no match, then all these Phi nodes will not match
        // later.
        for (auto M : Matched)
          WillNotMatch.insert(M.first);
        Matched.clear();
      }
      if (IsMatched) {
        // Replace all matched values and erase them.
        for (auto MV : Matched)
          ST.ReplacePhi(MV.first, MV.second);
        Matched.clear();
        continue;
      }
      // If we are not allowed to create new nodes then bail out.
      if (!AllowNewPhiNodes)
        return false;
      // Just remove all seen values in matcher. They will not match anything.
      PhiNotMatchedCount += WillNotMatch.size();
      for (auto *P : WillNotMatch)
        PhiNodesToMatch.erase(P);
    }
    return true;
  }
  /// Fill the placeholders with values from predecessors and simplify them.
  void FillPlaceholders(FoldAddrToValueMapping &Map,
                        SmallVectorImpl<Value *> &TraverseOrder,
                        SimplificationTracker &ST) {
    while (!TraverseOrder.empty()) {
      Value *Current = TraverseOrder.pop_back_val();
      assert(Map.find(Current) != Map.end() && "No node to fill!!!");
      Value *V = Map[Current];

      if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
        // CurrentValue also must be Select.
        auto *CurrentSelect = cast<SelectInst>(Current);
        auto *TrueValue = CurrentSelect->getTrueValue();
        assert(Map.find(TrueValue) != Map.end() && "No True Value!");
        Select->setTrueValue(ST.Get(Map[TrueValue]));
        auto *FalseValue = CurrentSelect->getFalseValue();
        assert(Map.find(FalseValue) != Map.end() && "No False Value!");
        Select->setFalseValue(ST.Get(Map[FalseValue]));
      } else {
        // Must be a Phi node then.
        auto *PHI = cast<PHINode>(V);
        // Fill the Phi node with values from predecessors.
        for (auto *B : predecessors(PHI->getParent())) {
          Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
          assert(Map.find(PV) != Map.end() && "No predecessor Value!");
          PHI->addIncoming(ST.Get(Map[PV]), B);
        }
      }
      Map[Current] = ST.Simplify(V);
    }
  }
  /// Starting from original value recursively iterates over def-use chain up
  /// to known ending values represented in a map. For each traversed phi/select
  /// inserts a placeholder Phi or Select.
  /// Reports all new created Phi/Select nodes by adding them to the set.
  /// Also reports the order in which values have been traversed.
  void InsertPlaceholders(FoldAddrToValueMapping &Map,
                          SmallVectorImpl<Value *> &TraverseOrder,
                          SimplificationTracker &ST) {
    SmallVector<Value *, 32> Worklist;
    assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
           "Address must be a Phi or Select node");
    auto *Dummy = UndefValue::get(CommonType);
    Worklist.push_back(Original);
    while (!Worklist.empty()) {
      Value *Current = Worklist.pop_back_val();
      // If it is already visited or it is an ending value then skip it.
      if (Map.find(Current) != Map.end())
        continue;
      TraverseOrder.push_back(Current);

      // CurrentValue must be a Phi node or select. All others must be covered
      // by anchors.
      if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
        // Is it OK to get metadata from OrigSelect?!
        // Create a Select placeholder with dummy value.
        SelectInst *Select = SelectInst::Create(
            CurrentSelect->getCondition(), Dummy, Dummy,
            CurrentSelect->getName(), CurrentSelect, CurrentSelect);
        Map[Current] = Select;
        ST.insertNewSelect(Select);
        // We are interested in True and False values.
        Worklist.push_back(CurrentSelect->getTrueValue());
        Worklist.push_back(CurrentSelect->getFalseValue());
      } else {
        // It must be a Phi node then.
        PHINode *CurrentPhi = cast<PHINode>(Current);
        unsigned PredCount = CurrentPhi->getNumIncomingValues();
        PHINode *PHI =
            PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi);
        Map[Current] = PHI;
        ST.insertNewPhi(PHI);
        append_range(Worklist, CurrentPhi->incoming_values());
      }
    }
  }
  bool addrModeCombiningAllowed() {
    if (DisableComplexAddrModes)
      return false;
    switch (DifferentField) {
    default:
      return false;
    case ExtAddrMode::BaseRegField:
      return AddrSinkCombineBaseReg;
    case ExtAddrMode::BaseGVField:
      return AddrSinkCombineBaseGV;
    case ExtAddrMode::BaseOffsField:
      return AddrSinkCombineBaseOffs;
    case ExtAddrMode::ScaledRegField:
      return AddrSinkCombineScaledReg;
    }
  }
};

} // end anonymous namespace
/// Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode. Just process that directly.
  if (Scale == 1)
    return matchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7. This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode. If we found available IV increment, do not
  // go any further: we can reuse it and cannot eliminate it.
  ConstantInt *CI = nullptr;
  Value *AddLHS = nullptr;
  if (isa<Instruction>(ScaleReg) && // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
      !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) {
    TestAddrMode.InBounds = false;
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
    // Restore status quo.
    TestAddrMode = AddrMode;
  }

  // If this is an add recurrence with a constant step, return the increment
  // instruction and the canonicalized step.
  auto GetConstantStep =
      [this](const Value *V) -> Optional<std::pair<Instruction *, APInt>> {
    auto *PN = dyn_cast<PHINode>(V);
    if (!PN)
      return None;
    auto IVInc = getIVIncrement(PN, &LI);
    if (!IVInc)
      return None;
    // TODO: The result of the intrinsics above is two's complement. However
    // when IV inc is expressed as add or sub, iv.next is potentially a poison
    // value. If it has nuw or nsw flags, we need to make sure that these flags
    // are inferrable at the point of the memory instruction. Otherwise we are
    // replacing a well-defined two's complement computation with poison.
    // Currently, to avoid the potentially complex analysis needed to prove
    // this, we reject such cases.
    if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first))
      if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap())
        return None;
    if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second))
      return std::make_pair(IVInc->first, ConstantStep->getValue());
    return None;
  };

  // Try to account for the following special case:
  // 1. ScaleReg is an inductive variable;
  // 2. We use it with non-zero offset;
  // 3. IV's increment is available at the point of memory instruction.
  //
  // In this case, we may reuse the IV increment instead of the IV Phi to
  // achieve the following advantages:
  // 1. If IV step matches the offset, we will have no need in the offset;
  // 2. Even if they don't match, we will reduce the overlap of living IV
  //    and IV increment, that will potentially lead to better register
  //    assignment.
  if (AddrMode.BaseOffs) {
    if (auto IVStep = GetConstantStep(ScaleReg)) {
      Instruction *IVInc = IVStep->first;
      // The following assert is important to ensure a lack of infinite loops.
      // This transform is (intentionally) the inverse of the one just above.
      // If they don't agree on the definition of an increment, we'd alternate
      // back and forth indefinitely.
      assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
      APInt Step = IVStep->second;
      APInt Offset = Step * AddrMode.Scale;
      if (Offset.isSignedIntN(64)) {
        TestAddrMode.InBounds = false;
        TestAddrMode.ScaledReg = IVInc;
        TestAddrMode.BaseOffs -= Offset.getLimitedValue();
        // If this addressing mode is legal, commit it..
        // (Note that we defer the (expensive) domtree base legality check
        // to the very last possible point.)
        if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) &&
            getDTFn().dominates(IVInc, MemoryInst)) {
          AddrModeInsts.push_back(cast<Instruction>(IVInc));
          AddrMode = TestAddrMode;
          return true;
        }
        // Restore status quo.
        TestAddrMode = AddrMode;
      }
    }
  }

  // Otherwise, just return what we have.
  return true;
}
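// Illustrative example of the IV-increment reuse performed above
// (hypothetical IR; whether the rewrite is kept depends on the target's legal
// addressing modes):
//
//   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i64 %iv, 1
//   ... address computed as %base + 8*%iv + 8 ...
//
// Since 8*%iv + 8 == 8*%iv.next, the matcher can express the address as
// %base + 8*%iv.next with no offset, shrinking the range over which %iv and
// %iv.next are simultaneously live.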
/// This is a little filter, which returns true if an addressing computation
/// involving I might be folded into a load/store accessing it.
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isIntOrPtrTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}
/// Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
static bool isPromotedInstructionLegal(const TargetLowering &TLI,
                                       const DataLayout &DL, Value *Val) {
  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
  if (!PromotedInst)
    return false;
  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
  // If the ISDOpcode is undefined, it was undefined before the promotion.
  if (!ISDOpcode)
    return true;
  // Otherwise, check if the promoted instruction is legal or not.
  return TLI.isOperationLegalOrCustom(
      ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
}
namespace {

/// Helper class to perform type promotion.
class TypePromotionHelper {
  /// Utility function to add a promoted instruction \p ExtOpnd to
  /// \p PromotedInsts and record the type of extension we have seen.
  static void addPromotedInst(InstrToOrigTy &PromotedInsts,
                              Instruction *ExtOpnd, bool IsSExt) {
    ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
    InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
    if (It != PromotedInsts.end()) {
      // If the new extension is same as original, the information in
      // PromotedInsts[ExtOpnd] is still correct.
      if (It->second.getInt() == ExtTy)
        return;

      // Now the new extension is different from old extension, we make
      // the type information invalid by setting extension type to
      // BothExtension.
      ExtTy = BothExtension;
    }
    PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
  }

  /// Utility function to query the original type of instruction \p Opnd
  /// with a matched extension type. If the extension doesn't match, we
  /// cannot use the information we had on the original type.
  /// BothExtension doesn't match any extension type.
  static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
                                 Instruction *Opnd, bool IsSExt) {
    ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
    InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
    if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
      return It->second.getPointer();
    return nullptr;
  }

  /// Utility function to check whether or not a sign or zero extension
  /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
  /// either using the operands of \p Inst or promoting \p Inst.
  /// The type of the extension is defined by \p IsSExt.
  /// In other words, check if:
  /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
  /// #1 Promotion applies:
  /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
  /// #2 Operand reuses:
  /// ext opnd1 to ConsideredExtType.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
                            const InstrToOrigTy &PromotedInsts, bool IsSExt);

  /// Utility function to determine if \p OpIdx should be promoted when
  /// promoting \p Inst.
  static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
    return !(isa<SelectInst>(Inst) && OpIdx == 0);
  }

  /// Utility function to promote the operand of \p Ext when this
  /// operand is a promotable trunc or sext or zext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForTruncAndAnyExt(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);

  /// Utility function to promote the operand of \p Ext when this
  /// operand is promotable and is not a supported trunc or sext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all the instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForOther(Instruction *Ext,
                                       TypePromotionTransaction &TPT,
                                       InstrToOrigTy &PromotedInsts,
                                       unsigned &CreatedInstsCost,
                                       SmallVectorImpl<Instruction *> *Exts,
                                       SmallVectorImpl<Instruction *> *Truncs,
                                       const TargetLowering &TLI, bool IsSExt);

  /// \see promoteOperandForOther.
  static Value *signExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, true);
  }

  /// \see promoteOperandForOther.
  static Value *zeroExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, false);
  }

public:
  /// Type for the utility function that promotes the operand of Ext.
  using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
                            InstrToOrigTy &PromotedInsts,
                            unsigned &CreatedInstsCost,
                            SmallVectorImpl<Instruction *> *Exts,
                            SmallVectorImpl<Instruction *> *Truncs,
                            const TargetLowering &TLI);

  /// Given a sign/zero extend instruction \p Ext, return the appropriate
  /// action to promote the operand of \p Ext instead of using Ext.
  /// \return NULL if no promotable action is possible with the current
  /// sign extension.
  /// \p InsertedInsts keeps track of all the instructions inserted by the
  /// other CodeGenPrepare optimizations. This information is important
  /// because we do not want to promote these instructions as CodeGenPrepare
  /// will reinsert them later. Thus creating an infinite loop: create/remove.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
                          const TargetLowering &TLI,
                          const InstrToOrigTy &PromotedInsts);
};
} // end anonymous namespace
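// Illustrative use of TypePromotionHelper::getAction (a sketch; Ext, TPT,
// PromotedInsts, InsertedInsts, Cost and TLI stand for values the caller
// already has in scope, mirroring the call site in matchOperationAddr below):
//
//   TypePromotionHelper::Action TPH =
//       TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
//   if (TPH) {
//     Value *Promoted = TPH(Ext, TPT, PromotedInsts, Cost,
//                           /*Exts=*/nullptr, /*Truncs=*/nullptr, TLI);
//     // ... decide whether keeping the promotion is profitable ...
//   }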
bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
                                        Type *ConsideredExtType,
                                        const InstrToOrigTy &PromotedInsts,
                                        bool IsSExt) {
  // The promotion helper does not know how to deal with vector types yet.
  // To be able to fix that, we would need to fix the places where we
  // statically extend, e.g., constants and such.
  if (Inst->getType()->isVectorTy())
    return false;

  // We can always get through zext.
  if (isa<ZExtInst>(Inst))
    return true;

  // sext(sext) is ok too.
  if (IsSExt && isa<SExtInst>(Inst))
    return true;

  // We can get through binary operator, if it is legal. In other words, the
  // binary operator must have a nuw or nsw flag.
  const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
  if (isa_and_nonnull<OverflowingBinaryOperator>(BinOp) &&
      ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
       (IsSExt && BinOp->hasNoSignedWrap())))
    return true;

  // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
  if ((Inst->getOpcode() == Instruction::And ||
       Inst->getOpcode() == Instruction::Or))
    return true;

  // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
  if (Inst->getOpcode() == Instruction::Xor) {
    const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1));
    // Make sure it is not a NOT.
    if (Cst && !Cst->getValue().isAllOnesValue())
      return true;
  }

  // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst))
  // It may change a poisoned value into a regular value, like
  //     zext i32 (shrl i8 %val, 12)  -->  shrl i32 (zext i8 %val), 12
  //          poisoned value                    regular value
  // It should be OK since undef covers valid value.
  if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
    return true;

  // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
  // It may change a poisoned value into a regular value, like
  //     zext i32 (shl i8 %val, 12)  -->  shl i32 (zext i8 %val), 12
  //          poisoned value                    regular value
  // It should be OK since undef covers valid value.
  if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
    const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
    if (ExtInst->hasOneUse()) {
      const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
      if (AndInst && AndInst->getOpcode() == Instruction::And) {
        const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
        if (Cst &&
            Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
          return true;
      }
    }
  }

  // Check if we can do the following simplification.
  // ext(trunc(opnd)) --> ext(opnd)
  if (!isa<TruncInst>(Inst))
    return false;

  Value *OpndVal = Inst->getOperand(0);
  // Check if we can use this operand in the extension.
  // If the type is larger than the result type of the extension, we cannot.
  if (!OpndVal->getType()->isIntegerTy() ||
      OpndVal->getType()->getIntegerBitWidth() >
          ConsideredExtType->getIntegerBitWidth())
    return false;

  // If the operand of the truncate is not an instruction, we will not have
  // any information on the dropped bits.
  // (Actually we could for constant but it is not worth the extra logic).
  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
  if (!Opnd)
    return false;

  // Check if the source of the type is narrow enough.
  // I.e., check that trunc just drops extended bits of the same kind of
  // the extension.
  // #1 get the type of the operand and check the kind of the extended bits.
  const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
  if (OpndType)
    ;
  else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
    OpndType = Opnd->getOperand(0)->getType();
  else
    return false;

  // #2 check that the truncate just drops extended bits.
  return Inst->getType()->getIntegerBitWidth() >=
         OpndType->getIntegerBitWidth();
}
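// Worked example for the ext(trunc(opnd)) --> ext(opnd) case handled above
// (illustrative IR, assuming the extension being matched is a sext to i64):
//
//   %o = sext i16 %x to i32    ; Opnd, original type i16
//   %t = trunc i32 %o to i24   ; Inst, drops only bits produced by the sext
//   %e = sext i24 %t to i64    ; Ext whose operand we want to promote
//
// Here OpndType is i16 and Inst's type i24 is at least as wide, so the trunc
// only removes sign-extended bits and the promotion can look through it.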
TypePromotionHelper::Action TypePromotionHelper::getAction(
    Instruction *Ext, const SetOfInstrs &InsertedInsts,
    const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
         "Unexpected instruction type");
  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
  Type *ExtTy = Ext->getType();
  bool IsSExt = isa<SExtInst>(Ext);
  // If the operand of the extension is not an instruction, we cannot
  // get through.
  // If it is, check that we can get through.
  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
    return nullptr;

  // Do not promote if the operand has been added by codegenprepare.
  // Otherwise, it means we are undoing an optimization that is likely to be
  // redone, thus causing potential infinite loop.
  if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
    return nullptr;

  // SExt or Trunc instructions.
  // Return the related handler.
  if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
      isa<ZExtInst>(ExtOpnd))
    return promoteOperandForTruncAndAnyExt;

  // Regular instruction.
  // Abort early if we will have to insert non-free instructions.
  if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
    return nullptr;
  return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
}
Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
    Instruction *SExt, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
  // By construction, the operand of SExt is an instruction. Otherwise we cannot
  // get through it and this method should not be called.
  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
  Value *ExtVal = SExt;
  bool HasMergedNonFreeExt = false;
  if (isa<ZExtInst>(SExtOpnd)) {
    // Replace s|zext(zext(opnd))
    // => zext(opnd).
    HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
    Value *ZExt =
        TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
    TPT.replaceAllUsesWith(SExt, ZExt);
    TPT.eraseInstruction(SExt);
    ExtVal = ZExt;
  } else {
    // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
    // => z|sext(opnd).
    TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
  }
  CreatedInstsCost = 0;

  // Remove dead code.
  if (SExtOpnd->use_empty())
    TPT.eraseInstruction(SExtOpnd);

  // Check if the extension is still needed.
  Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
  if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
    if (ExtInst) {
      if (Exts)
        Exts->push_back(ExtInst);
      CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
    }
    return ExtVal;
  }

  // At this point we have: ext ty opnd to ty.
  // Reassign the uses of ExtInst to the opnd and remove ExtInst.
  Value *NextVal = ExtInst->getOperand(0);
  TPT.eraseInstruction(ExtInst, NextVal);
  return NextVal;
}
Value *TypePromotionHelper::promoteOperandForOther(
    Instruction *Ext, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
    bool IsSExt) {
  // By construction, the operand of Ext is an instruction. Otherwise we cannot
  // get through it and this method should not be called.
  Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
  CreatedInstsCost = 0;
  if (!ExtOpnd->hasOneUse()) {
    // ExtOpnd will be promoted.
    // All its uses, but Ext, will need to use a truncated value of the
    // promoted version.
    // Create the truncate now.
    Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
    if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
      // Insert it just after the definition.
      ITrunc->moveAfter(ExtOpnd);
      if (Truncs)
        Truncs->push_back(ITrunc);
    }

    TPT.replaceAllUsesWith(ExtOpnd, Trunc);
    // Restore the operand of Ext (which has been replaced by the previous call
    // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
    TPT.setOperand(Ext, 0, ExtOpnd);
  }

  // Get through the Instruction:
  // 1. Update its type.
  // 2. Replace the uses of Ext by Inst.
  // 3. Extend each operand that needs to be extended.
  //
  // Remember the original type of the instruction before promotion.
  // This is useful to know that the high bits are sign extended bits.
  addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
  // Step #1.
  TPT.mutateType(ExtOpnd, Ext->getType());
  // Step #2.
  TPT.replaceAllUsesWith(Ext, ExtOpnd);
  // Step #3.
  Instruction *ExtForOpnd = Ext;

  LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
  for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
       ++OpIdx) {
    LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
    if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
        !shouldExtOperand(ExtOpnd, OpIdx)) {
      LLVM_DEBUG(dbgs() << "No need to propagate\n");
      continue;
    }
    // Check if we can statically extend the operand.
    Value *Opnd = ExtOpnd->getOperand(OpIdx);
    if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
      LLVM_DEBUG(dbgs() << "Statically extend\n");
      unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
      APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
                            : Cst->getValue().zext(BitWidth);
      TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
      continue;
    }
    // UndefValue are typed, so we have to statically sign extend them.
    if (isa<UndefValue>(Opnd)) {
      LLVM_DEBUG(dbgs() << "Statically extend\n");
      TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
      continue;
    }

    // Otherwise we have to explicitly sign extend the operand.
    // Check if Ext was reused to extend an operand.
    if (!ExtForOpnd) {
      // If yes, create a new one.
      LLVM_DEBUG(dbgs() << "More operands to ext\n");
      Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
                                    : TPT.createZExt(Ext, Opnd, Ext->getType());
      if (!isa<Instruction>(ValForExtOpnd)) {
        TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
        continue;
      }
      ExtForOpnd = cast<Instruction>(ValForExtOpnd);
    }
    if (Exts)
      Exts->push_back(ExtForOpnd);
    TPT.setOperand(ExtForOpnd, 0, Opnd);

    // Move the sign extension before the insertion point.
    TPT.moveBefore(ExtForOpnd, ExtOpnd);
    TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
    CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
    // If more sext are required, new instructions will have to be created.
    ExtForOpnd = nullptr;
  }
  if (ExtForOpnd == Ext) {
    LLVM_DEBUG(dbgs() << "Extension is useless now\n");
    TPT.eraseInstruction(Ext);
  }
  return ExtOpnd;
}
/// Check whether or not promoting an instruction to a wider type is
/// profitable.
/// \p NewCost gives the cost of extension instructions created by the
/// promotion.
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been
/// matched in the addressing mode thanks to the promotion.
/// \p PromotedOperand is the value that has been promoted.
/// \return True if the promotion is profitable, false otherwise.
bool AddressingModeMatcher::isPromotionProfitable(
    unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
  LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
                    << '\n');
  // The cost of the new extensions is greater than the cost of the
  // old extension plus what we folded.
  // This is not profitable.
  if (NewCost > OldCost)
    return false;
  if (NewCost < OldCost)
    return true;
  // The promotion is neutral but it may help folding the sign extension in
  // loads for instance.
  // Check that we did not create an illegal instruction.
  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
}
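// Illustrative cost reading (hypothetical numbers): if the promotion creates
// one non-free extension (NewCost = 1) while the original extension was
// non-free and the promotion let two extra instructions fold into the
// addressing mode (OldCost = 1 + 2 = 3), then NewCost < OldCost and the
// promotion is kept.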
/// Given an instruction or constant expr, see if we can fold the operation
/// into the addressing mode. If so, update the addressing mode and return
/// true, otherwise return false without modifying AddrMode.
/// If \p MovedAway is not NULL, it contains the information of whether or
/// not AddrInst has to be folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// because it has been moved away.
/// Thus AddrInst must not be added in the matched instructions.
/// This state can happen when AddrInst is a sext, since it may be moved away.
/// Therefore, AddrInst may not be valid when MovedAway is true and it must
/// not be referenced anymore.
bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth,
                                               bool *MovedAway) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  // By default, all matched instructions stay in place.
  if (MovedAway)
    *MovedAway = false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return matchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr: {
    auto AS = AddrInst->getType()->getPointerAddressSpace();
    auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
        // Don't touch identity bitcasts.  These were probably put here by LSR,
        // and we don't want to mess around with them.  Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::AddrSpaceCast: {
    unsigned SrcAS =
        AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
    unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
    if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    // Start a transaction at this point.
    // The LHS may match but not the RHS.
    // Therefore, we need a higher level restoration point to undo partially
    // matched operation.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();

    AddrMode.InBounds = false;
    if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
        matchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);

    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
    if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
        matchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);
    break;
  }
  //case Instruction::Or:
  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    AddrMode.InBounds = false;
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS || RHS->getBitWidth() > 64)
      return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;

    return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP.  We check it if it contains constant offsets and at most
    // one variable offset.
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx =
            cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        TypeSize TS = DL.getTypeAllocSize(GTI.getIndexedType());
        if (TS.isNonZero()) {
          // The optimisations below currently only work for fixed offsets.
          if (TS.isScalable())
            return false;
          int64_t TypeSize = TS.getFixedSize();
          if (ConstantInt *CI =
                  dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
            const APInt &CVal = CI->getValue();
            if (CVal.getMinSignedBits() <= 64) {
              ConstantOffset += CVal.getSExtValue() * TypeSize;
              continue;
            }
          }
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset. In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 ||
          TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
        // Check to see if we can fold the base pointer in too.
        if (matchAddr(AddrInst->getOperand(0), Depth+1)) {
          if (!cast<GEPOperator>(AddrInst)->isInBounds())
            AddrMode.InBounds = false;
          return true;
        }
      } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
                 TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
                 ConstantOffset > 0) {
        // Record GEPs with non-zero offsets as candidates for splitting in the
        // event that the offset cannot fit into the r+i addressing mode.
        // Simple and common case that only one GEP is used in calculating the
        // address for the memory access.
        Value *Base = AddrInst->getOperand(0);
        auto *BaseI = dyn_cast<Instruction>(Base);
        auto *GEP = cast<GetElementPtrInst>(AddrInst);
        if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
            (BaseI && !isa<CastInst>(BaseI) &&
             !isa<GetElementPtrInst>(BaseI))) {
          // Make sure the parent block allows inserting non-PHI instructions
          // before the terminator.
          BasicBlock *Parent =
              BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock();
          if (!Parent->getTerminator()->isEHPad())
            LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
        }
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;
    if (!cast<GEPOperator>(AddrInst)->isInBounds())
      AddrMode.InBounds = false;

    // Match the base operand of the GEP.
    if (!matchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  case Instruction::SExt:
  case Instruction::ZExt: {
    Instruction *Ext = dyn_cast<Instruction>(AddrInst);
    if (!Ext)
      return false;

    // Try to move this ext out of the way of the addressing mode.
    // Ask for a method for doing so.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
    if (!TPH)
      return false;

    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    unsigned CreatedInstsCost = 0;
    unsigned ExtCost = !TLI.isExtFree(Ext);
    Value *PromotedOperand =
        TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
    // SExt has been moved away.
    // Thus either it will be rematched later in the recursive calls or it is
    // gone. Anyway, we must not fold it into the addressing mode at this point.
    //
    // E.g.,
    // op = add opnd, 1
    // idx = ext op
    // addr = gep base, idx
    // is now:
    // promotedOpnd = ext opnd            <- no match here
    // op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
    // addr = gep base, op                <- match
    if (MovedAway)
      *MovedAway = true;

    assert(PromotedOperand &&
           "TypePromotionHelper should have filtered out those cases");

    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    if (!matchAddr(PromotedOperand, Depth) ||
        // The total of the new cost is equal to the cost of the created
        // instructions.
        // The total of the old cost is equal to the cost of the extension plus
        // what we have saved in the addressing mode.
        !isPromotionProfitable(CreatedInstsCost,
                               ExtCost + (AddrModeInsts.size() - OldSize),
                               PromotedOperand)) {
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
      TPT.rollback(LastKnownGood);
      return false;
    }
    return true;
  }
  }
  return false;
}
/// If we can, try to add the value of 'Addr' into the current addressing mode.
/// If Addr can't be added to AddrMode this returns false and leaves AddrMode
/// unmodified. This assumes that Addr is either a pointer type or intptr_t
/// for the target.
///
bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
  // Start a transaction at this point that we will rollback if the matching
  // fails.
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    if (CI->getValue().isSignedIntN(64)) {
      // Fold in immediates if legal for the target.
      AddrMode.BaseOffs += CI->getSExtValue();
      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
        return true;
      AddrMode.BaseOffs -= CI->getSExtValue();
    }
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (!AddrMode.BaseGV) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
        return true;
      AddrMode.BaseGV = nullptr;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    bool MovedAway = false;
    if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
      // This instruction may have been moved away. If so, there is nothing
      // to check.
      if (MovedAway)
        return true;
      // Okay, it's possible to fold this.  Check to see if it is actually
      // *profitable* to do so.  We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      //cerr << "NOT FOLDING: " << *I;
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      TPT.rollback(LastKnownGood);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (matchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
    TPT.rollback(LastKnownGood);
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = nullptr;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = nullptr;
  }
  TPT.rollback(LastKnownGood);
  return false;
}
/// Check to see if all uses of OpVal by the specified inline asm call are due
/// to memory operands. If so, return true, otherwise return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI,
                                    const TargetRegisterInfo &TRI) {
  const Function *F = CI->getFunction();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI);

  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}
// Max number of memory uses to look at before aborting the search to conserve
// compile time.
static constexpr int MaxMemoryUsesToScan = 20;

/// Recursively walk all the uses of I until we find a memory use.
/// If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(
    Instruction *I,
    SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
    SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
    const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, int SeenInsts = 0) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I).second)
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Use &U : I->uses()) {
    // Conservatively return true if we're seeing a large number or a deep chain
    // of users. This avoids excessive compilation times in pathological cases.
    if (SeenInsts++ >= MaxMemoryUsesToScan)
      return true;

    Instruction *UserI = cast<Instruction>(U.getUser());
    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
      MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != StoreInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, opNo));
      continue;
    }

    if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != AtomicRMWInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(RMW, opNo));
      continue;
    }

    if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != AtomicCmpXchgInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(CmpX, opNo));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
      if (CI->hasFnAttr(Attribute::Cold)) {
        // If this is a cold call, we can sink the addressing calculation into
        // the cold path. See optimizeCallInst
        bool OptForSize =
            OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
        if (!OptForSize)
          continue;
      }

      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
      if (!IA) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
                          PSI, BFI, SeenInsts))
      return true;
  }

  return false;
}
/// Return true if Val is already known to be live at the use site that we're
/// folding it into. If so, there is no cost to include it in the addressing
/// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
/// instruction already.
bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
                                                   Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant sized alloca in the entry block, it is live, this is
  // true because it is just a reference to the stack/frame pointer, which is
  // live for the whole function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block. If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  return Val->isUsedInBasicBlock(MemoryInst->getParent());
}
/// It is possible for the addressing mode of the machine to fold the specified
/// instruction into a load or store that ultimately uses it.
/// However, the specified instruction has multiple uses.
/// Given this, it may actually increase register pressure to fold it
/// into the load. For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line. If we don't fold Y into load Z, we use one
/// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case. This would make Y die earlier.
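//
// Illustrative IR sketch (not from the original source) of the same trade-off,
// assuming a target with [reg + imm] addressing:
//
//   %y = add i64 %x, 1          ; multiple uses
//   call void @use(i64 %y)      ; non-memory use keeps %y alive
//   %z = add i64 %y, 1
//   %p = inttoptr i64 %z to i32*
//   %v = load i32, i32* %p      ; could fold as [%x + 2]
//
// Folding %y and %z into the load keeps %x live across the call as well, so
// the fold is only considered profitable if every use of %y could sink into
// some memory access.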
bool AddressingModeMatcher::
isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded. Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = nullptr;
  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = nullptr;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (!BaseReg && !ScaledReg)
    return true;

  // If all uses of this instruction can have the address mode sunk into them,
  // we can remove the addressing mode and effectively trade one live register
  // for another (at worst.) In this context, folding an addressing mode into
  // the use is just a particularly nice way of sinking it.
  SmallVector<std::pair<Instruction *, unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction *, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
                        PSI, BFI))
    return false; // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these memory operation uses and see
  // if they could *actually* fold the instruction. The assumption is that
  // addressing modes are cheap and that duplicating the computation involved
  // many times is worthwhile, even on a fastpath. For sinking candidates
  // (i.e. cold call sites), this serves as a way to prevent excessive code
  // growth since most architectures have some reasonable small and fast way to
  // compute an effective address. (i.e. LEA on x86)
  SmallVector<Instruction *, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;

    // Get the access type of this use. If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
    if (!AddrTy)
      return false;
    Type *AddressAccessTy = AddrTy->getElementType();
    unsigned AS = AddrTy->getAddressSpace();

    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
                                                                      0);
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
                                  AddressAccessTy, AS, MemoryInst, Result,
                                  InsertedInsts, PromotedInsts, TPT,
                                  LargeOffsetGEP, OptSize, PSI, BFI);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.matchAddr(Address, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");

    // The match was to check the profitability, the changes made are not
    // part of the original matcher. Therefore, they should be dropped
    // otherwise the original matcher will not present the right state.
    TPT.rollback(LastKnownGood);

    // If the match didn't cover I, then it won't be shared by it.
    if (!is_contained(MatchedAddrModeInsts, I))
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}
/// Return true if the specified values are defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}
/// Sink addressing mode computation immediately before MemoryInst if doing so
/// can be done without increasing register pressure. The need for the
/// register pressure constraint means this can end up being an all or nothing
/// decision for all uses of the same addressing computation.
///
/// Load and Store Instructions often have addressing modes that can do
/// significant amounts of computation. As such, instruction selection will try
/// to get the load or store to do as much computation as possible for the
/// program. The problem is that isel can only see within a single block. As
/// such, we sink as much legal addressing mode work into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands. It's also used to sink addressing computations feeding into cold
/// call sites into their (cold) basic block.
///
/// The motivation for handling sinking into cold blocks is that doing so can
/// both enable other address mode sinking (by satisfying the register pressure
/// constraint above), and reduce register pressure globally (by removing the
/// addressing mode computation from the fast path entirely).
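//
// Illustrative sketch (not from the original source): given a hypothetical
// address computed in one block and used in another,
//
//   entry:
//     %idx  = shl i64 %i, 2
//     %gep  = getelementptr i8, i8* %base, i64 %idx
//     %addr = bitcast i8* %gep to i32*
//     br label %use
//   use:
//     %v = load i32, i32* %addr
//
// this routine re-emits the matched parts of the address ("sunkaddr"
// instructions) right before the load in %use, so instruction selection can
// fold them into the load's addressing mode.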
bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy, unsigned AddrSpace) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes. This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value *, 8> worklist;
  SmallPtrSet<Value *, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI and select nodes, and
  // ensure that the addressing mode obtained from the non-PHI/select roots of
  // the graph are compatible.
  bool PhiOrSelectSeen = false;
  SmallVector<Instruction *, 16> AddrModeInsts;
  const SimplifyQuery SQ(*DL, TLInfo);
  AddressingModeCombiner AddrModes(SQ, Addr);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  while (!worklist.empty()) {
    Value *V = worklist.back();
    worklist.pop_back();

    // We allow traversing cyclic Phi nodes.
    // In case of success after this loop we ensure that traversing through
    // Phi nodes ends up with all cases to compute address of the form
    //    BaseGV + Base + Scale * Index + Offset
    // where Scale and Offset are constants and BaseGV, Base and Index
    // are exactly the same Values in all cases.
    // It means that BaseGV, Scale and Offset dominate our memory instruction
    // and have the same value as they had in address computation represented
    // as Phi. So we can safely sink address computation to memory instruction.
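    //
    // Illustrative sketch (not from the original source): if both predecessors
    // compute the address from the same base and index,
    //
    //   bb1:   %a1 = getelementptr i8, i8* %base, i64 %idx
    //   bb2:   %a2 = getelementptr i8, i8* %base, i64 %idx
    //   merge: %a  = phi i8* [ %a1, %bb1 ], [ %a2, %bb2 ]
    //
    // every root of the traversal yields the same ExtAddrMode (Base = %base,
    // Index = %idx), so the computation can be re-emitted next to the memory
    // instruction instead of going through the phi.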
    if (!Visited.insert(V).second)
      continue;

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      append_range(worklist, P->incoming_values());
      PhiOrSelectSeen = true;
      continue;
    }
    // Similar for select.
    if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
      worklist.push_back(SI->getFalseValue());
      worklist.push_back(SI->getTrueValue());
      PhiOrSelectSeen = true;
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed. Note that
    // the result may differ depending on what other uses our candidate
    // addressing instructions might have.
    AddrModeInsts.clear();
    std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
                                                                      0);
    // Defer the query (and possible computation of) the dom tree to point of
    // actual use. It's expected that most address matches don't actually need
    // the domtree.
    auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
      Function *F = MemoryInst->getParent()->getParent();
      return this->getDT(*F);
    };
    ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
        V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
        *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
        BFI);

    GetElementPtrInst *GEP = LargeOffsetGEP.first;
    if (GEP && !NewGEPBases.count(GEP)) {
      // If splitting the underlying data structure can reduce the offset of a
      // GEP, collect the GEP. Skip the GEPs that are the new bases of
      // previously split data structures.
      LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
      if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end())
        LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size();
    }

    NewAddrMode.OriginalValue = V;
    if (!AddrModes.addNewAddrMode(NewAddrMode))
      break;
  }

  // Try to combine the AddrModes we've collected. If we couldn't collect any,
  // or we have multiple but either couldn't combine them or combining them
  // wouldn't do anything useful, bail out now.
  if (!AddrModes.combineAddrModes()) {
    TPT.rollback(LastKnownGood);
    return false;
  }
  bool Modified = TPT.commit();

  // Get the combined AddrMode (or the only AddrMode, if we only had one).
  ExtAddrMode AddrMode = AddrModes.getAddrMode();

  // If all the instructions matched are already in this BB, don't do anything.
  // If we saw a Phi node then it is definitely not local, and if we saw a select
  // then we want to push the address calculation past it even if it's already
  // in this BB.
  if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
        return IsNonLocalValue(V, MemoryInst->getParent());
      })) {
    LLVM_DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode
                      << "\n");
    return Modified;
  }

  // Insert this computation right after this user. Since our caller is
  // scanning from the top of the BB to the bottom, reuse of the expr are
  // guaranteed to happen later.
  IRBuilder<> Builder(MemoryInst);

  // Now that we determined the addressing expression we want to use and know
  // that we have to sink it into this block. Check to see if we have already
  // done this for some other load/store instr in this block. If so, reuse
  // the computation. Before attempting reuse, check if the address is valid
  // as it may have been erased.

  WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];

  Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
  if (SunkAddr) {
    LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
  } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
                                   SubtargetInfo->addrSinkUsingGEPs())) {
    // By default, we use the GEP-based method when AA is used later. This
    // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
    LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *ResultPtr = nullptr, *ResultIndex = nullptr;

    // First, find the pointer.
    if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
      ResultPtr = AddrMode.BaseReg;
      AddrMode.BaseReg = nullptr;
    }

    if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
      // We can't add more than one pointer together, nor can we scale a
      // pointer (both of which seem meaningless).
      if (ResultPtr || AddrMode.Scale != 1)
        return Modified;

      ResultPtr = AddrMode.ScaledReg;
      AddrMode.Scale = 0;
    }

    // It is only safe to sign extend the BaseReg if we know that the math
    // required to create it did not overflow before we extend it. Since
    // the original IR value was tossed in favor of a constant back when
    // the AddrMode was created we need to bail out gracefully if widths
    // do not match instead of extending it.
    //
    // (See below for code to add the scale.)
    if (AddrMode.Scale) {
      Type *ScaledRegTy = AddrMode.ScaledReg->getType();
      if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
          cast<IntegerType>(ScaledRegTy)->getBitWidth())
        return Modified;
    }

    if (AddrMode.BaseGV) {
      if (ResultPtr)
        return Modified;

      ResultPtr = AddrMode.BaseGV;
    }

    // If the real base value actually came from an inttoptr, then the matcher
    // will look through it and provide only the integer value. In that case,
    // use it here.
    if (!DL->isNonIntegralPointerType(Addr->getType())) {
      if (!ResultPtr && AddrMode.BaseReg) {
        ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
                                           "sunkaddr");
        AddrMode.BaseReg = nullptr;
      } else if (!ResultPtr && AddrMode.Scale == 1) {
        ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
                                           "sunkaddr");
        AddrMode.Scale = 0;
      }
    }

    if (!ResultPtr &&
        !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
      SunkAddr = Constant::getNullValue(Addr->getType());
    } else if (!ResultPtr) {
      return Modified;
    } else {
      Type *I8PtrTy =
          Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
      Type *I8Ty = Builder.getInt8Ty();

      // Start with the base register. Do this first so that subsequent address
      // matching finds it last, which will prevent it from trying to match it
      // as the scaled value in case it happens to be a mul. That would be
      // problematic if we've sunk a different mul for the scale, because then
      // we'd end up sinking both muls.
      if (AddrMode.BaseReg) {
        Value *V = AddrMode.BaseReg;
        if (V->getType() != IntPtrTy)
          V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");

        ResultIndex = V;
      }

      // Add the scale value.
      if (AddrMode.Scale) {
        Value *V = AddrMode.ScaledReg;
        if (V->getType() == IntPtrTy) {
          // done.
        } else {
          assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth() &&
                 "We can't transform if ScaledReg is too narrow");
          V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
        }

        if (AddrMode.Scale != 1)
          V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                                "sunkaddr");
        if (ResultIndex)
          ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
        else
          ResultIndex = V;
      }

      // Add in the Base Offset if present.
      if (AddrMode.BaseOffs) {
        Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
        if (ResultIndex) {
          // We need to add this separately from the scale above to help with
          // SDAG consecutive load/store merging.
          if (ResultPtr->getType() != I8PtrTy)
            ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
          ResultPtr =
              AddrMode.InBounds
                  ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex,
                                              "sunkaddr")
                  : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
        }

        ResultIndex = V;
      }

      if (!ResultIndex) {
        SunkAddr = ResultPtr;
      } else {
        if (ResultPtr->getType() != I8PtrTy)
          ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
        SunkAddr =
            AddrMode.InBounds
                ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex,
                                            "sunkaddr")
                : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
      }

      if (SunkAddr->getType() != Addr->getType())
        SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
    }
  } else {
    // We'd require a ptrtoint/inttoptr down the line, which we can't do for
    // non-integral pointers, so in that case bail out now.
    Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
    Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
    PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
    PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
    if (DL->isNonIntegralPointerType(Addr->getType()) ||
        (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
        (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
        (AddrMode.BaseGV &&
         DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
      return Modified;

    LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *Result = nullptr;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      if (V->getType() != IntPtrTy)
        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
      } else {
        // It is only safe to sign extend the BaseReg if we know that the math
        // required to create it did not overflow before we extend it. Since
        // the original IR value was tossed in favor of a constant back when
        // the AddrMode was created we need to bail out gracefully if widths
        // do not match instead of extending it.
        Instruction *I = dyn_cast_or_null<Instruction>(Result);
        if (I && (Result != AddrMode.BaseReg))
          I->eraseFromParent();
        return Modified;
      }
      if (AddrMode.Scale != 1)
        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                              "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    if (!Result)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
  }

  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
  // Store the newly computed address into the cache. In the case we reused a
  // value, this should be idempotent.
  SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Repl->use_empty()) {
    resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() {
      RecursivelyDeleteTriviallyDeadInstructions(
          Repl, TLInfo, nullptr,
          [&](Value *V) { removeAllAssertingVHReferences(V); });
    });
  }
  ++NumMemoryInsts;
  return true;
}
/// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find
/// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can
/// only handle a 2 operand GEP in the same basic block or a splat constant
/// vector. The 2 operands to the GEP must have a scalar pointer and a vector
/// of indices.
///
/// If the existing GEP has a vector base pointer that is splat, we can look
/// through the splat to find the scalar pointer. If we can't find a scalar
/// pointer there's nothing we can do.
///
/// If we have a GEP with more than 2 indices where the middle indices are all
/// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
///
/// If the final index isn't a vector or is a splat, we can emit a scalar GEP
/// followed by a GEP with an all zeroes vector index. This will enable
/// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
/// zero index.
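//
// Illustrative sketch (not from the original source), assuming a masked
// gather whose GEP has a splat vector base:
//
//   %addrs = getelementptr i32, <4 x i32*> %splat_of_p, <4 x i64> %idx
//   %v     = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %addrs, ...)
//
// can be rewritten to use the scalar pointer %p directly,
//
//   %addrs = getelementptr i32, i32* %p, <4 x i64> %idx
//
// which lets SelectionDAGBuilder treat %p as the uniform base.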
bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
                                               Value *Ptr) {
  Value *NewAddr;

  if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
    // Don't optimize GEPs that don't have indices.
    if (!GEP->hasIndices())
      return false;

    // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
    // FIXME: We should support this by sinking the GEP.
    if (MemoryInst->getParent() != GEP->getParent())
      return false;

    SmallVector<Value *, 2> Ops(GEP->operands());

    bool RewriteGEP = false;

    if (Ops[0]->getType()->isVectorTy()) {
      Ops[0] = getSplatValue(Ops[0]);
      if (!Ops[0])
        return false;
      RewriteGEP = true;
    }

    unsigned FinalIndex = Ops.size() - 1;

    // Ensure all but the last index is 0.
    // FIXME: This isn't strictly required. All that's required is that they are
    // all scalars or splats.
    for (unsigned i = 1; i < FinalIndex; ++i) {
      auto *C = dyn_cast<Constant>(Ops[i]);
      if (!C)
        return false;
      if (isa<VectorType>(C->getType()))
        C = C->getSplatValue();
      auto *CI = dyn_cast_or_null<ConstantInt>(C);
      if (!CI || !CI->isZero())
        return false;
      // Scalarize the index if needed.
      Ops[i] = CI;
    }

    // Try to scalarize the final index.
    if (Ops[FinalIndex]->getType()->isVectorTy()) {
      if (Value *V = getSplatValue(Ops[FinalIndex])) {
        auto *C = dyn_cast<ConstantInt>(V);
        // Don't scalarize all zeros vector.
        if (!C || !C->isZero()) {
          Ops[FinalIndex] = V;
          RewriteGEP = true;
        }
      }
    }

    // If we made any changes or we have extra operands, we need to generate
    // new instructions.
    if (!RewriteGEP && Ops.size() == 2)
      return false;

    auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();

    IRBuilder<> Builder(MemoryInst);

    Type *SourceTy = GEP->getSourceElementType();
    Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());

    // If the final index isn't a vector, emit a scalar GEP containing all ops
    // and a vector GEP with all zeroes final index.
    if (!Ops[FinalIndex]->getType()->isVectorTy()) {
      NewAddr = Builder.CreateGEP(SourceTy, Ops[0],
                                  makeArrayRef(Ops).drop_front());
      auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
      auto *SecondTy = GetElementPtrInst::getIndexedType(
          SourceTy, makeArrayRef(Ops).drop_front());
      NewAddr =
          Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy));
    } else {
      Value *Base = Ops[0];
      Value *Index = Ops[FinalIndex];

      // Create a scalar GEP if there are more than 2 operands.
      if (Ops.size() != 2) {
        // Replace the last index with 0.
        Ops[FinalIndex] = Constant::getNullValue(ScalarIndexTy);
        Base = Builder.CreateGEP(SourceTy, Base,
                                 makeArrayRef(Ops).drop_front());
        SourceTy = GetElementPtrInst::getIndexedType(
            SourceTy, makeArrayRef(Ops).drop_front());
      }

      // Now create the GEP with scalar pointer and vector index.
      NewAddr = Builder.CreateGEP(SourceTy, Base, Index);
    }
  } else if (!isa<Constant>(Ptr)) {
    // Not a GEP, maybe its a splat and we can create a GEP to enable
    // SelectionDAGBuilder to use it as a uniform base.
    Value *V = getSplatValue(Ptr);
    if (!V)
      return false;

    auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();

    IRBuilder<> Builder(MemoryInst);

    // Emit a vector GEP with a scalar pointer and all 0s vector index.
    Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType());
    auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
    Type *ScalarTy;
    if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
        Intrinsic::masked_gather) {
      ScalarTy = MemoryInst->getType()->getScalarType();
    } else {
      assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
             Intrinsic::masked_scatter);
      ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType();
    }
    NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy));
  } else {
    // Constant, SelectionDAGBuilder knows to check if its a splat.
    return false;
  }

  MemoryInst->replaceUsesOfWith(Ptr, NewAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Ptr->use_empty())
    RecursivelyDeleteTriviallyDeadInstructions(
        Ptr, TLInfo, nullptr,
        [&](Value *V) { removeAllAssertingVHReferences(V); });

  return true;
}
/// If there are any memory operands, use OptimizeMemoryInst to sink their
/// address computing into the block when possible / profitable.
bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  const TargetRegisterInfo *TRI =
      TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(*DL, TRI, *CS);
  unsigned ArgNo = 0;
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}
/// Check if all the uses of \p Val are equivalent (or free) zero or
/// sign extensions.
static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
  assert(!Val->use_empty() && "Input must have at least one use");
  const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
  bool IsSExt = isa<SExtInst>(FirstUser);
  Type *ExtTy = FirstUser->getType();
  for (const User *U : Val->users()) {
    const Instruction *UI = cast<Instruction>(U);
    if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
      return false;
    Type *CurTy = UI->getType();
    // Same input and output types: Same instruction after CSE.
    if (CurTy == ExtTy)
      continue;

    // If IsSExt is true, we are in this situation:
    //   a = ...
    //   b = sext ty1 a to ty2
    //   c = sext ty1 a to ty3
    // Assuming ty2 is shorter than ty3, this could be turned into:
    //   a = ...
    //   b = sext ty1 a to ty2
    //   c = sext ty2 b to ty3
    // However, the last sext is not free.
    if (IsSExt)
      return false;

    // This is a ZExt, maybe this is free to extend from one type to another.
    // In that case, we would not account for a different use.
    Type *NarrowTy;
    Type *LargeTy;
    if (ExtTy->getScalarType()->getIntegerBitWidth() >
        CurTy->getScalarType()->getIntegerBitWidth()) {
      NarrowTy = CurTy;
      LargeTy = ExtTy;
    } else {
      NarrowTy = ExtTy;
      LargeTy = CurTy;
    }

    if (!TLI.isZExtFree(NarrowTy, LargeTy))
      return false;
  }
  // All uses are the same or can be derived from one another for free.
  return true;
}
/// Try to speculatively promote extensions in \p Exts and continue
/// promoting through newly promoted operands recursively as far as doing so is
/// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts.
/// When some promotion happened, \p TPT contains the proper state to revert
/// them.
///
/// \return true if some promotion happened, false otherwise.
bool CodeGenPrepare::tryToPromoteExts(
    TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
    SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
    unsigned CreatedInstsCost) {
  bool Promoted = false;

  // Iterate over all the extensions to try to promote them.
  for (auto *I : Exts) {
    // Early check if we directly have ext(load).
    if (isa<LoadInst>(I->getOperand(0))) {
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Check whether or not we want to do any promotion. The reason we have
    // this check inside the for loop is to catch the case where an extension
    // is directly fed by a load because in such case the extension can be moved
    // up without any promotion on its operands.
    if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
      continue;

    // Get the action to perform the promotion.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
    // Check if we can promote.
    if (!TPH) {
      // Save the current extension as we cannot move up through its operand.
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Save the current state.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    SmallVector<Instruction *, 4> NewExts;
    unsigned NewCreatedInstsCost = 0;
    unsigned ExtCost = !TLI->isExtFree(I);
    // Promote.
    Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
                             &NewExts, nullptr, *TLI);
    assert(PromotedVal &&
           "TypePromotionHelper should have filtered out those cases");

    // We would be able to merge only one extension in a load.
    // Therefore, if we have more than 1 new extension we heuristically
    // cut this search path, because it means we degrade the code quality.
    // With exactly 2, the transformation is neutral, because we will merge
    // one extension but leave one. However, we optimistically keep going,
    // because the new extension may be removed too.
    long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
    // FIXME: It would be possible to propagate a negative value instead of
    // conservatively ceiling it to 0.
    TotalCreatedInstsCost =
        std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
    if (!StressExtLdPromotion &&
        (TotalCreatedInstsCost > 1 ||
         !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
      // This promotion is not profitable, rollback to the previous state, and
      // save the current extension in ProfitablyMovedExts as the latest
      // speculative promotion turned out to be unprofitable.
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // Continue promoting NewExts as far as doing so is profitable.
    SmallVector<Instruction *, 2> NewlyMovedExts;
    (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
    bool NewPromoted = false;
    for (auto *ExtInst : NewlyMovedExts) {
      Instruction *MovedExt = cast<Instruction>(ExtInst);
      Value *ExtOperand = MovedExt->getOperand(0);
      // If we have reached a load, we need this extra profitability check
      // as it could potentially be merged into an ext(load).
      if (isa<LoadInst>(ExtOperand) &&
          !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
            (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
        continue;

      ProfitablyMovedExts.push_back(MovedExt);
      NewPromoted = true;
    }

    // If none of the speculative promotions for NewExts is profitable, rollback
    // and save the current extension (I) as the last profitable extension.
    if (!NewPromoted) {
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // The promotion is profitable.
    Promoted = true;
  }
  return Promoted;
}
/// Merging redundant sexts when one is dominating the other.
bool CodeGenPrepare::mergeSExts(Function &F) {
  bool Changed = false;
  for (auto &Entry : ValToSExtendedUses) {
    SExts &Insts = Entry.second;
    SExts CurPts;
    for (Instruction *Inst : Insts) {
      if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
          Inst->getOperand(0) != Entry.first)
        continue;
      bool inserted = false;
      for (auto &Pt : CurPts) {
        if (getDT(F).dominates(Inst, Pt)) {
          Pt->replaceAllUsesWith(Inst);
          RemovedInsts.insert(Pt);
          Pt->removeFromParent();
          Pt = Inst;
          inserted = true;
          Changed = true;
          break;
        }
        if (!getDT(F).dominates(Pt, Inst))
          // Give up if we need to merge in a common dominator as the
          // experiments show it is not profitable.
          continue;
        Inst->replaceAllUsesWith(Pt);
        RemovedInsts.insert(Inst);
        Inst->removeFromParent();
        inserted = true;
        Changed = true;
        break;
      }
      if (!inserted)
        CurPts.push_back(Inst);
    }
  }
  return Changed;
}
// Splitting large data structures so that the GEPs accessing them can have
// smaller offsets so that they can be sunk to the same blocks as their users.
// For example, a large struct starting from %base is split into two parts
// where the second part starts from %new_base.
//
// Before:
// BB0:
//   %base     =
//
// BB1:
//   %gep0     = gep %base, off0
//   %gep1     = gep %base, off1
//   %gep2     = gep %base, off2
//
// BB2:
//   %load1    = load %gep0
//   %load2    = load %gep1
//   %load3    = load %gep2
//
// After:
// BB0:
//   %base     =
//   %new_base = gep %base, off0
//
// BB1:
//   %new_gep0 = %new_base
//   %new_gep1 = gep %new_base, off1 - off0
//   %new_gep2 = gep %new_base, off2 - off0
//
// BB2:
//   %load1    = load i32, i32* %new_gep0
//   %load2    = load i32, i32* %new_gep1
//   %load3    = load i32, i32* %new_gep2
//
// %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because
// their offsets are small enough to fit into the addressing mode.
bool CodeGenPrepare::splitLargeGEPOffsets() {
  bool Changed = false;
  for (auto &Entry : LargeOffsetGEPMap) {
    Value *OldBase = Entry.first;
    SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
        &LargeOffsetGEPs = Entry.second;
    auto compareGEPOffset =
        [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
            const std::pair<GetElementPtrInst *, int64_t> &RHS) {
          if (LHS.first == RHS.first)
            return false;
          if (LHS.second != RHS.second)
            return LHS.second < RHS.second;
          return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
        };
    // Sorting all the GEPs of the same data structures based on the offsets.
    llvm::sort(LargeOffsetGEPs, compareGEPOffset);
    LargeOffsetGEPs.erase(
        std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()),
        LargeOffsetGEPs.end());
    // Skip if all the GEPs have the same offsets.
    if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
      continue;
    GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
    int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
    Value *NewBaseGEP = nullptr;

    auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
    while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
      GetElementPtrInst *GEP = LargeOffsetGEP->first;
      int64_t Offset = LargeOffsetGEP->second;
      if (Offset != BaseOffset) {
        TargetLowering::AddrMode AddrMode;
        AddrMode.BaseOffs = Offset - BaseOffset;
        // The result type of the GEP might not be the type of the memory
        // access.
        if (!TLI->isLegalAddressingMode(*DL, AddrMode,
                                        GEP->getResultElementType(),
                                        GEP->getAddressSpace())) {
          // We need to create a new base if the offset to the current base is
          // too large to fit into the addressing mode. So, a very large struct
          // may be split into several parts.
          BaseGEP = GEP;
          BaseOffset = Offset;
          NewBaseGEP = nullptr;
        }
      }

      // Generate a new GEP to replace the current one.
      LLVMContext &Ctx = GEP->getContext();
      Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
      Type *I8PtrTy =
          Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace());
      Type *I8Ty = Type::getInt8Ty(Ctx);

      if (!NewBaseGEP) {
        // Create a new base if we don't have one yet. Find the insertion
        // pointer for the new base first.
        BasicBlock::iterator NewBaseInsertPt;
        BasicBlock *NewBaseInsertBB;
        if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
          // If the base of the struct is an instruction, the new base will be
          // inserted close to it.
          NewBaseInsertBB = BaseI->getParent();
          if (isa<PHINode>(BaseI))
            NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
          else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
            NewBaseInsertBB =
                SplitEdge(NewBaseInsertBB, Invoke->getNormalDest());
            NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
          } else
            NewBaseInsertPt = std::next(BaseI->getIterator());
        } else {
          // If the current base is an argument or global value, the new base
          // will be inserted to the entry block.
          NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
          NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
        }
        IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
        // Create a new base.
        Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset);
        NewBaseGEP = OldBase;
        if (NewBaseGEP->getType() != I8PtrTy)
          NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
        NewBaseGEP =
            NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep");
        NewGEPBases.insert(NewBaseGEP);
      }

      IRBuilder<> Builder(GEP);
      Value *NewGEP = NewBaseGEP;
      if (Offset == BaseOffset) {
        if (GEP->getType() != I8PtrTy)
          NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
      } else {
        // Calculate the new offset for the new GEP.
        Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset);
        NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index);

        if (GEP->getType() != I8PtrTy)
          NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
      }
      GEP->replaceAllUsesWith(NewGEP);
      LargeOffsetGEPID.erase(GEP);
      LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
      GEP->eraseFromParent();
      Changed = true;
    }
  }
  return Changed;
}
bool CodeGenPrepare::optimizePhiType(
    PHINode *I, SmallPtrSetImpl<PHINode *> &Visited,
    SmallPtrSetImpl<Instruction *> &DeletedInstrs) {
  // We are looking for a collection of interconnected phi nodes that together
  // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
  // are of the same type. Convert the whole set of nodes to the type of the
  // bitcast.
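  //
  // Illustrative sketch (not from the original source): a float phi whose
  // inputs and uses are really integer loads/stores,
  //
  //   %la  = load float, float* %pa
  //   %lb  = load float, float* %pb
  //   %phi = phi float [ %la, %a ], [ %lb, %b ]
  //   %bc  = bitcast float %phi to i32
  //   store i32 %bc, i32* %pd
  //
  // can be converted to a phi of i32 by bitcasting the loads instead, removing
  // the float<->int crossing on the stored value.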
  Type *PhiTy = I->getType();
  Type *ConvertTy = nullptr;
  if (Visited.count(I) ||
      (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy()))
    return false;

  SmallVector<Instruction *, 4> Worklist;
  Worklist.push_back(cast<Instruction>(I));
  SmallPtrSet<PHINode *, 4> PhiNodes;
  PhiNodes.insert(I);
  Visited.insert(I);
  SmallPtrSet<Instruction *, 4> Defs;
  SmallPtrSet<Instruction *, 4> Uses;
  // This works by adding extra bitcasts between load/stores and removing
  // existing bitcasts. If we have a phi(bitcast(load)) or a store(bitcast(phi))
  // we can get in the situation where we remove a bitcast in one iteration
  // just to add it again in the next. We need to ensure that at least one
  // bitcast we remove is anchored to something that will not change back.
  bool AnyAnchored = false;

  while (!Worklist.empty()) {
    Instruction *II = Worklist.pop_back_val();

    if (auto *Phi = dyn_cast<PHINode>(II)) {
      // Handle Defs, which might also be PHI's
      for (Value *V : Phi->incoming_values()) {
        if (auto *OpPhi = dyn_cast<PHINode>(V)) {
          if (!PhiNodes.count(OpPhi)) {
            if (Visited.count(OpPhi))
              return false;
            PhiNodes.insert(OpPhi);
            Visited.insert(OpPhi);
            Worklist.push_back(OpPhi);
          }
        } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) {
          if (!OpLoad->isSimple())
            return false;
          if (!Defs.count(OpLoad)) {
            Defs.insert(OpLoad);
            Worklist.push_back(OpLoad);
          }
        } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) {
          if (!Defs.count(OpEx)) {
            Defs.insert(OpEx);
            Worklist.push_back(OpEx);
          }
        } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
          if (!ConvertTy)
            ConvertTy = OpBC->getOperand(0)->getType();
          if (OpBC->getOperand(0)->getType() != ConvertTy)
            return false;
          if (!Defs.count(OpBC)) {
            Defs.insert(OpBC);
            Worklist.push_back(OpBC);
            AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) &&
                           !isa<ExtractElementInst>(OpBC->getOperand(0));
          }
        } else if (!isa<UndefValue>(V)) {
          return false;
        }
      }
    }

    // Handle uses which might also be phi's
    for (User *V : II->users()) {
      if (auto *OpPhi = dyn_cast<PHINode>(V)) {
        if (!PhiNodes.count(OpPhi)) {
          if (Visited.count(OpPhi))
            return false;
          PhiNodes.insert(OpPhi);
          Visited.insert(OpPhi);
          Worklist.push_back(OpPhi);
        }
      } else if (auto *OpStore = dyn_cast<StoreInst>(V)) {
        if (!OpStore->isSimple() || OpStore->getOperand(0) != II)
          return false;
        Uses.insert(OpStore);
      } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
        if (!ConvertTy)
          ConvertTy = OpBC->getType();
        if (OpBC->getType() != ConvertTy)
          return false;
        Uses.insert(OpBC);
        AnyAnchored |=
            any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); });
      } else {
        return false;
      }
    }
  }

  if (!ConvertTy || !AnyAnchored || !TLI->shouldConvertPhiType(PhiTy, ConvertTy))
    return false;

  LLVM_DEBUG(dbgs() << "Converting " << *I << "\n  and connected nodes to "
                    << *ConvertTy << "\n");

  // Create all the new phi nodes of the new type, and bitcast any loads to the
  // correct type.
  ValueToValueMap ValMap;
  ValMap[UndefValue::get(PhiTy)] = UndefValue::get(ConvertTy);
  for (Instruction *D : Defs) {
    if (isa<BitCastInst>(D)) {
      ValMap[D] = D->getOperand(0);
      DeletedInstrs.insert(D);
    } else {
      ValMap[D] =
          new BitCastInst(D, ConvertTy, D->getName() + ".bc", D->getNextNode());
    }
  }
  for (PHINode *Phi : PhiNodes)
    ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(),
                                  Phi->getName() + ".tc", Phi);
  // Pipe together all the PhiNodes.
  for (PHINode *Phi : PhiNodes) {
    PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
    for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
      NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
                          Phi->getIncomingBlock(i));
    Visited.insert(NewPhi);
  }
  // And finally pipe up the stores and bitcasts
  for (Instruction *U : Uses) {
    if (isa<BitCastInst>(U)) {
      DeletedInstrs.insert(U);
      U->replaceAllUsesWith(ValMap[U->getOperand(0)]);
    } else {
      U->setOperand(0,
                    new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc", U));
    }
  }

  // Save the removed phis to be deleted later.
  for (PHINode *Phi : PhiNodes)
    DeletedInstrs.insert(Phi);
  return true;
}
bool CodeGenPrepare::optimizePhiTypes(Function &F) {
  if (!OptimizePhiTypes)
    return false;

  bool Changed = false;
  SmallPtrSet<PHINode *, 4> Visited;
  SmallPtrSet<Instruction *, 4> DeletedInstrs;

  // Attempt to optimize all the phis in the function to the correct type.
  for (auto &BB : F)
    for (auto &Phi : BB.phis())
      Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);

  // Remove any old phi's that have been converted.
  for (auto *I : DeletedInstrs) {
    I->replaceAllUsesWith(UndefValue::get(I->getType()));
    I->eraseFromParent();
  }

  return Changed;
}
/// Return true if an ext(load) can be formed from an extension in
/// \p MovedExts.
bool CodeGenPrepare::canFormExtLd(
    const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
    Instruction *&Inst, bool HasPromoted) {
  for (auto *MovedExtInst : MovedExts) {
    if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
      LI = cast<LoadInst>(MovedExtInst->getOperand(0));
      Inst = MovedExtInst;
      break;
    }
  }
  if (!LI)
    return false;

  // If they're already in the same block, there's nothing to do.
  // Make the cheap checks first if we did not promote.
  // If we promoted, we need to check if it is indeed profitable.
  if (!HasPromoted && LI->getParent() == Inst->getParent())
    return false;

  return TLI->isExtLoad(LI, Inst, *DL);
}
/// Move a zext or sext fed by a load into the same basic block as the load,
/// unless conditions are unfavorable. This allows SelectionDAG to fold the
/// extend into the load.
///
/// E.g.,
/// \code
/// %ld = load i32* %addr
/// %add = add nuw i32 %ld, 4
/// %zext = zext i32 %add to i64
/// \endcode
/// =>
/// \code
/// %ld = load i32* %addr
/// %zext = zext i32 %ld to i64
/// %add = add nuw i64 %zext, 4
/// \endcode
///
/// Note that the promotion in %add to i64 is done in tryToPromoteExts(), which
/// allows us to match zext(load i32*) to i64.
///
/// Also, try to promote the computations used to obtain a sign extended
/// value used into memory accesses.
/// E.g.,
/// \code
/// a = add nsw i32 b, 3
/// d = sext i32 a to i64
/// e = getelementptr ..., i64 d
/// \endcode
/// =>
/// \code
/// f = sext i32 b to i64
/// a = add nsw i64 f, 3
/// e = getelementptr ..., i64 a
/// \endcode
///
/// \p Inst[in/out] the extension may be modified during the process if some
/// promotions apply.
bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
  bool AllowPromotionWithoutCommonHeader = false;
  /// See if it is an interesting sext operation for the address type
  /// promotion before trying to promote it, e.g., the ones with the right
  /// type and used in memory accesses.
  bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
      *Inst, AllowPromotionWithoutCommonHeader);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  SmallVector<Instruction *, 1> Exts;
  SmallVector<Instruction *, 2> SpeculativelyMovedExts;
  Exts.push_back(Inst);

  bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);

  // Look for a load being extended.
  LoadInst *LI = nullptr;
  Instruction *ExtFedByLoad;

  // Try to promote a chain of computation if it allows us to form an extended
  // load.
  if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
    assert(LI && ExtFedByLoad && "Expect a valid load and extension");
    TPT.commit();
    // Move the extend into the same block as the load.
    ExtFedByLoad->moveAfter(LI);
    ++NumExtsMoved;
    Inst = ExtFedByLoad;
    return true;
  }

  // Continue promoting SExts if known as considerable depending on targets.
  if (ATPConsiderable &&
      performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
                                  HasPromoted, TPT, SpeculativelyMovedExts))
    return true;

  TPT.rollback(LastKnownGood);
  return false;
}
// Perform address type promotion if doing so is profitable.
// If AllowPromotionWithoutCommonHeader == false, we should find other sext
// instructions that sign extended the same initial value. However, if
// AllowPromotionWithoutCommonHeader == true, we expect promoting the
// extension is just profitable.
bool CodeGenPrepare::performAddressTypePromotion(
    Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
    bool HasPromoted, TypePromotionTransaction &TPT,
    SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
  bool Promoted = false;
  SmallPtrSet<Instruction *, 1> UnhandledExts;
  bool AllSeenFirst = true;
  for (auto *I : SpeculativelyMovedExts) {
    Value *HeadOfChain = I->getOperand(0);
    DenseMap<Value *, Instruction *>::iterator AlreadySeen =
        SeenChainsForSExt.find(HeadOfChain);
    // If there is an unhandled SExt which has the same header, try to promote
    // it as well.
    if (AlreadySeen != SeenChainsForSExt.end()) {
      if (AlreadySeen->second != nullptr)
        UnhandledExts.insert(AlreadySeen->second);
      AllSeenFirst = false;
    }
  }

  if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
                        SpeculativelyMovedExts.size() == 1)) {
    TPT.commit();
    if (HasPromoted)
      Promoted = true;
    for (auto *I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = nullptr;
      ValToSExtendedUses[HeadOfChain].push_back(I);
    }
    // Update Inst as promotion happened.
    Inst = SpeculativelyMovedExts.pop_back_val();
  } else {
    // This is the first chain visited from the header, keep the current chain
    // as unhandled. Defer promoting this until we encounter another SExt
    // chain derived from the same header.
    for (auto *I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = Inst;
    }
    return false;
  }

  if (!AllSeenFirst && !UnhandledExts.empty())
    for (auto *VisitedSExt : UnhandledExts) {
      if (RemovedInsts.count(VisitedSExt))
        continue;
      TypePromotionTransaction TPT(RemovedInsts);
      SmallVector<Instruction *, 1> Exts;
      SmallVector<Instruction *, 2> Chains;
      Exts.push_back(VisitedSExt);
      bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
      TPT.commit();
      if (HasPromoted)
        Promoted = true;
      for (auto *I : Chains) {
        Value *HeadOfChain = I->getOperand(0);
        // Mark this as handled.
        SeenChainsForSExt[HeadOfChain] = nullptr;
        ValToSExtendedUses[HeadOfChain].push_back(I);
      }
    }
  return Promoted;
}
bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with result of extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (!TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (User *U : I->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (User *U : Src->users()) {
    Instruction *UI = cast<Instruction>(U);
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block once.
  DenseMap<BasicBlock *, Instruction *> InsertedTruncs;

  bool MadeChange = false;
  for (Use &U : Src->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt);
      InsertedInsts.insert(InsertedTrunc);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    U = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}
6402 // Find loads whose uses only use some of the loaded value's bits. Add an "and"
6403 // just after the load if the target can fold this into one extload instruction,
6404 // with the hope of eliminating some of the other later "and" instructions using
6405 // the loaded value. "and"s that are made trivially redundant by the insertion
6406 // of the new "and" are removed by this function, while others (e.g. those whose
6407 // path from the load goes through a phi) are left for isel to potentially
6440 // becomes (after a call to optimizeLoadExt for each load):
6444 // x1' = and x1, 0xff
6448 // x2' = and x2, 0xff
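//
// As a minimal, self-contained sketch (the value names are illustrative, not
// part of the original example):
//   %x1 = load i32, i32* %p
//   %y  = and i32 %x1, 255
// becomes
//   %x1  = load i32, i32* %p
//   %x1' = and i32 %x1, 255      ; inserted just after the load by this pass
//   (uses of %y now use %x1'; the old "and" with the same mask is erased)
// which lets isel fold the load and the mask into a single zero-extending load.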
6453 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
6454   if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
6457   // Skip loads we've already transformed.
6458   if (Load->hasOneUse() &&
6459       InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
6462   // Look at all uses of Load, looking through phis, to determine how many bits
6463   // of the loaded value are needed.
6464   SmallVector<Instruction *, 8> WorkList;
6465   SmallPtrSet<Instruction *, 16> Visited;
6466   SmallVector<Instruction *, 8> AndsToMaybeRemove;
6467   for (auto *U : Load->users())
6468     WorkList.push_back(cast<Instruction>(U));
6470   EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
6471   unsigned BitWidth = LoadResultVT.getSizeInBits();
6472   // If the BitWidth is 0, do not try to optimize the type
6476   APInt DemandBits(BitWidth, 0);
6477   APInt WidestAndBits(BitWidth, 0);
6479   while (!WorkList.empty()) {
6480     Instruction *I = WorkList.back();
6481     WorkList.pop_back();
6483     // Break use-def graph loops.
6484     if (!Visited.insert(I).second)
6487     // For a PHI node, push all of its users.
6488     if (auto *Phi = dyn_cast<PHINode>(I)) {
6489       for (auto *U : Phi->users())
6490         WorkList.push_back(cast<Instruction>(U));
6494     switch (I->getOpcode()) {
6495     case Instruction::And: {
6496       auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
6499       APInt AndBits = AndC->getValue();
6500       DemandBits |= AndBits;
6501       // Keep track of the widest and mask we see.
6502       if (AndBits.ugt(WidestAndBits))
6503         WidestAndBits = AndBits;
6504       if (AndBits == WidestAndBits && I->getOperand(0) == Load)
6505         AndsToMaybeRemove.push_back(I);
6509     case Instruction::Shl: {
6510       auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
6513       uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
6514       DemandBits.setLowBits(BitWidth - ShiftAmt);
6518     case Instruction::Trunc: {
6519       EVT TruncVT = TLI->getValueType(*DL, I->getType());
6520       unsigned TruncBitWidth = TruncVT.getSizeInBits();
6521       DemandBits.setLowBits(TruncBitWidth);
6530   uint32_t ActiveBits = DemandBits.getActiveBits();
6531   // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
6532   // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example,
6533   // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
6534   // (and (load x) 1) is not matched as a single instruction, rather as a LDR
6535   // followed by an AND.
6536   // TODO: Look into removing this restriction by fixing backends to either
6537   // return false for isLoadExtLegal for i1 or have them select this pattern to
6538   // a single instruction.
6540   // Also avoid hoisting if we didn't see any ands with the exact DemandBits
6541   // mask, since these are the only ands that will be removed by isel.
6542   if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
6543       WidestAndBits != DemandBits)
6546   LLVMContext &Ctx = Load->getType()->getContext();
6547   Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
6548   EVT TruncVT = TLI->getValueType(*DL, TruncTy);
6550   // Reject cases that won't be matched as extloads.
6551   if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
6552       !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
6555   IRBuilder<> Builder(Load->getNextNode());
6556   auto *NewAnd = cast<Instruction>(
6557       Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
6558   // Mark this instruction as "inserted by CGP", so that other
6559   // optimizations don't touch it.
6560   InsertedInsts.insert(NewAnd);
6562   // Replace all uses of load with new and (except for the use of load in the
6564   Load->replaceAllUsesWith(NewAnd);
6565   NewAnd->setOperand(0, Load);
6567   // Remove any and instructions that are now redundant.
6568   for (auto *And : AndsToMaybeRemove)
6569     // Check that the and mask is the same as the one we decided to put on the
6571     if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
6572       And->replaceAllUsesWith(NewAnd);
6573       if (&*CurInstIterator == And)
6574         CurInstIterator = std::next(And->getIterator());
6575       And->eraseFromParent();
6583 /// Check if V (an operand of a select instruction) is an expensive instruction
6584 /// that is only used once.
6585 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
6586   auto *I = dyn_cast<Instruction>(V);
6587   // If it's safe to speculatively execute, then it should not have side
6588   // effects; therefore, it's safe to sink and possibly *not* execute.
6589   return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
6590          TTI->getUserCost(I, TargetTransformInfo::TCK_SizeAndLatency) >=
6591              TargetTransformInfo::TCC_Expensive;
6594 /// Returns true if a SelectInst should be turned into an explicit branch.
6595 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
6596                                                 const TargetLowering *TLI,
6598   // If even a predictable select is cheap, then a branch can't be cheaper.
6599   if (!TLI->isPredictableSelectExpensive())
6602   // FIXME: This should use the same heuristics as IfConversion to determine
6603   // whether a select is better represented as a branch.
6605   // If metadata tells us that the select condition is obviously predictable,
6606   // then we want to replace the select with a branch.
6607   uint64_t TrueWeight, FalseWeight;
6608   if (SI->extractProfMetadata(TrueWeight, FalseWeight)) {
6609     uint64_t Max = std::max(TrueWeight, FalseWeight);
6610     uint64_t Sum = TrueWeight + FalseWeight;
6612     auto Probability = BranchProbability::getBranchProbability(Max, Sum);
6613     if (Probability > TTI->getPredictableBranchThreshold())
6618   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
6620   // If a branch is predictable, an out-of-order CPU can avoid blocking on its
6621   // comparison condition. If the compare has more than one use, there's
6622   // probably another cmov or setcc around, so it's not worth emitting a branch.
6623   if (!Cmp || !Cmp->hasOneUse())
6626   // If either operand of the select is expensive and only needed on one side
6627   // of the select, we should form a branch.
6628   if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
6629       sinkSelectOperand(TTI, SI->getFalseValue()))
6635 /// If \p isTrue is true, return the true value of \p SI, otherwise return
6636 /// false value of \p SI. If the true/false value of \p SI is defined by any
6637 /// select instructions in \p Selects, look through the defining select
6638 /// instruction until the true/false value is not defined in \p Selects.
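///
/// For example (an illustrative sketch), with Selects = {%s1, %s2} and
///   %s1 = select i1 %c, i32 %a, i32 %b
///   %s2 = select i1 %c, i32 %x, i32 %s1
/// getTrueOrFalseValue(%s2, /*isTrue=*/false, Selects) looks through %s1 and
/// returns %b.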
6639 static Value *getTrueOrFalseValue(
6640     SelectInst *SI, bool isTrue,
6641     const SmallPtrSet<const Instruction *, 2> &Selects) {
6644   for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
6645        DefSI = dyn_cast<SelectInst>(V)) {
6646     assert(DefSI->getCondition() == SI->getCondition() &&
6647            "The condition of DefSI does not match with SI");
6648     V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
6651   assert(V && "Failed to get select true/false value");
6655 bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
6656   assert(Shift->isShift() && "Expected a shift");
6658 // If this is (1) a vector shift, (2) shifts by scalars are cheaper than
6659 // general vector shifts, and (3) the shift amount is a select-of-splatted
6660 // values, hoist the shifts before the select:
6661 // shift Op0, (select Cond, TVal, FVal) -->
6662 // select Cond, (shift Op0, TVal), (shift Op0, FVal)
6664 // This is inverting a generic IR transform when we know that the cost of a
6665 // general vector shift is more than the cost of 2 shift-by-scalars.
6666 // We can't do this effectively in SDAG because we may not be able to
6667 // determine if the select operands are splats from within a basic block.
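  //
  // An illustrative sketch (value names are hypothetical):
  //   %amt = select i1 %cond, <4 x i32> <i32 2, i32 2, i32 2, i32 2>,
  //                           <4 x i32> <i32 3, i32 3, i32 3, i32 3>
  //   %r   = shl <4 x i32> %x, %amt
  // becomes
  //   %t = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  //   %f = shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  //   %r = select i1 %cond, <4 x i32> %t, <4 x i32> %f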
6668   Type *Ty = Shift->getType();
6669   if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
6671   Value *Cond, *TVal, *FVal;
6672   if (!match(Shift->getOperand(1),
6673              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
6675   if (!isSplatValue(TVal) || !isSplatValue(FVal))
6678   IRBuilder<> Builder(Shift);
6679   BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
6680   Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
6681   Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
6682   Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
6683   Shift->replaceAllUsesWith(NewSel);
6684   Shift->eraseFromParent();
6688 bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) {
6689   Intrinsic::ID Opcode = Fsh->getIntrinsicID();
6690   assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) &&
6691          "Expected a funnel shift");
6693 // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper
6694 // than general vector shifts, and (3) the shift amount is select-of-splatted
6695 // values, hoist the funnel shifts before the select:
6696 // fsh Op0, Op1, (select Cond, TVal, FVal) -->
6697 // select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal)
6699 // This is inverting a generic IR transform when we know that the cost of a
6700 // general vector shift is more than the cost of 2 shift-by-scalars.
6701 // We can't do this effectively in SDAG because we may not be able to
6702 // determine if the select operands are splats from within a basic block.
6703   Type *Ty = Fsh->getType();
6704   if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
6706   Value *Cond, *TVal, *FVal;
6707   if (!match(Fsh->getOperand(2),
6708              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
6710   if (!isSplatValue(TVal) || !isSplatValue(FVal))
6713   IRBuilder<> Builder(Fsh);
6714   Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1);
6715   Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, TVal});
6716   Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, FVal});
6717   Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
6718   Fsh->replaceAllUsesWith(NewSel);
6719   Fsh->eraseFromParent();
6723 /// If we have a SelectInst that will likely profit from branch prediction,
6724 /// turn it into a branch.
6725 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
6726   if (DisableSelectToBranch)
6729   // Find all consecutive select instructions that share the same condition.
6730   SmallVector<SelectInst *, 2> ASI;
6732   for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
6733        It != SI->getParent()->end(); ++It) {
6734     SelectInst *I = dyn_cast<SelectInst>(&*It);
6735     if (I && SI->getCondition() == I->getCondition()) {
6742   SelectInst *LastSI = ASI.back();
6743   // Increment the current iterator to skip all the rest of select instructions
6744   // because they will be either "not lowered" or "all lowered" to branch.
6745   CurInstIterator = std::next(LastSI->getIterator());
6747   bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
6749   // Can we convert the 'select' to CF ?
6750   if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
6753   TargetLowering::SelectSupportKind SelectKind;
6755     SelectKind = TargetLowering::VectorMaskSelect;
6756   else if (SI->getType()->isVectorTy())
6757     SelectKind = TargetLowering::ScalarCondVectorVal;
6759     SelectKind = TargetLowering::ScalarValSelect;
6761   if (TLI->isSelectSupported(SelectKind) &&
6762       (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize ||
6763        llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
6766 // The DominatorTree needs to be rebuilt by any consumers after this
6767 // transformation. We simply reset here rather than setting the ModifiedDT
6768 // flag to avoid restarting the function walk in runOnFunction for each
6769 // select optimized.
6772 // Transform a sequence like this:
6774 // %cmp = cmp uge i32 %a, %b
6775 // %sel = select i1 %cmp, i32 %c, i32 %d
6779 // %cmp = cmp uge i32 %a, %b
6780 // %cmp.frozen = freeze %cmp
6781 // br i1 %cmp.frozen, label %select.true, label %select.false
6783 // br label %select.end
6785 // br label %select.end
6787 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
6789 // %cmp should be frozen, otherwise it may introduce undefined behavior.
6790 // In addition, we may sink instructions that produce %c or %d from
6791 // the entry block into the destination(s) of the new branch.
6792 // If the true or false blocks do not contain a sunken instruction, that
6793 // block and its branch may be optimized away. In that case, one side of the
6794 // first branch will point directly to select.end, and the corresponding PHI
6795 // predecessor block will be the start block.
6797 // First, we split the block containing the select into 2 blocks.
6798   BasicBlock *StartBlock = SI->getParent();
6799   BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI));
6800   BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");
6801   BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock).getFrequency());
6803   // Delete the unconditional branch that was just created by the split.
6804   StartBlock->getTerminator()->eraseFromParent();
6806   // These are the new basic blocks for the conditional branch.
6807   // At least one will become an actual new basic block.
6808   BasicBlock *TrueBlock = nullptr;
6809   BasicBlock *FalseBlock = nullptr;
6810   BranchInst *TrueBranch = nullptr;
6811   BranchInst *FalseBranch = nullptr;
6813   // Sink expensive instructions into the conditional blocks to avoid executing
6814   // them speculatively.
6815   for (SelectInst *SI : ASI) {
6816     if (sinkSelectOperand(TTI, SI->getTrueValue())) {
6817       if (TrueBlock == nullptr) {
6818         TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink",
6819                                        EndBlock->getParent(), EndBlock);
6820         TrueBranch = BranchInst::Create(EndBlock, TrueBlock);
6821         TrueBranch->setDebugLoc(SI->getDebugLoc());
6823       auto *TrueInst = cast<Instruction>(SI->getTrueValue());
6824       TrueInst->moveBefore(TrueBranch);
6826     if (sinkSelectOperand(TTI, SI->getFalseValue())) {
6827       if (FalseBlock == nullptr) {
6828         FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink",
6829                                         EndBlock->getParent(), EndBlock);
6830         FalseBranch = BranchInst::Create(EndBlock, FalseBlock);
6831         FalseBranch->setDebugLoc(SI->getDebugLoc());
6833       auto *FalseInst = cast<Instruction>(SI->getFalseValue());
6834       FalseInst->moveBefore(FalseBranch);
6838   // If there was nothing to sink, then arbitrarily choose the 'false' side
6839   // for a new input value to the PHI.
6840   if (TrueBlock == FalseBlock) {
6841     assert(TrueBlock == nullptr &&
6842            "Unexpected basic block transform while optimizing select");
6844     FalseBlock = BasicBlock::Create(SI->getContext(), "select.false",
6845                                     EndBlock->getParent(), EndBlock);
6846     auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock);
6847     FalseBranch->setDebugLoc(SI->getDebugLoc());
6850   // Insert the real conditional branch based on the original condition.
6851   // If we did not create a new block for one of the 'true' or 'false' paths
6852   // of the condition, it means that side of the branch goes to the end block
6853   // directly and the path originates from the start block from the point of
6854   // view of the new PHI.
6855   BasicBlock *TT, *FT;
6856   if (TrueBlock == nullptr) {
6859     TrueBlock = StartBlock;
6860   } else if (FalseBlock == nullptr) {
6863     FalseBlock = StartBlock;
6869   auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen");
6870   IB.CreateCondBr(CondFr, TT, FT, SI);
6872   SmallPtrSet<const Instruction *, 2> INS;
6873   INS.insert(ASI.begin(), ASI.end());
6874   // Use reverse iterator because later select may use the value of the
6875   // earlier select, and we need to propagate value through earlier select
6876   // to get the PHI operand.
6877   for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) {
6878     SelectInst *SI = *It;
6879     // The select itself is replaced with a PHI Node.
6880     PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front());
6882     PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
6883     PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
6884     PN->setDebugLoc(SI->getDebugLoc());
6886     SI->replaceAllUsesWith(PN);
6887     SI->eraseFromParent();
6889     ++NumSelectsExpanded;
6892   // Instruct OptimizeBlock to skip to the next block.
6893   CurInstIterator = StartBlock->end();
6897 /// Some targets only accept certain types for splat inputs. For example, a VDUP
6898 /// in MVE takes a GPR (integer) register, and the instruction that incorporates
6899 /// a VDUP (such as a VADD qd, qm, rm) also requires a GPR register.
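///
/// For illustration (a sketch; assumes the target asks for an i32 splat via
/// shouldConvertSplatType):
///   %ins = insertelement <4 x float> poison, float %v, i32 0
///   %spl = shufflevector <4 x float> %ins, <4 x float> poison, <4 x i32> zeroinitializer
/// is rebuilt as
///   %bc    = bitcast float %v to i32
///   %spl.i = a <4 x i32> splat of %bc
///   %spl   = bitcast <4 x i32> %spl.i to <4 x float>
/// so the splatted value can live in a GPR.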
6900 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
6901   // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only
6902   if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
6903                             m_Undef(), m_ZeroMask())))
6905   Type *NewType = TLI->shouldConvertSplatType(SVI);
6909   auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
6910   assert(!NewType->isVectorTy() && "Expected a scalar type!");
6911   assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
6912          "Expected a type of the same size!");
6914       FixedVectorType::get(NewType, SVIVecType->getNumElements());
6916   // Create a bitcast (shuffle (insert (bitcast(..))))
6917   IRBuilder<> Builder(SVI->getContext());
6918   Builder.SetInsertPoint(SVI);
6919   Value *BC1 = Builder.CreateBitCast(
6920       cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
6921   Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1);
6922   Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);
6924   SVI->replaceAllUsesWith(BC2);
6925   RecursivelyDeleteTriviallyDeadInstructions(
6926       SVI, TLInfo, nullptr, [&](Value *V) { removeAllAssertingVHReferences(V); });
6928   // Also hoist the bitcast up to its operand if they are not in the same
6930   if (auto *BCI = dyn_cast<Instruction>(BC1))
6931     if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
6932       if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
6933           !Op->isTerminator() && !Op->isEHPad())
6939 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
6940 // If the operands of I can be folded into a target instruction together with
6941 // I, duplicate and sink them.
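  //
  // An illustrative sketch (whether an operand is actually sunk is decided by
  // the target via shouldSinkOperands; the IR below is hypothetical):
  //   bb0:
  //     %s = shufflevector <4 x i16> %v, <4 x i16> poison, <4 x i32> zeroinitializer
  //     br label %bb1
  //   bb1:
  //     %m = mul <4 x i16> %a, %s
  // becomes
  //   bb1:
  //     %s.copy = shufflevector <4 x i16> %v, <4 x i16> poison, <4 x i32> zeroinitializer
  //     %m = mul <4 x i16> %a, %s.copy
  // so isel can fold the splat directly into the user instruction.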
6942   SmallVector<Use *, 4> OpsToSink;
6943   if (!TLI->shouldSinkOperands(I, OpsToSink))
6946   // OpsToSink can contain multiple uses in a use chain (e.g.
6947   // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
6948   // uses must come first, so we process the ops in reverse order so as to not
6949   // create invalid IR.
6950   BasicBlock *TargetBB = I->getParent();
6951   bool Changed = false;
6952   SmallVector<Use *, 4> ToReplace;
6953   Instruction *InsertPoint = I;
6954   DenseMap<const Instruction *, unsigned long> InstOrdering;
6955   unsigned long InstNumber = 0;
6956   for (const auto &I : *TargetBB)
6957     InstOrdering[&I] = InstNumber++;
6959   for (Use *U : reverse(OpsToSink)) {
6960     auto *UI = cast<Instruction>(U->get());
6961     if (isa<PHINode>(UI))
6963     if (UI->getParent() == TargetBB) {
6964       if (InstOrdering[UI] < InstOrdering[InsertPoint])
6968     ToReplace.push_back(U);
6971   SetVector<Instruction *> MaybeDead;
6972   DenseMap<Instruction *, Instruction *> NewInstructions;
6973   for (Use *U : ToReplace) {
6974     auto *UI = cast<Instruction>(U->get());
6975     Instruction *NI = UI->clone();
6976     NewInstructions[UI] = NI;
6977     MaybeDead.insert(UI);
6978     LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
6979     NI->insertBefore(InsertPoint);
6981     InsertedInsts.insert(NI);
6983     // Update the use for the new instruction, making sure that we update the
6984     // sunk instruction uses, if it is part of a chain that has already been
6986     Instruction *OldI = cast<Instruction>(U->getUser());
6987     if (NewInstructions.count(OldI))
6988       NewInstructions[OldI]->setOperand(U->getOperandNo(), NI);
6994   // Remove instructions that are dead after sinking.
6995   for (auto *I : MaybeDead) {
6996     if (!I->hasNUsesOrMore(1)) {
6997       LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n");
6998       I->eraseFromParent();
7005 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
7006   Value *Cond = SI->getCondition();
7007   Type *OldType = Cond->getType();
7008   LLVMContext &Context = Cond->getContext();
7009   EVT OldVT = TLI->getValueType(*DL, OldType);
7010   MVT RegType = TLI->getRegisterType(Context, OldVT);
7011   unsigned RegWidth = RegType.getSizeInBits();
7013   if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
7016 // If the register width is greater than the type width, expand the condition
7017 // of the switch instruction and each case constant to the width of the
7018 // register. By widening the type of the switch condition, subsequent
7019 // comparisons (for case comparisons) will not need to be extended to the
7020 // preferred register width, so we will potentially eliminate N-1 extends,
7021 // where N is the number of cases in the switch.
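  //
  // An illustrative sketch (assumes a target with 32-bit registers that
  // prefers zero-extension):
  //   switch i8 %c, label %def [ i8 1, label %bb1
  //                              i8 2, label %bb2 ]
  // becomes
  //   %c.ext = zext i8 %c to i32
  //   switch i32 %c.ext, label %def [ i32 1, label %bb1
  //                                   i32 2, label %bb2 ]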
7022   auto *NewType = Type::getIntNTy(Context, RegWidth);
7024   // Extend the switch condition and case constants using the target preferred
7025   // extend unless the switch condition is a function argument with an extend
7026   // attribute. In that case, we can avoid an unnecessary mask/extension by
7027   // matching the argument extension instead.
7028   Instruction::CastOps ExtType = Instruction::ZExt;
7029   // Some targets prefer SExt over ZExt.
7030   if (TLI->isSExtCheaperThanZExt(OldVT, RegType))
7031     ExtType = Instruction::SExt;
7033   if (auto *Arg = dyn_cast<Argument>(Cond)) {
7034     if (Arg->hasSExtAttr())
7035       ExtType = Instruction::SExt;
7036     if (Arg->hasZExtAttr())
7037       ExtType = Instruction::ZExt;
7040   auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
7041   ExtInst->insertBefore(SI);
7042   ExtInst->setDebugLoc(SI->getDebugLoc());
7043   SI->setCondition(ExtInst);
7044   for (auto Case : SI->cases()) {
7045     APInt NarrowConst = Case.getCaseValue()->getValue();
7046     APInt WideConst = (ExtType == Instruction::ZExt) ?
7047                       NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth);
7048     Case.setValue(ConstantInt::get(Context, WideConst));
7057 /// Helper class to promote a scalar operation to a vector one.
7058 /// This class is used to move downward extractelement transition.
7060 /// a = vector_op <2 x i32>
7061 /// b = extractelement <2 x i32> a, i32 0
7066 /// a = vector_op <2 x i32>
7067 /// c = vector_op a (equivalent to scalar_op on the related lane)
7068 /// * d = extractelement <2 x i32> c, i32 0
7070 /// Assuming both extractelement and store can be combined, we get rid of the
7072 class VectorPromoteHelper {
7073   /// DataLayout associated with the current module.
7074   const DataLayout &DL;
7076   /// Used to perform some checks on the legality of vector operations.
7077   const TargetLowering &TLI;
7079   /// Used to estimate the cost of the promoted chain.
7080   const TargetTransformInfo &TTI;
7082   /// The transition being moved downwards.
7083   Instruction *Transition;
7085   /// The sequence of instructions to be promoted.
7086   SmallVector<Instruction *, 4> InstsToBePromoted;
7088   /// Cost of combining a store and an extract.
7089   unsigned StoreExtractCombineCost;
7091   /// Instruction that will be combined with the transition.
7092   Instruction *CombineInst = nullptr;
7094   /// The instruction that represents the current end of the transition.
7095   /// Since we are faking the promotion until we reach the end of the chain
7096   /// of computation, we need a way to get the current end of the transition.
7097   Instruction *getEndOfTransition() const {
7098     if (InstsToBePromoted.empty())
7100     return InstsToBePromoted.back();
7103   /// Return the index of the original value in the transition.
7104   /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
7105   /// c, is at index 0.
7106   unsigned getTransitionOriginalValueIdx() const {
7107     assert(isa<ExtractElementInst>(Transition) &&
7108            "Other kind of transitions are not supported yet");
7112   /// Return the index of the index in the transition.
7113   /// E.g., for "extractelement <2 x i32> c, i32 0" the index
7115   unsigned getTransitionIdx() const {
7116     assert(isa<ExtractElementInst>(Transition) &&
7117            "Other kind of transitions are not supported yet");
7121   /// Get the type of the transition.
7122   /// This is the type of the original value.
7123   /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
7124   /// transition is <2 x i32>.
7125   Type *getTransitionType() const {
7126     return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
7129   /// Promote \p ToBePromoted by moving \p Def downward through.
7130   /// I.e., we have the following sequence:
7131   /// Def = Transition <ty1> a to <ty2>
7132   /// b = ToBePromoted <ty2> Def, ...
7134   /// b = ToBePromoted <ty1> a, ...
7135   /// Def = Transition <ty1> ToBePromoted to <ty2>
7136   void promoteImpl(Instruction *ToBePromoted);
7138   /// Check whether or not it is profitable to promote all the
7139   /// instructions enqueued to be promoted.
7140   bool isProfitableToPromote() {
7141     Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
7142     unsigned Index = isa<ConstantInt>(ValIdx)
7143                          ? cast<ConstantInt>(ValIdx)->getZExtValue()
7145     Type *PromotedType = getTransitionType();
7147     StoreInst *ST = cast<StoreInst>(CombineInst);
7148     unsigned AS = ST->getPointerAddressSpace();
7149     // Check if this store is supported.
7150     if (!TLI.allowsMisalignedMemoryAccesses(
7151             TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
7153       // If this is not supported, there is no way we can combine
7154       // the extract with the store.
7158     // The scalar chain of computation has to pay for the transition
7159     // scalar to vector.
7160     // The vector chain has to account for the combining cost.
7161     InstructionCost ScalarCost =
7162         TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
7163     InstructionCost VectorCost = StoreExtractCombineCost;
7164     enum TargetTransformInfo::TargetCostKind CostKind =
7165         TargetTransformInfo::TCK_RecipThroughput;
7166     for (const auto &Inst : InstsToBePromoted) {
7167       // Compute the cost.
7168       // By construction, all instructions being promoted are arithmetic ones.
7169       // Moreover, one argument is a constant that can be viewed as a splat
7171       Value *Arg0 = Inst->getOperand(0);
7172       bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
7173                             isa<ConstantFP>(Arg0);
7174       TargetTransformInfo::OperandValueKind Arg0OVK =
7175           IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
7176                          : TargetTransformInfo::OK_AnyValue;
7177       TargetTransformInfo::OperandValueKind Arg1OVK =
7178           !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
7179                           : TargetTransformInfo::OK_AnyValue;
7180       ScalarCost += TTI.getArithmeticInstrCost(
7181           Inst->getOpcode(), Inst->getType(), CostKind, Arg0OVK, Arg1OVK);
7182       VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
7187                dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
7188                       << ScalarCost << "\nVector: " << VectorCost << '\n');
7189     return ScalarCost > VectorCost;
7192   /// Generate a constant vector with \p Val with the same
7193   /// number of elements as the transition.
7194   /// \p UseSplat defines whether or not \p Val should be replicated
7195   /// across the whole vector.
7196   /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
7197   /// otherwise we generate a vector with as many undef as possible:
7198   /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
7199   /// used at the index of the extract.
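  ///
  /// For example (illustrative), with a <4 x i32> transition whose extract
  /// index is 1, getConstantVector(i32 7, /*UseSplat=*/false) yields
  /// <i32 undef, i32 7, i32 undef, i32 undef>, while UseSplat == true yields
  /// <i32 7, i32 7, i32 7, i32 7>.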
7200   Value *getConstantVector(Constant *Val, bool UseSplat) const {
7201     unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
7203     // If we cannot determine where the constant must be, we have to
7204     // use a splat constant.
7205     Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
7206     if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
7207       ExtractIdx = CstVal->getSExtValue();
7212     ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount();
7214       return ConstantVector::getSplat(EC, Val);
7216     if (!EC.isScalable()) {
7217       SmallVector<Constant *, 4> ConstVec;
7218       UndefValue *UndefVal = UndefValue::get(Val->getType());
7219       for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) {
7220         if (Idx == ExtractIdx)
7221           ConstVec.push_back(Val);
7223           ConstVec.push_back(UndefVal);
7225       return ConstantVector::get(ConstVec);
7228                        "Generate scalable vector for non-splat is unimplemented");
7231   /// Check if promoting to a vector type an operand at \p OperandIdx
7232   /// in \p Use can trigger undefined behavior.
7233   static bool canCauseUndefinedBehavior(const Instruction *Use,
7234                                         unsigned OperandIdx) {
7235     // This is not safe to introduce undef when the operand is on
7236     // the right hand side of a division-like instruction.
7237     if (OperandIdx != 1)
7239     switch (Use->getOpcode()) {
7242     case Instruction::SDiv:
7243     case Instruction::UDiv:
7244     case Instruction::SRem:
7245     case Instruction::URem:
7247     case Instruction::FDiv:
7248     case Instruction::FRem:
7249       return !Use->hasNoNaNs();
7251     llvm_unreachable(nullptr);
7255   VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
7256                       const TargetTransformInfo &TTI, Instruction *Transition,
7257                       unsigned CombineCost)
7258       : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
7259         StoreExtractCombineCost(CombineCost) {
7260     assert(Transition && "Do not know how to promote null");
7263   /// Check if we can promote \p ToBePromoted to \p Type.
7264   bool canPromote(const Instruction *ToBePromoted) const {
7265     // We could support CastInst too.
7266     return isa<BinaryOperator>(ToBePromoted);
7269   /// Check if it is profitable to promote \p ToBePromoted
7270   /// by moving downward the transition through.
7271   bool shouldPromote(const Instruction *ToBePromoted) const {
7272     // Promote only if all the operands can be statically expanded.
7273     // Indeed, we do not want to introduce any new kind of transitions.
7274     for (const Use &U : ToBePromoted->operands()) {
7275       const Value *Val = U.get();
7276       if (Val == getEndOfTransition()) {
7277         // If the use is a division and the transition is on the rhs,
7278         // we cannot promote the operation, otherwise we may create a
7279         // division by zero.
7280         if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
7284       if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
7285           !isa<ConstantFP>(Val))
7288     // Check that the resulting operation is legal.
7289     int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
7292     return StressStoreExtract ||
7293            TLI.isOperationLegalOrCustom(
7294                ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
7297   /// Check whether or not \p Use can be combined
7298   /// with the transition.
7299   /// I.e., is it possible to do Use(Transition) => AnotherUse?
7300   bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
7302   /// Record \p ToBePromoted as part of the chain to be promoted.
7303   void enqueueForPromotion(Instruction *ToBePromoted) {
7304     InstsToBePromoted.push_back(ToBePromoted);
7307   /// Set the instruction that will be combined with the transition.
7308   void recordCombineInstruction(Instruction *ToBeCombined) {
7309     assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
7310     CombineInst = ToBeCombined;
7313   /// Promote all the instructions enqueued for promotion if it is
7315   /// \return True if the promotion happened, false otherwise.
7317     // Check if there is something to promote.
7318     // Right now, if we do not have anything to combine with,
7319     // we assume the promotion is not profitable.
7320     if (InstsToBePromoted.empty() || !CombineInst)
7324     if (!StressStoreExtract && !isProfitableToPromote())
7328     for (auto &ToBePromoted : InstsToBePromoted)
7329       promoteImpl(ToBePromoted);
7330     InstsToBePromoted.clear();
7335 } // end anonymous namespace
7337 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
7338   // At this point, we know that all the operands of ToBePromoted but Def
7339   // can be statically promoted.
7340   // For Def, we need to use its parameter in ToBePromoted:
7341   // b = ToBePromoted ty1 a
7342   // Def = Transition ty1 b to ty2
7343   // Move the transition down.
7344   // 1. Replace all uses of the promoted operation by the transition.
7345   // = ... b => = ... Def.
7346   assert(ToBePromoted->getType() == Transition->getType() &&
7347          "The type of the result of the transition does not match "
7349   ToBePromoted->replaceAllUsesWith(Transition);
7350   // 2. Update the type of the uses.
7351   // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
7352   Type *TransitionTy = getTransitionType();
7353   ToBePromoted->mutateType(TransitionTy);
7354   // 3. Update all the operands of the promoted operation with promoted
7356   // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
7357   for (Use &U : ToBePromoted->operands()) {
7358     Value *Val = U.get();
7359     Value *NewVal = nullptr;
7360     if (Val == Transition)
7361       NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
7362     else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
7363              isa<ConstantFP>(Val)) {
7364       // Use a splat constant if it is not safe to use undef.
7365       NewVal = getConstantVector(
7366           cast<Constant>(Val),
7367           isa<UndefValue>(Val) ||
7368               canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
7370       llvm_unreachable("Did you modify shouldPromote and forget to update "
7372     ToBePromoted->setOperand(U.getOperandNo(), NewVal);
7374   Transition->moveAfter(ToBePromoted);
7375   Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
7378 /// Some targets can do store(extractelement) with one instruction.
7379 /// Try to push the extractelement towards the stores when the target
7380 /// has this feature and this is profitable.
7381 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
7382   unsigned CombineCost = std::numeric_limits<unsigned>::max();
7383   if (DisableStoreExtract ||
7384       (!StressStoreExtract &&
7385        !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
7386                                        Inst->getOperand(1), CombineCost)))
7389   // At this point we know that Inst is a vector to scalar transition.
7390   // Try to move it down the def-use chain, until:
7391   // - We can combine the transition with its single use
7392   //   => we got rid of the transition.
7393   // - We escape the current basic block
7394   //   => we would need to check that we are moving it at a cheaper place and
7395   //      we do not do that for now.
7396   BasicBlock *Parent = Inst->getParent();
7397   LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
7398   VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
7399   // If the transition has more than one use, assume this is not going to be
7401   while (Inst->hasOneUse()) {
7402     Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
7403     LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
7405     if (ToBePromoted->getParent() != Parent) {
7406       LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
7407                         << ToBePromoted->getParent()->getName()
7408                         << ") than the transition (" << Parent->getName()
7413     if (VPH.canCombine(ToBePromoted)) {
7414       LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
7415                         << "will be combined with: " << *ToBePromoted << '\n');
7416       VPH.recordCombineInstruction(ToBePromoted);
7417       bool Changed = VPH.promote();
7418       NumStoreExtractExposed += Changed;
7422     LLVM_DEBUG(dbgs() << "Try promoting.\n");
7423     if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
7426     LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
7428     VPH.enqueueForPromotion(ToBePromoted);
7429     Inst = ToBePromoted;
7434 /// For the instruction sequence of store below, F and I values
7435 /// are bundled together as an i64 value before being stored into memory.
7436 /// Sometimes it is more efficient to generate separate stores for F and I,
7437 /// which can remove the bitwise instructions or sink them to colder places.
7439 /// (store (or (zext (bitcast F to i32) to i64),
7440 /// (shl (zext I to i64), 32)), addr) -->
7441 /// (store F, addr) and (store I, addr+4)
7443 /// Similarly, splitting for other merged store can also be beneficial, like:
7444 /// For pair of {i32, i32}, i64 store --> two i32 stores.
7445 /// For pair of {i32, i16}, i64 store --> two i32 stores.
7446 /// For pair of {i16, i16}, i32 store --> two i16 stores.
7447 /// For pair of {i16, i8}, i32 store --> two i16 stores.
7448 /// For pair of {i8, i8}, i16 store --> two i8 stores.
7450 /// We allow each target to determine specifically which kind of splitting is
7453 /// The store patterns are commonly seen from the simple code snippet below
7454 /// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
7455 /// void goo(const std::pair<int, float> &);
7458 /// goo(std::make_pair(tmp, ftmp));
7462 /// Although we already have similar splitting in DAG Combine, we duplicate
7463 /// it in CodeGenPrepare to catch the case in which the pattern is spread across
7464 /// multiple BBs. The logic in DAG Combine is kept to catch the cases generated
7465 /// during code expansion.
7466 static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
7467                                 const TargetLowering &TLI) {
7468   // Handle simple but common cases only.
7469   Type *StoreType = SI.getValueOperand()->getType();
7471   // The code below assumes shifting a value by <number of bits>,
7472   // whereas scalable vectors would have to be shifted by
7473   // <2log(vscale) + number of bits> in order to store the
7474   // low/high parts. Bailing out for now.
7475   if (isa<ScalableVectorType>(StoreType))
7478   if (!DL.typeSizeEqualsStoreSize(StoreType) ||
7479       DL.getTypeSizeInBits(StoreType) == 0)
7482   unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
7483   Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
7484   if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
7487   // Don't split the store if it is volatile.
7488   if (SI.isVolatile())
7491   // Match the following patterns:
7492   // (store (or (zext LValue to i64),
7493   //            (shl (zext HValue to i64), 32)), HalfValBitSize)
7495   // (store (or (shl (zext HValue to i64), 32)), HalfValBitSize)
7496   //            (zext LValue to i64),
7497   // Expect both operands of OR and the first operand of SHL have only
7499   Value *LValue, *HValue;
7500   if (!match(SI.getValueOperand(),
7501              m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
7502                     m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
7503                                    m_SpecificInt(HalfValBitSize))))))
7506   // Check that LValue and HValue are integers with size less than or equal to
7507   if (!LValue->getType()->isIntegerTy() ||
7508       DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
7509       !HValue->getType()->isIntegerTy() ||
7510       DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
7513   // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
7514   // as the input of target query.
7515   auto *LBC = dyn_cast<BitCastInst>(LValue);
7516   auto *HBC = dyn_cast<BitCastInst>(HValue);
7517   EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
7518                   : EVT::getEVT(LValue->getType());
7519   EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
7520                    : EVT::getEVT(HValue->getType());
7521   if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
7524   // Start to split store.
7525   IRBuilder<> Builder(SI.getContext());
7526   Builder.SetInsertPoint(&SI);
7528   // If LValue/HValue is a bitcast in another BB, create a new one in current
7529   // BB so it may be merged with the split stores by the DAG combiner.
7530   if (LBC && LBC->getParent() != SI.getParent())
7531     LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
7532   if (HBC && HBC->getParent() != SI.getParent())
7533     HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());
7535   bool IsLE = SI.getModule()->getDataLayout().isLittleEndian();
7536   auto CreateSplitStore = [&](Value *V, bool Upper) {
7537     V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
7538     Value *Addr = Builder.CreateBitCast(
7540         SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
7541     Align Alignment = SI.getAlign();
7542     const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper);
7543     if (IsOffsetStore) {
7544       Addr = Builder.CreateGEP(
7545           SplitStoreType, Addr,
7546           ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
7548       // When splitting the store in half, naturally one half will retain the
7549       // alignment of the original wider store, regardless of whether it was
7550       // over-aligned or not, while the other will require adjustment.
7551       Alignment = commonAlignment(Alignment, HalfValBitSize / 8);
7553     Builder.CreateAlignedStore(V, Addr, Alignment);
7556   CreateSplitStore(LValue, false);
7557   CreateSplitStore(HValue, true);
7559   // Delete the old store.
7560   SI.eraseFromParent();
7564 // Return true if the GEP has two operands, the first operand is of a sequential
7565 // type, and the second operand is a constant.
7566 static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
7567   gep_type_iterator I = gep_type_begin(*GEP);
7568   return GEP->getNumOperands() == 2 &&
7570          isa<ConstantInt>(GEP->getOperand(1));
7573 // Try unmerging GEPs to reduce liveness interference (register pressure) across
7574 // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks,
7575 // reducing liveness interference across those edges benefits global register
7576 // allocation. Currently handles only certain cases.
7578 // For example, unmerge %GEPI and %UGEPI as below.
7580 // ---------- BEFORE ----------
7585 // %GEPI = gep %GEPIOp, Idx
7587 // indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
7588 // (* %GEPI is alive on the indirectbr edges due to other uses ahead)
7589 // (* %GEPIOp is alive on the indirectbr edges only because it is used by
7592 // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
7593 // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
7598 // %UGEPI = gep %GEPIOp, UIdx
7600 // ---------------------------
7602 // ---------- AFTER ----------
7604 // ... (same as above)
7605 // (* %GEPI is still alive on the indirectbr edges)
7606 // (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
7612 // %UGEPI = gep %GEPI, (UIdx-Idx)
7614 // ---------------------------
7616 // The register pressure on the IndirectBr edges is reduced because %GEPIOp is
7617 // no longer alive on them.
7619 // We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
7620 // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as
7621 // not to disable further simplifications and optimizations as a result of GEP
7624 // Note this unmerging may increase the length of the data flow critical path
7625 // (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
7626 // between the register pressure and the length of data-flow critical
7627 // path. Restricting this to the uncommon IndirectBr case would minimize the
7628 // impact of potentially longer critical path, if any, and the impact on compile
7630 static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
7631                                              const TargetTransformInfo *TTI) {
7632   BasicBlock *SrcBlock = GEPI->getParent();
7633   // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
7634   // (non-IndirectBr) cases exit early here.
7635   if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
7637   // Check that GEPI is a simple gep with a single constant index.
7638   if (!GEPSequentialConstIndexed(GEPI))
7640   ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
7641   // Check that GEPI is a cheap one.
7642   if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
7643                          TargetTransformInfo::TCK_SizeAndLatency)
7644       > TargetTransformInfo::TCC_Basic)
7646   Value *GEPIOp = GEPI->getOperand(0);
7647   // Check that GEPIOp is an instruction that's also defined in SrcBlock.
7648   if (!isa<Instruction>(GEPIOp))
7650   auto *GEPIOpI = cast<Instruction>(GEPIOp);
7651   if (GEPIOpI->getParent() != SrcBlock)
7653   // Check that GEP is used outside the block, meaning it's alive on the
7654   // IndirectBr edge(s).
7655   if (find_if(GEPI->users(), [&](User *Usr) {
7656         if (auto *I = dyn_cast<Instruction>(Usr)) {
7657           if (I->getParent() != SrcBlock) {
7662       }) == GEPI->users().end())
7664   // The second elements of the GEP chains to be unmerged.
7665   std::vector<GetElementPtrInst *> UGEPIs;
7666   // Check each user of GEPIOp to check if unmerging would make GEPIOp not alive
7667   // on IndirectBr edges.
7668   for (User *Usr : GEPIOp->users()) {
7669     if (Usr == GEPI) continue;
7670     // Check if Usr is an Instruction. If not, give up.
7671     if (!isa<Instruction>(Usr))
7673     auto *UI = cast<Instruction>(Usr);
7674     // Check if Usr in the same block as GEPIOp, which is fine, skip.
7675     if (UI->getParent() == SrcBlock)
7677     // Check if Usr is a GEP. If not, give up.
7678     if (!isa<GetElementPtrInst>(Usr))
7680     auto *UGEPI = cast<GetElementPtrInst>(Usr);
7681     // Check if UGEPI is a simple gep with a single constant index and GEPIOp is
7682     // the pointer operand to it. If so, record it in the vector. If not, give
7684     if (!GEPSequentialConstIndexed(UGEPI))
7686     if (UGEPI->getOperand(0) != GEPIOp)
7688     if (GEPIIdx->getType() !=
7689         cast<ConstantInt>(UGEPI->getOperand(1))->getType())
7691     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
7692     if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
7693                            TargetTransformInfo::TCK_SizeAndLatency)
7694         > TargetTransformInfo::TCC_Basic)
7696     UGEPIs.push_back(UGEPI);
7698   if (UGEPIs.size() == 0)
7700   // Check the materializing cost of (Uidx-Idx).
7701   for (GetElementPtrInst *UGEPI : UGEPIs) {
7702     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
7703     APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
7704     InstructionCost ImmCost = TTI->getIntImmCost(
7705         NewIdx, GEPIIdx->getType(), TargetTransformInfo::TCK_SizeAndLatency);
7706     if (ImmCost > TargetTransformInfo::TCC_Basic)
7709   // Now unmerge between GEPI and UGEPIs.
7710   for (GetElementPtrInst *UGEPI : UGEPIs) {
7711     UGEPI->setOperand(0, GEPI);
7712     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
7713     Constant *NewUGEPIIdx =
7714         ConstantInt::get(GEPIIdx->getType(),
7715                          UGEPIIdx->getValue() - GEPIIdx->getValue());
7716     UGEPI->setOperand(1, NewUGEPIIdx);
7717     // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
7718     // inbounds to avoid UB.
7719     if (!GEPI->isInBounds()) {
7720       UGEPI->setIsInBounds(false);
7723   // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
7724   // alive on IndirectBr edges).
7725   assert(find_if(GEPIOp->users(), [&](User *Usr) {
7726            return cast<Instruction>(Usr)->getParent() != SrcBlock;
7727          }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock");
7731 static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI) {
7733 // %c = icmp ult %x, 8
7738 // %c = icmp eq %tc, 0
7740 // Creating the cmp to zero can be better for the backend, especially if the
7741 // lshr produces flags that can be used automatically.
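  //
  // An illustrative sketch of the intended rewrite (block and value names are
  // hypothetical):
  //   %c  = icmp ult i32 %x, 8
  //   br i1 %c, label %then, label %else
  //   ...
  //   %tc = lshr i32 %x, 3
  // becomes
  //   %tc = lshr i32 %x, 3
  //   %c  = icmp eq i32 %tc, 0
  //   br i1 %c, label %then, label %else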
7742   if (!TLI.preferZeroCompareBranch() || !Branch->isConditional())
7745   ICmpInst *Cmp = dyn_cast<ICmpInst>(Branch->getCondition());
7746   if (!Cmp || !isa<ConstantInt>(Cmp->getOperand(1)) || !Cmp->hasOneUse())
7749   Value *X = Cmp->getOperand(0);
7750   APInt CmpC = cast<ConstantInt>(Cmp->getOperand(1))->getValue();
7752   for (auto *U : X->users()) {
7753     Instruction *UI = dyn_cast<Instruction>(U);
7754     // A quick dominance check
7756         (UI->getParent() != Branch->getParent() &&
7757          UI->getParent() != Branch->getSuccessor(0) &&
7758          UI->getParent() != Branch->getSuccessor(1)) ||
7759         (UI->getParent() != Branch->getParent() &&
7760          !UI->getParent()->getSinglePredecessor()))
7763     if (CmpC.isPowerOf2() && Cmp->getPredicate() == ICmpInst::ICMP_ULT &&
7764         match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2())))) {
7765       IRBuilder<> Builder(Branch);
7766       if (UI->getParent() != Branch->getParent())
7767         UI->moveBefore(Branch);
7768       Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI,
7769                                         ConstantInt::get(UI->getType(), 0));
7770       LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
7771       LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
7772       Cmp->replaceAllUsesWith(NewCmp);
7775     if (Cmp->isEquality() &&
7776         (match(UI, m_Add(m_Specific(X), m_SpecificInt(-CmpC))) ||
7777          match(UI, m_Sub(m_Specific(X), m_SpecificInt(CmpC))))) {
7778       IRBuilder<> Builder(Branch);
7779       if (UI->getParent() != Branch->getParent())
7780         UI->moveBefore(Branch);
7781       Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI,
7782                                         ConstantInt::get(UI->getType(), 0));
7783       LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
7784       LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
7785       Cmp->replaceAllUsesWith(NewCmp);
7792 bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
7793   // Bail out if we inserted the instruction to prevent optimizations from
7794   // stepping on each other's toes.
7795   if (InsertedInsts.count(I))
7798   // TODO: Move into the switch on opcode below here.
7799   if (PHINode *P = dyn_cast<PHINode>(I)) {
7800     // It is possible for very late stage optimizations (such as SimplifyCFG)
7801     // to introduce PHI nodes too late to be cleaned up. If we detect such a
7802     // trivial PHI, go ahead and zap it here.
7803     if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
7804       LargeOffsetGEPMap.erase(P);
7805       P->replaceAllUsesWith(V);
7806       P->eraseFromParent();
7813   if (CastInst *CI = dyn_cast<CastInst>(I)) {
7814     // If the source of the cast is a constant, then this should have
7815     // already been constant folded. The only reason NOT to constant fold
7816     // it is if something (e.g. LSR) was careful to place the constant
7817     // evaluation in a block other than the one that uses it (e.g. to hoist
7818     // the address of globals out of a loop). If this is the case, we don't
7819     // want to forward-subst the cast.
7820     if (isa<Constant>(CI->getOperand(0)))
7823     if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
7826     if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
7827       /// Sink a zext or sext into its user blocks if the target type doesn't
7828       /// fit in one register
7829       if (TLI->getTypeAction(CI->getContext(),
7830                              TLI->getValueType(*DL, CI->getType())) ==
7831           TargetLowering::TypeExpandInteger) {
7832         return SinkCast(CI);
7834         bool MadeChange = optimizeExt(I);
7835         return MadeChange | optimizeExtUses(I);
7841   if (auto *Cmp = dyn_cast<CmpInst>(I))
7842     if (optimizeCmp(Cmp, ModifiedDT))
7845   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
7846     LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
7847     bool Modified = optimizeLoadExt(LI);
7848     unsigned AS = LI->getPointerAddressSpace();
7849     Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
7853   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
7854     if (splitMergedValStore(*SI, *DL, *TLI))
7856     SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
7857     unsigned AS = SI->getPointerAddressSpace();
7858     return optimizeMemoryInst(I, SI->getOperand(1),
7859                               SI->getOperand(0)->getType(), AS);
7862   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
7863     unsigned AS = RMW->getPointerAddressSpace();
7864     return optimizeMemoryInst(I, RMW->getPointerOperand(),
7865                               RMW->getType(), AS);
7868   if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
7869     unsigned AS = CmpX->getPointerAddressSpace();
7870     return optimizeMemoryInst(I, CmpX->getPointerOperand(),
7871                               CmpX->getCompareOperand()->getType(), AS);
7874   BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
7876   if (BinOp && (BinOp->getOpcode() == Instruction::And) && EnableAndCmpSinking)
7877     return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);
7879   // TODO: Move this into the switch on opcode - it handles shifts already.
7880   if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
7881                 BinOp->getOpcode() == Instruction::LShr)) {
7882     ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
7883     if (CI && TLI->hasExtractBitsInsn())
7884       if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
7888   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
7889     if (GEPI->hasAllZeroIndices()) {
7890       /// The GEP operand must be a pointer, so must its result -> BitCast
7891       Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
7892                                         GEPI->getName(), GEPI);
7893       NC->setDebugLoc(GEPI->getDebugLoc());
7894       GEPI->replaceAllUsesWith(NC);
7895       GEPI->eraseFromParent();
7897       optimizeInst(NC, ModifiedDT);
7900     if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
7906   if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
7907     // freeze(icmp a, const)) -> icmp (freeze a), const
7908     // This helps generate efficient conditional jumps.
7909     Instruction *CmpI = nullptr;
7910     if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
7912     else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
7913       CmpI = F->getFastMathFlags().none() ? F : nullptr;
7915     if (CmpI && CmpI->hasOneUse()) {
7916       auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
7917       bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
7918                     isa<ConstantPointerNull>(Op0);
7919       bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
7920                     isa<ConstantPointerNull>(Op1);
7921       if (Const0 || Const1) {
7922         if (!Const0 || !Const1) {
7923           auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI);
7925           CmpI->setOperand(Const0 ? 1 : 0, F);
7927         FI->replaceAllUsesWith(CmpI);
7928         FI->eraseFromParent();
7935   if (tryToSinkFreeOperands(I))
7938   switch (I->getOpcode()) {
7939   case Instruction::Shl:
7940   case Instruction::LShr:
7941   case Instruction::AShr:
7942     return optimizeShiftInst(cast<BinaryOperator>(I));
7943   case Instruction::Call:
7944     return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
7945   case Instruction::Select:
7946     return optimizeSelectInst(cast<SelectInst>(I));
7947   case Instruction::ShuffleVector:
7948     return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
7949   case Instruction::Switch:
7950     return optimizeSwitchInst(cast<SwitchInst>(I));
7951   case Instruction::ExtractElement:
7952     return optimizeExtractElementInst(cast<ExtractElementInst>(I));
7953   case Instruction::Br:
7954     return optimizeBranch(cast<BranchInst>(I), *TLI);
7960 /// Given an OR instruction, check to see if this is a bitreverse
7961 /// idiom. If so, insert the new intrinsic and return true.
7962 bool CodeGenPrepare::makeBitReverse(Instruction &I) {
7963   if (!I.getType()->isIntegerTy() ||
7964       !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
7965                                      TLI->getValueType(*DL, I.getType(), true)))
7968   SmallVector<Instruction *, 4> Insts;
7969   if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
7971   Instruction *LastInst = Insts.back();
7972   I.replaceAllUsesWith(LastInst);
7973   RecursivelyDeleteTriviallyDeadInstructions(
7974       &I, TLInfo, nullptr, [&](Value *V) { removeAllAssertingVHReferences(V); });
7978 // In this pass we look for GEP and cast instructions that are used
7979 // across basic blocks and rewrite them to improve basic-block-at-a-time
7981 bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
7983   bool MadeChange = false;
7985   CurInstIterator = BB.begin();
7986   while (CurInstIterator != BB.end()) {
7987     MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
7992   bool MadeBitReverse = true;
7993   while (MadeBitReverse) {
7994     MadeBitReverse = false;
7995     for (auto &I : reverse(BB)) {
7996       if (makeBitReverse(I)) {
7997         MadeBitReverse = MadeChange = true;
8002   MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);
// Some CGP optimizations may move or alter what's computed in a block. Check
// whether a dbg.value intrinsic could be pointed at a more appropriate operand.
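// For example, when the address operand of a dbg.value has been rewritten in
// this block as a sunk address computation, the dbg.value is retargeted to
// that local computation so it can be lowered accurately.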
bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
  assert(isa<DbgValueInst>(I));
  DbgValueInst &DVI = *cast<DbgValueInst>(I);

  // Does this dbg.value refer to a sunk address calculation?
  bool AnyChange = false;
  SmallDenseSet<Value *> LocationOps(DVI.location_ops().begin(),
                                     DVI.location_ops().end());
  for (Value *Location : LocationOps) {
    WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
    Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
    if (SunkAddr) {
      // Point dbg.value at locally computed address, which should give the best
      // opportunity to be accurately lowered. This update may change the type
      // of pointer being referred to; however this makes no difference to
      // debugging information, and we can't generate bitcasts that may affect
      // codegen.
      DVI.replaceVariableLocationOp(Location, SunkAddr);
      AnyChange = true;
    }
  }

  return AnyChange;
}

// A llvm.dbg.value may be using a value before its definition, due to
// optimizations in this pass and others. Scan for such dbg.values, and rescue
// them by moving the dbg.value to immediately after the value definition.
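// For example, if other transforms leave the IR as
//   call void @llvm.dbg.value(metadata i32 %x, ...)
//   %x = add i32 %a, %b
// the dbg.value is moved to just after the definition of %x.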
// FIXME: Ideally this should never be necessary, and this has the potential
// to re-order dbg.value intrinsics.
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  DominatorTree DT(F);

  for (BasicBlock &BB : F) {
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      if (!DVI)
        continue;

      SmallVector<Instruction *, 4> VIs;
      for (Value *V : DVI->getValues())
        if (Instruction *VI = dyn_cast_or_null<Instruction>(V))
          VIs.push_back(VI);

      // This DVI may depend on multiple instructions, complicating any
      // potential sink. This block takes the defensive approach, opting to
      // "undef" the DVI if it has more than one instruction and any of them do
      // not dominate DVI.
      for (Instruction *VI : VIs) {
        if (VI->isTerminator())
          continue;

        // If VI is a phi in a block with an EHPad terminator, we can't insert
        // after it.
        if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
          continue;

        // If the defining instruction dominates the dbg.value, we do not need
        // to move the dbg.value.
        if (DT.dominates(VI, DVI))
          continue;

        // If we depend on multiple instructions and any of them doesn't
        // dominate this DVI, we probably can't salvage it: moving it to
        // after any of the instructions could cause us to lose the others.
        if (VIs.size() > 1) {
          LLVM_DEBUG(
              dbgs()
              << "Unable to find valid location for Debug Value, undefing:\n"
              << *DVI);
          DVI->setUndef();
          break;
        }

        LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
                          << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}

// Group scattered pseudo probes in a block to favor SelectionDAG. Scattered
// probes can be chained dependencies of other regular DAG nodes and block DAG
// combine optimizations.
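// For example, llvm.pseudoprobe calls that ended up interleaved with ordinary
// instructions are moved up so that they sit together at the first insertion
// point of the block.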
bool CodeGenPrepare::placePseudoProbes(Function &F) {
  bool MadeChange = false;
  for (auto &Block : F) {
    // Move the rest probes to the beginning of the block.
    auto FirstInst = Block.getFirstInsertionPt();
    while (FirstInst != Block.end() && FirstInst->isDebugOrPseudoInst())
      ++FirstInst;
    BasicBlock::iterator I(FirstInst);
    I++;
    while (I != Block.end()) {
      if (auto *II = dyn_cast<PseudoProbeInst>(I++)) {
        II->moveBefore(&*FirstInst);
        MadeChange = true;
      }
    }
  }
  return MadeChange;
}

/// Scale down both weights to fit into uint32_t.
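/// For example, weights {2^33, 2^32} yield Scale = 2^33 / (2^32 - 1) + 1 = 3,
/// so the weights become {2^33 / 3, 2^32 / 3}, both of which fit in uint32_t.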
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F, bool &ModifiedDT) {
  if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or label %dest1, label %dest2
    Instruction *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(),
               m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    // The merging of mostly empty BB can cause a degenerate branch.
    if (TBB == FBB)
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp,
              m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)),
                                        m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    auto IsGoodCond = [](Value *Cond) {
      return match(
          Cond,
          m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()),
                                           m_LogicalOr(m_Value(), m_Value()))));
    };
    if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2))
      continue;

    LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto *TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update original basic block by using the first condition directly by the
    // branch instruction and removing the no longer needed and/or instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch comes now from
    // the newly generated BB (NewBB). In the other successor we need to add one
    // incoming edge to the PHI nodes, because both branch instructions target
    // now the same successor. Depending on the original branch condition
    // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
    // we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    TBB->replacePhiUsesWith(&BB, TmpBB);

    // Add another incoming edge from the new BB.
    for (PHINode &PN : FBB->phis()) {
      auto *Val = PN.getIncomingValueForBlock(&BB);
      PN.addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for NewBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     = TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
      // assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals to TrueProb for
      // TmpBB, but the math is more complicated.
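      // For example, with original weights A = 1 and B = 3, BB1 gets weights
      // {1, 7} and TmpBB gets weights {1, 6}. The probability of reaching
      // TrueBB is then 1/8 + (7/8) * (1/7) = 1/4, which matches the original
      // TrueProb of A/(A+B) = 1/4.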
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(TrueWeight, FalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(TrueWeight, FalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     = FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
      // assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
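      // For example, with original weights A = 3 and B = 1, BB1 gets weights
      // {7, 1} and TmpBB gets weights {6, 1}. The probability of reaching
      // FalseBB is then 1/8 + (7/8) * (1/7) = 1/4, which matches the original
      // FalseProb of B/(A+B) = 1/4.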
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(TrueWeight, FalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(TrueWeight, FalseWeight));
      }
    }

    ModifiedDT = true;
    MadeChange = true;

    LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();